/*
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/bitops.h"
#include "hw/semihosting/semihost.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"

#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    dc_isar_feature(aa32_jazelle, s)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These are TCG temporaries used only by the legacy iwMMXt decoder */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
/* These are TCG globals which alias CPUARMState fields */
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

#include "exec/gen-icount.h"

static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Function prototypes for gen_ functions calling Neon helpers.  */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
/* Function prototypes for gen_ functions for fix point conversions */
typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}

/* Generate a label used for skipping this instruction */
static void arm_gen_condlabel(DisasContext *s)
{
    if (!s->condlabel) {
        s->condlabel = gen_new_label();
        s->condjmp = 1;
    }
}

/*
 * Constant expanders for the decoders.
 */

static int negate(DisasContext *s, int x)
{
    return -x;
}

static int plus_2(DisasContext *s, int x)
{
    return x + 2;
}

static int times_2(DisasContext *s, int x)
{
    return x * 2;
}

static int times_4(DisasContext *s, int x)
{
    return x * 4;
}

/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the callsites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}

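/*
 * Illustrative note (not from the original source): callers encode the Rt
 * register number in the low bits of issinfo and OR in the flags above.
 * For a hypothetical load-acquire byte into r2 the caller would pass
 * (2 | ISSIsAcqRel), and a 16-bit encoded store of r1 would pass
 * (1 | ISSIsWrite | ISSIs16Bit).
 */
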
static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
        return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    default:
        g_assert_not_reached();
    }
}

static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* The architectural value of PC.  */
static uint32_t read_pc(DisasContext *s)
{
    return s->pc_curr + (s->thumb ? 4 : 8);
}

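/*
 * Illustrative note (not from the original source): read_pc() returns the
 * architectural PC, which includes the legacy fetch-pipeline offset.  For a
 * hypothetical A32 instruction at 0x00010000, reading r15 as a source
 * operand yields 0x00010008; the same instruction address in Thumb state
 * would see 0x00010004.
 */
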
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        tcg_gen_movi_i32(var, read_pc(s));
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/*
 * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
 * This is used for load/store for which use of PC implies (literal),
 * or ADD that implies ADR.
 */
static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    if (reg == 15) {
        tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
    } else {
        tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
    }
    return tmp;
}

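/*
 * Illustrative note (not from the original source): the ALIGN(PC, 4) case
 * matters for Thumb literal loads, where the base is the word-aligned PC.
 * For a hypothetical "LDR r0, [pc, #8]" at 0x00010002 in Thumb state, the
 * architectural PC is 0x00010006, which aligns down to 0x00010004, so the
 * load address is 0x0001000c.
 */
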
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    store_reg(s, 13, var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}

static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 dest, TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(dest, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 dest, TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(dest, var);
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 dest, TCGv_i32 var)
{
    tcg_gen_rotri_i32(dest, var, 16);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

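/*
 * Illustrative note (not from the original source): clearing bit 15 of both
 * addends before the single 32-bit add stops a carry out of the low halfword
 * from propagating into the high halfword, and the final XOR with the saved
 * (t0 ^ t1) & 0x8000 restores what bit 15 of the low halfword should be.
 * With hypothetical values t0 = 0x00008000 and t1 = 0x00010001:
 *
 *     tmp = (t0 ^ t1) & 0x8000         = 0x00008000
 *     (t0 & ~0x8000) + (t1 & ~0x8000)  = 0x00010001
 *     sum ^ tmp                        = 0x00018001
 *
 * which matches the per-halfword sums 0x8000 + 0x0001 = 0x8001 (low) and
 * 0x0000 + 0x0001 = 0x0001 (high) with no carry between the halves.
 */
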
/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

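/*
 * Illustrative note (not from the original source): the V flag computed
 * above follows the usual signed-overflow rule for addition: overflow can
 * only occur when both operands have the same sign and the result's sign
 * differs, i.e. bit 31 of (result ^ t0) & ~(t0 ^ t1).  For hypothetical
 * operands t0 = t1 = 0x7fffffff with CF = 0 the result is 0xfffffffe, so
 * (result ^ t0) has bit 31 set while (t0 ^ t1) does not, and V is set.
 */
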
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

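/*
 * Illustrative note (not from the original source): the helpers generated by
 * GEN_SHIFT() implement the ARM shift-by-register rule that only the low
 * byte of the shift count is used and that counts of 32 or more produce 0,
 * which a plain TCG shift would leave unspecified.  gen_sar() instead clamps
 * the count to 31, so a hypothetical ASR by 0x40 of 0x80000000 still yields
 * 0xffffffff (all copies of the sign bit) rather than an undefined value.
 */
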
static void shifter_out_im(TCGv_i32 var, int shift)
{
    tcg_gen_extract_i32(cpu_CF, var, shift, 1);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, 32 - shift);
            }
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0) {
            shift = 32;
        }
        if (flags) {
            shifter_out_im(var, shift - 1);
        }
        if (shift == 32) {
            shift = 31;
        }
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_rotri_i32(var, var, shift);
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags) {
                shifter_out_im(var, 0);
            }
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
        break;
    }
}

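/*
 * Illustrative note (not from the original source): the immediate-shift
 * encoding reuses shift == 0 for special cases, which is why the cases above
 * cannot simply emit one TCG shift.  For example, a hypothetical
 * "MOVS r0, r1, LSR #32" is encoded with shift == 0 and type LSR: the code
 * copies bit 31 of the source into CF and forces the result to 0, matching
 * the architectural LSR #32 behaviour rather than shifting by zero.
 */
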
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3:
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift);
            break;
        }
    }
    tcg_temp_free_i32(shift);
}

640 * Generate a conditional based on ARM condition code cc.
641 * This is common between ARM and Aarch64 targets.
643 void arm_test_cc(DisasCompare
*cmp
, int cc
)
674 case 8: /* hi: C && !Z */
675 case 9: /* ls: !C || Z -> !(C && !Z) */
677 value
= tcg_temp_new_i32();
679 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
680 ZF is non-zero for !Z; so AND the two subexpressions. */
681 tcg_gen_neg_i32(value
, cpu_CF
);
682 tcg_gen_and_i32(value
, value
, cpu_ZF
);
685 case 10: /* ge: N == V -> N ^ V == 0 */
686 case 11: /* lt: N != V -> N ^ V != 0 */
687 /* Since we're only interested in the sign bit, == 0 is >= 0. */
689 value
= tcg_temp_new_i32();
691 tcg_gen_xor_i32(value
, cpu_VF
, cpu_NF
);
694 case 12: /* gt: !Z && N == V */
695 case 13: /* le: Z || N != V */
697 value
= tcg_temp_new_i32();
699 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
700 * the sign bit then AND with ZF to yield the result. */
701 tcg_gen_xor_i32(value
, cpu_VF
, cpu_NF
);
702 tcg_gen_sari_i32(value
, value
, 31);
703 tcg_gen_andc_i32(value
, cpu_ZF
, value
);
706 case 14: /* always */
707 case 15: /* always */
708 /* Use the ALWAYS condition, which will fold early.
709 * It doesn't matter what we use for the value. */
710 cond
= TCG_COND_ALWAYS
;
715 fprintf(stderr
, "Bad condition code 0x%x\n", cc
);
720 cond
= tcg_invert_cond(cond
);
726 cmp
->value_global
= global
;
729 void arm_free_cc(DisasCompare
*cmp
)
731 if (!cmp
->value_global
) {
732 tcg_temp_free_i32(cmp
->value
);
736 void arm_jump_cc(DisasCompare
*cmp
, TCGLabel
*label
)
738 tcg_gen_brcondi_i32(cmp
->cond
, cmp
->value
, 0, label
);
741 void arm_gen_test_cc(int cc
, TCGLabel
*label
)
744 arm_test_cc(&cmp
, cc
);
745 arm_jump_cc(&cmp
, label
);
749 static inline void gen_set_condexec(DisasContext
*s
)
751 if (s
->condexec_mask
) {
752 uint32_t val
= (s
->condexec_cond
<< 4) | (s
->condexec_mask
>> 1);
753 TCGv_i32 tmp
= tcg_temp_new_i32();
754 tcg_gen_movi_i32(tmp
, val
);
755 store_cpu_field(tmp
, condexec_bits
);
759 static inline void gen_set_pc_im(DisasContext
*s
, target_ulong val
)
761 tcg_gen_movi_i32(cpu_R
[15], val
);
764 /* Set PC and Thumb state from var. var is marked as dead. */
765 static inline void gen_bx(DisasContext
*s
, TCGv_i32 var
)
767 s
->base
.is_jmp
= DISAS_JUMP
;
768 tcg_gen_andi_i32(cpu_R
[15], var
, ~1);
769 tcg_gen_andi_i32(var
, var
, 1);
770 store_cpu_field(var
, thumb
);
774 * Set PC and Thumb state from var. var is marked as dead.
775 * For M-profile CPUs, include logic to detect exception-return
776 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
777 * and BX reg, and no others, and happens only for code in Handler mode.
778 * The Security Extension also requires us to check for the FNC_RETURN
779 * which signals a function return from non-secure state; this can happen
780 * in both Handler and Thread mode.
781 * To avoid having to do multiple comparisons in inline generated code,
782 * we make the check we do here loose, so it will match for EXC_RETURN
783 * in Thread mode. For system emulation do_v7m_exception_exit() checks
784 * for these spurious cases and returns without doing anything (giving
785 * the same behaviour as for a branch to a non-magic address).
787 * In linux-user mode it is unclear what the right behaviour for an
788 * attempted FNC_RETURN should be, because in real hardware this will go
789 * directly to Secure code (ie not the Linux kernel) which will then treat
790 * the error in any way it chooses. For QEMU we opt to make the FNC_RETURN
791 * attempt behave the way it would on a CPU without the security extension,
792 * which is to say "like a normal branch". That means we can simply treat
793 * all branches as normal with no magic address behaviour.
795 static inline void gen_bx_excret(DisasContext
*s
, TCGv_i32 var
)
797 /* Generate the same code here as for a simple bx, but flag via
798 * s->base.is_jmp that we need to do the rest of the work later.
801 #ifndef CONFIG_USER_ONLY
802 if (arm_dc_feature(s
, ARM_FEATURE_M_SECURITY
) ||
803 (s
->v7m_handler_mode
&& arm_dc_feature(s
, ARM_FEATURE_M
))) {
804 s
->base
.is_jmp
= DISAS_BX_EXCRET
;
809 static inline void gen_bx_excret_final_code(DisasContext
*s
)
811 /* Generate the code to finish possible exception return and end the TB */
812 TCGLabel
*excret_label
= gen_new_label();
815 if (arm_dc_feature(s
, ARM_FEATURE_M_SECURITY
)) {
816 /* Covers FNC_RETURN and EXC_RETURN magic */
817 min_magic
= FNC_RETURN_MIN_MAGIC
;
819 /* EXC_RETURN magic only */
820 min_magic
= EXC_RETURN_MIN_MAGIC
;
823 /* Is the new PC value in the magic range indicating exception return? */
824 tcg_gen_brcondi_i32(TCG_COND_GEU
, cpu_R
[15], min_magic
, excret_label
);
825 /* No: end the TB as we would for a DISAS_JMP */
826 if (is_singlestepping(s
)) {
827 gen_singlestep_exception(s
);
829 tcg_gen_exit_tb(NULL
, 0);
831 gen_set_label(excret_label
);
832 /* Yes: this is an exception return.
833 * At this point in runtime env->regs[15] and env->thumb will hold
834 * the exception-return magic number, which do_v7m_exception_exit()
835 * will read. Nothing else will be able to see those values because
836 * the cpu-exec main loop guarantees that we will always go straight
837 * from raising the exception to the exception-handling code.
839 * gen_ss_advance(s) does nothing on M profile currently but
840 * calling it is conceptually the right thing as we have executed
841 * this instruction (compare SWI, HVC, SMC handling).
844 gen_exception_internal(EXCP_EXCEPTION_EXIT
);
847 static inline void gen_bxns(DisasContext
*s
, int rm
)
849 TCGv_i32 var
= load_reg(s
, rm
);
851 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
852 * we need to sync state before calling it, but:
853 * - we don't need to do gen_set_pc_im() because the bxns helper will
854 * always set the PC itself
855 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
856 * unless it's outside an IT block or the last insn in an IT block,
857 * so we know that condexec == 0 (already set at the top of the TB)
858 * is correct in the non-UNPREDICTABLE cases, and we can choose
859 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
861 gen_helper_v7m_bxns(cpu_env
, var
);
862 tcg_temp_free_i32(var
);
863 s
->base
.is_jmp
= DISAS_EXIT
;
866 static inline void gen_blxns(DisasContext
*s
, int rm
)
868 TCGv_i32 var
= load_reg(s
, rm
);
870 /* We don't need to sync condexec state, for the same reason as bxns.
871 * We do however need to set the PC, because the blxns helper reads it.
872 * The blxns helper may throw an exception.
874 gen_set_pc_im(s
, s
->base
.pc_next
);
875 gen_helper_v7m_blxns(cpu_env
, var
);
876 tcg_temp_free_i32(var
);
877 s
->base
.is_jmp
= DISAS_EXIT
;
880 /* Variant of store_reg which uses branch&exchange logic when storing
881 to r15 in ARM architecture v7 and above. The source must be a temporary
882 and will be marked as dead. */
883 static inline void store_reg_bx(DisasContext
*s
, int reg
, TCGv_i32 var
)
885 if (reg
== 15 && ENABLE_ARCH_7
) {
888 store_reg(s
, reg
, var
);
892 /* Variant of store_reg which uses branch&exchange logic when storing
893 * to r15 in ARM architecture v5T and above. This is used for storing
894 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
895 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
896 static inline void store_reg_from_load(DisasContext
*s
, int reg
, TCGv_i32 var
)
898 if (reg
== 15 && ENABLE_ARCH_5
) {
899 gen_bx_excret(s
, var
);
901 store_reg(s
, reg
, var
);
#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}

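/*
 * Illustrative note (not from the original source): with SCTLR.B set (BE32),
 * sub-word accesses are implemented by XORing the low address bits rather
 * than byteswapping the data.  For a hypothetical halfword load (MO_16, so
 * 4 - (1 << size) == 2) from address 0x1000 the access is redirected to
 * 0x1002, and a byte load from 0x1000 goes to 0x1003.
 */
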
931 static void gen_aa32_ld_i32(DisasContext
*s
, TCGv_i32 val
, TCGv_i32 a32
,
932 int index
, MemOp opc
)
936 if (arm_dc_feature(s
, ARM_FEATURE_M
) &&
937 !arm_dc_feature(s
, ARM_FEATURE_M_MAIN
)) {
941 addr
= gen_aa32_addr(s
, a32
, opc
);
942 tcg_gen_qemu_ld_i32(val
, addr
, index
, opc
);
946 static void gen_aa32_st_i32(DisasContext
*s
, TCGv_i32 val
, TCGv_i32 a32
,
947 int index
, MemOp opc
)
951 if (arm_dc_feature(s
, ARM_FEATURE_M
) &&
952 !arm_dc_feature(s
, ARM_FEATURE_M_MAIN
)) {
956 addr
= gen_aa32_addr(s
, a32
, opc
);
957 tcg_gen_qemu_st_i32(val
, addr
, index
, opc
);
961 #define DO_GEN_LD(SUFF, OPC) \
962 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
963 TCGv_i32 a32, int index) \
965 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
968 #define DO_GEN_ST(SUFF, OPC) \
969 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
970 TCGv_i32 a32, int index) \
972 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
975 static inline void gen_aa32_frob64(DisasContext
*s
, TCGv_i64 val
)
977 /* Not needed for user-mode BE32, where we use MO_BE instead. */
978 if (!IS_USER_ONLY
&& s
->sctlr_b
) {
979 tcg_gen_rotri_i64(val
, val
, 32);
983 static void gen_aa32_ld_i64(DisasContext
*s
, TCGv_i64 val
, TCGv_i32 a32
,
984 int index
, MemOp opc
)
986 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
987 tcg_gen_qemu_ld_i64(val
, addr
, index
, opc
);
988 gen_aa32_frob64(s
, val
);
992 static inline void gen_aa32_ld64(DisasContext
*s
, TCGv_i64 val
,
993 TCGv_i32 a32
, int index
)
995 gen_aa32_ld_i64(s
, val
, a32
, index
, MO_Q
| s
->be_data
);
998 static void gen_aa32_st_i64(DisasContext
*s
, TCGv_i64 val
, TCGv_i32 a32
,
999 int index
, MemOp opc
)
1001 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
1003 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1004 if (!IS_USER_ONLY
&& s
->sctlr_b
) {
1005 TCGv_i64 tmp
= tcg_temp_new_i64();
1006 tcg_gen_rotri_i64(tmp
, val
, 32);
1007 tcg_gen_qemu_st_i64(tmp
, addr
, index
, opc
);
1008 tcg_temp_free_i64(tmp
);
1010 tcg_gen_qemu_st_i64(val
, addr
, index
, opc
);
1012 tcg_temp_free(addr
);
1015 static inline void gen_aa32_st64(DisasContext
*s
, TCGv_i64 val
,
1016 TCGv_i32 a32
, int index
)
1018 gen_aa32_st_i64(s
, val
, a32
, index
, MO_Q
| s
->be_data
);
1021 DO_GEN_LD(8u, MO_UB
)
1022 DO_GEN_LD(16u, MO_UW
)
1023 DO_GEN_LD(32u, MO_UL
)
1025 DO_GEN_ST(16, MO_UW
)
1026 DO_GEN_ST(32, MO_UL
)
1028 static inline void gen_hvc(DisasContext
*s
, int imm16
)
1030 /* The pre HVC helper handles cases when HVC gets trapped
1031 * as an undefined insn by runtime configuration (ie before
1032 * the insn really executes).
1034 gen_set_pc_im(s
, s
->pc_curr
);
1035 gen_helper_pre_hvc(cpu_env
);
1036 /* Otherwise we will treat this as a real exception which
1037 * happens after execution of the insn. (The distinction matters
1038 * for the PC value reported to the exception handler and also
1039 * for single stepping.)
1042 gen_set_pc_im(s
, s
->base
.pc_next
);
1043 s
->base
.is_jmp
= DISAS_HVC
;
1046 static inline void gen_smc(DisasContext
*s
)
1048 /* As with HVC, we may take an exception either before or after
1049 * the insn executes.
1053 gen_set_pc_im(s
, s
->pc_curr
);
1054 tmp
= tcg_const_i32(syn_aa32_smc());
1055 gen_helper_pre_smc(cpu_env
, tmp
);
1056 tcg_temp_free_i32(tmp
);
1057 gen_set_pc_im(s
, s
->base
.pc_next
);
1058 s
->base
.is_jmp
= DISAS_SMC
;
1061 static void gen_exception_internal_insn(DisasContext
*s
, uint32_t pc
, int excp
)
1063 gen_set_condexec(s
);
1064 gen_set_pc_im(s
, pc
);
1065 gen_exception_internal(excp
);
1066 s
->base
.is_jmp
= DISAS_NORETURN
;
1069 static void gen_exception_insn(DisasContext
*s
, uint32_t pc
, int excp
,
1070 int syn
, uint32_t target_el
)
1072 gen_set_condexec(s
);
1073 gen_set_pc_im(s
, pc
);
1074 gen_exception(excp
, syn
, target_el
);
1075 s
->base
.is_jmp
= DISAS_NORETURN
;
1078 static void gen_exception_bkpt_insn(DisasContext
*s
, uint32_t syn
)
1082 gen_set_condexec(s
);
1083 gen_set_pc_im(s
, s
->pc_curr
);
1084 tcg_syn
= tcg_const_i32(syn
);
1085 gen_helper_exception_bkpt_insn(cpu_env
, tcg_syn
);
1086 tcg_temp_free_i32(tcg_syn
);
1087 s
->base
.is_jmp
= DISAS_NORETURN
;
1090 static void unallocated_encoding(DisasContext
*s
)
1092 /* Unallocated and reserved encodings are uncategorized */
1093 gen_exception_insn(s
, s
->pc_curr
, EXCP_UDEF
, syn_uncategorized(),
1094 default_exception_el(s
));
1097 static void gen_exception_el(DisasContext
*s
, int excp
, uint32_t syn
,
1103 gen_set_condexec(s
);
1104 gen_set_pc_im(s
, s
->pc_curr
);
1105 tcg_excp
= tcg_const_i32(excp
);
1106 tcg_syn
= tcg_const_i32(syn
);
1107 gen_helper_exception_with_syndrome(cpu_env
, tcg_excp
, tcg_syn
, tcg_el
);
1108 tcg_temp_free_i32(tcg_syn
);
1109 tcg_temp_free_i32(tcg_excp
);
1110 s
->base
.is_jmp
= DISAS_NORETURN
;
1113 /* Force a TB lookup after an instruction that changes the CPU state. */
1114 static inline void gen_lookup_tb(DisasContext
*s
)
1116 tcg_gen_movi_i32(cpu_R
[15], s
->base
.pc_next
);
1117 s
->base
.is_jmp
= DISAS_EXIT
;
1120 static inline void gen_hlt(DisasContext
*s
, int imm
)
1122 /* HLT. This has two purposes.
1123 * Architecturally, it is an external halting debug instruction.
1124 * Since QEMU doesn't implement external debug, we treat this as
1125 * it is required for halting debug disabled: it will UNDEF.
1126 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1127 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1128 * must trigger semihosting even for ARMv7 and earlier, where
1129 * HLT was an undefined encoding.
1130 * In system mode, we don't allow userspace access to
1131 * semihosting, to provide some semblance of security
1132 * (and for consistency with our 32-bit semihosting).
1134 if (semihosting_enabled() &&
1135 #ifndef CONFIG_USER_ONLY
1136 s
->current_el
!= 0 &&
1138 (imm
== (s
->thumb
? 0x3c : 0xf000))) {
1139 gen_exception_internal_insn(s
, s
->pc_curr
, EXCP_SEMIHOST
);
1143 unallocated_encoding(s
);
/*
 * Return the offset of a "full" NEON Dreg.
 */
static long neon_full_reg_offset(unsigned reg)
{
    return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
}

/*
 * Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
 * where 0 is the least significant end of the register.
 */
static long neon_element_offset(int reg, int element, MemOp memop)
{
    int element_size = 1 << (memop & MO_SIZE);
    int ofs = element * element_size;
#ifdef HOST_WORDS_BIGENDIAN
    /*
     * Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     */
    if (element_size < 8) {
        ofs ^= 8 - element_size;
    }
#endif
    return neon_full_reg_offset(reg) + ofs;
}

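/*
 * Illustrative note (not from the original source): on a big-endian host the
 * bytes within each 8-byte unit of the register file are stored in reversed
 * order, so the XOR above flips an element's position within its 64-bit
 * half.  For a hypothetical 16-bit element (element_size == 2), element 0
 * has little-endian offset 0 but sits at offset 0 ^ 6 == 6 on a big-endian
 * host, while element 1 moves from offset 2 to 2 ^ 6 == 4.
 */
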
1174 /* Return the offset of a VFP Dreg (dp = true) or VFP Sreg (dp = false). */
1175 static long vfp_reg_offset(bool dp
, unsigned reg
)
1178 return neon_element_offset(reg
, 0, MO_64
);
1180 return neon_element_offset(reg
>> 1, reg
& 1, MO_32
);
1184 static inline void vfp_load_reg64(TCGv_i64 var
, int reg
)
1186 tcg_gen_ld_i64(var
, cpu_env
, vfp_reg_offset(true, reg
));
1189 static inline void vfp_store_reg64(TCGv_i64 var
, int reg
)
1191 tcg_gen_st_i64(var
, cpu_env
, vfp_reg_offset(true, reg
));
1194 static inline void vfp_load_reg32(TCGv_i32 var
, int reg
)
1196 tcg_gen_ld_i32(var
, cpu_env
, vfp_reg_offset(false, reg
));
1199 static inline void vfp_store_reg32(TCGv_i32 var
, int reg
)
1201 tcg_gen_st_i32(var
, cpu_env
, vfp_reg_offset(false, reg
));
1204 static void read_neon_element32(TCGv_i32 dest
, int reg
, int ele
, MemOp memop
)
1206 long off
= neon_element_offset(reg
, ele
, memop
);
1210 tcg_gen_ld8s_i32(dest
, cpu_env
, off
);
1213 tcg_gen_ld8u_i32(dest
, cpu_env
, off
);
1216 tcg_gen_ld16s_i32(dest
, cpu_env
, off
);
1219 tcg_gen_ld16u_i32(dest
, cpu_env
, off
);
1223 tcg_gen_ld_i32(dest
, cpu_env
, off
);
1226 g_assert_not_reached();
1230 static void read_neon_element64(TCGv_i64 dest
, int reg
, int ele
, MemOp memop
)
1232 long off
= neon_element_offset(reg
, ele
, memop
);
1236 tcg_gen_ld32s_i64(dest
, cpu_env
, off
);
1239 tcg_gen_ld32u_i64(dest
, cpu_env
, off
);
1242 tcg_gen_ld_i64(dest
, cpu_env
, off
);
1245 g_assert_not_reached();
1249 static void write_neon_element32(TCGv_i32 src
, int reg
, int ele
, MemOp memop
)
1251 long off
= neon_element_offset(reg
, ele
, memop
);
1255 tcg_gen_st8_i32(src
, cpu_env
, off
);
1258 tcg_gen_st16_i32(src
, cpu_env
, off
);
1261 tcg_gen_st_i32(src
, cpu_env
, off
);
1264 g_assert_not_reached();
1268 static void write_neon_element64(TCGv_i64 src
, int reg
, int ele
, MemOp memop
)
1270 long off
= neon_element_offset(reg
, ele
, memop
);
1274 tcg_gen_st32_i64(src
, cpu_env
, off
);
1277 tcg_gen_st_i64(src
, cpu_env
, off
);
1280 g_assert_not_reached();
1284 static TCGv_ptr
vfp_reg_ptr(bool dp
, int reg
)
1286 TCGv_ptr ret
= tcg_temp_new_ptr();
1287 tcg_gen_addi_ptr(ret
, cpu_env
, vfp_reg_offset(dp
, reg
));
1291 #define ARM_CP_RW_BIT (1 << 20)
1293 /* Include the VFP and Neon decoders */
1294 #include "decode-m-nocp.c.inc"
1295 #include "translate-vfp.c.inc"
1296 #include "translate-neon.c.inc"
1298 static inline void iwmmxt_load_reg(TCGv_i64 var
, int reg
)
1300 tcg_gen_ld_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1303 static inline void iwmmxt_store_reg(TCGv_i64 var
, int reg
)
1305 tcg_gen_st_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1308 static inline TCGv_i32
iwmmxt_load_creg(int reg
)
1310 TCGv_i32 var
= tcg_temp_new_i32();
1311 tcg_gen_ld_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1315 static inline void iwmmxt_store_creg(int reg
, TCGv_i32 var
)
1317 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1318 tcg_temp_free_i32(var
);
1321 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn
)
1323 iwmmxt_store_reg(cpu_M0
, rn
);
1326 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn
)
1328 iwmmxt_load_reg(cpu_M0
, rn
);
1331 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn
)
1333 iwmmxt_load_reg(cpu_V1
, rn
);
1334 tcg_gen_or_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1337 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn
)
1339 iwmmxt_load_reg(cpu_V1
, rn
);
1340 tcg_gen_and_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1343 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn
)
1345 iwmmxt_load_reg(cpu_V1
, rn
);
1346 tcg_gen_xor_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1349 #define IWMMXT_OP(name) \
1350 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1352 iwmmxt_load_reg(cpu_V1, rn); \
1353 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1356 #define IWMMXT_OP_ENV(name) \
1357 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1359 iwmmxt_load_reg(cpu_V1, rn); \
1360 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1363 #define IWMMXT_OP_ENV_SIZE(name) \
1364 IWMMXT_OP_ENV(name##b) \
1365 IWMMXT_OP_ENV(name##w) \
1366 IWMMXT_OP_ENV(name##l)
1368 #define IWMMXT_OP_ENV1(name) \
1369 static inline void gen_op_iwmmxt_##name##_M0(void) \
1371 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1385 IWMMXT_OP_ENV_SIZE(unpackl
)
1386 IWMMXT_OP_ENV_SIZE(unpackh
)
1388 IWMMXT_OP_ENV1(unpacklub
)
1389 IWMMXT_OP_ENV1(unpackluw
)
1390 IWMMXT_OP_ENV1(unpacklul
)
1391 IWMMXT_OP_ENV1(unpackhub
)
1392 IWMMXT_OP_ENV1(unpackhuw
)
1393 IWMMXT_OP_ENV1(unpackhul
)
1394 IWMMXT_OP_ENV1(unpacklsb
)
1395 IWMMXT_OP_ENV1(unpacklsw
)
1396 IWMMXT_OP_ENV1(unpacklsl
)
1397 IWMMXT_OP_ENV1(unpackhsb
)
1398 IWMMXT_OP_ENV1(unpackhsw
)
1399 IWMMXT_OP_ENV1(unpackhsl
)
1401 IWMMXT_OP_ENV_SIZE(cmpeq
)
1402 IWMMXT_OP_ENV_SIZE(cmpgtu
)
1403 IWMMXT_OP_ENV_SIZE(cmpgts
)
1405 IWMMXT_OP_ENV_SIZE(mins
)
1406 IWMMXT_OP_ENV_SIZE(minu
)
1407 IWMMXT_OP_ENV_SIZE(maxs
)
1408 IWMMXT_OP_ENV_SIZE(maxu
)
1410 IWMMXT_OP_ENV_SIZE(subn
)
1411 IWMMXT_OP_ENV_SIZE(addn
)
1412 IWMMXT_OP_ENV_SIZE(subu
)
1413 IWMMXT_OP_ENV_SIZE(addu
)
1414 IWMMXT_OP_ENV_SIZE(subs
)
1415 IWMMXT_OP_ENV_SIZE(adds
)
1417 IWMMXT_OP_ENV(avgb0
)
1418 IWMMXT_OP_ENV(avgb1
)
1419 IWMMXT_OP_ENV(avgw0
)
1420 IWMMXT_OP_ENV(avgw1
)
1422 IWMMXT_OP_ENV(packuw
)
1423 IWMMXT_OP_ENV(packul
)
1424 IWMMXT_OP_ENV(packuq
)
1425 IWMMXT_OP_ENV(packsw
)
1426 IWMMXT_OP_ENV(packsl
)
1427 IWMMXT_OP_ENV(packsq
)
1429 static void gen_op_iwmmxt_set_mup(void)
1432 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1433 tcg_gen_ori_i32(tmp
, tmp
, 2);
1434 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1437 static void gen_op_iwmmxt_set_cup(void)
1440 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1441 tcg_gen_ori_i32(tmp
, tmp
, 1);
1442 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1445 static void gen_op_iwmmxt_setpsr_nz(void)
1447 TCGv_i32 tmp
= tcg_temp_new_i32();
1448 gen_helper_iwmmxt_setpsr_nz(tmp
, cpu_M0
);
1449 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCASF
]);
1452 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn
)
1454 iwmmxt_load_reg(cpu_V1
, rn
);
1455 tcg_gen_ext32u_i64(cpu_V1
, cpu_V1
);
1456 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1459 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
,
1466 rd
= (insn
>> 16) & 0xf;
1467 tmp
= load_reg(s
, rd
);
1469 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
1470 if (insn
& (1 << 24)) {
1472 if (insn
& (1 << 23))
1473 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1475 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1476 tcg_gen_mov_i32(dest
, tmp
);
1477 if (insn
& (1 << 21))
1478 store_reg(s
, rd
, tmp
);
1480 tcg_temp_free_i32(tmp
);
1481 } else if (insn
& (1 << 21)) {
1483 tcg_gen_mov_i32(dest
, tmp
);
1484 if (insn
& (1 << 23))
1485 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1487 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1488 store_reg(s
, rd
, tmp
);
1489 } else if (!(insn
& (1 << 23)))
1494 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
, TCGv_i32 dest
)
1496 int rd
= (insn
>> 0) & 0xf;
1499 if (insn
& (1 << 8)) {
1500 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
) {
1503 tmp
= iwmmxt_load_creg(rd
);
1506 tmp
= tcg_temp_new_i32();
1507 iwmmxt_load_reg(cpu_V0
, rd
);
1508 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
1510 tcg_gen_andi_i32(tmp
, tmp
, mask
);
1511 tcg_gen_mov_i32(dest
, tmp
);
1512 tcg_temp_free_i32(tmp
);
1516 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1517 (ie. an undefined instruction). */
1518 static int disas_iwmmxt_insn(DisasContext
*s
, uint32_t insn
)
1521 int rdhi
, rdlo
, rd0
, rd1
, i
;
1523 TCGv_i32 tmp
, tmp2
, tmp3
;
1525 if ((insn
& 0x0e000e00) == 0x0c000000) {
1526 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1528 rdlo
= (insn
>> 12) & 0xf;
1529 rdhi
= (insn
>> 16) & 0xf;
1530 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1531 iwmmxt_load_reg(cpu_V0
, wrd
);
1532 tcg_gen_extrl_i64_i32(cpu_R
[rdlo
], cpu_V0
);
1533 tcg_gen_extrh_i64_i32(cpu_R
[rdhi
], cpu_V0
);
1534 } else { /* TMCRR */
1535 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
1536 iwmmxt_store_reg(cpu_V0
, wrd
);
1537 gen_op_iwmmxt_set_mup();
1542 wrd
= (insn
>> 12) & 0xf;
1543 addr
= tcg_temp_new_i32();
1544 if (gen_iwmmxt_address(s
, insn
, addr
)) {
1545 tcg_temp_free_i32(addr
);
1548 if (insn
& ARM_CP_RW_BIT
) {
1549 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1550 tmp
= tcg_temp_new_i32();
1551 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
1552 iwmmxt_store_creg(wrd
, tmp
);
1555 if (insn
& (1 << 8)) {
1556 if (insn
& (1 << 22)) { /* WLDRD */
1557 gen_aa32_ld64(s
, cpu_M0
, addr
, get_mem_index(s
));
1559 } else { /* WLDRW wRd */
1560 tmp
= tcg_temp_new_i32();
1561 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
1564 tmp
= tcg_temp_new_i32();
1565 if (insn
& (1 << 22)) { /* WLDRH */
1566 gen_aa32_ld16u(s
, tmp
, addr
, get_mem_index(s
));
1567 } else { /* WLDRB */
1568 gen_aa32_ld8u(s
, tmp
, addr
, get_mem_index(s
));
1572 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1573 tcg_temp_free_i32(tmp
);
1575 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1578 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1579 tmp
= iwmmxt_load_creg(wrd
);
1580 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
1582 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1583 tmp
= tcg_temp_new_i32();
1584 if (insn
& (1 << 8)) {
1585 if (insn
& (1 << 22)) { /* WSTRD */
1586 gen_aa32_st64(s
, cpu_M0
, addr
, get_mem_index(s
));
1587 } else { /* WSTRW wRd */
1588 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1589 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
1592 if (insn
& (1 << 22)) { /* WSTRH */
1593 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1594 gen_aa32_st16(s
, tmp
, addr
, get_mem_index(s
));
1595 } else { /* WSTRB */
1596 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1597 gen_aa32_st8(s
, tmp
, addr
, get_mem_index(s
));
1601 tcg_temp_free_i32(tmp
);
1603 tcg_temp_free_i32(addr
);
1607 if ((insn
& 0x0f000000) != 0x0e000000)
1610 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1611 case 0x000: /* WOR */
1612 wrd
= (insn
>> 12) & 0xf;
1613 rd0
= (insn
>> 0) & 0xf;
1614 rd1
= (insn
>> 16) & 0xf;
1615 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1616 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1617 gen_op_iwmmxt_setpsr_nz();
1618 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1619 gen_op_iwmmxt_set_mup();
1620 gen_op_iwmmxt_set_cup();
1622 case 0x011: /* TMCR */
1625 rd
= (insn
>> 12) & 0xf;
1626 wrd
= (insn
>> 16) & 0xf;
1628 case ARM_IWMMXT_wCID
:
1629 case ARM_IWMMXT_wCASF
:
1631 case ARM_IWMMXT_wCon
:
1632 gen_op_iwmmxt_set_cup();
1634 case ARM_IWMMXT_wCSSF
:
1635 tmp
= iwmmxt_load_creg(wrd
);
1636 tmp2
= load_reg(s
, rd
);
1637 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1638 tcg_temp_free_i32(tmp2
);
1639 iwmmxt_store_creg(wrd
, tmp
);
1641 case ARM_IWMMXT_wCGR0
:
1642 case ARM_IWMMXT_wCGR1
:
1643 case ARM_IWMMXT_wCGR2
:
1644 case ARM_IWMMXT_wCGR3
:
1645 gen_op_iwmmxt_set_cup();
1646 tmp
= load_reg(s
, rd
);
1647 iwmmxt_store_creg(wrd
, tmp
);
1653 case 0x100: /* WXOR */
1654 wrd
= (insn
>> 12) & 0xf;
1655 rd0
= (insn
>> 0) & 0xf;
1656 rd1
= (insn
>> 16) & 0xf;
1657 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1658 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1659 gen_op_iwmmxt_setpsr_nz();
1660 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1661 gen_op_iwmmxt_set_mup();
1662 gen_op_iwmmxt_set_cup();
1664 case 0x111: /* TMRC */
1667 rd
= (insn
>> 12) & 0xf;
1668 wrd
= (insn
>> 16) & 0xf;
1669 tmp
= iwmmxt_load_creg(wrd
);
1670 store_reg(s
, rd
, tmp
);
1672 case 0x300: /* WANDN */
1673 wrd
= (insn
>> 12) & 0xf;
1674 rd0
= (insn
>> 0) & 0xf;
1675 rd1
= (insn
>> 16) & 0xf;
1676 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1677 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1678 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1679 gen_op_iwmmxt_setpsr_nz();
1680 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1681 gen_op_iwmmxt_set_mup();
1682 gen_op_iwmmxt_set_cup();
1684 case 0x200: /* WAND */
1685 wrd
= (insn
>> 12) & 0xf;
1686 rd0
= (insn
>> 0) & 0xf;
1687 rd1
= (insn
>> 16) & 0xf;
1688 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1689 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1690 gen_op_iwmmxt_setpsr_nz();
1691 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1692 gen_op_iwmmxt_set_mup();
1693 gen_op_iwmmxt_set_cup();
1695 case 0x810: case 0xa10: /* WMADD */
1696 wrd
= (insn
>> 12) & 0xf;
1697 rd0
= (insn
>> 0) & 0xf;
1698 rd1
= (insn
>> 16) & 0xf;
1699 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1700 if (insn
& (1 << 21))
1701 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1703 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1704 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1705 gen_op_iwmmxt_set_mup();
1707 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1708 wrd
= (insn
>> 12) & 0xf;
1709 rd0
= (insn
>> 16) & 0xf;
1710 rd1
= (insn
>> 0) & 0xf;
1711 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1712 switch ((insn
>> 22) & 3) {
1714 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
1717 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
1720 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
1725 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1726 gen_op_iwmmxt_set_mup();
1727 gen_op_iwmmxt_set_cup();
1729 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1730 wrd
= (insn
>> 12) & 0xf;
1731 rd0
= (insn
>> 16) & 0xf;
1732 rd1
= (insn
>> 0) & 0xf;
1733 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1734 switch ((insn
>> 22) & 3) {
1736 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
1739 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
1742 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
1747 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1748 gen_op_iwmmxt_set_mup();
1749 gen_op_iwmmxt_set_cup();
1751 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1752 wrd
= (insn
>> 12) & 0xf;
1753 rd0
= (insn
>> 16) & 0xf;
1754 rd1
= (insn
>> 0) & 0xf;
1755 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1756 if (insn
& (1 << 22))
1757 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
1759 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
1760 if (!(insn
& (1 << 20)))
1761 gen_op_iwmmxt_addl_M0_wRn(wrd
);
1762 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1763 gen_op_iwmmxt_set_mup();
1765 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1766 wrd
= (insn
>> 12) & 0xf;
1767 rd0
= (insn
>> 16) & 0xf;
1768 rd1
= (insn
>> 0) & 0xf;
1769 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1770 if (insn
& (1 << 21)) {
1771 if (insn
& (1 << 20))
1772 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
1774 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
1776 if (insn
& (1 << 20))
1777 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
1779 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
1781 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1782 gen_op_iwmmxt_set_mup();
1784 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1785 wrd
= (insn
>> 12) & 0xf;
1786 rd0
= (insn
>> 16) & 0xf;
1787 rd1
= (insn
>> 0) & 0xf;
1788 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1789 if (insn
& (1 << 21))
1790 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
1792 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
1793 if (!(insn
& (1 << 20))) {
1794 iwmmxt_load_reg(cpu_V1
, wrd
);
1795 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1797 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1798 gen_op_iwmmxt_set_mup();
1800 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1801 wrd
= (insn
>> 12) & 0xf;
1802 rd0
= (insn
>> 16) & 0xf;
1803 rd1
= (insn
>> 0) & 0xf;
1804 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1805 switch ((insn
>> 22) & 3) {
1807 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
1810 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
1813 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
1818 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1819 gen_op_iwmmxt_set_mup();
1820 gen_op_iwmmxt_set_cup();
1822 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1823 wrd
= (insn
>> 12) & 0xf;
1824 rd0
= (insn
>> 16) & 0xf;
1825 rd1
= (insn
>> 0) & 0xf;
1826 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1827 if (insn
& (1 << 22)) {
1828 if (insn
& (1 << 20))
1829 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
1831 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
1833 if (insn
& (1 << 20))
1834 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
1836 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
1838 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1839 gen_op_iwmmxt_set_mup();
1840 gen_op_iwmmxt_set_cup();
1842 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1843 wrd
= (insn
>> 12) & 0xf;
1844 rd0
= (insn
>> 16) & 0xf;
1845 rd1
= (insn
>> 0) & 0xf;
1846 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1847 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
1848 tcg_gen_andi_i32(tmp
, tmp
, 7);
1849 iwmmxt_load_reg(cpu_V1
, rd1
);
1850 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
1851 tcg_temp_free_i32(tmp
);
1852 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1853 gen_op_iwmmxt_set_mup();
1855 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1856 if (((insn
>> 6) & 3) == 3)
1858 rd
= (insn
>> 12) & 0xf;
1859 wrd
= (insn
>> 16) & 0xf;
1860 tmp
= load_reg(s
, rd
);
1861 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1862 switch ((insn
>> 6) & 3) {
1864 tmp2
= tcg_const_i32(0xff);
1865 tmp3
= tcg_const_i32((insn
& 7) << 3);
1868 tmp2
= tcg_const_i32(0xffff);
1869 tmp3
= tcg_const_i32((insn
& 3) << 4);
1872 tmp2
= tcg_const_i32(0xffffffff);
1873 tmp3
= tcg_const_i32((insn
& 1) << 5);
1879 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, tmp
, tmp2
, tmp3
);
1880 tcg_temp_free_i32(tmp3
);
1881 tcg_temp_free_i32(tmp2
);
1882 tcg_temp_free_i32(tmp
);
1883 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1884 gen_op_iwmmxt_set_mup();
1886 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1887 rd
= (insn
>> 12) & 0xf;
1888 wrd
= (insn
>> 16) & 0xf;
1889 if (rd
== 15 || ((insn
>> 22) & 3) == 3)
1891 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1892 tmp
= tcg_temp_new_i32();
1893 switch ((insn
>> 22) & 3) {
1895 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 7) << 3);
1896 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1898 tcg_gen_ext8s_i32(tmp
, tmp
);
1900 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
1904 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 3) << 4);
1905 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1907 tcg_gen_ext16s_i32(tmp
, tmp
);
1909 tcg_gen_andi_i32(tmp
, tmp
, 0xffff);
1913 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 1) << 5);
1914 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1917 store_reg(s
, rd
, tmp
);
1919 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1920 if ((insn
& 0x000ff008) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1922 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1923 switch ((insn
>> 22) & 3) {
1925 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 7) << 2) + 0);
1928 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 3) << 3) + 4);
1931 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 1) << 4) + 12);
1934 tcg_gen_shli_i32(tmp
, tmp
, 28);
1936 tcg_temp_free_i32(tmp
);
1938 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1939 if (((insn
>> 6) & 3) == 3)
1941 rd
= (insn
>> 12) & 0xf;
1942 wrd
= (insn
>> 16) & 0xf;
1943 tmp
= load_reg(s
, rd
);
1944 switch ((insn
>> 6) & 3) {
1946 gen_helper_iwmmxt_bcstb(cpu_M0
, tmp
);
1949 gen_helper_iwmmxt_bcstw(cpu_M0
, tmp
);
1952 gen_helper_iwmmxt_bcstl(cpu_M0
, tmp
);
1955 tcg_temp_free_i32(tmp
);
1956 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1957 gen_op_iwmmxt_set_mup();
1959 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1960 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1962 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1963 tmp2
= tcg_temp_new_i32();
1964 tcg_gen_mov_i32(tmp2
, tmp
);
1965 switch ((insn
>> 22) & 3) {
1967 for (i
= 0; i
< 7; i
++) {
1968 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
1969 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1973 for (i
= 0; i
< 3; i
++) {
1974 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
1975 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1979 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
1980 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1984 tcg_temp_free_i32(tmp2
);
1985 tcg_temp_free_i32(tmp
);
1987 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1988 wrd
= (insn
>> 12) & 0xf;
1989 rd0
= (insn
>> 16) & 0xf;
1990 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1991 switch ((insn
>> 22) & 3) {
1993 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
1996 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
1999 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
2004 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2005 gen_op_iwmmxt_set_mup();
2007 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2008 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2010 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2011 tmp2
= tcg_temp_new_i32();
2012 tcg_gen_mov_i32(tmp2
, tmp
);
2013 switch ((insn
>> 22) & 3) {
2015 for (i
= 0; i
< 7; i
++) {
2016 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
2017 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2021 for (i
= 0; i
< 3; i
++) {
2022 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
2023 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2027 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
2028 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2032 tcg_temp_free_i32(tmp2
);
2033 tcg_temp_free_i32(tmp
);
2035 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2036 rd
= (insn
>> 12) & 0xf;
2037 rd0
= (insn
>> 16) & 0xf;
2038 if ((insn
& 0xf) != 0 || ((insn
>> 22) & 3) == 3)
2040 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2041 tmp
= tcg_temp_new_i32();
2042 switch ((insn
>> 22) & 3) {
2044 gen_helper_iwmmxt_msbb(tmp
, cpu_M0
);
2047 gen_helper_iwmmxt_msbw(tmp
, cpu_M0
);
2050 gen_helper_iwmmxt_msbl(tmp
, cpu_M0
);
2053 store_reg(s
, rd
, tmp
);
2055 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2056 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2057 wrd
= (insn
>> 12) & 0xf;
2058 rd0
= (insn
>> 16) & 0xf;
2059 rd1
= (insn
>> 0) & 0xf;
2060 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2061 switch ((insn
>> 22) & 3) {
2063 if (insn
& (1 << 21))
2064 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
2066 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
2069 if (insn
& (1 << 21))
2070 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
2072 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
2075 if (insn
& (1 << 21))
2076 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
2078 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
2083 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2084 gen_op_iwmmxt_set_mup();
2085 gen_op_iwmmxt_set_cup();
2087 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2088 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2089 wrd
= (insn
>> 12) & 0xf;
2090 rd0
= (insn
>> 16) & 0xf;
2091 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2092 switch ((insn
>> 22) & 3) {
2094 if (insn
& (1 << 21))
2095 gen_op_iwmmxt_unpacklsb_M0();
2097 gen_op_iwmmxt_unpacklub_M0();
2100 if (insn
& (1 << 21))
2101 gen_op_iwmmxt_unpacklsw_M0();
2103 gen_op_iwmmxt_unpackluw_M0();
2106 if (insn
& (1 << 21))
2107 gen_op_iwmmxt_unpacklsl_M0();
2109 gen_op_iwmmxt_unpacklul_M0();
2114 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2115 gen_op_iwmmxt_set_mup();
2116 gen_op_iwmmxt_set_cup();
2118 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2119 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2120 wrd
= (insn
>> 12) & 0xf;
2121 rd0
= (insn
>> 16) & 0xf;
2122 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2123 switch ((insn
>> 22) & 3) {
2125 if (insn
& (1 << 21))
2126 gen_op_iwmmxt_unpackhsb_M0();
2128 gen_op_iwmmxt_unpackhub_M0();
2131 if (insn
& (1 << 21))
2132 gen_op_iwmmxt_unpackhsw_M0();
2134 gen_op_iwmmxt_unpackhuw_M0();
2137 if (insn
& (1 << 21))
2138 gen_op_iwmmxt_unpackhsl_M0();
2140 gen_op_iwmmxt_unpackhul_M0();
2145 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2146 gen_op_iwmmxt_set_mup();
2147 gen_op_iwmmxt_set_cup();
2149 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2150 case 0x214: case 0x614: case 0xa14: case 0xe14:
2151 if (((insn
>> 22) & 3) == 0)
2153 wrd
= (insn
>> 12) & 0xf;
2154 rd0
= (insn
>> 16) & 0xf;
2155 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2156 tmp
= tcg_temp_new_i32();
2157 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2158 tcg_temp_free_i32(tmp
);
2161 switch ((insn
>> 22) & 3) {
2163 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2166 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2169 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2172 tcg_temp_free_i32(tmp
);
2173 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2174 gen_op_iwmmxt_set_mup();
2175 gen_op_iwmmxt_set_cup();
2177 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2178 case 0x014: case 0x414: case 0x814: case 0xc14:
2179 if (((insn
>> 22) & 3) == 0)
2181 wrd
= (insn
>> 12) & 0xf;
2182 rd0
= (insn
>> 16) & 0xf;
2183 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2184 tmp
= tcg_temp_new_i32();
2185 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2186 tcg_temp_free_i32(tmp
);
2189 switch ((insn
>> 22) & 3) {
2191 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2194 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2197 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2200 tcg_temp_free_i32(tmp
);
2201 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2202 gen_op_iwmmxt_set_mup();
2203 gen_op_iwmmxt_set_cup();
2205 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2206 case 0x114: case 0x514: case 0x914: case 0xd14:
2207 if (((insn
>> 22) & 3) == 0)
2209 wrd
= (insn
>> 12) & 0xf;
2210 rd0
= (insn
>> 16) & 0xf;
2211 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2212 tmp
= tcg_temp_new_i32();
2213 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2214 tcg_temp_free_i32(tmp
);
2217 switch ((insn
>> 22) & 3) {
2219 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2222 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2225 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2228 tcg_temp_free_i32(tmp
);
2229 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2230 gen_op_iwmmxt_set_mup();
2231 gen_op_iwmmxt_set_cup();
2233 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2234 case 0x314: case 0x714: case 0xb14: case 0xf14:
2235 if (((insn
>> 22) & 3) == 0)
2237 wrd
= (insn
>> 12) & 0xf;
2238 rd0
= (insn
>> 16) & 0xf;
2239 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2240 tmp
= tcg_temp_new_i32();
2241 switch ((insn
>> 22) & 3) {
2243 if (gen_iwmmxt_shift(insn
, 0xf, tmp
)) {
2244 tcg_temp_free_i32(tmp
);
2247 gen_helper_iwmmxt_rorw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2250 if (gen_iwmmxt_shift(insn
, 0x1f, tmp
)) {
2251 tcg_temp_free_i32(tmp
);
2254 gen_helper_iwmmxt_rorl(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2257 if (gen_iwmmxt_shift(insn
, 0x3f, tmp
)) {
2258 tcg_temp_free_i32(tmp
);
2261 gen_helper_iwmmxt_rorq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2264 tcg_temp_free_i32(tmp
);
2265 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2266 gen_op_iwmmxt_set_mup();
2267 gen_op_iwmmxt_set_cup();
2269 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2270 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2271 wrd
= (insn
>> 12) & 0xf;
2272 rd0
= (insn
>> 16) & 0xf;
2273 rd1
= (insn
>> 0) & 0xf;
2274 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2275 switch ((insn
>> 22) & 3) {
2277 if (insn
& (1 << 21))
2278 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
2280 gen_op_iwmmxt_minub_M0_wRn(rd1
);
2283 if (insn
& (1 << 21))
2284 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
2286 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
2289 if (insn
& (1 << 21))
2290 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
2292 gen_op_iwmmxt_minul_M0_wRn(rd1
);
2297 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2298 gen_op_iwmmxt_set_mup();
2300 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2301 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2302 wrd
= (insn
>> 12) & 0xf;
2303 rd0
= (insn
>> 16) & 0xf;
2304 rd1
= (insn
>> 0) & 0xf;
2305 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2306 switch ((insn
>> 22) & 3) {
2308 if (insn
& (1 << 21))
2309 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
2311 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
2314 if (insn
& (1 << 21))
2315 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
2317 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
2320 if (insn
& (1 << 21))
2321 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
2323 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
2328 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2329 gen_op_iwmmxt_set_mup();
2331 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2332 case 0x402: case 0x502: case 0x602: case 0x702:
2333 wrd
= (insn
>> 12) & 0xf;
2334 rd0
= (insn
>> 16) & 0xf;
2335 rd1
= (insn
>> 0) & 0xf;
2336 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2337 tmp
= tcg_const_i32((insn
>> 20) & 3);
2338 iwmmxt_load_reg(cpu_V1
, rd1
);
2339 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2340 tcg_temp_free_i32(tmp
);
2341 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2342 gen_op_iwmmxt_set_mup();
2344 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2345 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2346 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2347 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2348 wrd
= (insn
>> 12) & 0xf;
2349 rd0
= (insn
>> 16) & 0xf;
2350 rd1
= (insn
>> 0) & 0xf;
2351 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2352 switch ((insn
>> 20) & 0xf) {
2354 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2357 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2360 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2363 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2366 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2369 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2372 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2375 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2378 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2383 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2384 gen_op_iwmmxt_set_mup();
2385 gen_op_iwmmxt_set_cup();
2387 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2388 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2389 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2390 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2391 wrd
= (insn
>> 12) & 0xf;
2392 rd0
= (insn
>> 16) & 0xf;
2393 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2394 tmp
= tcg_const_i32(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2395 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2396 tcg_temp_free_i32(tmp
);
2397 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2398 gen_op_iwmmxt_set_mup();
2399 gen_op_iwmmxt_set_cup();
2401 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2402 case 0x418: case 0x518: case 0x618: case 0x718:
2403 case 0x818: case 0x918: case 0xa18: case 0xb18:
2404 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2405 wrd
= (insn
>> 12) & 0xf;
2406 rd0
= (insn
>> 16) & 0xf;
2407 rd1
= (insn
>> 0) & 0xf;
2408 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2409 switch ((insn
>> 20) & 0xf) {
2411 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2414 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2417 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2420 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2423 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2426 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2429 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2432 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2435 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2440 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2441 gen_op_iwmmxt_set_mup();
2442 gen_op_iwmmxt_set_cup();
2444 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2445 case 0x408: case 0x508: case 0x608: case 0x708:
2446 case 0x808: case 0x908: case 0xa08: case 0xb08:
2447 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2448 if (!(insn
& (1 << 20)) || ((insn
>> 22) & 3) == 0)
2450 wrd
= (insn
>> 12) & 0xf;
2451 rd0
= (insn
>> 16) & 0xf;
2452 rd1
= (insn
>> 0) & 0xf;
2453 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2454 switch ((insn
>> 22) & 3) {
2456 if (insn
& (1 << 21))
2457 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
2459 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
2462 if (insn
& (1 << 21))
2463 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
2465 gen_op_iwmmxt_packul_M0_wRn(rd1
);
2468 if (insn
& (1 << 21))
2469 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
2471 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
2474 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2475 gen_op_iwmmxt_set_mup();
2476 gen_op_iwmmxt_set_cup();
2478 case 0x201: case 0x203: case 0x205: case 0x207:
2479 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2480 case 0x211: case 0x213: case 0x215: case 0x217:
2481 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2482 wrd
= (insn
>> 5) & 0xf;
2483 rd0
= (insn
>> 12) & 0xf;
2484 rd1
= (insn
>> 0) & 0xf;
2485 if (rd0
== 0xf || rd1
== 0xf)
2487 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2488 tmp
= load_reg(s
, rd0
);
2489 tmp2
= load_reg(s
, rd1
);
2490 switch ((insn
>> 16) & 0xf) {
2491 case 0x0: /* TMIA */
2492 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2494 case 0x8: /* TMIAPH */
2495 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2497 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2498 if (insn
& (1 << 16))
2499 tcg_gen_shri_i32(tmp
, tmp
, 16);
2500 if (insn
& (1 << 17))
2501 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2502 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2505 tcg_temp_free_i32(tmp2
);
2506 tcg_temp_free_i32(tmp
);
2509 tcg_temp_free_i32(tmp2
);
2510 tcg_temp_free_i32(tmp
);
2511 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2512 gen_op_iwmmxt_set_mup();
2521 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2522 (ie. an undefined instruction). */
2523 static int disas_dsp_insn(DisasContext
*s
, uint32_t insn
)
2525 int acc
, rd0
, rd1
, rdhi
, rdlo
;
2528 if ((insn
& 0x0ff00f10) == 0x0e200010) {
2529 /* Multiply with Internal Accumulate Format */
2530 rd0
= (insn
>> 12) & 0xf;
2532 acc
= (insn
>> 5) & 7;
2537 tmp
= load_reg(s
, rd0
);
2538 tmp2
= load_reg(s
, rd1
);
2539 switch ((insn
>> 16) & 0xf) {
2541 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2543 case 0x8: /* MIAPH */
2544 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2546 case 0xc: /* MIABB */
2547 case 0xd: /* MIABT */
2548 case 0xe: /* MIATB */
2549 case 0xf: /* MIATT */
2550 if (insn
& (1 << 16))
2551 tcg_gen_shri_i32(tmp
, tmp
, 16);
2552 if (insn
& (1 << 17))
2553 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2554 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2559 tcg_temp_free_i32(tmp2
);
2560 tcg_temp_free_i32(tmp
);
2562 gen_op_iwmmxt_movq_wRn_M0(acc
);
2566 if ((insn
& 0x0fe00ff8) == 0x0c400000) {
2567 /* Internal Accumulator Access Format */
2568 rdhi
= (insn
>> 16) & 0xf;
2569 rdlo
= (insn
>> 12) & 0xf;
2575 if (insn
& ARM_CP_RW_BIT
) { /* MRA */
2576 iwmmxt_load_reg(cpu_V0
, acc
);
2577 tcg_gen_extrl_i64_i32(cpu_R
[rdlo
], cpu_V0
);
2578 tcg_gen_extrh_i64_i32(cpu_R
[rdhi
], cpu_V0
);
2579 tcg_gen_andi_i32(cpu_R
[rdhi
], cpu_R
[rdhi
], (1 << (40 - 32)) - 1);
2581 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
2582 iwmmxt_store_reg(cpu_V0
, acc
);
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}

/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}

/* Jump, specifying which TB number to use if we gen_goto_tb() */
static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
{
    if (unlikely(is_singlestepping(s))) {
        /* An indirect jump so that we still trigger the debug exception.  */
        gen_set_pc_im(s, dest);
        s->base.is_jmp = DISAS_JUMP;
    } else {
        gen_goto_tb(s, tbno, dest);
    }
}

static inline void gen_jmp(DisasContext *s, uint32_t dest)
{
    gen_jmp_tb(s, dest, 0);
}
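/*
 * Illustrative note: with a 4K TARGET_PAGE_SIZE, a branch from 0x1000 to
 * 0x1040 stays within the same guest page, so use_goto_tb() permits direct
 * TB chaining via tcg_gen_goto_tb()/tcg_gen_exit_tb(); a branch to 0x2040
 * crosses a page boundary and must instead take the gen_goto_ptr() path,
 * i.e. tcg_gen_lookup_and_goto_ptr().
 */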
static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask = 0;

    if (flags & (1 << 0)) {
        mask |= 0xff;
    }
    if (flags & (1 << 1)) {
        mask |= 0xff00;
    }
    if (flags & (1 << 2)) {
        mask |= 0xff0000;
    }
    if (flags & (1 << 3)) {
        mask |= 0xff000000;
    }

    /* Mask out undefined and reserved bits.  */
    mask &= aarch32_cpsr_valid_mask(s->features, s->isar);

    /* Mask out execution state.  */
    if (!spsr) {
        mask &= ~CPSR_EXEC;
    }

    /* Mask out privileged bits.  */
    if (IS_USER(s)) {
        mask &= CPSR_USER;
    }
    return mask;
}
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr,
                          uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
2716 static bool msr_banked_access_decode(DisasContext
*s
, int r
, int sysm
, int rn
,
2717 int *tgtmode
, int *regno
)
2719 /* Decode the r and sysm fields of MSR/MRS banked accesses into
2720 * the target mode and register number, and identify the various
2721 * unpredictable cases.
2722 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
2723 * + executed in user mode
2724 * + using R15 as the src/dest register
2725 * + accessing an unimplemented register
2726 * + accessing a register that's inaccessible at current PL/security state*
2727 * + accessing a register that you could access with a different insn
2728 * We choose to UNDEF in all these cases.
2729 * Since we don't know which of the various AArch32 modes we are in
2730 * we have to defer some checks to runtime.
2731 * Accesses to Monitor mode registers from Secure EL1 (which implies
2732 * that EL3 is AArch64) must trap to EL3.
2734 * If the access checks fail this function will emit code to take
2735 * an exception and return false. Otherwise it will return true,
2736 * and set *tgtmode and *regno appropriately.
2738 int exc_target
= default_exception_el(s
);
2740 /* These instructions are present only in ARMv8, or in ARMv7 with the
2741 * Virtualization Extensions.
2743 if (!arm_dc_feature(s
, ARM_FEATURE_V8
) &&
2744 !arm_dc_feature(s
, ARM_FEATURE_EL2
)) {
2748 if (IS_USER(s
) || rn
== 15) {
2752 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
2753 * of registers into (r, sysm).
2756 /* SPSRs for other modes */
2758 case 0xe: /* SPSR_fiq */
2759 *tgtmode
= ARM_CPU_MODE_FIQ
;
2761 case 0x10: /* SPSR_irq */
2762 *tgtmode
= ARM_CPU_MODE_IRQ
;
2764 case 0x12: /* SPSR_svc */
2765 *tgtmode
= ARM_CPU_MODE_SVC
;
2767 case 0x14: /* SPSR_abt */
2768 *tgtmode
= ARM_CPU_MODE_ABT
;
2770 case 0x16: /* SPSR_und */
2771 *tgtmode
= ARM_CPU_MODE_UND
;
2773 case 0x1c: /* SPSR_mon */
2774 *tgtmode
= ARM_CPU_MODE_MON
;
2776 case 0x1e: /* SPSR_hyp */
2777 *tgtmode
= ARM_CPU_MODE_HYP
;
2779 default: /* unallocated */
2782 /* We arbitrarily assign SPSR a register number of 16. */
2785 /* general purpose registers for other modes */
2787 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
2788 *tgtmode
= ARM_CPU_MODE_USR
;
2791 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
2792 *tgtmode
= ARM_CPU_MODE_FIQ
;
2795 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
2796 *tgtmode
= ARM_CPU_MODE_IRQ
;
2797 *regno
= sysm
& 1 ? 13 : 14;
2799 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
2800 *tgtmode
= ARM_CPU_MODE_SVC
;
2801 *regno
= sysm
& 1 ? 13 : 14;
2803 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
2804 *tgtmode
= ARM_CPU_MODE_ABT
;
2805 *regno
= sysm
& 1 ? 13 : 14;
2807 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
2808 *tgtmode
= ARM_CPU_MODE_UND
;
2809 *regno
= sysm
& 1 ? 13 : 14;
2811 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
2812 *tgtmode
= ARM_CPU_MODE_MON
;
2813 *regno
= sysm
& 1 ? 13 : 14;
2815 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
2816 *tgtmode
= ARM_CPU_MODE_HYP
;
2817 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
2818 *regno
= sysm
& 1 ? 13 : 17;
2820 default: /* unallocated */
2825 /* Catch the 'accessing inaccessible register' cases we can detect
2826 * at translate time.
2829 case ARM_CPU_MODE_MON
:
2830 if (!arm_dc_feature(s
, ARM_FEATURE_EL3
) || s
->ns
) {
2833 if (s
->current_el
== 1) {
2834 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
2835 * then accesses to Mon registers trap to EL3
2837 TCGv_i32 tcg_el
= tcg_const_i32(3);
2839 gen_exception_el(s
, EXCP_UDEF
, syn_uncategorized(), tcg_el
);
2840 tcg_temp_free_i32(tcg_el
);
2844 case ARM_CPU_MODE_HYP
:
2846 * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
2847 * (and so we can forbid accesses from EL2 or below). elr_hyp
2848 * can be accessed also from Hyp mode, so forbid accesses from
2851 if (!arm_dc_feature(s
, ARM_FEATURE_EL2
) || s
->current_el
< 2 ||
2852 (s
->current_el
< 3 && *regno
!= 17)) {
2863 /* If we get here then some access check did not pass */
2864 gen_exception_insn(s
, s
->pc_curr
, EXCP_UDEF
,
2865 syn_uncategorized(), exc_target
);
static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    s->base.is_jmp = DISAS_UPDATE_EXIT;
}

static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    s->base.is_jmp = DISAS_UPDATE_EXIT;
}
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}

/* Generate an old-style exception return.  Marks pc as dead.  */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}
static void gen_gvec_fn3_qc(uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs,
                            uint32_t opr_sz, uint32_t max_sz,
                            gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr qc_ptr = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
    tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, qc_ptr,
                       opr_sz, max_sz, 0, fn);
    tcg_temp_free_ptr(qc_ptr);
}

void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static gen_helper_gvec_3_ptr * const fns[2] = {
        gen_helper_gvec_qrdmlah_s16, gen_helper_gvec_qrdmlah_s32
    };
    tcg_debug_assert(vece >= 1 && vece <= 2);
    gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
}

void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static gen_helper_gvec_3_ptr * const fns[2] = {
        gen_helper_gvec_qrdmlsh_s16, gen_helper_gvec_qrdmlsh_s32
    };
    tcg_debug_assert(vece >= 1 && vece <= 2);
    gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
}
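/*
 * Note: the helpers reached through gen_gvec_fn3_qc() are handed a pointer
 * to env->vfp.qc rather than the whole CPUARMState, so the only side effect
 * they can have besides writing the destination vector is raising the sticky
 * saturation flag; reads of FPSCR later fold vfp.qc back into FPSCR.QC.
 */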
#define GEN_CMP0(NAME, COND)                                            \
    static void gen_##NAME##0_i32(TCGv_i32 d, TCGv_i32 a)               \
    {                                                                    \
        tcg_gen_setcondi_i32(COND, d, a, 0);                             \
        tcg_gen_neg_i32(d, d);                                           \
    }                                                                    \
    static void gen_##NAME##0_i64(TCGv_i64 d, TCGv_i64 a)                \
    {                                                                    \
        tcg_gen_setcondi_i64(COND, d, a, 0);                             \
        tcg_gen_neg_i64(d, d);                                           \
    }                                                                    \
    static void gen_##NAME##0_vec(unsigned vece, TCGv_vec d, TCGv_vec a) \
    {                                                                    \
        TCGv_vec zero = tcg_const_zeros_vec_matching(d);                 \
        tcg_gen_cmp_vec(COND, vece, d, a, zero);                         \
        tcg_temp_free_vec(zero);                                         \
    }                                                                    \
    void gen_gvec_##NAME##0(unsigned vece, uint32_t d, uint32_t m,       \
                            uint32_t opr_sz, uint32_t max_sz)            \
    {                                                                    \
        const GVecGen2 op[4] = {                                         \
            { .fno = gen_helper_gvec_##NAME##0_b,                        \
              .fniv = gen_##NAME##0_vec,                                 \
              .opt_opc = vecop_list_cmp,                                 \
              .vece = MO_8 },                                            \
            { .fno = gen_helper_gvec_##NAME##0_h,                        \
              .fniv = gen_##NAME##0_vec,                                 \
              .opt_opc = vecop_list_cmp,                                 \
              .vece = MO_16 },                                           \
            { .fni4 = gen_##NAME##0_i32,                                 \
              .fniv = gen_##NAME##0_vec,                                 \
              .opt_opc = vecop_list_cmp,                                 \
              .vece = MO_32 },                                           \
            { .fni8 = gen_##NAME##0_i64,                                 \
              .fniv = gen_##NAME##0_vec,                                 \
              .opt_opc = vecop_list_cmp,                                 \
              .prefer_i64 = TCG_TARGET_REG_BITS == 64,                   \
              .vece = MO_64 },                                           \
        };                                                               \
        tcg_gen_gvec_2(d, m, opr_sz, max_sz, &op[vece]);                 \
    }

static const TCGOpcode vecop_list_cmp[] = {
    INDEX_op_cmp_vec, 0
};

GEN_CMP0(ceq, TCG_COND_EQ)
GEN_CMP0(cle, TCG_COND_LE)
GEN_CMP0(cge, TCG_COND_GE)
GEN_CMP0(clt, TCG_COND_LT)
GEN_CMP0(cgt, TCG_COND_GT)

#undef GEN_CMP0
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

void gen_gvec_ssra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_sari_vec, INDEX_op_add_vec, 0
    };
    static const GVecGen2i ops[4] = {
        { .fni8 = gen_ssra8_i64,
          .fniv = gen_ssra_vec,
          .fno = gen_helper_gvec_ssra_b,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = gen_ssra16_i64,
          .fniv = gen_ssra_vec,
          .fno = gen_helper_gvec_ssra_h,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_ssra32_i32,
          .fniv = gen_ssra_vec,
          .fno = gen_helper_gvec_ssra_s,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_ssra64_i64,
          .fniv = gen_ssra_vec,
          /* Use the 64-bit out-of-line helper here, not the _b variant. */
          .fno = gen_helper_gvec_ssra_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_64 },
    };

    /* tszimm encoding produces immediates in the range [1..esize]. */
    tcg_debug_assert(shift > 0);
    tcg_debug_assert(shift <= (8 << vece));

    /*
     * Shifts larger than the element size are architecturally valid.
     * Signed results in all sign bits.
     */
    shift = MIN(shift, (8 << vece) - 1);
    tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
}
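/*
 * Worked example (illustrative): for vece == MO_8 an immediate of 8 is
 * valid for SSRA, but an 8-bit arithmetic shift by 8 is not expressible
 * in TCG; clamping to 7 gives the same result, since shifting a signed
 * byte right by 7 already replicates the sign bit into every bit, e.g.
 * 0x80 >> 7 == 0xff and 0x7f >> 7 == 0x00, which is then accumulated
 * into the destination.
 */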
3108 static void gen_usra8_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
3110 tcg_gen_vec_shr8i_i64(a
, a
, shift
);
3111 tcg_gen_vec_add8_i64(d
, d
, a
);
3114 static void gen_usra16_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
3116 tcg_gen_vec_shr16i_i64(a
, a
, shift
);
3117 tcg_gen_vec_add16_i64(d
, d
, a
);
3120 static void gen_usra32_i32(TCGv_i32 d
, TCGv_i32 a
, int32_t shift
)
3122 tcg_gen_shri_i32(a
, a
, shift
);
3123 tcg_gen_add_i32(d
, d
, a
);
3126 static void gen_usra64_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
3128 tcg_gen_shri_i64(a
, a
, shift
);
3129 tcg_gen_add_i64(d
, d
, a
);
3132 static void gen_usra_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, int64_t sh
)
3134 tcg_gen_shri_vec(vece
, a
, a
, sh
);
3135 tcg_gen_add_vec(vece
, d
, d
, a
);
3138 void gen_gvec_usra(unsigned vece
, uint32_t rd_ofs
, uint32_t rm_ofs
,
3139 int64_t shift
, uint32_t opr_sz
, uint32_t max_sz
)
3141 static const TCGOpcode vecop_list
[] = {
3142 INDEX_op_shri_vec
, INDEX_op_add_vec
, 0
3144 static const GVecGen2i ops
[4] = {
3145 { .fni8
= gen_usra8_i64
,
3146 .fniv
= gen_usra_vec
,
3147 .fno
= gen_helper_gvec_usra_b
,
3149 .opt_opc
= vecop_list
,
3151 { .fni8
= gen_usra16_i64
,
3152 .fniv
= gen_usra_vec
,
3153 .fno
= gen_helper_gvec_usra_h
,
3155 .opt_opc
= vecop_list
,
3157 { .fni4
= gen_usra32_i32
,
3158 .fniv
= gen_usra_vec
,
3159 .fno
= gen_helper_gvec_usra_s
,
3161 .opt_opc
= vecop_list
,
3163 { .fni8
= gen_usra64_i64
,
3164 .fniv
= gen_usra_vec
,
3165 .fno
= gen_helper_gvec_usra_d
,
3166 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
3168 .opt_opc
= vecop_list
,
3172 /* tszimm encoding produces immediates in the range [1..esize]. */
3173 tcg_debug_assert(shift
> 0);
3174 tcg_debug_assert(shift
<= (8 << vece
));
3177 * Shifts larger than the element size are architecturally valid.
3178 * Unsigned results in all zeros as input to accumulate: nop.
3180 if (shift
< (8 << vece
)) {
3181 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, opr_sz
, max_sz
, shift
, &ops
[vece
]);
3183 /* Nop, but we do need to clear the tail. */
3184 tcg_gen_gvec_mov(vece
, rd_ofs
, rd_ofs
, opr_sz
, max_sz
);
/*
 * Shift one less than the requested amount, and the low bit is
 * the rounding bit.  For the 8 and 16-bit operations, because we
 * mask the low bit, we can perform a normal integer shift instead
 * of a vector shift.
 */
static void gen_srshr8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, sh - 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
    tcg_gen_vec_sar8i_i64(d, a, sh);
    tcg_gen_vec_add8_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_srshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, sh - 1);
    tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
    tcg_gen_vec_sar16i_i64(d, a, sh);
    tcg_gen_vec_add16_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_extract_i32(t, a, sh - 1, 1);
    tcg_gen_sari_i32(d, a, sh);
    tcg_gen_add_i32(d, d, t);
    tcg_temp_free_i32(t);
}

static void gen_srshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extract_i64(t, a, sh - 1, 1);
    tcg_gen_sari_i64(d, a, sh);
    tcg_gen_add_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_srshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec ones = tcg_temp_new_vec_matching(d);

    tcg_gen_shri_vec(vece, t, a, sh - 1);
    tcg_gen_dupi_vec(vece, ones, 1);
    tcg_gen_and_vec(vece, t, t, ones);
    tcg_gen_sari_vec(vece, d, a, sh);
    tcg_gen_add_vec(vece, d, d, t);

    tcg_temp_free_vec(t);
    tcg_temp_free_vec(ones);
}

void gen_gvec_srshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
    };
    static const GVecGen2i ops[4] = {
        { .fni8 = gen_srshr8_i64,
          .fniv = gen_srshr_vec,
          .fno = gen_helper_gvec_srshr_b,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = gen_srshr16_i64,
          .fniv = gen_srshr_vec,
          .fno = gen_helper_gvec_srshr_h,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_srshr32_i32,
          .fniv = gen_srshr_vec,
          .fno = gen_helper_gvec_srshr_s,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_srshr64_i64,
          .fniv = gen_srshr_vec,
          .fno = gen_helper_gvec_srshr_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };

    /* tszimm encoding produces immediates in the range [1..esize] */
    tcg_debug_assert(shift > 0);
    tcg_debug_assert(shift <= (8 << vece));

    if (shift == (8 << vece)) {
        /*
         * Shifts larger than the element size are architecturally valid.
         * Signed results in all sign bits.  With rounding, this produces
         *   (-1 + 1) >> 1 == 0, or (0 + 1) >> 1 == 0.
         * I.e. always zero.
         */
        tcg_gen_gvec_dup_imm(vece, rd_ofs, opr_sz, max_sz, 0);
    } else {
        tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
    }
}
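/*
 * Worked example (illustrative): rounding shift right computes
 * (x + (1 << (sh - 1))) >> sh without needing a wider intermediate, by
 * instead evaluating (x >> sh) + ((x >> (sh - 1)) & 1).  For x = 7,
 * sh = 2: 7 >> 2 == 1 and (7 >> 1) & 1 == 1, giving 2, i.e. round(7 / 4).
 */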
3298 static void gen_srsra8_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t sh
)
3300 TCGv_i64 t
= tcg_temp_new_i64();
3302 gen_srshr8_i64(t
, a
, sh
);
3303 tcg_gen_vec_add8_i64(d
, d
, t
);
3304 tcg_temp_free_i64(t
);
3307 static void gen_srsra16_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t sh
)
3309 TCGv_i64 t
= tcg_temp_new_i64();
3311 gen_srshr16_i64(t
, a
, sh
);
3312 tcg_gen_vec_add16_i64(d
, d
, t
);
3313 tcg_temp_free_i64(t
);
3316 static void gen_srsra32_i32(TCGv_i32 d
, TCGv_i32 a
, int32_t sh
)
3318 TCGv_i32 t
= tcg_temp_new_i32();
3320 gen_srshr32_i32(t
, a
, sh
);
3321 tcg_gen_add_i32(d
, d
, t
);
3322 tcg_temp_free_i32(t
);
3325 static void gen_srsra64_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t sh
)
3327 TCGv_i64 t
= tcg_temp_new_i64();
3329 gen_srshr64_i64(t
, a
, sh
);
3330 tcg_gen_add_i64(d
, d
, t
);
3331 tcg_temp_free_i64(t
);
3334 static void gen_srsra_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, int64_t sh
)
3336 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
3338 gen_srshr_vec(vece
, t
, a
, sh
);
3339 tcg_gen_add_vec(vece
, d
, d
, t
);
3340 tcg_temp_free_vec(t
);
3343 void gen_gvec_srsra(unsigned vece
, uint32_t rd_ofs
, uint32_t rm_ofs
,
3344 int64_t shift
, uint32_t opr_sz
, uint32_t max_sz
)
3346 static const TCGOpcode vecop_list
[] = {
3347 INDEX_op_shri_vec
, INDEX_op_sari_vec
, INDEX_op_add_vec
, 0
3349 static const GVecGen2i ops
[4] = {
3350 { .fni8
= gen_srsra8_i64
,
3351 .fniv
= gen_srsra_vec
,
3352 .fno
= gen_helper_gvec_srsra_b
,
3353 .opt_opc
= vecop_list
,
3356 { .fni8
= gen_srsra16_i64
,
3357 .fniv
= gen_srsra_vec
,
3358 .fno
= gen_helper_gvec_srsra_h
,
3359 .opt_opc
= vecop_list
,
3362 { .fni4
= gen_srsra32_i32
,
3363 .fniv
= gen_srsra_vec
,
3364 .fno
= gen_helper_gvec_srsra_s
,
3365 .opt_opc
= vecop_list
,
3368 { .fni8
= gen_srsra64_i64
,
3369 .fniv
= gen_srsra_vec
,
3370 .fno
= gen_helper_gvec_srsra_d
,
3371 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
3372 .opt_opc
= vecop_list
,
3377 /* tszimm encoding produces immediates in the range [1..esize] */
3378 tcg_debug_assert(shift
> 0);
3379 tcg_debug_assert(shift
<= (8 << vece
));
3382 * Shifts larger than the element size are architecturally valid.
3383 * Signed results in all sign bits. With rounding, this produces
3384 * (-1 + 1) >> 1 == 0, or (0 + 1) >> 1 == 0.
3385 * I.e. always zero. With accumulation, this leaves D unchanged.
3387 if (shift
== (8 << vece
)) {
3388 /* Nop, but we do need to clear the tail. */
3389 tcg_gen_gvec_mov(vece
, rd_ofs
, rd_ofs
, opr_sz
, max_sz
);
3391 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, opr_sz
, max_sz
, shift
, &ops
[vece
]);
3395 static void gen_urshr8_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t sh
)
3397 TCGv_i64 t
= tcg_temp_new_i64();
3399 tcg_gen_shri_i64(t
, a
, sh
- 1);
3400 tcg_gen_andi_i64(t
, t
, dup_const(MO_8
, 1));
3401 tcg_gen_vec_shr8i_i64(d
, a
, sh
);
3402 tcg_gen_vec_add8_i64(d
, d
, t
);
3403 tcg_temp_free_i64(t
);
3406 static void gen_urshr16_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t sh
)
3408 TCGv_i64 t
= tcg_temp_new_i64();
3410 tcg_gen_shri_i64(t
, a
, sh
- 1);
3411 tcg_gen_andi_i64(t
, t
, dup_const(MO_16
, 1));
3412 tcg_gen_vec_shr16i_i64(d
, a
, sh
);
3413 tcg_gen_vec_add16_i64(d
, d
, t
);
3414 tcg_temp_free_i64(t
);
3417 static void gen_urshr32_i32(TCGv_i32 d
, TCGv_i32 a
, int32_t sh
)
3419 TCGv_i32 t
= tcg_temp_new_i32();
3421 tcg_gen_extract_i32(t
, a
, sh
- 1, 1);
3422 tcg_gen_shri_i32(d
, a
, sh
);
3423 tcg_gen_add_i32(d
, d
, t
);
3424 tcg_temp_free_i32(t
);
3427 static void gen_urshr64_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t sh
)
3429 TCGv_i64 t
= tcg_temp_new_i64();
3431 tcg_gen_extract_i64(t
, a
, sh
- 1, 1);
3432 tcg_gen_shri_i64(d
, a
, sh
);
3433 tcg_gen_add_i64(d
, d
, t
);
3434 tcg_temp_free_i64(t
);
3437 static void gen_urshr_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, int64_t shift
)
3439 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
3440 TCGv_vec ones
= tcg_temp_new_vec_matching(d
);
3442 tcg_gen_shri_vec(vece
, t
, a
, shift
- 1);
3443 tcg_gen_dupi_vec(vece
, ones
, 1);
3444 tcg_gen_and_vec(vece
, t
, t
, ones
);
3445 tcg_gen_shri_vec(vece
, d
, a
, shift
);
3446 tcg_gen_add_vec(vece
, d
, d
, t
);
3448 tcg_temp_free_vec(t
);
3449 tcg_temp_free_vec(ones
);
3452 void gen_gvec_urshr(unsigned vece
, uint32_t rd_ofs
, uint32_t rm_ofs
,
3453 int64_t shift
, uint32_t opr_sz
, uint32_t max_sz
)
3455 static const TCGOpcode vecop_list
[] = {
3456 INDEX_op_shri_vec
, INDEX_op_add_vec
, 0
3458 static const GVecGen2i ops
[4] = {
3459 { .fni8
= gen_urshr8_i64
,
3460 .fniv
= gen_urshr_vec
,
3461 .fno
= gen_helper_gvec_urshr_b
,
3462 .opt_opc
= vecop_list
,
3464 { .fni8
= gen_urshr16_i64
,
3465 .fniv
= gen_urshr_vec
,
3466 .fno
= gen_helper_gvec_urshr_h
,
3467 .opt_opc
= vecop_list
,
3469 { .fni4
= gen_urshr32_i32
,
3470 .fniv
= gen_urshr_vec
,
3471 .fno
= gen_helper_gvec_urshr_s
,
3472 .opt_opc
= vecop_list
,
3474 { .fni8
= gen_urshr64_i64
,
3475 .fniv
= gen_urshr_vec
,
3476 .fno
= gen_helper_gvec_urshr_d
,
3477 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
3478 .opt_opc
= vecop_list
,
3482 /* tszimm encoding produces immediates in the range [1..esize] */
3483 tcg_debug_assert(shift
> 0);
3484 tcg_debug_assert(shift
<= (8 << vece
));
3486 if (shift
== (8 << vece
)) {
3488 * Shifts larger than the element size are architecturally valid.
3489 * Unsigned results in zero. With rounding, this produces a
3490 * copy of the most significant bit.
3492 tcg_gen_gvec_shri(vece
, rd_ofs
, rm_ofs
, shift
- 1, opr_sz
, max_sz
);
3494 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, opr_sz
, max_sz
, shift
, &ops
[vece
]);
3498 static void gen_ursra8_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t sh
)
3500 TCGv_i64 t
= tcg_temp_new_i64();
3503 tcg_gen_vec_shr8i_i64(t
, a
, 7);
3505 gen_urshr8_i64(t
, a
, sh
);
3507 tcg_gen_vec_add8_i64(d
, d
, t
);
3508 tcg_temp_free_i64(t
);
3511 static void gen_ursra16_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t sh
)
3513 TCGv_i64 t
= tcg_temp_new_i64();
3516 tcg_gen_vec_shr16i_i64(t
, a
, 15);
3518 gen_urshr16_i64(t
, a
, sh
);
3520 tcg_gen_vec_add16_i64(d
, d
, t
);
3521 tcg_temp_free_i64(t
);
3524 static void gen_ursra32_i32(TCGv_i32 d
, TCGv_i32 a
, int32_t sh
)
3526 TCGv_i32 t
= tcg_temp_new_i32();
3529 tcg_gen_shri_i32(t
, a
, 31);
3531 gen_urshr32_i32(t
, a
, sh
);
3533 tcg_gen_add_i32(d
, d
, t
);
3534 tcg_temp_free_i32(t
);
3537 static void gen_ursra64_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t sh
)
3539 TCGv_i64 t
= tcg_temp_new_i64();
3542 tcg_gen_shri_i64(t
, a
, 63);
3544 gen_urshr64_i64(t
, a
, sh
);
3546 tcg_gen_add_i64(d
, d
, t
);
3547 tcg_temp_free_i64(t
);
3550 static void gen_ursra_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, int64_t sh
)
3552 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
3554 if (sh
== (8 << vece
)) {
3555 tcg_gen_shri_vec(vece
, t
, a
, sh
- 1);
3557 gen_urshr_vec(vece
, t
, a
, sh
);
3559 tcg_gen_add_vec(vece
, d
, d
, t
);
3560 tcg_temp_free_vec(t
);
3563 void gen_gvec_ursra(unsigned vece
, uint32_t rd_ofs
, uint32_t rm_ofs
,
3564 int64_t shift
, uint32_t opr_sz
, uint32_t max_sz
)
3566 static const TCGOpcode vecop_list
[] = {
3567 INDEX_op_shri_vec
, INDEX_op_add_vec
, 0
3569 static const GVecGen2i ops
[4] = {
3570 { .fni8
= gen_ursra8_i64
,
3571 .fniv
= gen_ursra_vec
,
3572 .fno
= gen_helper_gvec_ursra_b
,
3573 .opt_opc
= vecop_list
,
3576 { .fni8
= gen_ursra16_i64
,
3577 .fniv
= gen_ursra_vec
,
3578 .fno
= gen_helper_gvec_ursra_h
,
3579 .opt_opc
= vecop_list
,
3582 { .fni4
= gen_ursra32_i32
,
3583 .fniv
= gen_ursra_vec
,
3584 .fno
= gen_helper_gvec_ursra_s
,
3585 .opt_opc
= vecop_list
,
3588 { .fni8
= gen_ursra64_i64
,
3589 .fniv
= gen_ursra_vec
,
3590 .fno
= gen_helper_gvec_ursra_d
,
3591 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
3592 .opt_opc
= vecop_list
,
3597 /* tszimm encoding produces immediates in the range [1..esize] */
3598 tcg_debug_assert(shift
> 0);
3599 tcg_debug_assert(shift
<= (8 << vece
));
3601 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, opr_sz
, max_sz
, shift
, &ops
[vece
]);
static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec m = tcg_temp_new_vec_matching(d);

    tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
    tcg_gen_shri_vec(vece, t, a, sh);
    tcg_gen_and_vec(vece, d, d, m);
    tcg_gen_or_vec(vece, d, d, t);

    tcg_temp_free_vec(t);
    tcg_temp_free_vec(m);
}

void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_shri_vec, 0 };
    const GVecGen2i ops[4] = {
        { .fni8 = gen_shr8_ins_i64,
          .fniv = gen_shr_ins_vec,
          .fno = gen_helper_gvec_sri_b,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = gen_shr16_ins_i64,
          .fniv = gen_shr_ins_vec,
          .fno = gen_helper_gvec_sri_h,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_shr32_ins_i32,
          .fniv = gen_shr_ins_vec,
          .fno = gen_helper_gvec_sri_s,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_shr64_ins_i64,
          .fniv = gen_shr_ins_vec,
          .fno = gen_helper_gvec_sri_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };

    /* tszimm encoding produces immediates in the range [1..esize]. */
    tcg_debug_assert(shift > 0);
    tcg_debug_assert(shift <= (8 << vece));

    /* Shift of esize leaves destination unchanged. */
    if (shift < (8 << vece)) {
        tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
    } else {
        /* Nop, but we do need to clear the tail. */
        tcg_gen_gvec_mov(vece, rd_ofs, rd_ofs, opr_sz, max_sz);
    }
}
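/*
 * Worked example (illustrative): SRI keeps the top 'shift' bits of the
 * destination and inserts the shifted source below them.  For MO_8 with
 * d = 0xab, a = 0xff, shift = 4: mask = 0x0f, so the result is
 * (0xab & ~0x0f) | ((0xff >> 4) & 0x0f) == 0xa0 | 0x0f == 0xaf.
 */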
3699 static void gen_shl8_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
3701 uint64_t mask
= dup_const(MO_8
, 0xff << shift
);
3702 TCGv_i64 t
= tcg_temp_new_i64();
3704 tcg_gen_shli_i64(t
, a
, shift
);
3705 tcg_gen_andi_i64(t
, t
, mask
);
3706 tcg_gen_andi_i64(d
, d
, ~mask
);
3707 tcg_gen_or_i64(d
, d
, t
);
3708 tcg_temp_free_i64(t
);
3711 static void gen_shl16_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
3713 uint64_t mask
= dup_const(MO_16
, 0xffff << shift
);
3714 TCGv_i64 t
= tcg_temp_new_i64();
3716 tcg_gen_shli_i64(t
, a
, shift
);
3717 tcg_gen_andi_i64(t
, t
, mask
);
3718 tcg_gen_andi_i64(d
, d
, ~mask
);
3719 tcg_gen_or_i64(d
, d
, t
);
3720 tcg_temp_free_i64(t
);
3723 static void gen_shl32_ins_i32(TCGv_i32 d
, TCGv_i32 a
, int32_t shift
)
3725 tcg_gen_deposit_i32(d
, d
, a
, shift
, 32 - shift
);
3728 static void gen_shl64_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
3730 tcg_gen_deposit_i64(d
, d
, a
, shift
, 64 - shift
);
3733 static void gen_shl_ins_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, int64_t sh
)
3735 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
3736 TCGv_vec m
= tcg_temp_new_vec_matching(d
);
3738 tcg_gen_shli_vec(vece
, t
, a
, sh
);
3739 tcg_gen_dupi_vec(vece
, m
, MAKE_64BIT_MASK(0, sh
));
3740 tcg_gen_and_vec(vece
, d
, d
, m
);
3741 tcg_gen_or_vec(vece
, d
, d
, t
);
3743 tcg_temp_free_vec(t
);
3744 tcg_temp_free_vec(m
);
3747 void gen_gvec_sli(unsigned vece
, uint32_t rd_ofs
, uint32_t rm_ofs
,
3748 int64_t shift
, uint32_t opr_sz
, uint32_t max_sz
)
3750 static const TCGOpcode vecop_list
[] = { INDEX_op_shli_vec
, 0 };
3751 const GVecGen2i ops
[4] = {
3752 { .fni8
= gen_shl8_ins_i64
,
3753 .fniv
= gen_shl_ins_vec
,
3754 .fno
= gen_helper_gvec_sli_b
,
3756 .opt_opc
= vecop_list
,
3758 { .fni8
= gen_shl16_ins_i64
,
3759 .fniv
= gen_shl_ins_vec
,
3760 .fno
= gen_helper_gvec_sli_h
,
3762 .opt_opc
= vecop_list
,
3764 { .fni4
= gen_shl32_ins_i32
,
3765 .fniv
= gen_shl_ins_vec
,
3766 .fno
= gen_helper_gvec_sli_s
,
3768 .opt_opc
= vecop_list
,
3770 { .fni8
= gen_shl64_ins_i64
,
3771 .fniv
= gen_shl_ins_vec
,
3772 .fno
= gen_helper_gvec_sli_d
,
3773 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
3775 .opt_opc
= vecop_list
,
3779 /* tszimm encoding produces immediates in the range [0..esize-1]. */
3780 tcg_debug_assert(shift
>= 0);
3781 tcg_debug_assert(shift
< (8 << vece
));
3784 tcg_gen_gvec_mov(vece
, rd_ofs
, rm_ofs
, opr_sz
, max_sz
);
3786 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, opr_sz
, max_sz
, shift
, &ops
[vece
]);
3790 static void gen_mla8_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
3792 gen_helper_neon_mul_u8(a
, a
, b
);
3793 gen_helper_neon_add_u8(d
, d
, a
);
3796 static void gen_mls8_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
3798 gen_helper_neon_mul_u8(a
, a
, b
);
3799 gen_helper_neon_sub_u8(d
, d
, a
);
3802 static void gen_mla16_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
3804 gen_helper_neon_mul_u16(a
, a
, b
);
3805 gen_helper_neon_add_u16(d
, d
, a
);
3808 static void gen_mls16_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
3810 gen_helper_neon_mul_u16(a
, a
, b
);
3811 gen_helper_neon_sub_u16(d
, d
, a
);
3814 static void gen_mla32_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
3816 tcg_gen_mul_i32(a
, a
, b
);
3817 tcg_gen_add_i32(d
, d
, a
);
3820 static void gen_mls32_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
3822 tcg_gen_mul_i32(a
, a
, b
);
3823 tcg_gen_sub_i32(d
, d
, a
);
3826 static void gen_mla64_i64(TCGv_i64 d
, TCGv_i64 a
, TCGv_i64 b
)
3828 tcg_gen_mul_i64(a
, a
, b
);
3829 tcg_gen_add_i64(d
, d
, a
);
3832 static void gen_mls64_i64(TCGv_i64 d
, TCGv_i64 a
, TCGv_i64 b
)
3834 tcg_gen_mul_i64(a
, a
, b
);
3835 tcg_gen_sub_i64(d
, d
, a
);
3838 static void gen_mla_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, TCGv_vec b
)
3840 tcg_gen_mul_vec(vece
, a
, a
, b
);
3841 tcg_gen_add_vec(vece
, d
, d
, a
);
3844 static void gen_mls_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, TCGv_vec b
)
3846 tcg_gen_mul_vec(vece
, a
, a
, b
);
3847 tcg_gen_sub_vec(vece
, d
, d
, a
);
3850 /* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
3851 * these tables are shared with AArch64 which does support them.
3853 void gen_gvec_mla(unsigned vece
, uint32_t rd_ofs
, uint32_t rn_ofs
,
3854 uint32_t rm_ofs
, uint32_t opr_sz
, uint32_t max_sz
)
3856 static const TCGOpcode vecop_list
[] = {
3857 INDEX_op_mul_vec
, INDEX_op_add_vec
, 0
3859 static const GVecGen3 ops
[4] = {
3860 { .fni4
= gen_mla8_i32
,
3861 .fniv
= gen_mla_vec
,
3863 .opt_opc
= vecop_list
,
3865 { .fni4
= gen_mla16_i32
,
3866 .fniv
= gen_mla_vec
,
3868 .opt_opc
= vecop_list
,
3870 { .fni4
= gen_mla32_i32
,
3871 .fniv
= gen_mla_vec
,
3873 .opt_opc
= vecop_list
,
3875 { .fni8
= gen_mla64_i64
,
3876 .fniv
= gen_mla_vec
,
3877 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
3879 .opt_opc
= vecop_list
,
3882 tcg_gen_gvec_3(rd_ofs
, rn_ofs
, rm_ofs
, opr_sz
, max_sz
, &ops
[vece
]);
3885 void gen_gvec_mls(unsigned vece
, uint32_t rd_ofs
, uint32_t rn_ofs
,
3886 uint32_t rm_ofs
, uint32_t opr_sz
, uint32_t max_sz
)
3888 static const TCGOpcode vecop_list
[] = {
3889 INDEX_op_mul_vec
, INDEX_op_sub_vec
, 0
3891 static const GVecGen3 ops
[4] = {
3892 { .fni4
= gen_mls8_i32
,
3893 .fniv
= gen_mls_vec
,
3895 .opt_opc
= vecop_list
,
3897 { .fni4
= gen_mls16_i32
,
3898 .fniv
= gen_mls_vec
,
3900 .opt_opc
= vecop_list
,
3902 { .fni4
= gen_mls32_i32
,
3903 .fniv
= gen_mls_vec
,
3905 .opt_opc
= vecop_list
,
3907 { .fni8
= gen_mls64_i64
,
3908 .fniv
= gen_mls_vec
,
3909 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
3911 .opt_opc
= vecop_list
,
3914 tcg_gen_gvec_3(rd_ofs
, rn_ofs
, rm_ofs
, opr_sz
, max_sz
, &ops
[vece
]);
/* CMTST : test is "if (X & Y != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);
}

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}

void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_cmp_vec, 0 };
    static const GVecGen3 ops[4] = {
        { .fni4 = gen_helper_neon_tst_u8,
          .fniv = gen_cmtst_vec,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni4 = gen_helper_neon_tst_u16,
          .fniv = gen_cmtst_vec,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_cmtst_i32,
          .fniv = gen_cmtst_vec,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_cmtst_i64,
          .fniv = gen_cmtst_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
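/*
 * Note: unlike a C "!= 0" test, CMTST produces a full element of ones when
 * any common bit is set, hence the setcond + neg pair above.  For example,
 * in the 32-bit case a = 0x10, b = 0x30 yields 0xffffffff, while
 * a = 0x10, b = 0x01 yields 0.
 */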
3965 void gen_ushl_i32(TCGv_i32 dst
, TCGv_i32 src
, TCGv_i32 shift
)
3967 TCGv_i32 lval
= tcg_temp_new_i32();
3968 TCGv_i32 rval
= tcg_temp_new_i32();
3969 TCGv_i32 lsh
= tcg_temp_new_i32();
3970 TCGv_i32 rsh
= tcg_temp_new_i32();
3971 TCGv_i32 zero
= tcg_const_i32(0);
3972 TCGv_i32 max
= tcg_const_i32(32);
3975 * Rely on the TCG guarantee that out of range shifts produce
3976 * unspecified results, not undefined behaviour (i.e. no trap).
3977 * Discard out-of-range results after the fact.
3979 tcg_gen_ext8s_i32(lsh
, shift
);
3980 tcg_gen_neg_i32(rsh
, lsh
);
3981 tcg_gen_shl_i32(lval
, src
, lsh
);
3982 tcg_gen_shr_i32(rval
, src
, rsh
);
3983 tcg_gen_movcond_i32(TCG_COND_LTU
, dst
, lsh
, max
, lval
, zero
);
3984 tcg_gen_movcond_i32(TCG_COND_LTU
, dst
, rsh
, max
, rval
, dst
);
3986 tcg_temp_free_i32(lval
);
3987 tcg_temp_free_i32(rval
);
3988 tcg_temp_free_i32(lsh
);
3989 tcg_temp_free_i32(rsh
);
3990 tcg_temp_free_i32(zero
);
3991 tcg_temp_free_i32(max
);
3994 void gen_ushl_i64(TCGv_i64 dst
, TCGv_i64 src
, TCGv_i64 shift
)
3996 TCGv_i64 lval
= tcg_temp_new_i64();
3997 TCGv_i64 rval
= tcg_temp_new_i64();
3998 TCGv_i64 lsh
= tcg_temp_new_i64();
3999 TCGv_i64 rsh
= tcg_temp_new_i64();
4000 TCGv_i64 zero
= tcg_const_i64(0);
4001 TCGv_i64 max
= tcg_const_i64(64);
4004 * Rely on the TCG guarantee that out of range shifts produce
4005 * unspecified results, not undefined behaviour (i.e. no trap).
4006 * Discard out-of-range results after the fact.
4008 tcg_gen_ext8s_i64(lsh
, shift
);
4009 tcg_gen_neg_i64(rsh
, lsh
);
4010 tcg_gen_shl_i64(lval
, src
, lsh
);
4011 tcg_gen_shr_i64(rval
, src
, rsh
);
4012 tcg_gen_movcond_i64(TCG_COND_LTU
, dst
, lsh
, max
, lval
, zero
);
4013 tcg_gen_movcond_i64(TCG_COND_LTU
, dst
, rsh
, max
, rval
, dst
);
4015 tcg_temp_free_i64(lval
);
4016 tcg_temp_free_i64(rval
);
4017 tcg_temp_free_i64(lsh
);
4018 tcg_temp_free_i64(rsh
);
4019 tcg_temp_free_i64(zero
);
4020 tcg_temp_free_i64(max
);
4023 static void gen_ushl_vec(unsigned vece
, TCGv_vec dst
,
4024 TCGv_vec src
, TCGv_vec shift
)
4026 TCGv_vec lval
= tcg_temp_new_vec_matching(dst
);
4027 TCGv_vec rval
= tcg_temp_new_vec_matching(dst
);
4028 TCGv_vec lsh
= tcg_temp_new_vec_matching(dst
);
4029 TCGv_vec rsh
= tcg_temp_new_vec_matching(dst
);
4032 tcg_gen_neg_vec(vece
, rsh
, shift
);
4034 tcg_gen_mov_vec(lsh
, shift
);
4036 msk
= tcg_temp_new_vec_matching(dst
);
4037 tcg_gen_dupi_vec(vece
, msk
, 0xff);
4038 tcg_gen_and_vec(vece
, lsh
, shift
, msk
);
4039 tcg_gen_and_vec(vece
, rsh
, rsh
, msk
);
4040 tcg_temp_free_vec(msk
);
4044 * Rely on the TCG guarantee that out of range shifts produce
4045 * unspecified results, not undefined behaviour (i.e. no trap).
4046 * Discard out-of-range results after the fact.
4048 tcg_gen_shlv_vec(vece
, lval
, src
, lsh
);
4049 tcg_gen_shrv_vec(vece
, rval
, src
, rsh
);
4051 max
= tcg_temp_new_vec_matching(dst
);
4052 tcg_gen_dupi_vec(vece
, max
, 8 << vece
);
4055 * The choice of LT (signed) and GEU (unsigned) are biased toward
4056 * the instructions of the x86_64 host. For MO_8, the whole byte
4057 * is significant so we must use an unsigned compare; otherwise we
4058 * have already masked to a byte and so a signed compare works.
4059 * Other tcg hosts have a full set of comparisons and do not care.
4062 tcg_gen_cmp_vec(TCG_COND_GEU
, vece
, lsh
, lsh
, max
);
4063 tcg_gen_cmp_vec(TCG_COND_GEU
, vece
, rsh
, rsh
, max
);
4064 tcg_gen_andc_vec(vece
, lval
, lval
, lsh
);
4065 tcg_gen_andc_vec(vece
, rval
, rval
, rsh
);
4067 tcg_gen_cmp_vec(TCG_COND_LT
, vece
, lsh
, lsh
, max
);
4068 tcg_gen_cmp_vec(TCG_COND_LT
, vece
, rsh
, rsh
, max
);
4069 tcg_gen_and_vec(vece
, lval
, lval
, lsh
);
4070 tcg_gen_and_vec(vece
, rval
, rval
, rsh
);
4072 tcg_gen_or_vec(vece
, dst
, lval
, rval
);
4074 tcg_temp_free_vec(max
);
4075 tcg_temp_free_vec(lval
);
4076 tcg_temp_free_vec(rval
);
4077 tcg_temp_free_vec(lsh
);
4078 tcg_temp_free_vec(rsh
);
4081 void gen_gvec_ushl(unsigned vece
, uint32_t rd_ofs
, uint32_t rn_ofs
,
4082 uint32_t rm_ofs
, uint32_t opr_sz
, uint32_t max_sz
)
4084 static const TCGOpcode vecop_list
[] = {
4085 INDEX_op_neg_vec
, INDEX_op_shlv_vec
,
4086 INDEX_op_shrv_vec
, INDEX_op_cmp_vec
, 0
4088 static const GVecGen3 ops
[4] = {
4089 { .fniv
= gen_ushl_vec
,
4090 .fno
= gen_helper_gvec_ushl_b
,
4091 .opt_opc
= vecop_list
,
4093 { .fniv
= gen_ushl_vec
,
4094 .fno
= gen_helper_gvec_ushl_h
,
4095 .opt_opc
= vecop_list
,
4097 { .fni4
= gen_ushl_i32
,
4098 .fniv
= gen_ushl_vec
,
4099 .opt_opc
= vecop_list
,
4101 { .fni8
= gen_ushl_i64
,
4102 .fniv
= gen_ushl_vec
,
4103 .opt_opc
= vecop_list
,
4106 tcg_gen_gvec_3(rd_ofs
, rn_ofs
, rm_ofs
, opr_sz
, max_sz
, &ops
[vece
]);
4109 void gen_sshl_i32(TCGv_i32 dst
, TCGv_i32 src
, TCGv_i32 shift
)
4111 TCGv_i32 lval
= tcg_temp_new_i32();
4112 TCGv_i32 rval
= tcg_temp_new_i32();
4113 TCGv_i32 lsh
= tcg_temp_new_i32();
4114 TCGv_i32 rsh
= tcg_temp_new_i32();
4115 TCGv_i32 zero
= tcg_const_i32(0);
4116 TCGv_i32 max
= tcg_const_i32(31);
4119 * Rely on the TCG guarantee that out of range shifts produce
4120 * unspecified results, not undefined behaviour (i.e. no trap).
4121 * Discard out-of-range results after the fact.
4123 tcg_gen_ext8s_i32(lsh
, shift
);
4124 tcg_gen_neg_i32(rsh
, lsh
);
4125 tcg_gen_shl_i32(lval
, src
, lsh
);
4126 tcg_gen_umin_i32(rsh
, rsh
, max
);
4127 tcg_gen_sar_i32(rval
, src
, rsh
);
4128 tcg_gen_movcond_i32(TCG_COND_LEU
, lval
, lsh
, max
, lval
, zero
);
4129 tcg_gen_movcond_i32(TCG_COND_LT
, dst
, lsh
, zero
, rval
, lval
);
4131 tcg_temp_free_i32(lval
);
4132 tcg_temp_free_i32(rval
);
4133 tcg_temp_free_i32(lsh
);
4134 tcg_temp_free_i32(rsh
);
4135 tcg_temp_free_i32(zero
);
4136 tcg_temp_free_i32(max
);
4139 void gen_sshl_i64(TCGv_i64 dst
, TCGv_i64 src
, TCGv_i64 shift
)
4141 TCGv_i64 lval
= tcg_temp_new_i64();
4142 TCGv_i64 rval
= tcg_temp_new_i64();
4143 TCGv_i64 lsh
= tcg_temp_new_i64();
4144 TCGv_i64 rsh
= tcg_temp_new_i64();
4145 TCGv_i64 zero
= tcg_const_i64(0);
4146 TCGv_i64 max
= tcg_const_i64(63);
4149 * Rely on the TCG guarantee that out of range shifts produce
4150 * unspecified results, not undefined behaviour (i.e. no trap).
4151 * Discard out-of-range results after the fact.
4153 tcg_gen_ext8s_i64(lsh
, shift
);
4154 tcg_gen_neg_i64(rsh
, lsh
);
4155 tcg_gen_shl_i64(lval
, src
, lsh
);
4156 tcg_gen_umin_i64(rsh
, rsh
, max
);
4157 tcg_gen_sar_i64(rval
, src
, rsh
);
4158 tcg_gen_movcond_i64(TCG_COND_LEU
, lval
, lsh
, max
, lval
, zero
);
4159 tcg_gen_movcond_i64(TCG_COND_LT
, dst
, lsh
, zero
, rval
, lval
);
4161 tcg_temp_free_i64(lval
);
4162 tcg_temp_free_i64(rval
);
4163 tcg_temp_free_i64(lsh
);
4164 tcg_temp_free_i64(rsh
);
4165 tcg_temp_free_i64(zero
);
4166 tcg_temp_free_i64(max
);
4169 static void gen_sshl_vec(unsigned vece
, TCGv_vec dst
,
4170 TCGv_vec src
, TCGv_vec shift
)
4172 TCGv_vec lval
= tcg_temp_new_vec_matching(dst
);
4173 TCGv_vec rval
= tcg_temp_new_vec_matching(dst
);
4174 TCGv_vec lsh
= tcg_temp_new_vec_matching(dst
);
4175 TCGv_vec rsh
= tcg_temp_new_vec_matching(dst
);
4176 TCGv_vec tmp
= tcg_temp_new_vec_matching(dst
);
4179 * Rely on the TCG guarantee that out of range shifts produce
4180 * unspecified results, not undefined behaviour (i.e. no trap).
4181 * Discard out-of-range results after the fact.
4183 tcg_gen_neg_vec(vece
, rsh
, shift
);
4185 tcg_gen_mov_vec(lsh
, shift
);
4187 tcg_gen_dupi_vec(vece
, tmp
, 0xff);
4188 tcg_gen_and_vec(vece
, lsh
, shift
, tmp
);
4189 tcg_gen_and_vec(vece
, rsh
, rsh
, tmp
);
4192 /* Bound rsh so out of bound right shift gets -1. */
4193 tcg_gen_dupi_vec(vece
, tmp
, (8 << vece
) - 1);
4194 tcg_gen_umin_vec(vece
, rsh
, rsh
, tmp
);
4195 tcg_gen_cmp_vec(TCG_COND_GT
, vece
, tmp
, lsh
, tmp
);
4197 tcg_gen_shlv_vec(vece
, lval
, src
, lsh
);
4198 tcg_gen_sarv_vec(vece
, rval
, src
, rsh
);
4200 /* Select in-bound left shift. */
4201 tcg_gen_andc_vec(vece
, lval
, lval
, tmp
);
4203 /* Select between left and right shift. */
4205 tcg_gen_dupi_vec(vece
, tmp
, 0);
4206 tcg_gen_cmpsel_vec(TCG_COND_LT
, vece
, dst
, lsh
, tmp
, rval
, lval
);
4208 tcg_gen_dupi_vec(vece
, tmp
, 0x80);
4209 tcg_gen_cmpsel_vec(TCG_COND_LT
, vece
, dst
, lsh
, tmp
, lval
, rval
);
4212 tcg_temp_free_vec(lval
);
4213 tcg_temp_free_vec(rval
);
4214 tcg_temp_free_vec(lsh
);
4215 tcg_temp_free_vec(rsh
);
4216 tcg_temp_free_vec(tmp
);
4219 void gen_gvec_sshl(unsigned vece
, uint32_t rd_ofs
, uint32_t rn_ofs
,
4220 uint32_t rm_ofs
, uint32_t opr_sz
, uint32_t max_sz
)
4222 static const TCGOpcode vecop_list
[] = {
4223 INDEX_op_neg_vec
, INDEX_op_umin_vec
, INDEX_op_shlv_vec
,
4224 INDEX_op_sarv_vec
, INDEX_op_cmp_vec
, INDEX_op_cmpsel_vec
, 0
4226 static const GVecGen3 ops
[4] = {
4227 { .fniv
= gen_sshl_vec
,
4228 .fno
= gen_helper_gvec_sshl_b
,
4229 .opt_opc
= vecop_list
,
4231 { .fniv
= gen_sshl_vec
,
4232 .fno
= gen_helper_gvec_sshl_h
,
4233 .opt_opc
= vecop_list
,
4235 { .fni4
= gen_sshl_i32
,
4236 .fniv
= gen_sshl_vec
,
4237 .opt_opc
= vecop_list
,
4239 { .fni8
= gen_sshl_i64
,
4240 .fniv
= gen_sshl_vec
,
4241 .opt_opc
= vecop_list
,
4244 tcg_gen_gvec_3(rd_ofs
, rn_ofs
, rm_ofs
, opr_sz
, max_sz
, &ops
[vece
]);
4247 static void gen_uqadd_vec(unsigned vece
, TCGv_vec t
, TCGv_vec sat
,
4248 TCGv_vec a
, TCGv_vec b
)
4250 TCGv_vec x
= tcg_temp_new_vec_matching(t
);
4251 tcg_gen_add_vec(vece
, x
, a
, b
);
4252 tcg_gen_usadd_vec(vece
, t
, a
, b
);
4253 tcg_gen_cmp_vec(TCG_COND_NE
, vece
, x
, x
, t
);
4254 tcg_gen_or_vec(vece
, sat
, sat
, x
);
4255 tcg_temp_free_vec(x
);
4258 void gen_gvec_uqadd_qc(unsigned vece
, uint32_t rd_ofs
, uint32_t rn_ofs
,
4259 uint32_t rm_ofs
, uint32_t opr_sz
, uint32_t max_sz
)
4261 static const TCGOpcode vecop_list
[] = {
4262 INDEX_op_usadd_vec
, INDEX_op_cmp_vec
, INDEX_op_add_vec
, 0
4264 static const GVecGen4 ops
[4] = {
4265 { .fniv
= gen_uqadd_vec
,
4266 .fno
= gen_helper_gvec_uqadd_b
,
4268 .opt_opc
= vecop_list
,
4270 { .fniv
= gen_uqadd_vec
,
4271 .fno
= gen_helper_gvec_uqadd_h
,
4273 .opt_opc
= vecop_list
,
4275 { .fniv
= gen_uqadd_vec
,
4276 .fno
= gen_helper_gvec_uqadd_s
,
4278 .opt_opc
= vecop_list
,
4280 { .fniv
= gen_uqadd_vec
,
4281 .fno
= gen_helper_gvec_uqadd_d
,
4283 .opt_opc
= vecop_list
,
4286 tcg_gen_gvec_4(rd_ofs
, offsetof(CPUARMState
, vfp
.qc
),
4287 rn_ofs
, rm_ofs
, opr_sz
, max_sz
, &ops
[vece
]);
static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_ssadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
    };
    static const GVecGen4 ops[4] = {
        { .fniv = gen_sqadd_vec,
          .fno = gen_helper_gvec_sqadd_b,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_8 },
        { .fniv = gen_sqadd_vec,
          .fno = gen_helper_gvec_sqadd_h,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_16 },
        { .fniv = gen_sqadd_vec,
          .fno = gen_helper_gvec_sqadd_s,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_32 },
        { .fniv = gen_sqadd_vec,
          .fno = gen_helper_gvec_sqadd_d,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_64 },
    };
    tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                   rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_ussub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
    };
    static const GVecGen4 ops[4] = {
        { .fniv = gen_uqsub_vec,
          .fno = gen_helper_gvec_uqsub_b,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_8 },
        { .fniv = gen_uqsub_vec,
          .fno = gen_helper_gvec_uqsub_h,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_16 },
        { .fniv = gen_uqsub_vec,
          .fno = gen_helper_gvec_uqsub_s,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_32 },
        { .fniv = gen_uqsub_vec,
          .fno = gen_helper_gvec_uqsub_d,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_64 },
    };
    tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                   rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_sssub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
    };
    static const GVecGen4 ops[4] = {
        { .fniv = gen_sqsub_vec,
          .fno = gen_helper_gvec_sqsub_b,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_8 },
        { .fniv = gen_sqsub_vec,
          .fno = gen_helper_gvec_sqsub_h,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_16 },
        { .fniv = gen_sqsub_vec,
          .fno = gen_helper_gvec_sqsub_s,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_32 },
        { .fniv = gen_sqsub_vec,
          .fno = gen_helper_gvec_sqsub_d,
          .opt_opc = vecop_list,
          .write_aofs = true,
          .vece = MO_64 },
    };
    tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                   rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
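/*
 * Signed absolute difference: the scalar expanders compute both a - b
 * and b - a and select the non-negative one; the vector expander uses
 * smax - smin instead.
 */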
static void gen_sabd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_sub_i32(t, a, b);
    tcg_gen_sub_i32(d, b, a);
    tcg_gen_movcond_i32(TCG_COND_LT, d, a, b, d, t);
    tcg_temp_free_i32(t);
}

static void gen_sabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_sub_i64(t, a, b);
    tcg_gen_sub_i64(d, b, a);
    tcg_gen_movcond_i64(TCG_COND_LT, d, a, b, d, t);
    tcg_temp_free_i64(t);
}

static void gen_sabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_smin_vec(vece, t, a, b);
    tcg_gen_smax_vec(vece, d, a, b);
    tcg_gen_sub_vec(vece, d, d, t);
    tcg_temp_free_vec(t);
}

void gen_gvec_sabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_sub_vec, INDEX_op_smin_vec, INDEX_op_smax_vec, 0
    };
    static const GVecGen3 ops[4] = {
        { .fniv = gen_sabd_vec,
          .fno = gen_helper_gvec_sabd_b,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = gen_sabd_vec,
          .fno = gen_helper_gvec_sabd_h,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_sabd_i32,
          .fniv = gen_sabd_vec,
          .fno = gen_helper_gvec_sabd_s,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_sabd_i64,
          .fniv = gen_sabd_vec,
          .fno = gen_helper_gvec_sabd_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
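/* Unsigned absolute difference, as above but with unsigned compares. */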
static void gen_uabd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_sub_i32(t, a, b);
    tcg_gen_sub_i32(d, b, a);
    tcg_gen_movcond_i32(TCG_COND_LTU, d, a, b, d, t);
    tcg_temp_free_i32(t);
}

static void gen_uabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_sub_i64(t, a, b);
    tcg_gen_sub_i64(d, b, a);
    tcg_gen_movcond_i64(TCG_COND_LTU, d, a, b, d, t);
    tcg_temp_free_i64(t);
}

static void gen_uabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_umin_vec(vece, t, a, b);
    tcg_gen_umax_vec(vece, d, a, b);
    tcg_gen_sub_vec(vece, d, d, t);
    tcg_temp_free_vec(t);
}

void gen_gvec_uabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_sub_vec, INDEX_op_umin_vec, INDEX_op_umax_vec, 0
    };
    static const GVecGen3 ops[4] = {
        { .fniv = gen_uabd_vec,
          .fno = gen_helper_gvec_uabd_b,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = gen_uabd_vec,
          .fno = gen_helper_gvec_uabd_h,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_uabd_i32,
          .fniv = gen_uabd_vec,
          .fno = gen_helper_gvec_uabd_s,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_uabd_i64,
          .fniv = gen_uabd_vec,
          .fno = gen_helper_gvec_uabd_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
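/*
 * Signed absolute difference and accumulate: reuse the SABD expanders
 * and add the result into the destination.
 */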
static void gen_saba_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();
    gen_sabd_i32(t, a, b);
    tcg_gen_add_i32(d, d, t);
    tcg_temp_free_i32(t);
}

static void gen_saba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();
    gen_sabd_i64(t, a, b);
    tcg_gen_add_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_saba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    gen_sabd_vec(vece, t, a, b);
    tcg_gen_add_vec(vece, d, d, t);
    tcg_temp_free_vec(t);
}

void gen_gvec_saba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_sub_vec, INDEX_op_add_vec,
        INDEX_op_smin_vec, INDEX_op_smax_vec, 0
    };
    static const GVecGen3 ops[4] = {
        { .fniv = gen_saba_vec,
          .fno = gen_helper_gvec_saba_b,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_8 },
        { .fniv = gen_saba_vec,
          .fno = gen_helper_gvec_saba_h,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_16 },
        { .fni4 = gen_saba_i32,
          .fniv = gen_saba_vec,
          .fno = gen_helper_gvec_saba_s,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_32 },
        { .fni8 = gen_saba_i64,
          .fniv = gen_saba_vec,
          .fno = gen_helper_gvec_saba_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_64 },
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
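/* Unsigned absolute difference and accumulate. */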
static void gen_uaba_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();
    gen_uabd_i32(t, a, b);
    tcg_gen_add_i32(d, d, t);
    tcg_temp_free_i32(t);
}

static void gen_uaba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();
    gen_uabd_i64(t, a, b);
    tcg_gen_add_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_uaba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    gen_uabd_vec(vece, t, a, b);
    tcg_gen_add_vec(vece, d, d, t);
    tcg_temp_free_vec(t);
}

void gen_gvec_uaba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_sub_vec, INDEX_op_add_vec,
        INDEX_op_umin_vec, INDEX_op_umax_vec, 0
    };
    static const GVecGen3 ops[4] = {
        { .fniv = gen_uaba_vec,
          .fno = gen_helper_gvec_uaba_b,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_8 },
        { .fniv = gen_uaba_vec,
          .fno = gen_helper_gvec_uaba_h,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_16 },
        { .fni4 = gen_uaba_i32,
          .fniv = gen_uaba_vec,
          .fno = gen_helper_gvec_uaba_s,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_32 },
        { .fni8 = gen_uaba_i64,
          .fniv = gen_uaba_vec,
          .fno = gen_helper_gvec_uaba_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_64 },
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
4657 static void do_coproc_insn(DisasContext
*s
, int cpnum
, int is64
,
4658 int opc1
, int crn
, int crm
, int opc2
,
4659 bool isread
, int rt
, int rt2
)
4661 const ARMCPRegInfo
*ri
;
4663 ri
= get_arm_cp_reginfo(s
->cp_regs
,
4664 ENCODE_CP_REG(cpnum
, is64
, s
->ns
, crn
, crm
, opc1
, opc2
));
4668 /* Check access permissions */
4669 if (!cp_access_ok(s
->current_el
, ri
, isread
)) {
4670 unallocated_encoding(s
);
4674 if (s
->hstr_active
|| ri
->accessfn
||
4675 (arm_dc_feature(s
, ARM_FEATURE_XSCALE
) && cpnum
< 14)) {
4676 /* Emit code to perform further access permissions checks at
4677 * runtime; this may result in an exception.
4678 * Note that on XScale all cp0..c13 registers do an access check
4679 * call in order to handle c15_cpar.
4682 TCGv_i32 tcg_syn
, tcg_isread
;
4685 /* Note that since we are an implementation which takes an
4686 * exception on a trapped conditional instruction only if the
4687 * instruction passes its condition code check, we can take
4688 * advantage of the clause in the ARM ARM that allows us to set
4689 * the COND field in the instruction to 0xE in all cases.
4690 * We could fish the actual condition out of the insn (ARM)
4691 * or the condexec bits (Thumb) but it isn't necessary.
4696 syndrome
= syn_cp14_rrt_trap(1, 0xe, opc1
, crm
, rt
, rt2
,
4699 syndrome
= syn_cp14_rt_trap(1, 0xe, opc1
, opc2
, crn
, crm
,
4705 syndrome
= syn_cp15_rrt_trap(1, 0xe, opc1
, crm
, rt
, rt2
,
4708 syndrome
= syn_cp15_rt_trap(1, 0xe, opc1
, opc2
, crn
, crm
,
4713 /* ARMv8 defines that only coprocessors 14 and 15 exist,
4714 * so this can only happen if this is an ARMv7 or earlier CPU,
4715 * in which case the syndrome information won't actually be
4718 assert(!arm_dc_feature(s
, ARM_FEATURE_V8
));
4719 syndrome
= syn_uncategorized();
4723 gen_set_condexec(s
);
4724 gen_set_pc_im(s
, s
->pc_curr
);
4725 tmpptr
= tcg_const_ptr(ri
);
4726 tcg_syn
= tcg_const_i32(syndrome
);
4727 tcg_isread
= tcg_const_i32(isread
);
4728 gen_helper_access_check_cp_reg(cpu_env
, tmpptr
, tcg_syn
,
4730 tcg_temp_free_ptr(tmpptr
);
4731 tcg_temp_free_i32(tcg_syn
);
4732 tcg_temp_free_i32(tcg_isread
);
4733 } else if (ri
->type
& ARM_CP_RAISES_EXC
) {
4735 * The readfn or writefn might raise an exception;
4736 * synchronize the CPU state in case it does.
4738 gen_set_condexec(s
);
4739 gen_set_pc_im(s
, s
->pc_curr
);
4742 /* Handle special cases first */
4743 switch (ri
->type
& ~(ARM_CP_FLAG_MASK
& ~ARM_CP_SPECIAL
)) {
4748 unallocated_encoding(s
);
4751 gen_set_pc_im(s
, s
->base
.pc_next
);
4752 s
->base
.is_jmp
= DISAS_WFI
;
4758 if ((tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) && (ri
->type
& ARM_CP_IO
)) {
4767 if (ri
->type
& ARM_CP_CONST
) {
4768 tmp64
= tcg_const_i64(ri
->resetvalue
);
4769 } else if (ri
->readfn
) {
4771 tmp64
= tcg_temp_new_i64();
4772 tmpptr
= tcg_const_ptr(ri
);
4773 gen_helper_get_cp_reg64(tmp64
, cpu_env
, tmpptr
);
4774 tcg_temp_free_ptr(tmpptr
);
4776 tmp64
= tcg_temp_new_i64();
4777 tcg_gen_ld_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
4779 tmp
= tcg_temp_new_i32();
4780 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
4781 store_reg(s
, rt
, tmp
);
4782 tmp
= tcg_temp_new_i32();
4783 tcg_gen_extrh_i64_i32(tmp
, tmp64
);
4784 tcg_temp_free_i64(tmp64
);
4785 store_reg(s
, rt2
, tmp
);
4788 if (ri
->type
& ARM_CP_CONST
) {
4789 tmp
= tcg_const_i32(ri
->resetvalue
);
4790 } else if (ri
->readfn
) {
4792 tmp
= tcg_temp_new_i32();
4793 tmpptr
= tcg_const_ptr(ri
);
4794 gen_helper_get_cp_reg(tmp
, cpu_env
, tmpptr
);
4795 tcg_temp_free_ptr(tmpptr
);
4797 tmp
= load_cpu_offset(ri
->fieldoffset
);
4800 /* Destination register of r15 for 32 bit loads sets
4801 * the condition codes from the high 4 bits of the value
4804 tcg_temp_free_i32(tmp
);
4806 store_reg(s
, rt
, tmp
);
4811 if (ri
->type
& ARM_CP_CONST
) {
4812 /* If not forbidden by access permissions, treat as WI */
4817 TCGv_i32 tmplo
, tmphi
;
4818 TCGv_i64 tmp64
= tcg_temp_new_i64();
4819 tmplo
= load_reg(s
, rt
);
4820 tmphi
= load_reg(s
, rt2
);
4821 tcg_gen_concat_i32_i64(tmp64
, tmplo
, tmphi
);
4822 tcg_temp_free_i32(tmplo
);
4823 tcg_temp_free_i32(tmphi
);
4825 TCGv_ptr tmpptr
= tcg_const_ptr(ri
);
4826 gen_helper_set_cp_reg64(cpu_env
, tmpptr
, tmp64
);
4827 tcg_temp_free_ptr(tmpptr
);
4829 tcg_gen_st_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
4831 tcg_temp_free_i64(tmp64
);
4836 tmp
= load_reg(s
, rt
);
4837 tmpptr
= tcg_const_ptr(ri
);
4838 gen_helper_set_cp_reg(cpu_env
, tmpptr
, tmp
);
4839 tcg_temp_free_ptr(tmpptr
);
4840 tcg_temp_free_i32(tmp
);
4842 TCGv_i32 tmp
= load_reg(s
, rt
);
4843 store_cpu_offset(tmp
, ri
->fieldoffset
);
4848 /* I/O operations must end the TB here (whether read or write) */
4849 need_exit_tb
= ((tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) &&
4850 (ri
->type
& ARM_CP_IO
));
4852 if (!isread
&& !(ri
->type
& ARM_CP_SUPPRESS_TB_END
)) {
4854 * A write to any coprocessor register that ends a TB
4855 * must rebuild the hflags for the next TB.
4857 TCGv_i32 tcg_el
= tcg_const_i32(s
->current_el
);
4858 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
4859 gen_helper_rebuild_hflags_m32(cpu_env
, tcg_el
);
4861 if (ri
->type
& ARM_CP_NEWEL
) {
4862 gen_helper_rebuild_hflags_a32_newel(cpu_env
);
4864 gen_helper_rebuild_hflags_a32(cpu_env
, tcg_el
);
4867 tcg_temp_free_i32(tcg_el
);
4869 * We default to ending the TB on a coprocessor register write,
4870 * but allow this to be suppressed by the register definition
4871 * (usually only necessary to work around guest bugs).
4873 need_exit_tb
= true;
4882 /* Unknown register; this might be a guest error or a QEMU
4883 * unimplemented feature.
4886 qemu_log_mask(LOG_UNIMP
, "%s access to unsupported AArch32 "
4887 "64 bit system register cp:%d opc1: %d crm:%d "
4889 isread
? "read" : "write", cpnum
, opc1
, crm
,
4890 s
->ns
? "non-secure" : "secure");
4892 qemu_log_mask(LOG_UNIMP
, "%s access to unsupported AArch32 "
4893 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
4895 isread
? "read" : "write", cpnum
, opc1
, crn
, crm
, opc2
,
4896 s
->ns
? "non-secure" : "secure");
4899 unallocated_encoding(s
);
/* Decode XScale DSP or iWMMXt insn (in the copro space, cp=0 or 1) */
static void disas_xscale_insn(DisasContext *s, uint32_t insn)
{
    int cpnum = (insn >> 8) & 0xf;

    if (extract32(s->c15_cpar, cpnum, 1) == 0) {
        unallocated_encoding(s);
    } else if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
        if (disas_iwmmxt_insn(s, insn)) {
            unallocated_encoding(s);
        }
    } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
        if (disas_dsp_insn(s, insn)) {
            unallocated_encoding(s);
        }
    }
}
/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_extrh_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}

/* load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv_i32 tmpl;
    TCGv_i32 tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}

/* Set N and Z flags from hi|lo.  */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.  The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually.  */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    MemOp opc = size | MO_ALIGN | s->be_data;

    s->is_ldex = true;

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. That means we don't want to do a
         * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
         * for an architecturally 64-bit access, but instead do a
         * 64-bit access using MO_BE if appropriate and then split
         * the two halves.
         * This only makes a difference for BE32 user-mode, where
         * frob64() must not flip the two halves of the 64-bit data
         * but this code must treat BE32 user-mode like BE32 system.
         */
        TCGv taddr = gen_aa32_addr(s, addr, opc);

        tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
        tcg_temp_free(taddr);
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        if (s->be_data == MO_BE) {
            tcg_gen_extr_i64_i32(tmp2, tmp, t64);
        } else {
            tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        }
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}

static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
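/*
 * Generate the conditional-store half of a LDREX/STREX pair: the store
 * succeeds (writing 0 to Rd) only if the address matches
 * cpu_exclusive_addr and memory still holds cpu_exclusive_val, which is
 * checked with an atomic cmpxchg.
 */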
5014 static void gen_store_exclusive(DisasContext
*s
, int rd
, int rt
, int rt2
,
5015 TCGv_i32 addr
, int size
)
5017 TCGv_i32 t0
, t1
, t2
;
5020 TCGLabel
*done_label
;
5021 TCGLabel
*fail_label
;
5022 MemOp opc
= size
| MO_ALIGN
| s
->be_data
;
5024 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
5030 fail_label
= gen_new_label();
5031 done_label
= gen_new_label();
5032 extaddr
= tcg_temp_new_i64();
5033 tcg_gen_extu_i32_i64(extaddr
, addr
);
5034 tcg_gen_brcond_i64(TCG_COND_NE
, extaddr
, cpu_exclusive_addr
, fail_label
);
5035 tcg_temp_free_i64(extaddr
);
5037 taddr
= gen_aa32_addr(s
, addr
, opc
);
5038 t0
= tcg_temp_new_i32();
5039 t1
= load_reg(s
, rt
);
5041 TCGv_i64 o64
= tcg_temp_new_i64();
5042 TCGv_i64 n64
= tcg_temp_new_i64();
5044 t2
= load_reg(s
, rt2
);
5045 /* For AArch32, architecturally the 32-bit word at the lowest
5046 * address is always Rt and the one at addr+4 is Rt2, even if
5047 * the CPU is big-endian. Since we're going to treat this as a
5048 * single 64-bit BE store, we need to put the two halves in the
5049 * opposite order for BE to LE, so that they end up in the right
5051 * We don't want gen_aa32_frob64() because that does the wrong
5052 * thing for BE32 usermode.
5054 if (s
->be_data
== MO_BE
) {
5055 tcg_gen_concat_i32_i64(n64
, t2
, t1
);
5057 tcg_gen_concat_i32_i64(n64
, t1
, t2
);
5059 tcg_temp_free_i32(t2
);
5061 tcg_gen_atomic_cmpxchg_i64(o64
, taddr
, cpu_exclusive_val
, n64
,
5062 get_mem_index(s
), opc
);
5063 tcg_temp_free_i64(n64
);
5065 tcg_gen_setcond_i64(TCG_COND_NE
, o64
, o64
, cpu_exclusive_val
);
5066 tcg_gen_extrl_i64_i32(t0
, o64
);
5068 tcg_temp_free_i64(o64
);
5070 t2
= tcg_temp_new_i32();
5071 tcg_gen_extrl_i64_i32(t2
, cpu_exclusive_val
);
5072 tcg_gen_atomic_cmpxchg_i32(t0
, taddr
, t2
, t1
, get_mem_index(s
), opc
);
5073 tcg_gen_setcond_i32(TCG_COND_NE
, t0
, t0
, t2
);
5074 tcg_temp_free_i32(t2
);
5076 tcg_temp_free_i32(t1
);
5077 tcg_temp_free(taddr
);
5078 tcg_gen_mov_i32(cpu_R
[rd
], t0
);
5079 tcg_temp_free_i32(t0
);
5080 tcg_gen_br(done_label
);
5082 gen_set_label(fail_label
);
5083 tcg_gen_movi_i32(cpu_R
[rd
], 1);
5084 gen_set_label(done_label
);
5085 tcg_gen_movi_i64(cpu_exclusive_addr
, -1);
5091 * @mode: mode field from insn (which stack to store to)
5092 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
5093 * @writeback: true if writeback bit set
5095 * Generate code for the SRS (Store Return State) insn.
5097 static void gen_srs(DisasContext
*s
,
5098 uint32_t mode
, uint32_t amode
, bool writeback
)
5105 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
5106 * and specified mode is monitor mode
5107 * - UNDEFINED in Hyp mode
5108 * - UNPREDICTABLE in User or System mode
5109 * - UNPREDICTABLE if the specified mode is:
5110 * -- not implemented
5111 * -- not a valid mode number
5112 * -- a mode that's at a higher exception level
5113 * -- Monitor, if we are Non-secure
5114 * For the UNPREDICTABLE cases we choose to UNDEF.
5116 if (s
->current_el
== 1 && !s
->ns
&& mode
== ARM_CPU_MODE_MON
) {
5117 gen_exception_insn(s
, s
->pc_curr
, EXCP_UDEF
, syn_uncategorized(), 3);
5121 if (s
->current_el
== 0 || s
->current_el
== 2) {
5126 case ARM_CPU_MODE_USR
:
5127 case ARM_CPU_MODE_FIQ
:
5128 case ARM_CPU_MODE_IRQ
:
5129 case ARM_CPU_MODE_SVC
:
5130 case ARM_CPU_MODE_ABT
:
5131 case ARM_CPU_MODE_UND
:
5132 case ARM_CPU_MODE_SYS
:
5134 case ARM_CPU_MODE_HYP
:
5135 if (s
->current_el
== 1 || !arm_dc_feature(s
, ARM_FEATURE_EL2
)) {
5139 case ARM_CPU_MODE_MON
:
5140 /* No need to check specifically for "are we non-secure" because
5141 * we've already made EL0 UNDEF and handled the trap for S-EL1;
5142 * so if this isn't EL3 then we must be non-secure.
5144 if (s
->current_el
!= 3) {
5153 unallocated_encoding(s
);
5157 addr
= tcg_temp_new_i32();
5158 tmp
= tcg_const_i32(mode
);
5159 /* get_r13_banked() will raise an exception if called from System mode */
5160 gen_set_condexec(s
);
5161 gen_set_pc_im(s
, s
->pc_curr
);
5162 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
5163 tcg_temp_free_i32(tmp
);
5180 tcg_gen_addi_i32(addr
, addr
, offset
);
5181 tmp
= load_reg(s
, 14);
5182 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
5183 tcg_temp_free_i32(tmp
);
5184 tmp
= load_cpu_field(spsr
);
5185 tcg_gen_addi_i32(addr
, addr
, 4);
5186 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
5187 tcg_temp_free_i32(tmp
);
5205 tcg_gen_addi_i32(addr
, addr
, offset
);
5206 tmp
= tcg_const_i32(mode
);
5207 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
5208 tcg_temp_free_i32(tmp
);
5210 tcg_temp_free_i32(addr
);
5211 s
->base
.is_jmp
= DISAS_UPDATE_EXIT
;
/* Skip this instruction if the ARM condition is false */
static void arm_skip_unless(DisasContext *s, uint32_t cond)
{
    arm_gen_condlabel(s);
    arm_gen_test_cc(cond ^ 1, s->condlabel);
}
/*
 * Constant expanders used by T16/T32 decode
 */

/* Return only the rotation part of T32ExpandImm.  */
static int t32_expandimm_rot(DisasContext *s, int x)
{
    return x & 0xc00 ? extract32(x, 7, 5) : 0;
}

/* Return the unrotated immediate from T32ExpandImm.  */
static int t32_expandimm_imm(DisasContext *s, int x)
{
    int imm = extract32(x, 0, 8);

    switch (extract32(x, 8, 4)) {
    case 0: /* XY */
        /* Nothing to do.  */
        break;
    case 1: /* 00XY00XY */
        imm *= 0x00010001;
        break;
    case 2: /* XY00XY00 */
        imm *= 0x01000100;
        break;
    case 3: /* XYXYXYXY */
        imm *= 0x01010101;
        break;
    default:
        /* Rotated constant.  */
        imm |= 0x80;
        break;
    }
    return imm;
}

static int t32_branch24(DisasContext *s, int x)
{
    /* Convert J1:J2 at x[22:21] to I2:I1, which involves I=J^~S.  */
    x ^= !(x < 0) * (3 << 21);
    /* Append the final zero.  */
    return x << 1;
}

static int t16_setflags(DisasContext *s)
{
    return s->condexec_mask == 0;
}

static int t16_push_list(DisasContext *s, int x)
{
    return (x & 0xff) | (x & 0x100) << (14 - 8);
}

static int t16_pop_list(DisasContext *s, int x)
{
    return (x & 0xff) | (x & 0x100) << (15 - 8);
}

/*
 * Include the generated decoders.
 */

#include "decode-a32.c.inc"
#include "decode-a32-uncond.c.inc"
#include "decode-t32.c.inc"
#include "decode-t16.c.inc"
static bool valid_cp(DisasContext *s, int cp)
{
    /*
     * Return true if this coprocessor field indicates something
     * that's really a possible coprocessor.
     * For v7 and earlier, coprocessors 8..15 were reserved for Arm use,
     * and of those only cp14 and cp15 were used for registers.
     * cp10 and cp11 were used for VFP and Neon, whose decode is
     * dealt with elsewhere. With the advent of fp16, cp9 is also
     * now part of VFP.
     * For v8A and later, the encoding has been tightened so that
     * only cp14 and cp15 are valid, and other values aren't considered
     * to be in the coprocessor-instruction space at all. v8M still
     * permits coprocessors 0..7.
     * For XScale, we must not decode the XScale cp0, cp1 space as
     * a standard coprocessor insn, because we want to fall through to
     * the legacy disas_xscale_insn() decoder after decodetree is done.
     */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cp == 0 || cp == 1)) {
        return false;
    }

    if (arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_M)) {
        return cp >= 14;
    }
    return cp < 8 || cp >= 14;
}

static bool trans_MCR(DisasContext *s, arg_MCR *a)
{
    if (!valid_cp(s, a->cp)) {
        return false;
    }
    do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2,
                   false, a->rt, 0);
    return true;
}

static bool trans_MRC(DisasContext *s, arg_MRC *a)
{
    if (!valid_cp(s, a->cp)) {
        return false;
    }
    do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2,
                   true, a->rt, 0);
    return true;
}

static bool trans_MCRR(DisasContext *s, arg_MCRR *a)
{
    if (!valid_cp(s, a->cp)) {
        return false;
    }
    do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0,
                   false, a->rt, a->rt2);
    return true;
}

static bool trans_MRRC(DisasContext *s, arg_MRRC *a)
{
    if (!valid_cp(s, a->cp)) {
        return false;
    }
    do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0,
                   true, a->rt, a->rt2);
    return true;
}
/* Helpers to swap operands for reverse-subtract.  */
static void gen_rsb(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_sub_i32(dst, b, a);
}

static void gen_rsb_CC(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
{
    gen_sub_CC(dst, b, a);
}

static void gen_rsc(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
{
    gen_sub_carry(dest, b, a);
}

static void gen_rsc_CC(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
{
    gen_sbc_CC(dest, b, a);
}
/*
 * Helpers for the data processing routines.
 *
 * After the computation store the results back.
 * This may be suppressed altogether (STREG_NONE), require a runtime
 * check against the stack limits (STREG_SP_CHECK), or generate an
 * exception return.  Oh, or store into a register.
 *
 * Always return true, indicating success for a trans_* function.
 */
typedef enum StoreRegKind {
    STREG_NONE,
    STREG_NORMAL,
    STREG_SP_CHECK,
    STREG_EXC_RET,
} StoreRegKind;

static bool store_reg_kind(DisasContext *s, int rd,
                           TCGv_i32 val, StoreRegKind kind)
{
    switch (kind) {
    case STREG_NONE:
        tcg_temp_free_i32(val);
        return true;
    case STREG_NORMAL:
        /* See ALUWritePC: Interworking only from a32 mode. */
        if (s->thumb) {
            store_reg(s, rd, val);
        } else {
            store_reg_bx(s, rd, val);
        }
        return true;
    case STREG_SP_CHECK:
        store_sp_checked(s, val);
        return true;
    case STREG_EXC_RET:
        gen_exception_return(s, val);
        return true;
    }
    g_assert_not_reached();
}
5426 * one immediate shifted register source, and a destination.
5428 static bool op_s_rrr_shi(DisasContext
*s
, arg_s_rrr_shi
*a
,
5429 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
),
5430 int logic_cc
, StoreRegKind kind
)
5432 TCGv_i32 tmp1
, tmp2
;
5434 tmp2
= load_reg(s
, a
->rm
);
5435 gen_arm_shift_im(tmp2
, a
->shty
, a
->shim
, logic_cc
);
5436 tmp1
= load_reg(s
, a
->rn
);
5438 gen(tmp1
, tmp1
, tmp2
);
5439 tcg_temp_free_i32(tmp2
);
5444 return store_reg_kind(s
, a
->rd
, tmp1
, kind
);
5447 static bool op_s_rxr_shi(DisasContext
*s
, arg_s_rrr_shi
*a
,
5448 void (*gen
)(TCGv_i32
, TCGv_i32
),
5449 int logic_cc
, StoreRegKind kind
)
5453 tmp
= load_reg(s
, a
->rm
);
5454 gen_arm_shift_im(tmp
, a
->shty
, a
->shim
, logic_cc
);
5460 return store_reg_kind(s
, a
->rd
, tmp
, kind
);
5464 * Data-processing (register-shifted register)
5466 * Operate, with set flags, one register source,
5467 * one register shifted register source, and a destination.
5469 static bool op_s_rrr_shr(DisasContext
*s
, arg_s_rrr_shr
*a
,
5470 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
),
5471 int logic_cc
, StoreRegKind kind
)
5473 TCGv_i32 tmp1
, tmp2
;
5475 tmp1
= load_reg(s
, a
->rs
);
5476 tmp2
= load_reg(s
, a
->rm
);
5477 gen_arm_shift_reg(tmp2
, a
->shty
, tmp1
, logic_cc
);
5478 tmp1
= load_reg(s
, a
->rn
);
5480 gen(tmp1
, tmp1
, tmp2
);
5481 tcg_temp_free_i32(tmp2
);
5486 return store_reg_kind(s
, a
->rd
, tmp1
, kind
);
5489 static bool op_s_rxr_shr(DisasContext
*s
, arg_s_rrr_shr
*a
,
5490 void (*gen
)(TCGv_i32
, TCGv_i32
),
5491 int logic_cc
, StoreRegKind kind
)
5493 TCGv_i32 tmp1
, tmp2
;
5495 tmp1
= load_reg(s
, a
->rs
);
5496 tmp2
= load_reg(s
, a
->rm
);
5497 gen_arm_shift_reg(tmp2
, a
->shty
, tmp1
, logic_cc
);
5503 return store_reg_kind(s
, a
->rd
, tmp2
, kind
);
5507 * Data-processing (immediate)
5509 * Operate, with set flags, one register source,
5510 * one rotated immediate, and a destination.
5512 * Note that logic_cc && a->rot setting CF based on the msb of the
5513 * immediate is the reason why we must pass in the unrotated form
5516 static bool op_s_rri_rot(DisasContext
*s
, arg_s_rri_rot
*a
,
5517 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
),
5518 int logic_cc
, StoreRegKind kind
)
5520 TCGv_i32 tmp1
, tmp2
;
5523 imm
= ror32(a
->imm
, a
->rot
);
5524 if (logic_cc
&& a
->rot
) {
5525 tcg_gen_movi_i32(cpu_CF
, imm
>> 31);
5527 tmp2
= tcg_const_i32(imm
);
5528 tmp1
= load_reg(s
, a
->rn
);
5530 gen(tmp1
, tmp1
, tmp2
);
5531 tcg_temp_free_i32(tmp2
);
5536 return store_reg_kind(s
, a
->rd
, tmp1
, kind
);
5539 static bool op_s_rxi_rot(DisasContext
*s
, arg_s_rri_rot
*a
,
5540 void (*gen
)(TCGv_i32
, TCGv_i32
),
5541 int logic_cc
, StoreRegKind kind
)
5546 imm
= ror32(a
->imm
, a
->rot
);
5547 if (logic_cc
&& a
->rot
) {
5548 tcg_gen_movi_i32(cpu_CF
, imm
>> 31);
5550 tmp
= tcg_const_i32(imm
);
5556 return store_reg_kind(s
, a
->rd
, tmp
, kind
);
5559 #define DO_ANY3(NAME, OP, L, K) \
5560 static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a) \
5561 { StoreRegKind k = (K); return op_s_rrr_shi(s, a, OP, L, k); } \
5562 static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a) \
5563 { StoreRegKind k = (K); return op_s_rrr_shr(s, a, OP, L, k); } \
5564 static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a) \
5565 { StoreRegKind k = (K); return op_s_rri_rot(s, a, OP, L, k); }
5567 #define DO_ANY2(NAME, OP, L, K) \
5568 static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a) \
5569 { StoreRegKind k = (K); return op_s_rxr_shi(s, a, OP, L, k); } \
5570 static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a) \
5571 { StoreRegKind k = (K); return op_s_rxr_shr(s, a, OP, L, k); } \
5572 static bool trans_##NAME##_rxi(DisasContext *s, arg_s_rri_rot *a) \
5573 { StoreRegKind k = (K); return op_s_rxi_rot(s, a, OP, L, k); }
5575 #define DO_CMP2(NAME, OP, L) \
5576 static bool trans_##NAME##_xrri(DisasContext *s, arg_s_rrr_shi *a) \
5577 { return op_s_rrr_shi(s, a, OP, L, STREG_NONE); } \
5578 static bool trans_##NAME##_xrrr(DisasContext *s, arg_s_rrr_shr *a) \
5579 { return op_s_rrr_shr(s, a, OP, L, STREG_NONE); } \
5580 static bool trans_##NAME##_xri(DisasContext *s, arg_s_rri_rot *a) \
5581 { return op_s_rri_rot(s, a, OP, L, STREG_NONE); }
5583 DO_ANY3(AND
, tcg_gen_and_i32
, a
->s
, STREG_NORMAL
)
5584 DO_ANY3(EOR
, tcg_gen_xor_i32
, a
->s
, STREG_NORMAL
)
5585 DO_ANY3(ORR
, tcg_gen_or_i32
, a
->s
, STREG_NORMAL
)
5586 DO_ANY3(BIC
, tcg_gen_andc_i32
, a
->s
, STREG_NORMAL
)
5588 DO_ANY3(RSB
, a
->s
? gen_rsb_CC
: gen_rsb
, false, STREG_NORMAL
)
5589 DO_ANY3(ADC
, a
->s
? gen_adc_CC
: gen_add_carry
, false, STREG_NORMAL
)
5590 DO_ANY3(SBC
, a
->s
? gen_sbc_CC
: gen_sub_carry
, false, STREG_NORMAL
)
5591 DO_ANY3(RSC
, a
->s
? gen_rsc_CC
: gen_rsc
, false, STREG_NORMAL
)
5593 DO_CMP2(TST
, tcg_gen_and_i32
, true)
5594 DO_CMP2(TEQ
, tcg_gen_xor_i32
, true)
5595 DO_CMP2(CMN
, gen_add_CC
, false)
5596 DO_CMP2(CMP
, gen_sub_CC
, false)
5598 DO_ANY3(ADD
, a
->s
? gen_add_CC
: tcg_gen_add_i32
, false,
5599 a
->rd
== 13 && a
->rn
== 13 ? STREG_SP_CHECK
: STREG_NORMAL
)
5602 * Note for the computation of StoreRegKind we return out of the
5603 * middle of the functions that are expanded by DO_ANY3, and that
5604 * we modify a->s via that parameter before it is used by OP.
5606 DO_ANY3(SUB
, a
->s
? gen_sub_CC
: tcg_gen_sub_i32
, false,
5608 StoreRegKind ret
= STREG_NORMAL
;
5609 if (a
->rd
== 15 && a
->s
) {
5611 * See ALUExceptionReturn:
5612 * In User mode, UNPREDICTABLE; we choose UNDEF.
5613 * In Hyp mode, UNDEFINED.
5615 if (IS_USER(s
) || s
->current_el
== 2) {
5616 unallocated_encoding(s
);
5619 /* There is no writeback of nzcv to PSTATE. */
5621 ret
= STREG_EXC_RET
;
5622 } else if (a
->rd
== 13 && a
->rn
== 13) {
5623 ret
= STREG_SP_CHECK
;
5628 DO_ANY2(MOV
, tcg_gen_mov_i32
, a
->s
,
5630 StoreRegKind ret
= STREG_NORMAL
;
5631 if (a
->rd
== 15 && a
->s
) {
5633 * See ALUExceptionReturn:
5634 * In User mode, UNPREDICTABLE; we choose UNDEF.
5635 * In Hyp mode, UNDEFINED.
5637 if (IS_USER(s
) || s
->current_el
== 2) {
5638 unallocated_encoding(s
);
5641 /* There is no writeback of nzcv to PSTATE. */
5643 ret
= STREG_EXC_RET
;
5644 } else if (a
->rd
== 13) {
5645 ret
= STREG_SP_CHECK
;
5650 DO_ANY2(MVN
, tcg_gen_not_i32
, a
->s
, STREG_NORMAL
)
5653 * ORN is only available with T32, so there is no register-shifted-register
5654 * form of the insn. Using the DO_ANY3 macro would create an unused function.
5656 static bool trans_ORN_rrri(DisasContext
*s
, arg_s_rrr_shi
*a
)
5658 return op_s_rrr_shi(s
, a
, tcg_gen_orc_i32
, a
->s
, STREG_NORMAL
);
5661 static bool trans_ORN_rri(DisasContext
*s
, arg_s_rri_rot
*a
)
5663 return op_s_rri_rot(s
, a
, tcg_gen_orc_i32
, a
->s
, STREG_NORMAL
);
5670 static bool trans_ADR(DisasContext
*s
, arg_ri
*a
)
5672 store_reg_bx(s
, a
->rd
, add_reg_for_lit(s
, 15, a
->imm
));
5676 static bool trans_MOVW(DisasContext
*s
, arg_MOVW
*a
)
5680 if (!ENABLE_ARCH_6T2
) {
5684 tmp
= tcg_const_i32(a
->imm
);
5685 store_reg(s
, a
->rd
, tmp
);
5689 static bool trans_MOVT(DisasContext
*s
, arg_MOVW
*a
)
5693 if (!ENABLE_ARCH_6T2
) {
5697 tmp
= load_reg(s
, a
->rd
);
5698 tcg_gen_ext16u_i32(tmp
, tmp
);
5699 tcg_gen_ori_i32(tmp
, tmp
, a
->imm
<< 16);
5700 store_reg(s
, a
->rd
, tmp
);
5705 * Multiply and multiply accumulate
5708 static bool op_mla(DisasContext
*s
, arg_s_rrrr
*a
, bool add
)
5712 t1
= load_reg(s
, a
->rn
);
5713 t2
= load_reg(s
, a
->rm
);
5714 tcg_gen_mul_i32(t1
, t1
, t2
);
5715 tcg_temp_free_i32(t2
);
5717 t2
= load_reg(s
, a
->ra
);
5718 tcg_gen_add_i32(t1
, t1
, t2
);
5719 tcg_temp_free_i32(t2
);
5724 store_reg(s
, a
->rd
, t1
);
5728 static bool trans_MUL(DisasContext
*s
, arg_MUL
*a
)
5730 return op_mla(s
, a
, false);
5733 static bool trans_MLA(DisasContext
*s
, arg_MLA
*a
)
5735 return op_mla(s
, a
, true);
5738 static bool trans_MLS(DisasContext
*s
, arg_MLS
*a
)
5742 if (!ENABLE_ARCH_6T2
) {
5745 t1
= load_reg(s
, a
->rn
);
5746 t2
= load_reg(s
, a
->rm
);
5747 tcg_gen_mul_i32(t1
, t1
, t2
);
5748 tcg_temp_free_i32(t2
);
5749 t2
= load_reg(s
, a
->ra
);
5750 tcg_gen_sub_i32(t1
, t2
, t1
);
5751 tcg_temp_free_i32(t2
);
5752 store_reg(s
, a
->rd
, t1
);
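/*
 * Long multiply (and accumulate): form the 64-bit product as two 32-bit
 * halves, optionally add in the existing ra:rd pair, and update NZ from
 * the 64-bit result when the S bit is set.
 */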
5756 static bool op_mlal(DisasContext
*s
, arg_s_rrrr
*a
, bool uns
, bool add
)
5758 TCGv_i32 t0
, t1
, t2
, t3
;
5760 t0
= load_reg(s
, a
->rm
);
5761 t1
= load_reg(s
, a
->rn
);
5763 tcg_gen_mulu2_i32(t0
, t1
, t0
, t1
);
5765 tcg_gen_muls2_i32(t0
, t1
, t0
, t1
);
5768 t2
= load_reg(s
, a
->ra
);
5769 t3
= load_reg(s
, a
->rd
);
5770 tcg_gen_add2_i32(t0
, t1
, t0
, t1
, t2
, t3
);
5771 tcg_temp_free_i32(t2
);
5772 tcg_temp_free_i32(t3
);
5775 gen_logicq_cc(t0
, t1
);
5777 store_reg(s
, a
->ra
, t0
);
5778 store_reg(s
, a
->rd
, t1
);
5782 static bool trans_UMULL(DisasContext
*s
, arg_UMULL
*a
)
5784 return op_mlal(s
, a
, true, false);
5787 static bool trans_SMULL(DisasContext
*s
, arg_SMULL
*a
)
5789 return op_mlal(s
, a
, false, false);
5792 static bool trans_UMLAL(DisasContext
*s
, arg_UMLAL
*a
)
5794 return op_mlal(s
, a
, true, true);
5797 static bool trans_SMLAL(DisasContext
*s
, arg_SMLAL
*a
)
5799 return op_mlal(s
, a
, false, true);
5802 static bool trans_UMAAL(DisasContext
*s
, arg_UMAAL
*a
)
5804 TCGv_i32 t0
, t1
, t2
, zero
;
5807 ? !arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)
5812 t0
= load_reg(s
, a
->rm
);
5813 t1
= load_reg(s
, a
->rn
);
5814 tcg_gen_mulu2_i32(t0
, t1
, t0
, t1
);
5815 zero
= tcg_const_i32(0);
5816 t2
= load_reg(s
, a
->ra
);
5817 tcg_gen_add2_i32(t0
, t1
, t0
, t1
, t2
, zero
);
5818 tcg_temp_free_i32(t2
);
5819 t2
= load_reg(s
, a
->rd
);
5820 tcg_gen_add2_i32(t0
, t1
, t0
, t1
, t2
, zero
);
5821 tcg_temp_free_i32(t2
);
5822 tcg_temp_free_i32(zero
);
5823 store_reg(s
, a
->ra
, t0
);
5824 store_reg(s
, a
->rd
, t1
);
5829 * Saturating addition and subtraction
5832 static bool op_qaddsub(DisasContext
*s
, arg_rrr
*a
, bool add
, bool doub
)
5837 ? !arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)
5838 : !ENABLE_ARCH_5TE
) {
5842 t0
= load_reg(s
, a
->rm
);
5843 t1
= load_reg(s
, a
->rn
);
5845 gen_helper_add_saturate(t1
, cpu_env
, t1
, t1
);
5848 gen_helper_add_saturate(t0
, cpu_env
, t0
, t1
);
5850 gen_helper_sub_saturate(t0
, cpu_env
, t0
, t1
);
5852 tcg_temp_free_i32(t1
);
5853 store_reg(s
, a
->rd
, t0
);
5857 #define DO_QADDSUB(NAME, ADD, DOUB) \
5858 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
5860 return op_qaddsub(s, a, ADD, DOUB); \
5863 DO_QADDSUB(QADD
, true, false)
5864 DO_QADDSUB(QSUB
, false, false)
5865 DO_QADDSUB(QDADD
, true, true)
5866 DO_QADDSUB(QDSUB
, false, true)
5871 * Halfword multiply and multiply accumulate
5874 static bool op_smlaxxx(DisasContext
*s
, arg_rrrr
*a
,
5875 int add_long
, bool nt
, bool mt
)
5877 TCGv_i32 t0
, t1
, tl
, th
;
5880 ? !arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)
5881 : !ENABLE_ARCH_5TE
) {
5885 t0
= load_reg(s
, a
->rn
);
5886 t1
= load_reg(s
, a
->rm
);
5887 gen_mulxy(t0
, t1
, nt
, mt
);
5888 tcg_temp_free_i32(t1
);
5892 store_reg(s
, a
->rd
, t0
);
5895 t1
= load_reg(s
, a
->ra
);
5896 gen_helper_add_setq(t0
, cpu_env
, t0
, t1
);
5897 tcg_temp_free_i32(t1
);
5898 store_reg(s
, a
->rd
, t0
);
5901 tl
= load_reg(s
, a
->ra
);
5902 th
= load_reg(s
, a
->rd
);
5903 /* Sign-extend the 32-bit product to 64 bits. */
5904 t1
= tcg_temp_new_i32();
5905 tcg_gen_sari_i32(t1
, t0
, 31);
5906 tcg_gen_add2_i32(tl
, th
, tl
, th
, t0
, t1
);
5907 tcg_temp_free_i32(t0
);
5908 tcg_temp_free_i32(t1
);
5909 store_reg(s
, a
->ra
, tl
);
5910 store_reg(s
, a
->rd
, th
);
5913 g_assert_not_reached();
5918 #define DO_SMLAX(NAME, add, nt, mt) \
5919 static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
5921 return op_smlaxxx(s, a, add, nt, mt); \
5924 DO_SMLAX(SMULBB
, 0, 0, 0)
5925 DO_SMLAX(SMULBT
, 0, 0, 1)
5926 DO_SMLAX(SMULTB
, 0, 1, 0)
5927 DO_SMLAX(SMULTT
, 0, 1, 1)
5929 DO_SMLAX(SMLABB
, 1, 0, 0)
5930 DO_SMLAX(SMLABT
, 1, 0, 1)
5931 DO_SMLAX(SMLATB
, 1, 1, 0)
5932 DO_SMLAX(SMLATT
, 1, 1, 1)
5934 DO_SMLAX(SMLALBB
, 2, 0, 0)
5935 DO_SMLAX(SMLALBT
, 2, 0, 1)
5936 DO_SMLAX(SMLALTB
, 2, 1, 0)
5937 DO_SMLAX(SMLALTT
, 2, 1, 1)
5941 static bool op_smlawx(DisasContext
*s
, arg_rrrr
*a
, bool add
, bool mt
)
5945 if (!ENABLE_ARCH_5TE
) {
5949 t0
= load_reg(s
, a
->rn
);
5950 t1
= load_reg(s
, a
->rm
);
5952 * Since the nominal result is product<47:16>, shift the 16-bit
5953 * input up by 16 bits, so that the result is at product<63:32>.
5956 tcg_gen_andi_i32(t1
, t1
, 0xffff0000);
5958 tcg_gen_shli_i32(t1
, t1
, 16);
5960 tcg_gen_muls2_i32(t0
, t1
, t0
, t1
);
5961 tcg_temp_free_i32(t0
);
5963 t0
= load_reg(s
, a
->ra
);
5964 gen_helper_add_setq(t1
, cpu_env
, t1
, t0
);
5965 tcg_temp_free_i32(t0
);
5967 store_reg(s
, a
->rd
, t1
);
5971 #define DO_SMLAWX(NAME, add, mt) \
5972 static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
5974 return op_smlawx(s, a, add, mt); \
5977 DO_SMLAWX(SMULWB
, 0, 0)
5978 DO_SMLAWX(SMULWT
, 0, 1)
5979 DO_SMLAWX(SMLAWB
, 1, 0)
5980 DO_SMLAWX(SMLAWT
, 1, 1)
5985 * MSR (immediate) and hints
5988 static bool trans_YIELD(DisasContext
*s
, arg_YIELD
*a
)
5991 * When running single-threaded TCG code, use the helper to ensure that
5992 * the next round-robin scheduled vCPU gets a crack. When running in
5993 * MTTCG we don't generate jumps to the helper as it won't affect the
5994 * scheduling of other vCPUs.
5996 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
5997 gen_set_pc_im(s
, s
->base
.pc_next
);
5998 s
->base
.is_jmp
= DISAS_YIELD
;
6003 static bool trans_WFE(DisasContext
*s
, arg_WFE
*a
)
6006 * When running single-threaded TCG code, use the helper to ensure that
6007 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
6008 * just skip this instruction. Currently the SEV/SEVL instructions,
6009 * which are *one* of many ways to wake the CPU from WFE, are not
6010 * implemented so we can't sleep like WFI does.
6012 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
6013 gen_set_pc_im(s
, s
->base
.pc_next
);
6014 s
->base
.is_jmp
= DISAS_WFE
;
6019 static bool trans_WFI(DisasContext
*s
, arg_WFI
*a
)
6021 /* For WFI, halt the vCPU until an IRQ. */
6022 gen_set_pc_im(s
, s
->base
.pc_next
);
6023 s
->base
.is_jmp
= DISAS_WFI
;
6027 static bool trans_NOP(DisasContext
*s
, arg_NOP
*a
)
6032 static bool trans_MSR_imm(DisasContext
*s
, arg_MSR_imm
*a
)
6034 uint32_t val
= ror32(a
->imm
, a
->rot
* 2);
6035 uint32_t mask
= msr_mask(s
, a
->mask
, a
->r
);
6037 if (gen_set_psr_im(s
, mask
, a
->r
, val
)) {
6038 unallocated_encoding(s
);
6044 * Cyclic Redundancy Check
6047 static bool op_crc32(DisasContext
*s
, arg_rrr
*a
, bool c
, MemOp sz
)
6049 TCGv_i32 t1
, t2
, t3
;
6051 if (!dc_isar_feature(aa32_crc32
, s
)) {
6055 t1
= load_reg(s
, a
->rn
);
6056 t2
= load_reg(s
, a
->rm
);
6067 g_assert_not_reached();
6069 t3
= tcg_const_i32(1 << sz
);
6071 gen_helper_crc32c(t1
, t1
, t2
, t3
);
6073 gen_helper_crc32(t1
, t1
, t2
, t3
);
6075 tcg_temp_free_i32(t2
);
6076 tcg_temp_free_i32(t3
);
6077 store_reg(s
, a
->rd
, t1
);
6081 #define DO_CRC32(NAME, c, sz) \
6082 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
6083 { return op_crc32(s, a, c, sz); }
6085 DO_CRC32(CRC32B
, false, MO_8
)
6086 DO_CRC32(CRC32H
, false, MO_16
)
6087 DO_CRC32(CRC32W
, false, MO_32
)
6088 DO_CRC32(CRC32CB
, true, MO_8
)
6089 DO_CRC32(CRC32CH
, true, MO_16
)
6090 DO_CRC32(CRC32CW
, true, MO_32
)
6095 * Miscellaneous instructions
6098 static bool trans_MRS_bank(DisasContext
*s
, arg_MRS_bank
*a
)
6100 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
6103 gen_mrs_banked(s
, a
->r
, a
->sysm
, a
->rd
);
6107 static bool trans_MSR_bank(DisasContext
*s
, arg_MSR_bank
*a
)
6109 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
6112 gen_msr_banked(s
, a
->r
, a
->sysm
, a
->rn
);
6116 static bool trans_MRS_reg(DisasContext
*s
, arg_MRS_reg
*a
)
6120 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
6125 unallocated_encoding(s
);
6128 tmp
= load_cpu_field(spsr
);
6130 tmp
= tcg_temp_new_i32();
6131 gen_helper_cpsr_read(tmp
, cpu_env
);
6133 store_reg(s
, a
->rd
, tmp
);
6137 static bool trans_MSR_reg(DisasContext
*s
, arg_MSR_reg
*a
)
6140 uint32_t mask
= msr_mask(s
, a
->mask
, a
->r
);
6142 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
6145 tmp
= load_reg(s
, a
->rn
);
6146 if (gen_set_psr(s
, mask
, a
->r
, tmp
)) {
6147 unallocated_encoding(s
);
6152 static bool trans_MRS_v7m(DisasContext
*s
, arg_MRS_v7m
*a
)
6156 if (!arm_dc_feature(s
, ARM_FEATURE_M
)) {
6159 tmp
= tcg_const_i32(a
->sysm
);
6160 gen_helper_v7m_mrs(tmp
, cpu_env
, tmp
);
6161 store_reg(s
, a
->rd
, tmp
);
6165 static bool trans_MSR_v7m(DisasContext
*s
, arg_MSR_v7m
*a
)
6169 if (!arm_dc_feature(s
, ARM_FEATURE_M
)) {
6172 addr
= tcg_const_i32((a
->mask
<< 10) | a
->sysm
);
6173 reg
= load_reg(s
, a
->rn
);
6174 gen_helper_v7m_msr(cpu_env
, addr
, reg
);
6175 tcg_temp_free_i32(addr
);
6176 tcg_temp_free_i32(reg
);
6177 /* If we wrote to CONTROL, the EL might have changed */
6178 gen_helper_rebuild_hflags_m32_newel(cpu_env
);
6183 static bool trans_BX(DisasContext
*s
, arg_BX
*a
)
6185 if (!ENABLE_ARCH_4T
) {
6188 gen_bx_excret(s
, load_reg(s
, a
->rm
));
6192 static bool trans_BXJ(DisasContext
*s
, arg_BXJ
*a
)
6194 if (!ENABLE_ARCH_5J
|| arm_dc_feature(s
, ARM_FEATURE_M
)) {
6197 /* Trivial implementation equivalent to bx. */
6198 gen_bx(s
, load_reg(s
, a
->rm
));
6202 static bool trans_BLX_r(DisasContext
*s
, arg_BLX_r
*a
)
6206 if (!ENABLE_ARCH_5
) {
6209 tmp
= load_reg(s
, a
->rm
);
6210 tcg_gen_movi_i32(cpu_R
[14], s
->base
.pc_next
| s
->thumb
);
6216 * BXNS/BLXNS: only exist for v8M with the security extensions,
6217 * and always UNDEF if NonSecure. We don't implement these in
6218 * the user-only mode either (in theory you can use them from
6219 * Secure User mode but they are too tied in to system emulation).
6221 static bool trans_BXNS(DisasContext
*s
, arg_BXNS
*a
)
6223 if (!s
->v8m_secure
|| IS_USER_ONLY
) {
6224 unallocated_encoding(s
);
6231 static bool trans_BLXNS(DisasContext
*s
, arg_BLXNS
*a
)
6233 if (!s
->v8m_secure
|| IS_USER_ONLY
) {
6234 unallocated_encoding(s
);
6236 gen_blxns(s
, a
->rm
);
6241 static bool trans_CLZ(DisasContext
*s
, arg_CLZ
*a
)
6245 if (!ENABLE_ARCH_5
) {
6248 tmp
= load_reg(s
, a
->rm
);
6249 tcg_gen_clzi_i32(tmp
, tmp
, 32);
6250 store_reg(s
, a
->rd
, tmp
);
6254 static bool trans_ERET(DisasContext
*s
, arg_ERET
*a
)
6258 if (!arm_dc_feature(s
, ARM_FEATURE_V7VE
)) {
6262 unallocated_encoding(s
);
6265 if (s
->current_el
== 2) {
6266 /* ERET from Hyp uses ELR_Hyp, not LR */
6267 tmp
= load_cpu_field(elr_el
[2]);
6269 tmp
= load_reg(s
, 14);
6271 gen_exception_return(s
, tmp
);
6275 static bool trans_HLT(DisasContext
*s
, arg_HLT
*a
)
6281 static bool trans_BKPT(DisasContext
*s
, arg_BKPT
*a
)
6283 if (!ENABLE_ARCH_5
) {
6286 if (arm_dc_feature(s
, ARM_FEATURE_M
) &&
6287 semihosting_enabled() &&
6288 #ifndef CONFIG_USER_ONLY
6292 gen_exception_internal_insn(s
, s
->pc_curr
, EXCP_SEMIHOST
);
6294 gen_exception_bkpt_insn(s
, syn_aa32_bkpt(a
->imm
, false));
6299 static bool trans_HVC(DisasContext
*s
, arg_HVC
*a
)
6301 if (!ENABLE_ARCH_7
|| arm_dc_feature(s
, ARM_FEATURE_M
)) {
6305 unallocated_encoding(s
);
6312 static bool trans_SMC(DisasContext
*s
, arg_SMC
*a
)
6314 if (!ENABLE_ARCH_6K
|| arm_dc_feature(s
, ARM_FEATURE_M
)) {
6318 unallocated_encoding(s
);
6325 static bool trans_SG(DisasContext
*s
, arg_SG
*a
)
6327 if (!arm_dc_feature(s
, ARM_FEATURE_M
) ||
6328 !arm_dc_feature(s
, ARM_FEATURE_V8
)) {
6333 * The bulk of the behaviour for this instruction is implemented
6334 * in v7m_handle_execute_nsc(), which deals with the insn when
6335 * it is executed by a CPU in non-secure state from memory
6336 * which is Secure & NonSecure-Callable.
6337 * Here we only need to handle the remaining cases:
6338 * * in NS memory (including the "security extension not
6339 * implemented" case) : NOP
6340 * * in S memory but CPU already secure (clear IT bits)
6341 * We know that the attribute for the memory this insn is
6342 * in must match the current CPU state, because otherwise
6343 * get_phys_addr_pmsav8 would have generated an exception.
6345 if (s
->v8m_secure
) {
6346 /* Like the IT insn, we don't need to generate any code */
6347 s
->condexec_cond
= 0;
6348 s
->condexec_mask
= 0;
6353 static bool trans_TT(DisasContext
*s
, arg_TT
*a
)
6357 if (!arm_dc_feature(s
, ARM_FEATURE_M
) ||
6358 !arm_dc_feature(s
, ARM_FEATURE_V8
)) {
6361 if (a
->rd
== 13 || a
->rd
== 15 || a
->rn
== 15) {
6362 /* We UNDEF for these UNPREDICTABLE cases */
6363 unallocated_encoding(s
);
6366 if (a
->A
&& !s
->v8m_secure
) {
6367 /* This case is UNDEFINED. */
6368 unallocated_encoding(s
);
6372 addr
= load_reg(s
, a
->rn
);
6373 tmp
= tcg_const_i32((a
->A
<< 1) | a
->T
);
6374 gen_helper_v7m_tt(tmp
, cpu_env
, addr
, tmp
);
6375 tcg_temp_free_i32(addr
);
6376 store_reg(s
, a
->rd
, tmp
);
6381 * Load/store register index
6384 static ISSInfo
make_issinfo(DisasContext
*s
, int rd
, bool p
, bool w
)
6388 /* ISS not valid if writeback */
6391 if (s
->base
.pc_next
- s
->pc_curr
== 2) {
6400 static TCGv_i32
op_addr_rr_pre(DisasContext
*s
, arg_ldst_rr
*a
)
6402 TCGv_i32 addr
= load_reg(s
, a
->rn
);
6404 if (s
->v8m_stackcheck
&& a
->rn
== 13 && a
->w
) {
6405 gen_helper_v8m_stackcheck(cpu_env
, addr
);
6409 TCGv_i32 ofs
= load_reg(s
, a
->rm
);
6410 gen_arm_shift_im(ofs
, a
->shtype
, a
->shimm
, 0);
6412 tcg_gen_add_i32(addr
, addr
, ofs
);
6414 tcg_gen_sub_i32(addr
, addr
, ofs
);
6416 tcg_temp_free_i32(ofs
);
6421 static void op_addr_rr_post(DisasContext
*s
, arg_ldst_rr
*a
,
6422 TCGv_i32 addr
, int address_offset
)
6425 TCGv_i32 ofs
= load_reg(s
, a
->rm
);
6426 gen_arm_shift_im(ofs
, a
->shtype
, a
->shimm
, 0);
6428 tcg_gen_add_i32(addr
, addr
, ofs
);
6430 tcg_gen_sub_i32(addr
, addr
, ofs
);
6432 tcg_temp_free_i32(ofs
);
6434 tcg_temp_free_i32(addr
);
6437 tcg_gen_addi_i32(addr
, addr
, address_offset
);
6438 store_reg(s
, a
->rn
, addr
);
6441 static bool op_load_rr(DisasContext
*s
, arg_ldst_rr
*a
,
6442 MemOp mop
, int mem_idx
)
6444 ISSInfo issinfo
= make_issinfo(s
, a
->rt
, a
->p
, a
->w
);
6447 addr
= op_addr_rr_pre(s
, a
);
6449 tmp
= tcg_temp_new_i32();
6450 gen_aa32_ld_i32(s
, tmp
, addr
, mem_idx
, mop
| s
->be_data
);
6451 disas_set_da_iss(s
, mop
, issinfo
);
6454 * Perform base writeback before the loaded value to
6455 * ensure correct behavior with overlapping index registers.
6457 op_addr_rr_post(s
, a
, addr
, 0);
6458 store_reg_from_load(s
, a
->rt
, tmp
);
6462 static bool op_store_rr(DisasContext
*s
, arg_ldst_rr
*a
,
6463 MemOp mop
, int mem_idx
)
6465 ISSInfo issinfo
= make_issinfo(s
, a
->rt
, a
->p
, a
->w
) | ISSIsWrite
;
6468 addr
= op_addr_rr_pre(s
, a
);
6470 tmp
= load_reg(s
, a
->rt
);
6471 gen_aa32_st_i32(s
, tmp
, addr
, mem_idx
, mop
| s
->be_data
);
6472 disas_set_da_iss(s
, mop
, issinfo
);
6473 tcg_temp_free_i32(tmp
);
6475 op_addr_rr_post(s
, a
, addr
, 0);
6479 static bool trans_LDRD_rr(DisasContext
*s
, arg_ldst_rr
*a
)
6481 int mem_idx
= get_mem_index(s
);
6484 if (!ENABLE_ARCH_5TE
) {
6488 unallocated_encoding(s
);
6491 addr
= op_addr_rr_pre(s
, a
);
6493 tmp
= tcg_temp_new_i32();
6494 gen_aa32_ld_i32(s
, tmp
, addr
, mem_idx
, MO_UL
| s
->be_data
);
6495 store_reg(s
, a
->rt
, tmp
);
6497 tcg_gen_addi_i32(addr
, addr
, 4);
6499 tmp
= tcg_temp_new_i32();
6500 gen_aa32_ld_i32(s
, tmp
, addr
, mem_idx
, MO_UL
| s
->be_data
);
6501 store_reg(s
, a
->rt
+ 1, tmp
);
6503 /* LDRD w/ base writeback is undefined if the registers overlap. */
6504 op_addr_rr_post(s
, a
, addr
, -4);
6508 static bool trans_STRD_rr(DisasContext
*s
, arg_ldst_rr
*a
)
6510 int mem_idx
= get_mem_index(s
);
6513 if (!ENABLE_ARCH_5TE
) {
6517 unallocated_encoding(s
);
6520 addr
= op_addr_rr_pre(s
, a
);
6522 tmp
= load_reg(s
, a
->rt
);
6523 gen_aa32_st_i32(s
, tmp
, addr
, mem_idx
, MO_UL
| s
->be_data
);
6524 tcg_temp_free_i32(tmp
);
6526 tcg_gen_addi_i32(addr
, addr
, 4);
6528 tmp
= load_reg(s
, a
->rt
+ 1);
6529 gen_aa32_st_i32(s
, tmp
, addr
, mem_idx
, MO_UL
| s
->be_data
);
6530 tcg_temp_free_i32(tmp
);
6532 op_addr_rr_post(s
, a
, addr
, -4);
6537 * Load/store immediate index
6540 static TCGv_i32
op_addr_ri_pre(DisasContext
*s
, arg_ldst_ri
*a
)
6548 if (s
->v8m_stackcheck
&& a
->rn
== 13 && a
->w
) {
6550 * Stackcheck. Here we know 'addr' is the current SP;
6551 * U is set if we're moving SP up, else down. It is
6552 * UNKNOWN whether the limit check triggers when SP starts
6553 * below the limit and ends up above it; we chose to do so.
6556 TCGv_i32 newsp
= tcg_temp_new_i32();
6557 tcg_gen_addi_i32(newsp
, cpu_R
[13], ofs
);
6558 gen_helper_v8m_stackcheck(cpu_env
, newsp
);
6559 tcg_temp_free_i32(newsp
);
6561 gen_helper_v8m_stackcheck(cpu_env
, cpu_R
[13]);
6565 return add_reg_for_lit(s
, a
->rn
, a
->p
? ofs
: 0);
6568 static void op_addr_ri_post(DisasContext
*s
, arg_ldst_ri
*a
,
6569 TCGv_i32 addr
, int address_offset
)
6573 address_offset
+= a
->imm
;
6575 address_offset
-= a
->imm
;
6578 tcg_temp_free_i32(addr
);
6581 tcg_gen_addi_i32(addr
, addr
, address_offset
);
6582 store_reg(s
, a
->rn
, addr
);
6585 static bool op_load_ri(DisasContext
*s
, arg_ldst_ri
*a
,
6586 MemOp mop
, int mem_idx
)
6588 ISSInfo issinfo
= make_issinfo(s
, a
->rt
, a
->p
, a
->w
);
6591 addr
= op_addr_ri_pre(s
, a
);
6593 tmp
= tcg_temp_new_i32();
6594 gen_aa32_ld_i32(s
, tmp
, addr
, mem_idx
, mop
| s
->be_data
);
6595 disas_set_da_iss(s
, mop
, issinfo
);
6598 * Perform base writeback before the loaded value to
6599 * ensure correct behavior with overlapping index registers.
6601 op_addr_ri_post(s
, a
, addr
, 0);
6602 store_reg_from_load(s
, a
->rt
, tmp
);
6606 static bool op_store_ri(DisasContext
*s
, arg_ldst_ri
*a
,
6607 MemOp mop
, int mem_idx
)
6609 ISSInfo issinfo
= make_issinfo(s
, a
->rt
, a
->p
, a
->w
) | ISSIsWrite
;
6612 addr
= op_addr_ri_pre(s
, a
);
6614 tmp
= load_reg(s
, a
->rt
);
6615 gen_aa32_st_i32(s
, tmp
, addr
, mem_idx
, mop
| s
->be_data
);
6616 disas_set_da_iss(s
, mop
, issinfo
);
6617 tcg_temp_free_i32(tmp
);
6619 op_addr_ri_post(s
, a
, addr
, 0);
6623 static bool op_ldrd_ri(DisasContext
*s
, arg_ldst_ri
*a
, int rt2
)
6625 int mem_idx
= get_mem_index(s
);
6628 addr
= op_addr_ri_pre(s
, a
);
6630 tmp
= tcg_temp_new_i32();
6631 gen_aa32_ld_i32(s
, tmp
, addr
, mem_idx
, MO_UL
| s
->be_data
);
6632 store_reg(s
, a
->rt
, tmp
);
6634 tcg_gen_addi_i32(addr
, addr
, 4);
6636 tmp
= tcg_temp_new_i32();
6637 gen_aa32_ld_i32(s
, tmp
, addr
, mem_idx
, MO_UL
| s
->be_data
);
6638 store_reg(s
, rt2
, tmp
);
6640 /* LDRD w/ base writeback is undefined if the registers overlap. */
6641 op_addr_ri_post(s
, a
, addr
, -4);
6645 static bool trans_LDRD_ri_a32(DisasContext
*s
, arg_ldst_ri
*a
)
6647 if (!ENABLE_ARCH_5TE
|| (a
->rt
& 1)) {
6650 return op_ldrd_ri(s
, a
, a
->rt
+ 1);
6653 static bool trans_LDRD_ri_t32(DisasContext
*s
, arg_ldst_ri2
*a
)
6656 .u
= a
->u
, .w
= a
->w
, .p
= a
->p
,
6657 .rn
= a
->rn
, .rt
= a
->rt
, .imm
= a
->imm
6659 return op_ldrd_ri(s
, &b
, a
->rt2
);
6662 static bool op_strd_ri(DisasContext
*s
, arg_ldst_ri
*a
, int rt2
)
6664 int mem_idx
= get_mem_index(s
);
6667 addr
= op_addr_ri_pre(s
, a
);
6669 tmp
= load_reg(s
, a
->rt
);
6670 gen_aa32_st_i32(s
, tmp
, addr
, mem_idx
, MO_UL
| s
->be_data
);
6671 tcg_temp_free_i32(tmp
);
6673 tcg_gen_addi_i32(addr
, addr
, 4);
6675 tmp
= load_reg(s
, rt2
);
6676 gen_aa32_st_i32(s
, tmp
, addr
, mem_idx
, MO_UL
| s
->be_data
);
6677 tcg_temp_free_i32(tmp
);
6679 op_addr_ri_post(s
, a
, addr
, -4);
6683 static bool trans_STRD_ri_a32(DisasContext
*s
, arg_ldst_ri
*a
)
6685 if (!ENABLE_ARCH_5TE
|| (a
->rt
& 1)) {
6688 return op_strd_ri(s
, a
, a
->rt
+ 1);
6691 static bool trans_STRD_ri_t32(DisasContext
*s
, arg_ldst_ri2
*a
)
6694 .u
= a
->u
, .w
= a
->w
, .p
= a
->p
,
6695 .rn
= a
->rn
, .rt
= a
->rt
, .imm
= a
->imm
6697 return op_strd_ri(s
, &b
, a
->rt2
);
6700 #define DO_LDST(NAME, WHICH, MEMOP) \
6701 static bool trans_##NAME##_ri(DisasContext *s, arg_ldst_ri *a) \
6703 return op_##WHICH##_ri(s, a, MEMOP, get_mem_index(s)); \
6705 static bool trans_##NAME##T_ri(DisasContext *s, arg_ldst_ri *a) \
6707 return op_##WHICH##_ri(s, a, MEMOP, get_a32_user_mem_index(s)); \
6709 static bool trans_##NAME##_rr(DisasContext *s, arg_ldst_rr *a) \
6711 return op_##WHICH##_rr(s, a, MEMOP, get_mem_index(s)); \
6713 static bool trans_##NAME##T_rr(DisasContext *s, arg_ldst_rr *a) \
6715 return op_##WHICH##_rr(s, a, MEMOP, get_a32_user_mem_index(s)); \
6718 DO_LDST(LDR
, load
, MO_UL
)
6719 DO_LDST(LDRB
, load
, MO_UB
)
6720 DO_LDST(LDRH
, load
, MO_UW
)
6721 DO_LDST(LDRSB
, load
, MO_SB
)
6722 DO_LDST(LDRSH
, load
, MO_SW
)
6724 DO_LDST(STR
, store
, MO_UL
)
6725 DO_LDST(STRB
, store
, MO_UB
)
6726 DO_LDST(STRH
, store
, MO_UW
)
/*
 * Synchronization primitives
 */

static bool op_swp(DisasContext *s, arg_SWP *a, MemOp opc)
{
    TCGv_i32 addr, tmp;
    TCGv taddr;

    opc |= s->be_data;
    addr = load_reg(s, a->rn);
    taddr = gen_aa32_addr(s, addr, opc);
    tcg_temp_free_i32(addr);

    tmp = load_reg(s, a->rt2);
    tcg_gen_atomic_xchg_i32(tmp, taddr, tmp, get_mem_index(s), opc);
    tcg_temp_free(taddr);

    store_reg(s, a->rt, tmp);
    return true;
}

static bool trans_SWP(DisasContext *s, arg_SWP *a)
{
    return op_swp(s, a, MO_UL | MO_ALIGN);
}

static bool trans_SWPB(DisasContext *s, arg_SWP *a)
{
    return op_swp(s, a, MO_UB);
}

/*
 * Load/Store Exclusive and Load-Acquire/Store-Release
 */

static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel)
{
    TCGv_i32 addr;
    /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
    bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);

    /* We UNDEF for these UNPREDICTABLE cases. */
    if (a->rd == 15 || a->rn == 15 || a->rt == 15
        || a->rd == a->rn || a->rd == a->rt
        || (!v8a && s->thumb && (a->rd == 13 || a->rt == 13))
        || (mop == MO_64
            && (a->rt2 == 15
                || a->rd == a->rt2
                || (!v8a && s->thumb && a->rt2 == 13)))) {
        unallocated_encoding(s);
        return true;
    }

    if (rel) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }

    addr = tcg_temp_local_new_i32();
    load_reg_var(s, addr, a->rn);
    tcg_gen_addi_i32(addr, addr, a->imm);

    gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop);
    tcg_temp_free_i32(addr);
    return true;
}
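/*
 * Rough sketch of what gen_load_exclusive()/gen_store_exclusive() (defined
 * elsewhere in this file) provide for the helpers here: the local
 * exclusive monitor is modelled by recording the address and data of the
 * most recent load-exclusive in CPU state, and a store-exclusive succeeds
 * only if an atomic compare-and-swap at that address still observes the
 * recorded value.  This is an approximation of the architectural monitor,
 * but it is sufficient for the canonical LDREX/STREX retry loops.
 */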
static bool trans_STREX(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    return op_strex(s, a, MO_32, false);
}

static bool trans_STREXD_a32(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_6K) {
        return false;
    }
    /* We UNDEF for these UNPREDICTABLE cases. */
    if (a->rt & 1) {
        unallocated_encoding(s);
        return true;
    }
    a->rt2 = a->rt + 1;
    return op_strex(s, a, MO_64, false);
}

static bool trans_STREXD_t32(DisasContext *s, arg_STREX *a)
{
    return op_strex(s, a, MO_64, false);
}

static bool trans_STREXB(DisasContext *s, arg_STREX *a)
{
    if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
        return false;
    }
    return op_strex(s, a, MO_8, false);
}

static bool trans_STREXH(DisasContext *s, arg_STREX *a)
{
    if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
        return false;
    }
    return op_strex(s, a, MO_16, false);
}

static bool trans_STLEX(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_strex(s, a, MO_32, true);
}

static bool trans_STLEXD_a32(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    /* We UNDEF for these UNPREDICTABLE cases. */
    if (a->rt & 1) {
        unallocated_encoding(s);
        return true;
    }
    a->rt2 = a->rt + 1;
    return op_strex(s, a, MO_64, true);
}

static bool trans_STLEXD_t32(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_strex(s, a, MO_64, true);
}

static bool trans_STLEXB(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_strex(s, a, MO_8, true);
}

static bool trans_STLEXH(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_strex(s, a, MO_16, true);
}

static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
{
    TCGv_i32 addr, tmp;

    if (!ENABLE_ARCH_8) {
        return false;
    }
    /* We UNDEF for these UNPREDICTABLE cases. */
    if (a->rn == 15 || a->rt == 15) {
        unallocated_encoding(s);
        return true;
    }

    addr = load_reg(s, a->rn);
    tmp = load_reg(s, a->rt);
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
    disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);
    return true;
}

static bool trans_STL(DisasContext *s, arg_STL *a)
{
    return op_stl(s, a, MO_UL);
}

static bool trans_STLB(DisasContext *s, arg_STL *a)
{
    return op_stl(s, a, MO_UB);
}

static bool trans_STLH(DisasContext *s, arg_STL *a)
{
    return op_stl(s, a, MO_UW);
}
static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq)
{
    TCGv_i32 addr;
    /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
    bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);

    /* We UNDEF for these UNPREDICTABLE cases. */
    if (a->rn == 15 || a->rt == 15
        || (!v8a && s->thumb && a->rt == 13)
        || (mop == MO_64
            && (a->rt2 == 15 || a->rt == a->rt2
                || (!v8a && s->thumb && a->rt2 == 13)))) {
        unallocated_encoding(s);
        return true;
    }

    addr = tcg_temp_local_new_i32();
    load_reg_var(s, addr, a->rn);
    tcg_gen_addi_i32(addr, addr, a->imm);

    gen_load_exclusive(s, a->rt, a->rt2, addr, mop);
    tcg_temp_free_i32(addr);

    if (acq) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
    return true;
}

static bool trans_LDREX(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    return op_ldrex(s, a, MO_32, false);
}

static bool trans_LDREXD_a32(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_6K) {
        return false;
    }
    /* We UNDEF for these UNPREDICTABLE cases. */
    if (a->rt & 1) {
        unallocated_encoding(s);
        return true;
    }
    a->rt2 = a->rt + 1;
    return op_ldrex(s, a, MO_64, false);
}

static bool trans_LDREXD_t32(DisasContext *s, arg_LDREX *a)
{
    return op_ldrex(s, a, MO_64, false);
}

static bool trans_LDREXB(DisasContext *s, arg_LDREX *a)
{
    if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
        return false;
    }
    return op_ldrex(s, a, MO_8, false);
}

static bool trans_LDREXH(DisasContext *s, arg_LDREX *a)
{
    if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
        return false;
    }
    return op_ldrex(s, a, MO_16, false);
}

static bool trans_LDAEX(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_ldrex(s, a, MO_32, true);
}

static bool trans_LDAEXD_a32(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    /* We UNDEF for these UNPREDICTABLE cases. */
    if (a->rt & 1) {
        unallocated_encoding(s);
        return true;
    }
    a->rt2 = a->rt + 1;
    return op_ldrex(s, a, MO_64, true);
}

static bool trans_LDAEXD_t32(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_ldrex(s, a, MO_64, true);
}

static bool trans_LDAEXB(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_ldrex(s, a, MO_8, true);
}

static bool trans_LDAEXH(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_ldrex(s, a, MO_16, true);
}

static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)
{
    TCGv_i32 addr, tmp;

    if (!ENABLE_ARCH_8) {
        return false;
    }
    /* We UNDEF for these UNPREDICTABLE cases. */
    if (a->rn == 15 || a->rt == 15) {
        unallocated_encoding(s);
        return true;
    }

    addr = load_reg(s, a->rn);
    tmp = tcg_temp_new_i32();
    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
    disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
    tcg_temp_free_i32(addr);

    store_reg(s, a->rt, tmp);
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    return true;
}

static bool trans_LDA(DisasContext *s, arg_LDA *a)
{
    return op_lda(s, a, MO_UL);
}

static bool trans_LDAB(DisasContext *s, arg_LDA *a)
{
    return op_lda(s, a, MO_UB);
}

static bool trans_LDAH(DisasContext *s, arg_LDA *a)
{
    return op_lda(s, a, MO_UW);
}
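/*
 * Illustrative note on barrier placement in the functions above:
 * load-acquire forms emit their tcg_gen_mb() after the memory access, so
 * later operations cannot be hoisted above the load, while store-release
 * forms emit it before the access, so earlier operations cannot sink
 * below the store.  That is why op_ldrex()/op_lda() place the barrier
 * last and op_strex()/op_stl() place it first.
 */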
/*
 * Media instructions
 */

static bool trans_USADA8(DisasContext *s, arg_USADA8 *a)
{
    TCGv_i32 t1, t2;

    if (!ENABLE_ARCH_6) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    gen_helper_usad8(t1, t1, t2);
    tcg_temp_free_i32(t2);
    if (a->ra != 15) {
        t2 = load_reg(s, a->ra);
        tcg_gen_add_i32(t1, t1, t2);
        tcg_temp_free_i32(t2);
    }
    store_reg(s, a->rd, t1);
    return true;
}

static bool op_bfx(DisasContext *s, arg_UBFX *a, bool u)
{
    TCGv_i32 tmp;
    int shift = a->lsb;
    int width = a->widthm1 + 1;

    if (!ENABLE_ARCH_6T2) {
        return false;
    }
    if (shift + width > 32) {
        /* UNPREDICTABLE; we choose to UNDEF */
        unallocated_encoding(s);
        return true;
    }

    tmp = load_reg(s, a->rn);
    if (u) {
        tcg_gen_extract_i32(tmp, tmp, shift, width);
    } else {
        tcg_gen_sextract_i32(tmp, tmp, shift, width);
    }
    store_reg(s, a->rd, tmp);
    return true;
}

static bool trans_SBFX(DisasContext *s, arg_SBFX *a)
{
    return op_bfx(s, a, false);
}

static bool trans_UBFX(DisasContext *s, arg_UBFX *a)
{
    return op_bfx(s, a, true);
}
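/*
 * Worked example (informal): UBFX Rd, Rn, #lsb, #width computes
 * (Rn >> lsb) & ((1 << width) - 1), which is exactly what
 * tcg_gen_extract_i32(tmp, tmp, shift, width) emits; SBFX uses the
 * sign-extracting variant, so bit (width - 1) of the extracted field is
 * replicated up through bit 31 of the result.
 */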
static bool trans_BFCI(DisasContext *s, arg_BFCI *a)
{
    TCGv_i32 tmp;
    int msb = a->msb, lsb = a->lsb;
    int width;

    if (!ENABLE_ARCH_6T2) {
        return false;
    }
    if (msb < lsb) {
        /* UNPREDICTABLE; we choose to UNDEF */
        unallocated_encoding(s);
        return true;
    }

    width = msb + 1 - lsb;
    if (a->rn == 15) {
        /* BFC */
        tmp = tcg_const_i32(0);
    } else {
        /* BFI */
        tmp = load_reg(s, a->rn);
    }
    if (width != 32) {
        TCGv_i32 tmp2 = load_reg(s, a->rd);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, lsb, width);
        tcg_temp_free_i32(tmp2);
    }
    store_reg(s, a->rd, tmp);
    return true;
}

static bool trans_UDF(DisasContext *s, arg_UDF *a)
{
    unallocated_encoding(s);
    return true;
}
/*
 * Parallel addition and subtraction
 */

static bool op_par_addsub(DisasContext *s, arg_rrr *a,
                          void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0, t1;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_6) {
        return false;
    }

    t0 = load_reg(s, a->rn);
    t1 = load_reg(s, a->rm);

    gen(t0, t0, t1);

    tcg_temp_free_i32(t1);
    store_reg(s, a->rd, t0);
    return true;
}

static bool op_par_addsub_ge(DisasContext *s, arg_rrr *a,
                             void (*gen)(TCGv_i32, TCGv_i32,
                                         TCGv_i32, TCGv_ptr))
{
    TCGv_i32 t0, t1;
    TCGv_ptr ge;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_6) {
        return false;
    }

    t0 = load_reg(s, a->rn);
    t1 = load_reg(s, a->rm);

    ge = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ge, cpu_env, offsetof(CPUARMState, GE));
    gen(t0, t0, t1, ge);

    tcg_temp_free_ptr(ge);
    tcg_temp_free_i32(t1);
    store_reg(s, a->rd, t0);
    return true;
}
#define DO_PAR_ADDSUB(NAME, helper) \
static bool trans_##NAME(DisasContext *s, arg_rrr *a)   \
{                                                       \
    return op_par_addsub(s, a, helper);                 \
}

#define DO_PAR_ADDSUB_GE(NAME, helper) \
static bool trans_##NAME(DisasContext *s, arg_rrr *a)   \
{                                                       \
    return op_par_addsub_ge(s, a, helper);              \
}

DO_PAR_ADDSUB_GE(SADD16, gen_helper_sadd16)
DO_PAR_ADDSUB_GE(SASX, gen_helper_saddsubx)
DO_PAR_ADDSUB_GE(SSAX, gen_helper_ssubaddx)
DO_PAR_ADDSUB_GE(SSUB16, gen_helper_ssub16)
DO_PAR_ADDSUB_GE(SADD8, gen_helper_sadd8)
DO_PAR_ADDSUB_GE(SSUB8, gen_helper_ssub8)

DO_PAR_ADDSUB_GE(UADD16, gen_helper_uadd16)
DO_PAR_ADDSUB_GE(UASX, gen_helper_uaddsubx)
DO_PAR_ADDSUB_GE(USAX, gen_helper_usubaddx)
DO_PAR_ADDSUB_GE(USUB16, gen_helper_usub16)
DO_PAR_ADDSUB_GE(UADD8, gen_helper_uadd8)
DO_PAR_ADDSUB_GE(USUB8, gen_helper_usub8)

DO_PAR_ADDSUB(QADD16, gen_helper_qadd16)
DO_PAR_ADDSUB(QASX, gen_helper_qaddsubx)
DO_PAR_ADDSUB(QSAX, gen_helper_qsubaddx)
DO_PAR_ADDSUB(QSUB16, gen_helper_qsub16)
DO_PAR_ADDSUB(QADD8, gen_helper_qadd8)
DO_PAR_ADDSUB(QSUB8, gen_helper_qsub8)

DO_PAR_ADDSUB(UQADD16, gen_helper_uqadd16)
DO_PAR_ADDSUB(UQASX, gen_helper_uqaddsubx)
DO_PAR_ADDSUB(UQSAX, gen_helper_uqsubaddx)
DO_PAR_ADDSUB(UQSUB16, gen_helper_uqsub16)
DO_PAR_ADDSUB(UQADD8, gen_helper_uqadd8)
DO_PAR_ADDSUB(UQSUB8, gen_helper_uqsub8)

DO_PAR_ADDSUB(SHADD16, gen_helper_shadd16)
DO_PAR_ADDSUB(SHASX, gen_helper_shaddsubx)
DO_PAR_ADDSUB(SHSAX, gen_helper_shsubaddx)
DO_PAR_ADDSUB(SHSUB16, gen_helper_shsub16)
DO_PAR_ADDSUB(SHADD8, gen_helper_shadd8)
DO_PAR_ADDSUB(SHSUB8, gen_helper_shsub8)

DO_PAR_ADDSUB(UHADD16, gen_helper_uhadd16)
DO_PAR_ADDSUB(UHASX, gen_helper_uhaddsubx)
DO_PAR_ADDSUB(UHSAX, gen_helper_uhsubaddx)
DO_PAR_ADDSUB(UHSUB16, gen_helper_uhsub16)
DO_PAR_ADDSUB(UHADD8, gen_helper_uhadd8)
DO_PAR_ADDSUB(UHSUB8, gen_helper_uhsub8)

#undef DO_PAR_ADDSUB
#undef DO_PAR_ADDSUB_GE
/*
 * Packing, unpacking, saturation, and reversal
 */

static bool trans_PKH(DisasContext *s, arg_PKH *a)
{
    TCGv_i32 tn, tm;
    int shift = a->imm;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_6) {
        return false;
    }

    tn = load_reg(s, a->rn);
    tm = load_reg(s, a->rm);
    if (a->tb) {
        /* PKHTB */
        if (shift == 0) {
            shift = 31;
        }
        tcg_gen_sari_i32(tm, tm, shift);
        tcg_gen_deposit_i32(tn, tn, tm, 0, 16);
    } else {
        /* PKHBT */
        tcg_gen_shli_i32(tm, tm, shift);
        tcg_gen_deposit_i32(tn, tm, tn, 0, 16);
    }
    tcg_temp_free_i32(tm);
    store_reg(s, a->rd, tn);
    return true;
}

static bool op_sat(DisasContext *s, arg_sat *a,
                   void (*gen)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp, satimm;
    int shift = a->imm;

    if (!ENABLE_ARCH_6) {
        return false;
    }

    tmp = load_reg(s, a->rn);
    if (a->sh) {
        tcg_gen_sari_i32(tmp, tmp, shift ? shift : 31);
    } else {
        tcg_gen_shli_i32(tmp, tmp, shift);
    }

    satimm = tcg_const_i32(a->satimm);
    gen(tmp, cpu_env, tmp, satimm);
    tcg_temp_free_i32(satimm);

    store_reg(s, a->rd, tmp);
    return true;
}

static bool trans_SSAT(DisasContext *s, arg_sat *a)
{
    return op_sat(s, a, gen_helper_ssat);
}

static bool trans_USAT(DisasContext *s, arg_sat *a)
{
    return op_sat(s, a, gen_helper_usat);
}

static bool trans_SSAT16(DisasContext *s, arg_sat *a)
{
    if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
        return false;
    }
    return op_sat(s, a, gen_helper_ssat16);
}

static bool trans_USAT16(DisasContext *s, arg_sat *a)
{
    if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
        return false;
    }
    return op_sat(s, a, gen_helper_usat16);
}
static bool op_xta(DisasContext *s, arg_rrr_rot *a,
                   void (*gen_extract)(TCGv_i32, TCGv_i32),
                   void (*gen_add)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (!ENABLE_ARCH_6) {
        return false;
    }

    tmp = load_reg(s, a->rm);
    /*
     * TODO: In many cases we could do a shift instead of a rotate.
     * Combined with a simple extend, that becomes an extract.
     */
    tcg_gen_rotri_i32(tmp, tmp, a->rot * 8);
    gen_extract(tmp, tmp);

    if (a->rn != 15) {
        TCGv_i32 tmp2 = load_reg(s, a->rn);
        gen_add(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
    }
    store_reg(s, a->rd, tmp);
    return true;
}

static bool trans_SXTAB(DisasContext *s, arg_rrr_rot *a)
{
    return op_xta(s, a, tcg_gen_ext8s_i32, tcg_gen_add_i32);
}

static bool trans_SXTAH(DisasContext *s, arg_rrr_rot *a)
{
    return op_xta(s, a, tcg_gen_ext16s_i32, tcg_gen_add_i32);
}

static bool trans_SXTAB16(DisasContext *s, arg_rrr_rot *a)
{
    if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
        return false;
    }
    return op_xta(s, a, gen_helper_sxtb16, gen_add16);
}

static bool trans_UXTAB(DisasContext *s, arg_rrr_rot *a)
{
    return op_xta(s, a, tcg_gen_ext8u_i32, tcg_gen_add_i32);
}

static bool trans_UXTAH(DisasContext *s, arg_rrr_rot *a)
{
    return op_xta(s, a, tcg_gen_ext16u_i32, tcg_gen_add_i32);
}

static bool trans_UXTAB16(DisasContext *s, arg_rrr_rot *a)
{
    if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
        return false;
    }
    return op_xta(s, a, gen_helper_uxtb16, gen_add16);
}

static bool trans_SEL(DisasContext *s, arg_rrr *a)
{
    TCGv_i32 t1, t2, t3;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_6) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    t3 = tcg_temp_new_i32();
    tcg_gen_ld_i32(t3, cpu_env, offsetof(CPUARMState, GE));
    gen_helper_sel_flags(t1, t3, t1, t2);
    tcg_temp_free_i32(t3);
    tcg_temp_free_i32(t2);
    store_reg(s, a->rd, t1);
    return true;
}

static bool op_rr(DisasContext *s, arg_rr *a,
                  void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    tmp = load_reg(s, a->rm);
    gen(tmp, tmp);
    store_reg(s, a->rd, tmp);
    return true;
}

static bool trans_REV(DisasContext *s, arg_rr *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    return op_rr(s, a, tcg_gen_bswap32_i32);
}

static bool trans_REV16(DisasContext *s, arg_rr *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    return op_rr(s, a, gen_rev16);
}

static bool trans_REVSH(DisasContext *s, arg_rr *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    return op_rr(s, a, gen_revsh);
}

static bool trans_RBIT(DisasContext *s, arg_rr *a)
{
    if (!ENABLE_ARCH_6T2) {
        return false;
    }
    return op_rr(s, a, gen_helper_rbit);
}
7501 * Signed multiply, signed and unsigned divide
7504 static bool op_smlad(DisasContext
*s
, arg_rrrr
*a
, bool m_swap
, bool sub
)
7508 if (!ENABLE_ARCH_6
) {
7512 t1
= load_reg(s
, a
->rn
);
7513 t2
= load_reg(s
, a
->rm
);
7515 gen_swap_half(t2
, t2
);
7517 gen_smul_dual(t1
, t2
);
7521 * This subtraction cannot overflow, so we can do a simple
7522 * 32-bit subtraction and then a possible 32-bit saturating
7525 tcg_gen_sub_i32(t1
, t1
, t2
);
7526 tcg_temp_free_i32(t2
);
7529 t2
= load_reg(s
, a
->ra
);
7530 gen_helper_add_setq(t1
, cpu_env
, t1
, t2
);
7531 tcg_temp_free_i32(t2
);
7533 } else if (a
->ra
== 15) {
7534 /* Single saturation-checking addition */
7535 gen_helper_add_setq(t1
, cpu_env
, t1
, t2
);
7536 tcg_temp_free_i32(t2
);
7539 * We need to add the products and Ra together and then
7540 * determine whether the final result overflowed. Doing
7541 * this as two separate add-and-check-overflow steps incorrectly
7542 * sets Q for cases like (-32768 * -32768) + (-32768 * -32768) + -1.
7543 * Do all the arithmetic at 64-bits and then check for overflow.
7546 TCGv_i32 t3
, qf
, one
;
7548 p64
= tcg_temp_new_i64();
7549 q64
= tcg_temp_new_i64();
7550 tcg_gen_ext_i32_i64(p64
, t1
);
7551 tcg_gen_ext_i32_i64(q64
, t2
);
7552 tcg_gen_add_i64(p64
, p64
, q64
);
7553 load_reg_var(s
, t2
, a
->ra
);
7554 tcg_gen_ext_i32_i64(q64
, t2
);
7555 tcg_gen_add_i64(p64
, p64
, q64
);
7556 tcg_temp_free_i64(q64
);
7558 tcg_gen_extr_i64_i32(t1
, t2
, p64
);
7559 tcg_temp_free_i64(p64
);
7561 * t1 is the low half of the result which goes into Rd.
7562 * We have overflow and must set Q if the high half (t2)
7563 * is different from the sign-extension of t1.
7565 t3
= tcg_temp_new_i32();
7566 tcg_gen_sari_i32(t3
, t1
, 31);
7567 qf
= load_cpu_field(QF
);
7568 one
= tcg_const_i32(1);
7569 tcg_gen_movcond_i32(TCG_COND_NE
, qf
, t2
, t3
, one
, qf
);
7570 store_cpu_field(qf
, QF
);
7571 tcg_temp_free_i32(one
);
7572 tcg_temp_free_i32(t3
);
7573 tcg_temp_free_i32(t2
);
7575 store_reg(s
, a
->rd
, t1
);
7579 static bool trans_SMLAD(DisasContext
*s
, arg_rrrr
*a
)
7581 return op_smlad(s
, a
, false, false);
7584 static bool trans_SMLADX(DisasContext
*s
, arg_rrrr
*a
)
7586 return op_smlad(s
, a
, true, false);
7589 static bool trans_SMLSD(DisasContext
*s
, arg_rrrr
*a
)
7591 return op_smlad(s
, a
, false, true);
7594 static bool trans_SMLSDX(DisasContext
*s
, arg_rrrr
*a
)
7596 return op_smlad(s
, a
, true, true);
7599 static bool op_smlald(DisasContext
*s
, arg_rrrr
*a
, bool m_swap
, bool sub
)
7604 if (!ENABLE_ARCH_6
) {
7608 t1
= load_reg(s
, a
->rn
);
7609 t2
= load_reg(s
, a
->rm
);
7611 gen_swap_half(t2
, t2
);
7613 gen_smul_dual(t1
, t2
);
7615 l1
= tcg_temp_new_i64();
7616 l2
= tcg_temp_new_i64();
7617 tcg_gen_ext_i32_i64(l1
, t1
);
7618 tcg_gen_ext_i32_i64(l2
, t2
);
7619 tcg_temp_free_i32(t1
);
7620 tcg_temp_free_i32(t2
);
7623 tcg_gen_sub_i64(l1
, l1
, l2
);
7625 tcg_gen_add_i64(l1
, l1
, l2
);
7627 tcg_temp_free_i64(l2
);
7629 gen_addq(s
, l1
, a
->ra
, a
->rd
);
7630 gen_storeq_reg(s
, a
->ra
, a
->rd
, l1
);
7631 tcg_temp_free_i64(l1
);
7635 static bool trans_SMLALD(DisasContext
*s
, arg_rrrr
*a
)
7637 return op_smlald(s
, a
, false, false);
7640 static bool trans_SMLALDX(DisasContext
*s
, arg_rrrr
*a
)
7642 return op_smlald(s
, a
, true, false);
7645 static bool trans_SMLSLD(DisasContext
*s
, arg_rrrr
*a
)
7647 return op_smlald(s
, a
, false, true);
7650 static bool trans_SMLSLDX(DisasContext
*s
, arg_rrrr
*a
)
7652 return op_smlald(s
, a
, true, true);
7655 static bool op_smmla(DisasContext
*s
, arg_rrrr
*a
, bool round
, bool sub
)
7660 ? !arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)
7665 t1
= load_reg(s
, a
->rn
);
7666 t2
= load_reg(s
, a
->rm
);
7667 tcg_gen_muls2_i32(t2
, t1
, t1
, t2
);
7670 TCGv_i32 t3
= load_reg(s
, a
->ra
);
7673 * For SMMLS, we need a 64-bit subtract. Borrow caused by
7674 * a non-zero multiplicand lowpart, and the correct result
7675 * lowpart for rounding.
7677 TCGv_i32 zero
= tcg_const_i32(0);
7678 tcg_gen_sub2_i32(t2
, t1
, zero
, t3
, t2
, t1
);
7679 tcg_temp_free_i32(zero
);
7681 tcg_gen_add_i32(t1
, t1
, t3
);
7683 tcg_temp_free_i32(t3
);
7687 * Adding 0x80000000 to the 64-bit quantity means that we have
7688 * carry in to the high word when the low word has the msb set.
7690 tcg_gen_shri_i32(t2
, t2
, 31);
7691 tcg_gen_add_i32(t1
, t1
, t2
);
7693 tcg_temp_free_i32(t2
);
7694 store_reg(s
, a
->rd
, t1
);
7698 static bool trans_SMMLA(DisasContext
*s
, arg_rrrr
*a
)
7700 return op_smmla(s
, a
, false, false);
7703 static bool trans_SMMLAR(DisasContext
*s
, arg_rrrr
*a
)
7705 return op_smmla(s
, a
, true, false);
7708 static bool trans_SMMLS(DisasContext
*s
, arg_rrrr
*a
)
7710 return op_smmla(s
, a
, false, true);
7713 static bool trans_SMMLSR(DisasContext
*s
, arg_rrrr
*a
)
7715 return op_smmla(s
, a
, true, true);
7718 static bool op_div(DisasContext
*s
, arg_rrr
*a
, bool u
)
7723 ? !dc_isar_feature(aa32_thumb_div
, s
)
7724 : !dc_isar_feature(aa32_arm_div
, s
)) {
7728 t1
= load_reg(s
, a
->rn
);
7729 t2
= load_reg(s
, a
->rm
);
7731 gen_helper_udiv(t1
, t1
, t2
);
7733 gen_helper_sdiv(t1
, t1
, t2
);
7735 tcg_temp_free_i32(t2
);
7736 store_reg(s
, a
->rd
, t1
);
7740 static bool trans_SDIV(DisasContext
*s
, arg_rrr
*a
)
7742 return op_div(s
, a
, false);
7745 static bool trans_UDIV(DisasContext
*s
, arg_rrr
*a
)
7747 return op_div(s
, a
, true);
7751 * Block data transfer
7754 static TCGv_i32
op_addr_block_pre(DisasContext
*s
, arg_ldst_block
*a
, int n
)
7756 TCGv_i32 addr
= load_reg(s
, a
->rn
);
7761 tcg_gen_addi_i32(addr
, addr
, 4);
7764 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7766 } else if (!a
->i
&& n
!= 1) {
7767 /* post decrement */
7768 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7771 if (s
->v8m_stackcheck
&& a
->rn
== 13 && a
->w
) {
7773 * If the writeback is incrementing SP rather than
7774 * decrementing it, and the initial SP is below the
7775 * stack limit but the final written-back SP would
7776 * be above, then then we must not perform any memory
7777 * accesses, but it is IMPDEF whether we generate
7778 * an exception. We choose to do so in this case.
7779 * At this point 'addr' is the lowest address, so
7780 * either the original SP (if incrementing) or our
7781 * final SP (if decrementing), so that's what we check.
7783 gen_helper_v8m_stackcheck(cpu_env
, addr
);
7789 static void op_addr_block_post(DisasContext
*s
, arg_ldst_block
*a
,
7790 TCGv_i32 addr
, int n
)
7796 /* post increment */
7797 tcg_gen_addi_i32(addr
, addr
, 4);
7799 /* post decrement */
7800 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
7802 } else if (!a
->i
&& n
!= 1) {
7804 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
7806 store_reg(s
, a
->rn
, addr
);
7808 tcg_temp_free_i32(addr
);
7812 static bool op_stm(DisasContext
*s
, arg_ldst_block
*a
, int min_n
)
7814 int i
, j
, n
, list
, mem_idx
;
7816 TCGv_i32 addr
, tmp
, tmp2
;
7821 /* Only usable in supervisor mode. */
7822 unallocated_encoding(s
);
7829 if (n
< min_n
|| a
->rn
== 15) {
7830 unallocated_encoding(s
);
7834 addr
= op_addr_block_pre(s
, a
, n
);
7835 mem_idx
= get_mem_index(s
);
7837 for (i
= j
= 0; i
< 16; i
++) {
7838 if (!(list
& (1 << i
))) {
7842 if (user
&& i
!= 15) {
7843 tmp
= tcg_temp_new_i32();
7844 tmp2
= tcg_const_i32(i
);
7845 gen_helper_get_user_reg(tmp
, cpu_env
, tmp2
);
7846 tcg_temp_free_i32(tmp2
);
7848 tmp
= load_reg(s
, i
);
7850 gen_aa32_st32(s
, tmp
, addr
, mem_idx
);
7851 tcg_temp_free_i32(tmp
);
7853 /* No need to add after the last transfer. */
7855 tcg_gen_addi_i32(addr
, addr
, 4);
7859 op_addr_block_post(s
, a
, addr
, n
);
7863 static bool trans_STM(DisasContext
*s
, arg_ldst_block
*a
)
7865 /* BitCount(list) < 1 is UNPREDICTABLE */
7866 return op_stm(s
, a
, 1);
7869 static bool trans_STM_t32(DisasContext
*s
, arg_ldst_block
*a
)
7871 /* Writeback register in register list is UNPREDICTABLE for T32. */
7872 if (a
->w
&& (a
->list
& (1 << a
->rn
))) {
7873 unallocated_encoding(s
);
7876 /* BitCount(list) < 2 is UNPREDICTABLE */
7877 return op_stm(s
, a
, 2);
7880 static bool do_ldm(DisasContext
*s
, arg_ldst_block
*a
, int min_n
)
7882 int i
, j
, n
, list
, mem_idx
;
7885 bool exc_return
= false;
7886 TCGv_i32 addr
, tmp
, tmp2
, loaded_var
;
7889 /* LDM (user), LDM (exception return) */
7891 /* Only usable in supervisor mode. */
7892 unallocated_encoding(s
);
7895 if (extract32(a
->list
, 15, 1)) {
7899 /* LDM (user) does not allow writeback. */
7901 unallocated_encoding(s
);
7909 if (n
< min_n
|| a
->rn
== 15) {
7910 unallocated_encoding(s
);
7914 addr
= op_addr_block_pre(s
, a
, n
);
7915 mem_idx
= get_mem_index(s
);
7916 loaded_base
= false;
7919 for (i
= j
= 0; i
< 16; i
++) {
7920 if (!(list
& (1 << i
))) {
7924 tmp
= tcg_temp_new_i32();
7925 gen_aa32_ld32u(s
, tmp
, addr
, mem_idx
);
7927 tmp2
= tcg_const_i32(i
);
7928 gen_helper_set_user_reg(cpu_env
, tmp2
, tmp
);
7929 tcg_temp_free_i32(tmp2
);
7930 tcg_temp_free_i32(tmp
);
7931 } else if (i
== a
->rn
) {
7934 } else if (i
== 15 && exc_return
) {
7935 store_pc_exc_ret(s
, tmp
);
7937 store_reg_from_load(s
, i
, tmp
);
7940 /* No need to add after the last transfer. */
7942 tcg_gen_addi_i32(addr
, addr
, 4);
7946 op_addr_block_post(s
, a
, addr
, n
);
7949 /* Note that we reject base == pc above. */
7950 store_reg(s
, a
->rn
, loaded_var
);
7954 /* Restore CPSR from SPSR. */
7955 tmp
= load_cpu_field(spsr
);
7956 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
7959 gen_helper_cpsr_write_eret(cpu_env
, tmp
);
7960 tcg_temp_free_i32(tmp
);
7961 /* Must exit loop to check un-masked IRQs */
7962 s
->base
.is_jmp
= DISAS_EXIT
;
7967 static bool trans_LDM_a32(DisasContext
*s
, arg_ldst_block
*a
)
7970 * Writeback register in register list is UNPREDICTABLE
7971 * for ArchVersion() >= 7. Prior to v7, A32 would write
7972 * an UNKNOWN value to the base register.
7974 if (ENABLE_ARCH_7
&& a
->w
&& (a
->list
& (1 << a
->rn
))) {
7975 unallocated_encoding(s
);
7978 /* BitCount(list) < 1 is UNPREDICTABLE */
7979 return do_ldm(s
, a
, 1);
7982 static bool trans_LDM_t32(DisasContext
*s
, arg_ldst_block
*a
)
7984 /* Writeback register in register list is UNPREDICTABLE for T32. */
7985 if (a
->w
&& (a
->list
& (1 << a
->rn
))) {
7986 unallocated_encoding(s
);
7989 /* BitCount(list) < 2 is UNPREDICTABLE */
7990 return do_ldm(s
, a
, 2);
7993 static bool trans_LDM_t16(DisasContext
*s
, arg_ldst_block
*a
)
7995 /* Writeback is conditional on the base register not being loaded. */
7996 a
->w
= !(a
->list
& (1 << a
->rn
));
7997 /* BitCount(list) < 1 is UNPREDICTABLE */
7998 return do_ldm(s
, a
, 1);
8001 static bool trans_CLRM(DisasContext
*s
, arg_CLRM
*a
)
8006 if (!dc_isar_feature(aa32_m_sec_state
, s
)) {
8010 if (extract32(a
->list
, 13, 1)) {
8015 /* UNPREDICTABLE; we choose to UNDEF */
8019 zero
= tcg_const_i32(0);
8020 for (i
= 0; i
< 15; i
++) {
8021 if (extract32(a
->list
, i
, 1)) {
8023 tcg_gen_mov_i32(cpu_R
[i
], zero
);
8026 if (extract32(a
->list
, 15, 1)) {
8028 * Clear APSR (by calling the MSR helper with the same argument
8029 * as for "MSR APSR_nzcvqg, Rn": mask = 0b1100, SYSM=0)
8031 TCGv_i32 maskreg
= tcg_const_i32(0xc << 8);
8032 gen_helper_v7m_msr(cpu_env
, maskreg
, zero
);
8033 tcg_temp_free_i32(maskreg
);
8035 tcg_temp_free_i32(zero
);
8040 * Branch, branch with link
8043 static bool trans_B(DisasContext
*s
, arg_i
*a
)
8045 gen_jmp(s
, read_pc(s
) + a
->imm
);
8049 static bool trans_B_cond_thumb(DisasContext
*s
, arg_ci
*a
)
8051 /* This has cond from encoding, required to be outside IT block. */
8052 if (a
->cond
>= 0xe) {
8055 if (s
->condexec_mask
) {
8056 unallocated_encoding(s
);
8059 arm_skip_unless(s
, a
->cond
);
8060 gen_jmp(s
, read_pc(s
) + a
->imm
);
8064 static bool trans_BL(DisasContext
*s
, arg_i
*a
)
8066 tcg_gen_movi_i32(cpu_R
[14], s
->base
.pc_next
| s
->thumb
);
8067 gen_jmp(s
, read_pc(s
) + a
->imm
);
8071 static bool trans_BLX_i(DisasContext
*s
, arg_BLX_i
*a
)
8076 * BLX <imm> would be useless on M-profile; the encoding space
8077 * is used for other insns from v8.1M onward, and UNDEFs before that.
8079 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
8083 /* For A32, ARM_FEATURE_V5 is checked near the start of the uncond block. */
8084 if (s
->thumb
&& (a
->imm
& 2)) {
8087 tcg_gen_movi_i32(cpu_R
[14], s
->base
.pc_next
| s
->thumb
);
8088 tmp
= tcg_const_i32(!s
->thumb
);
8089 store_cpu_field(tmp
, thumb
);
8090 gen_jmp(s
, (read_pc(s
) & ~3) + a
->imm
);
8094 static bool trans_BL_BLX_prefix(DisasContext
*s
, arg_BL_BLX_prefix
*a
)
8096 assert(!arm_dc_feature(s
, ARM_FEATURE_THUMB2
));
8097 tcg_gen_movi_i32(cpu_R
[14], read_pc(s
) + (a
->imm
<< 12));
8101 static bool trans_BL_suffix(DisasContext
*s
, arg_BL_suffix
*a
)
8103 TCGv_i32 tmp
= tcg_temp_new_i32();
8105 assert(!arm_dc_feature(s
, ARM_FEATURE_THUMB2
));
8106 tcg_gen_addi_i32(tmp
, cpu_R
[14], (a
->imm
<< 1) | 1);
8107 tcg_gen_movi_i32(cpu_R
[14], s
->base
.pc_next
| 1);
8112 static bool trans_BLX_suffix(DisasContext
*s
, arg_BLX_suffix
*a
)
8116 assert(!arm_dc_feature(s
, ARM_FEATURE_THUMB2
));
8117 if (!ENABLE_ARCH_5
) {
8120 tmp
= tcg_temp_new_i32();
8121 tcg_gen_addi_i32(tmp
, cpu_R
[14], a
->imm
<< 1);
8122 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
8123 tcg_gen_movi_i32(cpu_R
[14], s
->base
.pc_next
| 1);
8128 static bool trans_BF(DisasContext
*s
, arg_BF
*a
)
8131 * M-profile branch future insns. The architecture permits an
8132 * implementation to implement these as NOPs (equivalent to
8133 * discarding the LO_BRANCH_INFO cache immediately), and we
8134 * take that IMPDEF option because for QEMU a "real" implementation
8135 * would be complicated and wouldn't execute any faster.
8137 if (!dc_isar_feature(aa32_lob
, s
)) {
8141 /* SEE "Related encodings" (loop insns) */
8148 static bool trans_DLS(DisasContext
*s
, arg_DLS
*a
)
8150 /* M-profile low-overhead loop start */
8153 if (!dc_isar_feature(aa32_lob
, s
)) {
8156 if (a
->rn
== 13 || a
->rn
== 15) {
8157 /* CONSTRAINED UNPREDICTABLE: we choose to UNDEF */
8161 /* Not a while loop, no tail predication: just set LR to the count */
8162 tmp
= load_reg(s
, a
->rn
);
8163 store_reg(s
, 14, tmp
);
8167 static bool trans_WLS(DisasContext
*s
, arg_WLS
*a
)
8169 /* M-profile low-overhead while-loop start */
8171 TCGLabel
*nextlabel
;
8173 if (!dc_isar_feature(aa32_lob
, s
)) {
8176 if (a
->rn
== 13 || a
->rn
== 15) {
8177 /* CONSTRAINED UNPREDICTABLE: we choose to UNDEF */
8180 if (s
->condexec_mask
) {
8182 * WLS in an IT block is CONSTRAINED UNPREDICTABLE;
8183 * we choose to UNDEF, because otherwise our use of
8184 * gen_goto_tb(1) would clash with the use of TB exit 1
8185 * in the dc->condjmp condition-failed codepath in
8186 * arm_tr_tb_stop() and we'd get an assertion.
8190 nextlabel
= gen_new_label();
8191 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_R
[a
->rn
], 0, nextlabel
);
8192 tmp
= load_reg(s
, a
->rn
);
8193 store_reg(s
, 14, tmp
);
8194 gen_jmp_tb(s
, s
->base
.pc_next
, 1);
8196 gen_set_label(nextlabel
);
8197 gen_jmp(s
, read_pc(s
) + a
->imm
);
8201 static bool trans_LE(DisasContext
*s
, arg_LE
*a
)
8204 * M-profile low-overhead loop end. The architecture permits an
8205 * implementation to discard the LO_BRANCH_INFO cache at any time,
8206 * and we take the IMPDEF option to never set it in the first place
8207 * (equivalent to always discarding it immediately), because for QEMU
8208 * a "real" implementation would be complicated and wouldn't execute
8213 if (!dc_isar_feature(aa32_lob
, s
)) {
8218 /* Not loop-forever. If LR <= 1 this is the last loop: do nothing. */
8219 arm_gen_condlabel(s
);
8220 tcg_gen_brcondi_i32(TCG_COND_LEU
, cpu_R
[14], 1, s
->condlabel
);
8222 tmp
= load_reg(s
, 14);
8223 tcg_gen_addi_i32(tmp
, tmp
, -1);
8224 store_reg(s
, 14, tmp
);
8226 /* Jump back to the loop start */
8227 gen_jmp(s
, read_pc(s
) - a
->imm
);
8231 static bool op_tbranch(DisasContext
*s
, arg_tbranch
*a
, bool half
)
8235 tmp
= load_reg(s
, a
->rm
);
8237 tcg_gen_add_i32(tmp
, tmp
, tmp
);
8239 addr
= load_reg(s
, a
->rn
);
8240 tcg_gen_add_i32(addr
, addr
, tmp
);
8242 gen_aa32_ld_i32(s
, tmp
, addr
, get_mem_index(s
),
8243 half
? MO_UW
| s
->be_data
: MO_UB
);
8244 tcg_temp_free_i32(addr
);
8246 tcg_gen_add_i32(tmp
, tmp
, tmp
);
8247 tcg_gen_addi_i32(tmp
, tmp
, read_pc(s
));
8248 store_reg(s
, 15, tmp
);
8252 static bool trans_TBB(DisasContext
*s
, arg_tbranch
*a
)
8254 return op_tbranch(s
, a
, false);
8257 static bool trans_TBH(DisasContext
*s
, arg_tbranch
*a
)
8259 return op_tbranch(s
, a
, true);
8262 static bool trans_CBZ(DisasContext
*s
, arg_CBZ
*a
)
8264 TCGv_i32 tmp
= load_reg(s
, a
->rn
);
8266 arm_gen_condlabel(s
);
8267 tcg_gen_brcondi_i32(a
->nz
? TCG_COND_EQ
: TCG_COND_NE
,
8268 tmp
, 0, s
->condlabel
);
8269 tcg_temp_free_i32(tmp
);
8270 gen_jmp(s
, read_pc(s
) + a
->imm
);
8275 * Supervisor call - both T32 & A32 come here so we need to check
8276 * which mode we are in when checking for semihosting.
8279 static bool trans_SVC(DisasContext
*s
, arg_SVC
*a
)
8281 const uint32_t semihost_imm
= s
->thumb
? 0xab : 0x123456;
8283 if (!arm_dc_feature(s
, ARM_FEATURE_M
) && semihosting_enabled() &&
8284 #ifndef CONFIG_USER_ONLY
8287 (a
->imm
== semihost_imm
)) {
8288 gen_exception_internal_insn(s
, s
->pc_curr
, EXCP_SEMIHOST
);
8290 gen_set_pc_im(s
, s
->base
.pc_next
);
8291 s
->svc_imm
= a
->imm
;
8292 s
->base
.is_jmp
= DISAS_SWI
;
8298 * Unconditional system instructions
8301 static bool trans_RFE(DisasContext
*s
, arg_RFE
*a
)
8303 static const int8_t pre_offset
[4] = {
8304 /* DA */ -4, /* IA */ 0, /* DB */ -8, /* IB */ 4
8306 static const int8_t post_offset
[4] = {
8307 /* DA */ -8, /* IA */ 4, /* DB */ -4, /* IB */ 0
8309 TCGv_i32 addr
, t1
, t2
;
8311 if (!ENABLE_ARCH_6
|| arm_dc_feature(s
, ARM_FEATURE_M
)) {
8315 unallocated_encoding(s
);
8319 addr
= load_reg(s
, a
->rn
);
8320 tcg_gen_addi_i32(addr
, addr
, pre_offset
[a
->pu
]);
8322 /* Load PC into tmp and CPSR into tmp2. */
8323 t1
= tcg_temp_new_i32();
8324 gen_aa32_ld32u(s
, t1
, addr
, get_mem_index(s
));
8325 tcg_gen_addi_i32(addr
, addr
, 4);
8326 t2
= tcg_temp_new_i32();
8327 gen_aa32_ld32u(s
, t2
, addr
, get_mem_index(s
));
8330 /* Base writeback. */
8331 tcg_gen_addi_i32(addr
, addr
, post_offset
[a
->pu
]);
8332 store_reg(s
, a
->rn
, addr
);
8334 tcg_temp_free_i32(addr
);
8340 static bool trans_SRS(DisasContext
*s
, arg_SRS
*a
)
8342 if (!ENABLE_ARCH_6
|| arm_dc_feature(s
, ARM_FEATURE_M
)) {
8345 gen_srs(s
, a
->mode
, a
->pu
, a
->w
);
8349 static bool trans_CPS(DisasContext
*s
, arg_CPS
*a
)
8353 if (!ENABLE_ARCH_6
|| arm_dc_feature(s
, ARM_FEATURE_M
)) {
8357 /* Implemented as NOP in user mode. */
8360 /* TODO: There are quite a lot of UNPREDICTABLE argument combinations. */
8382 gen_set_psr_im(s
, mask
, 0, val
);
8387 static bool trans_CPS_v7m(DisasContext
*s
, arg_CPS_v7m
*a
)
8389 TCGv_i32 tmp
, addr
, el
;
8391 if (!arm_dc_feature(s
, ARM_FEATURE_M
)) {
8395 /* Implemented as NOP in user mode. */
8399 tmp
= tcg_const_i32(a
->im
);
8402 addr
= tcg_const_i32(19);
8403 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
8404 tcg_temp_free_i32(addr
);
8408 addr
= tcg_const_i32(16);
8409 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
8410 tcg_temp_free_i32(addr
);
8412 el
= tcg_const_i32(s
->current_el
);
8413 gen_helper_rebuild_hflags_m32(cpu_env
, el
);
8414 tcg_temp_free_i32(el
);
8415 tcg_temp_free_i32(tmp
);
8421 * Clear-Exclusive, Barriers
8424 static bool trans_CLREX(DisasContext
*s
, arg_CLREX
*a
)
8427 ? !ENABLE_ARCH_7
&& !arm_dc_feature(s
, ARM_FEATURE_M
)
8428 : !ENABLE_ARCH_6K
) {
8435 static bool trans_DSB(DisasContext
*s
, arg_DSB
*a
)
8437 if (!ENABLE_ARCH_7
&& !arm_dc_feature(s
, ARM_FEATURE_M
)) {
8440 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
8444 static bool trans_DMB(DisasContext
*s
, arg_DMB
*a
)
8446 return trans_DSB(s
, NULL
);
8449 static bool trans_ISB(DisasContext
*s
, arg_ISB
*a
)
8451 if (!ENABLE_ARCH_7
&& !arm_dc_feature(s
, ARM_FEATURE_M
)) {
8455 * We need to break the TB after this insn to execute
8456 * self-modifying code correctly and also to take
8457 * any pending interrupts immediately.
8459 gen_goto_tb(s
, 0, s
->base
.pc_next
);
8463 static bool trans_SB(DisasContext
*s
, arg_SB
*a
)
8465 if (!dc_isar_feature(aa32_sb
, s
)) {
8469 * TODO: There is no speculation barrier opcode
8470 * for TCG; MB and end the TB instead.
8472 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
8473 gen_goto_tb(s
, 0, s
->base
.pc_next
);
8477 static bool trans_SETEND(DisasContext
*s
, arg_SETEND
*a
)
8479 if (!ENABLE_ARCH_6
) {
8482 if (a
->E
!= (s
->be_data
== MO_BE
)) {
8483 gen_helper_setend(cpu_env
);
8484 s
->base
.is_jmp
= DISAS_UPDATE_EXIT
;
8490 * Preload instructions
8491 * All are nops, contingent on the appropriate arch level.
8494 static bool trans_PLD(DisasContext
*s
, arg_PLD
*a
)
8496 return ENABLE_ARCH_5TE
;
8499 static bool trans_PLDW(DisasContext
*s
, arg_PLD
*a
)
8501 return arm_dc_feature(s
, ARM_FEATURE_V7MP
);
8504 static bool trans_PLI(DisasContext
*s
, arg_PLD
*a
)
8506 return ENABLE_ARCH_7
;
8513 static bool trans_IT(DisasContext
*s
, arg_IT
*a
)
8515 int cond_mask
= a
->cond_mask
;
8518 * No actual code generated for this insn, just setup state.
8520 * Combinations of firstcond and mask which set up an 0b1111
8521 * condition are UNPREDICTABLE; we take the CONSTRAINED
8522 * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
8523 * i.e. both meaning "execute always".
8525 s
->condexec_cond
= (cond_mask
>> 4) & 0xe;
8526 s
->condexec_mask
= cond_mask
& 0x1f;
8530 /* v8.1M CSEL/CSINC/CSNEG/CSINV */
8531 static bool trans_CSEL(DisasContext
*s
, arg_CSEL
*a
)
8533 TCGv_i32 rn
, rm
, zero
;
8536 if (!arm_dc_feature(s
, ARM_FEATURE_V8_1M
)) {
8541 /* SEE "Related encodings" (MVE shifts) */
8545 if (a
->rd
== 13 || a
->rd
== 15 || a
->rn
== 13 || a
->fcond
>= 14) {
8546 /* CONSTRAINED UNPREDICTABLE: we choose to UNDEF */
8550 /* In this insn input reg fields of 0b1111 mean "zero", not "PC" */
8552 rn
= tcg_const_i32(0);
8554 rn
= load_reg(s
, a
->rn
);
8557 rm
= tcg_const_i32(0);
8559 rm
= load_reg(s
, a
->rm
);
8566 tcg_gen_addi_i32(rm
, rm
, 1);
8569 tcg_gen_not_i32(rm
, rm
);
8572 tcg_gen_neg_i32(rm
, rm
);
8575 g_assert_not_reached();
8578 arm_test_cc(&c
, a
->fcond
);
8579 zero
= tcg_const_i32(0);
8580 tcg_gen_movcond_i32(c
.cond
, rn
, c
.value
, zero
, rn
, rm
);
8582 tcg_temp_free_i32(zero
);
8584 store_reg(s
, a
->rd
, rn
);
8585 tcg_temp_free_i32(rm
);
8594 static void disas_arm_insn(DisasContext
*s
, unsigned int insn
)
8596 unsigned int cond
= insn
>> 28;
8598 /* M variants do not implement ARM mode; this must raise the INVSTATE
8599 * UsageFault exception.
8601 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
8602 gen_exception_insn(s
, s
->pc_curr
, EXCP_INVSTATE
, syn_uncategorized(),
8603 default_exception_el(s
));
8608 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8609 * choose to UNDEF. In ARMv5 and above the space is used
8610 * for miscellaneous unconditional instructions.
8612 if (!arm_dc_feature(s
, ARM_FEATURE_V5
)) {
8613 unallocated_encoding(s
);
8617 /* Unconditional instructions. */
8618 /* TODO: Perhaps merge these into one decodetree output file. */
8619 if (disas_a32_uncond(s
, insn
) ||
8620 disas_vfp_uncond(s
, insn
) ||
8621 disas_neon_dp(s
, insn
) ||
8622 disas_neon_ls(s
, insn
) ||
8623 disas_neon_shared(s
, insn
)) {
8626 /* fall back to legacy decoder */
8628 if ((insn
& 0x0e000f00) == 0x0c000100) {
8629 if (arm_dc_feature(s
, ARM_FEATURE_IWMMXT
)) {
8630 /* iWMMXt register transfer. */
8631 if (extract32(s
->c15_cpar
, 1, 1)) {
8632 if (!disas_iwmmxt_insn(s
, insn
)) {
8641 /* if not always execute, we generate a conditional jump to
8643 arm_skip_unless(s
, cond
);
8646 /* TODO: Perhaps merge these into one decodetree output file. */
8647 if (disas_a32(s
, insn
) ||
8648 disas_vfp(s
, insn
)) {
8651 /* fall back to legacy decoder */
8652 /* TODO: convert xscale/iwmmxt decoder to decodetree ?? */
8653 if (arm_dc_feature(s
, ARM_FEATURE_XSCALE
)) {
8654 if (((insn
& 0x0c000e00) == 0x0c000000)
8655 && ((insn
& 0x03000000) != 0x03000000)) {
8656 /* Coprocessor insn, coprocessor 0 or 1 */
8657 disas_xscale_insn(s
, insn
);
8663 unallocated_encoding(s
);
8666 static bool thumb_insn_is_16bit(DisasContext
*s
, uint32_t pc
, uint32_t insn
)
8669 * Return true if this is a 16 bit instruction. We must be precise
8670 * about this (matching the decode).
8672 if ((insn
>> 11) < 0x1d) {
8673 /* Definitely a 16-bit instruction */
8677 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
8678 * first half of a 32-bit Thumb insn. Thumb-1 cores might
8679 * end up actually treating this as two 16-bit insns, though,
8680 * if it's half of a bl/blx pair that might span a page boundary.
8682 if (arm_dc_feature(s
, ARM_FEATURE_THUMB2
) ||
8683 arm_dc_feature(s
, ARM_FEATURE_M
)) {
8684 /* Thumb2 cores (including all M profile ones) always treat
8685 * 32-bit insns as 32-bit.
8690 if ((insn
>> 11) == 0x1e && pc
- s
->page_start
< TARGET_PAGE_SIZE
- 3) {
8691 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
8692 * is not on the next page; we merge this into a 32-bit
8697 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
8698 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
8699 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
8700 * -- handle as single 16 bit insn
8705 /* Translate a 32-bit thumb instruction. */
8706 static void disas_thumb2_insn(DisasContext
*s
, uint32_t insn
)
8709 * ARMv6-M supports a limited subset of Thumb2 instructions.
8710 * Other Thumb1 architectures allow only 32-bit
8711 * combined BL/BLX prefix and suffix.
8713 if (arm_dc_feature(s
, ARM_FEATURE_M
) &&
8714 !arm_dc_feature(s
, ARM_FEATURE_V7
)) {
8717 static const uint32_t armv6m_insn
[] = {0xf3808000 /* msr */,
8718 0xf3b08040 /* dsb */,
8719 0xf3b08050 /* dmb */,
8720 0xf3b08060 /* isb */,
8721 0xf3e08000 /* mrs */,
8722 0xf000d000 /* bl */};
8723 static const uint32_t armv6m_mask
[] = {0xffe0d000,
8730 for (i
= 0; i
< ARRAY_SIZE(armv6m_insn
); i
++) {
8731 if ((insn
& armv6m_mask
[i
]) == armv6m_insn
[i
]) {
8739 } else if ((insn
& 0xf800e800) != 0xf000e800) {
8740 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB2
)) {
8741 unallocated_encoding(s
);
8746 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
8748 * NOCP takes precedence over any UNDEF for (almost) the
8749 * entire wide range of coprocessor-space encodings, so check
8750 * for it first before proceeding to actually decode eg VFP
8751 * insns. This decode also handles the few insns which are
8752 * in copro space but do not have NOCP checks (eg VLLDM, VLSTM).
8754 if (disas_m_nocp(s
, insn
)) {
8759 if ((insn
& 0xef000000) == 0xef000000) {
8761 * T32 encodings 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
8763 * A32 encodings 0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
8765 uint32_t a32_insn
= (insn
& 0xe2ffffff) |
8766 ((insn
& (1 << 28)) >> 4) | (1 << 28);
8768 if (disas_neon_dp(s
, a32_insn
)) {
8773 if ((insn
& 0xff100000) == 0xf9000000) {
8775 * T32 encodings 0b1111_1001_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
8777 * A32 encodings 0b1111_0100_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
8779 uint32_t a32_insn
= (insn
& 0x00ffffff) | 0xf4000000;
8781 if (disas_neon_ls(s
, a32_insn
)) {
8787 * TODO: Perhaps merge these into one decodetree output file.
8788 * Note disas_vfp is written for a32 with cond field in the
8789 * top nibble. The t32 encoding requires 0xe in the top nibble.
8791 if (disas_t32(s
, insn
) ||
8792 disas_vfp_uncond(s
, insn
) ||
8793 disas_neon_shared(s
, insn
) ||
8794 ((insn
>> 28) == 0xe && disas_vfp(s
, insn
))) {
8799 unallocated_encoding(s
);
8802 static void disas_thumb_insn(DisasContext
*s
, uint32_t insn
)
8804 if (!disas_t16(s
, insn
)) {
8805 unallocated_encoding(s
);
8809 static bool insn_crosses_page(CPUARMState
*env
, DisasContext
*s
)
8811 /* Return true if the insn at dc->base.pc_next might cross a page boundary.
8812 * (False positives are OK, false negatives are not.)
8813 * We know this is a Thumb insn, and our caller ensures we are
8814 * only called if dc->base.pc_next is less than 4 bytes from the page
8815 * boundary, so we cross the page if the first 16 bits indicate
8816 * that this is a 32 bit insn.
8818 uint16_t insn
= arm_lduw_code(env
, s
->base
.pc_next
, s
->sctlr_b
);
8820 return !thumb_insn_is_16bit(s
, s
->base
.pc_next
, insn
);
8823 static void arm_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
8825 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
8826 CPUARMState
*env
= cs
->env_ptr
;
8827 ARMCPU
*cpu
= env_archcpu(env
);
8828 uint32_t tb_flags
= dc
->base
.tb
->flags
;
8829 uint32_t condexec
, core_mmu_idx
;
8831 dc
->isar
= &cpu
->isar
;
8835 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
8836 * there is no secure EL1, so we route exceptions to EL3.
8838 dc
->secure_routed_to_el3
= arm_feature(env
, ARM_FEATURE_EL3
) &&
8839 !arm_el_is_aa64(env
, 3);
8840 dc
->thumb
= FIELD_EX32(tb_flags
, TBFLAG_AM32
, THUMB
);
8841 dc
->be_data
= FIELD_EX32(tb_flags
, TBFLAG_ANY
, BE_DATA
) ? MO_BE
: MO_LE
;
8842 condexec
= FIELD_EX32(tb_flags
, TBFLAG_AM32
, CONDEXEC
);
8843 dc
->condexec_mask
= (condexec
& 0xf) << 1;
8844 dc
->condexec_cond
= condexec
>> 4;
8846 core_mmu_idx
= FIELD_EX32(tb_flags
, TBFLAG_ANY
, MMUIDX
);
8847 dc
->mmu_idx
= core_to_arm_mmu_idx(env
, core_mmu_idx
);
8848 dc
->current_el
= arm_mmu_idx_to_el(dc
->mmu_idx
);
8849 #if !defined(CONFIG_USER_ONLY)
8850 dc
->user
= (dc
->current_el
== 0);
8852 dc
->fp_excp_el
= FIELD_EX32(tb_flags
, TBFLAG_ANY
, FPEXC_EL
);
8854 if (arm_feature(env
, ARM_FEATURE_M
)) {
8855 dc
->vfp_enabled
= 1;
8856 dc
->be_data
= MO_TE
;
8857 dc
->v7m_handler_mode
= FIELD_EX32(tb_flags
, TBFLAG_M32
, HANDLER
);
8858 dc
->v8m_secure
= arm_feature(env
, ARM_FEATURE_M_SECURITY
) &&
8859 regime_is_secure(env
, dc
->mmu_idx
);
8860 dc
->v8m_stackcheck
= FIELD_EX32(tb_flags
, TBFLAG_M32
, STACKCHECK
);
8861 dc
->v8m_fpccr_s_wrong
=
8862 FIELD_EX32(tb_flags
, TBFLAG_M32
, FPCCR_S_WRONG
);
8863 dc
->v7m_new_fp_ctxt_needed
=
8864 FIELD_EX32(tb_flags
, TBFLAG_M32
, NEW_FP_CTXT_NEEDED
);
8865 dc
->v7m_lspact
= FIELD_EX32(tb_flags
, TBFLAG_M32
, LSPACT
);
8868 FIELD_EX32(tb_flags
, TBFLAG_ANY
, BE_DATA
) ? MO_BE
: MO_LE
;
8869 dc
->debug_target_el
=
8870 FIELD_EX32(tb_flags
, TBFLAG_ANY
, DEBUG_TARGET_EL
);
8871 dc
->sctlr_b
= FIELD_EX32(tb_flags
, TBFLAG_A32
, SCTLR_B
);
8872 dc
->hstr_active
= FIELD_EX32(tb_flags
, TBFLAG_A32
, HSTR_ACTIVE
);
8873 dc
->ns
= FIELD_EX32(tb_flags
, TBFLAG_A32
, NS
);
8874 dc
->vfp_enabled
= FIELD_EX32(tb_flags
, TBFLAG_A32
, VFPEN
);
8875 if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
8876 dc
->c15_cpar
= FIELD_EX32(tb_flags
, TBFLAG_A32
, XSCALE_CPAR
);
8878 dc
->vec_len
= FIELD_EX32(tb_flags
, TBFLAG_A32
, VECLEN
);
8879 dc
->vec_stride
= FIELD_EX32(tb_flags
, TBFLAG_A32
, VECSTRIDE
);
8882 dc
->cp_regs
= cpu
->cp_regs
;
8883 dc
->features
= env
->features
;
8885 /* Single step state. The code-generation logic here is:
8887 * generate code with no special handling for single-stepping (except
8888 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
8889 * this happens anyway because those changes are all system register or
8891 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
8892 * emit code for one insn
8893 * emit code to clear PSTATE.SS
8894 * emit code to generate software step exception for completed step
8895 * end TB (as usual for having generated an exception)
8896 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
8897 * emit code to generate a software step exception
8900 dc
->ss_active
= FIELD_EX32(tb_flags
, TBFLAG_ANY
, SS_ACTIVE
);
8901 dc
->pstate_ss
= FIELD_EX32(tb_flags
, TBFLAG_ANY
, PSTATE_SS
);
8902 dc
->is_ldex
= false;
8904 dc
->page_start
= dc
->base
.pc_first
& TARGET_PAGE_MASK
;
8906 /* If architectural single step active, limit to 1. */
8907 if (is_singlestepping(dc
)) {
8908 dc
->base
.max_insns
= 1;
8911 /* ARM is a fixed-length ISA. Bound the number of insns to execute
8912 to those left on the page. */
8914 int bound
= -(dc
->base
.pc_first
| TARGET_PAGE_MASK
) / 4;
8915 dc
->base
.max_insns
= MIN(dc
->base
.max_insns
, bound
);
8918 cpu_V0
= tcg_temp_new_i64();
8919 cpu_V1
= tcg_temp_new_i64();
8920 cpu_M0
= tcg_temp_new_i64();
8923 static void arm_tr_tb_start(DisasContextBase
*dcbase
, CPUState
*cpu
)
8925 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
8927 /* A note on handling of the condexec (IT) bits:
8929 * We want to avoid the overhead of having to write the updated condexec
8930 * bits back to the CPUARMState for every instruction in an IT block. So:
8931 * (1) if the condexec bits are not already zero then we write
8932 * zero back into the CPUARMState now. This avoids complications trying
8933 * to do it at the end of the block. (For example if we don't do this
8934 * it's hard to identify whether we can safely skip writing condexec
8935 * at the end of the TB, which we definitely want to do for the case
8936 * where a TB doesn't do anything with the IT state at all.)
8937 * (2) if we are going to leave the TB then we call gen_set_condexec()
8938 * which will write the correct value into CPUARMState if zero is wrong.
8939 * This is done both for leaving the TB at the end, and for leaving
8940 * it because of an exception we know will happen, which is done in
8941 * gen_exception_insn(). The latter is necessary because we need to
8942 * leave the TB with the PC/IT state just prior to execution of the
8943 * instruction which caused the exception.
8944 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
8945 * then the CPUARMState will be wrong and we need to reset it.
8946 * This is handled in the same way as restoration of the
8947 * PC in these situations; we save the value of the condexec bits
8948 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
8949 * then uses this to restore them after an exception.
8951 * Note that there are no instructions which can read the condexec
8952 * bits, and none which can write non-static values to them, so
8953 * we don't need to care about whether CPUARMState is correct in the
8957 /* Reset the conditional execution bits immediately. This avoids
8958 complications trying to do it at the end of the block. */
8959 if (dc
->condexec_mask
|| dc
->condexec_cond
) {
8960 TCGv_i32 tmp
= tcg_temp_new_i32();
8961 tcg_gen_movi_i32(tmp
, 0);
8962 store_cpu_field(tmp
, condexec_bits
);
8966 static void arm_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cpu
)
8968 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
8970 tcg_gen_insn_start(dc
->base
.pc_next
,
8971 (dc
->condexec_cond
<< 4) | (dc
->condexec_mask
>> 1),
8973 dc
->insn_start
= tcg_last_op();
8976 static bool arm_tr_breakpoint_check(DisasContextBase
*dcbase
, CPUState
*cpu
,
8977 const CPUBreakpoint
*bp
)
8979 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
8981 if (bp
->flags
& BP_CPU
) {
8982 gen_set_condexec(dc
);
8983 gen_set_pc_im(dc
, dc
->base
.pc_next
);
8984 gen_helper_check_breakpoints(cpu_env
);
8985 /* End the TB early; it's likely not going to be executed */
8986 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
8988 gen_exception_internal_insn(dc
, dc
->base
.pc_next
, EXCP_DEBUG
);
8989 /* The address covered by the breakpoint must be
8990 included in [tb->pc, tb->pc + tb->size) in order
8991 to for it to be properly cleared -- thus we
8992 increment the PC here so that the logic setting
8993 tb->size below does the right thing. */
8994 /* TODO: Advance PC by correct instruction length to
8995 * avoid disassembler error messages */
8996 dc
->base
.pc_next
+= 2;
8997 dc
->base
.is_jmp
= DISAS_NORETURN
;
9003 static bool arm_pre_translate_insn(DisasContext
*dc
)
9005 #ifdef CONFIG_USER_ONLY
9006 /* Intercept jump to the magic kernel page. */
9007 if (dc
->base
.pc_next
>= 0xffff0000) {
9008 /* We always get here via a jump, so know we are not in a
9009 conditional execution block. */
9010 gen_exception_internal(EXCP_KERNEL_TRAP
);
9011 dc
->base
.is_jmp
= DISAS_NORETURN
;
9016 if (dc
->ss_active
&& !dc
->pstate_ss
) {
9017 /* Singlestep state is Active-pending.
9018 * If we're in this state at the start of a TB then either
9019 * a) we just took an exception to an EL which is being debugged
9020 * and this is the first insn in the exception handler
9021 * b) debug exceptions were masked and we just unmasked them
9022 * without changing EL (eg by clearing PSTATE.D)
9023 * In either case we're going to take a swstep exception in the
9024 * "did not step an insn" case, and so the syndrome ISV and EX
9025 * bits should be zero.
9027 assert(dc
->base
.num_insns
== 1);
9028 gen_swstep_exception(dc
, 0, 0);
9029 dc
->base
.is_jmp
= DISAS_NORETURN
;
9036 static void arm_post_translate_insn(DisasContext
*dc
)
9038 if (dc
->condjmp
&& !dc
->base
.is_jmp
) {
9039 gen_set_label(dc
->condlabel
);
9042 translator_loop_temp_check(&dc
->base
);
9045 static void arm_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cpu
)
9047 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
9048 CPUARMState
*env
= cpu
->env_ptr
;
9051 if (arm_pre_translate_insn(dc
)) {
9055 dc
->pc_curr
= dc
->base
.pc_next
;
9056 insn
= arm_ldl_code(env
, dc
->base
.pc_next
, dc
->sctlr_b
);
9058 dc
->base
.pc_next
+= 4;
9059 disas_arm_insn(dc
, insn
);
9061 arm_post_translate_insn(dc
);
9063 /* ARM is a fixed-length ISA. We performed the cross-page check
9064 in init_disas_context by adjusting max_insns. */
9067 static bool thumb_insn_is_unconditional(DisasContext
*s
, uint32_t insn
)
9069 /* Return true if this Thumb insn is always unconditional,
9070 * even inside an IT block. This is true of only a very few
9071 * instructions: BKPT, HLT, and SG.
9073 * A larger class of instructions are UNPREDICTABLE if used
9074 * inside an IT block; we do not need to detect those here, because
9075 * what we do by default (perform the cc check and update the IT
9076 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
9077 * choice for those situations.
9079 * insn is either a 16-bit or a 32-bit instruction; the two are
9080 * distinguishable because for the 16-bit case the top 16 bits
9081 * are zeroes, and that isn't a valid 32-bit encoding.
9083 if ((insn
& 0xffffff00) == 0xbe00) {
9088 if ((insn
& 0xffffffc0) == 0xba80 && arm_dc_feature(s
, ARM_FEATURE_V8
) &&
9089 !arm_dc_feature(s
, ARM_FEATURE_M
)) {
9090 /* HLT: v8A only. This is unconditional even when it is going to
9091 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
9092 * For v7 cores this was a plain old undefined encoding and so
9093 * honours its cc check. (We might be using the encoding as
9094 * a semihosting trap, but we don't change the cc check behaviour
9095 * on that account, because a debugger connected to a real v7A
9096 * core and emulating semihosting traps by catching the UNDEF
9097 * exception would also only see cases where the cc check passed.
9098 * No guest code should be trying to do a HLT semihosting trap
9099 * in an IT block anyway.
9104 if (insn
== 0xe97fe97f && arm_dc_feature(s
, ARM_FEATURE_V8
) &&
9105 arm_dc_feature(s
, ARM_FEATURE_M
)) {
9113 static void thumb_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cpu
)
9115 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
9116 CPUARMState
*env
= cpu
->env_ptr
;
9120 if (arm_pre_translate_insn(dc
)) {
9124 dc
->pc_curr
= dc
->base
.pc_next
;
9125 insn
= arm_lduw_code(env
, dc
->base
.pc_next
, dc
->sctlr_b
);
9126 is_16bit
= thumb_insn_is_16bit(dc
, dc
->base
.pc_next
, insn
);
9127 dc
->base
.pc_next
+= 2;
9129 uint32_t insn2
= arm_lduw_code(env
, dc
->base
.pc_next
, dc
->sctlr_b
);
9131 insn
= insn
<< 16 | insn2
;
9132 dc
->base
.pc_next
+= 2;
9136 if (dc
->condexec_mask
&& !thumb_insn_is_unconditional(dc
, insn
)) {
9137 uint32_t cond
= dc
->condexec_cond
;
9140 * Conditionally skip the insn. Note that both 0xe and 0xf mean
9141 * "always"; 0xf is not "never".
9144 arm_skip_unless(dc
, cond
);
9149 disas_thumb_insn(dc
, insn
);
9151 disas_thumb2_insn(dc
, insn
);
9154 /* Advance the Thumb condexec condition. */
9155 if (dc
->condexec_mask
) {
9156 dc
->condexec_cond
= ((dc
->condexec_cond
& 0xe) |
9157 ((dc
->condexec_mask
>> 4) & 1));
9158 dc
->condexec_mask
= (dc
->condexec_mask
<< 1) & 0x1f;
9159 if (dc
->condexec_mask
== 0) {
9160 dc
->condexec_cond
= 0;
9164 arm_post_translate_insn(dc
);
9166 /* Thumb is a variable-length ISA. Stop translation when the next insn
9167 * will touch a new page. This ensures that prefetch aborts occur at
9170 * We want to stop the TB if the next insn starts in a new page,
9171 * or if it spans between this page and the next. This means that
9172 * if we're looking at the last halfword in the page we need to
9173 * see if it's a 16-bit Thumb insn (which will fit in this TB)
9174 * or a 32-bit Thumb insn (which won't).
9175 * This is to avoid generating a silly TB with a single 16-bit insn
9176 * in it at the end of this page (which would execute correctly
9177 * but isn't very efficient).
9179 if (dc
->base
.is_jmp
== DISAS_NEXT
9180 && (dc
->base
.pc_next
- dc
->page_start
>= TARGET_PAGE_SIZE
9181 || (dc
->base
.pc_next
- dc
->page_start
>= TARGET_PAGE_SIZE
- 3
9182 && insn_crosses_page(env
, dc
)))) {
9183 dc
->base
.is_jmp
= DISAS_TOO_MANY
;

static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE_EXIT:
        case DISAS_UPDATE_NOCHAIN:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->base.pc_next);
            break;
        case DISAS_UPDATE_NOCHAIN:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE_EXIT:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }
9299 /* "Condition failed" instruction codepath for the branch/trap insn */
9300 gen_set_label(dc
->condlabel
);
9301 gen_set_condexec(dc
);
9302 if (unlikely(is_singlestepping(dc
))) {
9303 gen_set_pc_im(dc
, dc
->base
.pc_next
);
9304 gen_singlestep_exception(dc
);
9306 gen_goto_tb(dc
, 1, dc
->base
.pc_next
);
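
/*
 * Note on the condition-failed path above: for a conditional branch or
 * trap insn, arm_skip_unless() emitted a branch to dc->condlabel that is
 * taken when the condition check fails, so the code generated at that
 * label either continues at dc->base.pc_next via gen_goto_tb() or, when
 * single-stepping, raises the single-step exception at that PC.
 */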

static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}

static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
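
/*
 * Roughly, the generic translator_loop() drives these hooks in order:
 * init_disas_context and tb_start once per TB, then insn_start (plus
 * breakpoint_check when breakpoints are present) and translate_insn for
 * each guest insn until the TB is ended, and finally tb_stop; disas_log
 * is only used when instruction logging is enabled. The ARM and Thumb
 * tables differ only in their translate_insn hook.
 */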

/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc = { };
    const TranslatorOps *ops = &arm_translator_ops;

    if (FIELD_EX32(tb->flags, TBFLAG_AM32, THUMB)) {
        ops = &thumb_translator_ops;
    }
#ifdef TARGET_AARCH64
    if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
        ops = &aarch64_translator_ops;
    }
#endif

    translator_loop(ops, &dc.base, cpu, tb, max_insns);
}

void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}
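
/*
 * The data[] words unpacked here mirror what the insn_start hook recorded
 * for each guest insn: data[0] is the PC, data[1] the packed IT-block
 * (condexec) state, which is unused for AArch64, and data[2] the exception
 * syndrome stored right-shifted by ARM_INSN_START_WORD2_SHIFT, which is
 * why it is shifted back left here.
 */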