/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "arm_ldst.h"
#include "hw/semihosting/semihost.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    dc_isar_feature(aa32_jazelle, s)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

#include "exec/gen-icount.h"

static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Function prototypes for gen_ functions calling Neon helpers.  */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
/* Function prototypes for gen_ functions for fix point conversions */
typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}

/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}

static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
        return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    default:
        g_assert_not_reached();
    }
}

static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* The architectural value of PC.  */
static uint32_t read_pc(DisasContext *s)
{
    return s->pc_curr + (s->thumb ? 4 : 8);
}
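
/* Note: e.g. an ARM-mode insn at 0x1000 reads PC as 0x1008 and a Thumb-mode
 * insn there reads it as 0x1004: the architectural PC is two insns ahead of
 * the one being executed, reflecting the classic ARM pipeline.
 */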

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        tcg_gen_movi_i32(var, read_pc(s));
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/*
 * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
 * This is used for load/store for which use of PC implies (literal),
 * or ADD that implies ADR.
 */
static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    if (reg == 15) {
        tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
    } else {
        tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
    }
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    store_reg(s, 13, var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}

static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 dest, TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(dest, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 dest, TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(dest, var);
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    tcg_gen_rotri_i32(var, var, 16);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
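
/* Clearing bit 15 of both addends before the 32-bit add guarantees that no
 * carry can propagate from the low halfword into the high one; the final
 * XOR with the saved (t0 ^ t1) & 0x8000 restores the correct top bit of the
 * low halfword, so each halfword is added independently.
 */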

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
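
/* The V computation uses the identity VF = (result ^ t0) & ~(t0 ^ t1):
 * signed overflow occurs exactly when the operands have the same sign but
 * the result's sign differs. Only bit 31 of cpu_VF is meaningful.
 */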

/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
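
/* Two strategies for the carry-in add: hosts with a native add2 op compute
 * t0 + CF and then + t1 as two carrying 32-bit adds; otherwise we widen to
 * 64 bits, add, and split the 64-bit sum into result (NF) and carry (CF).
 */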

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
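
/* ARM's C flag after a subtraction is NOT(borrow), so C is set iff
 * t0 >= t1 as unsigned values, which is exactly TCG_COND_GEU here.
 */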

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
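
/* The movcond substitutes a zero source when the shift amount exceeds 31:
 * ARM defines register-specified shifts by 32..255 to produce 0, whereas
 * TCG shift ops are undefined for counts >= 32, so the raw amount cannot be
 * passed straight through.
 */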

static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

static void shifter_out_im(TCGv_i32 var, int shift)
{
    tcg_gen_extract_i32(cpu_CF, var, shift, 1);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}

static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;  /* test sign bit */
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;  /* test sign bit */
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}
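
/* The condition table is arranged in complementary pairs, so the low bit of
 * cc selects the inverse: we compute the even (base) condition and invert
 * the TCG condition when cc is odd, except for 14/15 which are both
 * "always".
 */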

void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}

static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/*
 * Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 * The Security Extension also requires us to check for the FNC_RETURN
 * which signals a function return from non-secure state; this can happen
 * in both Handler and Thread mode.
 * To avoid having to do multiple comparisons in inline generated code,
 * we make the check we do here loose, so it will match for EXC_RETURN
 * in Thread mode. For system emulation do_v7m_exception_exit() checks
 * for these spurious cases and returns without doing anything (giving
 * the same behaviour as for a branch to a non-magic address).
 *
 * In linux-user mode it is unclear what the right behaviour for an
 * attempted FNC_RETURN should be, because in real hardware this will go
 * directly to Secure code (ie not the Linux kernel) which will then treat
 * the error in any way it chooses. For QEMU we opt to make the FNC_RETURN
 * attempt behave the way it would on a CPU without the security extension,
 * which is to say "like a normal branch". That means we can simply treat
 * all branches as normal with no magic address behaviour.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
#ifndef CONFIG_USER_ONLY
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
#endif
}

static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}

static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_set_pc_im() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_set_pc_im(s, s->base.pc_next);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx_excret(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}
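
/* The XOR implements BE32 byte-swapping via address munging: within each
 * aligned word a byte access has its address XORed with 3 (4 - 1) and a
 * halfword access with 2 (4 - 2), so e.g. a byte load from 0x1000 really
 * accesses 0x1003.
 */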

static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, MemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, MemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}

static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}

static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, MemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}

static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, MemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}

DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)

static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc_curr);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->base.pc_next);
    s->base.is_jmp = DISAS_HVC;
}

static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc_curr);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->base.pc_next);
    s->base.is_jmp = DISAS_SMC;
}

static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, pc);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, pc);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
{
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_syn = tcg_const_i32(syn);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}

static void unallocated_encoding(DisasContext *s)
{
    /* Unallocated and reserved encodings are uncategorized */
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
        return;
    }

    unallocated_encoding(s);
}

static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

static inline long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
    } else {
        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
        if (reg & 1) {
            ofs += offsetof(CPU_DoubleU, l.upper);
        } else {
            ofs += offsetof(CPU_DoubleU, l.lower);
        }
        return ofs;
    }
}
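
/* The single-precision registers alias the doubles, which in turn live in
 * the SVE zregs array: e.g. s0/s1 are the low/high words of d0, which is
 * zregs[0].d[0], and d17 is zregs[8].d[1].
 */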

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
 * where 0 is the least significant end of the register.
 */
static inline long
neon_element_offset(int reg, int element, MemOp size)
{
    int element_size = 1 << size;
    int ofs = element * element_size;
#ifdef HOST_WORDS_BIGENDIAN
    /* Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     */
    if (element_size < 8) {
        ofs ^= 8 - element_size;
    }
#endif
    return neon_reg_offset(reg, 0) + ofs;
}
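
/* On a big-endian host the bytes within each 8-byte unit are stored in the
 * opposite order, so e.g. the MO_8 element 0 sits at byte offset 7 of the
 * unit (0 ^ (8 - 1)) and the MO_16 element 0 at offset 6 (0 ^ (8 - 2)).
 */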

static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i32(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i32(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_load_element64(TCGv_i64 var, int reg, int ele, MemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i64(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i64(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld32u_i64(var, cpu_env, offset);
        break;
    case MO_Q:
        tcg_gen_ld_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static void neon_store_element(int reg, int ele, MemOp size, TCGv_i32 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i32(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i32(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_element64(int reg, int ele, MemOp size, TCGv_i64 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i64(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i64(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st32_i64(var, cpu_env, offset);
        break;
    case MO_64:
        tcg_gen_st_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_load_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
}

static inline void neon_store_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
}

static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
    return ret;
}

#define ARM_CP_RW_BIT   (1 << 20)

/* Include the VFP and Neon decoders */
#include "translate-vfp.inc.c"
#include "translate-neon.inc.c"

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
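
/* This decodes the iwMMXt load/store addressing modes: bit 24 selects pre-
 * vs post-indexed, bit 23 the sign of the offset, and bit 21 base
 * writeback; a form with neither pre-indexing nor writeback is only valid
 * with bit 23 set, hence the final error return.
 */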

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
            } else { /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WLDRD */
                        gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
                        i = 0;
                    } else { /* WLDRW wRd */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    tmp = tcg_temp_new_i32();
                    if (insn & (1 << 22)) { /* WLDRH */
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                    } else { /* WLDRB */
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WSTRD */
                        gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
                    } else { /* WSTRW wRd */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WSTRH */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                    } else { /* WSTRB */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                    }
                }
            }
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            tmp2 = NULL;
            tmp3 = NULL;
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free_i32(tmp3);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:     /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:     /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:     /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:     /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:     /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:     /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:     /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:     /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:     /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:     /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:         /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }
    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))

#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (dc_isar_feature(aa32_simd_r32, s)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
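
/*
 * Worked example (illustrative, not part of the decode itself): for
 * VFP_DREG_D, bigbit is 12 and smallbit is 22, so the low four bits of
 * the register number come from insn[15:12] and bit 4 from insn[22];
 * Vd = 0b0011 with D = 1 therefore names d19 when the CPU implements
 * the full 32 doubleword registers (aa32_simd_r32).  Without that
 * feature, a set D bit makes the insn UNDEF via the "return 1" above.
 */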
static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
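
/*
 * In other words, direct block chaining is only allowed when the branch
 * destination lies on the same guest page as the translation block, so
 * that unmapping or rewriting some other page can never leave a stale
 * direct jump behind; e.g. a short forward branch within a page chains
 * directly, while a cross-page branch falls back to gen_goto_ptr().
 */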
static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}

/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}

static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(is_singlestepping(s))) {
        /* An indirect jump so that we still trigger the debug exception.  */
        gen_set_pc_im(s, dest);
        s->base.is_jmp = DISAS_JUMP;
    } else {
        gen_goto_tb(s, 0, dest);
    }
}
static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask = 0;

    if (flags & (1 << 0)) {
        mask |= 0xff;
    }
    if (flags & (1 << 1)) {
        mask |= 0xff00;
    }
    if (flags & (1 << 2)) {
        mask |= 0xff0000;
    }
    if (flags & (1 << 3)) {
        mask |= 0xff000000;
    }

    /* Mask out undefined and reserved bits.  */
    mask &= aarch32_cpsr_valid_mask(s->features, s->isar);

    /* Mask out execution state.  */
    if (!spsr) {
        mask &= ~CPSR_EXEC;
    }

    /* Mask out privileged bits.  */
    if (IS_USER(s)) {
        mask &= CPSR_USER;
    }
    return mask;
}
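
/*
 * Each flag bit selects one byte field of the PSR, so, for instance,
 * a flags value of 0b1001 (the control and flags fields of an MSR mask)
 * yields a raw mask of 0xff0000ff before the validity and privilege
 * filtering above is applied.
 */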
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}
/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /*
         * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
         * (and so we can forbid accesses from EL2 or below). elr_hyp
         * can be accessed also from Hyp mode, so forbid accesses from
         * EL0 or EL1.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
            (s->current_el < 3 && *regno != 17)) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                       syn_uncategorized(), exc_target);
    return false;
}
static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}
static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: return;
    }
}
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  tcg_gen_smax_i32
#define gen_helper_neon_pmax_u32  tcg_gen_umax_i32
#define gen_helper_neon_pmin_s32  tcg_gen_smin_i32
#define gen_helper_neon_pmin_u32  tcg_gen_umin_i32

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
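
/*
 * The dispatch key is ((size << 1) | u), so, for instance, with size = 1
 * (16-bit elements) and u = 0, GEN_NEON_INTEGER_OP(hadd) expands to
 * gen_helper_neon_hadd_s16(tmp, tmp, tmp2), while size = 2 and u = 1
 * picks the _u32 variant.
 */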
static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}

static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
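
/*
 * For a 16-bit scalar the 'reg' encoding above packs the D register in
 * bits [2:0], the 32-bit word within it in bit 4 (reg >> 4), and the
 * halfword within that word in bit 3, which selects between the high-
 * and low-half duplication.
 */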
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qunzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qunzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_unzip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}

static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_zip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_extrl_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_ushl_i32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_sshl_i32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}

static void gen_neon_narrow_op(int op, int u, int size,
                               TCGv_i32 dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLS */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static bool neon_2rm_is_v8_op(int op)
{
    /* Return true if this neon 2reg-misc op is ARMv8 and up */
    switch (op) {
    case NEON_2RM_VRINTN:
    case NEON_2RM_VRINTA:
    case NEON_2RM_VRINTM:
    case NEON_2RM_VRINTP:
    case NEON_2RM_VRINTZ:
    case NEON_2RM_VRINTX:
    case NEON_2RM_VCVTAU:
    case NEON_2RM_VCVTAS:
    case NEON_2RM_VCVTNU:
    case NEON_2RM_VCVTNS:
    case NEON_2RM_VCVTPU:
    case NEON_2RM_VCVTPS:
    case NEON_2RM_VCVTMU:
    case NEON_2RM_VCVTMS:
        return true;
    default:
        return false;
    }
}
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
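
/*
 * For example, neon_2rm_sizes[NEON_2RM_VCNT] == 0x1, so VCNT only
 * accepts size 0 (byte elements); a size check along the lines of
 *     if (!(neon_2rm_sizes[op] & (1 << size))) { ... UNDEF ... }
 * rejects every other element size, and unallocated op values (whose
 * entries are zero) are rejected for all sizes.
 */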
/* Expand v8.1 simd helper.  */
static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                         int q, int rd, int rn, int rm)
{
    if (dc_isar_feature(aa32_rdm, s)) {
        int opr_sz = (1 + q) * 8;
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm), cpu_env,
                           opr_sz, opr_sz, 0, fn);
        return 0;
    }
    return 1;
}
static void gen_ceq0_i32(TCGv_i32 d, TCGv_i32 a)
{
    tcg_gen_setcondi_i32(TCG_COND_EQ, d, a, 0);
    tcg_gen_neg_i32(d, d);
}

static void gen_ceq0_i64(TCGv_i64 d, TCGv_i64 a)
{
    tcg_gen_setcondi_i64(TCG_COND_EQ, d, a, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_ceq0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
{
    TCGv_vec zero = tcg_const_zeros_vec_matching(d);
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, d, a, zero);
    tcg_temp_free_vec(zero);
}

static const TCGOpcode vecop_list_cmp[] = {
    INDEX_op_cmp_vec, 0
};

const GVecGen2 ceq0_op[4] = {
    { .fno = gen_helper_gvec_ceq0_b,
      .fniv = gen_ceq0_vec,
      .opt_opc = vecop_list_cmp,
      .vece = MO_8 },
    { .fno = gen_helper_gvec_ceq0_h,
      .fniv = gen_ceq0_vec,
      .opt_opc = vecop_list_cmp,
      .vece = MO_16 },
    { .fni4 = gen_ceq0_i32,
      .fniv = gen_ceq0_vec,
      .opt_opc = vecop_list_cmp,
      .vece = MO_32 },
    { .fni8 = gen_ceq0_i64,
      .fniv = gen_ceq0_vec,
      .opt_opc = vecop_list_cmp,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .vece = MO_64 },
};
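
/*
 * The setcond+neg pairing above converts the 0/1 comparison result into
 * the 0/-1 all-ones element mask that Neon compares produce: e.g.
 * gen_ceq0_i32 with a == 0 yields d = 0xffffffff, and d = 0 otherwise.
 * The same pattern is used for all the compare-against-zero ops below.
 */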
static void gen_cle0_i32(TCGv_i32 d, TCGv_i32 a)
{
    tcg_gen_setcondi_i32(TCG_COND_LE, d, a, 0);
    tcg_gen_neg_i32(d, d);
}

static void gen_cle0_i64(TCGv_i64 d, TCGv_i64 a)
{
    tcg_gen_setcondi_i64(TCG_COND_LE, d, a, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cle0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
{
    TCGv_vec zero = tcg_const_zeros_vec_matching(d);
    tcg_gen_cmp_vec(TCG_COND_LE, vece, d, a, zero);
    tcg_temp_free_vec(zero);
}

const GVecGen2 cle0_op[4] = {
    { .fno = gen_helper_gvec_cle0_b,
      .fniv = gen_cle0_vec,
      .opt_opc = vecop_list_cmp,
      .vece = MO_8 },
    { .fno = gen_helper_gvec_cle0_h,
      .fniv = gen_cle0_vec,
      .opt_opc = vecop_list_cmp,
      .vece = MO_16 },
    { .fni4 = gen_cle0_i32,
      .fniv = gen_cle0_vec,
      .opt_opc = vecop_list_cmp,
      .vece = MO_32 },
    { .fni8 = gen_cle0_i64,
      .fniv = gen_cle0_vec,
      .opt_opc = vecop_list_cmp,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .vece = MO_64 },
};
static void gen_cge0_i32(TCGv_i32 d, TCGv_i32 a)
{
    tcg_gen_setcondi_i32(TCG_COND_GE, d, a, 0);
    tcg_gen_neg_i32(d, d);
}

static void gen_cge0_i64(TCGv_i64 d, TCGv_i64 a)
{
    tcg_gen_setcondi_i64(TCG_COND_GE, d, a, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cge0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
{
    TCGv_vec zero = tcg_const_zeros_vec_matching(d);
    tcg_gen_cmp_vec(TCG_COND_GE, vece, d, a, zero);
    tcg_temp_free_vec(zero);
}

const GVecGen2 cge0_op[4] = {
    { .fno = gen_helper_gvec_cge0_b,
      .fniv = gen_cge0_vec,
      .opt_opc = vecop_list_cmp,
      .vece = MO_8 },
    { .fno = gen_helper_gvec_cge0_h,
      .fniv = gen_cge0_vec,
      .opt_opc = vecop_list_cmp,
      .vece = MO_16 },
    { .fni4 = gen_cge0_i32,
      .fniv = gen_cge0_vec,
      .opt_opc = vecop_list_cmp,
      .vece = MO_32 },
    { .fni8 = gen_cge0_i64,
      .fniv = gen_cge0_vec,
      .opt_opc = vecop_list_cmp,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .vece = MO_64 },
};
static void gen_clt0_i32(TCGv_i32 d, TCGv_i32 a)
{
    tcg_gen_setcondi_i32(TCG_COND_LT, d, a, 0);
    tcg_gen_neg_i32(d, d);
}

static void gen_clt0_i64(TCGv_i64 d, TCGv_i64 a)
{
    tcg_gen_setcondi_i64(TCG_COND_LT, d, a, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_clt0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
{
    TCGv_vec zero = tcg_const_zeros_vec_matching(d);
    tcg_gen_cmp_vec(TCG_COND_LT, vece, d, a, zero);
    tcg_temp_free_vec(zero);
}

const GVecGen2 clt0_op[4] = {
    { .fno = gen_helper_gvec_clt0_b,
      .fniv = gen_clt0_vec,
      .opt_opc = vecop_list_cmp,
      .vece = MO_8 },
    { .fno = gen_helper_gvec_clt0_h,
      .fniv = gen_clt0_vec,
      .opt_opc = vecop_list_cmp,
      .vece = MO_16 },
    { .fni4 = gen_clt0_i32,
      .fniv = gen_clt0_vec,
      .opt_opc = vecop_list_cmp,
      .vece = MO_32 },
    { .fni8 = gen_clt0_i64,
      .fniv = gen_clt0_vec,
      .opt_opc = vecop_list_cmp,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .vece = MO_64 },
};
static void gen_cgt0_i32(TCGv_i32 d, TCGv_i32 a)
{
    tcg_gen_setcondi_i32(TCG_COND_GT, d, a, 0);
    tcg_gen_neg_i32(d, d);
}

static void gen_cgt0_i64(TCGv_i64 d, TCGv_i64 a)
{
    tcg_gen_setcondi_i64(TCG_COND_GT, d, a, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cgt0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
{
    TCGv_vec zero = tcg_const_zeros_vec_matching(d);
    tcg_gen_cmp_vec(TCG_COND_GT, vece, d, a, zero);
    tcg_temp_free_vec(zero);
}

const GVecGen2 cgt0_op[4] = {
    { .fno = gen_helper_gvec_cgt0_b,
      .fniv = gen_cgt0_vec,
      .opt_opc = vecop_list_cmp,
      .vece = MO_8 },
    { .fno = gen_helper_gvec_cgt0_h,
      .fniv = gen_cgt0_vec,
      .opt_opc = vecop_list_cmp,
      .vece = MO_16 },
    { .fni4 = gen_cgt0_i32,
      .fniv = gen_cgt0_vec,
      .opt_opc = vecop_list_cmp,
      .vece = MO_32 },
    { .fni8 = gen_cgt0_i64,
      .fniv = gen_cgt0_vec,
      .opt_opc = vecop_list_cmp,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .vece = MO_64 },
};
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static const TCGOpcode vecop_list_ssra[] = {
    INDEX_op_sari_vec, INDEX_op_add_vec, 0
};

const GVecGen2i ssra_op[4] = {
    { .fni8 = gen_ssra8_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_8 },
    { .fni8 = gen_ssra16_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_16 },
    { .fni4 = gen_ssra32_i32,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_32 },
    { .fni8 = gen_ssra64_i64,
      .fniv = gen_ssra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_64 },
};
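
/*
 * Example: SSRA is shift-right-and-accumulate, so with shift = 1 and a
 * source element of -4, gen_ssra32_i32 computes a >> 1 = -2
 * (arithmetic shift) and adds it to the destination element.
 */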
static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static const TCGOpcode vecop_list_usra[] = {
    INDEX_op_shri_vec, INDEX_op_add_vec, 0
};

const GVecGen2i usra_op[4] = {
    { .fni8 = gen_usra8_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_8 },
    { .fni8 = gen_usra16_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_16 },
    { .fni4 = gen_usra32_i32,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_32 },
    { .fni8 = gen_usra64_i64,
      .fniv = gen_usra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_64 },
};
static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
        tcg_gen_shri_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 };

const GVecGen2i sri_op[4] = {
    { .fni8 = gen_shr8_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_8 },
    { .fni8 = gen_shr16_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_16 },
    { .fni4 = gen_shr32_ins_i32,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_32 },
    { .fni8 = gen_shr64_ins_i64,
      .fniv = gen_shr_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_64 },
};
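
/*
 * SRI (shift right and insert) keeps the top 'shift' bits of each
 * destination element and ORs the shifted source in below them.  For
 * MO_8 with shift = 3 the per-element mask is 0xff >> 3 = 0x1f,
 * replicated across the whole 64-bit lane by dup_const().
 */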
static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
        tcg_gen_shli_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 };

const GVecGen2i sli_op[4] = {
    { .fni8 = gen_shl8_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_8 },
    { .fni8 = gen_shl16_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_16 },
    { .fni4 = gen_shl32_ins_i32,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_32 },
    { .fni8 = gen_shl64_ins_i64,
      .fniv = gen_shl_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_64 },
};
static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_add_u8(d, d, a);
}

static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_sub_u8(d, d, a);
}

static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_add_u16(d, d, a);
}

static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_sub_u16(d, d, a);
}

static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_add_i32(d, d, a);
}

static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_sub_i32(d, d, a);
}

static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_add_i64(d, d, a);
}

static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_sub_i64(d, d, a);
}

static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, d, d, a);
}

/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
 * these tables are shared with AArch64 which does support them.
 */

static const TCGOpcode vecop_list_mla[] = {
    INDEX_op_mul_vec, INDEX_op_add_vec, 0
};

static const TCGOpcode vecop_list_mls[] = {
    INDEX_op_mul_vec, INDEX_op_sub_vec, 0
};

const GVecGen3 mla_op[4] = {
    { .fni4 = gen_mla8_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_8 },
    { .fni4 = gen_mla16_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_16 },
    { .fni4 = gen_mla32_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_32 },
    { .fni8 = gen_mla64_i64,
      .fniv = gen_mla_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_64 },
};

const GVecGen3 mls_op[4] = {
    { .fni4 = gen_mls8_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_8 },
    { .fni4 = gen_mls16_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_16 },
    { .fni4 = gen_mls32_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_32 },
    { .fni8 = gen_mls64_i64,
      .fniv = gen_mls_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_64 },
};
/* CMTST : test is "if (X & Y != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);
}

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}

static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 };

const GVecGen3 cmtst_op[4] = {
    { .fni4 = gen_helper_neon_tst_u8,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_8 },
    { .fni4 = gen_helper_neon_tst_u16,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_16 },
    { .fni4 = gen_cmtst_i32,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_32 },
    { .fni8 = gen_cmtst_i64,
      .fniv = gen_cmtst_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_64 },
};
void gen_ushl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
{
    TCGv_i32 lval = tcg_temp_new_i32();
    TCGv_i32 rval = tcg_temp_new_i32();
    TCGv_i32 lsh = tcg_temp_new_i32();
    TCGv_i32 rsh = tcg_temp_new_i32();
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 max = tcg_const_i32(32);

    /*
     * Rely on the TCG guarantee that out of range shifts produce
     * unspecified results, not undefined behaviour (i.e. no trap).
     * Discard out-of-range results after the fact.
     */
    tcg_gen_ext8s_i32(lsh, shift);
    tcg_gen_neg_i32(rsh, lsh);
    tcg_gen_shl_i32(lval, src, lsh);
    tcg_gen_shr_i32(rval, src, rsh);
    tcg_gen_movcond_i32(TCG_COND_LTU, dst, lsh, max, lval, zero);
    tcg_gen_movcond_i32(TCG_COND_LTU, dst, rsh, max, rval, dst);

    tcg_temp_free_i32(lval);
    tcg_temp_free_i32(rval);
    tcg_temp_free_i32(lsh);
    tcg_temp_free_i32(rsh);
    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(max);
}

void gen_ushl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift)
{
    TCGv_i64 lval = tcg_temp_new_i64();
    TCGv_i64 rval = tcg_temp_new_i64();
    TCGv_i64 lsh = tcg_temp_new_i64();
    TCGv_i64 rsh = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);
    TCGv_i64 max = tcg_const_i64(64);

    /*
     * Rely on the TCG guarantee that out of range shifts produce
     * unspecified results, not undefined behaviour (i.e. no trap).
     * Discard out-of-range results after the fact.
     */
    tcg_gen_ext8s_i64(lsh, shift);
    tcg_gen_neg_i64(rsh, lsh);
    tcg_gen_shl_i64(lval, src, lsh);
    tcg_gen_shr_i64(rval, src, rsh);
    tcg_gen_movcond_i64(TCG_COND_LTU, dst, lsh, max, lval, zero);
    tcg_gen_movcond_i64(TCG_COND_LTU, dst, rsh, max, rval, dst);

    tcg_temp_free_i64(lval);
    tcg_temp_free_i64(rval);
    tcg_temp_free_i64(lsh);
    tcg_temp_free_i64(rsh);
    tcg_temp_free_i64(zero);
    tcg_temp_free_i64(max);
}
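
/*
 * Example: the shift operand is interpreted as a signed byte, so
 * shift = 0xe0 (-32) selects the right-shift path with rsh = 32, which
 * is out of range for a 32-bit element and is therefore discarded by
 * the movconds, yielding 0; shift = 4 simply selects lval = src << 4.
 */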
static void gen_ushl_vec(unsigned vece, TCGv_vec dst,
                         TCGv_vec src, TCGv_vec shift)
{
    TCGv_vec lval = tcg_temp_new_vec_matching(dst);
    TCGv_vec rval = tcg_temp_new_vec_matching(dst);
    TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
    TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
    TCGv_vec msk, max;

    tcg_gen_neg_vec(vece, rsh, shift);
    if (vece == MO_8) {
        tcg_gen_mov_vec(lsh, shift);
    } else {
        msk = tcg_temp_new_vec_matching(dst);
        tcg_gen_dupi_vec(vece, msk, 0xff);
        tcg_gen_and_vec(vece, lsh, shift, msk);
        tcg_gen_and_vec(vece, rsh, rsh, msk);
        tcg_temp_free_vec(msk);
    }

    /*
     * Rely on the TCG guarantee that out of range shifts produce
     * unspecified results, not undefined behaviour (i.e. no trap).
     * Discard out-of-range results after the fact.
     */
    tcg_gen_shlv_vec(vece, lval, src, lsh);
    tcg_gen_shrv_vec(vece, rval, src, rsh);

    max = tcg_temp_new_vec_matching(dst);
    tcg_gen_dupi_vec(vece, max, 8 << vece);

    /*
     * The choice of LT (signed) and GEU (unsigned) are biased toward
     * the instructions of the x86_64 host.  For MO_8, the whole byte
     * is significant so we must use an unsigned compare; otherwise we
     * have already masked to a byte and so a signed compare works.
     * Other tcg hosts have a full set of comparisons and do not care.
     */
    if (vece == MO_8) {
        tcg_gen_cmp_vec(TCG_COND_GEU, vece, lsh, lsh, max);
        tcg_gen_cmp_vec(TCG_COND_GEU, vece, rsh, rsh, max);
        tcg_gen_andc_vec(vece, lval, lval, lsh);
        tcg_gen_andc_vec(vece, rval, rval, rsh);
    } else {
        tcg_gen_cmp_vec(TCG_COND_LT, vece, lsh, lsh, max);
        tcg_gen_cmp_vec(TCG_COND_LT, vece, rsh, rsh, max);
        tcg_gen_and_vec(vece, lval, lval, lsh);
        tcg_gen_and_vec(vece, rval, rval, rsh);
    }
    tcg_gen_or_vec(vece, dst, lval, rval);

    tcg_temp_free_vec(max);
    tcg_temp_free_vec(lval);
    tcg_temp_free_vec(rval);
    tcg_temp_free_vec(lsh);
    tcg_temp_free_vec(rsh);
}

static const TCGOpcode ushl_list[] = {
    INDEX_op_neg_vec, INDEX_op_shlv_vec,
    INDEX_op_shrv_vec, INDEX_op_cmp_vec, 0
};

const GVecGen3 ushl_op[4] = {
    { .fniv = gen_ushl_vec,
      .fno = gen_helper_gvec_ushl_b,
      .opt_opc = ushl_list,
      .vece = MO_8 },
    { .fniv = gen_ushl_vec,
      .fno = gen_helper_gvec_ushl_h,
      .opt_opc = ushl_list,
      .vece = MO_16 },
    { .fni4 = gen_ushl_i32,
      .fniv = gen_ushl_vec,
      .opt_opc = ushl_list,
      .vece = MO_32 },
    { .fni8 = gen_ushl_i64,
      .fniv = gen_ushl_vec,
      .opt_opc = ushl_list,
      .vece = MO_64 },
};
void gen_sshl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
{
    TCGv_i32 lval = tcg_temp_new_i32();
    TCGv_i32 rval = tcg_temp_new_i32();
    TCGv_i32 lsh = tcg_temp_new_i32();
    TCGv_i32 rsh = tcg_temp_new_i32();
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 max = tcg_const_i32(31);

    /*
     * Rely on the TCG guarantee that out of range shifts produce
     * unspecified results, not undefined behaviour (i.e. no trap).
     * Discard out-of-range results after the fact.
     */
    tcg_gen_ext8s_i32(lsh, shift);
    tcg_gen_neg_i32(rsh, lsh);
    tcg_gen_shl_i32(lval, src, lsh);
    tcg_gen_umin_i32(rsh, rsh, max);
    tcg_gen_sar_i32(rval, src, rsh);
    tcg_gen_movcond_i32(TCG_COND_LEU, lval, lsh, max, lval, zero);
    tcg_gen_movcond_i32(TCG_COND_LT, dst, lsh, zero, rval, lval);

    tcg_temp_free_i32(lval);
    tcg_temp_free_i32(rval);
    tcg_temp_free_i32(lsh);
    tcg_temp_free_i32(rsh);
    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(max);
}
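/*
 * Worked example (illustrative): for shift = 0xc0 (-64), lsh = -64 is
 * negative, so the final movcond selects the right-shift result; rsh =
 * 64 is clamped to 31 by the umin, so rval = src >> 31, i.e. all sign
 * bits, matching the architected behaviour for over-large signed right
 * shifts.  The LEU compare against 31 zeroes lval for out-of-range
 * left shifts first.
 */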
void gen_sshl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift)
{
    TCGv_i64 lval = tcg_temp_new_i64();
    TCGv_i64 rval = tcg_temp_new_i64();
    TCGv_i64 lsh = tcg_temp_new_i64();
    TCGv_i64 rsh = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);
    TCGv_i64 max = tcg_const_i64(63);

    /*
     * Rely on the TCG guarantee that out of range shifts produce
     * unspecified results, not undefined behaviour (i.e. no trap).
     * Discard out-of-range results after the fact.
     */
    tcg_gen_ext8s_i64(lsh, shift);
    tcg_gen_neg_i64(rsh, lsh);
    tcg_gen_shl_i64(lval, src, lsh);
    tcg_gen_umin_i64(rsh, rsh, max);
    tcg_gen_sar_i64(rval, src, rsh);
    tcg_gen_movcond_i64(TCG_COND_LEU, lval, lsh, max, lval, zero);
    tcg_gen_movcond_i64(TCG_COND_LT, dst, lsh, zero, rval, lval);

    tcg_temp_free_i64(lval);
    tcg_temp_free_i64(rval);
    tcg_temp_free_i64(lsh);
    tcg_temp_free_i64(rsh);
    tcg_temp_free_i64(zero);
    tcg_temp_free_i64(max);
}
static void gen_sshl_vec(unsigned vece, TCGv_vec dst,
                         TCGv_vec src, TCGv_vec shift)
{
    TCGv_vec lval = tcg_temp_new_vec_matching(dst);
    TCGv_vec rval = tcg_temp_new_vec_matching(dst);
    TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
    TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
    TCGv_vec tmp = tcg_temp_new_vec_matching(dst);

    /*
     * Rely on the TCG guarantee that out of range shifts produce
     * unspecified results, not undefined behaviour (i.e. no trap).
     * Discard out-of-range results after the fact.
     */
    tcg_gen_neg_vec(vece, rsh, shift);
    if (vece == MO_8) {
        tcg_gen_mov_vec(lsh, shift);
    } else {
        tcg_gen_dupi_vec(vece, tmp, 0xff);
        tcg_gen_and_vec(vece, lsh, shift, tmp);
        tcg_gen_and_vec(vece, rsh, rsh, tmp);
    }

    /* Bound rsh so out of bound right shift gets -1.  */
    tcg_gen_dupi_vec(vece, tmp, (8 << vece) - 1);
    tcg_gen_umin_vec(vece, rsh, rsh, tmp);
    tcg_gen_cmp_vec(TCG_COND_GT, vece, tmp, lsh, tmp);

    tcg_gen_shlv_vec(vece, lval, src, lsh);
    tcg_gen_sarv_vec(vece, rval, src, rsh);

    /* Select in-bound left shift.  */
    tcg_gen_andc_vec(vece, lval, lval, tmp);

    /* Select between left and right shift.  */
    if (vece == MO_8) {
        tcg_gen_dupi_vec(vece, tmp, 0);
        tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, rval, lval);
    } else {
        tcg_gen_dupi_vec(vece, tmp, 0x80);
        tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, lval, rval);
    }

    tcg_temp_free_vec(lval);
    tcg_temp_free_vec(rval);
    tcg_temp_free_vec(lsh);
    tcg_temp_free_vec(rsh);
    tcg_temp_free_vec(tmp);
}
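/*
 * Note on the selects above (illustrative): for MO_8 the shift byte is
 * the whole element, so "lsh < 0" directly separates right shifts from
 * left shifts.  For wider elements lsh has been masked to 0..0xff, so
 * a shift is "negative" exactly when that byte is >= 0x80, hence the
 * signed compare against 0x80 with the select operands swapped.
 */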
static const TCGOpcode sshl_list[] = {
    INDEX_op_neg_vec, INDEX_op_umin_vec, INDEX_op_shlv_vec,
    INDEX_op_sarv_vec, INDEX_op_cmp_vec, INDEX_op_cmpsel_vec, 0
};

const GVecGen3 sshl_op[4] = {
    { .fniv = gen_sshl_vec,
      .fno = gen_helper_gvec_sshl_b,
      .opt_opc = sshl_list,
      .vece = MO_8 },
    { .fniv = gen_sshl_vec,
      .fno = gen_helper_gvec_sshl_h,
      .opt_opc = sshl_list,
      .vece = MO_16 },
    { .fni4 = gen_sshl_i32,
      .fniv = gen_sshl_vec,
      .opt_opc = sshl_list,
      .vece = MO_32 },
    { .fni8 = gen_sshl_i64,
      .fniv = gen_sshl_vec,
      .opt_opc = sshl_list,
      .vece = MO_64 },
};
static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_usadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}
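/*
 * Note (illustrative): x holds the wrapping sum and t the saturating
 * sum; they differ exactly in the lanes where the addition saturated,
 * so the NE compare produces all-ones in those lanes, which is then
 * ORed into the sticky saturation accumulator 'sat'.  E.g. for MO_8,
 * 0xff + 0x01 gives x = 0x00 but t = 0xff, flagging that lane.
 */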
static const TCGOpcode vecop_list_uqadd[] = {
    INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

const GVecGen4 uqadd_op[4] = {
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_b,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_8 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_h,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_16 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_s,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_32 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_d,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_64 },
};
static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_ssadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_sqadd[] = {
    INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

const GVecGen4 sqadd_op[4] = {
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_b,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_h,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_s,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_d,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_64 },
};
static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_ussub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_uqsub[] = {
    INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

const GVecGen4 uqsub_op[4] = {
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_b,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_h,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_s,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_d,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_64 },
};
static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_sssub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_sqsub[] = {
    INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

const GVecGen4 sqsub_op[4] = {
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_b,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_h,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_s,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_d,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_64 },
};
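/*
 * Usage sketch (an assumption, not code from this file): callers are
 * expected to expand these GVecGen4 tables with the QC flag as the
 * second operand, along the lines of
 *
 *     tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
 *                    rn_ofs, rm_ofs, opr_sz, max_sz, &uqadd_op[size]);
 *
 * where .write_aofs = true tells the expander that this second (QC)
 * operand is both read and written.
 */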
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    int vec_size;
    uint32_t imm;
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_ptr ptr1, ptr2, ptr3;
    TCGv_i64 tmp64;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return 1;
    }

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    vec_size = q ? 16 : 8;
    rd_ofs = neon_reg_offset(rd, 0);
    rn_ofs = neon_reg_offset(rn, 0);
    rm_ofs = neon_reg_offset(rm, 0);

    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        switch (op) {
        case NEON_3R_SHA:
            /* The SHA-1/SHA-256 3-register instructions require special
             * treatment here, as their size field is overloaded as an
             * op type selector, and they all consume their input in a
             * single pass.
             */
            if (!q) {
                return 1;
            }
            if (!u) { /* SHA-1 */
                if (!dc_isar_feature(aa32_sha1, s)) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                tmp4 = tcg_const_i32(size);
                gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
                tcg_temp_free_i32(tmp4);
            } else { /* SHA-256 */
                if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                switch (size) {
                case 0:
                    gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
                    break;
                case 1:
                    gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
                    break;
                case 2:
                    gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
                    break;
                }
            }
            tcg_temp_free_ptr(ptr1);
            tcg_temp_free_ptr(ptr2);
            tcg_temp_free_ptr(ptr3);
            return 0;

        case NEON_3R_VPADD_VQRDMLAH:
            if (!u) {
                break;  /* VPADD */
            }
            /* VQRDMLAH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_VFM_VQRDMLSH:
            if (!u) {
                /* VFM, VFMS */
                if (size == 1) {
                    return 1;
                }
                break;
            }
            /* VQRDMLSH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_VADD_VSUB:
        case NEON_3R_VTST_VCEQ:
            /* Already handled by decodetree */
            return 1;
        }
        if (size == 3) {
            /* 64-bit element instructions. */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case NEON_3R_VQSHL:
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VRSHL:
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQRSHL:
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    }
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
        pairwise = 0;
        switch (op) {
        case NEON_3R_VQSHL:
        case NEON_3R_VRSHL:
        case NEON_3R_VQRSHL:
            {
                int rtmp;
                /* Shift instruction operands are reversed.  */
                rtmp = rn;
                rn = rm;
                rm = rtmp;
            }
            break;
        case NEON_3R_VPADD_VQRDMLAH:
        case NEON_3R_VPMAX:
        case NEON_3R_VPMIN:
            pairwise = 1;
            break;
        case NEON_3R_FLOAT_ARITH:
            pairwise = (u && size < 2); /* if VPADD (float) */
            break;
        case NEON_3R_FLOAT_MINMAX:
            pairwise = u; /* if VPMIN/VPMAX (float) */
            break;
        case NEON_3R_FLOAT_CMP:
            if (!u && size) {
                /* no encoding for U=0 C=1x */
                return 1;
            }
            break;
        case NEON_3R_FLOAT_ACMP:
            if (!u) {
                return 1;
            }
            break;
        case NEON_3R_FLOAT_MISC:
            /* VMAXNM/VMINNM in ARMv8 */
            if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
                return 1;
            }
            break;
        case NEON_3R_VFM_VQRDMLSH:
            if (!dc_isar_feature(aa32_simdfmac, s)) {
                return 1;
            }
            break;
        default:
            break;
        }

        if (pairwise && q) {
            /* All the pairwise insns UNDEF if Q is set */
            return 1;
        }
        for (pass = 0; pass < (q ? 4 : 2); pass++) {

            if (pairwise) {
                /* Pairwise.  */
                if (pass < 1) {
                    tmp = neon_load_reg(rn, 0);
                    tmp2 = neon_load_reg(rn, 1);
                } else {
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                }
            } else {
                /* Elementwise.  */
                tmp = neon_load_reg(rn, pass);
                tmp2 = neon_load_reg(rm, pass);
            }
            switch (op) {
            case NEON_3R_VHADD:
                GEN_NEON_INTEGER_OP(hadd);
                break;
            case NEON_3R_VRHADD:
                GEN_NEON_INTEGER_OP(rhadd);
                break;
            case NEON_3R_VHSUB:
                GEN_NEON_INTEGER_OP(hsub);
                break;
            case NEON_3R_VQSHL:
                GEN_NEON_INTEGER_OP_ENV(qshl);
                break;
            case NEON_3R_VRSHL:
                GEN_NEON_INTEGER_OP(rshl);
                break;
            case NEON_3R_VQRSHL:
                GEN_NEON_INTEGER_OP_ENV(qrshl);
                break;
            case NEON_3R_VABD:
                GEN_NEON_INTEGER_OP(abd);
                break;
            case NEON_3R_VABA:
                GEN_NEON_INTEGER_OP(abd);
                tcg_temp_free_i32(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                gen_neon_add(size, tmp, tmp2);
                break;
            case NEON_3R_VPMAX:
                GEN_NEON_INTEGER_OP(pmax);
                break;
            case NEON_3R_VPMIN:
                GEN_NEON_INTEGER_OP(pmin);
                break;
            case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high.  */
                if (!u) { /* VQDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                } else { /* VQRDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VPADD_VQRDMLAH:
                switch (size) {
                case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
                default: abort();
                }
                break;
            case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                switch ((u << 2) | size) {
                case 0: /* VADD */
                case 4: /* VPADD */
                    gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    break;
                case 2: /* VSUB */
                    gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
                    break;
                case 6: /* VABD */
                    gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
                    break;
                default:
                    abort();
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MULTIPLY:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
                if (!u) {
                    tcg_temp_free_i32(tmp2);
                    tmp2 = neon_load_reg(rd, pass);
                    if (size == 0) {
                        gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_CMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (!u) {
                    gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_ACMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MINMAX:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MISC:
                if (u) {
                    /* VMAXNM/VMINNM in ARMv8 */
                    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                    if (size == 0) {
                        gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
                    }
                    tcg_temp_free_ptr(fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
                    } else {
                        gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
                    }
                }
                break;
            case NEON_3R_VFM_VQRDMLSH:
            {
                /* VFMA, VFMS: fused multiply-add */
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                TCGv_i32 tmp3 = neon_load_reg(rd, pass);
                if (size) {
                    /* VFMS */
                    gen_helper_vfp_negs(tmp, tmp);
                }
                gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
                tcg_temp_free_i32(tmp3);
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            default:
                abort();
            }
            tcg_temp_free_i32(tmp2);

            /* Save the result.  For elementwise operations we can put it
               straight into the destination register.  For pairwise operations
               we have to be careful to avoid clobbering the source operands. */
            if (pairwise && rd == rm) {
                neon_store_scratch(pass, tmp);
            } else {
                neon_store_reg(rd, pass, tmp);
            }

        } /* for pass */
        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp = neon_load_scratch(pass);
                neon_store_reg(rd, pass, tmp);
            }
        }
        /* End of 3 register same size operations.  */
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift.  */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
                /* 64-bit shift.  */
                if (op > 7) {
                    return 1;
                }
                size = 3;
            } else {
                size = 2;
                while ((insn & (1 << (size + 19))) == 0)
                    size--;
            }
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            if (op < 8) {
                /* Shift by immediate:
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
                if (q && ((rd | rm) & 1)) {
                    return 1;
                }
                if (!u && (op == 4 || op == 6)) {
                    return 1;
                }
                /* Right shifts are encoded as N - shift, where N is the
                   element size in bits.  */
                if (op <= 4) {
                    shift = shift - (1 << (size + 3));
                }

                switch (op) {
                case 0:  /* VSHR */
                    /* Right shift comes here negative.  */
                    shift = -shift;
                    /* Shifts larger than the element size are architecturally
                     * valid.  Unsigned results in all zeros; signed results
                     * in all sign bits.
                     */
                    if (!u) {
                        tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
                                          MIN(shift, (8 << size) - 1),
                                          vec_size, vec_size);
                    } else if (shift >= 8 << size) {
                        tcg_gen_gvec_dup_imm(MO_8, rd_ofs, vec_size,
                                             vec_size, 0);
                    } else {
                        tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
                                          vec_size, vec_size);
                    }
                    return 0;

                case 1:  /* VSRA */
                    /* Right shift comes here negative.  */
                    shift = -shift;
                    /* Shifts larger than the element size are architecturally
                     * valid.  Unsigned results in all zeros; signed results
                     * in all sign bits.
                     */
                    if (!u) {
                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
                                        MIN(shift, (8 << size) - 1),
                                        &ssra_op[size]);
                    } else if (shift >= 8 << size) {
                        /* rd += 0 */
                    } else {
                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
                                        shift, &usra_op[size]);
                    }
                    return 0;

                case 4: /* VSRI */
                    /* Right shift comes here negative.  */
                    shift = -shift;
                    /* Shift out of range leaves destination unchanged.  */
                    if (shift < 8 << size) {
                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
                                        shift, &sri_op[size]);
                    }
                    return 0;

                case 5: /* VSHL, VSLI */
                    if (u) { /* VSLI */
                        /* Shift out of range leaves destination unchanged.  */
                        if (shift < 8 << size) {
                            tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
                                            vec_size, shift, &sli_op[size]);
                        }
                    } else { /* VSHL */
                        /* Shifts larger than the element size are
                         * architecturally valid and results in zero.
                         */
                        if (shift >= 8 << size) {
                            tcg_gen_gvec_dup_imm(size, rd_ofs,
                                                 vec_size, vec_size, 0);
                        } else {
                            tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
                                              vec_size, vec_size);
                        }
                    }
                    return 0;
                }
                if (size == 3) {
                    count = q + 1;
                } else {
                    count = q ? 4 : 2;
                }

                /* To avoid excessive duplication of ops we implement shift
                 * by immediate using the variable shift operations.
                 */
                imm = dup_const(size, shift);

                for (pass = 0; pass < count; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tcg_gen_movi_i64(cpu_V1, imm);
                        switch (op) {
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            if (u) {
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
                            } else {
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                            }
                            break;
                        case 6: /* VQSHLU */
                            gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
                                                      cpu_V0, cpu_V1);
                            break;
                        case 7: /* VQSHL */
                            if (u) {
                                gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            } else {
                                gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            }
                            break;
                        default:
                            g_assert_not_reached();
                        }
                        if (op == 3) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else { /* size < 3 */
                        /* Operands in T0 and T1.  */
                        tmp = neon_load_reg(rm, pass);
                        tmp2 = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp2, imm);
                        switch (op) {
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            GEN_NEON_INTEGER_OP(rshl);
                            break;
                        case 6: /* VQSHLU */
                            switch (size) {
                            case 0:
                                gen_helper_neon_qshlu_s8(tmp, cpu_env,
                                                         tmp, tmp2);
                                break;
                            case 1:
                                gen_helper_neon_qshlu_s16(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            case 2:
                                gen_helper_neon_qshlu_s32(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            default:
                                abort();
                            }
                            break;
                        case 7: /* VQSHL */
                            GEN_NEON_INTEGER_OP_ENV(qshl);
                            break;
                        default:
                            g_assert_not_reached();
                        }
                        tcg_temp_free_i32(tmp2);

                        if (op == 3) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            gen_neon_add(size, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                } /* for pass */
            } else if (op < 10) {
                /* Shift by immediate and narrow:
                   VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
                int input_unsigned = (op == 8) ? !u : u;
                if (rm & 1) {
                    return 1;
                }
                shift = shift - (1 << (size + 3));
                size++;
                if (size == 3) {
                    tmp64 = tcg_const_i64(shift);
                    neon_load_reg64(cpu_V0, rm);
                    neon_load_reg64(cpu_V1, rm + 1);
                    for (pass = 0; pass < 2; pass++) {
                        TCGv_i64 in;
                        if (pass == 0) {
                            in = cpu_V0;
                        } else {
                            in = cpu_V1;
                        }
                        if (q) {
                            if (input_unsigned) {
                                gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
                            }
                        } else {
                            if (input_unsigned) {
                                gen_ushl_i64(cpu_V0, in, tmp64);
                            } else {
                                gen_sshl_i64(cpu_V0, in, tmp64);
                            }
                        }
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i64(tmp64);
                } else {
                    if (size == 1) {
                        imm = (uint16_t)shift;
                        imm |= imm << 16;
                    } else {
                        /* size == 2 */
                        imm = (uint32_t)shift;
                    }
                    tmp2 = tcg_const_i32(imm);
                    tmp4 = neon_load_reg(rm + 1, 0);
                    tmp5 = neon_load_reg(rm + 1, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rm, 0);
                        } else {
                            tmp = tmp4;
                        }
                        gen_neon_shift_narrow(size, tmp, tmp2, q,
                                              input_unsigned);
                        if (pass == 0) {
                            tmp3 = neon_load_reg(rm, 1);
                        } else {
                            tmp3 = tmp5;
                        }
                        gen_neon_shift_narrow(size, tmp3, tmp2, q,
                                              input_unsigned);
                        tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
                        tcg_temp_free_i32(tmp);
                        tcg_temp_free_i32(tmp3);
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i32(tmp2);
                }
            } else if (op == 10) {
                /* VSHLL, VMOVL */
                if (q || (rd & 1)) {
                    return 1;
                }
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    if (pass == 1)
                        tmp = tmp2;

                    gen_neon_widen(cpu_V0, tmp, size, u);

                    if (shift != 0) {
                        /* The shift is less than the width of the source
                           type, so we can just shift the whole register.  */
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
                        /* Widen the result of shift: we need to clear
                         * the potential overflow bits resulting from
                         * left bits of the narrow input appearing as
                         * right bits of left the neighbour narrow
                         * input.  */
                        if (size < 2 || !u) {
                            uint64_t imm64;
                            if (size == 0) {
                                imm = (0xffu >> (8 - shift));
                                imm |= imm << 16;
                            } else if (size == 1) {
                                imm = 0xffff >> (16 - shift);
                            } else {
                                /* size == 2 */
                                imm = 0xffffffff >> (32 - shift);
                            }
                            if (size < 2) {
                                imm64 = imm | (((uint64_t)imm) << 32);
                            } else {
                                imm64 = imm;
                            }
                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                        }
                    }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
            } else if (op >= 14) {
                /* VCVT fixed-point.  */
                TCGv_ptr fpst;
                TCGv_i32 shiftv;
                VFPGenFixPointFn *fn;

                if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
                    return 1;
                }

                if (!(op & 1)) {
                    if (u) {
                        fn = gen_helper_vfp_ultos;
                    } else {
                        fn = gen_helper_vfp_sltos;
                    }
                } else {
                    if (u) {
                        fn = gen_helper_vfp_touls_round_to_zero;
                    } else {
                        fn = gen_helper_vfp_tosls_round_to_zero;
                    }
                }

                /* We have already masked out the must-be-1 top bit of imm6,
                 * hence this 32-shift where the ARM ARM has 64-imm6.
                 */
                shift = 32 - shift;
                fpst = get_fpstatus_ptr(1);
                shiftv = tcg_const_i32(shift);
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    TCGv_i32 tmpf = neon_load_reg(rm, pass);
                    fn(tmpf, tmpf, shiftv, fpst);
                    neon_store_reg(rd, pass, tmpf);
                }
                tcg_temp_free_ptr(fpst);
                tcg_temp_free_i32(shiftv);
            } else {
                return 1;
            }
        } else { /* (insn & 0x00380080) == 0 */
            int invert, reg_ofs, vec_size;

            if (q && (rd & 1)) {
                return 1;
            }

            op = (insn >> 8) & 0xf;
            /* One register and immediate.  */
            imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
            invert = (insn & (1 << 5)) != 0;
            /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
             * We choose to not special-case this and will behave as if a
             * valid constant encoding of 0 had been given.
             */
            switch (op) {
            case 0: case 1:
                /* no-op */
                break;
            case 2: case 3:
                imm <<= 8;
                break;
            case 4: case 5:
                imm <<= 16;
                break;
            case 6: case 7:
                imm <<= 24;
                break;
            case 8: case 9:
                imm |= imm << 16;
                break;
            case 10: case 11:
                imm = (imm << 8) | (imm << 24);
                break;
            case 12:
                imm = (imm << 8) | 0xff;
                break;
            case 13:
                imm = (imm << 16) | 0xffff;
                break;
            case 14:
                imm |= (imm << 8) | (imm << 16) | (imm << 24);
                break;
            case 15:
                if (invert) {
                    return 1;
                }
                imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
                      | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
                break;
            }
            if (invert) {
                imm = ~imm;
            }

            reg_ofs = neon_reg_offset(rd, 0);
            vec_size = q ? 16 : 8;

            if (op & 1 && op < 12) {
                if (invert) {
                    /* The immediate value has already been inverted,
                     * so BIC becomes AND.
                     */
                    tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
                                      vec_size, vec_size);
                } else {
                    tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
                                     vec_size, vec_size);
                }
            } else {
                /* VMOV, VMVN.  */
                if (op == 14 && invert) {
                    TCGv_i64 t64 = tcg_temp_new_i64();

                    for (pass = 0; pass <= q; ++pass) {
                        uint64_t val = 0;
                        int n;

                        for (n = 0; n < 8; n++) {
                            if (imm & (1 << (n + pass * 8))) {
                                val |= 0xffull << (n * 8);
                            }
                        }
                        tcg_gen_movi_i64(t64, val);
                        neon_store_reg64(t64, rd + pass);
                    }
                    tcg_temp_free_i64(t64);
                } else {
                    tcg_gen_gvec_dup_imm(MO_32, reg_ofs, vec_size,
                                         vec_size, imm);
                }
            }
        }
    } else { /* (insn & 0x00800010 == 0x00800000) */
        if (size != 3) {
            op = (insn >> 8) & 0xf;
            if ((insn & (1 << 6)) == 0) {
                /* Three registers of different lengths.  */
                int src1_wide;
                int src2_wide;
                int prewiden;
                /* undefreq: bit 0 : UNDEF if size == 0
                 *           bit 1 : UNDEF if size == 1
                 *           bit 2 : UNDEF if size == 2
                 *           bit 3 : UNDEF if U == 1
                 * Note that [2:0] set implies 'always UNDEF'
                 */
                int undefreq;
                /* prewiden, src1_wide, src2_wide, undefreq */
                static const int neon_3reg_wide[16][4] = {
                    {1, 0, 0, 0}, /* VADDL */
                    {1, 1, 0, 0}, /* VADDW */
                    {1, 0, 0, 0}, /* VSUBL */
                    {1, 1, 0, 0}, /* VSUBW */
                    {0, 1, 1, 0}, /* VADDHN */
                    {0, 0, 0, 0}, /* VABAL */
                    {0, 1, 1, 0}, /* VSUBHN */
                    {0, 0, 0, 0}, /* VABDL */
                    {0, 0, 0, 0}, /* VMLAL */
                    {0, 0, 0, 9}, /* VQDMLAL */
                    {0, 0, 0, 0}, /* VMLSL */
                    {0, 0, 0, 9}, /* VQDMLSL */
                    {0, 0, 0, 0}, /* Integer VMULL */
                    {0, 0, 0, 9}, /* VQDMULL */
                    {0, 0, 0, 0xa}, /* Polynomial VMULL */
                    {0, 0, 0, 7}, /* Reserved: always UNDEF */
                };

                prewiden = neon_3reg_wide[op][0];
                src1_wide = neon_3reg_wide[op][1];
                src2_wide = neon_3reg_wide[op][2];
                undefreq = neon_3reg_wide[op][3];

                if ((undefreq & (1 << size)) ||
                    ((undefreq & 8) && u)) {
                    return 1;
                }
                if ((src1_wide && (rn & 1)) ||
                    (src2_wide && (rm & 1)) ||
                    (!src2_wide && (rd & 1))) {
                    return 1;
                }
                /* Handle polynomial VMULL in a single pass.  */
                if (op == 14) {
                    if (size == 0) {
                        /* VMULL.P8 */
                        tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, 16, 16,
                                           0, gen_helper_neon_pmull_h);
                    } else {
                        /* VMULL.P64 */
                        if (!dc_isar_feature(aa32_pmull, s)) {
                            return 1;
                        }
                        tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, 16, 16,
                                           0, gen_helper_gvec_pmull_q);
                    }
                    return 0;
                }
                /* Avoid overlapping operands.  Wide source operands are
                   always aligned so will never overlap with wide
                   destinations in problematic ways.  */
                if (rd == rm && !src2_wide) {
                    tmp = neon_load_reg(rm, 1);
                    neon_store_scratch(2, tmp);
                } else if (rd == rn && !src1_wide) {
                    tmp = neon_load_reg(rn, 1);
                    neon_store_scratch(2, tmp);
                }
                tmp3 = NULL;
                for (pass = 0; pass < 2; pass++) {
                    if (src1_wide) {
                        neon_load_reg64(cpu_V0, rn + pass);
                        tmp = NULL;
                    } else {
                        if (pass == 1 && rd == rn) {
                            tmp = neon_load_scratch(2);
                        } else {
                            tmp = neon_load_reg(rn, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V0, tmp, size, u);
                        }
                    }
                    if (src2_wide) {
                        neon_load_reg64(cpu_V1, rm + pass);
                        tmp2 = NULL;
                    } else {
                        if (pass == 1 && rd == rm) {
                            tmp2 = neon_load_scratch(2);
                        } else {
                            tmp2 = neon_load_reg(rm, pass);
                        }
                        if (prewiden) {
                            gen_neon_widen(cpu_V1, tmp2, size, u);
                        }
                    }
                    switch (op) {
                    case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
                        gen_neon_addl(size);
                        break;
                    case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
                        gen_neon_subl(size);
                        break;
                    case 5: case 7: /* VABAL, VABDL */
                        switch ((size << 1) | u) {
                        case 0:
                            gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
                            break;
                        case 1:
                            gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
                            break;
                        case 2:
                            gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
                            break;
                        case 3:
                            gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
                            break;
                        case 4:
                            gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
                            break;
                        case 5:
                            gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
                            break;
                        default: abort();
                        }
                        tcg_temp_free_i32(tmp2);
                        tcg_temp_free_i32(tmp);
                        break;
                    case 8: case 9: case 10: case 11: case 12: case 13:
                        /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        break;
                    default: /* 15 is RESERVED: caught earlier  */
                        abort();
                    }
                    if (op == 13) {
                        /* VQDMULL */
                        gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 5 || (op >= 8 && op <= 11)) {
                        /* Accumulate.  */
                        neon_load_reg64(cpu_V1, rd + pass);
                        switch (op) {
                        case 10: /* VMLSL */
                            gen_neon_negl(cpu_V0, size);
                            /* Fall through */
                        case 5: case 8: /* VABAL, VMLAL */
                            gen_neon_addl(size);
                            break;
                        case 9: case 11: /* VQDMLAL, VQDMLSL */
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            if (op == 11) {
                                gen_neon_negl(cpu_V0, size);
                            }
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else if (op == 4 || op == 6) {
                        /* Narrowing operation.  */
                        tmp = tcg_temp_new_i32();
                        if (!u) {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_extrh_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        } else {
                            switch (size) {
                            case 0:
                                gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
                                break;
                            case 1:
                                gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
                                break;
                            case 2:
                                tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
                                tcg_gen_extrh_i64_i32(tmp, cpu_V0);
                                break;
                            default: abort();
                            }
                        }
                        if (pass == 0) {
                            tmp3 = tmp;
                        } else {
                            neon_store_reg(rd, 0, tmp3);
                            neon_store_reg(rd, 1, tmp);
                        }
                    } else {
                        /* Write back the result.  */
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                }
            } else {
                /* Two registers and a scalar. NB that for ops of this form
                 * the ARM ARM labels bit 24 as Q, but it is in our variable
                 * 'u', not 'q'.
                 */
                if (size == 0) {
                    return 1;
                }
                switch (op) {
                case 1: /* Float VMLA scalar */
                case 5: /* Floating point VMLS scalar */
                case 9: /* Floating point VMUL scalar */
                    if (size == 1) {
                        return 1;
                    }
                    /* fall through */
                case 0: /* Integer VMLA scalar */
                case 4: /* Integer VMLS scalar */
                case 8: /* Integer VMUL scalar */
                case 12: /* VQDMULH scalar */
                case 13: /* VQRDMULH scalar */
                    if (u && ((rd | rn) & 1)) {
                        return 1;
                    }
                    tmp = neon_get_scalar(size, rm);
                    neon_store_scratch(0, tmp);
                    for (pass = 0; pass < (u ? 4 : 2); pass++) {
                        tmp = neon_load_scratch(0);
                        tmp2 = neon_load_reg(rn, pass);
                        if (op == 12) {
                            if (size == 1) {
                                gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                            }
                        } else if (op == 13) {
                            if (size == 1) {
                                gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                            }
                        } else if (op & 1) {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                        } else {
                            switch (size) {
                            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                            default: abort();
                            }
                        }
                        tcg_temp_free_i32(tmp2);
                        if (op < 8) {
                            /* Accumulate.  */
                            tmp2 = neon_load_reg(rd, pass);
                            switch (op) {
                            case 0:
                                gen_neon_add(size, tmp, tmp2);
                                break;
                            case 1:
                            {
                                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                                gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                                tcg_temp_free_ptr(fpstatus);
                                break;
                            }
                            case 4:
                                gen_neon_rsb(size, tmp, tmp2);
                                break;
                            case 5:
                            {
                                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                                gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
                                tcg_temp_free_ptr(fpstatus);
                                break;
                            }
                            default:
                                abort();
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                    break;
                case 3: /* VQDMLAL scalar */
                case 7: /* VQDMLSL scalar */
                case 11: /* VQDMULL scalar */
                    if (u == 1) {
                        return 1;
                    }
                    /* fall through */
                case 2: /* VMLAL sclar */
                case 6: /* VMLSL scalar */
                case 10: /* VMULL scalar */
                    if (rd & 1) {
                        return 1;
                    }
                    tmp2 = neon_get_scalar(size, rm);
                    /* We need a copy of tmp2 because gen_neon_mull
                     * deletes it during pass 0.  */
                    tmp4 = tcg_temp_new_i32();
                    tcg_gen_mov_i32(tmp4, tmp2);
                    tmp3 = neon_load_reg(rn, 1);

                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rn, 0);
                        } else {
                            tmp = tmp3;
                            tmp2 = tmp4;
                        }
                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                        if (op != 11) {
                            neon_load_reg64(cpu_V1, rd + pass);
                        }
                        switch (op) {
                        case 6:
                            gen_neon_negl(cpu_V0, size);
                            /* Fall through */
                        case 2:
                            gen_neon_addl(size);
                            break;
                        case 3: case 7:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            if (op == 7) {
                                gen_neon_negl(cpu_V0, size);
                            }
                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                            break;
                        case 10:
                            /* no-op */
                            break;
                        case 11:
                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                            break;
                        default:
                            abort();
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                case 14: /* VQRDMLAH scalar */
                case 15: /* VQRDMLSH scalar */
                {
                    NeonGenThreeOpEnvFn *fn;

                    if (!dc_isar_feature(aa32_rdm, s)) {
                        return 1;
                    }
                    if (u && ((rd | rn) & 1)) {
                        return 1;
                    }
                    if (op == 14) {
                        if (size == 1) {
                            fn = gen_helper_neon_qrdmlah_s16;
                        } else {
                            fn = gen_helper_neon_qrdmlah_s32;
                        }
                    } else {
                        if (size == 1) {
                            fn = gen_helper_neon_qrdmlsh_s16;
                        } else {
                            fn = gen_helper_neon_qrdmlsh_s32;
                        }
                    }

                    tmp2 = neon_get_scalar(size, rm);
                    for (pass = 0; pass < (u ? 4 : 2); pass++) {
                        tmp = neon_load_reg(rn, pass);
                        tmp3 = neon_load_reg(rd, pass);
                        fn(tmp, cpu_env, tmp, tmp2, tmp3);
                        tcg_temp_free_i32(tmp3);
                        neon_store_reg(rd, pass, tmp);
                    }
                    tcg_temp_free_i32(tmp2);
                }
                break;
                default:
                    g_assert_not_reached();
                }
            }
        } else { /* size == 3 */
            if (!u) {
                /* Extract.  */
                imm = (insn >> 8) & 0xf;

                if (imm > 7 && !q) {
                    return 1;
                }

                if (q && ((rd | rn | rm) & 1)) {
                    return 1;
                }

                if (imm == 0) {
                    neon_load_reg64(cpu_V0, rn);
                    if (q) {
                        neon_load_reg64(cpu_V1, rn + 1);
                    }
                } else if (imm == 8) {
                    neon_load_reg64(cpu_V0, rn + 1);
                    if (q) {
                        neon_load_reg64(cpu_V1, rm);
                    }
                } else if (q) {
                    tmp64 = tcg_temp_new_i64();
                    if (imm < 8) {
                        neon_load_reg64(cpu_V0, rn);
                        neon_load_reg64(tmp64, rn + 1);
                    } else {
                        neon_load_reg64(cpu_V0, rn + 1);
                        neon_load_reg64(tmp64, rm);
                    }
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
                    tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                    if (imm < 8) {
                        neon_load_reg64(cpu_V1, rm);
                    } else {
                        neon_load_reg64(cpu_V1, rm + 1);
                        imm -= 8;
                    }
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
                    tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
                    tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
                    tcg_temp_free_i64(tmp64);
                } else {
                    neon_load_reg64(cpu_V0, rn);
                    tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
                    neon_load_reg64(cpu_V1, rm);
                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                }
                neon_store_reg64(cpu_V0, rd);
                if (q) {
                    neon_store_reg64(cpu_V1, rd + 1);
                }
            } else if ((insn & (1 << 11)) == 0) {
                /* Two register misc.  */
                op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
                size = (insn >> 18) & 3;
                /* UNDEF for unknown op values and bad op-size combinations */
                if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
                    return 1;
                }
                if (neon_2rm_is_v8_op(op) &&
                    !arm_dc_feature(s, ARM_FEATURE_V8)) {
                    return 1;
                }
                if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
                    q && ((rm | rd) & 1)) {
                    return 1;
                }
                switch (op) {
                case NEON_2RM_VREV64:
                    for (pass = 0; pass < (q ? 2 : 1); pass++) {
                        tmp = neon_load_reg(rm, pass * 2);
                        tmp2 = neon_load_reg(rm, pass * 2 + 1);
                        switch (size) {
                        case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                        case 1: gen_swap_half(tmp); break;
                        case 2: /* no-op */ break;
                        default: abort();
                        }
                        neon_store_reg(rd, pass * 2 + 1, tmp);
                        if (size == 2) {
                            neon_store_reg(rd, pass * 2, tmp2);
                        } else {
                            switch (size) {
                            case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
                            case 1: gen_swap_half(tmp2); break;
                            default: abort();
                            }
                            neon_store_reg(rd, pass * 2, tmp2);
                        }
                    }
                    break;
                case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
                case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
                    for (pass = 0; pass < q + 1; pass++) {
                        tmp = neon_load_reg(rm, pass * 2);
                        gen_neon_widen(cpu_V0, tmp, size, op & 1);
                        tmp = neon_load_reg(rm, pass * 2 + 1);
                        gen_neon_widen(cpu_V1, tmp, size, op & 1);
                        switch (size) {
                        case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
                        case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
                        case 2: tcg_gen_add_i64(CPU_V001); break;
                        default: abort();
                        }
                        if (op >= NEON_2RM_VPADAL) {
                            /* Accumulate.  */
                            neon_load_reg64(cpu_V1, rd + pass);
                            gen_neon_addl(size);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                case NEON_2RM_VTRN:
                    if (size == 2) {
                        int n;
                        for (n = 0; n < (q ? 4 : 2); n += 2) {
                            tmp = neon_load_reg(rm, n);
                            tmp2 = neon_load_reg(rd, n + 1);
                            neon_store_reg(rm, n, tmp2);
                            neon_store_reg(rd, n + 1, tmp);
                        }
                    } else {
                        goto elementwise;
                    }
                    break;
                case NEON_2RM_VUZP:
                    if (gen_neon_unzip(rd, rm, size, q)) {
                        return 1;
                    }
                    break;
                case NEON_2RM_VZIP:
                    if (gen_neon_zip(rd, rm, size, q)) {
                        return 1;
                    }
                    break;
                case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
                    /* also VQMOVUN; op field and mnemonics don't line up */
                    if (rm & 1) {
                        return 1;
                    }
                    tmp2 = NULL;
                    for (pass = 0; pass < 2; pass++) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
                                           tmp, cpu_V0);
                        if (pass == 0) {
                            tmp2 = tmp;
                        } else {
                            neon_store_reg(rd, 0, tmp2);
                            neon_store_reg(rd, 1, tmp);
                        }
                    }
                    break;
                case NEON_2RM_VSHLL:
                    if (q || (rd & 1)) {
                        return 1;
                    }
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 1)
                            tmp = tmp2;
                        gen_neon_widen(cpu_V0, tmp, size, 1);
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
                        neon_store_reg64(cpu_V0, rd + pass);
                    }
                    break;
                case NEON_2RM_VCVT_F16_F32:
                {
                    TCGv_ptr fpst;
                    TCGv_i32 ahp;

                    if (!dc_isar_feature(aa32_fp16_spconv, s) ||
                        q || (rm & 1)) {
                        return 1;
                    }
                    fpst = get_fpstatus_ptr(true);
                    ahp = get_ahp_flag();
                    tmp = neon_load_reg(rm, 0);
                    gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
                    tmp2 = neon_load_reg(rm, 1);
                    gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
                    tcg_gen_shli_i32(tmp2, tmp2, 16);
                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                    tcg_temp_free_i32(tmp);
                    tmp = neon_load_reg(rm, 2);
                    gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
                    tmp3 = neon_load_reg(rm, 3);
                    neon_store_reg(rd, 0, tmp2);
                    gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
                    tcg_gen_shli_i32(tmp3, tmp3, 16);
                    tcg_gen_or_i32(tmp3, tmp3, tmp);
                    neon_store_reg(rd, 1, tmp3);
                    tcg_temp_free_i32(tmp);
                    tcg_temp_free_i32(ahp);
                    tcg_temp_free_ptr(fpst);
                    break;
                }
                case NEON_2RM_VCVT_F32_F16:
                {
                    TCGv_ptr fpst;
                    TCGv_i32 ahp;

                    if (!dc_isar_feature(aa32_fp16_spconv, s) ||
                        q || (rd & 1)) {
                        return 1;
                    }
                    fpst = get_fpstatus_ptr(true);
                    ahp = get_ahp_flag();
                    tmp3 = tcg_temp_new_i32();
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                    tcg_gen_ext16u_i32(tmp3, tmp);
                    gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
                    neon_store_reg(rd, 0, tmp3);
                    tcg_gen_shri_i32(tmp, tmp, 16);
                    gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
                    neon_store_reg(rd, 1, tmp);
                    tmp3 = tcg_temp_new_i32();
                    tcg_gen_ext16u_i32(tmp3, tmp2);
                    gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
                    neon_store_reg(rd, 2, tmp3);
                    tcg_gen_shri_i32(tmp2, tmp2, 16);
                    gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
                    neon_store_reg(rd, 3, tmp2);
                    tcg_temp_free_i32(ahp);
                    tcg_temp_free_ptr(fpst);
                    break;
                }
                case NEON_2RM_AESE: case NEON_2RM_AESMC:
                    if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
                        return 1;
                    }
                    ptr1 = vfp_reg_ptr(true, rd);
                    ptr2 = vfp_reg_ptr(true, rm);

                    /* Bit 6 is the lowest opcode bit; it distinguishes between
                     * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
                     */
                    tmp3 = tcg_const_i32(extract32(insn, 6, 1));

                    if (op == NEON_2RM_AESE) {
                        gen_helper_crypto_aese(ptr1, ptr2, tmp3);
                    } else {
                        gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
                    }
                    tcg_temp_free_ptr(ptr1);
                    tcg_temp_free_ptr(ptr2);
                    tcg_temp_free_i32(tmp3);
                    break;
                case NEON_2RM_SHA1H:
                    if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
                        return 1;
                    }
                    ptr1 = vfp_reg_ptr(true, rd);
                    ptr2 = vfp_reg_ptr(true, rm);

                    gen_helper_crypto_sha1h(ptr1, ptr2);

                    tcg_temp_free_ptr(ptr1);
                    tcg_temp_free_ptr(ptr2);
                    break;
                case NEON_2RM_SHA1SU1:
                    if ((rm | rd) & 1) {
                        return 1;
                    }
                    /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
                    if (q) {
                        if (!dc_isar_feature(aa32_sha2, s)) {
                            return 1;
                        }
                    } else if (!dc_isar_feature(aa32_sha1, s)) {
                        return 1;
                    }
                    ptr1 = vfp_reg_ptr(true, rd);
                    ptr2 = vfp_reg_ptr(true, rm);
                    if (q) {
                        gen_helper_crypto_sha256su0(ptr1, ptr2);
                    } else {
                        gen_helper_crypto_sha1su1(ptr1, ptr2);
                    }
                    tcg_temp_free_ptr(ptr1);
                    tcg_temp_free_ptr(ptr2);
                    break;
                case NEON_2RM_VMVN:
                    tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
                    break;
                case NEON_2RM_VNEG:
                    tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
                    break;
                case NEON_2RM_VABS:
                    tcg_gen_gvec_abs(size, rd_ofs, rm_ofs, vec_size, vec_size);
                    break;

                case NEON_2RM_VCEQ0:
                    tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
                                   vec_size, &ceq0_op[size]);
                    break;
                case NEON_2RM_VCGT0:
                    tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
                                   vec_size, &cgt0_op[size]);
                    break;
                case NEON_2RM_VCLE0:
                    tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
                                   vec_size, &cle0_op[size]);
                    break;
                case NEON_2RM_VCGE0:
                    tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
                                   vec_size, &cge0_op[size]);
                    break;
                case NEON_2RM_VCLT0:
                    tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
                                   vec_size, &clt0_op[size]);
                    break;
                default:
                elementwise:
                    for (pass = 0; pass < (q ? 4 : 2); pass++) {
                        tmp = neon_load_reg(rm, pass);
                        switch (op) {
                        case NEON_2RM_VREV32:
                            switch (size) {
                            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                            case 1: gen_swap_half(tmp); break;
                            default: abort();
                            }
                            break;
                        case NEON_2RM_VREV16:
                            gen_rev16(tmp, tmp);
                            break;
                        case NEON_2RM_VCLS:
                            switch (size) {
                            case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
                            case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
                            case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
                            default: abort();
                            }
                            break;
                        case NEON_2RM_VCLZ:
                            switch (size) {
                            case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
                            case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
                            case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
                            default: abort();
                            }
                            break;
                        case NEON_2RM_VCNT:
                            gen_helper_neon_cnt_u8(tmp, tmp);
                            break;
                        case NEON_2RM_VQABS:
                            switch (size) {
                            case 0:
                                gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
                                break;
                            case 1:
                                gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
                                break;
                            case 2:
                                gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
                                break;
                            default: abort();
                            }
                            break;
                        case NEON_2RM_VQNEG:
                            switch (size) {
                            case 0:
                                gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
                                break;
                            case 1:
                                gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
                                break;
                            case 2:
                                gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
                                break;
                            default: abort();
                            }
                            break;
                        case NEON_2RM_VCGT0_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCGE0_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCEQ0_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCLE0_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCLT0_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            tmp2 = tcg_const_i32(0);
                            gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VABS_F:
                            gen_helper_vfp_abss(tmp, tmp);
                            break;
                        case NEON_2RM_VNEG_F:
                            gen_helper_vfp_negs(tmp, tmp);
                            break;
                        case NEON_2RM_VSWP:
                            tmp2 = neon_load_reg(rd, pass);
                            neon_store_reg(rm, pass, tmp2);
                            break;
                        case NEON_2RM_VTRN:
                            tmp2 = neon_load_reg(rd, pass);
                            switch (size) {
                            case 0: gen_neon_trn_u8(tmp, tmp2); break;
                            case 1: gen_neon_trn_u16(tmp, tmp2); break;
                            default: abort();
                            }
                            neon_store_reg(rm, pass, tmp2);
                            break;
                        case NEON_2RM_VRINTN:
                        case NEON_2RM_VRINTA:
                        case NEON_2RM_VRINTM:
                        case NEON_2RM_VRINTP:
                        case NEON_2RM_VRINTZ:
                        {
                            TCGv_i32 tcg_rmode;
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            int rmode;

                            if (op == NEON_2RM_VRINTZ) {
                                rmode = FPROUNDING_ZERO;
                            } else {
                                rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
                            }

                            tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
                            gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
                                                      cpu_env);
                            gen_helper_rints(tmp, tmp, fpstatus);
                            gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
                                                      cpu_env);
                            tcg_temp_free_ptr(fpstatus);
                            tcg_temp_free_i32(tcg_rmode);
                            break;
                        }
                        case NEON_2RM_VRINTX:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_rints_exact(tmp, tmp, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCVTAU:
                        case NEON_2RM_VCVTAS:
                        case NEON_2RM_VCVTNU:
                        case NEON_2RM_VCVTNS:
                        case NEON_2RM_VCVTPU:
                        case NEON_2RM_VCVTPS:
                        case NEON_2RM_VCVTMU:
                        case NEON_2RM_VCVTMS:
                        {
                            bool is_signed = !extract32(insn, 7, 1);
                            TCGv_ptr fpst = get_fpstatus_ptr(1);
                            TCGv_i32 tcg_rmode, tcg_shift;
                            int rmode = fp_decode_rm[extract32(insn, 8, 2)];

                            tcg_shift = tcg_const_i32(0);
                            tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
                            gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
                                                      cpu_env);

                            if (is_signed) {
                                gen_helper_vfp_tosls(tmp, tmp,
                                                     tcg_shift, fpst);
                            } else {
                                gen_helper_vfp_touls(tmp, tmp,
                                                     tcg_shift, fpst);
                            }

                            gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
                                                      cpu_env);
                            tcg_temp_free_i32(tcg_rmode);
                            tcg_temp_free_i32(tcg_shift);
                            tcg_temp_free_ptr(fpst);
                            break;
                        }
                        case NEON_2RM_VRECPE:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_recpe_u32(tmp, tmp, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VRSQRTE:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VRECPE_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_recpe_f32(tmp, tmp, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VRSQRTE_F:
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_rsqrte_f32(tmp, tmp, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_vfp_sitos(tmp, tmp, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_vfp_uitos(tmp, tmp, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_vfp_tosizs(tmp, tmp, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
                        {
                            TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                            gen_helper_vfp_touizs(tmp, tmp, fpstatus);
                            tcg_temp_free_ptr(fpstatus);
                            break;
                        }
                        default:
                            /* Reserved op values were caught by the
                             * neon_2rm_sizes[] check earlier.
                             */
                            abort();
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                    break;
                }
            } else if ((insn & (1 << 10)) == 0) {
                /* VTBL, VTBX.  */
                int n = ((insn >> 8) & 3) + 1;
                if ((rn + n) > 32) {
                    /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
                     * helper function running off the end of the register file.
                     */
                    return 1;
                }
                n <<= 3;
                if (insn & (1 << 6)) {
                    tmp = neon_load_reg(rd, 0);
                } else {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                }
                tmp2 = neon_load_reg(rm, 0);
                ptr1 = vfp_reg_ptr(true, rn);
                tmp5 = tcg_const_i32(n);
                gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
                tcg_temp_free_i32(tmp);
                if (insn & (1 << 6)) {
                    tmp = neon_load_reg(rd, 1);
                } else {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                }
                tmp3 = neon_load_reg(rm, 1);
                gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
                tcg_temp_free_i32(tmp5);
                tcg_temp_free_ptr(ptr1);
                neon_store_reg(rd, 0, tmp2);
                neon_store_reg(rd, 1, tmp3);
                tcg_temp_free_i32(tmp);
            } else if ((insn & 0x380) == 0) {
                /* VDUP */
                int element;
                MemOp size;

                if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
                    return 1;
                }
                if (insn & (1 << 16)) {
                    size = MO_8;
                    element = (insn >> 17) & 7;
                } else if (insn & (1 << 17)) {
                    size = MO_16;
                    element = (insn >> 18) & 3;
                } else {
                    size = MO_32;
                    element = (insn >> 19) & 1;
                }
                tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
                                     neon_element_offset(rm, element, size),
                                     q ? 16 : 8, q ? 16 : 8);
            } else {
                return 1;
            }
        }
    }
    return 0;
}
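/*
 * Note (illustrative): tcg_gen_gvec_dup_mem() above implements VDUP
 * (scalar) as a single splat from the source element's byte offset;
 * e.g. VDUP.8 d0, d1[3] copies the byte at
 * neon_element_offset(rm, 3, MO_8) into every lane of the destination,
 * with no per-pass loop required.
 */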
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }
6674 is64
= (insn
& (1 << 25)) == 0;
6675 if (!is64
&& ((insn
& (1 << 4)) == 0)) {
6683 opc1
= (insn
>> 4) & 0xf;
6685 rt2
= (insn
>> 16) & 0xf;
6687 crn
= (insn
>> 16) & 0xf;
6688 opc1
= (insn
>> 21) & 7;
6689 opc2
= (insn
>> 5) & 7;
6692 isread
= (insn
>> 20) & 1;
6693 rt
= (insn
>> 12) & 0xf;
6695 ri
= get_arm_cp_reginfo(s
->cp_regs
,
6696 ENCODE_CP_REG(cpnum
, is64
, s
->ns
, crn
, crm
, opc1
, opc2
));
    if (ri) {
        bool need_exit_tb;

        /* Check access permissions */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (s->hstr_active || ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn, tcg_isread;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc_curr);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            tcg_isread = tcg_const_i32(isread);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
                                           tcg_isread);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
            tcg_temp_free_i32(tcg_isread);
        } else if (ri->type & ARM_CP_RAISES_EXC) {
            /*
             * The readfn or writefn might raise an exception;
             * synchronize the CPU state in case it does.
             */
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc_curr);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s, s->base.pc_next);
            s->base.is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }
        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrh_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
->type
& ARM_CP_CONST
) {
6842 /* If not forbidden by access permissions, treat as WI */
6847 TCGv_i32 tmplo
, tmphi
;
6848 TCGv_i64 tmp64
= tcg_temp_new_i64();
6849 tmplo
= load_reg(s
, rt
);
6850 tmphi
= load_reg(s
, rt2
);
6851 tcg_gen_concat_i32_i64(tmp64
, tmplo
, tmphi
);
6852 tcg_temp_free_i32(tmplo
);
6853 tcg_temp_free_i32(tmphi
);
6855 TCGv_ptr tmpptr
= tcg_const_ptr(ri
);
6856 gen_helper_set_cp_reg64(cpu_env
, tmpptr
, tmp64
);
6857 tcg_temp_free_ptr(tmpptr
);
6859 tcg_gen_st_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
6861 tcg_temp_free_i64(tmp64
);
6866 tmp
= load_reg(s
, rt
);
6867 tmpptr
= tcg_const_ptr(ri
);
6868 gen_helper_set_cp_reg(cpu_env
, tmpptr
, tmp
);
6869 tcg_temp_free_ptr(tmpptr
);
6870 tcg_temp_free_i32(tmp
);
6872 TCGv_i32 tmp
= load_reg(s
, rt
);
6873 store_cpu_offset(tmp
, ri
->fieldoffset
);
        /* I/O operations must end the TB here (whether read or write) */
        need_exit_tb = ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) &&
                        (ri->type & ARM_CP_IO));
        if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /*
             * A write to any coprocessor register that ends a TB
             * must rebuild the hflags for the next TB.
             */
            TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
            if (arm_dc_feature(s, ARM_FEATURE_M)) {
                gen_helper_rebuild_hflags_m32(cpu_env, tcg_el);
            } else {
                if (ri->type & ARM_CP_NEWEL) {
                    gen_helper_rebuild_hflags_a32_newel(cpu_env);
                } else {
                    gen_helper_rebuild_hflags_a32(cpu_env, tcg_el);
                }
            }
            tcg_temp_free_i32(tcg_el);
            /*
             * We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            need_exit_tb = true;
        }
        if (need_exit_tb) {
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    return 1;
}
/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_extrh_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}
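/*
 * Example (illustrative): gen_storeq_reg(s, 0, 1, val) with val =
 * 0x1122334455667788 writes 0x55667788 to r0 (the low half) and
 * 0x11223344 to r1 (the high half).
 */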
/* load and add a 64-bit value from a register pair.  */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv_i32 tmpl;
    TCGv_i32 tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* Set N and Z flags from hi|lo.  */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
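/*
 * Note (illustrative): cpu_ZF stores the Z flag in inverted form (Z is
 * set iff cpu_ZF == 0), so lo | hi is zero exactly when the full
 * 64-bit value is zero; cpu_NF takes its sign from bit 31 of hi.
 */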
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.  The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually.  */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    MemOp opc = size | MO_ALIGN | s->be_data;

    s->is_ldex = true;

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. That means we don't want to do a
         * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
         * for an architecturally 64-bit access, but instead do a
         * 64-bit access using MO_BE if appropriate and then split
         * the two halves.
         * This only makes a difference for BE32 user-mode, where
         * frob64() must not flip the two halves of the 64-bit data
         * but this code must treat BE32 user-mode like BE32 system.
         */
        TCGv taddr = gen_aa32_addr(s, addr, opc);

        tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
        tcg_temp_free(taddr);
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        if (s->be_data == MO_BE) {
            tcg_gen_extr_i64_i32(tmp2, tmp, t64);
        } else {
            tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        }
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    MemOp opc = size | MO_ALIGN | s->be_data;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();
    t1 = load_reg(s, rt);
    if (size == 3) {
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        t2 = load_reg(s, rt2);
        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. Since we're going to treat this as a
         * single 64-bit BE store, we need to put the two halves in the
         * opposite order for BE to LE, so that they end up in the right
         * places.
         * We don't want gen_aa32_frob64() because that does the wrong
         * thing for BE32 usermode.
         */
        if (s->be_data == MO_BE) {
            tcg_gen_concat_i32_i64(n64, t2, t1);
        } else {
            tcg_gen_concat_i32_i64(n64, t1, t2);
        }
        tcg_temp_free_i32(t2);

        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        t2 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
/*
 * gen_srs:
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        unallocated_encoding(s);
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    s->base.is_jmp = DISAS_UPDATE;
}
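
/*
 * For example, SRSDB (amode DB, i.e. P=1 U=0) first applies the -8
 * offset computed above, so LR is stored at [R13_mode - 8] and the
 * SPSR at [R13_mode - 4]; with writeback, R13_mode is then left
 * decremented by 8.
 */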
/* Generate a label used for skipping this instruction */
static void arm_gen_condlabel(DisasContext *s)
{
    if (!s->condjmp) {
        s->condlabel = gen_new_label();
        s->condjmp = 1;
    }
}
/* Skip this instruction if the ARM condition is false */
static void arm_skip_unless(DisasContext *s, uint32_t cond)
{
    arm_gen_condlabel(s);
    arm_gen_test_cc(cond ^ 1, s->condlabel);
}
/*
 * Constant expanders for the decoders.
 */

static int negate(DisasContext *s, int x)
{
    return -x;
}

static int plus_2(DisasContext *s, int x)
{
    return x + 2;
}

static int times_2(DisasContext *s, int x)
{
    return x * 2;
}

static int times_4(DisasContext *s, int x)
{
    return x * 4;
}

/* Return only the rotation part of T32ExpandImm.  */
static int t32_expandimm_rot(DisasContext *s, int x)
{
    return x & 0xc00 ? extract32(x, 7, 5) : 0;
}

/* Return the unrotated immediate from T32ExpandImm.  */
static int t32_expandimm_imm(DisasContext *s, int x)
{
    int imm = extract32(x, 0, 8);

    switch (extract32(x, 8, 4)) {
    case 0: /* XY */
        /* Nothing to do.  */
        break;
    case 1: /* 00XY00XY */
        imm *= 0x00010001;
        break;
    case 2: /* XY00XY00 */
        imm *= 0x01000100;
        break;
    case 3: /* XYXYXYXY */
        imm *= 0x01010101;
        break;
    default:
        /* Rotated constant.  */
        imm |= 0x80;
        break;
    }
    return imm;
}
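
/*
 * For example, an imm12 of 0x155 selects pattern 1 with XY = 0x55, so
 * the expanded constant is 0x00550055 with rotation 0; any imm12 with
 * bits [11:10] non-zero instead yields 0x80 | imm12<6:0>, rotated
 * right by imm12<11:7>.
 */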
static int t32_branch24(DisasContext *s, int x)
{
    /* Convert J1:J2 at x[22:21] to I2:I1, which involves I=J^~S.  */
    x ^= !(x < 0) * (3 << 21);
    /* Append the final zero.  */
    return x << 1;
}
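
/*
 * That is: when the sign bit S of the sign-extended offset is clear,
 * !(x < 0) is 1 and both J bits are inverted (J ^ ~S with ~S == 1);
 * when S is set they pass through unchanged, recovering I1:I2 of the
 * T32 BL/B.W offset encoding.
 */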
static int t16_setflags(DisasContext *s)
{
    return s->condexec_mask == 0;
}

static int t16_push_list(DisasContext *s, int x)
{
    return (x & 0xff) | (x & 0x100) << (14 - 8);
}

static int t16_pop_list(DisasContext *s, int x)
{
    return (x & 0xff) | (x & 0x100) << (15 - 8);
}
/*
 * Include the generated decoders.
 */

#include "decode-a32.inc.c"
#include "decode-a32-uncond.inc.c"
#include "decode-t32.inc.c"
#include "decode-t16.inc.c"
/* Helpers to swap operands for reverse-subtract.  */
static void gen_rsb(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_sub_i32(dst, b, a);
}

static void gen_rsb_CC(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
{
    gen_sub_CC(dst, b, a);
}

static void gen_rsc(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
{
    gen_sub_carry(dest, b, a);
}

static void gen_rsc_CC(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
{
    gen_sbc_CC(dest, b, a);
}
/*
 * Helpers for the data processing routines.
 *
 * After the computation store the results back.
 * This may be suppressed altogether (STREG_NONE), require a runtime
 * check against the stack limits (STREG_SP_CHECK), or generate an
 * exception return.  Oh, or store into a register.
 *
 * Always return true, indicating success for a trans_* function.
 */
typedef enum StoreRegKind {
    STREG_NONE,
    STREG_NORMAL,
    STREG_SP_CHECK,
    STREG_EXC_RET,
} StoreRegKind;

static bool store_reg_kind(DisasContext *s, int rd,
                           TCGv_i32 val, StoreRegKind kind)
{
    switch (kind) {
    case STREG_NONE:
        tcg_temp_free_i32(val);
        return true;
    case STREG_NORMAL:
        /* See ALUWritePC: Interworking only from a32 mode. */
        if (s->thumb) {
            store_reg(s, rd, val);
        } else {
            store_reg_bx(s, rd, val);
        }
        return true;
    case STREG_SP_CHECK:
        store_sp_checked(s, val);
        return true;
    case STREG_EXC_RET:
        gen_exception_return(s, val);
        return true;
    }
    g_assert_not_reached();
}
/*
 * Data Processing (register)
 *
 * Operate, with set flags, one register source,
 * one immediate shifted register source, and a destination.
 */
static bool op_s_rrr_shi(DisasContext *s, arg_s_rrr_shi *a,
                         void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
                         int logic_cc, StoreRegKind kind)
{
    TCGv_i32 tmp1, tmp2;

    tmp2 = load_reg(s, a->rm);
    gen_arm_shift_im(tmp2, a->shty, a->shim, logic_cc);
    tmp1 = load_reg(s, a->rn);

    gen(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);

    if (logic_cc) {
        gen_logic_CC(tmp1);
    }
    return store_reg_kind(s, a->rd, tmp1, kind);
}
static bool op_s_rxr_shi(DisasContext *s, arg_s_rrr_shi *a,
                         void (*gen)(TCGv_i32, TCGv_i32),
                         int logic_cc, StoreRegKind kind)
{
    TCGv_i32 tmp;

    tmp = load_reg(s, a->rm);
    gen_arm_shift_im(tmp, a->shty, a->shim, logic_cc);

    gen(tmp, tmp);
    if (logic_cc) {
        gen_logic_CC(tmp);
    }
    return store_reg_kind(s, a->rd, tmp, kind);
}
/*
 * Data-processing (register-shifted register)
 *
 * Operate, with set flags, one register source,
 * one register shifted register source, and a destination.
 */
static bool op_s_rrr_shr(DisasContext *s, arg_s_rrr_shr *a,
                         void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
                         int logic_cc, StoreRegKind kind)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = load_reg(s, a->rs);
    tmp2 = load_reg(s, a->rm);
    gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
    tmp1 = load_reg(s, a->rn);

    gen(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);

    if (logic_cc) {
        gen_logic_CC(tmp1);
    }
    return store_reg_kind(s, a->rd, tmp1, kind);
}
static bool op_s_rxr_shr(DisasContext *s, arg_s_rrr_shr *a,
                         void (*gen)(TCGv_i32, TCGv_i32),
                         int logic_cc, StoreRegKind kind)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = load_reg(s, a->rs);
    tmp2 = load_reg(s, a->rm);
    gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);

    gen(tmp2, tmp2);
    if (logic_cc) {
        gen_logic_CC(tmp2);
    }
    return store_reg_kind(s, a->rd, tmp2, kind);
}
/*
 * Data-processing (immediate)
 *
 * Operate, with set flags, one register source,
 * one rotated immediate, and a destination.
 *
 * Note that logic_cc && a->rot setting CF based on the msb of the
 * immediate is the reason why we must pass in the unrotated form
 * of the immediate.
 */
static bool op_s_rri_rot(DisasContext *s, arg_s_rri_rot *a,
                         void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
                         int logic_cc, StoreRegKind kind)
{
    TCGv_i32 tmp1, tmp2;
    uint32_t imm;

    imm = ror32(a->imm, a->rot);
    if (logic_cc && a->rot) {
        tcg_gen_movi_i32(cpu_CF, imm >> 31);
    }
    tmp2 = tcg_const_i32(imm);
    tmp1 = load_reg(s, a->rn);

    gen(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);

    if (logic_cc) {
        gen_logic_CC(tmp1);
    }
    return store_reg_kind(s, a->rd, tmp1, kind);
}
static bool op_s_rxi_rot(DisasContext *s, arg_s_rri_rot *a,
                         void (*gen)(TCGv_i32, TCGv_i32),
                         int logic_cc, StoreRegKind kind)
{
    TCGv_i32 tmp;
    uint32_t imm;

    imm = ror32(a->imm, a->rot);
    if (logic_cc && a->rot) {
        tcg_gen_movi_i32(cpu_CF, imm >> 31);
    }
    tmp = tcg_const_i32(imm);

    gen(tmp, tmp);
    if (logic_cc) {
        gen_logic_CC(tmp);
    }
    return store_reg_kind(s, a->rd, tmp, kind);
}
#define DO_ANY3(NAME, OP, L, K)                                         \
    static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a)  \
    { StoreRegKind k = (K); return op_s_rrr_shi(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a)  \
    { StoreRegKind k = (K); return op_s_rrr_shr(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a)   \
    { StoreRegKind k = (K); return op_s_rri_rot(s, a, OP, L, k); }

#define DO_ANY2(NAME, OP, L, K)                                         \
    static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a)  \
    { StoreRegKind k = (K); return op_s_rxr_shi(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a)  \
    { StoreRegKind k = (K); return op_s_rxr_shr(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rxi(DisasContext *s, arg_s_rri_rot *a)   \
    { StoreRegKind k = (K); return op_s_rxi_rot(s, a, OP, L, k); }

#define DO_CMP2(NAME, OP, L)                                            \
    static bool trans_##NAME##_xrri(DisasContext *s, arg_s_rrr_shi *a)  \
    { return op_s_rrr_shi(s, a, OP, L, STREG_NONE); }                   \
    static bool trans_##NAME##_xrrr(DisasContext *s, arg_s_rrr_shr *a)  \
    { return op_s_rrr_shr(s, a, OP, L, STREG_NONE); }                   \
    static bool trans_##NAME##_xri(DisasContext *s, arg_s_rri_rot *a)   \
    { return op_s_rri_rot(s, a, OP, L, STREG_NONE); }
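
/*
 * For instance, DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL) below
 * expands to trans_AND_rrri(), trans_AND_rrrr() and trans_AND_rri(),
 * covering the immediate-shift, register-shift and rotated-immediate
 * encodings with a single line per instruction.
 */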
DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
DO_ANY3(EOR, tcg_gen_xor_i32, a->s, STREG_NORMAL)
DO_ANY3(ORR, tcg_gen_or_i32, a->s, STREG_NORMAL)
DO_ANY3(BIC, tcg_gen_andc_i32, a->s, STREG_NORMAL)

DO_ANY3(RSB, a->s ? gen_rsb_CC : gen_rsb, false, STREG_NORMAL)
DO_ANY3(ADC, a->s ? gen_adc_CC : gen_add_carry, false, STREG_NORMAL)
DO_ANY3(SBC, a->s ? gen_sbc_CC : gen_sub_carry, false, STREG_NORMAL)
DO_ANY3(RSC, a->s ? gen_rsc_CC : gen_rsc, false, STREG_NORMAL)

DO_CMP2(TST, tcg_gen_and_i32, true)
DO_CMP2(TEQ, tcg_gen_xor_i32, true)
DO_CMP2(CMN, gen_add_CC, false)
DO_CMP2(CMP, gen_sub_CC, false)

DO_ANY3(ADD, a->s ? gen_add_CC : tcg_gen_add_i32, false,
        a->rd == 13 && a->rn == 13 ? STREG_SP_CHECK : STREG_NORMAL)

/*
 * Note for the computation of StoreRegKind we return out of the
 * middle of the functions that are expanded by DO_ANY3, and that
 * we modify a->s via that parameter before it is used by OP.
 */
DO_ANY3(SUB, a->s ? gen_sub_CC : tcg_gen_sub_i32, false,
        ({
            StoreRegKind ret = STREG_NORMAL;
            if (a->rd == 15 && a->s) {
                /*
                 * See ALUExceptionReturn:
                 * In User mode, UNPREDICTABLE; we choose UNDEF.
                 * In Hyp mode, UNDEFINED.
                 */
                if (IS_USER(s) || s->current_el == 2) {
                    unallocated_encoding(s);
                    return true;
                }
                /* There is no writeback of nzcv to PSTATE.  */
                a->s = 0;
                ret = STREG_EXC_RET;
            } else if (a->rd == 13 && a->rn == 13) {
                ret = STREG_SP_CHECK;
            }
            ret;
        }))

DO_ANY2(MOV, tcg_gen_mov_i32, a->s,
        ({
            StoreRegKind ret = STREG_NORMAL;
            if (a->rd == 15 && a->s) {
                /*
                 * See ALUExceptionReturn:
                 * In User mode, UNPREDICTABLE; we choose UNDEF.
                 * In Hyp mode, UNDEFINED.
                 */
                if (IS_USER(s) || s->current_el == 2) {
                    unallocated_encoding(s);
                    return true;
                }
                /* There is no writeback of nzcv to PSTATE.  */
                a->s = 0;
                ret = STREG_EXC_RET;
            } else if (a->rd == 13) {
                ret = STREG_SP_CHECK;
            }
            ret;
        }))

DO_ANY2(MVN, tcg_gen_not_i32, a->s, STREG_NORMAL)
/*
 * ORN is only available with T32, so there is no register-shifted-register
 * form of the insn.  Using the DO_ANY3 macro would create an unused function.
 */
static bool trans_ORN_rrri(DisasContext *s, arg_s_rrr_shi *a)
{
    return op_s_rrr_shi(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
}

static bool trans_ORN_rri(DisasContext *s, arg_s_rri_rot *a)
{
    return op_s_rri_rot(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
}

#undef DO_ANY3
#undef DO_ANY2
#undef DO_CMP2
static bool trans_ADR(DisasContext *s, arg_ri *a)
{
    store_reg_bx(s, a->rd, add_reg_for_lit(s, 15, a->imm));
    return true;
}
static bool trans_MOVW(DisasContext *s, arg_MOVW *a)
{
    TCGv_i32 tmp;

    if (!ENABLE_ARCH_6T2) {
        return false;
    }

    tmp = tcg_const_i32(a->imm);
    store_reg(s, a->rd, tmp);
    return true;
}
static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
{
    TCGv_i32 tmp;

    if (!ENABLE_ARCH_6T2) {
        return false;
    }

    tmp = load_reg(s, a->rd);
    tcg_gen_ext16u_i32(tmp, tmp);
    tcg_gen_ori_i32(tmp, tmp, a->imm << 16);
    store_reg(s, a->rd, tmp);
    return true;
}
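
/*
 * Together these give the usual 32-bit constant idiom, e.g.
 *     movw r0, #0x5678   @ r0 = 0x00005678
 *     movt r0, #0x1234   @ r0 = 0x12345678
 * MOVT only replaces the high halfword, which is why the low half
 * is preserved with ext16u before OR-ing in a->imm << 16.
 */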
/*
 * Multiply and multiply accumulate
 */

static bool op_mla(DisasContext *s, arg_s_rrrr *a, bool add)
{
    TCGv_i32 t1, t2;

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    tcg_gen_mul_i32(t1, t1, t2);
    tcg_temp_free_i32(t2);
    if (add) {
        t2 = load_reg(s, a->ra);
        tcg_gen_add_i32(t1, t1, t2);
        tcg_temp_free_i32(t2);
    }
    if (a->s) {
        gen_logic_CC(t1);
    }
    store_reg(s, a->rd, t1);
    return true;
}
static bool trans_MUL(DisasContext *s, arg_MUL *a)
{
    return op_mla(s, a, false);
}

static bool trans_MLA(DisasContext *s, arg_MLA *a)
{
    return op_mla(s, a, true);
}
static bool trans_MLS(DisasContext *s, arg_MLS *a)
{
    TCGv_i32 t1, t2;

    if (!ENABLE_ARCH_6T2) {
        return false;
    }
    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    tcg_gen_mul_i32(t1, t1, t2);
    tcg_temp_free_i32(t2);
    t2 = load_reg(s, a->ra);
    tcg_gen_sub_i32(t1, t2, t1);
    tcg_temp_free_i32(t2);
    store_reg(s, a->rd, t1);
    return true;
}
static bool op_mlal(DisasContext *s, arg_s_rrrr *a, bool uns, bool add)
{
    TCGv_i32 t0, t1, t2, t3;

    t0 = load_reg(s, a->rm);
    t1 = load_reg(s, a->rn);
    if (uns) {
        tcg_gen_mulu2_i32(t0, t1, t0, t1);
    } else {
        tcg_gen_muls2_i32(t0, t1, t0, t1);
    }
    if (add) {
        t2 = load_reg(s, a->ra);
        t3 = load_reg(s, a->rd);
        tcg_gen_add2_i32(t0, t1, t0, t1, t2, t3);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    }
    if (a->s) {
        gen_logicq_cc(t0, t1);
    }
    store_reg(s, a->ra, t0);
    store_reg(s, a->rd, t1);
    return true;
}
static bool trans_UMULL(DisasContext *s, arg_UMULL *a)
{
    return op_mlal(s, a, true, false);
}

static bool trans_SMULL(DisasContext *s, arg_SMULL *a)
{
    return op_mlal(s, a, false, false);
}

static bool trans_UMLAL(DisasContext *s, arg_UMLAL *a)
{
    return op_mlal(s, a, true, true);
}

static bool trans_SMLAL(DisasContext *s, arg_SMLAL *a)
{
    return op_mlal(s, a, false, true);
}
static bool trans_UMAAL(DisasContext *s, arg_UMAAL *a)
{
    TCGv_i32 t0, t1, t2, zero;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_6) {
        return false;
    }

    t0 = load_reg(s, a->rm);
    t1 = load_reg(s, a->rn);
    tcg_gen_mulu2_i32(t0, t1, t0, t1);
    zero = tcg_const_i32(0);
    t2 = load_reg(s, a->ra);
    tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
    tcg_temp_free_i32(t2);
    t2 = load_reg(s, a->rd);
    tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(zero);
    store_reg(s, a->ra, t0);
    store_reg(s, a->rd, t1);
    return true;
}
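
/*
 * UMAAL computes Rd:Ra = Rn * Rm + Ra + Rd.  The two extra additions
 * cannot overflow 64 bits: (2^32 - 1)^2 + 2 * (2^32 - 1) = 2^64 - 1,
 * which is why each add2 above only needs a zero high word.
 */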
/*
 * Saturating addition and subtraction
 */

static bool op_qaddsub(DisasContext *s, arg_rrr *a, bool add, bool doub)
{
    TCGv_i32 t0, t1;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_5TE) {
        return false;
    }

    t0 = load_reg(s, a->rm);
    t1 = load_reg(s, a->rn);
    if (doub) {
        gen_helper_add_saturate(t1, cpu_env, t1, t1);
    }
    if (add) {
        gen_helper_add_saturate(t0, cpu_env, t0, t1);
    } else {
        gen_helper_sub_saturate(t0, cpu_env, t0, t1);
    }
    tcg_temp_free_i32(t1);
    store_reg(s, a->rd, t0);
    return true;
}
#define DO_QADDSUB(NAME, ADD, DOUB) \
static bool trans_##NAME(DisasContext *s, arg_rrr *a)    \
{                                                        \
    return op_qaddsub(s, a, ADD, DOUB);                  \
}

DO_QADDSUB(QADD, true, false)
DO_QADDSUB(QSUB, false, false)
DO_QADDSUB(QDADD, true, true)
DO_QADDSUB(QDSUB, false, true)

#undef DO_QADDSUB
/*
 * Halfword multiply and multiply accumulate
 */

static bool op_smlaxxx(DisasContext *s, arg_rrrr *a,
                       int add_long, bool nt, bool mt)
{
    TCGv_i32 t0, t1, tl, th;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_5TE) {
        return false;
    }

    t0 = load_reg(s, a->rn);
    t1 = load_reg(s, a->rm);
    gen_mulxy(t0, t1, nt, mt);
    tcg_temp_free_i32(t1);

    switch (add_long) {
    case 0:
        store_reg(s, a->rd, t0);
        break;
    case 1:
        t1 = load_reg(s, a->ra);
        gen_helper_add_setq(t0, cpu_env, t0, t1);
        tcg_temp_free_i32(t1);
        store_reg(s, a->rd, t0);
        break;
    case 2:
        tl = load_reg(s, a->ra);
        th = load_reg(s, a->rd);
        /* Sign-extend the 32-bit product to 64 bits.  */
        t1 = tcg_temp_new_i32();
        tcg_gen_sari_i32(t1, t0, 31);
        tcg_gen_add2_i32(tl, th, tl, th, t0, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        store_reg(s, a->ra, tl);
        store_reg(s, a->rd, th);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
#define DO_SMLAX(NAME, add, nt, mt) \
static bool trans_##NAME(DisasContext *s, arg_rrrr *a)     \
{                                                          \
    return op_smlaxxx(s, a, add, nt, mt);                  \
}

DO_SMLAX(SMULBB, 0, 0, 0)
DO_SMLAX(SMULBT, 0, 0, 1)
DO_SMLAX(SMULTB, 0, 1, 0)
DO_SMLAX(SMULTT, 0, 1, 1)

DO_SMLAX(SMLABB, 1, 0, 0)
DO_SMLAX(SMLABT, 1, 0, 1)
DO_SMLAX(SMLATB, 1, 1, 0)
DO_SMLAX(SMLATT, 1, 1, 1)

DO_SMLAX(SMLALBB, 2, 0, 0)
DO_SMLAX(SMLALBT, 2, 0, 1)
DO_SMLAX(SMLALTB, 2, 1, 0)
DO_SMLAX(SMLALTT, 2, 1, 1)

#undef DO_SMLAX
static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt)
{
    TCGv_i32 t0, t1;

    if (!ENABLE_ARCH_5TE) {
        return false;
    }

    t0 = load_reg(s, a->rn);
    t1 = load_reg(s, a->rm);
    /*
     * Since the nominal result is product<47:16>, shift the 16-bit
     * input up by 16 bits, so that the result is at product<63:32>.
     */
    if (mt) {
        tcg_gen_andi_i32(t1, t1, 0xffff0000);
    } else {
        tcg_gen_shli_i32(t1, t1, 16);
    }
    tcg_gen_muls2_i32(t0, t1, t0, t1);
    tcg_temp_free_i32(t0);
    if (add) {
        t0 = load_reg(s, a->ra);
        gen_helper_add_setq(t1, cpu_env, t1, t0);
        tcg_temp_free_i32(t0);
    }
    store_reg(s, a->rd, t1);
    return true;
}
#define DO_SMLAWX(NAME, add, mt) \
static bool trans_##NAME(DisasContext *s, arg_rrrr *a)     \
{                                                          \
    return op_smlawx(s, a, add, mt);                       \
}

DO_SMLAWX(SMULWB, 0, 0)
DO_SMLAWX(SMULWT, 0, 1)
DO_SMLAWX(SMLAWB, 1, 0)
DO_SMLAWX(SMLAWT, 1, 1)

#undef DO_SMLAWX
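
/*
 * For example, SMULWB returns bits <47:16> of the 48-bit product
 * Rn * sxt16(Rm).  Pre-shifting the 16-bit operand into bits <31:16>
 * moves that result into bits <63:32>, i.e. the high half delivered
 * by tcg_gen_muls2_i32(), so no 64-bit shift is needed afterwards.
 */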
/*
 * MSR (immediate) and hints
 */

static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
{
    /*
     * When running single-threaded TCG code, use the helper to ensure that
     * the next round-robin scheduled vCPU gets a crack.  When running in
     * MTTCG we don't generate jumps to the helper as it won't affect the
     * scheduling of other vCPUs.
     */
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_set_pc_im(s, s->base.pc_next);
        s->base.is_jmp = DISAS_YIELD;
    }
    return true;
}

static bool trans_WFE(DisasContext *s, arg_WFE *a)
{
    /*
     * When running single-threaded TCG code, use the helper to ensure that
     * the next round-robin scheduled vCPU gets a crack.  In MTTCG mode we
     * just skip this instruction.  Currently the SEV/SEVL instructions,
     * which are *one* of many ways to wake the CPU from WFE, are not
     * implemented so we can't sleep like WFI does.
     */
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_set_pc_im(s, s->base.pc_next);
        s->base.is_jmp = DISAS_WFE;
    }
    return true;
}

static bool trans_WFI(DisasContext *s, arg_WFI *a)
{
    /* For WFI, halt the vCPU until an IRQ.  */
    gen_set_pc_im(s, s->base.pc_next);
    s->base.is_jmp = DISAS_WFI;
    return true;
}

static bool trans_NOP(DisasContext *s, arg_NOP *a)
{
    return true;
}

static bool trans_MSR_imm(DisasContext *s, arg_MSR_imm *a)
{
    uint32_t val = ror32(a->imm, a->rot * 2);
    uint32_t mask = msr_mask(s, a->mask, a->r);

    if (gen_set_psr_im(s, mask, a->r, val)) {
        unallocated_encoding(s);
    }
    return true;
}
/*
 * Cyclic Redundancy Check
 */

static bool op_crc32(DisasContext *s, arg_rrr *a, bool c, MemOp sz)
{
    TCGv_i32 t1, t2, t3;

    if (!dc_isar_feature(aa32_crc32, s)) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    switch (sz) {
    case MO_8:
        gen_uxtb(t2);
        break;
    case MO_16:
        gen_uxth(t2);
        break;
    case MO_32:
        break;
    default:
        g_assert_not_reached();
    }
    t3 = tcg_const_i32(1 << sz);
    if (c) {
        gen_helper_crc32c(t1, t1, t2, t3);
    } else {
        gen_helper_crc32(t1, t1, t2, t3);
    }
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t3);
    store_reg(s, a->rd, t1);
    return true;
}
#define DO_CRC32(NAME, c, sz) \
static bool trans_##NAME(DisasContext *s, arg_rrr *a)  \
    { return op_crc32(s, a, c, sz); }

DO_CRC32(CRC32B, false, MO_8)
DO_CRC32(CRC32H, false, MO_16)
DO_CRC32(CRC32W, false, MO_32)
DO_CRC32(CRC32CB, true, MO_8)
DO_CRC32(CRC32CH, true, MO_16)
DO_CRC32(CRC32CW, true, MO_32)

#undef DO_CRC32
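
/*
 * The helper consumes the message size in bytes, so 1 << sz yields
 * 1 for the byte forms, 2 for the halfword forms and 4 for the word
 * forms, matching MO_8, MO_16 and MO_32 above.
 */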
/*
 * Miscellaneous instructions
 */

static bool trans_MRS_bank(DisasContext *s, arg_MRS_bank *a)
{
    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    gen_mrs_banked(s, a->r, a->sysm, a->rd);
    return true;
}

static bool trans_MSR_bank(DisasContext *s, arg_MSR_bank *a)
{
    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    gen_msr_banked(s, a->r, a->sysm, a->rn);
    return true;
}

static bool trans_MRS_reg(DisasContext *s, arg_MRS_reg *a)
{
    TCGv_i32 tmp;

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    if (a->r) {
        if (IS_USER(s)) {
            unallocated_encoding(s);
            return true;
        }
        tmp = load_cpu_field(spsr);
    } else {
        tmp = tcg_temp_new_i32();
        gen_helper_cpsr_read(tmp, cpu_env);
    }
    store_reg(s, a->rd, tmp);
    return true;
}

static bool trans_MSR_reg(DisasContext *s, arg_MSR_reg *a)
{
    TCGv_i32 tmp;
    uint32_t mask = msr_mask(s, a->mask, a->r);

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    tmp = load_reg(s, a->rn);
    if (gen_set_psr(s, mask, a->r, tmp)) {
        unallocated_encoding(s);
    }
    return true;
}

static bool trans_MRS_v7m(DisasContext *s, arg_MRS_v7m *a)
{
    TCGv_i32 tmp;

    if (!arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    tmp = tcg_const_i32(a->sysm);
    gen_helper_v7m_mrs(tmp, cpu_env, tmp);
    store_reg(s, a->rd, tmp);
    return true;
}

static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a)
{
    TCGv_i32 addr, reg;

    if (!arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    addr = tcg_const_i32((a->mask << 10) | a->sysm);
    reg = load_reg(s, a->rn);
    gen_helper_v7m_msr(cpu_env, addr, reg);
    tcg_temp_free_i32(addr);
    tcg_temp_free_i32(reg);
    /* If we wrote to CONTROL, the EL might have changed */
    gen_helper_rebuild_hflags_m32_newel(cpu_env);
    gen_lookup_tb(s);
    return true;
}
static bool trans_BX(DisasContext *s, arg_BX *a)
{
    if (!ENABLE_ARCH_4T) {
        return false;
    }
    gen_bx_excret(s, load_reg(s, a->rm));
    return true;
}

static bool trans_BXJ(DisasContext *s, arg_BXJ *a)
{
    if (!ENABLE_ARCH_5J || arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    /* Trivial implementation equivalent to bx.  */
    gen_bx(s, load_reg(s, a->rm));
    return true;
}

static bool trans_BLX_r(DisasContext *s, arg_BLX_r *a)
{
    TCGv_i32 tmp;

    if (!ENABLE_ARCH_5) {
        return false;
    }
    tmp = load_reg(s, a->rm);
    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
    gen_bx(s, tmp);
    return true;
}
/*
 * BXNS/BLXNS: only exist for v8M with the security extensions,
 * and always UNDEF if NonSecure.  We don't implement these in
 * the user-only mode either (in theory you can use them from
 * Secure User mode but they are too tied in to system emulation).
 */
static bool trans_BXNS(DisasContext *s, arg_BXNS *a)
{
    if (!s->v8m_secure || IS_USER_ONLY) {
        unallocated_encoding(s);
    } else {
        gen_bxns(s, a->rm);
    }
    return true;
}

static bool trans_BLXNS(DisasContext *s, arg_BLXNS *a)
{
    if (!s->v8m_secure || IS_USER_ONLY) {
        unallocated_encoding(s);
    } else {
        gen_blxns(s, a->rm);
    }
    return true;
}
static bool trans_CLZ(DisasContext *s, arg_CLZ *a)
{
    TCGv_i32 tmp;

    if (!ENABLE_ARCH_5) {
        return false;
    }
    tmp = load_reg(s, a->rm);
    tcg_gen_clzi_i32(tmp, tmp, 32);
    store_reg(s, a->rd, tmp);
    return true;
}

static bool trans_ERET(DisasContext *s, arg_ERET *a)
{
    TCGv_i32 tmp;

    if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
        return false;
    }
    if (IS_USER(s)) {
        unallocated_encoding(s);
        return true;
    }
    if (s->current_el == 2) {
        /* ERET from Hyp uses ELR_Hyp, not LR */
        tmp = load_cpu_field(elr_el[2]);
    } else {
        tmp = load_reg(s, 14);
    }
    gen_exception_return(s, tmp);
    return true;
}
static bool trans_HLT(DisasContext *s, arg_HLT *a)
{
    gen_hlt(s, a->imm);
    return true;
}

static bool trans_BKPT(DisasContext *s, arg_BKPT *a)
{
    if (!ENABLE_ARCH_5) {
        return false;
    }
    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        !IS_USER(s) &&
#endif
        (a->imm == 0xab)) {
        gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
    } else {
        gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false));
    }
    return true;
}
static bool trans_HVC(DisasContext *s, arg_HVC *a)
{
    if (!ENABLE_ARCH_7 || arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    if (IS_USER(s)) {
        unallocated_encoding(s);
    } else {
        gen_hvc(s, a->imm);
    }
    return true;
}

static bool trans_SMC(DisasContext *s, arg_SMC *a)
{
    if (!ENABLE_ARCH_6K || arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    if (IS_USER(s)) {
        unallocated_encoding(s);
    } else {
        gen_smc(s);
    }
    return true;
}
static bool trans_SG(DisasContext *s, arg_SG *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_M) ||
        !arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }
    /*
     * SG (v8M only)
     * The bulk of the behaviour for this instruction is implemented
     * in v7m_handle_execute_nsc(), which deals with the insn when
     * it is executed by a CPU in non-secure state from memory
     * which is Secure & NonSecure-Callable.
     * Here we only need to handle the remaining cases:
     *  * in NS memory (including the "security extension not
     *    implemented" case) : NOP
     *  * in S memory but CPU already secure (clear IT bits)
     * We know that the attribute for the memory this insn is
     * in must match the current CPU state, because otherwise
     * get_phys_addr_pmsav8 would have generated an exception.
     */
    if (s->v8m_secure) {
        /* Like the IT insn, we don't need to generate any code */
        s->condexec_cond = 0;
        s->condexec_mask = 0;
    }
    return true;
}
static bool trans_TT(DisasContext *s, arg_TT *a)
{
    TCGv_i32 addr, tmp;

    if (!arm_dc_feature(s, ARM_FEATURE_M) ||
        !arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }
    if (a->rd == 13 || a->rd == 15 || a->rn == 15) {
        /* We UNDEF for these UNPREDICTABLE cases */
        unallocated_encoding(s);
        return true;
    }
    if (a->A && !s->v8m_secure) {
        /* This case is UNDEFINED.  */
        unallocated_encoding(s);
        return true;
    }

    addr = load_reg(s, a->rn);
    tmp = tcg_const_i32((a->A << 1) | a->T);
    gen_helper_v7m_tt(tmp, cpu_env, addr, tmp);
    tcg_temp_free_i32(addr);
    store_reg(s, a->rd, tmp);
    return true;
}
/*
 * Load/store register index
 */

static ISSInfo make_issinfo(DisasContext *s, int rd, bool p, bool w)
{
    ISSInfo ret;

    /* ISS not valid if writeback */
    if (p && !w) {
        ret = rd;
        if (s->base.pc_next - s->pc_curr == 2) {
            ret |= ISSIs16Bit;
        }
    } else {
        ret = ISSInvalid;
    }
    return ret;
}
static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a)
{
    TCGv_i32 addr = load_reg(s, a->rn);

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    if (a->p) {
        TCGv_i32 ofs = load_reg(s, a->rm);
        gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
        if (a->u) {
            tcg_gen_add_i32(addr, addr, ofs);
        } else {
            tcg_gen_sub_i32(addr, addr, ofs);
        }
        tcg_temp_free_i32(ofs);
    }
    return addr;
}
static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a,
                            TCGv_i32 addr, int address_offset)
{
    if (!a->p) {
        TCGv_i32 ofs = load_reg(s, a->rm);
        gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
        if (a->u) {
            tcg_gen_add_i32(addr, addr, ofs);
        } else {
            tcg_gen_sub_i32(addr, addr, ofs);
        }
        tcg_temp_free_i32(ofs);
    } else if (!a->w) {
        tcg_temp_free_i32(addr);
        return;
    }
    tcg_gen_addi_i32(addr, addr, address_offset);
    store_reg(s, a->rn, addr);
}
static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
                       MemOp mop, int mem_idx)
{
    ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
    TCGv_i32 addr, tmp;

    addr = op_addr_rr_pre(s, a);

    tmp = tcg_temp_new_i32();
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
    disas_set_da_iss(s, mop, issinfo);

    /*
     * Perform base writeback before the loaded value to
     * ensure correct behavior with overlapping index registers.
     */
    op_addr_rr_post(s, a, addr, 0);
    store_reg_from_load(s, a->rt, tmp);
    return true;
}
static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
                        MemOp mop, int mem_idx)
{
    ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
    TCGv_i32 addr, tmp;

    addr = op_addr_rr_pre(s, a);

    tmp = load_reg(s, a->rt);
    gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
    disas_set_da_iss(s, mop, issinfo);
    tcg_temp_free_i32(tmp);

    op_addr_rr_post(s, a, addr, 0);
    return true;
}
static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
{
    int mem_idx = get_mem_index(s);
    TCGv_i32 addr, tmp;

    if (!ENABLE_ARCH_5TE) {
        return false;
    }
    if (a->rt & 1) {
        unallocated_encoding(s);
        return true;
    }
    addr = op_addr_rr_pre(s, a);

    tmp = tcg_temp_new_i32();
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
    store_reg(s, a->rt, tmp);

    tcg_gen_addi_i32(addr, addr, 4);

    tmp = tcg_temp_new_i32();
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
    store_reg(s, a->rt + 1, tmp);

    /* LDRD w/ base writeback is undefined if the registers overlap.  */
    op_addr_rr_post(s, a, addr, -4);
    return true;
}
static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
{
    int mem_idx = get_mem_index(s);
    TCGv_i32 addr, tmp;

    if (!ENABLE_ARCH_5TE) {
        return false;
    }
    if (a->rt & 1) {
        unallocated_encoding(s);
        return true;
    }
    addr = op_addr_rr_pre(s, a);

    tmp = load_reg(s, a->rt);
    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
    tcg_temp_free_i32(tmp);

    tcg_gen_addi_i32(addr, addr, 4);

    tmp = load_reg(s, a->rt + 1);
    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
    tcg_temp_free_i32(tmp);

    op_addr_rr_post(s, a, addr, -4);
    return true;
}
/*
 * Load/store immediate index
 */

static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a)
{
    int ofs = a->imm;

    if (!a->u) {
        ofs = -ofs;
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Stackcheck. Here we know 'addr' is the current SP;
         * U is set if we're moving SP up, else down. It is
         * UNKNOWN whether the limit check triggers when SP starts
         * below the limit and ends up above it; we chose to do so.
         */
        if (!a->u) {
            TCGv_i32 newsp = tcg_temp_new_i32();
            tcg_gen_addi_i32(newsp, cpu_R[13], ofs);
            gen_helper_v8m_stackcheck(cpu_env, newsp);
            tcg_temp_free_i32(newsp);
        } else {
            gen_helper_v8m_stackcheck(cpu_env, cpu_R[13]);
        }
    }

    return add_reg_for_lit(s, a->rn, a->p ? ofs : 0);
}
static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a,
                            TCGv_i32 addr, int address_offset)
{
    if (!a->p) {
        if (a->u) {
            address_offset += a->imm;
        } else {
            address_offset -= a->imm;
        }
    } else if (!a->w) {
        tcg_temp_free_i32(addr);
        return;
    }
    tcg_gen_addi_i32(addr, addr, address_offset);
    store_reg(s, a->rn, addr);
}
static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
                       MemOp mop, int mem_idx)
{
    ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
    TCGv_i32 addr, tmp;

    addr = op_addr_ri_pre(s, a);

    tmp = tcg_temp_new_i32();
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
    disas_set_da_iss(s, mop, issinfo);

    /*
     * Perform base writeback before the loaded value to
     * ensure correct behavior with overlapping index registers.
     */
    op_addr_ri_post(s, a, addr, 0);
    store_reg_from_load(s, a->rt, tmp);
    return true;
}
static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
                        MemOp mop, int mem_idx)
{
    ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
    TCGv_i32 addr, tmp;

    addr = op_addr_ri_pre(s, a);

    tmp = load_reg(s, a->rt);
    gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
    disas_set_da_iss(s, mop, issinfo);
    tcg_temp_free_i32(tmp);

    op_addr_ri_post(s, a, addr, 0);
    return true;
}
static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
{
    int mem_idx = get_mem_index(s);
    TCGv_i32 addr, tmp;

    addr = op_addr_ri_pre(s, a);

    tmp = tcg_temp_new_i32();
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
    store_reg(s, a->rt, tmp);

    tcg_gen_addi_i32(addr, addr, 4);

    tmp = tcg_temp_new_i32();
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
    store_reg(s, rt2, tmp);

    /* LDRD w/ base writeback is undefined if the registers overlap.  */
    op_addr_ri_post(s, a, addr, -4);
    return true;
}
static bool trans_LDRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
{
    if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
        return false;
    }
    return op_ldrd_ri(s, a, a->rt + 1);
}
static bool trans_LDRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
{
    arg_ldst_ri b = {
        .u = a->u, .w = a->w, .p = a->p,
        .rn = a->rn, .rt = a->rt, .imm = a->imm
    };
    return op_ldrd_ri(s, &b, a->rt2);
}
static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
{
    int mem_idx = get_mem_index(s);
    TCGv_i32 addr, tmp;

    addr = op_addr_ri_pre(s, a);

    tmp = load_reg(s, a->rt);
    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
    tcg_temp_free_i32(tmp);

    tcg_gen_addi_i32(addr, addr, 4);

    tmp = load_reg(s, rt2);
    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
    tcg_temp_free_i32(tmp);

    op_addr_ri_post(s, a, addr, -4);
    return true;
}
static bool trans_STRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
{
    if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
        return false;
    }
    return op_strd_ri(s, a, a->rt + 1);
}
static bool trans_STRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
{
    arg_ldst_ri b = {
        .u = a->u, .w = a->w, .p = a->p,
        .rn = a->rn, .rt = a->rt, .imm = a->imm
    };
    return op_strd_ri(s, &b, a->rt2);
}
#define DO_LDST(NAME, WHICH, MEMOP) \
static bool trans_##NAME##_ri(DisasContext *s, arg_ldst_ri *a)        \
{                                                                     \
    return op_##WHICH##_ri(s, a, MEMOP, get_mem_index(s));            \
}                                                                     \
static bool trans_##NAME##T_ri(DisasContext *s, arg_ldst_ri *a)       \
{                                                                     \
    return op_##WHICH##_ri(s, a, MEMOP, get_a32_user_mem_index(s));   \
}                                                                     \
static bool trans_##NAME##_rr(DisasContext *s, arg_ldst_rr *a)        \
{                                                                     \
    return op_##WHICH##_rr(s, a, MEMOP, get_mem_index(s));            \
}                                                                     \
static bool trans_##NAME##T_rr(DisasContext *s, arg_ldst_rr *a)       \
{                                                                     \
    return op_##WHICH##_rr(s, a, MEMOP, get_a32_user_mem_index(s));   \
}

DO_LDST(LDR, load, MO_UL)
DO_LDST(LDRB, load, MO_UB)
DO_LDST(LDRH, load, MO_UW)
DO_LDST(LDRSB, load, MO_SB)
DO_LDST(LDRSH, load, MO_SW)

DO_LDST(STR, store, MO_UL)
DO_LDST(STRB, store, MO_UB)
DO_LDST(STRH, store, MO_UW)

#undef DO_LDST
/*
 * Synchronization primitives
 */

static bool op_swp(DisasContext *s, arg_SWP *a, MemOp opc)
{
    TCGv_i32 addr, tmp;
    TCGv taddr;

    opc |= s->be_data;
    addr = load_reg(s, a->rn);
    taddr = gen_aa32_addr(s, addr, opc);
    tcg_temp_free_i32(addr);

    tmp = load_reg(s, a->rt2);
    tcg_gen_atomic_xchg_i32(tmp, taddr, tmp, get_mem_index(s), opc);
    tcg_temp_free(taddr);

    store_reg(s, a->rt, tmp);
    return true;
}

static bool trans_SWP(DisasContext *s, arg_SWP *a)
{
    return op_swp(s, a, MO_UL | MO_ALIGN);
}

static bool trans_SWPB(DisasContext *s, arg_SWP *a)
{
    return op_swp(s, a, MO_UB);
}
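
/*
 * Both SWP forms map onto a single atomic exchange, so even under
 * MTTCG the read-modify-write of [Rn] is indivisible; no exclusive
 * monitor state is involved, unlike the LDREX/STREX family below.
 */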
/*
 * Load/Store Exclusive and Load-Acquire/Store-Release
 */

static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel)
{
    TCGv_i32 addr;
    /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
    bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);

    /* We UNDEF for these UNPREDICTABLE cases.  */
    if (a->rd == 15 || a->rn == 15 || a->rt == 15
        || a->rd == a->rn || a->rd == a->rt
        || (!v8a && s->thumb && (a->rd == 13 || a->rt == 13))
        || (mop == MO_64
            && (a->rt2 == 15
                || a->rd == a->rt2
                || (!v8a && s->thumb && a->rt2 == 13)))) {
        unallocated_encoding(s);
        return true;
    }

    if (rel) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }

    addr = tcg_temp_local_new_i32();
    load_reg_var(s, addr, a->rn);
    tcg_gen_addi_i32(addr, addr, a->imm);

    gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop);
    tcg_temp_free_i32(addr);
    return true;
}
static bool trans_STREX(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    return op_strex(s, a, MO_32, false);
}

static bool trans_STREXD_a32(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_6K) {
        return false;
    }
    /* We UNDEF for these UNPREDICTABLE cases.  */
    if (a->rt & 1) {
        unallocated_encoding(s);
        return true;
    }
    a->rt2 = a->rt + 1;
    return op_strex(s, a, MO_64, false);
}

static bool trans_STREXD_t32(DisasContext *s, arg_STREX *a)
{
    return op_strex(s, a, MO_64, false);
}

static bool trans_STREXB(DisasContext *s, arg_STREX *a)
{
    if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
        return false;
    }
    return op_strex(s, a, MO_8, false);
}

static bool trans_STREXH(DisasContext *s, arg_STREX *a)
{
    if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
        return false;
    }
    return op_strex(s, a, MO_16, false);
}

static bool trans_STLEX(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_strex(s, a, MO_32, true);
}

static bool trans_STLEXD_a32(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    /* We UNDEF for these UNPREDICTABLE cases.  */
    if (a->rt & 1) {
        unallocated_encoding(s);
        return true;
    }
    a->rt2 = a->rt + 1;
    return op_strex(s, a, MO_64, true);
}

static bool trans_STLEXD_t32(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_strex(s, a, MO_64, true);
}

static bool trans_STLEXB(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_strex(s, a, MO_8, true);
}

static bool trans_STLEXH(DisasContext *s, arg_STREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_strex(s, a, MO_16, true);
}
static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
{
    TCGv_i32 addr, tmp;

    if (!ENABLE_ARCH_8) {
        return false;
    }
    /* We UNDEF for these UNPREDICTABLE cases.  */
    if (a->rn == 15 || a->rt == 15) {
        unallocated_encoding(s);
        return true;
    }

    addr = load_reg(s, a->rn);
    tmp = load_reg(s, a->rt);
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
    disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);
    return true;
}

static bool trans_STL(DisasContext *s, arg_STL *a)
{
    return op_stl(s, a, MO_UL);
}

static bool trans_STLB(DisasContext *s, arg_STL *a)
{
    return op_stl(s, a, MO_UB);
}

static bool trans_STLH(DisasContext *s, arg_STL *a)
{
    return op_stl(s, a, MO_UW);
}
static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq)
{
    TCGv_i32 addr;
    /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
    bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);

    /* We UNDEF for these UNPREDICTABLE cases.  */
    if (a->rn == 15 || a->rt == 15
        || (!v8a && s->thumb && a->rt == 13)
        || (mop == MO_64
            && (a->rt2 == 15 || a->rt == a->rt2
                || (!v8a && s->thumb && a->rt2 == 13)))) {
        unallocated_encoding(s);
        return true;
    }

    addr = tcg_temp_local_new_i32();
    load_reg_var(s, addr, a->rn);
    tcg_gen_addi_i32(addr, addr, a->imm);

    gen_load_exclusive(s, a->rt, a->rt2, addr, mop);
    tcg_temp_free_i32(addr);

    if (acq) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
    return true;
}
static bool trans_LDREX(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    return op_ldrex(s, a, MO_32, false);
}

static bool trans_LDREXD_a32(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_6K) {
        return false;
    }
    /* We UNDEF for these UNPREDICTABLE cases.  */
    if (a->rt & 1) {
        unallocated_encoding(s);
        return true;
    }
    a->rt2 = a->rt + 1;
    return op_ldrex(s, a, MO_64, false);
}

static bool trans_LDREXD_t32(DisasContext *s, arg_LDREX *a)
{
    return op_ldrex(s, a, MO_64, false);
}

static bool trans_LDREXB(DisasContext *s, arg_LDREX *a)
{
    if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
        return false;
    }
    return op_ldrex(s, a, MO_8, false);
}

static bool trans_LDREXH(DisasContext *s, arg_LDREX *a)
{
    if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
        return false;
    }
    return op_ldrex(s, a, MO_16, false);
}

static bool trans_LDAEX(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_ldrex(s, a, MO_32, true);
}

static bool trans_LDAEXD_a32(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    /* We UNDEF for these UNPREDICTABLE cases.  */
    if (a->rt & 1) {
        unallocated_encoding(s);
        return true;
    }
    a->rt2 = a->rt + 1;
    return op_ldrex(s, a, MO_64, true);
}

static bool trans_LDAEXD_t32(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_ldrex(s, a, MO_64, true);
}

static bool trans_LDAEXB(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_ldrex(s, a, MO_8, true);
}

static bool trans_LDAEXH(DisasContext *s, arg_LDREX *a)
{
    if (!ENABLE_ARCH_8) {
        return false;
    }
    return op_ldrex(s, a, MO_16, true);
}
static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)
{
    TCGv_i32 addr, tmp;

    if (!ENABLE_ARCH_8) {
        return false;
    }
    /* We UNDEF for these UNPREDICTABLE cases.  */
    if (a->rn == 15 || a->rt == 15) {
        unallocated_encoding(s);
        return true;
    }

    addr = load_reg(s, a->rn);
    tmp = tcg_temp_new_i32();
    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
    disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
    tcg_temp_free_i32(addr);

    store_reg(s, a->rt, tmp);
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    return true;
}

static bool trans_LDA(DisasContext *s, arg_LDA *a)
{
    return op_lda(s, a, MO_UL);
}

static bool trans_LDAB(DisasContext *s, arg_LDA *a)
{
    return op_lda(s, a, MO_UB);
}

static bool trans_LDAH(DisasContext *s, arg_LDA *a)
{
    return op_lda(s, a, MO_UW);
}
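
/*
 * Note the barrier placement: the load-acquire forms order the barrier
 * after the load (TCG_BAR_LDAQ above), while op_stl() issues its
 * TCG_BAR_STRL barrier before the store, giving the usual
 * acquire/release pairing.
 */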
/*
 * Media instructions
 */

static bool trans_USADA8(DisasContext *s, arg_USADA8 *a)
{
    TCGv_i32 t1, t2;

    if (!ENABLE_ARCH_6) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    gen_helper_usad8(t1, t1, t2);
    tcg_temp_free_i32(t2);
    if (a->ra != 15) {
        t2 = load_reg(s, a->ra);
        tcg_gen_add_i32(t1, t1, t2);
        tcg_temp_free_i32(t2);
    }
    store_reg(s, a->rd, t1);
    return true;
}
static bool op_bfx(DisasContext *s, arg_UBFX *a, bool u)
{
    TCGv_i32 tmp;
    int width = a->widthm1 + 1;
    int shift = a->lsb;

    if (!ENABLE_ARCH_6T2) {
        return false;
    }
    if (shift + width > 32) {
        /* UNPREDICTABLE; we choose to UNDEF */
        unallocated_encoding(s);
        return true;
    }

    tmp = load_reg(s, a->rn);
    if (u) {
        tcg_gen_extract_i32(tmp, tmp, shift, width);
    } else {
        tcg_gen_sextract_i32(tmp, tmp, shift, width);
    }
    store_reg(s, a->rd, tmp);
    return true;
}

static bool trans_SBFX(DisasContext *s, arg_SBFX *a)
{
    return op_bfx(s, a, false);
}

static bool trans_UBFX(DisasContext *s, arg_UBFX *a)
{
    return op_bfx(s, a, true);
}
static bool trans_BFCI(DisasContext *s, arg_BFCI *a)
{
    TCGv_i32 tmp;
    int msb = a->msb, lsb = a->lsb;
    int width;

    if (!ENABLE_ARCH_6T2) {
        return false;
    }
    if (msb < lsb) {
        /* UNPREDICTABLE; we choose to UNDEF */
        unallocated_encoding(s);
        return true;
    }

    width = msb + 1 - lsb;
    if (a->rn == 15) {
        /* BFC */
        tmp = tcg_const_i32(0);
    } else {
        /* BFI */
        tmp = load_reg(s, a->rn);
    }
    if (width != 32) {
        TCGv_i32 tmp2 = load_reg(s, a->rd);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, lsb, width);
        tcg_temp_free_i32(tmp2);
    }
    store_reg(s, a->rd, tmp);
    return true;
}
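
/*
 * For example, BFI r0, r1, #8, #4 deposits r1<3:0> into r0<11:8> and
 * leaves the other bits of r0 intact; BFC is the same operation with
 * a zero source, clearing r0<11:8>.
 */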
static bool trans_UDF(DisasContext *s, arg_UDF *a)
{
    unallocated_encoding(s);
    return true;
}
/*
 * Parallel addition and subtraction
 */

static bool op_par_addsub(DisasContext *s, arg_rrr *a,
                          void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0, t1;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_6) {
        return false;
    }

    t0 = load_reg(s, a->rn);
    t1 = load_reg(s, a->rm);

    gen(t0, t0, t1);

    tcg_temp_free_i32(t1);
    store_reg(s, a->rd, t0);
    return true;
}

static bool op_par_addsub_ge(DisasContext *s, arg_rrr *a,
                             void (*gen)(TCGv_i32, TCGv_i32,
                                         TCGv_i32, TCGv_ptr))
{
    TCGv_i32 t0, t1;
    TCGv_ptr ge;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_6) {
        return false;
    }

    t0 = load_reg(s, a->rn);
    t1 = load_reg(s, a->rm);

    ge = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ge, cpu_env, offsetof(CPUARMState, GE));
    gen(t0, t0, t1, ge);

    tcg_temp_free_ptr(ge);
    tcg_temp_free_i32(t1);
    store_reg(s, a->rd, t0);
    return true;
}
#define DO_PAR_ADDSUB(NAME, helper) \
static bool trans_##NAME(DisasContext *s, arg_rrr *a)   \
{                                                       \
    return op_par_addsub(s, a, helper);                 \
}

#define DO_PAR_ADDSUB_GE(NAME, helper) \
static bool trans_##NAME(DisasContext *s, arg_rrr *a)   \
{                                                       \
    return op_par_addsub_ge(s, a, helper);              \
}

DO_PAR_ADDSUB_GE(SADD16, gen_helper_sadd16)
DO_PAR_ADDSUB_GE(SASX, gen_helper_saddsubx)
DO_PAR_ADDSUB_GE(SSAX, gen_helper_ssubaddx)
DO_PAR_ADDSUB_GE(SSUB16, gen_helper_ssub16)
DO_PAR_ADDSUB_GE(SADD8, gen_helper_sadd8)
DO_PAR_ADDSUB_GE(SSUB8, gen_helper_ssub8)

DO_PAR_ADDSUB_GE(UADD16, gen_helper_uadd16)
DO_PAR_ADDSUB_GE(UASX, gen_helper_uaddsubx)
DO_PAR_ADDSUB_GE(USAX, gen_helper_usubaddx)
DO_PAR_ADDSUB_GE(USUB16, gen_helper_usub16)
DO_PAR_ADDSUB_GE(UADD8, gen_helper_uadd8)
DO_PAR_ADDSUB_GE(USUB8, gen_helper_usub8)

DO_PAR_ADDSUB(QADD16, gen_helper_qadd16)
DO_PAR_ADDSUB(QASX, gen_helper_qaddsubx)
DO_PAR_ADDSUB(QSAX, gen_helper_qsubaddx)
DO_PAR_ADDSUB(QSUB16, gen_helper_qsub16)
DO_PAR_ADDSUB(QADD8, gen_helper_qadd8)
DO_PAR_ADDSUB(QSUB8, gen_helper_qsub8)

DO_PAR_ADDSUB(UQADD16, gen_helper_uqadd16)
DO_PAR_ADDSUB(UQASX, gen_helper_uqaddsubx)
DO_PAR_ADDSUB(UQSAX, gen_helper_uqsubaddx)
DO_PAR_ADDSUB(UQSUB16, gen_helper_uqsub16)
DO_PAR_ADDSUB(UQADD8, gen_helper_uqadd8)
DO_PAR_ADDSUB(UQSUB8, gen_helper_uqsub8)

DO_PAR_ADDSUB(SHADD16, gen_helper_shadd16)
DO_PAR_ADDSUB(SHASX, gen_helper_shaddsubx)
DO_PAR_ADDSUB(SHSAX, gen_helper_shsubaddx)
DO_PAR_ADDSUB(SHSUB16, gen_helper_shsub16)
DO_PAR_ADDSUB(SHADD8, gen_helper_shadd8)
DO_PAR_ADDSUB(SHSUB8, gen_helper_shsub8)

DO_PAR_ADDSUB(UHADD16, gen_helper_uhadd16)
DO_PAR_ADDSUB(UHASX, gen_helper_uhaddsubx)
DO_PAR_ADDSUB(UHSAX, gen_helper_uhsubaddx)
DO_PAR_ADDSUB(UHSUB16, gen_helper_uhsub16)
DO_PAR_ADDSUB(UHADD8, gen_helper_uhadd8)
DO_PAR_ADDSUB(UHSUB8, gen_helper_uhsub8)

#undef DO_PAR_ADDSUB
#undef DO_PAR_ADDSUB_GE
/*
 * Packing, unpacking, saturation, and reversal
 */

static bool trans_PKH(DisasContext *s, arg_PKH *a)
{
    TCGv_i32 tn, tm;
    int shift = a->imm;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_6) {
        return false;
    }

    tn = load_reg(s, a->rn);
    tm = load_reg(s, a->rm);
    if (a->tb) {
        /* PKHTB */
        if (shift == 0) {
            shift = 31;
        }
        tcg_gen_sari_i32(tm, tm, shift);
        tcg_gen_deposit_i32(tn, tn, tm, 0, 16);
    } else {
        /* PKHBT */
        tcg_gen_shli_i32(tm, tm, shift);
        tcg_gen_deposit_i32(tn, tm, tn, 0, 16);
    }
    tcg_temp_free_i32(tm);
    store_reg(s, a->rd, tn);
    return true;
}
static bool op_sat(DisasContext *s, arg_sat *a,
                   void (*gen)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp, satimm;
    int shift = a->imm;

    if (!ENABLE_ARCH_6) {
        return false;
    }

    tmp = load_reg(s, a->rn);
    if (a->sh) {
        tcg_gen_sari_i32(tmp, tmp, shift ? shift : 31);
    } else {
        tcg_gen_shli_i32(tmp, tmp, shift);
    }

    satimm = tcg_const_i32(a->satimm);
    gen(tmp, cpu_env, tmp, satimm);
    tcg_temp_free_i32(satimm);

    store_reg(s, a->rd, tmp);
    return true;
}

static bool trans_SSAT(DisasContext *s, arg_sat *a)
{
    return op_sat(s, a, gen_helper_ssat);
}

static bool trans_USAT(DisasContext *s, arg_sat *a)
{
    return op_sat(s, a, gen_helper_usat);
}

static bool trans_SSAT16(DisasContext *s, arg_sat *a)
{
    if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
        return false;
    }
    return op_sat(s, a, gen_helper_ssat16);
}

static bool trans_USAT16(DisasContext *s, arg_sat *a)
{
    if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
        return false;
    }
    return op_sat(s, a, gen_helper_usat16);
}
static bool op_xta(DisasContext *s, arg_rrr_rot *a,
                   void (*gen_extract)(TCGv_i32, TCGv_i32),
                   void (*gen_add)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (!ENABLE_ARCH_6) {
        return false;
    }

    tmp = load_reg(s, a->rm);
    /*
     * TODO: In many cases we could do a shift instead of a rotate.
     * Combined with a simple extend, that becomes an extract.
     */
    tcg_gen_rotri_i32(tmp, tmp, a->rot * 8);
    gen_extract(tmp, tmp);

    if (a->rn != 15) {
        TCGv_i32 tmp2 = load_reg(s, a->rn);
        gen_add(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
    }
    store_reg(s, a->rd, tmp);
    return true;
}

static bool trans_SXTAB(DisasContext *s, arg_rrr_rot *a)
{
    return op_xta(s, a, tcg_gen_ext8s_i32, tcg_gen_add_i32);
}

static bool trans_SXTAH(DisasContext *s, arg_rrr_rot *a)
{
    return op_xta(s, a, tcg_gen_ext16s_i32, tcg_gen_add_i32);
}

static bool trans_SXTAB16(DisasContext *s, arg_rrr_rot *a)
{
    if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
        return false;
    }
    return op_xta(s, a, gen_helper_sxtb16, gen_add16);
}

static bool trans_UXTAB(DisasContext *s, arg_rrr_rot *a)
{
    return op_xta(s, a, tcg_gen_ext8u_i32, tcg_gen_add_i32);
}

static bool trans_UXTAH(DisasContext *s, arg_rrr_rot *a)
{
    return op_xta(s, a, tcg_gen_ext16u_i32, tcg_gen_add_i32);
}

static bool trans_UXTAB16(DisasContext *s, arg_rrr_rot *a)
{
    if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
        return false;
    }
    return op_xta(s, a, gen_helper_uxtb16, gen_add16);
}
*s
, arg_rrr
*a
)
9410 TCGv_i32 t1
, t2
, t3
;
9413 ? !arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)
9418 t1
= load_reg(s
, a
->rn
);
9419 t2
= load_reg(s
, a
->rm
);
9420 t3
= tcg_temp_new_i32();
9421 tcg_gen_ld_i32(t3
, cpu_env
, offsetof(CPUARMState
, GE
));
9422 gen_helper_sel_flags(t1
, t3
, t1
, t2
);
9423 tcg_temp_free_i32(t3
);
9424 tcg_temp_free_i32(t2
);
9425 store_reg(s
, a
->rd
, t1
);
static bool op_rr(DisasContext *s, arg_rr *a,
                  void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    tmp = load_reg(s, a->rm);
    gen(tmp, tmp);
    store_reg(s, a->rd, tmp);
    return true;
}

static bool trans_REV(DisasContext *s, arg_rr *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    return op_rr(s, a, tcg_gen_bswap32_i32);
}

static bool trans_REV16(DisasContext *s, arg_rr *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    return op_rr(s, a, gen_rev16);
}

static bool trans_REVSH(DisasContext *s, arg_rr *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    return op_rr(s, a, gen_revsh);
}

static bool trans_RBIT(DisasContext *s, arg_rr *a)
{
    if (!ENABLE_ARCH_6T2) {
        return false;
    }
    return op_rr(s, a, gen_helper_rbit);
}
/*
 * Signed multiply, signed and unsigned divide
 */

static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
{
    TCGv_i32 t1, t2;

    if (!ENABLE_ARCH_6) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    if (m_swap) {
        gen_swap_half(t2);
    }
    gen_smul_dual(t1, t2);

    if (sub) {
        /* This subtraction cannot overflow. */
        tcg_gen_sub_i32(t1, t1, t2);
    } else {
        /*
         * This addition cannot overflow 32 bits; however it may
         * overflow considered as a signed operation, in which case
         * we must set the Q flag.
         */
        gen_helper_add_setq(t1, cpu_env, t1, t2);
    }
    tcg_temp_free_i32(t2);

    if (a->ra != 15) {
        t2 = load_reg(s, a->ra);
        gen_helper_add_setq(t1, cpu_env, t1, t2);
        tcg_temp_free_i32(t2);
    }
    store_reg(s, a->rd, t1);
    return true;
}

static bool trans_SMLAD(DisasContext *s, arg_rrrr *a)
{
    return op_smlad(s, a, false, false);
}

static bool trans_SMLADX(DisasContext *s, arg_rrrr *a)
{
    return op_smlad(s, a, true, false);
}

static bool trans_SMLSD(DisasContext *s, arg_rrrr *a)
{
    return op_smlad(s, a, false, true);
}

static bool trans_SMLSDX(DisasContext *s, arg_rrrr *a)
{
    return op_smlad(s, a, true, true);
}
static bool op_smlald(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
{
    TCGv_i32 t1, t2;
    TCGv_i64 l1, l2;

    if (!ENABLE_ARCH_6) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    if (m_swap) {
        gen_swap_half(t2);
    }
    gen_smul_dual(t1, t2);

    l1 = tcg_temp_new_i64();
    l2 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(l1, t1);
    tcg_gen_ext_i32_i64(l2, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);

    if (sub) {
        tcg_gen_sub_i64(l1, l1, l2);
    } else {
        tcg_gen_add_i64(l1, l1, l2);
    }
    tcg_temp_free_i64(l2);

    gen_addq(s, l1, a->ra, a->rd);
    gen_storeq_reg(s, a->ra, a->rd, l1);
    tcg_temp_free_i64(l1);
    return true;
}

static bool trans_SMLALD(DisasContext *s, arg_rrrr *a)
{
    return op_smlald(s, a, false, false);
}

static bool trans_SMLALDX(DisasContext *s, arg_rrrr *a)
{
    return op_smlald(s, a, true, false);
}

static bool trans_SMLSLD(DisasContext *s, arg_rrrr *a)
{
    return op_smlald(s, a, false, true);
}

static bool trans_SMLSLDX(DisasContext *s, arg_rrrr *a)
{
    return op_smlald(s, a, true, true);
}
static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub)
{
    TCGv_i32 t1, t2;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_6) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    tcg_gen_muls2_i32(t2, t1, t1, t2);

    if (a->ra != 15) {
        TCGv_i32 t3 = load_reg(s, a->ra);
        if (sub) {
            /*
             * For SMMLS, we need a 64-bit subtract.  Borrow caused by
             * a non-zero multiplicand lowpart, and the correct result
             * lowpart for rounding.
             */
            TCGv_i32 zero = tcg_const_i32(0);
            tcg_gen_sub2_i32(t2, t1, zero, t3, t2, t1);
            tcg_temp_free_i32(zero);
        } else {
            tcg_gen_add_i32(t1, t1, t3);
        }
        tcg_temp_free_i32(t3);
    }
    if (round) {
        /*
         * Adding 0x80000000 to the 64-bit quantity means that we have
         * carry in to the high word when the low word has the msb set.
         */
        tcg_gen_shri_i32(t2, t2, 31);
        tcg_gen_add_i32(t1, t1, t2);
    }
    tcg_temp_free_i32(t2);
    store_reg(s, a->rd, t1);
    return true;
}

static bool trans_SMMLA(DisasContext *s, arg_rrrr *a)
{
    return op_smmla(s, a, false, false);
}

static bool trans_SMMLAR(DisasContext *s, arg_rrrr *a)
{
    return op_smmla(s, a, true, false);
}

static bool trans_SMMLS(DisasContext *s, arg_rrrr *a)
{
    return op_smmla(s, a, false, true);
}

static bool trans_SMMLSR(DisasContext *s, arg_rrrr *a)
{
    return op_smmla(s, a, true, true);
}
static bool op_div(DisasContext *s, arg_rrr *a, bool u)
{
    TCGv_i32 t1, t2;

    if (s->thumb
        ? !dc_isar_feature(aa32_thumb_div, s)
        : !dc_isar_feature(aa32_arm_div, s)) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    if (u) {
        gen_helper_udiv(t1, t1, t2);
    } else {
        gen_helper_sdiv(t1, t1, t2);
    }
    tcg_temp_free_i32(t2);
    store_reg(s, a->rd, t1);
    return true;
}

static bool trans_SDIV(DisasContext *s, arg_rrr *a)
{
    return op_div(s, a, false);
}

static bool trans_UDIV(DisasContext *s, arg_rrr *a)
{
    return op_div(s, a, true);
}
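
/*
 * The helpers follow the UDIV/SDIV pseudocode: the quotient rounds
 * toward zero, and a zero divisor yields 0 when no divide-by-zero
 * trap is configured.
 */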
/*
 * Block data transfer
 */

static TCGv_i32 op_addr_block_pre(DisasContext *s, arg_ldst_block *a, int n)
{
    TCGv_i32 addr = load_reg(s, a->rn);

    if (a->b) {
        if (a->i) {
            /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } else {
            /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        }
    } else if (!a->i && n != 1) {
        /* post decrement */
        tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * If the writeback is incrementing SP rather than
         * decrementing it, and the initial SP is below the
         * stack limit but the final written-back SP would
         * be above, then we must not perform any memory
         * accesses, but it is IMPDEF whether we generate
         * an exception. We choose to do so in this case.
         * At this point 'addr' is the lowest address, so
         * either the original SP (if incrementing) or our
         * final SP (if decrementing), so that's what we check.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    return addr;
}
static void op_addr_block_post(DisasContext *s, arg_ldst_block *a,
                               TCGv_i32 addr, int n)
{
    if (a->w) {
        /* write back */
        if (!a->b) {
            if (a->i) {
                /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        } else if (!a->i && n != 1) {
            /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
}
static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
{
    int i, j, n, list, mem_idx;
    bool user = a->u;
    TCGv_i32 addr, tmp, tmp2;

    if (user) {
        /* STM (user) */
        if (IS_USER(s)) {
            /* Only usable in supervisor mode.  */
            unallocated_encoding(s);
            return true;
        }
    }

    list = a->list;
    n = ctpop16(list);
    if (n < min_n || a->rn == 15) {
        unallocated_encoding(s);
        return true;
    }

    addr = op_addr_block_pre(s, a, n);
    mem_idx = get_mem_index(s);

    for (i = j = 0; i < 16; i++) {
        if (!(list & (1 << i))) {
            continue;
        }

        if (user && i != 15) {
            tmp = tcg_temp_new_i32();
            tmp2 = tcg_const_i32(i);
            gen_helper_get_user_reg(tmp, cpu_env, tmp2);
            tcg_temp_free_i32(tmp2);
        } else {
            tmp = load_reg(s, i);
        }
        gen_aa32_st32(s, tmp, addr, mem_idx);
        tcg_temp_free_i32(tmp);

        /* No need to add after the last transfer.  */
        if (++j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    op_addr_block_post(s, a, addr, n);
    return true;
}
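/*
 * Note that for STM (user) the loop above reads the user-mode banked
 * copy of each register via gen_helper_get_user_reg rather than the
 * current mode's copy; r15 is excluded since the PC is not banked.
 */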
static bool trans_STM(DisasContext *s, arg_ldst_block *a)
{
    /* BitCount(list) < 1 is UNPREDICTABLE */
    return op_stm(s, a, 1);
}

static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a)
{
    /* Writeback register in register list is UNPREDICTABLE for T32.  */
    if (a->w && (a->list & (1 << a->rn))) {
        unallocated_encoding(s);
        return true;
    }
    /* BitCount(list) < 2 is UNPREDICTABLE */
    return op_stm(s, a, 2);
}
static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
{
    int i, j, n, list, mem_idx;
    bool loaded_base;
    bool user = a->u;
    bool exc_return = false;
    TCGv_i32 addr, tmp, tmp2, loaded_var;

    if (user) {
        /* LDM (user), LDM (exception return) */
        if (IS_USER(s)) {
            /* Only usable in supervisor mode.  */
            unallocated_encoding(s);
            return true;
        }
        if (extract32(a->list, 15, 1)) {
            exc_return = true;
            user = false;
        } else {
            /* LDM (user) does not allow writeback.  */
            if (a->w) {
                unallocated_encoding(s);
                return true;
            }
        }
    }

    list = a->list;
    n = ctpop16(list);
    if (n < min_n || a->rn == 15) {
        unallocated_encoding(s);
        return true;
    }

    addr = op_addr_block_pre(s, a, n);
    mem_idx = get_mem_index(s);
    loaded_base = false;
    loaded_var = NULL;

    for (i = j = 0; i < 16; i++) {
        if (!(list & (1 << i))) {
            continue;
        }

        tmp = tcg_temp_new_i32();
        gen_aa32_ld32u(s, tmp, addr, mem_idx);
        if (user) {
            tmp2 = tcg_const_i32(i);
            gen_helper_set_user_reg(cpu_env, tmp2, tmp);
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
        } else if (i == a->rn) {
            loaded_var = tmp;
            loaded_base = true;
        } else if (i == 15 && exc_return) {
            store_pc_exc_ret(s, tmp);
        } else {
            store_reg_from_load(s, i, tmp);
        }

        /* No need to add after the last transfer.  */
        if (++j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    op_addr_block_post(s, a, addr, n);

    if (loaded_base) {
        /* Note that we reject base == pc above.  */
        store_reg(s, a->rn, loaded_var);
    }

    if (exc_return) {
        /* Restore CPSR from SPSR.  */
        tmp = load_cpu_field(spsr);
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_helper_cpsr_write_eret(cpu_env, tmp);
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_end();
        }
        tcg_temp_free_i32(tmp);
        /* Must exit loop to check un-masked IRQs */
        s->base.is_jmp = DISAS_EXIT;
    }
    return true;
}
static bool trans_LDM_a32(DisasContext *s, arg_ldst_block *a)
{
    /*
     * Writeback register in register list is UNPREDICTABLE
     * for ArchVersion() >= 7.  Prior to v7, A32 would write
     * an UNKNOWN value to the base register.
     */
    if (ENABLE_ARCH_7 && a->w && (a->list & (1 << a->rn))) {
        unallocated_encoding(s);
        return true;
    }
    /* BitCount(list) < 1 is UNPREDICTABLE */
    return do_ldm(s, a, 1);
}

static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a)
{
    /* Writeback register in register list is UNPREDICTABLE for T32. */
    if (a->w && (a->list & (1 << a->rn))) {
        unallocated_encoding(s);
        return true;
    }
    /* BitCount(list) < 2 is UNPREDICTABLE */
    return do_ldm(s, a, 2);
}

static bool trans_LDM_t16(DisasContext *s, arg_ldst_block *a)
{
    /* Writeback is conditional on the base register not being loaded.  */
    a->w = !(a->list & (1 << a->rn));
    /* BitCount(list) < 1 is UNPREDICTABLE */
    return do_ldm(s, a, 1);
}
/*
 * Branch, branch with link
 */

static bool trans_B(DisasContext *s, arg_i *a)
{
    gen_jmp(s, read_pc(s) + a->imm);
    return true;
}

static bool trans_B_cond_thumb(DisasContext *s, arg_ci *a)
{
    /* This has cond from encoding, required to be outside IT block.  */
    if (a->cond >= 0xe) {
        return false;
    }
    if (s->condexec_mask) {
        unallocated_encoding(s);
        return true;
    }
    arm_skip_unless(s, a->cond);
    gen_jmp(s, read_pc(s) + a->imm);
    return true;
}

static bool trans_BL(DisasContext *s, arg_i *a)
{
    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
    gen_jmp(s, read_pc(s) + a->imm);
    return true;
}

static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
{
    TCGv_i32 tmp;

    /* For A32, ARCH(5) is checked near the start of the uncond block.  */
    if (s->thumb && (a->imm & 2)) {
        return false;
    }

    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
    tmp = tcg_const_i32(!s->thumb);
    store_cpu_field(tmp, thumb);
    gen_jmp(s, (read_pc(s) & ~3) + a->imm);
    return true;
}
*s
, arg_BL_BLX_prefix
*a
)
9987 assert(!arm_dc_feature(s
, ARM_FEATURE_THUMB2
));
9988 tcg_gen_movi_i32(cpu_R
[14], read_pc(s
) + (a
->imm
<< 12));
9992 static bool trans_BL_suffix(DisasContext
*s
, arg_BL_suffix
*a
)
9994 TCGv_i32 tmp
= tcg_temp_new_i32();
9996 assert(!arm_dc_feature(s
, ARM_FEATURE_THUMB2
));
9997 tcg_gen_addi_i32(tmp
, cpu_R
[14], (a
->imm
<< 1) | 1);
9998 tcg_gen_movi_i32(cpu_R
[14], s
->base
.pc_next
| 1);
10003 static bool trans_BLX_suffix(DisasContext
*s
, arg_BLX_suffix
*a
)
10007 assert(!arm_dc_feature(s
, ARM_FEATURE_THUMB2
));
10008 if (!ENABLE_ARCH_5
) {
10011 tmp
= tcg_temp_new_i32();
10012 tcg_gen_addi_i32(tmp
, cpu_R
[14], a
->imm
<< 1);
10013 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
10014 tcg_gen_movi_i32(cpu_R
[14], s
->base
.pc_next
| 1);
static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
{
    TCGv_i32 addr, tmp;

    tmp = load_reg(s, a->rm);
    if (half) {
        tcg_gen_add_i32(tmp, tmp, tmp);
    }
    addr = load_reg(s, a->rn);
    tcg_gen_add_i32(addr, addr, tmp);

    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
                    half ? MO_UW | s->be_data : MO_UB);
    tcg_temp_free_i32(addr);

    tcg_gen_add_i32(tmp, tmp, tmp);
    tcg_gen_addi_i32(tmp, tmp, read_pc(s));
    store_reg(s, 15, tmp);
    return true;
}
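/*
 * For example (registers illustrative), TBH [r1, r2, LSL #1] loads the
 * 16-bit table entry at r1 + 2*r2 and branches to read_pc(s) + 2*entry,
 * i.e. forward from the address of the TBH plus 4.  TBB is identical
 * except that the table entry is a byte and Rm is not scaled.
 */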
static bool trans_TBB(DisasContext *s, arg_tbranch *a)
{
    return op_tbranch(s, a, false);
}

static bool trans_TBH(DisasContext *s, arg_tbranch *a)
{
    return op_tbranch(s, a, true);
}
static bool trans_CBZ(DisasContext *s, arg_CBZ *a)
{
    TCGv_i32 tmp = load_reg(s, a->rn);

    arm_gen_condlabel(s);
    tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE,
                        tmp, 0, s->condlabel);
    tcg_temp_free_i32(tmp);
    gen_jmp(s, read_pc(s) + a->imm);
    return true;
}
/*
 * Supervisor call - both T32 & A32 come here so we need to check
 * which mode we are in when checking for semihosting.
 */

static bool trans_SVC(DisasContext *s, arg_SVC *a)
{
    const uint32_t semihost_imm = s->thumb ? 0xab : 0x123456;

    if (!arm_dc_feature(s, ARM_FEATURE_M) && semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        !IS_USER(s) &&
#endif
        (a->imm == semihost_imm)) {
        gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
    } else {
        gen_set_pc_im(s, s->base.pc_next);
        s->svc_imm = a->imm;
        s->base.is_jmp = DISAS_SWI;
    }
    return true;
}
/*
 * Unconditional system instructions
 */

static bool trans_RFE(DisasContext *s, arg_RFE *a)
{
    static const int8_t pre_offset[4] = {
        /* DA */ -4, /* IA */ 0, /* DB */ -8, /* IB */ 4
    };
    static const int8_t post_offset[4] = {
        /* DA */ -8, /* IA */ 4, /* DB */ -4, /* IB */ 0
    };
    TCGv_i32 addr, t1, t2;

    if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    if (IS_USER(s)) {
        unallocated_encoding(s);
        return true;
    }

    addr = load_reg(s, a->rn);
    tcg_gen_addi_i32(addr, addr, pre_offset[a->pu]);

    /* Load PC into tmp and CPSR into tmp2.  */
    t1 = tcg_temp_new_i32();
    gen_aa32_ld32u(s, t1, addr, get_mem_index(s));
    tcg_gen_addi_i32(addr, addr, 4);
    t2 = tcg_temp_new_i32();
    gen_aa32_ld32u(s, t2, addr, get_mem_index(s));

    if (a->w) {
        /* Base writeback.  */
        tcg_gen_addi_i32(addr, addr, post_offset[a->pu]);
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
    gen_rfe(s, t1, t2);
    return true;
}
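/*
 * Example with the offset tables above: RFEIA Rn (a->pu == IA) uses
 * pre_offset 0 and post_offset 4, so the PC comes from [Rn], the CPSR
 * from [Rn+4], and writeback (if requested) leaves Rn+8 in Rn.  RFEDB
 * uses -8/-4: PC from [Rn-8], CPSR from [Rn-4], writeback of Rn-8.
 */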
static bool trans_SRS(DisasContext *s, arg_SRS *a)
{
    if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    gen_srs(s, a->mode, a->pu, a->w);
    return true;
}
static bool trans_CPS(DisasContext *s, arg_CPS *a)
{
    uint32_t mask, val;

    if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    if (IS_USER(s)) {
        /* Implemented as NOP in user mode.  */
        return true;
    }
    /* TODO: There are quite a lot of UNPREDICTABLE argument combinations. */

    mask = val = 0;
    if (a->imod & 2) {
        if (a->A) {
            mask |= CPSR_A;
        }
        if (a->I) {
            mask |= CPSR_I;
        }
        if (a->F) {
            mask |= CPSR_F;
        }
        if (a->imod & 1) {
            val |= mask;
        }
    }
    if (a->M) {
        mask |= CPSR_M;
        val |= a->mode;
    }
    if (mask) {
        gen_set_psr_im(s, mask, 0, val);
    }
    return true;
}
static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a)
{
    TCGv_i32 tmp, addr, el;

    if (!arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    if (IS_USER(s)) {
        /* Implemented as NOP in user mode.  */
        return true;
    }

    tmp = tcg_const_i32(a->im);
    /* FAULTMASK */
    if (a->F) {
        addr = tcg_const_i32(19);
        gen_helper_v7m_msr(cpu_env, addr, tmp);
        tcg_temp_free_i32(addr);
    }
    /* PRIMASK */
    if (a->I) {
        addr = tcg_const_i32(16);
        gen_helper_v7m_msr(cpu_env, addr, tmp);
        tcg_temp_free_i32(addr);
    }
    el = tcg_const_i32(s->current_el);
    gen_helper_rebuild_hflags_m32(cpu_env, el);
    tcg_temp_free_i32(el);
    tcg_temp_free_i32(tmp);
    gen_lookup_tb(s);
    return true;
}
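/*
 * The constants 19 and 16 passed to the v7m_msr helper above are the
 * M-profile MSR/MRS SYSm encodings of FAULTMASK and PRIMASK, so
 * CPSIE/CPSID is implemented as a special-register write of the im bit;
 * the hflags rebuild afterwards keeps the cached TB flags in sync after
 * that write.
 */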
/*
 * Clear-Exclusive, Barriers
 */

static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
{
    if (s->thumb
        ? !ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)
        : !ENABLE_ARCH_6K) {
        return false;
    }
    gen_clrex(s);
    return true;
}
static bool trans_DSB(DisasContext *s, arg_DSB *a)
{
    if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}

static bool trans_DMB(DisasContext *s, arg_DMB *a)
{
    return trans_DSB(s, NULL);
}
static bool trans_ISB(DisasContext *s, arg_ISB *a)
{
    if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    /*
     * We need to break the TB after this insn to execute
     * self-modifying code correctly and also to take
     * any pending interrupts immediately.
     */
    gen_goto_tb(s, 0, s->base.pc_next);
    return true;
}
static bool trans_SB(DisasContext *s, arg_SB *a)
{
    if (!dc_isar_feature(aa32_sb, s)) {
        return false;
    }
    /*
     * TODO: There is no speculation barrier opcode
     * for TCG; MB and end the TB instead.
     */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    gen_goto_tb(s, 0, s->base.pc_next);
    return true;
}
static bool trans_SETEND(DisasContext *s, arg_SETEND *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    if (a->E != (s->be_data == MO_BE)) {
        gen_helper_setend(cpu_env);
        s->base.is_jmp = DISAS_UPDATE;
    }
    return true;
}
/*
 * Preload instructions
 * All are nops, contingent on the appropriate arch level.
 */

static bool trans_PLD(DisasContext *s, arg_PLD *a)
{
    return ENABLE_ARCH_5TE;
}

static bool trans_PLDW(DisasContext *s, arg_PLD *a)
{
    return arm_dc_feature(s, ARM_FEATURE_V7MP);
}

static bool trans_PLI(DisasContext *s, arg_PLD *a)
{
    return ENABLE_ARCH_7;
}
/*
 * If-then
 */

static bool trans_IT(DisasContext *s, arg_IT *a)
{
    int cond_mask = a->cond_mask;

    /*
     * No actual code generated for this insn, just setup state.
     *
     * Combinations of firstcond and mask which set up an 0b1111
     * condition are UNPREDICTABLE; we take the CONSTRAINED
     * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
     * i.e. both meaning "execute always".
     */
    s->condexec_cond = (cond_mask >> 4) & 0xe;
    s->condexec_mask = cond_mask & 0x1f;
    return true;
}
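/*
 * Worked example: the Thumb insn 0xbf34 is ITE CC (firstcond 0b0011,
 * mask 0b0100), so trans_IT leaves condexec_cond = 0x2 and
 * condexec_mask = 0x14.  The per-insn advance at the bottom of
 * thumb_tr_translate_insn, which also runs for the IT insn itself,
 * then yields cond 0x3 (CC) for the first insn in the block and
 * cond 0x2 (CS) for the second, after which the mask empties and the
 * IT state clears.
 */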
static void disas_arm_insn(DisasContext *s, unsigned int insn)
{
    unsigned int cond = insn >> 28;

    /* M variants do not implement ARM mode; this must raise the INVSTATE
     * UsageFault exception.
     */
    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }

    if (cond == 0xf) {
        /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
         * choose to UNDEF. In ARMv5 and above the space is used
         * for miscellaneous unconditional instructions.
         */
        ARCH(5);

        /* Unconditional instructions.  */
        /* TODO: Perhaps merge these into one decodetree output file.  */
        if (disas_a32_uncond(s, insn) ||
            disas_vfp_uncond(s, insn) ||
            disas_neon_dp(s, insn) ||
            disas_neon_ls(s, insn) ||
            disas_neon_shared(s, insn)) {
            return;
        }
        /* fall back to legacy decoder */

        if (((insn >> 25) & 7) == 1) {
            /* NEON Data processing.  */
            if (disas_neon_data_insn(s, insn)) {
                goto illegal_op;
            }
            return;
        }
        if ((insn & 0x0e000f00) == 0x0c000100) {
            if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
                /* iWMMXt register transfer.  */
                if (extract32(s->c15_cpar, 1, 1)) {
                    if (!disas_iwmmxt_insn(s, insn)) {
                        return;
                    }
                }
            }
        }
        goto illegal_op;
    }
    if (cond != 0xe) {
        /* if not always execute, we generate a conditional jump to
           next instruction */
        arm_skip_unless(s, cond);
    }

    /* TODO: Perhaps merge these into one decodetree output file.  */
    if (disas_a32(s, insn) ||
        disas_vfp(s, insn)) {
        return;
    }
    /* fall back to legacy decoder */

    switch ((insn >> 24) & 0xf) {
    case 0xc:
    case 0xd:
    case 0xe:
        if (((insn >> 8) & 0xe) == 10) {
            /* VFP, but failed disas_vfp.  */
            goto illegal_op;
        }
        if (disas_coproc_insn(s, insn)) {
            /* Coprocessor.  */
            goto illegal_op;
        }
        break;
    default:
    illegal_op:
        unallocated_encoding(s);
        break;
    }
}
static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
{
    /*
     * Return true if this is a 16 bit instruction. We must be precise
     * about this (matching the decode).
     */
    if ((insn >> 11) < 0x1d) {
        /* Definitely a 16-bit instruction */
        return true;
    }

    /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
     * first half of a 32-bit Thumb insn. Thumb-1 cores might
     * end up actually treating this as two 16-bit insns, though,
     * if it's half of a bl/blx pair that might span a page boundary.
     */
    if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Thumb2 cores (including all M profile ones) always treat
         * 32-bit insns as 32-bit.
         */
        return false;
    }

    if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
        /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
         * is not on the next page; we merge this into a 32-bit
         * insn.
         */
        return false;
    }
    /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
     * 0b1111_1xxx_xxxx_xxxx : BL suffix;
     * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
     * -- handle as single 16 bit insn
     */
    return true;
}
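/*
 * For example, 0xb580 (a Thumb-1 push) has top five bits 0b10110 and is
 * reported as 16-bit immediately, while 0xe92d (the first halfword of a
 * T32 stmdb/push.w) has top five bits 0b11101 and goes through the
 * 32-bit checks above.
 */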
/* Translate a 32-bit thumb instruction. */
static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
{
    /*
     * ARMv6-M supports a limited subset of Thumb2 instructions.
     * Other Thumb1 architectures allow only 32-bit
     * combined BL/BLX prefix and suffix.
     */
    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_V7)) {
        int i;
        bool found = false;
        static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
                                               0xf3b08040 /* dsb */,
                                               0xf3b08050 /* dmb */,
                                               0xf3b08060 /* isb */,
                                               0xf3e08000 /* mrs */,
                                               0xf000d000 /* bl */};
        static const uint32_t armv6m_mask[] = {0xffe0d000,
                                               0xfff0d0f0,
                                               0xfff0d0f0,
                                               0xfff0d0f0,
                                               0xffe0d000,
                                               0xf800d000};

        for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
            if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
                found = true;
                break;
            }
        }
        if (!found) {
            goto illegal_op;
        }
    } else if ((insn & 0xf800e800) != 0xf000e800) {
        ARCH(6T2);
    }

    if ((insn & 0xef000000) == 0xef000000) {
        /*
         * T32 encodings 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
         * transform into
         * A32 encodings 0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
         */
        uint32_t a32_insn = (insn & 0xe2ffffff) |
            ((insn & (1 << 28)) >> 4) | (1 << 28);

        if (disas_neon_dp(s, a32_insn)) {
            return;
        }
    }

    if ((insn & 0xff100000) == 0xf9000000) {
        /*
         * T32 encodings 0b1111_1001_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
         * transform into
         * A32 encodings 0b1111_0100_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
         */
        uint32_t a32_insn = (insn & 0x00ffffff) | 0xf4000000;

        if (disas_neon_ls(s, a32_insn)) {
            return;
        }
    }

    /*
     * TODO: Perhaps merge these into one decodetree output file.
     * Note disas_vfp is written for a32 with cond field in the
     * top nibble.  The t32 encoding requires 0xe in the top nibble.
     */
    if (disas_t32(s, insn) ||
        disas_vfp_uncond(s, insn) ||
        disas_neon_shared(s, insn) ||
        ((insn >> 28) == 0xe && disas_vfp(s, insn))) {
        return;
    }
    /* fall back to legacy decoder */

    switch ((insn >> 25) & 0xf) {
    case 0: case 1: case 2: case 3:
        /* 16-bit instructions.  Should never happen.  */
        abort();
    case 6: case 7: case 14: case 15:
        /* Coprocessor.  */
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
            /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
            if (extract32(insn, 24, 2) == 3) {
                goto illegal_op; /* op0 = 0b11 : unallocated */
            }

            if (((insn >> 8) & 0xe) == 10 &&
                dc_isar_feature(aa32_fpsp_v2, s)) {
                /* FP, and the CPU supports it */
                goto illegal_op;
            }
            /* All other insns: NOCP */
            gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                               syn_uncategorized(),
                               default_exception_el(s));
            break;
        }
        if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
            if (disas_neon_data_insn(s, insn)) {
                goto illegal_op;
            }
        } else if (((insn >> 8) & 0xe) == 10) {
            /* VFP, but failed disas_vfp.  */
            goto illegal_op;
        } else {
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn(s, insn)) {
                goto illegal_op;
            }
        }
        break;
    default:
    illegal_op:
        unallocated_encoding(s);
        break;
    }
}
static void disas_thumb_insn(DisasContext *s, uint32_t insn)
{
    if (!disas_t16(s, insn)) {
        unallocated_encoding(s);
    }
}
static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
    /* Return true if the insn at dc->base.pc_next might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     * We know this is a Thumb insn, and our caller ensures we are
     * only called if dc->base.pc_next is less than 4 bytes from the page
     * boundary, so we cross the page if the first 16 bits indicate
     * that this is a 32 bit insn.
     */
    uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);

    return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
}
static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = env_archcpu(env);
    uint32_t tb_flags = dc->base.tb->flags;
    uint32_t condexec, core_mmu_idx;

    dc->isar = &cpu->isar;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = FIELD_EX32(tb_flags, TBFLAG_AM32, THUMB);
    dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
    condexec = FIELD_EX32(tb_flags, TBFLAG_AM32, CONDEXEC);
    dc->condexec_mask = (condexec & 0xf) << 1;
    dc->condexec_cond = condexec >> 4;

    core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
    dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);

    if (arm_feature(env, ARM_FEATURE_M)) {
        dc->vfp_enabled = 1;
        dc->be_data = MO_TE;
        dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_M32, HANDLER);
        dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            regime_is_secure(env, dc->mmu_idx);
        dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_M32, STACKCHECK);
        dc->v8m_fpccr_s_wrong =
            FIELD_EX32(tb_flags, TBFLAG_M32, FPCCR_S_WRONG);
        dc->v7m_new_fp_ctxt_needed =
            FIELD_EX32(tb_flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED);
        dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_M32, LSPACT);
    } else {
        dc->be_data =
            FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
        dc->debug_target_el =
            FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
        dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
        dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
        dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
        dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
        } else {
            dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
            dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
        }
    }
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
    dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
    dc->is_ldex = false;

    dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;

    /* If architectural single step active, limit to 1.  */
    if (is_singlestepping(dc)) {
        dc->base.max_insns = 1;
    }

    /* ARM is a fixed-length ISA.  Bound the number of insns to execute
       to those left on the page.  */
    if (!dc->thumb) {
        int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
        dc->base.max_insns = MIN(dc->base.max_insns, bound);
    }

    cpu_V0 = tcg_temp_new_i64();
    cpu_V1 = tcg_temp_new_i64();
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
}
static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block.  So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now.  This avoids complications trying
     * to do it at the end of the block.  (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn().  The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
}
static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next,
                       (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                       0);
    dc->insn_start = tcg_last_op();
}
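/*
 * The three words recorded here (pc, packed condexec bits, and a zero
 * placeholder for the exception syndrome) are what
 * restore_state_to_opc() at the bottom of this file reads back as
 * data[0..2] when unwinding to an instruction boundary after a fault.
 */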
static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->base.pc_next);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing.  */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->base.pc_next += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}
static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page.  */
    if (dc->base.pc_next >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block.  */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_swstep_exception(dc, 0, 0);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}
static void arm_post_translate_insn(DisasContext *dc)
{
    if (dc->condjmp && !dc->base.is_jmp) {
        gen_set_label(dc->condlabel);
        dc->condjmp = 0;
    }
    translator_loop_temp_check(&dc->base);
}
static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    unsigned int insn;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    dc->pc_curr = dc->base.pc_next;
    insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
    dc->insn = insn;
    dc->base.pc_next += 4;
    disas_arm_insn(dc, insn);

    arm_post_translate_insn(dc);

    /* ARM is a fixed-length ISA.  We performed the cross-page check
       in init_disas_context by adjusting max_insns.  */
}
static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
{
    /* Return true if this Thumb insn is always unconditional,
     * even inside an IT block. This is true of only a very few
     * instructions: BKPT, HLT, and SG.
     *
     * A larger class of instructions are UNPREDICTABLE if used
     * inside an IT block; we do not need to detect those here, because
     * what we do by default (perform the cc check and update the IT
     * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
     * choice for those situations.
     *
     * insn is either a 16-bit or a 32-bit instruction; the two are
     * distinguishable because for the 16-bit case the top 16 bits
     * are zeroes, and that isn't a valid 32-bit encoding.
     */
    if ((insn & 0xffffff00) == 0xbe00) {
        /* BKPT */
        return true;
    }

    if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_M)) {
        /* HLT: v8A only. This is unconditional even when it is going to
         * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
         * For v7 cores this was a plain old undefined encoding and so
         * honours its cc check. (We might be using the encoding as
         * a semihosting trap, but we don't change the cc check behaviour
         * on that account, because a debugger connected to a real v7A
         * core and emulating semihosting traps by catching the UNDEF
         * exception would also only see cases where the cc check passed.
         * No guest code should be trying to do a HLT semihosting trap
         * in an IT block anyway.
         */
        return true;
    }

    if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* SG: v8M only */
        return true;
    }

    return false;
}
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    dc->pc_curr = dc->base.pc_next;
    insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
    dc->base.pc_next += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next,
                                       dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->base.pc_next += 2;
    }
    dc->insn = insn;

    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        /*
         * Conditionally skip the insn. Note that both 0xe and 0xf mean
         * "always"; 0xf is not "never".
         */
        if (cond < 0x0e) {
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition.  */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch(dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->base.pc_next);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->base.pc_next);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->base.pc_next);
        }
    }
}
static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc = { };
    const TranslatorOps *ops = &arm_translator_ops;

    if (FIELD_EX32(tb->flags, TBFLAG_AM32, THUMB)) {
        ops = &thumb_translator_ops;
    }
#ifdef TARGET_AARCH64
    if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
        ops = &aarch64_translator_ops;
    }
#endif

    translator_loop(ops, &dc.base, cpu, tb, max_insns);
}
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}