4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
30 #include "qemu/bitops.h"
32 #include "hw/semihosting/semihost.h"
34 #include "exec/helper-proto.h"
35 #include "exec/helper-gen.h"
37 #include "trace-tcg.h"
41 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
43 /* currently all emulated v5 cores are also v5TE, so don't bother */
44 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
45 #define ENABLE_ARCH_5J dc_isar_feature(aa32_jazelle, s)
46 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
52 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
54 #include "translate.h"
56 #if defined(CONFIG_USER_ONLY)
59 #define IS_USER(s) (s->user)
62 /* We reuse the same 64-bit temporaries for efficiency. */
63 static TCGv_i64 cpu_V0
, cpu_V1
, cpu_M0
;
64 static TCGv_i32 cpu_R
[16];
65 TCGv_i32 cpu_CF
, cpu_NF
, cpu_VF
, cpu_ZF
;
66 TCGv_i64 cpu_exclusive_addr
;
67 TCGv_i64 cpu_exclusive_val
;
69 #include "exec/gen-icount.h"
71 static const char * const regnames
[] =
72 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
73 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
75 /* Function prototypes for gen_ functions calling Neon helpers. */
76 typedef void NeonGenThreeOpEnvFn(TCGv_i32
, TCGv_env
, TCGv_i32
,
78 /* Function prototypes for gen_ functions for fix point conversions */
79 typedef void VFPGenFixPointFn(TCGv_i32
, TCGv_i32
, TCGv_i32
, TCGv_ptr
);
81 /* initialize TCG globals. */
82 void arm_translate_init(void)
86 for (i
= 0; i
< 16; i
++) {
87 cpu_R
[i
] = tcg_global_mem_new_i32(cpu_env
,
88 offsetof(CPUARMState
, regs
[i
]),
91 cpu_CF
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUARMState
, CF
), "CF");
92 cpu_NF
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUARMState
, NF
), "NF");
93 cpu_VF
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUARMState
, VF
), "VF");
94 cpu_ZF
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUARMState
, ZF
), "ZF");
96 cpu_exclusive_addr
= tcg_global_mem_new_i64(cpu_env
,
97 offsetof(CPUARMState
, exclusive_addr
), "exclusive_addr");
98 cpu_exclusive_val
= tcg_global_mem_new_i64(cpu_env
,
99 offsetof(CPUARMState
, exclusive_val
), "exclusive_val");
101 a64_translate_init();
104 /* Flags for the disas_set_da_iss info argument:
105 * lower bits hold the Rt register number, higher bits are flags.
107 typedef enum ISSInfo
{
110 ISSInvalid
= (1 << 5),
111 ISSIsAcqRel
= (1 << 6),
112 ISSIsWrite
= (1 << 7),
113 ISSIs16Bit
= (1 << 8),
116 /* Save the syndrome information for a Data Abort */
117 static void disas_set_da_iss(DisasContext
*s
, MemOp memop
, ISSInfo issinfo
)
120 int sas
= memop
& MO_SIZE
;
121 bool sse
= memop
& MO_SIGN
;
122 bool is_acqrel
= issinfo
& ISSIsAcqRel
;
123 bool is_write
= issinfo
& ISSIsWrite
;
124 bool is_16bit
= issinfo
& ISSIs16Bit
;
125 int srt
= issinfo
& ISSRegMask
;
127 if (issinfo
& ISSInvalid
) {
128 /* Some callsites want to conditionally provide ISS info,
129 * eg "only if this was not a writeback"
135 /* For AArch32, insns where the src/dest is R15 never generate
136 * ISS information. Catching that here saves checking at all
142 syn
= syn_data_abort_with_iss(0, sas
, sse
, srt
, 0, is_acqrel
,
143 0, 0, 0, is_write
, 0, is_16bit
);
144 disas_set_insn_syndrome(s
, syn
);
147 static inline int get_a32_user_mem_index(DisasContext
*s
)
149 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
151 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
152 * otherwise, access as if at PL0.
154 switch (s
->mmu_idx
) {
155 case ARMMMUIdx_E2
: /* this one is UNPREDICTABLE */
156 case ARMMMUIdx_E10_0
:
157 case ARMMMUIdx_E10_1
:
158 case ARMMMUIdx_E10_1_PAN
:
159 return arm_to_core_mmu_idx(ARMMMUIdx_E10_0
);
161 case ARMMMUIdx_SE10_0
:
162 case ARMMMUIdx_SE10_1
:
163 case ARMMMUIdx_SE10_1_PAN
:
164 return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0
);
165 case ARMMMUIdx_MUser
:
166 case ARMMMUIdx_MPriv
:
167 return arm_to_core_mmu_idx(ARMMMUIdx_MUser
);
168 case ARMMMUIdx_MUserNegPri
:
169 case ARMMMUIdx_MPrivNegPri
:
170 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri
);
171 case ARMMMUIdx_MSUser
:
172 case ARMMMUIdx_MSPriv
:
173 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser
);
174 case ARMMMUIdx_MSUserNegPri
:
175 case ARMMMUIdx_MSPrivNegPri
:
176 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri
);
178 g_assert_not_reached();
182 static inline TCGv_i32
load_cpu_offset(int offset
)
184 TCGv_i32 tmp
= tcg_temp_new_i32();
185 tcg_gen_ld_i32(tmp
, cpu_env
, offset
);
189 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
191 static inline void store_cpu_offset(TCGv_i32 var
, int offset
)
193 tcg_gen_st_i32(var
, cpu_env
, offset
);
194 tcg_temp_free_i32(var
);
197 #define store_cpu_field(var, name) \
198 store_cpu_offset(var, offsetof(CPUARMState, name))
200 /* The architectural value of PC. */
201 static uint32_t read_pc(DisasContext
*s
)
203 return s
->pc_curr
+ (s
->thumb
? 4 : 8);
206 /* Set a variable to the value of a CPU register. */
207 static void load_reg_var(DisasContext
*s
, TCGv_i32 var
, int reg
)
210 tcg_gen_movi_i32(var
, read_pc(s
));
212 tcg_gen_mov_i32(var
, cpu_R
[reg
]);
216 /* Create a new temporary and set it to the value of a CPU register. */
217 static inline TCGv_i32
load_reg(DisasContext
*s
, int reg
)
219 TCGv_i32 tmp
= tcg_temp_new_i32();
220 load_reg_var(s
, tmp
, reg
);
225 * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
226 * This is used for load/store for which use of PC implies (literal),
227 * or ADD that implies ADR.
229 static TCGv_i32
add_reg_for_lit(DisasContext
*s
, int reg
, int ofs
)
231 TCGv_i32 tmp
= tcg_temp_new_i32();
234 tcg_gen_movi_i32(tmp
, (read_pc(s
) & ~3) + ofs
);
236 tcg_gen_addi_i32(tmp
, cpu_R
[reg
], ofs
);
241 /* Set a CPU register. The source must be a temporary and will be
243 static void store_reg(DisasContext
*s
, int reg
, TCGv_i32 var
)
246 /* In Thumb mode, we must ignore bit 0.
247 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
248 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
249 * We choose to ignore [1:0] in ARM mode for all architecture versions.
251 tcg_gen_andi_i32(var
, var
, s
->thumb
? ~1 : ~3);
252 s
->base
.is_jmp
= DISAS_JUMP
;
254 tcg_gen_mov_i32(cpu_R
[reg
], var
);
255 tcg_temp_free_i32(var
);
259 * Variant of store_reg which applies v8M stack-limit checks before updating
260 * SP. If the check fails this will result in an exception being taken.
261 * We disable the stack checks for CONFIG_USER_ONLY because we have
262 * no idea what the stack limits should be in that case.
263 * If stack checking is not being done this just acts like store_reg().
265 static void store_sp_checked(DisasContext
*s
, TCGv_i32 var
)
267 #ifndef CONFIG_USER_ONLY
268 if (s
->v8m_stackcheck
) {
269 gen_helper_v8m_stackcheck(cpu_env
, var
);
272 store_reg(s
, 13, var
);
275 /* Value extensions. */
276 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
277 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
278 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
279 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
281 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
282 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
285 static inline void gen_set_cpsr(TCGv_i32 var
, uint32_t mask
)
287 TCGv_i32 tmp_mask
= tcg_const_i32(mask
);
288 gen_helper_cpsr_write(cpu_env
, var
, tmp_mask
);
289 tcg_temp_free_i32(tmp_mask
);
291 /* Set NZCV flags from the high 4 bits of var. */
292 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
294 static void gen_exception_internal(int excp
)
296 TCGv_i32 tcg_excp
= tcg_const_i32(excp
);
298 assert(excp_is_internal(excp
));
299 gen_helper_exception_internal(cpu_env
, tcg_excp
);
300 tcg_temp_free_i32(tcg_excp
);
303 static void gen_step_complete_exception(DisasContext
*s
)
305 /* We just completed step of an insn. Move from Active-not-pending
306 * to Active-pending, and then also take the swstep exception.
307 * This corresponds to making the (IMPDEF) choice to prioritize
308 * swstep exceptions over asynchronous exceptions taken to an exception
309 * level where debug is disabled. This choice has the advantage that
310 * we do not need to maintain internal state corresponding to the
311 * ISV/EX syndrome bits between completion of the step and generation
312 * of the exception, and our syndrome information is always correct.
315 gen_swstep_exception(s
, 1, s
->is_ldex
);
316 s
->base
.is_jmp
= DISAS_NORETURN
;
319 static void gen_singlestep_exception(DisasContext
*s
)
321 /* Generate the right kind of exception for singlestep, which is
322 * either the architectural singlestep or EXCP_DEBUG for QEMU's
323 * gdb singlestepping.
326 gen_step_complete_exception(s
);
328 gen_exception_internal(EXCP_DEBUG
);
332 static inline bool is_singlestepping(DisasContext
*s
)
334 /* Return true if we are singlestepping either because of
335 * architectural singlestep or QEMU gdbstub singlestep. This does
336 * not include the command line '-singlestep' mode which is rather
337 * misnamed as it only means "one instruction per TB" and doesn't
338 * affect the code we generate.
340 return s
->base
.singlestep_enabled
|| s
->ss_active
;
343 static void gen_smul_dual(TCGv_i32 a
, TCGv_i32 b
)
345 TCGv_i32 tmp1
= tcg_temp_new_i32();
346 TCGv_i32 tmp2
= tcg_temp_new_i32();
347 tcg_gen_ext16s_i32(tmp1
, a
);
348 tcg_gen_ext16s_i32(tmp2
, b
);
349 tcg_gen_mul_i32(tmp1
, tmp1
, tmp2
);
350 tcg_temp_free_i32(tmp2
);
351 tcg_gen_sari_i32(a
, a
, 16);
352 tcg_gen_sari_i32(b
, b
, 16);
353 tcg_gen_mul_i32(b
, b
, a
);
354 tcg_gen_mov_i32(a
, tmp1
);
355 tcg_temp_free_i32(tmp1
);
358 /* Byteswap each halfword. */
359 static void gen_rev16(TCGv_i32 dest
, TCGv_i32 var
)
361 TCGv_i32 tmp
= tcg_temp_new_i32();
362 TCGv_i32 mask
= tcg_const_i32(0x00ff00ff);
363 tcg_gen_shri_i32(tmp
, var
, 8);
364 tcg_gen_and_i32(tmp
, tmp
, mask
);
365 tcg_gen_and_i32(var
, var
, mask
);
366 tcg_gen_shli_i32(var
, var
, 8);
367 tcg_gen_or_i32(dest
, var
, tmp
);
368 tcg_temp_free_i32(mask
);
369 tcg_temp_free_i32(tmp
);
372 /* Byteswap low halfword and sign extend. */
373 static void gen_revsh(TCGv_i32 dest
, TCGv_i32 var
)
375 tcg_gen_ext16u_i32(var
, var
);
376 tcg_gen_bswap16_i32(var
, var
);
377 tcg_gen_ext16s_i32(dest
, var
);
380 /* 32x32->64 multiply. Marks inputs as dead. */
381 static TCGv_i64
gen_mulu_i64_i32(TCGv_i32 a
, TCGv_i32 b
)
383 TCGv_i32 lo
= tcg_temp_new_i32();
384 TCGv_i32 hi
= tcg_temp_new_i32();
387 tcg_gen_mulu2_i32(lo
, hi
, a
, b
);
388 tcg_temp_free_i32(a
);
389 tcg_temp_free_i32(b
);
391 ret
= tcg_temp_new_i64();
392 tcg_gen_concat_i32_i64(ret
, lo
, hi
);
393 tcg_temp_free_i32(lo
);
394 tcg_temp_free_i32(hi
);
399 static TCGv_i64
gen_muls_i64_i32(TCGv_i32 a
, TCGv_i32 b
)
401 TCGv_i32 lo
= tcg_temp_new_i32();
402 TCGv_i32 hi
= tcg_temp_new_i32();
405 tcg_gen_muls2_i32(lo
, hi
, a
, b
);
406 tcg_temp_free_i32(a
);
407 tcg_temp_free_i32(b
);
409 ret
= tcg_temp_new_i64();
410 tcg_gen_concat_i32_i64(ret
, lo
, hi
);
411 tcg_temp_free_i32(lo
);
412 tcg_temp_free_i32(hi
);
417 /* Swap low and high halfwords. */
418 static void gen_swap_half(TCGv_i32 var
)
420 tcg_gen_rotri_i32(var
, var
, 16);
423 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
424 tmp = (t0 ^ t1) & 0x8000;
427 t0 = (t0 + t1) ^ tmp;
430 static void gen_add16(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
432 TCGv_i32 tmp
= tcg_temp_new_i32();
433 tcg_gen_xor_i32(tmp
, t0
, t1
);
434 tcg_gen_andi_i32(tmp
, tmp
, 0x8000);
435 tcg_gen_andi_i32(t0
, t0
, ~0x8000);
436 tcg_gen_andi_i32(t1
, t1
, ~0x8000);
437 tcg_gen_add_i32(t0
, t0
, t1
);
438 tcg_gen_xor_i32(dest
, t0
, tmp
);
439 tcg_temp_free_i32(tmp
);
442 /* Set N and Z flags from var. */
443 static inline void gen_logic_CC(TCGv_i32 var
)
445 tcg_gen_mov_i32(cpu_NF
, var
);
446 tcg_gen_mov_i32(cpu_ZF
, var
);
449 /* dest = T0 + T1 + CF. */
450 static void gen_add_carry(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
452 tcg_gen_add_i32(dest
, t0
, t1
);
453 tcg_gen_add_i32(dest
, dest
, cpu_CF
);
456 /* dest = T0 - T1 + CF - 1. */
457 static void gen_sub_carry(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
459 tcg_gen_sub_i32(dest
, t0
, t1
);
460 tcg_gen_add_i32(dest
, dest
, cpu_CF
);
461 tcg_gen_subi_i32(dest
, dest
, 1);
464 /* dest = T0 + T1. Compute C, N, V and Z flags */
465 static void gen_add_CC(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
467 TCGv_i32 tmp
= tcg_temp_new_i32();
468 tcg_gen_movi_i32(tmp
, 0);
469 tcg_gen_add2_i32(cpu_NF
, cpu_CF
, t0
, tmp
, t1
, tmp
);
470 tcg_gen_mov_i32(cpu_ZF
, cpu_NF
);
471 tcg_gen_xor_i32(cpu_VF
, cpu_NF
, t0
);
472 tcg_gen_xor_i32(tmp
, t0
, t1
);
473 tcg_gen_andc_i32(cpu_VF
, cpu_VF
, tmp
);
474 tcg_temp_free_i32(tmp
);
475 tcg_gen_mov_i32(dest
, cpu_NF
);
478 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
479 static void gen_adc_CC(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
481 TCGv_i32 tmp
= tcg_temp_new_i32();
482 if (TCG_TARGET_HAS_add2_i32
) {
483 tcg_gen_movi_i32(tmp
, 0);
484 tcg_gen_add2_i32(cpu_NF
, cpu_CF
, t0
, tmp
, cpu_CF
, tmp
);
485 tcg_gen_add2_i32(cpu_NF
, cpu_CF
, cpu_NF
, cpu_CF
, t1
, tmp
);
487 TCGv_i64 q0
= tcg_temp_new_i64();
488 TCGv_i64 q1
= tcg_temp_new_i64();
489 tcg_gen_extu_i32_i64(q0
, t0
);
490 tcg_gen_extu_i32_i64(q1
, t1
);
491 tcg_gen_add_i64(q0
, q0
, q1
);
492 tcg_gen_extu_i32_i64(q1
, cpu_CF
);
493 tcg_gen_add_i64(q0
, q0
, q1
);
494 tcg_gen_extr_i64_i32(cpu_NF
, cpu_CF
, q0
);
495 tcg_temp_free_i64(q0
);
496 tcg_temp_free_i64(q1
);
498 tcg_gen_mov_i32(cpu_ZF
, cpu_NF
);
499 tcg_gen_xor_i32(cpu_VF
, cpu_NF
, t0
);
500 tcg_gen_xor_i32(tmp
, t0
, t1
);
501 tcg_gen_andc_i32(cpu_VF
, cpu_VF
, tmp
);
502 tcg_temp_free_i32(tmp
);
503 tcg_gen_mov_i32(dest
, cpu_NF
);
506 /* dest = T0 - T1. Compute C, N, V and Z flags */
507 static void gen_sub_CC(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
510 tcg_gen_sub_i32(cpu_NF
, t0
, t1
);
511 tcg_gen_mov_i32(cpu_ZF
, cpu_NF
);
512 tcg_gen_setcond_i32(TCG_COND_GEU
, cpu_CF
, t0
, t1
);
513 tcg_gen_xor_i32(cpu_VF
, cpu_NF
, t0
);
514 tmp
= tcg_temp_new_i32();
515 tcg_gen_xor_i32(tmp
, t0
, t1
);
516 tcg_gen_and_i32(cpu_VF
, cpu_VF
, tmp
);
517 tcg_temp_free_i32(tmp
);
518 tcg_gen_mov_i32(dest
, cpu_NF
);
521 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
522 static void gen_sbc_CC(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
524 TCGv_i32 tmp
= tcg_temp_new_i32();
525 tcg_gen_not_i32(tmp
, t1
);
526 gen_adc_CC(dest
, t0
, tmp
);
527 tcg_temp_free_i32(tmp
);
530 #define GEN_SHIFT(name) \
531 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
533 TCGv_i32 tmp1, tmp2, tmp3; \
534 tmp1 = tcg_temp_new_i32(); \
535 tcg_gen_andi_i32(tmp1, t1, 0xff); \
536 tmp2 = tcg_const_i32(0); \
537 tmp3 = tcg_const_i32(0x1f); \
538 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
539 tcg_temp_free_i32(tmp3); \
540 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
541 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
542 tcg_temp_free_i32(tmp2); \
543 tcg_temp_free_i32(tmp1); \
549 static void gen_sar(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
552 tmp1
= tcg_temp_new_i32();
553 tcg_gen_andi_i32(tmp1
, t1
, 0xff);
554 tmp2
= tcg_const_i32(0x1f);
555 tcg_gen_movcond_i32(TCG_COND_GTU
, tmp1
, tmp1
, tmp2
, tmp2
, tmp1
);
556 tcg_temp_free_i32(tmp2
);
557 tcg_gen_sar_i32(dest
, t0
, tmp1
);
558 tcg_temp_free_i32(tmp1
);
561 static void shifter_out_im(TCGv_i32 var
, int shift
)
563 tcg_gen_extract_i32(cpu_CF
, var
, shift
, 1);
566 /* Shift by immediate. Includes special handling for shift == 0. */
567 static inline void gen_arm_shift_im(TCGv_i32 var
, int shiftop
,
568 int shift
, int flags
)
574 shifter_out_im(var
, 32 - shift
);
575 tcg_gen_shli_i32(var
, var
, shift
);
581 tcg_gen_shri_i32(cpu_CF
, var
, 31);
583 tcg_gen_movi_i32(var
, 0);
586 shifter_out_im(var
, shift
- 1);
587 tcg_gen_shri_i32(var
, var
, shift
);
594 shifter_out_im(var
, shift
- 1);
597 tcg_gen_sari_i32(var
, var
, shift
);
599 case 3: /* ROR/RRX */
602 shifter_out_im(var
, shift
- 1);
603 tcg_gen_rotri_i32(var
, var
, shift
); break;
605 TCGv_i32 tmp
= tcg_temp_new_i32();
606 tcg_gen_shli_i32(tmp
, cpu_CF
, 31);
608 shifter_out_im(var
, 0);
609 tcg_gen_shri_i32(var
, var
, 1);
610 tcg_gen_or_i32(var
, var
, tmp
);
611 tcg_temp_free_i32(tmp
);
616 static inline void gen_arm_shift_reg(TCGv_i32 var
, int shiftop
,
617 TCGv_i32 shift
, int flags
)
621 case 0: gen_helper_shl_cc(var
, cpu_env
, var
, shift
); break;
622 case 1: gen_helper_shr_cc(var
, cpu_env
, var
, shift
); break;
623 case 2: gen_helper_sar_cc(var
, cpu_env
, var
, shift
); break;
624 case 3: gen_helper_ror_cc(var
, cpu_env
, var
, shift
); break;
629 gen_shl(var
, var
, shift
);
632 gen_shr(var
, var
, shift
);
635 gen_sar(var
, var
, shift
);
637 case 3: tcg_gen_andi_i32(shift
, shift
, 0x1f);
638 tcg_gen_rotr_i32(var
, var
, shift
); break;
641 tcg_temp_free_i32(shift
);
645 * Generate a conditional based on ARM condition code cc.
646 * This is common between ARM and Aarch64 targets.
648 void arm_test_cc(DisasCompare
*cmp
, int cc
)
679 case 8: /* hi: C && !Z */
680 case 9: /* ls: !C || Z -> !(C && !Z) */
682 value
= tcg_temp_new_i32();
684 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
685 ZF is non-zero for !Z; so AND the two subexpressions. */
686 tcg_gen_neg_i32(value
, cpu_CF
);
687 tcg_gen_and_i32(value
, value
, cpu_ZF
);
690 case 10: /* ge: N == V -> N ^ V == 0 */
691 case 11: /* lt: N != V -> N ^ V != 0 */
692 /* Since we're only interested in the sign bit, == 0 is >= 0. */
694 value
= tcg_temp_new_i32();
696 tcg_gen_xor_i32(value
, cpu_VF
, cpu_NF
);
699 case 12: /* gt: !Z && N == V */
700 case 13: /* le: Z || N != V */
702 value
= tcg_temp_new_i32();
704 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
705 * the sign bit then AND with ZF to yield the result. */
706 tcg_gen_xor_i32(value
, cpu_VF
, cpu_NF
);
707 tcg_gen_sari_i32(value
, value
, 31);
708 tcg_gen_andc_i32(value
, cpu_ZF
, value
);
711 case 14: /* always */
712 case 15: /* always */
713 /* Use the ALWAYS condition, which will fold early.
714 * It doesn't matter what we use for the value. */
715 cond
= TCG_COND_ALWAYS
;
720 fprintf(stderr
, "Bad condition code 0x%x\n", cc
);
725 cond
= tcg_invert_cond(cond
);
731 cmp
->value_global
= global
;
734 void arm_free_cc(DisasCompare
*cmp
)
736 if (!cmp
->value_global
) {
737 tcg_temp_free_i32(cmp
->value
);
741 void arm_jump_cc(DisasCompare
*cmp
, TCGLabel
*label
)
743 tcg_gen_brcondi_i32(cmp
->cond
, cmp
->value
, 0, label
);
746 void arm_gen_test_cc(int cc
, TCGLabel
*label
)
749 arm_test_cc(&cmp
, cc
);
750 arm_jump_cc(&cmp
, label
);
754 static inline void gen_set_condexec(DisasContext
*s
)
756 if (s
->condexec_mask
) {
757 uint32_t val
= (s
->condexec_cond
<< 4) | (s
->condexec_mask
>> 1);
758 TCGv_i32 tmp
= tcg_temp_new_i32();
759 tcg_gen_movi_i32(tmp
, val
);
760 store_cpu_field(tmp
, condexec_bits
);
764 static inline void gen_set_pc_im(DisasContext
*s
, target_ulong val
)
766 tcg_gen_movi_i32(cpu_R
[15], val
);
769 /* Set PC and Thumb state from var. var is marked as dead. */
770 static inline void gen_bx(DisasContext
*s
, TCGv_i32 var
)
772 s
->base
.is_jmp
= DISAS_JUMP
;
773 tcg_gen_andi_i32(cpu_R
[15], var
, ~1);
774 tcg_gen_andi_i32(var
, var
, 1);
775 store_cpu_field(var
, thumb
);
779 * Set PC and Thumb state from var. var is marked as dead.
780 * For M-profile CPUs, include logic to detect exception-return
781 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
782 * and BX reg, and no others, and happens only for code in Handler mode.
783 * The Security Extension also requires us to check for the FNC_RETURN
784 * which signals a function return from non-secure state; this can happen
785 * in both Handler and Thread mode.
786 * To avoid having to do multiple comparisons in inline generated code,
787 * we make the check we do here loose, so it will match for EXC_RETURN
788 * in Thread mode. For system emulation do_v7m_exception_exit() checks
789 * for these spurious cases and returns without doing anything (giving
790 * the same behaviour as for a branch to a non-magic address).
792 * In linux-user mode it is unclear what the right behaviour for an
793 * attempted FNC_RETURN should be, because in real hardware this will go
794 * directly to Secure code (ie not the Linux kernel) which will then treat
795 * the error in any way it chooses. For QEMU we opt to make the FNC_RETURN
796 * attempt behave the way it would on a CPU without the security extension,
797 * which is to say "like a normal branch". That means we can simply treat
798 * all branches as normal with no magic address behaviour.
800 static inline void gen_bx_excret(DisasContext
*s
, TCGv_i32 var
)
802 /* Generate the same code here as for a simple bx, but flag via
803 * s->base.is_jmp that we need to do the rest of the work later.
806 #ifndef CONFIG_USER_ONLY
807 if (arm_dc_feature(s
, ARM_FEATURE_M_SECURITY
) ||
808 (s
->v7m_handler_mode
&& arm_dc_feature(s
, ARM_FEATURE_M
))) {
809 s
->base
.is_jmp
= DISAS_BX_EXCRET
;
814 static inline void gen_bx_excret_final_code(DisasContext
*s
)
816 /* Generate the code to finish possible exception return and end the TB */
817 TCGLabel
*excret_label
= gen_new_label();
820 if (arm_dc_feature(s
, ARM_FEATURE_M_SECURITY
)) {
821 /* Covers FNC_RETURN and EXC_RETURN magic */
822 min_magic
= FNC_RETURN_MIN_MAGIC
;
824 /* EXC_RETURN magic only */
825 min_magic
= EXC_RETURN_MIN_MAGIC
;
828 /* Is the new PC value in the magic range indicating exception return? */
829 tcg_gen_brcondi_i32(TCG_COND_GEU
, cpu_R
[15], min_magic
, excret_label
);
830 /* No: end the TB as we would for a DISAS_JMP */
831 if (is_singlestepping(s
)) {
832 gen_singlestep_exception(s
);
834 tcg_gen_exit_tb(NULL
, 0);
836 gen_set_label(excret_label
);
837 /* Yes: this is an exception return.
838 * At this point in runtime env->regs[15] and env->thumb will hold
839 * the exception-return magic number, which do_v7m_exception_exit()
840 * will read. Nothing else will be able to see those values because
841 * the cpu-exec main loop guarantees that we will always go straight
842 * from raising the exception to the exception-handling code.
844 * gen_ss_advance(s) does nothing on M profile currently but
845 * calling it is conceptually the right thing as we have executed
846 * this instruction (compare SWI, HVC, SMC handling).
849 gen_exception_internal(EXCP_EXCEPTION_EXIT
);
852 static inline void gen_bxns(DisasContext
*s
, int rm
)
854 TCGv_i32 var
= load_reg(s
, rm
);
856 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
857 * we need to sync state before calling it, but:
858 * - we don't need to do gen_set_pc_im() because the bxns helper will
859 * always set the PC itself
860 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
861 * unless it's outside an IT block or the last insn in an IT block,
862 * so we know that condexec == 0 (already set at the top of the TB)
863 * is correct in the non-UNPREDICTABLE cases, and we can choose
864 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
866 gen_helper_v7m_bxns(cpu_env
, var
);
867 tcg_temp_free_i32(var
);
868 s
->base
.is_jmp
= DISAS_EXIT
;
871 static inline void gen_blxns(DisasContext
*s
, int rm
)
873 TCGv_i32 var
= load_reg(s
, rm
);
875 /* We don't need to sync condexec state, for the same reason as bxns.
876 * We do however need to set the PC, because the blxns helper reads it.
877 * The blxns helper may throw an exception.
879 gen_set_pc_im(s
, s
->base
.pc_next
);
880 gen_helper_v7m_blxns(cpu_env
, var
);
881 tcg_temp_free_i32(var
);
882 s
->base
.is_jmp
= DISAS_EXIT
;
885 /* Variant of store_reg which uses branch&exchange logic when storing
886 to r15 in ARM architecture v7 and above. The source must be a temporary
887 and will be marked as dead. */
888 static inline void store_reg_bx(DisasContext
*s
, int reg
, TCGv_i32 var
)
890 if (reg
== 15 && ENABLE_ARCH_7
) {
893 store_reg(s
, reg
, var
);
897 /* Variant of store_reg which uses branch&exchange logic when storing
898 * to r15 in ARM architecture v5T and above. This is used for storing
899 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
900 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
901 static inline void store_reg_from_load(DisasContext
*s
, int reg
, TCGv_i32 var
)
903 if (reg
== 15 && ENABLE_ARCH_5
) {
904 gen_bx_excret(s
, var
);
906 store_reg(s
, reg
, var
);
910 #ifdef CONFIG_USER_ONLY
911 #define IS_USER_ONLY 1
913 #define IS_USER_ONLY 0
916 /* Abstractions of "generate code to do a guest load/store for
917 * AArch32", where a vaddr is always 32 bits (and is zero
918 * extended if we're a 64 bit core) and data is also
919 * 32 bits unless specifically doing a 64 bit access.
920 * These functions work like tcg_gen_qemu_{ld,st}* except
921 * that the address argument is TCGv_i32 rather than TCGv.
924 static inline TCGv
gen_aa32_addr(DisasContext
*s
, TCGv_i32 a32
, MemOp op
)
926 TCGv addr
= tcg_temp_new();
927 tcg_gen_extu_i32_tl(addr
, a32
);
929 /* Not needed for user-mode BE32, where we use MO_BE instead. */
930 if (!IS_USER_ONLY
&& s
->sctlr_b
&& (op
& MO_SIZE
) < MO_32
) {
931 tcg_gen_xori_tl(addr
, addr
, 4 - (1 << (op
& MO_SIZE
)));
936 static void gen_aa32_ld_i32(DisasContext
*s
, TCGv_i32 val
, TCGv_i32 a32
,
937 int index
, MemOp opc
)
941 if (arm_dc_feature(s
, ARM_FEATURE_M
) &&
942 !arm_dc_feature(s
, ARM_FEATURE_M_MAIN
)) {
946 addr
= gen_aa32_addr(s
, a32
, opc
);
947 tcg_gen_qemu_ld_i32(val
, addr
, index
, opc
);
951 static void gen_aa32_st_i32(DisasContext
*s
, TCGv_i32 val
, TCGv_i32 a32
,
952 int index
, MemOp opc
)
956 if (arm_dc_feature(s
, ARM_FEATURE_M
) &&
957 !arm_dc_feature(s
, ARM_FEATURE_M_MAIN
)) {
961 addr
= gen_aa32_addr(s
, a32
, opc
);
962 tcg_gen_qemu_st_i32(val
, addr
, index
, opc
);
966 #define DO_GEN_LD(SUFF, OPC) \
967 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
968 TCGv_i32 a32, int index) \
970 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
973 #define DO_GEN_ST(SUFF, OPC) \
974 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
975 TCGv_i32 a32, int index) \
977 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
980 static inline void gen_aa32_frob64(DisasContext
*s
, TCGv_i64 val
)
982 /* Not needed for user-mode BE32, where we use MO_BE instead. */
983 if (!IS_USER_ONLY
&& s
->sctlr_b
) {
984 tcg_gen_rotri_i64(val
, val
, 32);
988 static void gen_aa32_ld_i64(DisasContext
*s
, TCGv_i64 val
, TCGv_i32 a32
,
989 int index
, MemOp opc
)
991 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
992 tcg_gen_qemu_ld_i64(val
, addr
, index
, opc
);
993 gen_aa32_frob64(s
, val
);
997 static inline void gen_aa32_ld64(DisasContext
*s
, TCGv_i64 val
,
998 TCGv_i32 a32
, int index
)
1000 gen_aa32_ld_i64(s
, val
, a32
, index
, MO_Q
| s
->be_data
);
1003 static void gen_aa32_st_i64(DisasContext
*s
, TCGv_i64 val
, TCGv_i32 a32
,
1004 int index
, MemOp opc
)
1006 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
1008 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1009 if (!IS_USER_ONLY
&& s
->sctlr_b
) {
1010 TCGv_i64 tmp
= tcg_temp_new_i64();
1011 tcg_gen_rotri_i64(tmp
, val
, 32);
1012 tcg_gen_qemu_st_i64(tmp
, addr
, index
, opc
);
1013 tcg_temp_free_i64(tmp
);
1015 tcg_gen_qemu_st_i64(val
, addr
, index
, opc
);
1017 tcg_temp_free(addr
);
1020 static inline void gen_aa32_st64(DisasContext
*s
, TCGv_i64 val
,
1021 TCGv_i32 a32
, int index
)
1023 gen_aa32_st_i64(s
, val
, a32
, index
, MO_Q
| s
->be_data
);
1026 DO_GEN_LD(8u, MO_UB
)
1027 DO_GEN_LD(16u, MO_UW
)
1028 DO_GEN_LD(32u, MO_UL
)
1030 DO_GEN_ST(16, MO_UW
)
1031 DO_GEN_ST(32, MO_UL
)
1033 static inline void gen_hvc(DisasContext
*s
, int imm16
)
1035 /* The pre HVC helper handles cases when HVC gets trapped
1036 * as an undefined insn by runtime configuration (ie before
1037 * the insn really executes).
1039 gen_set_pc_im(s
, s
->pc_curr
);
1040 gen_helper_pre_hvc(cpu_env
);
1041 /* Otherwise we will treat this as a real exception which
1042 * happens after execution of the insn. (The distinction matters
1043 * for the PC value reported to the exception handler and also
1044 * for single stepping.)
1047 gen_set_pc_im(s
, s
->base
.pc_next
);
1048 s
->base
.is_jmp
= DISAS_HVC
;
1051 static inline void gen_smc(DisasContext
*s
)
1053 /* As with HVC, we may take an exception either before or after
1054 * the insn executes.
1058 gen_set_pc_im(s
, s
->pc_curr
);
1059 tmp
= tcg_const_i32(syn_aa32_smc());
1060 gen_helper_pre_smc(cpu_env
, tmp
);
1061 tcg_temp_free_i32(tmp
);
1062 gen_set_pc_im(s
, s
->base
.pc_next
);
1063 s
->base
.is_jmp
= DISAS_SMC
;
1066 static void gen_exception_internal_insn(DisasContext
*s
, uint32_t pc
, int excp
)
1068 gen_set_condexec(s
);
1069 gen_set_pc_im(s
, pc
);
1070 gen_exception_internal(excp
);
1071 s
->base
.is_jmp
= DISAS_NORETURN
;
1074 static void gen_exception_insn(DisasContext
*s
, uint32_t pc
, int excp
,
1075 int syn
, uint32_t target_el
)
1077 gen_set_condexec(s
);
1078 gen_set_pc_im(s
, pc
);
1079 gen_exception(excp
, syn
, target_el
);
1080 s
->base
.is_jmp
= DISAS_NORETURN
;
1083 static void gen_exception_bkpt_insn(DisasContext
*s
, uint32_t syn
)
1087 gen_set_condexec(s
);
1088 gen_set_pc_im(s
, s
->pc_curr
);
1089 tcg_syn
= tcg_const_i32(syn
);
1090 gen_helper_exception_bkpt_insn(cpu_env
, tcg_syn
);
1091 tcg_temp_free_i32(tcg_syn
);
1092 s
->base
.is_jmp
= DISAS_NORETURN
;
1095 static void unallocated_encoding(DisasContext
*s
)
1097 /* Unallocated and reserved encodings are uncategorized */
1098 gen_exception_insn(s
, s
->pc_curr
, EXCP_UDEF
, syn_uncategorized(),
1099 default_exception_el(s
));
1102 /* Force a TB lookup after an instruction that changes the CPU state. */
1103 static inline void gen_lookup_tb(DisasContext
*s
)
1105 tcg_gen_movi_i32(cpu_R
[15], s
->base
.pc_next
);
1106 s
->base
.is_jmp
= DISAS_EXIT
;
1109 static inline void gen_hlt(DisasContext
*s
, int imm
)
1111 /* HLT. This has two purposes.
1112 * Architecturally, it is an external halting debug instruction.
1113 * Since QEMU doesn't implement external debug, we treat this as
1114 * it is required for halting debug disabled: it will UNDEF.
1115 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1116 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1117 * must trigger semihosting even for ARMv7 and earlier, where
1118 * HLT was an undefined encoding.
1119 * In system mode, we don't allow userspace access to
1120 * semihosting, to provide some semblance of security
1121 * (and for consistency with our 32-bit semihosting).
1123 if (semihosting_enabled() &&
1124 #ifndef CONFIG_USER_ONLY
1125 s
->current_el
!= 0 &&
1127 (imm
== (s
->thumb
? 0x3c : 0xf000))) {
1128 gen_exception_internal_insn(s
, s
->pc_curr
, EXCP_SEMIHOST
);
1132 unallocated_encoding(s
);
1135 static TCGv_ptr
get_fpstatus_ptr(int neon
)
1137 TCGv_ptr statusptr
= tcg_temp_new_ptr();
1140 offset
= offsetof(CPUARMState
, vfp
.standard_fp_status
);
1142 offset
= offsetof(CPUARMState
, vfp
.fp_status
);
1144 tcg_gen_addi_ptr(statusptr
, cpu_env
, offset
);
1148 static inline long vfp_reg_offset(bool dp
, unsigned reg
)
1151 return offsetof(CPUARMState
, vfp
.zregs
[reg
>> 1].d
[reg
& 1]);
1153 long ofs
= offsetof(CPUARMState
, vfp
.zregs
[reg
>> 2].d
[(reg
>> 1) & 1]);
1155 ofs
+= offsetof(CPU_DoubleU
, l
.upper
);
1157 ofs
+= offsetof(CPU_DoubleU
, l
.lower
);
1163 /* Return the offset of a 32-bit piece of a NEON register.
1164 zero is the least significant end of the register. */
1166 neon_reg_offset (int reg
, int n
)
1170 return vfp_reg_offset(0, sreg
);
1173 /* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
1174 * where 0 is the least significant end of the register.
1177 neon_element_offset(int reg
, int element
, MemOp size
)
1179 int element_size
= 1 << size
;
1180 int ofs
= element
* element_size
;
1181 #ifdef HOST_WORDS_BIGENDIAN
1182 /* Calculate the offset assuming fully little-endian,
1183 * then XOR to account for the order of the 8-byte units.
1185 if (element_size
< 8) {
1186 ofs
^= 8 - element_size
;
1189 return neon_reg_offset(reg
, 0) + ofs
;
1192 static TCGv_i32
neon_load_reg(int reg
, int pass
)
1194 TCGv_i32 tmp
= tcg_temp_new_i32();
1195 tcg_gen_ld_i32(tmp
, cpu_env
, neon_reg_offset(reg
, pass
));
1199 static void neon_load_element(TCGv_i32 var
, int reg
, int ele
, MemOp mop
)
1201 long offset
= neon_element_offset(reg
, ele
, mop
& MO_SIZE
);
1205 tcg_gen_ld8u_i32(var
, cpu_env
, offset
);
1208 tcg_gen_ld16u_i32(var
, cpu_env
, offset
);
1211 tcg_gen_ld_i32(var
, cpu_env
, offset
);
1214 g_assert_not_reached();
1218 static void neon_load_element64(TCGv_i64 var
, int reg
, int ele
, MemOp mop
)
1220 long offset
= neon_element_offset(reg
, ele
, mop
& MO_SIZE
);
1224 tcg_gen_ld8u_i64(var
, cpu_env
, offset
);
1227 tcg_gen_ld16u_i64(var
, cpu_env
, offset
);
1230 tcg_gen_ld32u_i64(var
, cpu_env
, offset
);
1233 tcg_gen_ld_i64(var
, cpu_env
, offset
);
1236 g_assert_not_reached();
1240 static void neon_store_reg(int reg
, int pass
, TCGv_i32 var
)
1242 tcg_gen_st_i32(var
, cpu_env
, neon_reg_offset(reg
, pass
));
1243 tcg_temp_free_i32(var
);
1246 static void neon_store_element(int reg
, int ele
, MemOp size
, TCGv_i32 var
)
1248 long offset
= neon_element_offset(reg
, ele
, size
);
1252 tcg_gen_st8_i32(var
, cpu_env
, offset
);
1255 tcg_gen_st16_i32(var
, cpu_env
, offset
);
1258 tcg_gen_st_i32(var
, cpu_env
, offset
);
1261 g_assert_not_reached();
1265 static void neon_store_element64(int reg
, int ele
, MemOp size
, TCGv_i64 var
)
1267 long offset
= neon_element_offset(reg
, ele
, size
);
1271 tcg_gen_st8_i64(var
, cpu_env
, offset
);
1274 tcg_gen_st16_i64(var
, cpu_env
, offset
);
1277 tcg_gen_st32_i64(var
, cpu_env
, offset
);
1280 tcg_gen_st_i64(var
, cpu_env
, offset
);
1283 g_assert_not_reached();
1287 static inline void neon_load_reg64(TCGv_i64 var
, int reg
)
1289 tcg_gen_ld_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1292 static inline void neon_store_reg64(TCGv_i64 var
, int reg
)
1294 tcg_gen_st_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1297 static inline void neon_load_reg32(TCGv_i32 var
, int reg
)
1299 tcg_gen_ld_i32(var
, cpu_env
, vfp_reg_offset(false, reg
));
1302 static inline void neon_store_reg32(TCGv_i32 var
, int reg
)
1304 tcg_gen_st_i32(var
, cpu_env
, vfp_reg_offset(false, reg
));
1307 static TCGv_ptr
vfp_reg_ptr(bool dp
, int reg
)
1309 TCGv_ptr ret
= tcg_temp_new_ptr();
1310 tcg_gen_addi_ptr(ret
, cpu_env
, vfp_reg_offset(dp
, reg
));
1314 #define ARM_CP_RW_BIT (1 << 20)
1316 /* Include the VFP decoder */
1317 #include "translate-vfp.inc.c"
1319 static inline void iwmmxt_load_reg(TCGv_i64 var
, int reg
)
1321 tcg_gen_ld_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1324 static inline void iwmmxt_store_reg(TCGv_i64 var
, int reg
)
1326 tcg_gen_st_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1329 static inline TCGv_i32
iwmmxt_load_creg(int reg
)
1331 TCGv_i32 var
= tcg_temp_new_i32();
1332 tcg_gen_ld_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1336 static inline void iwmmxt_store_creg(int reg
, TCGv_i32 var
)
1338 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1339 tcg_temp_free_i32(var
);
1342 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn
)
1344 iwmmxt_store_reg(cpu_M0
, rn
);
1347 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn
)
1349 iwmmxt_load_reg(cpu_M0
, rn
);
1352 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn
)
1354 iwmmxt_load_reg(cpu_V1
, rn
);
1355 tcg_gen_or_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1358 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn
)
1360 iwmmxt_load_reg(cpu_V1
, rn
);
1361 tcg_gen_and_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1364 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn
)
1366 iwmmxt_load_reg(cpu_V1
, rn
);
1367 tcg_gen_xor_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1370 #define IWMMXT_OP(name) \
1371 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1373 iwmmxt_load_reg(cpu_V1, rn); \
1374 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1377 #define IWMMXT_OP_ENV(name) \
1378 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1380 iwmmxt_load_reg(cpu_V1, rn); \
1381 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1384 #define IWMMXT_OP_ENV_SIZE(name) \
1385 IWMMXT_OP_ENV(name##b) \
1386 IWMMXT_OP_ENV(name##w) \
1387 IWMMXT_OP_ENV(name##l)
1389 #define IWMMXT_OP_ENV1(name) \
1390 static inline void gen_op_iwmmxt_##name##_M0(void) \
1392 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1406 IWMMXT_OP_ENV_SIZE(unpackl
)
1407 IWMMXT_OP_ENV_SIZE(unpackh
)
1409 IWMMXT_OP_ENV1(unpacklub
)
1410 IWMMXT_OP_ENV1(unpackluw
)
1411 IWMMXT_OP_ENV1(unpacklul
)
1412 IWMMXT_OP_ENV1(unpackhub
)
1413 IWMMXT_OP_ENV1(unpackhuw
)
1414 IWMMXT_OP_ENV1(unpackhul
)
1415 IWMMXT_OP_ENV1(unpacklsb
)
1416 IWMMXT_OP_ENV1(unpacklsw
)
1417 IWMMXT_OP_ENV1(unpacklsl
)
1418 IWMMXT_OP_ENV1(unpackhsb
)
1419 IWMMXT_OP_ENV1(unpackhsw
)
1420 IWMMXT_OP_ENV1(unpackhsl
)
1422 IWMMXT_OP_ENV_SIZE(cmpeq
)
1423 IWMMXT_OP_ENV_SIZE(cmpgtu
)
1424 IWMMXT_OP_ENV_SIZE(cmpgts
)
1426 IWMMXT_OP_ENV_SIZE(mins
)
1427 IWMMXT_OP_ENV_SIZE(minu
)
1428 IWMMXT_OP_ENV_SIZE(maxs
)
1429 IWMMXT_OP_ENV_SIZE(maxu
)
1431 IWMMXT_OP_ENV_SIZE(subn
)
1432 IWMMXT_OP_ENV_SIZE(addn
)
1433 IWMMXT_OP_ENV_SIZE(subu
)
1434 IWMMXT_OP_ENV_SIZE(addu
)
1435 IWMMXT_OP_ENV_SIZE(subs
)
1436 IWMMXT_OP_ENV_SIZE(adds
)
1438 IWMMXT_OP_ENV(avgb0
)
1439 IWMMXT_OP_ENV(avgb1
)
1440 IWMMXT_OP_ENV(avgw0
)
1441 IWMMXT_OP_ENV(avgw1
)
1443 IWMMXT_OP_ENV(packuw
)
1444 IWMMXT_OP_ENV(packul
)
1445 IWMMXT_OP_ENV(packuq
)
1446 IWMMXT_OP_ENV(packsw
)
1447 IWMMXT_OP_ENV(packsl
)
1448 IWMMXT_OP_ENV(packsq
)
1450 static void gen_op_iwmmxt_set_mup(void)
1453 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1454 tcg_gen_ori_i32(tmp
, tmp
, 2);
1455 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1458 static void gen_op_iwmmxt_set_cup(void)
1461 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1462 tcg_gen_ori_i32(tmp
, tmp
, 1);
1463 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1466 static void gen_op_iwmmxt_setpsr_nz(void)
1468 TCGv_i32 tmp
= tcg_temp_new_i32();
1469 gen_helper_iwmmxt_setpsr_nz(tmp
, cpu_M0
);
1470 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCASF
]);
1473 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn
)
1475 iwmmxt_load_reg(cpu_V1
, rn
);
1476 tcg_gen_ext32u_i64(cpu_V1
, cpu_V1
);
1477 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1480 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
,
1487 rd
= (insn
>> 16) & 0xf;
1488 tmp
= load_reg(s
, rd
);
1490 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
1491 if (insn
& (1 << 24)) {
1493 if (insn
& (1 << 23))
1494 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1496 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1497 tcg_gen_mov_i32(dest
, tmp
);
1498 if (insn
& (1 << 21))
1499 store_reg(s
, rd
, tmp
);
1501 tcg_temp_free_i32(tmp
);
1502 } else if (insn
& (1 << 21)) {
1504 tcg_gen_mov_i32(dest
, tmp
);
1505 if (insn
& (1 << 23))
1506 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1508 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1509 store_reg(s
, rd
, tmp
);
1510 } else if (!(insn
& (1 << 23)))
1515 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
, TCGv_i32 dest
)
1517 int rd
= (insn
>> 0) & 0xf;
1520 if (insn
& (1 << 8)) {
1521 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
) {
1524 tmp
= iwmmxt_load_creg(rd
);
1527 tmp
= tcg_temp_new_i32();
1528 iwmmxt_load_reg(cpu_V0
, rd
);
1529 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
1531 tcg_gen_andi_i32(tmp
, tmp
, mask
);
1532 tcg_gen_mov_i32(dest
, tmp
);
1533 tcg_temp_free_i32(tmp
);
1537 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1538 (ie. an undefined instruction). */
1539 static int disas_iwmmxt_insn(DisasContext
*s
, uint32_t insn
)
1542 int rdhi
, rdlo
, rd0
, rd1
, i
;
1544 TCGv_i32 tmp
, tmp2
, tmp3
;
1546 if ((insn
& 0x0e000e00) == 0x0c000000) {
1547 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1549 rdlo
= (insn
>> 12) & 0xf;
1550 rdhi
= (insn
>> 16) & 0xf;
1551 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1552 iwmmxt_load_reg(cpu_V0
, wrd
);
1553 tcg_gen_extrl_i64_i32(cpu_R
[rdlo
], cpu_V0
);
1554 tcg_gen_extrh_i64_i32(cpu_R
[rdhi
], cpu_V0
);
1555 } else { /* TMCRR */
1556 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
1557 iwmmxt_store_reg(cpu_V0
, wrd
);
1558 gen_op_iwmmxt_set_mup();
1563 wrd
= (insn
>> 12) & 0xf;
1564 addr
= tcg_temp_new_i32();
1565 if (gen_iwmmxt_address(s
, insn
, addr
)) {
1566 tcg_temp_free_i32(addr
);
1569 if (insn
& ARM_CP_RW_BIT
) {
1570 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1571 tmp
= tcg_temp_new_i32();
1572 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
1573 iwmmxt_store_creg(wrd
, tmp
);
1576 if (insn
& (1 << 8)) {
1577 if (insn
& (1 << 22)) { /* WLDRD */
1578 gen_aa32_ld64(s
, cpu_M0
, addr
, get_mem_index(s
));
1580 } else { /* WLDRW wRd */
1581 tmp
= tcg_temp_new_i32();
1582 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
1585 tmp
= tcg_temp_new_i32();
1586 if (insn
& (1 << 22)) { /* WLDRH */
1587 gen_aa32_ld16u(s
, tmp
, addr
, get_mem_index(s
));
1588 } else { /* WLDRB */
1589 gen_aa32_ld8u(s
, tmp
, addr
, get_mem_index(s
));
1593 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1594 tcg_temp_free_i32(tmp
);
1596 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1599 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1600 tmp
= iwmmxt_load_creg(wrd
);
1601 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
1603 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1604 tmp
= tcg_temp_new_i32();
1605 if (insn
& (1 << 8)) {
1606 if (insn
& (1 << 22)) { /* WSTRD */
1607 gen_aa32_st64(s
, cpu_M0
, addr
, get_mem_index(s
));
1608 } else { /* WSTRW wRd */
1609 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1610 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
1613 if (insn
& (1 << 22)) { /* WSTRH */
1614 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1615 gen_aa32_st16(s
, tmp
, addr
, get_mem_index(s
));
1616 } else { /* WSTRB */
1617 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1618 gen_aa32_st8(s
, tmp
, addr
, get_mem_index(s
));
1622 tcg_temp_free_i32(tmp
);
1624 tcg_temp_free_i32(addr
);
1628 if ((insn
& 0x0f000000) != 0x0e000000)
1631 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1632 case 0x000: /* WOR */
1633 wrd
= (insn
>> 12) & 0xf;
1634 rd0
= (insn
>> 0) & 0xf;
1635 rd1
= (insn
>> 16) & 0xf;
1636 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1637 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1638 gen_op_iwmmxt_setpsr_nz();
1639 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1640 gen_op_iwmmxt_set_mup();
1641 gen_op_iwmmxt_set_cup();
1643 case 0x011: /* TMCR */
1646 rd
= (insn
>> 12) & 0xf;
1647 wrd
= (insn
>> 16) & 0xf;
1649 case ARM_IWMMXT_wCID
:
1650 case ARM_IWMMXT_wCASF
:
1652 case ARM_IWMMXT_wCon
:
1653 gen_op_iwmmxt_set_cup();
1655 case ARM_IWMMXT_wCSSF
:
1656 tmp
= iwmmxt_load_creg(wrd
);
1657 tmp2
= load_reg(s
, rd
);
1658 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1659 tcg_temp_free_i32(tmp2
);
1660 iwmmxt_store_creg(wrd
, tmp
);
1662 case ARM_IWMMXT_wCGR0
:
1663 case ARM_IWMMXT_wCGR1
:
1664 case ARM_IWMMXT_wCGR2
:
1665 case ARM_IWMMXT_wCGR3
:
1666 gen_op_iwmmxt_set_cup();
1667 tmp
= load_reg(s
, rd
);
1668 iwmmxt_store_creg(wrd
, tmp
);
1674 case 0x100: /* WXOR */
1675 wrd
= (insn
>> 12) & 0xf;
1676 rd0
= (insn
>> 0) & 0xf;
1677 rd1
= (insn
>> 16) & 0xf;
1678 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1679 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1680 gen_op_iwmmxt_setpsr_nz();
1681 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1682 gen_op_iwmmxt_set_mup();
1683 gen_op_iwmmxt_set_cup();
1685 case 0x111: /* TMRC */
1688 rd
= (insn
>> 12) & 0xf;
1689 wrd
= (insn
>> 16) & 0xf;
1690 tmp
= iwmmxt_load_creg(wrd
);
1691 store_reg(s
, rd
, tmp
);
1693 case 0x300: /* WANDN */
1694 wrd
= (insn
>> 12) & 0xf;
1695 rd0
= (insn
>> 0) & 0xf;
1696 rd1
= (insn
>> 16) & 0xf;
1697 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1698 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1699 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1700 gen_op_iwmmxt_setpsr_nz();
1701 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1702 gen_op_iwmmxt_set_mup();
1703 gen_op_iwmmxt_set_cup();
1705 case 0x200: /* WAND */
1706 wrd
= (insn
>> 12) & 0xf;
1707 rd0
= (insn
>> 0) & 0xf;
1708 rd1
= (insn
>> 16) & 0xf;
1709 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1710 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1711 gen_op_iwmmxt_setpsr_nz();
1712 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1713 gen_op_iwmmxt_set_mup();
1714 gen_op_iwmmxt_set_cup();
1716 case 0x810: case 0xa10: /* WMADD */
1717 wrd
= (insn
>> 12) & 0xf;
1718 rd0
= (insn
>> 0) & 0xf;
1719 rd1
= (insn
>> 16) & 0xf;
1720 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1721 if (insn
& (1 << 21))
1722 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1724 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1725 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1726 gen_op_iwmmxt_set_mup();
1728 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1729 wrd
= (insn
>> 12) & 0xf;
1730 rd0
= (insn
>> 16) & 0xf;
1731 rd1
= (insn
>> 0) & 0xf;
1732 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1733 switch ((insn
>> 22) & 3) {
1735 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
1738 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
1741 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
1746 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1747 gen_op_iwmmxt_set_mup();
1748 gen_op_iwmmxt_set_cup();
1750 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1751 wrd
= (insn
>> 12) & 0xf;
1752 rd0
= (insn
>> 16) & 0xf;
1753 rd1
= (insn
>> 0) & 0xf;
1754 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1755 switch ((insn
>> 22) & 3) {
1757 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
1760 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
1763 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
1768 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1769 gen_op_iwmmxt_set_mup();
1770 gen_op_iwmmxt_set_cup();
1772 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1773 wrd
= (insn
>> 12) & 0xf;
1774 rd0
= (insn
>> 16) & 0xf;
1775 rd1
= (insn
>> 0) & 0xf;
1776 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1777 if (insn
& (1 << 22))
1778 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
1780 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
1781 if (!(insn
& (1 << 20)))
1782 gen_op_iwmmxt_addl_M0_wRn(wrd
);
1783 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1784 gen_op_iwmmxt_set_mup();
1786 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1787 wrd
= (insn
>> 12) & 0xf;
1788 rd0
= (insn
>> 16) & 0xf;
1789 rd1
= (insn
>> 0) & 0xf;
1790 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1791 if (insn
& (1 << 21)) {
1792 if (insn
& (1 << 20))
1793 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
1795 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
1797 if (insn
& (1 << 20))
1798 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
1800 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
1802 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1803 gen_op_iwmmxt_set_mup();
1805 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1806 wrd
= (insn
>> 12) & 0xf;
1807 rd0
= (insn
>> 16) & 0xf;
1808 rd1
= (insn
>> 0) & 0xf;
1809 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1810 if (insn
& (1 << 21))
1811 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
1813 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
1814 if (!(insn
& (1 << 20))) {
1815 iwmmxt_load_reg(cpu_V1
, wrd
);
1816 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1818 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1819 gen_op_iwmmxt_set_mup();
1821 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1822 wrd
= (insn
>> 12) & 0xf;
1823 rd0
= (insn
>> 16) & 0xf;
1824 rd1
= (insn
>> 0) & 0xf;
1825 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1826 switch ((insn
>> 22) & 3) {
1828 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
1831 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
1834 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
1839 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1840 gen_op_iwmmxt_set_mup();
1841 gen_op_iwmmxt_set_cup();
1843 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1844 wrd
= (insn
>> 12) & 0xf;
1845 rd0
= (insn
>> 16) & 0xf;
1846 rd1
= (insn
>> 0) & 0xf;
1847 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1848 if (insn
& (1 << 22)) {
1849 if (insn
& (1 << 20))
1850 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
1852 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
1854 if (insn
& (1 << 20))
1855 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
1857 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
1859 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1860 gen_op_iwmmxt_set_mup();
1861 gen_op_iwmmxt_set_cup();
1863 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1864 wrd
= (insn
>> 12) & 0xf;
1865 rd0
= (insn
>> 16) & 0xf;
1866 rd1
= (insn
>> 0) & 0xf;
1867 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1868 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
1869 tcg_gen_andi_i32(tmp
, tmp
, 7);
1870 iwmmxt_load_reg(cpu_V1
, rd1
);
1871 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
1872 tcg_temp_free_i32(tmp
);
1873 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1874 gen_op_iwmmxt_set_mup();
1876 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1877 if (((insn
>> 6) & 3) == 3)
1879 rd
= (insn
>> 12) & 0xf;
1880 wrd
= (insn
>> 16) & 0xf;
1881 tmp
= load_reg(s
, rd
);
1882 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1883 switch ((insn
>> 6) & 3) {
1885 tmp2
= tcg_const_i32(0xff);
1886 tmp3
= tcg_const_i32((insn
& 7) << 3);
1889 tmp2
= tcg_const_i32(0xffff);
1890 tmp3
= tcg_const_i32((insn
& 3) << 4);
1893 tmp2
= tcg_const_i32(0xffffffff);
1894 tmp3
= tcg_const_i32((insn
& 1) << 5);
1900 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, tmp
, tmp2
, tmp3
);
1901 tcg_temp_free_i32(tmp3
);
1902 tcg_temp_free_i32(tmp2
);
1903 tcg_temp_free_i32(tmp
);
1904 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1905 gen_op_iwmmxt_set_mup();
1907 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1908 rd
= (insn
>> 12) & 0xf;
1909 wrd
= (insn
>> 16) & 0xf;
1910 if (rd
== 15 || ((insn
>> 22) & 3) == 3)
1912 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1913 tmp
= tcg_temp_new_i32();
1914 switch ((insn
>> 22) & 3) {
1916 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 7) << 3);
1917 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1919 tcg_gen_ext8s_i32(tmp
, tmp
);
1921 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
1925 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 3) << 4);
1926 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1928 tcg_gen_ext16s_i32(tmp
, tmp
);
1930 tcg_gen_andi_i32(tmp
, tmp
, 0xffff);
1934 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 1) << 5);
1935 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1938 store_reg(s
, rd
, tmp
);
1940 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1941 if ((insn
& 0x000ff008) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1943 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1944 switch ((insn
>> 22) & 3) {
1946 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 7) << 2) + 0);
1949 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 3) << 3) + 4);
1952 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 1) << 4) + 12);
1955 tcg_gen_shli_i32(tmp
, tmp
, 28);
1957 tcg_temp_free_i32(tmp
);
1959 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1960 if (((insn
>> 6) & 3) == 3)
1962 rd
= (insn
>> 12) & 0xf;
1963 wrd
= (insn
>> 16) & 0xf;
1964 tmp
= load_reg(s
, rd
);
1965 switch ((insn
>> 6) & 3) {
1967 gen_helper_iwmmxt_bcstb(cpu_M0
, tmp
);
1970 gen_helper_iwmmxt_bcstw(cpu_M0
, tmp
);
1973 gen_helper_iwmmxt_bcstl(cpu_M0
, tmp
);
1976 tcg_temp_free_i32(tmp
);
1977 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1978 gen_op_iwmmxt_set_mup();
1980 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1981 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
1983 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
1984 tmp2
= tcg_temp_new_i32();
1985 tcg_gen_mov_i32(tmp2
, tmp
);
1986 switch ((insn
>> 22) & 3) {
1988 for (i
= 0; i
< 7; i
++) {
1989 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
1990 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
1994 for (i
= 0; i
< 3; i
++) {
1995 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
1996 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2000 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
2001 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2005 tcg_temp_free_i32(tmp2
);
2006 tcg_temp_free_i32(tmp
);
2008 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2009 wrd
= (insn
>> 12) & 0xf;
2010 rd0
= (insn
>> 16) & 0xf;
2011 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2012 switch ((insn
>> 22) & 3) {
2014 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
2017 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
2020 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
2025 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2026 gen_op_iwmmxt_set_mup();
2028 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2029 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2031 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2032 tmp2
= tcg_temp_new_i32();
2033 tcg_gen_mov_i32(tmp2
, tmp
);
2034 switch ((insn
>> 22) & 3) {
2036 for (i
= 0; i
< 7; i
++) {
2037 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
2038 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2042 for (i
= 0; i
< 3; i
++) {
2043 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
2044 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2048 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
2049 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2053 tcg_temp_free_i32(tmp2
);
2054 tcg_temp_free_i32(tmp
);
2056 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2057 rd
= (insn
>> 12) & 0xf;
2058 rd0
= (insn
>> 16) & 0xf;
2059 if ((insn
& 0xf) != 0 || ((insn
>> 22) & 3) == 3)
2061 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2062 tmp
= tcg_temp_new_i32();
2063 switch ((insn
>> 22) & 3) {
2065 gen_helper_iwmmxt_msbb(tmp
, cpu_M0
);
2068 gen_helper_iwmmxt_msbw(tmp
, cpu_M0
);
2071 gen_helper_iwmmxt_msbl(tmp
, cpu_M0
);
2074 store_reg(s
, rd
, tmp
);
2076 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2077 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2078 wrd
= (insn
>> 12) & 0xf;
2079 rd0
= (insn
>> 16) & 0xf;
2080 rd1
= (insn
>> 0) & 0xf;
2081 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2082 switch ((insn
>> 22) & 3) {
2084 if (insn
& (1 << 21))
2085 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
2087 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
2090 if (insn
& (1 << 21))
2091 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
2093 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
2096 if (insn
& (1 << 21))
2097 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
2099 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
2104 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2105 gen_op_iwmmxt_set_mup();
2106 gen_op_iwmmxt_set_cup();
2108 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2109 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2110 wrd
= (insn
>> 12) & 0xf;
2111 rd0
= (insn
>> 16) & 0xf;
2112 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2113 switch ((insn
>> 22) & 3) {
2115 if (insn
& (1 << 21))
2116 gen_op_iwmmxt_unpacklsb_M0();
2118 gen_op_iwmmxt_unpacklub_M0();
2121 if (insn
& (1 << 21))
2122 gen_op_iwmmxt_unpacklsw_M0();
2124 gen_op_iwmmxt_unpackluw_M0();
2127 if (insn
& (1 << 21))
2128 gen_op_iwmmxt_unpacklsl_M0();
2130 gen_op_iwmmxt_unpacklul_M0();
2135 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2136 gen_op_iwmmxt_set_mup();
2137 gen_op_iwmmxt_set_cup();
2139 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2140 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2141 wrd
= (insn
>> 12) & 0xf;
2142 rd0
= (insn
>> 16) & 0xf;
2143 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2144 switch ((insn
>> 22) & 3) {
2146 if (insn
& (1 << 21))
2147 gen_op_iwmmxt_unpackhsb_M0();
2149 gen_op_iwmmxt_unpackhub_M0();
2152 if (insn
& (1 << 21))
2153 gen_op_iwmmxt_unpackhsw_M0();
2155 gen_op_iwmmxt_unpackhuw_M0();
2158 if (insn
& (1 << 21))
2159 gen_op_iwmmxt_unpackhsl_M0();
2161 gen_op_iwmmxt_unpackhul_M0();
2166 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2167 gen_op_iwmmxt_set_mup();
2168 gen_op_iwmmxt_set_cup();
2170 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2171 case 0x214: case 0x614: case 0xa14: case 0xe14:
2172 if (((insn
>> 22) & 3) == 0)
2174 wrd
= (insn
>> 12) & 0xf;
2175 rd0
= (insn
>> 16) & 0xf;
2176 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2177 tmp
= tcg_temp_new_i32();
2178 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2179 tcg_temp_free_i32(tmp
);
2182 switch ((insn
>> 22) & 3) {
2184 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2187 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2190 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2193 tcg_temp_free_i32(tmp
);
2194 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2195 gen_op_iwmmxt_set_mup();
2196 gen_op_iwmmxt_set_cup();
2198 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2199 case 0x014: case 0x414: case 0x814: case 0xc14:
2200 if (((insn
>> 22) & 3) == 0)
2202 wrd
= (insn
>> 12) & 0xf;
2203 rd0
= (insn
>> 16) & 0xf;
2204 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2205 tmp
= tcg_temp_new_i32();
2206 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2207 tcg_temp_free_i32(tmp
);
2210 switch ((insn
>> 22) & 3) {
2212 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2215 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2218 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2221 tcg_temp_free_i32(tmp
);
2222 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2223 gen_op_iwmmxt_set_mup();
2224 gen_op_iwmmxt_set_cup();
2226 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2227 case 0x114: case 0x514: case 0x914: case 0xd14:
2228 if (((insn
>> 22) & 3) == 0)
2230 wrd
= (insn
>> 12) & 0xf;
2231 rd0
= (insn
>> 16) & 0xf;
2232 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2233 tmp
= tcg_temp_new_i32();
2234 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2235 tcg_temp_free_i32(tmp
);
2238 switch ((insn
>> 22) & 3) {
2240 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2243 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2246 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2249 tcg_temp_free_i32(tmp
);
2250 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2251 gen_op_iwmmxt_set_mup();
2252 gen_op_iwmmxt_set_cup();
2254 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2255 case 0x314: case 0x714: case 0xb14: case 0xf14:
2256 if (((insn
>> 22) & 3) == 0)
2258 wrd
= (insn
>> 12) & 0xf;
2259 rd0
= (insn
>> 16) & 0xf;
2260 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2261 tmp
= tcg_temp_new_i32();
2262 switch ((insn
>> 22) & 3) {
2264 if (gen_iwmmxt_shift(insn
, 0xf, tmp
)) {
2265 tcg_temp_free_i32(tmp
);
2268 gen_helper_iwmmxt_rorw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2271 if (gen_iwmmxt_shift(insn
, 0x1f, tmp
)) {
2272 tcg_temp_free_i32(tmp
);
2275 gen_helper_iwmmxt_rorl(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2278 if (gen_iwmmxt_shift(insn
, 0x3f, tmp
)) {
2279 tcg_temp_free_i32(tmp
);
2282 gen_helper_iwmmxt_rorq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2285 tcg_temp_free_i32(tmp
);
2286 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2287 gen_op_iwmmxt_set_mup();
2288 gen_op_iwmmxt_set_cup();
2290 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2291 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2292 wrd
= (insn
>> 12) & 0xf;
2293 rd0
= (insn
>> 16) & 0xf;
2294 rd1
= (insn
>> 0) & 0xf;
2295 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2296 switch ((insn
>> 22) & 3) {
2298 if (insn
& (1 << 21))
2299 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
2301 gen_op_iwmmxt_minub_M0_wRn(rd1
);
2304 if (insn
& (1 << 21))
2305 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
2307 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
2310 if (insn
& (1 << 21))
2311 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
2313 gen_op_iwmmxt_minul_M0_wRn(rd1
);
2318 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2319 gen_op_iwmmxt_set_mup();
2321 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2322 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2323 wrd
= (insn
>> 12) & 0xf;
2324 rd0
= (insn
>> 16) & 0xf;
2325 rd1
= (insn
>> 0) & 0xf;
2326 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2327 switch ((insn
>> 22) & 3) {
2329 if (insn
& (1 << 21))
2330 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
2332 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
2335 if (insn
& (1 << 21))
2336 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
2338 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
2341 if (insn
& (1 << 21))
2342 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
2344 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
2349 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2350 gen_op_iwmmxt_set_mup();
2352 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2353 case 0x402: case 0x502: case 0x602: case 0x702:
2354 wrd
= (insn
>> 12) & 0xf;
2355 rd0
= (insn
>> 16) & 0xf;
2356 rd1
= (insn
>> 0) & 0xf;
2357 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2358 tmp
= tcg_const_i32((insn
>> 20) & 3);
2359 iwmmxt_load_reg(cpu_V1
, rd1
);
2360 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2361 tcg_temp_free_i32(tmp
);
2362 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2363 gen_op_iwmmxt_set_mup();
2365 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2366 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2367 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2368 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2369 wrd
= (insn
>> 12) & 0xf;
2370 rd0
= (insn
>> 16) & 0xf;
2371 rd1
= (insn
>> 0) & 0xf;
2372 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2373 switch ((insn
>> 20) & 0xf) {
2375 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2378 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2381 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2384 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2387 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2390 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2393 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2396 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2399 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2404 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2405 gen_op_iwmmxt_set_mup();
2406 gen_op_iwmmxt_set_cup();
2408 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2409 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2410 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2411 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2412 wrd
= (insn
>> 12) & 0xf;
2413 rd0
= (insn
>> 16) & 0xf;
2414 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2415 tmp
= tcg_const_i32(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2416 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2417 tcg_temp_free_i32(tmp
);
2418 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2419 gen_op_iwmmxt_set_mup();
2420 gen_op_iwmmxt_set_cup();
2422 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2423 case 0x418: case 0x518: case 0x618: case 0x718:
2424 case 0x818: case 0x918: case 0xa18: case 0xb18:
2425 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2426 wrd
= (insn
>> 12) & 0xf;
2427 rd0
= (insn
>> 16) & 0xf;
2428 rd1
= (insn
>> 0) & 0xf;
2429 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2430 switch ((insn
>> 20) & 0xf) {
2432 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2435 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2438 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2441 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2444 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2447 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2450 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2453 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2456 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2461 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2462 gen_op_iwmmxt_set_mup();
2463 gen_op_iwmmxt_set_cup();
2465 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2466 case 0x408: case 0x508: case 0x608: case 0x708:
2467 case 0x808: case 0x908: case 0xa08: case 0xb08:
2468 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2469 if (!(insn
& (1 << 20)) || ((insn
>> 22) & 3) == 0)
2471 wrd
= (insn
>> 12) & 0xf;
2472 rd0
= (insn
>> 16) & 0xf;
2473 rd1
= (insn
>> 0) & 0xf;
2474 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2475 switch ((insn
>> 22) & 3) {
2477 if (insn
& (1 << 21))
2478 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
2480 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
2483 if (insn
& (1 << 21))
2484 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
2486 gen_op_iwmmxt_packul_M0_wRn(rd1
);
2489 if (insn
& (1 << 21))
2490 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
2492 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
2495 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2496 gen_op_iwmmxt_set_mup();
2497 gen_op_iwmmxt_set_cup();
2499 case 0x201: case 0x203: case 0x205: case 0x207:
2500 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2501 case 0x211: case 0x213: case 0x215: case 0x217:
2502 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2503 wrd
= (insn
>> 5) & 0xf;
2504 rd0
= (insn
>> 12) & 0xf;
2505 rd1
= (insn
>> 0) & 0xf;
2506 if (rd0
== 0xf || rd1
== 0xf)
2508 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2509 tmp
= load_reg(s
, rd0
);
2510 tmp2
= load_reg(s
, rd1
);
2511 switch ((insn
>> 16) & 0xf) {
2512 case 0x0: /* TMIA */
2513 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2515 case 0x8: /* TMIAPH */
2516 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2518 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2519 if (insn
& (1 << 16))
2520 tcg_gen_shri_i32(tmp
, tmp
, 16);
2521 if (insn
& (1 << 17))
2522 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2523 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2526 tcg_temp_free_i32(tmp2
);
2527 tcg_temp_free_i32(tmp
);
2530 tcg_temp_free_i32(tmp2
);
2531 tcg_temp_free_i32(tmp
);
2532 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2533 gen_op_iwmmxt_set_mup();
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
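
/*
 * Note: the XScale DSP accumulator acc0 shares storage with the iWMMXt
 * register array in CPUARMState, which is why the MIA/MIAPH/MIAxy, MAR
 * and MRA forms above reuse iwmmxt_load_reg()/iwmmxt_store_reg() and the
 * iwmmxt multiply-accumulate helpers; any accumulator other than acc0 is
 * rejected as UNDEF.
 */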
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (dc_isar_feature(aa32_simd_r32, s)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
          | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            goto illegal_op; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
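
/*
 * Example: VFP_DREG_D() pastes bits [15:12] of the insn together with bit 22
 * as the top register bit, so bits [15:12] == 0x3 with bit 22 set names d19
 * when the 32 double registers are present (aa32_simd_r32); without that
 * feature a set "small" bit makes the encoding illegal.
 */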
static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/*
 * Disassemble a VFP instruction.  Returns nonzero if an error occurred
 * (ie. an undefined instruction).
 */
static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
    if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
        return 1;
    }

    /*
     * If the decodetree decoder handles this insn it will always
     * emit code to either execute the insn or generate an appropriate
     * exception; so we don't need to ever return non-zero to tell
     * the calling code to emit an UNDEF exception.
     */
    if (extract32(insn, 28, 4) == 0xf) {
        if (disas_vfp_uncond(s, insn)) {
            return 0;
        }
    } else {
        if (disas_vfp(s, insn)) {
            return 0;
        }
    }
    /* If the decodetree decoder didn't handle this insn, it must be UNDEF */
    return 1;
}
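
/*
 * Note: disas_vfp_uncond() covers the unconditional (cond == 0xf) VFP
 * encodings such as VSEL and the VRINT/VCVT rounding-mode variants, while
 * disas_vfp() covers the conditional encoding space; both decoders are
 * generated from the decodetree descriptions (vfp-uncond.decode and
 * vfp.decode).
 */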
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
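
/*
 * Note: direct TB chaining via goto_tb is only valid when the destination
 * lies in the same guest page as either the start of the current TB or the
 * insn just translated, because TB invalidation is tracked per page; all
 * other jumps fall back to the slower lookup_and_goto_ptr path.
 */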
static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}

/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(is_singlestepping(s))) {
        /* An indirect jump so that we still trigger the debug exception.  */
        gen_set_pc_im(s, dest);
        s->base.is_jmp = DISAS_JUMP;
    } else {
        gen_goto_tb(s, 0, dest);
    }
}
static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask = 0;

    if (flags & (1 << 0)) {
        mask |= 0xff;
    }
    if (flags & (1 << 1)) {
        mask |= 0xff00;
    }
    if (flags & (1 << 2)) {
        mask |= 0xff0000;
    }
    if (flags & (1 << 3)) {
        mask |= 0xff000000;
    }

    /* Mask out undefined and reserved bits.  */
    mask &= aarch32_cpsr_valid_mask(s->features, s->isar);

    /* Mask out execution state.  */
    if (!spsr) {
        mask &= ~CPSR_EXEC;
    }

    /* Mask out privileged bits.  */
    if (IS_USER(s)) {
        mask &= CPSR_USER;
    }
    return mask;
}
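
/*
 * Example: "MSR CPSR_fc, rN" sets field-mask bits 0 (c) and 3 (f), giving
 * mask == 0xff0000ff here before the validity, execution-state and
 * privilege masking trims it further.
 */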
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}
/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /*
         * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
         * (and so we can forbid accesses from EL2 or below). elr_hyp
         * can be accessed also from Hyp mode, so forbid accesses from
         * EL0 or EL1.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
            (s->current_el < 3 && *regno != 17)) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                       syn_uncategorized(), exc_target);
    return false;
}
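
/*
 * Example: "MRS r0, SPSR_fiq" encodes r == 1, sysm == 0xe, so the decode
 * above yields tgtmode == ARM_CPU_MODE_FIQ and regno == 16 (the arbitrary
 * SPSR slot), which gen_mrs_banked() then passes to the helper.
 */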
static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}
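
/*
 * Note: condexec bits and the PC are synced before the call because the
 * msr_banked helper can raise an exception at runtime, and DISAS_UPDATE
 * ends the TB so that any mode or register-bank change is visible before
 * the next insn is translated.
 */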
static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: abort();
    }
}
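
/*
 * Note: gen_neon_rsb() passes its operands as (t0, t1, t0), i.e. it
 * computes t0 = t1 - t0, giving the reversed subtraction that the Neon
 * decoder needs when the operands have been loaded in the opposite order.
 */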
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  tcg_gen_smax_i32
#define gen_helper_neon_pmax_u32  tcg_gen_umax_i32
#define gen_helper_neon_pmin_s32  tcg_gen_smin_i32
#define gen_helper_neon_pmin_u32  tcg_gen_umin_i32
3053 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3054 switch ((size << 1) | u) { \
3056 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3059 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3062 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3065 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3068 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3071 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3073 default: return 1; \
3076 #define GEN_NEON_INTEGER_OP(name) do { \
3077 switch ((size << 1) | u) { \
3079 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3082 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3085 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3088 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3091 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3094 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3096 default: return 1; \
static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
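
/*
 * Note: "reg" here is the packed scalar index used by the Neon scalar
 * multiply paths: for 16-bit scalars the low three bits select the D
 * register, bit 3 picks the high or low half to duplicate and the upper
 * bits pick the 32-bit lane; for 32-bit scalars the low four bits are the
 * register and the upper bits the lane.
 */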
3128 static int gen_neon_unzip(int rd
, int rm
, int size
, int q
)
3132 if (!q
&& size
== 2) {
3135 pd
= vfp_reg_ptr(true, rd
);
3136 pm
= vfp_reg_ptr(true, rm
);
3140 gen_helper_neon_qunzip8(pd
, pm
);
3143 gen_helper_neon_qunzip16(pd
, pm
);
3146 gen_helper_neon_qunzip32(pd
, pm
);
3154 gen_helper_neon_unzip8(pd
, pm
);
3157 gen_helper_neon_unzip16(pd
, pm
);
3163 tcg_temp_free_ptr(pd
);
3164 tcg_temp_free_ptr(pm
);
3168 static int gen_neon_zip(int rd
, int rm
, int size
, int q
)
3172 if (!q
&& size
== 2) {
3175 pd
= vfp_reg_ptr(true, rd
);
3176 pm
= vfp_reg_ptr(true, rm
);
3180 gen_helper_neon_qzip8(pd
, pm
);
3183 gen_helper_neon_qzip16(pd
, pm
);
3186 gen_helper_neon_qzip32(pd
, pm
);
3194 gen_helper_neon_zip8(pd
, pm
);
3197 gen_helper_neon_zip16(pd
, pm
);
3203 tcg_temp_free_ptr(pd
);
3204 tcg_temp_free_ptr(pm
);
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
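
/*
 * Note: these two helpers implement one 32-bit column of VTRN, i.e. a 2x2
 * transpose of the 8-bit or 16-bit elements spread across t0 and t1; the
 * scratch register lets each input be rewritten without clobbering the
 * other before it has been read.
 */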
3254 } const neon_ls_element_type
[11] = {
3268 /* Translate a NEON load/store element instruction. Return nonzero if the
3269 instruction is invalid. */
3270 static int disas_neon_ls_insn(DisasContext
*s
, uint32_t insn
)
3290 /* FIXME: this access check should not take precedence over UNDEF
3291 * for invalid encodings; we will generate incorrect syndrome information
3292 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3294 if (s
->fp_excp_el
) {
3295 gen_exception_insn(s
, s
->pc_curr
, EXCP_UDEF
,
3296 syn_simd_access_trap(1, 0xe, false), s
->fp_excp_el
);
3300 if (!s
->vfp_enabled
)
3302 VFP_DREG_D(rd
, insn
);
3303 rn
= (insn
>> 16) & 0xf;
3305 load
= (insn
& (1 << 21)) != 0;
3306 endian
= s
->be_data
;
3307 mmu_idx
= get_mem_index(s
);
3308 if ((insn
& (1 << 23)) == 0) {
3309 /* Load store all elements. */
3310 op
= (insn
>> 8) & 0xf;
3311 size
= (insn
>> 6) & 3;
3314 /* Catch UNDEF cases for bad values of align field */
3317 if (((insn
>> 5) & 1) == 1) {
3322 if (((insn
>> 4) & 3) == 3) {
3329 nregs
= neon_ls_element_type
[op
].nregs
;
3330 interleave
= neon_ls_element_type
[op
].interleave
;
3331 spacing
= neon_ls_element_type
[op
].spacing
;
3332 if (size
== 3 && (interleave
| spacing
) != 1) {
3335 /* For our purposes, bytes are always little-endian. */
3339 /* Consecutive little-endian elements from a single register
3340 * can be promoted to a larger little-endian operation.
3342 if (interleave
== 1 && endian
== MO_LE
) {
3345 tmp64
= tcg_temp_new_i64();
3346 addr
= tcg_temp_new_i32();
3347 tmp2
= tcg_const_i32(1 << size
);
3348 load_reg_var(s
, addr
, rn
);
3349 for (reg
= 0; reg
< nregs
; reg
++) {
3350 for (n
= 0; n
< 8 >> size
; n
++) {
3352 for (xs
= 0; xs
< interleave
; xs
++) {
3353 int tt
= rd
+ reg
+ spacing
* xs
;
3356 gen_aa32_ld_i64(s
, tmp64
, addr
, mmu_idx
, endian
| size
);
3357 neon_store_element64(tt
, n
, size
, tmp64
);
3359 neon_load_element64(tmp64
, tt
, n
, size
);
3360 gen_aa32_st_i64(s
, tmp64
, addr
, mmu_idx
, endian
| size
);
3362 tcg_gen_add_i32(addr
, addr
, tmp2
);
3366 tcg_temp_free_i32(addr
);
3367 tcg_temp_free_i32(tmp2
);
3368 tcg_temp_free_i64(tmp64
);
3369 stride
= nregs
* interleave
* 8;
3371 size
= (insn
>> 10) & 3;
3373 /* Load single element to all lanes. */
3374 int a
= (insn
>> 4) & 1;
3378 size
= (insn
>> 6) & 3;
3379 nregs
= ((insn
>> 8) & 3) + 1;
3382 if (nregs
!= 4 || a
== 0) {
3385 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3388 if (nregs
== 1 && a
== 1 && size
== 0) {
3391 if (nregs
== 3 && a
== 1) {
3394 addr
= tcg_temp_new_i32();
3395 load_reg_var(s
, addr
, rn
);
3397 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
3398 * VLD2/3/4 to all lanes: bit 5 indicates register stride.
3400 stride
= (insn
& (1 << 5)) ? 2 : 1;
3401 vec_size
= nregs
== 1 ? stride
* 8 : 8;
3403 tmp
= tcg_temp_new_i32();
3404 for (reg
= 0; reg
< nregs
; reg
++) {
3405 gen_aa32_ld_i32(s
, tmp
, addr
, get_mem_index(s
),
3407 if ((rd
& 1) && vec_size
== 16) {
3408 /* We cannot write 16 bytes at once because the
3409 * destination is unaligned.
3411 tcg_gen_gvec_dup_i32(size
, neon_reg_offset(rd
, 0),
3413 tcg_gen_gvec_mov(0, neon_reg_offset(rd
+ 1, 0),
3414 neon_reg_offset(rd
, 0), 8, 8);
3416 tcg_gen_gvec_dup_i32(size
, neon_reg_offset(rd
, 0),
3417 vec_size
, vec_size
, tmp
);
3419 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3422 tcg_temp_free_i32(tmp
);
3423 tcg_temp_free_i32(addr
);
3424 stride
= (1 << size
) * nregs
;
3426 /* Single element. */
3427 int idx
= (insn
>> 4) & 0xf;
3431 reg_idx
= (insn
>> 5) & 7;
3435 reg_idx
= (insn
>> 6) & 3;
3436 stride
= (insn
& (1 << 5)) ? 2 : 1;
3439 reg_idx
= (insn
>> 7) & 1;
3440 stride
= (insn
& (1 << 6)) ? 2 : 1;
3445 nregs
= ((insn
>> 8) & 3) + 1;
3446 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
3449 if (((idx
& (1 << size
)) != 0) ||
3450 (size
== 2 && ((idx
& 3) == 1 || (idx
& 3) == 2))) {
3455 if ((idx
& 1) != 0) {
3460 if (size
== 2 && (idx
& 2) != 0) {
3465 if ((size
== 2) && ((idx
& 3) == 3)) {
3472 if ((rd
+ stride
* (nregs
- 1)) > 31) {
3473 /* Attempts to write off the end of the register file
3474 * are UNPREDICTABLE; we choose to UNDEF because otherwise
3475 * the neon_load_reg() would write off the end of the array.
3479 tmp
= tcg_temp_new_i32();
3480 addr
= tcg_temp_new_i32();
3481 load_reg_var(s
, addr
, rn
);
3482 for (reg
= 0; reg
< nregs
; reg
++) {
3484 gen_aa32_ld_i32(s
, tmp
, addr
, get_mem_index(s
),
3486 neon_store_element(rd
, reg_idx
, size
, tmp
);
3487 } else { /* Store */
3488 neon_load_element(tmp
, rd
, reg_idx
, size
);
3489 gen_aa32_st_i32(s
, tmp
, addr
, get_mem_index(s
),
3493 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3495 tcg_temp_free_i32(addr
);
3496 tcg_temp_free_i32(tmp
);
3497 stride
= nregs
* (1 << size
);
3503 base
= load_reg(s
, rn
);
3505 tcg_gen_addi_i32(base
, base
, stride
);
3508 index
= load_reg(s
, rm
);
3509 tcg_gen_add_i32(base
, base
, index
);
3510 tcg_temp_free_i32(index
);
3512 store_reg(s
, rn
, base
);
3517 static inline void gen_neon_narrow(int size
, TCGv_i32 dest
, TCGv_i64 src
)
3520 case 0: gen_helper_neon_narrow_u8(dest
, src
); break;
3521 case 1: gen_helper_neon_narrow_u16(dest
, src
); break;
3522 case 2: tcg_gen_extrl_i64_i32(dest
, src
); break;
3527 static inline void gen_neon_narrow_sats(int size
, TCGv_i32 dest
, TCGv_i64 src
)
3530 case 0: gen_helper_neon_narrow_sat_s8(dest
, cpu_env
, src
); break;
3531 case 1: gen_helper_neon_narrow_sat_s16(dest
, cpu_env
, src
); break;
3532 case 2: gen_helper_neon_narrow_sat_s32(dest
, cpu_env
, src
); break;
3537 static inline void gen_neon_narrow_satu(int size
, TCGv_i32 dest
, TCGv_i64 src
)
3540 case 0: gen_helper_neon_narrow_sat_u8(dest
, cpu_env
, src
); break;
3541 case 1: gen_helper_neon_narrow_sat_u16(dest
, cpu_env
, src
); break;
3542 case 2: gen_helper_neon_narrow_sat_u32(dest
, cpu_env
, src
); break;
3547 static inline void gen_neon_unarrow_sats(int size
, TCGv_i32 dest
, TCGv_i64 src
)
3550 case 0: gen_helper_neon_unarrow_sat8(dest
, cpu_env
, src
); break;
3551 case 1: gen_helper_neon_unarrow_sat16(dest
, cpu_env
, src
); break;
3552 case 2: gen_helper_neon_unarrow_sat32(dest
, cpu_env
, src
); break;
3557 static inline void gen_neon_shift_narrow(int size
, TCGv_i32 var
, TCGv_i32 shift
,
3563 case 1: gen_helper_neon_rshl_u16(var
, var
, shift
); break;
3564 case 2: gen_helper_neon_rshl_u32(var
, var
, shift
); break;
3569 case 1: gen_helper_neon_rshl_s16(var
, var
, shift
); break;
3570 case 2: gen_helper_neon_rshl_s32(var
, var
, shift
); break;
3577 case 1: gen_helper_neon_shl_u16(var
, var
, shift
); break;
3578 case 2: gen_ushl_i32(var
, var
, shift
); break;
3583 case 1: gen_helper_neon_shl_s16(var
, var
, shift
); break;
3584 case 2: gen_sshl_i32(var
, var
, shift
); break;
3591 static inline void gen_neon_widen(TCGv_i64 dest
, TCGv_i32 src
, int size
, int u
)
3595 case 0: gen_helper_neon_widen_u8(dest
, src
); break;
3596 case 1: gen_helper_neon_widen_u16(dest
, src
); break;
3597 case 2: tcg_gen_extu_i32_i64(dest
, src
); break;
3602 case 0: gen_helper_neon_widen_s8(dest
, src
); break;
3603 case 1: gen_helper_neon_widen_s16(dest
, src
); break;
3604 case 2: tcg_gen_ext_i32_i64(dest
, src
); break;
3608 tcg_temp_free_i32(src
);
3611 static inline void gen_neon_addl(int size
)
3614 case 0: gen_helper_neon_addl_u16(CPU_V001
); break;
3615 case 1: gen_helper_neon_addl_u32(CPU_V001
); break;
3616 case 2: tcg_gen_add_i64(CPU_V001
); break;
3621 static inline void gen_neon_subl(int size
)
3624 case 0: gen_helper_neon_subl_u16(CPU_V001
); break;
3625 case 1: gen_helper_neon_subl_u32(CPU_V001
); break;
3626 case 2: tcg_gen_sub_i64(CPU_V001
); break;
3631 static inline void gen_neon_negl(TCGv_i64 var
, int size
)
3634 case 0: gen_helper_neon_negl_u16(var
, var
); break;
3635 case 1: gen_helper_neon_negl_u32(var
, var
); break;
3637 tcg_gen_neg_i64(var
, var
);
3643 static inline void gen_neon_addl_saturate(TCGv_i64 op0
, TCGv_i64 op1
, int size
)
3646 case 1: gen_helper_neon_addl_saturate_s32(op0
, cpu_env
, op0
, op1
); break;
3647 case 2: gen_helper_neon_addl_saturate_s64(op0
, cpu_env
, op0
, op1
); break;
3652 static inline void gen_neon_mull(TCGv_i64 dest
, TCGv_i32 a
, TCGv_i32 b
,
3657 switch ((size
<< 1) | u
) {
3658 case 0: gen_helper_neon_mull_s8(dest
, a
, b
); break;
3659 case 1: gen_helper_neon_mull_u8(dest
, a
, b
); break;
3660 case 2: gen_helper_neon_mull_s16(dest
, a
, b
); break;
3661 case 3: gen_helper_neon_mull_u16(dest
, a
, b
); break;
3663 tmp
= gen_muls_i64_i32(a
, b
);
3664 tcg_gen_mov_i64(dest
, tmp
);
3665 tcg_temp_free_i64(tmp
);
3668 tmp
= gen_mulu_i64_i32(a
, b
);
3669 tcg_gen_mov_i64(dest
, tmp
);
3670 tcg_temp_free_i64(tmp
);
3675 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
3676 Don't forget to clean them now. */
3678 tcg_temp_free_i32(a
);
3679 tcg_temp_free_i32(b
);
3683 static void gen_neon_narrow_op(int op
, int u
, int size
,
3684 TCGv_i32 dest
, TCGv_i64 src
)
3688 gen_neon_unarrow_sats(size
, dest
, src
);
3690 gen_neon_narrow(size
, dest
, src
);
3694 gen_neon_narrow_satu(size
, dest
, src
);
3696 gen_neon_narrow_sats(size
, dest
, src
);
3701 /* Symbolic constants for op fields for Neon 3-register same-length.
3702 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
3705 #define NEON_3R_VHADD 0
3706 #define NEON_3R_VQADD 1
3707 #define NEON_3R_VRHADD 2
3708 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
3709 #define NEON_3R_VHSUB 4
3710 #define NEON_3R_VQSUB 5
3711 #define NEON_3R_VCGT 6
3712 #define NEON_3R_VCGE 7
3713 #define NEON_3R_VSHL 8
3714 #define NEON_3R_VQSHL 9
3715 #define NEON_3R_VRSHL 10
3716 #define NEON_3R_VQRSHL 11
3717 #define NEON_3R_VMAX 12
3718 #define NEON_3R_VMIN 13
3719 #define NEON_3R_VABD 14
3720 #define NEON_3R_VABA 15
3721 #define NEON_3R_VADD_VSUB 16
3722 #define NEON_3R_VTST_VCEQ 17
3723 #define NEON_3R_VML 18 /* VMLA, VMLS */
3724 #define NEON_3R_VMUL 19
3725 #define NEON_3R_VPMAX 20
3726 #define NEON_3R_VPMIN 21
3727 #define NEON_3R_VQDMULH_VQRDMULH 22
3728 #define NEON_3R_VPADD_VQRDMLAH 23
3729 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
3730 #define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
3731 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
3732 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
3733 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
3734 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
3735 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
3736 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
3738 static const uint8_t neon_3r_sizes
[] = {
3739 [NEON_3R_VHADD
] = 0x7,
3740 [NEON_3R_VQADD
] = 0xf,
3741 [NEON_3R_VRHADD
] = 0x7,
3742 [NEON_3R_LOGIC
] = 0xf, /* size field encodes op type */
3743 [NEON_3R_VHSUB
] = 0x7,
3744 [NEON_3R_VQSUB
] = 0xf,
3745 [NEON_3R_VCGT
] = 0x7,
3746 [NEON_3R_VCGE
] = 0x7,
3747 [NEON_3R_VSHL
] = 0xf,
3748 [NEON_3R_VQSHL
] = 0xf,
3749 [NEON_3R_VRSHL
] = 0xf,
3750 [NEON_3R_VQRSHL
] = 0xf,
3751 [NEON_3R_VMAX
] = 0x7,
3752 [NEON_3R_VMIN
] = 0x7,
3753 [NEON_3R_VABD
] = 0x7,
3754 [NEON_3R_VABA
] = 0x7,
3755 [NEON_3R_VADD_VSUB
] = 0xf,
3756 [NEON_3R_VTST_VCEQ
] = 0x7,
3757 [NEON_3R_VML
] = 0x7,
3758 [NEON_3R_VMUL
] = 0x7,
3759 [NEON_3R_VPMAX
] = 0x7,
3760 [NEON_3R_VPMIN
] = 0x7,
3761 [NEON_3R_VQDMULH_VQRDMULH
] = 0x6,
3762 [NEON_3R_VPADD_VQRDMLAH
] = 0x7,
3763 [NEON_3R_SHA
] = 0xf, /* size field encodes op type */
3764 [NEON_3R_VFM_VQRDMLSH
] = 0x7, /* For VFM, size bit 1 encodes op */
3765 [NEON_3R_FLOAT_ARITH
] = 0x5, /* size bit 1 encodes op */
3766 [NEON_3R_FLOAT_MULTIPLY
] = 0x5, /* size bit 1 encodes op */
3767 [NEON_3R_FLOAT_CMP
] = 0x5, /* size bit 1 encodes op */
3768 [NEON_3R_FLOAT_ACMP
] = 0x5, /* size bit 1 encodes op */
3769 [NEON_3R_FLOAT_MINMAX
] = 0x5, /* size bit 1 encodes op */
3770 [NEON_3R_FLOAT_MISC
] = 0x5, /* size bit 1 encodes op */
3773 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
3774 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
3777 #define NEON_2RM_VREV64 0
3778 #define NEON_2RM_VREV32 1
3779 #define NEON_2RM_VREV16 2
3780 #define NEON_2RM_VPADDL 4
3781 #define NEON_2RM_VPADDL_U 5
3782 #define NEON_2RM_AESE 6 /* Includes AESD */
3783 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
3784 #define NEON_2RM_VCLS 8
3785 #define NEON_2RM_VCLZ 9
3786 #define NEON_2RM_VCNT 10
3787 #define NEON_2RM_VMVN 11
3788 #define NEON_2RM_VPADAL 12
3789 #define NEON_2RM_VPADAL_U 13
3790 #define NEON_2RM_VQABS 14
3791 #define NEON_2RM_VQNEG 15
3792 #define NEON_2RM_VCGT0 16
3793 #define NEON_2RM_VCGE0 17
3794 #define NEON_2RM_VCEQ0 18
3795 #define NEON_2RM_VCLE0 19
3796 #define NEON_2RM_VCLT0 20
3797 #define NEON_2RM_SHA1H 21
3798 #define NEON_2RM_VABS 22
3799 #define NEON_2RM_VNEG 23
3800 #define NEON_2RM_VCGT0_F 24
3801 #define NEON_2RM_VCGE0_F 25
3802 #define NEON_2RM_VCEQ0_F 26
3803 #define NEON_2RM_VCLE0_F 27
3804 #define NEON_2RM_VCLT0_F 28
3805 #define NEON_2RM_VABS_F 30
3806 #define NEON_2RM_VNEG_F 31
3807 #define NEON_2RM_VSWP 32
3808 #define NEON_2RM_VTRN 33
3809 #define NEON_2RM_VUZP 34
3810 #define NEON_2RM_VZIP 35
3811 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
3812 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
3813 #define NEON_2RM_VSHLL 38
3814 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
3815 #define NEON_2RM_VRINTN 40
3816 #define NEON_2RM_VRINTX 41
3817 #define NEON_2RM_VRINTA 42
3818 #define NEON_2RM_VRINTZ 43
3819 #define NEON_2RM_VCVT_F16_F32 44
3820 #define NEON_2RM_VRINTM 45
3821 #define NEON_2RM_VCVT_F32_F16 46
3822 #define NEON_2RM_VRINTP 47
3823 #define NEON_2RM_VCVTAU 48
3824 #define NEON_2RM_VCVTAS 49
3825 #define NEON_2RM_VCVTNU 50
3826 #define NEON_2RM_VCVTNS 51
3827 #define NEON_2RM_VCVTPU 52
3828 #define NEON_2RM_VCVTPS 53
3829 #define NEON_2RM_VCVTMU 54
3830 #define NEON_2RM_VCVTMS 55
3831 #define NEON_2RM_VRECPE 56
3832 #define NEON_2RM_VRSQRTE 57
3833 #define NEON_2RM_VRECPE_F 58
3834 #define NEON_2RM_VRSQRTE_F 59
3835 #define NEON_2RM_VCVT_FS 60
3836 #define NEON_2RM_VCVT_FU 61
3837 #define NEON_2RM_VCVT_SF 62
3838 #define NEON_2RM_VCVT_UF 63
3840 static bool neon_2rm_is_v8_op(int op
)
3842 /* Return true if this neon 2reg-misc op is ARMv8 and up */
3844 case NEON_2RM_VRINTN
:
3845 case NEON_2RM_VRINTA
:
3846 case NEON_2RM_VRINTM
:
3847 case NEON_2RM_VRINTP
:
3848 case NEON_2RM_VRINTZ
:
3849 case NEON_2RM_VRINTX
:
3850 case NEON_2RM_VCVTAU
:
3851 case NEON_2RM_VCVTAS
:
3852 case NEON_2RM_VCVTNU
:
3853 case NEON_2RM_VCVTNS
:
3854 case NEON_2RM_VCVTPU
:
3855 case NEON_2RM_VCVTPS
:
3856 case NEON_2RM_VCVTMU
:
3857 case NEON_2RM_VCVTMS
:
3864 /* Each entry in this array has bit n set if the insn allows
3865 * size value n (otherwise it will UNDEF). Since unallocated
3866 * op values will have no bits set they always UNDEF.
3868 static const uint8_t neon_2rm_sizes
[] = {
3869 [NEON_2RM_VREV64
] = 0x7,
3870 [NEON_2RM_VREV32
] = 0x3,
3871 [NEON_2RM_VREV16
] = 0x1,
3872 [NEON_2RM_VPADDL
] = 0x7,
3873 [NEON_2RM_VPADDL_U
] = 0x7,
3874 [NEON_2RM_AESE
] = 0x1,
3875 [NEON_2RM_AESMC
] = 0x1,
3876 [NEON_2RM_VCLS
] = 0x7,
3877 [NEON_2RM_VCLZ
] = 0x7,
3878 [NEON_2RM_VCNT
] = 0x1,
3879 [NEON_2RM_VMVN
] = 0x1,
3880 [NEON_2RM_VPADAL
] = 0x7,
3881 [NEON_2RM_VPADAL_U
] = 0x7,
3882 [NEON_2RM_VQABS
] = 0x7,
3883 [NEON_2RM_VQNEG
] = 0x7,
3884 [NEON_2RM_VCGT0
] = 0x7,
3885 [NEON_2RM_VCGE0
] = 0x7,
3886 [NEON_2RM_VCEQ0
] = 0x7,
3887 [NEON_2RM_VCLE0
] = 0x7,
3888 [NEON_2RM_VCLT0
] = 0x7,
3889 [NEON_2RM_SHA1H
] = 0x4,
3890 [NEON_2RM_VABS
] = 0x7,
3891 [NEON_2RM_VNEG
] = 0x7,
3892 [NEON_2RM_VCGT0_F
] = 0x4,
3893 [NEON_2RM_VCGE0_F
] = 0x4,
3894 [NEON_2RM_VCEQ0_F
] = 0x4,
3895 [NEON_2RM_VCLE0_F
] = 0x4,
3896 [NEON_2RM_VCLT0_F
] = 0x4,
3897 [NEON_2RM_VABS_F
] = 0x4,
3898 [NEON_2RM_VNEG_F
] = 0x4,
3899 [NEON_2RM_VSWP
] = 0x1,
3900 [NEON_2RM_VTRN
] = 0x7,
3901 [NEON_2RM_VUZP
] = 0x7,
3902 [NEON_2RM_VZIP
] = 0x7,
3903 [NEON_2RM_VMOVN
] = 0x7,
3904 [NEON_2RM_VQMOVN
] = 0x7,
3905 [NEON_2RM_VSHLL
] = 0x7,
3906 [NEON_2RM_SHA1SU1
] = 0x4,
3907 [NEON_2RM_VRINTN
] = 0x4,
3908 [NEON_2RM_VRINTX
] = 0x4,
3909 [NEON_2RM_VRINTA
] = 0x4,
3910 [NEON_2RM_VRINTZ
] = 0x4,
3911 [NEON_2RM_VCVT_F16_F32
] = 0x2,
3912 [NEON_2RM_VRINTM
] = 0x4,
3913 [NEON_2RM_VCVT_F32_F16
] = 0x2,
3914 [NEON_2RM_VRINTP
] = 0x4,
3915 [NEON_2RM_VCVTAU
] = 0x4,
3916 [NEON_2RM_VCVTAS
] = 0x4,
3917 [NEON_2RM_VCVTNU
] = 0x4,
3918 [NEON_2RM_VCVTNS
] = 0x4,
3919 [NEON_2RM_VCVTPU
] = 0x4,
3920 [NEON_2RM_VCVTPS
] = 0x4,
3921 [NEON_2RM_VCVTMU
] = 0x4,
3922 [NEON_2RM_VCVTMS
] = 0x4,
3923 [NEON_2RM_VRECPE
] = 0x4,
3924 [NEON_2RM_VRSQRTE
] = 0x4,
3925 [NEON_2RM_VRECPE_F
] = 0x4,
3926 [NEON_2RM_VRSQRTE_F
] = 0x4,
3927 [NEON_2RM_VCVT_FS
] = 0x4,
3928 [NEON_2RM_VCVT_FU
] = 0x4,
3929 [NEON_2RM_VCVT_SF
] = 0x4,
3930 [NEON_2RM_VCVT_UF
] = 0x4,
3934 /* Expand v8.1 simd helper. */
3935 static int do_v81_helper(DisasContext
*s
, gen_helper_gvec_3_ptr
*fn
,
3936 int q
, int rd
, int rn
, int rm
)
3938 if (dc_isar_feature(aa32_rdm
, s
)) {
3939 int opr_sz
= (1 + q
) * 8;
3940 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd
),
3941 vfp_reg_offset(1, rn
),
3942 vfp_reg_offset(1, rm
), cpu_env
,
3943 opr_sz
, opr_sz
, 0, fn
);
3949 static void gen_ssra8_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
3951 tcg_gen_vec_sar8i_i64(a
, a
, shift
);
3952 tcg_gen_vec_add8_i64(d
, d
, a
);
3955 static void gen_ssra16_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
3957 tcg_gen_vec_sar16i_i64(a
, a
, shift
);
3958 tcg_gen_vec_add16_i64(d
, d
, a
);
3961 static void gen_ssra32_i32(TCGv_i32 d
, TCGv_i32 a
, int32_t shift
)
3963 tcg_gen_sari_i32(a
, a
, shift
);
3964 tcg_gen_add_i32(d
, d
, a
);
3967 static void gen_ssra64_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
3969 tcg_gen_sari_i64(a
, a
, shift
);
3970 tcg_gen_add_i64(d
, d
, a
);
3973 static void gen_ssra_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, int64_t sh
)
3975 tcg_gen_sari_vec(vece
, a
, a
, sh
);
3976 tcg_gen_add_vec(vece
, d
, d
, a
);
3979 static const TCGOpcode vecop_list_ssra
[] = {
3980 INDEX_op_sari_vec
, INDEX_op_add_vec
, 0
3983 const GVecGen2i ssra_op
[4] = {
3984 { .fni8
= gen_ssra8_i64
,
3985 .fniv
= gen_ssra_vec
,
3987 .opt_opc
= vecop_list_ssra
,
3989 { .fni8
= gen_ssra16_i64
,
3990 .fniv
= gen_ssra_vec
,
3992 .opt_opc
= vecop_list_ssra
,
3994 { .fni4
= gen_ssra32_i32
,
3995 .fniv
= gen_ssra_vec
,
3997 .opt_opc
= vecop_list_ssra
,
3999 { .fni8
= gen_ssra64_i64
,
4000 .fniv
= gen_ssra_vec
,
4001 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
4002 .opt_opc
= vecop_list_ssra
,
4007 static void gen_usra8_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4009 tcg_gen_vec_shr8i_i64(a
, a
, shift
);
4010 tcg_gen_vec_add8_i64(d
, d
, a
);
4013 static void gen_usra16_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4015 tcg_gen_vec_shr16i_i64(a
, a
, shift
);
4016 tcg_gen_vec_add16_i64(d
, d
, a
);
4019 static void gen_usra32_i32(TCGv_i32 d
, TCGv_i32 a
, int32_t shift
)
4021 tcg_gen_shri_i32(a
, a
, shift
);
4022 tcg_gen_add_i32(d
, d
, a
);
4025 static void gen_usra64_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4027 tcg_gen_shri_i64(a
, a
, shift
);
4028 tcg_gen_add_i64(d
, d
, a
);
4031 static void gen_usra_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, int64_t sh
)
4033 tcg_gen_shri_vec(vece
, a
, a
, sh
);
4034 tcg_gen_add_vec(vece
, d
, d
, a
);
4037 static const TCGOpcode vecop_list_usra
[] = {
4038 INDEX_op_shri_vec
, INDEX_op_add_vec
, 0
4041 const GVecGen2i usra_op
[4] = {
4042 { .fni8
= gen_usra8_i64
,
4043 .fniv
= gen_usra_vec
,
4045 .opt_opc
= vecop_list_usra
,
4047 { .fni8
= gen_usra16_i64
,
4048 .fniv
= gen_usra_vec
,
4050 .opt_opc
= vecop_list_usra
,
4052 { .fni4
= gen_usra32_i32
,
4053 .fniv
= gen_usra_vec
,
4055 .opt_opc
= vecop_list_usra
,
4057 { .fni8
= gen_usra64_i64
,
4058 .fniv
= gen_usra_vec
,
4059 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
4061 .opt_opc
= vecop_list_usra
,
4065 static void gen_shr8_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4067 uint64_t mask
= dup_const(MO_8
, 0xff >> shift
);
4068 TCGv_i64 t
= tcg_temp_new_i64();
4070 tcg_gen_shri_i64(t
, a
, shift
);
4071 tcg_gen_andi_i64(t
, t
, mask
);
4072 tcg_gen_andi_i64(d
, d
, ~mask
);
4073 tcg_gen_or_i64(d
, d
, t
);
4074 tcg_temp_free_i64(t
);
4077 static void gen_shr16_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4079 uint64_t mask
= dup_const(MO_16
, 0xffff >> shift
);
4080 TCGv_i64 t
= tcg_temp_new_i64();
4082 tcg_gen_shri_i64(t
, a
, shift
);
4083 tcg_gen_andi_i64(t
, t
, mask
);
4084 tcg_gen_andi_i64(d
, d
, ~mask
);
4085 tcg_gen_or_i64(d
, d
, t
);
4086 tcg_temp_free_i64(t
);
4089 static void gen_shr32_ins_i32(TCGv_i32 d
, TCGv_i32 a
, int32_t shift
)
4091 tcg_gen_shri_i32(a
, a
, shift
);
4092 tcg_gen_deposit_i32(d
, d
, a
, 0, 32 - shift
);
4095 static void gen_shr64_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4097 tcg_gen_shri_i64(a
, a
, shift
);
4098 tcg_gen_deposit_i64(d
, d
, a
, 0, 64 - shift
);
4101 static void gen_shr_ins_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, int64_t sh
)
4104 tcg_gen_mov_vec(d
, a
);
4106 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
4107 TCGv_vec m
= tcg_temp_new_vec_matching(d
);
4109 tcg_gen_dupi_vec(vece
, m
, MAKE_64BIT_MASK((8 << vece
) - sh
, sh
));
4110 tcg_gen_shri_vec(vece
, t
, a
, sh
);
4111 tcg_gen_and_vec(vece
, d
, d
, m
);
4112 tcg_gen_or_vec(vece
, d
, d
, t
);
4114 tcg_temp_free_vec(t
);
4115 tcg_temp_free_vec(m
);
4119 static const TCGOpcode vecop_list_sri
[] = { INDEX_op_shri_vec
, 0 };
4121 const GVecGen2i sri_op
[4] = {
4122 { .fni8
= gen_shr8_ins_i64
,
4123 .fniv
= gen_shr_ins_vec
,
4125 .opt_opc
= vecop_list_sri
,
4127 { .fni8
= gen_shr16_ins_i64
,
4128 .fniv
= gen_shr_ins_vec
,
4130 .opt_opc
= vecop_list_sri
,
4132 { .fni4
= gen_shr32_ins_i32
,
4133 .fniv
= gen_shr_ins_vec
,
4135 .opt_opc
= vecop_list_sri
,
4137 { .fni8
= gen_shr64_ins_i64
,
4138 .fniv
= gen_shr_ins_vec
,
4139 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
4141 .opt_opc
= vecop_list_sri
,
4145 static void gen_shl8_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4147 uint64_t mask
= dup_const(MO_8
, 0xff << shift
);
4148 TCGv_i64 t
= tcg_temp_new_i64();
4150 tcg_gen_shli_i64(t
, a
, shift
);
4151 tcg_gen_andi_i64(t
, t
, mask
);
4152 tcg_gen_andi_i64(d
, d
, ~mask
);
4153 tcg_gen_or_i64(d
, d
, t
);
4154 tcg_temp_free_i64(t
);
4157 static void gen_shl16_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4159 uint64_t mask
= dup_const(MO_16
, 0xffff << shift
);
4160 TCGv_i64 t
= tcg_temp_new_i64();
4162 tcg_gen_shli_i64(t
, a
, shift
);
4163 tcg_gen_andi_i64(t
, t
, mask
);
4164 tcg_gen_andi_i64(d
, d
, ~mask
);
4165 tcg_gen_or_i64(d
, d
, t
);
4166 tcg_temp_free_i64(t
);
4169 static void gen_shl32_ins_i32(TCGv_i32 d
, TCGv_i32 a
, int32_t shift
)
4171 tcg_gen_deposit_i32(d
, d
, a
, shift
, 32 - shift
);
4174 static void gen_shl64_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4176 tcg_gen_deposit_i64(d
, d
, a
, shift
, 64 - shift
);
4179 static void gen_shl_ins_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, int64_t sh
)
4182 tcg_gen_mov_vec(d
, a
);
4184 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
4185 TCGv_vec m
= tcg_temp_new_vec_matching(d
);
4187 tcg_gen_dupi_vec(vece
, m
, MAKE_64BIT_MASK(0, sh
));
4188 tcg_gen_shli_vec(vece
, t
, a
, sh
);
4189 tcg_gen_and_vec(vece
, d
, d
, m
);
4190 tcg_gen_or_vec(vece
, d
, d
, t
);
4192 tcg_temp_free_vec(t
);
4193 tcg_temp_free_vec(m
);
4197 static const TCGOpcode vecop_list_sli
[] = { INDEX_op_shli_vec
, 0 };
4199 const GVecGen2i sli_op
[4] = {
4200 { .fni8
= gen_shl8_ins_i64
,
4201 .fniv
= gen_shl_ins_vec
,
4203 .opt_opc
= vecop_list_sli
,
4205 { .fni8
= gen_shl16_ins_i64
,
4206 .fniv
= gen_shl_ins_vec
,
4208 .opt_opc
= vecop_list_sli
,
4210 { .fni4
= gen_shl32_ins_i32
,
4211 .fniv
= gen_shl_ins_vec
,
4213 .opt_opc
= vecop_list_sli
,
4215 { .fni8
= gen_shl64_ins_i64
,
4216 .fniv
= gen_shl_ins_vec
,
4217 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
4219 .opt_opc
= vecop_list_sli
,
4223 static void gen_mla8_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
4225 gen_helper_neon_mul_u8(a
, a
, b
);
4226 gen_helper_neon_add_u8(d
, d
, a
);
4229 static void gen_mls8_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
4231 gen_helper_neon_mul_u8(a
, a
, b
);
4232 gen_helper_neon_sub_u8(d
, d
, a
);
4235 static void gen_mla16_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
4237 gen_helper_neon_mul_u16(a
, a
, b
);
4238 gen_helper_neon_add_u16(d
, d
, a
);
4241 static void gen_mls16_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
4243 gen_helper_neon_mul_u16(a
, a
, b
);
4244 gen_helper_neon_sub_u16(d
, d
, a
);
4247 static void gen_mla32_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
4249 tcg_gen_mul_i32(a
, a
, b
);
4250 tcg_gen_add_i32(d
, d
, a
);
4253 static void gen_mls32_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
4255 tcg_gen_mul_i32(a
, a
, b
);
4256 tcg_gen_sub_i32(d
, d
, a
);
4259 static void gen_mla64_i64(TCGv_i64 d
, TCGv_i64 a
, TCGv_i64 b
)
4261 tcg_gen_mul_i64(a
, a
, b
);
4262 tcg_gen_add_i64(d
, d
, a
);
4265 static void gen_mls64_i64(TCGv_i64 d
, TCGv_i64 a
, TCGv_i64 b
)
4267 tcg_gen_mul_i64(a
, a
, b
);
4268 tcg_gen_sub_i64(d
, d
, a
);
4271 static void gen_mla_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, TCGv_vec b
)
4273 tcg_gen_mul_vec(vece
, a
, a
, b
);
4274 tcg_gen_add_vec(vece
, d
, d
, a
);
4277 static void gen_mls_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, TCGv_vec b
)
4279 tcg_gen_mul_vec(vece
, a
, a
, b
);
4280 tcg_gen_sub_vec(vece
, d
, d
, a
);
4283 /* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
4284 * these tables are shared with AArch64 which does support them.
4287 static const TCGOpcode vecop_list_mla
[] = {
4288 INDEX_op_mul_vec
, INDEX_op_add_vec
, 0
4291 static const TCGOpcode vecop_list_mls
[] = {
4292 INDEX_op_mul_vec
, INDEX_op_sub_vec
, 0
4295 const GVecGen3 mla_op
[4] = {
4296 { .fni4
= gen_mla8_i32
,
4297 .fniv
= gen_mla_vec
,
4299 .opt_opc
= vecop_list_mla
,
4301 { .fni4
= gen_mla16_i32
,
4302 .fniv
= gen_mla_vec
,
4304 .opt_opc
= vecop_list_mla
,
4306 { .fni4
= gen_mla32_i32
,
4307 .fniv
= gen_mla_vec
,
4309 .opt_opc
= vecop_list_mla
,
4311 { .fni8
= gen_mla64_i64
,
4312 .fniv
= gen_mla_vec
,
4313 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
4315 .opt_opc
= vecop_list_mla
,
4319 const GVecGen3 mls_op
[4] = {
4320 { .fni4
= gen_mls8_i32
,
4321 .fniv
= gen_mls_vec
,
4323 .opt_opc
= vecop_list_mls
,
4325 { .fni4
= gen_mls16_i32
,
4326 .fniv
= gen_mls_vec
,
4328 .opt_opc
= vecop_list_mls
,
4330 { .fni4
= gen_mls32_i32
,
4331 .fniv
= gen_mls_vec
,
4333 .opt_opc
= vecop_list_mls
,
4335 { .fni8
= gen_mls64_i64
,
4336 .fniv
= gen_mls_vec
,
4337 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
4339 .opt_opc
= vecop_list_mls
,
4343 /* CMTST : test is "if (X & Y != 0)". */
4344 static void gen_cmtst_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
4346 tcg_gen_and_i32(d
, a
, b
);
4347 tcg_gen_setcondi_i32(TCG_COND_NE
, d
, d
, 0);
4348 tcg_gen_neg_i32(d
, d
);
4351 void gen_cmtst_i64(TCGv_i64 d
, TCGv_i64 a
, TCGv_i64 b
)
4353 tcg_gen_and_i64(d
, a
, b
);
4354 tcg_gen_setcondi_i64(TCG_COND_NE
, d
, d
, 0);
4355 tcg_gen_neg_i64(d
, d
);
4358 static void gen_cmtst_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, TCGv_vec b
)
4360 tcg_gen_and_vec(vece
, d
, a
, b
);
4361 tcg_gen_dupi_vec(vece
, a
, 0);
4362 tcg_gen_cmp_vec(TCG_COND_NE
, vece
, d
, d
, a
);
4365 static const TCGOpcode vecop_list_cmtst
[] = { INDEX_op_cmp_vec
, 0 };
4367 const GVecGen3 cmtst_op
[4] = {
4368 { .fni4
= gen_helper_neon_tst_u8
,
4369 .fniv
= gen_cmtst_vec
,
4370 .opt_opc
= vecop_list_cmtst
,
4372 { .fni4
= gen_helper_neon_tst_u16
,
4373 .fniv
= gen_cmtst_vec
,
4374 .opt_opc
= vecop_list_cmtst
,
4376 { .fni4
= gen_cmtst_i32
,
4377 .fniv
= gen_cmtst_vec
,
4378 .opt_opc
= vecop_list_cmtst
,
4380 { .fni8
= gen_cmtst_i64
,
4381 .fniv
= gen_cmtst_vec
,
4382 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
4383 .opt_opc
= vecop_list_cmtst
,
4387 void gen_ushl_i32(TCGv_i32 dst
, TCGv_i32 src
, TCGv_i32 shift
)
4389 TCGv_i32 lval
= tcg_temp_new_i32();
4390 TCGv_i32 rval
= tcg_temp_new_i32();
4391 TCGv_i32 lsh
= tcg_temp_new_i32();
4392 TCGv_i32 rsh
= tcg_temp_new_i32();
4393 TCGv_i32 zero
= tcg_const_i32(0);
4394 TCGv_i32 max
= tcg_const_i32(32);
4397 * Rely on the TCG guarantee that out of range shifts produce
4398 * unspecified results, not undefined behaviour (i.e. no trap).
4399 * Discard out-of-range results after the fact.
4401 tcg_gen_ext8s_i32(lsh
, shift
);
4402 tcg_gen_neg_i32(rsh
, lsh
);
4403 tcg_gen_shl_i32(lval
, src
, lsh
);
4404 tcg_gen_shr_i32(rval
, src
, rsh
);
4405 tcg_gen_movcond_i32(TCG_COND_LTU
, dst
, lsh
, max
, lval
, zero
);
4406 tcg_gen_movcond_i32(TCG_COND_LTU
, dst
, rsh
, max
, rval
, dst
);
4408 tcg_temp_free_i32(lval
);
4409 tcg_temp_free_i32(rval
);
4410 tcg_temp_free_i32(lsh
);
4411 tcg_temp_free_i32(rsh
);
4412 tcg_temp_free_i32(zero
);
4413 tcg_temp_free_i32(max
);
4416 void gen_ushl_i64(TCGv_i64 dst
, TCGv_i64 src
, TCGv_i64 shift
)
4418 TCGv_i64 lval
= tcg_temp_new_i64();
4419 TCGv_i64 rval
= tcg_temp_new_i64();
4420 TCGv_i64 lsh
= tcg_temp_new_i64();
4421 TCGv_i64 rsh
= tcg_temp_new_i64();
4422 TCGv_i64 zero
= tcg_const_i64(0);
4423 TCGv_i64 max
= tcg_const_i64(64);
4426 * Rely on the TCG guarantee that out of range shifts produce
4427 * unspecified results, not undefined behaviour (i.e. no trap).
4428 * Discard out-of-range results after the fact.
4430 tcg_gen_ext8s_i64(lsh
, shift
);
4431 tcg_gen_neg_i64(rsh
, lsh
);
4432 tcg_gen_shl_i64(lval
, src
, lsh
);
4433 tcg_gen_shr_i64(rval
, src
, rsh
);
4434 tcg_gen_movcond_i64(TCG_COND_LTU
, dst
, lsh
, max
, lval
, zero
);
4435 tcg_gen_movcond_i64(TCG_COND_LTU
, dst
, rsh
, max
, rval
, dst
);
4437 tcg_temp_free_i64(lval
);
4438 tcg_temp_free_i64(rval
);
4439 tcg_temp_free_i64(lsh
);
4440 tcg_temp_free_i64(rsh
);
4441 tcg_temp_free_i64(zero
);
4442 tcg_temp_free_i64(max
);
4445 static void gen_ushl_vec(unsigned vece
, TCGv_vec dst
,
4446 TCGv_vec src
, TCGv_vec shift
)
4448 TCGv_vec lval
= tcg_temp_new_vec_matching(dst
);
4449 TCGv_vec rval
= tcg_temp_new_vec_matching(dst
);
4450 TCGv_vec lsh
= tcg_temp_new_vec_matching(dst
);
4451 TCGv_vec rsh
= tcg_temp_new_vec_matching(dst
);
4454 tcg_gen_neg_vec(vece
, rsh
, shift
);
4456 tcg_gen_mov_vec(lsh
, shift
);
4458 msk
= tcg_temp_new_vec_matching(dst
);
4459 tcg_gen_dupi_vec(vece
, msk
, 0xff);
4460 tcg_gen_and_vec(vece
, lsh
, shift
, msk
);
4461 tcg_gen_and_vec(vece
, rsh
, rsh
, msk
);
4462 tcg_temp_free_vec(msk
);
4466 * Rely on the TCG guarantee that out of range shifts produce
4467 * unspecified results, not undefined behaviour (i.e. no trap).
4468 * Discard out-of-range results after the fact.
4470 tcg_gen_shlv_vec(vece
, lval
, src
, lsh
);
4471 tcg_gen_shrv_vec(vece
, rval
, src
, rsh
);
4473 max
= tcg_temp_new_vec_matching(dst
);
4474 tcg_gen_dupi_vec(vece
, max
, 8 << vece
);
4477 * The choice of LT (signed) and GEU (unsigned) are biased toward
4478 * the instructions of the x86_64 host. For MO_8, the whole byte
4479 * is significant so we must use an unsigned compare; otherwise we
4480 * have already masked to a byte and so a signed compare works.
4481 * Other tcg hosts have a full set of comparisons and do not care.
4484 tcg_gen_cmp_vec(TCG_COND_GEU
, vece
, lsh
, lsh
, max
);
4485 tcg_gen_cmp_vec(TCG_COND_GEU
, vece
, rsh
, rsh
, max
);
4486 tcg_gen_andc_vec(vece
, lval
, lval
, lsh
);
4487 tcg_gen_andc_vec(vece
, rval
, rval
, rsh
);
4489 tcg_gen_cmp_vec(TCG_COND_LT
, vece
, lsh
, lsh
, max
);
4490 tcg_gen_cmp_vec(TCG_COND_LT
, vece
, rsh
, rsh
, max
);
4491 tcg_gen_and_vec(vece
, lval
, lval
, lsh
);
4492 tcg_gen_and_vec(vece
, rval
, rval
, rsh
);
4494 tcg_gen_or_vec(vece
, dst
, lval
, rval
);
4496 tcg_temp_free_vec(max
);
4497 tcg_temp_free_vec(lval
);
4498 tcg_temp_free_vec(rval
);
4499 tcg_temp_free_vec(lsh
);
4500 tcg_temp_free_vec(rsh
);

static const TCGOpcode ushl_list[] = {
    INDEX_op_neg_vec, INDEX_op_shlv_vec,
    INDEX_op_shrv_vec, INDEX_op_cmp_vec, 0
};

const GVecGen3 ushl_op[4] = {
    { .fniv = gen_ushl_vec,
      .fno = gen_helper_gvec_ushl_b,
      .opt_opc = ushl_list,
      .vece = MO_8 },
    { .fniv = gen_ushl_vec,
      .fno = gen_helper_gvec_ushl_h,
      .opt_opc = ushl_list,
      .vece = MO_16 },
    { .fni4 = gen_ushl_i32,
      .fniv = gen_ushl_vec,
      .opt_opc = ushl_list,
      .vece = MO_32 },
    { .fni8 = gen_ushl_i64,
      .fniv = gen_ushl_vec,
      .opt_opc = ushl_list,
      .vece = MO_64 },
};
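
/*
 * These GVecGen3 descriptors are indexed by element size and expanded with
 * tcg_gen_gvec_3().  The Neon VSHL (register) path later in this file does,
 * with the operands reversed as the comment there notes:
 *
 *     tcg_gen_gvec_3(rd_ofs, rm_ofs, rn_ofs, vec_size, vec_size,
 *                    u ? &ushl_op[size] : &sshl_op[size]);
 */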

void gen_sshl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
{
    TCGv_i32 lval = tcg_temp_new_i32();
    TCGv_i32 rval = tcg_temp_new_i32();
    TCGv_i32 lsh = tcg_temp_new_i32();
    TCGv_i32 rsh = tcg_temp_new_i32();
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 max = tcg_const_i32(31);

    /*
     * Rely on the TCG guarantee that out of range shifts produce
     * unspecified results, not undefined behaviour (i.e. no trap).
     * Discard out-of-range results after the fact.
     */
    tcg_gen_ext8s_i32(lsh, shift);
    tcg_gen_neg_i32(rsh, lsh);
    tcg_gen_shl_i32(lval, src, lsh);
    tcg_gen_umin_i32(rsh, rsh, max);
    tcg_gen_sar_i32(rval, src, rsh);
    tcg_gen_movcond_i32(TCG_COND_LEU, lval, lsh, max, lval, zero);
    tcg_gen_movcond_i32(TCG_COND_LT, dst, lsh, zero, rval, lval);

    tcg_temp_free_i32(lval);
    tcg_temp_free_i32(rval);
    tcg_temp_free_i32(lsh);
    tcg_temp_free_i32(rsh);
    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(max);
}
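
/*
 * Note the difference from the unsigned helper above: the right-shift
 * count is clamped with umin to 31 (element size minus one), so a negative
 * shift of magnitude >= 32 still performs an arithmetic shift by 31 and
 * yields 0 or -1 according to the sign of the input, while an oversized
 * left shift is squashed to zero by the LEU movcond.
 */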

void gen_sshl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift)
{
    TCGv_i64 lval = tcg_temp_new_i64();
    TCGv_i64 rval = tcg_temp_new_i64();
    TCGv_i64 lsh = tcg_temp_new_i64();
    TCGv_i64 rsh = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);
    TCGv_i64 max = tcg_const_i64(63);

    /*
     * Rely on the TCG guarantee that out of range shifts produce
     * unspecified results, not undefined behaviour (i.e. no trap).
     * Discard out-of-range results after the fact.
     */
    tcg_gen_ext8s_i64(lsh, shift);
    tcg_gen_neg_i64(rsh, lsh);
    tcg_gen_shl_i64(lval, src, lsh);
    tcg_gen_umin_i64(rsh, rsh, max);
    tcg_gen_sar_i64(rval, src, rsh);
    tcg_gen_movcond_i64(TCG_COND_LEU, lval, lsh, max, lval, zero);
    tcg_gen_movcond_i64(TCG_COND_LT, dst, lsh, zero, rval, lval);

    tcg_temp_free_i64(lval);
    tcg_temp_free_i64(rval);
    tcg_temp_free_i64(lsh);
    tcg_temp_free_i64(rsh);
    tcg_temp_free_i64(zero);
    tcg_temp_free_i64(max);
}

static void gen_sshl_vec(unsigned vece, TCGv_vec dst,
                         TCGv_vec src, TCGv_vec shift)
{
    TCGv_vec lval = tcg_temp_new_vec_matching(dst);
    TCGv_vec rval = tcg_temp_new_vec_matching(dst);
    TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
    TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
    TCGv_vec tmp = tcg_temp_new_vec_matching(dst);

    /*
     * Rely on the TCG guarantee that out of range shifts produce
     * unspecified results, not undefined behaviour (i.e. no trap).
     * Discard out-of-range results after the fact.
     */
    tcg_gen_neg_vec(vece, rsh, shift);
    if (vece == MO_8) {
        tcg_gen_mov_vec(lsh, shift);
    } else {
        tcg_gen_dupi_vec(vece, tmp, 0xff);
        tcg_gen_and_vec(vece, lsh, shift, tmp);
        tcg_gen_and_vec(vece, rsh, rsh, tmp);
    }

    /* Bound rsh so out of bound right shift gets -1.  */
    tcg_gen_dupi_vec(vece, tmp, (8 << vece) - 1);
    tcg_gen_umin_vec(vece, rsh, rsh, tmp);
    tcg_gen_cmp_vec(TCG_COND_GT, vece, tmp, lsh, tmp);

    tcg_gen_shlv_vec(vece, lval, src, lsh);
    tcg_gen_sarv_vec(vece, rval, src, rsh);

    /* Select in-bound left shift.  */
    tcg_gen_andc_vec(vece, lval, lval, tmp);

    /* Select between left and right shift.  */
    if (vece == MO_8) {
        tcg_gen_dupi_vec(vece, tmp, 0);
        tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, rval, lval);
    } else {
        tcg_gen_dupi_vec(vece, tmp, 0x80);
        tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, lval, rval);
    }

    tcg_temp_free_vec(lval);
    tcg_temp_free_vec(rval);
    tcg_temp_free_vec(lsh);
    tcg_temp_free_vec(rsh);
    tcg_temp_free_vec(tmp);
}
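
/*
 * tmp serves three roles above: the 0xff mask applied to the shift count,
 * the (element size - 1) bound used to clamp the right-shift count, and
 * the all-ones lane mask produced by the GT compare for counts whose left
 * shift would be out of range (cleared from lval by the andc).  The final
 * cmpsel selects the arithmetic-right-shift result for lanes with a
 * negative count: for MO_8 the sign test is made directly against zero,
 * for wider elements against 0x80 because the count has already been
 * masked down to a byte.
 */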

static const TCGOpcode sshl_list[] = {
    INDEX_op_neg_vec, INDEX_op_umin_vec, INDEX_op_shlv_vec,
    INDEX_op_sarv_vec, INDEX_op_cmp_vec, INDEX_op_cmpsel_vec, 0
};

const GVecGen3 sshl_op[4] = {
    { .fniv = gen_sshl_vec,
      .fno = gen_helper_gvec_sshl_b,
      .opt_opc = sshl_list,
      .vece = MO_8 },
    { .fniv = gen_sshl_vec,
      .fno = gen_helper_gvec_sshl_h,
      .opt_opc = sshl_list,
      .vece = MO_16 },
    { .fni4 = gen_sshl_i32,
      .fniv = gen_sshl_vec,
      .opt_opc = sshl_list,
      .vece = MO_32 },
    { .fni8 = gen_sshl_i64,
      .fniv = gen_sshl_vec,
      .opt_opc = sshl_list,
      .vece = MO_64 },
};

static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_usadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}
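
/*
 * The pattern above (shared by the sqadd/uqsub/sqsub helpers below)
 * computes both the wrapping and the saturating result, compares them,
 * and ORs the all-ones mismatch mask into 'sat'.  The gvec expansions in
 * disas_neon_data_insn() point 'sat' at vfp.qc, so any lane that
 * saturates sets the cumulative QC flag.
 */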

static const TCGOpcode vecop_list_uqadd[] = {
    INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

const GVecGen4 uqadd_op[4] = {
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_b,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_8 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_h,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_16 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_s,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_32 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_d,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_64 },
};

static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_ssadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_sqadd[] = {
    INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

const GVecGen4 sqadd_op[4] = {
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_b,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_h,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_s,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_d,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_ussub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_uqsub[] = {
    INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

const GVecGen4 uqsub_op[4] = {
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_b,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_h,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_s,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_d,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_sssub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_sqsub[] = {
    INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

const GVecGen4 sqsub_op[4] = {
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_b,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_h,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_s,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_d,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_64 },
};

/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */
static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
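    /*
     * The gvec shift and saturating-arithmetic descriptors defined above
     * (ushl_op/sshl_op and uqadd_op/sqadd_op/uqsub_op/sqsub_op) are
     * consumed by the "three registers of the same length" path below.
     */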
4822 int rd
, rn
, rm
, rd_ofs
, rn_ofs
, rm_ofs
;
4831 TCGv_i32 tmp
, tmp2
, tmp3
, tmp4
, tmp5
;
4832 TCGv_ptr ptr1
, ptr2
, ptr3
;
4835 /* FIXME: this access check should not take precedence over UNDEF
4836 * for invalid encodings; we will generate incorrect syndrome information
4837 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4839 if (s
->fp_excp_el
) {
4840 gen_exception_insn(s
, s
->pc_curr
, EXCP_UDEF
,
4841 syn_simd_access_trap(1, 0xe, false), s
->fp_excp_el
);
4845 if (!s
->vfp_enabled
)
4847 q
= (insn
& (1 << 6)) != 0;
4848 u
= (insn
>> 24) & 1;
4849 VFP_DREG_D(rd
, insn
);
4850 VFP_DREG_N(rn
, insn
);
4851 VFP_DREG_M(rm
, insn
);
4852 size
= (insn
>> 20) & 3;
4853 vec_size
= q
? 16 : 8;
4854 rd_ofs
= neon_reg_offset(rd
, 0);
4855 rn_ofs
= neon_reg_offset(rn
, 0);
4856 rm_ofs
= neon_reg_offset(rm
, 0);
4858 if ((insn
& (1 << 23)) == 0) {
4859 /* Three register same length. */
4860 op
= ((insn
>> 7) & 0x1e) | ((insn
>> 4) & 1);
4861 /* Catch invalid op and bad size combinations: UNDEF */
4862 if ((neon_3r_sizes
[op
] & (1 << size
)) == 0) {
4865 /* All insns of this form UNDEF for either this condition or the
4866 * superset of cases "Q==1"; we catch the latter later.
4868 if (q
&& ((rd
| rn
| rm
) & 1)) {
4873 /* The SHA-1/SHA-256 3-register instructions require special
4874 * treatment here, as their size field is overloaded as an
4875 * op type selector, and they all consume their input in a
4881 if (!u
) { /* SHA-1 */
4882 if (!dc_isar_feature(aa32_sha1
, s
)) {
4885 ptr1
= vfp_reg_ptr(true, rd
);
4886 ptr2
= vfp_reg_ptr(true, rn
);
4887 ptr3
= vfp_reg_ptr(true, rm
);
4888 tmp4
= tcg_const_i32(size
);
4889 gen_helper_crypto_sha1_3reg(ptr1
, ptr2
, ptr3
, tmp4
);
4890 tcg_temp_free_i32(tmp4
);
4891 } else { /* SHA-256 */
4892 if (!dc_isar_feature(aa32_sha2
, s
) || size
== 3) {
4895 ptr1
= vfp_reg_ptr(true, rd
);
4896 ptr2
= vfp_reg_ptr(true, rn
);
4897 ptr3
= vfp_reg_ptr(true, rm
);
4900 gen_helper_crypto_sha256h(ptr1
, ptr2
, ptr3
);
4903 gen_helper_crypto_sha256h2(ptr1
, ptr2
, ptr3
);
4906 gen_helper_crypto_sha256su1(ptr1
, ptr2
, ptr3
);
4910 tcg_temp_free_ptr(ptr1
);
4911 tcg_temp_free_ptr(ptr2
);
4912 tcg_temp_free_ptr(ptr3
);
4915 case NEON_3R_VPADD_VQRDMLAH
:
4922 return do_v81_helper(s
, gen_helper_gvec_qrdmlah_s16
,
4925 return do_v81_helper(s
, gen_helper_gvec_qrdmlah_s32
,
4930 case NEON_3R_VFM_VQRDMLSH
:
4941 return do_v81_helper(s
, gen_helper_gvec_qrdmlsh_s16
,
4944 return do_v81_helper(s
, gen_helper_gvec_qrdmlsh_s32
,
4949 case NEON_3R_LOGIC
: /* Logic ops. */
4950 switch ((u
<< 2) | size
) {
4952 tcg_gen_gvec_and(0, rd_ofs
, rn_ofs
, rm_ofs
,
4953 vec_size
, vec_size
);
4956 tcg_gen_gvec_andc(0, rd_ofs
, rn_ofs
, rm_ofs
,
4957 vec_size
, vec_size
);
4960 tcg_gen_gvec_or(0, rd_ofs
, rn_ofs
, rm_ofs
,
4961 vec_size
, vec_size
);
4964 tcg_gen_gvec_orc(0, rd_ofs
, rn_ofs
, rm_ofs
,
4965 vec_size
, vec_size
);
4968 tcg_gen_gvec_xor(0, rd_ofs
, rn_ofs
, rm_ofs
,
4969 vec_size
, vec_size
);
4972 tcg_gen_gvec_bitsel(MO_8
, rd_ofs
, rd_ofs
, rn_ofs
, rm_ofs
,
4973 vec_size
, vec_size
);
4976 tcg_gen_gvec_bitsel(MO_8
, rd_ofs
, rm_ofs
, rn_ofs
, rd_ofs
,
4977 vec_size
, vec_size
);
4980 tcg_gen_gvec_bitsel(MO_8
, rd_ofs
, rm_ofs
, rd_ofs
, rn_ofs
,
4981 vec_size
, vec_size
);
4986 case NEON_3R_VADD_VSUB
:
4988 tcg_gen_gvec_sub(size
, rd_ofs
, rn_ofs
, rm_ofs
,
4989 vec_size
, vec_size
);
4991 tcg_gen_gvec_add(size
, rd_ofs
, rn_ofs
, rm_ofs
,
4992 vec_size
, vec_size
);
4997 tcg_gen_gvec_4(rd_ofs
, offsetof(CPUARMState
, vfp
.qc
),
4998 rn_ofs
, rm_ofs
, vec_size
, vec_size
,
4999 (u
? uqadd_op
: sqadd_op
) + size
);
5003 tcg_gen_gvec_4(rd_ofs
, offsetof(CPUARMState
, vfp
.qc
),
5004 rn_ofs
, rm_ofs
, vec_size
, vec_size
,
5005 (u
? uqsub_op
: sqsub_op
) + size
);
5008 case NEON_3R_VMUL
: /* VMUL */
5010 /* Polynomial case allows only P8. */
5014 tcg_gen_gvec_3_ool(rd_ofs
, rn_ofs
, rm_ofs
, vec_size
, vec_size
,
5015 0, gen_helper_gvec_pmul_b
);
5017 tcg_gen_gvec_mul(size
, rd_ofs
, rn_ofs
, rm_ofs
,
5018 vec_size
, vec_size
);
5022 case NEON_3R_VML
: /* VMLA, VMLS */
5023 tcg_gen_gvec_3(rd_ofs
, rn_ofs
, rm_ofs
, vec_size
, vec_size
,
5024 u
? &mls_op
[size
] : &mla_op
[size
]);
5027 case NEON_3R_VTST_VCEQ
:
5029 tcg_gen_gvec_cmp(TCG_COND_EQ
, size
, rd_ofs
, rn_ofs
, rm_ofs
,
5030 vec_size
, vec_size
);
5032 tcg_gen_gvec_3(rd_ofs
, rn_ofs
, rm_ofs
,
5033 vec_size
, vec_size
, &cmtst_op
[size
]);
5038 tcg_gen_gvec_cmp(u
? TCG_COND_GTU
: TCG_COND_GT
, size
,
5039 rd_ofs
, rn_ofs
, rm_ofs
, vec_size
, vec_size
);
5043 tcg_gen_gvec_cmp(u
? TCG_COND_GEU
: TCG_COND_GE
, size
,
5044 rd_ofs
, rn_ofs
, rm_ofs
, vec_size
, vec_size
);
5049 tcg_gen_gvec_umax(size
, rd_ofs
, rn_ofs
, rm_ofs
,
5050 vec_size
, vec_size
);
5052 tcg_gen_gvec_smax(size
, rd_ofs
, rn_ofs
, rm_ofs
,
5053 vec_size
, vec_size
);
5058 tcg_gen_gvec_umin(size
, rd_ofs
, rn_ofs
, rm_ofs
,
5059 vec_size
, vec_size
);
5061 tcg_gen_gvec_smin(size
, rd_ofs
, rn_ofs
, rm_ofs
,
5062 vec_size
, vec_size
);
5067 /* Note the operation is vshl vd,vm,vn */
5068 tcg_gen_gvec_3(rd_ofs
, rm_ofs
, rn_ofs
, vec_size
, vec_size
,
5069 u
? &ushl_op
[size
] : &sshl_op
[size
]);
5074 /* 64-bit element instructions. */
5075 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5076 neon_load_reg64(cpu_V0
, rn
+ pass
);
5077 neon_load_reg64(cpu_V1
, rm
+ pass
);
5081 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
5084 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
5090 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
5092 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
5095 case NEON_3R_VQRSHL
:
5097 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
5100 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
5107 neon_store_reg64(cpu_V0
, rd
+ pass
);
5115 case NEON_3R_VQRSHL
:
5118 /* Shift instruction operands are reversed. */
5124 case NEON_3R_VPADD_VQRDMLAH
:
5129 case NEON_3R_FLOAT_ARITH
:
5130 pairwise
= (u
&& size
< 2); /* if VPADD (float) */
5132 case NEON_3R_FLOAT_MINMAX
:
5133 pairwise
= u
; /* if VPMIN/VPMAX (float) */
5135 case NEON_3R_FLOAT_CMP
:
5137 /* no encoding for U=0 C=1x */
5141 case NEON_3R_FLOAT_ACMP
:
5146 case NEON_3R_FLOAT_MISC
:
5147 /* VMAXNM/VMINNM in ARMv8 */
5148 if (u
&& !arm_dc_feature(s
, ARM_FEATURE_V8
)) {
5152 case NEON_3R_VFM_VQRDMLSH
:
5153 if (!arm_dc_feature(s
, ARM_FEATURE_VFP4
)) {
5161 if (pairwise
&& q
) {
5162 /* All the pairwise insns UNDEF if Q is set */
5166 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5171 tmp
= neon_load_reg(rn
, 0);
5172 tmp2
= neon_load_reg(rn
, 1);
5174 tmp
= neon_load_reg(rm
, 0);
5175 tmp2
= neon_load_reg(rm
, 1);
5179 tmp
= neon_load_reg(rn
, pass
);
5180 tmp2
= neon_load_reg(rm
, pass
);
5184 GEN_NEON_INTEGER_OP(hadd
);
5186 case NEON_3R_VRHADD
:
5187 GEN_NEON_INTEGER_OP(rhadd
);
5190 GEN_NEON_INTEGER_OP(hsub
);
5193 GEN_NEON_INTEGER_OP_ENV(qshl
);
5196 GEN_NEON_INTEGER_OP(rshl
);
5198 case NEON_3R_VQRSHL
:
5199 GEN_NEON_INTEGER_OP_ENV(qrshl
);
5202 GEN_NEON_INTEGER_OP(abd
);
5205 GEN_NEON_INTEGER_OP(abd
);
5206 tcg_temp_free_i32(tmp2
);
5207 tmp2
= neon_load_reg(rd
, pass
);
5208 gen_neon_add(size
, tmp
, tmp2
);
5211 GEN_NEON_INTEGER_OP(pmax
);
5214 GEN_NEON_INTEGER_OP(pmin
);
5216 case NEON_3R_VQDMULH_VQRDMULH
: /* Multiply high. */
5217 if (!u
) { /* VQDMULH */
5220 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5223 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5227 } else { /* VQRDMULH */
5230 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5233 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5239 case NEON_3R_VPADD_VQRDMLAH
:
5241 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
5242 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
5243 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
5247 case NEON_3R_FLOAT_ARITH
: /* Floating point arithmetic. */
5249 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5250 switch ((u
<< 2) | size
) {
5253 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5256 gen_helper_vfp_subs(tmp
, tmp
, tmp2
, fpstatus
);
5259 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
, fpstatus
);
5264 tcg_temp_free_ptr(fpstatus
);
5267 case NEON_3R_FLOAT_MULTIPLY
:
5269 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5270 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
5272 tcg_temp_free_i32(tmp2
);
5273 tmp2
= neon_load_reg(rd
, pass
);
5275 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5277 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
5280 tcg_temp_free_ptr(fpstatus
);
5283 case NEON_3R_FLOAT_CMP
:
5285 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5287 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
5290 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
5292 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
5295 tcg_temp_free_ptr(fpstatus
);
5298 case NEON_3R_FLOAT_ACMP
:
5300 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5302 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
, fpstatus
);
5304 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
5306 tcg_temp_free_ptr(fpstatus
);
5309 case NEON_3R_FLOAT_MINMAX
:
5311 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5313 gen_helper_vfp_maxs(tmp
, tmp
, tmp2
, fpstatus
);
5315 gen_helper_vfp_mins(tmp
, tmp
, tmp2
, fpstatus
);
5317 tcg_temp_free_ptr(fpstatus
);
5320 case NEON_3R_FLOAT_MISC
:
5323 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5325 gen_helper_vfp_maxnums(tmp
, tmp
, tmp2
, fpstatus
);
5327 gen_helper_vfp_minnums(tmp
, tmp
, tmp2
, fpstatus
);
5329 tcg_temp_free_ptr(fpstatus
);
5332 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
5334 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
5338 case NEON_3R_VFM_VQRDMLSH
:
5340 /* VFMA, VFMS: fused multiply-add */
5341 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5342 TCGv_i32 tmp3
= neon_load_reg(rd
, pass
);
5345 gen_helper_vfp_negs(tmp
, tmp
);
5347 gen_helper_vfp_muladds(tmp
, tmp
, tmp2
, tmp3
, fpstatus
);
5348 tcg_temp_free_i32(tmp3
);
5349 tcg_temp_free_ptr(fpstatus
);
5355 tcg_temp_free_i32(tmp2
);
5357 /* Save the result. For elementwise operations we can put it
5358 straight into the destination register. For pairwise operations
5359 we have to be careful to avoid clobbering the source operands. */
5360 if (pairwise
&& rd
== rm
) {
5361 neon_store_scratch(pass
, tmp
);
5363 neon_store_reg(rd
, pass
, tmp
);
5367 if (pairwise
&& rd
== rm
) {
5368 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5369 tmp
= neon_load_scratch(pass
);
5370 neon_store_reg(rd
, pass
, tmp
);
5373 /* End of 3 register same size operations. */
5374 } else if (insn
& (1 << 4)) {
5375 if ((insn
& 0x00380080) != 0) {
5376 /* Two registers and shift. */
5377 op
= (insn
>> 8) & 0xf;
5378 if (insn
& (1 << 7)) {
5386 while ((insn
& (1 << (size
+ 19))) == 0)
5389 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
5391 /* Shift by immediate:
5392 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5393 if (q
&& ((rd
| rm
) & 1)) {
5396 if (!u
&& (op
== 4 || op
== 6)) {
5399 /* Right shifts are encoded as N - shift, where N is the
5400 element size in bits. */
5402 shift
= shift
- (1 << (size
+ 3));
5407 /* Right shift comes here negative. */
5409 /* Shifts larger than the element size are architecturally
5410 * valid. Unsigned results in all zeros; signed results
5414 tcg_gen_gvec_sari(size
, rd_ofs
, rm_ofs
,
5415 MIN(shift
, (8 << size
) - 1),
5416 vec_size
, vec_size
);
5417 } else if (shift
>= 8 << size
) {
5418 tcg_gen_gvec_dup8i(rd_ofs
, vec_size
, vec_size
, 0);
5420 tcg_gen_gvec_shri(size
, rd_ofs
, rm_ofs
, shift
,
5421 vec_size
, vec_size
);
5426 /* Right shift comes here negative. */
5428 /* Shifts larger than the element size are architecturally
5429 * valid. Unsigned results in all zeros; signed results
5433 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, vec_size
, vec_size
,
5434 MIN(shift
, (8 << size
) - 1),
5436 } else if (shift
>= 8 << size
) {
5439 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, vec_size
, vec_size
,
5440 shift
, &usra_op
[size
]);
5448 /* Right shift comes here negative. */
5450 /* Shift out of range leaves destination unchanged. */
5451 if (shift
< 8 << size
) {
5452 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, vec_size
, vec_size
,
5453 shift
, &sri_op
[size
]);
5457 case 5: /* VSHL, VSLI */
5459 /* Shift out of range leaves destination unchanged. */
5460 if (shift
< 8 << size
) {
5461 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, vec_size
,
5462 vec_size
, shift
, &sli_op
[size
]);
5465 /* Shifts larger than the element size are
5466 * architecturally valid and results in zero.
5468 if (shift
>= 8 << size
) {
5469 tcg_gen_gvec_dup8i(rd_ofs
, vec_size
, vec_size
, 0);
5471 tcg_gen_gvec_shli(size
, rd_ofs
, rm_ofs
, shift
,
5472 vec_size
, vec_size
);
5484 /* To avoid excessive duplication of ops we implement shift
5485 * by immediate using the variable shift operations.
5487 imm
= dup_const(size
, shift
);
5489 for (pass
= 0; pass
< count
; pass
++) {
5491 neon_load_reg64(cpu_V0
, rm
+ pass
);
5492 tcg_gen_movi_i64(cpu_V1
, imm
);
5497 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5499 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
5501 case 6: /* VQSHLU */
5502 gen_helper_neon_qshlu_s64(cpu_V0
, cpu_env
,
5507 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
5510 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
5515 g_assert_not_reached();
5519 neon_load_reg64(cpu_V1
, rd
+ pass
);
5520 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5522 neon_store_reg64(cpu_V0
, rd
+ pass
);
5523 } else { /* size < 3 */
5524 /* Operands in T0 and T1. */
5525 tmp
= neon_load_reg(rm
, pass
);
5526 tmp2
= tcg_temp_new_i32();
5527 tcg_gen_movi_i32(tmp2
, imm
);
5531 GEN_NEON_INTEGER_OP(rshl
);
5533 case 6: /* VQSHLU */
5536 gen_helper_neon_qshlu_s8(tmp
, cpu_env
,
5540 gen_helper_neon_qshlu_s16(tmp
, cpu_env
,
5544 gen_helper_neon_qshlu_s32(tmp
, cpu_env
,
5552 GEN_NEON_INTEGER_OP_ENV(qshl
);
5555 g_assert_not_reached();
5557 tcg_temp_free_i32(tmp2
);
5561 tmp2
= neon_load_reg(rd
, pass
);
5562 gen_neon_add(size
, tmp
, tmp2
);
5563 tcg_temp_free_i32(tmp2
);
5565 neon_store_reg(rd
, pass
, tmp
);
5568 } else if (op
< 10) {
5569 /* Shift by immediate and narrow:
5570 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5571 int input_unsigned
= (op
== 8) ? !u
: u
;
5575 shift
= shift
- (1 << (size
+ 3));
5578 tmp64
= tcg_const_i64(shift
);
5579 neon_load_reg64(cpu_V0
, rm
);
5580 neon_load_reg64(cpu_V1
, rm
+ 1);
5581 for (pass
= 0; pass
< 2; pass
++) {
5589 if (input_unsigned
) {
5590 gen_helper_neon_rshl_u64(cpu_V0
, in
, tmp64
);
5592 gen_helper_neon_rshl_s64(cpu_V0
, in
, tmp64
);
5595 if (input_unsigned
) {
5596 gen_ushl_i64(cpu_V0
, in
, tmp64
);
5598 gen_sshl_i64(cpu_V0
, in
, tmp64
);
5601 tmp
= tcg_temp_new_i32();
5602 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5603 neon_store_reg(rd
, pass
, tmp
);
5605 tcg_temp_free_i64(tmp64
);
5608 imm
= (uint16_t)shift
;
5612 imm
= (uint32_t)shift
;
5614 tmp2
= tcg_const_i32(imm
);
5615 tmp4
= neon_load_reg(rm
+ 1, 0);
5616 tmp5
= neon_load_reg(rm
+ 1, 1);
5617 for (pass
= 0; pass
< 2; pass
++) {
5619 tmp
= neon_load_reg(rm
, 0);
5623 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
,
5626 tmp3
= neon_load_reg(rm
, 1);
5630 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
,
5632 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
5633 tcg_temp_free_i32(tmp
);
5634 tcg_temp_free_i32(tmp3
);
5635 tmp
= tcg_temp_new_i32();
5636 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5637 neon_store_reg(rd
, pass
, tmp
);
5639 tcg_temp_free_i32(tmp2
);
5641 } else if (op
== 10) {
5643 if (q
|| (rd
& 1)) {
5646 tmp
= neon_load_reg(rm
, 0);
5647 tmp2
= neon_load_reg(rm
, 1);
5648 for (pass
= 0; pass
< 2; pass
++) {
5652 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5655 /* The shift is less than the width of the source
5656 type, so we can just shift the whole register. */
5657 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
5658 /* Widen the result of shift: we need to clear
5659 * the potential overflow bits resulting from
5660 * left bits of the narrow input appearing as
5661 * right bits of left the neighbour narrow
5663 if (size
< 2 || !u
) {
5666 imm
= (0xffu
>> (8 - shift
));
5668 } else if (size
== 1) {
5669 imm
= 0xffff >> (16 - shift
);
5672 imm
= 0xffffffff >> (32 - shift
);
5675 imm64
= imm
| (((uint64_t)imm
) << 32);
5679 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, ~imm64
);
5682 neon_store_reg64(cpu_V0
, rd
+ pass
);
5684 } else if (op
>= 14) {
5685 /* VCVT fixed-point. */
5688 VFPGenFixPointFn
*fn
;
5690 if (!(insn
& (1 << 21)) || (q
&& ((rd
| rm
) & 1))) {
5696 fn
= gen_helper_vfp_ultos
;
5698 fn
= gen_helper_vfp_sltos
;
5702 fn
= gen_helper_vfp_touls_round_to_zero
;
5704 fn
= gen_helper_vfp_tosls_round_to_zero
;
5708 /* We have already masked out the must-be-1 top bit of imm6,
5709 * hence this 32-shift where the ARM ARM has 64-imm6.
5712 fpst
= get_fpstatus_ptr(1);
5713 shiftv
= tcg_const_i32(shift
);
5714 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5715 TCGv_i32 tmpf
= neon_load_reg(rm
, pass
);
5716 fn(tmpf
, tmpf
, shiftv
, fpst
);
5717 neon_store_reg(rd
, pass
, tmpf
);
5719 tcg_temp_free_ptr(fpst
);
5720 tcg_temp_free_i32(shiftv
);
5724 } else { /* (insn & 0x00380080) == 0 */
5725 int invert
, reg_ofs
, vec_size
;
5727 if (q
&& (rd
& 1)) {
5731 op
= (insn
>> 8) & 0xf;
5732 /* One register and immediate. */
5733 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
5734 invert
= (insn
& (1 << 5)) != 0;
5735 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5736 * We choose to not special-case this and will behave as if a
5737 * valid constant encoding of 0 had been given.
5756 imm
= (imm
<< 8) | (imm
<< 24);
5759 imm
= (imm
<< 8) | 0xff;
5762 imm
= (imm
<< 16) | 0xffff;
5765 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
5774 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
5775 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
5782 reg_ofs
= neon_reg_offset(rd
, 0);
5783 vec_size
= q
? 16 : 8;
5785 if (op
& 1 && op
< 12) {
5787 /* The immediate value has already been inverted,
5788 * so BIC becomes AND.
5790 tcg_gen_gvec_andi(MO_32
, reg_ofs
, reg_ofs
, imm
,
5791 vec_size
, vec_size
);
5793 tcg_gen_gvec_ori(MO_32
, reg_ofs
, reg_ofs
, imm
,
5794 vec_size
, vec_size
);
5798 if (op
== 14 && invert
) {
5799 TCGv_i64 t64
= tcg_temp_new_i64();
5801 for (pass
= 0; pass
<= q
; ++pass
) {
5805 for (n
= 0; n
< 8; n
++) {
5806 if (imm
& (1 << (n
+ pass
* 8))) {
5807 val
|= 0xffull
<< (n
* 8);
5810 tcg_gen_movi_i64(t64
, val
);
5811 neon_store_reg64(t64
, rd
+ pass
);
5813 tcg_temp_free_i64(t64
);
5815 tcg_gen_gvec_dup32i(reg_ofs
, vec_size
, vec_size
, imm
);
5819 } else { /* (insn & 0x00800010 == 0x00800000) */
5821 op
= (insn
>> 8) & 0xf;
5822 if ((insn
& (1 << 6)) == 0) {
5823 /* Three registers of different lengths. */
5827 /* undefreq: bit 0 : UNDEF if size == 0
5828 * bit 1 : UNDEF if size == 1
5829 * bit 2 : UNDEF if size == 2
5830 * bit 3 : UNDEF if U == 1
5831 * Note that [2:0] set implies 'always UNDEF'
5834 /* prewiden, src1_wide, src2_wide, undefreq */
5835 static const int neon_3reg_wide
[16][4] = {
5836 {1, 0, 0, 0}, /* VADDL */
5837 {1, 1, 0, 0}, /* VADDW */
5838 {1, 0, 0, 0}, /* VSUBL */
5839 {1, 1, 0, 0}, /* VSUBW */
5840 {0, 1, 1, 0}, /* VADDHN */
5841 {0, 0, 0, 0}, /* VABAL */
5842 {0, 1, 1, 0}, /* VSUBHN */
5843 {0, 0, 0, 0}, /* VABDL */
5844 {0, 0, 0, 0}, /* VMLAL */
5845 {0, 0, 0, 9}, /* VQDMLAL */
5846 {0, 0, 0, 0}, /* VMLSL */
5847 {0, 0, 0, 9}, /* VQDMLSL */
5848 {0, 0, 0, 0}, /* Integer VMULL */
5849 {0, 0, 0, 1}, /* VQDMULL */
5850 {0, 0, 0, 0xa}, /* Polynomial VMULL */
5851 {0, 0, 0, 7}, /* Reserved: always UNDEF */
5854 prewiden
= neon_3reg_wide
[op
][0];
5855 src1_wide
= neon_3reg_wide
[op
][1];
5856 src2_wide
= neon_3reg_wide
[op
][2];
5857 undefreq
= neon_3reg_wide
[op
][3];
5859 if ((undefreq
& (1 << size
)) ||
5860 ((undefreq
& 8) && u
)) {
5863 if ((src1_wide
&& (rn
& 1)) ||
5864 (src2_wide
&& (rm
& 1)) ||
5865 (!src2_wide
&& (rd
& 1))) {
5869 /* Handle polynomial VMULL in a single pass. */
5873 tcg_gen_gvec_3_ool(rd_ofs
, rn_ofs
, rm_ofs
, 16, 16,
5874 0, gen_helper_neon_pmull_h
);
5877 if (!dc_isar_feature(aa32_pmull
, s
)) {
5880 tcg_gen_gvec_3_ool(rd_ofs
, rn_ofs
, rm_ofs
, 16, 16,
5881 0, gen_helper_gvec_pmull_q
);
5886 /* Avoid overlapping operands. Wide source operands are
5887 always aligned so will never overlap with wide
5888 destinations in problematic ways. */
5889 if (rd
== rm
&& !src2_wide
) {
5890 tmp
= neon_load_reg(rm
, 1);
5891 neon_store_scratch(2, tmp
);
5892 } else if (rd
== rn
&& !src1_wide
) {
5893 tmp
= neon_load_reg(rn
, 1);
5894 neon_store_scratch(2, tmp
);
5897 for (pass
= 0; pass
< 2; pass
++) {
5899 neon_load_reg64(cpu_V0
, rn
+ pass
);
5902 if (pass
== 1 && rd
== rn
) {
5903 tmp
= neon_load_scratch(2);
5905 tmp
= neon_load_reg(rn
, pass
);
5908 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5912 neon_load_reg64(cpu_V1
, rm
+ pass
);
5915 if (pass
== 1 && rd
== rm
) {
5916 tmp2
= neon_load_scratch(2);
5918 tmp2
= neon_load_reg(rm
, pass
);
5921 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
5925 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5926 gen_neon_addl(size
);
5928 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5929 gen_neon_subl(size
);
5931 case 5: case 7: /* VABAL, VABDL */
5932 switch ((size
<< 1) | u
) {
5934 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
5937 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
5940 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
5943 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
5946 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
5949 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
5953 tcg_temp_free_i32(tmp2
);
5954 tcg_temp_free_i32(tmp
);
5956 case 8: case 9: case 10: case 11: case 12: case 13:
5957 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5958 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5960 default: /* 15 is RESERVED: caught earlier */
5965 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5966 neon_store_reg64(cpu_V0
, rd
+ pass
);
5967 } else if (op
== 5 || (op
>= 8 && op
<= 11)) {
5969 neon_load_reg64(cpu_V1
, rd
+ pass
);
5971 case 10: /* VMLSL */
5972 gen_neon_negl(cpu_V0
, size
);
5974 case 5: case 8: /* VABAL, VMLAL */
5975 gen_neon_addl(size
);
5977 case 9: case 11: /* VQDMLAL, VQDMLSL */
5978 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5980 gen_neon_negl(cpu_V0
, size
);
5982 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
5987 neon_store_reg64(cpu_V0
, rd
+ pass
);
5988 } else if (op
== 4 || op
== 6) {
5989 /* Narrowing operation. */
5990 tmp
= tcg_temp_new_i32();
5994 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
5997 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
6000 tcg_gen_extrh_i64_i32(tmp
, cpu_V0
);
6007 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
6010 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
6013 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
6014 tcg_gen_extrh_i64_i32(tmp
, cpu_V0
);
6022 neon_store_reg(rd
, 0, tmp3
);
6023 neon_store_reg(rd
, 1, tmp
);
6026 /* Write back the result. */
6027 neon_store_reg64(cpu_V0
, rd
+ pass
);
6031 /* Two registers and a scalar. NB that for ops of this form
6032 * the ARM ARM labels bit 24 as Q, but it is in our variable
6039 case 1: /* Float VMLA scalar */
6040 case 5: /* Floating point VMLS scalar */
6041 case 9: /* Floating point VMUL scalar */
6046 case 0: /* Integer VMLA scalar */
6047 case 4: /* Integer VMLS scalar */
6048 case 8: /* Integer VMUL scalar */
6049 case 12: /* VQDMULH scalar */
6050 case 13: /* VQRDMULH scalar */
6051 if (u
&& ((rd
| rn
) & 1)) {
6054 tmp
= neon_get_scalar(size
, rm
);
6055 neon_store_scratch(0, tmp
);
6056 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
6057 tmp
= neon_load_scratch(0);
6058 tmp2
= neon_load_reg(rn
, pass
);
6061 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
6063 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
6065 } else if (op
== 13) {
6067 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
6069 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
6071 } else if (op
& 1) {
6072 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6073 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
6074 tcg_temp_free_ptr(fpstatus
);
6077 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
6078 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
6079 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
6083 tcg_temp_free_i32(tmp2
);
6086 tmp2
= neon_load_reg(rd
, pass
);
6089 gen_neon_add(size
, tmp
, tmp2
);
6093 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6094 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
6095 tcg_temp_free_ptr(fpstatus
);
6099 gen_neon_rsb(size
, tmp
, tmp2
);
6103 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6104 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
6105 tcg_temp_free_ptr(fpstatus
);
6111 tcg_temp_free_i32(tmp2
);
6113 neon_store_reg(rd
, pass
, tmp
);
6116 case 3: /* VQDMLAL scalar */
6117 case 7: /* VQDMLSL scalar */
6118 case 11: /* VQDMULL scalar */
6123 case 2: /* VMLAL sclar */
6124 case 6: /* VMLSL scalar */
6125 case 10: /* VMULL scalar */
6129 tmp2
= neon_get_scalar(size
, rm
);
6130 /* We need a copy of tmp2 because gen_neon_mull
6131 * deletes it during pass 0. */
6132 tmp4
= tcg_temp_new_i32();
6133 tcg_gen_mov_i32(tmp4
, tmp2
);
6134 tmp3
= neon_load_reg(rn
, 1);
6136 for (pass
= 0; pass
< 2; pass
++) {
6138 tmp
= neon_load_reg(rn
, 0);
6143 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
6145 neon_load_reg64(cpu_V1
, rd
+ pass
);
6149 gen_neon_negl(cpu_V0
, size
);
6152 gen_neon_addl(size
);
6155 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6157 gen_neon_negl(cpu_V0
, size
);
6159 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
6165 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6170 neon_store_reg64(cpu_V0
, rd
+ pass
);
6173 case 14: /* VQRDMLAH scalar */
6174 case 15: /* VQRDMLSH scalar */
6176 NeonGenThreeOpEnvFn
*fn
;
6178 if (!dc_isar_feature(aa32_rdm
, s
)) {
6181 if (u
&& ((rd
| rn
) & 1)) {
6186 fn
= gen_helper_neon_qrdmlah_s16
;
6188 fn
= gen_helper_neon_qrdmlah_s32
;
6192 fn
= gen_helper_neon_qrdmlsh_s16
;
6194 fn
= gen_helper_neon_qrdmlsh_s32
;
6198 tmp2
= neon_get_scalar(size
, rm
);
6199 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
6200 tmp
= neon_load_reg(rn
, pass
);
6201 tmp3
= neon_load_reg(rd
, pass
);
6202 fn(tmp
, cpu_env
, tmp
, tmp2
, tmp3
);
6203 tcg_temp_free_i32(tmp3
);
6204 neon_store_reg(rd
, pass
, tmp
);
6206 tcg_temp_free_i32(tmp2
);
6210 g_assert_not_reached();
6213 } else { /* size == 3 */
6216 imm
= (insn
>> 8) & 0xf;
6221 if (q
&& ((rd
| rn
| rm
) & 1)) {
6226 neon_load_reg64(cpu_V0
, rn
);
6228 neon_load_reg64(cpu_V1
, rn
+ 1);
6230 } else if (imm
== 8) {
6231 neon_load_reg64(cpu_V0
, rn
+ 1);
6233 neon_load_reg64(cpu_V1
, rm
);
6236 tmp64
= tcg_temp_new_i64();
6238 neon_load_reg64(cpu_V0
, rn
);
6239 neon_load_reg64(tmp64
, rn
+ 1);
6241 neon_load_reg64(cpu_V0
, rn
+ 1);
6242 neon_load_reg64(tmp64
, rm
);
6244 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
6245 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
6246 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6248 neon_load_reg64(cpu_V1
, rm
);
6250 neon_load_reg64(cpu_V1
, rm
+ 1);
6253 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
6254 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
6255 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
6256 tcg_temp_free_i64(tmp64
);
6259 neon_load_reg64(cpu_V0
, rn
);
6260 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
6261 neon_load_reg64(cpu_V1
, rm
);
6262 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
6263 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6265 neon_store_reg64(cpu_V0
, rd
);
6267 neon_store_reg64(cpu_V1
, rd
+ 1);
6269 } else if ((insn
& (1 << 11)) == 0) {
6270 /* Two register misc. */
6271 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
6272 size
= (insn
>> 18) & 3;
6273 /* UNDEF for unknown op values and bad op-size combinations */
6274 if ((neon_2rm_sizes
[op
] & (1 << size
)) == 0) {
6277 if (neon_2rm_is_v8_op(op
) &&
6278 !arm_dc_feature(s
, ARM_FEATURE_V8
)) {
6281 if ((op
!= NEON_2RM_VMOVN
&& op
!= NEON_2RM_VQMOVN
) &&
6282 q
&& ((rm
| rd
) & 1)) {
6286 case NEON_2RM_VREV64
:
6287 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
6288 tmp
= neon_load_reg(rm
, pass
* 2);
6289 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
6291 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
6292 case 1: gen_swap_half(tmp
); break;
6293 case 2: /* no-op */ break;
6296 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
6298 neon_store_reg(rd
, pass
* 2, tmp2
);
6301 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
6302 case 1: gen_swap_half(tmp2
); break;
6305 neon_store_reg(rd
, pass
* 2, tmp2
);
6309 case NEON_2RM_VPADDL
: case NEON_2RM_VPADDL_U
:
6310 case NEON_2RM_VPADAL
: case NEON_2RM_VPADAL_U
:
6311 for (pass
= 0; pass
< q
+ 1; pass
++) {
6312 tmp
= neon_load_reg(rm
, pass
* 2);
6313 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
6314 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
6315 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
6317 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
6318 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
6319 case 2: tcg_gen_add_i64(CPU_V001
); break;
6322 if (op
>= NEON_2RM_VPADAL
) {
6324 neon_load_reg64(cpu_V1
, rd
+ pass
);
6325 gen_neon_addl(size
);
6327 neon_store_reg64(cpu_V0
, rd
+ pass
);
6333 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
6334 tmp
= neon_load_reg(rm
, n
);
6335 tmp2
= neon_load_reg(rd
, n
+ 1);
6336 neon_store_reg(rm
, n
, tmp2
);
6337 neon_store_reg(rd
, n
+ 1, tmp
);
6344 if (gen_neon_unzip(rd
, rm
, size
, q
)) {
6349 if (gen_neon_zip(rd
, rm
, size
, q
)) {
6353 case NEON_2RM_VMOVN
: case NEON_2RM_VQMOVN
:
6354 /* also VQMOVUN; op field and mnemonics don't line up */
6359 for (pass
= 0; pass
< 2; pass
++) {
6360 neon_load_reg64(cpu_V0
, rm
+ pass
);
6361 tmp
= tcg_temp_new_i32();
6362 gen_neon_narrow_op(op
== NEON_2RM_VMOVN
, q
, size
,
6367 neon_store_reg(rd
, 0, tmp2
);
6368 neon_store_reg(rd
, 1, tmp
);
6372 case NEON_2RM_VSHLL
:
6373 if (q
|| (rd
& 1)) {
6376 tmp
= neon_load_reg(rm
, 0);
6377 tmp2
= neon_load_reg(rm
, 1);
6378 for (pass
= 0; pass
< 2; pass
++) {
6381 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
6382 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, 8 << size
);
6383 neon_store_reg64(cpu_V0
, rd
+ pass
);
6386 case NEON_2RM_VCVT_F16_F32
:
6391 if (!dc_isar_feature(aa32_fp16_spconv
, s
) ||
6395 fpst
= get_fpstatus_ptr(true);
6396 ahp
= get_ahp_flag();
6397 tmp
= neon_load_reg(rm
, 0);
6398 gen_helper_vfp_fcvt_f32_to_f16(tmp
, tmp
, fpst
, ahp
);
6399 tmp2
= neon_load_reg(rm
, 1);
6400 gen_helper_vfp_fcvt_f32_to_f16(tmp2
, tmp2
, fpst
, ahp
);
6401 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
6402 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
6403 tcg_temp_free_i32(tmp
);
6404 tmp
= neon_load_reg(rm
, 2);
6405 gen_helper_vfp_fcvt_f32_to_f16(tmp
, tmp
, fpst
, ahp
);
6406 tmp3
= neon_load_reg(rm
, 3);
6407 neon_store_reg(rd
, 0, tmp2
);
6408 gen_helper_vfp_fcvt_f32_to_f16(tmp3
, tmp3
, fpst
, ahp
);
6409 tcg_gen_shli_i32(tmp3
, tmp3
, 16);
6410 tcg_gen_or_i32(tmp3
, tmp3
, tmp
);
6411 neon_store_reg(rd
, 1, tmp3
);
6412 tcg_temp_free_i32(tmp
);
6413 tcg_temp_free_i32(ahp
);
6414 tcg_temp_free_ptr(fpst
);
6417 case NEON_2RM_VCVT_F32_F16
:
6421 if (!dc_isar_feature(aa32_fp16_spconv
, s
) ||
6425 fpst
= get_fpstatus_ptr(true);
6426 ahp
= get_ahp_flag();
6427 tmp3
= tcg_temp_new_i32();
6428 tmp
= neon_load_reg(rm
, 0);
6429 tmp2
= neon_load_reg(rm
, 1);
6430 tcg_gen_ext16u_i32(tmp3
, tmp
);
6431 gen_helper_vfp_fcvt_f16_to_f32(tmp3
, tmp3
, fpst
, ahp
);
6432 neon_store_reg(rd
, 0, tmp3
);
6433 tcg_gen_shri_i32(tmp
, tmp
, 16);
6434 gen_helper_vfp_fcvt_f16_to_f32(tmp
, tmp
, fpst
, ahp
);
6435 neon_store_reg(rd
, 1, tmp
);
6436 tmp3
= tcg_temp_new_i32();
6437 tcg_gen_ext16u_i32(tmp3
, tmp2
);
6438 gen_helper_vfp_fcvt_f16_to_f32(tmp3
, tmp3
, fpst
, ahp
);
6439 neon_store_reg(rd
, 2, tmp3
);
6440 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
6441 gen_helper_vfp_fcvt_f16_to_f32(tmp2
, tmp2
, fpst
, ahp
);
6442 neon_store_reg(rd
, 3, tmp2
);
6443 tcg_temp_free_i32(ahp
);
6444 tcg_temp_free_ptr(fpst
);
6447 case NEON_2RM_AESE
: case NEON_2RM_AESMC
:
6448 if (!dc_isar_feature(aa32_aes
, s
) || ((rm
| rd
) & 1)) {
6451 ptr1
= vfp_reg_ptr(true, rd
);
6452 ptr2
= vfp_reg_ptr(true, rm
);
6454 /* Bit 6 is the lowest opcode bit; it distinguishes between
6455 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6457 tmp3
= tcg_const_i32(extract32(insn
, 6, 1));
6459 if (op
== NEON_2RM_AESE
) {
6460 gen_helper_crypto_aese(ptr1
, ptr2
, tmp3
);
6462 gen_helper_crypto_aesmc(ptr1
, ptr2
, tmp3
);
6464 tcg_temp_free_ptr(ptr1
);
6465 tcg_temp_free_ptr(ptr2
);
6466 tcg_temp_free_i32(tmp3
);
6468 case NEON_2RM_SHA1H
:
6469 if (!dc_isar_feature(aa32_sha1
, s
) || ((rm
| rd
) & 1)) {
6472 ptr1
= vfp_reg_ptr(true, rd
);
6473 ptr2
= vfp_reg_ptr(true, rm
);
6475 gen_helper_crypto_sha1h(ptr1
, ptr2
);
6477 tcg_temp_free_ptr(ptr1
);
6478 tcg_temp_free_ptr(ptr2
);
6480 case NEON_2RM_SHA1SU1
:
6481 if ((rm
| rd
) & 1) {
6484 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6486 if (!dc_isar_feature(aa32_sha2
, s
)) {
6489 } else if (!dc_isar_feature(aa32_sha1
, s
)) {
6492 ptr1
= vfp_reg_ptr(true, rd
);
6493 ptr2
= vfp_reg_ptr(true, rm
);
6495 gen_helper_crypto_sha256su0(ptr1
, ptr2
);
6497 gen_helper_crypto_sha1su1(ptr1
, ptr2
);
6499 tcg_temp_free_ptr(ptr1
);
6500 tcg_temp_free_ptr(ptr2
);
6504 tcg_gen_gvec_not(0, rd_ofs
, rm_ofs
, vec_size
, vec_size
);
6507 tcg_gen_gvec_neg(size
, rd_ofs
, rm_ofs
, vec_size
, vec_size
);
6510 tcg_gen_gvec_abs(size
, rd_ofs
, rm_ofs
, vec_size
, vec_size
);
6515 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6516 tmp
= neon_load_reg(rm
, pass
);
6518 case NEON_2RM_VREV32
:
6520 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
6521 case 1: gen_swap_half(tmp
); break;
6525 case NEON_2RM_VREV16
:
6526 gen_rev16(tmp
, tmp
);
6530 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
6531 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
6532 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
6538 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
6539 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
6540 case 2: tcg_gen_clzi_i32(tmp
, tmp
, 32); break;
6545 gen_helper_neon_cnt_u8(tmp
, tmp
);
6547 case NEON_2RM_VQABS
:
6550 gen_helper_neon_qabs_s8(tmp
, cpu_env
, tmp
);
6553 gen_helper_neon_qabs_s16(tmp
, cpu_env
, tmp
);
6556 gen_helper_neon_qabs_s32(tmp
, cpu_env
, tmp
);
6561 case NEON_2RM_VQNEG
:
6564 gen_helper_neon_qneg_s8(tmp
, cpu_env
, tmp
);
6567 gen_helper_neon_qneg_s16(tmp
, cpu_env
, tmp
);
6570 gen_helper_neon_qneg_s32(tmp
, cpu_env
, tmp
);
6575 case NEON_2RM_VCGT0
: case NEON_2RM_VCLE0
:
6576 tmp2
= tcg_const_i32(0);
6578 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
6579 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
6580 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
6583 tcg_temp_free_i32(tmp2
);
6584 if (op
== NEON_2RM_VCLE0
) {
6585 tcg_gen_not_i32(tmp
, tmp
);
6588 case NEON_2RM_VCGE0
: case NEON_2RM_VCLT0
:
6589 tmp2
= tcg_const_i32(0);
6591 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
6592 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
6593 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
6596 tcg_temp_free_i32(tmp2
);
6597 if (op
== NEON_2RM_VCLT0
) {
6598 tcg_gen_not_i32(tmp
, tmp
);
6601 case NEON_2RM_VCEQ0
:
6602 tmp2
= tcg_const_i32(0);
6604 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
6605 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
6606 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
6609 tcg_temp_free_i32(tmp2
);
6611 case NEON_2RM_VCGT0_F
:
6613 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6614 tmp2
= tcg_const_i32(0);
6615 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
6616 tcg_temp_free_i32(tmp2
);
6617 tcg_temp_free_ptr(fpstatus
);
6620 case NEON_2RM_VCGE0_F
:
6622 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6623 tmp2
= tcg_const_i32(0);
6624 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
6625 tcg_temp_free_i32(tmp2
);
6626 tcg_temp_free_ptr(fpstatus
);
6629 case NEON_2RM_VCEQ0_F
:
6631 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6632 tmp2
= tcg_const_i32(0);
6633 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
6634 tcg_temp_free_i32(tmp2
);
6635 tcg_temp_free_ptr(fpstatus
);
6638 case NEON_2RM_VCLE0_F
:
6640 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6641 tmp2
= tcg_const_i32(0);
6642 gen_helper_neon_cge_f32(tmp
, tmp2
, tmp
, fpstatus
);
6643 tcg_temp_free_i32(tmp2
);
6644 tcg_temp_free_ptr(fpstatus
);
6647 case NEON_2RM_VCLT0_F
:
6649 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6650 tmp2
= tcg_const_i32(0);
6651 gen_helper_neon_cgt_f32(tmp
, tmp2
, tmp
, fpstatus
);
6652 tcg_temp_free_i32(tmp2
);
6653 tcg_temp_free_ptr(fpstatus
);
6656 case NEON_2RM_VABS_F
:
6657 gen_helper_vfp_abss(tmp
, tmp
);
6659 case NEON_2RM_VNEG_F
:
6660 gen_helper_vfp_negs(tmp
, tmp
);
6663 tmp2
= neon_load_reg(rd
, pass
);
6664 neon_store_reg(rm
, pass
, tmp2
);
6667 tmp2
= neon_load_reg(rd
, pass
);
6669 case 0: gen_neon_trn_u8(tmp
, tmp2
); break;
6670 case 1: gen_neon_trn_u16(tmp
, tmp2
); break;
6673 neon_store_reg(rm
, pass
, tmp2
);
6675 case NEON_2RM_VRINTN
:
6676 case NEON_2RM_VRINTA
:
6677 case NEON_2RM_VRINTM
:
6678 case NEON_2RM_VRINTP
:
6679 case NEON_2RM_VRINTZ
:
6682 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6685 if (op
== NEON_2RM_VRINTZ
) {
6686 rmode
= FPROUNDING_ZERO
;
6688 rmode
= fp_decode_rm
[((op
& 0x6) >> 1) ^ 1];
6691 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
6692 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
6694 gen_helper_rints(tmp
, tmp
, fpstatus
);
6695 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
6697 tcg_temp_free_ptr(fpstatus
);
6698 tcg_temp_free_i32(tcg_rmode
);
6701 case NEON_2RM_VRINTX
:
6703 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6704 gen_helper_rints_exact(tmp
, tmp
, fpstatus
);
6705 tcg_temp_free_ptr(fpstatus
);
6708 case NEON_2RM_VCVTAU
:
6709 case NEON_2RM_VCVTAS
:
6710 case NEON_2RM_VCVTNU
:
6711 case NEON_2RM_VCVTNS
:
6712 case NEON_2RM_VCVTPU
:
6713 case NEON_2RM_VCVTPS
:
6714 case NEON_2RM_VCVTMU
:
6715 case NEON_2RM_VCVTMS
:
6717 bool is_signed
= !extract32(insn
, 7, 1);
6718 TCGv_ptr fpst
= get_fpstatus_ptr(1);
6719 TCGv_i32 tcg_rmode
, tcg_shift
;
6720 int rmode
= fp_decode_rm
[extract32(insn
, 8, 2)];
6722 tcg_shift
= tcg_const_i32(0);
6723 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
6724 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
6728 gen_helper_vfp_tosls(tmp
, tmp
,
6731 gen_helper_vfp_touls(tmp
, tmp
,
6735 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
6737 tcg_temp_free_i32(tcg_rmode
);
6738 tcg_temp_free_i32(tcg_shift
);
6739 tcg_temp_free_ptr(fpst
);
6742 case NEON_2RM_VRECPE
:
6744 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6745 gen_helper_recpe_u32(tmp
, tmp
, fpstatus
);
6746 tcg_temp_free_ptr(fpstatus
);
6749 case NEON_2RM_VRSQRTE
:
6751 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6752 gen_helper_rsqrte_u32(tmp
, tmp
, fpstatus
);
6753 tcg_temp_free_ptr(fpstatus
);
6756 case NEON_2RM_VRECPE_F
:
6758 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6759 gen_helper_recpe_f32(tmp
, tmp
, fpstatus
);
6760 tcg_temp_free_ptr(fpstatus
);
6763 case NEON_2RM_VRSQRTE_F
:
6765 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6766 gen_helper_rsqrte_f32(tmp
, tmp
, fpstatus
);
6767 tcg_temp_free_ptr(fpstatus
);
6770 case NEON_2RM_VCVT_FS
: /* VCVT.F32.S32 */
6772 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6773 gen_helper_vfp_sitos(tmp
, tmp
, fpstatus
);
6774 tcg_temp_free_ptr(fpstatus
);
6777 case NEON_2RM_VCVT_FU
: /* VCVT.F32.U32 */
6779 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6780 gen_helper_vfp_uitos(tmp
, tmp
, fpstatus
);
6781 tcg_temp_free_ptr(fpstatus
);
6784 case NEON_2RM_VCVT_SF
: /* VCVT.S32.F32 */
6786 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6787 gen_helper_vfp_tosizs(tmp
, tmp
, fpstatus
);
6788 tcg_temp_free_ptr(fpstatus
);
6791 case NEON_2RM_VCVT_UF
: /* VCVT.U32.F32 */
6793 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6794 gen_helper_vfp_touizs(tmp
, tmp
, fpstatus
);
6795 tcg_temp_free_ptr(fpstatus
);
6799 /* Reserved op values were caught by the
6800 * neon_2rm_sizes[] check earlier.
6804 neon_store_reg(rd
, pass
, tmp
);
6808 } else if ((insn
& (1 << 10)) == 0) {
6810 int n
= ((insn
>> 8) & 3) + 1;
6811 if ((rn
+ n
) > 32) {
6812 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6813 * helper function running off the end of the register file.
6818 if (insn
& (1 << 6)) {
6819 tmp
= neon_load_reg(rd
, 0);
6821 tmp
= tcg_temp_new_i32();
6822 tcg_gen_movi_i32(tmp
, 0);
6824 tmp2
= neon_load_reg(rm
, 0);
6825 ptr1
= vfp_reg_ptr(true, rn
);
6826 tmp5
= tcg_const_i32(n
);
6827 gen_helper_neon_tbl(tmp2
, tmp2
, tmp
, ptr1
, tmp5
);
6828 tcg_temp_free_i32(tmp
);
6829 if (insn
& (1 << 6)) {
6830 tmp
= neon_load_reg(rd
, 1);
6832 tmp
= tcg_temp_new_i32();
6833 tcg_gen_movi_i32(tmp
, 0);
6835 tmp3
= neon_load_reg(rm
, 1);
6836 gen_helper_neon_tbl(tmp3
, tmp3
, tmp
, ptr1
, tmp5
);
6837 tcg_temp_free_i32(tmp5
);
6838 tcg_temp_free_ptr(ptr1
);
6839 neon_store_reg(rd
, 0, tmp2
);
6840 neon_store_reg(rd
, 1, tmp3
);
6841 tcg_temp_free_i32(tmp
);
6842 } else if ((insn
& 0x380) == 0) {
6847 if ((insn
& (7 << 16)) == 0 || (q
&& (rd
& 1))) {
6850 if (insn
& (1 << 16)) {
6852 element
= (insn
>> 17) & 7;
6853 } else if (insn
& (1 << 17)) {
6855 element
= (insn
>> 18) & 3;
6858 element
= (insn
>> 19) & 1;
6860 tcg_gen_gvec_dup_mem(size
, neon_reg_offset(rd
, 0),
6861 neon_element_offset(rm
, element
, size
),
6862 q
? 16 : 8, q
? 16 : 8);

/* Advanced SIMD three registers of the same length extension.
 *  31           25  23  22    20   16   12  11   10   9    8        3     0
 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
 */
static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
{
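    /*
     * Only the VCMLA, VCADD, V[US]DOT and VFM[AS]L encodings are matched
     * by the checks below; any other encoding is rejected.
     */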
6879 gen_helper_gvec_3
*fn_gvec
= NULL
;
6880 gen_helper_gvec_3_ptr
*fn_gvec_ptr
= NULL
;
6881 int rd
, rn
, rm
, opr_sz
;
6884 bool is_long
= false, q
= extract32(insn
, 6, 1);
6885 bool ptr_is_env
= false;
6887 if ((insn
& 0xfe200f10) == 0xfc200800) {
6888 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
6889 int size
= extract32(insn
, 20, 1);
6890 data
= extract32(insn
, 23, 2); /* rot */
6891 if (!dc_isar_feature(aa32_vcma
, s
)
6892 || (!size
&& !dc_isar_feature(aa32_fp16_arith
, s
))) {
6895 fn_gvec_ptr
= size
? gen_helper_gvec_fcmlas
: gen_helper_gvec_fcmlah
;
6896 } else if ((insn
& 0xfea00f10) == 0xfc800800) {
6897 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
6898 int size
= extract32(insn
, 20, 1);
6899 data
= extract32(insn
, 24, 1); /* rot */
6900 if (!dc_isar_feature(aa32_vcma
, s
)
6901 || (!size
&& !dc_isar_feature(aa32_fp16_arith
, s
))) {
6904 fn_gvec_ptr
= size
? gen_helper_gvec_fcadds
: gen_helper_gvec_fcaddh
;
6905 } else if ((insn
& 0xfeb00f00) == 0xfc200d00) {
6906 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
6907 bool u
= extract32(insn
, 4, 1);
6908 if (!dc_isar_feature(aa32_dp
, s
)) {
6911 fn_gvec
= u
? gen_helper_gvec_udot_b
: gen_helper_gvec_sdot_b
;
6912 } else if ((insn
& 0xff300f10) == 0xfc200810) {
6913 /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
6914 int is_s
= extract32(insn
, 23, 1);
6915 if (!dc_isar_feature(aa32_fhm
, s
)) {
6919 data
= is_s
; /* is_2 == 0 */
6920 fn_gvec_ptr
= gen_helper_gvec_fmlal_a32
;
6926 VFP_DREG_D(rd
, insn
);
6930 if (q
|| !is_long
) {
6931 VFP_DREG_N(rn
, insn
);
6932 VFP_DREG_M(rm
, insn
);
6933 if ((rn
| rm
) & q
& !is_long
) {
6936 off_rn
= vfp_reg_offset(1, rn
);
6937 off_rm
= vfp_reg_offset(1, rm
);
6939 rn
= VFP_SREG_N(insn
);
6940 rm
= VFP_SREG_M(insn
);
6941 off_rn
= vfp_reg_offset(0, rn
);
6942 off_rm
= vfp_reg_offset(0, rm
);
6945 if (s
->fp_excp_el
) {
6946 gen_exception_insn(s
, s
->pc_curr
, EXCP_UDEF
,
6947 syn_simd_access_trap(1, 0xe, false), s
->fp_excp_el
);
6950 if (!s
->vfp_enabled
) {
6954 opr_sz
= (1 + q
) * 8;
6960 ptr
= get_fpstatus_ptr(1);
6962 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd
), off_rn
, off_rm
, ptr
,
6963 opr_sz
, opr_sz
, data
, fn_gvec_ptr
);
6965 tcg_temp_free_ptr(ptr
);
6968 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd
), off_rn
, off_rm
,
6969 opr_sz
, opr_sz
, data
, fn_gvec
);

/* Advanced SIMD two registers and a scalar extension.
 *  31             24  23  22   20   16   12  11   10   9    8        3     0
 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
 *
 */
static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
{
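    /*
     * As with the three-same-length extension decoder above, only the
     * indexed VCMLA, V[US]DOT and VFM[AS]L forms are accepted here; any
     * other encoding is rejected.
     */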
6984 gen_helper_gvec_3
*fn_gvec
= NULL
;
6985 gen_helper_gvec_3_ptr
*fn_gvec_ptr
= NULL
;
6986 int rd
, rn
, rm
, opr_sz
, data
;
6988 bool is_long
= false, q
= extract32(insn
, 6, 1);
6989 bool ptr_is_env
= false;
6991 if ((insn
& 0xff000f10) == 0xfe000800) {
6992 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
6993 int rot
= extract32(insn
, 20, 2);
6994 int size
= extract32(insn
, 23, 1);
6997 if (!dc_isar_feature(aa32_vcma
, s
)) {
7001 if (!dc_isar_feature(aa32_fp16_arith
, s
)) {
7004 /* For fp16, rm is just Vm, and index is M. */
7005 rm
= extract32(insn
, 0, 4);
7006 index
= extract32(insn
, 5, 1);
7008 /* For fp32, rm is the usual M:Vm, and index is 0. */
7009 VFP_DREG_M(rm
, insn
);
7012 data
= (index
<< 2) | rot
;
7013 fn_gvec_ptr
= (size
? gen_helper_gvec_fcmlas_idx
7014 : gen_helper_gvec_fcmlah_idx
);
7015 } else if ((insn
& 0xffb00f00) == 0xfe200d00) {
7016 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
7017 int u
= extract32(insn
, 4, 1);
7019 if (!dc_isar_feature(aa32_dp
, s
)) {
7022 fn_gvec
= u
? gen_helper_gvec_udot_idx_b
: gen_helper_gvec_sdot_idx_b
;
7023 /* rm is just Vm, and index is M. */
7024 data
= extract32(insn
, 5, 1); /* index */
7025 rm
= extract32(insn
, 0, 4);
7026 } else if ((insn
& 0xffa00f10) == 0xfe000810) {
7027 /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
7028 int is_s
= extract32(insn
, 20, 1);
7029 int vm20
= extract32(insn
, 0, 3);
7030 int vm3
= extract32(insn
, 3, 1);
7031 int m
= extract32(insn
, 5, 1);
7034 if (!dc_isar_feature(aa32_fhm
, s
)) {
7039 index
= m
* 2 + vm3
;
7045 data
= (index
<< 2) | is_s
; /* is_2 == 0 */
7046 fn_gvec_ptr
= gen_helper_gvec_fmlal_idx_a32
;
7052 VFP_DREG_D(rd
, insn
);
7056 if (q
|| !is_long
) {
7057 VFP_DREG_N(rn
, insn
);
7058 if (rn
& q
& !is_long
) {
7061 off_rn
= vfp_reg_offset(1, rn
);
7062 off_rm
= vfp_reg_offset(1, rm
);
7064 rn
= VFP_SREG_N(insn
);
7065 off_rn
= vfp_reg_offset(0, rn
);
7066 off_rm
= vfp_reg_offset(0, rm
);
7068 if (s
->fp_excp_el
) {
7069 gen_exception_insn(s
, s
->pc_curr
, EXCP_UDEF
,
7070 syn_simd_access_trap(1, 0xe, false), s
->fp_excp_el
);
7073 if (!s
->vfp_enabled
) {
7077 opr_sz
= (1 + q
) * 8;
7083 ptr
= get_fpstatus_ptr(1);
7085 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd
), off_rn
, off_rm
, ptr
,
7086 opr_sz
, opr_sz
, data
, fn_gvec_ptr
);
7088 tcg_temp_free_ptr(ptr
);
7091 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd
), off_rn
, off_rm
,
7092 opr_sz
, opr_sz
, data
, fn_gvec
);
7097 static int disas_coproc_insn(DisasContext
*s
, uint32_t insn
)
7099 int cpnum
, is64
, crn
, crm
, opc1
, opc2
, isread
, rt
, rt2
;
7100 const ARMCPRegInfo
*ri
;
7102 cpnum
= (insn
>> 8) & 0xf;
7104 /* First check for coprocessor space used for XScale/iwMMXt insns */
7105 if (arm_dc_feature(s
, ARM_FEATURE_XSCALE
) && (cpnum
< 2)) {
7106 if (extract32(s
->c15_cpar
, cpnum
, 1) == 0) {
7109 if (arm_dc_feature(s
, ARM_FEATURE_IWMMXT
)) {
7110 return disas_iwmmxt_insn(s
, insn
);
7111 } else if (arm_dc_feature(s
, ARM_FEATURE_XSCALE
)) {
7112 return disas_dsp_insn(s
, insn
);
7117 /* Otherwise treat as a generic register access */
7118 is64
= (insn
& (1 << 25)) == 0;
7119 if (!is64
&& ((insn
& (1 << 4)) == 0)) {
7127 opc1
= (insn
>> 4) & 0xf;
7129 rt2
= (insn
>> 16) & 0xf;
7131 crn
= (insn
>> 16) & 0xf;
7132 opc1
= (insn
>> 21) & 7;
7133 opc2
= (insn
>> 5) & 7;
7136 isread
= (insn
>> 20) & 1;
7137 rt
= (insn
>> 12) & 0xf;
7139 ri
= get_arm_cp_reginfo(s
->cp_regs
,
7140 ENCODE_CP_REG(cpnum
, is64
, s
->ns
, crn
, crm
, opc1
, opc2
));
7144 /* Check access permissions */
7145 if (!cp_access_ok(s
->current_el
, ri
, isread
)) {
7149 if (s
->hstr_active
|| ri
->accessfn
||
7150 (arm_dc_feature(s
, ARM_FEATURE_XSCALE
) && cpnum
< 14)) {
7151 /* Emit code to perform further access permissions checks at
7152 * runtime; this may result in an exception.
7153 * Note that on XScale all cp0..c13 registers do an access check
7154 * call in order to handle c15_cpar.
7157 TCGv_i32 tcg_syn
, tcg_isread
;
7160 /* Note that since we are an implementation which takes an
7161 * exception on a trapped conditional instruction only if the
7162 * instruction passes its condition code check, we can take
7163 * advantage of the clause in the ARM ARM that allows us to set
7164 * the COND field in the instruction to 0xE in all cases.
7165 * We could fish the actual condition out of the insn (ARM)
7166 * or the condexec bits (Thumb) but it isn't necessary.
7171 syndrome
= syn_cp14_rrt_trap(1, 0xe, opc1
, crm
, rt
, rt2
,
7174 syndrome
= syn_cp14_rt_trap(1, 0xe, opc1
, opc2
, crn
, crm
,
7180 syndrome
= syn_cp15_rrt_trap(1, 0xe, opc1
, crm
, rt
, rt2
,
7183 syndrome
= syn_cp15_rt_trap(1, 0xe, opc1
, opc2
, crn
, crm
,
7188 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7189 * so this can only happen if this is an ARMv7 or earlier CPU,
7190 * in which case the syndrome information won't actually be
7193 assert(!arm_dc_feature(s
, ARM_FEATURE_V8
));
7194 syndrome
= syn_uncategorized();
7198 gen_set_condexec(s
);
7199 gen_set_pc_im(s
, s
->pc_curr
);
7200 tmpptr
= tcg_const_ptr(ri
);
7201 tcg_syn
= tcg_const_i32(syndrome
);
7202 tcg_isread
= tcg_const_i32(isread
);
7203 gen_helper_access_check_cp_reg(cpu_env
, tmpptr
, tcg_syn
,
7205 tcg_temp_free_ptr(tmpptr
);
7206 tcg_temp_free_i32(tcg_syn
);
7207 tcg_temp_free_i32(tcg_isread
);
7208 } else if (ri
->type
& ARM_CP_RAISES_EXC
) {
7210 * The readfn or writefn might raise an exception;
7211 * synchronize the CPU state in case it does.
7213 gen_set_condexec(s
);
7214 gen_set_pc_im(s
, s
->pc_curr
);
7217 /* Handle special cases first */
7218 switch (ri
->type
& ~(ARM_CP_FLAG_MASK
& ~ARM_CP_SPECIAL
)) {
7225 gen_set_pc_im(s
, s
->base
.pc_next
);
7226 s
->base
.is_jmp
= DISAS_WFI
;
7232 if ((tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) && (ri
->type
& ARM_CP_IO
)) {
7241 if (ri
->type
& ARM_CP_CONST
) {
7242 tmp64
= tcg_const_i64(ri
->resetvalue
);
7243 } else if (ri
->readfn
) {
7245 tmp64
= tcg_temp_new_i64();
7246 tmpptr
= tcg_const_ptr(ri
);
7247 gen_helper_get_cp_reg64(tmp64
, cpu_env
, tmpptr
);
7248 tcg_temp_free_ptr(tmpptr
);
7250 tmp64
= tcg_temp_new_i64();
7251 tcg_gen_ld_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
7253 tmp
= tcg_temp_new_i32();
7254 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
7255 store_reg(s
, rt
, tmp
);
7256 tmp
= tcg_temp_new_i32();
7257 tcg_gen_extrh_i64_i32(tmp
, tmp64
);
7258 tcg_temp_free_i64(tmp64
);
7259 store_reg(s
, rt2
, tmp
);
7262 if (ri
->type
& ARM_CP_CONST
) {
7263 tmp
= tcg_const_i32(ri
->resetvalue
);
7264 } else if (ri
->readfn
) {
7266 tmp
= tcg_temp_new_i32();
7267 tmpptr
= tcg_const_ptr(ri
);
7268 gen_helper_get_cp_reg(tmp
, cpu_env
, tmpptr
);
7269 tcg_temp_free_ptr(tmpptr
);
7271 tmp
= load_cpu_offset(ri
->fieldoffset
);
7274 /* Destination register of r15 for 32 bit loads sets
7275 * the condition codes from the high 4 bits of the value
7278 tcg_temp_free_i32(tmp
);
7280 store_reg(s
, rt
, tmp
);
7285 if (ri
->type
& ARM_CP_CONST
) {
7286 /* If not forbidden by access permissions, treat as WI */
7291 TCGv_i32 tmplo
, tmphi
;
7292 TCGv_i64 tmp64
= tcg_temp_new_i64();
7293 tmplo
= load_reg(s
, rt
);
7294 tmphi
= load_reg(s
, rt2
);
7295 tcg_gen_concat_i32_i64(tmp64
, tmplo
, tmphi
);
7296 tcg_temp_free_i32(tmplo
);
7297 tcg_temp_free_i32(tmphi
);
7299 TCGv_ptr tmpptr
= tcg_const_ptr(ri
);
7300 gen_helper_set_cp_reg64(cpu_env
, tmpptr
, tmp64
);
7301 tcg_temp_free_ptr(tmpptr
);
7303 tcg_gen_st_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
7305 tcg_temp_free_i64(tmp64
);
7310 tmp
= load_reg(s
, rt
);
7311 tmpptr
= tcg_const_ptr(ri
);
7312 gen_helper_set_cp_reg(cpu_env
, tmpptr
, tmp
);
7313 tcg_temp_free_ptr(tmpptr
);
7314 tcg_temp_free_i32(tmp
);
7316 TCGv_i32 tmp
= load_reg(s
, rt
);
7317 store_cpu_offset(tmp
, ri
->fieldoffset
);
7322 /* I/O operations must end the TB here (whether read or write) */
7323 need_exit_tb
= ((tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) &&
7324 (ri
->type
& ARM_CP_IO
));
7326 if (!isread
&& !(ri
->type
& ARM_CP_SUPPRESS_TB_END
)) {
7328 * A write to any coprocessor regiser that ends a TB
7329 * must rebuild the hflags for the next TB.
7331 TCGv_i32 tcg_el
= tcg_const_i32(s
->current_el
);
7332 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
7333 gen_helper_rebuild_hflags_m32(cpu_env
, tcg_el
);
7335 if (ri
->type
& ARM_CP_NEWEL
) {
7336 gen_helper_rebuild_hflags_a32_newel(cpu_env
);
7338 gen_helper_rebuild_hflags_a32(cpu_env
, tcg_el
);
7341 tcg_temp_free_i32(tcg_el
);
7343 * We default to ending the TB on a coprocessor register write,
7344 * but allow this to be suppressed by the register definition
7345 * (usually only necessary to work around guest bugs).
7347 need_exit_tb
= true;
7356 /* Unknown register; this might be a guest error or a QEMU
7357 * unimplemented feature.
7360 qemu_log_mask(LOG_UNIMP
, "%s access to unsupported AArch32 "
7361 "64 bit system register cp:%d opc1: %d crm:%d "
7363 isread
? "read" : "write", cpnum
, opc1
, crm
,
7364 s
->ns
? "non-secure" : "secure");
7366 qemu_log_mask(LOG_UNIMP
, "%s access to unsupported AArch32 "
7367 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7369 isread
? "read" : "write", cpnum
, opc1
, crn
, crm
, opc2
,
7370 s
->ns
? "non-secure" : "secure");
/* Store a 64-bit value to a register pair. Clobbers val. */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_extrh_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}
/* load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv_i32 tmpl;
    TCGv_i32 tmph;

    /* Load 64-bit value rd:rn. */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* Set N and Z flags from hi|lo. */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
7414 /* Load/Store exclusive instructions are implemented by remembering
7415 the value/address loaded, and seeing if these are the same
7416 when the store is performed. This should be sufficient to implement
7417 the architecturally mandated semantics, and avoids having to monitor
7418 regular stores. The compare vs the remembered value is done during
7419 the cmpxchg operation, but we must compare the addresses manually. */
7420 static void gen_load_exclusive(DisasContext
*s
, int rt
, int rt2
,
7421 TCGv_i32 addr
, int size
)
7423 TCGv_i32 tmp
= tcg_temp_new_i32();
7424 MemOp opc
= size
| MO_ALIGN
| s
->be_data
;
7429 TCGv_i32 tmp2
= tcg_temp_new_i32();
7430 TCGv_i64 t64
= tcg_temp_new_i64();
7432 /* For AArch32, architecturally the 32-bit word at the lowest
7433 * address is always Rt and the one at addr+4 is Rt2, even if
7434 * the CPU is big-endian. That means we don't want to do a
7435 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
7436 * for an architecturally 64-bit access, but instead do a
7437 * 64-bit access using MO_BE if appropriate and then split
7439 * This only makes a difference for BE32 user-mode, where
7440 * frob64() must not flip the two halves of the 64-bit data
7441 * but this code must treat BE32 user-mode like BE32 system.
7443 TCGv taddr
= gen_aa32_addr(s
, addr
, opc
);
7445 tcg_gen_qemu_ld_i64(t64
, taddr
, get_mem_index(s
), opc
);
7446 tcg_temp_free(taddr
);
7447 tcg_gen_mov_i64(cpu_exclusive_val
, t64
);
7448 if (s
->be_data
== MO_BE
) {
7449 tcg_gen_extr_i64_i32(tmp2
, tmp
, t64
);
7451 tcg_gen_extr_i64_i32(tmp
, tmp2
, t64
);
7453 tcg_temp_free_i64(t64
);
7455 store_reg(s
, rt2
, tmp2
);
7457 gen_aa32_ld_i32(s
, tmp
, addr
, get_mem_index(s
), opc
);
7458 tcg_gen_extu_i32_i64(cpu_exclusive_val
, tmp
);
7461 store_reg(s
, rt
, tmp
);
7462 tcg_gen_extu_i32_i64(cpu_exclusive_addr
, addr
);
7465 static void gen_clrex(DisasContext
*s
)
7467 tcg_gen_movi_i64(cpu_exclusive_addr
, -1);
7470 static void gen_store_exclusive(DisasContext
*s
, int rd
, int rt
, int rt2
,
7471 TCGv_i32 addr
, int size
)
7473 TCGv_i32 t0
, t1
, t2
;
7476 TCGLabel
*done_label
;
7477 TCGLabel
*fail_label
;
7478 MemOp opc
= size
| MO_ALIGN
| s
->be_data
;
7480 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7486 fail_label
= gen_new_label();
7487 done_label
= gen_new_label();
7488 extaddr
= tcg_temp_new_i64();
7489 tcg_gen_extu_i32_i64(extaddr
, addr
);
7490 tcg_gen_brcond_i64(TCG_COND_NE
, extaddr
, cpu_exclusive_addr
, fail_label
);
7491 tcg_temp_free_i64(extaddr
);
7493 taddr
= gen_aa32_addr(s
, addr
, opc
);
7494 t0
= tcg_temp_new_i32();
7495 t1
= load_reg(s
, rt
);
7497 TCGv_i64 o64
= tcg_temp_new_i64();
7498 TCGv_i64 n64
= tcg_temp_new_i64();
7500 t2
= load_reg(s
, rt2
);
7501 /* For AArch32, architecturally the 32-bit word at the lowest
7502 * address is always Rt and the one at addr+4 is Rt2, even if
7503 * the CPU is big-endian. Since we're going to treat this as a
7504 * single 64-bit BE store, we need to put the two halves in the
7505 * opposite order for BE to LE, so that they end up in the right
7507 * We don't want gen_aa32_frob64() because that does the wrong
7508 * thing for BE32 usermode.
7510 if (s
->be_data
== MO_BE
) {
7511 tcg_gen_concat_i32_i64(n64
, t2
, t1
);
7513 tcg_gen_concat_i32_i64(n64
, t1
, t2
);
7515 tcg_temp_free_i32(t2
);
7517 tcg_gen_atomic_cmpxchg_i64(o64
, taddr
, cpu_exclusive_val
, n64
,
7518 get_mem_index(s
), opc
);
7519 tcg_temp_free_i64(n64
);
7521 tcg_gen_setcond_i64(TCG_COND_NE
, o64
, o64
, cpu_exclusive_val
);
7522 tcg_gen_extrl_i64_i32(t0
, o64
);
7524 tcg_temp_free_i64(o64
);
7526 t2
= tcg_temp_new_i32();
7527 tcg_gen_extrl_i64_i32(t2
, cpu_exclusive_val
);
7528 tcg_gen_atomic_cmpxchg_i32(t0
, taddr
, t2
, t1
, get_mem_index(s
), opc
);
7529 tcg_gen_setcond_i32(TCG_COND_NE
, t0
, t0
, t2
);
7530 tcg_temp_free_i32(t2
);
7532 tcg_temp_free_i32(t1
);
7533 tcg_temp_free(taddr
);
7534 tcg_gen_mov_i32(cpu_R
[rd
], t0
);
7535 tcg_temp_free_i32(t0
);
7536 tcg_gen_br(done_label
);
7538 gen_set_label(fail_label
);
7539 tcg_gen_movi_i32(cpu_R
[rd
], 1);
7540 gen_set_label(done_label
);
7541 tcg_gen_movi_i64(cpu_exclusive_addr
, -1);
7547 * @mode: mode field from insn (which stack to store to)
7548 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7549 * @writeback: true if writeback bit set
7551 * Generate code for the SRS (Store Return State) insn.
7553 static void gen_srs(DisasContext
*s
,
7554 uint32_t mode
, uint32_t amode
, bool writeback
)
7561 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
7562 * and specified mode is monitor mode
7563 * - UNDEFINED in Hyp mode
7564 * - UNPREDICTABLE in User or System mode
7565 * - UNPREDICTABLE if the specified mode is:
7566 * -- not implemented
7567 * -- not a valid mode number
7568 * -- a mode that's at a higher exception level
7569 * -- Monitor, if we are Non-secure
7570 * For the UNPREDICTABLE cases we choose to UNDEF.
7572 if (s
->current_el
== 1 && !s
->ns
&& mode
== ARM_CPU_MODE_MON
) {
7573 gen_exception_insn(s
, s
->pc_curr
, EXCP_UDEF
, syn_uncategorized(), 3);
7577 if (s
->current_el
== 0 || s
->current_el
== 2) {
7582 case ARM_CPU_MODE_USR
:
7583 case ARM_CPU_MODE_FIQ
:
7584 case ARM_CPU_MODE_IRQ
:
7585 case ARM_CPU_MODE_SVC
:
7586 case ARM_CPU_MODE_ABT
:
7587 case ARM_CPU_MODE_UND
:
7588 case ARM_CPU_MODE_SYS
:
7590 case ARM_CPU_MODE_HYP
:
7591 if (s
->current_el
== 1 || !arm_dc_feature(s
, ARM_FEATURE_EL2
)) {
7595 case ARM_CPU_MODE_MON
:
7596 /* No need to check specifically for "are we non-secure" because
7597 * we've already made EL0 UNDEF and handled the trap for S-EL1;
7598 * so if this isn't EL3 then we must be non-secure.
7600 if (s
->current_el
!= 3) {
7609 unallocated_encoding(s
);
7613 addr
= tcg_temp_new_i32();
7614 tmp
= tcg_const_i32(mode
);
7615 /* get_r13_banked() will raise an exception if called from System mode */
7616 gen_set_condexec(s
);
7617 gen_set_pc_im(s
, s
->pc_curr
);
7618 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
7619 tcg_temp_free_i32(tmp
);
7636 tcg_gen_addi_i32(addr
, addr
, offset
);
7637 tmp
= load_reg(s
, 14);
7638 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
7639 tcg_temp_free_i32(tmp
);
7640 tmp
= load_cpu_field(spsr
);
7641 tcg_gen_addi_i32(addr
, addr
, 4);
7642 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
7643 tcg_temp_free_i32(tmp
);
7661 tcg_gen_addi_i32(addr
, addr
, offset
);
7662 tmp
= tcg_const_i32(mode
);
7663 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
7664 tcg_temp_free_i32(tmp
);
7666 tcg_temp_free_i32(addr
);
7667 s
->base
.is_jmp
= DISAS_UPDATE
;
/* Generate a label used for skipping this instruction */
static void arm_gen_condlabel(DisasContext *s)
{
    if (!s->condjmp) {
        s->condlabel = gen_new_label();
        s->condjmp = 1;
    }
}

/* Skip this instruction if the ARM condition is false */
static void arm_skip_unless(DisasContext *s, uint32_t cond)
{
    arm_gen_condlabel(s);
    arm_gen_test_cc(cond ^ 1, s->condlabel);
}
/*
 * Constant expanders for the decoders.
 */

static int negate(DisasContext *s, int x)
{
    return -x;
}

static int plus_2(DisasContext *s, int x)
{
    return x + 2;
}

static int times_2(DisasContext *s, int x)
{
    return x * 2;
}

static int times_4(DisasContext *s, int x)
{
    return x * 4;
}
/* Return only the rotation part of T32ExpandImm. */
static int t32_expandimm_rot(DisasContext *s, int x)
{
    return x & 0xc00 ? extract32(x, 7, 5) : 0;
}

/* Return the unrotated immediate from T32ExpandImm. */
static int t32_expandimm_imm(DisasContext *s, int x)
{
    int imm = extract32(x, 0, 8);

    switch (extract32(x, 8, 4)) {
    case 0: /* XY */
        /* Nothing to do. */
        break;
    case 1: /* 00XY00XY */
        imm *= 0x00010001;
        break;
    case 2: /* XY00XY00 */
        imm *= 0x01000100;
        break;
    case 3: /* XYXYXYXY */
        imm *= 0x01010101;
        break;
    default:
        /* Rotated constant. */
        imm |= 0x80;
        break;
    }
    return imm;
}
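
/*
 * Worked example (illustrative note added alongside the expanders above,
 * not a statement of new behaviour): for the 12-bit immediate 0x1ff,
 * bits [11:10] are zero so t32_expandimm_rot() returns 0, and bits [11:8]
 * select case 1, giving 0xff * 0x00010001 = 0x00ff00ff.  When bits
 * [11:10] are non-zero, the default case forces bit 7 of the unrotated
 * byte to 1; the rotation from t32_expandimm_rot() is then applied by the
 * caller (see op_s_rri_rot), which also uses the rotated value to set CF
 * for the flag-setting logical ops.
 */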
static int t32_branch24(DisasContext *s, int x)
{
    /* Convert J1:J2 at x[22:21] to I2:I1, which involves I=J^~S. */
    x ^= !(x < 0) * (3 << 21);
    /* Append the final zero. */
    return x << 1;
}
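
/*
 * Illustrative note: x arrives sign-extended, so (x < 0) is the S bit of
 * the encoding.  The architecture defines I1 = NOT(J1 XOR S) and
 * I2 = NOT(J2 XOR S); when S == 1 the J bits are already correct, and
 * when S == 0 they must both be inverted, which is what the conditional
 * XOR with (3 << 21) does before the offset is doubled.
 */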
static int t16_setflags(DisasContext *s)
{
    return s->condexec_mask == 0;
}

static int t16_push_list(DisasContext *s, int x)
{
    return (x & 0xff) | (x & 0x100) << (14 - 8);
}

static int t16_pop_list(DisasContext *s, int x)
{
    return (x & 0xff) | (x & 0x100) << (15 - 8);
}
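
/*
 * Illustrative examples: in the T16 encodings bit 8 is the extra
 * register bit (M selects lr for PUSH, P selects pc for POP), so
 * t16_push_list(0x103) == 0x4003 ({r0, r1, lr}) and
 * t16_pop_list(0x103) == 0x8003 ({r0, r1, pc}).
 */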
/*
 * Include the generated decoders.
 */

#include "decode-a32.inc.c"
#include "decode-a32-uncond.inc.c"
#include "decode-t32.inc.c"
#include "decode-t16.inc.c"
/* Helpers to swap operands for reverse-subtract. */
static void gen_rsb(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_sub_i32(dst, b, a);
}

static void gen_rsb_CC(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
{
    gen_sub_CC(dst, b, a);
}

static void gen_rsc(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
{
    gen_sub_carry(dest, b, a);
}

static void gen_rsc_CC(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
{
    gen_sbc_CC(dest, b, a);
}
7797 * Helpers for the data processing routines.
7799 * After the computation store the results back.
7800 * This may be suppressed altogether (STREG_NONE), require a runtime
7801 * check against the stack limits (STREG_SP_CHECK), or generate an
7802 * exception return. Oh, or store into a register.
7804 * Always return true, indicating success for a trans_* function.
7813 static bool store_reg_kind(DisasContext
*s
, int rd
,
7814 TCGv_i32 val
, StoreRegKind kind
)
7818 tcg_temp_free_i32(val
);
7821 /* See ALUWritePC: Interworking only from a32 mode. */
7823 store_reg(s
, rd
, val
);
7825 store_reg_bx(s
, rd
, val
);
7828 case STREG_SP_CHECK
:
7829 store_sp_checked(s
, val
);
7832 gen_exception_return(s
, val
);
7835 g_assert_not_reached();
7839 * Data Processing (register)
7841 * Operate, with set flags, one register source,
7842 * one immediate shifted register source, and a destination.
7844 static bool op_s_rrr_shi(DisasContext
*s
, arg_s_rrr_shi
*a
,
7845 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
),
7846 int logic_cc
, StoreRegKind kind
)
7848 TCGv_i32 tmp1
, tmp2
;
7850 tmp2
= load_reg(s
, a
->rm
);
7851 gen_arm_shift_im(tmp2
, a
->shty
, a
->shim
, logic_cc
);
7852 tmp1
= load_reg(s
, a
->rn
);
7854 gen(tmp1
, tmp1
, tmp2
);
7855 tcg_temp_free_i32(tmp2
);
7860 return store_reg_kind(s
, a
->rd
, tmp1
, kind
);
7863 static bool op_s_rxr_shi(DisasContext
*s
, arg_s_rrr_shi
*a
,
7864 void (*gen
)(TCGv_i32
, TCGv_i32
),
7865 int logic_cc
, StoreRegKind kind
)
7869 tmp
= load_reg(s
, a
->rm
);
7870 gen_arm_shift_im(tmp
, a
->shty
, a
->shim
, logic_cc
);
7876 return store_reg_kind(s
, a
->rd
, tmp
, kind
);
7880 * Data-processing (register-shifted register)
7882 * Operate, with set flags, one register source,
7883 * one register shifted register source, and a destination.
7885 static bool op_s_rrr_shr(DisasContext
*s
, arg_s_rrr_shr
*a
,
7886 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
),
7887 int logic_cc
, StoreRegKind kind
)
7889 TCGv_i32 tmp1
, tmp2
;
7891 tmp1
= load_reg(s
, a
->rs
);
7892 tmp2
= load_reg(s
, a
->rm
);
7893 gen_arm_shift_reg(tmp2
, a
->shty
, tmp1
, logic_cc
);
7894 tmp1
= load_reg(s
, a
->rn
);
7896 gen(tmp1
, tmp1
, tmp2
);
7897 tcg_temp_free_i32(tmp2
);
7902 return store_reg_kind(s
, a
->rd
, tmp1
, kind
);
7905 static bool op_s_rxr_shr(DisasContext
*s
, arg_s_rrr_shr
*a
,
7906 void (*gen
)(TCGv_i32
, TCGv_i32
),
7907 int logic_cc
, StoreRegKind kind
)
7909 TCGv_i32 tmp1
, tmp2
;
7911 tmp1
= load_reg(s
, a
->rs
);
7912 tmp2
= load_reg(s
, a
->rm
);
7913 gen_arm_shift_reg(tmp2
, a
->shty
, tmp1
, logic_cc
);
7919 return store_reg_kind(s
, a
->rd
, tmp2
, kind
);
7923 * Data-processing (immediate)
7925 * Operate, with set flags, one register source,
7926 * one rotated immediate, and a destination.
7928 * Note that logic_cc && a->rot setting CF based on the msb of the
7929 * immediate is the reason why we must pass in the unrotated form
7932 static bool op_s_rri_rot(DisasContext
*s
, arg_s_rri_rot
*a
,
7933 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
),
7934 int logic_cc
, StoreRegKind kind
)
7936 TCGv_i32 tmp1
, tmp2
;
7939 imm
= ror32(a
->imm
, a
->rot
);
7940 if (logic_cc
&& a
->rot
) {
7941 tcg_gen_movi_i32(cpu_CF
, imm
>> 31);
7943 tmp2
= tcg_const_i32(imm
);
7944 tmp1
= load_reg(s
, a
->rn
);
7946 gen(tmp1
, tmp1
, tmp2
);
7947 tcg_temp_free_i32(tmp2
);
7952 return store_reg_kind(s
, a
->rd
, tmp1
, kind
);
7955 static bool op_s_rxi_rot(DisasContext
*s
, arg_s_rri_rot
*a
,
7956 void (*gen
)(TCGv_i32
, TCGv_i32
),
7957 int logic_cc
, StoreRegKind kind
)
7962 imm
= ror32(a
->imm
, a
->rot
);
7963 if (logic_cc
&& a
->rot
) {
7964 tcg_gen_movi_i32(cpu_CF
, imm
>> 31);
7966 tmp
= tcg_const_i32(imm
);
7972 return store_reg_kind(s
, a
->rd
, tmp
, kind
);
#define DO_ANY3(NAME, OP, L, K)                                         \
    static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a)  \
    { StoreRegKind k = (K); return op_s_rrr_shi(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a)  \
    { StoreRegKind k = (K); return op_s_rrr_shr(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a)   \
    { StoreRegKind k = (K); return op_s_rri_rot(s, a, OP, L, k); }

#define DO_ANY2(NAME, OP, L, K)                                         \
    static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a)  \
    { StoreRegKind k = (K); return op_s_rxr_shi(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a)  \
    { StoreRegKind k = (K); return op_s_rxr_shr(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rxi(DisasContext *s, arg_s_rri_rot *a)   \
    { StoreRegKind k = (K); return op_s_rxi_rot(s, a, OP, L, k); }

#define DO_CMP2(NAME, OP, L)                                            \
    static bool trans_##NAME##_xrri(DisasContext *s, arg_s_rrr_shi *a)  \
    { return op_s_rrr_shi(s, a, OP, L, STREG_NONE); }                   \
    static bool trans_##NAME##_xrrr(DisasContext *s, arg_s_rrr_shr *a)  \
    { return op_s_rrr_shr(s, a, OP, L, STREG_NONE); }                   \
    static bool trans_##NAME##_xri(DisasContext *s, arg_s_rri_rot *a)   \
    { return op_s_rri_rot(s, a, OP, L, STREG_NONE); }
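
/*
 * Illustrative note: each DO_ANY3 use below expands to three trans_*
 * functions, one per operand form; e.g. DO_ANY3(AND, tcg_gen_and_i32,
 * a->s, STREG_NORMAL) defines trans_AND_rrri, trans_AND_rrrr and
 * trans_AND_rri.  The L and K arguments are substituted textually into
 * those generated functions, which is why they may refer to "a" and "s".
 */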
DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
DO_ANY3(EOR, tcg_gen_xor_i32, a->s, STREG_NORMAL)
DO_ANY3(ORR, tcg_gen_or_i32, a->s, STREG_NORMAL)
DO_ANY3(BIC, tcg_gen_andc_i32, a->s, STREG_NORMAL)

DO_ANY3(RSB, a->s ? gen_rsb_CC : gen_rsb, false, STREG_NORMAL)
DO_ANY3(ADC, a->s ? gen_adc_CC : gen_add_carry, false, STREG_NORMAL)
DO_ANY3(SBC, a->s ? gen_sbc_CC : gen_sub_carry, false, STREG_NORMAL)
DO_ANY3(RSC, a->s ? gen_rsc_CC : gen_rsc, false, STREG_NORMAL)

DO_CMP2(TST, tcg_gen_and_i32, true)
DO_CMP2(TEQ, tcg_gen_xor_i32, true)
DO_CMP2(CMN, gen_add_CC, false)
DO_CMP2(CMP, gen_sub_CC, false)

DO_ANY3(ADD, a->s ? gen_add_CC : tcg_gen_add_i32, false,
        a->rd == 13 && a->rn == 13 ? STREG_SP_CHECK : STREG_NORMAL)
8018 * Note for the computation of StoreRegKind we return out of the
8019 * middle of the functions that are expanded by DO_ANY3, and that
8020 * we modify a->s via that parameter before it is used by OP.
8022 DO_ANY3(SUB
, a
->s
? gen_sub_CC
: tcg_gen_sub_i32
, false,
8024 StoreRegKind ret
= STREG_NORMAL
;
8025 if (a
->rd
== 15 && a
->s
) {
8027 * See ALUExceptionReturn:
8028 * In User mode, UNPREDICTABLE; we choose UNDEF.
8029 * In Hyp mode, UNDEFINED.
8031 if (IS_USER(s
) || s
->current_el
== 2) {
8032 unallocated_encoding(s
);
8035 /* There is no writeback of nzcv to PSTATE. */
8037 ret
= STREG_EXC_RET
;
8038 } else if (a
->rd
== 13 && a
->rn
== 13) {
8039 ret
= STREG_SP_CHECK
;
8044 DO_ANY2(MOV
, tcg_gen_mov_i32
, a
->s
,
8046 StoreRegKind ret
= STREG_NORMAL
;
8047 if (a
->rd
== 15 && a
->s
) {
8049 * See ALUExceptionReturn:
8050 * In User mode, UNPREDICTABLE; we choose UNDEF.
8051 * In Hyp mode, UNDEFINED.
8053 if (IS_USER(s
) || s
->current_el
== 2) {
8054 unallocated_encoding(s
);
8057 /* There is no writeback of nzcv to PSTATE. */
8059 ret
= STREG_EXC_RET
;
8060 } else if (a
->rd
== 13) {
8061 ret
= STREG_SP_CHECK
;
8066 DO_ANY2(MVN
, tcg_gen_not_i32
, a
->s
, STREG_NORMAL
)
8069 * ORN is only available with T32, so there is no register-shifted-register
8070 * form of the insn. Using the DO_ANY3 macro would create an unused function.
8072 static bool trans_ORN_rrri(DisasContext
*s
, arg_s_rrr_shi
*a
)
8074 return op_s_rrr_shi(s
, a
, tcg_gen_orc_i32
, a
->s
, STREG_NORMAL
);
8077 static bool trans_ORN_rri(DisasContext
*s
, arg_s_rri_rot
*a
)
8079 return op_s_rri_rot(s
, a
, tcg_gen_orc_i32
, a
->s
, STREG_NORMAL
);
static bool trans_ADR(DisasContext *s, arg_ri *a)
{
    store_reg_bx(s, a->rd, add_reg_for_lit(s, 15, a->imm));
    return true;
}

static bool trans_MOVW(DisasContext *s, arg_MOVW *a)
{
    TCGv_i32 tmp;

    if (!ENABLE_ARCH_6T2) {
        return false;
    }

    tmp = tcg_const_i32(a->imm);
    store_reg(s, a->rd, tmp);
    return true;
}

static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
{
    TCGv_i32 tmp;

    if (!ENABLE_ARCH_6T2) {
        return false;
    }

    tmp = load_reg(s, a->rd);
    tcg_gen_ext16u_i32(tmp, tmp);
    tcg_gen_ori_i32(tmp, tmp, a->imm << 16);
    store_reg(s, a->rd, tmp);
    return true;
}
/*
 * Multiply and multiply accumulate
 */

static bool op_mla(DisasContext *s, arg_s_rrrr *a, bool add)
{
    TCGv_i32 t1, t2;

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    tcg_gen_mul_i32(t1, t1, t2);
    tcg_temp_free_i32(t2);
    if (add) {
        t2 = load_reg(s, a->ra);
        tcg_gen_add_i32(t1, t1, t2);
        tcg_temp_free_i32(t2);
    }
    if (a->s) {
        gen_logic_CC(t1);
    }
    store_reg(s, a->rd, t1);
    return true;
}

static bool trans_MUL(DisasContext *s, arg_MUL *a)
{
    return op_mla(s, a, false);
}

static bool trans_MLA(DisasContext *s, arg_MLA *a)
{
    return op_mla(s, a, true);
}

static bool trans_MLS(DisasContext *s, arg_MLS *a)
{
    TCGv_i32 t1, t2;

    if (!ENABLE_ARCH_6T2) {
        return false;
    }
    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    tcg_gen_mul_i32(t1, t1, t2);
    tcg_temp_free_i32(t2);
    t2 = load_reg(s, a->ra);
    tcg_gen_sub_i32(t1, t2, t1);
    tcg_temp_free_i32(t2);
    store_reg(s, a->rd, t1);
    return true;
}
8172 static bool op_mlal(DisasContext
*s
, arg_s_rrrr
*a
, bool uns
, bool add
)
8174 TCGv_i32 t0
, t1
, t2
, t3
;
8176 t0
= load_reg(s
, a
->rm
);
8177 t1
= load_reg(s
, a
->rn
);
8179 tcg_gen_mulu2_i32(t0
, t1
, t0
, t1
);
8181 tcg_gen_muls2_i32(t0
, t1
, t0
, t1
);
8184 t2
= load_reg(s
, a
->ra
);
8185 t3
= load_reg(s
, a
->rd
);
8186 tcg_gen_add2_i32(t0
, t1
, t0
, t1
, t2
, t3
);
8187 tcg_temp_free_i32(t2
);
8188 tcg_temp_free_i32(t3
);
8191 gen_logicq_cc(t0
, t1
);
8193 store_reg(s
, a
->ra
, t0
);
8194 store_reg(s
, a
->rd
, t1
);
8198 static bool trans_UMULL(DisasContext
*s
, arg_UMULL
*a
)
8200 return op_mlal(s
, a
, true, false);
8203 static bool trans_SMULL(DisasContext
*s
, arg_SMULL
*a
)
8205 return op_mlal(s
, a
, false, false);
8208 static bool trans_UMLAL(DisasContext
*s
, arg_UMLAL
*a
)
8210 return op_mlal(s
, a
, true, true);
8213 static bool trans_SMLAL(DisasContext
*s
, arg_SMLAL
*a
)
8215 return op_mlal(s
, a
, false, true);
8218 static bool trans_UMAAL(DisasContext
*s
, arg_UMAAL
*a
)
8220 TCGv_i32 t0
, t1
, t2
, zero
;
8223 ? !arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)
8228 t0
= load_reg(s
, a
->rm
);
8229 t1
= load_reg(s
, a
->rn
);
8230 tcg_gen_mulu2_i32(t0
, t1
, t0
, t1
);
8231 zero
= tcg_const_i32(0);
8232 t2
= load_reg(s
, a
->ra
);
8233 tcg_gen_add2_i32(t0
, t1
, t0
, t1
, t2
, zero
);
8234 tcg_temp_free_i32(t2
);
8235 t2
= load_reg(s
, a
->rd
);
8236 tcg_gen_add2_i32(t0
, t1
, t0
, t1
, t2
, zero
);
8237 tcg_temp_free_i32(t2
);
8238 tcg_temp_free_i32(zero
);
8239 store_reg(s
, a
->ra
, t0
);
8240 store_reg(s
, a
->rd
, t1
);
8245 * Saturating addition and subtraction
8248 static bool op_qaddsub(DisasContext
*s
, arg_rrr
*a
, bool add
, bool doub
)
8253 ? !arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)
8254 : !ENABLE_ARCH_5TE
) {
8258 t0
= load_reg(s
, a
->rm
);
8259 t1
= load_reg(s
, a
->rn
);
8261 gen_helper_add_saturate(t1
, cpu_env
, t1
, t1
);
8264 gen_helper_add_saturate(t0
, cpu_env
, t0
, t1
);
8266 gen_helper_sub_saturate(t0
, cpu_env
, t0
, t1
);
8268 tcg_temp_free_i32(t1
);
8269 store_reg(s
, a
->rd
, t0
);
#define DO_QADDSUB(NAME, ADD, DOUB) \
    static bool trans_##NAME(DisasContext *s, arg_rrr *a)    \
    {                                                        \
        return op_qaddsub(s, a, ADD, DOUB);                  \
    }

DO_QADDSUB(QADD, true, false)
DO_QADDSUB(QSUB, false, false)
DO_QADDSUB(QDADD, true, true)
DO_QADDSUB(QDSUB, false, true)
8287 * Halfword multiply and multiply accumulate
8290 static bool op_smlaxxx(DisasContext
*s
, arg_rrrr
*a
,
8291 int add_long
, bool nt
, bool mt
)
8293 TCGv_i32 t0
, t1
, tl
, th
;
8296 ? !arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)
8297 : !ENABLE_ARCH_5TE
) {
8301 t0
= load_reg(s
, a
->rn
);
8302 t1
= load_reg(s
, a
->rm
);
8303 gen_mulxy(t0
, t1
, nt
, mt
);
8304 tcg_temp_free_i32(t1
);
8308 store_reg(s
, a
->rd
, t0
);
8311 t1
= load_reg(s
, a
->ra
);
8312 gen_helper_add_setq(t0
, cpu_env
, t0
, t1
);
8313 tcg_temp_free_i32(t1
);
8314 store_reg(s
, a
->rd
, t0
);
8317 tl
= load_reg(s
, a
->ra
);
8318 th
= load_reg(s
, a
->rd
);
8319 /* Sign-extend the 32-bit product to 64 bits. */
8320 t1
= tcg_temp_new_i32();
8321 tcg_gen_sari_i32(t1
, t0
, 31);
8322 tcg_gen_add2_i32(tl
, th
, tl
, th
, t0
, t1
);
8323 tcg_temp_free_i32(t0
);
8324 tcg_temp_free_i32(t1
);
8325 store_reg(s
, a
->ra
, tl
);
8326 store_reg(s
, a
->rd
, th
);
8329 g_assert_not_reached();
#define DO_SMLAX(NAME, add, nt, mt) \
    static bool trans_##NAME(DisasContext *s, arg_rrrr *a)     \
    {                                                          \
        return op_smlaxxx(s, a, add, nt, mt);                  \
    }

DO_SMLAX(SMULBB, 0, 0, 0)
DO_SMLAX(SMULBT, 0, 0, 1)
DO_SMLAX(SMULTB, 0, 1, 0)
DO_SMLAX(SMULTT, 0, 1, 1)

DO_SMLAX(SMLABB, 1, 0, 0)
DO_SMLAX(SMLABT, 1, 0, 1)
DO_SMLAX(SMLATB, 1, 1, 0)
DO_SMLAX(SMLATT, 1, 1, 1)

DO_SMLAX(SMLALBB, 2, 0, 0)
DO_SMLAX(SMLALBT, 2, 0, 1)
DO_SMLAX(SMLALTB, 2, 1, 0)
DO_SMLAX(SMLALTT, 2, 1, 1)
static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt)
{
    TCGv_i32 t0, t1;

    if (!ENABLE_ARCH_5TE) {
        return false;
    }

    t0 = load_reg(s, a->rn);
    t1 = load_reg(s, a->rm);
    /*
     * Since the nominal result is product<47:16>, shift the 16-bit
     * input up by 16 bits, so that the result is at product<63:32>.
     */
    if (mt) {
        tcg_gen_andi_i32(t1, t1, 0xffff0000);
    } else {
        tcg_gen_shli_i32(t1, t1, 16);
    }
    tcg_gen_muls2_i32(t0, t1, t0, t1);
    tcg_temp_free_i32(t0);
    if (add) {
        t0 = load_reg(s, a->ra);
        gen_helper_add_setq(t1, cpu_env, t1, t0);
        tcg_temp_free_i32(t0);
    }
    store_reg(s, a->rd, t1);
    return true;
}
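
/*
 * Illustrative note: SMULW{B,T} nominally keep bits <47:16> of the
 * 48-bit product of Rn and a sign-extended halfword of Rm.  Pre-shifting
 * that halfword into bits <31:16> (shli for the bottom half, the
 * 0xffff0000 mask for the top half) means the wanted bits land in the
 * high word produced by tcg_gen_muls2_i32(), so no 64-bit shift is
 * needed afterwards.
 */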
#define DO_SMLAWX(NAME, add, mt) \
    static bool trans_##NAME(DisasContext *s, arg_rrrr *a)     \
    {                                                          \
        return op_smlawx(s, a, add, mt);                       \
    }

DO_SMLAWX(SMULWB, 0, 0)
DO_SMLAWX(SMULWT, 0, 1)
DO_SMLAWX(SMLAWB, 1, 0)
DO_SMLAWX(SMLAWT, 1, 1)
8401 * MSR (immediate) and hints
8404 static bool trans_YIELD(DisasContext
*s
, arg_YIELD
*a
)
8407 * When running single-threaded TCG code, use the helper to ensure that
8408 * the next round-robin scheduled vCPU gets a crack. When running in
8409 * MTTCG we don't generate jumps to the helper as it won't affect the
8410 * scheduling of other vCPUs.
8412 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
8413 gen_set_pc_im(s
, s
->base
.pc_next
);
8414 s
->base
.is_jmp
= DISAS_YIELD
;
8419 static bool trans_WFE(DisasContext
*s
, arg_WFE
*a
)
8422 * When running single-threaded TCG code, use the helper to ensure that
8423 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
8424 * just skip this instruction. Currently the SEV/SEVL instructions,
8425 * which are *one* of many ways to wake the CPU from WFE, are not
8426 * implemented so we can't sleep like WFI does.
8428 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
8429 gen_set_pc_im(s
, s
->base
.pc_next
);
8430 s
->base
.is_jmp
= DISAS_WFE
;
8435 static bool trans_WFI(DisasContext
*s
, arg_WFI
*a
)
8437 /* For WFI, halt the vCPU until an IRQ. */
8438 gen_set_pc_im(s
, s
->base
.pc_next
);
8439 s
->base
.is_jmp
= DISAS_WFI
;
8443 static bool trans_NOP(DisasContext
*s
, arg_NOP
*a
)
8448 static bool trans_MSR_imm(DisasContext
*s
, arg_MSR_imm
*a
)
8450 uint32_t val
= ror32(a
->imm
, a
->rot
* 2);
8451 uint32_t mask
= msr_mask(s
, a
->mask
, a
->r
);
8453 if (gen_set_psr_im(s
, mask
, a
->r
, val
)) {
8454 unallocated_encoding(s
);
8460 * Cyclic Redundancy Check
8463 static bool op_crc32(DisasContext
*s
, arg_rrr
*a
, bool c
, MemOp sz
)
8465 TCGv_i32 t1
, t2
, t3
;
8467 if (!dc_isar_feature(aa32_crc32
, s
)) {
8471 t1
= load_reg(s
, a
->rn
);
8472 t2
= load_reg(s
, a
->rm
);
8483 g_assert_not_reached();
8485 t3
= tcg_const_i32(1 << sz
);
8487 gen_helper_crc32c(t1
, t1
, t2
, t3
);
8489 gen_helper_crc32(t1
, t1
, t2
, t3
);
8491 tcg_temp_free_i32(t2
);
8492 tcg_temp_free_i32(t3
);
8493 store_reg(s
, a
->rd
, t1
);
#define DO_CRC32(NAME, c, sz) \
    static bool trans_##NAME(DisasContext *s, arg_rrr *a)  \
    { return op_crc32(s, a, c, sz); }

DO_CRC32(CRC32B, false, MO_8)
DO_CRC32(CRC32H, false, MO_16)
DO_CRC32(CRC32W, false, MO_32)
DO_CRC32(CRC32CB, true, MO_8)
DO_CRC32(CRC32CH, true, MO_16)
DO_CRC32(CRC32CW, true, MO_32)
8511 * Miscellaneous instructions
8514 static bool trans_MRS_bank(DisasContext
*s
, arg_MRS_bank
*a
)
8516 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
8519 gen_mrs_banked(s
, a
->r
, a
->sysm
, a
->rd
);
8523 static bool trans_MSR_bank(DisasContext
*s
, arg_MSR_bank
*a
)
8525 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
8528 gen_msr_banked(s
, a
->r
, a
->sysm
, a
->rn
);
8532 static bool trans_MRS_reg(DisasContext
*s
, arg_MRS_reg
*a
)
8536 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
8541 unallocated_encoding(s
);
8544 tmp
= load_cpu_field(spsr
);
8546 tmp
= tcg_temp_new_i32();
8547 gen_helper_cpsr_read(tmp
, cpu_env
);
8549 store_reg(s
, a
->rd
, tmp
);
8553 static bool trans_MSR_reg(DisasContext
*s
, arg_MSR_reg
*a
)
8556 uint32_t mask
= msr_mask(s
, a
->mask
, a
->r
);
8558 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
8561 tmp
= load_reg(s
, a
->rn
);
8562 if (gen_set_psr(s
, mask
, a
->r
, tmp
)) {
8563 unallocated_encoding(s
);
8568 static bool trans_MRS_v7m(DisasContext
*s
, arg_MRS_v7m
*a
)
8572 if (!arm_dc_feature(s
, ARM_FEATURE_M
)) {
8575 tmp
= tcg_const_i32(a
->sysm
);
8576 gen_helper_v7m_mrs(tmp
, cpu_env
, tmp
);
8577 store_reg(s
, a
->rd
, tmp
);
8581 static bool trans_MSR_v7m(DisasContext
*s
, arg_MSR_v7m
*a
)
8583 TCGv_i32 addr
, reg
, el
;
8585 if (!arm_dc_feature(s
, ARM_FEATURE_M
)) {
8588 addr
= tcg_const_i32((a
->mask
<< 10) | a
->sysm
);
8589 reg
= load_reg(s
, a
->rn
);
8590 gen_helper_v7m_msr(cpu_env
, addr
, reg
);
8591 tcg_temp_free_i32(addr
);
8592 tcg_temp_free_i32(reg
);
8593 el
= tcg_const_i32(s
->current_el
);
8594 gen_helper_rebuild_hflags_m32(cpu_env
, el
);
8595 tcg_temp_free_i32(el
);
8600 static bool trans_BX(DisasContext
*s
, arg_BX
*a
)
8602 if (!ENABLE_ARCH_4T
) {
8605 gen_bx_excret(s
, load_reg(s
, a
->rm
));
8609 static bool trans_BXJ(DisasContext
*s
, arg_BXJ
*a
)
8611 if (!ENABLE_ARCH_5J
|| arm_dc_feature(s
, ARM_FEATURE_M
)) {
8614 /* Trivial implementation equivalent to bx. */
8615 gen_bx(s
, load_reg(s
, a
->rm
));
8619 static bool trans_BLX_r(DisasContext
*s
, arg_BLX_r
*a
)
8623 if (!ENABLE_ARCH_5
) {
8626 tmp
= load_reg(s
, a
->rm
);
8627 tcg_gen_movi_i32(cpu_R
[14], s
->base
.pc_next
| s
->thumb
);
8633 * BXNS/BLXNS: only exist for v8M with the security extensions,
8634 * and always UNDEF if NonSecure. We don't implement these in
8635 * the user-only mode either (in theory you can use them from
8636 * Secure User mode but they are too tied in to system emulation).
8638 static bool trans_BXNS(DisasContext
*s
, arg_BXNS
*a
)
8640 if (!s
->v8m_secure
|| IS_USER_ONLY
) {
8641 unallocated_encoding(s
);
8648 static bool trans_BLXNS(DisasContext
*s
, arg_BLXNS
*a
)
8650 if (!s
->v8m_secure
|| IS_USER_ONLY
) {
8651 unallocated_encoding(s
);
8653 gen_blxns(s
, a
->rm
);
8658 static bool trans_CLZ(DisasContext
*s
, arg_CLZ
*a
)
8662 if (!ENABLE_ARCH_5
) {
8665 tmp
= load_reg(s
, a
->rm
);
8666 tcg_gen_clzi_i32(tmp
, tmp
, 32);
8667 store_reg(s
, a
->rd
, tmp
);
8671 static bool trans_ERET(DisasContext
*s
, arg_ERET
*a
)
8675 if (!arm_dc_feature(s
, ARM_FEATURE_V7VE
)) {
8679 unallocated_encoding(s
);
8682 if (s
->current_el
== 2) {
8683 /* ERET from Hyp uses ELR_Hyp, not LR */
8684 tmp
= load_cpu_field(elr_el
[2]);
8686 tmp
= load_reg(s
, 14);
8688 gen_exception_return(s
, tmp
);
8692 static bool trans_HLT(DisasContext
*s
, arg_HLT
*a
)
8698 static bool trans_BKPT(DisasContext
*s
, arg_BKPT
*a
)
8700 if (!ENABLE_ARCH_5
) {
8703 if (arm_dc_feature(s
, ARM_FEATURE_M
) &&
8704 semihosting_enabled() &&
8705 #ifndef CONFIG_USER_ONLY
8709 gen_exception_internal_insn(s
, s
->pc_curr
, EXCP_SEMIHOST
);
8711 gen_exception_bkpt_insn(s
, syn_aa32_bkpt(a
->imm
, false));
8716 static bool trans_HVC(DisasContext
*s
, arg_HVC
*a
)
8718 if (!ENABLE_ARCH_7
|| arm_dc_feature(s
, ARM_FEATURE_M
)) {
8722 unallocated_encoding(s
);
8729 static bool trans_SMC(DisasContext
*s
, arg_SMC
*a
)
8731 if (!ENABLE_ARCH_6K
|| arm_dc_feature(s
, ARM_FEATURE_M
)) {
8735 unallocated_encoding(s
);
8742 static bool trans_SG(DisasContext
*s
, arg_SG
*a
)
8744 if (!arm_dc_feature(s
, ARM_FEATURE_M
) ||
8745 !arm_dc_feature(s
, ARM_FEATURE_V8
)) {
8750 * The bulk of the behaviour for this instruction is implemented
8751 * in v7m_handle_execute_nsc(), which deals with the insn when
8752 * it is executed by a CPU in non-secure state from memory
8753 * which is Secure & NonSecure-Callable.
8754 * Here we only need to handle the remaining cases:
8755 * * in NS memory (including the "security extension not
8756 * implemented" case) : NOP
8757 * * in S memory but CPU already secure (clear IT bits)
8758 * We know that the attribute for the memory this insn is
8759 * in must match the current CPU state, because otherwise
8760 * get_phys_addr_pmsav8 would have generated an exception.
8762 if (s
->v8m_secure
) {
8763 /* Like the IT insn, we don't need to generate any code */
8764 s
->condexec_cond
= 0;
8765 s
->condexec_mask
= 0;
8770 static bool trans_TT(DisasContext
*s
, arg_TT
*a
)
8774 if (!arm_dc_feature(s
, ARM_FEATURE_M
) ||
8775 !arm_dc_feature(s
, ARM_FEATURE_V8
)) {
8778 if (a
->rd
== 13 || a
->rd
== 15 || a
->rn
== 15) {
8779 /* We UNDEF for these UNPREDICTABLE cases */
8780 unallocated_encoding(s
);
8783 if (a
->A
&& !s
->v8m_secure
) {
8784 /* This case is UNDEFINED. */
8785 unallocated_encoding(s
);
8789 addr
= load_reg(s
, a
->rn
);
8790 tmp
= tcg_const_i32((a
->A
<< 1) | a
->T
);
8791 gen_helper_v7m_tt(tmp
, cpu_env
, addr
, tmp
);
8792 tcg_temp_free_i32(addr
);
8793 store_reg(s
, a
->rd
, tmp
);
8798 * Load/store register index
8801 static ISSInfo
make_issinfo(DisasContext
*s
, int rd
, bool p
, bool w
)
8805 /* ISS not valid if writeback */
8808 if (s
->base
.pc_next
- s
->pc_curr
== 2) {
8817 static TCGv_i32
op_addr_rr_pre(DisasContext
*s
, arg_ldst_rr
*a
)
8819 TCGv_i32 addr
= load_reg(s
, a
->rn
);
8821 if (s
->v8m_stackcheck
&& a
->rn
== 13 && a
->w
) {
8822 gen_helper_v8m_stackcheck(cpu_env
, addr
);
8826 TCGv_i32 ofs
= load_reg(s
, a
->rm
);
8827 gen_arm_shift_im(ofs
, a
->shtype
, a
->shimm
, 0);
8829 tcg_gen_add_i32(addr
, addr
, ofs
);
8831 tcg_gen_sub_i32(addr
, addr
, ofs
);
8833 tcg_temp_free_i32(ofs
);
8838 static void op_addr_rr_post(DisasContext
*s
, arg_ldst_rr
*a
,
8839 TCGv_i32 addr
, int address_offset
)
8842 TCGv_i32 ofs
= load_reg(s
, a
->rm
);
8843 gen_arm_shift_im(ofs
, a
->shtype
, a
->shimm
, 0);
8845 tcg_gen_add_i32(addr
, addr
, ofs
);
8847 tcg_gen_sub_i32(addr
, addr
, ofs
);
8849 tcg_temp_free_i32(ofs
);
8851 tcg_temp_free_i32(addr
);
8854 tcg_gen_addi_i32(addr
, addr
, address_offset
);
8855 store_reg(s
, a
->rn
, addr
);
8858 static bool op_load_rr(DisasContext
*s
, arg_ldst_rr
*a
,
8859 MemOp mop
, int mem_idx
)
8861 ISSInfo issinfo
= make_issinfo(s
, a
->rt
, a
->p
, a
->w
);
8864 addr
= op_addr_rr_pre(s
, a
);
8866 tmp
= tcg_temp_new_i32();
8867 gen_aa32_ld_i32(s
, tmp
, addr
, mem_idx
, mop
| s
->be_data
);
8868 disas_set_da_iss(s
, mop
, issinfo
);
8871 * Perform base writeback before the loaded value to
8872 * ensure correct behavior with overlapping index registers.
8874 op_addr_rr_post(s
, a
, addr
, 0);
8875 store_reg_from_load(s
, a
->rt
, tmp
);
8879 static bool op_store_rr(DisasContext
*s
, arg_ldst_rr
*a
,
8880 MemOp mop
, int mem_idx
)
8882 ISSInfo issinfo
= make_issinfo(s
, a
->rt
, a
->p
, a
->w
) | ISSIsWrite
;
8885 addr
= op_addr_rr_pre(s
, a
);
8887 tmp
= load_reg(s
, a
->rt
);
8888 gen_aa32_st_i32(s
, tmp
, addr
, mem_idx
, mop
| s
->be_data
);
8889 disas_set_da_iss(s
, mop
, issinfo
);
8890 tcg_temp_free_i32(tmp
);
8892 op_addr_rr_post(s
, a
, addr
, 0);
8896 static bool trans_LDRD_rr(DisasContext
*s
, arg_ldst_rr
*a
)
8898 int mem_idx
= get_mem_index(s
);
8901 if (!ENABLE_ARCH_5TE
) {
8905 unallocated_encoding(s
);
8908 addr
= op_addr_rr_pre(s
, a
);
8910 tmp
= tcg_temp_new_i32();
8911 gen_aa32_ld_i32(s
, tmp
, addr
, mem_idx
, MO_UL
| s
->be_data
);
8912 store_reg(s
, a
->rt
, tmp
);
8914 tcg_gen_addi_i32(addr
, addr
, 4);
8916 tmp
= tcg_temp_new_i32();
8917 gen_aa32_ld_i32(s
, tmp
, addr
, mem_idx
, MO_UL
| s
->be_data
);
8918 store_reg(s
, a
->rt
+ 1, tmp
);
8920 /* LDRD w/ base writeback is undefined if the registers overlap. */
8921 op_addr_rr_post(s
, a
, addr
, -4);
8925 static bool trans_STRD_rr(DisasContext
*s
, arg_ldst_rr
*a
)
8927 int mem_idx
= get_mem_index(s
);
8930 if (!ENABLE_ARCH_5TE
) {
8934 unallocated_encoding(s
);
8937 addr
= op_addr_rr_pre(s
, a
);
8939 tmp
= load_reg(s
, a
->rt
);
8940 gen_aa32_st_i32(s
, tmp
, addr
, mem_idx
, MO_UL
| s
->be_data
);
8941 tcg_temp_free_i32(tmp
);
8943 tcg_gen_addi_i32(addr
, addr
, 4);
8945 tmp
= load_reg(s
, a
->rt
+ 1);
8946 gen_aa32_st_i32(s
, tmp
, addr
, mem_idx
, MO_UL
| s
->be_data
);
8947 tcg_temp_free_i32(tmp
);
8949 op_addr_rr_post(s
, a
, addr
, -4);
8954 * Load/store immediate index
8957 static TCGv_i32
op_addr_ri_pre(DisasContext
*s
, arg_ldst_ri
*a
)
8965 if (s
->v8m_stackcheck
&& a
->rn
== 13 && a
->w
) {
8967 * Stackcheck. Here we know 'addr' is the current SP;
8968 * U is set if we're moving SP up, else down. It is
8969 * UNKNOWN whether the limit check triggers when SP starts
8970 * below the limit and ends up above it; we chose to do so.
8973 TCGv_i32 newsp
= tcg_temp_new_i32();
8974 tcg_gen_addi_i32(newsp
, cpu_R
[13], ofs
);
8975 gen_helper_v8m_stackcheck(cpu_env
, newsp
);
8976 tcg_temp_free_i32(newsp
);
8978 gen_helper_v8m_stackcheck(cpu_env
, cpu_R
[13]);
8982 return add_reg_for_lit(s
, a
->rn
, a
->p
? ofs
: 0);
8985 static void op_addr_ri_post(DisasContext
*s
, arg_ldst_ri
*a
,
8986 TCGv_i32 addr
, int address_offset
)
8990 address_offset
+= a
->imm
;
8992 address_offset
-= a
->imm
;
8995 tcg_temp_free_i32(addr
);
8998 tcg_gen_addi_i32(addr
, addr
, address_offset
);
8999 store_reg(s
, a
->rn
, addr
);
9002 static bool op_load_ri(DisasContext
*s
, arg_ldst_ri
*a
,
9003 MemOp mop
, int mem_idx
)
9005 ISSInfo issinfo
= make_issinfo(s
, a
->rt
, a
->p
, a
->w
);
9008 addr
= op_addr_ri_pre(s
, a
);
9010 tmp
= tcg_temp_new_i32();
9011 gen_aa32_ld_i32(s
, tmp
, addr
, mem_idx
, mop
| s
->be_data
);
9012 disas_set_da_iss(s
, mop
, issinfo
);
9015 * Perform base writeback before the loaded value to
9016 * ensure correct behavior with overlapping index registers.
9018 op_addr_ri_post(s
, a
, addr
, 0);
9019 store_reg_from_load(s
, a
->rt
, tmp
);
9023 static bool op_store_ri(DisasContext
*s
, arg_ldst_ri
*a
,
9024 MemOp mop
, int mem_idx
)
9026 ISSInfo issinfo
= make_issinfo(s
, a
->rt
, a
->p
, a
->w
) | ISSIsWrite
;
9029 addr
= op_addr_ri_pre(s
, a
);
9031 tmp
= load_reg(s
, a
->rt
);
9032 gen_aa32_st_i32(s
, tmp
, addr
, mem_idx
, mop
| s
->be_data
);
9033 disas_set_da_iss(s
, mop
, issinfo
);
9034 tcg_temp_free_i32(tmp
);
9036 op_addr_ri_post(s
, a
, addr
, 0);
9040 static bool op_ldrd_ri(DisasContext
*s
, arg_ldst_ri
*a
, int rt2
)
9042 int mem_idx
= get_mem_index(s
);
9045 addr
= op_addr_ri_pre(s
, a
);
9047 tmp
= tcg_temp_new_i32();
9048 gen_aa32_ld_i32(s
, tmp
, addr
, mem_idx
, MO_UL
| s
->be_data
);
9049 store_reg(s
, a
->rt
, tmp
);
9051 tcg_gen_addi_i32(addr
, addr
, 4);
9053 tmp
= tcg_temp_new_i32();
9054 gen_aa32_ld_i32(s
, tmp
, addr
, mem_idx
, MO_UL
| s
->be_data
);
9055 store_reg(s
, rt2
, tmp
);
9057 /* LDRD w/ base writeback is undefined if the registers overlap. */
9058 op_addr_ri_post(s
, a
, addr
, -4);
9062 static bool trans_LDRD_ri_a32(DisasContext
*s
, arg_ldst_ri
*a
)
9064 if (!ENABLE_ARCH_5TE
|| (a
->rt
& 1)) {
9067 return op_ldrd_ri(s
, a
, a
->rt
+ 1);
9070 static bool trans_LDRD_ri_t32(DisasContext
*s
, arg_ldst_ri2
*a
)
9073 .u
= a
->u
, .w
= a
->w
, .p
= a
->p
,
9074 .rn
= a
->rn
, .rt
= a
->rt
, .imm
= a
->imm
9076 return op_ldrd_ri(s
, &b
, a
->rt2
);
9079 static bool op_strd_ri(DisasContext
*s
, arg_ldst_ri
*a
, int rt2
)
9081 int mem_idx
= get_mem_index(s
);
9084 addr
= op_addr_ri_pre(s
, a
);
9086 tmp
= load_reg(s
, a
->rt
);
9087 gen_aa32_st_i32(s
, tmp
, addr
, mem_idx
, MO_UL
| s
->be_data
);
9088 tcg_temp_free_i32(tmp
);
9090 tcg_gen_addi_i32(addr
, addr
, 4);
9092 tmp
= load_reg(s
, rt2
);
9093 gen_aa32_st_i32(s
, tmp
, addr
, mem_idx
, MO_UL
| s
->be_data
);
9094 tcg_temp_free_i32(tmp
);
9096 op_addr_ri_post(s
, a
, addr
, -4);
9100 static bool trans_STRD_ri_a32(DisasContext
*s
, arg_ldst_ri
*a
)
9102 if (!ENABLE_ARCH_5TE
|| (a
->rt
& 1)) {
9105 return op_strd_ri(s
, a
, a
->rt
+ 1);
9108 static bool trans_STRD_ri_t32(DisasContext
*s
, arg_ldst_ri2
*a
)
9111 .u
= a
->u
, .w
= a
->w
, .p
= a
->p
,
9112 .rn
= a
->rn
, .rt
= a
->rt
, .imm
= a
->imm
9114 return op_strd_ri(s
, &b
, a
->rt2
);
9117 #define DO_LDST(NAME, WHICH, MEMOP) \
9118 static bool trans_##NAME##_ri(DisasContext *s, arg_ldst_ri *a) \
9120 return op_##WHICH##_ri(s, a, MEMOP, get_mem_index(s)); \
9122 static bool trans_##NAME##T_ri(DisasContext *s, arg_ldst_ri *a) \
9124 return op_##WHICH##_ri(s, a, MEMOP, get_a32_user_mem_index(s)); \
9126 static bool trans_##NAME##_rr(DisasContext *s, arg_ldst_rr *a) \
9128 return op_##WHICH##_rr(s, a, MEMOP, get_mem_index(s)); \
9130 static bool trans_##NAME##T_rr(DisasContext *s, arg_ldst_rr *a) \
9132 return op_##WHICH##_rr(s, a, MEMOP, get_a32_user_mem_index(s)); \
9135 DO_LDST(LDR
, load
, MO_UL
)
9136 DO_LDST(LDRB
, load
, MO_UB
)
9137 DO_LDST(LDRH
, load
, MO_UW
)
9138 DO_LDST(LDRSB
, load
, MO_SB
)
9139 DO_LDST(LDRSH
, load
, MO_SW
)
9141 DO_LDST(STR
, store
, MO_UL
)
9142 DO_LDST(STRB
, store
, MO_UB
)
9143 DO_LDST(STRH
, store
, MO_UW
)
9148 * Synchronization primitives
9151 static bool op_swp(DisasContext
*s
, arg_SWP
*a
, MemOp opc
)
9157 addr
= load_reg(s
, a
->rn
);
9158 taddr
= gen_aa32_addr(s
, addr
, opc
);
9159 tcg_temp_free_i32(addr
);
9161 tmp
= load_reg(s
, a
->rt2
);
9162 tcg_gen_atomic_xchg_i32(tmp
, taddr
, tmp
, get_mem_index(s
), opc
);
9163 tcg_temp_free(taddr
);
9165 store_reg(s
, a
->rt
, tmp
);
9169 static bool trans_SWP(DisasContext
*s
, arg_SWP
*a
)
9171 return op_swp(s
, a
, MO_UL
| MO_ALIGN
);
9174 static bool trans_SWPB(DisasContext
*s
, arg_SWP
*a
)
9176 return op_swp(s
, a
, MO_UB
);
9180 * Load/Store Exclusive and Load-Acquire/Store-Release
9183 static bool op_strex(DisasContext
*s
, arg_STREX
*a
, MemOp mop
, bool rel
)
9186 /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
9187 bool v8a
= ENABLE_ARCH_8
&& !arm_dc_feature(s
, ARM_FEATURE_M
);
9189 /* We UNDEF for these UNPREDICTABLE cases. */
9190 if (a
->rd
== 15 || a
->rn
== 15 || a
->rt
== 15
9191 || a
->rd
== a
->rn
|| a
->rd
== a
->rt
9192 || (!v8a
&& s
->thumb
&& (a
->rd
== 13 || a
->rt
== 13))
9196 || (!v8a
&& s
->thumb
&& a
->rt2
== 13)))) {
9197 unallocated_encoding(s
);
9202 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_STRL
);
9205 addr
= tcg_temp_local_new_i32();
9206 load_reg_var(s
, addr
, a
->rn
);
9207 tcg_gen_addi_i32(addr
, addr
, a
->imm
);
9209 gen_store_exclusive(s
, a
->rd
, a
->rt
, a
->rt2
, addr
, mop
);
9210 tcg_temp_free_i32(addr
);
9214 static bool trans_STREX(DisasContext
*s
, arg_STREX
*a
)
9216 if (!ENABLE_ARCH_6
) {
9219 return op_strex(s
, a
, MO_32
, false);
9222 static bool trans_STREXD_a32(DisasContext
*s
, arg_STREX
*a
)
9224 if (!ENABLE_ARCH_6K
) {
9227 /* We UNDEF for these UNPREDICTABLE cases. */
9229 unallocated_encoding(s
);
9233 return op_strex(s
, a
, MO_64
, false);
9236 static bool trans_STREXD_t32(DisasContext
*s
, arg_STREX
*a
)
9238 return op_strex(s
, a
, MO_64
, false);
9241 static bool trans_STREXB(DisasContext
*s
, arg_STREX
*a
)
9243 if (s
->thumb
? !ENABLE_ARCH_7
: !ENABLE_ARCH_6K
) {
9246 return op_strex(s
, a
, MO_8
, false);
9249 static bool trans_STREXH(DisasContext
*s
, arg_STREX
*a
)
9251 if (s
->thumb
? !ENABLE_ARCH_7
: !ENABLE_ARCH_6K
) {
9254 return op_strex(s
, a
, MO_16
, false);
9257 static bool trans_STLEX(DisasContext
*s
, arg_STREX
*a
)
9259 if (!ENABLE_ARCH_8
) {
9262 return op_strex(s
, a
, MO_32
, true);
9265 static bool trans_STLEXD_a32(DisasContext
*s
, arg_STREX
*a
)
9267 if (!ENABLE_ARCH_8
) {
9270 /* We UNDEF for these UNPREDICTABLE cases. */
9272 unallocated_encoding(s
);
9276 return op_strex(s
, a
, MO_64
, true);
9279 static bool trans_STLEXD_t32(DisasContext
*s
, arg_STREX
*a
)
9281 if (!ENABLE_ARCH_8
) {
9284 return op_strex(s
, a
, MO_64
, true);
9287 static bool trans_STLEXB(DisasContext
*s
, arg_STREX
*a
)
9289 if (!ENABLE_ARCH_8
) {
9292 return op_strex(s
, a
, MO_8
, true);
9295 static bool trans_STLEXH(DisasContext
*s
, arg_STREX
*a
)
9297 if (!ENABLE_ARCH_8
) {
9300 return op_strex(s
, a
, MO_16
, true);
9303 static bool op_stl(DisasContext
*s
, arg_STL
*a
, MemOp mop
)
9307 if (!ENABLE_ARCH_8
) {
9310 /* We UNDEF for these UNPREDICTABLE cases. */
9311 if (a
->rn
== 15 || a
->rt
== 15) {
9312 unallocated_encoding(s
);
9316 addr
= load_reg(s
, a
->rn
);
9317 tmp
= load_reg(s
, a
->rt
);
9318 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_STRL
);
9319 gen_aa32_st_i32(s
, tmp
, addr
, get_mem_index(s
), mop
| s
->be_data
);
9320 disas_set_da_iss(s
, mop
, a
->rt
| ISSIsAcqRel
| ISSIsWrite
);
9322 tcg_temp_free_i32(tmp
);
9323 tcg_temp_free_i32(addr
);
9327 static bool trans_STL(DisasContext
*s
, arg_STL
*a
)
9329 return op_stl(s
, a
, MO_UL
);
9332 static bool trans_STLB(DisasContext
*s
, arg_STL
*a
)
9334 return op_stl(s
, a
, MO_UB
);
9337 static bool trans_STLH(DisasContext
*s
, arg_STL
*a
)
9339 return op_stl(s
, a
, MO_UW
);
9342 static bool op_ldrex(DisasContext
*s
, arg_LDREX
*a
, MemOp mop
, bool acq
)
9345 /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
9346 bool v8a
= ENABLE_ARCH_8
&& !arm_dc_feature(s
, ARM_FEATURE_M
);
9348 /* We UNDEF for these UNPREDICTABLE cases. */
9349 if (a
->rn
== 15 || a
->rt
== 15
9350 || (!v8a
&& s
->thumb
&& a
->rt
== 13)
9352 && (a
->rt2
== 15 || a
->rt
== a
->rt2
9353 || (!v8a
&& s
->thumb
&& a
->rt2
== 13)))) {
9354 unallocated_encoding(s
);
9358 addr
= tcg_temp_local_new_i32();
9359 load_reg_var(s
, addr
, a
->rn
);
9360 tcg_gen_addi_i32(addr
, addr
, a
->imm
);
9362 gen_load_exclusive(s
, a
->rt
, a
->rt2
, addr
, mop
);
9363 tcg_temp_free_i32(addr
);
9366 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_LDAQ
);
9371 static bool trans_LDREX(DisasContext
*s
, arg_LDREX
*a
)
9373 if (!ENABLE_ARCH_6
) {
9376 return op_ldrex(s
, a
, MO_32
, false);
9379 static bool trans_LDREXD_a32(DisasContext
*s
, arg_LDREX
*a
)
9381 if (!ENABLE_ARCH_6K
) {
9384 /* We UNDEF for these UNPREDICTABLE cases. */
9386 unallocated_encoding(s
);
9390 return op_ldrex(s
, a
, MO_64
, false);
9393 static bool trans_LDREXD_t32(DisasContext
*s
, arg_LDREX
*a
)
9395 return op_ldrex(s
, a
, MO_64
, false);
9398 static bool trans_LDREXB(DisasContext
*s
, arg_LDREX
*a
)
9400 if (s
->thumb
? !ENABLE_ARCH_7
: !ENABLE_ARCH_6K
) {
9403 return op_ldrex(s
, a
, MO_8
, false);
9406 static bool trans_LDREXH(DisasContext
*s
, arg_LDREX
*a
)
9408 if (s
->thumb
? !ENABLE_ARCH_7
: !ENABLE_ARCH_6K
) {
9411 return op_ldrex(s
, a
, MO_16
, false);
9414 static bool trans_LDAEX(DisasContext
*s
, arg_LDREX
*a
)
9416 if (!ENABLE_ARCH_8
) {
9419 return op_ldrex(s
, a
, MO_32
, true);
9422 static bool trans_LDAEXD_a32(DisasContext
*s
, arg_LDREX
*a
)
9424 if (!ENABLE_ARCH_8
) {
9427 /* We UNDEF for these UNPREDICTABLE cases. */
9429 unallocated_encoding(s
);
9433 return op_ldrex(s
, a
, MO_64
, true);
9436 static bool trans_LDAEXD_t32(DisasContext
*s
, arg_LDREX
*a
)
9438 if (!ENABLE_ARCH_8
) {
9441 return op_ldrex(s
, a
, MO_64
, true);
9444 static bool trans_LDAEXB(DisasContext
*s
, arg_LDREX
*a
)
9446 if (!ENABLE_ARCH_8
) {
9449 return op_ldrex(s
, a
, MO_8
, true);
9452 static bool trans_LDAEXH(DisasContext
*s
, arg_LDREX
*a
)
9454 if (!ENABLE_ARCH_8
) {
9457 return op_ldrex(s
, a
, MO_16
, true);
9460 static bool op_lda(DisasContext
*s
, arg_LDA
*a
, MemOp mop
)
9464 if (!ENABLE_ARCH_8
) {
9467 /* We UNDEF for these UNPREDICTABLE cases. */
9468 if (a
->rn
== 15 || a
->rt
== 15) {
9469 unallocated_encoding(s
);
9473 addr
= load_reg(s
, a
->rn
);
9474 tmp
= tcg_temp_new_i32();
9475 gen_aa32_ld_i32(s
, tmp
, addr
, get_mem_index(s
), mop
| s
->be_data
);
9476 disas_set_da_iss(s
, mop
, a
->rt
| ISSIsAcqRel
);
9477 tcg_temp_free_i32(addr
);
9479 store_reg(s
, a
->rt
, tmp
);
9480 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_STRL
);
9484 static bool trans_LDA(DisasContext
*s
, arg_LDA
*a
)
9486 return op_lda(s
, a
, MO_UL
);
9489 static bool trans_LDAB(DisasContext
*s
, arg_LDA
*a
)
9491 return op_lda(s
, a
, MO_UB
);
9494 static bool trans_LDAH(DisasContext
*s
, arg_LDA
*a
)
9496 return op_lda(s
, a
, MO_UW
);
9500 * Media instructions
9503 static bool trans_USADA8(DisasContext
*s
, arg_USADA8
*a
)
9507 if (!ENABLE_ARCH_6
) {
9511 t1
= load_reg(s
, a
->rn
);
9512 t2
= load_reg(s
, a
->rm
);
9513 gen_helper_usad8(t1
, t1
, t2
);
9514 tcg_temp_free_i32(t2
);
9516 t2
= load_reg(s
, a
->ra
);
9517 tcg_gen_add_i32(t1
, t1
, t2
);
9518 tcg_temp_free_i32(t2
);
9520 store_reg(s
, a
->rd
, t1
);
9524 static bool op_bfx(DisasContext
*s
, arg_UBFX
*a
, bool u
)
9527 int width
= a
->widthm1
+ 1;
9530 if (!ENABLE_ARCH_6T2
) {
9533 if (shift
+ width
> 32) {
9534 /* UNPREDICTABLE; we choose to UNDEF */
9535 unallocated_encoding(s
);
9539 tmp
= load_reg(s
, a
->rn
);
9541 tcg_gen_extract_i32(tmp
, tmp
, shift
, width
);
9543 tcg_gen_sextract_i32(tmp
, tmp
, shift
, width
);
9545 store_reg(s
, a
->rd
, tmp
);
9549 static bool trans_SBFX(DisasContext
*s
, arg_SBFX
*a
)
9551 return op_bfx(s
, a
, false);
9554 static bool trans_UBFX(DisasContext
*s
, arg_UBFX
*a
)
9556 return op_bfx(s
, a
, true);
9559 static bool trans_BFCI(DisasContext
*s
, arg_BFCI
*a
)
9562 int msb
= a
->msb
, lsb
= a
->lsb
;
9565 if (!ENABLE_ARCH_6T2
) {
9569 /* UNPREDICTABLE; we choose to UNDEF */
9570 unallocated_encoding(s
);
9574 width
= msb
+ 1 - lsb
;
9577 tmp
= tcg_const_i32(0);
9580 tmp
= load_reg(s
, a
->rn
);
9583 TCGv_i32 tmp2
= load_reg(s
, a
->rd
);
9584 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, lsb
, width
);
9585 tcg_temp_free_i32(tmp2
);
9587 store_reg(s
, a
->rd
, tmp
);
9591 static bool trans_UDF(DisasContext
*s
, arg_UDF
*a
)
9593 unallocated_encoding(s
);
9598 * Parallel addition and subtraction
9601 static bool op_par_addsub(DisasContext
*s
, arg_rrr
*a
,
9602 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
9607 ? !arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)
9612 t0
= load_reg(s
, a
->rn
);
9613 t1
= load_reg(s
, a
->rm
);
9617 tcg_temp_free_i32(t1
);
9618 store_reg(s
, a
->rd
, t0
);
9622 static bool op_par_addsub_ge(DisasContext
*s
, arg_rrr
*a
,
9623 void (*gen
)(TCGv_i32
, TCGv_i32
,
9624 TCGv_i32
, TCGv_ptr
))
9630 ? !arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)
9635 t0
= load_reg(s
, a
->rn
);
9636 t1
= load_reg(s
, a
->rm
);
9638 ge
= tcg_temp_new_ptr();
9639 tcg_gen_addi_ptr(ge
, cpu_env
, offsetof(CPUARMState
, GE
));
9640 gen(t0
, t0
, t1
, ge
);
9642 tcg_temp_free_ptr(ge
);
9643 tcg_temp_free_i32(t1
);
9644 store_reg(s
, a
->rd
, t0
);
#define DO_PAR_ADDSUB(NAME, helper) \
    static bool trans_##NAME(DisasContext *s, arg_rrr *a)   \
    {                                                       \
        return op_par_addsub(s, a, helper);                 \
    }

#define DO_PAR_ADDSUB_GE(NAME, helper) \
    static bool trans_##NAME(DisasContext *s, arg_rrr *a)   \
    {                                                       \
        return op_par_addsub_ge(s, a, helper);              \
    }

DO_PAR_ADDSUB_GE(SADD16, gen_helper_sadd16)
DO_PAR_ADDSUB_GE(SASX, gen_helper_saddsubx)
DO_PAR_ADDSUB_GE(SSAX, gen_helper_ssubaddx)
DO_PAR_ADDSUB_GE(SSUB16, gen_helper_ssub16)
DO_PAR_ADDSUB_GE(SADD8, gen_helper_sadd8)
DO_PAR_ADDSUB_GE(SSUB8, gen_helper_ssub8)

DO_PAR_ADDSUB_GE(UADD16, gen_helper_uadd16)
DO_PAR_ADDSUB_GE(UASX, gen_helper_uaddsubx)
DO_PAR_ADDSUB_GE(USAX, gen_helper_usubaddx)
DO_PAR_ADDSUB_GE(USUB16, gen_helper_usub16)
DO_PAR_ADDSUB_GE(UADD8, gen_helper_uadd8)
DO_PAR_ADDSUB_GE(USUB8, gen_helper_usub8)

DO_PAR_ADDSUB(QADD16, gen_helper_qadd16)
DO_PAR_ADDSUB(QASX, gen_helper_qaddsubx)
DO_PAR_ADDSUB(QSAX, gen_helper_qsubaddx)
DO_PAR_ADDSUB(QSUB16, gen_helper_qsub16)
DO_PAR_ADDSUB(QADD8, gen_helper_qadd8)
DO_PAR_ADDSUB(QSUB8, gen_helper_qsub8)

DO_PAR_ADDSUB(UQADD16, gen_helper_uqadd16)
DO_PAR_ADDSUB(UQASX, gen_helper_uqaddsubx)
DO_PAR_ADDSUB(UQSAX, gen_helper_uqsubaddx)
DO_PAR_ADDSUB(UQSUB16, gen_helper_uqsub16)
DO_PAR_ADDSUB(UQADD8, gen_helper_uqadd8)
DO_PAR_ADDSUB(UQSUB8, gen_helper_uqsub8)

DO_PAR_ADDSUB(SHADD16, gen_helper_shadd16)
DO_PAR_ADDSUB(SHASX, gen_helper_shaddsubx)
DO_PAR_ADDSUB(SHSAX, gen_helper_shsubaddx)
DO_PAR_ADDSUB(SHSUB16, gen_helper_shsub16)
DO_PAR_ADDSUB(SHADD8, gen_helper_shadd8)
DO_PAR_ADDSUB(SHSUB8, gen_helper_shsub8)

DO_PAR_ADDSUB(UHADD16, gen_helper_uhadd16)
DO_PAR_ADDSUB(UHASX, gen_helper_uhaddsubx)
DO_PAR_ADDSUB(UHSAX, gen_helper_uhsubaddx)
DO_PAR_ADDSUB(UHSUB16, gen_helper_uhsub16)
DO_PAR_ADDSUB(UHADD8, gen_helper_uhadd8)
DO_PAR_ADDSUB(UHSUB8, gen_helper_uhsub8)

#undef DO_PAR_ADDSUB
#undef DO_PAR_ADDSUB_GE
9709 static bool trans_PKH(DisasContext
*s
, arg_PKH
*a
)
9715 ? !arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)
9720 tn
= load_reg(s
, a
->rn
);
9721 tm
= load_reg(s
, a
->rm
);
9727 tcg_gen_sari_i32(tm
, tm
, shift
);
9728 tcg_gen_deposit_i32(tn
, tn
, tm
, 0, 16);
9731 tcg_gen_shli_i32(tm
, tm
, shift
);
9732 tcg_gen_deposit_i32(tn
, tm
, tn
, 0, 16);
9734 tcg_temp_free_i32(tm
);
9735 store_reg(s
, a
->rd
, tn
);
static bool op_sat(DisasContext *s, arg_sat *a,
                   void (*gen)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp, satimm;
    int shift = a->imm;

    if (!ENABLE_ARCH_6) {
        return false;
    }

    tmp = load_reg(s, a->rn);
    if (a->sh) {
        tcg_gen_sari_i32(tmp, tmp, shift ? shift : 31);
    } else {
        tcg_gen_shli_i32(tmp, tmp, shift);
    }

    satimm = tcg_const_i32(a->satimm);
    gen(tmp, cpu_env, tmp, satimm);
    tcg_temp_free_i32(satimm);

    store_reg(s, a->rd, tmp);
    return true;
}

static bool trans_SSAT(DisasContext *s, arg_sat *a)
{
    return op_sat(s, a, gen_helper_ssat);
}

static bool trans_USAT(DisasContext *s, arg_sat *a)
{
    return op_sat(s, a, gen_helper_usat);
}

static bool trans_SSAT16(DisasContext *s, arg_sat *a)
{
    if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
        return false;
    }
    return op_sat(s, a, gen_helper_ssat16);
}

static bool trans_USAT16(DisasContext *s, arg_sat *a)
{
    if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
        return false;
    }
    return op_sat(s, a, gen_helper_usat16);
}
static bool op_xta(DisasContext *s, arg_rrr_rot *a,
                   void (*gen_extract)(TCGv_i32, TCGv_i32),
                   void (*gen_add)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (!ENABLE_ARCH_6) {
        return false;
    }

    tmp = load_reg(s, a->rm);
    /*
     * TODO: In many cases we could do a shift instead of a rotate.
     * Combined with a simple extend, that becomes an extract.
     */
    tcg_gen_rotri_i32(tmp, tmp, a->rot * 8);
    gen_extract(tmp, tmp);

    if (a->rn != 15) {
        TCGv_i32 tmp2 = load_reg(s, a->rn);
        gen_add(tmp, tmp, tmp2);
        tcg_temp_free_i32(tmp2);
    }
    store_reg(s, a->rd, tmp);
    return true;
}

static bool trans_SXTAB(DisasContext *s, arg_rrr_rot *a)
{
    return op_xta(s, a, tcg_gen_ext8s_i32, tcg_gen_add_i32);
}

static bool trans_SXTAH(DisasContext *s, arg_rrr_rot *a)
{
    return op_xta(s, a, tcg_gen_ext16s_i32, tcg_gen_add_i32);
}

static bool trans_SXTAB16(DisasContext *s, arg_rrr_rot *a)
{
    if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
        return false;
    }
    return op_xta(s, a, gen_helper_sxtb16, gen_add16);
}

static bool trans_UXTAB(DisasContext *s, arg_rrr_rot *a)
{
    return op_xta(s, a, tcg_gen_ext8u_i32, tcg_gen_add_i32);
}

static bool trans_UXTAH(DisasContext *s, arg_rrr_rot *a)
{
    return op_xta(s, a, tcg_gen_ext16u_i32, tcg_gen_add_i32);
}

static bool trans_UXTAB16(DisasContext *s, arg_rrr_rot *a)
{
    if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
        return false;
    }
    return op_xta(s, a, gen_helper_uxtb16, gen_add16);
}
static bool trans_SEL(DisasContext *s, arg_rrr *a)
{
    TCGv_i32 t1, t2, t3;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_6) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    t3 = tcg_temp_new_i32();
    tcg_gen_ld_i32(t3, cpu_env, offsetof(CPUARMState, GE));
    gen_helper_sel_flags(t1, t3, t1, t2);
    tcg_temp_free_i32(t3);
    tcg_temp_free_i32(t2);
    store_reg(s, a->rd, t1);
    return true;
}
static bool op_rr(DisasContext *s, arg_rr *a,
                  void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    tmp = load_reg(s, a->rm);
    gen(tmp, tmp);
    store_reg(s, a->rd, tmp);
    return true;
}

static bool trans_REV(DisasContext *s, arg_rr *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    return op_rr(s, a, tcg_gen_bswap32_i32);
}

static bool trans_REV16(DisasContext *s, arg_rr *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    return op_rr(s, a, gen_rev16);
}

static bool trans_REVSH(DisasContext *s, arg_rr *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    return op_rr(s, a, gen_revsh);
}

static bool trans_RBIT(DisasContext *s, arg_rr *a)
{
    if (!ENABLE_ARCH_6T2) {
        return false;
    }
    return op_rr(s, a, gen_helper_rbit);
}
/*
 * Signed multiply, signed and unsigned divide
 */

static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
{
    TCGv_i32 t1, t2;

    if (!ENABLE_ARCH_6) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    if (m_swap) {
        gen_swap_half(t2);
    }
    gen_smul_dual(t1, t2);

    if (sub) {
        /* This subtraction cannot overflow. */
        tcg_gen_sub_i32(t1, t1, t2);
    } else {
        /*
         * This addition cannot overflow 32 bits; however it may
         * overflow considered as a signed operation, in which case
         * we must set the Q flag.
         */
        gen_helper_add_setq(t1, cpu_env, t1, t2);
    }
    tcg_temp_free_i32(t2);

    if (a->ra != 15) {
        t2 = load_reg(s, a->ra);
        gen_helper_add_setq(t1, cpu_env, t1, t2);
        tcg_temp_free_i32(t2);
    }
    store_reg(s, a->rd, t1);
    return true;
}
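/*
 * The X-suffixed forms below pass m_swap=true (swap the halfwords of rm
 * before the dual multiply); the SMLSD forms pass sub=true (take the
 * difference of the two products instead of their sum).
 */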
static bool trans_SMLAD(DisasContext *s, arg_rrrr *a)
{
    return op_smlad(s, a, false, false);
}

static bool trans_SMLADX(DisasContext *s, arg_rrrr *a)
{
    return op_smlad(s, a, true, false);
}

static bool trans_SMLSD(DisasContext *s, arg_rrrr *a)
{
    return op_smlad(s, a, false, true);
}

static bool trans_SMLSDX(DisasContext *s, arg_rrrr *a)
{
    return op_smlad(s, a, true, true);
}
static bool op_smlald(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
{
    TCGv_i32 t1, t2;
    TCGv_i64 l1, l2;

    if (!ENABLE_ARCH_6) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    if (m_swap) {
        gen_swap_half(t2);
    }
    gen_smul_dual(t1, t2);

    l1 = tcg_temp_new_i64();
    l2 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(l1, t1);
    tcg_gen_ext_i32_i64(l2, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);

    if (sub) {
        tcg_gen_sub_i64(l1, l1, l2);
    } else {
        tcg_gen_add_i64(l1, l1, l2);
    }
    tcg_temp_free_i64(l2);

    gen_addq(s, l1, a->ra, a->rd);
    gen_storeq_reg(s, a->ra, a->rd, l1);
    tcg_temp_free_i64(l1);
    return true;
}

static bool trans_SMLALD(DisasContext *s, arg_rrrr *a)
{
    return op_smlald(s, a, false, false);
}

static bool trans_SMLALDX(DisasContext *s, arg_rrrr *a)
{
    return op_smlald(s, a, true, false);
}

static bool trans_SMLSLD(DisasContext *s, arg_rrrr *a)
{
    return op_smlald(s, a, false, true);
}

static bool trans_SMLSLDX(DisasContext *s, arg_rrrr *a)
{
    return op_smlald(s, a, true, true);
}
static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub)
{
    TCGv_i32 t1, t2;

    if (s->thumb
        ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
        : !ENABLE_ARCH_6) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    tcg_gen_muls2_i32(t2, t1, t1, t2);

    if (a->ra != 15) {
        TCGv_i32 t3 = load_reg(s, a->ra);
        if (sub) {
            /*
             * For SMMLS, we need a 64-bit subtract.  Borrow caused by
             * a non-zero multiplicand lowpart, and the correct result
             * lowpart for rounding.
             */
            TCGv_i32 zero = tcg_const_i32(0);
            tcg_gen_sub2_i32(t2, t1, zero, t3, t2, t1);
            tcg_temp_free_i32(zero);
        } else {
            tcg_gen_add_i32(t1, t1, t3);
        }
        tcg_temp_free_i32(t3);
    }
    if (round) {
        /*
         * Adding 0x80000000 to the 64-bit quantity means that we have
         * carry in to the high word when the low word has the msb set.
         */
        tcg_gen_shri_i32(t2, t2, 31);
        tcg_gen_add_i32(t1, t1, t2);
    }
    tcg_temp_free_i32(t2);
    store_reg(s, a->rd, t1);
    return true;
}
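/*
 * SMMLA/SMMLS return the high 32 bits of the widened product; the 'R'
 * variants below pass round=true so 0x80000000 is added before the high
 * word is taken.
 */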
static bool trans_SMMLA(DisasContext *s, arg_rrrr *a)
{
    return op_smmla(s, a, false, false);
}

static bool trans_SMMLAR(DisasContext *s, arg_rrrr *a)
{
    return op_smmla(s, a, true, false);
}

static bool trans_SMMLS(DisasContext *s, arg_rrrr *a)
{
    return op_smmla(s, a, false, true);
}

static bool trans_SMMLSR(DisasContext *s, arg_rrrr *a)
{
    return op_smmla(s, a, true, true);
}
static bool op_div(DisasContext *s, arg_rrr *a, bool u)
{
    TCGv_i32 t1, t2;

    if (s->thumb
        ? !dc_isar_feature(aa32_thumb_div, s)
        : !dc_isar_feature(aa32_arm_div, s)) {
        return false;
    }

    t1 = load_reg(s, a->rn);
    t2 = load_reg(s, a->rm);
    if (u) {
        gen_helper_udiv(t1, t1, t2);
    } else {
        gen_helper_sdiv(t1, t1, t2);
    }
    tcg_temp_free_i32(t2);
    store_reg(s, a->rd, t1);
    return true;
}

static bool trans_SDIV(DisasContext *s, arg_rrr *a)
{
    return op_div(s, a, false);
}

static bool trans_UDIV(DisasContext *s, arg_rrr *a)
{
    return op_div(s, a, true);
}
/*
 * Block data transfer
 */

static TCGv_i32 op_addr_block_pre(DisasContext *s, arg_ldst_block *a, int n)
{
    TCGv_i32 addr = load_reg(s, a->rn);

    if (a->b) {
        if (a->i) {
            /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } else {
            /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        }
    } else if (!a->i && n != 1) {
        /* post decrement */
        tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * If the writeback is incrementing SP rather than
         * decrementing it, and the initial SP is below the
         * stack limit but the final written-back SP would
         * be above, then we must not perform any memory
         * accesses, but it is IMPDEF whether we generate
         * an exception.  We choose to do so in this case.
         * At this point 'addr' is the lowest address, so
         * either the original SP (if incrementing) or our
         * final SP (if decrementing), so that's what we check.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    return addr;
}

static void op_addr_block_post(DisasContext *s, arg_ldst_block *a,
                               TCGv_i32 addr, int n)
{
    if (a->w) {
        /* write back */
        if (!a->b) {
            if (a->i) {
                /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        } else if (!a->i && n != 1) {
            /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
}
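/*
 * op_stm() and do_ldm() below walk the register list using the two
 * helpers above: op_addr_block_pre() yields the lowest address to be
 * accessed (and performs the v8M SP limit check), op_addr_block_post()
 * applies base writeback or frees the address temporary.
 */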
static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
{
    int i, j, n, list, mem_idx;
    bool user = a->u;
    TCGv_i32 addr, tmp, tmp2;

    if (user) {
        /* STM (user) */
        if (IS_USER(s)) {
            /* Only usable in supervisor mode.  */
            unallocated_encoding(s);
            return true;
        }
    }

    list = a->list;
    n = ctpop16(list);
    if (n < min_n || a->rn == 15) {
        unallocated_encoding(s);
        return true;
    }

    addr = op_addr_block_pre(s, a, n);
    mem_idx = get_mem_index(s);

    for (i = j = 0; i < 16; i++) {
        if (!(list & (1 << i))) {
            continue;
        }

        if (user && i != 15) {
            tmp = tcg_temp_new_i32();
            tmp2 = tcg_const_i32(i);
            gen_helper_get_user_reg(tmp, cpu_env, tmp2);
            tcg_temp_free_i32(tmp2);
        } else {
            tmp = load_reg(s, i);
        }
        gen_aa32_st32(s, tmp, addr, mem_idx);
        tcg_temp_free_i32(tmp);

        /* No need to add after the last transfer.  */
        if (++j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    op_addr_block_post(s, a, addr, n);
    return true;
}

static bool trans_STM(DisasContext *s, arg_ldst_block *a)
{
    /* BitCount(list) < 1 is UNPREDICTABLE */
    return op_stm(s, a, 1);
}

static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a)
{
    /* Writeback register in register list is UNPREDICTABLE for T32.  */
    if (a->w && (a->list & (1 << a->rn))) {
        unallocated_encoding(s);
        return true;
    }
    /* BitCount(list) < 2 is UNPREDICTABLE */
    return op_stm(s, a, 2);
}
static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
{
    int i, j, n, list, mem_idx;
    bool loaded_base;
    bool user = a->u;
    bool exc_return = false;
    TCGv_i32 addr, tmp, tmp2, loaded_var;

    if (user) {
        /* LDM (user), LDM (exception return) */
        if (IS_USER(s)) {
            /* Only usable in supervisor mode.  */
            unallocated_encoding(s);
            return true;
        }
        if (extract32(a->list, 15, 1)) {
            exc_return = true;
            user = false;
        } else {
            /* LDM (user) does not allow writeback.  */
            if (a->w) {
                unallocated_encoding(s);
                return true;
            }
        }
    }

    list = a->list;
    n = ctpop16(list);
    if (n < min_n || a->rn == 15) {
        unallocated_encoding(s);
        return true;
    }

    addr = op_addr_block_pre(s, a, n);
    mem_idx = get_mem_index(s);
    loaded_base = false;
    loaded_var = NULL;

    for (i = j = 0; i < 16; i++) {
        if (!(list & (1 << i))) {
            continue;
        }

        tmp = tcg_temp_new_i32();
        gen_aa32_ld32u(s, tmp, addr, mem_idx);
        if (user) {
            tmp2 = tcg_const_i32(i);
            gen_helper_set_user_reg(cpu_env, tmp2, tmp);
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
        } else if (i == a->rn) {
            loaded_var = tmp;
            loaded_base = true;
        } else if (i == 15 && exc_return) {
            store_pc_exc_ret(s, tmp);
        } else {
            store_reg_from_load(s, i, tmp);
        }

        /* No need to add after the last transfer.  */
        if (++j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    op_addr_block_post(s, a, addr, n);

    if (loaded_base) {
        /* Note that we reject base == pc above.  */
        store_reg(s, a->rn, loaded_var);
    }

    if (exc_return) {
        /* Restore CPSR from SPSR.  */
        tmp = load_cpu_field(spsr);
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_helper_cpsr_write_eret(cpu_env, tmp);
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_end();
        }
        tcg_temp_free_i32(tmp);
        /* Must exit loop to check un-masked IRQs */
        s->base.is_jmp = DISAS_EXIT;
    }
    return true;
}
static bool trans_LDM_a32(DisasContext *s, arg_ldst_block *a)
{
    /*
     * Writeback register in register list is UNPREDICTABLE
     * for ArchVersion() >= 7.  Prior to v7, A32 would write
     * an UNKNOWN value to the base register.
     */
    if (ENABLE_ARCH_7 && a->w && (a->list & (1 << a->rn))) {
        unallocated_encoding(s);
        return true;
    }
    /* BitCount(list) < 1 is UNPREDICTABLE */
    return do_ldm(s, a, 1);
}

static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a)
{
    /* Writeback register in register list is UNPREDICTABLE for T32. */
    if (a->w && (a->list & (1 << a->rn))) {
        unallocated_encoding(s);
        return true;
    }
    /* BitCount(list) < 2 is UNPREDICTABLE */
    return do_ldm(s, a, 2);
}

static bool trans_LDM_t16(DisasContext *s, arg_ldst_block *a)
{
    /* Writeback is conditional on the base register not being loaded.  */
    a->w = !(a->list & (1 << a->rn));
    /* BitCount(list) < 1 is UNPREDICTABLE */
    return do_ldm(s, a, 1);
}
/*
 * Branch, branch with link
 */

static bool trans_B(DisasContext *s, arg_i *a)
{
    gen_jmp(s, read_pc(s) + a->imm);
    return true;
}

static bool trans_B_cond_thumb(DisasContext *s, arg_ci *a)
{
    /* This has cond from encoding, required to be outside IT block.  */
    if (a->cond >= 0xe) {
        return false;
    }
    if (s->condexec_mask) {
        unallocated_encoding(s);
        return true;
    }
    arm_skip_unless(s, a->cond);
    gen_jmp(s, read_pc(s) + a->imm);
    return true;
}

static bool trans_BL(DisasContext *s, arg_i *a)
{
    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
    gen_jmp(s, read_pc(s) + a->imm);
    return true;
}
static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
{
    TCGv_i32 tmp;

    /* For A32, ARCH(5) is checked near the start of the uncond block.  */
    if (s->thumb && (a->imm & 2)) {
        return false;
    }
    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
    tmp = tcg_const_i32(!s->thumb);
    store_cpu_field(tmp, thumb);
    gen_jmp(s, (read_pc(s) & ~3) + a->imm);
    return true;
}

static bool trans_BL_BLX_prefix(DisasContext *s, arg_BL_BLX_prefix *a)
{
    assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
    tcg_gen_movi_i32(cpu_R[14], read_pc(s) + (a->imm << 12));
    return true;
}

static bool trans_BL_suffix(DisasContext *s, arg_BL_suffix *a)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
    tcg_gen_addi_i32(tmp, cpu_R[14], (a->imm << 1) | 1);
    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
    gen_bx(s, tmp);
    return true;
}

static bool trans_BLX_suffix(DisasContext *s, arg_BLX_suffix *a)
{
    TCGv_i32 tmp;

    assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
    if (!ENABLE_ARCH_5) {
        return false;
    }
    tmp = tcg_temp_new_i32();
    tcg_gen_addi_i32(tmp, cpu_R[14], a->imm << 1);
    tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
    tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
    gen_bx(s, tmp);
    return true;
}
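/*
 * TBB/TBH below load a byte or halfword offset from rn + rm (rm is
 * doubled for TBH), double that offset and branch to read_pc() plus
 * the result.
 */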
static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
{
    TCGv_i32 addr, tmp;

    tmp = load_reg(s, a->rm);
    if (half) {
        tcg_gen_add_i32(tmp, tmp, tmp);
    }
    addr = load_reg(s, a->rn);
    tcg_gen_add_i32(addr, addr, tmp);

    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
                    half ? MO_UW | s->be_data : MO_UB);
    tcg_temp_free_i32(addr);

    tcg_gen_add_i32(tmp, tmp, tmp);
    tcg_gen_addi_i32(tmp, tmp, read_pc(s));
    store_reg(s, 15, tmp);
    return true;
}

static bool trans_TBB(DisasContext *s, arg_tbranch *a)
{
    return op_tbranch(s, a, false);
}

static bool trans_TBH(DisasContext *s, arg_tbranch *a)
{
    return op_tbranch(s, a, true);
}

static bool trans_CBZ(DisasContext *s, arg_CBZ *a)
{
    TCGv_i32 tmp = load_reg(s, a->rn);

    arm_gen_condlabel(s);
    tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE,
                        tmp, 0, s->condlabel);
    tcg_temp_free_i32(tmp);
    gen_jmp(s, read_pc(s) + a->imm);
    return true;
}
/*
 * Supervisor call - both T32 & A32 come here so we need to check
 * which mode we are in when checking for semihosting.
 */

static bool trans_SVC(DisasContext *s, arg_SVC *a)
{
    const uint32_t semihost_imm = s->thumb ? 0xab : 0x123456;

    if (!arm_dc_feature(s, ARM_FEATURE_M) && semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        !IS_USER(s) &&
#endif
        (a->imm == semihost_imm)) {
        gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
    } else {
        gen_set_pc_im(s, s->base.pc_next);
        s->svc_imm = a->imm;
        s->base.is_jmp = DISAS_SWI;
    }
    return true;
}
/*
 * Unconditional system instructions
 */

static bool trans_RFE(DisasContext *s, arg_RFE *a)
{
    static const int8_t pre_offset[4] = {
        /* DA */ -4, /* IA */ 0, /* DB */ -8, /* IB */ 4
    };
    static const int8_t post_offset[4] = {
        /* DA */ -8, /* IA */ 4, /* DB */ -4, /* IB */ 0
    };
    TCGv_i32 addr, t1, t2;

    if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    if (IS_USER(s)) {
        unallocated_encoding(s);
        return true;
    }

    addr = load_reg(s, a->rn);
    tcg_gen_addi_i32(addr, addr, pre_offset[a->pu]);

    /* Load PC into tmp and CPSR into tmp2.  */
    t1 = tcg_temp_new_i32();
    gen_aa32_ld32u(s, t1, addr, get_mem_index(s));
    tcg_gen_addi_i32(addr, addr, 4);
    t2 = tcg_temp_new_i32();
    gen_aa32_ld32u(s, t2, addr, get_mem_index(s));

    if (a->w) {
        /* Base writeback.  */
        tcg_gen_addi_i32(addr, addr, post_offset[a->pu]);
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
    gen_rfe(s, t1, t2);
    return true;
}

static bool trans_SRS(DisasContext *s, arg_SRS *a)
{
    if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    gen_srs(s, a->mode, a->pu, a->w);
    return true;
}
static bool trans_CPS(DisasContext *s, arg_CPS *a)
{
    uint32_t mask, val;

    if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    if (IS_USER(s)) {
        /* Implemented as NOP in user mode.  */
        return true;
    }
    /* TODO: There are quite a lot of UNPREDICTABLE argument combinations. */

    mask = val = 0;
    if (a->imod & 2) {
        if (a->A) {
            mask |= CPSR_A;
        }
        if (a->I) {
            mask |= CPSR_I;
        }
        if (a->F) {
            mask |= CPSR_F;
        }
        if (a->imod & 1) {
            val |= mask;
        }
    }
    if (a->M) {
        mask |= CPSR_M;
        val |= a->mode;
    }
    if (mask) {
        gen_set_psr_im(s, mask, 0, val);
    }
    return true;
}
static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a)
{
    TCGv_i32 tmp, addr;

    if (!arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    if (IS_USER(s)) {
        /* Implemented as NOP in user mode.  */
        return true;
    }

    tmp = tcg_const_i32(a->im);
    /* FAULTMASK */
    if (a->F) {
        addr = tcg_const_i32(19);
        gen_helper_v7m_msr(cpu_env, addr, tmp);
        tcg_temp_free_i32(addr);
    }
    /* PRIMASK */
    if (a->I) {
        addr = tcg_const_i32(16);
        gen_helper_v7m_msr(cpu_env, addr, tmp);
        tcg_temp_free_i32(addr);
    }
    tcg_temp_free_i32(tmp);
    gen_lookup_tb(s);
    return true;
}
/*
 * Clear-Exclusive, Barriers
 */

static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
{
    if (s->thumb
        ? !ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)
        : !ENABLE_ARCH_6K) {
        return false;
    }
    gen_clrex(s);
    return true;
}

static bool trans_DSB(DisasContext *s, arg_DSB *a)
{
    if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    return true;
}

static bool trans_DMB(DisasContext *s, arg_DMB *a)
{
    return trans_DSB(s, NULL);
}

static bool trans_ISB(DisasContext *s, arg_ISB *a)
{
    if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
        return false;
    }
    /*
     * We need to break the TB after this insn to execute
     * self-modifying code correctly and also to take
     * any pending interrupts immediately.
     */
    gen_goto_tb(s, 0, s->base.pc_next);
    return true;
}

static bool trans_SB(DisasContext *s, arg_SB *a)
{
    if (!dc_isar_feature(aa32_sb, s)) {
        return false;
    }
    /*
     * TODO: There is no speculation barrier opcode
     * for TCG; MB and end the TB instead.
     */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    gen_goto_tb(s, 0, s->base.pc_next);
    return true;
}
static bool trans_SETEND(DisasContext *s, arg_SETEND *a)
{
    if (!ENABLE_ARCH_6) {
        return false;
    }
    if (a->E != (s->be_data == MO_BE)) {
        gen_helper_setend(cpu_env);
        s->base.is_jmp = DISAS_UPDATE;
    }
    return true;
}

/*
 * Preload instructions
 * All are nops, contingent on the appropriate arch level.
 */

static bool trans_PLD(DisasContext *s, arg_PLD *a)
{
    return ENABLE_ARCH_5TE;
}

static bool trans_PLDW(DisasContext *s, arg_PLD *a)
{
    return arm_dc_feature(s, ARM_FEATURE_V7MP);
}

static bool trans_PLI(DisasContext *s, arg_PLD *a)
{
    return ENABLE_ARCH_7;
}
static bool trans_IT(DisasContext *s, arg_IT *a)
{
    int cond_mask = a->cond_mask;

    /*
     * No actual code generated for this insn, just setup state.
     *
     * Combinations of firstcond and mask which set up an 0b1111
     * condition are UNPREDICTABLE; we take the CONSTRAINED
     * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
     * i.e. both meaning "execute always".
     */
    s->condexec_cond = (cond_mask >> 4) & 0xe;
    s->condexec_mask = cond_mask & 0x1f;
    return true;
}
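/*
 * The decoder entry points below try the generated decodetree decoders
 * (disas_a32_uncond, disas_a32, disas_t32, disas_t16) first and only
 * fall back to the legacy hand-written decoders for the remaining Neon,
 * VFP and coprocessor encodings.
 */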
static void disas_arm_insn(DisasContext *s, unsigned int insn)
{
    unsigned int cond = insn >> 28;

    /* M variants do not implement ARM mode; this must raise the INVSTATE
     * UsageFault exception.
     */
    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }

    if (cond == 0xf) {
        /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
         * choose to UNDEF. In ARMv5 and above the space is used
         * for miscellaneous unconditional instructions.
         */
        ARCH(5);

        /* Unconditional instructions.  */
        if (disas_a32_uncond(s, insn)) {
            return;
        }
        /* fall back to legacy decoder */

        if (((insn >> 25) & 7) == 1) {
            /* NEON Data processing.  */
            if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
                goto illegal_op;
            }

            if (disas_neon_data_insn(s, insn)) {
                goto illegal_op;
            }
            return;
        }
        if ((insn & 0x0f100000) == 0x04000000) {
            /* NEON load/store.  */
            if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
                goto illegal_op;
            }

            if (disas_neon_ls_insn(s, insn)) {
                goto illegal_op;
            }
            return;
        }
        if ((insn & 0x0f000e10) == 0x0e000a00) {
            /* VFP.  */
            if (disas_vfp_insn(s, insn)) {
                goto illegal_op;
            }
            return;
        }
        if ((insn & 0x0e000f00) == 0x0c000100) {
            if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
                /* iWMMXt register transfer.  */
                if (extract32(s->c15_cpar, 1, 1)) {
                    if (!disas_iwmmxt_insn(s, insn)) {
                        return;
                    }
                }
            }
        } else if ((insn & 0x0e000a00) == 0x0c000800
                   && arm_dc_feature(s, ARM_FEATURE_V8)) {
            if (disas_neon_insn_3same_ext(s, insn)) {
                goto illegal_op;
            }
            return;
        } else if ((insn & 0x0f000a00) == 0x0e000800
                   && arm_dc_feature(s, ARM_FEATURE_V8)) {
            if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
                goto illegal_op;
            }
            return;
        }
        goto illegal_op;
    }
    if (cond != 0xe) {
        /* if not always execute, we generate a conditional jump to
           next instruction */
        arm_skip_unless(s, cond);
    }

    if (disas_a32(s, insn)) {
        return;
    }
    /* fall back to legacy decoder */

    switch ((insn >> 24) & 0xf) {
    case 0xc:
    case 0xd:
    case 0xe:
        if (((insn >> 8) & 0xe) == 10) {
            /* VFP.  */
            if (disas_vfp_insn(s, insn)) {
                goto illegal_op;
            }
        } else if (disas_coproc_insn(s, insn)) {
            /* Coprocessor.  */
            goto illegal_op;
        }
        break;
    default:
    illegal_op:
        unallocated_encoding(s);
        break;
    }
}
static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
{
    /*
     * Return true if this is a 16 bit instruction. We must be precise
     * about this (matching the decode).
     */
    if ((insn >> 11) < 0x1d) {
        /* Definitely a 16-bit instruction */
        return true;
    }

    /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
     * first half of a 32-bit Thumb insn. Thumb-1 cores might
     * end up actually treating this as two 16-bit insns, though,
     * if it's half of a bl/blx pair that might span a page boundary.
     */
    if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Thumb2 cores (including all M profile ones) always treat
         * 32-bit insns as 32-bit.
         */
        return false;
    }

    if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
        /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
         * is not on the next page; we merge this into a 32-bit
         * insn.
         */
        return false;
    }
    /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
     * 0b1111_1xxx_xxxx_xxxx : BL suffix;
     * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
     * -- handle as single 16 bit insn
     */
    return true;
}
/* Translate a 32-bit thumb instruction. */
static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
{
    /*
     * ARMv6-M supports a limited subset of Thumb2 instructions.
     * Other Thumb1 architectures allow only 32-bit
     * combined BL/BLX prefix and suffix.
     */
    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_V7)) {
        int i;
        bool found = false;
        static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
                                               0xf3b08040 /* dsb */,
                                               0xf3b08050 /* dmb */,
                                               0xf3b08060 /* isb */,
                                               0xf3e08000 /* mrs */,
                                               0xf000d000 /* bl */};
        static const uint32_t armv6m_mask[] = {0xffe0d000,
                                               0xfff0d0f0,
                                               0xfff0d0f0,
                                               0xfff0d0f0,
                                               0xffe0d000,
                                               0xf800d000};

        for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
            if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
                found = true;
                break;
            }
        }
        if (!found) {
            goto illegal_op;
        }
    } else if ((insn & 0xf800e800) != 0xf000e800) {
        ARCH(6T2);
    }
    if (disas_t32(s, insn)) {
        return;
    }
    /* fall back to legacy decoder */

    switch ((insn >> 25) & 0xf) {
    case 0: case 1: case 2: case 3:
        /* 16-bit instructions.  Should never happen.  */
        abort();
    case 6: case 7: case 14: case 15:
        /* Coprocessor.  */
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
            /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
            if (extract32(insn, 24, 2) == 3) {
                goto illegal_op; /* op0 = 0b11 : unallocated */
            }

            /*
             * Decode VLLDM and VLSTM first: these are nonstandard because:
             *  * if there is no FPU then these insns must NOP in
             *    Secure state and UNDEF in Nonsecure state
             *  * if there is an FPU then these insns do not have
             *    the usual behaviour that disas_vfp_insn() provides of
             *    being controlled by CPACR/NSACR enable bits or the
             *    lazy-stacking logic.
             */
            if (arm_dc_feature(s, ARM_FEATURE_V8) &&
                (insn & 0xffa00f00) == 0xec200a00) {
                /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
                 *  - VLLDM, VLSTM
                 * We choose to UNDEF if the RAZ bits are non-zero.
                 */
                if (!s->v8m_secure || (insn & 0x0040f0ff)) {
                    goto illegal_op;
                }

                if (arm_dc_feature(s, ARM_FEATURE_VFP)) {
                    uint32_t rn = (insn >> 16) & 0xf;
                    TCGv_i32 fptr = load_reg(s, rn);

                    if (extract32(insn, 20, 1)) {
                        gen_helper_v7m_vlldm(cpu_env, fptr);
                    } else {
                        gen_helper_v7m_vlstm(cpu_env, fptr);
                    }
                    tcg_temp_free_i32(fptr);

                    /* End the TB, because we have updated FP control bits */
                    s->base.is_jmp = DISAS_UPDATE;
                }
                break;
            }
            if (arm_dc_feature(s, ARM_FEATURE_VFP) &&
                ((insn >> 8) & 0xe) == 10) {
                /* FP, and the CPU supports it */
                if (disas_vfp_insn(s, insn)) {
                    goto illegal_op;
                }
                break;
            }

            /* All other insns: NOCP */
            gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
                               default_exception_el(s));
            break;
        }
        if ((insn & 0xfe000a00) == 0xfc000800
            && arm_dc_feature(s, ARM_FEATURE_V8)) {
            /* The Thumb2 and ARM encodings are identical.  */
            if (disas_neon_insn_3same_ext(s, insn)) {
                goto illegal_op;
            }
        } else if ((insn & 0xff000a00) == 0xfe000800
                   && arm_dc_feature(s, ARM_FEATURE_V8)) {
            /* The Thumb2 and ARM encodings are identical.  */
            if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
                goto illegal_op;
            }
        } else if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
            if (disas_neon_data_insn(s, insn)) {
                goto illegal_op;
            }
        } else if (((insn >> 8) & 0xe) == 10) {
            if (disas_vfp_insn(s, insn)) {
                goto illegal_op;
            }
        } else {
            if (insn & (1 << 28)) {
                goto illegal_op;
            }
            if (disas_coproc_insn(s, insn)) {
                goto illegal_op;
            }
        }
        break;
    case 12:
        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(s, insn)) {
                goto illegal_op;
            }
            break;
        }
        goto illegal_op;
    default:
    illegal_op:
        unallocated_encoding(s);
    }
}
static void disas_thumb_insn(DisasContext *s, uint32_t insn)
{
    if (!disas_t16(s, insn)) {
        unallocated_encoding(s);
    }
}
static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
    /* Return true if the insn at dc->base.pc_next might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     * We know this is a Thumb insn, and our caller ensures we are
     * only called if dc->base.pc_next is less than 4 bytes from the page
     * boundary, so we cross the page if the first 16 bits indicate
     * that this is a 32 bit insn.
     */
    uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);

    return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
}
static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = env_archcpu(env);
    uint32_t tb_flags = dc->base.tb->flags;
    uint32_t condexec, core_mmu_idx;

    dc->isar = &cpu->isar;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = FIELD_EX32(tb_flags, TBFLAG_AM32, THUMB);
    dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
    condexec = FIELD_EX32(tb_flags, TBFLAG_AM32, CONDEXEC);
    dc->condexec_mask = (condexec & 0xf) << 1;
    dc->condexec_cond = condexec >> 4;

    core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
    dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);

    if (arm_feature(env, ARM_FEATURE_M)) {
        dc->vfp_enabled = 1;
        dc->be_data = MO_TE;
        dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_M32, HANDLER);
        dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            regime_is_secure(env, dc->mmu_idx);
        dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_M32, STACKCHECK);
        dc->v8m_fpccr_s_wrong =
            FIELD_EX32(tb_flags, TBFLAG_M32, FPCCR_S_WRONG);
        dc->v7m_new_fp_ctxt_needed =
            FIELD_EX32(tb_flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED);
        dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_M32, LSPACT);
    } else {
        dc->be_data =
            FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
        dc->debug_target_el =
            FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
        dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
        dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
        dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
        dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
        } else {
            dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
            dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
        }
    }
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
    dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
    dc->is_ldex = false;

    dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;

    /* If architectural single step active, limit to 1.  */
    if (is_singlestepping(dc)) {
        dc->base.max_insns = 1;
    }

    /* ARM is a fixed-length ISA.  Bound the number of insns to execute
       to those left on the page.  */
    if (!dc->thumb) {
        int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
        dc->base.max_insns = MIN(dc->base.max_insns, bound);
    }

    cpu_V0 = tcg_temp_new_i64();
    cpu_V1 = tcg_temp_new_i64();
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
}
static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
}
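/*
 * arm_tr_insn_start() records three words per insn: the PC, the packed
 * IT-block state and a zero syndrome placeholder; restore_state_to_opc()
 * consumes the same three words after an exception.
 */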
static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next,
                       (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                       0);
    dc->insn_start = tcg_last_op();
}
static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->base.pc_next);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing.  */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->base.pc_next += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}
static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page.  */
    if (dc->base.pc_next >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block.  */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_swstep_exception(dc, 0, 0);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}
static void arm_post_translate_insn(DisasContext *dc)
{
    if (dc->condjmp && !dc->base.is_jmp) {
        gen_set_label(dc->condlabel);
        dc->condjmp = 0;
    }
    translator_loop_temp_check(&dc->base);
}
static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    unsigned int insn;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    dc->pc_curr = dc->base.pc_next;
    insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
    dc->insn = insn;
    dc->base.pc_next += 4;
    disas_arm_insn(dc, insn);

    arm_post_translate_insn(dc);

    /* ARM is a fixed-length ISA.  We performed the cross-page check
       in init_disas_context by adjusting max_insns.  */
}
static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
{
    /* Return true if this Thumb insn is always unconditional,
     * even inside an IT block. This is true of only a very few
     * instructions: BKPT, HLT, and SG.
     *
     * A larger class of instructions are UNPREDICTABLE if used
     * inside an IT block; we do not need to detect those here, because
     * what we do by default (perform the cc check and update the IT
     * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
     * choice for those situations.
     *
     * insn is either a 16-bit or a 32-bit instruction; the two are
     * distinguishable because for the 16-bit case the top 16 bits
     * are zeroes, and that isn't a valid 32-bit encoding.
     */
    if ((insn & 0xffffff00) == 0xbe00) {
        /* BKPT */
        return true;
    }

    if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_M)) {
        /* HLT: v8A only. This is unconditional even when it is going to
         * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
         * For v7 cores this was a plain old undefined encoding and so
         * honours its cc check. (We might be using the encoding as
         * a semihosting trap, but we don't change the cc check behaviour
         * on that account, because a debugger connected to a real v7A
         * core and emulating semihosting traps by catching the UNDEF
         * exception would also only see cases where the cc check passed.
         * No guest code should be trying to do a HLT semihosting trap
         * in an IT block anyway.
         */
        return true;
    }

    if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* SG: v8M only */
        return true;
    }

    return false;
}
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    dc->pc_curr = dc->base.pc_next;
    insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
    dc->base.pc_next += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->base.pc_next += 2;
    }
    dc->insn = insn;

    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        /*
         * Conditionally skip the insn. Note that both 0xe and 0xf mean
         * "always"; 0xf is not "never".
         */
        if (cond < 0x0e) {
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition.  */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->base.pc_next);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->base.pc_next);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->base.pc_next);
        }
    }
}
static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
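/*
 * gen_intermediate_code() below picks the A32, T32 or (when TARGET_AARCH64
 * is built) A64 TranslatorOps from the TB flags and then runs the common
 * translator loop.
 */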
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc = { };
    const TranslatorOps *ops = &arm_translator_ops;

    if (FIELD_EX32(tb->flags, TBFLAG_AM32, THUMB)) {
        ops = &thumb_translator_ops;
    }
#ifdef TARGET_AARCH64
    if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
        ops = &aarch64_translator_ops;
    }
#endif

    translator_loop(ops, &dc.base, cpu, tb, max_insns);
}
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}