/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "arm_ldst.h"
#include "hw/semihosting/semihost.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    dc_isar_feature(jazelle, s)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

#include "exec/gen-icount.h"

static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Function prototypes for gen_ functions calling Neon helpers.  */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
/* Function prototypes for gen_ functions for fix point conversions */
typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}
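
/*
 * Note: the tcg_global_mem_new_* calls above register these temporaries
 * as TCG "globals" backed by fields of CPUARMState, so generated code can
 * operate on them directly while TCG keeps them in sync with the CPU
 * state structure across the translation block.
 */
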
/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}

static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    default:
        g_assert_not_reached();
    }
}

static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* The architectural value of PC.  */
static uint32_t read_pc(DisasContext *s)
{
    return s->pc_curr + (s->thumb ? 4 : 8);
}
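
/*
 * For example: an A32 instruction at 0x1000 observes PC as 0x1008, while
 * a Thumb instruction at the same address observes 0x1004, matching the
 * architectural pipeline offset.
 */
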
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        tcg_gen_movi_i32(var, read_pc(s));
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/*
 * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
 * This is used for load/store for which use of PC implies (literal),
 * or ADD that implies ADR.
 */
static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    if (reg == 15) {
        tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
    } else {
        tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
    }
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    store_reg(s, 13, var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}

static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
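
/*
 * For example, gen_rev16 turns 0xAABBCCDD into 0xBBAADDCC: each 16-bit
 * half is byteswapped independently, as the REV16 instruction requires.
 */
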
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
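
/*
 * Clearing bit 15 of both operands keeps a carry out of the low halfword
 * from propagating into the high one; the saved XOR of the two bit-15
 * values is folded back in afterwards, so each 16-bit lane wraps
 * independently. For example, 0x00018000 + 0x00018000 gives 0x00020000
 * here rather than the plain 32-bit sum 0x00030000.
 */
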
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1.  Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
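
/*
 * A rough C-level sketch of the flag computation above (illustrative
 * only, not used by the translator):
 *
 *     uint32_t res = t0 + t1;
 *     NF = ZF = res;
 *     CF = res < t0;                      // unsigned carry out
 *     VF = (res ^ t0) & ~(t0 ^ t1);       // bit 31 holds the V flag
 */
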
/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
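
/*
 * When the TCG backend lacks a 32-bit add-with-carry (add2) op, the
 * fallback above widens both operands and CF to 64 bits, adds them, and
 * splits the 64-bit sum back into the 32-bit result (NF) and the
 * carry-out (CF).
 */
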
/* dest = T0 - T1.  Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
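
/*
 * This relies on the two's-complement identity
 * t0 - t1 - (1 - CF) == t0 + ~t1 + CF, so subtract-with-carry can reuse
 * gen_adc_CC on the complemented second operand.
 */
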
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
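
/*
 * ARM register-specified shifts use only the bottom byte of the shift
 * register, and LSL/LSR by 32 or more must yield 0 (ASR saturates at a
 * shift of 31, i.e. copies of the sign bit). The movcond and the
 * "& 0x1f" above implement that clamping, because native TCG shifts are
 * only defined for counts 0..31.
 */
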
static void shifter_out_im(TCGv_i32 var, int shift)
{
    tcg_gen_extract_i32(cpu_CF, var, shift, 1);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}

static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

689 #define PAS_OP(pfx) \
691 case 0: gen_pas_helper(glue(pfx,add16)); break; \
692 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
693 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
694 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
695 case 4: gen_pas_helper(glue(pfx,add8)); break; \
696 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
698 static void gen_arm_parallel_addsub(int op1
, int op2
, TCGv_i32 a
, TCGv_i32 b
)
703 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
705 tmp
= tcg_temp_new_ptr();
706 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
708 tcg_temp_free_ptr(tmp
);
711 tmp
= tcg_temp_new_ptr();
712 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
714 tcg_temp_free_ptr(tmp
);
716 #undef gen_pas_helper
717 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
730 #undef gen_pas_helper
735 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
736 #define PAS_OP(pfx) \
738 case 0: gen_pas_helper(glue(pfx,add8)); break; \
739 case 1: gen_pas_helper(glue(pfx,add16)); break; \
740 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
741 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
742 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
743 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
745 static void gen_thumb2_parallel_addsub(int op1
, int op2
, TCGv_i32 a
, TCGv_i32 b
)
750 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
752 tmp
= tcg_temp_new_ptr();
753 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
755 tcg_temp_free_ptr(tmp
);
758 tmp
= tcg_temp_new_ptr();
759 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
761 tcg_temp_free_ptr(tmp
);
763 #undef gen_pas_helper
764 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
777 #undef gen_pas_helper
783 * Generate a conditional based on ARM condition code cc.
784 * This is common between ARM and Aarch64 targets.
786 void arm_test_cc(DisasCompare
*cmp
, int cc
)
817 case 8: /* hi: C && !Z */
818 case 9: /* ls: !C || Z -> !(C && !Z) */
820 value
= tcg_temp_new_i32();
822 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
823 ZF is non-zero for !Z; so AND the two subexpressions. */
824 tcg_gen_neg_i32(value
, cpu_CF
);
825 tcg_gen_and_i32(value
, value
, cpu_ZF
);
828 case 10: /* ge: N == V -> N ^ V == 0 */
829 case 11: /* lt: N != V -> N ^ V != 0 */
830 /* Since we're only interested in the sign bit, == 0 is >= 0. */
832 value
= tcg_temp_new_i32();
834 tcg_gen_xor_i32(value
, cpu_VF
, cpu_NF
);
837 case 12: /* gt: !Z && N == V */
838 case 13: /* le: Z || N != V */
840 value
= tcg_temp_new_i32();
842 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
843 * the sign bit then AND with ZF to yield the result. */
844 tcg_gen_xor_i32(value
, cpu_VF
, cpu_NF
);
845 tcg_gen_sari_i32(value
, value
, 31);
846 tcg_gen_andc_i32(value
, cpu_ZF
, value
);
849 case 14: /* always */
850 case 15: /* always */
851 /* Use the ALWAYS condition, which will fold early.
852 * It doesn't matter what we use for the value. */
853 cond
= TCG_COND_ALWAYS
;
858 fprintf(stderr
, "Bad condition code 0x%x\n", cc
);
863 cond
= tcg_invert_cond(cond
);
869 cmp
->value_global
= global
;
872 void arm_free_cc(DisasCompare
*cmp
)
874 if (!cmp
->value_global
) {
875 tcg_temp_free_i32(cmp
->value
);
879 void arm_jump_cc(DisasCompare
*cmp
, TCGLabel
*label
)
881 tcg_gen_brcondi_i32(cmp
->cond
, cmp
->value
, 0, label
);
884 void arm_gen_test_cc(int cc
, TCGLabel
*label
)
887 arm_test_cc(&cmp
, cc
);
888 arm_jump_cc(&cmp
, label
);
892 static const uint8_t table_logic_cc
[16] = {
911 static inline void gen_set_condexec(DisasContext
*s
)
913 if (s
->condexec_mask
) {
914 uint32_t val
= (s
->condexec_cond
<< 4) | (s
->condexec_mask
>> 1);
915 TCGv_i32 tmp
= tcg_temp_new_i32();
916 tcg_gen_movi_i32(tmp
, val
);
917 store_cpu_field(tmp
, condexec_bits
);
921 static inline void gen_set_pc_im(DisasContext
*s
, target_ulong val
)
923 tcg_gen_movi_i32(cpu_R
[15], val
);
926 /* Set PC and Thumb state from an immediate address. */
927 static inline void gen_bx_im(DisasContext
*s
, uint32_t addr
)
931 s
->base
.is_jmp
= DISAS_JUMP
;
932 if (s
->thumb
!= (addr
& 1)) {
933 tmp
= tcg_temp_new_i32();
934 tcg_gen_movi_i32(tmp
, addr
& 1);
935 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUARMState
, thumb
));
936 tcg_temp_free_i32(tmp
);
938 tcg_gen_movi_i32(cpu_R
[15], addr
& ~1);
941 /* Set PC and Thumb state from var. var is marked as dead. */
942 static inline void gen_bx(DisasContext
*s
, TCGv_i32 var
)
944 s
->base
.is_jmp
= DISAS_JUMP
;
945 tcg_gen_andi_i32(cpu_R
[15], var
, ~1);
946 tcg_gen_andi_i32(var
, var
, 1);
947 store_cpu_field(var
, thumb
);
950 /* Set PC and Thumb state from var. var is marked as dead.
951 * For M-profile CPUs, include logic to detect exception-return
952 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
953 * and BX reg, and no others, and happens only for code in Handler mode.
955 static inline void gen_bx_excret(DisasContext
*s
, TCGv_i32 var
)
957 /* Generate the same code here as for a simple bx, but flag via
958 * s->base.is_jmp that we need to do the rest of the work later.
961 if (arm_dc_feature(s
, ARM_FEATURE_M_SECURITY
) ||
962 (s
->v7m_handler_mode
&& arm_dc_feature(s
, ARM_FEATURE_M
))) {
963 s
->base
.is_jmp
= DISAS_BX_EXCRET
;
967 static inline void gen_bx_excret_final_code(DisasContext
*s
)
969 /* Generate the code to finish possible exception return and end the TB */
970 TCGLabel
*excret_label
= gen_new_label();
973 if (arm_dc_feature(s
, ARM_FEATURE_M_SECURITY
)) {
974 /* Covers FNC_RETURN and EXC_RETURN magic */
975 min_magic
= FNC_RETURN_MIN_MAGIC
;
977 /* EXC_RETURN magic only */
978 min_magic
= EXC_RETURN_MIN_MAGIC
;
981 /* Is the new PC value in the magic range indicating exception return? */
982 tcg_gen_brcondi_i32(TCG_COND_GEU
, cpu_R
[15], min_magic
, excret_label
);
983 /* No: end the TB as we would for a DISAS_JMP */
984 if (is_singlestepping(s
)) {
985 gen_singlestep_exception(s
);
987 tcg_gen_exit_tb(NULL
, 0);
989 gen_set_label(excret_label
);
990 /* Yes: this is an exception return.
991 * At this point in runtime env->regs[15] and env->thumb will hold
992 * the exception-return magic number, which do_v7m_exception_exit()
993 * will read. Nothing else will be able to see those values because
994 * the cpu-exec main loop guarantees that we will always go straight
995 * from raising the exception to the exception-handling code.
997 * gen_ss_advance(s) does nothing on M profile currently but
998 * calling it is conceptually the right thing as we have executed
999 * this instruction (compare SWI, HVC, SMC handling).
1002 gen_exception_internal(EXCP_EXCEPTION_EXIT
);
1005 static inline void gen_bxns(DisasContext
*s
, int rm
)
1007 TCGv_i32 var
= load_reg(s
, rm
);
1009 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1010 * we need to sync state before calling it, but:
1011 * - we don't need to do gen_set_pc_im() because the bxns helper will
1012 * always set the PC itself
1013 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1014 * unless it's outside an IT block or the last insn in an IT block,
1015 * so we know that condexec == 0 (already set at the top of the TB)
1016 * is correct in the non-UNPREDICTABLE cases, and we can choose
1017 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1019 gen_helper_v7m_bxns(cpu_env
, var
);
1020 tcg_temp_free_i32(var
);
1021 s
->base
.is_jmp
= DISAS_EXIT
;
1024 static inline void gen_blxns(DisasContext
*s
, int rm
)
1026 TCGv_i32 var
= load_reg(s
, rm
);
1028 /* We don't need to sync condexec state, for the same reason as bxns.
1029 * We do however need to set the PC, because the blxns helper reads it.
1030 * The blxns helper may throw an exception.
1032 gen_set_pc_im(s
, s
->base
.pc_next
);
1033 gen_helper_v7m_blxns(cpu_env
, var
);
1034 tcg_temp_free_i32(var
);
1035 s
->base
.is_jmp
= DISAS_EXIT
;
1038 /* Variant of store_reg which uses branch&exchange logic when storing
1039 to r15 in ARM architecture v7 and above. The source must be a temporary
1040 and will be marked as dead. */
1041 static inline void store_reg_bx(DisasContext
*s
, int reg
, TCGv_i32 var
)
1043 if (reg
== 15 && ENABLE_ARCH_7
) {
1046 store_reg(s
, reg
, var
);
1050 /* Variant of store_reg which uses branch&exchange logic when storing
1051 * to r15 in ARM architecture v5T and above. This is used for storing
1052 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1053 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
1054 static inline void store_reg_from_load(DisasContext
*s
, int reg
, TCGv_i32 var
)
1056 if (reg
== 15 && ENABLE_ARCH_5
) {
1057 gen_bx_excret(s
, var
);
1059 store_reg(s
, reg
, var
);
1063 #ifdef CONFIG_USER_ONLY
1064 #define IS_USER_ONLY 1
1066 #define IS_USER_ONLY 0
1069 /* Abstractions of "generate code to do a guest load/store for
1070 * AArch32", where a vaddr is always 32 bits (and is zero
1071 * extended if we're a 64 bit core) and data is also
1072 * 32 bits unless specifically doing a 64 bit access.
1073 * These functions work like tcg_gen_qemu_{ld,st}* except
1074 * that the address argument is TCGv_i32 rather than TCGv.
1077 static inline TCGv
gen_aa32_addr(DisasContext
*s
, TCGv_i32 a32
, TCGMemOp op
)
1079 TCGv addr
= tcg_temp_new();
1080 tcg_gen_extu_i32_tl(addr
, a32
);
1082 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1083 if (!IS_USER_ONLY
&& s
->sctlr_b
&& (op
& MO_SIZE
) < MO_32
) {
1084 tcg_gen_xori_tl(addr
, addr
, 4 - (1 << (op
& MO_SIZE
)));
1089 static void gen_aa32_ld_i32(DisasContext
*s
, TCGv_i32 val
, TCGv_i32 a32
,
1090 int index
, TCGMemOp opc
)
1094 if (arm_dc_feature(s
, ARM_FEATURE_M
) &&
1095 !arm_dc_feature(s
, ARM_FEATURE_M_MAIN
)) {
1099 addr
= gen_aa32_addr(s
, a32
, opc
);
1100 tcg_gen_qemu_ld_i32(val
, addr
, index
, opc
);
1101 tcg_temp_free(addr
);
1104 static void gen_aa32_st_i32(DisasContext
*s
, TCGv_i32 val
, TCGv_i32 a32
,
1105 int index
, TCGMemOp opc
)
1109 if (arm_dc_feature(s
, ARM_FEATURE_M
) &&
1110 !arm_dc_feature(s
, ARM_FEATURE_M_MAIN
)) {
1114 addr
= gen_aa32_addr(s
, a32
, opc
);
1115 tcg_gen_qemu_st_i32(val
, addr
, index
, opc
);
1116 tcg_temp_free(addr
);
1119 #define DO_GEN_LD(SUFF, OPC) \
1120 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
1121 TCGv_i32 a32, int index) \
1123 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
1125 static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1127 TCGv_i32 a32, int index, \
1130 gen_aa32_ld##SUFF(s, val, a32, index); \
1131 disas_set_da_iss(s, OPC, issinfo); \
1134 #define DO_GEN_ST(SUFF, OPC) \
1135 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1136 TCGv_i32 a32, int index) \
1138 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
1140 static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1142 TCGv_i32 a32, int index, \
1145 gen_aa32_st##SUFF(s, val, a32, index); \
1146 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
1149 static inline void gen_aa32_frob64(DisasContext
*s
, TCGv_i64 val
)
1151 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1152 if (!IS_USER_ONLY
&& s
->sctlr_b
) {
1153 tcg_gen_rotri_i64(val
, val
, 32);
1157 static void gen_aa32_ld_i64(DisasContext
*s
, TCGv_i64 val
, TCGv_i32 a32
,
1158 int index
, TCGMemOp opc
)
1160 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
1161 tcg_gen_qemu_ld_i64(val
, addr
, index
, opc
);
1162 gen_aa32_frob64(s
, val
);
1163 tcg_temp_free(addr
);
1166 static inline void gen_aa32_ld64(DisasContext
*s
, TCGv_i64 val
,
1167 TCGv_i32 a32
, int index
)
1169 gen_aa32_ld_i64(s
, val
, a32
, index
, MO_Q
| s
->be_data
);
1172 static void gen_aa32_st_i64(DisasContext
*s
, TCGv_i64 val
, TCGv_i32 a32
,
1173 int index
, TCGMemOp opc
)
1175 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
1177 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1178 if (!IS_USER_ONLY
&& s
->sctlr_b
) {
1179 TCGv_i64 tmp
= tcg_temp_new_i64();
1180 tcg_gen_rotri_i64(tmp
, val
, 32);
1181 tcg_gen_qemu_st_i64(tmp
, addr
, index
, opc
);
1182 tcg_temp_free_i64(tmp
);
1184 tcg_gen_qemu_st_i64(val
, addr
, index
, opc
);
1186 tcg_temp_free(addr
);
1189 static inline void gen_aa32_st64(DisasContext
*s
, TCGv_i64 val
,
1190 TCGv_i32 a32
, int index
)
1192 gen_aa32_st_i64(s
, val
, a32
, index
, MO_Q
| s
->be_data
);
1195 DO_GEN_LD(8s
, MO_SB
)
1196 DO_GEN_LD(8u, MO_UB
)
1197 DO_GEN_LD(16s
, MO_SW
)
1198 DO_GEN_LD(16u, MO_UW
)
1199 DO_GEN_LD(32u, MO_UL
)
1201 DO_GEN_ST(16, MO_UW
)
1202 DO_GEN_ST(32, MO_UL
)
1204 static inline void gen_hvc(DisasContext
*s
, int imm16
)
1206 /* The pre HVC helper handles cases when HVC gets trapped
1207 * as an undefined insn by runtime configuration (ie before
1208 * the insn really executes).
1210 gen_set_pc_im(s
, s
->pc_curr
);
1211 gen_helper_pre_hvc(cpu_env
);
1212 /* Otherwise we will treat this as a real exception which
1213 * happens after execution of the insn. (The distinction matters
1214 * for the PC value reported to the exception handler and also
1215 * for single stepping.)
1218 gen_set_pc_im(s
, s
->base
.pc_next
);
1219 s
->base
.is_jmp
= DISAS_HVC
;
1222 static inline void gen_smc(DisasContext
*s
)
1224 /* As with HVC, we may take an exception either before or after
1225 * the insn executes.
1229 gen_set_pc_im(s
, s
->pc_curr
);
1230 tmp
= tcg_const_i32(syn_aa32_smc());
1231 gen_helper_pre_smc(cpu_env
, tmp
);
1232 tcg_temp_free_i32(tmp
);
1233 gen_set_pc_im(s
, s
->base
.pc_next
);
1234 s
->base
.is_jmp
= DISAS_SMC
;
1237 static void gen_exception_internal_insn(DisasContext
*s
, uint32_t pc
, int excp
)
1239 gen_set_condexec(s
);
1240 gen_set_pc_im(s
, pc
);
1241 gen_exception_internal(excp
);
1242 s
->base
.is_jmp
= DISAS_NORETURN
;
1245 static void gen_exception_insn(DisasContext
*s
, uint32_t pc
, int excp
,
1246 int syn
, uint32_t target_el
)
1248 gen_set_condexec(s
);
1249 gen_set_pc_im(s
, pc
);
1250 gen_exception(excp
, syn
, target_el
);
1251 s
->base
.is_jmp
= DISAS_NORETURN
;
1254 static void gen_exception_bkpt_insn(DisasContext
*s
, uint32_t syn
)
1258 gen_set_condexec(s
);
1259 gen_set_pc_im(s
, s
->pc_curr
);
1260 tcg_syn
= tcg_const_i32(syn
);
1261 gen_helper_exception_bkpt_insn(cpu_env
, tcg_syn
);
1262 tcg_temp_free_i32(tcg_syn
);
1263 s
->base
.is_jmp
= DISAS_NORETURN
;
1266 void unallocated_encoding(DisasContext
*s
)
1268 /* Unallocated and reserved encodings are uncategorized */
1269 gen_exception_insn(s
, s
->pc_curr
, EXCP_UDEF
, syn_uncategorized(),
1270 default_exception_el(s
));
1273 /* Force a TB lookup after an instruction that changes the CPU state. */
1274 static inline void gen_lookup_tb(DisasContext
*s
)
1276 tcg_gen_movi_i32(cpu_R
[15], s
->base
.pc_next
);
1277 s
->base
.is_jmp
= DISAS_EXIT
;
1280 static inline void gen_hlt(DisasContext
*s
, int imm
)
1282 /* HLT. This has two purposes.
1283 * Architecturally, it is an external halting debug instruction.
1284 * Since QEMU doesn't implement external debug, we treat this as
1285 * it is required for halting debug disabled: it will UNDEF.
1286 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1287 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1288 * must trigger semihosting even for ARMv7 and earlier, where
1289 * HLT was an undefined encoding.
1290 * In system mode, we don't allow userspace access to
1291 * semihosting, to provide some semblance of security
1292 * (and for consistency with our 32-bit semihosting).
1294 if (semihosting_enabled() &&
1295 #ifndef CONFIG_USER_ONLY
1296 s
->current_el
!= 0 &&
1298 (imm
== (s
->thumb
? 0x3c : 0xf000))) {
1299 gen_exception_internal_insn(s
, s
->base
.pc_next
, EXCP_SEMIHOST
);
1303 unallocated_encoding(s
);
1306 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
,
1309 int val
, rm
, shift
, shiftop
;
1312 if (!(insn
& (1 << 25))) {
1315 if (!(insn
& (1 << 23)))
1318 tcg_gen_addi_i32(var
, var
, val
);
1320 /* shift/register */
1322 shift
= (insn
>> 7) & 0x1f;
1323 shiftop
= (insn
>> 5) & 3;
1324 offset
= load_reg(s
, rm
);
1325 gen_arm_shift_im(offset
, shiftop
, shift
, 0);
1326 if (!(insn
& (1 << 23)))
1327 tcg_gen_sub_i32(var
, var
, offset
);
1329 tcg_gen_add_i32(var
, var
, offset
);
1330 tcg_temp_free_i32(offset
);
1334 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
,
1335 int extra
, TCGv_i32 var
)
1340 if (insn
& (1 << 22)) {
1342 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
1343 if (!(insn
& (1 << 23)))
1347 tcg_gen_addi_i32(var
, var
, val
);
1351 tcg_gen_addi_i32(var
, var
, extra
);
1353 offset
= load_reg(s
, rm
);
1354 if (!(insn
& (1 << 23)))
1355 tcg_gen_sub_i32(var
, var
, offset
);
1357 tcg_gen_add_i32(var
, var
, offset
);
1358 tcg_temp_free_i32(offset
);
1362 static TCGv_ptr
get_fpstatus_ptr(int neon
)
1364 TCGv_ptr statusptr
= tcg_temp_new_ptr();
1367 offset
= offsetof(CPUARMState
, vfp
.standard_fp_status
);
1369 offset
= offsetof(CPUARMState
, vfp
.fp_status
);
1371 tcg_gen_addi_ptr(statusptr
, cpu_env
, offset
);
1375 static inline long vfp_reg_offset(bool dp
, unsigned reg
)
1378 return offsetof(CPUARMState
, vfp
.zregs
[reg
>> 1].d
[reg
& 1]);
1380 long ofs
= offsetof(CPUARMState
, vfp
.zregs
[reg
>> 2].d
[(reg
>> 1) & 1]);
1382 ofs
+= offsetof(CPU_DoubleU
, l
.upper
);
1384 ofs
+= offsetof(CPU_DoubleU
, l
.lower
);
1390 /* Return the offset of a 32-bit piece of a NEON register.
1391 zero is the least significant end of the register. */
1393 neon_reg_offset (int reg
, int n
)
1397 return vfp_reg_offset(0, sreg
);
1400 /* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
1401 * where 0 is the least significant end of the register.
1404 neon_element_offset(int reg
, int element
, TCGMemOp size
)
1406 int element_size
= 1 << size
;
1407 int ofs
= element
* element_size
;
1408 #ifdef HOST_WORDS_BIGENDIAN
1409 /* Calculate the offset assuming fully little-endian,
1410 * then XOR to account for the order of the 8-byte units.
1412 if (element_size
< 8) {
1413 ofs
^= 8 - element_size
;
1416 return neon_reg_offset(reg
, 0) + ofs
;
1419 static TCGv_i32
neon_load_reg(int reg
, int pass
)
1421 TCGv_i32 tmp
= tcg_temp_new_i32();
1422 tcg_gen_ld_i32(tmp
, cpu_env
, neon_reg_offset(reg
, pass
));
1426 static void neon_load_element(TCGv_i32 var
, int reg
, int ele
, TCGMemOp mop
)
1428 long offset
= neon_element_offset(reg
, ele
, mop
& MO_SIZE
);
1432 tcg_gen_ld8u_i32(var
, cpu_env
, offset
);
1435 tcg_gen_ld16u_i32(var
, cpu_env
, offset
);
1438 tcg_gen_ld_i32(var
, cpu_env
, offset
);
1441 g_assert_not_reached();
1445 static void neon_load_element64(TCGv_i64 var
, int reg
, int ele
, TCGMemOp mop
)
1447 long offset
= neon_element_offset(reg
, ele
, mop
& MO_SIZE
);
1451 tcg_gen_ld8u_i64(var
, cpu_env
, offset
);
1454 tcg_gen_ld16u_i64(var
, cpu_env
, offset
);
1457 tcg_gen_ld32u_i64(var
, cpu_env
, offset
);
1460 tcg_gen_ld_i64(var
, cpu_env
, offset
);
1463 g_assert_not_reached();
1467 static void neon_store_reg(int reg
, int pass
, TCGv_i32 var
)
1469 tcg_gen_st_i32(var
, cpu_env
, neon_reg_offset(reg
, pass
));
1470 tcg_temp_free_i32(var
);
1473 static void neon_store_element(int reg
, int ele
, TCGMemOp size
, TCGv_i32 var
)
1475 long offset
= neon_element_offset(reg
, ele
, size
);
1479 tcg_gen_st8_i32(var
, cpu_env
, offset
);
1482 tcg_gen_st16_i32(var
, cpu_env
, offset
);
1485 tcg_gen_st_i32(var
, cpu_env
, offset
);
1488 g_assert_not_reached();
1492 static void neon_store_element64(int reg
, int ele
, TCGMemOp size
, TCGv_i64 var
)
1494 long offset
= neon_element_offset(reg
, ele
, size
);
1498 tcg_gen_st8_i64(var
, cpu_env
, offset
);
1501 tcg_gen_st16_i64(var
, cpu_env
, offset
);
1504 tcg_gen_st32_i64(var
, cpu_env
, offset
);
1507 tcg_gen_st_i64(var
, cpu_env
, offset
);
1510 g_assert_not_reached();
1514 static inline void neon_load_reg64(TCGv_i64 var
, int reg
)
1516 tcg_gen_ld_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1519 static inline void neon_store_reg64(TCGv_i64 var
, int reg
)
1521 tcg_gen_st_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1524 static inline void neon_load_reg32(TCGv_i32 var
, int reg
)
1526 tcg_gen_ld_i32(var
, cpu_env
, vfp_reg_offset(false, reg
));
1529 static inline void neon_store_reg32(TCGv_i32 var
, int reg
)
1531 tcg_gen_st_i32(var
, cpu_env
, vfp_reg_offset(false, reg
));
1534 static TCGv_ptr
vfp_reg_ptr(bool dp
, int reg
)
1536 TCGv_ptr ret
= tcg_temp_new_ptr();
1537 tcg_gen_addi_ptr(ret
, cpu_env
, vfp_reg_offset(dp
, reg
));
1541 #define ARM_CP_RW_BIT (1 << 20)
1543 /* Include the VFP decoder */
1544 #include "translate-vfp.inc.c"
1546 static inline void iwmmxt_load_reg(TCGv_i64 var
, int reg
)
1548 tcg_gen_ld_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1551 static inline void iwmmxt_store_reg(TCGv_i64 var
, int reg
)
1553 tcg_gen_st_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1556 static inline TCGv_i32
iwmmxt_load_creg(int reg
)
1558 TCGv_i32 var
= tcg_temp_new_i32();
1559 tcg_gen_ld_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1563 static inline void iwmmxt_store_creg(int reg
, TCGv_i32 var
)
1565 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1566 tcg_temp_free_i32(var
);
1569 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn
)
1571 iwmmxt_store_reg(cpu_M0
, rn
);
1574 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn
)
1576 iwmmxt_load_reg(cpu_M0
, rn
);
1579 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn
)
1581 iwmmxt_load_reg(cpu_V1
, rn
);
1582 tcg_gen_or_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1585 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn
)
1587 iwmmxt_load_reg(cpu_V1
, rn
);
1588 tcg_gen_and_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1591 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn
)
1593 iwmmxt_load_reg(cpu_V1
, rn
);
1594 tcg_gen_xor_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1597 #define IWMMXT_OP(name) \
1598 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1600 iwmmxt_load_reg(cpu_V1, rn); \
1601 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1604 #define IWMMXT_OP_ENV(name) \
1605 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1607 iwmmxt_load_reg(cpu_V1, rn); \
1608 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1611 #define IWMMXT_OP_ENV_SIZE(name) \
1612 IWMMXT_OP_ENV(name##b) \
1613 IWMMXT_OP_ENV(name##w) \
1614 IWMMXT_OP_ENV(name##l)
1616 #define IWMMXT_OP_ENV1(name) \
1617 static inline void gen_op_iwmmxt_##name##_M0(void) \
1619 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1633 IWMMXT_OP_ENV_SIZE(unpackl
)
1634 IWMMXT_OP_ENV_SIZE(unpackh
)
1636 IWMMXT_OP_ENV1(unpacklub
)
1637 IWMMXT_OP_ENV1(unpackluw
)
1638 IWMMXT_OP_ENV1(unpacklul
)
1639 IWMMXT_OP_ENV1(unpackhub
)
1640 IWMMXT_OP_ENV1(unpackhuw
)
1641 IWMMXT_OP_ENV1(unpackhul
)
1642 IWMMXT_OP_ENV1(unpacklsb
)
1643 IWMMXT_OP_ENV1(unpacklsw
)
1644 IWMMXT_OP_ENV1(unpacklsl
)
1645 IWMMXT_OP_ENV1(unpackhsb
)
1646 IWMMXT_OP_ENV1(unpackhsw
)
1647 IWMMXT_OP_ENV1(unpackhsl
)
1649 IWMMXT_OP_ENV_SIZE(cmpeq
)
1650 IWMMXT_OP_ENV_SIZE(cmpgtu
)
1651 IWMMXT_OP_ENV_SIZE(cmpgts
)
1653 IWMMXT_OP_ENV_SIZE(mins
)
1654 IWMMXT_OP_ENV_SIZE(minu
)
1655 IWMMXT_OP_ENV_SIZE(maxs
)
1656 IWMMXT_OP_ENV_SIZE(maxu
)
1658 IWMMXT_OP_ENV_SIZE(subn
)
1659 IWMMXT_OP_ENV_SIZE(addn
)
1660 IWMMXT_OP_ENV_SIZE(subu
)
1661 IWMMXT_OP_ENV_SIZE(addu
)
1662 IWMMXT_OP_ENV_SIZE(subs
)
1663 IWMMXT_OP_ENV_SIZE(adds
)
1665 IWMMXT_OP_ENV(avgb0
)
1666 IWMMXT_OP_ENV(avgb1
)
1667 IWMMXT_OP_ENV(avgw0
)
1668 IWMMXT_OP_ENV(avgw1
)
1670 IWMMXT_OP_ENV(packuw
)
1671 IWMMXT_OP_ENV(packul
)
1672 IWMMXT_OP_ENV(packuq
)
1673 IWMMXT_OP_ENV(packsw
)
1674 IWMMXT_OP_ENV(packsl
)
1675 IWMMXT_OP_ENV(packsq
)
1677 static void gen_op_iwmmxt_set_mup(void)
1680 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1681 tcg_gen_ori_i32(tmp
, tmp
, 2);
1682 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1685 static void gen_op_iwmmxt_set_cup(void)
1688 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1689 tcg_gen_ori_i32(tmp
, tmp
, 1);
1690 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1693 static void gen_op_iwmmxt_setpsr_nz(void)
1695 TCGv_i32 tmp
= tcg_temp_new_i32();
1696 gen_helper_iwmmxt_setpsr_nz(tmp
, cpu_M0
);
1697 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCASF
]);
1700 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn
)
1702 iwmmxt_load_reg(cpu_V1
, rn
);
1703 tcg_gen_ext32u_i64(cpu_V1
, cpu_V1
);
1704 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1707 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
,
1714 rd
= (insn
>> 16) & 0xf;
1715 tmp
= load_reg(s
, rd
);
1717 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
1718 if (insn
& (1 << 24)) {
1720 if (insn
& (1 << 23))
1721 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1723 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1724 tcg_gen_mov_i32(dest
, tmp
);
1725 if (insn
& (1 << 21))
1726 store_reg(s
, rd
, tmp
);
1728 tcg_temp_free_i32(tmp
);
1729 } else if (insn
& (1 << 21)) {
1731 tcg_gen_mov_i32(dest
, tmp
);
1732 if (insn
& (1 << 23))
1733 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1735 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1736 store_reg(s
, rd
, tmp
);
1737 } else if (!(insn
& (1 << 23)))
1742 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
, TCGv_i32 dest
)
1744 int rd
= (insn
>> 0) & 0xf;
1747 if (insn
& (1 << 8)) {
1748 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
) {
1751 tmp
= iwmmxt_load_creg(rd
);
1754 tmp
= tcg_temp_new_i32();
1755 iwmmxt_load_reg(cpu_V0
, rd
);
1756 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
1758 tcg_gen_andi_i32(tmp
, tmp
, mask
);
1759 tcg_gen_mov_i32(dest
, tmp
);
1760 tcg_temp_free_i32(tmp
);
1764 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1765 (ie. an undefined instruction). */
1766 static int disas_iwmmxt_insn(DisasContext
*s
, uint32_t insn
)
1769 int rdhi
, rdlo
, rd0
, rd1
, i
;
1771 TCGv_i32 tmp
, tmp2
, tmp3
;
1773 if ((insn
& 0x0e000e00) == 0x0c000000) {
1774 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1776 rdlo
= (insn
>> 12) & 0xf;
1777 rdhi
= (insn
>> 16) & 0xf;
1778 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1779 iwmmxt_load_reg(cpu_V0
, wrd
);
1780 tcg_gen_extrl_i64_i32(cpu_R
[rdlo
], cpu_V0
);
1781 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
1782 tcg_gen_extrl_i64_i32(cpu_R
[rdhi
], cpu_V0
);
1783 } else { /* TMCRR */
1784 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
1785 iwmmxt_store_reg(cpu_V0
, wrd
);
1786 gen_op_iwmmxt_set_mup();
1791 wrd
= (insn
>> 12) & 0xf;
1792 addr
= tcg_temp_new_i32();
1793 if (gen_iwmmxt_address(s
, insn
, addr
)) {
1794 tcg_temp_free_i32(addr
);
1797 if (insn
& ARM_CP_RW_BIT
) {
1798 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1799 tmp
= tcg_temp_new_i32();
1800 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
1801 iwmmxt_store_creg(wrd
, tmp
);
1804 if (insn
& (1 << 8)) {
1805 if (insn
& (1 << 22)) { /* WLDRD */
1806 gen_aa32_ld64(s
, cpu_M0
, addr
, get_mem_index(s
));
1808 } else { /* WLDRW wRd */
1809 tmp
= tcg_temp_new_i32();
1810 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
1813 tmp
= tcg_temp_new_i32();
1814 if (insn
& (1 << 22)) { /* WLDRH */
1815 gen_aa32_ld16u(s
, tmp
, addr
, get_mem_index(s
));
1816 } else { /* WLDRB */
1817 gen_aa32_ld8u(s
, tmp
, addr
, get_mem_index(s
));
1821 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1822 tcg_temp_free_i32(tmp
);
1824 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1827 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1828 tmp
= iwmmxt_load_creg(wrd
);
1829 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
1831 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1832 tmp
= tcg_temp_new_i32();
1833 if (insn
& (1 << 8)) {
1834 if (insn
& (1 << 22)) { /* WSTRD */
1835 gen_aa32_st64(s
, cpu_M0
, addr
, get_mem_index(s
));
1836 } else { /* WSTRW wRd */
1837 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1838 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
1841 if (insn
& (1 << 22)) { /* WSTRH */
1842 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1843 gen_aa32_st16(s
, tmp
, addr
, get_mem_index(s
));
1844 } else { /* WSTRB */
1845 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1846 gen_aa32_st8(s
, tmp
, addr
, get_mem_index(s
));
1850 tcg_temp_free_i32(tmp
);
1852 tcg_temp_free_i32(addr
);
1856 if ((insn
& 0x0f000000) != 0x0e000000)
1859 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1860 case 0x000: /* WOR */
1861 wrd
= (insn
>> 12) & 0xf;
1862 rd0
= (insn
>> 0) & 0xf;
1863 rd1
= (insn
>> 16) & 0xf;
1864 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1865 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1866 gen_op_iwmmxt_setpsr_nz();
1867 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1868 gen_op_iwmmxt_set_mup();
1869 gen_op_iwmmxt_set_cup();
1871 case 0x011: /* TMCR */
1874 rd
= (insn
>> 12) & 0xf;
1875 wrd
= (insn
>> 16) & 0xf;
1877 case ARM_IWMMXT_wCID
:
1878 case ARM_IWMMXT_wCASF
:
1880 case ARM_IWMMXT_wCon
:
1881 gen_op_iwmmxt_set_cup();
1883 case ARM_IWMMXT_wCSSF
:
1884 tmp
= iwmmxt_load_creg(wrd
);
1885 tmp2
= load_reg(s
, rd
);
1886 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1887 tcg_temp_free_i32(tmp2
);
1888 iwmmxt_store_creg(wrd
, tmp
);
1890 case ARM_IWMMXT_wCGR0
:
1891 case ARM_IWMMXT_wCGR1
:
1892 case ARM_IWMMXT_wCGR2
:
1893 case ARM_IWMMXT_wCGR3
:
1894 gen_op_iwmmxt_set_cup();
1895 tmp
= load_reg(s
, rd
);
1896 iwmmxt_store_creg(wrd
, tmp
);
1902 case 0x100: /* WXOR */
1903 wrd
= (insn
>> 12) & 0xf;
1904 rd0
= (insn
>> 0) & 0xf;
1905 rd1
= (insn
>> 16) & 0xf;
1906 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1907 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1908 gen_op_iwmmxt_setpsr_nz();
1909 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1910 gen_op_iwmmxt_set_mup();
1911 gen_op_iwmmxt_set_cup();
1913 case 0x111: /* TMRC */
1916 rd
= (insn
>> 12) & 0xf;
1917 wrd
= (insn
>> 16) & 0xf;
1918 tmp
= iwmmxt_load_creg(wrd
);
1919 store_reg(s
, rd
, tmp
);
1921 case 0x300: /* WANDN */
1922 wrd
= (insn
>> 12) & 0xf;
1923 rd0
= (insn
>> 0) & 0xf;
1924 rd1
= (insn
>> 16) & 0xf;
1925 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1926 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1927 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1928 gen_op_iwmmxt_setpsr_nz();
1929 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1930 gen_op_iwmmxt_set_mup();
1931 gen_op_iwmmxt_set_cup();
1933 case 0x200: /* WAND */
1934 wrd
= (insn
>> 12) & 0xf;
1935 rd0
= (insn
>> 0) & 0xf;
1936 rd1
= (insn
>> 16) & 0xf;
1937 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1938 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1939 gen_op_iwmmxt_setpsr_nz();
1940 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1941 gen_op_iwmmxt_set_mup();
1942 gen_op_iwmmxt_set_cup();
1944 case 0x810: case 0xa10: /* WMADD */
1945 wrd
= (insn
>> 12) & 0xf;
1946 rd0
= (insn
>> 0) & 0xf;
1947 rd1
= (insn
>> 16) & 0xf;
1948 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1949 if (insn
& (1 << 21))
1950 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1952 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1953 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1954 gen_op_iwmmxt_set_mup();
1956 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1957 wrd
= (insn
>> 12) & 0xf;
1958 rd0
= (insn
>> 16) & 0xf;
1959 rd1
= (insn
>> 0) & 0xf;
1960 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1961 switch ((insn
>> 22) & 3) {
1963 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
1966 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
1969 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
1974 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1975 gen_op_iwmmxt_set_mup();
1976 gen_op_iwmmxt_set_cup();
1978 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1979 wrd
= (insn
>> 12) & 0xf;
1980 rd0
= (insn
>> 16) & 0xf;
1981 rd1
= (insn
>> 0) & 0xf;
1982 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1983 switch ((insn
>> 22) & 3) {
1985 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
1988 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
1991 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
1996 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1997 gen_op_iwmmxt_set_mup();
1998 gen_op_iwmmxt_set_cup();
2000 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2001 wrd
= (insn
>> 12) & 0xf;
2002 rd0
= (insn
>> 16) & 0xf;
2003 rd1
= (insn
>> 0) & 0xf;
2004 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2005 if (insn
& (1 << 22))
2006 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
2008 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
2009 if (!(insn
& (1 << 20)))
2010 gen_op_iwmmxt_addl_M0_wRn(wrd
);
2011 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2012 gen_op_iwmmxt_set_mup();
2014 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2015 wrd
= (insn
>> 12) & 0xf;
2016 rd0
= (insn
>> 16) & 0xf;
2017 rd1
= (insn
>> 0) & 0xf;
2018 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2019 if (insn
& (1 << 21)) {
2020 if (insn
& (1 << 20))
2021 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
2023 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
2025 if (insn
& (1 << 20))
2026 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
2028 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
2030 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2031 gen_op_iwmmxt_set_mup();
2033 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2034 wrd
= (insn
>> 12) & 0xf;
2035 rd0
= (insn
>> 16) & 0xf;
2036 rd1
= (insn
>> 0) & 0xf;
2037 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2038 if (insn
& (1 << 21))
2039 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
2041 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
2042 if (!(insn
& (1 << 20))) {
2043 iwmmxt_load_reg(cpu_V1
, wrd
);
2044 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
2046 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2047 gen_op_iwmmxt_set_mup();
2049 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2050 wrd
= (insn
>> 12) & 0xf;
2051 rd0
= (insn
>> 16) & 0xf;
2052 rd1
= (insn
>> 0) & 0xf;
2053 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2054 switch ((insn
>> 22) & 3) {
2056 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
2059 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
2062 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
2067 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2068 gen_op_iwmmxt_set_mup();
2069 gen_op_iwmmxt_set_cup();
2071 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2072 wrd
= (insn
>> 12) & 0xf;
2073 rd0
= (insn
>> 16) & 0xf;
2074 rd1
= (insn
>> 0) & 0xf;
2075 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2076 if (insn
& (1 << 22)) {
2077 if (insn
& (1 << 20))
2078 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
2080 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
2082 if (insn
& (1 << 20))
2083 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
2085 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
2087 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2088 gen_op_iwmmxt_set_mup();
2089 gen_op_iwmmxt_set_cup();
2091 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2092 wrd
= (insn
>> 12) & 0xf;
2093 rd0
= (insn
>> 16) & 0xf;
2094 rd1
= (insn
>> 0) & 0xf;
2095 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2096 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
2097 tcg_gen_andi_i32(tmp
, tmp
, 7);
2098 iwmmxt_load_reg(cpu_V1
, rd1
);
2099 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2100 tcg_temp_free_i32(tmp
);
2101 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2102 gen_op_iwmmxt_set_mup();
2104 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2105 if (((insn
>> 6) & 3) == 3)
2107 rd
= (insn
>> 12) & 0xf;
2108 wrd
= (insn
>> 16) & 0xf;
2109 tmp
= load_reg(s
, rd
);
2110 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2111 switch ((insn
>> 6) & 3) {
2113 tmp2
= tcg_const_i32(0xff);
2114 tmp3
= tcg_const_i32((insn
& 7) << 3);
2117 tmp2
= tcg_const_i32(0xffff);
2118 tmp3
= tcg_const_i32((insn
& 3) << 4);
2121 tmp2
= tcg_const_i32(0xffffffff);
2122 tmp3
= tcg_const_i32((insn
& 1) << 5);
2128 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, tmp
, tmp2
, tmp3
);
2129 tcg_temp_free_i32(tmp3
);
2130 tcg_temp_free_i32(tmp2
);
2131 tcg_temp_free_i32(tmp
);
2132 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2133 gen_op_iwmmxt_set_mup();
2135 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2136 rd
= (insn
>> 12) & 0xf;
2137 wrd
= (insn
>> 16) & 0xf;
2138 if (rd
== 15 || ((insn
>> 22) & 3) == 3)
2140 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2141 tmp
= tcg_temp_new_i32();
2142 switch ((insn
>> 22) & 3) {
2144 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 7) << 3);
2145 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2147 tcg_gen_ext8s_i32(tmp
, tmp
);
2149 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
2153 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 3) << 4);
2154 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2156 tcg_gen_ext16s_i32(tmp
, tmp
);
2158 tcg_gen_andi_i32(tmp
, tmp
, 0xffff);
2162 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 1) << 5);
2163 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2166 store_reg(s
, rd
, tmp
);
2168 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2169 if ((insn
& 0x000ff008) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2171 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2172 switch ((insn
>> 22) & 3) {
2174 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 7) << 2) + 0);
2177 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 3) << 3) + 4);
2180 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 1) << 4) + 12);
2183 tcg_gen_shli_i32(tmp
, tmp
, 28);
2185 tcg_temp_free_i32(tmp
);
2187 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2188 if (((insn
>> 6) & 3) == 3)
2190 rd
= (insn
>> 12) & 0xf;
2191 wrd
= (insn
>> 16) & 0xf;
2192 tmp
= load_reg(s
, rd
);
2193 switch ((insn
>> 6) & 3) {
2195 gen_helper_iwmmxt_bcstb(cpu_M0
, tmp
);
2198 gen_helper_iwmmxt_bcstw(cpu_M0
, tmp
);
2201 gen_helper_iwmmxt_bcstl(cpu_M0
, tmp
);
2204 tcg_temp_free_i32(tmp
);
2205 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2206 gen_op_iwmmxt_set_mup();
2208 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2209 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2211 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2212 tmp2
= tcg_temp_new_i32();
2213 tcg_gen_mov_i32(tmp2
, tmp
);
2214 switch ((insn
>> 22) & 3) {
2216 for (i
= 0; i
< 7; i
++) {
2217 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
2218 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2222 for (i
= 0; i
< 3; i
++) {
2223 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
2224 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2228 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
2229 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2233 tcg_temp_free_i32(tmp2
);
2234 tcg_temp_free_i32(tmp
);
2236 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2237 wrd
= (insn
>> 12) & 0xf;
2238 rd0
= (insn
>> 16) & 0xf;
2239 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2240 switch ((insn
>> 22) & 3) {
2242 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
2245 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
2248 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
2253 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2254 gen_op_iwmmxt_set_mup();
2256 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2257 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2259 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2260 tmp2
= tcg_temp_new_i32();
2261 tcg_gen_mov_i32(tmp2
, tmp
);
2262 switch ((insn
>> 22) & 3) {
2264 for (i
= 0; i
< 7; i
++) {
2265 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
2266 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2270 for (i
= 0; i
< 3; i
++) {
2271 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
2272 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2276 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
2277 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2281 tcg_temp_free_i32(tmp2
);
2282 tcg_temp_free_i32(tmp
);
2284 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2285 rd
= (insn
>> 12) & 0xf;
2286 rd0
= (insn
>> 16) & 0xf;
2287 if ((insn
& 0xf) != 0 || ((insn
>> 22) & 3) == 3)
2289 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2290 tmp
= tcg_temp_new_i32();
2291 switch ((insn
>> 22) & 3) {
2293 gen_helper_iwmmxt_msbb(tmp
, cpu_M0
);
2296 gen_helper_iwmmxt_msbw(tmp
, cpu_M0
);
2299 gen_helper_iwmmxt_msbl(tmp
, cpu_M0
);
2302 store_reg(s
, rd
, tmp
);
2304 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2305 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2306 wrd
= (insn
>> 12) & 0xf;
2307 rd0
= (insn
>> 16) & 0xf;
2308 rd1
= (insn
>> 0) & 0xf;
2309 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2310 switch ((insn
>> 22) & 3) {
2312 if (insn
& (1 << 21))
2313 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
2315 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
2318 if (insn
& (1 << 21))
2319 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
2321 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
2324 if (insn
& (1 << 21))
2325 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
2327 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
2332 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2333 gen_op_iwmmxt_set_mup();
2334 gen_op_iwmmxt_set_cup();
2336 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2337 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2338 wrd
= (insn
>> 12) & 0xf;
2339 rd0
= (insn
>> 16) & 0xf;
2340 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2341 switch ((insn
>> 22) & 3) {
2343 if (insn
& (1 << 21))
2344 gen_op_iwmmxt_unpacklsb_M0();
2346 gen_op_iwmmxt_unpacklub_M0();
2349 if (insn
& (1 << 21))
2350 gen_op_iwmmxt_unpacklsw_M0();
2352 gen_op_iwmmxt_unpackluw_M0();
2355 if (insn
& (1 << 21))
2356 gen_op_iwmmxt_unpacklsl_M0();
2358 gen_op_iwmmxt_unpacklul_M0();
2363 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2364 gen_op_iwmmxt_set_mup();
2365 gen_op_iwmmxt_set_cup();
2367 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2368 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2369 wrd
= (insn
>> 12) & 0xf;
2370 rd0
= (insn
>> 16) & 0xf;
2371 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2372 switch ((insn
>> 22) & 3) {
2374 if (insn
& (1 << 21))
2375 gen_op_iwmmxt_unpackhsb_M0();
2377 gen_op_iwmmxt_unpackhub_M0();
2380 if (insn
& (1 << 21))
2381 gen_op_iwmmxt_unpackhsw_M0();
2383 gen_op_iwmmxt_unpackhuw_M0();
2386 if (insn
& (1 << 21))
2387 gen_op_iwmmxt_unpackhsl_M0();
2389 gen_op_iwmmxt_unpackhul_M0();
2394 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2395 gen_op_iwmmxt_set_mup();
2396 gen_op_iwmmxt_set_cup();
2398 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2399 case 0x214: case 0x614: case 0xa14: case 0xe14:
2400 if (((insn
>> 22) & 3) == 0)
2402 wrd
= (insn
>> 12) & 0xf;
2403 rd0
= (insn
>> 16) & 0xf;
2404 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2405 tmp
= tcg_temp_new_i32();
2406 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2407 tcg_temp_free_i32(tmp
);
2410 switch ((insn
>> 22) & 3) {
2412 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2415 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2418 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2421 tcg_temp_free_i32(tmp
);
2422 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2423 gen_op_iwmmxt_set_mup();
2424 gen_op_iwmmxt_set_cup();
2426 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2427 case 0x014: case 0x414: case 0x814: case 0xc14:
2428 if (((insn
>> 22) & 3) == 0)
2430 wrd
= (insn
>> 12) & 0xf;
2431 rd0
= (insn
>> 16) & 0xf;
2432 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2433 tmp
= tcg_temp_new_i32();
2434 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2435 tcg_temp_free_i32(tmp
);
2438 switch ((insn
>> 22) & 3) {
2440 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2443 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2446 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2449 tcg_temp_free_i32(tmp
);
2450 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2451 gen_op_iwmmxt_set_mup();
2452 gen_op_iwmmxt_set_cup();
2454 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2455 case 0x114: case 0x514: case 0x914: case 0xd14:
2456 if (((insn
>> 22) & 3) == 0)
2458 wrd
= (insn
>> 12) & 0xf;
2459 rd0
= (insn
>> 16) & 0xf;
2460 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2461 tmp
= tcg_temp_new_i32();
2462 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2463 tcg_temp_free_i32(tmp
);
2466 switch ((insn
>> 22) & 3) {
2468 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2471 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2474 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2477 tcg_temp_free_i32(tmp
);
2478 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2479 gen_op_iwmmxt_set_mup();
2480 gen_op_iwmmxt_set_cup();
2482 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2483 case 0x314: case 0x714: case 0xb14: case 0xf14:
2484 if (((insn
>> 22) & 3) == 0)
2486 wrd
= (insn
>> 12) & 0xf;
2487 rd0
= (insn
>> 16) & 0xf;
2488 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2489 tmp
= tcg_temp_new_i32();
2490 switch ((insn
>> 22) & 3) {
2492 if (gen_iwmmxt_shift(insn
, 0xf, tmp
)) {
2493 tcg_temp_free_i32(tmp
);
2496 gen_helper_iwmmxt_rorw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2499 if (gen_iwmmxt_shift(insn
, 0x1f, tmp
)) {
2500 tcg_temp_free_i32(tmp
);
2503 gen_helper_iwmmxt_rorl(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2506 if (gen_iwmmxt_shift(insn
, 0x3f, tmp
)) {
2507 tcg_temp_free_i32(tmp
);
2510 gen_helper_iwmmxt_rorq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2513 tcg_temp_free_i32(tmp
);
2514 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2515 gen_op_iwmmxt_set_mup();
2516 gen_op_iwmmxt_set_cup();
2518 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2519 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2520 wrd
= (insn
>> 12) & 0xf;
2521 rd0
= (insn
>> 16) & 0xf;
2522 rd1
= (insn
>> 0) & 0xf;
2523 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2524 switch ((insn
>> 22) & 3) {
2526 if (insn
& (1 << 21))
2527 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
2529 gen_op_iwmmxt_minub_M0_wRn(rd1
);
2532 if (insn
& (1 << 21))
2533 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
2535 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
2538 if (insn
& (1 << 21))
2539 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
2541 gen_op_iwmmxt_minul_M0_wRn(rd1
);
2546 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2547 gen_op_iwmmxt_set_mup();
2549 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2550 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2551 wrd
= (insn
>> 12) & 0xf;
2552 rd0
= (insn
>> 16) & 0xf;
2553 rd1
= (insn
>> 0) & 0xf;
2554 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2555 switch ((insn
>> 22) & 3) {
2557 if (insn
& (1 << 21))
2558 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
2560 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
2563 if (insn
& (1 << 21))
2564 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
2566 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
2569 if (insn
& (1 << 21))
2570 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
2572 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
2577 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2578 gen_op_iwmmxt_set_mup();
2580 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2581 case 0x402: case 0x502: case 0x602: case 0x702:
2582 wrd
= (insn
>> 12) & 0xf;
2583 rd0
= (insn
>> 16) & 0xf;
2584 rd1
= (insn
>> 0) & 0xf;
2585 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2586 tmp
= tcg_const_i32((insn
>> 20) & 3);
2587 iwmmxt_load_reg(cpu_V1
, rd1
);
2588 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2589 tcg_temp_free_i32(tmp
);
2590 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2591 gen_op_iwmmxt_set_mup();
2593 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2594 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2595 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2596 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2597 wrd
= (insn
>> 12) & 0xf;
2598 rd0
= (insn
>> 16) & 0xf;
2599 rd1
= (insn
>> 0) & 0xf;
2600 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2601 switch ((insn
>> 20) & 0xf) {
2603 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2606 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2609 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2612 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2615 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2618 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2621 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2624 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2627 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2632 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2633 gen_op_iwmmxt_set_mup();
2634 gen_op_iwmmxt_set_cup();
2636 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2637 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2638 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2639 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2640 wrd
= (insn
>> 12) & 0xf;
2641 rd0
= (insn
>> 16) & 0xf;
2642 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2643 tmp
= tcg_const_i32(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2644 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2645 tcg_temp_free_i32(tmp
);
2646 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2647 gen_op_iwmmxt_set_mup();
2648 gen_op_iwmmxt_set_cup();
2650 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2651 case 0x418: case 0x518: case 0x618: case 0x718:
2652 case 0x818: case 0x918: case 0xa18: case 0xb18:
2653 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2654 wrd
= (insn
>> 12) & 0xf;
2655 rd0
= (insn
>> 16) & 0xf;
2656 rd1
= (insn
>> 0) & 0xf;
2657 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2658 switch ((insn
>> 20) & 0xf) {
2660 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2663 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2666 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2669 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2672 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2675 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2678 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2681 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2684 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2689 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2690 gen_op_iwmmxt_set_mup();
2691 gen_op_iwmmxt_set_cup();
2693 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2694 case 0x408: case 0x508: case 0x608: case 0x708:
2695 case 0x808: case 0x908: case 0xa08: case 0xb08:
2696 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2697 if (!(insn
& (1 << 20)) || ((insn
>> 22) & 3) == 0)
2699 wrd
= (insn
>> 12) & 0xf;
2700 rd0
= (insn
>> 16) & 0xf;
2701 rd1
= (insn
>> 0) & 0xf;
2702 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2703 switch ((insn
>> 22) & 3) {
2705 if (insn
& (1 << 21))
2706 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
2708 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
2711 if (insn
& (1 << 21))
2712 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
2714 gen_op_iwmmxt_packul_M0_wRn(rd1
);
2717 if (insn
& (1 << 21))
2718 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
2720 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
2723 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2724 gen_op_iwmmxt_set_mup();
2725 gen_op_iwmmxt_set_cup();
2727 case 0x201: case 0x203: case 0x205: case 0x207:
2728 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2729 case 0x211: case 0x213: case 0x215: case 0x217:
2730 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2731 wrd
= (insn
>> 5) & 0xf;
2732 rd0
= (insn
>> 12) & 0xf;
2733 rd1
= (insn
>> 0) & 0xf;
2734 if (rd0
== 0xf || rd1
== 0xf)
2736 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2737 tmp
= load_reg(s
, rd0
);
2738 tmp2
= load_reg(s
, rd1
);
2739 switch ((insn
>> 16) & 0xf) {
2740 case 0x0: /* TMIA */
2741 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2743 case 0x8: /* TMIAPH */
2744 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2746 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2747 if (insn
& (1 << 16))
2748 tcg_gen_shri_i32(tmp
, tmp
, 16);
2749 if (insn
& (1 << 17))
2750 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2751 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2754 tcg_temp_free_i32(tmp2
);
2755 tcg_temp_free_i32(tmp
);
2758 tcg_temp_free_i32(tmp2
);
2759 tcg_temp_free_i32(tmp
);
2760 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2761 gen_op_iwmmxt_set_mup();
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            goto illegal_op; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
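/*
 * For illustration: with bigbit = 12 and smallbit = 22 (the "D" destination
 * encoding), VFP_DREG_D() builds the 5-bit register number from insn[15:12]
 * as the low four bits and insn[22] as bit 4, so insn[22] = 1 with
 * insn[15:12] = 0x3 selects d19.  VFP_SREG_D() instead yields
 * (insn[15:12] << 1) | insn[22], i.e. s7 for the same bit pattern.
 */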
static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
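/*
 * Illustrative values: given var = 0x1234abcd, gen_neon_dup_low16() leaves
 * 0xabcdabcd (the low halfword replicated to both halves) and
 * gen_neon_dup_high16() leaves 0x12341234 (the high halfword replicated).
 */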
/*
 * Disassemble a VFP instruction.  Returns nonzero if an error occurred
 * (ie. an undefined instruction).
 */
static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
    if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
        return 1;
    }

    /*
     * If the decodetree decoder handles this insn it will always
     * emit code to either execute the insn or generate an appropriate
     * exception; so we don't need to ever return non-zero to tell
     * the calling code to emit an UNDEF exception.
     */
    if (extract32(insn, 28, 4) == 0xf) {
        if (disas_vfp_uncond(s, insn)) {
            return 0;
        }
    } else {
        if (disas_vfp(s, insn)) {
            return 0;
        }
    }
    /* If the decodetree decoder didn't handle this insn, it must be UNDEF */
    return 1;
}
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
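/*
 * In other words, direct block chaining is only attempted when the branch
 * target lies in the same guest page as either the start of this TB or the
 * current instruction, so that unmapping or retranslating that page also
 * invalidates the chained link; other targets fall back to the slower
 * lookup-and-goto-ptr path generated below.
 */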
static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}

/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}

static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(is_singlestepping(s))) {
        /* An indirect jump so that we still trigger the debug exception.  */
        gen_set_pc_im(s, dest);
        s->base.is_jmp = DISAS_JUMP;
    } else {
        gen_goto_tb(s, 0, dest);
    }
}
static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
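/*
 * For reference: this is the halfword-select step of the SMUL<x><y> and
 * SMLA<x><y> style multiplies -- a nonzero x or y picks the top half of the
 * corresponding operand (arithmetic shift right by 16), otherwise the bottom
 * half is sign-extended before the 32-bit multiply.
 */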
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
        mask &= ~CPSR_T;
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
        mask &= ~CPSR_Q; /* V5TE in reality*/
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
        mask &= ~(CPSR_E | CPSR_GE);
    }
    if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
        mask &= ~CPSR_IT;
    }
    /* Mask out execution state and reserved bits.  */
    if (!spsr) {
        mask &= ~(CPSR_EXEC | CPSR_RESERVED);
    }
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
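/*
 * Decoded for convenience (matching the code above): the four flag bits are
 * the MSR field specifiers c, x, s and f, i.e. MSR CPSR_c touches bits [7:0],
 * CPSR_x bits [15:8], CPSR_s bits [23:16] and CPSR_f bits [31:24]; a plain
 * "MSR CPSR_fsxc, ..." therefore starts from a 0xffffffff mask before the
 * feature-dependent bits are removed.
 */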
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
3034 static bool msr_banked_access_decode(DisasContext
*s
, int r
, int sysm
, int rn
,
3035 int *tgtmode
, int *regno
)
3037 /* Decode the r and sysm fields of MSR/MRS banked accesses into
3038 * the target mode and register number, and identify the various
3039 * unpredictable cases.
3040 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
3041 * + executed in user mode
3042 * + using R15 as the src/dest register
3043 * + accessing an unimplemented register
3044 * + accessing a register that's inaccessible at current PL/security state*
3045 * + accessing a register that you could access with a different insn
3046 * We choose to UNDEF in all these cases.
3047 * Since we don't know which of the various AArch32 modes we are in
3048 * we have to defer some checks to runtime.
3049 * Accesses to Monitor mode registers from Secure EL1 (which implies
3050 * that EL3 is AArch64) must trap to EL3.
3052 * If the access checks fail this function will emit code to take
3053 * an exception and return false. Otherwise it will return true,
3054 * and set *tgtmode and *regno appropriately.
3056 int exc_target
= default_exception_el(s
);
3058 /* These instructions are present only in ARMv8, or in ARMv7 with the
3059 * Virtualization Extensions.
3061 if (!arm_dc_feature(s
, ARM_FEATURE_V8
) &&
3062 !arm_dc_feature(s
, ARM_FEATURE_EL2
)) {
3066 if (IS_USER(s
) || rn
== 15) {
3070 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
3071 * of registers into (r, sysm).
3074 /* SPSRs for other modes */
3076 case 0xe: /* SPSR_fiq */
3077 *tgtmode
= ARM_CPU_MODE_FIQ
;
3079 case 0x10: /* SPSR_irq */
3080 *tgtmode
= ARM_CPU_MODE_IRQ
;
3082 case 0x12: /* SPSR_svc */
3083 *tgtmode
= ARM_CPU_MODE_SVC
;
3085 case 0x14: /* SPSR_abt */
3086 *tgtmode
= ARM_CPU_MODE_ABT
;
3088 case 0x16: /* SPSR_und */
3089 *tgtmode
= ARM_CPU_MODE_UND
;
3091 case 0x1c: /* SPSR_mon */
3092 *tgtmode
= ARM_CPU_MODE_MON
;
3094 case 0x1e: /* SPSR_hyp */
3095 *tgtmode
= ARM_CPU_MODE_HYP
;
3097 default: /* unallocated */
3100 /* We arbitrarily assign SPSR a register number of 16. */
3103 /* general purpose registers for other modes */
3105 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
3106 *tgtmode
= ARM_CPU_MODE_USR
;
3109 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
3110 *tgtmode
= ARM_CPU_MODE_FIQ
;
3113 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
3114 *tgtmode
= ARM_CPU_MODE_IRQ
;
3115 *regno
= sysm
& 1 ? 13 : 14;
3117 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
3118 *tgtmode
= ARM_CPU_MODE_SVC
;
3119 *regno
= sysm
& 1 ? 13 : 14;
3121 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
3122 *tgtmode
= ARM_CPU_MODE_ABT
;
3123 *regno
= sysm
& 1 ? 13 : 14;
3125 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
3126 *tgtmode
= ARM_CPU_MODE_UND
;
3127 *regno
= sysm
& 1 ? 13 : 14;
3129 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
3130 *tgtmode
= ARM_CPU_MODE_MON
;
3131 *regno
= sysm
& 1 ? 13 : 14;
3133 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
3134 *tgtmode
= ARM_CPU_MODE_HYP
;
3135 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
3136 *regno
= sysm
& 1 ? 13 : 17;
3138 default: /* unallocated */
3143 /* Catch the 'accessing inaccessible register' cases we can detect
3144 * at translate time.
3147 case ARM_CPU_MODE_MON
:
3148 if (!arm_dc_feature(s
, ARM_FEATURE_EL3
) || s
->ns
) {
3151 if (s
->current_el
== 1) {
3152 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
3153 * then accesses to Mon registers trap to EL3
3159 case ARM_CPU_MODE_HYP
:
3161 * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
3162 * (and so we can forbid accesses from EL2 or below). elr_hyp
3163 * can be accessed also from Hyp mode, so forbid accesses from
3166 if (!arm_dc_feature(s
, ARM_FEATURE_EL2
) || s
->current_el
< 2 ||
3167 (s
->current_el
< 3 && *regno
!= 17)) {
3178 /* If we get here then some access check did not pass */
3179 gen_exception_insn(s
, s
->pc_curr
, EXCP_UDEF
,
3180 syn_uncategorized(), exc_target
);
static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}

static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();
    }
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}
/*
 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
 * only call the helper when running single threaded TCG code to ensure
 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
 * just skip this instruction. Currently the SEV/SEVL instructions
 * which are *one* of many ways to wake the CPU from WFE are not
 * implemented so we can't sleep like WFI does.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* yield */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->base.pc_next);
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->base.pc_next);
        s->base.is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->base.pc_next);
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
3304 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3306 static inline void gen_neon_add(int size
, TCGv_i32 t0
, TCGv_i32 t1
)
3309 case 0: gen_helper_neon_add_u8(t0
, t0
, t1
); break;
3310 case 1: gen_helper_neon_add_u16(t0
, t0
, t1
); break;
3311 case 2: tcg_gen_add_i32(t0
, t0
, t1
); break;
3316 static inline void gen_neon_rsb(int size
, TCGv_i32 t0
, TCGv_i32 t1
)
3319 case 0: gen_helper_neon_sub_u8(t0
, t1
, t0
); break;
3320 case 1: gen_helper_neon_sub_u16(t0
, t1
, t0
); break;
3321 case 2: tcg_gen_sub_i32(t0
, t1
, t0
); break;
3326 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3327 #define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
3328 #define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
3329 #define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
3330 #define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
3332 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3333 switch ((size << 1) | u) { \
3335 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3338 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3341 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3344 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3347 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3350 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3352 default: return 1; \
3355 #define GEN_NEON_INTEGER_OP(name) do { \
3356 switch ((size << 1) | u) { \
3358 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3361 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3364 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3367 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3370 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3373 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3375 default: return 1; \
3378 static TCGv_i32
neon_load_scratch(int scratch
)
3380 TCGv_i32 tmp
= tcg_temp_new_i32();
3381 tcg_gen_ld_i32(tmp
, cpu_env
, offsetof(CPUARMState
, vfp
.scratch
[scratch
]));
3385 static void neon_store_scratch(int scratch
, TCGv_i32 var
)
3387 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUARMState
, vfp
.scratch
[scratch
]));
3388 tcg_temp_free_i32(var
);
3391 static inline TCGv_i32
neon_get_scalar(int size
, int reg
)
3395 tmp
= neon_load_reg(reg
& 7, reg
>> 4);
3397 gen_neon_dup_high16(tmp
);
3399 gen_neon_dup_low16(tmp
);
3402 tmp
= neon_load_reg(reg
& 15, reg
>> 4);
3407 static int gen_neon_unzip(int rd
, int rm
, int size
, int q
)
3411 if (!q
&& size
== 2) {
3414 pd
= vfp_reg_ptr(true, rd
);
3415 pm
= vfp_reg_ptr(true, rm
);
3419 gen_helper_neon_qunzip8(pd
, pm
);
3422 gen_helper_neon_qunzip16(pd
, pm
);
3425 gen_helper_neon_qunzip32(pd
, pm
);
3433 gen_helper_neon_unzip8(pd
, pm
);
3436 gen_helper_neon_unzip16(pd
, pm
);
3442 tcg_temp_free_ptr(pd
);
3443 tcg_temp_free_ptr(pm
);
3447 static int gen_neon_zip(int rd
, int rm
, int size
, int q
)
3451 if (!q
&& size
== 2) {
3454 pd
= vfp_reg_ptr(true, rd
);
3455 pm
= vfp_reg_ptr(true, rm
);
3459 gen_helper_neon_qzip8(pd
, pm
);
3462 gen_helper_neon_qzip16(pd
, pm
);
3465 gen_helper_neon_qzip32(pd
, pm
);
3473 gen_helper_neon_zip8(pd
, pm
);
3476 gen_helper_neon_zip16(pd
, pm
);
3482 tcg_temp_free_ptr(pd
);
3483 tcg_temp_free_ptr(pm
);
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
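/*
 * Worked example for the byte variant, writing each register as four bytes
 * {b3,b2,b1,b0}: for t0 = {a3,a2,a1,a0} and t1 = {b3,b2,b1,b0} the code above
 * leaves t0 = {a2,b2,a0,b0} and t1 = {a3,b3,a1,b1}, i.e. the odd and even
 * bytes of the two inputs are interleaved pairwise as needed by the VTRN.8
 * handling in the Neon decoder.
 */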
3533 } const neon_ls_element_type
[11] = {
3547 /* Translate a NEON load/store element instruction. Return nonzero if the
3548 instruction is invalid. */
3549 static int disas_neon_ls_insn(DisasContext
*s
, uint32_t insn
)
3569 /* FIXME: this access check should not take precedence over UNDEF
3570 * for invalid encodings; we will generate incorrect syndrome information
3571 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3573 if (s
->fp_excp_el
) {
3574 gen_exception_insn(s
, s
->pc_curr
, EXCP_UDEF
,
3575 syn_simd_access_trap(1, 0xe, false), s
->fp_excp_el
);
3579 if (!s
->vfp_enabled
)
3581 VFP_DREG_D(rd
, insn
);
3582 rn
= (insn
>> 16) & 0xf;
3584 load
= (insn
& (1 << 21)) != 0;
3585 endian
= s
->be_data
;
3586 mmu_idx
= get_mem_index(s
);
3587 if ((insn
& (1 << 23)) == 0) {
3588 /* Load store all elements. */
3589 op
= (insn
>> 8) & 0xf;
3590 size
= (insn
>> 6) & 3;
3593 /* Catch UNDEF cases for bad values of align field */
3596 if (((insn
>> 5) & 1) == 1) {
3601 if (((insn
>> 4) & 3) == 3) {
3608 nregs
= neon_ls_element_type
[op
].nregs
;
3609 interleave
= neon_ls_element_type
[op
].interleave
;
3610 spacing
= neon_ls_element_type
[op
].spacing
;
3611 if (size
== 3 && (interleave
| spacing
) != 1) {
3614 /* For our purposes, bytes are always little-endian. */
3618 /* Consecutive little-endian elements from a single register
3619 * can be promoted to a larger little-endian operation.
3621 if (interleave
== 1 && endian
== MO_LE
) {
3624 tmp64
= tcg_temp_new_i64();
3625 addr
= tcg_temp_new_i32();
3626 tmp2
= tcg_const_i32(1 << size
);
3627 load_reg_var(s
, addr
, rn
);
3628 for (reg
= 0; reg
< nregs
; reg
++) {
3629 for (n
= 0; n
< 8 >> size
; n
++) {
3631 for (xs
= 0; xs
< interleave
; xs
++) {
3632 int tt
= rd
+ reg
+ spacing
* xs
;
3635 gen_aa32_ld_i64(s
, tmp64
, addr
, mmu_idx
, endian
| size
);
3636 neon_store_element64(tt
, n
, size
, tmp64
);
3638 neon_load_element64(tmp64
, tt
, n
, size
);
3639 gen_aa32_st_i64(s
, tmp64
, addr
, mmu_idx
, endian
| size
);
3641 tcg_gen_add_i32(addr
, addr
, tmp2
);
3645 tcg_temp_free_i32(addr
);
3646 tcg_temp_free_i32(tmp2
);
3647 tcg_temp_free_i64(tmp64
);
3648 stride
= nregs
* interleave
* 8;
3650 size
= (insn
>> 10) & 3;
3652 /* Load single element to all lanes. */
3653 int a
= (insn
>> 4) & 1;
3657 size
= (insn
>> 6) & 3;
3658 nregs
= ((insn
>> 8) & 3) + 1;
3661 if (nregs
!= 4 || a
== 0) {
3664 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3667 if (nregs
== 1 && a
== 1 && size
== 0) {
3670 if (nregs
== 3 && a
== 1) {
3673 addr
= tcg_temp_new_i32();
3674 load_reg_var(s
, addr
, rn
);
3676 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
3677 * VLD2/3/4 to all lanes: bit 5 indicates register stride.
3679 stride
= (insn
& (1 << 5)) ? 2 : 1;
3680 vec_size
= nregs
== 1 ? stride
* 8 : 8;
3682 tmp
= tcg_temp_new_i32();
3683 for (reg
= 0; reg
< nregs
; reg
++) {
3684 gen_aa32_ld_i32(s
, tmp
, addr
, get_mem_index(s
),
3686 if ((rd
& 1) && vec_size
== 16) {
3687 /* We cannot write 16 bytes at once because the
3688 * destination is unaligned.
3690 tcg_gen_gvec_dup_i32(size
, neon_reg_offset(rd
, 0),
3692 tcg_gen_gvec_mov(0, neon_reg_offset(rd
+ 1, 0),
3693 neon_reg_offset(rd
, 0), 8, 8);
3695 tcg_gen_gvec_dup_i32(size
, neon_reg_offset(rd
, 0),
3696 vec_size
, vec_size
, tmp
);
3698 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3701 tcg_temp_free_i32(tmp
);
3702 tcg_temp_free_i32(addr
);
3703 stride
= (1 << size
) * nregs
;
3705 /* Single element. */
3706 int idx
= (insn
>> 4) & 0xf;
3710 reg_idx
= (insn
>> 5) & 7;
3714 reg_idx
= (insn
>> 6) & 3;
3715 stride
= (insn
& (1 << 5)) ? 2 : 1;
3718 reg_idx
= (insn
>> 7) & 1;
3719 stride
= (insn
& (1 << 6)) ? 2 : 1;
3724 nregs
= ((insn
>> 8) & 3) + 1;
3725 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
3728 if (((idx
& (1 << size
)) != 0) ||
3729 (size
== 2 && ((idx
& 3) == 1 || (idx
& 3) == 2))) {
3734 if ((idx
& 1) != 0) {
3739 if (size
== 2 && (idx
& 2) != 0) {
3744 if ((size
== 2) && ((idx
& 3) == 3)) {
3751 if ((rd
+ stride
* (nregs
- 1)) > 31) {
3752 /* Attempts to write off the end of the register file
3753 * are UNPREDICTABLE; we choose to UNDEF because otherwise
3754 * the neon_load_reg() would write off the end of the array.
3758 tmp
= tcg_temp_new_i32();
3759 addr
= tcg_temp_new_i32();
3760 load_reg_var(s
, addr
, rn
);
3761 for (reg
= 0; reg
< nregs
; reg
++) {
3763 gen_aa32_ld_i32(s
, tmp
, addr
, get_mem_index(s
),
3765 neon_store_element(rd
, reg_idx
, size
, tmp
);
3766 } else { /* Store */
3767 neon_load_element(tmp
, rd
, reg_idx
, size
);
3768 gen_aa32_st_i32(s
, tmp
, addr
, get_mem_index(s
),
3772 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
3774 tcg_temp_free_i32(addr
);
3775 tcg_temp_free_i32(tmp
);
3776 stride
= nregs
* (1 << size
);
3782 base
= load_reg(s
, rn
);
3784 tcg_gen_addi_i32(base
, base
, stride
);
3787 index
= load_reg(s
, rm
);
3788 tcg_gen_add_i32(base
, base
, index
);
3789 tcg_temp_free_i32(index
);
3791 store_reg(s
, rn
, base
);
3796 static inline void gen_neon_narrow(int size
, TCGv_i32 dest
, TCGv_i64 src
)
3799 case 0: gen_helper_neon_narrow_u8(dest
, src
); break;
3800 case 1: gen_helper_neon_narrow_u16(dest
, src
); break;
3801 case 2: tcg_gen_extrl_i64_i32(dest
, src
); break;
3806 static inline void gen_neon_narrow_sats(int size
, TCGv_i32 dest
, TCGv_i64 src
)
3809 case 0: gen_helper_neon_narrow_sat_s8(dest
, cpu_env
, src
); break;
3810 case 1: gen_helper_neon_narrow_sat_s16(dest
, cpu_env
, src
); break;
3811 case 2: gen_helper_neon_narrow_sat_s32(dest
, cpu_env
, src
); break;
3816 static inline void gen_neon_narrow_satu(int size
, TCGv_i32 dest
, TCGv_i64 src
)
3819 case 0: gen_helper_neon_narrow_sat_u8(dest
, cpu_env
, src
); break;
3820 case 1: gen_helper_neon_narrow_sat_u16(dest
, cpu_env
, src
); break;
3821 case 2: gen_helper_neon_narrow_sat_u32(dest
, cpu_env
, src
); break;
3826 static inline void gen_neon_unarrow_sats(int size
, TCGv_i32 dest
, TCGv_i64 src
)
3829 case 0: gen_helper_neon_unarrow_sat8(dest
, cpu_env
, src
); break;
3830 case 1: gen_helper_neon_unarrow_sat16(dest
, cpu_env
, src
); break;
3831 case 2: gen_helper_neon_unarrow_sat32(dest
, cpu_env
, src
); break;
3836 static inline void gen_neon_shift_narrow(int size
, TCGv_i32 var
, TCGv_i32 shift
,
3842 case 1: gen_helper_neon_rshl_u16(var
, var
, shift
); break;
3843 case 2: gen_helper_neon_rshl_u32(var
, var
, shift
); break;
3848 case 1: gen_helper_neon_rshl_s16(var
, var
, shift
); break;
3849 case 2: gen_helper_neon_rshl_s32(var
, var
, shift
); break;
3856 case 1: gen_helper_neon_shl_u16(var
, var
, shift
); break;
3857 case 2: gen_helper_neon_shl_u32(var
, var
, shift
); break;
3862 case 1: gen_helper_neon_shl_s16(var
, var
, shift
); break;
3863 case 2: gen_helper_neon_shl_s32(var
, var
, shift
); break;
3870 static inline void gen_neon_widen(TCGv_i64 dest
, TCGv_i32 src
, int size
, int u
)
3874 case 0: gen_helper_neon_widen_u8(dest
, src
); break;
3875 case 1: gen_helper_neon_widen_u16(dest
, src
); break;
3876 case 2: tcg_gen_extu_i32_i64(dest
, src
); break;
3881 case 0: gen_helper_neon_widen_s8(dest
, src
); break;
3882 case 1: gen_helper_neon_widen_s16(dest
, src
); break;
3883 case 2: tcg_gen_ext_i32_i64(dest
, src
); break;
3887 tcg_temp_free_i32(src
);
3890 static inline void gen_neon_addl(int size
)
3893 case 0: gen_helper_neon_addl_u16(CPU_V001
); break;
3894 case 1: gen_helper_neon_addl_u32(CPU_V001
); break;
3895 case 2: tcg_gen_add_i64(CPU_V001
); break;
3900 static inline void gen_neon_subl(int size
)
3903 case 0: gen_helper_neon_subl_u16(CPU_V001
); break;
3904 case 1: gen_helper_neon_subl_u32(CPU_V001
); break;
3905 case 2: tcg_gen_sub_i64(CPU_V001
); break;
3910 static inline void gen_neon_negl(TCGv_i64 var
, int size
)
3913 case 0: gen_helper_neon_negl_u16(var
, var
); break;
3914 case 1: gen_helper_neon_negl_u32(var
, var
); break;
3916 tcg_gen_neg_i64(var
, var
);
3922 static inline void gen_neon_addl_saturate(TCGv_i64 op0
, TCGv_i64 op1
, int size
)
3925 case 1: gen_helper_neon_addl_saturate_s32(op0
, cpu_env
, op0
, op1
); break;
3926 case 2: gen_helper_neon_addl_saturate_s64(op0
, cpu_env
, op0
, op1
); break;
3931 static inline void gen_neon_mull(TCGv_i64 dest
, TCGv_i32 a
, TCGv_i32 b
,
3936 switch ((size
<< 1) | u
) {
3937 case 0: gen_helper_neon_mull_s8(dest
, a
, b
); break;
3938 case 1: gen_helper_neon_mull_u8(dest
, a
, b
); break;
3939 case 2: gen_helper_neon_mull_s16(dest
, a
, b
); break;
3940 case 3: gen_helper_neon_mull_u16(dest
, a
, b
); break;
3942 tmp
= gen_muls_i64_i32(a
, b
);
3943 tcg_gen_mov_i64(dest
, tmp
);
3944 tcg_temp_free_i64(tmp
);
3947 tmp
= gen_mulu_i64_i32(a
, b
);
3948 tcg_gen_mov_i64(dest
, tmp
);
3949 tcg_temp_free_i64(tmp
);
3954 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
3955 Don't forget to clean them now. */
3957 tcg_temp_free_i32(a
);
3958 tcg_temp_free_i32(b
);
3962 static void gen_neon_narrow_op(int op
, int u
, int size
,
3963 TCGv_i32 dest
, TCGv_i64 src
)
3967 gen_neon_unarrow_sats(size
, dest
, src
);
3969 gen_neon_narrow(size
, dest
, src
);
3973 gen_neon_narrow_satu(size
, dest
, src
);
3975 gen_neon_narrow_sats(size
, dest
, src
);
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLS */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
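/*
 * Reading the table: bit n of an entry permits size value n, so for example
 * [NEON_3R_VQDMULH_VQRDMULH] = 0x6 allows only the 16-bit and 32-bit element
 * sizes (size = 1 or 2) and UNDEFs for size = 0, while 0xf entries accept all
 * of size = 0..3.
 */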
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static bool neon_2rm_is_v8_op(int op)
{
    /* Return true if this neon 2reg-misc op is ARMv8 and up */
    switch (op) {
    case NEON_2RM_VRINTN:
    case NEON_2RM_VRINTA:
    case NEON_2RM_VRINTM:
    case NEON_2RM_VRINTP:
    case NEON_2RM_VRINTZ:
    case NEON_2RM_VRINTX:
    case NEON_2RM_VCVTAU:
    case NEON_2RM_VCVTAS:
    case NEON_2RM_VCVTNU:
    case NEON_2RM_VCVTNS:
    case NEON_2RM_VCVTPU:
    case NEON_2RM_VCVTPS:
    case NEON_2RM_VCVTMU:
    case NEON_2RM_VCVTMS:
        return true;
    default:
        return false;
    }
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
/* Expand v8.1 simd helper.  */
static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                         int q, int rd, int rn, int rm)
{
    if (dc_isar_feature(aa32_rdm, s)) {
        int opr_sz = (1 + q) * 8;
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm), cpu_env,
                           opr_sz, opr_sz, 0, fn);
        return 0;
    }
    return 1;
}
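/*
 * Note on opr_sz: it is the operand size in bytes, so q = 0 (a D-register
 * operation) expands to an 8-byte gvec op and q = 1 (a Q-register operation)
 * to a 16-byte one.  The helper is only emitted when the v8.1 RDM feature is
 * present; the nonzero return otherwise lets the caller treat the insn as
 * UNDEF.
 */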
4228 static void gen_ssra8_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4230 tcg_gen_vec_sar8i_i64(a
, a
, shift
);
4231 tcg_gen_vec_add8_i64(d
, d
, a
);
4234 static void gen_ssra16_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4236 tcg_gen_vec_sar16i_i64(a
, a
, shift
);
4237 tcg_gen_vec_add16_i64(d
, d
, a
);
4240 static void gen_ssra32_i32(TCGv_i32 d
, TCGv_i32 a
, int32_t shift
)
4242 tcg_gen_sari_i32(a
, a
, shift
);
4243 tcg_gen_add_i32(d
, d
, a
);
4246 static void gen_ssra64_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4248 tcg_gen_sari_i64(a
, a
, shift
);
4249 tcg_gen_add_i64(d
, d
, a
);
4252 static void gen_ssra_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, int64_t sh
)
4254 tcg_gen_sari_vec(vece
, a
, a
, sh
);
4255 tcg_gen_add_vec(vece
, d
, d
, a
);
4258 static const TCGOpcode vecop_list_ssra
[] = {
4259 INDEX_op_sari_vec
, INDEX_op_add_vec
, 0
4262 const GVecGen2i ssra_op
[4] = {
4263 { .fni8
= gen_ssra8_i64
,
4264 .fniv
= gen_ssra_vec
,
4266 .opt_opc
= vecop_list_ssra
,
4268 { .fni8
= gen_ssra16_i64
,
4269 .fniv
= gen_ssra_vec
,
4271 .opt_opc
= vecop_list_ssra
,
4273 { .fni4
= gen_ssra32_i32
,
4274 .fniv
= gen_ssra_vec
,
4276 .opt_opc
= vecop_list_ssra
,
4278 { .fni8
= gen_ssra64_i64
,
4279 .fniv
= gen_ssra_vec
,
4280 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
4281 .opt_opc
= vecop_list_ssra
,
4286 static void gen_usra8_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4288 tcg_gen_vec_shr8i_i64(a
, a
, shift
);
4289 tcg_gen_vec_add8_i64(d
, d
, a
);
4292 static void gen_usra16_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4294 tcg_gen_vec_shr16i_i64(a
, a
, shift
);
4295 tcg_gen_vec_add16_i64(d
, d
, a
);
4298 static void gen_usra32_i32(TCGv_i32 d
, TCGv_i32 a
, int32_t shift
)
4300 tcg_gen_shri_i32(a
, a
, shift
);
4301 tcg_gen_add_i32(d
, d
, a
);
4304 static void gen_usra64_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4306 tcg_gen_shri_i64(a
, a
, shift
);
4307 tcg_gen_add_i64(d
, d
, a
);
4310 static void gen_usra_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, int64_t sh
)
4312 tcg_gen_shri_vec(vece
, a
, a
, sh
);
4313 tcg_gen_add_vec(vece
, d
, d
, a
);
4316 static const TCGOpcode vecop_list_usra
[] = {
4317 INDEX_op_shri_vec
, INDEX_op_add_vec
, 0
4320 const GVecGen2i usra_op
[4] = {
4321 { .fni8
= gen_usra8_i64
,
4322 .fniv
= gen_usra_vec
,
4324 .opt_opc
= vecop_list_usra
,
4326 { .fni8
= gen_usra16_i64
,
4327 .fniv
= gen_usra_vec
,
4329 .opt_opc
= vecop_list_usra
,
4331 { .fni4
= gen_usra32_i32
,
4332 .fniv
= gen_usra_vec
,
4334 .opt_opc
= vecop_list_usra
,
4336 { .fni8
= gen_usra64_i64
,
4337 .fniv
= gen_usra_vec
,
4338 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
4340 .opt_opc
= vecop_list_usra
,
4344 static void gen_shr8_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4346 uint64_t mask
= dup_const(MO_8
, 0xff >> shift
);
4347 TCGv_i64 t
= tcg_temp_new_i64();
4349 tcg_gen_shri_i64(t
, a
, shift
);
4350 tcg_gen_andi_i64(t
, t
, mask
);
4351 tcg_gen_andi_i64(d
, d
, ~mask
);
4352 tcg_gen_or_i64(d
, d
, t
);
4353 tcg_temp_free_i64(t
);
4356 static void gen_shr16_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4358 uint64_t mask
= dup_const(MO_16
, 0xffff >> shift
);
4359 TCGv_i64 t
= tcg_temp_new_i64();
4361 tcg_gen_shri_i64(t
, a
, shift
);
4362 tcg_gen_andi_i64(t
, t
, mask
);
4363 tcg_gen_andi_i64(d
, d
, ~mask
);
4364 tcg_gen_or_i64(d
, d
, t
);
4365 tcg_temp_free_i64(t
);
4368 static void gen_shr32_ins_i32(TCGv_i32 d
, TCGv_i32 a
, int32_t shift
)
4370 tcg_gen_shri_i32(a
, a
, shift
);
4371 tcg_gen_deposit_i32(d
, d
, a
, 0, 32 - shift
);
4374 static void gen_shr64_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4376 tcg_gen_shri_i64(a
, a
, shift
);
4377 tcg_gen_deposit_i64(d
, d
, a
, 0, 64 - shift
);
4380 static void gen_shr_ins_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, int64_t sh
)
4383 tcg_gen_mov_vec(d
, a
);
4385 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
4386 TCGv_vec m
= tcg_temp_new_vec_matching(d
);
4388 tcg_gen_dupi_vec(vece
, m
, MAKE_64BIT_MASK((8 << vece
) - sh
, sh
));
4389 tcg_gen_shri_vec(vece
, t
, a
, sh
);
4390 tcg_gen_and_vec(vece
, d
, d
, m
);
4391 tcg_gen_or_vec(vece
, d
, d
, t
);
4393 tcg_temp_free_vec(t
);
4394 tcg_temp_free_vec(m
);
4398 static const TCGOpcode vecop_list_sri
[] = { INDEX_op_shri_vec
, 0 };
4400 const GVecGen2i sri_op
[4] = {
4401 { .fni8
= gen_shr8_ins_i64
,
4402 .fniv
= gen_shr_ins_vec
,
4404 .opt_opc
= vecop_list_sri
,
4406 { .fni8
= gen_shr16_ins_i64
,
4407 .fniv
= gen_shr_ins_vec
,
4409 .opt_opc
= vecop_list_sri
,
4411 { .fni4
= gen_shr32_ins_i32
,
4412 .fniv
= gen_shr_ins_vec
,
4414 .opt_opc
= vecop_list_sri
,
4416 { .fni8
= gen_shr64_ins_i64
,
4417 .fniv
= gen_shr_ins_vec
,
4418 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
4420 .opt_opc
= vecop_list_sri
,
4424 static void gen_shl8_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4426 uint64_t mask
= dup_const(MO_8
, 0xff << shift
);
4427 TCGv_i64 t
= tcg_temp_new_i64();
4429 tcg_gen_shli_i64(t
, a
, shift
);
4430 tcg_gen_andi_i64(t
, t
, mask
);
4431 tcg_gen_andi_i64(d
, d
, ~mask
);
4432 tcg_gen_or_i64(d
, d
, t
);
4433 tcg_temp_free_i64(t
);
4436 static void gen_shl16_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4438 uint64_t mask
= dup_const(MO_16
, 0xffff << shift
);
4439 TCGv_i64 t
= tcg_temp_new_i64();
4441 tcg_gen_shli_i64(t
, a
, shift
);
4442 tcg_gen_andi_i64(t
, t
, mask
);
4443 tcg_gen_andi_i64(d
, d
, ~mask
);
4444 tcg_gen_or_i64(d
, d
, t
);
4445 tcg_temp_free_i64(t
);
4448 static void gen_shl32_ins_i32(TCGv_i32 d
, TCGv_i32 a
, int32_t shift
)
4450 tcg_gen_deposit_i32(d
, d
, a
, shift
, 32 - shift
);
4453 static void gen_shl64_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
4455 tcg_gen_deposit_i64(d
, d
, a
, shift
, 64 - shift
);
4458 static void gen_shl_ins_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, int64_t sh
)
4461 tcg_gen_mov_vec(d
, a
);
4463 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
4464 TCGv_vec m
= tcg_temp_new_vec_matching(d
);
4466 tcg_gen_dupi_vec(vece
, m
, MAKE_64BIT_MASK(0, sh
));
4467 tcg_gen_shli_vec(vece
, t
, a
, sh
);
4468 tcg_gen_and_vec(vece
, d
, d
, m
);
4469 tcg_gen_or_vec(vece
, d
, d
, t
);
4471 tcg_temp_free_vec(t
);
4472 tcg_temp_free_vec(m
);
4476 static const TCGOpcode vecop_list_sli
[] = { INDEX_op_shli_vec
, 0 };
4478 const GVecGen2i sli_op
[4] = {
4479 { .fni8
= gen_shl8_ins_i64
,
4480 .fniv
= gen_shl_ins_vec
,
4482 .opt_opc
= vecop_list_sli
,
4484 { .fni8
= gen_shl16_ins_i64
,
4485 .fniv
= gen_shl_ins_vec
,
4487 .opt_opc
= vecop_list_sli
,
4489 { .fni4
= gen_shl32_ins_i32
,
4490 .fniv
= gen_shl_ins_vec
,
4492 .opt_opc
= vecop_list_sli
,
4494 { .fni8
= gen_shl64_ins_i64
,
4495 .fniv
= gen_shl_ins_vec
,
4496 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
4498 .opt_opc
= vecop_list_sli
,
4502 static void gen_mla8_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
4504 gen_helper_neon_mul_u8(a
, a
, b
);
4505 gen_helper_neon_add_u8(d
, d
, a
);
4508 static void gen_mls8_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
4510 gen_helper_neon_mul_u8(a
, a
, b
);
4511 gen_helper_neon_sub_u8(d
, d
, a
);
4514 static void gen_mla16_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
4516 gen_helper_neon_mul_u16(a
, a
, b
);
4517 gen_helper_neon_add_u16(d
, d
, a
);
4520 static void gen_mls16_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
4522 gen_helper_neon_mul_u16(a
, a
, b
);
4523 gen_helper_neon_sub_u16(d
, d
, a
);
4526 static void gen_mla32_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
4528 tcg_gen_mul_i32(a
, a
, b
);
4529 tcg_gen_add_i32(d
, d
, a
);
4532 static void gen_mls32_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
4534 tcg_gen_mul_i32(a
, a
, b
);
4535 tcg_gen_sub_i32(d
, d
, a
);
4538 static void gen_mla64_i64(TCGv_i64 d
, TCGv_i64 a
, TCGv_i64 b
)
4540 tcg_gen_mul_i64(a
, a
, b
);
4541 tcg_gen_add_i64(d
, d
, a
);
4544 static void gen_mls64_i64(TCGv_i64 d
, TCGv_i64 a
, TCGv_i64 b
)
4546 tcg_gen_mul_i64(a
, a
, b
);
4547 tcg_gen_sub_i64(d
, d
, a
);
4550 static void gen_mla_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, TCGv_vec b
)
4552 tcg_gen_mul_vec(vece
, a
, a
, b
);
4553 tcg_gen_add_vec(vece
, d
, d
, a
);
4556 static void gen_mls_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, TCGv_vec b
)
4558 tcg_gen_mul_vec(vece
, a
, a
, b
);
4559 tcg_gen_sub_vec(vece
, d
, d
, a
);
/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
 * these tables are shared with AArch64 which does support them.
 */

static const TCGOpcode vecop_list_mla[] = {
    INDEX_op_mul_vec, INDEX_op_add_vec, 0
};

static const TCGOpcode vecop_list_mls[] = {
    INDEX_op_mul_vec, INDEX_op_sub_vec, 0
};

const GVecGen3 mla_op[4] = {
    { .fni4 = gen_mla8_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_8 },
    { .fni4 = gen_mla16_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_16 },
    { .fni4 = gen_mla32_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_32 },
    { .fni8 = gen_mla64_i64,
      .fniv = gen_mla_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_64 },
};

const GVecGen3 mls_op[4] = {
    { .fni4 = gen_mls8_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_8 },
    { .fni4 = gen_mls16_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_16 },
    { .fni4 = gen_mls32_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_32 },
    { .fni8 = gen_mls64_i64,
      .fniv = gen_mls_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_64 },
};
/* CMTST : test is "if (X & Y != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);
}

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}

static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 };

const GVecGen3 cmtst_op[4] = {
    { .fni4 = gen_helper_neon_tst_u8,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_8 },
    { .fni4 = gen_helper_neon_tst_u16,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_16 },
    { .fni4 = gen_cmtst_i32,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_32 },
    { .fni8 = gen_cmtst_i64,
      .fniv = gen_cmtst_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_64 },
};
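
/* The saturating add/sub expanders below compute both the saturating and
 * the wrapping result, compare the two, and OR any per-element mismatch
 * into the 'sat' operand; callers pass vfp.qc as 'sat' so that QC ends up
 * set whenever saturation occurred.
 */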
static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_usadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_uqadd[] = {
    INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

const GVecGen4 uqadd_op[4] = {
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_b,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_8 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_h,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_16 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_s,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_32 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_d,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_64 },
};

static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_ssadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_sqadd[] = {
    INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

const GVecGen4 sqadd_op[4] = {
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_b,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_h,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_s,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_d,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_ussub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_uqsub[] = {
    INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

const GVecGen4 uqsub_op[4] = {
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_b,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_h,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_s,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_d,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_sssub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_sqsub[] = {
    INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

const GVecGen4 sqsub_op[4] = {
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_b,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_h,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_s,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_d,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_64 },
};
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */
static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    int vec_size;
    uint32_t imm;
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_ptr ptr1, ptr2, ptr3;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    vec_size = q ? 16 : 8;
    rd_ofs = neon_reg_offset(rd, 0);
    rn_ofs = neon_reg_offset(rn, 0);
    rm_ofs = neon_reg_offset(rm, 0);
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length. */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        switch (op) {
        case NEON_3R_SHA:
            /* The SHA-1/SHA-256 3-register instructions require special
             * treatment here, as their size field is overloaded as an
             * op type selector, and they all consume their input in a
             * single pass.
             */
            if (!u) { /* SHA-1 */
                if (!dc_isar_feature(aa32_sha1, s)) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                tmp4 = tcg_const_i32(size);
                gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
                tcg_temp_free_i32(tmp4);
            } else { /* SHA-256 */
                if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                switch (size) {
                case 0:
                    gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
                    break;
                case 1:
                    gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
                    break;
                case 2:
                    gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
                    break;
                }
            }
            tcg_temp_free_ptr(ptr1);
            tcg_temp_free_ptr(ptr2);
            tcg_temp_free_ptr(ptr3);
            return 0;
        case NEON_3R_VPADD_VQRDMLAH:
            if (!u) {
                break;  /* VPADD */
            }
            /* VQRDMLAH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_VFM_VQRDMLSH:
            if (!u) {
                break;  /* VFMA, VFMS: handled below */
            }
            /* VQRDMLSH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_LOGIC: /* Logic ops. */
            switch ((u << 2) | size) {
            case 0: /* VAND */
                tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 1: /* VBIC */
                tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
                break;
            case 2: /* VORR */
                tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
                                vec_size, vec_size);
                break;
            case 3: /* VORN */
                tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 4: /* VEOR */
                tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 5: /* VBSL */
                tcg_gen_gvec_bitsel(MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs,
                                    vec_size, vec_size);
                break;
            case 6: /* VBIT */
                tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
                                    vec_size, vec_size);
                break;
            case 7: /* VBIF */
                tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs,
                                    vec_size, vec_size);
                break;
            }
            return 0;

        case NEON_3R_VADD_VSUB:
            if (u) {
                tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else {
                tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            }
            return 0;

        case NEON_3R_VQADD:
            tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                           rn_ofs, rm_ofs, vec_size, vec_size,
                           (u ? uqadd_op : sqadd_op) + size);
            return 0;

        case NEON_3R_VQSUB:
            tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                           rn_ofs, rm_ofs, vec_size, vec_size,
                           (u ? uqsub_op : sqsub_op) + size);
            return 0;

        case NEON_3R_VMUL: /* VMUL */
            if (u) {
                /* Polynomial case allows only P8 and is handled below. */
                if (size != 0) {
                    return 1;
                }
            } else {
                tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                return 0;
            }
            break;

        case NEON_3R_VML: /* VMLA, VMLS */
            tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
                           u ? &mls_op[size] : &mla_op[size]);
            return 0;

        case NEON_3R_VTST_VCEQ:
            if (u) { /* VCEQ */
                tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else { /* VTST */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &cmtst_op[size]);
            }
            return 0;

        case NEON_3R_VCGT:
            tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
                             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
            return 0;

        case NEON_3R_VCGE:
            tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
                             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
            return 0;

        case NEON_3R_VMAX:
            if (u) {
                tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            } else {
                tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            }
            return 0;
        case NEON_3R_VMIN:
            if (u) {
                tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            } else {
                tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            }
            return 0;
5072 /* 64-bit element instructions. */
5073 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5074 neon_load_reg64(cpu_V0
, rn
+ pass
);
5075 neon_load_reg64(cpu_V1
, rm
+ pass
);
5079 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
5081 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
5086 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
5089 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
5095 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
5097 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
5100 case NEON_3R_VQRSHL
:
5102 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
5105 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
5112 neon_store_reg64(cpu_V0
, rd
+ pass
);
5121 case NEON_3R_VQRSHL
:
5124 /* Shift instruction operands are reversed. */
5130 case NEON_3R_VPADD_VQRDMLAH
:
5135 case NEON_3R_FLOAT_ARITH
:
5136 pairwise
= (u
&& size
< 2); /* if VPADD (float) */
5138 case NEON_3R_FLOAT_MINMAX
:
5139 pairwise
= u
; /* if VPMIN/VPMAX (float) */
5141 case NEON_3R_FLOAT_CMP
:
5143 /* no encoding for U=0 C=1x */
5147 case NEON_3R_FLOAT_ACMP
:
5152 case NEON_3R_FLOAT_MISC
:
5153 /* VMAXNM/VMINNM in ARMv8 */
5154 if (u
&& !arm_dc_feature(s
, ARM_FEATURE_V8
)) {
5158 case NEON_3R_VFM_VQRDMLSH
:
5159 if (!arm_dc_feature(s
, ARM_FEATURE_VFP4
)) {
5167 if (pairwise
&& q
) {
5168 /* All the pairwise insns UNDEF if Q is set */
5172 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5177 tmp
= neon_load_reg(rn
, 0);
5178 tmp2
= neon_load_reg(rn
, 1);
5180 tmp
= neon_load_reg(rm
, 0);
5181 tmp2
= neon_load_reg(rm
, 1);
5185 tmp
= neon_load_reg(rn
, pass
);
5186 tmp2
= neon_load_reg(rm
, pass
);
5190 GEN_NEON_INTEGER_OP(hadd
);
5192 case NEON_3R_VRHADD
:
5193 GEN_NEON_INTEGER_OP(rhadd
);
5196 GEN_NEON_INTEGER_OP(hsub
);
5199 GEN_NEON_INTEGER_OP(shl
);
5202 GEN_NEON_INTEGER_OP_ENV(qshl
);
5205 GEN_NEON_INTEGER_OP(rshl
);
5207 case NEON_3R_VQRSHL
:
5208 GEN_NEON_INTEGER_OP_ENV(qrshl
);
5211 GEN_NEON_INTEGER_OP(abd
);
5214 GEN_NEON_INTEGER_OP(abd
);
5215 tcg_temp_free_i32(tmp2
);
5216 tmp2
= neon_load_reg(rd
, pass
);
5217 gen_neon_add(size
, tmp
, tmp2
);
5220 /* VMUL.P8; other cases already eliminated. */
5221 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
5224 GEN_NEON_INTEGER_OP(pmax
);
5227 GEN_NEON_INTEGER_OP(pmin
);
5229 case NEON_3R_VQDMULH_VQRDMULH
: /* Multiply high. */
5230 if (!u
) { /* VQDMULH */
5233 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5236 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5240 } else { /* VQRDMULH */
5243 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5246 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5252 case NEON_3R_VPADD_VQRDMLAH
:
5254 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
5255 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
5256 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
5260 case NEON_3R_FLOAT_ARITH
: /* Floating point arithmetic. */
5262 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5263 switch ((u
<< 2) | size
) {
5266 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5269 gen_helper_vfp_subs(tmp
, tmp
, tmp2
, fpstatus
);
5272 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
, fpstatus
);
5277 tcg_temp_free_ptr(fpstatus
);
5280 case NEON_3R_FLOAT_MULTIPLY
:
5282 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5283 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
5285 tcg_temp_free_i32(tmp2
);
5286 tmp2
= neon_load_reg(rd
, pass
);
5288 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5290 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
5293 tcg_temp_free_ptr(fpstatus
);
5296 case NEON_3R_FLOAT_CMP
:
5298 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5300 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
5303 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
5305 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
5308 tcg_temp_free_ptr(fpstatus
);
5311 case NEON_3R_FLOAT_ACMP
:
5313 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5315 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
, fpstatus
);
5317 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
5319 tcg_temp_free_ptr(fpstatus
);
5322 case NEON_3R_FLOAT_MINMAX
:
5324 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5326 gen_helper_vfp_maxs(tmp
, tmp
, tmp2
, fpstatus
);
5328 gen_helper_vfp_mins(tmp
, tmp
, tmp2
, fpstatus
);
5330 tcg_temp_free_ptr(fpstatus
);
5333 case NEON_3R_FLOAT_MISC
:
5336 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5338 gen_helper_vfp_maxnums(tmp
, tmp
, tmp2
, fpstatus
);
5340 gen_helper_vfp_minnums(tmp
, tmp
, tmp2
, fpstatus
);
5342 tcg_temp_free_ptr(fpstatus
);
5345 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
5347 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
5351 case NEON_3R_VFM_VQRDMLSH
:
5353 /* VFMA, VFMS: fused multiply-add */
5354 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5355 TCGv_i32 tmp3
= neon_load_reg(rd
, pass
);
5358 gen_helper_vfp_negs(tmp
, tmp
);
5360 gen_helper_vfp_muladds(tmp
, tmp
, tmp2
, tmp3
, fpstatus
);
5361 tcg_temp_free_i32(tmp3
);
5362 tcg_temp_free_ptr(fpstatus
);
5368 tcg_temp_free_i32(tmp2
);
5370 /* Save the result. For elementwise operations we can put it
5371 straight into the destination register. For pairwise operations
5372 we have to be careful to avoid clobbering the source operands. */
5373 if (pairwise
&& rd
== rm
) {
5374 neon_store_scratch(pass
, tmp
);
5376 neon_store_reg(rd
, pass
, tmp
);
5380 if (pairwise
&& rd
== rm
) {
5381 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5382 tmp
= neon_load_scratch(pass
);
5383 neon_store_reg(rd
, pass
, tmp
);
5386 /* End of 3 register same size operations. */
5387 } else if (insn
& (1 << 4)) {
5388 if ((insn
& 0x00380080) != 0) {
5389 /* Two registers and shift. */
5390 op
= (insn
>> 8) & 0xf;
5391 if (insn
& (1 << 7)) {
5399 while ((insn
& (1 << (size
+ 19))) == 0)
5402 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
5404 /* Shift by immediate:
5405 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5406 if (q
&& ((rd
| rm
) & 1)) {
5409 if (!u
&& (op
== 4 || op
== 6)) {
5412 /* Right shifts are encoded as N - shift, where N is the
5413 element size in bits. */
5415 shift
= shift
- (1 << (size
+ 3));
5420 /* Right shift comes here negative. */
5422 /* Shifts larger than the element size are architecturally
5423 * valid. Unsigned results in all zeros; signed results
5427 tcg_gen_gvec_sari(size
, rd_ofs
, rm_ofs
,
5428 MIN(shift
, (8 << size
) - 1),
5429 vec_size
, vec_size
);
5430 } else if (shift
>= 8 << size
) {
5431 tcg_gen_gvec_dup8i(rd_ofs
, vec_size
, vec_size
, 0);
5433 tcg_gen_gvec_shri(size
, rd_ofs
, rm_ofs
, shift
,
5434 vec_size
, vec_size
);
5439 /* Right shift comes here negative. */
5441 /* Shifts larger than the element size are architecturally
5442 * valid. Unsigned results in all zeros; signed results
5446 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, vec_size
, vec_size
,
5447 MIN(shift
, (8 << size
) - 1),
5449 } else if (shift
>= 8 << size
) {
5452 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, vec_size
, vec_size
,
5453 shift
, &usra_op
[size
]);
5461 /* Right shift comes here negative. */
5463 /* Shift out of range leaves destination unchanged. */
5464 if (shift
< 8 << size
) {
5465 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, vec_size
, vec_size
,
5466 shift
, &sri_op
[size
]);
5470 case 5: /* VSHL, VSLI */
5472 /* Shift out of range leaves destination unchanged. */
5473 if (shift
< 8 << size
) {
5474 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, vec_size
,
5475 vec_size
, shift
, &sli_op
[size
]);
5478 /* Shifts larger than the element size are
5479 * architecturally valid and results in zero.
5481 if (shift
>= 8 << size
) {
5482 tcg_gen_gvec_dup8i(rd_ofs
, vec_size
, vec_size
, 0);
5484 tcg_gen_gvec_shli(size
, rd_ofs
, rm_ofs
, shift
,
5485 vec_size
, vec_size
);
5497 /* To avoid excessive duplication of ops we implement shift
5498 * by immediate using the variable shift operations.
5500 imm
= dup_const(size
, shift
);
5502 for (pass
= 0; pass
< count
; pass
++) {
5504 neon_load_reg64(cpu_V0
, rm
+ pass
);
5505 tcg_gen_movi_i64(cpu_V1
, imm
);
5510 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5512 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
5514 case 6: /* VQSHLU */
5515 gen_helper_neon_qshlu_s64(cpu_V0
, cpu_env
,
5520 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
5523 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
5528 g_assert_not_reached();
5532 neon_load_reg64(cpu_V1
, rd
+ pass
);
5533 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5535 neon_store_reg64(cpu_V0
, rd
+ pass
);
5536 } else { /* size < 3 */
5537 /* Operands in T0 and T1. */
5538 tmp
= neon_load_reg(rm
, pass
);
5539 tmp2
= tcg_temp_new_i32();
5540 tcg_gen_movi_i32(tmp2
, imm
);
5544 GEN_NEON_INTEGER_OP(rshl
);
5546 case 6: /* VQSHLU */
5549 gen_helper_neon_qshlu_s8(tmp
, cpu_env
,
5553 gen_helper_neon_qshlu_s16(tmp
, cpu_env
,
5557 gen_helper_neon_qshlu_s32(tmp
, cpu_env
,
5565 GEN_NEON_INTEGER_OP_ENV(qshl
);
5568 g_assert_not_reached();
5570 tcg_temp_free_i32(tmp2
);
5574 tmp2
= neon_load_reg(rd
, pass
);
5575 gen_neon_add(size
, tmp
, tmp2
);
5576 tcg_temp_free_i32(tmp2
);
5578 neon_store_reg(rd
, pass
, tmp
);
5581 } else if (op
< 10) {
5582 /* Shift by immediate and narrow:
5583 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5584 int input_unsigned
= (op
== 8) ? !u
: u
;
5588 shift
= shift
- (1 << (size
+ 3));
5591 tmp64
= tcg_const_i64(shift
);
5592 neon_load_reg64(cpu_V0
, rm
);
5593 neon_load_reg64(cpu_V1
, rm
+ 1);
5594 for (pass
= 0; pass
< 2; pass
++) {
5602 if (input_unsigned
) {
5603 gen_helper_neon_rshl_u64(cpu_V0
, in
, tmp64
);
5605 gen_helper_neon_rshl_s64(cpu_V0
, in
, tmp64
);
5608 if (input_unsigned
) {
5609 gen_helper_neon_shl_u64(cpu_V0
, in
, tmp64
);
5611 gen_helper_neon_shl_s64(cpu_V0
, in
, tmp64
);
5614 tmp
= tcg_temp_new_i32();
5615 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5616 neon_store_reg(rd
, pass
, tmp
);
5618 tcg_temp_free_i64(tmp64
);
5621 imm
= (uint16_t)shift
;
5625 imm
= (uint32_t)shift
;
5627 tmp2
= tcg_const_i32(imm
);
5628 tmp4
= neon_load_reg(rm
+ 1, 0);
5629 tmp5
= neon_load_reg(rm
+ 1, 1);
5630 for (pass
= 0; pass
< 2; pass
++) {
5632 tmp
= neon_load_reg(rm
, 0);
5636 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
,
5639 tmp3
= neon_load_reg(rm
, 1);
5643 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
,
5645 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
5646 tcg_temp_free_i32(tmp
);
5647 tcg_temp_free_i32(tmp3
);
5648 tmp
= tcg_temp_new_i32();
5649 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5650 neon_store_reg(rd
, pass
, tmp
);
5652 tcg_temp_free_i32(tmp2
);
5654 } else if (op
== 10) {
5656 if (q
|| (rd
& 1)) {
5659 tmp
= neon_load_reg(rm
, 0);
5660 tmp2
= neon_load_reg(rm
, 1);
5661 for (pass
= 0; pass
< 2; pass
++) {
5665 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5668 /* The shift is less than the width of the source
5669 type, so we can just shift the whole register. */
5670 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
5671 /* Widen the result of shift: we need to clear
5672 * the potential overflow bits resulting from
5673 * left bits of the narrow input appearing as
5674 * right bits of left the neighbour narrow
5676 if (size
< 2 || !u
) {
5679 imm
= (0xffu
>> (8 - shift
));
5681 } else if (size
== 1) {
5682 imm
= 0xffff >> (16 - shift
);
5685 imm
= 0xffffffff >> (32 - shift
);
5688 imm64
= imm
| (((uint64_t)imm
) << 32);
5692 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, ~imm64
);
5695 neon_store_reg64(cpu_V0
, rd
+ pass
);
5697 } else if (op
>= 14) {
5698 /* VCVT fixed-point. */
5701 VFPGenFixPointFn
*fn
;
5703 if (!(insn
& (1 << 21)) || (q
&& ((rd
| rm
) & 1))) {
5709 fn
= gen_helper_vfp_ultos
;
5711 fn
= gen_helper_vfp_sltos
;
5715 fn
= gen_helper_vfp_touls_round_to_zero
;
5717 fn
= gen_helper_vfp_tosls_round_to_zero
;
5721 /* We have already masked out the must-be-1 top bit of imm6,
5722 * hence this 32-shift where the ARM ARM has 64-imm6.
5725 fpst
= get_fpstatus_ptr(1);
5726 shiftv
= tcg_const_i32(shift
);
5727 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5728 TCGv_i32 tmpf
= neon_load_reg(rm
, pass
);
5729 fn(tmpf
, tmpf
, shiftv
, fpst
);
5730 neon_store_reg(rd
, pass
, tmpf
);
5732 tcg_temp_free_ptr(fpst
);
5733 tcg_temp_free_i32(shiftv
);
5737 } else { /* (insn & 0x00380080) == 0 */
5738 int invert
, reg_ofs
, vec_size
;
5740 if (q
&& (rd
& 1)) {
5744 op
= (insn
>> 8) & 0xf;
5745 /* One register and immediate. */
5746 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
5747 invert
= (insn
& (1 << 5)) != 0;
5748 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5749 * We choose to not special-case this and will behave as if a
5750 * valid constant encoding of 0 had been given.
5769 imm
= (imm
<< 8) | (imm
<< 24);
5772 imm
= (imm
<< 8) | 0xff;
5775 imm
= (imm
<< 16) | 0xffff;
5778 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
5787 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
5788 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
5795 reg_ofs
= neon_reg_offset(rd
, 0);
5796 vec_size
= q
? 16 : 8;
5798 if (op
& 1 && op
< 12) {
5800 /* The immediate value has already been inverted,
5801 * so BIC becomes AND.
5803 tcg_gen_gvec_andi(MO_32
, reg_ofs
, reg_ofs
, imm
,
5804 vec_size
, vec_size
);
5806 tcg_gen_gvec_ori(MO_32
, reg_ofs
, reg_ofs
, imm
,
5807 vec_size
, vec_size
);
5811 if (op
== 14 && invert
) {
5812 TCGv_i64 t64
= tcg_temp_new_i64();
5814 for (pass
= 0; pass
<= q
; ++pass
) {
5818 for (n
= 0; n
< 8; n
++) {
5819 if (imm
& (1 << (n
+ pass
* 8))) {
5820 val
|= 0xffull
<< (n
* 8);
5823 tcg_gen_movi_i64(t64
, val
);
5824 neon_store_reg64(t64
, rd
+ pass
);
5826 tcg_temp_free_i64(t64
);
5828 tcg_gen_gvec_dup32i(reg_ofs
, vec_size
, vec_size
, imm
);
5832 } else { /* (insn & 0x00800010 == 0x00800000) */
5834 op
= (insn
>> 8) & 0xf;
5835 if ((insn
& (1 << 6)) == 0) {
5836 /* Three registers of different lengths. */
            /* undefreq: bit 0 : UNDEF if size == 0
             *           bit 1 : UNDEF if size == 1
             *           bit 2 : UNDEF if size == 2
             *           bit 3 : UNDEF if U == 1
             * Note that [2:0] set implies 'always UNDEF'
             */
            /* prewiden, src1_wide, src2_wide, undefreq */
            static const int neon_3reg_wide[16][4] = {
                {1, 0, 0, 0}, /* VADDL */
                {1, 1, 0, 0}, /* VADDW */
                {1, 0, 0, 0}, /* VSUBL */
                {1, 1, 0, 0}, /* VSUBW */
                {0, 1, 1, 0}, /* VADDHN */
                {0, 0, 0, 0}, /* VABAL */
                {0, 1, 1, 0}, /* VSUBHN */
                {0, 0, 0, 0}, /* VABDL */
                {0, 0, 0, 0}, /* VMLAL */
                {0, 0, 0, 9}, /* VQDMLAL */
                {0, 0, 0, 0}, /* VMLSL */
                {0, 0, 0, 9}, /* VQDMLSL */
                {0, 0, 0, 0}, /* Integer VMULL */
                {0, 0, 0, 1}, /* VQDMULL */
                {0, 0, 0, 0xa}, /* Polynomial VMULL */
                {0, 0, 0, 7}, /* Reserved: always UNDEF */
            };

            prewiden = neon_3reg_wide[op][0];
            src1_wide = neon_3reg_wide[op][1];
            src2_wide = neon_3reg_wide[op][2];
            undefreq = neon_3reg_wide[op][3];

            if ((undefreq & (1 << size)) ||
                ((undefreq & 8) && u)) {
                return 1;
            }
            if ((src1_wide && (rn & 1)) ||
                (src2_wide && (rm & 1)) ||
                (!src2_wide && (rd & 1))) {
5882 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
5883 * outside the loop below as it only performs a single pass.
5885 if (op
== 14 && size
== 2) {
5886 TCGv_i64 tcg_rn
, tcg_rm
, tcg_rd
;
5888 if (!dc_isar_feature(aa32_pmull
, s
)) {
5891 tcg_rn
= tcg_temp_new_i64();
5892 tcg_rm
= tcg_temp_new_i64();
5893 tcg_rd
= tcg_temp_new_i64();
5894 neon_load_reg64(tcg_rn
, rn
);
5895 neon_load_reg64(tcg_rm
, rm
);
5896 gen_helper_neon_pmull_64_lo(tcg_rd
, tcg_rn
, tcg_rm
);
5897 neon_store_reg64(tcg_rd
, rd
);
5898 gen_helper_neon_pmull_64_hi(tcg_rd
, tcg_rn
, tcg_rm
);
5899 neon_store_reg64(tcg_rd
, rd
+ 1);
5900 tcg_temp_free_i64(tcg_rn
);
5901 tcg_temp_free_i64(tcg_rm
);
5902 tcg_temp_free_i64(tcg_rd
);
5906 /* Avoid overlapping operands. Wide source operands are
5907 always aligned so will never overlap with wide
5908 destinations in problematic ways. */
5909 if (rd
== rm
&& !src2_wide
) {
5910 tmp
= neon_load_reg(rm
, 1);
5911 neon_store_scratch(2, tmp
);
5912 } else if (rd
== rn
&& !src1_wide
) {
5913 tmp
= neon_load_reg(rn
, 1);
5914 neon_store_scratch(2, tmp
);
5917 for (pass
= 0; pass
< 2; pass
++) {
5919 neon_load_reg64(cpu_V0
, rn
+ pass
);
5922 if (pass
== 1 && rd
== rn
) {
5923 tmp
= neon_load_scratch(2);
5925 tmp
= neon_load_reg(rn
, pass
);
5928 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5932 neon_load_reg64(cpu_V1
, rm
+ pass
);
5935 if (pass
== 1 && rd
== rm
) {
5936 tmp2
= neon_load_scratch(2);
5938 tmp2
= neon_load_reg(rm
, pass
);
5941 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
5945 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5946 gen_neon_addl(size
);
5948 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5949 gen_neon_subl(size
);
5951 case 5: case 7: /* VABAL, VABDL */
5952 switch ((size
<< 1) | u
) {
5954 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
5957 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
5960 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
5963 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
5966 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
5969 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
5973 tcg_temp_free_i32(tmp2
);
5974 tcg_temp_free_i32(tmp
);
5976 case 8: case 9: case 10: case 11: case 12: case 13:
5977 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5978 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
5980 case 14: /* Polynomial VMULL */
5981 gen_helper_neon_mull_p8(cpu_V0
, tmp
, tmp2
);
5982 tcg_temp_free_i32(tmp2
);
5983 tcg_temp_free_i32(tmp
);
5985 default: /* 15 is RESERVED: caught earlier */
5990 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
5991 neon_store_reg64(cpu_V0
, rd
+ pass
);
5992 } else if (op
== 5 || (op
>= 8 && op
<= 11)) {
5994 neon_load_reg64(cpu_V1
, rd
+ pass
);
5996 case 10: /* VMLSL */
5997 gen_neon_negl(cpu_V0
, size
);
5999 case 5: case 8: /* VABAL, VMLAL */
6000 gen_neon_addl(size
);
6002 case 9: case 11: /* VQDMLAL, VQDMLSL */
6003 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6005 gen_neon_negl(cpu_V0
, size
);
6007 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
6012 neon_store_reg64(cpu_V0
, rd
+ pass
);
6013 } else if (op
== 4 || op
== 6) {
6014 /* Narrowing operation. */
6015 tmp
= tcg_temp_new_i32();
6019 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
6022 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
6025 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
6026 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
6033 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
6036 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
6039 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
6040 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
6041 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
6049 neon_store_reg(rd
, 0, tmp3
);
6050 neon_store_reg(rd
, 1, tmp
);
6053 /* Write back the result. */
6054 neon_store_reg64(cpu_V0
, rd
+ pass
);
6058 /* Two registers and a scalar. NB that for ops of this form
6059 * the ARM ARM labels bit 24 as Q, but it is in our variable
6066 case 1: /* Float VMLA scalar */
6067 case 5: /* Floating point VMLS scalar */
6068 case 9: /* Floating point VMUL scalar */
6073 case 0: /* Integer VMLA scalar */
6074 case 4: /* Integer VMLS scalar */
6075 case 8: /* Integer VMUL scalar */
6076 case 12: /* VQDMULH scalar */
6077 case 13: /* VQRDMULH scalar */
6078 if (u
&& ((rd
| rn
) & 1)) {
6081 tmp
= neon_get_scalar(size
, rm
);
6082 neon_store_scratch(0, tmp
);
6083 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
6084 tmp
= neon_load_scratch(0);
6085 tmp2
= neon_load_reg(rn
, pass
);
6088 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
6090 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
6092 } else if (op
== 13) {
6094 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
6096 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
6098 } else if (op
& 1) {
6099 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6100 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
6101 tcg_temp_free_ptr(fpstatus
);
6104 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
6105 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
6106 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
6110 tcg_temp_free_i32(tmp2
);
6113 tmp2
= neon_load_reg(rd
, pass
);
6116 gen_neon_add(size
, tmp
, tmp2
);
6120 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6121 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
6122 tcg_temp_free_ptr(fpstatus
);
6126 gen_neon_rsb(size
, tmp
, tmp2
);
6130 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6131 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
6132 tcg_temp_free_ptr(fpstatus
);
6138 tcg_temp_free_i32(tmp2
);
6140 neon_store_reg(rd
, pass
, tmp
);
6143 case 3: /* VQDMLAL scalar */
6144 case 7: /* VQDMLSL scalar */
6145 case 11: /* VQDMULL scalar */
6150 case 2: /* VMLAL sclar */
6151 case 6: /* VMLSL scalar */
6152 case 10: /* VMULL scalar */
6156 tmp2
= neon_get_scalar(size
, rm
);
6157 /* We need a copy of tmp2 because gen_neon_mull
6158 * deletes it during pass 0. */
6159 tmp4
= tcg_temp_new_i32();
6160 tcg_gen_mov_i32(tmp4
, tmp2
);
6161 tmp3
= neon_load_reg(rn
, 1);
6163 for (pass
= 0; pass
< 2; pass
++) {
6165 tmp
= neon_load_reg(rn
, 0);
6170 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
6172 neon_load_reg64(cpu_V1
, rd
+ pass
);
6176 gen_neon_negl(cpu_V0
, size
);
6179 gen_neon_addl(size
);
6182 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6184 gen_neon_negl(cpu_V0
, size
);
6186 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
6192 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6197 neon_store_reg64(cpu_V0
, rd
+ pass
);
6200 case 14: /* VQRDMLAH scalar */
6201 case 15: /* VQRDMLSH scalar */
6203 NeonGenThreeOpEnvFn
*fn
;
6205 if (!dc_isar_feature(aa32_rdm
, s
)) {
6208 if (u
&& ((rd
| rn
) & 1)) {
6213 fn
= gen_helper_neon_qrdmlah_s16
;
6215 fn
= gen_helper_neon_qrdmlah_s32
;
6219 fn
= gen_helper_neon_qrdmlsh_s16
;
6221 fn
= gen_helper_neon_qrdmlsh_s32
;
6225 tmp2
= neon_get_scalar(size
, rm
);
6226 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
6227 tmp
= neon_load_reg(rn
, pass
);
6228 tmp3
= neon_load_reg(rd
, pass
);
6229 fn(tmp
, cpu_env
, tmp
, tmp2
, tmp3
);
6230 tcg_temp_free_i32(tmp3
);
6231 neon_store_reg(rd
, pass
, tmp
);
6233 tcg_temp_free_i32(tmp2
);
6237 g_assert_not_reached();
6240 } else { /* size == 3 */
6243 imm
= (insn
>> 8) & 0xf;
6248 if (q
&& ((rd
| rn
| rm
) & 1)) {
6253 neon_load_reg64(cpu_V0
, rn
);
6255 neon_load_reg64(cpu_V1
, rn
+ 1);
6257 } else if (imm
== 8) {
6258 neon_load_reg64(cpu_V0
, rn
+ 1);
6260 neon_load_reg64(cpu_V1
, rm
);
6263 tmp64
= tcg_temp_new_i64();
6265 neon_load_reg64(cpu_V0
, rn
);
6266 neon_load_reg64(tmp64
, rn
+ 1);
6268 neon_load_reg64(cpu_V0
, rn
+ 1);
6269 neon_load_reg64(tmp64
, rm
);
6271 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
6272 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
6273 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6275 neon_load_reg64(cpu_V1
, rm
);
6277 neon_load_reg64(cpu_V1
, rm
+ 1);
6280 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
6281 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
6282 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
6283 tcg_temp_free_i64(tmp64
);
6286 neon_load_reg64(cpu_V0
, rn
);
6287 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
6288 neon_load_reg64(cpu_V1
, rm
);
6289 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
6290 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6292 neon_store_reg64(cpu_V0
, rd
);
6294 neon_store_reg64(cpu_V1
, rd
+ 1);
6296 } else if ((insn
& (1 << 11)) == 0) {
6297 /* Two register misc. */
6298 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
6299 size
= (insn
>> 18) & 3;
6300 /* UNDEF for unknown op values and bad op-size combinations */
6301 if ((neon_2rm_sizes
[op
] & (1 << size
)) == 0) {
6304 if (neon_2rm_is_v8_op(op
) &&
6305 !arm_dc_feature(s
, ARM_FEATURE_V8
)) {
6308 if ((op
!= NEON_2RM_VMOVN
&& op
!= NEON_2RM_VQMOVN
) &&
6309 q
&& ((rm
| rd
) & 1)) {
6313 case NEON_2RM_VREV64
:
6314 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
6315 tmp
= neon_load_reg(rm
, pass
* 2);
6316 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
6318 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
6319 case 1: gen_swap_half(tmp
); break;
6320 case 2: /* no-op */ break;
6323 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
6325 neon_store_reg(rd
, pass
* 2, tmp2
);
6328 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
6329 case 1: gen_swap_half(tmp2
); break;
6332 neon_store_reg(rd
, pass
* 2, tmp2
);
6336 case NEON_2RM_VPADDL
: case NEON_2RM_VPADDL_U
:
6337 case NEON_2RM_VPADAL
: case NEON_2RM_VPADAL_U
:
6338 for (pass
= 0; pass
< q
+ 1; pass
++) {
6339 tmp
= neon_load_reg(rm
, pass
* 2);
6340 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
6341 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
6342 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
6344 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
6345 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
6346 case 2: tcg_gen_add_i64(CPU_V001
); break;
6349 if (op
>= NEON_2RM_VPADAL
) {
6351 neon_load_reg64(cpu_V1
, rd
+ pass
);
6352 gen_neon_addl(size
);
6354 neon_store_reg64(cpu_V0
, rd
+ pass
);
6360 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
6361 tmp
= neon_load_reg(rm
, n
);
6362 tmp2
= neon_load_reg(rd
, n
+ 1);
6363 neon_store_reg(rm
, n
, tmp2
);
6364 neon_store_reg(rd
, n
+ 1, tmp
);
6371 if (gen_neon_unzip(rd
, rm
, size
, q
)) {
6376 if (gen_neon_zip(rd
, rm
, size
, q
)) {
6380 case NEON_2RM_VMOVN
: case NEON_2RM_VQMOVN
:
6381 /* also VQMOVUN; op field and mnemonics don't line up */
6386 for (pass
= 0; pass
< 2; pass
++) {
6387 neon_load_reg64(cpu_V0
, rm
+ pass
);
6388 tmp
= tcg_temp_new_i32();
6389 gen_neon_narrow_op(op
== NEON_2RM_VMOVN
, q
, size
,
6394 neon_store_reg(rd
, 0, tmp2
);
6395 neon_store_reg(rd
, 1, tmp
);
6399 case NEON_2RM_VSHLL
:
6400 if (q
|| (rd
& 1)) {
6403 tmp
= neon_load_reg(rm
, 0);
6404 tmp2
= neon_load_reg(rm
, 1);
6405 for (pass
= 0; pass
< 2; pass
++) {
6408 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
6409 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, 8 << size
);
6410 neon_store_reg64(cpu_V0
, rd
+ pass
);
6413 case NEON_2RM_VCVT_F16_F32
:
6418 if (!dc_isar_feature(aa32_fp16_spconv
, s
) ||
6422 fpst
= get_fpstatus_ptr(true);
6423 ahp
= get_ahp_flag();
6424 tmp
= neon_load_reg(rm
, 0);
6425 gen_helper_vfp_fcvt_f32_to_f16(tmp
, tmp
, fpst
, ahp
);
6426 tmp2
= neon_load_reg(rm
, 1);
6427 gen_helper_vfp_fcvt_f32_to_f16(tmp2
, tmp2
, fpst
, ahp
);
6428 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
6429 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
6430 tcg_temp_free_i32(tmp
);
6431 tmp
= neon_load_reg(rm
, 2);
6432 gen_helper_vfp_fcvt_f32_to_f16(tmp
, tmp
, fpst
, ahp
);
6433 tmp3
= neon_load_reg(rm
, 3);
6434 neon_store_reg(rd
, 0, tmp2
);
6435 gen_helper_vfp_fcvt_f32_to_f16(tmp3
, tmp3
, fpst
, ahp
);
6436 tcg_gen_shli_i32(tmp3
, tmp3
, 16);
6437 tcg_gen_or_i32(tmp3
, tmp3
, tmp
);
6438 neon_store_reg(rd
, 1, tmp3
);
6439 tcg_temp_free_i32(tmp
);
6440 tcg_temp_free_i32(ahp
);
6441 tcg_temp_free_ptr(fpst
);
6444 case NEON_2RM_VCVT_F32_F16
:
6448 if (!dc_isar_feature(aa32_fp16_spconv
, s
) ||
6452 fpst
= get_fpstatus_ptr(true);
6453 ahp
= get_ahp_flag();
6454 tmp3
= tcg_temp_new_i32();
6455 tmp
= neon_load_reg(rm
, 0);
6456 tmp2
= neon_load_reg(rm
, 1);
6457 tcg_gen_ext16u_i32(tmp3
, tmp
);
6458 gen_helper_vfp_fcvt_f16_to_f32(tmp3
, tmp3
, fpst
, ahp
);
6459 neon_store_reg(rd
, 0, tmp3
);
6460 tcg_gen_shri_i32(tmp
, tmp
, 16);
6461 gen_helper_vfp_fcvt_f16_to_f32(tmp
, tmp
, fpst
, ahp
);
6462 neon_store_reg(rd
, 1, tmp
);
6463 tmp3
= tcg_temp_new_i32();
6464 tcg_gen_ext16u_i32(tmp3
, tmp2
);
6465 gen_helper_vfp_fcvt_f16_to_f32(tmp3
, tmp3
, fpst
, ahp
);
6466 neon_store_reg(rd
, 2, tmp3
);
6467 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
6468 gen_helper_vfp_fcvt_f16_to_f32(tmp2
, tmp2
, fpst
, ahp
);
6469 neon_store_reg(rd
, 3, tmp2
);
6470 tcg_temp_free_i32(ahp
);
6471 tcg_temp_free_ptr(fpst
);
6474 case NEON_2RM_AESE
: case NEON_2RM_AESMC
:
6475 if (!dc_isar_feature(aa32_aes
, s
) || ((rm
| rd
) & 1)) {
6478 ptr1
= vfp_reg_ptr(true, rd
);
6479 ptr2
= vfp_reg_ptr(true, rm
);
6481 /* Bit 6 is the lowest opcode bit; it distinguishes between
6482 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6484 tmp3
= tcg_const_i32(extract32(insn
, 6, 1));
6486 if (op
== NEON_2RM_AESE
) {
6487 gen_helper_crypto_aese(ptr1
, ptr2
, tmp3
);
6489 gen_helper_crypto_aesmc(ptr1
, ptr2
, tmp3
);
6491 tcg_temp_free_ptr(ptr1
);
6492 tcg_temp_free_ptr(ptr2
);
6493 tcg_temp_free_i32(tmp3
);
6495 case NEON_2RM_SHA1H
:
6496 if (!dc_isar_feature(aa32_sha1
, s
) || ((rm
| rd
) & 1)) {
6499 ptr1
= vfp_reg_ptr(true, rd
);
6500 ptr2
= vfp_reg_ptr(true, rm
);
6502 gen_helper_crypto_sha1h(ptr1
, ptr2
);
6504 tcg_temp_free_ptr(ptr1
);
6505 tcg_temp_free_ptr(ptr2
);
6507 case NEON_2RM_SHA1SU1
:
6508 if ((rm
| rd
) & 1) {
6511 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6513 if (!dc_isar_feature(aa32_sha2
, s
)) {
6516 } else if (!dc_isar_feature(aa32_sha1
, s
)) {
6519 ptr1
= vfp_reg_ptr(true, rd
);
6520 ptr2
= vfp_reg_ptr(true, rm
);
6522 gen_helper_crypto_sha256su0(ptr1
, ptr2
);
6524 gen_helper_crypto_sha1su1(ptr1
, ptr2
);
6526 tcg_temp_free_ptr(ptr1
);
6527 tcg_temp_free_ptr(ptr2
);
6531 tcg_gen_gvec_not(0, rd_ofs
, rm_ofs
, vec_size
, vec_size
);
6534 tcg_gen_gvec_neg(size
, rd_ofs
, rm_ofs
, vec_size
, vec_size
);
6537 tcg_gen_gvec_abs(size
, rd_ofs
, rm_ofs
, vec_size
, vec_size
);
6542 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6543 tmp
= neon_load_reg(rm
, pass
);
6545 case NEON_2RM_VREV32
:
6547 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
6548 case 1: gen_swap_half(tmp
); break;
6552 case NEON_2RM_VREV16
:
6557 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
6558 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
6559 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
6565 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
6566 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
6567 case 2: tcg_gen_clzi_i32(tmp
, tmp
, 32); break;
6572 gen_helper_neon_cnt_u8(tmp
, tmp
);
6574 case NEON_2RM_VQABS
:
6577 gen_helper_neon_qabs_s8(tmp
, cpu_env
, tmp
);
6580 gen_helper_neon_qabs_s16(tmp
, cpu_env
, tmp
);
6583 gen_helper_neon_qabs_s32(tmp
, cpu_env
, tmp
);
6588 case NEON_2RM_VQNEG
:
6591 gen_helper_neon_qneg_s8(tmp
, cpu_env
, tmp
);
6594 gen_helper_neon_qneg_s16(tmp
, cpu_env
, tmp
);
6597 gen_helper_neon_qneg_s32(tmp
, cpu_env
, tmp
);
6602 case NEON_2RM_VCGT0
: case NEON_2RM_VCLE0
:
6603 tmp2
= tcg_const_i32(0);
6605 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
6606 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
6607 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
6610 tcg_temp_free_i32(tmp2
);
6611 if (op
== NEON_2RM_VCLE0
) {
6612 tcg_gen_not_i32(tmp
, tmp
);
6615 case NEON_2RM_VCGE0
: case NEON_2RM_VCLT0
:
6616 tmp2
= tcg_const_i32(0);
6618 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
6619 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
6620 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
6623 tcg_temp_free_i32(tmp2
);
6624 if (op
== NEON_2RM_VCLT0
) {
6625 tcg_gen_not_i32(tmp
, tmp
);
6628 case NEON_2RM_VCEQ0
:
6629 tmp2
= tcg_const_i32(0);
6631 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
6632 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
6633 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
6636 tcg_temp_free_i32(tmp2
);
6638 case NEON_2RM_VCGT0_F
:
6640 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6641 tmp2
= tcg_const_i32(0);
6642 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
6643 tcg_temp_free_i32(tmp2
);
6644 tcg_temp_free_ptr(fpstatus
);
6647 case NEON_2RM_VCGE0_F
:
6649 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6650 tmp2
= tcg_const_i32(0);
6651 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
6652 tcg_temp_free_i32(tmp2
);
6653 tcg_temp_free_ptr(fpstatus
);
6656 case NEON_2RM_VCEQ0_F
:
6658 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6659 tmp2
= tcg_const_i32(0);
6660 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
6661 tcg_temp_free_i32(tmp2
);
6662 tcg_temp_free_ptr(fpstatus
);
6665 case NEON_2RM_VCLE0_F
:
6667 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6668 tmp2
= tcg_const_i32(0);
6669 gen_helper_neon_cge_f32(tmp
, tmp2
, tmp
, fpstatus
);
6670 tcg_temp_free_i32(tmp2
);
6671 tcg_temp_free_ptr(fpstatus
);
6674 case NEON_2RM_VCLT0_F
:
6676 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6677 tmp2
= tcg_const_i32(0);
6678 gen_helper_neon_cgt_f32(tmp
, tmp2
, tmp
, fpstatus
);
6679 tcg_temp_free_i32(tmp2
);
6680 tcg_temp_free_ptr(fpstatus
);
6683 case NEON_2RM_VABS_F
:
6684 gen_helper_vfp_abss(tmp
, tmp
);
6686 case NEON_2RM_VNEG_F
:
6687 gen_helper_vfp_negs(tmp
, tmp
);
6690 tmp2
= neon_load_reg(rd
, pass
);
6691 neon_store_reg(rm
, pass
, tmp2
);
6694 tmp2
= neon_load_reg(rd
, pass
);
6696 case 0: gen_neon_trn_u8(tmp
, tmp2
); break;
6697 case 1: gen_neon_trn_u16(tmp
, tmp2
); break;
6700 neon_store_reg(rm
, pass
, tmp2
);
6702 case NEON_2RM_VRINTN
:
6703 case NEON_2RM_VRINTA
:
6704 case NEON_2RM_VRINTM
:
6705 case NEON_2RM_VRINTP
:
6706 case NEON_2RM_VRINTZ
:
6709 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6712 if (op
== NEON_2RM_VRINTZ
) {
6713 rmode
= FPROUNDING_ZERO
;
6715 rmode
= fp_decode_rm
[((op
& 0x6) >> 1) ^ 1];
6718 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
6719 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
6721 gen_helper_rints(tmp
, tmp
, fpstatus
);
6722 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
6724 tcg_temp_free_ptr(fpstatus
);
6725 tcg_temp_free_i32(tcg_rmode
);
6728 case NEON_2RM_VRINTX
:
6730 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6731 gen_helper_rints_exact(tmp
, tmp
, fpstatus
);
6732 tcg_temp_free_ptr(fpstatus
);
6735 case NEON_2RM_VCVTAU
:
6736 case NEON_2RM_VCVTAS
:
6737 case NEON_2RM_VCVTNU
:
6738 case NEON_2RM_VCVTNS
:
6739 case NEON_2RM_VCVTPU
:
6740 case NEON_2RM_VCVTPS
:
6741 case NEON_2RM_VCVTMU
:
6742 case NEON_2RM_VCVTMS
:
6744 bool is_signed
= !extract32(insn
, 7, 1);
6745 TCGv_ptr fpst
= get_fpstatus_ptr(1);
6746 TCGv_i32 tcg_rmode
, tcg_shift
;
6747 int rmode
= fp_decode_rm
[extract32(insn
, 8, 2)];
6749 tcg_shift
= tcg_const_i32(0);
6750 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
6751 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
6755 gen_helper_vfp_tosls(tmp
, tmp
,
6758 gen_helper_vfp_touls(tmp
, tmp
,
6762 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
6764 tcg_temp_free_i32(tcg_rmode
);
6765 tcg_temp_free_i32(tcg_shift
);
6766 tcg_temp_free_ptr(fpst
);
6769 case NEON_2RM_VRECPE
:
6771 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6772 gen_helper_recpe_u32(tmp
, tmp
, fpstatus
);
6773 tcg_temp_free_ptr(fpstatus
);
6776 case NEON_2RM_VRSQRTE
:
6778 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6779 gen_helper_rsqrte_u32(tmp
, tmp
, fpstatus
);
6780 tcg_temp_free_ptr(fpstatus
);
6783 case NEON_2RM_VRECPE_F
:
6785 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6786 gen_helper_recpe_f32(tmp
, tmp
, fpstatus
);
6787 tcg_temp_free_ptr(fpstatus
);
6790 case NEON_2RM_VRSQRTE_F
:
6792 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6793 gen_helper_rsqrte_f32(tmp
, tmp
, fpstatus
);
6794 tcg_temp_free_ptr(fpstatus
);
6797 case NEON_2RM_VCVT_FS
: /* VCVT.F32.S32 */
6799 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6800 gen_helper_vfp_sitos(tmp
, tmp
, fpstatus
);
6801 tcg_temp_free_ptr(fpstatus
);
6804 case NEON_2RM_VCVT_FU
: /* VCVT.F32.U32 */
6806 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6807 gen_helper_vfp_uitos(tmp
, tmp
, fpstatus
);
6808 tcg_temp_free_ptr(fpstatus
);
6811 case NEON_2RM_VCVT_SF
: /* VCVT.S32.F32 */
6813 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6814 gen_helper_vfp_tosizs(tmp
, tmp
, fpstatus
);
6815 tcg_temp_free_ptr(fpstatus
);
6818 case NEON_2RM_VCVT_UF
: /* VCVT.U32.F32 */
6820 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6821 gen_helper_vfp_touizs(tmp
, tmp
, fpstatus
);
6822 tcg_temp_free_ptr(fpstatus
);
6826 /* Reserved op values were caught by the
6827 * neon_2rm_sizes[] check earlier.
6831 neon_store_reg(rd
, pass
, tmp
);
6835 } else if ((insn
& (1 << 10)) == 0) {
6837 int n
= ((insn
>> 8) & 3) + 1;
6838 if ((rn
+ n
) > 32) {
6839 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6840 * helper function running off the end of the register file.
6845 if (insn
& (1 << 6)) {
6846 tmp
= neon_load_reg(rd
, 0);
6848 tmp
= tcg_temp_new_i32();
6849 tcg_gen_movi_i32(tmp
, 0);
6851 tmp2
= neon_load_reg(rm
, 0);
6852 ptr1
= vfp_reg_ptr(true, rn
);
6853 tmp5
= tcg_const_i32(n
);
6854 gen_helper_neon_tbl(tmp2
, tmp2
, tmp
, ptr1
, tmp5
);
6855 tcg_temp_free_i32(tmp
);
6856 if (insn
& (1 << 6)) {
6857 tmp
= neon_load_reg(rd
, 1);
6859 tmp
= tcg_temp_new_i32();
6860 tcg_gen_movi_i32(tmp
, 0);
6862 tmp3
= neon_load_reg(rm
, 1);
6863 gen_helper_neon_tbl(tmp3
, tmp3
, tmp
, ptr1
, tmp5
);
6864 tcg_temp_free_i32(tmp5
);
6865 tcg_temp_free_ptr(ptr1
);
6866 neon_store_reg(rd
, 0, tmp2
);
6867 neon_store_reg(rd
, 1, tmp3
);
6868 tcg_temp_free_i32(tmp
);
6869 } else if ((insn
& 0x380) == 0) {
6874 if ((insn
& (7 << 16)) == 0 || (q
&& (rd
& 1))) {
6877 if (insn
& (1 << 16)) {
6879 element
= (insn
>> 17) & 7;
6880 } else if (insn
& (1 << 17)) {
6882 element
= (insn
>> 18) & 3;
6885 element
= (insn
>> 19) & 1;
6887 tcg_gen_gvec_dup_mem(size
, neon_reg_offset(rd
, 0),
6888 neon_element_offset(rm
, element
, size
),
6889 q
? 16 : 8, q
? 16 : 8);
/* Advanced SIMD three registers of the same length extension.
 *  31           25  23  22    20   16   12  11   10   9    8        3     0
 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
 */
static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
{
    gen_helper_gvec_3 *fn_gvec = NULL;
    gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
    int rd, rn, rm, opr_sz;
    int data = 0;
    int off_rn, off_rm;
    bool is_long = false, q = extract32(insn, 6, 1);
    bool ptr_is_env = false;

    if ((insn & 0xfe200f10) == 0xfc200800) {
        /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
        int size = extract32(insn, 20, 1);
        data = extract32(insn, 23, 2); /* rot */
        if (!dc_isar_feature(aa32_vcma, s)
            || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
            return 1;
        }
        fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
    } else if ((insn & 0xfea00f10) == 0xfc800800) {
        /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
        int size = extract32(insn, 20, 1);
        data = extract32(insn, 24, 1); /* rot */
        if (!dc_isar_feature(aa32_vcma, s)
            || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
            return 1;
        }
        fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
    } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
        /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
        bool u = extract32(insn, 4, 1);
        if (!dc_isar_feature(aa32_dp, s)) {
            return 1;
        }
        fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
    } else if ((insn & 0xff300f10) == 0xfc200810) {
        /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
        int is_s = extract32(insn, 23, 1);
        if (!dc_isar_feature(aa32_fhm, s)) {
            return 1;
        }
        is_long = true;
        data = is_s; /* is_2 == 0 */
        fn_gvec_ptr = gen_helper_gvec_fmlal_a32;
        ptr_is_env = true;
    } else {
        return 1;
    }

    VFP_DREG_D(rd, insn);
    if (rd & q) {
        return 1;
    }
    if (q || !is_long) {
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
        if ((rn | rm) & q & !is_long) {
            return 1;
        }
        off_rn = vfp_reg_offset(1, rn);
        off_rm = vfp_reg_offset(1, rm);
    } else {
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
        off_rn = vfp_reg_offset(0, rn);
        off_rm = vfp_reg_offset(0, rm);
    }

    if (s->fp_excp_el) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }
    if (!s->vfp_enabled) {
        return 1;
    }

    opr_sz = (1 + q) * 8;
    if (fn_gvec_ptr) {
        TCGv_ptr ptr;
        if (ptr_is_env) {
            ptr = cpu_env;
        } else {
            ptr = get_fpstatus_ptr(1);
        }
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
                           opr_sz, opr_sz, data, fn_gvec_ptr);
        if (!ptr_is_env) {
            tcg_temp_free_ptr(ptr);
        }
    } else {
        tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
                           opr_sz, opr_sz, data, fn_gvec);
    }
    return 0;
}
/* Advanced SIMD two registers and a scalar extension.
 *  31             24  23  22   20   16   12  11   10   9    8        3     0
 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
 */
static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
{
    gen_helper_gvec_3 *fn_gvec = NULL;
    gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
    int rd, rn, rm, opr_sz, data;
    int off_rn, off_rm;
    bool is_long = false, q = extract32(insn, 6, 1);
    bool ptr_is_env = false;

    if ((insn & 0xff000f10) == 0xfe000800) {
        /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
        int rot = extract32(insn, 20, 2);
        int size = extract32(insn, 23, 1);
        int index;

        if (!dc_isar_feature(aa32_vcma, s)) {
            return 1;
        }
        if (size == 0) {
            if (!dc_isar_feature(aa32_fp16_arith, s)) {
                return 1;
            }
            /* For fp16, rm is just Vm, and index is M. */
            rm = extract32(insn, 0, 4);
            index = extract32(insn, 5, 1);
        } else {
            /* For fp32, rm is the usual M:Vm, and index is 0. */
            VFP_DREG_M(rm, insn);
            index = 0;
        }
        data = (index << 2) | rot;
        fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
                       : gen_helper_gvec_fcmlah_idx);
    } else if ((insn & 0xffb00f00) == 0xfe200d00) {
        /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
        int u = extract32(insn, 4, 1);

        if (!dc_isar_feature(aa32_dp, s)) {
            return 1;
        }
        fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
        /* rm is just Vm, and index is M. */
        data = extract32(insn, 5, 1); /* index */
        rm = extract32(insn, 0, 4);
    } else if ((insn & 0xffa00f10) == 0xfe000810) {
        /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
        int is_s = extract32(insn, 20, 1);
        int vm20 = extract32(insn, 0, 3);
        int vm3 = extract32(insn, 3, 1);
        int m = extract32(insn, 5, 1);
        int index;

        if (!dc_isar_feature(aa32_fhm, s)) {
            return 1;
        }
        if (q) {
            rm = vm20;
            index = m * 2 + vm3;
        } else {
            rm = vm20 * 2 + m;
            index = vm3;
        }
        is_long = true;
        data = (index << 2) | is_s; /* is_2 == 0 */
        fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32;
        ptr_is_env = true;
    } else {
        return 1;
    }

    VFP_DREG_D(rd, insn);
    if (rd & q) {
        return 1;
    }
    if (q || !is_long) {
        VFP_DREG_N(rn, insn);
        if (rn & q & !is_long) {
            return 1;
        }
        off_rn = vfp_reg_offset(1, rn);
        off_rm = vfp_reg_offset(1, rm);
    } else {
        rn = VFP_SREG_N(insn);
        off_rn = vfp_reg_offset(0, rn);
        off_rm = vfp_reg_offset(0, rm);
    }
    if (s->fp_excp_el) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }
    if (!s->vfp_enabled) {
        return 1;
    }

    opr_sz = (1 + q) * 8;
    if (fn_gvec_ptr) {
        TCGv_ptr ptr;
        if (ptr_is_env) {
            ptr = cpu_env;
        } else {
            ptr = get_fpstatus_ptr(1);
        }
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
                           opr_sz, opr_sz, data, fn_gvec_ptr);
        if (!ptr_is_env) {
            tcg_temp_free_ptr(ptr);
        }
    } else {
        tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
                           opr_sz, opr_sz, data, fn_gvec);
    }
    return 0;
}
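
/* Decode a coprocessor register access (MRC, MCR, MRRC, MCRR): the
 * XScale/iwMMXt coprocessor spaces are dispatched to their own decoders,
 * everything else is looked up in cp_regs and the access (plus any runtime
 * permission check) is emitted.  Returns nonzero if the insn is not handled.
 */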
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
&& ((insn
& (1 << 4)) == 0)) {
7154 opc1
= (insn
>> 4) & 0xf;
7156 rt2
= (insn
>> 16) & 0xf;
7158 crn
= (insn
>> 16) & 0xf;
7159 opc1
= (insn
>> 21) & 7;
7160 opc2
= (insn
>> 5) & 7;
7163 isread
= (insn
>> 20) & 1;
7164 rt
= (insn
>> 12) & 0xf;
7166 ri
= get_arm_cp_reginfo(s
->cp_regs
,
7167 ENCODE_CP_REG(cpnum
, is64
, s
->ns
, crn
, crm
, opc1
, opc2
));
7169 /* Check access permissions */
7170 if (!cp_access_ok(s
->current_el
, ri
, isread
)) {
7175 (arm_dc_feature(s
, ARM_FEATURE_XSCALE
) && cpnum
< 14)) {
7176 /* Emit code to perform further access permissions checks at
7177 * runtime; this may result in an exception.
7178 * Note that on XScale all cp0..c13 registers do an access check
7179 * call in order to handle c15_cpar.
7182 TCGv_i32 tcg_syn
, tcg_isread
;
7185 /* Note that since we are an implementation which takes an
7186 * exception on a trapped conditional instruction only if the
7187 * instruction passes its condition code check, we can take
7188 * advantage of the clause in the ARM ARM that allows us to set
7189 * the COND field in the instruction to 0xE in all cases.
7190 * We could fish the actual condition out of the insn (ARM)
7191 * or the condexec bits (Thumb) but it isn't necessary.
7196 syndrome
= syn_cp14_rrt_trap(1, 0xe, opc1
, crm
, rt
, rt2
,
7199 syndrome
= syn_cp14_rt_trap(1, 0xe, opc1
, opc2
, crn
, crm
,
7205 syndrome
= syn_cp15_rrt_trap(1, 0xe, opc1
, crm
, rt
, rt2
,
7208 syndrome
= syn_cp15_rt_trap(1, 0xe, opc1
, opc2
, crn
, crm
,
7213 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7214 * so this can only happen if this is an ARMv7 or earlier CPU,
7215 * in which case the syndrome information won't actually be
7218 assert(!arm_dc_feature(s
, ARM_FEATURE_V8
));
7219 syndrome
= syn_uncategorized();
7223 gen_set_condexec(s
);
7224 gen_set_pc_im(s
, s
->pc_curr
);
7225 tmpptr
= tcg_const_ptr(ri
);
7226 tcg_syn
= tcg_const_i32(syndrome
);
7227 tcg_isread
= tcg_const_i32(isread
);
7228 gen_helper_access_check_cp_reg(cpu_env
, tmpptr
, tcg_syn
,
7230 tcg_temp_free_ptr(tmpptr
);
7231 tcg_temp_free_i32(tcg_syn
);
7232 tcg_temp_free_i32(tcg_isread
);
7235 /* Handle special cases first */
7236 switch (ri
->type
& ~(ARM_CP_FLAG_MASK
& ~ARM_CP_SPECIAL
)) {
7243 gen_set_pc_im(s
, s
->base
.pc_next
);
7244 s
->base
.is_jmp
= DISAS_WFI
;
7250 if ((tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) && (ri
->type
& ARM_CP_IO
)) {
7259 if (ri
->type
& ARM_CP_CONST
) {
7260 tmp64
= tcg_const_i64(ri
->resetvalue
);
7261 } else if (ri
->readfn
) {
7263 tmp64
= tcg_temp_new_i64();
7264 tmpptr
= tcg_const_ptr(ri
);
7265 gen_helper_get_cp_reg64(tmp64
, cpu_env
, tmpptr
);
7266 tcg_temp_free_ptr(tmpptr
);
7268 tmp64
= tcg_temp_new_i64();
7269 tcg_gen_ld_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
7271 tmp
= tcg_temp_new_i32();
7272 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
7273 store_reg(s
, rt
, tmp
);
7274 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7275 tmp
= tcg_temp_new_i32();
7276 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
7277 tcg_temp_free_i64(tmp64
);
7278 store_reg(s
, rt2
, tmp
);
7281 if (ri
->type
& ARM_CP_CONST
) {
7282 tmp
= tcg_const_i32(ri
->resetvalue
);
7283 } else if (ri
->readfn
) {
7285 tmp
= tcg_temp_new_i32();
7286 tmpptr
= tcg_const_ptr(ri
);
7287 gen_helper_get_cp_reg(tmp
, cpu_env
, tmpptr
);
7288 tcg_temp_free_ptr(tmpptr
);
7290 tmp
= load_cpu_offset(ri
->fieldoffset
);
7293 /* Destination register of r15 for 32 bit loads sets
7294 * the condition codes from the high 4 bits of the value
7297 tcg_temp_free_i32(tmp
);
7299 store_reg(s
, rt
, tmp
);
7304 if (ri
->type
& ARM_CP_CONST
) {
7305 /* If not forbidden by access permissions, treat as WI */
7310 TCGv_i32 tmplo
, tmphi
;
7311 TCGv_i64 tmp64
= tcg_temp_new_i64();
7312 tmplo
= load_reg(s
, rt
);
7313 tmphi
= load_reg(s
, rt2
);
7314 tcg_gen_concat_i32_i64(tmp64
, tmplo
, tmphi
);
7315 tcg_temp_free_i32(tmplo
);
7316 tcg_temp_free_i32(tmphi
);
7318 TCGv_ptr tmpptr
= tcg_const_ptr(ri
);
7319 gen_helper_set_cp_reg64(cpu_env
, tmpptr
, tmp64
);
7320 tcg_temp_free_ptr(tmpptr
);
7322 tcg_gen_st_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
7324 tcg_temp_free_i64(tmp64
);
7329 tmp
= load_reg(s
, rt
);
7330 tmpptr
= tcg_const_ptr(ri
);
7331 gen_helper_set_cp_reg(cpu_env
, tmpptr
, tmp
);
7332 tcg_temp_free_ptr(tmpptr
);
7333 tcg_temp_free_i32(tmp
);
7335 TCGv_i32 tmp
= load_reg(s
, rt
);
7336 store_cpu_offset(tmp
, ri
->fieldoffset
);
7341 if ((tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) && (ri
->type
& ARM_CP_IO
)) {
7342 /* I/O operations must end the TB here (whether read or write) */
7345 } else if (!isread
&& !(ri
->type
& ARM_CP_SUPPRESS_TB_END
)) {
7346 /* We default to ending the TB on a coprocessor register write,
7347 * but allow this to be suppressed by the register definition
7348 * (usually only necessary to work around guest bugs).
7356 /* Unknown register; this might be a guest error or a QEMU
7357 * unimplemented feature.
7360 qemu_log_mask(LOG_UNIMP
, "%s access to unsupported AArch32 "
7361 "64 bit system register cp:%d opc1: %d crm:%d "
7363 isread
? "read" : "write", cpnum
, opc1
, crm
,
7364 s
->ns
? "non-secure" : "secure");
7366 qemu_log_mask(LOG_UNIMP
, "%s access to unsupported AArch32 "
7367 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7369 isread
? "read" : "write", cpnum
, opc1
, crn
, crm
, opc2
,
7370 s
->ns
? "non-secure" : "secure");
/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_extrl_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}
/* load a 32-bit value from a register and perform a 64-bit accumulate. */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp2;

    /* Load value and extend to 64 bits.  */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* load and add a 64-bit value from a register pair.  */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv_i32 tmpl;
    TCGv_i32 tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
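/*
 * Illustrative sketch only (not part of the decoder): the helpers above
 * compose into the usual "64-bit multiply-accumulate into a register pair"
 * pattern.  The wrapper name gen_umlal_sketch is an assumption made for
 * this example; the real decode paths below emit the same call sequence
 * inline rather than through a helper like this.
 */
static void gen_umlal_sketch(DisasContext *s, int rdlo, int rdhi,
                             int rn, int rm)
{
    TCGv_i32 t1 = load_reg(s, rn);
    TCGv_i32 t2 = load_reg(s, rm);
    /* 32 x 32 -> 64 unsigned multiply; gen_mulu_i64_i32() consumes t1/t2 */
    TCGv_i64 res = gen_mulu_i64_i32(t1, t2);

    /* Accumulate the current rdhi:rdlo value, then write the result back */
    gen_addq(s, res, rdlo, rdhi);
    gen_storeq_reg(s, rdlo, rdhi, res);
    tcg_temp_free_i64(res);
}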
/* Set N and Z flags from hi|lo.  */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.  The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually.  */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. That means we don't want to do a
         * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
         * for an architecturally 64-bit access, but instead do a
         * 64-bit access using MO_BE if appropriate and then split
         * the two halves.
         * This only makes a difference for BE32 user-mode, where
         * frob64() must not flip the two halves of the 64-bit data
         * but this code must treat BE32 user-mode like BE32 system.
         */
        TCGv taddr = gen_aa32_addr(s, addr, opc);

        tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
        tcg_temp_free(taddr);
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        if (s->be_data == MO_BE) {
            tcg_gen_extr_i64_i32(tmp2, tmp, t64);
        } else {
            tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        }
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();
    t1 = load_reg(s, rt);
    if (size == 3) {
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        t2 = load_reg(s, rt2);
        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. Since we're going to treat this as a
         * single 64-bit BE store, we need to put the two halves in the
         * opposite order for BE to LE, so that they end up in the right
         * places.
         * We don't want gen_aa32_frob64() because that does the wrong
         * thing for BE32 usermode.
         */
        if (s->be_data == MO_BE) {
            tcg_gen_concat_i32_i64(n64, t2, t1);
        } else {
            tcg_gen_concat_i32_i64(n64, t1, t2);
        }
        tcg_temp_free_i32(t2);

        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        t2 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
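/*
 * Illustrative sketch only: how a decoder typically pairs the two helpers
 * above for a word-sized LDREX/STREX sequence.  The wrapper name
 * gen_ldrex_strex_sketch is an assumption made for this example and is not
 * called by the real decode paths below, which open-code the same calls.
 */
static void gen_ldrex_strex_sketch(DisasContext *s, int rd, int rt, int rn)
{
    TCGv_i32 addr = tcg_temp_local_new_i32();

    load_reg_var(s, addr, rn);
    /* LDREX Rt, [Rn]: remember address/value in cpu_exclusive_{addr,val} */
    gen_load_exclusive(s, rt, 15, addr, 2);
    /* STREX Rd, Rt, [Rn]: Rd becomes 0 on success, 1 on failure */
    gen_store_exclusive(s, rd, rt, 15, addr, 2);
    tcg_temp_free_i32(addr);
}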
/*
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        unallocated_encoding(s);
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        switch (amode) {
        case 0: /* DA */
            offset = -8;
            break;
        case 1: /* IA */
            offset = 4;
            break;
        case 2: /* DB */
            offset = -4;
            break;
        case 3: /* IB */
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    s->base.is_jmp = DISAS_UPDATE;
}
/* Generate a label used for skipping this instruction */
static void arm_gen_condlabel(DisasContext *s)
{
    if (!s->condjmp) {
        s->condlabel = gen_new_label();
        s->condjmp = 1;
    }
}

/* Skip this instruction if the ARM condition is false */
static void arm_skip_unless(DisasContext *s, uint32_t cond)
{
    arm_gen_condlabel(s);
    arm_gen_test_cc(cond ^ 1, s->condlabel);
}
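/*
 * Illustrative sketch only: the conditional-execution pattern used by the
 * ARM decoder below.  gen_cond_body_sketch is an assumed wrapper for the
 * sake of the example; in the real code the arm_skip_unless() call sits at
 * the top of disas_arm_insn() and the condlabel is resolved when the
 * translator finishes the instruction, not inside a helper like this.
 */
static void gen_cond_body_sketch(DisasContext *s, uint32_t cond, int rd)
{
    TCGv_i32 tmp;

    if (cond != 0xe) {
        /* Branch over the body if the condition fails */
        arm_skip_unless(s, cond);
    }
    /* ...conditionally executed body, e.g. MOV rd, #0... */
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    store_reg(s, rd, tmp);
}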
7702 static void disas_arm_insn(DisasContext
*s
, unsigned int insn
)
7704 unsigned int cond
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
7711 /* M variants do not implement ARM mode; this must raise the INVSTATE
7712 * UsageFault exception.
7714 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
7715 gen_exception_insn(s
, s
->pc_curr
, EXCP_INVSTATE
, syn_uncategorized(),
7716 default_exception_el(s
));
7721 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7722 * choose to UNDEF. In ARMv5 and above the space is used
7723 * for miscellaneous unconditional instructions.
7727 /* Unconditional instructions. */
7728 if (((insn
>> 25) & 7) == 1) {
7729 /* NEON Data processing. */
7730 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
7734 if (disas_neon_data_insn(s
, insn
)) {
7739 if ((insn
& 0x0f100000) == 0x04000000) {
7740 /* NEON load/store. */
7741 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
7745 if (disas_neon_ls_insn(s
, insn
)) {
7750 if ((insn
& 0x0f000e10) == 0x0e000a00) {
7752 if (disas_vfp_insn(s
, insn
)) {
7757 if (((insn
& 0x0f30f000) == 0x0510f000) ||
7758 ((insn
& 0x0f30f010) == 0x0710f000)) {
7759 if ((insn
& (1 << 22)) == 0) {
7761 if (!arm_dc_feature(s
, ARM_FEATURE_V7MP
)) {
7765 /* Otherwise PLD; v5TE+ */
7769 if (((insn
& 0x0f70f000) == 0x0450f000) ||
7770 ((insn
& 0x0f70f010) == 0x0650f000)) {
7772 return; /* PLI; V7 */
7774 if (((insn
& 0x0f700000) == 0x04100000) ||
7775 ((insn
& 0x0f700010) == 0x06100000)) {
7776 if (!arm_dc_feature(s
, ARM_FEATURE_V7MP
)) {
7779 return; /* v7MP: Unallocated memory hint: must NOP */
7782 if ((insn
& 0x0ffffdff) == 0x01010000) {
7785 if (((insn
>> 9) & 1) != !!(s
->be_data
== MO_BE
)) {
7786 gen_helper_setend(cpu_env
);
7787 s
->base
.is_jmp
= DISAS_UPDATE
;
7790 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
7791 switch ((insn
>> 4) & 0xf) {
7799 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
7802 /* We need to break the TB after this insn to execute
7803 * self-modifying code correctly and also to take
7804 * any pending interrupts immediately.
7806 gen_goto_tb(s
, 0, s
->base
.pc_next
);
7809 if ((insn
& 0xf) || !dc_isar_feature(aa32_sb
, s
)) {
7813 * TODO: There is no speculation barrier opcode
7814 * for TCG; MB and end the TB instead.
7816 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
7817 gen_goto_tb(s
, 0, s
->base
.pc_next
);
7822 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
7825 gen_srs(s
, (insn
& 0x1f), (insn
>> 23) & 3, insn
& (1 << 21));
7827 } else if ((insn
& 0x0e50ffe0) == 0x08100a00) {
7833 rn
= (insn
>> 16) & 0xf;
7834 addr
= load_reg(s
, rn
);
7835 i
= (insn
>> 23) & 3;
7837 case 0: offset
= -4; break; /* DA */
7838 case 1: offset
= 0; break; /* IA */
7839 case 2: offset
= -8; break; /* DB */
7840 case 3: offset
= 4; break; /* IB */
7844 tcg_gen_addi_i32(addr
, addr
, offset
);
7845 /* Load PC into tmp and CPSR into tmp2. */
7846 tmp
= tcg_temp_new_i32();
7847 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
7848 tcg_gen_addi_i32(addr
, addr
, 4);
7849 tmp2
= tcg_temp_new_i32();
7850 gen_aa32_ld32u(s
, tmp2
, addr
, get_mem_index(s
));
7851 if (insn
& (1 << 21)) {
7852 /* Base writeback. */
7854 case 0: offset
= -8; break;
7855 case 1: offset
= 4; break;
7856 case 2: offset
= -4; break;
7857 case 3: offset
= 0; break;
7861 tcg_gen_addi_i32(addr
, addr
, offset
);
7862 store_reg(s
, rn
, addr
);
7864 tcg_temp_free_i32(addr
);
7866 gen_rfe(s
, tmp
, tmp2
);
7868 } else if ((insn
& 0x0e000000) == 0x0a000000) {
7869 /* branch link and change to thumb (blx <offset>) */
7872 tmp
= tcg_temp_new_i32();
7873 tcg_gen_movi_i32(tmp
, s
->base
.pc_next
);
7874 store_reg(s
, 14, tmp
);
7875 /* Sign-extend the 24-bit offset */
7876 offset
= (((int32_t)insn
) << 8) >> 8;
7878 /* offset * 4 + bit24 * 2 + (thumb bit) */
7879 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
7880 /* protected by ARCH(5); above, near the start of uncond block */
7883 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
7884 if (arm_dc_feature(s
, ARM_FEATURE_IWMMXT
)) {
7885 /* iWMMXt register transfer. */
7886 if (extract32(s
->c15_cpar
, 1, 1)) {
7887 if (!disas_iwmmxt_insn(s
, insn
)) {
7892 } else if ((insn
& 0x0e000a00) == 0x0c000800
7893 && arm_dc_feature(s
, ARM_FEATURE_V8
)) {
7894 if (disas_neon_insn_3same_ext(s
, insn
)) {
7898 } else if ((insn
& 0x0f000a00) == 0x0e000800
7899 && arm_dc_feature(s
, ARM_FEATURE_V8
)) {
7900 if (disas_neon_insn_2reg_scalar_ext(s
, insn
)) {
7904 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
7905 /* Coprocessor double register transfer. */
7907 } else if ((insn
& 0x0f000010) == 0x0e000010) {
7908 /* Additional coprocessor register transfer. */
7909 } else if ((insn
& 0x0ff10020) == 0x01000000) {
7912 /* cps (privileged) */
7916 if (insn
& (1 << 19)) {
7917 if (insn
& (1 << 8))
7919 if (insn
& (1 << 7))
7921 if (insn
& (1 << 6))
7923 if (insn
& (1 << 18))
7926 if (insn
& (1 << 17)) {
7928 val
|= (insn
& 0x1f);
7931 gen_set_psr_im(s
, mask
, 0, val
);
7938 /* if not always execute, we generate a conditional jump to
7940 arm_skip_unless(s
, cond
);
7942 if ((insn
& 0x0f900000) == 0x03000000) {
7943 if ((insn
& (1 << 21)) == 0) {
7945 rd
= (insn
>> 12) & 0xf;
7946 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
7947 if ((insn
& (1 << 22)) == 0) {
7949 tmp
= tcg_temp_new_i32();
7950 tcg_gen_movi_i32(tmp
, val
);
7953 tmp
= load_reg(s
, rd
);
7954 tcg_gen_ext16u_i32(tmp
, tmp
);
7955 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
7957 store_reg(s
, rd
, tmp
);
7959 if (((insn
>> 12) & 0xf) != 0xf)
7961 if (((insn
>> 16) & 0xf) == 0) {
7962 gen_nop_hint(s
, insn
& 0xff);
7964 /* CPSR = immediate */
7966 shift
= ((insn
>> 8) & 0xf) * 2;
7967 val
= ror32(val
, shift
);
7968 i
= ((insn
& (1 << 22)) != 0);
7969 if (gen_set_psr_im(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
),
7975 } else if ((insn
& 0x0f900000) == 0x01000000
7976 && (insn
& 0x00000090) != 0x00000090) {
7977 /* miscellaneous instructions */
7978 op1
= (insn
>> 21) & 3;
7979 sh
= (insn
>> 4) & 0xf;
7982 case 0x0: /* MSR, MRS */
7983 if (insn
& (1 << 9)) {
7984 /* MSR (banked) and MRS (banked) */
7985 int sysm
= extract32(insn
, 16, 4) |
7986 (extract32(insn
, 8, 1) << 4);
7987 int r
= extract32(insn
, 22, 1);
7991 gen_msr_banked(s
, r
, sysm
, rm
);
7994 int rd
= extract32(insn
, 12, 4);
7996 gen_mrs_banked(s
, r
, sysm
, rd
);
8001 /* MSR, MRS (for PSRs) */
8004 tmp
= load_reg(s
, rm
);
8005 i
= ((op1
& 2) != 0);
8006 if (gen_set_psr(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
8010 rd
= (insn
>> 12) & 0xf;
8014 tmp
= load_cpu_field(spsr
);
8016 tmp
= tcg_temp_new_i32();
8017 gen_helper_cpsr_read(tmp
, cpu_env
);
8019 store_reg(s
, rd
, tmp
);
8024 /* branch/exchange thumb (bx). */
8026 tmp
= load_reg(s
, rm
);
8028 } else if (op1
== 3) {
8031 rd
= (insn
>> 12) & 0xf;
8032 tmp
= load_reg(s
, rm
);
8033 tcg_gen_clzi_i32(tmp
, tmp
, 32);
8034 store_reg(s
, rd
, tmp
);
8042 /* Trivial implementation equivalent to bx. */
8043 tmp
= load_reg(s
, rm
);
8054 /* branch link/exchange thumb (blx) */
8055 tmp
= load_reg(s
, rm
);
8056 tmp2
= tcg_temp_new_i32();
8057 tcg_gen_movi_i32(tmp2
, s
->base
.pc_next
);
8058 store_reg(s
, 14, tmp2
);
8064 uint32_t c
= extract32(insn
, 8, 4);
8066 /* Check this CPU supports ARMv8 CRC instructions.
8067 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8068 * Bits 8, 10 and 11 should be zero.
8070 if (!dc_isar_feature(aa32_crc32
, s
) || op1
== 0x3 || (c
& 0xd) != 0) {
8074 rn
= extract32(insn
, 16, 4);
8075 rd
= extract32(insn
, 12, 4);
8077 tmp
= load_reg(s
, rn
);
8078 tmp2
= load_reg(s
, rm
);
8080 tcg_gen_andi_i32(tmp2
, tmp2
, 0xff);
8081 } else if (op1
== 1) {
8082 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff);
8084 tmp3
= tcg_const_i32(1 << op1
);
8086 gen_helper_crc32c(tmp
, tmp
, tmp2
, tmp3
);
8088 gen_helper_crc32(tmp
, tmp
, tmp2
, tmp3
);
8090 tcg_temp_free_i32(tmp2
);
8091 tcg_temp_free_i32(tmp3
);
8092 store_reg(s
, rd
, tmp
);
8095 case 0x5: /* saturating add/subtract */
8097 rd
= (insn
>> 12) & 0xf;
8098 rn
= (insn
>> 16) & 0xf;
8099 tmp
= load_reg(s
, rm
);
8100 tmp2
= load_reg(s
, rn
);
8102 gen_helper_add_saturate(tmp2
, cpu_env
, tmp2
, tmp2
);
8104 gen_helper_sub_saturate(tmp
, cpu_env
, tmp
, tmp2
);
8106 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp2
);
8107 tcg_temp_free_i32(tmp2
);
8108 store_reg(s
, rd
, tmp
);
8110 case 0x6: /* ERET */
8114 if (!arm_dc_feature(s
, ARM_FEATURE_V7VE
)) {
8117 if ((insn
& 0x000fff0f) != 0x0000000e) {
8118 /* UNPREDICTABLE; we choose to UNDEF */
8122 if (s
->current_el
== 2) {
8123 tmp
= load_cpu_field(elr_el
[2]);
8125 tmp
= load_reg(s
, 14);
8127 gen_exception_return(s
, tmp
);
8131 int imm16
= extract32(insn
, 0, 4) | (extract32(insn
, 8, 12) << 4);
8140 gen_exception_bkpt_insn(s
, syn_aa32_bkpt(imm16
, false));
8143 /* Hypervisor call (v7) */
8151 /* Secure monitor call (v6+) */
8159 g_assert_not_reached();
8163 case 0x8: /* signed multiply */
8168 rs
= (insn
>> 8) & 0xf;
8169 rn
= (insn
>> 12) & 0xf;
8170 rd
= (insn
>> 16) & 0xf;
8172 /* (32 * 16) >> 16 */
8173 tmp
= load_reg(s
, rm
);
8174 tmp2
= load_reg(s
, rs
);
8176 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
8179 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8180 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
8181 tmp
= tcg_temp_new_i32();
8182 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
8183 tcg_temp_free_i64(tmp64
);
8184 if ((sh
& 2) == 0) {
8185 tmp2
= load_reg(s
, rn
);
8186 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8187 tcg_temp_free_i32(tmp2
);
8189 store_reg(s
, rd
, tmp
);
8192 tmp
= load_reg(s
, rm
);
8193 tmp2
= load_reg(s
, rs
);
8194 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
8195 tcg_temp_free_i32(tmp2
);
8197 tmp64
= tcg_temp_new_i64();
8198 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8199 tcg_temp_free_i32(tmp
);
8200 gen_addq(s
, tmp64
, rn
, rd
);
8201 gen_storeq_reg(s
, rn
, rd
, tmp64
);
8202 tcg_temp_free_i64(tmp64
);
8205 tmp2
= load_reg(s
, rn
);
8206 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8207 tcg_temp_free_i32(tmp2
);
8209 store_reg(s
, rd
, tmp
);
8216 } else if (((insn
& 0x0e000000) == 0 &&
8217 (insn
& 0x00000090) != 0x90) ||
8218 ((insn
& 0x0e000000) == (1 << 25))) {
8219 int set_cc
, logic_cc
, shiftop
;
8221 op1
= (insn
>> 21) & 0xf;
8222 set_cc
= (insn
>> 20) & 1;
8223 logic_cc
= table_logic_cc
[op1
] & set_cc
;
8225 /* data processing instruction */
8226 if (insn
& (1 << 25)) {
8227 /* immediate operand */
8229 shift
= ((insn
>> 8) & 0xf) * 2;
8230 val
= ror32(val
, shift
);
8231 tmp2
= tcg_temp_new_i32();
8232 tcg_gen_movi_i32(tmp2
, val
);
8233 if (logic_cc
&& shift
) {
8234 gen_set_CF_bit31(tmp2
);
8239 tmp2
= load_reg(s
, rm
);
8240 shiftop
= (insn
>> 5) & 3;
8241 if (!(insn
& (1 << 4))) {
8242 shift
= (insn
>> 7) & 0x1f;
8243 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
8245 rs
= (insn
>> 8) & 0xf;
8246 tmp
= load_reg(s
, rs
);
8247 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
8250 if (op1
!= 0x0f && op1
!= 0x0d) {
8251 rn
= (insn
>> 16) & 0xf;
8252 tmp
= load_reg(s
, rn
);
8256 rd
= (insn
>> 12) & 0xf;
8259 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
8263 store_reg_bx(s
, rd
, tmp
);
8266 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
8270 store_reg_bx(s
, rd
, tmp
);
8273 if (set_cc
&& rd
== 15) {
8274 /* SUBS r15, ... is used for exception return. */
8278 gen_sub_CC(tmp
, tmp
, tmp2
);
8279 gen_exception_return(s
, tmp
);
8282 gen_sub_CC(tmp
, tmp
, tmp2
);
8284 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8286 store_reg_bx(s
, rd
, tmp
);
8291 gen_sub_CC(tmp
, tmp2
, tmp
);
8293 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
8295 store_reg_bx(s
, rd
, tmp
);
8299 gen_add_CC(tmp
, tmp
, tmp2
);
8301 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8303 store_reg_bx(s
, rd
, tmp
);
8307 gen_adc_CC(tmp
, tmp
, tmp2
);
8309 gen_add_carry(tmp
, tmp
, tmp2
);
8311 store_reg_bx(s
, rd
, tmp
);
8315 gen_sbc_CC(tmp
, tmp
, tmp2
);
8317 gen_sub_carry(tmp
, tmp
, tmp2
);
8319 store_reg_bx(s
, rd
, tmp
);
8323 gen_sbc_CC(tmp
, tmp2
, tmp
);
8325 gen_sub_carry(tmp
, tmp2
, tmp
);
8327 store_reg_bx(s
, rd
, tmp
);
8331 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
8334 tcg_temp_free_i32(tmp
);
8338 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
8341 tcg_temp_free_i32(tmp
);
8345 gen_sub_CC(tmp
, tmp
, tmp2
);
8347 tcg_temp_free_i32(tmp
);
8351 gen_add_CC(tmp
, tmp
, tmp2
);
8353 tcg_temp_free_i32(tmp
);
8356 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
8360 store_reg_bx(s
, rd
, tmp
);
8363 if (logic_cc
&& rd
== 15) {
8364 /* MOVS r15, ... is used for exception return. */
8368 gen_exception_return(s
, tmp2
);
8373 store_reg_bx(s
, rd
, tmp2
);
8377 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
8381 store_reg_bx(s
, rd
, tmp
);
8385 tcg_gen_not_i32(tmp2
, tmp2
);
8389 store_reg_bx(s
, rd
, tmp2
);
8392 if (op1
!= 0x0f && op1
!= 0x0d) {
8393 tcg_temp_free_i32(tmp2
);
8396 /* other instructions */
8397 op1
= (insn
>> 24) & 0xf;
8401 /* multiplies, extra load/stores */
8402 sh
= (insn
>> 5) & 3;
8405 rd
= (insn
>> 16) & 0xf;
8406 rn
= (insn
>> 12) & 0xf;
8407 rs
= (insn
>> 8) & 0xf;
8409 op1
= (insn
>> 20) & 0xf;
8411 case 0: case 1: case 2: case 3: case 6:
8413 tmp
= load_reg(s
, rs
);
8414 tmp2
= load_reg(s
, rm
);
8415 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
8416 tcg_temp_free_i32(tmp2
);
8417 if (insn
& (1 << 22)) {
8418 /* Subtract (mls) */
8420 tmp2
= load_reg(s
, rn
);
8421 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
8422 tcg_temp_free_i32(tmp2
);
8423 } else if (insn
& (1 << 21)) {
8425 tmp2
= load_reg(s
, rn
);
8426 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8427 tcg_temp_free_i32(tmp2
);
8429 if (insn
& (1 << 20))
8431 store_reg(s
, rd
, tmp
);
8434 /* 64 bit mul double accumulate (UMAAL) */
8436 tmp
= load_reg(s
, rs
);
8437 tmp2
= load_reg(s
, rm
);
8438 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
8439 gen_addq_lo(s
, tmp64
, rn
);
8440 gen_addq_lo(s
, tmp64
, rd
);
8441 gen_storeq_reg(s
, rn
, rd
, tmp64
);
8442 tcg_temp_free_i64(tmp64
);
8444 case 8: case 9: case 10: case 11:
8445 case 12: case 13: case 14: case 15:
8446 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8447 tmp
= load_reg(s
, rs
);
8448 tmp2
= load_reg(s
, rm
);
8449 if (insn
& (1 << 22)) {
8450 tcg_gen_muls2_i32(tmp
, tmp2
, tmp
, tmp2
);
8452 tcg_gen_mulu2_i32(tmp
, tmp2
, tmp
, tmp2
);
8454 if (insn
& (1 << 21)) { /* mult accumulate */
8455 TCGv_i32 al
= load_reg(s
, rn
);
8456 TCGv_i32 ah
= load_reg(s
, rd
);
8457 tcg_gen_add2_i32(tmp
, tmp2
, tmp
, tmp2
, al
, ah
);
8458 tcg_temp_free_i32(al
);
8459 tcg_temp_free_i32(ah
);
8461 if (insn
& (1 << 20)) {
8462 gen_logicq_cc(tmp
, tmp2
);
8464 store_reg(s
, rn
, tmp
);
8465 store_reg(s
, rd
, tmp2
);
8471 rn
= (insn
>> 16) & 0xf;
8472 rd
= (insn
>> 12) & 0xf;
8473 if (insn
& (1 << 23)) {
8474 /* load/store exclusive */
8475 bool is_ld
= extract32(insn
, 20, 1);
8476 bool is_lasr
= !extract32(insn
, 8, 1);
8477 int op2
= (insn
>> 8) & 3;
8478 op1
= (insn
>> 21) & 0x3;
8481 case 0: /* lda/stl */
8487 case 1: /* reserved */
8489 case 2: /* ldaex/stlex */
8492 case 3: /* ldrex/strex */
8501 addr
= tcg_temp_local_new_i32();
8502 load_reg_var(s
, addr
, rn
);
8504 if (is_lasr
&& !is_ld
) {
8505 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_STRL
);
8510 tmp
= tcg_temp_new_i32();
8513 gen_aa32_ld32u_iss(s
, tmp
, addr
,
8518 gen_aa32_ld8u_iss(s
, tmp
, addr
,
8523 gen_aa32_ld16u_iss(s
, tmp
, addr
,
8530 store_reg(s
, rd
, tmp
);
8533 tmp
= load_reg(s
, rm
);
8536 gen_aa32_st32_iss(s
, tmp
, addr
,
8541 gen_aa32_st8_iss(s
, tmp
, addr
,
8546 gen_aa32_st16_iss(s
, tmp
, addr
,
8553 tcg_temp_free_i32(tmp
);
8558 gen_load_exclusive(s
, rd
, 15, addr
, 2);
8560 case 1: /* ldrexd */
8561 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
8563 case 2: /* ldrexb */
8564 gen_load_exclusive(s
, rd
, 15, addr
, 0);
8566 case 3: /* ldrexh */
8567 gen_load_exclusive(s
, rd
, 15, addr
, 1);
8576 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
8578 case 1: /* strexd */
8579 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
8581 case 2: /* strexb */
8582 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
8584 case 3: /* strexh */
8585 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
8591 tcg_temp_free_i32(addr
);
8593 if (is_lasr
&& is_ld
) {
8594 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_LDAQ
);
8596 } else if ((insn
& 0x00300f00) == 0) {
8597 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
8602 TCGMemOp opc
= s
->be_data
;
8606 if (insn
& (1 << 22)) {
8609 opc
|= MO_UL
| MO_ALIGN
;
8612 addr
= load_reg(s
, rn
);
8613 taddr
= gen_aa32_addr(s
, addr
, opc
);
8614 tcg_temp_free_i32(addr
);
8616 tmp
= load_reg(s
, rm
);
8617 tcg_gen_atomic_xchg_i32(tmp
, taddr
, tmp
,
8618 get_mem_index(s
), opc
);
8619 tcg_temp_free(taddr
);
8620 store_reg(s
, rd
, tmp
);
8627 bool load
= insn
& (1 << 20);
8628 bool wbit
= insn
& (1 << 21);
8629 bool pbit
= insn
& (1 << 24);
8630 bool doubleword
= false;
8633 /* Misc load/store */
8634 rn
= (insn
>> 16) & 0xf;
8635 rd
= (insn
>> 12) & 0xf;
8637 /* ISS not valid if writeback */
8638 issinfo
= (pbit
& !wbit
) ? rd
: ISSInvalid
;
8640 if (!load
&& (sh
& 2)) {
8644 /* UNPREDICTABLE; we choose to UNDEF */
8647 load
= (sh
& 1) == 0;
8651 addr
= load_reg(s
, rn
);
8653 gen_add_datah_offset(s
, insn
, 0, addr
);
8660 tmp
= load_reg(s
, rd
);
8661 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
8662 tcg_temp_free_i32(tmp
);
8663 tcg_gen_addi_i32(addr
, addr
, 4);
8664 tmp
= load_reg(s
, rd
+ 1);
8665 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
8666 tcg_temp_free_i32(tmp
);
8669 tmp
= tcg_temp_new_i32();
8670 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
8671 store_reg(s
, rd
, tmp
);
8672 tcg_gen_addi_i32(addr
, addr
, 4);
8673 tmp
= tcg_temp_new_i32();
8674 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
8677 address_offset
= -4;
8680 tmp
= tcg_temp_new_i32();
8683 gen_aa32_ld16u_iss(s
, tmp
, addr
, get_mem_index(s
),
8687 gen_aa32_ld8s_iss(s
, tmp
, addr
, get_mem_index(s
),
8692 gen_aa32_ld16s_iss(s
, tmp
, addr
, get_mem_index(s
),
8698 tmp
= load_reg(s
, rd
);
8699 gen_aa32_st16_iss(s
, tmp
, addr
, get_mem_index(s
), issinfo
);
8700 tcg_temp_free_i32(tmp
);
8702 /* Perform base writeback before the loaded value to
8703 ensure correct behavior with overlapping index registers.
8704 ldrd with base writeback is undefined if the
8705 destination and index registers overlap. */
8707 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
8708 store_reg(s
, rn
, addr
);
8711 tcg_gen_addi_i32(addr
, addr
, address_offset
);
8712 store_reg(s
, rn
, addr
);
8714 tcg_temp_free_i32(addr
);
8717 /* Complete the load. */
8718 store_reg(s
, rd
, tmp
);
8727 if (insn
& (1 << 4)) {
8729 /* Armv6 Media instructions. */
8731 rn
= (insn
>> 16) & 0xf;
8732 rd
= (insn
>> 12) & 0xf;
8733 rs
= (insn
>> 8) & 0xf;
8734 switch ((insn
>> 23) & 3) {
8735 case 0: /* Parallel add/subtract. */
8736 op1
= (insn
>> 20) & 7;
8737 tmp
= load_reg(s
, rn
);
8738 tmp2
= load_reg(s
, rm
);
8739 sh
= (insn
>> 5) & 7;
8740 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
8742 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
8743 tcg_temp_free_i32(tmp2
);
8744 store_reg(s
, rd
, tmp
);
8747 if ((insn
& 0x00700020) == 0) {
8748 /* Halfword pack. */
8749 tmp
= load_reg(s
, rn
);
8750 tmp2
= load_reg(s
, rm
);
8751 shift
= (insn
>> 7) & 0x1f;
8752 if (insn
& (1 << 6)) {
8757 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
8758 tcg_gen_deposit_i32(tmp
, tmp
, tmp2
, 0, 16);
8761 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
8762 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, 0, 16);
8764 tcg_temp_free_i32(tmp2
);
8765 store_reg(s
, rd
, tmp
);
8766 } else if ((insn
& 0x00200020) == 0x00200000) {
8768 tmp
= load_reg(s
, rm
);
8769 shift
= (insn
>> 7) & 0x1f;
8770 if (insn
& (1 << 6)) {
8773 tcg_gen_sari_i32(tmp
, tmp
, shift
);
8775 tcg_gen_shli_i32(tmp
, tmp
, shift
);
8777 sh
= (insn
>> 16) & 0x1f;
8778 tmp2
= tcg_const_i32(sh
);
8779 if (insn
& (1 << 22))
8780 gen_helper_usat(tmp
, cpu_env
, tmp
, tmp2
);
8782 gen_helper_ssat(tmp
, cpu_env
, tmp
, tmp2
);
8783 tcg_temp_free_i32(tmp2
);
8784 store_reg(s
, rd
, tmp
);
8785 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
8787 tmp
= load_reg(s
, rm
);
8788 sh
= (insn
>> 16) & 0x1f;
8789 tmp2
= tcg_const_i32(sh
);
8790 if (insn
& (1 << 22))
8791 gen_helper_usat16(tmp
, cpu_env
, tmp
, tmp2
);
8793 gen_helper_ssat16(tmp
, cpu_env
, tmp
, tmp2
);
8794 tcg_temp_free_i32(tmp2
);
8795 store_reg(s
, rd
, tmp
);
8796 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
8798 tmp
= load_reg(s
, rn
);
8799 tmp2
= load_reg(s
, rm
);
8800 tmp3
= tcg_temp_new_i32();
8801 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
8802 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
8803 tcg_temp_free_i32(tmp3
);
8804 tcg_temp_free_i32(tmp2
);
8805 store_reg(s
, rd
, tmp
);
8806 } else if ((insn
& 0x000003e0) == 0x00000060) {
8807 tmp
= load_reg(s
, rm
);
8808 shift
= (insn
>> 10) & 3;
8809 /* ??? In many cases it's not necessary to do a
8810 rotate, a shift is sufficient. */
8811 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
8812 op1
= (insn
>> 20) & 7;
8814 case 0: gen_sxtb16(tmp
); break;
8815 case 2: gen_sxtb(tmp
); break;
8816 case 3: gen_sxth(tmp
); break;
8817 case 4: gen_uxtb16(tmp
); break;
8818 case 6: gen_uxtb(tmp
); break;
8819 case 7: gen_uxth(tmp
); break;
8820 default: goto illegal_op
;
8823 tmp2
= load_reg(s
, rn
);
8824 if ((op1
& 3) == 0) {
8825 gen_add16(tmp
, tmp2
);
8827 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8828 tcg_temp_free_i32(tmp2
);
8831 store_reg(s
, rd
, tmp
);
8832 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
8834 tmp
= load_reg(s
, rm
);
8835 if (insn
& (1 << 22)) {
8836 if (insn
& (1 << 7)) {
8840 gen_helper_rbit(tmp
, tmp
);
8843 if (insn
& (1 << 7))
8846 tcg_gen_bswap32_i32(tmp
, tmp
);
8848 store_reg(s
, rd
, tmp
);
8853 case 2: /* Multiplies (Type 3). */
8854 switch ((insn
>> 20) & 0x7) {
8856 if (((insn
>> 6) ^ (insn
>> 7)) & 1) {
8857 /* op2 not 00x or 11x : UNDEF */
8860 /* Signed multiply most significant [accumulate].
8861 (SMMUL, SMMLA, SMMLS) */
8862 tmp
= load_reg(s
, rm
);
8863 tmp2
= load_reg(s
, rs
);
8864 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8867 tmp
= load_reg(s
, rd
);
8868 if (insn
& (1 << 6)) {
8869 tmp64
= gen_subq_msw(tmp64
, tmp
);
8871 tmp64
= gen_addq_msw(tmp64
, tmp
);
8874 if (insn
& (1 << 5)) {
8875 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
8877 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
8878 tmp
= tcg_temp_new_i32();
8879 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
8880 tcg_temp_free_i64(tmp64
);
8881 store_reg(s
, rn
, tmp
);
8885 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8886 if (insn
& (1 << 7)) {
8889 tmp
= load_reg(s
, rm
);
8890 tmp2
= load_reg(s
, rs
);
8891 if (insn
& (1 << 5))
8892 gen_swap_half(tmp2
);
8893 gen_smul_dual(tmp
, tmp2
);
8894 if (insn
& (1 << 22)) {
8895 /* smlald, smlsld */
8898 tmp64
= tcg_temp_new_i64();
8899 tmp64_2
= tcg_temp_new_i64();
8900 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8901 tcg_gen_ext_i32_i64(tmp64_2
, tmp2
);
8902 tcg_temp_free_i32(tmp
);
8903 tcg_temp_free_i32(tmp2
);
8904 if (insn
& (1 << 6)) {
8905 tcg_gen_sub_i64(tmp64
, tmp64
, tmp64_2
);
8907 tcg_gen_add_i64(tmp64
, tmp64
, tmp64_2
);
8909 tcg_temp_free_i64(tmp64_2
);
8910 gen_addq(s
, tmp64
, rd
, rn
);
8911 gen_storeq_reg(s
, rd
, rn
, tmp64
);
8912 tcg_temp_free_i64(tmp64
);
8914 /* smuad, smusd, smlad, smlsd */
8915 if (insn
& (1 << 6)) {
8916 /* This subtraction cannot overflow. */
8917 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8919 /* This addition cannot overflow 32 bits;
8920 * however it may overflow considered as a
8921 * signed operation, in which case we must set
8924 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8926 tcg_temp_free_i32(tmp2
);
8929 tmp2
= load_reg(s
, rd
);
8930 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8931 tcg_temp_free_i32(tmp2
);
8933 store_reg(s
, rn
, tmp
);
8939 if (!dc_isar_feature(arm_div
, s
)) {
8942 if (((insn
>> 5) & 7) || (rd
!= 15)) {
8945 tmp
= load_reg(s
, rm
);
8946 tmp2
= load_reg(s
, rs
);
8947 if (insn
& (1 << 21)) {
8948 gen_helper_udiv(tmp
, tmp
, tmp2
);
8950 gen_helper_sdiv(tmp
, tmp
, tmp2
);
8952 tcg_temp_free_i32(tmp2
);
8953 store_reg(s
, rn
, tmp
);
8960 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
8962 case 0: /* Unsigned sum of absolute differences. */
8964 tmp
= load_reg(s
, rm
);
8965 tmp2
= load_reg(s
, rs
);
8966 gen_helper_usad8(tmp
, tmp
, tmp2
);
8967 tcg_temp_free_i32(tmp2
);
8969 tmp2
= load_reg(s
, rd
);
8970 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8971 tcg_temp_free_i32(tmp2
);
8973 store_reg(s
, rn
, tmp
);
8975 case 0x20: case 0x24: case 0x28: case 0x2c:
8976 /* Bitfield insert/clear. */
8978 shift
= (insn
>> 7) & 0x1f;
8979 i
= (insn
>> 16) & 0x1f;
8981 /* UNPREDICTABLE; we choose to UNDEF */
8986 tmp
= tcg_temp_new_i32();
8987 tcg_gen_movi_i32(tmp
, 0);
8989 tmp
= load_reg(s
, rm
);
8992 tmp2
= load_reg(s
, rd
);
8993 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, shift
, i
);
8994 tcg_temp_free_i32(tmp2
);
8996 store_reg(s
, rd
, tmp
);
8998 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8999 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
9001 tmp
= load_reg(s
, rm
);
9002 shift
= (insn
>> 7) & 0x1f;
9003 i
= ((insn
>> 16) & 0x1f) + 1;
9008 tcg_gen_extract_i32(tmp
, tmp
, shift
, i
);
9010 tcg_gen_sextract_i32(tmp
, tmp
, shift
, i
);
9013 store_reg(s
, rd
, tmp
);
9023 /* Check for undefined extension instructions
9024 * per the ARM Bible IE:
9025 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9027 sh
= (0xf << 20) | (0xf << 4);
9028 if (op1
== 0x7 && ((insn
& sh
) == sh
))
9032 /* load/store byte/word */
9033 rn
= (insn
>> 16) & 0xf;
9034 rd
= (insn
>> 12) & 0xf;
9035 tmp2
= load_reg(s
, rn
);
9036 if ((insn
& 0x01200000) == 0x00200000) {
9038 i
= get_a32_user_mem_index(s
);
9040 i
= get_mem_index(s
);
9042 if (insn
& (1 << 24))
9043 gen_add_data_offset(s
, insn
, tmp2
);
9044 if (insn
& (1 << 20)) {
9046 tmp
= tcg_temp_new_i32();
9047 if (insn
& (1 << 22)) {
9048 gen_aa32_ld8u_iss(s
, tmp
, tmp2
, i
, rd
);
9050 gen_aa32_ld32u_iss(s
, tmp
, tmp2
, i
, rd
);
9054 tmp
= load_reg(s
, rd
);
9055 if (insn
& (1 << 22)) {
9056 gen_aa32_st8_iss(s
, tmp
, tmp2
, i
, rd
);
9058 gen_aa32_st32_iss(s
, tmp
, tmp2
, i
, rd
);
9060 tcg_temp_free_i32(tmp
);
9062 if (!(insn
& (1 << 24))) {
9063 gen_add_data_offset(s
, insn
, tmp2
);
9064 store_reg(s
, rn
, tmp2
);
9065 } else if (insn
& (1 << 21)) {
9066 store_reg(s
, rn
, tmp2
);
9068 tcg_temp_free_i32(tmp2
);
9070 if (insn
& (1 << 20)) {
9071 /* Complete the load. */
9072 store_reg_from_load(s
, rd
, tmp
);
9078 int j
, n
, loaded_base
;
9079 bool exc_return
= false;
9080 bool is_load
= extract32(insn
, 20, 1);
9082 TCGv_i32 loaded_var
;
9083 /* load/store multiple words */
9084 /* XXX: store correct base if write back */
9085 if (insn
& (1 << 22)) {
9086 /* LDM (user), LDM (exception return) and STM (user) */
9088 goto illegal_op
; /* only usable in supervisor mode */
9090 if (is_load
&& extract32(insn
, 15, 1)) {
9096 rn
= (insn
>> 16) & 0xf;
9097 addr
= load_reg(s
, rn
);
9099 /* compute total size */
9103 for (i
= 0; i
< 16; i
++) {
9104 if (insn
& (1 << i
))
9107 /* XXX: test invalid n == 0 case ? */
9108 if (insn
& (1 << 23)) {
9109 if (insn
& (1 << 24)) {
9111 tcg_gen_addi_i32(addr
, addr
, 4);
9113 /* post increment */
9116 if (insn
& (1 << 24)) {
9118 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
9120 /* post decrement */
9122 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
9126 for (i
= 0; i
< 16; i
++) {
9127 if (insn
& (1 << i
)) {
9130 tmp
= tcg_temp_new_i32();
9131 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9133 tmp2
= tcg_const_i32(i
);
9134 gen_helper_set_user_reg(cpu_env
, tmp2
, tmp
);
9135 tcg_temp_free_i32(tmp2
);
9136 tcg_temp_free_i32(tmp
);
9137 } else if (i
== rn
) {
9140 } else if (i
== 15 && exc_return
) {
9141 store_pc_exc_ret(s
, tmp
);
9143 store_reg_from_load(s
, i
, tmp
);
9148 tmp
= tcg_temp_new_i32();
9149 tcg_gen_movi_i32(tmp
, read_pc(s
));
9151 tmp
= tcg_temp_new_i32();
9152 tmp2
= tcg_const_i32(i
);
9153 gen_helper_get_user_reg(tmp
, cpu_env
, tmp2
);
9154 tcg_temp_free_i32(tmp2
);
9156 tmp
= load_reg(s
, i
);
9158 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9159 tcg_temp_free_i32(tmp
);
9162 /* no need to add after the last transfer */
9164 tcg_gen_addi_i32(addr
, addr
, 4);
9167 if (insn
& (1 << 21)) {
9169 if (insn
& (1 << 23)) {
9170 if (insn
& (1 << 24)) {
9173 /* post increment */
9174 tcg_gen_addi_i32(addr
, addr
, 4);
9177 if (insn
& (1 << 24)) {
9180 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
9182 /* post decrement */
9183 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
9186 store_reg(s
, rn
, addr
);
9188 tcg_temp_free_i32(addr
);
9191 store_reg(s
, rn
, loaded_var
);
9194 /* Restore CPSR from SPSR. */
9195 tmp
= load_cpu_field(spsr
);
9196 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
9199 gen_helper_cpsr_write_eret(cpu_env
, tmp
);
9200 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
9203 tcg_temp_free_i32(tmp
);
9204 /* Must exit loop to check un-masked IRQs */
9205 s
->base
.is_jmp
= DISAS_EXIT
;
9214 /* branch (and link) */
9215 if (insn
& (1 << 24)) {
9216 tmp
= tcg_temp_new_i32();
9217 tcg_gen_movi_i32(tmp
, s
->base
.pc_next
);
9218 store_reg(s
, 14, tmp
);
9220 offset
= sextract32(insn
<< 2, 0, 26);
9221 gen_jmp(s
, read_pc(s
) + offset
);
9227 if (((insn
>> 8) & 0xe) == 10) {
9229 if (disas_vfp_insn(s
, insn
)) {
9232 } else if (disas_coproc_insn(s
, insn
)) {
9239 gen_set_pc_im(s
, s
->base
.pc_next
);
9240 s
->svc_imm
= extract32(insn
, 0, 24);
9241 s
->base
.is_jmp
= DISAS_SWI
;
9245 unallocated_encoding(s
);
static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
{
    /*
     * Return true if this is a 16 bit instruction. We must be precise
     * about this (matching the decode).
     */
    if ((insn >> 11) < 0x1d) {
        /* Definitely a 16-bit instruction */
        return true;
    }

    /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
     * first half of a 32-bit Thumb insn. Thumb-1 cores might
     * end up actually treating this as two 16-bit insns, though,
     * if it's half of a bl/blx pair that might span a page boundary.
     */
    if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Thumb2 cores (including all M profile ones) always treat
         * 32-bit insns as 32-bit.
         */
        return false;
    }

    if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
        /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
         * is not on the next page; we merge this into a 32-bit
         * insn.
         */
        return false;
    }
    /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
     * 0b1111_1xxx_xxxx_xxxx : BL suffix;
     * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
     *  -- handle as single 16 bit insn
     */
    return true;
}
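/*
 * Illustrative sketch only: how a Thumb fetch path would consult
 * thumb_insn_is_16bit() to decide whether to read a second halfword.
 * The wrapper name thumb_fetch_sketch is an assumption for this example,
 * and it assumes the arm_lduw_code()/sctlr_b fetch convention used by the
 * translator callbacks; the real fetch loop is not written this way.
 */
static uint32_t thumb_fetch_sketch(CPUARMState *env, DisasContext *s)
{
    /* Fetch the first halfword at the current translation PC */
    uint32_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);

    if (!thumb_insn_is_16bit(s, s->base.pc_next, insn)) {
        /* 32-bit encoding: the second halfword forms the low 16 bits */
        uint32_t insn2 = arm_lduw_code(env, s->base.pc_next + 2, s->sctlr_b);
        insn = insn << 16 | insn2;
    }
    return insn;
}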
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    return (op < 8);
}

/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.  */
static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
                   TCGv_i32 t0, TCGv_i32 t1)
{
    int logic_cc;

    logic_cc = 0;
    switch (op) {
    case 0: /* and */
        tcg_gen_and_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 1: /* bic */
        tcg_gen_andc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 2: /* orr */
        tcg_gen_or_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 3: /* orn */
        tcg_gen_orc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 4: /* eor */
        tcg_gen_xor_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 8: /* add */
        if (conds) {
            gen_add_CC(t0, t0, t1);
        } else {
            tcg_gen_add_i32(t0, t0, t1);
        }
        break;
    case 10: /* adc */
        if (conds) {
            gen_adc_CC(t0, t0, t1);
        } else {
            gen_adc(t0, t1);
        }
        break;
    case 11: /* sbc */
        if (conds) {
            gen_sbc_CC(t0, t0, t1);
        } else {
            gen_sub_carry(t0, t0, t1);
        }
        break;
    case 13: /* sub */
        if (conds) {
            gen_sub_CC(t0, t0, t1);
        } else {
            tcg_gen_sub_i32(t0, t0, t1);
        }
        break;
    case 14: /* rsb */
        if (conds) {
            gen_sub_CC(t0, t1, t0);
        } else {
            tcg_gen_sub_i32(t0, t1, t0);
        }
        break;
    default: /* 5, 6, 7, 9, 12, 15. */
        return 1;
    }
    if (logic_cc) {
        gen_logic_CC(t0);
        if (shifter_out) {
            gen_set_CF_bit31(t1);
        }
    }
    return 0;
}
9373 /* Translate a 32-bit thumb instruction. */
9374 static void disas_thumb2_insn(DisasContext
*s
, uint32_t insn
)
9376 uint32_t imm
, shift
, offset
;
9377 uint32_t rd
, rn
, rm
, rs
;
9389 * ARMv6-M supports a limited subset of Thumb2 instructions.
9390 * Other Thumb1 architectures allow only 32-bit
9391 * combined BL/BLX prefix and suffix.
9393 if (arm_dc_feature(s
, ARM_FEATURE_M
) &&
9394 !arm_dc_feature(s
, ARM_FEATURE_V7
)) {
9397 static const uint32_t armv6m_insn
[] = {0xf3808000 /* msr */,
9398 0xf3b08040 /* dsb */,
9399 0xf3b08050 /* dmb */,
9400 0xf3b08060 /* isb */,
9401 0xf3e08000 /* mrs */,
9402 0xf000d000 /* bl */};
9403 static const uint32_t armv6m_mask
[] = {0xffe0d000,
9410 for (i
= 0; i
< ARRAY_SIZE(armv6m_insn
); i
++) {
9411 if ((insn
& armv6m_mask
[i
]) == armv6m_insn
[i
]) {
9419 } else if ((insn
& 0xf800e800) != 0xf000e800) {
9423 rn
= (insn
>> 16) & 0xf;
9424 rs
= (insn
>> 12) & 0xf;
9425 rd
= (insn
>> 8) & 0xf;
9427 switch ((insn
>> 25) & 0xf) {
9428 case 0: case 1: case 2: case 3:
9429 /* 16-bit instructions. Should never happen. */
9432 if (insn
& (1 << 22)) {
9433 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
9434 * - load/store doubleword, load/store exclusive, ldacq/strel,
9437 if (insn
== 0xe97fe97f && arm_dc_feature(s
, ARM_FEATURE_M
) &&
9438 arm_dc_feature(s
, ARM_FEATURE_V8
)) {
9439 /* 0b1110_1001_0111_1111_1110_1001_0111_111
9441 * The bulk of the behaviour for this instruction is implemented
9442 * in v7m_handle_execute_nsc(), which deals with the insn when
9443 * it is executed by a CPU in non-secure state from memory
9444 * which is Secure & NonSecure-Callable.
9445 * Here we only need to handle the remaining cases:
9446 * * in NS memory (including the "security extension not
9447 * implemented" case) : NOP
9448 * * in S memory but CPU already secure (clear IT bits)
9449 * We know that the attribute for the memory this insn is
9450 * in must match the current CPU state, because otherwise
9451 * get_phys_addr_pmsav8 would have generated an exception.
9453 if (s
->v8m_secure
) {
9454 /* Like the IT insn, we don't need to generate any code */
9455 s
->condexec_cond
= 0;
9456 s
->condexec_mask
= 0;
9458 } else if (insn
& 0x01200000) {
9459 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9460 * - load/store dual (post-indexed)
9461 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
9462 * - load/store dual (literal and immediate)
9463 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9464 * - load/store dual (pre-indexed)
9466 bool wback
= extract32(insn
, 21, 1);
9468 if (rn
== 15 && (insn
& (1 << 21))) {
9473 addr
= add_reg_for_lit(s
, rn
, 0);
9474 offset
= (insn
& 0xff) * 4;
9475 if ((insn
& (1 << 23)) == 0) {
9479 if (s
->v8m_stackcheck
&& rn
== 13 && wback
) {
9481 * Here 'addr' is the current SP; if offset is +ve we're
9482 * moving SP up, else down. It is UNKNOWN whether the limit
9483 * check triggers when SP starts below the limit and ends
9484 * up above it; check whichever of the current and final
9485 * SP is lower, so QEMU will trigger in that situation.
9487 if ((int32_t)offset
< 0) {
9488 TCGv_i32 newsp
= tcg_temp_new_i32();
9490 tcg_gen_addi_i32(newsp
, addr
, offset
);
9491 gen_helper_v8m_stackcheck(cpu_env
, newsp
);
9492 tcg_temp_free_i32(newsp
);
9494 gen_helper_v8m_stackcheck(cpu_env
, addr
);
9498 if (insn
& (1 << 24)) {
9499 tcg_gen_addi_i32(addr
, addr
, offset
);
9502 if (insn
& (1 << 20)) {
9504 tmp
= tcg_temp_new_i32();
9505 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9506 store_reg(s
, rs
, tmp
);
9507 tcg_gen_addi_i32(addr
, addr
, 4);
9508 tmp
= tcg_temp_new_i32();
9509 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9510 store_reg(s
, rd
, tmp
);
9513 tmp
= load_reg(s
, rs
);
9514 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9515 tcg_temp_free_i32(tmp
);
9516 tcg_gen_addi_i32(addr
, addr
, 4);
9517 tmp
= load_reg(s
, rd
);
9518 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9519 tcg_temp_free_i32(tmp
);
9522 /* Base writeback. */
9523 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
9524 store_reg(s
, rn
, addr
);
9526 tcg_temp_free_i32(addr
);
9528 } else if ((insn
& (1 << 23)) == 0) {
9529 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
9530 * - load/store exclusive word
9534 if (!(insn
& (1 << 20)) &&
9535 arm_dc_feature(s
, ARM_FEATURE_M
) &&
9536 arm_dc_feature(s
, ARM_FEATURE_V8
)) {
9537 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
9540 bool alt
= insn
& (1 << 7);
9541 TCGv_i32 addr
, op
, ttresp
;
9543 if ((insn
& 0x3f) || rd
== 13 || rd
== 15 || rn
== 15) {
9544 /* we UNDEF for these UNPREDICTABLE cases */
9548 if (alt
&& !s
->v8m_secure
) {
9552 addr
= load_reg(s
, rn
);
9553 op
= tcg_const_i32(extract32(insn
, 6, 2));
9554 ttresp
= tcg_temp_new_i32();
9555 gen_helper_v7m_tt(ttresp
, cpu_env
, addr
, op
);
9556 tcg_temp_free_i32(addr
);
9557 tcg_temp_free_i32(op
);
9558 store_reg(s
, rd
, ttresp
);
9563 addr
= tcg_temp_local_new_i32();
9564 load_reg_var(s
, addr
, rn
);
9565 tcg_gen_addi_i32(addr
, addr
, (insn
& 0xff) << 2);
9566 if (insn
& (1 << 20)) {
9567 gen_load_exclusive(s
, rs
, 15, addr
, 2);
9569 gen_store_exclusive(s
, rd
, rs
, 15, addr
, 2);
9571 tcg_temp_free_i32(addr
);
9572 } else if ((insn
& (7 << 5)) == 0) {
9574 addr
= load_reg(s
, rn
);
9575 tmp
= load_reg(s
, rm
);
9576 tcg_gen_add_i32(addr
, addr
, tmp
);
9577 if (insn
& (1 << 4)) {
9579 tcg_gen_add_i32(addr
, addr
, tmp
);
9580 tcg_temp_free_i32(tmp
);
9581 tmp
= tcg_temp_new_i32();
9582 gen_aa32_ld16u(s
, tmp
, addr
, get_mem_index(s
));
9584 tcg_temp_free_i32(tmp
);
9585 tmp
= tcg_temp_new_i32();
9586 gen_aa32_ld8u(s
, tmp
, addr
, get_mem_index(s
));
9588 tcg_temp_free_i32(addr
);
9589 tcg_gen_shli_i32(tmp
, tmp
, 1);
9590 tcg_gen_addi_i32(tmp
, tmp
, read_pc(s
));
9591 store_reg(s
, 15, tmp
);
9593 bool is_lasr
= false;
9594 bool is_ld
= extract32(insn
, 20, 1);
9595 int op2
= (insn
>> 6) & 0x3;
9596 op
= (insn
>> 4) & 0x3;
9601 /* Load/store exclusive byte/halfword/doubleword */
9608 /* Load-acquire/store-release */
9614 /* Load-acquire/store-release exclusive */
9620 if (is_lasr
&& !is_ld
) {
9621 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_STRL
);
9624 addr
= tcg_temp_local_new_i32();
9625 load_reg_var(s
, addr
, rn
);
9628 tmp
= tcg_temp_new_i32();
9631 gen_aa32_ld8u_iss(s
, tmp
, addr
, get_mem_index(s
),
9635 gen_aa32_ld16u_iss(s
, tmp
, addr
, get_mem_index(s
),
9639 gen_aa32_ld32u_iss(s
, tmp
, addr
, get_mem_index(s
),
9645 store_reg(s
, rs
, tmp
);
9647 tmp
= load_reg(s
, rs
);
9650 gen_aa32_st8_iss(s
, tmp
, addr
, get_mem_index(s
),
9654 gen_aa32_st16_iss(s
, tmp
, addr
, get_mem_index(s
),
9658 gen_aa32_st32_iss(s
, tmp
, addr
, get_mem_index(s
),
9664 tcg_temp_free_i32(tmp
);
9667 gen_load_exclusive(s
, rs
, rd
, addr
, op
);
9669 gen_store_exclusive(s
, rm
, rs
, rd
, addr
, op
);
9671 tcg_temp_free_i32(addr
);
9673 if (is_lasr
&& is_ld
) {
9674 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_LDAQ
);
9678 /* Load/store multiple, RFE, SRS. */
9679 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
9680 /* RFE, SRS: not available in user mode or on M profile */
9681 if (IS_USER(s
) || arm_dc_feature(s
, ARM_FEATURE_M
)) {
9684 if (insn
& (1 << 20)) {
9686 addr
= load_reg(s
, rn
);
9687 if ((insn
& (1 << 24)) == 0)
9688 tcg_gen_addi_i32(addr
, addr
, -8);
9689 /* Load PC into tmp and CPSR into tmp2. */
9690 tmp
= tcg_temp_new_i32();
9691 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9692 tcg_gen_addi_i32(addr
, addr
, 4);
9693 tmp2
= tcg_temp_new_i32();
9694 gen_aa32_ld32u(s
, tmp2
, addr
, get_mem_index(s
));
9695 if (insn
& (1 << 21)) {
9696 /* Base writeback. */
9697 if (insn
& (1 << 24)) {
9698 tcg_gen_addi_i32(addr
, addr
, 4);
9700 tcg_gen_addi_i32(addr
, addr
, -4);
9702 store_reg(s
, rn
, addr
);
9704 tcg_temp_free_i32(addr
);
9706 gen_rfe(s
, tmp
, tmp2
);
9709 gen_srs(s
, (insn
& 0x1f), (insn
& (1 << 24)) ? 1 : 2,
9713 int i
, loaded_base
= 0;
9714 TCGv_i32 loaded_var
;
9715 bool wback
= extract32(insn
, 21, 1);
9716 /* Load/store multiple. */
9717 addr
= load_reg(s
, rn
);
9719 for (i
= 0; i
< 16; i
++) {
9720 if (insn
& (1 << i
))
9724 if (insn
& (1 << 24)) {
9725 tcg_gen_addi_i32(addr
, addr
, -offset
);
9728 if (s
->v8m_stackcheck
&& rn
== 13 && wback
) {
9730 * If the writeback is incrementing SP rather than
9731 * decrementing it, and the initial SP is below the
9732 * stack limit but the final written-back SP would
9733 * be above, then then we must not perform any memory
9734 * accesses, but it is IMPDEF whether we generate
9735 * an exception. We choose to do so in this case.
9736 * At this point 'addr' is the lowest address, so
9737 * either the original SP (if incrementing) or our
9738 * final SP (if decrementing), so that's what we check.
9740 gen_helper_v8m_stackcheck(cpu_env
, addr
);
9744 for (i
= 0; i
< 16; i
++) {
9745 if ((insn
& (1 << i
)) == 0)
9747 if (insn
& (1 << 20)) {
9749 tmp
= tcg_temp_new_i32();
9750 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9752 gen_bx_excret(s
, tmp
);
9753 } else if (i
== rn
) {
9757 store_reg(s
, i
, tmp
);
9761 tmp
= load_reg(s
, i
);
9762 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9763 tcg_temp_free_i32(tmp
);
9765 tcg_gen_addi_i32(addr
, addr
, 4);
9768 store_reg(s
, rn
, loaded_var
);
9771 /* Base register writeback. */
9772 if (insn
& (1 << 24)) {
9773 tcg_gen_addi_i32(addr
, addr
, -offset
);
9775 /* Fault if writeback register is in register list. */
9776 if (insn
& (1 << rn
))
9778 store_reg(s
, rn
, addr
);
9780 tcg_temp_free_i32(addr
);
9787 op
= (insn
>> 21) & 0xf;
9789 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9792 /* Halfword pack. */
9793 tmp
= load_reg(s
, rn
);
9794 tmp2
= load_reg(s
, rm
);
9795 shift
= ((insn
>> 10) & 0x1c) | ((insn
>> 6) & 0x3);
9796 if (insn
& (1 << 5)) {
9801 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
9802 tcg_gen_deposit_i32(tmp
, tmp
, tmp2
, 0, 16);
9805 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
9806 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, 0, 16);
9808 tcg_temp_free_i32(tmp2
);
9809 store_reg(s
, rd
, tmp
);
9811 /* Data processing register constant shift. */
9813 tmp
= tcg_temp_new_i32();
9814 tcg_gen_movi_i32(tmp
, 0);
9816 tmp
= load_reg(s
, rn
);
9818 tmp2
= load_reg(s
, rm
);
9820 shiftop
= (insn
>> 4) & 3;
9821 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
9822 conds
= (insn
& (1 << 20)) != 0;
9823 logic_cc
= (conds
&& thumb2_logic_op(op
));
9824 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
9825 if (gen_thumb2_data_op(s
, op
, conds
, 0, tmp
, tmp2
))
9827 tcg_temp_free_i32(tmp2
);
9829 ((op
== 2 && rn
== 15) ||
9830 (op
== 8 && rn
== 13) ||
9831 (op
== 13 && rn
== 13))) {
9832 /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
9833 store_sp_checked(s
, tmp
);
9834 } else if (rd
!= 15) {
9835 store_reg(s
, rd
, tmp
);
9837 tcg_temp_free_i32(tmp
);
9841 case 13: /* Misc data processing. */
9842 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
9843 if (op
< 4 && (insn
& 0xf000) != 0xf000)
9846 case 0: /* Register controlled shift. */
9847 tmp
= load_reg(s
, rn
);
9848 tmp2
= load_reg(s
, rm
);
9849 if ((insn
& 0x70) != 0)
9852 * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
9853 * - MOV, MOVS (register-shifted register), flagsetting
9855 op
= (insn
>> 21) & 3;
9856 logic_cc
= (insn
& (1 << 20)) != 0;
9857 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
9860 store_reg(s
, rd
, tmp
);
9862 case 1: /* Sign/zero extend. */
9863 op
= (insn
>> 20) & 7;
9865 case 0: /* SXTAH, SXTH */
9866 case 1: /* UXTAH, UXTH */
9867 case 4: /* SXTAB, SXTB */
9868 case 5: /* UXTAB, UXTB */
9870 case 2: /* SXTAB16, SXTB16 */
9871 case 3: /* UXTAB16, UXTB16 */
9872 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9880 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9884 tmp
= load_reg(s
, rm
);
9885 shift
= (insn
>> 4) & 3;
9886 /* ??? In many cases it's not necessary to do a
9887 rotate, a shift is sufficient. */
9888 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
9889 op
= (insn
>> 20) & 7;
9891 case 0: gen_sxth(tmp
); break;
9892 case 1: gen_uxth(tmp
); break;
9893 case 2: gen_sxtb16(tmp
); break;
9894 case 3: gen_uxtb16(tmp
); break;
9895 case 4: gen_sxtb(tmp
); break;
9896 case 5: gen_uxtb(tmp
); break;
9898 g_assert_not_reached();
9901 tmp2
= load_reg(s
, rn
);
9902 if ((op
>> 1) == 1) {
9903 gen_add16(tmp
, tmp2
);
9905 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9906 tcg_temp_free_i32(tmp2
);
9909 store_reg(s
, rd
, tmp
);
9911 case 2: /* SIMD add/subtract. */
9912 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9915 op
= (insn
>> 20) & 7;
9916 shift
= (insn
>> 4) & 7;
9917 if ((op
& 3) == 3 || (shift
& 3) == 3)
9919 tmp
= load_reg(s
, rn
);
9920 tmp2
= load_reg(s
, rm
);
9921 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
9922 tcg_temp_free_i32(tmp2
);
9923 store_reg(s
, rd
, tmp
);
9925 case 3: /* Other data processing. */
9926 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
9928 /* Saturating add/subtract. */
9929 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9932 tmp
= load_reg(s
, rn
);
9933 tmp2
= load_reg(s
, rm
);
9935 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp
);
9937 gen_helper_sub_saturate(tmp
, cpu_env
, tmp2
, tmp
);
9939 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp2
);
9940 tcg_temp_free_i32(tmp2
);
9943 case 0x0a: /* rbit */
9944 case 0x08: /* rev */
9945 case 0x09: /* rev16 */
9946 case 0x0b: /* revsh */
9947 case 0x18: /* clz */
9949 case 0x10: /* sel */
9950 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9954 case 0x20: /* crc32/crc32c */
9960 if (!dc_isar_feature(aa32_crc32
, s
)) {
9967 tmp
= load_reg(s
, rn
);
9969 case 0x0a: /* rbit */
9970 gen_helper_rbit(tmp
, tmp
);
9972 case 0x08: /* rev */
9973 tcg_gen_bswap32_i32(tmp
, tmp
);
9975 case 0x09: /* rev16 */
9978 case 0x0b: /* revsh */
9981 case 0x10: /* sel */
9982 tmp2
= load_reg(s
, rm
);
9983 tmp3
= tcg_temp_new_i32();
9984 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
9985 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
9986 tcg_temp_free_i32(tmp3
);
9987 tcg_temp_free_i32(tmp2
);
9989 case 0x18: /* clz */
9990 tcg_gen_clzi_i32(tmp
, tmp
, 32);
10000 uint32_t sz
= op
& 0x3;
10001 uint32_t c
= op
& 0x8;
10003 tmp2
= load_reg(s
, rm
);
10005 tcg_gen_andi_i32(tmp2
, tmp2
, 0xff);
10006 } else if (sz
== 1) {
10007 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff);
10009 tmp3
= tcg_const_i32(1 << sz
);
10011 gen_helper_crc32c(tmp
, tmp
, tmp2
, tmp3
);
10013 gen_helper_crc32(tmp
, tmp
, tmp2
, tmp3
);
10015 tcg_temp_free_i32(tmp2
);
10016 tcg_temp_free_i32(tmp3
);
10020 g_assert_not_reached();
10023 store_reg(s
, rd
, tmp
);
10025 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
10026 switch ((insn
>> 20) & 7) {
10027 case 0: /* 32 x 32 -> 32 */
10028 case 7: /* Unsigned sum of absolute differences. */
10030 case 1: /* 16 x 16 -> 32 */
10031 case 2: /* Dual multiply add. */
10032 case 3: /* 32 * 16 -> 32msb */
10033 case 4: /* Dual multiply subtract. */
10034 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10035 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10040 op
= (insn
>> 4) & 0xf;
10041 tmp
= load_reg(s
, rn
);
10042 tmp2
= load_reg(s
, rm
);
10043 switch ((insn
>> 20) & 7) {
10044 case 0: /* 32 x 32 -> 32 */
10045 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
10046 tcg_temp_free_i32(tmp2
);
10048 tmp2
= load_reg(s
, rs
);
10050 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
10052 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10053 tcg_temp_free_i32(tmp2
);
10056 case 1: /* 16 x 16 -> 32 */
10057 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
10058 tcg_temp_free_i32(tmp2
);
10060 tmp2
= load_reg(s
, rs
);
10061 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
10062 tcg_temp_free_i32(tmp2
);
10065 case 2: /* Dual multiply add. */
10066 case 4: /* Dual multiply subtract. */
10068 gen_swap_half(tmp2
);
10069 gen_smul_dual(tmp
, tmp2
);
10070 if (insn
& (1 << 22)) {
10071 /* This subtraction cannot overflow. */
10072 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
10074 /* This addition cannot overflow 32 bits;
10075 * however it may overflow considered as a signed
10076 * operation, in which case we must set the Q flag.
10078 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
10080 tcg_temp_free_i32(tmp2
);
10083 tmp2
= load_reg(s
, rs
);
10084 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
10085 tcg_temp_free_i32(tmp2
);
10088 case 3: /* 32 * 16 -> 32msb */
10090 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
10093 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
10094 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
10095 tmp
= tcg_temp_new_i32();
10096 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
10097 tcg_temp_free_i64(tmp64
);
10100 tmp2
= load_reg(s
, rs
);
10101 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
10102 tcg_temp_free_i32(tmp2
);
10105 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10106 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
10108 tmp
= load_reg(s
, rs
);
10109 if (insn
& (1 << 20)) {
10110 tmp64
= gen_addq_msw(tmp64
, tmp
);
10112 tmp64
= gen_subq_msw(tmp64
, tmp
);
10115 if (insn
& (1 << 4)) {
10116 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
10118 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
10119 tmp
= tcg_temp_new_i32();
10120 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
10121 tcg_temp_free_i64(tmp64
);
10123 case 7: /* Unsigned sum of absolute differences. */
10124 gen_helper_usad8(tmp
, tmp
, tmp2
);
10125 tcg_temp_free_i32(tmp2
);
10127 tmp2
= load_reg(s
, rs
);
10128 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10129 tcg_temp_free_i32(tmp2
);
10133 store_reg(s
, rd
, tmp
);
10135 case 6: case 7: /* 64-bit multiply, Divide. */
10136 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
10137 tmp
= load_reg(s
, rn
);
10138 tmp2
= load_reg(s
, rm
);
10139 if ((op
& 0x50) == 0x10) {
10141 if (!dc_isar_feature(thumb_div
, s
)) {
10145 gen_helper_udiv(tmp
, tmp
, tmp2
);
10147 gen_helper_sdiv(tmp
, tmp
, tmp2
);
10148 tcg_temp_free_i32(tmp2
);
10149 store_reg(s
, rd
, tmp
);
10150 } else if ((op
& 0xe) == 0xc) {
10151 /* Dual multiply accumulate long. */
10152 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10153 tcg_temp_free_i32(tmp
);
10154 tcg_temp_free_i32(tmp2
);
10158 gen_swap_half(tmp2
);
10159 gen_smul_dual(tmp
, tmp2
);
10161 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
10163 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10165 tcg_temp_free_i32(tmp2
);
10167 tmp64
= tcg_temp_new_i64();
10168 tcg_gen_ext_i32_i64(tmp64
, tmp
);
10169 tcg_temp_free_i32(tmp
);
10170 gen_addq(s
, tmp64
, rs
, rd
);
10171 gen_storeq_reg(s
, rs
, rd
, tmp64
);
10172 tcg_temp_free_i64(tmp64
);
10175 /* Unsigned 64-bit multiply */
10176 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
10180 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10181 tcg_temp_free_i32(tmp2
);
10182 tcg_temp_free_i32(tmp
);
10185 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
10186 tcg_temp_free_i32(tmp2
);
10187 tmp64
= tcg_temp_new_i64();
10188 tcg_gen_ext_i32_i64(tmp64
, tmp
);
10189 tcg_temp_free_i32(tmp
);
10191 /* Signed 64-bit multiply */
10192 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
10197 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10198 tcg_temp_free_i64(tmp64
);
10201 gen_addq_lo(s
, tmp64
, rs
);
10202 gen_addq_lo(s
, tmp64
, rd
);
10203 } else if (op
& 0x40) {
10204 /* 64-bit accumulate. */
10205 gen_addq(s
, tmp64
, rs
, rd
);
10207 gen_storeq_reg(s
, rs
, rd
, tmp64
);
10208 tcg_temp_free_i64(tmp64
);
    case 6: case 7: case 14: case 15:
        /* Coprocessor.  */
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
            /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
            if (extract32(insn, 24, 2) == 3) {
                goto illegal_op; /* op0 = 0b11 : unallocated */
            }

            /*
             * Decode VLLDM and VLSTM first: these are nonstandard because:
             *  * if there is no FPU then these insns must NOP in
             *    Secure state and UNDEF in Nonsecure state
             *  * if there is an FPU then these insns do not have
             *    the usual behaviour that disas_vfp_insn() provides of
             *    being controlled by CPACR/NSACR enable bits or the
             *    lazy-stacking logic.
             */
            if (arm_dc_feature(s, ARM_FEATURE_V8) &&
                (insn & 0xffa00f00) == 0xec200a00) {
                /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
                 *  - VLLDM, VLSTM
                 * We choose to UNDEF if the RAZ bits are non-zero.
                 */
                if (!s->v8m_secure || (insn & 0x0040f0ff)) {
                    goto illegal_op;
                }

                if (arm_dc_feature(s, ARM_FEATURE_VFP)) {
                    TCGv_i32 fptr = load_reg(s, rn);

                    if (extract32(insn, 20, 1)) {
                        gen_helper_v7m_vlldm(cpu_env, fptr);
                    } else {
                        gen_helper_v7m_vlstm(cpu_env, fptr);
                    }
                    tcg_temp_free_i32(fptr);

                    /* End the TB, because we have updated FP control bits */
                    s->base.is_jmp = DISAS_UPDATE;
                }
                break;
            }
            if (arm_dc_feature(s, ARM_FEATURE_VFP) &&
                ((insn >> 8) & 0xe) == 10) {
                /* FP, and the CPU supports it */
                if (disas_vfp_insn(s, insn)) {
                    goto illegal_op;
                }
                break;
            }

            /* All other insns: NOCP */
            gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
                               default_exception_el(s));
            break;
        }
        if ((insn & 0xfe000a00) == 0xfc000800
            && arm_dc_feature(s, ARM_FEATURE_V8)) {
            /* The Thumb2 and ARM encodings are identical.  */
            if (disas_neon_insn_3same_ext(s, insn)) {
                goto illegal_op;
            }
        } else if ((insn & 0xff000a00) == 0xfe000800
                   && arm_dc_feature(s, ARM_FEATURE_V8)) {
            /* The Thumb2 and ARM encodings are identical.  */
            if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
                goto illegal_op;
            }
        } else if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
            if (disas_neon_data_insn(s, insn)) {
                goto illegal_op;
            }
        } else if (((insn >> 8) & 0xe) == 10) {
            if (disas_vfp_insn(s, insn)) {
                goto illegal_op;
            }
        } else {
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn(s, insn)) {
                goto illegal_op;
            }
        }
        break;
10299 case 8: case 9: case 10: case 11:
10300 if (insn
& (1 << 15)) {
10301 /* Branches, misc control. */
10302 if (insn
& 0x5000) {
10303 /* Unconditional branch. */
10304 /* signextend(hw1[10:0]) -> offset[:12]. */
10305 offset
= ((int32_t)insn
<< 5) >> 9 & ~(int32_t)0xfff;
10306 /* hw1[10:0] -> offset[11:1]. */
10307 offset
|= (insn
& 0x7ff) << 1;
10308 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10309 offset[24:22] already have the same value because of the
10310 sign extension above. */
10311 offset
^= ((~insn
) & (1 << 13)) << 10;
10312 offset
^= ((~insn
) & (1 << 11)) << 11;
10314 if (insn
& (1 << 14)) {
10315 /* Branch and link. */
10316 tcg_gen_movi_i32(cpu_R
[14], s
->base
.pc_next
| 1);
10319 offset
+= read_pc(s
);
10320 if (insn
& (1 << 12)) {
10322 gen_jmp(s
, offset
);
10325 offset
&= ~(uint32_t)2;
10326 /* thumb2 bx, no need to check */
10327 gen_bx_im(s
, offset
);
10329 } else if (((insn
>> 23) & 7) == 7) {
10331 if (insn
& (1 << 13))
10334 if (insn
& (1 << 26)) {
10335 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10338 if (!(insn
& (1 << 20))) {
10339 /* Hypervisor call (v7) */
10340 int imm16
= extract32(insn
, 16, 4) << 12
10341 | extract32(insn
, 0, 12);
10348 /* Secure monitor call (v6+) */
10356 op
= (insn
>> 20) & 7;
10358 case 0: /* msr cpsr. */
10359 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10360 tmp
= load_reg(s
, rn
);
10361 /* the constant is the mask and SYSm fields */
10362 addr
= tcg_const_i32(insn
& 0xfff);
10363 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
10364 tcg_temp_free_i32(addr
);
10365 tcg_temp_free_i32(tmp
);
10370 case 1: /* msr spsr. */
10371 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10375 if (extract32(insn
, 5, 1)) {
10377 int sysm
= extract32(insn
, 8, 4) |
10378 (extract32(insn
, 4, 1) << 4);
10381 gen_msr_banked(s
, r
, sysm
, rm
);
10385 /* MSR (for PSRs) */
10386 tmp
= load_reg(s
, rn
);
10388 msr_mask(s
, (insn
>> 8) & 0xf, op
== 1),
10392 case 2: /* cps, nop-hint. */
10393 if (((insn
>> 8) & 7) == 0) {
10394 gen_nop_hint(s
, insn
& 0xff);
10396 /* Implemented as NOP in user mode. */
10401 if (insn
& (1 << 10)) {
10402 if (insn
& (1 << 7))
10404 if (insn
& (1 << 6))
10406 if (insn
& (1 << 5))
10408 if (insn
& (1 << 9))
10409 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
10411 if (insn
& (1 << 8)) {
10413 imm
|= (insn
& 0x1f);
10416 gen_set_psr_im(s
, offset
, 0, imm
);
            case 3: /* Special control operations.  */
                if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
                    !arm_dc_feature(s, ARM_FEATURE_M)) {
                    goto illegal_op;
                }
                op = (insn >> 4) & 0xf;
                switch (op) {
                case 2: /* clrex */
                    gen_clrex(s);
                    break;
                case 4: /* dsb */
                case 5: /* dmb */
                    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
                    break;
                case 6: /* isb */
                    /* We need to break the TB after this insn
                     * to execute self-modifying code correctly
                     * and also to take any pending interrupts
                     * immediately.
                     */
                    gen_goto_tb(s, 0, s->base.pc_next);
                    break;
                case 7: /* sb */
                    if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
                        goto illegal_op;
                    }
                    /*
                     * TODO: There is no speculation barrier opcode
                     * for TCG; MB and end the TB instead.
                     */
                    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
                    gen_goto_tb(s, 0, s->base.pc_next);
                    break;
                default:
                    goto illegal_op;
                }
                break;
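                /*
                 * Note on the barrier cases above: DMB and DSB are both
                 * modelled as the same full sequentially-consistent TCG
                 * barrier (TCG_MO_ALL | TCG_BAR_SC), so the architectural
                 * distinction between them is not represented here; ISB and
                 * SB additionally end the TB so that newly written code and
                 * pending interrupts are observed.
                 */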
10457 /* Trivial implementation equivalent to bx.
10458 * This instruction doesn't exist at all for M-profile.
10460 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10463 tmp
= load_reg(s
, rn
);
10466 case 5: /* Exception return. */
10470 if (rn
!= 14 || rd
!= 15) {
10473 if (s
->current_el
== 2) {
10474 /* ERET from Hyp uses ELR_Hyp, not LR */
10478 tmp
= load_cpu_field(elr_el
[2]);
10480 tmp
= load_reg(s
, rn
);
10481 tcg_gen_subi_i32(tmp
, tmp
, insn
& 0xff);
10483 gen_exception_return(s
, tmp
);
10486 if (extract32(insn
, 5, 1) &&
10487 !arm_dc_feature(s
, ARM_FEATURE_M
)) {
10489 int sysm
= extract32(insn
, 16, 4) |
10490 (extract32(insn
, 4, 1) << 4);
10492 gen_mrs_banked(s
, 0, sysm
, rd
);
10496 if (extract32(insn
, 16, 4) != 0xf) {
10499 if (!arm_dc_feature(s
, ARM_FEATURE_M
) &&
10500 extract32(insn
, 0, 8) != 0) {
10505 tmp
= tcg_temp_new_i32();
10506 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10507 addr
= tcg_const_i32(insn
& 0xff);
10508 gen_helper_v7m_mrs(tmp
, cpu_env
, addr
);
10509 tcg_temp_free_i32(addr
);
10511 gen_helper_cpsr_read(tmp
, cpu_env
);
10513 store_reg(s
, rd
, tmp
);
10516 if (extract32(insn
, 5, 1) &&
10517 !arm_dc_feature(s
, ARM_FEATURE_M
)) {
10519 int sysm
= extract32(insn
, 16, 4) |
10520 (extract32(insn
, 4, 1) << 4);
10522 gen_mrs_banked(s
, 1, sysm
, rd
);
10527 /* Not accessible in user mode. */
10528 if (IS_USER(s
) || arm_dc_feature(s
, ARM_FEATURE_M
)) {
10532 if (extract32(insn
, 16, 4) != 0xf ||
10533 extract32(insn
, 0, 8) != 0) {
10537 tmp
= load_cpu_field(spsr
);
10538 store_reg(s
, rd
, tmp
);
10543 /* Conditional branch. */
10544 op
= (insn
>> 22) & 0xf;
10545 /* Generate a conditional jump to next instruction. */
10546 arm_skip_unless(s
, op
);
10548 /* offset[11:1] = insn[10:0] */
10549 offset
= (insn
& 0x7ff) << 1;
10550 /* offset[17:12] = insn[21:16]. */
10551 offset
|= (insn
& 0x003f0000) >> 4;
10552 /* offset[31:20] = insn[26]. */
10553 offset
|= ((int32_t)((insn
<< 5) & 0x80000000)) >> 11;
10554 /* offset[18] = insn[13]. */
10555 offset
|= (insn
& (1 << 13)) << 5;
10556 /* offset[19] = insn[11]. */
10557 offset
|= (insn
& (1 << 11)) << 8;
10559 /* jump to the offset */
10560 gen_jmp(s
, read_pc(s
) + offset
);
10564 * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
10565 * - Data-processing (modified immediate, plain binary immediate)
10567 if (insn
& (1 << 25)) {
10569 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
10570 * - Data-processing (plain binary immediate)
10572 if (insn
& (1 << 24)) {
10573 if (insn
& (1 << 20))
10575 /* Bitfield/Saturate. */
10576 op
= (insn
>> 21) & 7;
10578 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
10580 tmp
= tcg_temp_new_i32();
10581 tcg_gen_movi_i32(tmp
, 0);
10583 tmp
= load_reg(s
, rn
);
10586 case 2: /* Signed bitfield extract. */
10588 if (shift
+ imm
> 32)
10591 tcg_gen_sextract_i32(tmp
, tmp
, shift
, imm
);
10594 case 6: /* Unsigned bitfield extract. */
10596 if (shift
+ imm
> 32)
10599 tcg_gen_extract_i32(tmp
, tmp
, shift
, imm
);
10602 case 3: /* Bitfield insert/clear. */
10605 imm
= imm
+ 1 - shift
;
10607 tmp2
= load_reg(s
, rd
);
10608 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, shift
, imm
);
10609 tcg_temp_free_i32(tmp2
);
10614 default: /* Saturate. */
10616 tcg_gen_sari_i32(tmp
, tmp
, shift
);
10618 tcg_gen_shli_i32(tmp
, tmp
, shift
);
10620 tmp2
= tcg_const_i32(imm
);
10623 if ((op
& 1) && shift
== 0) {
10624 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10625 tcg_temp_free_i32(tmp
);
10626 tcg_temp_free_i32(tmp2
);
10629 gen_helper_usat16(tmp
, cpu_env
, tmp
, tmp2
);
10631 gen_helper_usat(tmp
, cpu_env
, tmp
, tmp2
);
10635 if ((op
& 1) && shift
== 0) {
10636 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10637 tcg_temp_free_i32(tmp
);
10638 tcg_temp_free_i32(tmp2
);
10641 gen_helper_ssat16(tmp
, cpu_env
, tmp
, tmp2
);
10643 gen_helper_ssat(tmp
, cpu_env
, tmp
, tmp2
);
10646 tcg_temp_free_i32(tmp2
);
10649 store_reg(s
, rd
, tmp
);
10651 imm
= ((insn
& 0x04000000) >> 15)
10652 | ((insn
& 0x7000) >> 4) | (insn
& 0xff);
10653 if (insn
& (1 << 22)) {
10654 /* 16-bit immediate. */
10655 imm
|= (insn
>> 4) & 0xf000;
10656 if (insn
& (1 << 23)) {
10658 tmp
= load_reg(s
, rd
);
10659 tcg_gen_ext16u_i32(tmp
, tmp
);
10660 tcg_gen_ori_i32(tmp
, tmp
, imm
<< 16);
10663 tmp
= tcg_temp_new_i32();
10664 tcg_gen_movi_i32(tmp
, imm
);
10666 store_reg(s
, rd
, tmp
);
10668 /* Add/sub 12-bit immediate. */
10669 if (insn
& (1 << 23)) {
10672 tmp
= add_reg_for_lit(s
, rn
, imm
);
10673 if (rn
== 13 && rd
== 13) {
10674 /* ADD SP, SP, imm or SUB SP, SP, imm */
10675 store_sp_checked(s
, tmp
);
10677 store_reg(s
, rd
, tmp
);
                    }
                }
            }
        } else {
            /*
             * 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
             *  - Data-processing (modified immediate)
             */
            int shifter_out = 0;
            /* modified 12-bit immediate.  */
            shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
            imm = (insn & 0xff);
            switch (shift) {
            case 0: /* XY */
                /* Nothing to do.  */
                break;
            case 1: /* 00XY00XY */
                imm |= imm << 16;
                break;
            case 2: /* XY00XY00 */
                imm <<= 8;
                imm |= imm << 16;
                break;
            case 3: /* XYXYXYXY */
                imm |= imm << 8;
                imm |= imm << 16;
                break;
            default: /* Rotated constant.  */
                shift = (shift << 1) | (imm >> 7);
                imm |= 0x80;
                imm = imm << (32 - shift);
                shifter_out = 1;
                break;
            }
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, imm);
            rn = (insn >> 16) & 0xf;
            if (rn == 15) {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, 0);
            } else {
                tmp = load_reg(s, rn);
            }
            op = (insn >> 21) & 0xf;
            if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                   shifter_out, tmp, tmp2))
                goto illegal_op;
            tcg_temp_free_i32(tmp2);
            rd = (insn >> 8) & 0xf;
            if (rd == 13 && rn == 13
                && (op == 8 || op == 13)) {
                /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
                store_sp_checked(s, tmp);
            } else if (rd != 15) {
                store_reg(s, rd, tmp);
            } else {
                tcg_temp_free_i32(tmp);
            }
        }
        break;
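        /*
         * Worked example for the modified-immediate expansion above:
         * shift starts out as i:imm3. For shift == 2 and imm8 == 0xAB the
         * constant becomes 0xAB00AB00 (pattern XY00XY00). For shift >= 4 the
         * constant is a rotated byte: e.g. i:imm3 = 0b0100 with imm8 = 0x55
         * gives a rotation of 8, so the value is (0x80 | 0x55) << 24, i.e.
         * 0xD5000000.
         */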
10739 case 12: /* Load/store single data item. */
10746 if ((insn
& 0x01100000) == 0x01000000) {
10747 if (disas_neon_ls_insn(s
, insn
)) {
10752 op
= ((insn
>> 21) & 3) | ((insn
>> 22) & 4);
10754 if (!(insn
& (1 << 20))) {
10758 /* Byte or halfword load space with dest == r15 : memory hints.
10759 * Catch them early so we don't emit pointless addressing code.
10760 * This space is a mix of:
10761 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10762 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10764 * unallocated hints, which must be treated as NOPs
10765 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10766 * which is easiest for the decoding logic
10767 * Some space which must UNDEF
10769 int op1
= (insn
>> 23) & 3;
10770 int op2
= (insn
>> 6) & 0x3f;
10775 /* UNPREDICTABLE, unallocated hint or
10776 * PLD/PLDW/PLI (literal)
10781 return; /* PLD/PLDW/PLI or unallocated hint */
10783 if ((op2
== 0) || ((op2
& 0x3c) == 0x30)) {
10784 return; /* PLD/PLDW/PLI or unallocated hint */
10786 /* UNDEF space, or an UNPREDICTABLE */
10790 memidx
= get_mem_index(s
);
10791 imm
= insn
& 0xfff;
10792 if (insn
& (1 << 23)) {
10793 /* PC relative or Positive offset. */
10794 addr
= add_reg_for_lit(s
, rn
, imm
);
10795 } else if (rn
== 15) {
10796 /* PC relative with negative offset. */
10797 addr
= add_reg_for_lit(s
, rn
, -imm
);
10799 addr
= load_reg(s
, rn
);
10801 switch ((insn
>> 8) & 0xf) {
10802 case 0x0: /* Shifted Register. */
10803 shift
= (insn
>> 4) & 0xf;
10805 tcg_temp_free_i32(addr
);
10808 tmp
= load_reg(s
, rm
);
10809 tcg_gen_shli_i32(tmp
, tmp
, shift
);
10810 tcg_gen_add_i32(addr
, addr
, tmp
);
10811 tcg_temp_free_i32(tmp
);
10813 case 0xc: /* Negative offset. */
10814 tcg_gen_addi_i32(addr
, addr
, -imm
);
10816 case 0xe: /* User privilege. */
10817 tcg_gen_addi_i32(addr
, addr
, imm
);
10818 memidx
= get_a32_user_mem_index(s
);
10820 case 0x9: /* Post-decrement. */
10822 /* Fall through. */
10823 case 0xb: /* Post-increment. */
10827 case 0xd: /* Pre-decrement. */
10829 /* Fall through. */
10830 case 0xf: /* Pre-increment. */
10834 tcg_temp_free_i32(addr
);
10839 issinfo
= writeback
? ISSInvalid
: rs
;
10841 if (s
->v8m_stackcheck
&& rn
== 13 && writeback
) {
10843 * Stackcheck. Here we know 'addr' is the current SP;
10844 * if imm is +ve we're moving SP up, else down. It is
10845 * UNKNOWN whether the limit check triggers when SP starts
10846 * below the limit and ends up above it; we chose to do so.
10848 if ((int32_t)imm
< 0) {
10849 TCGv_i32 newsp
= tcg_temp_new_i32();
10851 tcg_gen_addi_i32(newsp
, addr
, imm
);
10852 gen_helper_v8m_stackcheck(cpu_env
, newsp
);
10853 tcg_temp_free_i32(newsp
);
10855 gen_helper_v8m_stackcheck(cpu_env
, addr
);
10859 if (writeback
&& !postinc
) {
10860 tcg_gen_addi_i32(addr
, addr
, imm
);
10863 if (insn
& (1 << 20)) {
10865 tmp
= tcg_temp_new_i32();
10868 gen_aa32_ld8u_iss(s
, tmp
, addr
, memidx
, issinfo
);
10871 gen_aa32_ld8s_iss(s
, tmp
, addr
, memidx
, issinfo
);
10874 gen_aa32_ld16u_iss(s
, tmp
, addr
, memidx
, issinfo
);
10877 gen_aa32_ld16s_iss(s
, tmp
, addr
, memidx
, issinfo
);
10880 gen_aa32_ld32u_iss(s
, tmp
, addr
, memidx
, issinfo
);
10883 tcg_temp_free_i32(tmp
);
10884 tcg_temp_free_i32(addr
);
10888 gen_bx_excret(s
, tmp
);
10890 store_reg(s
, rs
, tmp
);
10894 tmp
= load_reg(s
, rs
);
10897 gen_aa32_st8_iss(s
, tmp
, addr
, memidx
, issinfo
);
10900 gen_aa32_st16_iss(s
, tmp
, addr
, memidx
, issinfo
);
10903 gen_aa32_st32_iss(s
, tmp
, addr
, memidx
, issinfo
);
10906 tcg_temp_free_i32(tmp
);
10907 tcg_temp_free_i32(addr
);
10910 tcg_temp_free_i32(tmp
);
10913 tcg_gen_addi_i32(addr
, addr
, imm
);
10915 store_reg(s
, rn
, addr
);
10917 tcg_temp_free_i32(addr
);
illegal_op:
    unallocated_encoding(s);
}

static void disas_thumb_insn(DisasContext *s, uint32_t insn)
{
    uint32_t val, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 addr;

    switch (insn >> 12) {
    case 0: case 1:

        rd = insn & 7;
10942 op
= (insn
>> 11) & 3;
10945 * 0b0001_1xxx_xxxx_xxxx
10946 * - Add, subtract (three low registers)
10947 * - Add, subtract (two low registers and immediate)
10949 rn
= (insn
>> 3) & 7;
10950 tmp
= load_reg(s
, rn
);
10951 if (insn
& (1 << 10)) {
10953 tmp2
= tcg_temp_new_i32();
10954 tcg_gen_movi_i32(tmp2
, (insn
>> 6) & 7);
10957 rm
= (insn
>> 6) & 7;
10958 tmp2
= load_reg(s
, rm
);
10960 if (insn
& (1 << 9)) {
10961 if (s
->condexec_mask
)
10962 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
10964 gen_sub_CC(tmp
, tmp
, tmp2
);
10966 if (s
->condexec_mask
)
10967 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10969 gen_add_CC(tmp
, tmp
, tmp2
);
10971 tcg_temp_free_i32(tmp2
);
10972 store_reg(s
, rd
, tmp
);
10974 /* shift immediate */
10975 rm
= (insn
>> 3) & 7;
10976 shift
= (insn
>> 6) & 0x1f;
10977 tmp
= load_reg(s
, rm
);
10978 gen_arm_shift_im(tmp
, op
, shift
, s
->condexec_mask
== 0);
10979 if (!s
->condexec_mask
)
10981 store_reg(s
, rd
, tmp
);
10986 * 0b001x_xxxx_xxxx_xxxx
10987 * - Add, subtract, compare, move (one low register and immediate)
10989 op
= (insn
>> 11) & 3;
10990 rd
= (insn
>> 8) & 0x7;
10991 if (op
== 0) { /* mov */
10992 tmp
= tcg_temp_new_i32();
10993 tcg_gen_movi_i32(tmp
, insn
& 0xff);
10994 if (!s
->condexec_mask
)
10996 store_reg(s
, rd
, tmp
);
10998 tmp
= load_reg(s
, rd
);
10999 tmp2
= tcg_temp_new_i32();
11000 tcg_gen_movi_i32(tmp2
, insn
& 0xff);
11003 gen_sub_CC(tmp
, tmp
, tmp2
);
11004 tcg_temp_free_i32(tmp
);
11005 tcg_temp_free_i32(tmp2
);
11008 if (s
->condexec_mask
)
11009 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
11011 gen_add_CC(tmp
, tmp
, tmp2
);
11012 tcg_temp_free_i32(tmp2
);
11013 store_reg(s
, rd
, tmp
);
11016 if (s
->condexec_mask
)
11017 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
11019 gen_sub_CC(tmp
, tmp
, tmp2
);
11020 tcg_temp_free_i32(tmp2
);
11021 store_reg(s
, rd
, tmp
);
11027 if (insn
& (1 << 11)) {
11028 rd
= (insn
>> 8) & 7;
11029 /* load pc-relative. Bit 1 of PC is ignored. */
11030 addr
= add_reg_for_lit(s
, 15, (insn
& 0xff) * 4);
11031 tmp
= tcg_temp_new_i32();
11032 gen_aa32_ld32u_iss(s
, tmp
, addr
, get_mem_index(s
),
11034 tcg_temp_free_i32(addr
);
11035 store_reg(s
, rd
, tmp
);
11038 if (insn
& (1 << 10)) {
11039 /* 0b0100_01xx_xxxx_xxxx
11040 * - data processing extended, branch and exchange
11042 rd
= (insn
& 7) | ((insn
>> 4) & 8);
11043 rm
= (insn
>> 3) & 0xf;
11044 op
= (insn
>> 8) & 3;
11047 tmp
= load_reg(s
, rd
);
11048 tmp2
= load_reg(s
, rm
);
11049 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
11050 tcg_temp_free_i32(tmp2
);
11052 /* ADD SP, SP, reg */
11053 store_sp_checked(s
, tmp
);
11055 store_reg(s
, rd
, tmp
);
11059 tmp
= load_reg(s
, rd
);
11060 tmp2
= load_reg(s
, rm
);
11061 gen_sub_CC(tmp
, tmp
, tmp2
);
11062 tcg_temp_free_i32(tmp2
);
11063 tcg_temp_free_i32(tmp
);
11065 case 2: /* mov/cpy */
11066 tmp
= load_reg(s
, rm
);
11069 store_sp_checked(s
, tmp
);
11071 store_reg(s
, rd
, tmp
);
11076 /* 0b0100_0111_xxxx_xxxx
11077 * - branch [and link] exchange thumb register
11079 bool link
= insn
& (1 << 7);
11088 /* BXNS/BLXNS: only exists for v8M with the
11089 * security extensions, and always UNDEF if NonSecure.
11090 * We don't implement these in the user-only mode
11091 * either (in theory you can use them from Secure User
11092 * mode but they are too tied in to system emulation.)
11094 if (!s
->v8m_secure
|| IS_USER_ONLY
) {
11105 tmp
= load_reg(s
, rm
);
11107 val
= (uint32_t)s
->base
.pc_next
| 1;
11108 tmp2
= tcg_temp_new_i32();
11109 tcg_gen_movi_i32(tmp2
, val
);
11110 store_reg(s
, 14, tmp2
);
11113 /* Only BX works as exception-return, not BLX */
11114 gen_bx_excret(s
, tmp
);
11123 * 0b0100_00xx_xxxx_xxxx
11124 * - Data-processing (two low registers)
11127 rm
= (insn
>> 3) & 7;
11128 op
= (insn
>> 6) & 0xf;
11129 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
11130 /* the shift/rotate ops want the operands backwards */
11139 if (op
== 9) { /* neg */
11140 tmp
= tcg_temp_new_i32();
11141 tcg_gen_movi_i32(tmp
, 0);
11142 } else if (op
!= 0xf) { /* mvn doesn't read its first operand */
11143 tmp
= load_reg(s
, rd
);
11148 tmp2
= load_reg(s
, rm
);
11150 case 0x0: /* and */
11151 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
11152 if (!s
->condexec_mask
)
11155 case 0x1: /* eor */
11156 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
11157 if (!s
->condexec_mask
)
11160 case 0x2: /* lsl */
11161 if (s
->condexec_mask
) {
11162 gen_shl(tmp2
, tmp2
, tmp
);
11164 gen_helper_shl_cc(tmp2
, cpu_env
, tmp2
, tmp
);
11165 gen_logic_CC(tmp2
);
11168 case 0x3: /* lsr */
11169 if (s
->condexec_mask
) {
11170 gen_shr(tmp2
, tmp2
, tmp
);
11172 gen_helper_shr_cc(tmp2
, cpu_env
, tmp2
, tmp
);
11173 gen_logic_CC(tmp2
);
11176 case 0x4: /* asr */
11177 if (s
->condexec_mask
) {
11178 gen_sar(tmp2
, tmp2
, tmp
);
11180 gen_helper_sar_cc(tmp2
, cpu_env
, tmp2
, tmp
);
11181 gen_logic_CC(tmp2
);
11184 case 0x5: /* adc */
11185 if (s
->condexec_mask
) {
11186 gen_adc(tmp
, tmp2
);
11188 gen_adc_CC(tmp
, tmp
, tmp2
);
11191 case 0x6: /* sbc */
11192 if (s
->condexec_mask
) {
11193 gen_sub_carry(tmp
, tmp
, tmp2
);
11195 gen_sbc_CC(tmp
, tmp
, tmp2
);
11198 case 0x7: /* ror */
11199 if (s
->condexec_mask
) {
11200 tcg_gen_andi_i32(tmp
, tmp
, 0x1f);
11201 tcg_gen_rotr_i32(tmp2
, tmp2
, tmp
);
11203 gen_helper_ror_cc(tmp2
, cpu_env
, tmp2
, tmp
);
11204 gen_logic_CC(tmp2
);
11207 case 0x8: /* tst */
11208 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
11212 case 0x9: /* neg */
11213 if (s
->condexec_mask
)
11214 tcg_gen_neg_i32(tmp
, tmp2
);
11216 gen_sub_CC(tmp
, tmp
, tmp2
);
11218 case 0xa: /* cmp */
11219 gen_sub_CC(tmp
, tmp
, tmp2
);
11222 case 0xb: /* cmn */
11223 gen_add_CC(tmp
, tmp
, tmp2
);
11226 case 0xc: /* orr */
11227 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
11228 if (!s
->condexec_mask
)
11231 case 0xd: /* mul */
11232 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
11233 if (!s
->condexec_mask
)
11236 case 0xe: /* bic */
11237 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
11238 if (!s
->condexec_mask
)
11241 case 0xf: /* mvn */
11242 tcg_gen_not_i32(tmp2
, tmp2
);
11243 if (!s
->condexec_mask
)
11244 gen_logic_CC(tmp2
);
11251 store_reg(s
, rm
, tmp2
);
11253 tcg_temp_free_i32(tmp
);
11255 store_reg(s
, rd
, tmp
);
11256 tcg_temp_free_i32(tmp2
);
11259 tcg_temp_free_i32(tmp
);
11260 tcg_temp_free_i32(tmp2
);
11265 /* load/store register offset. */
11267 rn
= (insn
>> 3) & 7;
11268 rm
= (insn
>> 6) & 7;
11269 op
= (insn
>> 9) & 7;
11270 addr
= load_reg(s
, rn
);
11271 tmp
= load_reg(s
, rm
);
11272 tcg_gen_add_i32(addr
, addr
, tmp
);
11273 tcg_temp_free_i32(tmp
);
11275 if (op
< 3) { /* store */
11276 tmp
= load_reg(s
, rd
);
11278 tmp
= tcg_temp_new_i32();
11283 gen_aa32_st32_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11286 gen_aa32_st16_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11289 gen_aa32_st8_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11291 case 3: /* ldrsb */
11292 gen_aa32_ld8s_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11295 gen_aa32_ld32u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11298 gen_aa32_ld16u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11301 gen_aa32_ld8u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11303 case 7: /* ldrsh */
11304 gen_aa32_ld16s_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11307 if (op
>= 3) { /* load */
11308 store_reg(s
, rd
, tmp
);
11310 tcg_temp_free_i32(tmp
);
11312 tcg_temp_free_i32(addr
);
11316 /* load/store word immediate offset */
11318 rn
= (insn
>> 3) & 7;
11319 addr
= load_reg(s
, rn
);
11320 val
= (insn
>> 4) & 0x7c;
11321 tcg_gen_addi_i32(addr
, addr
, val
);
11323 if (insn
& (1 << 11)) {
11325 tmp
= tcg_temp_new_i32();
11326 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
11327 store_reg(s
, rd
, tmp
);
11330 tmp
= load_reg(s
, rd
);
11331 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
11332 tcg_temp_free_i32(tmp
);
11334 tcg_temp_free_i32(addr
);
11338 /* load/store byte immediate offset */
11340 rn
= (insn
>> 3) & 7;
11341 addr
= load_reg(s
, rn
);
11342 val
= (insn
>> 6) & 0x1f;
11343 tcg_gen_addi_i32(addr
, addr
, val
);
11345 if (insn
& (1 << 11)) {
11347 tmp
= tcg_temp_new_i32();
11348 gen_aa32_ld8u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11349 store_reg(s
, rd
, tmp
);
11352 tmp
= load_reg(s
, rd
);
11353 gen_aa32_st8_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11354 tcg_temp_free_i32(tmp
);
11356 tcg_temp_free_i32(addr
);
11360 /* load/store halfword immediate offset */
11362 rn
= (insn
>> 3) & 7;
11363 addr
= load_reg(s
, rn
);
11364 val
= (insn
>> 5) & 0x3e;
11365 tcg_gen_addi_i32(addr
, addr
, val
);
11367 if (insn
& (1 << 11)) {
11369 tmp
= tcg_temp_new_i32();
11370 gen_aa32_ld16u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11371 store_reg(s
, rd
, tmp
);
11374 tmp
= load_reg(s
, rd
);
11375 gen_aa32_st16_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11376 tcg_temp_free_i32(tmp
);
11378 tcg_temp_free_i32(addr
);
11382 /* load/store from stack */
11383 rd
= (insn
>> 8) & 7;
11384 addr
= load_reg(s
, 13);
11385 val
= (insn
& 0xff) * 4;
11386 tcg_gen_addi_i32(addr
, addr
, val
);
11388 if (insn
& (1 << 11)) {
11390 tmp
= tcg_temp_new_i32();
11391 gen_aa32_ld32u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11392 store_reg(s
, rd
, tmp
);
11395 tmp
= load_reg(s
, rd
);
11396 gen_aa32_st32_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11397 tcg_temp_free_i32(tmp
);
11399 tcg_temp_free_i32(addr
);
11404 * 0b1010_xxxx_xxxx_xxxx
11405 * - Add PC/SP (immediate)
11407 rd
= (insn
>> 8) & 7;
11408 val
= (insn
& 0xff) * 4;
11409 tmp
= add_reg_for_lit(s
, insn
& (1 << 11) ? 13 : 15, val
);
11410 store_reg(s
, rd
, tmp
);
11415 op
= (insn
>> 8) & 0xf;
11419 * 0b1011_0000_xxxx_xxxx
11420 * - ADD (SP plus immediate)
11421 * - SUB (SP minus immediate)
11423 tmp
= load_reg(s
, 13);
11424 val
= (insn
& 0x7f) * 4;
11425 if (insn
& (1 << 7))
11426 val
= -(int32_t)val
;
11427 tcg_gen_addi_i32(tmp
, tmp
, val
);
11428 store_sp_checked(s
, tmp
);
11431 case 2: /* sign/zero extend. */
11434 rm
= (insn
>> 3) & 7;
11435 tmp
= load_reg(s
, rm
);
11436 switch ((insn
>> 6) & 3) {
11437 case 0: gen_sxth(tmp
); break;
11438 case 1: gen_sxtb(tmp
); break;
11439 case 2: gen_uxth(tmp
); break;
11440 case 3: gen_uxtb(tmp
); break;
11442 store_reg(s
, rd
, tmp
);
11444 case 4: case 5: case 0xc: case 0xd:
11446 * 0b1011_x10x_xxxx_xxxx
11449 addr
= load_reg(s
, 13);
11450 if (insn
& (1 << 8))
11454 for (i
= 0; i
< 8; i
++) {
11455 if (insn
& (1 << i
))
11458 if ((insn
& (1 << 11)) == 0) {
11459 tcg_gen_addi_i32(addr
, addr
, -offset
);
11462 if (s
->v8m_stackcheck
) {
11464 * Here 'addr' is the lower of "old SP" and "new SP";
11465 * if this is a pop that starts below the limit and ends
11466 * above it, it is UNKNOWN whether the limit check triggers;
11467 * we choose to trigger.
11469 gen_helper_v8m_stackcheck(cpu_env
, addr
);
11472 for (i
= 0; i
< 8; i
++) {
11473 if (insn
& (1 << i
)) {
11474 if (insn
& (1 << 11)) {
11476 tmp
= tcg_temp_new_i32();
11477 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
11478 store_reg(s
, i
, tmp
);
11481 tmp
= load_reg(s
, i
);
11482 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
11483 tcg_temp_free_i32(tmp
);
11485 /* advance to the next address. */
11486 tcg_gen_addi_i32(addr
, addr
, 4);
11490 if (insn
& (1 << 8)) {
11491 if (insn
& (1 << 11)) {
11493 tmp
= tcg_temp_new_i32();
11494 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
11495 /* don't set the pc until the rest of the instruction
11499 tmp
= load_reg(s
, 14);
11500 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
11501 tcg_temp_free_i32(tmp
);
11503 tcg_gen_addi_i32(addr
, addr
, 4);
11505 if ((insn
& (1 << 11)) == 0) {
11506 tcg_gen_addi_i32(addr
, addr
, -offset
);
11508 /* write back the new stack pointer */
11509 store_reg(s
, 13, addr
);
11510 /* set the new PC value */
11511 if ((insn
& 0x0900) == 0x0900) {
11512 store_reg_from_load(s
, 15, tmp
);
11516 case 1: case 3: case 9: case 11: /* czb */
11518 tmp
= load_reg(s
, rm
);
11519 arm_gen_condlabel(s
);
11520 if (insn
& (1 << 11))
11521 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, s
->condlabel
);
11523 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, s
->condlabel
);
11524 tcg_temp_free_i32(tmp
);
11525 offset
= ((insn
& 0xf8) >> 2) | (insn
& 0x200) >> 3;
11526 gen_jmp(s
, read_pc(s
) + offset
);
11529 case 15: /* IT, nop-hint. */
11530 if ((insn
& 0xf) == 0) {
11531 gen_nop_hint(s
, (insn
>> 4) & 0xf);
11537 * Combinations of firstcond and mask which set up an 0b1111
11538 * condition are UNPREDICTABLE; we take the CONSTRAINED
11539 * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
11540 * i.e. both meaning "execute always".
11542 s
->condexec_cond
= (insn
>> 4) & 0xe;
11543 s
->condexec_mask
= insn
& 0x1f;
11544 /* No actual code generated for this insn, just setup state. */
11547 case 0xe: /* bkpt */
11549 int imm8
= extract32(insn
, 0, 8);
11551 gen_exception_bkpt_insn(s
, syn_aa32_bkpt(imm8
, true));
11555 case 0xa: /* rev, and hlt */
11557 int op1
= extract32(insn
, 6, 2);
11561 int imm6
= extract32(insn
, 0, 6);
11567 /* Otherwise this is rev */
11569 rn
= (insn
>> 3) & 0x7;
11571 tmp
= load_reg(s
, rn
);
11573 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
11574 case 1: gen_rev16(tmp
); break;
11575 case 3: gen_revsh(tmp
); break;
11577 g_assert_not_reached();
11579 store_reg(s
, rd
, tmp
);
11584 switch ((insn
>> 5) & 7) {
11588 if (((insn
>> 3) & 1) != !!(s
->be_data
== MO_BE
)) {
11589 gen_helper_setend(cpu_env
);
11590 s
->base
.is_jmp
= DISAS_UPDATE
;
11599 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
11600 tmp
= tcg_const_i32((insn
& (1 << 4)) != 0);
11603 addr
= tcg_const_i32(19);
11604 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
11605 tcg_temp_free_i32(addr
);
11609 addr
= tcg_const_i32(16);
11610 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
11611 tcg_temp_free_i32(addr
);
11613 tcg_temp_free_i32(tmp
);
11616 if (insn
& (1 << 4)) {
11617 shift
= CPSR_A
| CPSR_I
| CPSR_F
;
11621 gen_set_psr_im(s
, ((insn
& 7) << 6), 0, shift
);
11636 /* load/store multiple */
11637 TCGv_i32 loaded_var
= NULL
;
11638 rn
= (insn
>> 8) & 0x7;
11639 addr
= load_reg(s
, rn
);
11640 for (i
= 0; i
< 8; i
++) {
11641 if (insn
& (1 << i
)) {
11642 if (insn
& (1 << 11)) {
11644 tmp
= tcg_temp_new_i32();
11645 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
11649 store_reg(s
, i
, tmp
);
11653 tmp
= load_reg(s
, i
);
11654 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
11655 tcg_temp_free_i32(tmp
);
11657 /* advance to the next address */
11658 tcg_gen_addi_i32(addr
, addr
, 4);
11661 if ((insn
& (1 << rn
)) == 0) {
11662 /* base reg not in list: base register writeback */
11663 store_reg(s
, rn
, addr
);
11665 /* base reg in list: if load, complete it now */
11666 if (insn
& (1 << 11)) {
11667 store_reg(s
, rn
, loaded_var
);
11669 tcg_temp_free_i32(addr
);
11674 /* conditional branch or swi */
11675 cond
= (insn
>> 8) & 0xf;
11681 gen_set_pc_im(s
, s
->base
.pc_next
);
11682 s
->svc_imm
= extract32(insn
, 0, 8);
11683 s
->base
.is_jmp
= DISAS_SWI
;
11686 /* generate a conditional jump to next instruction */
11687 arm_skip_unless(s
, cond
);
11689 /* jump to the offset */
11691 offset
= ((int32_t)insn
<< 24) >> 24;
11692 val
+= offset
<< 1;
11697 if (insn
& (1 << 11)) {
11698 /* thumb_insn_is_16bit() ensures we can't get here for
11699 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
11700 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
11702 assert(!arm_dc_feature(s
, ARM_FEATURE_THUMB2
));
11704 offset
= ((insn
& 0x7ff) << 1);
11705 tmp
= load_reg(s
, 14);
11706 tcg_gen_addi_i32(tmp
, tmp
, offset
);
11707 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
11709 tmp2
= tcg_temp_new_i32();
11710 tcg_gen_movi_i32(tmp2
, s
->base
.pc_next
| 1);
11711 store_reg(s
, 14, tmp2
);
11715 /* unconditional branch */
11717 offset
= ((int32_t)insn
<< 21) >> 21;
11718 val
+= offset
<< 1;
11723 /* thumb_insn_is_16bit() ensures we can't get here for
11724 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
11726 assert(!arm_dc_feature(s
, ARM_FEATURE_THUMB2
));
11728 if (insn
& (1 << 11)) {
11729 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
11730 offset
= ((insn
& 0x7ff) << 1) | 1;
11731 tmp
= load_reg(s
, 14);
11732 tcg_gen_addi_i32(tmp
, tmp
, offset
);
11734 tmp2
= tcg_temp_new_i32();
11735 tcg_gen_movi_i32(tmp2
, s
->base
.pc_next
| 1);
11736 store_reg(s
, 14, tmp2
);
11739 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
11740 uint32_t uoffset
= ((int32_t)insn
<< 21) >> 9;
11742 tcg_gen_movi_i32(cpu_R
[14], read_pc(s
) + uoffset
);
11749 unallocated_encoding(s
);
11752 static bool insn_crosses_page(CPUARMState
*env
, DisasContext
*s
)
11754 /* Return true if the insn at dc->base.pc_next might cross a page boundary.
11755 * (False positives are OK, false negatives are not.)
11756 * We know this is a Thumb insn, and our caller ensures we are
11757 * only called if dc->base.pc_next is less than 4 bytes from the page
11758 * boundary, so we cross the page if the first 16 bits indicate
11759 * that this is a 32 bit insn.
11761 uint16_t insn
= arm_lduw_code(env
, s
->base
.pc_next
, s
->sctlr_b
);
11763 return !thumb_insn_is_16bit(s
, s
->base
.pc_next
, insn
);
11766 static void arm_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
11768 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
11769 CPUARMState
*env
= cs
->env_ptr
;
11770 ARMCPU
*cpu
= env_archcpu(env
);
11771 uint32_t tb_flags
= dc
->base
.tb
->flags
;
11772 uint32_t condexec
, core_mmu_idx
;
11774 dc
->isar
= &cpu
->isar
;
11778 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11779 * there is no secure EL1, so we route exceptions to EL3.
11781 dc
->secure_routed_to_el3
= arm_feature(env
, ARM_FEATURE_EL3
) &&
11782 !arm_el_is_aa64(env
, 3);
11783 dc
->thumb
= FIELD_EX32(tb_flags
, TBFLAG_A32
, THUMB
);
11784 dc
->sctlr_b
= FIELD_EX32(tb_flags
, TBFLAG_A32
, SCTLR_B
);
11785 dc
->be_data
= FIELD_EX32(tb_flags
, TBFLAG_ANY
, BE_DATA
) ? MO_BE
: MO_LE
;
11786 condexec
= FIELD_EX32(tb_flags
, TBFLAG_A32
, CONDEXEC
);
11787 dc
->condexec_mask
= (condexec
& 0xf) << 1;
11788 dc
->condexec_cond
= condexec
>> 4;
11789 core_mmu_idx
= FIELD_EX32(tb_flags
, TBFLAG_ANY
, MMUIDX
);
11790 dc
->mmu_idx
= core_to_arm_mmu_idx(env
, core_mmu_idx
);
11791 dc
->current_el
= arm_mmu_idx_to_el(dc
->mmu_idx
);
11792 #if !defined(CONFIG_USER_ONLY)
11793 dc
->user
= (dc
->current_el
== 0);
11795 dc
->ns
= FIELD_EX32(tb_flags
, TBFLAG_A32
, NS
);
11796 dc
->fp_excp_el
= FIELD_EX32(tb_flags
, TBFLAG_ANY
, FPEXC_EL
);
11797 dc
->vfp_enabled
= FIELD_EX32(tb_flags
, TBFLAG_A32
, VFPEN
);
11798 dc
->vec_len
= FIELD_EX32(tb_flags
, TBFLAG_A32
, VECLEN
);
11799 if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
11800 dc
->c15_cpar
= FIELD_EX32(tb_flags
, TBFLAG_A32
, XSCALE_CPAR
);
11801 dc
->vec_stride
= 0;
11803 dc
->vec_stride
= FIELD_EX32(tb_flags
, TBFLAG_A32
, VECSTRIDE
);
11806 dc
->v7m_handler_mode
= FIELD_EX32(tb_flags
, TBFLAG_A32
, HANDLER
);
11807 dc
->v8m_secure
= arm_feature(env
, ARM_FEATURE_M_SECURITY
) &&
11808 regime_is_secure(env
, dc
->mmu_idx
);
11809 dc
->v8m_stackcheck
= FIELD_EX32(tb_flags
, TBFLAG_A32
, STACKCHECK
);
11810 dc
->v8m_fpccr_s_wrong
= FIELD_EX32(tb_flags
, TBFLAG_A32
, FPCCR_S_WRONG
);
11811 dc
->v7m_new_fp_ctxt_needed
=
11812 FIELD_EX32(tb_flags
, TBFLAG_A32
, NEW_FP_CTXT_NEEDED
);
11813 dc
->v7m_lspact
= FIELD_EX32(tb_flags
, TBFLAG_A32
, LSPACT
);
11814 dc
->cp_regs
= cpu
->cp_regs
;
11815 dc
->features
= env
->features
;
11817 /* Single step state. The code-generation logic here is:
11819 * generate code with no special handling for single-stepping (except
11820 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11821 * this happens anyway because those changes are all system register or
11823 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11824 * emit code for one insn
11825 * emit code to clear PSTATE.SS
11826 * emit code to generate software step exception for completed step
11827 * end TB (as usual for having generated an exception)
11828 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11829 * emit code to generate a software step exception
11832 dc
->ss_active
= FIELD_EX32(tb_flags
, TBFLAG_ANY
, SS_ACTIVE
);
11833 dc
->pstate_ss
= FIELD_EX32(tb_flags
, TBFLAG_ANY
, PSTATE_SS
);
11834 dc
->is_ldex
= false;
11835 if (!arm_feature(env
, ARM_FEATURE_M
)) {
11836 dc
->debug_target_el
= FIELD_EX32(tb_flags
, TBFLAG_ANY
, DEBUG_TARGET_EL
);
11839 dc
->page_start
= dc
->base
.pc_first
& TARGET_PAGE_MASK
;
11841 /* If architectural single step active, limit to 1. */
11842 if (is_singlestepping(dc
)) {
11843 dc
->base
.max_insns
= 1;
11846 /* ARM is a fixed-length ISA. Bound the number of insns to execute
11847 to those left on the page. */
11849 int bound
= -(dc
->base
.pc_first
| TARGET_PAGE_MASK
) / 4;
11850 dc
->base
.max_insns
= MIN(dc
->base
.max_insns
, bound
);
11853 cpu_V0
= tcg_temp_new_i64();
11854 cpu_V1
= tcg_temp_new_i64();
11855 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
11856 cpu_M0
= tcg_temp_new_i64();
static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
}
static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next,
                       (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                       0);
    dc->insn_start = tcg_last_op();
}
static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->base.pc_next);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           to for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing.  */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->base.pc_next += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}
static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page.  */
    if (dc->base.pc_next >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block.  */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_swstep_exception(dc, 0, 0);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}
static void arm_post_translate_insn(DisasContext *dc)
{
    if (dc->condjmp && !dc->base.is_jmp) {
        gen_set_label(dc->condlabel);
        dc->condjmp = 0;
    }
    translator_loop_temp_check(&dc->base);
}
static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    unsigned int insn;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    dc->pc_curr = dc->base.pc_next;
    insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
    dc->insn = insn;
    dc->base.pc_next += 4;
    disas_arm_insn(dc, insn);

    arm_post_translate_insn(dc);

    /* ARM is a fixed-length ISA.  We performed the cross-page check
       in init_disas_context by adjusting max_insns.  */
}
static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
{
    /* Return true if this Thumb insn is always unconditional,
     * even inside an IT block. This is true of only a very few
     * instructions: BKPT, HLT, and SG.
     *
     * A larger class of instructions are UNPREDICTABLE if used
     * inside an IT block; we do not need to detect those here, because
     * what we do by default (perform the cc check and update the IT
     * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
     * choice for those situations.
     *
     * insn is either a 16-bit or a 32-bit instruction; the two are
     * distinguishable because for the 16-bit case the top 16 bits
     * are zeroes, and that isn't a valid 32-bit encoding.
     */
    if ((insn & 0xffffff00) == 0xbe00) {
        /* BKPT */
        return true;
    }

    if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_M)) {
        /* HLT: v8A only. This is unconditional even when it is going to
         * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
         * For v7 cores this was a plain old undefined encoding and so
         * honours its cc check. (We might be using the encoding as
         * a semihosting trap, but we don't change the cc check behaviour
         * on that account, because a debugger connected to a real v7A
         * core and emulating semihosting traps by catching the UNDEF
         * exception would also only see cases where the cc check passed.
         * No guest code should be trying to do a HLT semihosting trap
         * in an IT block anyway.
         */
        return true;
    }

    if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* SG: v8M only */
        return true;
    }

    return false;
}
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    dc->pc_curr = dc->base.pc_next;
    insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
    dc->base.pc_next += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->base.pc_next += 2;
    }
    dc->insn = insn;

    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        /*
         * Conditionally skip the insn. Note that both 0xe and 0xf mean
         * "always"; 0xf is not "never".
         */
        if (cond < 0x0e) {
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition.  */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }
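    /*
     * Note on the IT-state advance above: bit 4 of condexec_mask supplies
     * the low bit of the next condition, the mask shifts left by one bit per
     * insn executed in the block, and once the mask reaches zero the IT
     * block is finished and the condition field is reset to zero (it is
     * ignored while the mask is zero).
     */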
    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch(dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->base.pc_next);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->base.pc_next);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->base.pc_next);
        }
    }
}
static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc = { };
    const TranslatorOps *ops = &arm_translator_ops;

    if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
        ops = &thumb_translator_ops;
    }
#ifdef TARGET_AARCH64
    if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
        ops = &aarch64_translator_ops;
    }
#endif

    translator_loop(ops, &dc.base, cpu, tb, max_insns);
}
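/*
 * Note: the data[] words consumed by restore_state_to_opc() below correspond
 * to the values recorded per guest insn by arm_tr_insn_start(): data[0] is
 * the PC, data[1] the packed condexec (IT) bits (always zero for AArch64),
 * and data[2] the load/store syndrome information, which starts out as zero
 * and may be updated while the insn is being translated.
 */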
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}