target/arm: Convert Neon 3-reg-same comparisons to decodetree
[qemu/ar7.git] / target/arm/translate.c
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "qemu/log.h"
30 #include "qemu/bitops.h"
31 #include "arm_ldst.h"
32 #include "hw/semihosting/semihost.h"
34 #include "exec/helper-proto.h"
35 #include "exec/helper-gen.h"
37 #include "trace-tcg.h"
38 #include "exec/log.h"
41 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
43 /* currently all emulated v5 cores are also v5TE, so don't bother */
44 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
45 #define ENABLE_ARCH_5J dc_isar_feature(aa32_jazelle, s)
46 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
52 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
54 #include "translate.h"
56 #if defined(CONFIG_USER_ONLY)
57 #define IS_USER(s) 1
58 #else
59 #define IS_USER(s) (s->user)
60 #endif
62 /* We reuse the same 64-bit temporaries for efficiency. */
63 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
64 static TCGv_i32 cpu_R[16];
65 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66 TCGv_i64 cpu_exclusive_addr;
67 TCGv_i64 cpu_exclusive_val;
69 #include "exec/gen-icount.h"
71 static const char * const regnames[] =
72 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
73 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
75 /* Function prototypes for gen_ functions calling Neon helpers. */
76 typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
77 TCGv_i32, TCGv_i32);
78 /* Function prototypes for gen_ functions for fix point conversions */
79 typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
81 /* initialize TCG globals. */
82 void arm_translate_init(void)
84 int i;
86 for (i = 0; i < 16; i++) {
87 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
88 offsetof(CPUARMState, regs[i]),
89 regnames[i]);
91 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
92 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
93 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
94 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
96 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
97 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
98 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
99 offsetof(CPUARMState, exclusive_val), "exclusive_val");
101 a64_translate_init();
104 /* Flags for the disas_set_da_iss info argument:
105 * lower bits hold the Rt register number, higher bits are flags.
107 typedef enum ISSInfo {
108 ISSNone = 0,
109 ISSRegMask = 0x1f,
110 ISSInvalid = (1 << 5),
111 ISSIsAcqRel = (1 << 6),
112 ISSIsWrite = (1 << 7),
113 ISSIs16Bit = (1 << 8),
114 } ISSInfo;
116 /* Save the syndrome information for a Data Abort */
117 static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo)
119 uint32_t syn;
120 int sas = memop & MO_SIZE;
121 bool sse = memop & MO_SIGN;
122 bool is_acqrel = issinfo & ISSIsAcqRel;
123 bool is_write = issinfo & ISSIsWrite;
124 bool is_16bit = issinfo & ISSIs16Bit;
125 int srt = issinfo & ISSRegMask;
127 if (issinfo & ISSInvalid) {
128 /* Some callsites want to conditionally provide ISS info,
129 * eg "only if this was not a writeback"
131 return;
134 if (srt == 15) {
135 /* For AArch32, insns where the src/dest is R15 never generate
136 * ISS information. Catching that here saves checking at all
137 * the call sites.
139 return;
142 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
143 0, 0, 0, is_write, 0, is_16bit);
144 disas_set_insn_syndrome(s, syn);
147 static inline int get_a32_user_mem_index(DisasContext *s)
149 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
150 * insns:
151 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
152 * otherwise, access as if at PL0.
154 switch (s->mmu_idx) {
155 case ARMMMUIdx_E2: /* this one is UNPREDICTABLE */
156 case ARMMMUIdx_E10_0:
157 case ARMMMUIdx_E10_1:
158 case ARMMMUIdx_E10_1_PAN:
159 return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
160 case ARMMMUIdx_SE3:
161 case ARMMMUIdx_SE10_0:
162 case ARMMMUIdx_SE10_1:
163 case ARMMMUIdx_SE10_1_PAN:
164 return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0);
165 case ARMMMUIdx_MUser:
166 case ARMMMUIdx_MPriv:
167 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
168 case ARMMMUIdx_MUserNegPri:
169 case ARMMMUIdx_MPrivNegPri:
170 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
171 case ARMMMUIdx_MSUser:
172 case ARMMMUIdx_MSPriv:
173 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
174 case ARMMMUIdx_MSUserNegPri:
175 case ARMMMUIdx_MSPrivNegPri:
176 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
177 default:
178 g_assert_not_reached();
182 static inline TCGv_i32 load_cpu_offset(int offset)
184 TCGv_i32 tmp = tcg_temp_new_i32();
185 tcg_gen_ld_i32(tmp, cpu_env, offset);
186 return tmp;
189 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
191 static inline void store_cpu_offset(TCGv_i32 var, int offset)
193 tcg_gen_st_i32(var, cpu_env, offset);
194 tcg_temp_free_i32(var);
197 #define store_cpu_field(var, name) \
198 store_cpu_offset(var, offsetof(CPUARMState, name))
200 /* The architectural value of PC. */
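/* (Reads of R15 yield the address of the current insn plus 8 in ARM state, plus 4 in Thumb state.) */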
201 static uint32_t read_pc(DisasContext *s)
203 return s->pc_curr + (s->thumb ? 4 : 8);
206 /* Set a variable to the value of a CPU register. */
207 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
209 if (reg == 15) {
210 tcg_gen_movi_i32(var, read_pc(s));
211 } else {
212 tcg_gen_mov_i32(var, cpu_R[reg]);
216 /* Create a new temporary and set it to the value of a CPU register. */
217 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
219 TCGv_i32 tmp = tcg_temp_new_i32();
220 load_reg_var(s, tmp, reg);
221 return tmp;
225 * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
226 * This is used for load/store for which use of PC implies (literal),
227 * or ADD that implies ADR.
229 static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
231 TCGv_i32 tmp = tcg_temp_new_i32();
233 if (reg == 15) {
234 tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
235 } else {
236 tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
238 return tmp;
241 /* Set a CPU register. The source must be a temporary and will be
242 marked as dead. */
243 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
245 if (reg == 15) {
246 /* In Thumb mode, we must ignore bit 0.
247 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
248 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
249 * We choose to ignore [1:0] in ARM mode for all architecture versions.
251 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
252 s->base.is_jmp = DISAS_JUMP;
254 tcg_gen_mov_i32(cpu_R[reg], var);
255 tcg_temp_free_i32(var);
259 * Variant of store_reg which applies v8M stack-limit checks before updating
260 * SP. If the check fails this will result in an exception being taken.
261 * We disable the stack checks for CONFIG_USER_ONLY because we have
262 * no idea what the stack limits should be in that case.
263 * If stack checking is not being done this just acts like store_reg().
265 static void store_sp_checked(DisasContext *s, TCGv_i32 var)
267 #ifndef CONFIG_USER_ONLY
268 if (s->v8m_stackcheck) {
269 gen_helper_v8m_stackcheck(cpu_env, var);
271 #endif
272 store_reg(s, 13, var);
275 /* Value extensions. */
276 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
277 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
278 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
279 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
281 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
282 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
285 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
287 TCGv_i32 tmp_mask = tcg_const_i32(mask);
288 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
289 tcg_temp_free_i32(tmp_mask);
291 /* Set NZCV flags from the high 4 bits of var. */
292 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
294 static void gen_exception_internal(int excp)
296 TCGv_i32 tcg_excp = tcg_const_i32(excp);
298 assert(excp_is_internal(excp));
299 gen_helper_exception_internal(cpu_env, tcg_excp);
300 tcg_temp_free_i32(tcg_excp);
303 static void gen_step_complete_exception(DisasContext *s)
305 /* We just completed step of an insn. Move from Active-not-pending
306 * to Active-pending, and then also take the swstep exception.
307 * This corresponds to making the (IMPDEF) choice to prioritize
308 * swstep exceptions over asynchronous exceptions taken to an exception
309 * level where debug is disabled. This choice has the advantage that
310 * we do not need to maintain internal state corresponding to the
311 * ISV/EX syndrome bits between completion of the step and generation
312 * of the exception, and our syndrome information is always correct.
314 gen_ss_advance(s);
315 gen_swstep_exception(s, 1, s->is_ldex);
316 s->base.is_jmp = DISAS_NORETURN;
319 static void gen_singlestep_exception(DisasContext *s)
321 /* Generate the right kind of exception for singlestep, which is
322 * either the architectural singlestep or EXCP_DEBUG for QEMU's
323 * gdb singlestepping.
325 if (s->ss_active) {
326 gen_step_complete_exception(s);
327 } else {
328 gen_exception_internal(EXCP_DEBUG);
332 static inline bool is_singlestepping(DisasContext *s)
334 /* Return true if we are singlestepping either because of
335 * architectural singlestep or QEMU gdbstub singlestep. This does
336 * not include the command line '-singlestep' mode which is rather
337 * misnamed as it only means "one instruction per TB" and doesn't
338 * affect the code we generate.
340 return s->base.singlestep_enabled || s->ss_active;
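/* Dual signed 16x16->32 multiply: on return, a = (a.lo * b.lo) and b = (a.hi * b.hi); both inputs are overwritten. */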
343 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
345 TCGv_i32 tmp1 = tcg_temp_new_i32();
346 TCGv_i32 tmp2 = tcg_temp_new_i32();
347 tcg_gen_ext16s_i32(tmp1, a);
348 tcg_gen_ext16s_i32(tmp2, b);
349 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
350 tcg_temp_free_i32(tmp2);
351 tcg_gen_sari_i32(a, a, 16);
352 tcg_gen_sari_i32(b, b, 16);
353 tcg_gen_mul_i32(b, b, a);
354 tcg_gen_mov_i32(a, tmp1);
355 tcg_temp_free_i32(tmp1);
358 /* Byteswap each halfword. */
359 static void gen_rev16(TCGv_i32 dest, TCGv_i32 var)
361 TCGv_i32 tmp = tcg_temp_new_i32();
362 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
363 tcg_gen_shri_i32(tmp, var, 8);
364 tcg_gen_and_i32(tmp, tmp, mask);
365 tcg_gen_and_i32(var, var, mask);
366 tcg_gen_shli_i32(var, var, 8);
367 tcg_gen_or_i32(dest, var, tmp);
368 tcg_temp_free_i32(mask);
369 tcg_temp_free_i32(tmp);
372 /* Byteswap low halfword and sign extend. */
373 static void gen_revsh(TCGv_i32 dest, TCGv_i32 var)
375 tcg_gen_ext16u_i32(var, var);
376 tcg_gen_bswap16_i32(var, var);
377 tcg_gen_ext16s_i32(dest, var);
380 /* 32x32->64 multiply. Marks inputs as dead. */
381 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
383 TCGv_i32 lo = tcg_temp_new_i32();
384 TCGv_i32 hi = tcg_temp_new_i32();
385 TCGv_i64 ret;
387 tcg_gen_mulu2_i32(lo, hi, a, b);
388 tcg_temp_free_i32(a);
389 tcg_temp_free_i32(b);
391 ret = tcg_temp_new_i64();
392 tcg_gen_concat_i32_i64(ret, lo, hi);
393 tcg_temp_free_i32(lo);
394 tcg_temp_free_i32(hi);
396 return ret;
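/* 32x32->64 signed multiply; like gen_mulu_i64_i32, both inputs are marked as dead. */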
399 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
401 TCGv_i32 lo = tcg_temp_new_i32();
402 TCGv_i32 hi = tcg_temp_new_i32();
403 TCGv_i64 ret;
405 tcg_gen_muls2_i32(lo, hi, a, b);
406 tcg_temp_free_i32(a);
407 tcg_temp_free_i32(b);
409 ret = tcg_temp_new_i64();
410 tcg_gen_concat_i32_i64(ret, lo, hi);
411 tcg_temp_free_i32(lo);
412 tcg_temp_free_i32(hi);
414 return ret;
417 /* Swap low and high halfwords. */
418 static void gen_swap_half(TCGv_i32 var)
420 tcg_gen_rotri_i32(var, var, 16);
423 /* Dual 16-bit add.  Result placed in dest; t0 and t1 are clobbered.  Operation:
424 tmp = (t0 ^ t1) & 0x8000;
425 t0 &= ~0x8000;
426 t1 &= ~0x8000;
427 t0 = (t0 + t1) ^ tmp;
430 static void gen_add16(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
432 TCGv_i32 tmp = tcg_temp_new_i32();
433 tcg_gen_xor_i32(tmp, t0, t1);
434 tcg_gen_andi_i32(tmp, tmp, 0x8000);
435 tcg_gen_andi_i32(t0, t0, ~0x8000);
436 tcg_gen_andi_i32(t1, t1, ~0x8000);
437 tcg_gen_add_i32(t0, t0, t1);
438 tcg_gen_xor_i32(dest, t0, tmp);
439 tcg_temp_free_i32(tmp);
442 /* Set N and Z flags from var. */
443 static inline void gen_logic_CC(TCGv_i32 var)
445 tcg_gen_mov_i32(cpu_NF, var);
446 tcg_gen_mov_i32(cpu_ZF, var);
449 /* dest = T0 + T1 + CF. */
450 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
452 tcg_gen_add_i32(dest, t0, t1);
453 tcg_gen_add_i32(dest, dest, cpu_CF);
456 /* dest = T0 - T1 + CF - 1. */
457 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
459 tcg_gen_sub_i32(dest, t0, t1);
460 tcg_gen_add_i32(dest, dest, cpu_CF);
461 tcg_gen_subi_i32(dest, dest, 1);
464 /* dest = T0 + T1. Compute C, N, V and Z flags */
465 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
467 TCGv_i32 tmp = tcg_temp_new_i32();
468 tcg_gen_movi_i32(tmp, 0);
469 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
470 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
471 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
472 tcg_gen_xor_i32(tmp, t0, t1);
473 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
474 tcg_temp_free_i32(tmp);
475 tcg_gen_mov_i32(dest, cpu_NF);
478 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
479 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
481 TCGv_i32 tmp = tcg_temp_new_i32();
482 if (TCG_TARGET_HAS_add2_i32) {
483 tcg_gen_movi_i32(tmp, 0);
484 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
485 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
486 } else {
487 TCGv_i64 q0 = tcg_temp_new_i64();
488 TCGv_i64 q1 = tcg_temp_new_i64();
489 tcg_gen_extu_i32_i64(q0, t0);
490 tcg_gen_extu_i32_i64(q1, t1);
491 tcg_gen_add_i64(q0, q0, q1);
492 tcg_gen_extu_i32_i64(q1, cpu_CF);
493 tcg_gen_add_i64(q0, q0, q1);
494 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
495 tcg_temp_free_i64(q0);
496 tcg_temp_free_i64(q1);
498 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
499 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
500 tcg_gen_xor_i32(tmp, t0, t1);
501 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
502 tcg_temp_free_i32(tmp);
503 tcg_gen_mov_i32(dest, cpu_NF);
506 /* dest = T0 - T1. Compute C, N, V and Z flags */
507 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
509 TCGv_i32 tmp;
510 tcg_gen_sub_i32(cpu_NF, t0, t1);
511 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
512 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
513 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
514 tmp = tcg_temp_new_i32();
515 tcg_gen_xor_i32(tmp, t0, t1);
516 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
517 tcg_temp_free_i32(tmp);
518 tcg_gen_mov_i32(dest, cpu_NF);
521 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
522 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
524 TCGv_i32 tmp = tcg_temp_new_i32();
525 tcg_gen_not_i32(tmp, t1);
526 gen_adc_CC(dest, t0, tmp);
527 tcg_temp_free_i32(tmp);
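/* Variable shift by register for LSL/LSR: only the low byte of t1 is used as the shift amount, and amounts of 32 or more produce zero, matching the ARM shift-by-register semantics. */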
530 #define GEN_SHIFT(name) \
531 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
533 TCGv_i32 tmp1, tmp2, tmp3; \
534 tmp1 = tcg_temp_new_i32(); \
535 tcg_gen_andi_i32(tmp1, t1, 0xff); \
536 tmp2 = tcg_const_i32(0); \
537 tmp3 = tcg_const_i32(0x1f); \
538 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
539 tcg_temp_free_i32(tmp3); \
540 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
541 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
542 tcg_temp_free_i32(tmp2); \
543 tcg_temp_free_i32(tmp1); \
545 GEN_SHIFT(shl)
546 GEN_SHIFT(shr)
547 #undef GEN_SHIFT
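/* Variable ASR by register: only the low byte of t1 is used, and shift amounts of 32 or more are clamped to 31, so the result is a copy of the sign bit. */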
549 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
551 TCGv_i32 tmp1, tmp2;
552 tmp1 = tcg_temp_new_i32();
553 tcg_gen_andi_i32(tmp1, t1, 0xff);
554 tmp2 = tcg_const_i32(0x1f);
555 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
556 tcg_temp_free_i32(tmp2);
557 tcg_gen_sar_i32(dest, t0, tmp1);
558 tcg_temp_free_i32(tmp1);
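/* Set the carry flag to bit SHIFT of var (the shifter carry-out). */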
561 static void shifter_out_im(TCGv_i32 var, int shift)
563 tcg_gen_extract_i32(cpu_CF, var, shift, 1);
566 /* Shift by immediate. Includes special handling for shift == 0. */
567 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
568 int shift, int flags)
570 switch (shiftop) {
571 case 0: /* LSL */
572 if (shift != 0) {
573 if (flags)
574 shifter_out_im(var, 32 - shift);
575 tcg_gen_shli_i32(var, var, shift);
577 break;
578 case 1: /* LSR */
579 if (shift == 0) {
580 if (flags) {
581 tcg_gen_shri_i32(cpu_CF, var, 31);
583 tcg_gen_movi_i32(var, 0);
584 } else {
585 if (flags)
586 shifter_out_im(var, shift - 1);
587 tcg_gen_shri_i32(var, var, shift);
589 break;
590 case 2: /* ASR */
591 if (shift == 0)
592 shift = 32;
593 if (flags)
594 shifter_out_im(var, shift - 1);
595 if (shift == 32)
596 shift = 31;
597 tcg_gen_sari_i32(var, var, shift);
598 break;
599 case 3: /* ROR/RRX */
600 if (shift != 0) {
601 if (flags)
602 shifter_out_im(var, shift - 1);
603 tcg_gen_rotri_i32(var, var, shift); break;
604 } else {
605 TCGv_i32 tmp = tcg_temp_new_i32();
606 tcg_gen_shli_i32(tmp, cpu_CF, 31);
607 if (flags)
608 shifter_out_im(var, 0);
609 tcg_gen_shri_i32(var, var, 1);
610 tcg_gen_or_i32(var, var, tmp);
611 tcg_temp_free_i32(tmp);
616 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
617 TCGv_i32 shift, int flags)
619 if (flags) {
620 switch (shiftop) {
621 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
622 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
623 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
624 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
626 } else {
627 switch (shiftop) {
628 case 0:
629 gen_shl(var, var, shift);
630 break;
631 case 1:
632 gen_shr(var, var, shift);
633 break;
634 case 2:
635 gen_sar(var, var, shift);
636 break;
637 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
638 tcg_gen_rotr_i32(var, var, shift); break;
641 tcg_temp_free_i32(shift);
645 * Generate a conditional based on ARM condition code cc.
646 * This is common between ARM and AArch64 targets.
648 void arm_test_cc(DisasCompare *cmp, int cc)
650 TCGv_i32 value;
651 TCGCond cond;
652 bool global = true;
654 switch (cc) {
655 case 0: /* eq: Z */
656 case 1: /* ne: !Z */
657 cond = TCG_COND_EQ;
658 value = cpu_ZF;
659 break;
661 case 2: /* cs: C */
662 case 3: /* cc: !C */
663 cond = TCG_COND_NE;
664 value = cpu_CF;
665 break;
667 case 4: /* mi: N */
668 case 5: /* pl: !N */
669 cond = TCG_COND_LT;
670 value = cpu_NF;
671 break;
673 case 6: /* vs: V */
674 case 7: /* vc: !V */
675 cond = TCG_COND_LT;
676 value = cpu_VF;
677 break;
679 case 8: /* hi: C && !Z */
680 case 9: /* ls: !C || Z -> !(C && !Z) */
681 cond = TCG_COND_NE;
682 value = tcg_temp_new_i32();
683 global = false;
684 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
685 ZF is non-zero for !Z; so AND the two subexpressions. */
686 tcg_gen_neg_i32(value, cpu_CF);
687 tcg_gen_and_i32(value, value, cpu_ZF);
688 break;
690 case 10: /* ge: N == V -> N ^ V == 0 */
691 case 11: /* lt: N != V -> N ^ V != 0 */
692 /* Since we're only interested in the sign bit, == 0 is >= 0. */
693 cond = TCG_COND_GE;
694 value = tcg_temp_new_i32();
695 global = false;
696 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
697 break;
699 case 12: /* gt: !Z && N == V */
700 case 13: /* le: Z || N != V */
701 cond = TCG_COND_NE;
702 value = tcg_temp_new_i32();
703 global = false;
704 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
705 * the sign bit then AND with ZF to yield the result. */
706 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
707 tcg_gen_sari_i32(value, value, 31);
708 tcg_gen_andc_i32(value, cpu_ZF, value);
709 break;
711 case 14: /* always */
712 case 15: /* always */
713 /* Use the ALWAYS condition, which will fold early.
714 * It doesn't matter what we use for the value. */
715 cond = TCG_COND_ALWAYS;
716 value = cpu_ZF;
717 goto no_invert;
719 default:
720 fprintf(stderr, "Bad condition code 0x%x\n", cc);
721 abort();
724 if (cc & 1) {
725 cond = tcg_invert_cond(cond);
728 no_invert:
729 cmp->cond = cond;
730 cmp->value = value;
731 cmp->value_global = global;
734 void arm_free_cc(DisasCompare *cmp)
736 if (!cmp->value_global) {
737 tcg_temp_free_i32(cmp->value);
741 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
743 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
746 void arm_gen_test_cc(int cc, TCGLabel *label)
748 DisasCompare cmp;
749 arm_test_cc(&cmp, cc);
750 arm_jump_cc(&cmp, label);
751 arm_free_cc(&cmp);
754 static inline void gen_set_condexec(DisasContext *s)
756 if (s->condexec_mask) {
757 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
758 TCGv_i32 tmp = tcg_temp_new_i32();
759 tcg_gen_movi_i32(tmp, val);
760 store_cpu_field(tmp, condexec_bits);
764 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
766 tcg_gen_movi_i32(cpu_R[15], val);
769 /* Set PC and Thumb state from var. var is marked as dead. */
770 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
772 s->base.is_jmp = DISAS_JUMP;
773 tcg_gen_andi_i32(cpu_R[15], var, ~1);
774 tcg_gen_andi_i32(var, var, 1);
775 store_cpu_field(var, thumb);
779 * Set PC and Thumb state from var. var is marked as dead.
780 * For M-profile CPUs, include logic to detect exception-return
781 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
782 * and BX reg, and no others, and happens only for code in Handler mode.
783 * The Security Extension also requires us to check for the FNC_RETURN
784 * which signals a function return from non-secure state; this can happen
785 * in both Handler and Thread mode.
786 * To avoid having to do multiple comparisons in inline generated code,
787 * we make the check we do here loose, so it will match for EXC_RETURN
788 * in Thread mode. For system emulation do_v7m_exception_exit() checks
789 * for these spurious cases and returns without doing anything (giving
790 * the same behaviour as for a branch to a non-magic address).
792 * In linux-user mode it is unclear what the right behaviour for an
793 * attempted FNC_RETURN should be, because in real hardware this will go
794 * directly to Secure code (ie not the Linux kernel) which will then treat
795 * the error in any way it chooses. For QEMU we opt to make the FNC_RETURN
796 * attempt behave the way it would on a CPU without the security extension,
797 * which is to say "like a normal branch". That means we can simply treat
798 * all branches as normal with no magic address behaviour.
800 static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
802 /* Generate the same code here as for a simple bx, but flag via
803 * s->base.is_jmp that we need to do the rest of the work later.
805 gen_bx(s, var);
806 #ifndef CONFIG_USER_ONLY
807 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
808 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
809 s->base.is_jmp = DISAS_BX_EXCRET;
811 #endif
814 static inline void gen_bx_excret_final_code(DisasContext *s)
816 /* Generate the code to finish possible exception return and end the TB */
817 TCGLabel *excret_label = gen_new_label();
818 uint32_t min_magic;
820 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
821 /* Covers FNC_RETURN and EXC_RETURN magic */
822 min_magic = FNC_RETURN_MIN_MAGIC;
823 } else {
824 /* EXC_RETURN magic only */
825 min_magic = EXC_RETURN_MIN_MAGIC;
828 /* Is the new PC value in the magic range indicating exception return? */
829 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
830 /* No: end the TB as we would for a DISAS_JMP */
831 if (is_singlestepping(s)) {
832 gen_singlestep_exception(s);
833 } else {
834 tcg_gen_exit_tb(NULL, 0);
836 gen_set_label(excret_label);
837 /* Yes: this is an exception return.
838 * At this point in runtime env->regs[15] and env->thumb will hold
839 * the exception-return magic number, which do_v7m_exception_exit()
840 * will read. Nothing else will be able to see those values because
841 * the cpu-exec main loop guarantees that we will always go straight
842 * from raising the exception to the exception-handling code.
844 * gen_ss_advance(s) does nothing on M profile currently but
845 * calling it is conceptually the right thing as we have executed
846 * this instruction (compare SWI, HVC, SMC handling).
848 gen_ss_advance(s);
849 gen_exception_internal(EXCP_EXCEPTION_EXIT);
852 static inline void gen_bxns(DisasContext *s, int rm)
854 TCGv_i32 var = load_reg(s, rm);
856 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
857 * we need to sync state before calling it, but:
858 * - we don't need to do gen_set_pc_im() because the bxns helper will
859 * always set the PC itself
860 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
861 * unless it's outside an IT block or the last insn in an IT block,
862 * so we know that condexec == 0 (already set at the top of the TB)
863 * is correct in the non-UNPREDICTABLE cases, and we can choose
864 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
866 gen_helper_v7m_bxns(cpu_env, var);
867 tcg_temp_free_i32(var);
868 s->base.is_jmp = DISAS_EXIT;
871 static inline void gen_blxns(DisasContext *s, int rm)
873 TCGv_i32 var = load_reg(s, rm);
875 /* We don't need to sync condexec state, for the same reason as bxns.
876 * We do however need to set the PC, because the blxns helper reads it.
877 * The blxns helper may throw an exception.
879 gen_set_pc_im(s, s->base.pc_next);
880 gen_helper_v7m_blxns(cpu_env, var);
881 tcg_temp_free_i32(var);
882 s->base.is_jmp = DISAS_EXIT;
885 /* Variant of store_reg which uses branch&exchange logic when storing
886 to r15 in ARM architecture v7 and above. The source must be a temporary
887 and will be marked as dead. */
888 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
890 if (reg == 15 && ENABLE_ARCH_7) {
891 gen_bx(s, var);
892 } else {
893 store_reg(s, reg, var);
897 /* Variant of store_reg which uses branch&exchange logic when storing
898 * to r15 in ARM architecture v5T and above. This is used for storing
899 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
900 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
901 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
903 if (reg == 15 && ENABLE_ARCH_5) {
904 gen_bx_excret(s, var);
905 } else {
906 store_reg(s, reg, var);
910 #ifdef CONFIG_USER_ONLY
911 #define IS_USER_ONLY 1
912 #else
913 #define IS_USER_ONLY 0
914 #endif
916 /* Abstractions of "generate code to do a guest load/store for
917 * AArch32", where a vaddr is always 32 bits (and is zero
918 * extended if we're a 64 bit core) and data is also
919 * 32 bits unless specifically doing a 64 bit access.
920 * These functions work like tcg_gen_qemu_{ld,st}* except
921 * that the address argument is TCGv_i32 rather than TCGv.
924 static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
926 TCGv addr = tcg_temp_new();
927 tcg_gen_extu_i32_tl(addr, a32);
929 /* Not needed for user-mode BE32, where we use MO_BE instead. */
930 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
931 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
933 return addr;
936 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
937 int index, MemOp opc)
939 TCGv addr;
941 if (arm_dc_feature(s, ARM_FEATURE_M) &&
942 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
943 opc |= MO_ALIGN;
946 addr = gen_aa32_addr(s, a32, opc);
947 tcg_gen_qemu_ld_i32(val, addr, index, opc);
948 tcg_temp_free(addr);
951 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
952 int index, MemOp opc)
954 TCGv addr;
956 if (arm_dc_feature(s, ARM_FEATURE_M) &&
957 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
958 opc |= MO_ALIGN;
961 addr = gen_aa32_addr(s, a32, opc);
962 tcg_gen_qemu_st_i32(val, addr, index, opc);
963 tcg_temp_free(addr);
966 #define DO_GEN_LD(SUFF, OPC) \
967 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
968 TCGv_i32 a32, int index) \
970 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
973 #define DO_GEN_ST(SUFF, OPC) \
974 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
975 TCGv_i32 a32, int index) \
977 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
980 static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
982 /* Not needed for user-mode BE32, where we use MO_BE instead. */
983 if (!IS_USER_ONLY && s->sctlr_b) {
984 tcg_gen_rotri_i64(val, val, 32);
988 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
989 int index, MemOp opc)
991 TCGv addr = gen_aa32_addr(s, a32, opc);
992 tcg_gen_qemu_ld_i64(val, addr, index, opc);
993 gen_aa32_frob64(s, val);
994 tcg_temp_free(addr);
997 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
998 TCGv_i32 a32, int index)
1000 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1003 static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1004 int index, MemOp opc)
1006 TCGv addr = gen_aa32_addr(s, a32, opc);
1008 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1009 if (!IS_USER_ONLY && s->sctlr_b) {
1010 TCGv_i64 tmp = tcg_temp_new_i64();
1011 tcg_gen_rotri_i64(tmp, val, 32);
1012 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1013 tcg_temp_free_i64(tmp);
1014 } else {
1015 tcg_gen_qemu_st_i64(val, addr, index, opc);
1017 tcg_temp_free(addr);
1020 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1021 TCGv_i32 a32, int index)
1023 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1026 DO_GEN_LD(8u, MO_UB)
1027 DO_GEN_LD(16u, MO_UW)
1028 DO_GEN_LD(32u, MO_UL)
1029 DO_GEN_ST(8, MO_UB)
1030 DO_GEN_ST(16, MO_UW)
1031 DO_GEN_ST(32, MO_UL)
1033 static inline void gen_hvc(DisasContext *s, int imm16)
1035 /* The pre HVC helper handles cases when HVC gets trapped
1036 * as an undefined insn by runtime configuration (ie before
1037 * the insn really executes).
1039 gen_set_pc_im(s, s->pc_curr);
1040 gen_helper_pre_hvc(cpu_env);
1041 /* Otherwise we will treat this as a real exception which
1042 * happens after execution of the insn. (The distinction matters
1043 * for the PC value reported to the exception handler and also
1044 * for single stepping.)
1046 s->svc_imm = imm16;
1047 gen_set_pc_im(s, s->base.pc_next);
1048 s->base.is_jmp = DISAS_HVC;
1051 static inline void gen_smc(DisasContext *s)
1053 /* As with HVC, we may take an exception either before or after
1054 * the insn executes.
1056 TCGv_i32 tmp;
1058 gen_set_pc_im(s, s->pc_curr);
1059 tmp = tcg_const_i32(syn_aa32_smc());
1060 gen_helper_pre_smc(cpu_env, tmp);
1061 tcg_temp_free_i32(tmp);
1062 gen_set_pc_im(s, s->base.pc_next);
1063 s->base.is_jmp = DISAS_SMC;
1066 static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
1068 gen_set_condexec(s);
1069 gen_set_pc_im(s, pc);
1070 gen_exception_internal(excp);
1071 s->base.is_jmp = DISAS_NORETURN;
1074 static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
1075 int syn, uint32_t target_el)
1077 gen_set_condexec(s);
1078 gen_set_pc_im(s, pc);
1079 gen_exception(excp, syn, target_el);
1080 s->base.is_jmp = DISAS_NORETURN;
1083 static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
1085 TCGv_i32 tcg_syn;
1087 gen_set_condexec(s);
1088 gen_set_pc_im(s, s->pc_curr);
1089 tcg_syn = tcg_const_i32(syn);
1090 gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
1091 tcg_temp_free_i32(tcg_syn);
1092 s->base.is_jmp = DISAS_NORETURN;
1095 static void unallocated_encoding(DisasContext *s)
1097 /* Unallocated and reserved encodings are uncategorized */
1098 gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
1099 default_exception_el(s));
1102 /* Force a TB lookup after an instruction that changes the CPU state. */
1103 static inline void gen_lookup_tb(DisasContext *s)
1105 tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
1106 s->base.is_jmp = DISAS_EXIT;
1109 static inline void gen_hlt(DisasContext *s, int imm)
1111 /* HLT. This has two purposes.
1112 * Architecturally, it is an external halting debug instruction.
1113 * Since QEMU doesn't implement external debug, we treat it as
1114 * the architecture requires when halting debug is disabled: it will UNDEF.
1115 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1116 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1117 * must trigger semihosting even for ARMv7 and earlier, where
1118 * HLT was an undefined encoding.
1119 * In system mode, we don't allow userspace access to
1120 * semihosting, to provide some semblance of security
1121 * (and for consistency with our 32-bit semihosting).
1123 if (semihosting_enabled() &&
1124 #ifndef CONFIG_USER_ONLY
1125 s->current_el != 0 &&
1126 #endif
1127 (imm == (s->thumb ? 0x3c : 0xf000))) {
1128 gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
1129 return;
1132 unallocated_encoding(s);
1135 static TCGv_ptr get_fpstatus_ptr(int neon)
1137 TCGv_ptr statusptr = tcg_temp_new_ptr();
1138 int offset;
1139 if (neon) {
1140 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1141 } else {
1142 offset = offsetof(CPUARMState, vfp.fp_status);
1144 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1145 return statusptr;
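/* Return the byte offset within CPUARMState of VFP register REG: a D register if dp is true, otherwise an S register. */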
1148 static inline long vfp_reg_offset(bool dp, unsigned reg)
1150 if (dp) {
1151 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
1152 } else {
1153 long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
1154 if (reg & 1) {
1155 ofs += offsetof(CPU_DoubleU, l.upper);
1156 } else {
1157 ofs += offsetof(CPU_DoubleU, l.lower);
1159 return ofs;
1163 /* Return the offset of a 32-bit piece of a NEON register.
1164 zero is the least significant end of the register. */
1165 static inline long
1166 neon_reg_offset (int reg, int n)
1168 int sreg;
1169 sreg = reg * 2 + n;
1170 return vfp_reg_offset(0, sreg);
1173 /* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
1174 * where 0 is the least significant end of the register.
1176 static inline long
1177 neon_element_offset(int reg, int element, MemOp size)
1179 int element_size = 1 << size;
1180 int ofs = element * element_size;
1181 #ifdef HOST_WORDS_BIGENDIAN
1182 /* Calculate the offset assuming fully little-endian,
1183 * then XOR to account for the order of the 8-byte units.
1185 if (element_size < 8) {
1186 ofs ^= 8 - element_size;
1188 #endif
1189 return neon_reg_offset(reg, 0) + ofs;
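/* Load one 32-bit pass of a Neon register into a new temporary. */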
1192 static TCGv_i32 neon_load_reg(int reg, int pass)
1194 TCGv_i32 tmp = tcg_temp_new_i32();
1195 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1196 return tmp;
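/* Load element ELE of a Neon register into a 32-bit temporary, zero-extending to 32 bits according to MOP. */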
1199 static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop)
1201 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1203 switch (mop) {
1204 case MO_UB:
1205 tcg_gen_ld8u_i32(var, cpu_env, offset);
1206 break;
1207 case MO_UW:
1208 tcg_gen_ld16u_i32(var, cpu_env, offset);
1209 break;
1210 case MO_UL:
1211 tcg_gen_ld_i32(var, cpu_env, offset);
1212 break;
1213 default:
1214 g_assert_not_reached();
1218 static void neon_load_element64(TCGv_i64 var, int reg, int ele, MemOp mop)
1220 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1222 switch (mop) {
1223 case MO_UB:
1224 tcg_gen_ld8u_i64(var, cpu_env, offset);
1225 break;
1226 case MO_UW:
1227 tcg_gen_ld16u_i64(var, cpu_env, offset);
1228 break;
1229 case MO_UL:
1230 tcg_gen_ld32u_i64(var, cpu_env, offset);
1231 break;
1232 case MO_Q:
1233 tcg_gen_ld_i64(var, cpu_env, offset);
1234 break;
1235 default:
1236 g_assert_not_reached();
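/* Store a 32-bit pass of a Neon register; the source temporary is freed. */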
1240 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1242 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1243 tcg_temp_free_i32(var);
1246 static void neon_store_element(int reg, int ele, MemOp size, TCGv_i32 var)
1248 long offset = neon_element_offset(reg, ele, size);
1250 switch (size) {
1251 case MO_8:
1252 tcg_gen_st8_i32(var, cpu_env, offset);
1253 break;
1254 case MO_16:
1255 tcg_gen_st16_i32(var, cpu_env, offset);
1256 break;
1257 case MO_32:
1258 tcg_gen_st_i32(var, cpu_env, offset);
1259 break;
1260 default:
1261 g_assert_not_reached();
1265 static void neon_store_element64(int reg, int ele, MemOp size, TCGv_i64 var)
1267 long offset = neon_element_offset(reg, ele, size);
1269 switch (size) {
1270 case MO_8:
1271 tcg_gen_st8_i64(var, cpu_env, offset);
1272 break;
1273 case MO_16:
1274 tcg_gen_st16_i64(var, cpu_env, offset);
1275 break;
1276 case MO_32:
1277 tcg_gen_st32_i64(var, cpu_env, offset);
1278 break;
1279 case MO_64:
1280 tcg_gen_st_i64(var, cpu_env, offset);
1281 break;
1282 default:
1283 g_assert_not_reached();
1287 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1289 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1292 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1294 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1297 static inline void neon_load_reg32(TCGv_i32 var, int reg)
1299 tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
1302 static inline void neon_store_reg32(TCGv_i32 var, int reg)
1304 tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
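/* Return a new TCGv_ptr pointing at VFP register REG within CPUARMState. */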
1307 static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1309 TCGv_ptr ret = tcg_temp_new_ptr();
1310 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1311 return ret;
1314 #define ARM_CP_RW_BIT (1 << 20)
1316 /* Include the VFP and Neon decoders */
1317 #include "translate-vfp.inc.c"
1318 #include "translate-neon.inc.c"
1320 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1322 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1325 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1327 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1330 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1332 TCGv_i32 var = tcg_temp_new_i32();
1333 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1334 return var;
1337 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1339 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1340 tcg_temp_free_i32(var);
1343 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1345 iwmmxt_store_reg(cpu_M0, rn);
1348 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1350 iwmmxt_load_reg(cpu_M0, rn);
1353 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1355 iwmmxt_load_reg(cpu_V1, rn);
1356 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1359 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1361 iwmmxt_load_reg(cpu_V1, rn);
1362 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1365 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1367 iwmmxt_load_reg(cpu_V1, rn);
1368 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
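/* Helper macros for iwMMXt ops: the generated gen_op_iwmmxt_* functions combine M0 with register wRn (or, for the _ENV1 variants, operate on M0 alone) and leave the result in M0. */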
1371 #define IWMMXT_OP(name) \
1372 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1374 iwmmxt_load_reg(cpu_V1, rn); \
1375 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1378 #define IWMMXT_OP_ENV(name) \
1379 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1381 iwmmxt_load_reg(cpu_V1, rn); \
1382 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1385 #define IWMMXT_OP_ENV_SIZE(name) \
1386 IWMMXT_OP_ENV(name##b) \
1387 IWMMXT_OP_ENV(name##w) \
1388 IWMMXT_OP_ENV(name##l)
1390 #define IWMMXT_OP_ENV1(name) \
1391 static inline void gen_op_iwmmxt_##name##_M0(void) \
1393 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1396 IWMMXT_OP(maddsq)
1397 IWMMXT_OP(madduq)
1398 IWMMXT_OP(sadb)
1399 IWMMXT_OP(sadw)
1400 IWMMXT_OP(mulslw)
1401 IWMMXT_OP(mulshw)
1402 IWMMXT_OP(mululw)
1403 IWMMXT_OP(muluhw)
1404 IWMMXT_OP(macsw)
1405 IWMMXT_OP(macuw)
1407 IWMMXT_OP_ENV_SIZE(unpackl)
1408 IWMMXT_OP_ENV_SIZE(unpackh)
1410 IWMMXT_OP_ENV1(unpacklub)
1411 IWMMXT_OP_ENV1(unpackluw)
1412 IWMMXT_OP_ENV1(unpacklul)
1413 IWMMXT_OP_ENV1(unpackhub)
1414 IWMMXT_OP_ENV1(unpackhuw)
1415 IWMMXT_OP_ENV1(unpackhul)
1416 IWMMXT_OP_ENV1(unpacklsb)
1417 IWMMXT_OP_ENV1(unpacklsw)
1418 IWMMXT_OP_ENV1(unpacklsl)
1419 IWMMXT_OP_ENV1(unpackhsb)
1420 IWMMXT_OP_ENV1(unpackhsw)
1421 IWMMXT_OP_ENV1(unpackhsl)
1423 IWMMXT_OP_ENV_SIZE(cmpeq)
1424 IWMMXT_OP_ENV_SIZE(cmpgtu)
1425 IWMMXT_OP_ENV_SIZE(cmpgts)
1427 IWMMXT_OP_ENV_SIZE(mins)
1428 IWMMXT_OP_ENV_SIZE(minu)
1429 IWMMXT_OP_ENV_SIZE(maxs)
1430 IWMMXT_OP_ENV_SIZE(maxu)
1432 IWMMXT_OP_ENV_SIZE(subn)
1433 IWMMXT_OP_ENV_SIZE(addn)
1434 IWMMXT_OP_ENV_SIZE(subu)
1435 IWMMXT_OP_ENV_SIZE(addu)
1436 IWMMXT_OP_ENV_SIZE(subs)
1437 IWMMXT_OP_ENV_SIZE(adds)
1439 IWMMXT_OP_ENV(avgb0)
1440 IWMMXT_OP_ENV(avgb1)
1441 IWMMXT_OP_ENV(avgw0)
1442 IWMMXT_OP_ENV(avgw1)
1444 IWMMXT_OP_ENV(packuw)
1445 IWMMXT_OP_ENV(packul)
1446 IWMMXT_OP_ENV(packuq)
1447 IWMMXT_OP_ENV(packsw)
1448 IWMMXT_OP_ENV(packsl)
1449 IWMMXT_OP_ENV(packsq)
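/* Record in wCon that an iwMMXt data register has been updated (the MUP bit). */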
1451 static void gen_op_iwmmxt_set_mup(void)
1453 TCGv_i32 tmp;
1454 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1455 tcg_gen_ori_i32(tmp, tmp, 2);
1456 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1459 static void gen_op_iwmmxt_set_cup(void)
1461 TCGv_i32 tmp;
1462 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1463 tcg_gen_ori_i32(tmp, tmp, 1);
1464 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1467 static void gen_op_iwmmxt_setpsr_nz(void)
1469 TCGv_i32 tmp = tcg_temp_new_i32();
1470 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1471 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1474 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1476 iwmmxt_load_reg(cpu_V1, rn);
1477 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1478 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
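/* Compute the effective address for an iwMMXt load/store into dest, handling pre/post indexing and base-register writeback; returns nonzero for an invalid addressing mode. */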
1481 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1482 TCGv_i32 dest)
1484 int rd;
1485 uint32_t offset;
1486 TCGv_i32 tmp;
1488 rd = (insn >> 16) & 0xf;
1489 tmp = load_reg(s, rd);
1491 offset = (insn & 0xff) << ((insn >> 7) & 2);
1492 if (insn & (1 << 24)) {
1493 /* Pre indexed */
1494 if (insn & (1 << 23))
1495 tcg_gen_addi_i32(tmp, tmp, offset);
1496 else
1497 tcg_gen_addi_i32(tmp, tmp, -offset);
1498 tcg_gen_mov_i32(dest, tmp);
1499 if (insn & (1 << 21))
1500 store_reg(s, rd, tmp);
1501 else
1502 tcg_temp_free_i32(tmp);
1503 } else if (insn & (1 << 21)) {
1504 /* Post indexed */
1505 tcg_gen_mov_i32(dest, tmp);
1506 if (insn & (1 << 23))
1507 tcg_gen_addi_i32(tmp, tmp, offset);
1508 else
1509 tcg_gen_addi_i32(tmp, tmp, -offset);
1510 store_reg(s, rd, tmp);
1511 } else if (!(insn & (1 << 23)))
1512 return 1;
1513 return 0;
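/* Fetch an iwMMXt shift amount from either a wCGR control register or the low word of a wR register, mask it, and place it in dest; returns nonzero for an invalid encoding. */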
1516 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1518 int rd = (insn >> 0) & 0xf;
1519 TCGv_i32 tmp;
1521 if (insn & (1 << 8)) {
1522 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1523 return 1;
1524 } else {
1525 tmp = iwmmxt_load_creg(rd);
1527 } else {
1528 tmp = tcg_temp_new_i32();
1529 iwmmxt_load_reg(cpu_V0, rd);
1530 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1532 tcg_gen_andi_i32(tmp, tmp, mask);
1533 tcg_gen_mov_i32(dest, tmp);
1534 tcg_temp_free_i32(tmp);
1535 return 0;
1538 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1539 (ie. an undefined instruction). */
1540 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1542 int rd, wrd;
1543 int rdhi, rdlo, rd0, rd1, i;
1544 TCGv_i32 addr;
1545 TCGv_i32 tmp, tmp2, tmp3;
1547 if ((insn & 0x0e000e00) == 0x0c000000) {
1548 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1549 wrd = insn & 0xf;
1550 rdlo = (insn >> 12) & 0xf;
1551 rdhi = (insn >> 16) & 0xf;
1552 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1553 iwmmxt_load_reg(cpu_V0, wrd);
1554 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1555 tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
1556 } else { /* TMCRR */
1557 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1558 iwmmxt_store_reg(cpu_V0, wrd);
1559 gen_op_iwmmxt_set_mup();
1561 return 0;
1564 wrd = (insn >> 12) & 0xf;
1565 addr = tcg_temp_new_i32();
1566 if (gen_iwmmxt_address(s, insn, addr)) {
1567 tcg_temp_free_i32(addr);
1568 return 1;
1570 if (insn & ARM_CP_RW_BIT) {
1571 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1572 tmp = tcg_temp_new_i32();
1573 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1574 iwmmxt_store_creg(wrd, tmp);
1575 } else {
1576 i = 1;
1577 if (insn & (1 << 8)) {
1578 if (insn & (1 << 22)) { /* WLDRD */
1579 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1580 i = 0;
1581 } else { /* WLDRW wRd */
1582 tmp = tcg_temp_new_i32();
1583 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1585 } else {
1586 tmp = tcg_temp_new_i32();
1587 if (insn & (1 << 22)) { /* WLDRH */
1588 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1589 } else { /* WLDRB */
1590 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1593 if (i) {
1594 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1595 tcg_temp_free_i32(tmp);
1597 gen_op_iwmmxt_movq_wRn_M0(wrd);
1599 } else {
1600 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1601 tmp = iwmmxt_load_creg(wrd);
1602 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1603 } else {
1604 gen_op_iwmmxt_movq_M0_wRn(wrd);
1605 tmp = tcg_temp_new_i32();
1606 if (insn & (1 << 8)) {
1607 if (insn & (1 << 22)) { /* WSTRD */
1608 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1609 } else { /* WSTRW wRd */
1610 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1611 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1613 } else {
1614 if (insn & (1 << 22)) { /* WSTRH */
1615 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1616 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1617 } else { /* WSTRB */
1618 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1619 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1623 tcg_temp_free_i32(tmp);
1625 tcg_temp_free_i32(addr);
1626 return 0;
1629 if ((insn & 0x0f000000) != 0x0e000000)
1630 return 1;
1632 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1633 case 0x000: /* WOR */
1634 wrd = (insn >> 12) & 0xf;
1635 rd0 = (insn >> 0) & 0xf;
1636 rd1 = (insn >> 16) & 0xf;
1637 gen_op_iwmmxt_movq_M0_wRn(rd0);
1638 gen_op_iwmmxt_orq_M0_wRn(rd1);
1639 gen_op_iwmmxt_setpsr_nz();
1640 gen_op_iwmmxt_movq_wRn_M0(wrd);
1641 gen_op_iwmmxt_set_mup();
1642 gen_op_iwmmxt_set_cup();
1643 break;
1644 case 0x011: /* TMCR */
1645 if (insn & 0xf)
1646 return 1;
1647 rd = (insn >> 12) & 0xf;
1648 wrd = (insn >> 16) & 0xf;
1649 switch (wrd) {
1650 case ARM_IWMMXT_wCID:
1651 case ARM_IWMMXT_wCASF:
1652 break;
1653 case ARM_IWMMXT_wCon:
1654 gen_op_iwmmxt_set_cup();
1655 /* Fall through. */
1656 case ARM_IWMMXT_wCSSF:
1657 tmp = iwmmxt_load_creg(wrd);
1658 tmp2 = load_reg(s, rd);
1659 tcg_gen_andc_i32(tmp, tmp, tmp2);
1660 tcg_temp_free_i32(tmp2);
1661 iwmmxt_store_creg(wrd, tmp);
1662 break;
1663 case ARM_IWMMXT_wCGR0:
1664 case ARM_IWMMXT_wCGR1:
1665 case ARM_IWMMXT_wCGR2:
1666 case ARM_IWMMXT_wCGR3:
1667 gen_op_iwmmxt_set_cup();
1668 tmp = load_reg(s, rd);
1669 iwmmxt_store_creg(wrd, tmp);
1670 break;
1671 default:
1672 return 1;
1674 break;
1675 case 0x100: /* WXOR */
1676 wrd = (insn >> 12) & 0xf;
1677 rd0 = (insn >> 0) & 0xf;
1678 rd1 = (insn >> 16) & 0xf;
1679 gen_op_iwmmxt_movq_M0_wRn(rd0);
1680 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1681 gen_op_iwmmxt_setpsr_nz();
1682 gen_op_iwmmxt_movq_wRn_M0(wrd);
1683 gen_op_iwmmxt_set_mup();
1684 gen_op_iwmmxt_set_cup();
1685 break;
1686 case 0x111: /* TMRC */
1687 if (insn & 0xf)
1688 return 1;
1689 rd = (insn >> 12) & 0xf;
1690 wrd = (insn >> 16) & 0xf;
1691 tmp = iwmmxt_load_creg(wrd);
1692 store_reg(s, rd, tmp);
1693 break;
1694 case 0x300: /* WANDN */
1695 wrd = (insn >> 12) & 0xf;
1696 rd0 = (insn >> 0) & 0xf;
1697 rd1 = (insn >> 16) & 0xf;
1698 gen_op_iwmmxt_movq_M0_wRn(rd0);
1699 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1700 gen_op_iwmmxt_andq_M0_wRn(rd1);
1701 gen_op_iwmmxt_setpsr_nz();
1702 gen_op_iwmmxt_movq_wRn_M0(wrd);
1703 gen_op_iwmmxt_set_mup();
1704 gen_op_iwmmxt_set_cup();
1705 break;
1706 case 0x200: /* WAND */
1707 wrd = (insn >> 12) & 0xf;
1708 rd0 = (insn >> 0) & 0xf;
1709 rd1 = (insn >> 16) & 0xf;
1710 gen_op_iwmmxt_movq_M0_wRn(rd0);
1711 gen_op_iwmmxt_andq_M0_wRn(rd1);
1712 gen_op_iwmmxt_setpsr_nz();
1713 gen_op_iwmmxt_movq_wRn_M0(wrd);
1714 gen_op_iwmmxt_set_mup();
1715 gen_op_iwmmxt_set_cup();
1716 break;
1717 case 0x810: case 0xa10: /* WMADD */
1718 wrd = (insn >> 12) & 0xf;
1719 rd0 = (insn >> 0) & 0xf;
1720 rd1 = (insn >> 16) & 0xf;
1721 gen_op_iwmmxt_movq_M0_wRn(rd0);
1722 if (insn & (1 << 21))
1723 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1724 else
1725 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1726 gen_op_iwmmxt_movq_wRn_M0(wrd);
1727 gen_op_iwmmxt_set_mup();
1728 break;
1729 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1730 wrd = (insn >> 12) & 0xf;
1731 rd0 = (insn >> 16) & 0xf;
1732 rd1 = (insn >> 0) & 0xf;
1733 gen_op_iwmmxt_movq_M0_wRn(rd0);
1734 switch ((insn >> 22) & 3) {
1735 case 0:
1736 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1737 break;
1738 case 1:
1739 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1740 break;
1741 case 2:
1742 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1743 break;
1744 case 3:
1745 return 1;
1747 gen_op_iwmmxt_movq_wRn_M0(wrd);
1748 gen_op_iwmmxt_set_mup();
1749 gen_op_iwmmxt_set_cup();
1750 break;
1751 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1752 wrd = (insn >> 12) & 0xf;
1753 rd0 = (insn >> 16) & 0xf;
1754 rd1 = (insn >> 0) & 0xf;
1755 gen_op_iwmmxt_movq_M0_wRn(rd0);
1756 switch ((insn >> 22) & 3) {
1757 case 0:
1758 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1759 break;
1760 case 1:
1761 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1762 break;
1763 case 2:
1764 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1765 break;
1766 case 3:
1767 return 1;
1769 gen_op_iwmmxt_movq_wRn_M0(wrd);
1770 gen_op_iwmmxt_set_mup();
1771 gen_op_iwmmxt_set_cup();
1772 break;
1773 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1774 wrd = (insn >> 12) & 0xf;
1775 rd0 = (insn >> 16) & 0xf;
1776 rd1 = (insn >> 0) & 0xf;
1777 gen_op_iwmmxt_movq_M0_wRn(rd0);
1778 if (insn & (1 << 22))
1779 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1780 else
1781 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1782 if (!(insn & (1 << 20)))
1783 gen_op_iwmmxt_addl_M0_wRn(wrd);
1784 gen_op_iwmmxt_movq_wRn_M0(wrd);
1785 gen_op_iwmmxt_set_mup();
1786 break;
1787 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1788 wrd = (insn >> 12) & 0xf;
1789 rd0 = (insn >> 16) & 0xf;
1790 rd1 = (insn >> 0) & 0xf;
1791 gen_op_iwmmxt_movq_M0_wRn(rd0);
1792 if (insn & (1 << 21)) {
1793 if (insn & (1 << 20))
1794 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1795 else
1796 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1797 } else {
1798 if (insn & (1 << 20))
1799 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1800 else
1801 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1803 gen_op_iwmmxt_movq_wRn_M0(wrd);
1804 gen_op_iwmmxt_set_mup();
1805 break;
1806 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1807 wrd = (insn >> 12) & 0xf;
1808 rd0 = (insn >> 16) & 0xf;
1809 rd1 = (insn >> 0) & 0xf;
1810 gen_op_iwmmxt_movq_M0_wRn(rd0);
1811 if (insn & (1 << 21))
1812 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1813 else
1814 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1815 if (!(insn & (1 << 20))) {
1816 iwmmxt_load_reg(cpu_V1, wrd);
1817 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1819 gen_op_iwmmxt_movq_wRn_M0(wrd);
1820 gen_op_iwmmxt_set_mup();
1821 break;
1822 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1823 wrd = (insn >> 12) & 0xf;
1824 rd0 = (insn >> 16) & 0xf;
1825 rd1 = (insn >> 0) & 0xf;
1826 gen_op_iwmmxt_movq_M0_wRn(rd0);
1827 switch ((insn >> 22) & 3) {
1828 case 0:
1829 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1830 break;
1831 case 1:
1832 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1833 break;
1834 case 2:
1835 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1836 break;
1837 case 3:
1838 return 1;
1840 gen_op_iwmmxt_movq_wRn_M0(wrd);
1841 gen_op_iwmmxt_set_mup();
1842 gen_op_iwmmxt_set_cup();
1843 break;
1844 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1845 wrd = (insn >> 12) & 0xf;
1846 rd0 = (insn >> 16) & 0xf;
1847 rd1 = (insn >> 0) & 0xf;
1848 gen_op_iwmmxt_movq_M0_wRn(rd0);
1849 if (insn & (1 << 22)) {
1850 if (insn & (1 << 20))
1851 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1852 else
1853 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1854 } else {
1855 if (insn & (1 << 20))
1856 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1857 else
1858 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1860 gen_op_iwmmxt_movq_wRn_M0(wrd);
1861 gen_op_iwmmxt_set_mup();
1862 gen_op_iwmmxt_set_cup();
1863 break;
1864 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1865 wrd = (insn >> 12) & 0xf;
1866 rd0 = (insn >> 16) & 0xf;
1867 rd1 = (insn >> 0) & 0xf;
1868 gen_op_iwmmxt_movq_M0_wRn(rd0);
1869 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1870 tcg_gen_andi_i32(tmp, tmp, 7);
1871 iwmmxt_load_reg(cpu_V1, rd1);
1872 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1873 tcg_temp_free_i32(tmp);
1874 gen_op_iwmmxt_movq_wRn_M0(wrd);
1875 gen_op_iwmmxt_set_mup();
1876 break;
1877 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1878 if (((insn >> 6) & 3) == 3)
1879 return 1;
1880 rd = (insn >> 12) & 0xf;
1881 wrd = (insn >> 16) & 0xf;
1882 tmp = load_reg(s, rd);
1883 gen_op_iwmmxt_movq_M0_wRn(wrd);
1884 switch ((insn >> 6) & 3) {
1885 case 0:
1886 tmp2 = tcg_const_i32(0xff);
1887 tmp3 = tcg_const_i32((insn & 7) << 3);
1888 break;
1889 case 1:
1890 tmp2 = tcg_const_i32(0xffff);
1891 tmp3 = tcg_const_i32((insn & 3) << 4);
1892 break;
1893 case 2:
1894 tmp2 = tcg_const_i32(0xffffffff);
1895 tmp3 = tcg_const_i32((insn & 1) << 5);
1896 break;
1897 default:
1898 tmp2 = NULL;
1899 tmp3 = NULL;
1901 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1902 tcg_temp_free_i32(tmp3);
1903 tcg_temp_free_i32(tmp2);
1904 tcg_temp_free_i32(tmp);
1905 gen_op_iwmmxt_movq_wRn_M0(wrd);
1906 gen_op_iwmmxt_set_mup();
1907 break;
1908 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1909 rd = (insn >> 12) & 0xf;
1910 wrd = (insn >> 16) & 0xf;
1911 if (rd == 15 || ((insn >> 22) & 3) == 3)
1912 return 1;
1913 gen_op_iwmmxt_movq_M0_wRn(wrd);
1914 tmp = tcg_temp_new_i32();
1915 switch ((insn >> 22) & 3) {
1916 case 0:
1917 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1918 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1919 if (insn & 8) {
1920 tcg_gen_ext8s_i32(tmp, tmp);
1921 } else {
1922 tcg_gen_andi_i32(tmp, tmp, 0xff);
1924 break;
1925 case 1:
1926 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1927 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1928 if (insn & 8) {
1929 tcg_gen_ext16s_i32(tmp, tmp);
1930 } else {
1931 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1933 break;
1934 case 2:
1935 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1936 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1937 break;
1939 store_reg(s, rd, tmp);
1940 break;
1941 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1942 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1943 return 1;
1944 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1945 switch ((insn >> 22) & 3) {
1946 case 0:
1947 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1948 break;
1949 case 1:
1950 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1951 break;
1952 case 2:
1953 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1954 break;
1956 tcg_gen_shli_i32(tmp, tmp, 28);
1957 gen_set_nzcv(tmp);
1958 tcg_temp_free_i32(tmp);
1959 break;
1960 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1961 if (((insn >> 6) & 3) == 3)
1962 return 1;
1963 rd = (insn >> 12) & 0xf;
1964 wrd = (insn >> 16) & 0xf;
1965 tmp = load_reg(s, rd);
1966 switch ((insn >> 6) & 3) {
1967 case 0:
1968 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1969 break;
1970 case 1:
1971 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1972 break;
1973 case 2:
1974 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1975 break;
1977 tcg_temp_free_i32(tmp);
1978 gen_op_iwmmxt_movq_wRn_M0(wrd);
1979 gen_op_iwmmxt_set_mup();
1980 break;
1981 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1982 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1983 return 1;
1984 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1985 tmp2 = tcg_temp_new_i32();
1986 tcg_gen_mov_i32(tmp2, tmp);
1987 switch ((insn >> 22) & 3) {
1988 case 0:
1989 for (i = 0; i < 7; i ++) {
1990 tcg_gen_shli_i32(tmp2, tmp2, 4);
1991 tcg_gen_and_i32(tmp, tmp, tmp2);
1993 break;
1994 case 1:
1995 for (i = 0; i < 3; i ++) {
1996 tcg_gen_shli_i32(tmp2, tmp2, 8);
1997 tcg_gen_and_i32(tmp, tmp, tmp2);
1999 break;
2000 case 2:
2001 tcg_gen_shli_i32(tmp2, tmp2, 16);
2002 tcg_gen_and_i32(tmp, tmp, tmp2);
2003 break;
2005 gen_set_nzcv(tmp);
2006 tcg_temp_free_i32(tmp2);
2007 tcg_temp_free_i32(tmp);
2008 break;
2009 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2010 wrd = (insn >> 12) & 0xf;
2011 rd0 = (insn >> 16) & 0xf;
2012 gen_op_iwmmxt_movq_M0_wRn(rd0);
2013 switch ((insn >> 22) & 3) {
2014 case 0:
2015 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2016 break;
2017 case 1:
2018 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2019 break;
2020 case 2:
2021 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2022 break;
2023 case 3:
2024 return 1;
2026 gen_op_iwmmxt_movq_wRn_M0(wrd);
2027 gen_op_iwmmxt_set_mup();
2028 break;
2029 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2030 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2031 return 1;
2032 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2033 tmp2 = tcg_temp_new_i32();
2034 tcg_gen_mov_i32(tmp2, tmp);
2035 switch ((insn >> 22) & 3) {
2036 case 0:
2037 for (i = 0; i < 7; i ++) {
2038 tcg_gen_shli_i32(tmp2, tmp2, 4);
2039 tcg_gen_or_i32(tmp, tmp, tmp2);
2041 break;
2042 case 1:
2043 for (i = 0; i < 3; i ++) {
2044 tcg_gen_shli_i32(tmp2, tmp2, 8);
2045 tcg_gen_or_i32(tmp, tmp, tmp2);
2047 break;
2048 case 2:
2049 tcg_gen_shli_i32(tmp2, tmp2, 16);
2050 tcg_gen_or_i32(tmp, tmp, tmp2);
2051 break;
2053 gen_set_nzcv(tmp);
2054 tcg_temp_free_i32(tmp2);
2055 tcg_temp_free_i32(tmp);
2056 break;
2057 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2058 rd = (insn >> 12) & 0xf;
2059 rd0 = (insn >> 16) & 0xf;
2060 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2061 return 1;
2062 gen_op_iwmmxt_movq_M0_wRn(rd0);
2063 tmp = tcg_temp_new_i32();
2064 switch ((insn >> 22) & 3) {
2065 case 0:
2066 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2067 break;
2068 case 1:
2069 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2070 break;
2071 case 2:
2072 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2073 break;
2075 store_reg(s, rd, tmp);
2076 break;
2077 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2078 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2079 wrd = (insn >> 12) & 0xf;
2080 rd0 = (insn >> 16) & 0xf;
2081 rd1 = (insn >> 0) & 0xf;
2082 gen_op_iwmmxt_movq_M0_wRn(rd0);
2083 switch ((insn >> 22) & 3) {
2084 case 0:
2085 if (insn & (1 << 21))
2086 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2087 else
2088 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2089 break;
2090 case 1:
2091 if (insn & (1 << 21))
2092 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2093 else
2094 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2095 break;
2096 case 2:
2097 if (insn & (1 << 21))
2098 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2099 else
2100 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2101 break;
2102 case 3:
2103 return 1;
2105 gen_op_iwmmxt_movq_wRn_M0(wrd);
2106 gen_op_iwmmxt_set_mup();
2107 gen_op_iwmmxt_set_cup();
2108 break;
2109 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2110 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2111 wrd = (insn >> 12) & 0xf;
2112 rd0 = (insn >> 16) & 0xf;
2113 gen_op_iwmmxt_movq_M0_wRn(rd0);
2114 switch ((insn >> 22) & 3) {
2115 case 0:
2116 if (insn & (1 << 21))
2117 gen_op_iwmmxt_unpacklsb_M0();
2118 else
2119 gen_op_iwmmxt_unpacklub_M0();
2120 break;
2121 case 1:
2122 if (insn & (1 << 21))
2123 gen_op_iwmmxt_unpacklsw_M0();
2124 else
2125 gen_op_iwmmxt_unpackluw_M0();
2126 break;
2127 case 2:
2128 if (insn & (1 << 21))
2129 gen_op_iwmmxt_unpacklsl_M0();
2130 else
2131 gen_op_iwmmxt_unpacklul_M0();
2132 break;
2133 case 3:
2134 return 1;
2136 gen_op_iwmmxt_movq_wRn_M0(wrd);
2137 gen_op_iwmmxt_set_mup();
2138 gen_op_iwmmxt_set_cup();
2139 break;
2140 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2141 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2142 wrd = (insn >> 12) & 0xf;
2143 rd0 = (insn >> 16) & 0xf;
2144 gen_op_iwmmxt_movq_M0_wRn(rd0);
2145 switch ((insn >> 22) & 3) {
2146 case 0:
2147 if (insn & (1 << 21))
2148 gen_op_iwmmxt_unpackhsb_M0();
2149 else
2150 gen_op_iwmmxt_unpackhub_M0();
2151 break;
2152 case 1:
2153 if (insn & (1 << 21))
2154 gen_op_iwmmxt_unpackhsw_M0();
2155 else
2156 gen_op_iwmmxt_unpackhuw_M0();
2157 break;
2158 case 2:
2159 if (insn & (1 << 21))
2160 gen_op_iwmmxt_unpackhsl_M0();
2161 else
2162 gen_op_iwmmxt_unpackhul_M0();
2163 break;
2164 case 3:
2165 return 1;
2167 gen_op_iwmmxt_movq_wRn_M0(wrd);
2168 gen_op_iwmmxt_set_mup();
2169 gen_op_iwmmxt_set_cup();
2170 break;
2171 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2172 case 0x214: case 0x614: case 0xa14: case 0xe14:
2173 if (((insn >> 22) & 3) == 0)
2174 return 1;
2175 wrd = (insn >> 12) & 0xf;
2176 rd0 = (insn >> 16) & 0xf;
2177 gen_op_iwmmxt_movq_M0_wRn(rd0);
2178 tmp = tcg_temp_new_i32();
2179 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2180 tcg_temp_free_i32(tmp);
2181 return 1;
2183 switch ((insn >> 22) & 3) {
2184 case 1:
2185 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2186 break;
2187 case 2:
2188 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2189 break;
2190 case 3:
2191 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2192 break;
2194 tcg_temp_free_i32(tmp);
2195 gen_op_iwmmxt_movq_wRn_M0(wrd);
2196 gen_op_iwmmxt_set_mup();
2197 gen_op_iwmmxt_set_cup();
2198 break;
2199 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2200 case 0x014: case 0x414: case 0x814: case 0xc14:
2201 if (((insn >> 22) & 3) == 0)
2202 return 1;
2203 wrd = (insn >> 12) & 0xf;
2204 rd0 = (insn >> 16) & 0xf;
2205 gen_op_iwmmxt_movq_M0_wRn(rd0);
2206 tmp = tcg_temp_new_i32();
2207 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2208 tcg_temp_free_i32(tmp);
2209 return 1;
2211 switch ((insn >> 22) & 3) {
2212 case 1:
2213 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2214 break;
2215 case 2:
2216 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2217 break;
2218 case 3:
2219 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2220 break;
2222 tcg_temp_free_i32(tmp);
2223 gen_op_iwmmxt_movq_wRn_M0(wrd);
2224 gen_op_iwmmxt_set_mup();
2225 gen_op_iwmmxt_set_cup();
2226 break;
2227 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2228 case 0x114: case 0x514: case 0x914: case 0xd14:
2229 if (((insn >> 22) & 3) == 0)
2230 return 1;
2231 wrd = (insn >> 12) & 0xf;
2232 rd0 = (insn >> 16) & 0xf;
2233 gen_op_iwmmxt_movq_M0_wRn(rd0);
2234 tmp = tcg_temp_new_i32();
2235 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2236 tcg_temp_free_i32(tmp);
2237 return 1;
2239 switch ((insn >> 22) & 3) {
2240 case 1:
2241 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2242 break;
2243 case 2:
2244 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2245 break;
2246 case 3:
2247 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2248 break;
2250 tcg_temp_free_i32(tmp);
2251 gen_op_iwmmxt_movq_wRn_M0(wrd);
2252 gen_op_iwmmxt_set_mup();
2253 gen_op_iwmmxt_set_cup();
2254 break;
2255 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2256 case 0x314: case 0x714: case 0xb14: case 0xf14:
2257 if (((insn >> 22) & 3) == 0)
2258 return 1;
2259 wrd = (insn >> 12) & 0xf;
2260 rd0 = (insn >> 16) & 0xf;
2261 gen_op_iwmmxt_movq_M0_wRn(rd0);
2262 tmp = tcg_temp_new_i32();
2263 switch ((insn >> 22) & 3) {
2264 case 1:
2265 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2266 tcg_temp_free_i32(tmp);
2267 return 1;
2269 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2270 break;
2271 case 2:
2272 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2273 tcg_temp_free_i32(tmp);
2274 return 1;
2276 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2277 break;
2278 case 3:
2279 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2280 tcg_temp_free_i32(tmp);
2281 return 1;
2283 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2284 break;
2286 tcg_temp_free_i32(tmp);
2287 gen_op_iwmmxt_movq_wRn_M0(wrd);
2288 gen_op_iwmmxt_set_mup();
2289 gen_op_iwmmxt_set_cup();
2290 break;
2291 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2292 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2293 wrd = (insn >> 12) & 0xf;
2294 rd0 = (insn >> 16) & 0xf;
2295 rd1 = (insn >> 0) & 0xf;
2296 gen_op_iwmmxt_movq_M0_wRn(rd0);
2297 switch ((insn >> 22) & 3) {
2298 case 0:
2299 if (insn & (1 << 21))
2300 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2301 else
2302 gen_op_iwmmxt_minub_M0_wRn(rd1);
2303 break;
2304 case 1:
2305 if (insn & (1 << 21))
2306 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2307 else
2308 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2309 break;
2310 case 2:
2311 if (insn & (1 << 21))
2312 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2313 else
2314 gen_op_iwmmxt_minul_M0_wRn(rd1);
2315 break;
2316 case 3:
2317 return 1;
2319 gen_op_iwmmxt_movq_wRn_M0(wrd);
2320 gen_op_iwmmxt_set_mup();
2321 break;
2322 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2323 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2324 wrd = (insn >> 12) & 0xf;
2325 rd0 = (insn >> 16) & 0xf;
2326 rd1 = (insn >> 0) & 0xf;
2327 gen_op_iwmmxt_movq_M0_wRn(rd0);
2328 switch ((insn >> 22) & 3) {
2329 case 0:
2330 if (insn & (1 << 21))
2331 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2332 else
2333 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2334 break;
2335 case 1:
2336 if (insn & (1 << 21))
2337 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2338 else
2339 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2340 break;
2341 case 2:
2342 if (insn & (1 << 21))
2343 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2344 else
2345 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2346 break;
2347 case 3:
2348 return 1;
2350 gen_op_iwmmxt_movq_wRn_M0(wrd);
2351 gen_op_iwmmxt_set_mup();
2352 break;
2353 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2354 case 0x402: case 0x502: case 0x602: case 0x702:
2355 wrd = (insn >> 12) & 0xf;
2356 rd0 = (insn >> 16) & 0xf;
2357 rd1 = (insn >> 0) & 0xf;
2358 gen_op_iwmmxt_movq_M0_wRn(rd0);
2359 tmp = tcg_const_i32((insn >> 20) & 3);
2360 iwmmxt_load_reg(cpu_V1, rd1);
2361 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2362 tcg_temp_free_i32(tmp);
2363 gen_op_iwmmxt_movq_wRn_M0(wrd);
2364 gen_op_iwmmxt_set_mup();
2365 break;
2366 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2367 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2368 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2369 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2370 wrd = (insn >> 12) & 0xf;
2371 rd0 = (insn >> 16) & 0xf;
2372 rd1 = (insn >> 0) & 0xf;
2373 gen_op_iwmmxt_movq_M0_wRn(rd0);
2374 switch ((insn >> 20) & 0xf) {
2375 case 0x0:
2376 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2377 break;
2378 case 0x1:
2379 gen_op_iwmmxt_subub_M0_wRn(rd1);
2380 break;
2381 case 0x3:
2382 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2383 break;
2384 case 0x4:
2385 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2386 break;
2387 case 0x5:
2388 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2389 break;
2390 case 0x7:
2391 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2392 break;
2393 case 0x8:
2394 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2395 break;
2396 case 0x9:
2397 gen_op_iwmmxt_subul_M0_wRn(rd1);
2398 break;
2399 case 0xb:
2400 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2401 break;
2402 default:
2403 return 1;
2405 gen_op_iwmmxt_movq_wRn_M0(wrd);
2406 gen_op_iwmmxt_set_mup();
2407 gen_op_iwmmxt_set_cup();
2408 break;
2409 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2410 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2411 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2412 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2413 wrd = (insn >> 12) & 0xf;
2414 rd0 = (insn >> 16) & 0xf;
2415 gen_op_iwmmxt_movq_M0_wRn(rd0);
2416 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2417 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2418 tcg_temp_free_i32(tmp);
2419 gen_op_iwmmxt_movq_wRn_M0(wrd);
2420 gen_op_iwmmxt_set_mup();
2421 gen_op_iwmmxt_set_cup();
2422 break;
2423 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2424 case 0x418: case 0x518: case 0x618: case 0x718:
2425 case 0x818: case 0x918: case 0xa18: case 0xb18:
2426 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2427 wrd = (insn >> 12) & 0xf;
2428 rd0 = (insn >> 16) & 0xf;
2429 rd1 = (insn >> 0) & 0xf;
2430 gen_op_iwmmxt_movq_M0_wRn(rd0);
2431 switch ((insn >> 20) & 0xf) {
2432 case 0x0:
2433 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2434 break;
2435 case 0x1:
2436 gen_op_iwmmxt_addub_M0_wRn(rd1);
2437 break;
2438 case 0x3:
2439 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2440 break;
2441 case 0x4:
2442 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2443 break;
2444 case 0x5:
2445 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2446 break;
2447 case 0x7:
2448 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2449 break;
2450 case 0x8:
2451 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2452 break;
2453 case 0x9:
2454 gen_op_iwmmxt_addul_M0_wRn(rd1);
2455 break;
2456 case 0xb:
2457 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2458 break;
2459 default:
2460 return 1;
2462 gen_op_iwmmxt_movq_wRn_M0(wrd);
2463 gen_op_iwmmxt_set_mup();
2464 gen_op_iwmmxt_set_cup();
2465 break;
2466 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2467 case 0x408: case 0x508: case 0x608: case 0x708:
2468 case 0x808: case 0x908: case 0xa08: case 0xb08:
2469 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2470 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2471 return 1;
2472 wrd = (insn >> 12) & 0xf;
2473 rd0 = (insn >> 16) & 0xf;
2474 rd1 = (insn >> 0) & 0xf;
2475 gen_op_iwmmxt_movq_M0_wRn(rd0);
2476 switch ((insn >> 22) & 3) {
2477 case 1:
2478 if (insn & (1 << 21))
2479 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2480 else
2481 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2482 break;
2483 case 2:
2484 if (insn & (1 << 21))
2485 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2486 else
2487 gen_op_iwmmxt_packul_M0_wRn(rd1);
2488 break;
2489 case 3:
2490 if (insn & (1 << 21))
2491 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2492 else
2493 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2494 break;
2496 gen_op_iwmmxt_movq_wRn_M0(wrd);
2497 gen_op_iwmmxt_set_mup();
2498 gen_op_iwmmxt_set_cup();
2499 break;
2500 case 0x201: case 0x203: case 0x205: case 0x207:
2501 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2502 case 0x211: case 0x213: case 0x215: case 0x217:
2503 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2504 wrd = (insn >> 5) & 0xf;
2505 rd0 = (insn >> 12) & 0xf;
2506 rd1 = (insn >> 0) & 0xf;
2507 if (rd0 == 0xf || rd1 == 0xf)
2508 return 1;
2509 gen_op_iwmmxt_movq_M0_wRn(wrd);
2510 tmp = load_reg(s, rd0);
2511 tmp2 = load_reg(s, rd1);
2512 switch ((insn >> 16) & 0xf) {
2513 case 0x0: /* TMIA */
2514 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2515 break;
2516 case 0x8: /* TMIAPH */
2517 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2518 break;
2519 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2520 if (insn & (1 << 16))
2521 tcg_gen_shri_i32(tmp, tmp, 16);
2522 if (insn & (1 << 17))
2523 tcg_gen_shri_i32(tmp2, tmp2, 16);
2524 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2525 break;
2526 default:
2527 tcg_temp_free_i32(tmp2);
2528 tcg_temp_free_i32(tmp);
2529 return 1;
2531 tcg_temp_free_i32(tmp2);
2532 tcg_temp_free_i32(tmp);
2533 gen_op_iwmmxt_movq_wRn_M0(wrd);
2534 gen_op_iwmmxt_set_mup();
2535 break;
2536 default:
2537 return 1;
2540 return 0;
2543 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2544 (i.e. an undefined instruction). */
2545 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2547 int acc, rd0, rd1, rdhi, rdlo;
2548 TCGv_i32 tmp, tmp2;
2550 if ((insn & 0x0ff00f10) == 0x0e200010) {
2551 /* Multiply with Internal Accumulate Format */
2552 rd0 = (insn >> 12) & 0xf;
2553 rd1 = insn & 0xf;
2554 acc = (insn >> 5) & 7;
2556 if (acc != 0)
2557 return 1;
2559 tmp = load_reg(s, rd0);
2560 tmp2 = load_reg(s, rd1);
2561 switch ((insn >> 16) & 0xf) {
2562 case 0x0: /* MIA */
2563 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2564 break;
2565 case 0x8: /* MIAPH */
2566 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2567 break;
2568 case 0xc: /* MIABB */
2569 case 0xd: /* MIABT */
2570 case 0xe: /* MIATB */
2571 case 0xf: /* MIATT */
2572 if (insn & (1 << 16))
2573 tcg_gen_shri_i32(tmp, tmp, 16);
2574 if (insn & (1 << 17))
2575 tcg_gen_shri_i32(tmp2, tmp2, 16);
2576 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2577 break;
2578 default:
2579 return 1;
2581 tcg_temp_free_i32(tmp2);
2582 tcg_temp_free_i32(tmp);
2584 gen_op_iwmmxt_movq_wRn_M0(acc);
2585 return 0;
2588 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2589 /* Internal Accumulator Access Format */
2590 rdhi = (insn >> 16) & 0xf;
2591 rdlo = (insn >> 12) & 0xf;
2592 acc = insn & 7;
2594 if (acc != 0)
2595 return 1;
2597 if (insn & ARM_CP_RW_BIT) { /* MRA */
2598 iwmmxt_load_reg(cpu_V0, acc);
2599 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2600 tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
2601 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2602 } else { /* MAR */
2603 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2604 iwmmxt_store_reg(cpu_V0, acc);
2606 return 0;
2609 return 1;
2612 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2613 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2614 if (dc_isar_feature(aa32_simd_r32, s)) { \
2615 reg = (((insn) >> (bigbit)) & 0x0f) \
2616 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2617 } else { \
2618 if (insn & (1 << (smallbit))) \
2619 return 1; \
2620 reg = ((insn) >> (bigbit)) & 0x0f; \
2621 }} while (0)
2623 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2624 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2625 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
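/*
 * Illustrative sketch, added for exposition and not part of the original
 * file: the VFP_DREG_* macros above combine a 4-bit register field with
 * the "small" bit to form a 5-bit D-register number when the CPU
 * implements the full 32-double-register set (aa32_simd_r32).  The
 * helper name below is hypothetical; the bit arithmetic mirrors
 * VFP_DREG(reg, insn, 12, 22), i.e. VFP_DREG_D.
 */
static inline int example_vfp_dreg_d(uint32_t insn)
{
    /* low four bits from insn[15:12], top bit from insn[22] */
    return ((insn >> 12) & 0x0f) | ((insn >> (22 - 4)) & 0x10);
}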
2627 static void gen_neon_dup_low16(TCGv_i32 var)
2629 TCGv_i32 tmp = tcg_temp_new_i32();
2630 tcg_gen_ext16u_i32(var, var);
2631 tcg_gen_shli_i32(tmp, var, 16);
2632 tcg_gen_or_i32(var, var, tmp);
2633 tcg_temp_free_i32(tmp);
2636 static void gen_neon_dup_high16(TCGv_i32 var)
2638 TCGv_i32 tmp = tcg_temp_new_i32();
2639 tcg_gen_andi_i32(var, var, 0xffff0000);
2640 tcg_gen_shri_i32(tmp, var, 16);
2641 tcg_gen_or_i32(var, var, tmp);
2642 tcg_temp_free_i32(tmp);
2645 static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
2647 #ifndef CONFIG_USER_ONLY
2648 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
2649 ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
2650 #else
2651 return true;
2652 #endif
2655 static void gen_goto_ptr(void)
2657 tcg_gen_lookup_and_goto_ptr();
2660 /* This will end the TB but doesn't guarantee we'll return to
2661 * cpu_loop_exec. Any live exit_requests will be processed as we
2662 * enter the next TB.
2663 */
2664 static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
2666 if (use_goto_tb(s, dest)) {
2667 tcg_gen_goto_tb(n);
2668 gen_set_pc_im(s, dest);
2669 tcg_gen_exit_tb(s->base.tb, n);
2670 } else {
2671 gen_set_pc_im(s, dest);
2672 gen_goto_ptr();
2674 s->base.is_jmp = DISAS_NORETURN;
2677 static inline void gen_jmp (DisasContext *s, uint32_t dest)
2679 if (unlikely(is_singlestepping(s))) {
2680 /* An indirect jump so that we still trigger the debug exception. */
2681 gen_set_pc_im(s, dest);
2682 s->base.is_jmp = DISAS_JUMP;
2683 } else {
2684 gen_goto_tb(s, 0, dest);
2688 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
2690 if (x)
2691 tcg_gen_sari_i32(t0, t0, 16);
2692 else
2693 gen_sxth(t0);
2694 if (y)
2695 tcg_gen_sari_i32(t1, t1, 16);
2696 else
2697 gen_sxth(t1);
2698 tcg_gen_mul_i32(t0, t0, t1);
2701 /* Return the mask of PSR bits set by a MSR instruction. */
2702 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
2704 uint32_t mask = 0;
2706 if (flags & (1 << 0)) {
2707 mask |= 0xff;
2709 if (flags & (1 << 1)) {
2710 mask |= 0xff00;
2712 if (flags & (1 << 2)) {
2713 mask |= 0xff0000;
2715 if (flags & (1 << 3)) {
2716 mask |= 0xff000000;
2719 /* Mask out undefined and reserved bits. */
2720 mask &= aarch32_cpsr_valid_mask(s->features, s->isar);
2722 /* Mask out execution state. */
2723 if (!spsr) {
2724 mask &= ~CPSR_EXEC;
2727 /* Mask out privileged bits. */
2728 if (IS_USER(s)) {
2729 mask &= CPSR_USER;
2730 }
2731 return mask;
2732 }
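/*
 * Illustrative example, added for exposition and not part of the original
 * file: an MSR writing the "fc" fields (flags + control, i.e. bits 3 and 0
 * of the field mask) starts from 0xff0000ff here, before the reserved,
 * execution-state and privileged bits are filtered out above.  The wrapper
 * name is hypothetical.
 */
static uint32_t example_msr_mask_fc(DisasContext *s)
{
    return msr_mask(s, (1 << 3) | (1 << 0), 0);   /* CPSR, not SPSR */
}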
2734 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
2735 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
2737 TCGv_i32 tmp;
2738 if (spsr) {
2739 /* ??? This is also undefined in system mode. */
2740 if (IS_USER(s))
2741 return 1;
2743 tmp = load_cpu_field(spsr);
2744 tcg_gen_andi_i32(tmp, tmp, ~mask);
2745 tcg_gen_andi_i32(t0, t0, mask);
2746 tcg_gen_or_i32(tmp, tmp, t0);
2747 store_cpu_field(tmp, spsr);
2748 } else {
2749 gen_set_cpsr(t0, mask);
2751 tcg_temp_free_i32(t0);
2752 gen_lookup_tb(s);
2753 return 0;
2756 /* Returns nonzero if access to the PSR is not permitted. */
2757 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
2759 TCGv_i32 tmp;
2760 tmp = tcg_temp_new_i32();
2761 tcg_gen_movi_i32(tmp, val);
2762 return gen_set_psr(s, mask, spsr, tmp);
2765 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
2766 int *tgtmode, int *regno)
2768 /* Decode the r and sysm fields of MSR/MRS banked accesses into
2769 * the target mode and register number, and identify the various
2770 * unpredictable cases.
2771 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
2772 * + executed in user mode
2773 * + using R15 as the src/dest register
2774 * + accessing an unimplemented register
2775 * + accessing a register that's inaccessible at current PL/security state*
2776 * + accessing a register that you could access with a different insn
2777 * We choose to UNDEF in all these cases.
2778 * Since we don't know which of the various AArch32 modes we are in
2779 * we have to defer some checks to runtime.
2780 * Accesses to Monitor mode registers from Secure EL1 (which implies
2781 * that EL3 is AArch64) must trap to EL3.
2783 * If the access checks fail this function will emit code to take
2784 * an exception and return false. Otherwise it will return true,
2785 * and set *tgtmode and *regno appropriately.
2786 */
2787 int exc_target = default_exception_el(s);
2789 /* These instructions are present only in ARMv8, or in ARMv7 with the
2790 * Virtualization Extensions.
2792 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
2793 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
2794 goto undef;
2797 if (IS_USER(s) || rn == 15) {
2798 goto undef;
2801 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
2802 * of registers into (r, sysm).
2804 if (r) {
2805 /* SPSRs for other modes */
2806 switch (sysm) {
2807 case 0xe: /* SPSR_fiq */
2808 *tgtmode = ARM_CPU_MODE_FIQ;
2809 break;
2810 case 0x10: /* SPSR_irq */
2811 *tgtmode = ARM_CPU_MODE_IRQ;
2812 break;
2813 case 0x12: /* SPSR_svc */
2814 *tgtmode = ARM_CPU_MODE_SVC;
2815 break;
2816 case 0x14: /* SPSR_abt */
2817 *tgtmode = ARM_CPU_MODE_ABT;
2818 break;
2819 case 0x16: /* SPSR_und */
2820 *tgtmode = ARM_CPU_MODE_UND;
2821 break;
2822 case 0x1c: /* SPSR_mon */
2823 *tgtmode = ARM_CPU_MODE_MON;
2824 break;
2825 case 0x1e: /* SPSR_hyp */
2826 *tgtmode = ARM_CPU_MODE_HYP;
2827 break;
2828 default: /* unallocated */
2829 goto undef;
2831 /* We arbitrarily assign SPSR a register number of 16. */
2832 *regno = 16;
2833 } else {
2834 /* general purpose registers for other modes */
2835 switch (sysm) {
2836 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
2837 *tgtmode = ARM_CPU_MODE_USR;
2838 *regno = sysm + 8;
2839 break;
2840 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
2841 *tgtmode = ARM_CPU_MODE_FIQ;
2842 *regno = sysm;
2843 break;
2844 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
2845 *tgtmode = ARM_CPU_MODE_IRQ;
2846 *regno = sysm & 1 ? 13 : 14;
2847 break;
2848 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
2849 *tgtmode = ARM_CPU_MODE_SVC;
2850 *regno = sysm & 1 ? 13 : 14;
2851 break;
2852 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
2853 *tgtmode = ARM_CPU_MODE_ABT;
2854 *regno = sysm & 1 ? 13 : 14;
2855 break;
2856 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
2857 *tgtmode = ARM_CPU_MODE_UND;
2858 *regno = sysm & 1 ? 13 : 14;
2859 break;
2860 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
2861 *tgtmode = ARM_CPU_MODE_MON;
2862 *regno = sysm & 1 ? 13 : 14;
2863 break;
2864 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
2865 *tgtmode = ARM_CPU_MODE_HYP;
2866 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
2867 *regno = sysm & 1 ? 13 : 17;
2868 break;
2869 default: /* unallocated */
2870 goto undef;
2874 /* Catch the 'accessing inaccessible register' cases we can detect
2875 * at translate time.
2877 switch (*tgtmode) {
2878 case ARM_CPU_MODE_MON:
2879 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
2880 goto undef;
2882 if (s->current_el == 1) {
2883 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
2884 * then accesses to Mon registers trap to EL3
2886 exc_target = 3;
2887 goto undef;
2889 break;
2890 case ARM_CPU_MODE_HYP:
2891 /*
2892 * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
2893 * (and so we can forbid accesses from EL2 or below). elr_hyp
2894 * can be accessed also from Hyp mode, so forbid accesses from
2895 * EL0 or EL1.
2896 */
2897 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
2898 (s->current_el < 3 && *regno != 17)) {
2899 goto undef;
2901 break;
2902 default:
2903 break;
2906 return true;
2908 undef:
2909 /* If we get here then some access check did not pass */
2910 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
2911 syn_uncategorized(), exc_target);
2912 return false;
2913 }
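/*
 * Worked example for the decode above, added for exposition and not part
 * of the original file: r = 0, sysm = 0x0e selects r14_fiq, i.e.
 * *tgtmode = ARM_CPU_MODE_FIQ and *regno = 14, while r = 1 with
 * sysm = 0x0e selects SPSR_fiq, reported with the arbitrary regno 16.
 */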
2915 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
2917 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
2918 int tgtmode = 0, regno = 0;
2920 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
2921 return;
2924 /* Sync state because msr_banked() can raise exceptions */
2925 gen_set_condexec(s);
2926 gen_set_pc_im(s, s->pc_curr);
2927 tcg_reg = load_reg(s, rn);
2928 tcg_tgtmode = tcg_const_i32(tgtmode);
2929 tcg_regno = tcg_const_i32(regno);
2930 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
2931 tcg_temp_free_i32(tcg_tgtmode);
2932 tcg_temp_free_i32(tcg_regno);
2933 tcg_temp_free_i32(tcg_reg);
2934 s->base.is_jmp = DISAS_UPDATE;
2937 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
2939 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
2940 int tgtmode = 0, regno = 0;
2942 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
2943 return;
2946 /* Sync state because mrs_banked() can raise exceptions */
2947 gen_set_condexec(s);
2948 gen_set_pc_im(s, s->pc_curr);
2949 tcg_reg = tcg_temp_new_i32();
2950 tcg_tgtmode = tcg_const_i32(tgtmode);
2951 tcg_regno = tcg_const_i32(regno);
2952 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
2953 tcg_temp_free_i32(tcg_tgtmode);
2954 tcg_temp_free_i32(tcg_regno);
2955 store_reg(s, rn, tcg_reg);
2956 s->base.is_jmp = DISAS_UPDATE;
2959 /* Store value to PC as for an exception return (i.e. don't
2960 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
2961 * will do the masking based on the new value of the Thumb bit.
2962 */
2963 static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
2965 tcg_gen_mov_i32(cpu_R[15], pc);
2966 tcg_temp_free_i32(pc);
2969 /* Generate a v6 exception return. Marks both values as dead. */
2970 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2972 store_pc_exc_ret(s, pc);
2973 /* The cpsr_write_eret helper will mask the low bits of PC
2974 * appropriately depending on the new Thumb bit, so it must
2975 * be called after storing the new PC.
2977 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
2978 gen_io_start();
2980 gen_helper_cpsr_write_eret(cpu_env, cpsr);
2981 tcg_temp_free_i32(cpsr);
2982 /* Must exit loop to check un-masked IRQs */
2983 s->base.is_jmp = DISAS_EXIT;
2986 /* Generate an old-style exception return. Marks pc as dead. */
2987 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
2989 gen_rfe(s, pc, load_cpu_field(spsr));
2992 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
2994 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
2996 switch (size) {
2997 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
2998 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
2999 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3000 default: abort();
3004 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
3006 switch (size) {
3007 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3008 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3009 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3010 default: return;
3014 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3015 #define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
3016 #define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
3017 #define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
3018 #define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
3020 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3021 switch ((size << 1) | u) { \
3022 case 0: \
3023 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3024 break; \
3025 case 1: \
3026 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3027 break; \
3028 case 2: \
3029 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3030 break; \
3031 case 3: \
3032 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3033 break; \
3034 case 4: \
3035 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3036 break; \
3037 case 5: \
3038 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3039 break; \
3040 default: return 1; \
3041 }} while (0)
3043 #define GEN_NEON_INTEGER_OP(name) do { \
3044 switch ((size << 1) | u) { \
3045 case 0: \
3046 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3047 break; \
3048 case 1: \
3049 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3050 break; \
3051 case 2: \
3052 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3053 break; \
3054 case 3: \
3055 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3056 break; \
3057 case 4: \
3058 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3059 break; \
3060 case 5: \
3061 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3062 break; \
3063 default: return 1; \
3064 }} while (0)
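/*
 * Illustrative sketch, added for exposition and not part of the original
 * file: GEN_NEON_INTEGER_OP dispatches on (size << 1) | u, so the size
 * field picks the element width and the u bit picks the signed or
 * unsigned helper.  The hypothetical wrapper below expands a pairwise
 * max on tmp/tmp2; for size == 2 the "helpers" are the tcg_gen_smax/umax
 * aliases defined earlier.
 */
static int example_gen_neon_pmax(TCGv_i32 tmp, TCGv_i32 tmp2, int size, int u)
{
    GEN_NEON_INTEGER_OP(pmax);  /* result is written back into tmp */
    return 0;                   /* the macro returns 1 for a bad size/u combo */
}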
3066 static TCGv_i32 neon_load_scratch(int scratch)
3068 TCGv_i32 tmp = tcg_temp_new_i32();
3069 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3070 return tmp;
3073 static void neon_store_scratch(int scratch, TCGv_i32 var)
3075 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3076 tcg_temp_free_i32(var);
3079 static inline TCGv_i32 neon_get_scalar(int size, int reg)
3081 TCGv_i32 tmp;
3082 if (size == 1) {
3083 tmp = neon_load_reg(reg & 7, reg >> 4);
3084 if (reg & 8) {
3085 gen_neon_dup_high16(tmp);
3086 } else {
3087 gen_neon_dup_low16(tmp);
3089 } else {
3090 tmp = neon_load_reg(reg & 15, reg >> 4);
3092 return tmp;
3095 static int gen_neon_unzip(int rd, int rm, int size, int q)
3097 TCGv_ptr pd, pm;
3099 if (!q && size == 2) {
3100 return 1;
3102 pd = vfp_reg_ptr(true, rd);
3103 pm = vfp_reg_ptr(true, rm);
3104 if (q) {
3105 switch (size) {
3106 case 0:
3107 gen_helper_neon_qunzip8(pd, pm);
3108 break;
3109 case 1:
3110 gen_helper_neon_qunzip16(pd, pm);
3111 break;
3112 case 2:
3113 gen_helper_neon_qunzip32(pd, pm);
3114 break;
3115 default:
3116 abort();
3118 } else {
3119 switch (size) {
3120 case 0:
3121 gen_helper_neon_unzip8(pd, pm);
3122 break;
3123 case 1:
3124 gen_helper_neon_unzip16(pd, pm);
3125 break;
3126 default:
3127 abort();
3130 tcg_temp_free_ptr(pd);
3131 tcg_temp_free_ptr(pm);
3132 return 0;
3135 static int gen_neon_zip(int rd, int rm, int size, int q)
3137 TCGv_ptr pd, pm;
3139 if (!q && size == 2) {
3140 return 1;
3142 pd = vfp_reg_ptr(true, rd);
3143 pm = vfp_reg_ptr(true, rm);
3144 if (q) {
3145 switch (size) {
3146 case 0:
3147 gen_helper_neon_qzip8(pd, pm);
3148 break;
3149 case 1:
3150 gen_helper_neon_qzip16(pd, pm);
3151 break;
3152 case 2:
3153 gen_helper_neon_qzip32(pd, pm);
3154 break;
3155 default:
3156 abort();
3158 } else {
3159 switch (size) {
3160 case 0:
3161 gen_helper_neon_zip8(pd, pm);
3162 break;
3163 case 1:
3164 gen_helper_neon_zip16(pd, pm);
3165 break;
3166 default:
3167 abort();
3170 tcg_temp_free_ptr(pd);
3171 tcg_temp_free_ptr(pm);
3172 return 0;
3175 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
3177 TCGv_i32 rd, tmp;
3179 rd = tcg_temp_new_i32();
3180 tmp = tcg_temp_new_i32();
3182 tcg_gen_shli_i32(rd, t0, 8);
3183 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3184 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3185 tcg_gen_or_i32(rd, rd, tmp);
3187 tcg_gen_shri_i32(t1, t1, 8);
3188 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3189 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3190 tcg_gen_or_i32(t1, t1, tmp);
3191 tcg_gen_mov_i32(t0, rd);
3193 tcg_temp_free_i32(tmp);
3194 tcg_temp_free_i32(rd);
3197 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
3199 TCGv_i32 rd, tmp;
3201 rd = tcg_temp_new_i32();
3202 tmp = tcg_temp_new_i32();
3204 tcg_gen_shli_i32(rd, t0, 16);
3205 tcg_gen_andi_i32(tmp, t1, 0xffff);
3206 tcg_gen_or_i32(rd, rd, tmp);
3207 tcg_gen_shri_i32(t1, t1, 16);
3208 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3209 tcg_gen_or_i32(t1, t1, tmp);
3210 tcg_gen_mov_i32(t0, rd);
3212 tcg_temp_free_i32(tmp);
3213 tcg_temp_free_i32(rd);
3216 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
3218 switch (size) {
3219 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3220 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3221 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
3222 default: abort();
3226 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
3228 switch (size) {
3229 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3230 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3231 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3232 default: abort();
3236 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
3238 switch (size) {
3239 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3240 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3241 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3242 default: abort();
3246 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
3248 switch (size) {
3249 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
3250 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
3251 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
3252 default: abort();
3256 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
3257 int q, int u)
3259 if (q) {
3260 if (u) {
3261 switch (size) {
3262 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3263 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3264 default: abort();
3266 } else {
3267 switch (size) {
3268 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3269 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3270 default: abort();
3273 } else {
3274 if (u) {
3275 switch (size) {
3276 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
3277 case 2: gen_ushl_i32(var, var, shift); break;
3278 default: abort();
3280 } else {
3281 switch (size) {
3282 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3283 case 2: gen_sshl_i32(var, var, shift); break;
3284 default: abort();
3290 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
3292 if (u) {
3293 switch (size) {
3294 case 0: gen_helper_neon_widen_u8(dest, src); break;
3295 case 1: gen_helper_neon_widen_u16(dest, src); break;
3296 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3297 default: abort();
3299 } else {
3300 switch (size) {
3301 case 0: gen_helper_neon_widen_s8(dest, src); break;
3302 case 1: gen_helper_neon_widen_s16(dest, src); break;
3303 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3304 default: abort();
3307 tcg_temp_free_i32(src);
3310 static inline void gen_neon_addl(int size)
3312 switch (size) {
3313 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3314 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3315 case 2: tcg_gen_add_i64(CPU_V001); break;
3316 default: abort();
3320 static inline void gen_neon_subl(int size)
3322 switch (size) {
3323 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
3324 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
3325 case 2: tcg_gen_sub_i64(CPU_V001); break;
3326 default: abort();
3330 static inline void gen_neon_negl(TCGv_i64 var, int size)
3332 switch (size) {
3333 case 0: gen_helper_neon_negl_u16(var, var); break;
3334 case 1: gen_helper_neon_negl_u32(var, var); break;
3335 case 2:
3336 tcg_gen_neg_i64(var, var);
3337 break;
3338 default: abort();
3342 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
3344 switch (size) {
3345 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
3346 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
3347 default: abort();
3351 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
3352 int size, int u)
3354 TCGv_i64 tmp;
3356 switch ((size << 1) | u) {
3357 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
3358 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
3359 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
3360 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
3361 case 4:
3362 tmp = gen_muls_i64_i32(a, b);
3363 tcg_gen_mov_i64(dest, tmp);
3364 tcg_temp_free_i64(tmp);
3365 break;
3366 case 5:
3367 tmp = gen_mulu_i64_i32(a, b);
3368 tcg_gen_mov_i64(dest, tmp);
3369 tcg_temp_free_i64(tmp);
3370 break;
3371 default: abort();
3374 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
3375 Don't forget to clean them now. */
3376 if (size < 2) {
3377 tcg_temp_free_i32(a);
3378 tcg_temp_free_i32(b);
3382 static void gen_neon_narrow_op(int op, int u, int size,
3383 TCGv_i32 dest, TCGv_i64 src)
3385 if (op) {
3386 if (u) {
3387 gen_neon_unarrow_sats(size, dest, src);
3388 } else {
3389 gen_neon_narrow(size, dest, src);
3391 } else {
3392 if (u) {
3393 gen_neon_narrow_satu(size, dest, src);
3394 } else {
3395 gen_neon_narrow_sats(size, dest, src);
3400 /* Symbolic constants for op fields for Neon 3-register same-length.
3401 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
3402 * table A7-9.
3403 */
3404 #define NEON_3R_VHADD 0
3405 #define NEON_3R_VQADD 1
3406 #define NEON_3R_VRHADD 2
3407 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
3408 #define NEON_3R_VHSUB 4
3409 #define NEON_3R_VQSUB 5
3410 #define NEON_3R_VCGT 6
3411 #define NEON_3R_VCGE 7
3412 #define NEON_3R_VSHL 8
3413 #define NEON_3R_VQSHL 9
3414 #define NEON_3R_VRSHL 10
3415 #define NEON_3R_VQRSHL 11
3416 #define NEON_3R_VMAX 12
3417 #define NEON_3R_VMIN 13
3418 #define NEON_3R_VABD 14
3419 #define NEON_3R_VABA 15
3420 #define NEON_3R_VADD_VSUB 16
3421 #define NEON_3R_VTST_VCEQ 17
3422 #define NEON_3R_VML 18 /* VMLA, VMLS */
3423 #define NEON_3R_VMUL 19
3424 #define NEON_3R_VPMAX 20
3425 #define NEON_3R_VPMIN 21
3426 #define NEON_3R_VQDMULH_VQRDMULH 22
3427 #define NEON_3R_VPADD_VQRDMLAH 23
3428 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
3429 #define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
3430 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
3431 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
3432 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
3433 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
3434 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
3435 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
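/*
 * Illustrative sketch, added for exposition and not part of the original
 * file: one way to form the 5-bit op index described above, taking insn
 * bits [11:8] as the upper four bits and bit [4] as the low bit.  The
 * helper name is hypothetical.
 */
static inline int example_neon_3r_op(uint32_t insn)
{
    return ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
}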
3437 static const uint8_t neon_3r_sizes[] = {
3438 [NEON_3R_VHADD] = 0x7,
3439 [NEON_3R_VQADD] = 0xf,
3440 [NEON_3R_VRHADD] = 0x7,
3441 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
3442 [NEON_3R_VHSUB] = 0x7,
3443 [NEON_3R_VQSUB] = 0xf,
3444 [NEON_3R_VCGT] = 0x7,
3445 [NEON_3R_VCGE] = 0x7,
3446 [NEON_3R_VSHL] = 0xf,
3447 [NEON_3R_VQSHL] = 0xf,
3448 [NEON_3R_VRSHL] = 0xf,
3449 [NEON_3R_VQRSHL] = 0xf,
3450 [NEON_3R_VMAX] = 0x7,
3451 [NEON_3R_VMIN] = 0x7,
3452 [NEON_3R_VABD] = 0x7,
3453 [NEON_3R_VABA] = 0x7,
3454 [NEON_3R_VADD_VSUB] = 0xf,
3455 [NEON_3R_VTST_VCEQ] = 0x7,
3456 [NEON_3R_VML] = 0x7,
3457 [NEON_3R_VMUL] = 0x7,
3458 [NEON_3R_VPMAX] = 0x7,
3459 [NEON_3R_VPMIN] = 0x7,
3460 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
3461 [NEON_3R_VPADD_VQRDMLAH] = 0x7,
3462 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
3463 [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
3464 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
3465 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
3466 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
3467 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
3468 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
3469 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
3472 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
3473 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
3474 * table A7-13.
3475 */
3476 #define NEON_2RM_VREV64 0
3477 #define NEON_2RM_VREV32 1
3478 #define NEON_2RM_VREV16 2
3479 #define NEON_2RM_VPADDL 4
3480 #define NEON_2RM_VPADDL_U 5
3481 #define NEON_2RM_AESE 6 /* Includes AESD */
3482 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
3483 #define NEON_2RM_VCLS 8
3484 #define NEON_2RM_VCLZ 9
3485 #define NEON_2RM_VCNT 10
3486 #define NEON_2RM_VMVN 11
3487 #define NEON_2RM_VPADAL 12
3488 #define NEON_2RM_VPADAL_U 13
3489 #define NEON_2RM_VQABS 14
3490 #define NEON_2RM_VQNEG 15
3491 #define NEON_2RM_VCGT0 16
3492 #define NEON_2RM_VCGE0 17
3493 #define NEON_2RM_VCEQ0 18
3494 #define NEON_2RM_VCLE0 19
3495 #define NEON_2RM_VCLT0 20
3496 #define NEON_2RM_SHA1H 21
3497 #define NEON_2RM_VABS 22
3498 #define NEON_2RM_VNEG 23
3499 #define NEON_2RM_VCGT0_F 24
3500 #define NEON_2RM_VCGE0_F 25
3501 #define NEON_2RM_VCEQ0_F 26
3502 #define NEON_2RM_VCLE0_F 27
3503 #define NEON_2RM_VCLT0_F 28
3504 #define NEON_2RM_VABS_F 30
3505 #define NEON_2RM_VNEG_F 31
3506 #define NEON_2RM_VSWP 32
3507 #define NEON_2RM_VTRN 33
3508 #define NEON_2RM_VUZP 34
3509 #define NEON_2RM_VZIP 35
3510 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
3511 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
3512 #define NEON_2RM_VSHLL 38
3513 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
3514 #define NEON_2RM_VRINTN 40
3515 #define NEON_2RM_VRINTX 41
3516 #define NEON_2RM_VRINTA 42
3517 #define NEON_2RM_VRINTZ 43
3518 #define NEON_2RM_VCVT_F16_F32 44
3519 #define NEON_2RM_VRINTM 45
3520 #define NEON_2RM_VCVT_F32_F16 46
3521 #define NEON_2RM_VRINTP 47
3522 #define NEON_2RM_VCVTAU 48
3523 #define NEON_2RM_VCVTAS 49
3524 #define NEON_2RM_VCVTNU 50
3525 #define NEON_2RM_VCVTNS 51
3526 #define NEON_2RM_VCVTPU 52
3527 #define NEON_2RM_VCVTPS 53
3528 #define NEON_2RM_VCVTMU 54
3529 #define NEON_2RM_VCVTMS 55
3530 #define NEON_2RM_VRECPE 56
3531 #define NEON_2RM_VRSQRTE 57
3532 #define NEON_2RM_VRECPE_F 58
3533 #define NEON_2RM_VRSQRTE_F 59
3534 #define NEON_2RM_VCVT_FS 60
3535 #define NEON_2RM_VCVT_FU 61
3536 #define NEON_2RM_VCVT_SF 62
3537 #define NEON_2RM_VCVT_UF 63
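/*
 * Illustrative sketch, added for exposition and not part of the original
 * file: the matching 6-bit op index for the 2-register misc group, taking
 * insn bits [17:16] as the top two bits and bits [10:7] as the low four.
 * The helper name is hypothetical.
 */
static inline int example_neon_2rm_op(uint32_t insn)
{
    return ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
}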
3539 static bool neon_2rm_is_v8_op(int op)
3541 /* Return true if this neon 2reg-misc op is ARMv8 and up */
3542 switch (op) {
3543 case NEON_2RM_VRINTN:
3544 case NEON_2RM_VRINTA:
3545 case NEON_2RM_VRINTM:
3546 case NEON_2RM_VRINTP:
3547 case NEON_2RM_VRINTZ:
3548 case NEON_2RM_VRINTX:
3549 case NEON_2RM_VCVTAU:
3550 case NEON_2RM_VCVTAS:
3551 case NEON_2RM_VCVTNU:
3552 case NEON_2RM_VCVTNS:
3553 case NEON_2RM_VCVTPU:
3554 case NEON_2RM_VCVTPS:
3555 case NEON_2RM_VCVTMU:
3556 case NEON_2RM_VCVTMS:
3557 return true;
3558 default:
3559 return false;
3563 /* Each entry in this array has bit n set if the insn allows
3564 * size value n (otherwise it will UNDEF). Since unallocated
3565 * op values will have no bits set, they always UNDEF.
3566 */
3567 static const uint8_t neon_2rm_sizes[] = {
3568 [NEON_2RM_VREV64] = 0x7,
3569 [NEON_2RM_VREV32] = 0x3,
3570 [NEON_2RM_VREV16] = 0x1,
3571 [NEON_2RM_VPADDL] = 0x7,
3572 [NEON_2RM_VPADDL_U] = 0x7,
3573 [NEON_2RM_AESE] = 0x1,
3574 [NEON_2RM_AESMC] = 0x1,
3575 [NEON_2RM_VCLS] = 0x7,
3576 [NEON_2RM_VCLZ] = 0x7,
3577 [NEON_2RM_VCNT] = 0x1,
3578 [NEON_2RM_VMVN] = 0x1,
3579 [NEON_2RM_VPADAL] = 0x7,
3580 [NEON_2RM_VPADAL_U] = 0x7,
3581 [NEON_2RM_VQABS] = 0x7,
3582 [NEON_2RM_VQNEG] = 0x7,
3583 [NEON_2RM_VCGT0] = 0x7,
3584 [NEON_2RM_VCGE0] = 0x7,
3585 [NEON_2RM_VCEQ0] = 0x7,
3586 [NEON_2RM_VCLE0] = 0x7,
3587 [NEON_2RM_VCLT0] = 0x7,
3588 [NEON_2RM_SHA1H] = 0x4,
3589 [NEON_2RM_VABS] = 0x7,
3590 [NEON_2RM_VNEG] = 0x7,
3591 [NEON_2RM_VCGT0_F] = 0x4,
3592 [NEON_2RM_VCGE0_F] = 0x4,
3593 [NEON_2RM_VCEQ0_F] = 0x4,
3594 [NEON_2RM_VCLE0_F] = 0x4,
3595 [NEON_2RM_VCLT0_F] = 0x4,
3596 [NEON_2RM_VABS_F] = 0x4,
3597 [NEON_2RM_VNEG_F] = 0x4,
3598 [NEON_2RM_VSWP] = 0x1,
3599 [NEON_2RM_VTRN] = 0x7,
3600 [NEON_2RM_VUZP] = 0x7,
3601 [NEON_2RM_VZIP] = 0x7,
3602 [NEON_2RM_VMOVN] = 0x7,
3603 [NEON_2RM_VQMOVN] = 0x7,
3604 [NEON_2RM_VSHLL] = 0x7,
3605 [NEON_2RM_SHA1SU1] = 0x4,
3606 [NEON_2RM_VRINTN] = 0x4,
3607 [NEON_2RM_VRINTX] = 0x4,
3608 [NEON_2RM_VRINTA] = 0x4,
3609 [NEON_2RM_VRINTZ] = 0x4,
3610 [NEON_2RM_VCVT_F16_F32] = 0x2,
3611 [NEON_2RM_VRINTM] = 0x4,
3612 [NEON_2RM_VCVT_F32_F16] = 0x2,
3613 [NEON_2RM_VRINTP] = 0x4,
3614 [NEON_2RM_VCVTAU] = 0x4,
3615 [NEON_2RM_VCVTAS] = 0x4,
3616 [NEON_2RM_VCVTNU] = 0x4,
3617 [NEON_2RM_VCVTNS] = 0x4,
3618 [NEON_2RM_VCVTPU] = 0x4,
3619 [NEON_2RM_VCVTPS] = 0x4,
3620 [NEON_2RM_VCVTMU] = 0x4,
3621 [NEON_2RM_VCVTMS] = 0x4,
3622 [NEON_2RM_VRECPE] = 0x4,
3623 [NEON_2RM_VRSQRTE] = 0x4,
3624 [NEON_2RM_VRECPE_F] = 0x4,
3625 [NEON_2RM_VRSQRTE_F] = 0x4,
3626 [NEON_2RM_VCVT_FS] = 0x4,
3627 [NEON_2RM_VCVT_FU] = 0x4,
3628 [NEON_2RM_VCVT_SF] = 0x4,
3629 [NEON_2RM_VCVT_UF] = 0x4,
3630 };
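/*
 * Illustrative sketch, added for exposition and not part of the original
 * file: how the table above is consulted.  Bit n of an entry set means
 * size value n is allowed; a clear bit (including every bit of an
 * unallocated op, whose entry is zero) means the insn UNDEFs.  The
 * helper name is hypothetical.
 */
static inline bool example_neon_2rm_size_ok(int op, int size)
{
    return (neon_2rm_sizes[op] & (1 << size)) != 0;
}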
3633 /* Expand v8.1 simd helper. */
3634 static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
3635 int q, int rd, int rn, int rm)
3637 if (dc_isar_feature(aa32_rdm, s)) {
3638 int opr_sz = (1 + q) * 8;
3639 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
3640 vfp_reg_offset(1, rn),
3641 vfp_reg_offset(1, rm), cpu_env,
3642 opr_sz, opr_sz, 0, fn);
3643 return 0;
3644 }
3645 return 1;
3646 }
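/*
 * Illustrative usage, added for exposition and not part of the original
 * file: expanding a v8.1-A VQRDMLAH on 32-bit elements through the
 * expander above.  The wrapper name is hypothetical; it assumes the
 * gen_helper_gvec_qrdmlah_s32 vector helper is the one wired up for
 * this operation.
 */
static int example_expand_vqrdmlah_s32(DisasContext *s, int q,
                                       int rd, int rn, int rm)
{
    /* Returns 1 (i.e. UNDEF) if the RDM extension is not implemented. */
    return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32, q, rd, rn, rm);
}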
3648 static void gen_ceq0_i32(TCGv_i32 d, TCGv_i32 a)
3650 tcg_gen_setcondi_i32(TCG_COND_EQ, d, a, 0);
3651 tcg_gen_neg_i32(d, d);
3654 static void gen_ceq0_i64(TCGv_i64 d, TCGv_i64 a)
3656 tcg_gen_setcondi_i64(TCG_COND_EQ, d, a, 0);
3657 tcg_gen_neg_i64(d, d);
3660 static void gen_ceq0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
3662 TCGv_vec zero = tcg_const_zeros_vec_matching(d);
3663 tcg_gen_cmp_vec(TCG_COND_EQ, vece, d, a, zero);
3664 tcg_temp_free_vec(zero);
3667 static const TCGOpcode vecop_list_cmp[] = {
3668 INDEX_op_cmp_vec, 0
3671 const GVecGen2 ceq0_op[4] = {
3672 { .fno = gen_helper_gvec_ceq0_b,
3673 .fniv = gen_ceq0_vec,
3674 .opt_opc = vecop_list_cmp,
3675 .vece = MO_8 },
3676 { .fno = gen_helper_gvec_ceq0_h,
3677 .fniv = gen_ceq0_vec,
3678 .opt_opc = vecop_list_cmp,
3679 .vece = MO_16 },
3680 { .fni4 = gen_ceq0_i32,
3681 .fniv = gen_ceq0_vec,
3682 .opt_opc = vecop_list_cmp,
3683 .vece = MO_32 },
3684 { .fni8 = gen_ceq0_i64,
3685 .fniv = gen_ceq0_vec,
3686 .opt_opc = vecop_list_cmp,
3687 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3688 .vece = MO_64 },
3691 static void gen_cle0_i32(TCGv_i32 d, TCGv_i32 a)
3693 tcg_gen_setcondi_i32(TCG_COND_LE, d, a, 0);
3694 tcg_gen_neg_i32(d, d);
3697 static void gen_cle0_i64(TCGv_i64 d, TCGv_i64 a)
3699 tcg_gen_setcondi_i64(TCG_COND_LE, d, a, 0);
3700 tcg_gen_neg_i64(d, d);
3703 static void gen_cle0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
3705 TCGv_vec zero = tcg_const_zeros_vec_matching(d);
3706 tcg_gen_cmp_vec(TCG_COND_LE, vece, d, a, zero);
3707 tcg_temp_free_vec(zero);
3710 const GVecGen2 cle0_op[4] = {
3711 { .fno = gen_helper_gvec_cle0_b,
3712 .fniv = gen_cle0_vec,
3713 .opt_opc = vecop_list_cmp,
3714 .vece = MO_8 },
3715 { .fno = gen_helper_gvec_cle0_h,
3716 .fniv = gen_cle0_vec,
3717 .opt_opc = vecop_list_cmp,
3718 .vece = MO_16 },
3719 { .fni4 = gen_cle0_i32,
3720 .fniv = gen_cle0_vec,
3721 .opt_opc = vecop_list_cmp,
3722 .vece = MO_32 },
3723 { .fni8 = gen_cle0_i64,
3724 .fniv = gen_cle0_vec,
3725 .opt_opc = vecop_list_cmp,
3726 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3727 .vece = MO_64 },
3730 static void gen_cge0_i32(TCGv_i32 d, TCGv_i32 a)
3732 tcg_gen_setcondi_i32(TCG_COND_GE, d, a, 0);
3733 tcg_gen_neg_i32(d, d);
3736 static void gen_cge0_i64(TCGv_i64 d, TCGv_i64 a)
3738 tcg_gen_setcondi_i64(TCG_COND_GE, d, a, 0);
3739 tcg_gen_neg_i64(d, d);
3742 static void gen_cge0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
3744 TCGv_vec zero = tcg_const_zeros_vec_matching(d);
3745 tcg_gen_cmp_vec(TCG_COND_GE, vece, d, a, zero);
3746 tcg_temp_free_vec(zero);
3749 const GVecGen2 cge0_op[4] = {
3750 { .fno = gen_helper_gvec_cge0_b,
3751 .fniv = gen_cge0_vec,
3752 .opt_opc = vecop_list_cmp,
3753 .vece = MO_8 },
3754 { .fno = gen_helper_gvec_cge0_h,
3755 .fniv = gen_cge0_vec,
3756 .opt_opc = vecop_list_cmp,
3757 .vece = MO_16 },
3758 { .fni4 = gen_cge0_i32,
3759 .fniv = gen_cge0_vec,
3760 .opt_opc = vecop_list_cmp,
3761 .vece = MO_32 },
3762 { .fni8 = gen_cge0_i64,
3763 .fniv = gen_cge0_vec,
3764 .opt_opc = vecop_list_cmp,
3765 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3766 .vece = MO_64 },
3769 static void gen_clt0_i32(TCGv_i32 d, TCGv_i32 a)
3771 tcg_gen_setcondi_i32(TCG_COND_LT, d, a, 0);
3772 tcg_gen_neg_i32(d, d);
3775 static void gen_clt0_i64(TCGv_i64 d, TCGv_i64 a)
3777 tcg_gen_setcondi_i64(TCG_COND_LT, d, a, 0);
3778 tcg_gen_neg_i64(d, d);
3781 static void gen_clt0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
3783 TCGv_vec zero = tcg_const_zeros_vec_matching(d);
3784 tcg_gen_cmp_vec(TCG_COND_LT, vece, d, a, zero);
3785 tcg_temp_free_vec(zero);
3788 const GVecGen2 clt0_op[4] = {
3789 { .fno = gen_helper_gvec_clt0_b,
3790 .fniv = gen_clt0_vec,
3791 .opt_opc = vecop_list_cmp,
3792 .vece = MO_8 },
3793 { .fno = gen_helper_gvec_clt0_h,
3794 .fniv = gen_clt0_vec,
3795 .opt_opc = vecop_list_cmp,
3796 .vece = MO_16 },
3797 { .fni4 = gen_clt0_i32,
3798 .fniv = gen_clt0_vec,
3799 .opt_opc = vecop_list_cmp,
3800 .vece = MO_32 },
3801 { .fni8 = gen_clt0_i64,
3802 .fniv = gen_clt0_vec,
3803 .opt_opc = vecop_list_cmp,
3804 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3805 .vece = MO_64 },
3808 static void gen_cgt0_i32(TCGv_i32 d, TCGv_i32 a)
3810 tcg_gen_setcondi_i32(TCG_COND_GT, d, a, 0);
3811 tcg_gen_neg_i32(d, d);
3814 static void gen_cgt0_i64(TCGv_i64 d, TCGv_i64 a)
3816 tcg_gen_setcondi_i64(TCG_COND_GT, d, a, 0);
3817 tcg_gen_neg_i64(d, d);
3820 static void gen_cgt0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
3822 TCGv_vec zero = tcg_const_zeros_vec_matching(d);
3823 tcg_gen_cmp_vec(TCG_COND_GT, vece, d, a, zero);
3824 tcg_temp_free_vec(zero);
3827 const GVecGen2 cgt0_op[4] = {
3828 { .fno = gen_helper_gvec_cgt0_b,
3829 .fniv = gen_cgt0_vec,
3830 .opt_opc = vecop_list_cmp,
3831 .vece = MO_8 },
3832 { .fno = gen_helper_gvec_cgt0_h,
3833 .fniv = gen_cgt0_vec,
3834 .opt_opc = vecop_list_cmp,
3835 .vece = MO_16 },
3836 { .fni4 = gen_cgt0_i32,
3837 .fniv = gen_cgt0_vec,
3838 .opt_opc = vecop_list_cmp,
3839 .vece = MO_32 },
3840 { .fni8 = gen_cgt0_i64,
3841 .fniv = gen_cgt0_vec,
3842 .opt_opc = vecop_list_cmp,
3843 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3844 .vece = MO_64 },
3845 };
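/*
 * Note added for exposition (not part of the original file): the
 * ceq0/cge0/cle0/cgt0/clt0 tables above all follow the same pattern:
 * compare each element against zero and negate the 0/1 setcond result
 * (or use cmp_vec directly), so a true lane becomes all-ones and a
 * false lane all-zeroes, matching Neon comparison semantics.
 */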
3847 static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3849 tcg_gen_vec_sar8i_i64(a, a, shift);
3850 tcg_gen_vec_add8_i64(d, d, a);
3853 static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3855 tcg_gen_vec_sar16i_i64(a, a, shift);
3856 tcg_gen_vec_add16_i64(d, d, a);
3859 static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
3861 tcg_gen_sari_i32(a, a, shift);
3862 tcg_gen_add_i32(d, d, a);
3865 static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3867 tcg_gen_sari_i64(a, a, shift);
3868 tcg_gen_add_i64(d, d, a);
3871 static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
3873 tcg_gen_sari_vec(vece, a, a, sh);
3874 tcg_gen_add_vec(vece, d, d, a);
3877 static const TCGOpcode vecop_list_ssra[] = {
3878 INDEX_op_sari_vec, INDEX_op_add_vec, 0
3881 const GVecGen2i ssra_op[4] = {
3882 { .fni8 = gen_ssra8_i64,
3883 .fniv = gen_ssra_vec,
3884 .load_dest = true,
3885 .opt_opc = vecop_list_ssra,
3886 .vece = MO_8 },
3887 { .fni8 = gen_ssra16_i64,
3888 .fniv = gen_ssra_vec,
3889 .load_dest = true,
3890 .opt_opc = vecop_list_ssra,
3891 .vece = MO_16 },
3892 { .fni4 = gen_ssra32_i32,
3893 .fniv = gen_ssra_vec,
3894 .load_dest = true,
3895 .opt_opc = vecop_list_ssra,
3896 .vece = MO_32 },
3897 { .fni8 = gen_ssra64_i64,
3898 .fniv = gen_ssra_vec,
3899 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3900 .opt_opc = vecop_list_ssra,
3901 .load_dest = true,
3902 .vece = MO_64 },
3905 static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3907 tcg_gen_vec_shr8i_i64(a, a, shift);
3908 tcg_gen_vec_add8_i64(d, d, a);
3911 static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3913 tcg_gen_vec_shr16i_i64(a, a, shift);
3914 tcg_gen_vec_add16_i64(d, d, a);
3917 static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
3919 tcg_gen_shri_i32(a, a, shift);
3920 tcg_gen_add_i32(d, d, a);
3923 static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3925 tcg_gen_shri_i64(a, a, shift);
3926 tcg_gen_add_i64(d, d, a);
3929 static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
3931 tcg_gen_shri_vec(vece, a, a, sh);
3932 tcg_gen_add_vec(vece, d, d, a);
3935 static const TCGOpcode vecop_list_usra[] = {
3936 INDEX_op_shri_vec, INDEX_op_add_vec, 0
3939 const GVecGen2i usra_op[4] = {
3940 { .fni8 = gen_usra8_i64,
3941 .fniv = gen_usra_vec,
3942 .load_dest = true,
3943 .opt_opc = vecop_list_usra,
3944 .vece = MO_8, },
3945 { .fni8 = gen_usra16_i64,
3946 .fniv = gen_usra_vec,
3947 .load_dest = true,
3948 .opt_opc = vecop_list_usra,
3949 .vece = MO_16, },
3950 { .fni4 = gen_usra32_i32,
3951 .fniv = gen_usra_vec,
3952 .load_dest = true,
3953 .opt_opc = vecop_list_usra,
3954 .vece = MO_32, },
3955 { .fni8 = gen_usra64_i64,
3956 .fniv = gen_usra_vec,
3957 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3958 .load_dest = true,
3959 .opt_opc = vecop_list_usra,
3960 .vece = MO_64, },
3961 };
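/*
 * Note added for exposition (not part of the original file): the
 * gen_shr*_ins helpers below implement shift-right-and-insert (VSRI):
 * each destination element keeps its top 'shift' bits and has the
 * logically right-shifted source inserted into the remaining low bits.
 */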
3963 static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3965 uint64_t mask = dup_const(MO_8, 0xff >> shift);
3966 TCGv_i64 t = tcg_temp_new_i64();
3968 tcg_gen_shri_i64(t, a, shift);
3969 tcg_gen_andi_i64(t, t, mask);
3970 tcg_gen_andi_i64(d, d, ~mask);
3971 tcg_gen_or_i64(d, d, t);
3972 tcg_temp_free_i64(t);
3975 static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3977 uint64_t mask = dup_const(MO_16, 0xffff >> shift);
3978 TCGv_i64 t = tcg_temp_new_i64();
3980 tcg_gen_shri_i64(t, a, shift);
3981 tcg_gen_andi_i64(t, t, mask);
3982 tcg_gen_andi_i64(d, d, ~mask);
3983 tcg_gen_or_i64(d, d, t);
3984 tcg_temp_free_i64(t);
3987 static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
3989 tcg_gen_shri_i32(a, a, shift);
3990 tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
3993 static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3995 tcg_gen_shri_i64(a, a, shift);
3996 tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
3999 static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4001 if (sh == 0) {
4002 tcg_gen_mov_vec(d, a);
4003 } else {
4004 TCGv_vec t = tcg_temp_new_vec_matching(d);
4005 TCGv_vec m = tcg_temp_new_vec_matching(d);
4007 tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
4008 tcg_gen_shri_vec(vece, t, a, sh);
4009 tcg_gen_and_vec(vece, d, d, m);
4010 tcg_gen_or_vec(vece, d, d, t);
4012 tcg_temp_free_vec(t);
4013 tcg_temp_free_vec(m);
4017 static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 };
4019 const GVecGen2i sri_op[4] = {
4020 { .fni8 = gen_shr8_ins_i64,
4021 .fniv = gen_shr_ins_vec,
4022 .load_dest = true,
4023 .opt_opc = vecop_list_sri,
4024 .vece = MO_8 },
4025 { .fni8 = gen_shr16_ins_i64,
4026 .fniv = gen_shr_ins_vec,
4027 .load_dest = true,
4028 .opt_opc = vecop_list_sri,
4029 .vece = MO_16 },
4030 { .fni4 = gen_shr32_ins_i32,
4031 .fniv = gen_shr_ins_vec,
4032 .load_dest = true,
4033 .opt_opc = vecop_list_sri,
4034 .vece = MO_32 },
4035 { .fni8 = gen_shr64_ins_i64,
4036 .fniv = gen_shr_ins_vec,
4037 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4038 .load_dest = true,
4039 .opt_opc = vecop_list_sri,
4040 .vece = MO_64 },
4041 };
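/*
 * Note added for exposition (not part of the original file): the
 * gen_shl*_ins helpers below are the left-shifting counterpart (VSLI):
 * each destination element keeps its low 'shift' bits and has the
 * left-shifted source inserted into the remaining upper bits.
 */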
4043 static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4045 uint64_t mask = dup_const(MO_8, 0xff << shift);
4046 TCGv_i64 t = tcg_temp_new_i64();
4048 tcg_gen_shli_i64(t, a, shift);
4049 tcg_gen_andi_i64(t, t, mask);
4050 tcg_gen_andi_i64(d, d, ~mask);
4051 tcg_gen_or_i64(d, d, t);
4052 tcg_temp_free_i64(t);
4055 static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4057 uint64_t mask = dup_const(MO_16, 0xffff << shift);
4058 TCGv_i64 t = tcg_temp_new_i64();
4060 tcg_gen_shli_i64(t, a, shift);
4061 tcg_gen_andi_i64(t, t, mask);
4062 tcg_gen_andi_i64(d, d, ~mask);
4063 tcg_gen_or_i64(d, d, t);
4064 tcg_temp_free_i64(t);
4067 static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
4069 tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
4072 static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4074 tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
4077 static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4079 if (sh == 0) {
4080 tcg_gen_mov_vec(d, a);
4081 } else {
4082 TCGv_vec t = tcg_temp_new_vec_matching(d);
4083 TCGv_vec m = tcg_temp_new_vec_matching(d);
4085 tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
4086 tcg_gen_shli_vec(vece, t, a, sh);
4087 tcg_gen_and_vec(vece, d, d, m);
4088 tcg_gen_or_vec(vece, d, d, t);
4090 tcg_temp_free_vec(t);
4091 tcg_temp_free_vec(m);
4095 static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 };
4097 const GVecGen2i sli_op[4] = {
4098 { .fni8 = gen_shl8_ins_i64,
4099 .fniv = gen_shl_ins_vec,
4100 .load_dest = true,
4101 .opt_opc = vecop_list_sli,
4102 .vece = MO_8 },
4103 { .fni8 = gen_shl16_ins_i64,
4104 .fniv = gen_shl_ins_vec,
4105 .load_dest = true,
4106 .opt_opc = vecop_list_sli,
4107 .vece = MO_16 },
4108 { .fni4 = gen_shl32_ins_i32,
4109 .fniv = gen_shl_ins_vec,
4110 .load_dest = true,
4111 .opt_opc = vecop_list_sli,
4112 .vece = MO_32 },
4113 { .fni8 = gen_shl64_ins_i64,
4114 .fniv = gen_shl_ins_vec,
4115 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4116 .load_dest = true,
4117 .opt_opc = vecop_list_sli,
4118 .vece = MO_64 },
4121 static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4123 gen_helper_neon_mul_u8(a, a, b);
4124 gen_helper_neon_add_u8(d, d, a);
4127 static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4129 gen_helper_neon_mul_u8(a, a, b);
4130 gen_helper_neon_sub_u8(d, d, a);
4133 static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4135 gen_helper_neon_mul_u16(a, a, b);
4136 gen_helper_neon_add_u16(d, d, a);
4139 static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4141 gen_helper_neon_mul_u16(a, a, b);
4142 gen_helper_neon_sub_u16(d, d, a);
4145 static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4147 tcg_gen_mul_i32(a, a, b);
4148 tcg_gen_add_i32(d, d, a);
4151 static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4153 tcg_gen_mul_i32(a, a, b);
4154 tcg_gen_sub_i32(d, d, a);
4157 static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4159 tcg_gen_mul_i64(a, a, b);
4160 tcg_gen_add_i64(d, d, a);
4163 static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4165 tcg_gen_mul_i64(a, a, b);
4166 tcg_gen_sub_i64(d, d, a);
4169 static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4171 tcg_gen_mul_vec(vece, a, a, b);
4172 tcg_gen_add_vec(vece, d, d, a);
4175 static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4177 tcg_gen_mul_vec(vece, a, a, b);
4178 tcg_gen_sub_vec(vece, d, d, a);
4181 /* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
4182 * these tables are shared with AArch64 which does support them.
4185 static const TCGOpcode vecop_list_mla[] = {
4186 INDEX_op_mul_vec, INDEX_op_add_vec, 0
4189 static const TCGOpcode vecop_list_mls[] = {
4190 INDEX_op_mul_vec, INDEX_op_sub_vec, 0
4193 const GVecGen3 mla_op[4] = {
4194 { .fni4 = gen_mla8_i32,
4195 .fniv = gen_mla_vec,
4196 .load_dest = true,
4197 .opt_opc = vecop_list_mla,
4198 .vece = MO_8 },
4199 { .fni4 = gen_mla16_i32,
4200 .fniv = gen_mla_vec,
4201 .load_dest = true,
4202 .opt_opc = vecop_list_mla,
4203 .vece = MO_16 },
4204 { .fni4 = gen_mla32_i32,
4205 .fniv = gen_mla_vec,
4206 .load_dest = true,
4207 .opt_opc = vecop_list_mla,
4208 .vece = MO_32 },
4209 { .fni8 = gen_mla64_i64,
4210 .fniv = gen_mla_vec,
4211 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4212 .load_dest = true,
4213 .opt_opc = vecop_list_mla,
4214 .vece = MO_64 },
4217 const GVecGen3 mls_op[4] = {
4218 { .fni4 = gen_mls8_i32,
4219 .fniv = gen_mls_vec,
4220 .load_dest = true,
4221 .opt_opc = vecop_list_mls,
4222 .vece = MO_8 },
4223 { .fni4 = gen_mls16_i32,
4224 .fniv = gen_mls_vec,
4225 .load_dest = true,
4226 .opt_opc = vecop_list_mls,
4227 .vece = MO_16 },
4228 { .fni4 = gen_mls32_i32,
4229 .fniv = gen_mls_vec,
4230 .load_dest = true,
4231 .opt_opc = vecop_list_mls,
4232 .vece = MO_32 },
4233 { .fni8 = gen_mls64_i64,
4234 .fniv = gen_mls_vec,
4235 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4236 .load_dest = true,
4237 .opt_opc = vecop_list_mls,
4238 .vece = MO_64 },
4241 /* CMTST : test is "if ((X & Y) != 0)". */
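/*
 * For example, with 32-bit elements CMTST(0x0f, 0xf0) yields 0 (no
 * common set bit) while CMTST(0x0f, 0x01) yields 0xffffffff.
 */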
4242 static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4244 tcg_gen_and_i32(d, a, b);
4245 tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
4246 tcg_gen_neg_i32(d, d);
4249 void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4251 tcg_gen_and_i64(d, a, b);
4252 tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
4253 tcg_gen_neg_i64(d, d);
4256 static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4258 tcg_gen_and_vec(vece, d, a, b);
4259 tcg_gen_dupi_vec(vece, a, 0);
4260 tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
4263 static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 };
4265 const GVecGen3 cmtst_op[4] = {
4266 { .fni4 = gen_helper_neon_tst_u8,
4267 .fniv = gen_cmtst_vec,
4268 .opt_opc = vecop_list_cmtst,
4269 .vece = MO_8 },
4270 { .fni4 = gen_helper_neon_tst_u16,
4271 .fniv = gen_cmtst_vec,
4272 .opt_opc = vecop_list_cmtst,
4273 .vece = MO_16 },
4274 { .fni4 = gen_cmtst_i32,
4275 .fniv = gen_cmtst_vec,
4276 .opt_opc = vecop_list_cmtst,
4277 .vece = MO_32 },
4278 { .fni8 = gen_cmtst_i64,
4279 .fniv = gen_cmtst_vec,
4280 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4281 .opt_opc = vecop_list_cmtst,
4282 .vece = MO_64 },
4285 void gen_ushl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
4287 TCGv_i32 lval = tcg_temp_new_i32();
4288 TCGv_i32 rval = tcg_temp_new_i32();
4289 TCGv_i32 lsh = tcg_temp_new_i32();
4290 TCGv_i32 rsh = tcg_temp_new_i32();
4291 TCGv_i32 zero = tcg_const_i32(0);
4292 TCGv_i32 max = tcg_const_i32(32);
4295 * Rely on the TCG guarantee that out of range shifts produce
4296 * unspecified results, not undefined behaviour (i.e. no trap).
4297 * Discard out-of-range results after the fact.
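 * For example, a shift operand byte of 0xfe (-2) selects a right shift
 * by 2, while a shift amount of +/-32 or more leaves the 32-bit value
 * zeroed.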
4299 tcg_gen_ext8s_i32(lsh, shift);
4300 tcg_gen_neg_i32(rsh, lsh);
4301 tcg_gen_shl_i32(lval, src, lsh);
4302 tcg_gen_shr_i32(rval, src, rsh);
4303 tcg_gen_movcond_i32(TCG_COND_LTU, dst, lsh, max, lval, zero);
4304 tcg_gen_movcond_i32(TCG_COND_LTU, dst, rsh, max, rval, dst);
4306 tcg_temp_free_i32(lval);
4307 tcg_temp_free_i32(rval);
4308 tcg_temp_free_i32(lsh);
4309 tcg_temp_free_i32(rsh);
4310 tcg_temp_free_i32(zero);
4311 tcg_temp_free_i32(max);
4314 void gen_ushl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift)
4316 TCGv_i64 lval = tcg_temp_new_i64();
4317 TCGv_i64 rval = tcg_temp_new_i64();
4318 TCGv_i64 lsh = tcg_temp_new_i64();
4319 TCGv_i64 rsh = tcg_temp_new_i64();
4320 TCGv_i64 zero = tcg_const_i64(0);
4321 TCGv_i64 max = tcg_const_i64(64);
4324 * Rely on the TCG guarantee that out of range shifts produce
4325 * unspecified results, not undefined behaviour (i.e. no trap).
4326 * Discard out-of-range results after the fact.
4328 tcg_gen_ext8s_i64(lsh, shift);
4329 tcg_gen_neg_i64(rsh, lsh);
4330 tcg_gen_shl_i64(lval, src, lsh);
4331 tcg_gen_shr_i64(rval, src, rsh);
4332 tcg_gen_movcond_i64(TCG_COND_LTU, dst, lsh, max, lval, zero);
4333 tcg_gen_movcond_i64(TCG_COND_LTU, dst, rsh, max, rval, dst);
4335 tcg_temp_free_i64(lval);
4336 tcg_temp_free_i64(rval);
4337 tcg_temp_free_i64(lsh);
4338 tcg_temp_free_i64(rsh);
4339 tcg_temp_free_i64(zero);
4340 tcg_temp_free_i64(max);
4343 static void gen_ushl_vec(unsigned vece, TCGv_vec dst,
4344 TCGv_vec src, TCGv_vec shift)
4346 TCGv_vec lval = tcg_temp_new_vec_matching(dst);
4347 TCGv_vec rval = tcg_temp_new_vec_matching(dst);
4348 TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
4349 TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
4350 TCGv_vec msk, max;
4352 tcg_gen_neg_vec(vece, rsh, shift);
4353 if (vece == MO_8) {
4354 tcg_gen_mov_vec(lsh, shift);
4355 } else {
4356 msk = tcg_temp_new_vec_matching(dst);
4357 tcg_gen_dupi_vec(vece, msk, 0xff);
4358 tcg_gen_and_vec(vece, lsh, shift, msk);
4359 tcg_gen_and_vec(vece, rsh, rsh, msk);
4360 tcg_temp_free_vec(msk);
4364 * Rely on the TCG guarantee that out of range shifts produce
4365 * unspecified results, not undefined behaviour (i.e. no trap).
4366 * Discard out-of-range results after the fact.
4368 tcg_gen_shlv_vec(vece, lval, src, lsh);
4369 tcg_gen_shrv_vec(vece, rval, src, rsh);
4371 max = tcg_temp_new_vec_matching(dst);
4372 tcg_gen_dupi_vec(vece, max, 8 << vece);
4375 * The choice of LT (signed) and GEU (unsigned) is biased toward
4376 * the instructions of the x86_64 host. For MO_8, the whole byte
4377 * is significant so we must use an unsigned compare; otherwise we
4378 * have already masked to a byte and so a signed compare works.
4379 * Other tcg hosts have a full set of comparisons and do not care.
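 * Concretely, for MO_8 a shift byte of 0x80 must compare as 128 (out of
 * range, result discarded); a signed compare against 8 would read it as
 * -128 and wrongly treat it as in range.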
4381 if (vece == MO_8) {
4382 tcg_gen_cmp_vec(TCG_COND_GEU, vece, lsh, lsh, max);
4383 tcg_gen_cmp_vec(TCG_COND_GEU, vece, rsh, rsh, max);
4384 tcg_gen_andc_vec(vece, lval, lval, lsh);
4385 tcg_gen_andc_vec(vece, rval, rval, rsh);
4386 } else {
4387 tcg_gen_cmp_vec(TCG_COND_LT, vece, lsh, lsh, max);
4388 tcg_gen_cmp_vec(TCG_COND_LT, vece, rsh, rsh, max);
4389 tcg_gen_and_vec(vece, lval, lval, lsh);
4390 tcg_gen_and_vec(vece, rval, rval, rsh);
4392 tcg_gen_or_vec(vece, dst, lval, rval);
4394 tcg_temp_free_vec(max);
4395 tcg_temp_free_vec(lval);
4396 tcg_temp_free_vec(rval);
4397 tcg_temp_free_vec(lsh);
4398 tcg_temp_free_vec(rsh);
4401 static const TCGOpcode ushl_list[] = {
4402 INDEX_op_neg_vec, INDEX_op_shlv_vec,
4403 INDEX_op_shrv_vec, INDEX_op_cmp_vec, 0
4406 const GVecGen3 ushl_op[4] = {
4407 { .fniv = gen_ushl_vec,
4408 .fno = gen_helper_gvec_ushl_b,
4409 .opt_opc = ushl_list,
4410 .vece = MO_8 },
4411 { .fniv = gen_ushl_vec,
4412 .fno = gen_helper_gvec_ushl_h,
4413 .opt_opc = ushl_list,
4414 .vece = MO_16 },
4415 { .fni4 = gen_ushl_i32,
4416 .fniv = gen_ushl_vec,
4417 .opt_opc = ushl_list,
4418 .vece = MO_32 },
4419 { .fni8 = gen_ushl_i64,
4420 .fniv = gen_ushl_vec,
4421 .opt_opc = ushl_list,
4422 .vece = MO_64 },
4425 void gen_sshl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
4427 TCGv_i32 lval = tcg_temp_new_i32();
4428 TCGv_i32 rval = tcg_temp_new_i32();
4429 TCGv_i32 lsh = tcg_temp_new_i32();
4430 TCGv_i32 rsh = tcg_temp_new_i32();
4431 TCGv_i32 zero = tcg_const_i32(0);
4432 TCGv_i32 max = tcg_const_i32(31);
4435 * Rely on the TCG guarantee that out of range shifts produce
4436 * unspecified results, not undefined behaviour (i.e. no trap).
4437 * Discard out-of-range results after the fact.
4439 tcg_gen_ext8s_i32(lsh, shift);
4440 tcg_gen_neg_i32(rsh, lsh);
4441 tcg_gen_shl_i32(lval, src, lsh);
4442 tcg_gen_umin_i32(rsh, rsh, max);
4443 tcg_gen_sar_i32(rval, src, rsh);
4444 tcg_gen_movcond_i32(TCG_COND_LEU, lval, lsh, max, lval, zero);
4445 tcg_gen_movcond_i32(TCG_COND_LT, dst, lsh, zero, rval, lval);
4447 tcg_temp_free_i32(lval);
4448 tcg_temp_free_i32(rval);
4449 tcg_temp_free_i32(lsh);
4450 tcg_temp_free_i32(rsh);
4451 tcg_temp_free_i32(zero);
4452 tcg_temp_free_i32(max);
4455 void gen_sshl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift)
4457 TCGv_i64 lval = tcg_temp_new_i64();
4458 TCGv_i64 rval = tcg_temp_new_i64();
4459 TCGv_i64 lsh = tcg_temp_new_i64();
4460 TCGv_i64 rsh = tcg_temp_new_i64();
4461 TCGv_i64 zero = tcg_const_i64(0);
4462 TCGv_i64 max = tcg_const_i64(63);
4465 * Rely on the TCG guarantee that out of range shifts produce
4466 * unspecified results, not undefined behaviour (i.e. no trap).
4467 * Discard out-of-range results after the fact.
4469 tcg_gen_ext8s_i64(lsh, shift);
4470 tcg_gen_neg_i64(rsh, lsh);
4471 tcg_gen_shl_i64(lval, src, lsh);
4472 tcg_gen_umin_i64(rsh, rsh, max);
4473 tcg_gen_sar_i64(rval, src, rsh);
4474 tcg_gen_movcond_i64(TCG_COND_LEU, lval, lsh, max, lval, zero);
4475 tcg_gen_movcond_i64(TCG_COND_LT, dst, lsh, zero, rval, lval);
4477 tcg_temp_free_i64(lval);
4478 tcg_temp_free_i64(rval);
4479 tcg_temp_free_i64(lsh);
4480 tcg_temp_free_i64(rsh);
4481 tcg_temp_free_i64(zero);
4482 tcg_temp_free_i64(max);
4485 static void gen_sshl_vec(unsigned vece, TCGv_vec dst,
4486 TCGv_vec src, TCGv_vec shift)
4488 TCGv_vec lval = tcg_temp_new_vec_matching(dst);
4489 TCGv_vec rval = tcg_temp_new_vec_matching(dst);
4490 TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
4491 TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
4492 TCGv_vec tmp = tcg_temp_new_vec_matching(dst);
4495 * Rely on the TCG guarantee that out of range shifts produce
4496 * unspecified results, not undefined behaviour (i.e. no trap).
4497 * Discard out-of-range results after the fact.
4499 tcg_gen_neg_vec(vece, rsh, shift);
4500 if (vece == MO_8) {
4501 tcg_gen_mov_vec(lsh, shift);
4502 } else {
4503 tcg_gen_dupi_vec(vece, tmp, 0xff);
4504 tcg_gen_and_vec(vece, lsh, shift, tmp);
4505 tcg_gen_and_vec(vece, rsh, rsh, tmp);
4508 /* Bound rsh so that an out-of-range right shift yields -1. */
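/* e.g. for byte elements a right shift by 10 is clamped to 7, giving
   0x00 or 0xff depending on the sign of the source lane. */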
4509 tcg_gen_dupi_vec(vece, tmp, (8 << vece) - 1);
4510 tcg_gen_umin_vec(vece, rsh, rsh, tmp);
4511 tcg_gen_cmp_vec(TCG_COND_GT, vece, tmp, lsh, tmp);
4513 tcg_gen_shlv_vec(vece, lval, src, lsh);
4514 tcg_gen_sarv_vec(vece, rval, src, rsh);
4516 /* Select in-bound left shift. */
4517 tcg_gen_andc_vec(vece, lval, lval, tmp);
4519 /* Select between left and right shift. */
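/*
 * For MO_8, lsh is still the raw signed shift byte, so compare against
 * 0; for wider elements lsh has been masked to 8 bits, so a negative
 * count now appears as 0x80..0xff and we compare against 0x80.
 */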
4520 if (vece == MO_8) {
4521 tcg_gen_dupi_vec(vece, tmp, 0);
4522 tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, rval, lval);
4523 } else {
4524 tcg_gen_dupi_vec(vece, tmp, 0x80);
4525 tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, lval, rval);
4528 tcg_temp_free_vec(lval);
4529 tcg_temp_free_vec(rval);
4530 tcg_temp_free_vec(lsh);
4531 tcg_temp_free_vec(rsh);
4532 tcg_temp_free_vec(tmp);
4535 static const TCGOpcode sshl_list[] = {
4536 INDEX_op_neg_vec, INDEX_op_umin_vec, INDEX_op_shlv_vec,
4537 INDEX_op_sarv_vec, INDEX_op_cmp_vec, INDEX_op_cmpsel_vec, 0
4540 const GVecGen3 sshl_op[4] = {
4541 { .fniv = gen_sshl_vec,
4542 .fno = gen_helper_gvec_sshl_b,
4543 .opt_opc = sshl_list,
4544 .vece = MO_8 },
4545 { .fniv = gen_sshl_vec,
4546 .fno = gen_helper_gvec_sshl_h,
4547 .opt_opc = sshl_list,
4548 .vece = MO_16 },
4549 { .fni4 = gen_sshl_i32,
4550 .fniv = gen_sshl_vec,
4551 .opt_opc = sshl_list,
4552 .vece = MO_32 },
4553 { .fni8 = gen_sshl_i64,
4554 .fniv = gen_sshl_vec,
4555 .opt_opc = sshl_list,
4556 .vece = MO_64 },
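/*
 * The saturating add/sub expanders below compute both the plain and the
 * saturating result; any lane where the two differ has saturated, and
 * the all-ones NE comparison result is OR-ed into the QC accumulator
 * passed in 'sat'.
 */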
4559 static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4560 TCGv_vec a, TCGv_vec b)
4562 TCGv_vec x = tcg_temp_new_vec_matching(t);
4563 tcg_gen_add_vec(vece, x, a, b);
4564 tcg_gen_usadd_vec(vece, t, a, b);
4565 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4566 tcg_gen_or_vec(vece, sat, sat, x);
4567 tcg_temp_free_vec(x);
4570 static const TCGOpcode vecop_list_uqadd[] = {
4571 INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
4574 const GVecGen4 uqadd_op[4] = {
4575 { .fniv = gen_uqadd_vec,
4576 .fno = gen_helper_gvec_uqadd_b,
4577 .write_aofs = true,
4578 .opt_opc = vecop_list_uqadd,
4579 .vece = MO_8 },
4580 { .fniv = gen_uqadd_vec,
4581 .fno = gen_helper_gvec_uqadd_h,
4582 .write_aofs = true,
4583 .opt_opc = vecop_list_uqadd,
4584 .vece = MO_16 },
4585 { .fniv = gen_uqadd_vec,
4586 .fno = gen_helper_gvec_uqadd_s,
4587 .write_aofs = true,
4588 .opt_opc = vecop_list_uqadd,
4589 .vece = MO_32 },
4590 { .fniv = gen_uqadd_vec,
4591 .fno = gen_helper_gvec_uqadd_d,
4592 .write_aofs = true,
4593 .opt_opc = vecop_list_uqadd,
4594 .vece = MO_64 },
4597 static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4598 TCGv_vec a, TCGv_vec b)
4600 TCGv_vec x = tcg_temp_new_vec_matching(t);
4601 tcg_gen_add_vec(vece, x, a, b);
4602 tcg_gen_ssadd_vec(vece, t, a, b);
4603 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4604 tcg_gen_or_vec(vece, sat, sat, x);
4605 tcg_temp_free_vec(x);
4608 static const TCGOpcode vecop_list_sqadd[] = {
4609 INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
4612 const GVecGen4 sqadd_op[4] = {
4613 { .fniv = gen_sqadd_vec,
4614 .fno = gen_helper_gvec_sqadd_b,
4615 .opt_opc = vecop_list_sqadd,
4616 .write_aofs = true,
4617 .vece = MO_8 },
4618 { .fniv = gen_sqadd_vec,
4619 .fno = gen_helper_gvec_sqadd_h,
4620 .opt_opc = vecop_list_sqadd,
4621 .write_aofs = true,
4622 .vece = MO_16 },
4623 { .fniv = gen_sqadd_vec,
4624 .fno = gen_helper_gvec_sqadd_s,
4625 .opt_opc = vecop_list_sqadd,
4626 .write_aofs = true,
4627 .vece = MO_32 },
4628 { .fniv = gen_sqadd_vec,
4629 .fno = gen_helper_gvec_sqadd_d,
4630 .opt_opc = vecop_list_sqadd,
4631 .write_aofs = true,
4632 .vece = MO_64 },
4635 static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4636 TCGv_vec a, TCGv_vec b)
4638 TCGv_vec x = tcg_temp_new_vec_matching(t);
4639 tcg_gen_sub_vec(vece, x, a, b);
4640 tcg_gen_ussub_vec(vece, t, a, b);
4641 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4642 tcg_gen_or_vec(vece, sat, sat, x);
4643 tcg_temp_free_vec(x);
4646 static const TCGOpcode vecop_list_uqsub[] = {
4647 INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
4650 const GVecGen4 uqsub_op[4] = {
4651 { .fniv = gen_uqsub_vec,
4652 .fno = gen_helper_gvec_uqsub_b,
4653 .opt_opc = vecop_list_uqsub,
4654 .write_aofs = true,
4655 .vece = MO_8 },
4656 { .fniv = gen_uqsub_vec,
4657 .fno = gen_helper_gvec_uqsub_h,
4658 .opt_opc = vecop_list_uqsub,
4659 .write_aofs = true,
4660 .vece = MO_16 },
4661 { .fniv = gen_uqsub_vec,
4662 .fno = gen_helper_gvec_uqsub_s,
4663 .opt_opc = vecop_list_uqsub,
4664 .write_aofs = true,
4665 .vece = MO_32 },
4666 { .fniv = gen_uqsub_vec,
4667 .fno = gen_helper_gvec_uqsub_d,
4668 .opt_opc = vecop_list_uqsub,
4669 .write_aofs = true,
4670 .vece = MO_64 },
4673 static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4674 TCGv_vec a, TCGv_vec b)
4676 TCGv_vec x = tcg_temp_new_vec_matching(t);
4677 tcg_gen_sub_vec(vece, x, a, b);
4678 tcg_gen_sssub_vec(vece, t, a, b);
4679 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4680 tcg_gen_or_vec(vece, sat, sat, x);
4681 tcg_temp_free_vec(x);
4684 static const TCGOpcode vecop_list_sqsub[] = {
4685 INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
4688 const GVecGen4 sqsub_op[4] = {
4689 { .fniv = gen_sqsub_vec,
4690 .fno = gen_helper_gvec_sqsub_b,
4691 .opt_opc = vecop_list_sqsub,
4692 .write_aofs = true,
4693 .vece = MO_8 },
4694 { .fniv = gen_sqsub_vec,
4695 .fno = gen_helper_gvec_sqsub_h,
4696 .opt_opc = vecop_list_sqsub,
4697 .write_aofs = true,
4698 .vece = MO_16 },
4699 { .fniv = gen_sqsub_vec,
4700 .fno = gen_helper_gvec_sqsub_s,
4701 .opt_opc = vecop_list_sqsub,
4702 .write_aofs = true,
4703 .vece = MO_32 },
4704 { .fniv = gen_sqsub_vec,
4705 .fno = gen_helper_gvec_sqsub_d,
4706 .opt_opc = vecop_list_sqsub,
4707 .write_aofs = true,
4708 .vece = MO_64 },
4711 /* Translate a NEON data processing instruction. Return nonzero if the
4712 instruction is invalid.
4713 We process data in a mixture of 32-bit and 64-bit chunks.
4714 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4716 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
4718 int op;
4719 int q;
4720 int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
4721 int size;
4722 int shift;
4723 int pass;
4724 int count;
4725 int pairwise;
4726 int u;
4727 int vec_size;
4728 uint32_t imm;
4729 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
4730 TCGv_ptr ptr1, ptr2, ptr3;
4731 TCGv_i64 tmp64;
4733 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
4734 return 1;
4737 /* FIXME: this access check should not take precedence over UNDEF
4738 * for invalid encodings; we will generate incorrect syndrome information
4739 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4741 if (s->fp_excp_el) {
4742 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
4743 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
4744 return 0;
4747 if (!s->vfp_enabled)
4748 return 1;
4749 q = (insn & (1 << 6)) != 0;
4750 u = (insn >> 24) & 1;
4751 VFP_DREG_D(rd, insn);
4752 VFP_DREG_N(rn, insn);
4753 VFP_DREG_M(rm, insn);
4754 size = (insn >> 20) & 3;
4755 vec_size = q ? 16 : 8;
4756 rd_ofs = neon_reg_offset(rd, 0);
4757 rn_ofs = neon_reg_offset(rn, 0);
4758 rm_ofs = neon_reg_offset(rm, 0);
4760 if ((insn & (1 << 23)) == 0) {
4761 /* Three register same length. */
4762 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4763 /* Catch invalid op and bad size combinations: UNDEF */
4764 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4765 return 1;
4767 /* All insns of this form UNDEF for either this condition or the
4768 * superset of cases "Q==1"; we catch the latter later.
4770 if (q && ((rd | rn | rm) & 1)) {
4771 return 1;
4773 switch (op) {
4774 case NEON_3R_SHA:
4775 /* The SHA-1/SHA-256 3-register instructions require special
4776 * treatment here, as their size field is overloaded as an
4777 * op type selector, and they all consume their input in a
4778 * single pass.
4780 if (!q) {
4781 return 1;
4783 if (!u) { /* SHA-1 */
4784 if (!dc_isar_feature(aa32_sha1, s)) {
4785 return 1;
4787 ptr1 = vfp_reg_ptr(true, rd);
4788 ptr2 = vfp_reg_ptr(true, rn);
4789 ptr3 = vfp_reg_ptr(true, rm);
4790 tmp4 = tcg_const_i32(size);
4791 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
4792 tcg_temp_free_i32(tmp4);
4793 } else { /* SHA-256 */
4794 if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
4795 return 1;
4797 ptr1 = vfp_reg_ptr(true, rd);
4798 ptr2 = vfp_reg_ptr(true, rn);
4799 ptr3 = vfp_reg_ptr(true, rm);
4800 switch (size) {
4801 case 0:
4802 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
4803 break;
4804 case 1:
4805 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
4806 break;
4807 case 2:
4808 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
4809 break;
4812 tcg_temp_free_ptr(ptr1);
4813 tcg_temp_free_ptr(ptr2);
4814 tcg_temp_free_ptr(ptr3);
4815 return 0;
4817 case NEON_3R_VPADD_VQRDMLAH:
4818 if (!u) {
4819 break; /* VPADD */
4821 /* VQRDMLAH */
4822 switch (size) {
4823 case 1:
4824 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
4825 q, rd, rn, rm);
4826 case 2:
4827 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
4828 q, rd, rn, rm);
4830 return 1;
4832 case NEON_3R_VFM_VQRDMLSH:
4833 if (!u) {
4834 /* VFM, VFMS */
4835 if (size == 1) {
4836 return 1;
4838 break;
4840 /* VQRDMLSH */
4841 switch (size) {
4842 case 1:
4843 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
4844 q, rd, rn, rm);
4845 case 2:
4846 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
4847 q, rd, rn, rm);
4849 return 1;
4851 case NEON_3R_VQADD:
4852 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
4853 rn_ofs, rm_ofs, vec_size, vec_size,
4854 (u ? uqadd_op : sqadd_op) + size);
4855 return 0;
4857 case NEON_3R_VQSUB:
4858 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
4859 rn_ofs, rm_ofs, vec_size, vec_size,
4860 (u ? uqsub_op : sqsub_op) + size);
4861 return 0;
4863 case NEON_3R_VMUL: /* VMUL */
4864 if (u) {
4865 /* Polynomial case allows only P8. */
4866 if (size != 0) {
4867 return 1;
4869 tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
4870 0, gen_helper_gvec_pmul_b);
4871 } else {
4872 tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
4873 vec_size, vec_size);
4875 return 0;
4877 case NEON_3R_VML: /* VMLA, VMLS */
4878 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
4879 u ? &mls_op[size] : &mla_op[size]);
4880 return 0;
4882 case NEON_3R_VSHL:
4883 /* Note the operation is vshl vd,vm,vn */
4884 tcg_gen_gvec_3(rd_ofs, rm_ofs, rn_ofs, vec_size, vec_size,
4885 u ? &ushl_op[size] : &sshl_op[size]);
4886 return 0;
4888 case NEON_3R_VADD_VSUB:
4889 case NEON_3R_LOGIC:
4890 case NEON_3R_VMAX:
4891 case NEON_3R_VMIN:
4892 case NEON_3R_VTST_VCEQ:
4893 case NEON_3R_VCGT:
4894 case NEON_3R_VCGE:
4895 /* Already handled by decodetree */
4896 return 1;
4899 if (size == 3) {
4900 /* 64-bit element instructions. */
4901 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4902 neon_load_reg64(cpu_V0, rn + pass);
4903 neon_load_reg64(cpu_V1, rm + pass);
4904 switch (op) {
4905 case NEON_3R_VQSHL:
4906 if (u) {
4907 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4908 cpu_V1, cpu_V0);
4909 } else {
4910 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4911 cpu_V1, cpu_V0);
4913 break;
4914 case NEON_3R_VRSHL:
4915 if (u) {
4916 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4917 } else {
4918 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4920 break;
4921 case NEON_3R_VQRSHL:
4922 if (u) {
4923 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4924 cpu_V1, cpu_V0);
4925 } else {
4926 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4927 cpu_V1, cpu_V0);
4929 break;
4930 default:
4931 abort();
4933 neon_store_reg64(cpu_V0, rd + pass);
4935 return 0;
4937 pairwise = 0;
4938 switch (op) {
4939 case NEON_3R_VQSHL:
4940 case NEON_3R_VRSHL:
4941 case NEON_3R_VQRSHL:
4943 int rtmp;
4944 /* Shift instruction operands are reversed. */
4945 rtmp = rn;
4946 rn = rm;
4947 rm = rtmp;
4949 break;
4950 case NEON_3R_VPADD_VQRDMLAH:
4951 case NEON_3R_VPMAX:
4952 case NEON_3R_VPMIN:
4953 pairwise = 1;
4954 break;
4955 case NEON_3R_FLOAT_ARITH:
4956 pairwise = (u && size < 2); /* if VPADD (float) */
4957 break;
4958 case NEON_3R_FLOAT_MINMAX:
4959 pairwise = u; /* if VPMIN/VPMAX (float) */
4960 break;
4961 case NEON_3R_FLOAT_CMP:
4962 if (!u && size) {
4963 /* no encoding for U=0 C=1x */
4964 return 1;
4966 break;
4967 case NEON_3R_FLOAT_ACMP:
4968 if (!u) {
4969 return 1;
4971 break;
4972 case NEON_3R_FLOAT_MISC:
4973 /* VMAXNM/VMINNM in ARMv8 */
4974 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
4975 return 1;
4977 break;
4978 case NEON_3R_VFM_VQRDMLSH:
4979 if (!dc_isar_feature(aa32_simdfmac, s)) {
4980 return 1;
4982 break;
4983 default:
4984 break;
4987 if (pairwise && q) {
4988 /* All the pairwise insns UNDEF if Q is set */
4989 return 1;
4992 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4994 if (pairwise) {
4995 /* Pairwise. */
4996 if (pass < 1) {
4997 tmp = neon_load_reg(rn, 0);
4998 tmp2 = neon_load_reg(rn, 1);
4999 } else {
5000 tmp = neon_load_reg(rm, 0);
5001 tmp2 = neon_load_reg(rm, 1);
5003 } else {
5004 /* Elementwise. */
5005 tmp = neon_load_reg(rn, pass);
5006 tmp2 = neon_load_reg(rm, pass);
5008 switch (op) {
5009 case NEON_3R_VHADD:
5010 GEN_NEON_INTEGER_OP(hadd);
5011 break;
5012 case NEON_3R_VRHADD:
5013 GEN_NEON_INTEGER_OP(rhadd);
5014 break;
5015 case NEON_3R_VHSUB:
5016 GEN_NEON_INTEGER_OP(hsub);
5017 break;
5018 case NEON_3R_VQSHL:
5019 GEN_NEON_INTEGER_OP_ENV(qshl);
5020 break;
5021 case NEON_3R_VRSHL:
5022 GEN_NEON_INTEGER_OP(rshl);
5023 break;
5024 case NEON_3R_VQRSHL:
5025 GEN_NEON_INTEGER_OP_ENV(qrshl);
5026 break;
5027 case NEON_3R_VABD:
5028 GEN_NEON_INTEGER_OP(abd);
5029 break;
5030 case NEON_3R_VABA:
5031 GEN_NEON_INTEGER_OP(abd);
5032 tcg_temp_free_i32(tmp2);
5033 tmp2 = neon_load_reg(rd, pass);
5034 gen_neon_add(size, tmp, tmp2);
5035 break;
5036 case NEON_3R_VPMAX:
5037 GEN_NEON_INTEGER_OP(pmax);
5038 break;
5039 case NEON_3R_VPMIN:
5040 GEN_NEON_INTEGER_OP(pmin);
5041 break;
5042 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
5043 if (!u) { /* VQDMULH */
5044 switch (size) {
5045 case 1:
5046 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5047 break;
5048 case 2:
5049 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5050 break;
5051 default: abort();
5053 } else { /* VQRDMULH */
5054 switch (size) {
5055 case 1:
5056 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5057 break;
5058 case 2:
5059 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5060 break;
5061 default: abort();
5064 break;
5065 case NEON_3R_VPADD_VQRDMLAH:
5066 switch (size) {
5067 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5068 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5069 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
5070 default: abort();
5072 break;
5073 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
5075 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5076 switch ((u << 2) | size) {
5077 case 0: /* VADD */
5078 case 4: /* VPADD */
5079 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5080 break;
5081 case 2: /* VSUB */
5082 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
5083 break;
5084 case 6: /* VABD */
5085 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5086 break;
5087 default:
5088 abort();
5090 tcg_temp_free_ptr(fpstatus);
5091 break;
5093 case NEON_3R_FLOAT_MULTIPLY:
5095 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5096 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5097 if (!u) {
5098 tcg_temp_free_i32(tmp2);
5099 tmp2 = neon_load_reg(rd, pass);
5100 if (size == 0) {
5101 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5102 } else {
5103 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5106 tcg_temp_free_ptr(fpstatus);
5107 break;
5109 case NEON_3R_FLOAT_CMP:
5111 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5112 if (!u) {
5113 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
5114 } else {
5115 if (size == 0) {
5116 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5117 } else {
5118 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5121 tcg_temp_free_ptr(fpstatus);
5122 break;
5124 case NEON_3R_FLOAT_ACMP:
5126 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5127 if (size == 0) {
5128 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5129 } else {
5130 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5132 tcg_temp_free_ptr(fpstatus);
5133 break;
5135 case NEON_3R_FLOAT_MINMAX:
5137 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5138 if (size == 0) {
5139 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
5140 } else {
5141 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
5143 tcg_temp_free_ptr(fpstatus);
5144 break;
5146 case NEON_3R_FLOAT_MISC:
5147 if (u) {
5148 /* VMAXNM/VMINNM */
5149 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5150 if (size == 0) {
5151 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
5152 } else {
5153 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
5155 tcg_temp_free_ptr(fpstatus);
5156 } else {
5157 if (size == 0) {
5158 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5159 } else {
5160 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5163 break;
5164 case NEON_3R_VFM_VQRDMLSH:
5166 /* VFMA, VFMS: fused multiply-add */
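/* For VFMS the first multiplicand is negated before the fused
   multiply-add, so the result is rd + (-rn * rm) with no intermediate
   rounding. */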
5167 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5168 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5169 if (size) {
5170 /* VFMS */
5171 gen_helper_vfp_negs(tmp, tmp);
5173 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5174 tcg_temp_free_i32(tmp3);
5175 tcg_temp_free_ptr(fpstatus);
5176 break;
5178 default:
5179 abort();
5181 tcg_temp_free_i32(tmp2);
5183 /* Save the result. For elementwise operations we can put it
5184 straight into the destination register. For pairwise operations
5185 we have to be careful to avoid clobbering the source operands. */
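/* e.g. for VPADD with rd == rm, writing the pass 0 result into rd before
   pass 1 has read rm would corrupt the second source, so the results are
   staged in the scratch area and copied into rd after the loop. */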
5186 if (pairwise && rd == rm) {
5187 neon_store_scratch(pass, tmp);
5188 } else {
5189 neon_store_reg(rd, pass, tmp);
5192 } /* for pass */
5193 if (pairwise && rd == rm) {
5194 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5195 tmp = neon_load_scratch(pass);
5196 neon_store_reg(rd, pass, tmp);
5199 /* End of 3 register same size operations. */
5200 } else if (insn & (1 << 4)) {
5201 if ((insn & 0x00380080) != 0) {
5202 /* Two registers and shift. */
5203 op = (insn >> 8) & 0xf;
5204 if (insn & (1 << 7)) {
5205 /* 64-bit shift. */
5206 if (op > 7) {
5207 return 1;
5209 size = 3;
5210 } else {
5211 size = 2;
5212 while ((insn & (1 << (size + 19))) == 0)
5213 size--;
5215 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5216 if (op < 8) {
5217 /* Shift by immediate:
5218 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5219 if (q && ((rd | rm) & 1)) {
5220 return 1;
5222 if (!u && (op == 4 || op == 6)) {
5223 return 1;
5225 /* Right shifts are encoded as N - shift, where N is the
5226 element size in bits. */
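/* e.g. for 16-bit elements an immediate field of 12 encodes a right
   shift by 16 - 12 = 4. */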
5227 if (op <= 4) {
5228 shift = shift - (1 << (size + 3));
5231 switch (op) {
5232 case 0: /* VSHR */
5233 /* Right shift comes here negative. */
5234 shift = -shift;
5235 /* Shifts larger than the element size are architecturally
5236 * valid. Unsigned results in all zeros; signed results
5237 * in all sign bits.
5239 if (!u) {
5240 tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
5241 MIN(shift, (8 << size) - 1),
5242 vec_size, vec_size);
5243 } else if (shift >= 8 << size) {
5244 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5245 } else {
5246 tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
5247 vec_size, vec_size);
5249 return 0;
5251 case 1: /* VSRA */
5252 /* Right shift comes here negative. */
5253 shift = -shift;
5254 /* Shifts larger than the element size are architecturally
5255 * valid. Unsigned results in all zeros; signed results
5256 * in all sign bits.
5258 if (!u) {
5259 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5260 MIN(shift, (8 << size) - 1),
5261 &ssra_op[size]);
5262 } else if (shift >= 8 << size) {
5263 /* rd += 0 */
5264 } else {
5265 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5266 shift, &usra_op[size]);
5268 return 0;
5270 case 4: /* VSRI */
5271 if (!u) {
5272 return 1;
5274 /* Right shift comes here negative. */
5275 shift = -shift;
5276 /* Shift out of range leaves destination unchanged. */
5277 if (shift < 8 << size) {
5278 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5279 shift, &sri_op[size]);
5281 return 0;
5283 case 5: /* VSHL, VSLI */
5284 if (u) { /* VSLI */
5285 /* Shift out of range leaves destination unchanged. */
5286 if (shift < 8 << size) {
5287 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
5288 vec_size, shift, &sli_op[size]);
5290 } else { /* VSHL */
5291 /* Shifts larger than the element size are
5292 * architecturally valid and result in zero.
5294 if (shift >= 8 << size) {
5295 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5296 } else {
5297 tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
5298 vec_size, vec_size);
5301 return 0;
5304 if (size == 3) {
5305 count = q + 1;
5306 } else {
5307 count = q ? 4: 2;
5310 /* To avoid excessive duplication of ops we implement shift
5311 * by immediate using the variable shift operations.
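 * dup_const() replicates the (possibly negative) shift count across the
 * immediate, so every element lane sees the same per-element count.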
5313 imm = dup_const(size, shift);
5315 for (pass = 0; pass < count; pass++) {
5316 if (size == 3) {
5317 neon_load_reg64(cpu_V0, rm + pass);
5318 tcg_gen_movi_i64(cpu_V1, imm);
5319 switch (op) {
5320 case 2: /* VRSHR */
5321 case 3: /* VRSRA */
5322 if (u)
5323 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5324 else
5325 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5326 break;
5327 case 6: /* VQSHLU */
5328 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5329 cpu_V0, cpu_V1);
5330 break;
5331 case 7: /* VQSHL */
5332 if (u) {
5333 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5334 cpu_V0, cpu_V1);
5335 } else {
5336 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5337 cpu_V0, cpu_V1);
5339 break;
5340 default:
5341 g_assert_not_reached();
5343 if (op == 3) {
5344 /* Accumulate. */
5345 neon_load_reg64(cpu_V1, rd + pass);
5346 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5348 neon_store_reg64(cpu_V0, rd + pass);
5349 } else { /* size < 3 */
5350 /* Operands in tmp and tmp2. */
5351 tmp = neon_load_reg(rm, pass);
5352 tmp2 = tcg_temp_new_i32();
5353 tcg_gen_movi_i32(tmp2, imm);
5354 switch (op) {
5355 case 2: /* VRSHR */
5356 case 3: /* VRSRA */
5357 GEN_NEON_INTEGER_OP(rshl);
5358 break;
5359 case 6: /* VQSHLU */
5360 switch (size) {
5361 case 0:
5362 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5363 tmp, tmp2);
5364 break;
5365 case 1:
5366 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5367 tmp, tmp2);
5368 break;
5369 case 2:
5370 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5371 tmp, tmp2);
5372 break;
5373 default:
5374 abort();
5376 break;
5377 case 7: /* VQSHL */
5378 GEN_NEON_INTEGER_OP_ENV(qshl);
5379 break;
5380 default:
5381 g_assert_not_reached();
5383 tcg_temp_free_i32(tmp2);
5385 if (op == 3) {
5386 /* Accumulate. */
5387 tmp2 = neon_load_reg(rd, pass);
5388 gen_neon_add(size, tmp, tmp2);
5389 tcg_temp_free_i32(tmp2);
5391 neon_store_reg(rd, pass, tmp);
5393 } /* for pass */
5394 } else if (op < 10) {
5395 /* Shift by immediate and narrow:
5396 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5397 int input_unsigned = (op == 8) ? !u : u;
5398 if (rm & 1) {
5399 return 1;
5401 shift = shift - (1 << (size + 3));
5402 size++;
5403 if (size == 3) {
5404 tmp64 = tcg_const_i64(shift);
5405 neon_load_reg64(cpu_V0, rm);
5406 neon_load_reg64(cpu_V1, rm + 1);
5407 for (pass = 0; pass < 2; pass++) {
5408 TCGv_i64 in;
5409 if (pass == 0) {
5410 in = cpu_V0;
5411 } else {
5412 in = cpu_V1;
5414 if (q) {
5415 if (input_unsigned) {
5416 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5417 } else {
5418 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5420 } else {
5421 if (input_unsigned) {
5422 gen_ushl_i64(cpu_V0, in, tmp64);
5423 } else {
5424 gen_sshl_i64(cpu_V0, in, tmp64);
5427 tmp = tcg_temp_new_i32();
5428 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5429 neon_store_reg(rd, pass, tmp);
5430 } /* for pass */
5431 tcg_temp_free_i64(tmp64);
5432 } else {
5433 if (size == 1) {
5434 imm = (uint16_t)shift;
5435 imm |= imm << 16;
5436 } else {
5437 /* size == 2 */
5438 imm = (uint32_t)shift;
5440 tmp2 = tcg_const_i32(imm);
5441 tmp4 = neon_load_reg(rm + 1, 0);
5442 tmp5 = neon_load_reg(rm + 1, 1);
5443 for (pass = 0; pass < 2; pass++) {
5444 if (pass == 0) {
5445 tmp = neon_load_reg(rm, 0);
5446 } else {
5447 tmp = tmp4;
5449 gen_neon_shift_narrow(size, tmp, tmp2, q,
5450 input_unsigned);
5451 if (pass == 0) {
5452 tmp3 = neon_load_reg(rm, 1);
5453 } else {
5454 tmp3 = tmp5;
5456 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5457 input_unsigned);
5458 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5459 tcg_temp_free_i32(tmp);
5460 tcg_temp_free_i32(tmp3);
5461 tmp = tcg_temp_new_i32();
5462 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5463 neon_store_reg(rd, pass, tmp);
5464 } /* for pass */
5465 tcg_temp_free_i32(tmp2);
5467 } else if (op == 10) {
5468 /* VSHLL, VMOVL */
5469 if (q || (rd & 1)) {
5470 return 1;
5472 tmp = neon_load_reg(rm, 0);
5473 tmp2 = neon_load_reg(rm, 1);
5474 for (pass = 0; pass < 2; pass++) {
5475 if (pass == 1)
5476 tmp = tmp2;
5478 gen_neon_widen(cpu_V0, tmp, size, u);
5480 if (shift != 0) {
5481 /* The shift is less than the width of the source
5482 type, so we can just shift the whole register. */
5483 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5484 /* Widen the result of shift: we need to clear
5485 * the potential overflow bits resulting from
5486 * left bits of the narrow input appearing as
5487 * right bits of the left neighbour narrow
5488 * input. */
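/* For example, for VSHLL.S8 with shift 3 each widened 16-bit lane
 * holds a sign-extended byte; shifting the whole 64-bit register left
 * by 3 spills the top three bits of each lane into the low three bits
 * of the lane above, so those bits are cleared with a 0x0007-per-lane
 * mask. */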
5489 if (size < 2 || !u) {
5490 uint64_t imm64;
5491 if (size == 0) {
5492 imm = (0xffu >> (8 - shift));
5493 imm |= imm << 16;
5494 } else if (size == 1) {
5495 imm = 0xffff >> (16 - shift);
5496 } else {
5497 /* size == 2 */
5498 imm = 0xffffffff >> (32 - shift);
5500 if (size < 2) {
5501 imm64 = imm | (((uint64_t)imm) << 32);
5502 } else {
5503 imm64 = imm;
5505 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5508 neon_store_reg64(cpu_V0, rd + pass);
5510 } else if (op >= 14) {
5511 /* VCVT fixed-point. */
5512 TCGv_ptr fpst;
5513 TCGv_i32 shiftv;
5514 VFPGenFixPointFn *fn;
5516 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5517 return 1;
5520 if (!(op & 1)) {
5521 if (u) {
5522 fn = gen_helper_vfp_ultos;
5523 } else {
5524 fn = gen_helper_vfp_sltos;
5526 } else {
5527 if (u) {
5528 fn = gen_helper_vfp_touls_round_to_zero;
5529 } else {
5530 fn = gen_helper_vfp_tosls_round_to_zero;
5534 /* We have already masked out the must-be-1 top bit of imm6,
5535 * hence this 32-shift where the ARM ARM has 64-imm6.
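 * e.g. imm6 == 0b111100 (60) means 64 - 60 = 4 fraction bits, which
 * with the top bit already stripped is the same as 32 - 28.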
5537 shift = 32 - shift;
5538 fpst = get_fpstatus_ptr(1);
5539 shiftv = tcg_const_i32(shift);
5540 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5541 TCGv_i32 tmpf = neon_load_reg(rm, pass);
5542 fn(tmpf, tmpf, shiftv, fpst);
5543 neon_store_reg(rd, pass, tmpf);
5545 tcg_temp_free_ptr(fpst);
5546 tcg_temp_free_i32(shiftv);
5547 } else {
5548 return 1;
5550 } else { /* (insn & 0x00380080) == 0 */
5551 int invert, reg_ofs, vec_size;
5553 if (q && (rd & 1)) {
5554 return 1;
5557 op = (insn >> 8) & 0xf;
5558 /* One register and immediate. */
5559 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5560 invert = (insn & (1 << 5)) != 0;
5561 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5562 * We choose not to special-case this and will behave as if a
5563 * valid constant encoding of 0 had been given.
5565 switch (op) {
5566 case 0: case 1:
5567 /* no-op */
5568 break;
5569 case 2: case 3:
5570 imm <<= 8;
5571 break;
5572 case 4: case 5:
5573 imm <<= 16;
5574 break;
5575 case 6: case 7:
5576 imm <<= 24;
5577 break;
5578 case 8: case 9:
5579 imm |= imm << 16;
5580 break;
5581 case 10: case 11:
5582 imm = (imm << 8) | (imm << 24);
5583 break;
5584 case 12:
5585 imm = (imm << 8) | 0xff;
5586 break;
5587 case 13:
5588 imm = (imm << 16) | 0xffff;
5589 break;
5590 case 14:
5591 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5592 if (invert) {
5593 imm = ~imm;
5595 break;
5596 case 15:
5597 if (invert) {
5598 return 1;
5600 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5601 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5602 break;
5604 if (invert) {
5605 imm = ~imm;
5608 reg_ofs = neon_reg_offset(rd, 0);
5609 vec_size = q ? 16 : 8;
5611 if (op & 1 && op < 12) {
5612 if (invert) {
5613 /* The immediate value has already been inverted,
5614 * so BIC becomes AND.
5616 tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
5617 vec_size, vec_size);
5618 } else {
5619 tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
5620 vec_size, vec_size);
5622 } else {
5623 /* VMOV, VMVN. */
5624 if (op == 14 && invert) {
5625 TCGv_i64 t64 = tcg_temp_new_i64();
5627 for (pass = 0; pass <= q; ++pass) {
5628 uint64_t val = 0;
5629 int n;
5631 for (n = 0; n < 8; n++) {
5632 if (imm & (1 << (n + pass * 8))) {
5633 val |= 0xffull << (n * 8);
5636 tcg_gen_movi_i64(t64, val);
5637 neon_store_reg64(t64, rd + pass);
5639 tcg_temp_free_i64(t64);
5640 } else {
5641 tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
5645 } else { /* (insn & 0x00800010 == 0x00800000) */
5646 if (size != 3) {
5647 op = (insn >> 8) & 0xf;
5648 if ((insn & (1 << 6)) == 0) {
5649 /* Three registers of different lengths. */
5650 int src1_wide;
5651 int src2_wide;
5652 int prewiden;
5653 /* undefreq: bit 0 : UNDEF if size == 0
5654 * bit 1 : UNDEF if size == 1
5655 * bit 2 : UNDEF if size == 2
5656 * bit 3 : UNDEF if U == 1
5657 * Note that [2:0] set implies 'always UNDEF'
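 * e.g. undefreq == 9 (VQDMLAL/VQDMLSL/VQDMULL) means UNDEF when
 * size == 0 or U == 1, and 7 marks the reserved always-UNDEF row.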
5659 int undefreq;
5660 /* prewiden, src1_wide, src2_wide, undefreq */
5661 static const int neon_3reg_wide[16][4] = {
5662 {1, 0, 0, 0}, /* VADDL */
5663 {1, 1, 0, 0}, /* VADDW */
5664 {1, 0, 0, 0}, /* VSUBL */
5665 {1, 1, 0, 0}, /* VSUBW */
5666 {0, 1, 1, 0}, /* VADDHN */
5667 {0, 0, 0, 0}, /* VABAL */
5668 {0, 1, 1, 0}, /* VSUBHN */
5669 {0, 0, 0, 0}, /* VABDL */
5670 {0, 0, 0, 0}, /* VMLAL */
5671 {0, 0, 0, 9}, /* VQDMLAL */
5672 {0, 0, 0, 0}, /* VMLSL */
5673 {0, 0, 0, 9}, /* VQDMLSL */
5674 {0, 0, 0, 0}, /* Integer VMULL */
5675 {0, 0, 0, 9}, /* VQDMULL */
5676 {0, 0, 0, 0xa}, /* Polynomial VMULL */
5677 {0, 0, 0, 7}, /* Reserved: always UNDEF */
5680 prewiden = neon_3reg_wide[op][0];
5681 src1_wide = neon_3reg_wide[op][1];
5682 src2_wide = neon_3reg_wide[op][2];
5683 undefreq = neon_3reg_wide[op][3];
5685 if ((undefreq & (1 << size)) ||
5686 ((undefreq & 8) && u)) {
5687 return 1;
5689 if ((src1_wide && (rn & 1)) ||
5690 (src2_wide && (rm & 1)) ||
5691 (!src2_wide && (rd & 1))) {
5692 return 1;
5695 /* Handle polynomial VMULL in a single pass. */
5696 if (op == 14) {
5697 if (size == 0) {
5698 /* VMULL.P8 */
5699 tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, 16, 16,
5700 0, gen_helper_neon_pmull_h);
5701 } else {
5702 /* VMULL.P64 */
5703 if (!dc_isar_feature(aa32_pmull, s)) {
5704 return 1;
5706 tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, 16, 16,
5707 0, gen_helper_gvec_pmull_q);
5709 return 0;
5712 /* Avoid overlapping operands. Wide source operands are
5713 always aligned so will never overlap with wide
5714 destinations in problematic ways. */
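/* Only a narrow source that shares a register with rd needs saving:
   its second half is copied to the scratch area below, before pass 0
   can overwrite it, and reloaded from there during pass 1. */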
5715 if (rd == rm && !src2_wide) {
5716 tmp = neon_load_reg(rm, 1);
5717 neon_store_scratch(2, tmp);
5718 } else if (rd == rn && !src1_wide) {
5719 tmp = neon_load_reg(rn, 1);
5720 neon_store_scratch(2, tmp);
5722 tmp3 = NULL;
5723 for (pass = 0; pass < 2; pass++) {
5724 if (src1_wide) {
5725 neon_load_reg64(cpu_V0, rn + pass);
5726 tmp = NULL;
5727 } else {
5728 if (pass == 1 && rd == rn) {
5729 tmp = neon_load_scratch(2);
5730 } else {
5731 tmp = neon_load_reg(rn, pass);
5733 if (prewiden) {
5734 gen_neon_widen(cpu_V0, tmp, size, u);
5737 if (src2_wide) {
5738 neon_load_reg64(cpu_V1, rm + pass);
5739 tmp2 = NULL;
5740 } else {
5741 if (pass == 1 && rd == rm) {
5742 tmp2 = neon_load_scratch(2);
5743 } else {
5744 tmp2 = neon_load_reg(rm, pass);
5746 if (prewiden) {
5747 gen_neon_widen(cpu_V1, tmp2, size, u);
5750 switch (op) {
5751 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5752 gen_neon_addl(size);
5753 break;
5754 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5755 gen_neon_subl(size);
5756 break;
5757 case 5: case 7: /* VABAL, VABDL */
5758 switch ((size << 1) | u) {
5759 case 0:
5760 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5761 break;
5762 case 1:
5763 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5764 break;
5765 case 2:
5766 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5767 break;
5768 case 3:
5769 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5770 break;
5771 case 4:
5772 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5773 break;
5774 case 5:
5775 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5776 break;
5777 default: abort();
5779 tcg_temp_free_i32(tmp2);
5780 tcg_temp_free_i32(tmp);
5781 break;
5782 case 8: case 9: case 10: case 11: case 12: case 13:
5783 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5784 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5785 break;
5786 default: /* 15 is RESERVED: caught earlier */
5787 abort();
5789 if (op == 13) {
5790 /* VQDMULL */
5791 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5792 neon_store_reg64(cpu_V0, rd + pass);
5793 } else if (op == 5 || (op >= 8 && op <= 11)) {
5794 /* Accumulate. */
5795 neon_load_reg64(cpu_V1, rd + pass);
5796 switch (op) {
5797 case 10: /* VMLSL */
5798 gen_neon_negl(cpu_V0, size);
5799 /* Fall through */
5800 case 5: case 8: /* VABAL, VMLAL */
5801 gen_neon_addl(size);
5802 break;
5803 case 9: case 11: /* VQDMLAL, VQDMLSL */
5804 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5805 if (op == 11) {
5806 gen_neon_negl(cpu_V0, size);
5808 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5809 break;
5810 default:
5811 abort();
5813 neon_store_reg64(cpu_V0, rd + pass);
5814 } else if (op == 4 || op == 6) {
5815 /* Narrowing operation. */
5816 tmp = tcg_temp_new_i32();
5817 if (!u) {
5818 switch (size) {
5819 case 0:
5820 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5821 break;
5822 case 1:
5823 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5824 break;
5825 case 2:
5826 tcg_gen_extrh_i64_i32(tmp, cpu_V0);
5827 break;
5828 default: abort();
5830 } else {
5831 switch (size) {
5832 case 0:
5833 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5834 break;
5835 case 1:
5836 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5837 break;
5838 case 2:
5839 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5840 tcg_gen_extrh_i64_i32(tmp, cpu_V0);
5841 break;
5842 default: abort();
5845 if (pass == 0) {
5846 tmp3 = tmp;
5847 } else {
5848 neon_store_reg(rd, 0, tmp3);
5849 neon_store_reg(rd, 1, tmp);
5851 } else {
5852 /* Write back the result. */
5853 neon_store_reg64(cpu_V0, rd + pass);
5856 } else {
5857 /* Two registers and a scalar. NB that for ops of this form
5858 * the ARM ARM labels bit 24 as Q, but it is in our variable
5859 * 'u', not 'q'.
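 * Hence the pass loops below iterate (u ? 4 : 2) times rather than
 * keying off 'q'.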
5861 if (size == 0) {
5862 return 1;
5864 switch (op) {
5865 case 1: /* Float VMLA scalar */
5866 case 5: /* Floating point VMLS scalar */
5867 case 9: /* Floating point VMUL scalar */
5868 if (size == 1) {
5869 return 1;
5871 /* fall through */
5872 case 0: /* Integer VMLA scalar */
5873 case 4: /* Integer VMLS scalar */
5874 case 8: /* Integer VMUL scalar */
5875 case 12: /* VQDMULH scalar */
5876 case 13: /* VQRDMULH scalar */
5877 if (u && ((rd | rn) & 1)) {
5878 return 1;
5880 tmp = neon_get_scalar(size, rm);
5881 neon_store_scratch(0, tmp);
5882 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5883 tmp = neon_load_scratch(0);
5884 tmp2 = neon_load_reg(rn, pass);
5885 if (op == 12) {
5886 if (size == 1) {
5887 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5888 } else {
5889 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5891 } else if (op == 13) {
5892 if (size == 1) {
5893 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5894 } else {
5895 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5897 } else if (op & 1) {
5898 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5899 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5900 tcg_temp_free_ptr(fpstatus);
5901 } else {
5902 switch (size) {
5903 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5904 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5905 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5906 default: abort();
5909 tcg_temp_free_i32(tmp2);
5910 if (op < 8) {
5911 /* Accumulate. */
5912 tmp2 = neon_load_reg(rd, pass);
5913 switch (op) {
5914 case 0:
5915 gen_neon_add(size, tmp, tmp2);
5916 break;
5917 case 1:
5919 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5920 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5921 tcg_temp_free_ptr(fpstatus);
5922 break;
5924 case 4:
5925 gen_neon_rsb(size, tmp, tmp2);
5926 break;
5927 case 5:
5929 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5930 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5931 tcg_temp_free_ptr(fpstatus);
5932 break;
5934 default:
5935 abort();
5937 tcg_temp_free_i32(tmp2);
5939 neon_store_reg(rd, pass, tmp);
5941 break;
5942 case 3: /* VQDMLAL scalar */
5943 case 7: /* VQDMLSL scalar */
5944 case 11: /* VQDMULL scalar */
5945 if (u == 1) {
5946 return 1;
5948 /* fall through */
5949 case 2: /* VMLAL scalar */
5950 case 6: /* VMLSL scalar */
5951 case 10: /* VMULL scalar */
5952 if (rd & 1) {
5953 return 1;
5955 tmp2 = neon_get_scalar(size, rm);
5956 /* We need a copy of tmp2 because gen_neon_mull
5957 * deletes it during pass 0. */
5958 tmp4 = tcg_temp_new_i32();
5959 tcg_gen_mov_i32(tmp4, tmp2);
5960 tmp3 = neon_load_reg(rn, 1);
5962 for (pass = 0; pass < 2; pass++) {
5963 if (pass == 0) {
5964 tmp = neon_load_reg(rn, 0);
5965 } else {
5966 tmp = tmp3;
5967 tmp2 = tmp4;
5969 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5970 if (op != 11) {
5971 neon_load_reg64(cpu_V1, rd + pass);
5973 switch (op) {
5974 case 6:
5975 gen_neon_negl(cpu_V0, size);
5976 /* Fall through */
5977 case 2:
5978 gen_neon_addl(size);
5979 break;
5980 case 3: case 7:
5981 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5982 if (op == 7) {
5983 gen_neon_negl(cpu_V0, size);
5985 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5986 break;
5987 case 10:
5988 /* no-op */
5989 break;
5990 case 11:
5991 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5992 break;
5993 default:
5994 abort();
5996 neon_store_reg64(cpu_V0, rd + pass);
5998 break;
5999 case 14: /* VQRDMLAH scalar */
6000 case 15: /* VQRDMLSH scalar */
6002 NeonGenThreeOpEnvFn *fn;
6004 if (!dc_isar_feature(aa32_rdm, s)) {
6005 return 1;
6007 if (u && ((rd | rn) & 1)) {
6008 return 1;
6010 if (op == 14) {
6011 if (size == 1) {
6012 fn = gen_helper_neon_qrdmlah_s16;
6013 } else {
6014 fn = gen_helper_neon_qrdmlah_s32;
6016 } else {
6017 if (size == 1) {
6018 fn = gen_helper_neon_qrdmlsh_s16;
6019 } else {
6020 fn = gen_helper_neon_qrdmlsh_s32;
6024 tmp2 = neon_get_scalar(size, rm);
6025 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6026 tmp = neon_load_reg(rn, pass);
6027 tmp3 = neon_load_reg(rd, pass);
6028 fn(tmp, cpu_env, tmp, tmp2, tmp3);
6029 tcg_temp_free_i32(tmp3);
6030 neon_store_reg(rd, pass, tmp);
6032 tcg_temp_free_i32(tmp2);
6034 break;
6035 default:
6036 g_assert_not_reached();
6039 } else { /* size == 3 */
6040 if (!u) {
6041 /* Extract. */
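/* VEXT: the result is a byte window into the concatenation of the
   second source on top of the first, starting at byte 'imm' of the
   first source (imm == 0 simply copies the first source). */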
6042 imm = (insn >> 8) & 0xf;
6044 if (imm > 7 && !q)
6045 return 1;
6047 if (q && ((rd | rn | rm) & 1)) {
6048 return 1;
6051 if (imm == 0) {
6052 neon_load_reg64(cpu_V0, rn);
6053 if (q) {
6054 neon_load_reg64(cpu_V1, rn + 1);
6056 } else if (imm == 8) {
6057 neon_load_reg64(cpu_V0, rn + 1);
6058 if (q) {
6059 neon_load_reg64(cpu_V1, rm);
6061 } else if (q) {
6062 tmp64 = tcg_temp_new_i64();
6063 if (imm < 8) {
6064 neon_load_reg64(cpu_V0, rn);
6065 neon_load_reg64(tmp64, rn + 1);
6066 } else {
6067 neon_load_reg64(cpu_V0, rn + 1);
6068 neon_load_reg64(tmp64, rm);
6070 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
6071 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
6072 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6073 if (imm < 8) {
6074 neon_load_reg64(cpu_V1, rm);
6075 } else {
6076 neon_load_reg64(cpu_V1, rm + 1);
6077 imm -= 8;
6079 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6080 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6081 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
6082 tcg_temp_free_i64(tmp64);
6083 } else {
6084 /* BUGFIX */
6085 neon_load_reg64(cpu_V0, rn);
6086 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
6087 neon_load_reg64(cpu_V1, rm);
6088 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6089 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6091 neon_store_reg64(cpu_V0, rd);
6092 if (q) {
6093 neon_store_reg64(cpu_V1, rd + 1);
6095 } else if ((insn & (1 << 11)) == 0) {
6096 /* Two register misc. */
6097 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6098 size = (insn >> 18) & 3;
6099 /* UNDEF for unknown op values and bad op-size combinations */
6100 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6101 return 1;
6103 if (neon_2rm_is_v8_op(op) &&
6104 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6105 return 1;
6107 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6108 q && ((rm | rd) & 1)) {
6109 return 1;
6111 switch (op) {
6112 case NEON_2RM_VREV64:
6113 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6114 tmp = neon_load_reg(rm, pass * 2);
6115 tmp2 = neon_load_reg(rm, pass * 2 + 1);
6116 switch (size) {
6117 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6118 case 1: gen_swap_half(tmp); break;
6119 case 2: /* no-op */ break;
6120 default: abort();
6122 neon_store_reg(rd, pass * 2 + 1, tmp);
6123 if (size == 2) {
6124 neon_store_reg(rd, pass * 2, tmp2);
6125 } else {
6126 switch (size) {
6127 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6128 case 1: gen_swap_half(tmp2); break;
6129 default: abort();
6131 neon_store_reg(rd, pass * 2, tmp2);
6134 break;
6135 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6136 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
6137 for (pass = 0; pass < q + 1; pass++) {
6138 tmp = neon_load_reg(rm, pass * 2);
6139 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6140 tmp = neon_load_reg(rm, pass * 2 + 1);
6141 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6142 switch (size) {
6143 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6144 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6145 case 2: tcg_gen_add_i64(CPU_V001); break;
6146 default: abort();
6148 if (op >= NEON_2RM_VPADAL) {
6149 /* Accumulate. */
6150 neon_load_reg64(cpu_V1, rd + pass);
6151 gen_neon_addl(size);
6153 neon_store_reg64(cpu_V0, rd + pass);
6155 break;
6156 case NEON_2RM_VTRN:
6157 if (size == 2) {
6158 int n;
6159 for (n = 0; n < (q ? 4 : 2); n += 2) {
6160 tmp = neon_load_reg(rm, n);
6161 tmp2 = neon_load_reg(rd, n + 1);
6162 neon_store_reg(rm, n, tmp2);
6163 neon_store_reg(rd, n + 1, tmp);
6165 } else {
6166 goto elementwise;
6168 break;
6169 case NEON_2RM_VUZP:
6170 if (gen_neon_unzip(rd, rm, size, q)) {
6171 return 1;
6173 break;
6174 case NEON_2RM_VZIP:
6175 if (gen_neon_zip(rd, rm, size, q)) {
6176 return 1;
6178 break;
6179 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6180 /* also VQMOVUN; op field and mnemonics don't line up */
6181 if (rm & 1) {
6182 return 1;
6184 tmp2 = NULL;
6185 for (pass = 0; pass < 2; pass++) {
6186 neon_load_reg64(cpu_V0, rm + pass);
6187 tmp = tcg_temp_new_i32();
6188 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6189 tmp, cpu_V0);
6190 if (pass == 0) {
6191 tmp2 = tmp;
6192 } else {
6193 neon_store_reg(rd, 0, tmp2);
6194 neon_store_reg(rd, 1, tmp);
6197 break;
6198 case NEON_2RM_VSHLL:
6199 if (q || (rd & 1)) {
6200 return 1;
6202 tmp = neon_load_reg(rm, 0);
6203 tmp2 = neon_load_reg(rm, 1);
6204 for (pass = 0; pass < 2; pass++) {
6205 if (pass == 1)
6206 tmp = tmp2;
6207 gen_neon_widen(cpu_V0, tmp, size, 1);
6208 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
6209 neon_store_reg64(cpu_V0, rd + pass);
6211 break;
6212 case NEON_2RM_VCVT_F16_F32:
6214 TCGv_ptr fpst;
6215 TCGv_i32 ahp;
6217 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
6218 q || (rm & 1)) {
6219 return 1;
6221 fpst = get_fpstatus_ptr(true);
6222 ahp = get_ahp_flag();
6223 tmp = neon_load_reg(rm, 0);
6224 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6225 tmp2 = neon_load_reg(rm, 1);
6226 gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
6227 tcg_gen_shli_i32(tmp2, tmp2, 16);
6228 tcg_gen_or_i32(tmp2, tmp2, tmp);
6229 tcg_temp_free_i32(tmp);
6230 tmp = neon_load_reg(rm, 2);
6231 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6232 tmp3 = neon_load_reg(rm, 3);
6233 neon_store_reg(rd, 0, tmp2);
6234 gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
6235 tcg_gen_shli_i32(tmp3, tmp3, 16);
6236 tcg_gen_or_i32(tmp3, tmp3, tmp);
6237 neon_store_reg(rd, 1, tmp3);
6238 tcg_temp_free_i32(tmp);
6239 tcg_temp_free_i32(ahp);
6240 tcg_temp_free_ptr(fpst);
6241 break;
6243 case NEON_2RM_VCVT_F32_F16:
6245 TCGv_ptr fpst;
6246 TCGv_i32 ahp;
6247 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
6248 q || (rd & 1)) {
6249 return 1;
6251 fpst = get_fpstatus_ptr(true);
6252 ahp = get_ahp_flag();
6253 tmp3 = tcg_temp_new_i32();
6254 tmp = neon_load_reg(rm, 0);
6255 tmp2 = neon_load_reg(rm, 1);
6256 tcg_gen_ext16u_i32(tmp3, tmp);
6257 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6258 neon_store_reg(rd, 0, tmp3);
6259 tcg_gen_shri_i32(tmp, tmp, 16);
6260 gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
6261 neon_store_reg(rd, 1, tmp);
6262 tmp3 = tcg_temp_new_i32();
6263 tcg_gen_ext16u_i32(tmp3, tmp2);
6264 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6265 neon_store_reg(rd, 2, tmp3);
6266 tcg_gen_shri_i32(tmp2, tmp2, 16);
6267 gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
6268 neon_store_reg(rd, 3, tmp2);
6269 tcg_temp_free_i32(ahp);
6270 tcg_temp_free_ptr(fpst);
6271 break;
6273 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6274 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
6275 return 1;
6277 ptr1 = vfp_reg_ptr(true, rd);
6278 ptr2 = vfp_reg_ptr(true, rm);
6280 /* Bit 6 is the lowest opcode bit; it distinguishes between
6281 * encryption (AESE/AESMC) and decryption (AESD/AESIMC) */
6283 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6285 if (op == NEON_2RM_AESE) {
6286 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
6287 } else {
6288 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
6290 tcg_temp_free_ptr(ptr1);
6291 tcg_temp_free_ptr(ptr2);
6292 tcg_temp_free_i32(tmp3);
6293 break;
6294 case NEON_2RM_SHA1H:
6295 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
6296 return 1;
6298 ptr1 = vfp_reg_ptr(true, rd);
6299 ptr2 = vfp_reg_ptr(true, rm);
6301 gen_helper_crypto_sha1h(ptr1, ptr2);
6303 tcg_temp_free_ptr(ptr1);
6304 tcg_temp_free_ptr(ptr2);
6305 break;
6306 case NEON_2RM_SHA1SU1:
6307 if ((rm | rd) & 1) {
6308 return 1;
6310 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6311 if (q) {
6312 if (!dc_isar_feature(aa32_sha2, s)) {
6313 return 1;
6315 } else if (!dc_isar_feature(aa32_sha1, s)) {
6316 return 1;
6318 ptr1 = vfp_reg_ptr(true, rd);
6319 ptr2 = vfp_reg_ptr(true, rm);
6320 if (q) {
6321 gen_helper_crypto_sha256su0(ptr1, ptr2);
6322 } else {
6323 gen_helper_crypto_sha1su1(ptr1, ptr2);
6325 tcg_temp_free_ptr(ptr1);
6326 tcg_temp_free_ptr(ptr2);
6327 break;
6329 case NEON_2RM_VMVN:
6330 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
6331 break;
6332 case NEON_2RM_VNEG:
6333 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
6334 break;
6335 case NEON_2RM_VABS:
6336 tcg_gen_gvec_abs(size, rd_ofs, rm_ofs, vec_size, vec_size);
6337 break;
6339 case NEON_2RM_VCEQ0:
6340 tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
6341 vec_size, &ceq0_op[size]);
6342 break;
6343 case NEON_2RM_VCGT0:
6344 tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
6345 vec_size, &cgt0_op[size]);
6346 break;
6347 case NEON_2RM_VCLE0:
6348 tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
6349 vec_size, &cle0_op[size]);
6350 break;
6351 case NEON_2RM_VCGE0:
6352 tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
6353 vec_size, &cge0_op[size]);
6354 break;
6355 case NEON_2RM_VCLT0:
6356 tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
6357 vec_size, &clt0_op[size]);
6358 break;
6360 default:
6361 elementwise:
6362 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6363 tmp = neon_load_reg(rm, pass);
6364 switch (op) {
6365 case NEON_2RM_VREV32:
6366 switch (size) {
6367 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6368 case 1: gen_swap_half(tmp); break;
6369 default: abort();
6371 break;
6372 case NEON_2RM_VREV16:
6373 gen_rev16(tmp, tmp);
6374 break;
6375 case NEON_2RM_VCLS:
6376 switch (size) {
6377 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6378 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6379 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
6380 default: abort();
6382 break;
6383 case NEON_2RM_VCLZ:
6384 switch (size) {
6385 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6386 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6387 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
6388 default: abort();
6390 break;
6391 case NEON_2RM_VCNT:
6392 gen_helper_neon_cnt_u8(tmp, tmp);
6393 break;
6394 case NEON_2RM_VQABS:
6395 switch (size) {
6396 case 0:
6397 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6398 break;
6399 case 1:
6400 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6401 break;
6402 case 2:
6403 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6404 break;
6405 default: abort();
6407 break;
6408 case NEON_2RM_VQNEG:
6409 switch (size) {
6410 case 0:
6411 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6412 break;
6413 case 1:
6414 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6415 break;
6416 case 2:
6417 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6418 break;
6419 default: abort();
6421 break;
6422 case NEON_2RM_VCGT0_F:
6424 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6425 tmp2 = tcg_const_i32(0);
6426 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6427 tcg_temp_free_i32(tmp2);
6428 tcg_temp_free_ptr(fpstatus);
6429 break;
6431 case NEON_2RM_VCGE0_F:
6433 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6434 tmp2 = tcg_const_i32(0);
6435 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6436 tcg_temp_free_i32(tmp2);
6437 tcg_temp_free_ptr(fpstatus);
6438 break;
6440 case NEON_2RM_VCEQ0_F:
6442 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6443 tmp2 = tcg_const_i32(0);
6444 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6445 tcg_temp_free_i32(tmp2);
6446 tcg_temp_free_ptr(fpstatus);
6447 break;
6449 case NEON_2RM_VCLE0_F:
6451 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6452 tmp2 = tcg_const_i32(0);
6453 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6454 tcg_temp_free_i32(tmp2);
6455 tcg_temp_free_ptr(fpstatus);
6456 break;
6458 case NEON_2RM_VCLT0_F:
6460 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6461 tmp2 = tcg_const_i32(0);
6462 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6463 tcg_temp_free_i32(tmp2);
6464 tcg_temp_free_ptr(fpstatus);
6465 break;
6467 case NEON_2RM_VABS_F:
6468 gen_helper_vfp_abss(tmp, tmp);
6469 break;
6470 case NEON_2RM_VNEG_F:
6471 gen_helper_vfp_negs(tmp, tmp);
6472 break;
6473 case NEON_2RM_VSWP:
6474 tmp2 = neon_load_reg(rd, pass);
6475 neon_store_reg(rm, pass, tmp2);
6476 break;
6477 case NEON_2RM_VTRN:
6478 tmp2 = neon_load_reg(rd, pass);
6479 switch (size) {
6480 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6481 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6482 default: abort();
6484 neon_store_reg(rm, pass, tmp2);
6485 break;
6486 case NEON_2RM_VRINTN:
6487 case NEON_2RM_VRINTA:
6488 case NEON_2RM_VRINTM:
6489 case NEON_2RM_VRINTP:
6490 case NEON_2RM_VRINTZ:
6492 TCGv_i32 tcg_rmode;
6493 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6494 int rmode;
6496 if (op == NEON_2RM_VRINTZ) {
6497 rmode = FPROUNDING_ZERO;
6498 } else {
6499 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6502 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6503 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6504 cpu_env);
6505 gen_helper_rints(tmp, tmp, fpstatus);
6506 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6507 cpu_env);
6508 tcg_temp_free_ptr(fpstatus);
6509 tcg_temp_free_i32(tcg_rmode);
6510 break;
6512 case NEON_2RM_VRINTX:
6514 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6515 gen_helper_rints_exact(tmp, tmp, fpstatus);
6516 tcg_temp_free_ptr(fpstatus);
6517 break;
6519 case NEON_2RM_VCVTAU:
6520 case NEON_2RM_VCVTAS:
6521 case NEON_2RM_VCVTNU:
6522 case NEON_2RM_VCVTNS:
6523 case NEON_2RM_VCVTPU:
6524 case NEON_2RM_VCVTPS:
6525 case NEON_2RM_VCVTMU:
6526 case NEON_2RM_VCVTMS:
6528 bool is_signed = !extract32(insn, 7, 1);
6529 TCGv_ptr fpst = get_fpstatus_ptr(1);
6530 TCGv_i32 tcg_rmode, tcg_shift;
6531 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6533 tcg_shift = tcg_const_i32(0);
6534 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6535 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6536 cpu_env);
6538 if (is_signed) {
6539 gen_helper_vfp_tosls(tmp, tmp,
6540 tcg_shift, fpst);
6541 } else {
6542 gen_helper_vfp_touls(tmp, tmp,
6543 tcg_shift, fpst);
6546 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6547 cpu_env);
6548 tcg_temp_free_i32(tcg_rmode);
6549 tcg_temp_free_i32(tcg_shift);
6550 tcg_temp_free_ptr(fpst);
6551 break;
6553 case NEON_2RM_VRECPE:
6555 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6556 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6557 tcg_temp_free_ptr(fpstatus);
6558 break;
6560 case NEON_2RM_VRSQRTE:
6562 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6563 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6564 tcg_temp_free_ptr(fpstatus);
6565 break;
6567 case NEON_2RM_VRECPE_F:
6569 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6570 gen_helper_recpe_f32(tmp, tmp, fpstatus);
6571 tcg_temp_free_ptr(fpstatus);
6572 break;
6574 case NEON_2RM_VRSQRTE_F:
6576 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6577 gen_helper_rsqrte_f32(tmp, tmp, fpstatus);
6578 tcg_temp_free_ptr(fpstatus);
6579 break;
6581 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6583 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6584 gen_helper_vfp_sitos(tmp, tmp, fpstatus);
6585 tcg_temp_free_ptr(fpstatus);
6586 break;
6588 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6590 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6591 gen_helper_vfp_uitos(tmp, tmp, fpstatus);
6592 tcg_temp_free_ptr(fpstatus);
6593 break;
6595 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6597 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6598 gen_helper_vfp_tosizs(tmp, tmp, fpstatus);
6599 tcg_temp_free_ptr(fpstatus);
6600 break;
6602 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6604 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6605 gen_helper_vfp_touizs(tmp, tmp, fpstatus);
6606 tcg_temp_free_ptr(fpstatus);
6607 break;
6609 default:
6610 /* Reserved op values were caught by the
6611 * neon_2rm_sizes[] check earlier. */
6613 abort();
6615 neon_store_reg(rd, pass, tmp);
6617 break;
6619 } else if ((insn & (1 << 10)) == 0) {
6620 /* VTBL, VTBX. */
6621 int n = ((insn >> 8) & 3) + 1;
6622 if ((rn + n) > 32) {
6623 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6624 * helper function running off the end of the register file. */
6626 return 1;
6628 n <<= 3;
6629 if (insn & (1 << 6)) {
6630 tmp = neon_load_reg(rd, 0);
6631 } else {
6632 tmp = tcg_temp_new_i32();
6633 tcg_gen_movi_i32(tmp, 0);
6635 tmp2 = neon_load_reg(rm, 0);
6636 ptr1 = vfp_reg_ptr(true, rn);
6637 tmp5 = tcg_const_i32(n);
6638 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
6639 tcg_temp_free_i32(tmp);
6640 if (insn & (1 << 6)) {
6641 tmp = neon_load_reg(rd, 1);
6642 } else {
6643 tmp = tcg_temp_new_i32();
6644 tcg_gen_movi_i32(tmp, 0);
6646 tmp3 = neon_load_reg(rm, 1);
6647 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
6648 tcg_temp_free_i32(tmp5);
6649 tcg_temp_free_ptr(ptr1);
6650 neon_store_reg(rd, 0, tmp2);
6651 neon_store_reg(rd, 1, tmp3);
6652 tcg_temp_free_i32(tmp);
6653 } else if ((insn & 0x380) == 0) {
6654 /* VDUP */
6655 int element;
6656 MemOp size;
6658 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6659 return 1;
6661 if (insn & (1 << 16)) {
6662 size = MO_8;
6663 element = (insn >> 17) & 7;
6664 } else if (insn & (1 << 17)) {
6665 size = MO_16;
6666 element = (insn >> 18) & 3;
6667 } else {
6668 size = MO_32;
6669 element = (insn >> 19) & 1;
6671 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
6672 neon_element_offset(rm, element, size),
6673 q ? 16 : 8, q ? 16 : 8);
6674 } else {
6675 return 1;
6679 return 0;
6682 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
6684 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6685 const ARMCPRegInfo *ri;
6687 cpnum = (insn >> 8) & 0xf;
6689 /* First check for coprocessor space used for XScale/iwMMXt insns */
6690 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
6691 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
6692 return 1;
6694 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
6695 return disas_iwmmxt_insn(s, insn);
6696 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
6697 return disas_dsp_insn(s, insn);
6699 return 1;
6702 /* Otherwise treat as a generic register access */
6703 is64 = (insn & (1 << 25)) == 0;
6704 if (!is64 && ((insn & (1 << 4)) == 0)) {
6705 /* cdp */
6706 return 1;
6709 crm = insn & 0xf;
6710 if (is64) {
6711 crn = 0;
6712 opc1 = (insn >> 4) & 0xf;
6713 opc2 = 0;
6714 rt2 = (insn >> 16) & 0xf;
6715 } else {
6716 crn = (insn >> 16) & 0xf;
6717 opc1 = (insn >> 21) & 7;
6718 opc2 = (insn >> 5) & 7;
6719 rt2 = 0;
6721 isread = (insn >> 20) & 1;
6722 rt = (insn >> 12) & 0xf;
6724 ri = get_arm_cp_reginfo(s->cp_regs,
6725 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
6726 if (ri) {
6727 bool need_exit_tb;
6729 /* Check access permissions */
6730 if (!cp_access_ok(s->current_el, ri, isread)) {
6731 return 1;
6734 if (s->hstr_active || ri->accessfn ||
6735 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
6736 /* Emit code to perform further access permissions checks at
6737 * runtime; this may result in an exception.
6738 * Note that on XScale all cp0..c13 registers do an access check
6739 * call in order to handle c15_cpar. */
6741 TCGv_ptr tmpptr;
6742 TCGv_i32 tcg_syn, tcg_isread;
6743 uint32_t syndrome;
6745 /* Note that since we are an implementation which takes an
6746 * exception on a trapped conditional instruction only if the
6747 * instruction passes its condition code check, we can take
6748 * advantage of the clause in the ARM ARM that allows us to set
6749 * the COND field in the instruction to 0xE in all cases.
6750 * We could fish the actual condition out of the insn (ARM)
6751 * or the condexec bits (Thumb) but it isn't necessary. */
6753 switch (cpnum) {
6754 case 14:
6755 if (is64) {
6756 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
6757 isread, false);
6758 } else {
6759 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
6760 rt, isread, false);
6762 break;
6763 case 15:
6764 if (is64) {
6765 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
6766 isread, false);
6767 } else {
6768 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
6769 rt, isread, false);
6771 break;
6772 default:
6773 /* ARMv8 defines that only coprocessors 14 and 15 exist,
6774 * so this can only happen if this is an ARMv7 or earlier CPU,
6775 * in which case the syndrome information won't actually be
6776 * guest visible. */
6778 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
6779 syndrome = syn_uncategorized();
6780 break;
6783 gen_set_condexec(s);
6784 gen_set_pc_im(s, s->pc_curr);
6785 tmpptr = tcg_const_ptr(ri);
6786 tcg_syn = tcg_const_i32(syndrome);
6787 tcg_isread = tcg_const_i32(isread);
6788 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
6789 tcg_isread);
6790 tcg_temp_free_ptr(tmpptr);
6791 tcg_temp_free_i32(tcg_syn);
6792 tcg_temp_free_i32(tcg_isread);
6793 } else if (ri->type & ARM_CP_RAISES_EXC) {
6795 /* The readfn or writefn might raise an exception;
6796 * synchronize the CPU state in case it does. */
6798 gen_set_condexec(s);
6799 gen_set_pc_im(s, s->pc_curr);
6802 /* Handle special cases first */
6803 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6804 case ARM_CP_NOP:
6805 return 0;
6806 case ARM_CP_WFI:
6807 if (isread) {
6808 return 1;
6810 gen_set_pc_im(s, s->base.pc_next);
6811 s->base.is_jmp = DISAS_WFI;
6812 return 0;
6813 default:
6814 break;
6817 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
6818 gen_io_start();
6821 if (isread) {
6822 /* Read */
6823 if (is64) {
6824 TCGv_i64 tmp64;
6825 TCGv_i32 tmp;
6826 if (ri->type & ARM_CP_CONST) {
6827 tmp64 = tcg_const_i64(ri->resetvalue);
6828 } else if (ri->readfn) {
6829 TCGv_ptr tmpptr;
6830 tmp64 = tcg_temp_new_i64();
6831 tmpptr = tcg_const_ptr(ri);
6832 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6833 tcg_temp_free_ptr(tmpptr);
6834 } else {
6835 tmp64 = tcg_temp_new_i64();
6836 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6838 tmp = tcg_temp_new_i32();
6839 tcg_gen_extrl_i64_i32(tmp, tmp64);
6840 store_reg(s, rt, tmp);
6841 tmp = tcg_temp_new_i32();
6842 tcg_gen_extrh_i64_i32(tmp, tmp64);
6843 tcg_temp_free_i64(tmp64);
6844 store_reg(s, rt2, tmp);
6845 } else {
6846 TCGv_i32 tmp;
6847 if (ri->type & ARM_CP_CONST) {
6848 tmp = tcg_const_i32(ri->resetvalue);
6849 } else if (ri->readfn) {
6850 TCGv_ptr tmpptr;
6851 tmp = tcg_temp_new_i32();
6852 tmpptr = tcg_const_ptr(ri);
6853 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6854 tcg_temp_free_ptr(tmpptr);
6855 } else {
6856 tmp = load_cpu_offset(ri->fieldoffset);
6858 if (rt == 15) {
6859 /* Destination register of r15 for 32 bit loads sets
6860 * the condition codes from the high 4 bits of the value */
6862 gen_set_nzcv(tmp);
6863 tcg_temp_free_i32(tmp);
6864 } else {
6865 store_reg(s, rt, tmp);
6868 } else {
6869 /* Write */
6870 if (ri->type & ARM_CP_CONST) {
6871 /* If not forbidden by access permissions, treat as WI */
6872 return 0;
6875 if (is64) {
6876 TCGv_i32 tmplo, tmphi;
6877 TCGv_i64 tmp64 = tcg_temp_new_i64();
6878 tmplo = load_reg(s, rt);
6879 tmphi = load_reg(s, rt2);
6880 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6881 tcg_temp_free_i32(tmplo);
6882 tcg_temp_free_i32(tmphi);
6883 if (ri->writefn) {
6884 TCGv_ptr tmpptr = tcg_const_ptr(ri);
6885 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6886 tcg_temp_free_ptr(tmpptr);
6887 } else {
6888 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6890 tcg_temp_free_i64(tmp64);
6891 } else {
6892 if (ri->writefn) {
6893 TCGv_i32 tmp;
6894 TCGv_ptr tmpptr;
6895 tmp = load_reg(s, rt);
6896 tmpptr = tcg_const_ptr(ri);
6897 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6898 tcg_temp_free_ptr(tmpptr);
6899 tcg_temp_free_i32(tmp);
6900 } else {
6901 TCGv_i32 tmp = load_reg(s, rt);
6902 store_cpu_offset(tmp, ri->fieldoffset);
6907 /* I/O operations must end the TB here (whether read or write) */
6908 need_exit_tb = ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) &&
6909 (ri->type & ARM_CP_IO));
6911 if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
6913 /* A write to any coprocessor register that ends a TB
6914 * must rebuild the hflags for the next TB. */
6916 TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
6917 if (arm_dc_feature(s, ARM_FEATURE_M)) {
6918 gen_helper_rebuild_hflags_m32(cpu_env, tcg_el);
6919 } else {
6920 if (ri->type & ARM_CP_NEWEL) {
6921 gen_helper_rebuild_hflags_a32_newel(cpu_env);
6922 } else {
6923 gen_helper_rebuild_hflags_a32(cpu_env, tcg_el);
6926 tcg_temp_free_i32(tcg_el);
6928 /* We default to ending the TB on a coprocessor register write,
6929 * but allow this to be suppressed by the register definition
6930 * (usually only necessary to work around guest bugs). */
6932 need_exit_tb = true;
6934 if (need_exit_tb) {
6935 gen_lookup_tb(s);
6938 return 0;
6941 /* Unknown register; this might be a guest error or a QEMU
6942 * unimplemented feature. */
6944 if (is64) {
6945 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
6946 "64 bit system register cp:%d opc1: %d crm:%d "
6947 "(%s)\n",
6948 isread ? "read" : "write", cpnum, opc1, crm,
6949 s->ns ? "non-secure" : "secure");
6950 } else {
6951 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
6952 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
6953 "(%s)\n",
6954 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
6955 s->ns ? "non-secure" : "secure");
6958 return 1;
6962 /* Store a 64-bit value to a register pair. Clobbers val. */
6963 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6965 TCGv_i32 tmp;
6966 tmp = tcg_temp_new_i32();
6967 tcg_gen_extrl_i64_i32(tmp, val);
6968 store_reg(s, rlow, tmp);
6969 tmp = tcg_temp_new_i32();
6970 tcg_gen_extrh_i64_i32(tmp, val);
6971 store_reg(s, rhigh, tmp);
6974 /* load and add a 64-bit value from a register pair. */
6975 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6977 TCGv_i64 tmp;
6978 TCGv_i32 tmpl;
6979 TCGv_i32 tmph;
6981 /* Load 64-bit value rd:rn. */
6982 tmpl = load_reg(s, rlow);
6983 tmph = load_reg(s, rhigh);
6984 tmp = tcg_temp_new_i64();
6985 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6986 tcg_temp_free_i32(tmpl);
6987 tcg_temp_free_i32(tmph);
6988 tcg_gen_add_i64(val, val, tmp);
6989 tcg_temp_free_i64(tmp);
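/*
 * Illustrative sketch, not part of the original translate.c: the plain-C
 * equivalent of what gen_storeq_reg() and gen_addq() emit with TCG ops,
 * assuming rlow receives bits [31:0] and rhigh bits [63:32]. The example_*
 * helper names are hypothetical and exist only for explanation; the
 * fixed-width types come from the includes already at the top of the file.
 */
static inline void example_storeq(uint32_t regs[16], int rlow, int rhigh,
                                  uint64_t val)
{
    regs[rlow] = (uint32_t)val;            /* extrl_i64_i32: low half  */
    regs[rhigh] = (uint32_t)(val >> 32);   /* extrh_i64_i32: high half */
}

static inline uint64_t example_addq(const uint32_t regs[16], uint64_t val,
                                    int rlow, int rhigh)
{
    /* concat_i32_i64 forms rhigh:rlow as the 64-bit addend */
    return val + (((uint64_t)regs[rhigh] << 32) | regs[rlow]);
}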
6992 /* Set N and Z flags from hi|lo. */
6993 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
6995 tcg_gen_mov_i32(cpu_NF, hi);
6996 tcg_gen_or_i32(cpu_ZF, lo, hi);
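/*
 * Descriptive note (not in the original file): cpu_ZF holds a value that is
 * zero exactly when the Z flag is set, so OR-ing the two halves yields Z for
 * the whole 64-bit result, while N is simply the sign bit of the high half.
 */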
6999 /* Load/Store exclusive instructions are implemented by remembering
7000 the value/address loaded, and seeing if these are the same
7001 when the store is performed. This should be sufficient to implement
7002 the architecturally mandated semantics, and avoids having to monitor
7003 regular stores. The compare vs the remembered value is done during
7004 the cmpxchg operation, but we must compare the addresses manually. */
7005 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
7006 TCGv_i32 addr, int size)
7008 TCGv_i32 tmp = tcg_temp_new_i32();
7009 MemOp opc = size | MO_ALIGN | s->be_data;
7011 s->is_ldex = true;
7013 if (size == 3) {
7014 TCGv_i32 tmp2 = tcg_temp_new_i32();
7015 TCGv_i64 t64 = tcg_temp_new_i64();
7017 /* For AArch32, architecturally the 32-bit word at the lowest
7018 * address is always Rt and the one at addr+4 is Rt2, even if
7019 * the CPU is big-endian. That means we don't want to do a
7020 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
7021 * for an architecturally 64-bit access, but instead do a
7022 * 64-bit access using MO_BE if appropriate and then split
7023 * the two halves.
7024 * This only makes a difference for BE32 user-mode, where
7025 * frob64() must not flip the two halves of the 64-bit data
7026 * but this code must treat BE32 user-mode like BE32 system. */
7028 TCGv taddr = gen_aa32_addr(s, addr, opc);
7030 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
7031 tcg_temp_free(taddr);
7032 tcg_gen_mov_i64(cpu_exclusive_val, t64);
7033 if (s->be_data == MO_BE) {
7034 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
7035 } else {
7036 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
7038 tcg_temp_free_i64(t64);
7040 store_reg(s, rt2, tmp2);
7041 } else {
7042 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
7043 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
7046 store_reg(s, rt, tmp);
7047 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
7050 static void gen_clrex(DisasContext *s)
7052 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7055 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7056 TCGv_i32 addr, int size)
7058 TCGv_i32 t0, t1, t2;
7059 TCGv_i64 extaddr;
7060 TCGv taddr;
7061 TCGLabel *done_label;
7062 TCGLabel *fail_label;
7063 MemOp opc = size | MO_ALIGN | s->be_data;
7065 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7066 [addr] = {Rt};
7067 {Rd} = 0;
7068 } else {
7069 {Rd} = 1;
7070 } */
7071 fail_label = gen_new_label();
7072 done_label = gen_new_label();
7073 extaddr = tcg_temp_new_i64();
7074 tcg_gen_extu_i32_i64(extaddr, addr);
7075 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7076 tcg_temp_free_i64(extaddr);
7078 taddr = gen_aa32_addr(s, addr, opc);
7079 t0 = tcg_temp_new_i32();
7080 t1 = load_reg(s, rt);
7081 if (size == 3) {
7082 TCGv_i64 o64 = tcg_temp_new_i64();
7083 TCGv_i64 n64 = tcg_temp_new_i64();
7085 t2 = load_reg(s, rt2);
7086 /* For AArch32, architecturally the 32-bit word at the lowest
7087 * address is always Rt and the one at addr+4 is Rt2, even if
7088 * the CPU is big-endian. Since we're going to treat this as a
7089 * single 64-bit BE store, we need to put the two halves in the
7090 * opposite order for BE to LE, so that they end up in the right
7091 * places.
7092 * We don't want gen_aa32_frob64() because that does the wrong
7093 * thing for BE32 usermode. */
7095 if (s->be_data == MO_BE) {
7096 tcg_gen_concat_i32_i64(n64, t2, t1);
7097 } else {
7098 tcg_gen_concat_i32_i64(n64, t1, t2);
7100 tcg_temp_free_i32(t2);
7102 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
7103 get_mem_index(s), opc);
7104 tcg_temp_free_i64(n64);
7106 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
7107 tcg_gen_extrl_i64_i32(t0, o64);
7109 tcg_temp_free_i64(o64);
7110 } else {
7111 t2 = tcg_temp_new_i32();
7112 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
7113 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
7114 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
7115 tcg_temp_free_i32(t2);
7117 tcg_temp_free_i32(t1);
7118 tcg_temp_free(taddr);
7119 tcg_gen_mov_i32(cpu_R[rd], t0);
7120 tcg_temp_free_i32(t0);
7121 tcg_gen_br(done_label);
7123 gen_set_label(fail_label);
7124 tcg_gen_movi_i32(cpu_R[rd], 1);
7125 gen_set_label(done_label);
7126 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
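/*
 * Illustrative sketch, not part of the original translate.c: the success
 * test that the cmpxchg sequence above implements, in plain C. 'old' is the
 * value the atomic compare-and-swap observed in memory; {Rd} is 0 only if it
 * matched the remembered exclusive value. example_strex_result is a
 * hypothetical name used only for explanation.
 */
static inline uint32_t example_strex_result(uint64_t old, uint64_t exclusive_val)
{
    return old != exclusive_val;   /* 0: store performed, 1: store failed */
}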
7129 /* gen_srs:
7130 * @env: CPUARMState
7131 * @s: DisasContext
7132 * @mode: mode field from insn (which stack to store to)
7133 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7134 * @writeback: true if writeback bit set
7136 * Generate code for the SRS (Store Return State) insn. */
7138 static void gen_srs(DisasContext *s,
7139 uint32_t mode, uint32_t amode, bool writeback)
7141 int32_t offset;
7142 TCGv_i32 addr, tmp;
7143 bool undef = false;
7145 /* SRS is:
7146 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
7147 * and specified mode is monitor mode
7148 * - UNDEFINED in Hyp mode
7149 * - UNPREDICTABLE in User or System mode
7150 * - UNPREDICTABLE if the specified mode is:
7151 * -- not implemented
7152 * -- not a valid mode number
7153 * -- a mode that's at a higher exception level
7154 * -- Monitor, if we are Non-secure
7155 * For the UNPREDICTABLE cases we choose to UNDEF. */
7157 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
7158 gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
7159 return;
7162 if (s->current_el == 0 || s->current_el == 2) {
7163 undef = true;
7166 switch (mode) {
7167 case ARM_CPU_MODE_USR:
7168 case ARM_CPU_MODE_FIQ:
7169 case ARM_CPU_MODE_IRQ:
7170 case ARM_CPU_MODE_SVC:
7171 case ARM_CPU_MODE_ABT:
7172 case ARM_CPU_MODE_UND:
7173 case ARM_CPU_MODE_SYS:
7174 break;
7175 case ARM_CPU_MODE_HYP:
7176 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
7177 undef = true;
7179 break;
7180 case ARM_CPU_MODE_MON:
7181 /* No need to check specifically for "are we non-secure" because
7182 * we've already made EL0 UNDEF and handled the trap for S-EL1;
7183 * so if this isn't EL3 then we must be non-secure. */
7185 if (s->current_el != 3) {
7186 undef = true;
7188 break;
7189 default:
7190 undef = true;
7193 if (undef) {
7194 unallocated_encoding(s);
7195 return;
7198 addr = tcg_temp_new_i32();
7199 tmp = tcg_const_i32(mode);
7200 /* get_r13_banked() will raise an exception if called from System mode */
7201 gen_set_condexec(s);
7202 gen_set_pc_im(s, s->pc_curr);
7203 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7204 tcg_temp_free_i32(tmp);
7205 switch (amode) {
7206 case 0: /* DA */
7207 offset = -4;
7208 break;
7209 case 1: /* IA */
7210 offset = 0;
7211 break;
7212 case 2: /* DB */
7213 offset = -8;
7214 break;
7215 case 3: /* IB */
7216 offset = 4;
7217 break;
7218 default:
7219 abort();
7221 tcg_gen_addi_i32(addr, addr, offset);
7222 tmp = load_reg(s, 14);
7223 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7224 tcg_temp_free_i32(tmp);
7225 tmp = load_cpu_field(spsr);
7226 tcg_gen_addi_i32(addr, addr, 4);
7227 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7228 tcg_temp_free_i32(tmp);
7229 if (writeback) {
7230 switch (amode) {
7231 case 0:
7232 offset = -8;
7233 break;
7234 case 1:
7235 offset = 4;
7236 break;
7237 case 2:
7238 offset = -4;
7239 break;
7240 case 3:
7241 offset = 0;
7242 break;
7243 default:
7244 abort();
7246 tcg_gen_addi_i32(addr, addr, offset);
7247 tmp = tcg_const_i32(mode);
7248 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7249 tcg_temp_free_i32(tmp);
7251 tcg_temp_free_i32(addr);
7252 s->base.is_jmp = DISAS_UPDATE;
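/*
 * Illustrative sketch, not part of the original translate.c: the DA/IA/DB/IB
 * offsets used by gen_srs() above, collected in one table. amode is the
 * 2-bit addressing mode from the insn; the outputs are the offset of the
 * first stored word and the writeback adjustment. example_srs_offsets is a
 * hypothetical helper name.
 */
static inline void example_srs_offsets(uint32_t amode, int32_t *first,
                                       int32_t *writeback)
{
    static const int32_t first_ofs[4] = { -4, 0, -8, 4 };  /* DA, IA, DB, IB */
    static const int32_t wb_ofs[4]    = { -8, 4, -4, 0 };
    *first = first_ofs[amode & 3];
    *writeback = wb_ofs[amode & 3];
}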
7255 /* Generate a label used for skipping this instruction */
7256 static void arm_gen_condlabel(DisasContext *s)
7258 if (!s->condjmp) {
7259 s->condlabel = gen_new_label();
7260 s->condjmp = 1;
7264 /* Skip this instruction if the ARM condition is false */
7265 static void arm_skip_unless(DisasContext *s, uint32_t cond)
7267 arm_gen_condlabel(s);
7268 arm_gen_test_cc(cond ^ 1, s->condlabel);
7273 /* Constant expanders for the decoders. */
7276 static int negate(DisasContext *s, int x)
7278 return -x;
7281 static int plus_2(DisasContext *s, int x)
7283 return x + 2;
7286 static int times_2(DisasContext *s, int x)
7288 return x * 2;
7291 static int times_4(DisasContext *s, int x)
7293 return x * 4;
7296 /* Return only the rotation part of T32ExpandImm. */
7297 static int t32_expandimm_rot(DisasContext *s, int x)
7299 return x & 0xc00 ? extract32(x, 7, 5) : 0;
7302 /* Return the unrotated immediate from T32ExpandImm. */
7303 static int t32_expandimm_imm(DisasContext *s, int x)
7305 int imm = extract32(x, 0, 8);
7307 switch (extract32(x, 8, 4)) {
7308 case 0: /* XY */
7309 /* Nothing to do. */
7310 break;
7311 case 1: /* 00XY00XY */
7312 imm *= 0x00010001;
7313 break;
7314 case 2: /* XY00XY00 */
7315 imm *= 0x01000100;
7316 break;
7317 case 3: /* XYXYXYXY */
7318 imm *= 0x01010101;
7319 break;
7320 default:
7321 /* Rotated constant. */
7322 imm |= 0x80;
7323 break;
7325 return imm;
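/*
 * Illustrative worked example, not part of the original translate.c: how the
 * two expanders above recombine for a rotated T32 constant (x & 0xc00 != 0).
 * For x = 0x42b, t32_expandimm_imm() returns 0x2b | 0x80 = 0xab and
 * t32_expandimm_rot() returns bits [11:7] = 8; ror32(0xab, 8) == 0xab000000,
 * matching T32ExpandImm. example_t32_rotated is a hypothetical name, and
 * ror32() is the helper already used elsewhere in this file.
 */
static inline uint32_t example_t32_rotated(uint32_t x)
{
    uint32_t imm = (x & 0xff) | 0x80;   /* unrotated immediate, bit 7 forced */
    uint32_t rot = (x >> 7) & 0x1f;     /* rotation amount i:imm3:a */
    return ror32(imm, rot);
}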
7328 static int t32_branch24(DisasContext *s, int x)
7330 /* Convert J1:J2 at x[22:21] to I2:I1, which involves I=J^~S. */
7331 x ^= !(x < 0) * (3 << 21);
7332 /* Append the final zero. */
7333 return x << 1;
7336 static int t16_setflags(DisasContext *s)
7338 return s->condexec_mask == 0;
7341 static int t16_push_list(DisasContext *s, int x)
7343 return (x & 0xff) | (x & 0x100) << (14 - 8);
7346 static int t16_pop_list(DisasContext *s, int x)
7348 return (x & 0xff) | (x & 0x100) << (15 - 8);
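/*
 * Illustrative worked example, not part of the original translate.c: bit 8
 * of the T16 register-list field selects LR for PUSH and PC for POP. With
 * x = 0x103 (bit 8 plus r0 and r1), t16_push_list() returns 0x4003
 * (r0, r1, lr) and t16_pop_list() returns 0x8003 (r0, r1, pc).
 */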
7352 /* Include the generated decoders. */
7355 #include "decode-a32.inc.c"
7356 #include "decode-a32-uncond.inc.c"
7357 #include "decode-t32.inc.c"
7358 #include "decode-t16.inc.c"
7360 /* Helpers to swap operands for reverse-subtract. */
7361 static void gen_rsb(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
7363 tcg_gen_sub_i32(dst, b, a);
7366 static void gen_rsb_CC(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
7368 gen_sub_CC(dst, b, a);
7371 static void gen_rsc(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
7373 gen_sub_carry(dest, b, a);
7376 static void gen_rsc_CC(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
7378 gen_sbc_CC(dest, b, a);
7382 /* Helpers for the data processing routines.
7384 * After the computation store the results back.
7385 * This may be suppressed altogether (STREG_NONE), require a runtime
7386 * check against the stack limits (STREG_SP_CHECK), or generate an
7387 * exception return. Oh, or store into a register.
7389 * Always return true, indicating success for a trans_* function. */
7391 typedef enum {
7392 STREG_NONE,
7393 STREG_NORMAL,
7394 STREG_SP_CHECK,
7395 STREG_EXC_RET,
7396 } StoreRegKind;
7398 static bool store_reg_kind(DisasContext *s, int rd,
7399 TCGv_i32 val, StoreRegKind kind)
7401 switch (kind) {
7402 case STREG_NONE:
7403 tcg_temp_free_i32(val);
7404 return true;
7405 case STREG_NORMAL:
7406 /* See ALUWritePC: Interworking only from a32 mode. */
7407 if (s->thumb) {
7408 store_reg(s, rd, val);
7409 } else {
7410 store_reg_bx(s, rd, val);
7412 return true;
7413 case STREG_SP_CHECK:
7414 store_sp_checked(s, val);
7415 return true;
7416 case STREG_EXC_RET:
7417 gen_exception_return(s, val);
7418 return true;
7420 g_assert_not_reached();
7424 /* Data Processing (register)
7426 * Operate, with set flags, one register source,
7427 * one immediate shifted register source, and a destination. */
7429 static bool op_s_rrr_shi(DisasContext *s, arg_s_rrr_shi *a,
7430 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
7431 int logic_cc, StoreRegKind kind)
7433 TCGv_i32 tmp1, tmp2;
7435 tmp2 = load_reg(s, a->rm);
7436 gen_arm_shift_im(tmp2, a->shty, a->shim, logic_cc);
7437 tmp1 = load_reg(s, a->rn);
7439 gen(tmp1, tmp1, tmp2);
7440 tcg_temp_free_i32(tmp2);
7442 if (logic_cc) {
7443 gen_logic_CC(tmp1);
7445 return store_reg_kind(s, a->rd, tmp1, kind);
7448 static bool op_s_rxr_shi(DisasContext *s, arg_s_rrr_shi *a,
7449 void (*gen)(TCGv_i32, TCGv_i32),
7450 int logic_cc, StoreRegKind kind)
7452 TCGv_i32 tmp;
7454 tmp = load_reg(s, a->rm);
7455 gen_arm_shift_im(tmp, a->shty, a->shim, logic_cc);
7457 gen(tmp, tmp);
7458 if (logic_cc) {
7459 gen_logic_CC(tmp);
7461 return store_reg_kind(s, a->rd, tmp, kind);
7465 /* Data-processing (register-shifted register)
7467 * Operate, with set flags, one register source,
7468 * one register shifted register source, and a destination. */
7470 static bool op_s_rrr_shr(DisasContext *s, arg_s_rrr_shr *a,
7471 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
7472 int logic_cc, StoreRegKind kind)
7474 TCGv_i32 tmp1, tmp2;
7476 tmp1 = load_reg(s, a->rs);
7477 tmp2 = load_reg(s, a->rm);
7478 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
7479 tmp1 = load_reg(s, a->rn);
7481 gen(tmp1, tmp1, tmp2);
7482 tcg_temp_free_i32(tmp2);
7484 if (logic_cc) {
7485 gen_logic_CC(tmp1);
7487 return store_reg_kind(s, a->rd, tmp1, kind);
7490 static bool op_s_rxr_shr(DisasContext *s, arg_s_rrr_shr *a,
7491 void (*gen)(TCGv_i32, TCGv_i32),
7492 int logic_cc, StoreRegKind kind)
7494 TCGv_i32 tmp1, tmp2;
7496 tmp1 = load_reg(s, a->rs);
7497 tmp2 = load_reg(s, a->rm);
7498 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
7500 gen(tmp2, tmp2);
7501 if (logic_cc) {
7502 gen_logic_CC(tmp2);
7504 return store_reg_kind(s, a->rd, tmp2, kind);
7508 /* Data-processing (immediate)
7510 * Operate, with set flags, one register source,
7511 * one rotated immediate, and a destination.
7513 * Note that logic_cc && a->rot setting CF based on the msb of the
7514 * immediate is the reason why we must pass in the unrotated form
7515 * of the immediate. */
7517 static bool op_s_rri_rot(DisasContext *s, arg_s_rri_rot *a,
7518 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
7519 int logic_cc, StoreRegKind kind)
7521 TCGv_i32 tmp1, tmp2;
7522 uint32_t imm;
7524 imm = ror32(a->imm, a->rot);
7525 if (logic_cc && a->rot) {
7526 tcg_gen_movi_i32(cpu_CF, imm >> 31);
7528 tmp2 = tcg_const_i32(imm);
7529 tmp1 = load_reg(s, a->rn);
7531 gen(tmp1, tmp1, tmp2);
7532 tcg_temp_free_i32(tmp2);
7534 if (logic_cc) {
7535 gen_logic_CC(tmp1);
7537 return store_reg_kind(s, a->rd, tmp1, kind);
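/*
 * Illustrative sketch, not part of the original translate.c: why the
 * unrotated immediate must be passed in. For a flag-setting logical op with
 * a->imm = 0xab and a->rot = 8, ror32() yields 0xab000000, and its top bit
 * is what gets written to cpu_CF. example_rri_carry is a hypothetical name.
 */
static inline uint32_t example_rri_carry(uint32_t unrotated, uint32_t rot)
{
    uint32_t imm = ror32(unrotated, rot);   /* ror32(0xab, 8) == 0xab000000 */
    return imm >> 31;                       /* carry set by the rotation: 1 here */
}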
7540 static bool op_s_rxi_rot(DisasContext *s, arg_s_rri_rot *a,
7541 void (*gen)(TCGv_i32, TCGv_i32),
7542 int logic_cc, StoreRegKind kind)
7544 TCGv_i32 tmp;
7545 uint32_t imm;
7547 imm = ror32(a->imm, a->rot);
7548 if (logic_cc && a->rot) {
7549 tcg_gen_movi_i32(cpu_CF, imm >> 31);
7551 tmp = tcg_const_i32(imm);
7553 gen(tmp, tmp);
7554 if (logic_cc) {
7555 gen_logic_CC(tmp);
7557 return store_reg_kind(s, a->rd, tmp, kind);
7560 #define DO_ANY3(NAME, OP, L, K) \
7561 static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a) \
7562 { StoreRegKind k = (K); return op_s_rrr_shi(s, a, OP, L, k); } \
7563 static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a) \
7564 { StoreRegKind k = (K); return op_s_rrr_shr(s, a, OP, L, k); } \
7565 static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a) \
7566 { StoreRegKind k = (K); return op_s_rri_rot(s, a, OP, L, k); }
7568 #define DO_ANY2(NAME, OP, L, K) \
7569 static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a) \
7570 { StoreRegKind k = (K); return op_s_rxr_shi(s, a, OP, L, k); } \
7571 static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a) \
7572 { StoreRegKind k = (K); return op_s_rxr_shr(s, a, OP, L, k); } \
7573 static bool trans_##NAME##_rxi(DisasContext *s, arg_s_rri_rot *a) \
7574 { StoreRegKind k = (K); return op_s_rxi_rot(s, a, OP, L, k); }
7576 #define DO_CMP2(NAME, OP, L) \
7577 static bool trans_##NAME##_xrri(DisasContext *s, arg_s_rrr_shi *a) \
7578 { return op_s_rrr_shi(s, a, OP, L, STREG_NONE); } \
7579 static bool trans_##NAME##_xrrr(DisasContext *s, arg_s_rrr_shr *a) \
7580 { return op_s_rrr_shr(s, a, OP, L, STREG_NONE); } \
7581 static bool trans_##NAME##_xri(DisasContext *s, arg_s_rri_rot *a) \
7582 { return op_s_rri_rot(s, a, OP, L, STREG_NONE); }
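/*
 * Illustrative expansion, not part of the original translate.c: what one
 * instantiation of the DO_ANY3 macro above produces when used below. For
 * DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL), the first of the three
 * generated trans_* functions is:
 *
 *   static bool trans_AND_rrri(DisasContext *s, arg_s_rrr_shi *a)
 *   {
 *       StoreRegKind k = (STREG_NORMAL);
 *       return op_s_rrr_shi(s, a, tcg_gen_and_i32, a->s, k);
 *   }
 */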
7584 DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
7585 DO_ANY3(EOR, tcg_gen_xor_i32, a->s, STREG_NORMAL)
7586 DO_ANY3(ORR, tcg_gen_or_i32, a->s, STREG_NORMAL)
7587 DO_ANY3(BIC, tcg_gen_andc_i32, a->s, STREG_NORMAL)
7589 DO_ANY3(RSB, a->s ? gen_rsb_CC : gen_rsb, false, STREG_NORMAL)
7590 DO_ANY3(ADC, a->s ? gen_adc_CC : gen_add_carry, false, STREG_NORMAL)
7591 DO_ANY3(SBC, a->s ? gen_sbc_CC : gen_sub_carry, false, STREG_NORMAL)
7592 DO_ANY3(RSC, a->s ? gen_rsc_CC : gen_rsc, false, STREG_NORMAL)
7594 DO_CMP2(TST, tcg_gen_and_i32, true)
7595 DO_CMP2(TEQ, tcg_gen_xor_i32, true)
7596 DO_CMP2(CMN, gen_add_CC, false)
7597 DO_CMP2(CMP, gen_sub_CC, false)
7599 DO_ANY3(ADD, a->s ? gen_add_CC : tcg_gen_add_i32, false,
7600 a->rd == 13 && a->rn == 13 ? STREG_SP_CHECK : STREG_NORMAL)
7603 /* Note for the computation of StoreRegKind we return out of the
7604 * middle of the functions that are expanded by DO_ANY3, and that
7605 * we modify a->s via that parameter before it is used by OP. */
7607 DO_ANY3(SUB, a->s ? gen_sub_CC : tcg_gen_sub_i32, false,
7609 StoreRegKind ret = STREG_NORMAL;
7610 if (a->rd == 15 && a->s) {
7612 /* See ALUExceptionReturn:
7613 * In User mode, UNPREDICTABLE; we choose UNDEF.
7614 * In Hyp mode, UNDEFINED. */
7616 if (IS_USER(s) || s->current_el == 2) {
7617 unallocated_encoding(s);
7618 return true;
7620 /* There is no writeback of nzcv to PSTATE. */
7621 a->s = 0;
7622 ret = STREG_EXC_RET;
7623 } else if (a->rd == 13 && a->rn == 13) {
7624 ret = STREG_SP_CHECK;
7626 ret;
7629 DO_ANY2(MOV, tcg_gen_mov_i32, a->s,
7631 StoreRegKind ret = STREG_NORMAL;
7632 if (a->rd == 15 && a->s) {
7634 /* See ALUExceptionReturn:
7635 * In User mode, UNPREDICTABLE; we choose UNDEF.
7636 * In Hyp mode, UNDEFINED. */
7638 if (IS_USER(s) || s->current_el == 2) {
7639 unallocated_encoding(s);
7640 return true;
7642 /* There is no writeback of nzcv to PSTATE. */
7643 a->s = 0;
7644 ret = STREG_EXC_RET;
7645 } else if (a->rd == 13) {
7646 ret = STREG_SP_CHECK;
7648 ret;
7651 DO_ANY2(MVN, tcg_gen_not_i32, a->s, STREG_NORMAL)
7654 /* ORN is only available with T32, so there is no register-shifted-register
7655 * form of the insn. Using the DO_ANY3 macro would create an unused function. */
7657 static bool trans_ORN_rrri(DisasContext *s, arg_s_rrr_shi *a)
7659 return op_s_rrr_shi(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
7662 static bool trans_ORN_rri(DisasContext *s, arg_s_rri_rot *a)
7664 return op_s_rri_rot(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
7667 #undef DO_ANY3
7668 #undef DO_ANY2
7669 #undef DO_CMP2
7671 static bool trans_ADR(DisasContext *s, arg_ri *a)
7673 store_reg_bx(s, a->rd, add_reg_for_lit(s, 15, a->imm));
7674 return true;
7677 static bool trans_MOVW(DisasContext *s, arg_MOVW *a)
7679 TCGv_i32 tmp;
7681 if (!ENABLE_ARCH_6T2) {
7682 return false;
7685 tmp = tcg_const_i32(a->imm);
7686 store_reg(s, a->rd, tmp);
7687 return true;
7690 static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
7692 TCGv_i32 tmp;
7694 if (!ENABLE_ARCH_6T2) {
7695 return false;
7698 tmp = load_reg(s, a->rd);
7699 tcg_gen_ext16u_i32(tmp, tmp);
7700 tcg_gen_ori_i32(tmp, tmp, a->imm << 16);
7701 store_reg(s, a->rd, tmp);
7702 return true;
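/*
 * Illustrative sketch, not part of the original translate.c: the plain-C
 * equivalent of the MOVT sequence above, keeping the destination's low
 * halfword and inserting the 16-bit immediate into the top halfword.
 * example_movt is a hypothetical name.
 */
static inline uint32_t example_movt(uint32_t rd, uint32_t imm16)
{
    return (rd & 0xffff) | (imm16 << 16);
}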
7706 /* Multiply and multiply accumulate */
7709 static bool op_mla(DisasContext *s, arg_s_rrrr *a, bool add)
7711 TCGv_i32 t1, t2;
7713 t1 = load_reg(s, a->rn);
7714 t2 = load_reg(s, a->rm);
7715 tcg_gen_mul_i32(t1, t1, t2);
7716 tcg_temp_free_i32(t2);
7717 if (add) {
7718 t2 = load_reg(s, a->ra);
7719 tcg_gen_add_i32(t1, t1, t2);
7720 tcg_temp_free_i32(t2);
7722 if (a->s) {
7723 gen_logic_CC(t1);
7725 store_reg(s, a->rd, t1);
7726 return true;
7729 static bool trans_MUL(DisasContext *s, arg_MUL *a)
7731 return op_mla(s, a, false);
7734 static bool trans_MLA(DisasContext *s, arg_MLA *a)
7736 return op_mla(s, a, true);
7739 static bool trans_MLS(DisasContext *s, arg_MLS *a)
7741 TCGv_i32 t1, t2;
7743 if (!ENABLE_ARCH_6T2) {
7744 return false;
7746 t1 = load_reg(s, a->rn);
7747 t2 = load_reg(s, a->rm);
7748 tcg_gen_mul_i32(t1, t1, t2);
7749 tcg_temp_free_i32(t2);
7750 t2 = load_reg(s, a->ra);
7751 tcg_gen_sub_i32(t1, t2, t1);
7752 tcg_temp_free_i32(t2);
7753 store_reg(s, a->rd, t1);
7754 return true;
7757 static bool op_mlal(DisasContext *s, arg_s_rrrr *a, bool uns, bool add)
7759 TCGv_i32 t0, t1, t2, t3;
7761 t0 = load_reg(s, a->rm);
7762 t1 = load_reg(s, a->rn);
7763 if (uns) {
7764 tcg_gen_mulu2_i32(t0, t1, t0, t1);
7765 } else {
7766 tcg_gen_muls2_i32(t0, t1, t0, t1);
7768 if (add) {
7769 t2 = load_reg(s, a->ra);
7770 t3 = load_reg(s, a->rd);
7771 tcg_gen_add2_i32(t0, t1, t0, t1, t2, t3);
7772 tcg_temp_free_i32(t2);
7773 tcg_temp_free_i32(t3);
7775 if (a->s) {
7776 gen_logicq_cc(t0, t1);
7778 store_reg(s, a->ra, t0);
7779 store_reg(s, a->rd, t1);
7780 return true;
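/*
 * Illustrative sketch, not part of the original translate.c: the plain-C
 * equivalent of the mulu2/add2 sequence above for UMLAL, where rd:ra holds
 * the 64-bit accumulator (ra = low half, rd = high half). example_umlal is
 * a hypothetical name.
 */
static inline uint64_t example_umlal(uint32_t rn, uint32_t rm,
                                     uint32_t ra, uint32_t rd)
{
    return (uint64_t)rn * rm + (((uint64_t)rd << 32) | ra);
}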
7783 static bool trans_UMULL(DisasContext *s, arg_UMULL *a)
7785 return op_mlal(s, a, true, false);
7788 static bool trans_SMULL(DisasContext *s, arg_SMULL *a)
7790 return op_mlal(s, a, false, false);
7793 static bool trans_UMLAL(DisasContext *s, arg_UMLAL *a)
7795 return op_mlal(s, a, true, true);
7798 static bool trans_SMLAL(DisasContext *s, arg_SMLAL *a)
7800 return op_mlal(s, a, false, true);
7803 static bool trans_UMAAL(DisasContext *s, arg_UMAAL *a)
7805 TCGv_i32 t0, t1, t2, zero;
7807 if (s->thumb
7808 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
7809 : !ENABLE_ARCH_6) {
7810 return false;
7813 t0 = load_reg(s, a->rm);
7814 t1 = load_reg(s, a->rn);
7815 tcg_gen_mulu2_i32(t0, t1, t0, t1);
7816 zero = tcg_const_i32(0);
7817 t2 = load_reg(s, a->ra);
7818 tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
7819 tcg_temp_free_i32(t2);
7820 t2 = load_reg(s, a->rd);
7821 tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
7822 tcg_temp_free_i32(t2);
7823 tcg_temp_free_i32(zero);
7824 store_reg(s, a->ra, t0);
7825 store_reg(s, a->rd, t1);
7826 return true;
7830 /* Saturating addition and subtraction */
7833 static bool op_qaddsub(DisasContext *s, arg_rrr *a, bool add, bool doub)
7835 TCGv_i32 t0, t1;
7837 if (s->thumb
7838 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
7839 : !ENABLE_ARCH_5TE) {
7840 return false;
7843 t0 = load_reg(s, a->rm);
7844 t1 = load_reg(s, a->rn);
7845 if (doub) {
7846 gen_helper_add_saturate(t1, cpu_env, t1, t1);
7848 if (add) {
7849 gen_helper_add_saturate(t0, cpu_env, t0, t1);
7850 } else {
7851 gen_helper_sub_saturate(t0, cpu_env, t0, t1);
7853 tcg_temp_free_i32(t1);
7854 store_reg(s, a->rd, t0);
7855 return true;
7858 #define DO_QADDSUB(NAME, ADD, DOUB) \
7859 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
7861 return op_qaddsub(s, a, ADD, DOUB); \
7864 DO_QADDSUB(QADD, true, false)
7865 DO_QADDSUB(QSUB, false, false)
7866 DO_QADDSUB(QDADD, true, true)
7867 DO_QADDSUB(QDSUB, false, true)
7869 #undef DO_QADDSUB
7872 /* Halfword multiply and multiply accumulate */
7875 static bool op_smlaxxx(DisasContext *s, arg_rrrr *a,
7876 int add_long, bool nt, bool mt)
7878 TCGv_i32 t0, t1, tl, th;
7880 if (s->thumb
7881 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
7882 : !ENABLE_ARCH_5TE) {
7883 return false;
7886 t0 = load_reg(s, a->rn);
7887 t1 = load_reg(s, a->rm);
7888 gen_mulxy(t0, t1, nt, mt);
7889 tcg_temp_free_i32(t1);
7891 switch (add_long) {
7892 case 0:
7893 store_reg(s, a->rd, t0);
7894 break;
7895 case 1:
7896 t1 = load_reg(s, a->ra);
7897 gen_helper_add_setq(t0, cpu_env, t0, t1);
7898 tcg_temp_free_i32(t1);
7899 store_reg(s, a->rd, t0);
7900 break;
7901 case 2:
7902 tl = load_reg(s, a->ra);
7903 th = load_reg(s, a->rd);
7904 /* Sign-extend the 32-bit product to 64 bits. */
7905 t1 = tcg_temp_new_i32();
7906 tcg_gen_sari_i32(t1, t0, 31);
7907 tcg_gen_add2_i32(tl, th, tl, th, t0, t1);
7908 tcg_temp_free_i32(t0);
7909 tcg_temp_free_i32(t1);
7910 store_reg(s, a->ra, tl);
7911 store_reg(s, a->rd, th);
7912 break;
7913 default:
7914 g_assert_not_reached();
7916 return true;
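/*
 * Illustrative sketch, not part of the original translate.c: the
 * add_long == 2 (SMLALxy) path above in plain C, where the 32-bit halfword
 * product is sign-extended and added to the 64-bit accumulator held as
 * rd:ra. example_smlalxy_acc is a hypothetical name.
 */
static inline uint64_t example_smlalxy_acc(int32_t product,
                                           uint32_t ra, uint32_t rd)
{
    return (((uint64_t)rd << 32) | ra) + (uint64_t)(int64_t)product;
}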
7919 #define DO_SMLAX(NAME, add, nt, mt) \
7920 static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
7922 return op_smlaxxx(s, a, add, nt, mt); \
7925 DO_SMLAX(SMULBB, 0, 0, 0)
7926 DO_SMLAX(SMULBT, 0, 0, 1)
7927 DO_SMLAX(SMULTB, 0, 1, 0)
7928 DO_SMLAX(SMULTT, 0, 1, 1)
7930 DO_SMLAX(SMLABB, 1, 0, 0)
7931 DO_SMLAX(SMLABT, 1, 0, 1)
7932 DO_SMLAX(SMLATB, 1, 1, 0)
7933 DO_SMLAX(SMLATT, 1, 1, 1)
7935 DO_SMLAX(SMLALBB, 2, 0, 0)
7936 DO_SMLAX(SMLALBT, 2, 0, 1)
7937 DO_SMLAX(SMLALTB, 2, 1, 0)
7938 DO_SMLAX(SMLALTT, 2, 1, 1)
7940 #undef DO_SMLAX
7942 static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt)
7944 TCGv_i32 t0, t1;
7946 if (!ENABLE_ARCH_5TE) {
7947 return false;
7950 t0 = load_reg(s, a->rn);
7951 t1 = load_reg(s, a->rm);
7953 /* Since the nominal result is product<47:16>, shift the 16-bit
7954 * input up by 16 bits, so that the result is at product<63:32>. */
7956 if (mt) {
7957 tcg_gen_andi_i32(t1, t1, 0xffff0000);
7958 } else {
7959 tcg_gen_shli_i32(t1, t1, 16);
7961 tcg_gen_muls2_i32(t0, t1, t0, t1);
7962 tcg_temp_free_i32(t0);
7963 if (add) {
7964 t0 = load_reg(s, a->ra);
7965 gen_helper_add_setq(t1, cpu_env, t1, t0);
7966 tcg_temp_free_i32(t0);
7968 store_reg(s, a->rd, t1);
7969 return true;
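/*
 * Illustrative sketch, not part of the original translate.c: the SMULW*
 * arithmetic described in the comment above. The architected result is
 * (rn * halfword)<47:16>, which equals bits <63:32> of rn * (halfword << 16),
 * so shifting the halfword operand up by 16 lets muls2 leave the answer in
 * the high word. example_smulw is a hypothetical name.
 */
static inline uint32_t example_smulw(int32_t rn, int16_t halfword)
{
    int64_t product = (int64_t)rn * halfword;
    return (uint32_t)(product >> 16);        /* product<47:16> */
}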
7972 #define DO_SMLAWX(NAME, add, mt) \
7973 static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
7975 return op_smlawx(s, a, add, mt); \
7978 DO_SMLAWX(SMULWB, 0, 0)
7979 DO_SMLAWX(SMULWT, 0, 1)
7980 DO_SMLAWX(SMLAWB, 1, 0)
7981 DO_SMLAWX(SMLAWT, 1, 1)
7983 #undef DO_SMLAWX
7986 /* MSR (immediate) and hints */
7989 static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
7992 /* When running single-threaded TCG code, use the helper to ensure that
7993 * the next round-robin scheduled vCPU gets a crack. When running in
7994 * MTTCG we don't generate jumps to the helper as it won't affect the
7995 * scheduling of other vCPUs. */
7997 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
7998 gen_set_pc_im(s, s->base.pc_next);
7999 s->base.is_jmp = DISAS_YIELD;
8001 return true;
8004 static bool trans_WFE(DisasContext *s, arg_WFE *a)
8007 /* When running single-threaded TCG code, use the helper to ensure that
8008 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
8009 * just skip this instruction. Currently the SEV/SEVL instructions,
8010 * which are *one* of many ways to wake the CPU from WFE, are not
8011 * implemented so we can't sleep like WFI does. */
8013 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
8014 gen_set_pc_im(s, s->base.pc_next);
8015 s->base.is_jmp = DISAS_WFE;
8017 return true;
8020 static bool trans_WFI(DisasContext *s, arg_WFI *a)
8022 /* For WFI, halt the vCPU until an IRQ. */
8023 gen_set_pc_im(s, s->base.pc_next);
8024 s->base.is_jmp = DISAS_WFI;
8025 return true;
8028 static bool trans_NOP(DisasContext *s, arg_NOP *a)
8030 return true;
8033 static bool trans_MSR_imm(DisasContext *s, arg_MSR_imm *a)
8035 uint32_t val = ror32(a->imm, a->rot * 2);
8036 uint32_t mask = msr_mask(s, a->mask, a->r);
8038 if (gen_set_psr_im(s, mask, a->r, val)) {
8039 unallocated_encoding(s);
8041 return true;
8045 /* Cyclic Redundancy Check */
8048 static bool op_crc32(DisasContext *s, arg_rrr *a, bool c, MemOp sz)
8050 TCGv_i32 t1, t2, t3;
8052 if (!dc_isar_feature(aa32_crc32, s)) {
8053 return false;
8056 t1 = load_reg(s, a->rn);
8057 t2 = load_reg(s, a->rm);
8058 switch (sz) {
8059 case MO_8:
8060 gen_uxtb(t2);
8061 break;
8062 case MO_16:
8063 gen_uxth(t2);
8064 break;
8065 case MO_32:
8066 break;
8067 default:
8068 g_assert_not_reached();
8070 t3 = tcg_const_i32(1 << sz);
8071 if (c) {
8072 gen_helper_crc32c(t1, t1, t2, t3);
8073 } else {
8074 gen_helper_crc32(t1, t1, t2, t3);
8076 tcg_temp_free_i32(t2);
8077 tcg_temp_free_i32(t3);
8078 store_reg(s, a->rd, t1);
8079 return true;
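/*
 * Illustrative sketch, not part of the original translate.c: the operand
 * narrowing and byte count selected above. sz is MO_8, MO_16 or MO_32
 * (0, 1 or 2), so the crc32 helpers see 1, 2 or 4 significant bytes of Rm.
 * example_crc32_operand is a hypothetical name.
 */
static inline uint32_t example_crc32_operand(uint32_t rm, unsigned sz,
                                             unsigned *nbytes)
{
    *nbytes = 1u << sz;
    return sz == 0 ? (rm & 0xff) : sz == 1 ? (rm & 0xffff) : rm;
}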
8082 #define DO_CRC32(NAME, c, sz) \
8083 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
8084 { return op_crc32(s, a, c, sz); }
8086 DO_CRC32(CRC32B, false, MO_8)
8087 DO_CRC32(CRC32H, false, MO_16)
8088 DO_CRC32(CRC32W, false, MO_32)
8089 DO_CRC32(CRC32CB, true, MO_8)
8090 DO_CRC32(CRC32CH, true, MO_16)
8091 DO_CRC32(CRC32CW, true, MO_32)
8093 #undef DO_CRC32
8096 /* Miscellaneous instructions */
8099 static bool trans_MRS_bank(DisasContext *s, arg_MRS_bank *a)
8101 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8102 return false;
8104 gen_mrs_banked(s, a->r, a->sysm, a->rd);
8105 return true;
8108 static bool trans_MSR_bank(DisasContext *s, arg_MSR_bank *a)
8110 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8111 return false;
8113 gen_msr_banked(s, a->r, a->sysm, a->rn);
8114 return true;
8117 static bool trans_MRS_reg(DisasContext *s, arg_MRS_reg *a)
8119 TCGv_i32 tmp;
8121 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8122 return false;
8124 if (a->r) {
8125 if (IS_USER(s)) {
8126 unallocated_encoding(s);
8127 return true;
8129 tmp = load_cpu_field(spsr);
8130 } else {
8131 tmp = tcg_temp_new_i32();
8132 gen_helper_cpsr_read(tmp, cpu_env);
8134 store_reg(s, a->rd, tmp);
8135 return true;
8138 static bool trans_MSR_reg(DisasContext *s, arg_MSR_reg *a)
8140 TCGv_i32 tmp;
8141 uint32_t mask = msr_mask(s, a->mask, a->r);
8143 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8144 return false;
8146 tmp = load_reg(s, a->rn);
8147 if (gen_set_psr(s, mask, a->r, tmp)) {
8148 unallocated_encoding(s);
8150 return true;
8153 static bool trans_MRS_v7m(DisasContext *s, arg_MRS_v7m *a)
8155 TCGv_i32 tmp;
8157 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
8158 return false;
8160 tmp = tcg_const_i32(a->sysm);
8161 gen_helper_v7m_mrs(tmp, cpu_env, tmp);
8162 store_reg(s, a->rd, tmp);
8163 return true;
8166 static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a)
8168 TCGv_i32 addr, reg;
8170 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
8171 return false;
8173 addr = tcg_const_i32((a->mask << 10) | a->sysm);
8174 reg = load_reg(s, a->rn);
8175 gen_helper_v7m_msr(cpu_env, addr, reg);
8176 tcg_temp_free_i32(addr);
8177 tcg_temp_free_i32(reg);
8178 /* If we wrote to CONTROL, the EL might have changed */
8179 gen_helper_rebuild_hflags_m32_newel(cpu_env);
8180 gen_lookup_tb(s);
8181 return true;
8184 static bool trans_BX(DisasContext *s, arg_BX *a)
8186 if (!ENABLE_ARCH_4T) {
8187 return false;
8189 gen_bx_excret(s, load_reg(s, a->rm));
8190 return true;
8193 static bool trans_BXJ(DisasContext *s, arg_BXJ *a)
8195 if (!ENABLE_ARCH_5J || arm_dc_feature(s, ARM_FEATURE_M)) {
8196 return false;
8198 /* Trivial implementation equivalent to bx. */
8199 gen_bx(s, load_reg(s, a->rm));
8200 return true;
8203 static bool trans_BLX_r(DisasContext *s, arg_BLX_r *a)
8205 TCGv_i32 tmp;
8207 if (!ENABLE_ARCH_5) {
8208 return false;
8210 tmp = load_reg(s, a->rm);
8211 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
8212 gen_bx(s, tmp);
8213 return true;
8217 /* BXNS/BLXNS: only exist for v8M with the security extensions,
8218 * and always UNDEF if NonSecure. We don't implement these in
8219 * the user-only mode either (in theory you can use them from
8220 * Secure User mode but they are too tied in to system emulation). */
8222 static bool trans_BXNS(DisasContext *s, arg_BXNS *a)
8224 if (!s->v8m_secure || IS_USER_ONLY) {
8225 unallocated_encoding(s);
8226 } else {
8227 gen_bxns(s, a->rm);
8229 return true;
8232 static bool trans_BLXNS(DisasContext *s, arg_BLXNS *a)
8234 if (!s->v8m_secure || IS_USER_ONLY) {
8235 unallocated_encoding(s);
8236 } else {
8237 gen_blxns(s, a->rm);
8239 return true;
8242 static bool trans_CLZ(DisasContext *s, arg_CLZ *a)
8244 TCGv_i32 tmp;
8246 if (!ENABLE_ARCH_5) {
8247 return false;
8249 tmp = load_reg(s, a->rm);
8250 tcg_gen_clzi_i32(tmp, tmp, 32);
8251 store_reg(s, a->rd, tmp);
8252 return true;
8255 static bool trans_ERET(DisasContext *s, arg_ERET *a)
8257 TCGv_i32 tmp;
8259 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
8260 return false;
8262 if (IS_USER(s)) {
8263 unallocated_encoding(s);
8264 return true;
8266 if (s->current_el == 2) {
8267 /* ERET from Hyp uses ELR_Hyp, not LR */
8268 tmp = load_cpu_field(elr_el[2]);
8269 } else {
8270 tmp = load_reg(s, 14);
8272 gen_exception_return(s, tmp);
8273 return true;
8276 static bool trans_HLT(DisasContext *s, arg_HLT *a)
8278 gen_hlt(s, a->imm);
8279 return true;
8282 static bool trans_BKPT(DisasContext *s, arg_BKPT *a)
8284 if (!ENABLE_ARCH_5) {
8285 return false;
8287 if (arm_dc_feature(s, ARM_FEATURE_M) &&
8288 semihosting_enabled() &&
8289 #ifndef CONFIG_USER_ONLY
8290 !IS_USER(s) &&
8291 #endif
8292 (a->imm == 0xab)) {
8293 gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
8294 } else {
8295 gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false));
8297 return true;
8300 static bool trans_HVC(DisasContext *s, arg_HVC *a)
8302 if (!ENABLE_ARCH_7 || arm_dc_feature(s, ARM_FEATURE_M)) {
8303 return false;
8305 if (IS_USER(s)) {
8306 unallocated_encoding(s);
8307 } else {
8308 gen_hvc(s, a->imm);
8310 return true;
8313 static bool trans_SMC(DisasContext *s, arg_SMC *a)
8315 if (!ENABLE_ARCH_6K || arm_dc_feature(s, ARM_FEATURE_M)) {
8316 return false;
8318 if (IS_USER(s)) {
8319 unallocated_encoding(s);
8320 } else {
8321 gen_smc(s);
8323 return true;
8326 static bool trans_SG(DisasContext *s, arg_SG *a)
8328 if (!arm_dc_feature(s, ARM_FEATURE_M) ||
8329 !arm_dc_feature(s, ARM_FEATURE_V8)) {
8330 return false;
8333 /* SG (v8M only)
8334 * The bulk of the behaviour for this instruction is implemented
8335 * in v7m_handle_execute_nsc(), which deals with the insn when
8336 * it is executed by a CPU in non-secure state from memory
8337 * which is Secure & NonSecure-Callable.
8338 * Here we only need to handle the remaining cases:
8339 * * in NS memory (including the "security extension not
8340 * implemented" case) : NOP
8341 * * in S memory but CPU already secure (clear IT bits)
8342 * We know that the attribute for the memory this insn is
8343 * in must match the current CPU state, because otherwise
8344 * get_phys_addr_pmsav8 would have generated an exception. */
8346 if (s->v8m_secure) {
8347 /* Like the IT insn, we don't need to generate any code */
8348 s->condexec_cond = 0;
8349 s->condexec_mask = 0;
8351 return true;
8354 static bool trans_TT(DisasContext *s, arg_TT *a)
8356 TCGv_i32 addr, tmp;
8358 if (!arm_dc_feature(s, ARM_FEATURE_M) ||
8359 !arm_dc_feature(s, ARM_FEATURE_V8)) {
8360 return false;
8362 if (a->rd == 13 || a->rd == 15 || a->rn == 15) {
8363 /* We UNDEF for these UNPREDICTABLE cases */
8364 unallocated_encoding(s);
8365 return true;
8367 if (a->A && !s->v8m_secure) {
8368 /* This case is UNDEFINED. */
8369 unallocated_encoding(s);
8370 return true;
8373 addr = load_reg(s, a->rn);
8374 tmp = tcg_const_i32((a->A << 1) | a->T);
8375 gen_helper_v7m_tt(tmp, cpu_env, addr, tmp);
8376 tcg_temp_free_i32(addr);
8377 store_reg(s, a->rd, tmp);
8378 return true;
8382 /* Load/store register index */
8385 static ISSInfo make_issinfo(DisasContext *s, int rd, bool p, bool w)
8387 ISSInfo ret;
8389 /* ISS not valid if writeback */
8390 if (p && !w) {
8391 ret = rd;
8392 if (s->base.pc_next - s->pc_curr == 2) {
8393 ret |= ISSIs16Bit;
8395 } else {
8396 ret = ISSInvalid;
8398 return ret;
8401 static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a)
8403 TCGv_i32 addr = load_reg(s, a->rn);
8405 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
8406 gen_helper_v8m_stackcheck(cpu_env, addr);
8409 if (a->p) {
8410 TCGv_i32 ofs = load_reg(s, a->rm);
8411 gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
8412 if (a->u) {
8413 tcg_gen_add_i32(addr, addr, ofs);
8414 } else {
8415 tcg_gen_sub_i32(addr, addr, ofs);
8417 tcg_temp_free_i32(ofs);
8419 return addr;
8422 static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a,
8423 TCGv_i32 addr, int address_offset)
8425 if (!a->p) {
8426 TCGv_i32 ofs = load_reg(s, a->rm);
8427 gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
8428 if (a->u) {
8429 tcg_gen_add_i32(addr, addr, ofs);
8430 } else {
8431 tcg_gen_sub_i32(addr, addr, ofs);
8433 tcg_temp_free_i32(ofs);
8434 } else if (!a->w) {
8435 tcg_temp_free_i32(addr);
8436 return;
8438 tcg_gen_addi_i32(addr, addr, address_offset);
8439 store_reg(s, a->rn, addr);
8442 static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
8443 MemOp mop, int mem_idx)
8445 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
8446 TCGv_i32 addr, tmp;
8448 addr = op_addr_rr_pre(s, a);
8450 tmp = tcg_temp_new_i32();
8451 gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
8452 disas_set_da_iss(s, mop, issinfo);
8455 /* Perform base writeback before the loaded value to
8456 * ensure correct behavior with overlapping index registers. */
8458 op_addr_rr_post(s, a, addr, 0);
8459 store_reg_from_load(s, a->rt, tmp);
8460 return true;
8463 static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
8464 MemOp mop, int mem_idx)
8466 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
8467 TCGv_i32 addr, tmp;
8469 addr = op_addr_rr_pre(s, a);
8471 tmp = load_reg(s, a->rt);
8472 gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
8473 disas_set_da_iss(s, mop, issinfo);
8474 tcg_temp_free_i32(tmp);
8476 op_addr_rr_post(s, a, addr, 0);
8477 return true;
8480 static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
8482 int mem_idx = get_mem_index(s);
8483 TCGv_i32 addr, tmp;
8485 if (!ENABLE_ARCH_5TE) {
8486 return false;
8488 if (a->rt & 1) {
8489 unallocated_encoding(s);
8490 return true;
8492 addr = op_addr_rr_pre(s, a);
8494 tmp = tcg_temp_new_i32();
8495 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8496 store_reg(s, a->rt, tmp);
8498 tcg_gen_addi_i32(addr, addr, 4);
8500 tmp = tcg_temp_new_i32();
8501 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8502 store_reg(s, a->rt + 1, tmp);
8504 /* LDRD w/ base writeback is undefined if the registers overlap. */
8505 op_addr_rr_post(s, a, addr, -4);
8506 return true;
8509 static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
8511 int mem_idx = get_mem_index(s);
8512 TCGv_i32 addr, tmp;
8514 if (!ENABLE_ARCH_5TE) {
8515 return false;
8517 if (a->rt & 1) {
8518 unallocated_encoding(s);
8519 return true;
8521 addr = op_addr_rr_pre(s, a);
8523 tmp = load_reg(s, a->rt);
8524 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8525 tcg_temp_free_i32(tmp);
8527 tcg_gen_addi_i32(addr, addr, 4);
8529 tmp = load_reg(s, a->rt + 1);
8530 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8531 tcg_temp_free_i32(tmp);
8533 op_addr_rr_post(s, a, addr, -4);
8534 return true;
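/*
 * In both doubleword helpers above the address has already been
 * advanced by 4 to reach the second word, so the post helper is
 * passed -4 to bring the written-back value back to the normal
 * base-plus-offset result.
 */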
8538 * Load/store immediate index
8541 static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a)
8543 int ofs = a->imm;
8545 if (!a->u) {
8546 ofs = -ofs;
8549 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
8551 * Stackcheck. Here we know the base register is the current SP;
8552 * U is set if we're moving SP up, else down. It is
8553 * UNKNOWN whether the limit check triggers when SP starts
8554 * below the limit and ends up above it; we chose to do so.
8556 if (!a->u) {
8557 TCGv_i32 newsp = tcg_temp_new_i32();
8558 tcg_gen_addi_i32(newsp, cpu_R[13], ofs);
8559 gen_helper_v8m_stackcheck(cpu_env, newsp);
8560 tcg_temp_free_i32(newsp);
8561 } else {
8562 gen_helper_v8m_stackcheck(cpu_env, cpu_R[13]);
8566 return add_reg_for_lit(s, a->rn, a->p ? ofs : 0);
8569 static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a,
8570 TCGv_i32 addr, int address_offset)
8572 if (!a->p) {
8573 if (a->u) {
8574 address_offset += a->imm;
8575 } else {
8576 address_offset -= a->imm;
8578 } else if (!a->w) {
8579 tcg_temp_free_i32(addr);
8580 return;
8582 tcg_gen_addi_i32(addr, addr, address_offset);
8583 store_reg(s, a->rn, addr);
8586 static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
8587 MemOp mop, int mem_idx)
8589 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
8590 TCGv_i32 addr, tmp;
8592 addr = op_addr_ri_pre(s, a);
8594 tmp = tcg_temp_new_i32();
8595 gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
8596 disas_set_da_iss(s, mop, issinfo);
8599 * Perform base writeback before storing the loaded value to
8600 * ensure correct behavior with overlapping index registers.
8602 op_addr_ri_post(s, a, addr, 0);
8603 store_reg_from_load(s, a->rt, tmp);
8604 return true;
8607 static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
8608 MemOp mop, int mem_idx)
8610 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
8611 TCGv_i32 addr, tmp;
8613 addr = op_addr_ri_pre(s, a);
8615 tmp = load_reg(s, a->rt);
8616 gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
8617 disas_set_da_iss(s, mop, issinfo);
8618 tcg_temp_free_i32(tmp);
8620 op_addr_ri_post(s, a, addr, 0);
8621 return true;
8624 static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
8626 int mem_idx = get_mem_index(s);
8627 TCGv_i32 addr, tmp;
8629 addr = op_addr_ri_pre(s, a);
8631 tmp = tcg_temp_new_i32();
8632 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8633 store_reg(s, a->rt, tmp);
8635 tcg_gen_addi_i32(addr, addr, 4);
8637 tmp = tcg_temp_new_i32();
8638 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8639 store_reg(s, rt2, tmp);
8641 /* LDRD w/ base writeback is undefined if the registers overlap. */
8642 op_addr_ri_post(s, a, addr, -4);
8643 return true;
8646 static bool trans_LDRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
8648 if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
8649 return false;
8651 return op_ldrd_ri(s, a, a->rt + 1);
8654 static bool trans_LDRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
8656 arg_ldst_ri b = {
8657 .u = a->u, .w = a->w, .p = a->p,
8658 .rn = a->rn, .rt = a->rt, .imm = a->imm
8660 return op_ldrd_ri(s, &b, a->rt2);
8663 static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
8665 int mem_idx = get_mem_index(s);
8666 TCGv_i32 addr, tmp;
8668 addr = op_addr_ri_pre(s, a);
8670 tmp = load_reg(s, a->rt);
8671 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8672 tcg_temp_free_i32(tmp);
8674 tcg_gen_addi_i32(addr, addr, 4);
8676 tmp = load_reg(s, rt2);
8677 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8678 tcg_temp_free_i32(tmp);
8680 op_addr_ri_post(s, a, addr, -4);
8681 return true;
8684 static bool trans_STRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
8686 if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
8687 return false;
8689 return op_strd_ri(s, a, a->rt + 1);
8692 static bool trans_STRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
8694 arg_ldst_ri b = {
8695 .u = a->u, .w = a->w, .p = a->p,
8696 .rn = a->rn, .rt = a->rt, .imm = a->imm
8698 return op_strd_ri(s, &b, a->rt2);
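/*
 * The A32 encodings of LDRD/STRD require an even Rt and implicitly
 * use Rt+1 as the second register, while the T32 encodings carry an
 * explicit Rt2; both paths funnel into op_ldrd_ri()/op_strd_ri()
 * with rt2 supplied by the trans_* wrappers above.
 */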
8701 #define DO_LDST(NAME, WHICH, MEMOP) \
8702 static bool trans_##NAME##_ri(DisasContext *s, arg_ldst_ri *a) \
8704 return op_##WHICH##_ri(s, a, MEMOP, get_mem_index(s)); \
8706 static bool trans_##NAME##T_ri(DisasContext *s, arg_ldst_ri *a) \
8708 return op_##WHICH##_ri(s, a, MEMOP, get_a32_user_mem_index(s)); \
8710 static bool trans_##NAME##_rr(DisasContext *s, arg_ldst_rr *a) \
8712 return op_##WHICH##_rr(s, a, MEMOP, get_mem_index(s)); \
8714 static bool trans_##NAME##T_rr(DisasContext *s, arg_ldst_rr *a) \
8716 return op_##WHICH##_rr(s, a, MEMOP, get_a32_user_mem_index(s)); \
8719 DO_LDST(LDR, load, MO_UL)
8720 DO_LDST(LDRB, load, MO_UB)
8721 DO_LDST(LDRH, load, MO_UW)
8722 DO_LDST(LDRSB, load, MO_SB)
8723 DO_LDST(LDRSH, load, MO_SW)
8725 DO_LDST(STR, store, MO_UL)
8726 DO_LDST(STRB, store, MO_UB)
8727 DO_LDST(STRH, store, MO_UW)
8729 #undef DO_LDST
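/*
 * For reference, each DO_LDST(NAME, WHICH, MEMOP) line above expands
 * to four decodetree trans functions; for example
 * DO_LDST(LDR, load, MO_UL) produces (roughly):
 *
 *   static bool trans_LDR_ri(DisasContext *s, arg_ldst_ri *a)
 *   {
 *       return op_load_ri(s, a, MO_UL, get_mem_index(s));
 *   }
 *
 * plus trans_LDRT_ri, trans_LDR_rr and trans_LDRT_rr, where the 'T'
 * variants use get_a32_user_mem_index() for the unprivileged access.
 */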
8732 * Synchronization primitives
8735 static bool op_swp(DisasContext *s, arg_SWP *a, MemOp opc)
8737 TCGv_i32 addr, tmp;
8738 TCGv taddr;
8740 opc |= s->be_data;
8741 addr = load_reg(s, a->rn);
8742 taddr = gen_aa32_addr(s, addr, opc);
8743 tcg_temp_free_i32(addr);
8745 tmp = load_reg(s, a->rt2);
8746 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp, get_mem_index(s), opc);
8747 tcg_temp_free(taddr);
8749 store_reg(s, a->rt, tmp);
8750 return true;
8753 static bool trans_SWP(DisasContext *s, arg_SWP *a)
8755 return op_swp(s, a, MO_UL | MO_ALIGN);
8758 static bool trans_SWPB(DisasContext *s, arg_SWP *a)
8760 return op_swp(s, a, MO_UB);
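/*
 * SWP/SWPB are implemented above as a single atomic exchange: the
 * old memory value ends up in Rt and the value from Rt2 is stored,
 * which matches the architectural swap without needing any locking.
 */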
8764 * Load/Store Exclusive and Load-Acquire/Store-Release
8767 static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel)
8769 TCGv_i32 addr;
8770 /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
8771 bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);
8773 /* We UNDEF for these UNPREDICTABLE cases. */
8774 if (a->rd == 15 || a->rn == 15 || a->rt == 15
8775 || a->rd == a->rn || a->rd == a->rt
8776 || (!v8a && s->thumb && (a->rd == 13 || a->rt == 13))
8777 || (mop == MO_64
8778 && (a->rt2 == 15
8779 || a->rd == a->rt2
8780 || (!v8a && s->thumb && a->rt2 == 13)))) {
8781 unallocated_encoding(s);
8782 return true;
8785 if (rel) {
8786 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
8789 addr = tcg_temp_local_new_i32();
8790 load_reg_var(s, addr, a->rn);
8791 tcg_gen_addi_i32(addr, addr, a->imm);
8793 gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop);
8794 tcg_temp_free_i32(addr);
8795 return true;
8798 static bool trans_STREX(DisasContext *s, arg_STREX *a)
8800 if (!ENABLE_ARCH_6) {
8801 return false;
8803 return op_strex(s, a, MO_32, false);
8806 static bool trans_STREXD_a32(DisasContext *s, arg_STREX *a)
8808 if (!ENABLE_ARCH_6K) {
8809 return false;
8811 /* We UNDEF for these UNPREDICTABLE cases. */
8812 if (a->rt & 1) {
8813 unallocated_encoding(s);
8814 return true;
8816 a->rt2 = a->rt + 1;
8817 return op_strex(s, a, MO_64, false);
8820 static bool trans_STREXD_t32(DisasContext *s, arg_STREX *a)
8822 return op_strex(s, a, MO_64, false);
8825 static bool trans_STREXB(DisasContext *s, arg_STREX *a)
8827 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
8828 return false;
8830 return op_strex(s, a, MO_8, false);
8833 static bool trans_STREXH(DisasContext *s, arg_STREX *a)
8835 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
8836 return false;
8838 return op_strex(s, a, MO_16, false);
8841 static bool trans_STLEX(DisasContext *s, arg_STREX *a)
8843 if (!ENABLE_ARCH_8) {
8844 return false;
8846 return op_strex(s, a, MO_32, true);
8849 static bool trans_STLEXD_a32(DisasContext *s, arg_STREX *a)
8851 if (!ENABLE_ARCH_8) {
8852 return false;
8854 /* We UNDEF for these UNPREDICTABLE cases. */
8855 if (a->rt & 1) {
8856 unallocated_encoding(s);
8857 return true;
8859 a->rt2 = a->rt + 1;
8860 return op_strex(s, a, MO_64, true);
8863 static bool trans_STLEXD_t32(DisasContext *s, arg_STREX *a)
8865 if (!ENABLE_ARCH_8) {
8866 return false;
8868 return op_strex(s, a, MO_64, true);
8871 static bool trans_STLEXB(DisasContext *s, arg_STREX *a)
8873 if (!ENABLE_ARCH_8) {
8874 return false;
8876 return op_strex(s, a, MO_8, true);
8879 static bool trans_STLEXH(DisasContext *s, arg_STREX *a)
8881 if (!ENABLE_ARCH_8) {
8882 return false;
8884 return op_strex(s, a, MO_16, true);
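/*
 * The STLEX* forms above differ from the plain STREX* forms only in
 * passing rel=true to op_strex(), which emits a release barrier
 * ahead of the store-exclusive sequence.
 */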
8887 static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
8889 TCGv_i32 addr, tmp;
8891 if (!ENABLE_ARCH_8) {
8892 return false;
8894 /* We UNDEF for these UNPREDICTABLE cases. */
8895 if (a->rn == 15 || a->rt == 15) {
8896 unallocated_encoding(s);
8897 return true;
8900 addr = load_reg(s, a->rn);
8901 tmp = load_reg(s, a->rt);
8902 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
8903 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
8904 disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);
8906 tcg_temp_free_i32(tmp);
8907 tcg_temp_free_i32(addr);
8908 return true;
8911 static bool trans_STL(DisasContext *s, arg_STL *a)
8913 return op_stl(s, a, MO_UL);
8916 static bool trans_STLB(DisasContext *s, arg_STL *a)
8918 return op_stl(s, a, MO_UB);
8921 static bool trans_STLH(DisasContext *s, arg_STL *a)
8923 return op_stl(s, a, MO_UW);
8926 static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq)
8928 TCGv_i32 addr;
8929 /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
8930 bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);
8932 /* We UNDEF for these UNPREDICTABLE cases. */
8933 if (a->rn == 15 || a->rt == 15
8934 || (!v8a && s->thumb && a->rt == 13)
8935 || (mop == MO_64
8936 && (a->rt2 == 15 || a->rt == a->rt2
8937 || (!v8a && s->thumb && a->rt2 == 13)))) {
8938 unallocated_encoding(s);
8939 return true;
8942 addr = tcg_temp_local_new_i32();
8943 load_reg_var(s, addr, a->rn);
8944 tcg_gen_addi_i32(addr, addr, a->imm);
8946 gen_load_exclusive(s, a->rt, a->rt2, addr, mop);
8947 tcg_temp_free_i32(addr);
8949 if (acq) {
8950 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
8952 return true;
8955 static bool trans_LDREX(DisasContext *s, arg_LDREX *a)
8957 if (!ENABLE_ARCH_6) {
8958 return false;
8960 return op_ldrex(s, a, MO_32, false);
8963 static bool trans_LDREXD_a32(DisasContext *s, arg_LDREX *a)
8965 if (!ENABLE_ARCH_6K) {
8966 return false;
8968 /* We UNDEF for these UNPREDICTABLE cases. */
8969 if (a->rt & 1) {
8970 unallocated_encoding(s);
8971 return true;
8973 a->rt2 = a->rt + 1;
8974 return op_ldrex(s, a, MO_64, false);
8977 static bool trans_LDREXD_t32(DisasContext *s, arg_LDREX *a)
8979 return op_ldrex(s, a, MO_64, false);
8982 static bool trans_LDREXB(DisasContext *s, arg_LDREX *a)
8984 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
8985 return false;
8987 return op_ldrex(s, a, MO_8, false);
8990 static bool trans_LDREXH(DisasContext *s, arg_LDREX *a)
8992 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
8993 return false;
8995 return op_ldrex(s, a, MO_16, false);
8998 static bool trans_LDAEX(DisasContext *s, arg_LDREX *a)
9000 if (!ENABLE_ARCH_8) {
9001 return false;
9003 return op_ldrex(s, a, MO_32, true);
9006 static bool trans_LDAEXD_a32(DisasContext *s, arg_LDREX *a)
9008 if (!ENABLE_ARCH_8) {
9009 return false;
9011 /* We UNDEF for these UNPREDICTABLE cases. */
9012 if (a->rt & 1) {
9013 unallocated_encoding(s);
9014 return true;
9016 a->rt2 = a->rt + 1;
9017 return op_ldrex(s, a, MO_64, true);
9020 static bool trans_LDAEXD_t32(DisasContext *s, arg_LDREX *a)
9022 if (!ENABLE_ARCH_8) {
9023 return false;
9025 return op_ldrex(s, a, MO_64, true);
9028 static bool trans_LDAEXB(DisasContext *s, arg_LDREX *a)
9030 if (!ENABLE_ARCH_8) {
9031 return false;
9033 return op_ldrex(s, a, MO_8, true);
9036 static bool trans_LDAEXH(DisasContext *s, arg_LDREX *a)
9038 if (!ENABLE_ARCH_8) {
9039 return false;
9041 return op_ldrex(s, a, MO_16, true);
9044 static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)
9046 TCGv_i32 addr, tmp;
9048 if (!ENABLE_ARCH_8) {
9049 return false;
9051 /* We UNDEF for these UNPREDICTABLE cases. */
9052 if (a->rn == 15 || a->rt == 15) {
9053 unallocated_encoding(s);
9054 return true;
9057 addr = load_reg(s, a->rn);
9058 tmp = tcg_temp_new_i32();
9059 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
9060 disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
9061 tcg_temp_free_i32(addr);
9063 store_reg(s, a->rt, tmp);
9064 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
9065 return true;
9068 static bool trans_LDA(DisasContext *s, arg_LDA *a)
9070 return op_lda(s, a, MO_UL);
9073 static bool trans_LDAB(DisasContext *s, arg_LDA *a)
9075 return op_lda(s, a, MO_UB);
9078 static bool trans_LDAH(DisasContext *s, arg_LDA *a)
9080 return op_lda(s, a, MO_UW);
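/*
 * The LDA* forms above are modelled as an ordinary load followed by
 * a memory barrier, mirroring op_stl() which emits its barrier
 * before the store; no exclusive-monitor state is involved for
 * these instructions.
 */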
9084 * Media instructions
9087 static bool trans_USADA8(DisasContext *s, arg_USADA8 *a)
9089 TCGv_i32 t1, t2;
9091 if (!ENABLE_ARCH_6) {
9092 return false;
9095 t1 = load_reg(s, a->rn);
9096 t2 = load_reg(s, a->rm);
9097 gen_helper_usad8(t1, t1, t2);
9098 tcg_temp_free_i32(t2);
9099 if (a->ra != 15) {
9100 t2 = load_reg(s, a->ra);
9101 tcg_gen_add_i32(t1, t1, t2);
9102 tcg_temp_free_i32(t2);
9104 store_reg(s, a->rd, t1);
9105 return true;
9108 static bool op_bfx(DisasContext *s, arg_UBFX *a, bool u)
9110 TCGv_i32 tmp;
9111 int width = a->widthm1 + 1;
9112 int shift = a->lsb;
9114 if (!ENABLE_ARCH_6T2) {
9115 return false;
9117 if (shift + width > 32) {
9118 /* UNPREDICTABLE; we choose to UNDEF */
9119 unallocated_encoding(s);
9120 return true;
9123 tmp = load_reg(s, a->rn);
9124 if (u) {
9125 tcg_gen_extract_i32(tmp, tmp, shift, width);
9126 } else {
9127 tcg_gen_sextract_i32(tmp, tmp, shift, width);
9129 store_reg(s, a->rd, tmp);
9130 return true;
9133 static bool trans_SBFX(DisasContext *s, arg_SBFX *a)
9135 return op_bfx(s, a, false);
9138 static bool trans_UBFX(DisasContext *s, arg_UBFX *a)
9140 return op_bfx(s, a, true);
9143 static bool trans_BFCI(DisasContext *s, arg_BFCI *a)
9145 TCGv_i32 tmp;
9146 int msb = a->msb, lsb = a->lsb;
9147 int width;
9149 if (!ENABLE_ARCH_6T2) {
9150 return false;
9152 if (msb < lsb) {
9153 /* UNPREDICTABLE; we choose to UNDEF */
9154 unallocated_encoding(s);
9155 return true;
9158 width = msb + 1 - lsb;
9159 if (a->rn == 15) {
9160 /* BFC */
9161 tmp = tcg_const_i32(0);
9162 } else {
9163 /* BFI */
9164 tmp = load_reg(s, a->rn);
9166 if (width != 32) {
9167 TCGv_i32 tmp2 = load_reg(s, a->rd);
9168 tcg_gen_deposit_i32(tmp, tmp2, tmp, lsb, width);
9169 tcg_temp_free_i32(tmp2);
9171 store_reg(s, a->rd, tmp);
9172 return true;
9175 static bool trans_UDF(DisasContext *s, arg_UDF *a)
9177 unallocated_encoding(s);
9178 return true;
9182 * Parallel addition and subtraction
9185 static bool op_par_addsub(DisasContext *s, arg_rrr *a,
9186 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
9188 TCGv_i32 t0, t1;
9190 if (s->thumb
9191 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
9192 : !ENABLE_ARCH_6) {
9193 return false;
9196 t0 = load_reg(s, a->rn);
9197 t1 = load_reg(s, a->rm);
9199 gen(t0, t0, t1);
9201 tcg_temp_free_i32(t1);
9202 store_reg(s, a->rd, t0);
9203 return true;
9206 static bool op_par_addsub_ge(DisasContext *s, arg_rrr *a,
9207 void (*gen)(TCGv_i32, TCGv_i32,
9208 TCGv_i32, TCGv_ptr))
9210 TCGv_i32 t0, t1;
9211 TCGv_ptr ge;
9213 if (s->thumb
9214 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
9215 : !ENABLE_ARCH_6) {
9216 return false;
9219 t0 = load_reg(s, a->rn);
9220 t1 = load_reg(s, a->rm);
9222 ge = tcg_temp_new_ptr();
9223 tcg_gen_addi_ptr(ge, cpu_env, offsetof(CPUARMState, GE));
9224 gen(t0, t0, t1, ge);
9226 tcg_temp_free_ptr(ge);
9227 tcg_temp_free_i32(t1);
9228 store_reg(s, a->rd, t0);
9229 return true;
9232 #define DO_PAR_ADDSUB(NAME, helper) \
9233 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
9235 return op_par_addsub(s, a, helper); \
9238 #define DO_PAR_ADDSUB_GE(NAME, helper) \
9239 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
9241 return op_par_addsub_ge(s, a, helper); \
9244 DO_PAR_ADDSUB_GE(SADD16, gen_helper_sadd16)
9245 DO_PAR_ADDSUB_GE(SASX, gen_helper_saddsubx)
9246 DO_PAR_ADDSUB_GE(SSAX, gen_helper_ssubaddx)
9247 DO_PAR_ADDSUB_GE(SSUB16, gen_helper_ssub16)
9248 DO_PAR_ADDSUB_GE(SADD8, gen_helper_sadd8)
9249 DO_PAR_ADDSUB_GE(SSUB8, gen_helper_ssub8)
9251 DO_PAR_ADDSUB_GE(UADD16, gen_helper_uadd16)
9252 DO_PAR_ADDSUB_GE(UASX, gen_helper_uaddsubx)
9253 DO_PAR_ADDSUB_GE(USAX, gen_helper_usubaddx)
9254 DO_PAR_ADDSUB_GE(USUB16, gen_helper_usub16)
9255 DO_PAR_ADDSUB_GE(UADD8, gen_helper_uadd8)
9256 DO_PAR_ADDSUB_GE(USUB8, gen_helper_usub8)
9258 DO_PAR_ADDSUB(QADD16, gen_helper_qadd16)
9259 DO_PAR_ADDSUB(QASX, gen_helper_qaddsubx)
9260 DO_PAR_ADDSUB(QSAX, gen_helper_qsubaddx)
9261 DO_PAR_ADDSUB(QSUB16, gen_helper_qsub16)
9262 DO_PAR_ADDSUB(QADD8, gen_helper_qadd8)
9263 DO_PAR_ADDSUB(QSUB8, gen_helper_qsub8)
9265 DO_PAR_ADDSUB(UQADD16, gen_helper_uqadd16)
9266 DO_PAR_ADDSUB(UQASX, gen_helper_uqaddsubx)
9267 DO_PAR_ADDSUB(UQSAX, gen_helper_uqsubaddx)
9268 DO_PAR_ADDSUB(UQSUB16, gen_helper_uqsub16)
9269 DO_PAR_ADDSUB(UQADD8, gen_helper_uqadd8)
9270 DO_PAR_ADDSUB(UQSUB8, gen_helper_uqsub8)
9272 DO_PAR_ADDSUB(SHADD16, gen_helper_shadd16)
9273 DO_PAR_ADDSUB(SHASX, gen_helper_shaddsubx)
9274 DO_PAR_ADDSUB(SHSAX, gen_helper_shsubaddx)
9275 DO_PAR_ADDSUB(SHSUB16, gen_helper_shsub16)
9276 DO_PAR_ADDSUB(SHADD8, gen_helper_shadd8)
9277 DO_PAR_ADDSUB(SHSUB8, gen_helper_shsub8)
9279 DO_PAR_ADDSUB(UHADD16, gen_helper_uhadd16)
9280 DO_PAR_ADDSUB(UHASX, gen_helper_uhaddsubx)
9281 DO_PAR_ADDSUB(UHSAX, gen_helper_uhsubaddx)
9282 DO_PAR_ADDSUB(UHSUB16, gen_helper_uhsub16)
9283 DO_PAR_ADDSUB(UHADD8, gen_helper_uhadd8)
9284 DO_PAR_ADDSUB(UHSUB8, gen_helper_uhsub8)
9286 #undef DO_PAR_ADDSUB
9287 #undef DO_PAR_ADDSUB_GE
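/*
 * The _GE variants above are the plain signed/unsigned parallel
 * add/sub instructions, which set the CPSR.GE flags; their helpers
 * are passed a pointer to the GE field so that a later SEL can
 * select bytes or halfwords based on those flags. The saturating
 * and halving variants leave GE alone and use the simpler wrapper.
 */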
9290 * Packing, unpacking, saturation, and reversal
9293 static bool trans_PKH(DisasContext *s, arg_PKH *a)
9295 TCGv_i32 tn, tm;
9296 int shift = a->imm;
9298 if (s->thumb
9299 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
9300 : !ENABLE_ARCH_6) {
9301 return false;
9304 tn = load_reg(s, a->rn);
9305 tm = load_reg(s, a->rm);
9306 if (a->tb) {
9307 /* PKHTB */
9308 if (shift == 0) {
9309 shift = 31;
9311 tcg_gen_sari_i32(tm, tm, shift);
9312 tcg_gen_deposit_i32(tn, tn, tm, 0, 16);
9313 } else {
9314 /* PKHBT */
9315 tcg_gen_shli_i32(tm, tm, shift);
9316 tcg_gen_deposit_i32(tn, tm, tn, 0, 16);
9318 tcg_temp_free_i32(tm);
9319 store_reg(s, a->rd, tn);
9320 return true;
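/*
 * Note: for PKHTB an immediate shift amount of 0 encodes ASR #32;
 * shifting by 31 instead yields the same all-sign-bits value, so
 * the halfword deposited into the result is identical.
 */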
9323 static bool op_sat(DisasContext *s, arg_sat *a,
9324 void (*gen)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
9326 TCGv_i32 tmp, satimm;
9327 int shift = a->imm;
9329 if (!ENABLE_ARCH_6) {
9330 return false;
9333 tmp = load_reg(s, a->rn);
9334 if (a->sh) {
9335 tcg_gen_sari_i32(tmp, tmp, shift ? shift : 31);
9336 } else {
9337 tcg_gen_shli_i32(tmp, tmp, shift);
9340 satimm = tcg_const_i32(a->satimm);
9341 gen(tmp, cpu_env, tmp, satimm);
9342 tcg_temp_free_i32(satimm);
9344 store_reg(s, a->rd, tmp);
9345 return true;
9348 static bool trans_SSAT(DisasContext *s, arg_sat *a)
9350 return op_sat(s, a, gen_helper_ssat);
9353 static bool trans_USAT(DisasContext *s, arg_sat *a)
9355 return op_sat(s, a, gen_helper_usat);
9358 static bool trans_SSAT16(DisasContext *s, arg_sat *a)
9360 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9361 return false;
9363 return op_sat(s, a, gen_helper_ssat16);
9366 static bool trans_USAT16(DisasContext *s, arg_sat *a)
9368 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9369 return false;
9371 return op_sat(s, a, gen_helper_usat16);
9374 static bool op_xta(DisasContext *s, arg_rrr_rot *a,
9375 void (*gen_extract)(TCGv_i32, TCGv_i32),
9376 void (*gen_add)(TCGv_i32, TCGv_i32, TCGv_i32))
9378 TCGv_i32 tmp;
9380 if (!ENABLE_ARCH_6) {
9381 return false;
9384 tmp = load_reg(s, a->rm);
9386 * TODO: In many cases we could do a shift instead of a rotate.
9387 * Combined with a simple extend, that becomes an extract.
9389 tcg_gen_rotri_i32(tmp, tmp, a->rot * 8);
9390 gen_extract(tmp, tmp);
9392 if (a->rn != 15) {
9393 TCGv_i32 tmp2 = load_reg(s, a->rn);
9394 gen_add(tmp, tmp, tmp2);
9395 tcg_temp_free_i32(tmp2);
9397 store_reg(s, a->rd, tmp);
9398 return true;
9401 static bool trans_SXTAB(DisasContext *s, arg_rrr_rot *a)
9403 return op_xta(s, a, tcg_gen_ext8s_i32, tcg_gen_add_i32);
9406 static bool trans_SXTAH(DisasContext *s, arg_rrr_rot *a)
9408 return op_xta(s, a, tcg_gen_ext16s_i32, tcg_gen_add_i32);
9411 static bool trans_SXTAB16(DisasContext *s, arg_rrr_rot *a)
9413 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9414 return false;
9416 return op_xta(s, a, gen_helper_sxtb16, gen_add16);
9419 static bool trans_UXTAB(DisasContext *s, arg_rrr_rot *a)
9421 return op_xta(s, a, tcg_gen_ext8u_i32, tcg_gen_add_i32);
9424 static bool trans_UXTAH(DisasContext *s, arg_rrr_rot *a)
9426 return op_xta(s, a, tcg_gen_ext16u_i32, tcg_gen_add_i32);
9429 static bool trans_UXTAB16(DisasContext *s, arg_rrr_rot *a)
9431 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9432 return false;
9434 return op_xta(s, a, gen_helper_uxtb16, gen_add16);
9437 static bool trans_SEL(DisasContext *s, arg_rrr *a)
9439 TCGv_i32 t1, t2, t3;
9441 if (s->thumb
9442 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
9443 : !ENABLE_ARCH_6) {
9444 return false;
9447 t1 = load_reg(s, a->rn);
9448 t2 = load_reg(s, a->rm);
9449 t3 = tcg_temp_new_i32();
9450 tcg_gen_ld_i32(t3, cpu_env, offsetof(CPUARMState, GE));
9451 gen_helper_sel_flags(t1, t3, t1, t2);
9452 tcg_temp_free_i32(t3);
9453 tcg_temp_free_i32(t2);
9454 store_reg(s, a->rd, t1);
9455 return true;
9458 static bool op_rr(DisasContext *s, arg_rr *a,
9459 void (*gen)(TCGv_i32, TCGv_i32))
9461 TCGv_i32 tmp;
9463 tmp = load_reg(s, a->rm);
9464 gen(tmp, tmp);
9465 store_reg(s, a->rd, tmp);
9466 return true;
9469 static bool trans_REV(DisasContext *s, arg_rr *a)
9471 if (!ENABLE_ARCH_6) {
9472 return false;
9474 return op_rr(s, a, tcg_gen_bswap32_i32);
9477 static bool trans_REV16(DisasContext *s, arg_rr *a)
9479 if (!ENABLE_ARCH_6) {
9480 return false;
9482 return op_rr(s, a, gen_rev16);
9485 static bool trans_REVSH(DisasContext *s, arg_rr *a)
9487 if (!ENABLE_ARCH_6) {
9488 return false;
9490 return op_rr(s, a, gen_revsh);
9493 static bool trans_RBIT(DisasContext *s, arg_rr *a)
9495 if (!ENABLE_ARCH_6T2) {
9496 return false;
9498 return op_rr(s, a, gen_helper_rbit);
9502 * Signed multiply, signed and unsigned divide
9505 static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
9507 TCGv_i32 t1, t2;
9509 if (!ENABLE_ARCH_6) {
9510 return false;
9513 t1 = load_reg(s, a->rn);
9514 t2 = load_reg(s, a->rm);
9515 if (m_swap) {
9516 gen_swap_half(t2);
9518 gen_smul_dual(t1, t2);
9520 if (sub) {
9521 /* This subtraction cannot overflow. */
9522 tcg_gen_sub_i32(t1, t1, t2);
9523 } else {
9525 * This addition cannot overflow 32 bits; however it may
9526 * overflow considered as a signed operation, in which case
9527 * we must set the Q flag.
9529 gen_helper_add_setq(t1, cpu_env, t1, t2);
9531 tcg_temp_free_i32(t2);
9533 if (a->ra != 15) {
9534 t2 = load_reg(s, a->ra);
9535 gen_helper_add_setq(t1, cpu_env, t1, t2);
9536 tcg_temp_free_i32(t2);
9538 store_reg(s, a->rd, t1);
9539 return true;
9542 static bool trans_SMLAD(DisasContext *s, arg_rrrr *a)
9544 return op_smlad(s, a, false, false);
9547 static bool trans_SMLADX(DisasContext *s, arg_rrrr *a)
9549 return op_smlad(s, a, true, false);
9552 static bool trans_SMLSD(DisasContext *s, arg_rrrr *a)
9554 return op_smlad(s, a, false, true);
9557 static bool trans_SMLSDX(DisasContext *s, arg_rrrr *a)
9559 return op_smlad(s, a, true, true);
9562 static bool op_smlald(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
9564 TCGv_i32 t1, t2;
9565 TCGv_i64 l1, l2;
9567 if (!ENABLE_ARCH_6) {
9568 return false;
9571 t1 = load_reg(s, a->rn);
9572 t2 = load_reg(s, a->rm);
9573 if (m_swap) {
9574 gen_swap_half(t2);
9576 gen_smul_dual(t1, t2);
9578 l1 = tcg_temp_new_i64();
9579 l2 = tcg_temp_new_i64();
9580 tcg_gen_ext_i32_i64(l1, t1);
9581 tcg_gen_ext_i32_i64(l2, t2);
9582 tcg_temp_free_i32(t1);
9583 tcg_temp_free_i32(t2);
9585 if (sub) {
9586 tcg_gen_sub_i64(l1, l1, l2);
9587 } else {
9588 tcg_gen_add_i64(l1, l1, l2);
9590 tcg_temp_free_i64(l2);
9592 gen_addq(s, l1, a->ra, a->rd);
9593 gen_storeq_reg(s, a->ra, a->rd, l1);
9594 tcg_temp_free_i64(l1);
9595 return true;
9598 static bool trans_SMLALD(DisasContext *s, arg_rrrr *a)
9600 return op_smlald(s, a, false, false);
9603 static bool trans_SMLALDX(DisasContext *s, arg_rrrr *a)
9605 return op_smlald(s, a, true, false);
9608 static bool trans_SMLSLD(DisasContext *s, arg_rrrr *a)
9610 return op_smlald(s, a, false, true);
9613 static bool trans_SMLSLDX(DisasContext *s, arg_rrrr *a)
9615 return op_smlald(s, a, true, true);
9618 static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub)
9620 TCGv_i32 t1, t2;
9622 if (s->thumb
9623 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
9624 : !ENABLE_ARCH_6) {
9625 return false;
9628 t1 = load_reg(s, a->rn);
9629 t2 = load_reg(s, a->rm);
9630 tcg_gen_muls2_i32(t2, t1, t1, t2);
9632 if (a->ra != 15) {
9633 TCGv_i32 t3 = load_reg(s, a->ra);
9634 if (sub) {
9636 * For SMMLS, we need a 64-bit subtract: it propagates the borrow
9637 * caused by a non-zero multiplicand lowpart, and it also yields
9638 * the correct result lowpart for rounding.
9640 TCGv_i32 zero = tcg_const_i32(0);
9641 tcg_gen_sub2_i32(t2, t1, zero, t3, t2, t1);
9642 tcg_temp_free_i32(zero);
9643 } else {
9644 tcg_gen_add_i32(t1, t1, t3);
9646 tcg_temp_free_i32(t3);
9648 if (round) {
9650 * Adding 0x80000000 to the 64-bit quantity means that we carry
9651 * into the high word exactly when the low word has its msb set.
9653 tcg_gen_shri_i32(t2, t2, 31);
9654 tcg_gen_add_i32(t1, t1, t2);
9656 tcg_temp_free_i32(t2);
9657 store_reg(s, a->rd, t1);
9658 return true;
9661 static bool trans_SMMLA(DisasContext *s, arg_rrrr *a)
9663 return op_smmla(s, a, false, false);
9666 static bool trans_SMMLAR(DisasContext *s, arg_rrrr *a)
9668 return op_smmla(s, a, true, false);
9671 static bool trans_SMMLS(DisasContext *s, arg_rrrr *a)
9673 return op_smmla(s, a, false, true);
9676 static bool trans_SMMLSR(DisasContext *s, arg_rrrr *a)
9678 return op_smmla(s, a, true, true);
9681 static bool op_div(DisasContext *s, arg_rrr *a, bool u)
9683 TCGv_i32 t1, t2;
9685 if (s->thumb
9686 ? !dc_isar_feature(aa32_thumb_div, s)
9687 : !dc_isar_feature(aa32_arm_div, s)) {
9688 return false;
9691 t1 = load_reg(s, a->rn);
9692 t2 = load_reg(s, a->rm);
9693 if (u) {
9694 gen_helper_udiv(t1, t1, t2);
9695 } else {
9696 gen_helper_sdiv(t1, t1, t2);
9698 tcg_temp_free_i32(t2);
9699 store_reg(s, a->rd, t1);
9700 return true;
9703 static bool trans_SDIV(DisasContext *s, arg_rrr *a)
9705 return op_div(s, a, false);
9708 static bool trans_UDIV(DisasContext *s, arg_rrr *a)
9710 return op_div(s, a, true);
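/*
 * Division by zero and the INT_MIN / -1 overflow case are handled
 * inside the udiv/sdiv helpers themselves (not shown here), so no
 * extra checks need to be generated at translation time.
 */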
9714 * Block data transfer
9717 static TCGv_i32 op_addr_block_pre(DisasContext *s, arg_ldst_block *a, int n)
9719 TCGv_i32 addr = load_reg(s, a->rn);
9721 if (a->b) {
9722 if (a->i) {
9723 /* pre increment */
9724 tcg_gen_addi_i32(addr, addr, 4);
9725 } else {
9726 /* pre decrement */
9727 tcg_gen_addi_i32(addr, addr, -(n * 4));
9729 } else if (!a->i && n != 1) {
9730 /* post decrement */
9731 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9734 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
9736 * If the writeback is incrementing SP rather than
9737 * decrementing it, and the initial SP is below the
9738 * stack limit but the final written-back SP would
9739 * be above, then we must not perform any memory
9740 * accesses, but it is IMPDEF whether we generate
9741 * an exception. We choose to do so in this case.
9742 * At this point 'addr' is the lowest address, so
9743 * either the original SP (if incrementing) or our
9744 * final SP (if decrementing), so that's what we check.
9746 gen_helper_v8m_stackcheck(cpu_env, addr);
9749 return addr;
9752 static void op_addr_block_post(DisasContext *s, arg_ldst_block *a,
9753 TCGv_i32 addr, int n)
9755 if (a->w) {
9756 /* write back */
9757 if (!a->b) {
9758 if (a->i) {
9759 /* post increment */
9760 tcg_gen_addi_i32(addr, addr, 4);
9761 } else {
9762 /* post decrement */
9763 tcg_gen_addi_i32(addr, addr, -(n * 4));
9765 } else if (!a->i && n != 1) {
9766 /* pre decrement */
9767 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9769 store_reg(s, a->rn, addr);
9770 } else {
9771 tcg_temp_free_i32(addr);
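/*
 * For the block-transfer address helpers above, 'b' selects
 * before/after (IB/DB versus IA/DA) and 'i' increment versus
 * decrement; between them the pre/post pair cover all four LDM/STM
 * addressing modes while keeping 'addr' pointing at the lowest word
 * transferred.
 */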
9775 static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
9777 int i, j, n, list, mem_idx;
9778 bool user = a->u;
9779 TCGv_i32 addr, tmp, tmp2;
9781 if (user) {
9782 /* STM (user) */
9783 if (IS_USER(s)) {
9784 /* Only usable in supervisor mode. */
9785 unallocated_encoding(s);
9786 return true;
9790 list = a->list;
9791 n = ctpop16(list);
9792 if (n < min_n || a->rn == 15) {
9793 unallocated_encoding(s);
9794 return true;
9797 addr = op_addr_block_pre(s, a, n);
9798 mem_idx = get_mem_index(s);
9800 for (i = j = 0; i < 16; i++) {
9801 if (!(list & (1 << i))) {
9802 continue;
9805 if (user && i != 15) {
9806 tmp = tcg_temp_new_i32();
9807 tmp2 = tcg_const_i32(i);
9808 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
9809 tcg_temp_free_i32(tmp2);
9810 } else {
9811 tmp = load_reg(s, i);
9813 gen_aa32_st32(s, tmp, addr, mem_idx);
9814 tcg_temp_free_i32(tmp);
9816 /* No need to add after the last transfer. */
9817 if (++j != n) {
9818 tcg_gen_addi_i32(addr, addr, 4);
9822 op_addr_block_post(s, a, addr, n);
9823 return true;
9826 static bool trans_STM(DisasContext *s, arg_ldst_block *a)
9828 /* BitCount(list) < 1 is UNPREDICTABLE */
9829 return op_stm(s, a, 1);
9832 static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a)
9834 /* Writeback register in register list is UNPREDICTABLE for T32. */
9835 if (a->w && (a->list & (1 << a->rn))) {
9836 unallocated_encoding(s);
9837 return true;
9839 /* BitCount(list) < 2 is UNPREDICTABLE */
9840 return op_stm(s, a, 2);
9843 static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
9845 int i, j, n, list, mem_idx;
9846 bool loaded_base;
9847 bool user = a->u;
9848 bool exc_return = false;
9849 TCGv_i32 addr, tmp, tmp2, loaded_var;
9851 if (user) {
9852 /* LDM (user), LDM (exception return) */
9853 if (IS_USER(s)) {
9854 /* Only usable in supervisor mode. */
9855 unallocated_encoding(s);
9856 return true;
9858 if (extract32(a->list, 15, 1)) {
9859 exc_return = true;
9860 user = false;
9861 } else {
9862 /* LDM (user) does not allow writeback. */
9863 if (a->w) {
9864 unallocated_encoding(s);
9865 return true;
9870 list = a->list;
9871 n = ctpop16(list);
9872 if (n < min_n || a->rn == 15) {
9873 unallocated_encoding(s);
9874 return true;
9877 addr = op_addr_block_pre(s, a, n);
9878 mem_idx = get_mem_index(s);
9879 loaded_base = false;
9880 loaded_var = NULL;
9882 for (i = j = 0; i < 16; i++) {
9883 if (!(list & (1 << i))) {
9884 continue;
9887 tmp = tcg_temp_new_i32();
9888 gen_aa32_ld32u(s, tmp, addr, mem_idx);
9889 if (user) {
9890 tmp2 = tcg_const_i32(i);
9891 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
9892 tcg_temp_free_i32(tmp2);
9893 tcg_temp_free_i32(tmp);
9894 } else if (i == a->rn) {
9895 loaded_var = tmp;
9896 loaded_base = true;
9897 } else if (i == 15 && exc_return) {
9898 store_pc_exc_ret(s, tmp);
9899 } else {
9900 store_reg_from_load(s, i, tmp);
9903 /* No need to add after the last transfer. */
9904 if (++j != n) {
9905 tcg_gen_addi_i32(addr, addr, 4);
9909 op_addr_block_post(s, a, addr, n);
9911 if (loaded_base) {
9912 /* Note that we reject base == pc above. */
9913 store_reg(s, a->rn, loaded_var);
9916 if (exc_return) {
9917 /* Restore CPSR from SPSR. */
9918 tmp = load_cpu_field(spsr);
9919 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9920 gen_io_start();
9922 gen_helper_cpsr_write_eret(cpu_env, tmp);
9923 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9924 gen_io_end();
9926 tcg_temp_free_i32(tmp);
9927 /* Must exit loop to check un-masked IRQs */
9928 s->base.is_jmp = DISAS_EXIT;
9930 return true;
9933 static bool trans_LDM_a32(DisasContext *s, arg_ldst_block *a)
9936 * Writeback register in register list is UNPREDICTABLE
9937 * for ArchVersion() >= 7. Prior to v7, A32 would write
9938 * an UNKNOWN value to the base register.
9940 if (ENABLE_ARCH_7 && a->w && (a->list & (1 << a->rn))) {
9941 unallocated_encoding(s);
9942 return true;
9944 /* BitCount(list) < 1 is UNPREDICTABLE */
9945 return do_ldm(s, a, 1);
9948 static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a)
9950 /* Writeback register in register list is UNPREDICTABLE for T32. */
9951 if (a->w && (a->list & (1 << a->rn))) {
9952 unallocated_encoding(s);
9953 return true;
9955 /* BitCount(list) < 2 is UNPREDICTABLE */
9956 return do_ldm(s, a, 2);
9959 static bool trans_LDM_t16(DisasContext *s, arg_ldst_block *a)
9961 /* Writeback is conditional on the base register not being loaded. */
9962 a->w = !(a->list & (1 << a->rn));
9963 /* BitCount(list) < 1 is UNPREDICTABLE */
9964 return do_ldm(s, a, 1);
9968 * Branch, branch with link
9971 static bool trans_B(DisasContext *s, arg_i *a)
9973 gen_jmp(s, read_pc(s) + a->imm);
9974 return true;
9977 static bool trans_B_cond_thumb(DisasContext *s, arg_ci *a)
9979 /* This insn has its cond in the encoding; it must be outside an IT block. */
9980 if (a->cond >= 0xe) {
9981 return false;
9983 if (s->condexec_mask) {
9984 unallocated_encoding(s);
9985 return true;
9987 arm_skip_unless(s, a->cond);
9988 gen_jmp(s, read_pc(s) + a->imm);
9989 return true;
9992 static bool trans_BL(DisasContext *s, arg_i *a)
9994 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
9995 gen_jmp(s, read_pc(s) + a->imm);
9996 return true;
9999 static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
10001 TCGv_i32 tmp;
10003 /* For A32, ARCH(5) is checked near the start of the uncond block. */
10004 if (s->thumb && (a->imm & 2)) {
10005 return false;
10007 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
10008 tmp = tcg_const_i32(!s->thumb);
10009 store_cpu_field(tmp, thumb);
10010 gen_jmp(s, (read_pc(s) & ~3) + a->imm);
10011 return true;
10014 static bool trans_BL_BLX_prefix(DisasContext *s, arg_BL_BLX_prefix *a)
10016 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
10017 tcg_gen_movi_i32(cpu_R[14], read_pc(s) + (a->imm << 12));
10018 return true;
10021 static bool trans_BL_suffix(DisasContext *s, arg_BL_suffix *a)
10023 TCGv_i32 tmp = tcg_temp_new_i32();
10025 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
10026 tcg_gen_addi_i32(tmp, cpu_R[14], (a->imm << 1) | 1);
10027 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
10028 gen_bx(s, tmp);
10029 return true;
10032 static bool trans_BLX_suffix(DisasContext *s, arg_BLX_suffix *a)
10034 TCGv_i32 tmp;
10036 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
10037 if (!ENABLE_ARCH_5) {
10038 return false;
10040 tmp = tcg_temp_new_i32();
10041 tcg_gen_addi_i32(tmp, cpu_R[14], a->imm << 1);
10042 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
10043 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
10044 gen_bx(s, tmp);
10045 return true;
10048 static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
10050 TCGv_i32 addr, tmp;
10052 tmp = load_reg(s, a->rm);
10053 if (half) {
10054 tcg_gen_add_i32(tmp, tmp, tmp);
10056 addr = load_reg(s, a->rn);
10057 tcg_gen_add_i32(addr, addr, tmp);
10059 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
10060 half ? MO_UW | s->be_data : MO_UB);
10061 tcg_temp_free_i32(addr);
10063 tcg_gen_add_i32(tmp, tmp, tmp);
10064 tcg_gen_addi_i32(tmp, tmp, read_pc(s));
10065 store_reg(s, 15, tmp);
10066 return true;
10069 static bool trans_TBB(DisasContext *s, arg_tbranch *a)
10071 return op_tbranch(s, a, false);
10074 static bool trans_TBH(DisasContext *s, arg_tbranch *a)
10076 return op_tbranch(s, a, true);
10079 static bool trans_CBZ(DisasContext *s, arg_CBZ *a)
10081 TCGv_i32 tmp = load_reg(s, a->rn);
10083 arm_gen_condlabel(s);
10084 tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE,
10085 tmp, 0, s->condlabel);
10086 tcg_temp_free_i32(tmp);
10087 gen_jmp(s, read_pc(s) + a->imm);
10088 return true;
10092 * Supervisor call - both T32 & A32 come here so we need to check
10093 * which mode we are in when checking for semihosting.
10096 static bool trans_SVC(DisasContext *s, arg_SVC *a)
10098 const uint32_t semihost_imm = s->thumb ? 0xab : 0x123456;
10100 if (!arm_dc_feature(s, ARM_FEATURE_M) && semihosting_enabled() &&
10101 #ifndef CONFIG_USER_ONLY
10102 !IS_USER(s) &&
10103 #endif
10104 (a->imm == semihost_imm)) {
10105 gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
10106 } else {
10107 gen_set_pc_im(s, s->base.pc_next);
10108 s->svc_imm = a->imm;
10109 s->base.is_jmp = DISAS_SWI;
10111 return true;
10115 * Unconditional system instructions
10118 static bool trans_RFE(DisasContext *s, arg_RFE *a)
10120 static const int8_t pre_offset[4] = {
10121 /* DA */ -4, /* IA */ 0, /* DB */ -8, /* IB */ 4
10123 static const int8_t post_offset[4] = {
10124 /* DA */ -8, /* IA */ 4, /* DB */ -4, /* IB */ 0
10126 TCGv_i32 addr, t1, t2;
10128 if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
10129 return false;
10131 if (IS_USER(s)) {
10132 unallocated_encoding(s);
10133 return true;
10136 addr = load_reg(s, a->rn);
10137 tcg_gen_addi_i32(addr, addr, pre_offset[a->pu]);
10139 /* Load PC into tmp and CPSR into tmp2. */
10140 t1 = tcg_temp_new_i32();
10141 gen_aa32_ld32u(s, t1, addr, get_mem_index(s));
10142 tcg_gen_addi_i32(addr, addr, 4);
10143 t2 = tcg_temp_new_i32();
10144 gen_aa32_ld32u(s, t2, addr, get_mem_index(s));
10146 if (a->w) {
10147 /* Base writeback. */
10148 tcg_gen_addi_i32(addr, addr, post_offset[a->pu]);
10149 store_reg(s, a->rn, addr);
10150 } else {
10151 tcg_temp_free_i32(addr);
10153 gen_rfe(s, t1, t2);
10154 return true;
10157 static bool trans_SRS(DisasContext *s, arg_SRS *a)
10159 if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
10160 return false;
10162 gen_srs(s, a->mode, a->pu, a->w);
10163 return true;
10166 static bool trans_CPS(DisasContext *s, arg_CPS *a)
10168 uint32_t mask, val;
10170 if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
10171 return false;
10173 if (IS_USER(s)) {
10174 /* Implemented as NOP in user mode. */
10175 return true;
10177 /* TODO: There are quite a lot of UNPREDICTABLE argument combinations. */
10179 mask = val = 0;
10180 if (a->imod & 2) {
10181 if (a->A) {
10182 mask |= CPSR_A;
10184 if (a->I) {
10185 mask |= CPSR_I;
10187 if (a->F) {
10188 mask |= CPSR_F;
10190 if (a->imod & 1) {
10191 val |= mask;
10194 if (a->M) {
10195 mask |= CPSR_M;
10196 val |= a->mode;
10198 if (mask) {
10199 gen_set_psr_im(s, mask, 0, val);
10201 return true;
10204 static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a)
10206 TCGv_i32 tmp, addr, el;
10208 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
10209 return false;
10211 if (IS_USER(s)) {
10212 /* Implemented as NOP in user mode. */
10213 return true;
10216 tmp = tcg_const_i32(a->im);
10217 /* FAULTMASK */
10218 if (a->F) {
10219 addr = tcg_const_i32(19);
10220 gen_helper_v7m_msr(cpu_env, addr, tmp);
10221 tcg_temp_free_i32(addr);
10223 /* PRIMASK */
10224 if (a->I) {
10225 addr = tcg_const_i32(16);
10226 gen_helper_v7m_msr(cpu_env, addr, tmp);
10227 tcg_temp_free_i32(addr);
10229 el = tcg_const_i32(s->current_el);
10230 gen_helper_rebuild_hflags_m32(cpu_env, el);
10231 tcg_temp_free_i32(el);
10232 tcg_temp_free_i32(tmp);
10233 gen_lookup_tb(s);
10234 return true;
10238 * Clear-Exclusive, Barriers
10241 static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
10243 if (s->thumb
10244 ? !ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)
10245 : !ENABLE_ARCH_6K) {
10246 return false;
10248 gen_clrex(s);
10249 return true;
10252 static bool trans_DSB(DisasContext *s, arg_DSB *a)
10254 if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
10255 return false;
10257 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
10258 return true;
10261 static bool trans_DMB(DisasContext *s, arg_DMB *a)
10263 return trans_DSB(s, NULL);
10266 static bool trans_ISB(DisasContext *s, arg_ISB *a)
10268 if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
10269 return false;
10272 * We need to break the TB after this insn to execute
10273 * self-modifying code correctly and also to take
10274 * any pending interrupts immediately.
10276 gen_goto_tb(s, 0, s->base.pc_next);
10277 return true;
10280 static bool trans_SB(DisasContext *s, arg_SB *a)
10282 if (!dc_isar_feature(aa32_sb, s)) {
10283 return false;
10286 * TODO: There is no speculation barrier opcode
10287 * for TCG; MB and end the TB instead.
10289 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
10290 gen_goto_tb(s, 0, s->base.pc_next);
10291 return true;
10294 static bool trans_SETEND(DisasContext *s, arg_SETEND *a)
10296 if (!ENABLE_ARCH_6) {
10297 return false;
10299 if (a->E != (s->be_data == MO_BE)) {
10300 gen_helper_setend(cpu_env);
10301 s->base.is_jmp = DISAS_UPDATE;
10303 return true;
10307 * Preload instructions
10308 * All are nops, contingent on the appropriate arch level.
10311 static bool trans_PLD(DisasContext *s, arg_PLD *a)
10313 return ENABLE_ARCH_5TE;
10316 static bool trans_PLDW(DisasContext *s, arg_PLD *a)
10318 return arm_dc_feature(s, ARM_FEATURE_V7MP);
10321 static bool trans_PLI(DisasContext *s, arg_PLD *a)
10323 return ENABLE_ARCH_7;
10327 * If-then
10330 static bool trans_IT(DisasContext *s, arg_IT *a)
10332 int cond_mask = a->cond_mask;
10335 * No actual code generated for this insn, just setup state.
10337 * Combinations of firstcond and mask which set up an 0b1111
10338 * condition are UNPREDICTABLE; we take the CONSTRAINED
10339 * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
10340 * i.e. both meaning "execute always".
10342 s->condexec_cond = (cond_mask >> 4) & 0xe;
10343 s->condexec_mask = cond_mask & 0x1f;
10344 return true;
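/*
 * Note the internal IT-state convention used here: condexec_cond
 * holds firstcond with its low bit cleared, while that low bit is
 * carried at the top of condexec_mask, above the 4-bit mask from
 * the encoding.
 */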
10348 * Legacy decoder.
10351 static void disas_arm_insn(DisasContext *s, unsigned int insn)
10353 unsigned int cond = insn >> 28;
10355 /* M variants do not implement ARM mode; this must raise the INVSTATE
10356 * UsageFault exception.
10358 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10359 gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
10360 default_exception_el(s));
10361 return;
10364 if (cond == 0xf) {
10365 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
10366 * choose to UNDEF. In ARMv5 and above the space is used
10367 * for miscellaneous unconditional instructions.
10369 ARCH(5);
10371 /* Unconditional instructions. */
10372 /* TODO: Perhaps merge these into one decodetree output file. */
10373 if (disas_a32_uncond(s, insn) ||
10374 disas_vfp_uncond(s, insn) ||
10375 disas_neon_dp(s, insn) ||
10376 disas_neon_ls(s, insn) ||
10377 disas_neon_shared(s, insn)) {
10378 return;
10380 /* fall back to legacy decoder */
10382 if (((insn >> 25) & 7) == 1) {
10383 /* NEON Data processing. */
10384 if (disas_neon_data_insn(s, insn)) {
10385 goto illegal_op;
10387 return;
10389 if ((insn & 0x0e000f00) == 0x0c000100) {
10390 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
10391 /* iWMMXt register transfer. */
10392 if (extract32(s->c15_cpar, 1, 1)) {
10393 if (!disas_iwmmxt_insn(s, insn)) {
10394 return;
10399 goto illegal_op;
10401 if (cond != 0xe) {
10402 /* If the condition is not 'always execute', generate a conditional
10403 jump to the next instruction */
10404 arm_skip_unless(s, cond);
10407 /* TODO: Perhaps merge these into one decodetree output file. */
10408 if (disas_a32(s, insn) ||
10409 disas_vfp(s, insn)) {
10410 return;
10412 /* fall back to legacy decoder */
10414 switch ((insn >> 24) & 0xf) {
10415 case 0xc:
10416 case 0xd:
10417 case 0xe:
10418 if (((insn >> 8) & 0xe) == 10) {
10419 /* VFP, but failed disas_vfp. */
10420 goto illegal_op;
10422 if (disas_coproc_insn(s, insn)) {
10423 /* Coprocessor. */
10424 goto illegal_op;
10426 break;
10427 default:
10428 illegal_op:
10429 unallocated_encoding(s);
10430 break;
10434 static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
10437 * Return true if this is a 16 bit instruction. We must be precise
10438 * about this (matching the decode).
10440 if ((insn >> 11) < 0x1d) {
10441 /* Definitely a 16-bit instruction */
10442 return true;
10445 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
10446 * first half of a 32-bit Thumb insn. Thumb-1 cores might
10447 * end up actually treating this as two 16-bit insns, though,
10448 * if it's half of a bl/blx pair that might span a page boundary.
10450 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
10451 arm_dc_feature(s, ARM_FEATURE_M)) {
10452 /* Thumb2 cores (including all M profile ones) always treat
10453 * 32-bit insns as 32-bit.
10455 return false;
10458 if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
10459 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
10460 * is not on the next page; we merge this into a 32-bit
10461 * insn.
10463 return false;
10465 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
10466 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
10467 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
10468 * -- handle as single 16 bit insn
10470 return true;
10473 /* Translate a 32-bit thumb instruction. */
10474 static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
10477 * ARMv6-M supports a limited subset of Thumb2 instructions.
10478 * Other Thumb1 architectures allow only 32-bit
10479 * combined BL/BLX prefix and suffix.
10481 if (arm_dc_feature(s, ARM_FEATURE_M) &&
10482 !arm_dc_feature(s, ARM_FEATURE_V7)) {
10483 int i;
10484 bool found = false;
10485 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
10486 0xf3b08040 /* dsb */,
10487 0xf3b08050 /* dmb */,
10488 0xf3b08060 /* isb */,
10489 0xf3e08000 /* mrs */,
10490 0xf000d000 /* bl */};
10491 static const uint32_t armv6m_mask[] = {0xffe0d000,
10492 0xfff0d0f0,
10493 0xfff0d0f0,
10494 0xfff0d0f0,
10495 0xffe0d000,
10496 0xf800d000};
10498 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
10499 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
10500 found = true;
10501 break;
10504 if (!found) {
10505 goto illegal_op;
10507 } else if ((insn & 0xf800e800) != 0xf000e800) {
10508 ARCH(6T2);
10511 if ((insn & 0xef000000) == 0xef000000) {
10513 * T32 encodings 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
10514 * transform into
10515 * A32 encodings 0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
10517 uint32_t a32_insn = (insn & 0xe2ffffff) |
10518 ((insn & (1 << 28)) >> 4) | (1 << 28);
10520 if (disas_neon_dp(s, a32_insn)) {
10521 return;
10525 if ((insn & 0xff100000) == 0xf9000000) {
10527 * T32 encodings 0b1111_1001_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
10528 * transform into
10529 * A32 encodings 0b1111_0100_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
10531 uint32_t a32_insn = (insn & 0x00ffffff) | 0xf4000000;
10533 if (disas_neon_ls(s, a32_insn)) {
10534 return;
10539 * TODO: Perhaps merge these into one decodetree output file.
10540 * Note disas_vfp is written for a32 with cond field in the
10541 * top nibble. The t32 encoding requires 0xe in the top nibble.
10543 if (disas_t32(s, insn) ||
10544 disas_vfp_uncond(s, insn) ||
10545 disas_neon_shared(s, insn) ||
10546 ((insn >> 28) == 0xe && disas_vfp(s, insn))) {
10547 return;
10549 /* fall back to legacy decoder */
10551 switch ((insn >> 25) & 0xf) {
10552 case 0: case 1: case 2: case 3:
10553 /* 16-bit instructions. Should never happen. */
10554 abort();
10555 case 6: case 7: case 14: case 15:
10556 /* Coprocessor. */
10557 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10558 /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
10559 if (extract32(insn, 24, 2) == 3) {
10560 goto illegal_op; /* op0 = 0b11 : unallocated */
10563 if (((insn >> 8) & 0xe) == 10 &&
10564 dc_isar_feature(aa32_fpsp_v2, s)) {
10565 /* FP, and the CPU supports it */
10566 goto illegal_op;
10567 } else {
10568 /* All other insns: NOCP */
10569 gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
10570 syn_uncategorized(),
10571 default_exception_el(s));
10573 break;
10575 if (((insn >> 24) & 3) == 3) {
10576 /* Translate into the equivalent ARM encoding. */
10577 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
10578 if (disas_neon_data_insn(s, insn)) {
10579 goto illegal_op;
10581 } else if (((insn >> 8) & 0xe) == 10) {
10582 /* VFP, but failed disas_vfp. */
10583 goto illegal_op;
10584 } else {
10585 if (insn & (1 << 28))
10586 goto illegal_op;
10587 if (disas_coproc_insn(s, insn)) {
10588 goto illegal_op;
10591 break;
10592 case 12:
10593 goto illegal_op;
10594 default:
10595 illegal_op:
10596 unallocated_encoding(s);
10600 static void disas_thumb_insn(DisasContext *s, uint32_t insn)
10602 if (!disas_t16(s, insn)) {
10603 unallocated_encoding(s);
10607 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
10609 /* Return true if the insn at dc->base.pc_next might cross a page boundary.
10610 * (False positives are OK, false negatives are not.)
10611 * We know this is a Thumb insn, and our caller ensures we are
10612 * only called if dc->base.pc_next is less than 4 bytes from the page
10613 * boundary, so we cross the page if the first 16 bits indicate
10614 * that this is a 32 bit insn.
10616 uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);
10618 return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
10621 static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
10623 DisasContext *dc = container_of(dcbase, DisasContext, base);
10624 CPUARMState *env = cs->env_ptr;
10625 ARMCPU *cpu = env_archcpu(env);
10626 uint32_t tb_flags = dc->base.tb->flags;
10627 uint32_t condexec, core_mmu_idx;
10629 dc->isar = &cpu->isar;
10630 dc->condjmp = 0;
10632 dc->aarch64 = 0;
10633 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
10634 * there is no secure EL1, so we route exceptions to EL3.
10636 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
10637 !arm_el_is_aa64(env, 3);
10638 dc->thumb = FIELD_EX32(tb_flags, TBFLAG_AM32, THUMB);
10639 dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
10640 condexec = FIELD_EX32(tb_flags, TBFLAG_AM32, CONDEXEC);
10641 dc->condexec_mask = (condexec & 0xf) << 1;
10642 dc->condexec_cond = condexec >> 4;
10644 core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
10645 dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
10646 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
10647 #if !defined(CONFIG_USER_ONLY)
10648 dc->user = (dc->current_el == 0);
10649 #endif
10650 dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
10652 if (arm_feature(env, ARM_FEATURE_M)) {
10653 dc->vfp_enabled = 1;
10654 dc->be_data = MO_TE;
10655 dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_M32, HANDLER);
10656 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
10657 regime_is_secure(env, dc->mmu_idx);
10658 dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_M32, STACKCHECK);
10659 dc->v8m_fpccr_s_wrong =
10660 FIELD_EX32(tb_flags, TBFLAG_M32, FPCCR_S_WRONG);
10661 dc->v7m_new_fp_ctxt_needed =
10662 FIELD_EX32(tb_flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED);
10663 dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_M32, LSPACT);
10664 } else {
10665 dc->be_data =
10666 FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
10667 dc->debug_target_el =
10668 FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
10669 dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
10670 dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
10671 dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
10672 dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
10673 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
10674 dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
10675 } else {
10676 dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
10677 dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
10680 dc->cp_regs = cpu->cp_regs;
10681 dc->features = env->features;
10683 /* Single step state. The code-generation logic here is:
10684 * SS_ACTIVE == 0:
10685 * generate code with no special handling for single-stepping (except
10686 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
10687 * this happens anyway because those changes are all system register or
10688 * PSTATE writes).
10689 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
10690 * emit code for one insn
10691 * emit code to clear PSTATE.SS
10692 * emit code to generate software step exception for completed step
10693 * end TB (as usual for having generated an exception)
10694 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
10695 * emit code to generate a software step exception
10696 * end the TB
10698 dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
10699 dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
10700 dc->is_ldex = false;
10702 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
10704 /* If architectural single step active, limit to 1. */
10705 if (is_singlestepping(dc)) {
10706 dc->base.max_insns = 1;
10709 /* ARM is a fixed-length ISA. Bound the number of insns to execute
10710 to those left on the page. */
10711 if (!dc->thumb) {
10712 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
10713 dc->base.max_insns = MIN(dc->base.max_insns, bound);
10716 cpu_V0 = tcg_temp_new_i64();
10717 cpu_V1 = tcg_temp_new_i64();
10718 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
10719 cpu_M0 = tcg_temp_new_i64();
10722 static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
10724 DisasContext *dc = container_of(dcbase, DisasContext, base);
10726 /* A note on handling of the condexec (IT) bits:
10728 * We want to avoid the overhead of having to write the updated condexec
10729 * bits back to the CPUARMState for every instruction in an IT block. So:
10730 * (1) if the condexec bits are not already zero then we write
10731 * zero back into the CPUARMState now. This avoids complications trying
10732 * to do it at the end of the block. (For example if we don't do this
10733 * it's hard to identify whether we can safely skip writing condexec
10734 * at the end of the TB, which we definitely want to do for the case
10735 * where a TB doesn't do anything with the IT state at all.)
10736 * (2) if we are going to leave the TB then we call gen_set_condexec()
10737 * which will write the correct value into CPUARMState if zero is wrong.
10738 * This is done both for leaving the TB at the end, and for leaving
10739 * it because of an exception we know will happen, which is done in
10740 * gen_exception_insn(). The latter is necessary because we need to
10741 * leave the TB with the PC/IT state just prior to execution of the
10742 * instruction which caused the exception.
10743 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
10744 * then the CPUARMState will be wrong and we need to reset it.
10745 * This is handled in the same way as restoration of the
10746 * PC in these situations; we save the value of the condexec bits
10747 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
10748 * then uses this to restore them after an exception.
10750 * Note that there are no instructions which can read the condexec
10751 * bits, and none which can write non-static values to them, so
10752 * we don't need to care about whether CPUARMState is correct in the
10753 * middle of a TB.
10756 /* Reset the conditional execution bits immediately. This avoids
10757 complications trying to do it at the end of the block. */
10758 if (dc->condexec_mask || dc->condexec_cond) {
10759 TCGv_i32 tmp = tcg_temp_new_i32();
10760 tcg_gen_movi_i32(tmp, 0);
10761 store_cpu_field(tmp, condexec_bits);
10765 static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
10767 DisasContext *dc = container_of(dcbase, DisasContext, base);
10769 tcg_gen_insn_start(dc->base.pc_next,
10770 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
10771 0);
10772 dc->insn_start = tcg_last_op();
10773 }
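/*
 * The second insn_start word above packs the Thumb IT state back into
 * its architectural 8-bit ITSTATE layout: per the way the DisasContext
 * is initialised, condexec_cond holds ITSTATE[7:4] and condexec_mask
 * holds ITSTATE[3:0] shifted left by one, so (cond << 4) | (mask >> 1)
 * reassembles the value that restore_state_to_opc() later writes
 * straight back into condexec_bits.
 */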
10775 static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
10776 const CPUBreakpoint *bp)
10777 {
10778 DisasContext *dc = container_of(dcbase, DisasContext, base);
10780 if (bp->flags & BP_CPU) {
10781 gen_set_condexec(dc);
10782 gen_set_pc_im(dc, dc->base.pc_next);
10783 gen_helper_check_breakpoints(cpu_env);
10784 /* End the TB early; it's likely not going to be executed */
10785 dc->base.is_jmp = DISAS_TOO_MANY;
10786 } else {
10787 gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
10788 /* The address covered by the breakpoint must be
10789 included in [tb->pc, tb->pc + tb->size) in order
10790 for it to be properly cleared -- thus we
10791 increment the PC here so that the logic setting
10792 tb->size below does the right thing. */
10793 /* TODO: Advance PC by correct instruction length to
10794 * avoid disassembler error messages */
10795 dc->base.pc_next += 2;
10796 dc->base.is_jmp = DISAS_NORETURN;
10797 }
10799 return true;
10800 }
10802 static bool arm_pre_translate_insn(DisasContext *dc)
10803 {
10804 #ifdef CONFIG_USER_ONLY
10805 /* Intercept jump to the magic kernel page. */
10806 if (dc->base.pc_next >= 0xffff0000) {
10807 /* We always get here via a jump, so know we are not in a
10808 conditional execution block. */
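/*
 * (0xffff0000 is the ARM Linux vector/"kuser helper" page; in user-mode
 * emulation the jump is trapped here and the helpers are emulated by
 * the EXCP_KERNEL_TRAP handling in the linux-user cpu loop.)
 */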
10809 gen_exception_internal(EXCP_KERNEL_TRAP);
10810 dc->base.is_jmp = DISAS_NORETURN;
10811 return true;
10812 }
10813 #endif
10815 if (dc->ss_active && !dc->pstate_ss) {
10816 /* Singlestep state is Active-pending.
10817 * If we're in this state at the start of a TB then either
10818 * a) we just took an exception to an EL which is being debugged
10819 * and this is the first insn in the exception handler
10820 * b) debug exceptions were masked and we just unmasked them
10821 * without changing EL (eg by clearing PSTATE.D)
10822 * In either case we're going to take a swstep exception in the
10823 * "did not step an insn" case, and so the syndrome ISV and EX
10824 * bits should be zero.
10825 */
10826 assert(dc->base.num_insns == 1);
10827 gen_swstep_exception(dc, 0, 0);
10828 dc->base.is_jmp = DISAS_NORETURN;
10829 return true;
10830 }
10832 return false;
10833 }
10835 static void arm_post_translate_insn(DisasContext *dc)
10836 {
10837 if (dc->condjmp && !dc->base.is_jmp) {
10838 gen_set_label(dc->condlabel);
10839 dc->condjmp = 0;
10840 }
10841 translator_loop_temp_check(&dc->base);
10842 }
10844 static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
10845 {
10846 DisasContext *dc = container_of(dcbase, DisasContext, base);
10847 CPUARMState *env = cpu->env_ptr;
10848 unsigned int insn;
10850 if (arm_pre_translate_insn(dc)) {
10851 return;
10852 }
10854 dc->pc_curr = dc->base.pc_next;
10855 insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
10856 dc->insn = insn;
10857 dc->base.pc_next += 4;
10858 disas_arm_insn(dc, insn);
10860 arm_post_translate_insn(dc);
10862 /* ARM is a fixed-length ISA. We performed the cross-page check
10863 in init_disas_context by adjusting max_insns. */
10864 }
10866 static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
10867 {
10868 /* Return true if this Thumb insn is always unconditional,
10869 * even inside an IT block. This is true of only a very few
10870 * instructions: BKPT, HLT, and SG.
10872 * A larger class of instructions are UNPREDICTABLE if used
10873 * inside an IT block; we do not need to detect those here, because
10874 * what we do by default (perform the cc check and update the IT
10875 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
10876 * choice for those situations.
10878 * insn is either a 16-bit or a 32-bit instruction; the two are
10879 * distinguishable because for the 16-bit case the top 16 bits
10880 * are zeroes, and that isn't a valid 32-bit encoding.
10881 */
10882 if ((insn & 0xffffff00) == 0xbe00) {
10883 /* BKPT */
10884 return true;
10885 }
10887 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
10888 !arm_dc_feature(s, ARM_FEATURE_M)) {
10889 /* HLT: v8A only. This is unconditional even when it is going to
10890 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
10891 * For v7 cores this was a plain old undefined encoding and so
10892 * honours its cc check. (We might be using the encoding as
10893 * a semihosting trap, but we don't change the cc check behaviour
10894 * on that account, because a debugger connected to a real v7A
10895 * core and emulating semihosting traps by catching the UNDEF
10896 * exception would also only see cases where the cc check passed.
10897 * No guest code should be trying to do a HLT semihosting trap
10898 * in an IT block anyway.
10899 */
10900 return true;
10901 }
10903 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
10904 arm_dc_feature(s, ARM_FEATURE_M)) {
10905 /* SG: v8M only */
10906 return true;
10907 }
10909 return false;
10910 }
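/*
 * A sketch of the 16-vs-32-bit distinction used above and in
 * thumb_insn_is_16bit(): a Thumb insn starts a 32-bit encoding exactly
 * when its first halfword has the top five bits 0b11101, 0b11110 or
 * 0b11111, i.e.
 *
 *     bool is_32bit_start = (first_hw & 0xf800) >= 0xe800;
 *
 * So a 16-bit insn widened into a uint32_t keeps its top sixteen bits
 * zero, which can never collide with a valid 32-bit encoding, whose
 * top halfword is always >= 0xe800 -- hence the "top 16 bits are
 * zeroes" test described in the comment above.
 */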
10912 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
10913 {
10914 DisasContext *dc = container_of(dcbase, DisasContext, base);
10915 CPUARMState *env = cpu->env_ptr;
10916 uint32_t insn;
10917 bool is_16bit;
10919 if (arm_pre_translate_insn(dc)) {
10920 return;
10921 }
10923 dc->pc_curr = dc->base.pc_next;
10924 insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
10925 is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
10926 dc->base.pc_next += 2;
10927 if (!is_16bit) {
10928 uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
10930 insn = insn << 16 | insn2;
10931 dc->base.pc_next += 2;
10932 }
10933 dc->insn = insn;
10935 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
10936 uint32_t cond = dc->condexec_cond;
10938 /*
10939 * Conditionally skip the insn. Note that both 0xe and 0xf mean
10940 * "always"; 0xf is not "never".
10941 */
10942 if (cond < 0x0e) {
10943 arm_skip_unless(dc, cond);
10944 }
10945 }
10947 if (is_16bit) {
10948 disas_thumb_insn(dc, insn);
10949 } else {
10950 disas_thumb2_insn(dc, insn);
10951 }
10953 /* Advance the Thumb condexec condition. */
10954 if (dc->condexec_mask) {
10955 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
10956 ((dc->condexec_mask >> 4) & 1));
10957 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
10958 if (dc->condexec_mask == 0) {
10959 dc->condexec_cond = 0;
10960 }
10961 }
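/*
 * Worked example of the advance above, using the representation implied
 * by arm_tr_insn_start() (condexec_cond = ITSTATE[7:4], condexec_mask =
 * ITSTATE[3:0] << 1): an "ITTE EQ" block starts with ITSTATE 0x06, i.e.
 * cond = 0x0, mask = 0x0c.  The three insns in the block are then
 * translated with cond/mask of (0x0, 0x0c), (0x0, 0x18) and (0x1, 0x10),
 * i.e. they are predicated on EQ, EQ, NE -- the then/then/else pattern
 * of ITTE -- and the final shift leaves mask == 0, which clears cond.
 */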
10963 arm_post_translate_insn(dc);
10965 /* Thumb is a variable-length ISA. Stop translation when the next insn
10966 * will touch a new page. This ensures that prefetch aborts occur at
10967 * the right place.
10969 * We want to stop the TB if the next insn starts in a new page,
10970 * or if it spans between this page and the next. This means that
10971 * if we're looking at the last halfword in the page we need to
10972 * see if it's a 16-bit Thumb insn (which will fit in this TB)
10973 * or a 32-bit Thumb insn (which won't).
10974 * This is to avoid generating a silly TB with a single 16-bit insn
10975 * in it at the end of this page (which would execute correctly
10976 * but isn't very efficient).
10978 if (dc->base.is_jmp == DISAS_NEXT
10979 && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
10980 || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
10981 && insn_crosses_page(env, dc)))) {
10982 dc->base.is_jmp = DISAS_TOO_MANY;
10983 }
10984 }
10986 static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
10987 {
10988 DisasContext *dc = container_of(dcbase, DisasContext, base);
10990 if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
10991 /* FIXME: This can theoretically happen with self-modifying code. */
10992 cpu_abort(cpu, "IO on conditional branch instruction");
10993 }
10995 /* At this stage dc->condjmp will only be set when the skipped
10996 instruction was a conditional branch or trap, and the PC has
10997 already been written. */
10998 gen_set_condexec(dc);
10999 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
11000 /* Exception return branches need some special case code at the
11001 * end of the TB, which is complex enough that it has to
11002 * handle the single-step vs not and the condition-failed
11003 * insn codepath itself.
11004 */
11005 gen_bx_excret_final_code(dc);
11006 } else if (unlikely(is_singlestepping(dc))) {
11007 /* Unconditional and "condition passed" instruction codepath. */
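/*
 * For the exception-generating cases below, gen_ss_advance() is called
 * first: the SVC/HVC/SMC insn itself counts as a completed single step,
 * so PSTATE.SS is cleared before the resulting exception is raised.
 */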
11008 switch (dc->base.is_jmp) {
11009 case DISAS_SWI:
11010 gen_ss_advance(dc);
11011 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11012 default_exception_el(dc));
11013 break;
11014 case DISAS_HVC:
11015 gen_ss_advance(dc);
11016 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
11017 break;
11018 case DISAS_SMC:
11019 gen_ss_advance(dc);
11020 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
11021 break;
11022 case DISAS_NEXT:
11023 case DISAS_TOO_MANY:
11024 case DISAS_UPDATE:
11025 gen_set_pc_im(dc, dc->base.pc_next);
11026 /* fall through */
11027 default:
11028 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
11029 gen_singlestep_exception(dc);
11030 break;
11031 case DISAS_NORETURN:
11032 break;
11033 }
11034 } else {
11035 /* While branches must always occur at the end of an IT block,
11036 there are a few other things that can cause us to terminate
11037 the TB in the middle of an IT block:
11038 - Exception generating instructions (bkpt, swi, undefined).
11039 - Page boundaries.
11040 - Hardware watchpoints.
11041 Hardware breakpoints have already been handled and skip this code.
11042 */
11043 switch(dc->base.is_jmp) {
11044 case DISAS_NEXT:
11045 case DISAS_TOO_MANY:
11046 gen_goto_tb(dc, 1, dc->base.pc_next);
11047 break;
11048 case DISAS_JUMP:
11049 gen_goto_ptr();
11050 break;
11051 case DISAS_UPDATE:
11052 gen_set_pc_im(dc, dc->base.pc_next);
11053 /* fall through */
11054 default:
11055 /* indicate that the hash table must be used to find the next TB */
11056 tcg_gen_exit_tb(NULL, 0);
11057 break;
11058 case DISAS_NORETURN:
11059 /* nothing more to generate */
11060 break;
11061 case DISAS_WFI:
11062 {
11063 TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
11064 !(dc->insn & (1U << 31))) ? 2 : 4);
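/*
 * The constant is (presumably) the byte length of the WFI insn: for
 * Thumb, bit 31 of dc->insn is only set when the first halfword was a
 * 32-bit encoding, so a 16-bit WFI passes 2 and everything else passes
 * 4.  The helper needs this so that, if the WFI has to trap to a higher
 * EL, it can back the PC up by the width of the insn before taking that
 * exception.
 */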
11066 gen_helper_wfi(cpu_env, tmp);
11067 tcg_temp_free_i32(tmp);
11068 /* The helper doesn't necessarily throw an exception, but we
11069 * must go back to the main loop to check for interrupts anyway.
11070 */
11071 tcg_gen_exit_tb(NULL, 0);
11072 break;
11073 }
11074 case DISAS_WFE:
11075 gen_helper_wfe(cpu_env);
11076 break;
11077 case DISAS_YIELD:
11078 gen_helper_yield(cpu_env);
11079 break;
11080 case DISAS_SWI:
11081 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11082 default_exception_el(dc));
11083 break;
11084 case DISAS_HVC:
11085 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
11086 break;
11087 case DISAS_SMC:
11088 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
11089 break;
11090 }
11091 }
11093 if (dc->condjmp) {
11094 /* "Condition failed" instruction codepath for the branch/trap insn */
11095 gen_set_label(dc->condlabel);
11096 gen_set_condexec(dc);
11097 if (unlikely(is_singlestepping(dc))) {
11098 gen_set_pc_im(dc, dc->base.pc_next);
11099 gen_singlestep_exception(dc);
11100 } else {
11101 gen_goto_tb(dc, 1, dc->base.pc_next);
11102 }
11103 }
11104 }
11106 static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
11107 {
11108 DisasContext *dc = container_of(dcbase, DisasContext, base);
11110 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
11111 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
11112 }
11114 static const TranslatorOps arm_translator_ops = {
11115 .init_disas_context = arm_tr_init_disas_context,
11116 .tb_start = arm_tr_tb_start,
11117 .insn_start = arm_tr_insn_start,
11118 .breakpoint_check = arm_tr_breakpoint_check,
11119 .translate_insn = arm_tr_translate_insn,
11120 .tb_stop = arm_tr_tb_stop,
11121 .disas_log = arm_tr_disas_log,
11122 };
11124 static const TranslatorOps thumb_translator_ops = {
11125 .init_disas_context = arm_tr_init_disas_context,
11126 .tb_start = arm_tr_tb_start,
11127 .insn_start = arm_tr_insn_start,
11128 .breakpoint_check = arm_tr_breakpoint_check,
11129 .translate_insn = thumb_tr_translate_insn,
11130 .tb_stop = arm_tr_tb_stop,
11131 .disas_log = arm_tr_disas_log,
11132 };
11134 /* generate intermediate code for basic block 'tb'. */
11135 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
11136 {
11137 DisasContext dc = { };
11138 const TranslatorOps *ops = &arm_translator_ops;
11140 if (FIELD_EX32(tb->flags, TBFLAG_AM32, THUMB)) {
11141 ops = &thumb_translator_ops;
11142 }
11143 #ifdef TARGET_AARCH64
11144 if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
11145 ops = &aarch64_translator_ops;
11146 }
11147 #endif
11149 translator_loop(ops, &dc.base, cpu, tb, max_insns);
11150 }
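/*
 * The data[] words below are the per-insn values recorded by
 * arm_tr_insn_start() (and its AArch64 counterpart): data[0] is the PC,
 * data[1] the packed IT/condexec state (always 0 for AArch64), and
 * data[2] the upper syndrome bits for any exception the insn may raise.
 */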
11152 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
11153 target_ulong *data)
11154 {
11155 if (is_a64(env)) {
11156 env->pc = data[0];
11157 env->condexec_bits = 0;
11158 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
11159 } else {
11160 env->regs[15] = data[0];
11161 env->condexec_bits = data[1];
11162 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
11163 }
11164 }