1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "qemu/log.h"
30 #include "qemu/bitops.h"
31 #include "arm_ldst.h"
32 #include "hw/semihosting/semihost.h"
34 #include "exec/helper-proto.h"
35 #include "exec/helper-gen.h"
37 #include "trace-tcg.h"
38 #include "exec/log.h"
41 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
43 /* currently all emulated v5 cores are also v5TE, so don't bother */
44 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
45 #define ENABLE_ARCH_5J dc_isar_feature(aa32_jazelle, s)
46 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
52 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
54 #include "translate.h"
56 #if defined(CONFIG_USER_ONLY)
57 #define IS_USER(s) 1
58 #else
59 #define IS_USER(s) (s->user)
60 #endif
62 /* We reuse the same 64-bit temporaries for efficiency. */
63 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
64 static TCGv_i32 cpu_R[16];
65 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66 TCGv_i64 cpu_exclusive_addr;
67 TCGv_i64 cpu_exclusive_val;
69 #include "exec/gen-icount.h"
71 static const char * const regnames[] =
72 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
73 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
75 /* Function prototypes for gen_ functions calling Neon helpers. */
76 typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
77 TCGv_i32, TCGv_i32);
78 /* Function prototypes for gen_ functions for fix point conversions */
79 typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
81 /* initialize TCG globals. */
82 void arm_translate_init(void)
84 int i;
86 for (i = 0; i < 16; i++) {
87 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
88 offsetof(CPUARMState, regs[i]),
89 regnames[i]);
91 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
92 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
93 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
94 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
96 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
97 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
98 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
99 offsetof(CPUARMState, exclusive_val), "exclusive_val");
101 a64_translate_init();
104 /* Flags for the disas_set_da_iss info argument:
105 * lower bits hold the Rt register number, higher bits are flags.
107 typedef enum ISSInfo {
108 ISSNone = 0,
109 ISSRegMask = 0x1f,
110 ISSInvalid = (1 << 5),
111 ISSIsAcqRel = (1 << 6),
112 ISSIsWrite = (1 << 7),
113 ISSIs16Bit = (1 << 8),
114 } ISSInfo;
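/*
 * For example (illustrative values only, derived from the flag definitions
 * above): a 16-bit Thumb load into r3 would pass (ISSIs16Bit | 3), a store
 * would additionally set ISSIsWrite, and a callsite that cannot supply
 * valid ISS information (for instance because writeback was used) passes
 * ISSInvalid so that nothing is recorded.
 */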
116 /* Save the syndrome information for a Data Abort */
117 static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo)
119 uint32_t syn;
120 int sas = memop & MO_SIZE;
121 bool sse = memop & MO_SIGN;
122 bool is_acqrel = issinfo & ISSIsAcqRel;
123 bool is_write = issinfo & ISSIsWrite;
124 bool is_16bit = issinfo & ISSIs16Bit;
125 int srt = issinfo & ISSRegMask;
127 if (issinfo & ISSInvalid) {
128 /* Some callsites want to conditionally provide ISS info,
129 * eg "only if this was not a writeback"
131 return;
134 if (srt == 15) {
135 /* For AArch32, insns where the src/dest is R15 never generate
136 * ISS information. Catching that here saves checking at all
137 * the call sites.
139 return;
142 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
143 0, 0, 0, is_write, 0, is_16bit);
144 disas_set_insn_syndrome(s, syn);
147 static inline int get_a32_user_mem_index(DisasContext *s)
149 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
150 * insns:
151 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
152 * otherwise, access as if at PL0.
154 switch (s->mmu_idx) {
155 case ARMMMUIdx_E2: /* this one is UNPREDICTABLE */
156 case ARMMMUIdx_E10_0:
157 case ARMMMUIdx_E10_1:
158 case ARMMMUIdx_E10_1_PAN:
159 return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
160 case ARMMMUIdx_SE3:
161 case ARMMMUIdx_SE10_0:
162 case ARMMMUIdx_SE10_1:
163 case ARMMMUIdx_SE10_1_PAN:
164 return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0);
165 case ARMMMUIdx_MUser:
166 case ARMMMUIdx_MPriv:
167 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
168 case ARMMMUIdx_MUserNegPri:
169 case ARMMMUIdx_MPrivNegPri:
170 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
171 case ARMMMUIdx_MSUser:
172 case ARMMMUIdx_MSPriv:
173 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
174 case ARMMMUIdx_MSUserNegPri:
175 case ARMMMUIdx_MSPrivNegPri:
176 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
177 default:
178 g_assert_not_reached();
182 static inline TCGv_i32 load_cpu_offset(int offset)
184 TCGv_i32 tmp = tcg_temp_new_i32();
185 tcg_gen_ld_i32(tmp, cpu_env, offset);
186 return tmp;
189 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
191 static inline void store_cpu_offset(TCGv_i32 var, int offset)
193 tcg_gen_st_i32(var, cpu_env, offset);
194 tcg_temp_free_i32(var);
197 #define store_cpu_field(var, name) \
198 store_cpu_offset(var, offsetof(CPUARMState, name))
200 /* The architectural value of PC. */
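/*
 * Note that the architectural value runs ahead of the instruction's own
 * address: reading the PC from an instruction at address X yields X + 8 in
 * ARM mode and X + 4 in Thumb mode, reflecting the legacy pipeline
 * behaviour that the architecture still specifies for reads of R15.
 */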
201 static uint32_t read_pc(DisasContext *s)
203 return s->pc_curr + (s->thumb ? 4 : 8);
206 /* Set a variable to the value of a CPU register. */
207 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
209 if (reg == 15) {
210 tcg_gen_movi_i32(var, read_pc(s));
211 } else {
212 tcg_gen_mov_i32(var, cpu_R[reg]);
216 /* Create a new temporary and set it to the value of a CPU register. */
217 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
219 TCGv_i32 tmp = tcg_temp_new_i32();
220 load_reg_var(s, tmp, reg);
221 return tmp;
225 * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
226 * This is used for load/store for which use of PC implies (literal),
227 * or ADD that implies ADR.
229 static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
231 TCGv_i32 tmp = tcg_temp_new_i32();
233 if (reg == 15) {
234 tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
235 } else {
236 tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
238 return tmp;
241 /* Set a CPU register. The source must be a temporary and will be
242 marked as dead. */
243 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
245 if (reg == 15) {
246 /* In Thumb mode, we must ignore bit 0.
247 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
248 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
249 * We choose to ignore [1:0] in ARM mode for all architecture versions.
251 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
252 s->base.is_jmp = DISAS_JUMP;
254 tcg_gen_mov_i32(cpu_R[reg], var);
255 tcg_temp_free_i32(var);
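/*
 * A minimal usage sketch of the register helpers above (illustrative only,
 * rn/rm/rd are placeholder register numbers, not taken from a particular
 * decoder): a two-source ALU operation would typically read its operands
 * into fresh temporaries, combine them, and write the result back:
 *
 *     TCGv_i32 t0 = load_reg(s, rn);
 *     TCGv_i32 t1 = load_reg(s, rm);
 *     tcg_gen_add_i32(t0, t0, t1);
 *     tcg_temp_free_i32(t1);
 *     store_reg(s, rd, t0);
 *
 * load_reg() hands back a new temporary; store_reg() consumes and frees the
 * temporary it is given (and flags a jump if the destination is the PC).
 */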
259 * Variant of store_reg which applies v8M stack-limit checks before updating
260 * SP. If the check fails this will result in an exception being taken.
261 * We disable the stack checks for CONFIG_USER_ONLY because we have
262 * no idea what the stack limits should be in that case.
263 * If stack checking is not being done this just acts like store_reg().
265 static void store_sp_checked(DisasContext *s, TCGv_i32 var)
267 #ifndef CONFIG_USER_ONLY
268 if (s->v8m_stackcheck) {
269 gen_helper_v8m_stackcheck(cpu_env, var);
271 #endif
272 store_reg(s, 13, var);
275 /* Value extensions. */
276 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
277 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
278 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
279 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
281 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
282 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
285 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
287 TCGv_i32 tmp_mask = tcg_const_i32(mask);
288 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
289 tcg_temp_free_i32(tmp_mask);
291 /* Set NZCV flags from the high 4 bits of var. */
292 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
294 static void gen_exception_internal(int excp)
296 TCGv_i32 tcg_excp = tcg_const_i32(excp);
298 assert(excp_is_internal(excp));
299 gen_helper_exception_internal(cpu_env, tcg_excp);
300 tcg_temp_free_i32(tcg_excp);
303 static void gen_step_complete_exception(DisasContext *s)
305     /* We just completed a step of an insn. Move from Active-not-pending
306 * to Active-pending, and then also take the swstep exception.
307 * This corresponds to making the (IMPDEF) choice to prioritize
308 * swstep exceptions over asynchronous exceptions taken to an exception
309 * level where debug is disabled. This choice has the advantage that
310 * we do not need to maintain internal state corresponding to the
311 * ISV/EX syndrome bits between completion of the step and generation
312 * of the exception, and our syndrome information is always correct.
314 gen_ss_advance(s);
315 gen_swstep_exception(s, 1, s->is_ldex);
316 s->base.is_jmp = DISAS_NORETURN;
319 static void gen_singlestep_exception(DisasContext *s)
321 /* Generate the right kind of exception for singlestep, which is
322 * either the architectural singlestep or EXCP_DEBUG for QEMU's
323 * gdb singlestepping.
325 if (s->ss_active) {
326 gen_step_complete_exception(s);
327 } else {
328 gen_exception_internal(EXCP_DEBUG);
332 static inline bool is_singlestepping(DisasContext *s)
334 /* Return true if we are singlestepping either because of
335 * architectural singlestep or QEMU gdbstub singlestep. This does
336 * not include the command line '-singlestep' mode which is rather
337 * misnamed as it only means "one instruction per TB" and doesn't
338 * affect the code we generate.
340 return s->base.singlestep_enabled || s->ss_active;
343 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
345 TCGv_i32 tmp1 = tcg_temp_new_i32();
346 TCGv_i32 tmp2 = tcg_temp_new_i32();
347 tcg_gen_ext16s_i32(tmp1, a);
348 tcg_gen_ext16s_i32(tmp2, b);
349 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
350 tcg_temp_free_i32(tmp2);
351 tcg_gen_sari_i32(a, a, 16);
352 tcg_gen_sari_i32(b, b, 16);
353 tcg_gen_mul_i32(b, b, a);
354 tcg_gen_mov_i32(a, tmp1);
355 tcg_temp_free_i32(tmp1);
358 /* Byteswap each halfword. */
359 static void gen_rev16(TCGv_i32 dest, TCGv_i32 var)
361 TCGv_i32 tmp = tcg_temp_new_i32();
362 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
363 tcg_gen_shri_i32(tmp, var, 8);
364 tcg_gen_and_i32(tmp, tmp, mask);
365 tcg_gen_and_i32(var, var, mask);
366 tcg_gen_shli_i32(var, var, 8);
367 tcg_gen_or_i32(dest, var, tmp);
368 tcg_temp_free_i32(mask);
369 tcg_temp_free_i32(tmp);
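/*
 * For example, gen_rev16 maps 0xAABBCCDD to 0xBBAADDCC: the high byte of
 * each halfword (0xAA, 0xCC) is moved down by the masked right shift, the
 * low byte of each halfword (0xBB, 0xDD) is moved up by the masked left
 * shift, and the two results are ORed together.
 */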
372 /* Byteswap low halfword and sign extend. */
373 static void gen_revsh(TCGv_i32 dest, TCGv_i32 var)
375 tcg_gen_ext16u_i32(var, var);
376 tcg_gen_bswap16_i32(var, var);
377 tcg_gen_ext16s_i32(dest, var);
380 /* 32x32->64 multiply. Marks inputs as dead. */
381 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
383 TCGv_i32 lo = tcg_temp_new_i32();
384 TCGv_i32 hi = tcg_temp_new_i32();
385 TCGv_i64 ret;
387 tcg_gen_mulu2_i32(lo, hi, a, b);
388 tcg_temp_free_i32(a);
389 tcg_temp_free_i32(b);
391 ret = tcg_temp_new_i64();
392 tcg_gen_concat_i32_i64(ret, lo, hi);
393 tcg_temp_free_i32(lo);
394 tcg_temp_free_i32(hi);
396 return ret;
399 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
401 TCGv_i32 lo = tcg_temp_new_i32();
402 TCGv_i32 hi = tcg_temp_new_i32();
403 TCGv_i64 ret;
405 tcg_gen_muls2_i32(lo, hi, a, b);
406 tcg_temp_free_i32(a);
407 tcg_temp_free_i32(b);
409 ret = tcg_temp_new_i64();
410 tcg_gen_concat_i32_i64(ret, lo, hi);
411 tcg_temp_free_i32(lo);
412 tcg_temp_free_i32(hi);
414 return ret;
417 /* Swap low and high halfwords. */
418 static void gen_swap_half(TCGv_i32 var)
420 tcg_gen_rotri_i32(var, var, 16);
423 /* Dual 16-bit add. The result is placed in DEST; T0 and T1 are clobbered:
424 tmp = (t0 ^ t1) & 0x8000;
425 t0 &= ~0x8000;
426 t1 &= ~0x8000;
427 t0 = (t0 + t1) ^ tmp;
430 static void gen_add16(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
432 TCGv_i32 tmp = tcg_temp_new_i32();
433 tcg_gen_xor_i32(tmp, t0, t1);
434 tcg_gen_andi_i32(tmp, tmp, 0x8000);
435 tcg_gen_andi_i32(t0, t0, ~0x8000);
436 tcg_gen_andi_i32(t1, t1, ~0x8000);
437 tcg_gen_add_i32(t0, t0, t1);
438 tcg_gen_xor_i32(dest, t0, tmp);
439 tcg_temp_free_i32(tmp);
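/*
 * Worked example of the halfword-add trick above: for t0 = 0x00018000 and
 * t1 = 0x00018000, tmp is 0 and the masked sum is 0x00020000, i.e. each
 * halfword is added modulo 2^16 (low half: 0x8000 + 0x8000 = 0x0000) with
 * no carry leaking from the low halfword into the high one. Clearing bit 15
 * of both operands guarantees the low-half addition cannot carry into
 * bit 16; the final XOR with tmp restores the correct value of bit 15.
 */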
442 /* Set N and Z flags from var. */
443 static inline void gen_logic_CC(TCGv_i32 var)
445 tcg_gen_mov_i32(cpu_NF, var);
446 tcg_gen_mov_i32(cpu_ZF, var);
449 /* dest = T0 + T1 + CF. */
450 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
452 tcg_gen_add_i32(dest, t0, t1);
453 tcg_gen_add_i32(dest, dest, cpu_CF);
456 /* dest = T0 - T1 + CF - 1. */
457 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
459 tcg_gen_sub_i32(dest, t0, t1);
460 tcg_gen_add_i32(dest, dest, cpu_CF);
461 tcg_gen_subi_i32(dest, dest, 1);
464 /* dest = T0 + T1. Compute C, N, V and Z flags */
465 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
467 TCGv_i32 tmp = tcg_temp_new_i32();
468 tcg_gen_movi_i32(tmp, 0);
469 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
470 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
471 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
472 tcg_gen_xor_i32(tmp, t0, t1);
473 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
474 tcg_temp_free_i32(tmp);
475 tcg_gen_mov_i32(dest, cpu_NF);
478 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
479 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
481 TCGv_i32 tmp = tcg_temp_new_i32();
482 if (TCG_TARGET_HAS_add2_i32) {
483 tcg_gen_movi_i32(tmp, 0);
484 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
485 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
486 } else {
487 TCGv_i64 q0 = tcg_temp_new_i64();
488 TCGv_i64 q1 = tcg_temp_new_i64();
489 tcg_gen_extu_i32_i64(q0, t0);
490 tcg_gen_extu_i32_i64(q1, t1);
491 tcg_gen_add_i64(q0, q0, q1);
492 tcg_gen_extu_i32_i64(q1, cpu_CF);
493 tcg_gen_add_i64(q0, q0, q1);
494 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
495 tcg_temp_free_i64(q0);
496 tcg_temp_free_i64(q1);
498 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
499 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
500 tcg_gen_xor_i32(tmp, t0, t1);
501 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
502 tcg_temp_free_i32(tmp);
503 tcg_gen_mov_i32(dest, cpu_NF);
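/*
 * In gen_add_CC/gen_adc_CC above, the signed overflow flag is derived as
 * VF = (result ^ t0) & ~(t0 ^ t1): overflow can only occur when the two
 * operands have the same sign (t0 ^ t1 has bit 31 clear) and the result's
 * sign differs from theirs (result ^ t0 has bit 31 set). Only bit 31 of
 * cpu_VF is significant when the flag is later tested.
 */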
506 /* dest = T0 - T1. Compute C, N, V and Z flags */
507 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
509 TCGv_i32 tmp;
510 tcg_gen_sub_i32(cpu_NF, t0, t1);
511 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
512 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
513 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
514 tmp = tcg_temp_new_i32();
515 tcg_gen_xor_i32(tmp, t0, t1);
516 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
517 tcg_temp_free_i32(tmp);
518 tcg_gen_mov_i32(dest, cpu_NF);
521 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
522 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
524 TCGv_i32 tmp = tcg_temp_new_i32();
525 tcg_gen_not_i32(tmp, t1);
526 gen_adc_CC(dest, t0, tmp);
527 tcg_temp_free_i32(tmp);
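/*
 * The variable-shift helpers below implement the A32 register-specified
 * shift semantics: only the bottom byte of the shift register is used, and
 * shift amounts of 32 or more produce 0 for LSL/LSR (via the movcond on a
 * zero source), while SAR clamps the amount to 31 so the sign bit is
 * replicated across the result.
 */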
530 #define GEN_SHIFT(name) \
531 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
533 TCGv_i32 tmp1, tmp2, tmp3; \
534 tmp1 = tcg_temp_new_i32(); \
535 tcg_gen_andi_i32(tmp1, t1, 0xff); \
536 tmp2 = tcg_const_i32(0); \
537 tmp3 = tcg_const_i32(0x1f); \
538 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
539 tcg_temp_free_i32(tmp3); \
540 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
541 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
542 tcg_temp_free_i32(tmp2); \
543 tcg_temp_free_i32(tmp1); \
545 GEN_SHIFT(shl)
546 GEN_SHIFT(shr)
547 #undef GEN_SHIFT
549 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
551 TCGv_i32 tmp1, tmp2;
552 tmp1 = tcg_temp_new_i32();
553 tcg_gen_andi_i32(tmp1, t1, 0xff);
554 tmp2 = tcg_const_i32(0x1f);
555 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
556 tcg_temp_free_i32(tmp2);
557 tcg_gen_sar_i32(dest, t0, tmp1);
558 tcg_temp_free_i32(tmp1);
561 static void shifter_out_im(TCGv_i32 var, int shift)
563 tcg_gen_extract_i32(cpu_CF, var, shift, 1);
566 /* Shift by immediate. Includes special handling for shift == 0. */
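/*
 * Note the immediate-shift encodings handled below: LSR #0 and ASR #0 in
 * the instruction encoding actually mean a shift of 32, and ROR #0 encodes
 * RRX (rotate right by one with extend through the carry flag), while
 * LSL #0 is a plain move that leaves the carry flag untouched.
 */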
567 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
568 int shift, int flags)
570 switch (shiftop) {
571 case 0: /* LSL */
572 if (shift != 0) {
573 if (flags)
574 shifter_out_im(var, 32 - shift);
575 tcg_gen_shli_i32(var, var, shift);
577 break;
578 case 1: /* LSR */
579 if (shift == 0) {
580 if (flags) {
581 tcg_gen_shri_i32(cpu_CF, var, 31);
583 tcg_gen_movi_i32(var, 0);
584 } else {
585 if (flags)
586 shifter_out_im(var, shift - 1);
587 tcg_gen_shri_i32(var, var, shift);
589 break;
590 case 2: /* ASR */
591 if (shift == 0)
592 shift = 32;
593 if (flags)
594 shifter_out_im(var, shift - 1);
595 if (shift == 32)
596 shift = 31;
597 tcg_gen_sari_i32(var, var, shift);
598 break;
599 case 3: /* ROR/RRX */
600 if (shift != 0) {
601 if (flags)
602 shifter_out_im(var, shift - 1);
603 tcg_gen_rotri_i32(var, var, shift); break;
604 } else {
605 TCGv_i32 tmp = tcg_temp_new_i32();
606 tcg_gen_shli_i32(tmp, cpu_CF, 31);
607 if (flags)
608 shifter_out_im(var, 0);
609 tcg_gen_shri_i32(var, var, 1);
610 tcg_gen_or_i32(var, var, tmp);
611 tcg_temp_free_i32(tmp);
616 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
617 TCGv_i32 shift, int flags)
619 if (flags) {
620 switch (shiftop) {
621 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
622 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
623 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
624 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
626 } else {
627 switch (shiftop) {
628 case 0:
629 gen_shl(var, var, shift);
630 break;
631 case 1:
632 gen_shr(var, var, shift);
633 break;
634 case 2:
635 gen_sar(var, var, shift);
636 break;
637 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
638 tcg_gen_rotr_i32(var, var, shift); break;
641 tcg_temp_free_i32(shift);
645 * Generate a conditional based on ARM condition code cc.
646 * This is common between ARM and Aarch64 targets.
648 void arm_test_cc(DisasCompare *cmp, int cc)
650 TCGv_i32 value;
651 TCGCond cond;
652 bool global = true;
654 switch (cc) {
655 case 0: /* eq: Z */
656 case 1: /* ne: !Z */
657 cond = TCG_COND_EQ;
658 value = cpu_ZF;
659 break;
661 case 2: /* cs: C */
662 case 3: /* cc: !C */
663 cond = TCG_COND_NE;
664 value = cpu_CF;
665 break;
667 case 4: /* mi: N */
668 case 5: /* pl: !N */
669 cond = TCG_COND_LT;
670 value = cpu_NF;
671 break;
673 case 6: /* vs: V */
674 case 7: /* vc: !V */
675 cond = TCG_COND_LT;
676 value = cpu_VF;
677 break;
679 case 8: /* hi: C && !Z */
680 case 9: /* ls: !C || Z -> !(C && !Z) */
681 cond = TCG_COND_NE;
682 value = tcg_temp_new_i32();
683 global = false;
684 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
685 ZF is non-zero for !Z; so AND the two subexpressions. */
686 tcg_gen_neg_i32(value, cpu_CF);
687 tcg_gen_and_i32(value, value, cpu_ZF);
688 break;
690 case 10: /* ge: N == V -> N ^ V == 0 */
691 case 11: /* lt: N != V -> N ^ V != 0 */
692 /* Since we're only interested in the sign bit, == 0 is >= 0. */
693 cond = TCG_COND_GE;
694 value = tcg_temp_new_i32();
695 global = false;
696 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
697 break;
699 case 12: /* gt: !Z && N == V */
700 case 13: /* le: Z || N != V */
701 cond = TCG_COND_NE;
702 value = tcg_temp_new_i32();
703 global = false;
704 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
705 * the sign bit then AND with ZF to yield the result. */
706 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
707 tcg_gen_sari_i32(value, value, 31);
708 tcg_gen_andc_i32(value, cpu_ZF, value);
709 break;
711 case 14: /* always */
712 case 15: /* always */
713 /* Use the ALWAYS condition, which will fold early.
714 * It doesn't matter what we use for the value. */
715 cond = TCG_COND_ALWAYS;
716 value = cpu_ZF;
717 goto no_invert;
719 default:
720 fprintf(stderr, "Bad condition code 0x%x\n", cc);
721 abort();
724 if (cc & 1) {
725 cond = tcg_invert_cond(cond);
728 no_invert:
729 cmp->cond = cond;
730 cmp->value = value;
731 cmp->value_global = global;
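/*
 * Note on the flag representation used above: cpu_ZF holds a full result
 * value, so the Z flag is "set" exactly when cpu_ZF == 0; cpu_NF and cpu_VF
 * are tested via their bit 31; cpu_CF holds 0 or 1. arm_jump_cc() below
 * always compares cmp->value against zero, and the odd condition codes are
 * simply the even ones with the TCG condition inverted (the cc & 1 step).
 */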
734 void arm_free_cc(DisasCompare *cmp)
736 if (!cmp->value_global) {
737 tcg_temp_free_i32(cmp->value);
741 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
743 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
746 void arm_gen_test_cc(int cc, TCGLabel *label)
748 DisasCompare cmp;
749 arm_test_cc(&cmp, cc);
750 arm_jump_cc(&cmp, label);
751 arm_free_cc(&cmp);
754 static inline void gen_set_condexec(DisasContext *s)
756 if (s->condexec_mask) {
757 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
758 TCGv_i32 tmp = tcg_temp_new_i32();
759 tcg_gen_movi_i32(tmp, val);
760 store_cpu_field(tmp, condexec_bits);
764 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
766 tcg_gen_movi_i32(cpu_R[15], val);
769 /* Set PC and Thumb state from var. var is marked as dead. */
770 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
772 s->base.is_jmp = DISAS_JUMP;
773 tcg_gen_andi_i32(cpu_R[15], var, ~1);
774 tcg_gen_andi_i32(var, var, 1);
775 store_cpu_field(var, thumb);
779 * Set PC and Thumb state from var. var is marked as dead.
780 * For M-profile CPUs, include logic to detect exception-return
781 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
782 * and BX reg, and no others, and happens only for code in Handler mode.
783 * The Security Extension also requires us to check for the FNC_RETURN
784 * which signals a function return from non-secure state; this can happen
785 * in both Handler and Thread mode.
786 * To avoid having to do multiple comparisons in inline generated code,
787 * we make the check we do here loose, so it will match for EXC_RETURN
788 * in Thread mode. For system emulation do_v7m_exception_exit() checks
789 * for these spurious cases and returns without doing anything (giving
790 * the same behaviour as for a branch to a non-magic address).
792 * In linux-user mode it is unclear what the right behaviour for an
793 * attempted FNC_RETURN should be, because in real hardware this will go
794 * directly to Secure code (ie not the Linux kernel) which will then treat
795 * the error in any way it chooses. For QEMU we opt to make the FNC_RETURN
796 * attempt behave the way it would on a CPU without the security extension,
797 * which is to say "like a normal branch". That means we can simply treat
798 * all branches as normal with no magic address behaviour.
800 static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
802 /* Generate the same code here as for a simple bx, but flag via
803 * s->base.is_jmp that we need to do the rest of the work later.
805 gen_bx(s, var);
806 #ifndef CONFIG_USER_ONLY
807 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
808 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
809 s->base.is_jmp = DISAS_BX_EXCRET;
811 #endif
814 static inline void gen_bx_excret_final_code(DisasContext *s)
816 /* Generate the code to finish possible exception return and end the TB */
817 TCGLabel *excret_label = gen_new_label();
818 uint32_t min_magic;
820 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
821 /* Covers FNC_RETURN and EXC_RETURN magic */
822 min_magic = FNC_RETURN_MIN_MAGIC;
823 } else {
824 /* EXC_RETURN magic only */
825 min_magic = EXC_RETURN_MIN_MAGIC;
828 /* Is the new PC value in the magic range indicating exception return? */
829 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
830 /* No: end the TB as we would for a DISAS_JMP */
831 if (is_singlestepping(s)) {
832 gen_singlestep_exception(s);
833 } else {
834 tcg_gen_exit_tb(NULL, 0);
836 gen_set_label(excret_label);
837 /* Yes: this is an exception return.
838 * At this point in runtime env->regs[15] and env->thumb will hold
839 * the exception-return magic number, which do_v7m_exception_exit()
840 * will read. Nothing else will be able to see those values because
841 * the cpu-exec main loop guarantees that we will always go straight
842 * from raising the exception to the exception-handling code.
844 * gen_ss_advance(s) does nothing on M profile currently but
845 * calling it is conceptually the right thing as we have executed
846 * this instruction (compare SWI, HVC, SMC handling).
848 gen_ss_advance(s);
849 gen_exception_internal(EXCP_EXCEPTION_EXIT);
852 static inline void gen_bxns(DisasContext *s, int rm)
854 TCGv_i32 var = load_reg(s, rm);
856 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
857 * we need to sync state before calling it, but:
858 * - we don't need to do gen_set_pc_im() because the bxns helper will
859 * always set the PC itself
860 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
861 * unless it's outside an IT block or the last insn in an IT block,
862 * so we know that condexec == 0 (already set at the top of the TB)
863 * is correct in the non-UNPREDICTABLE cases, and we can choose
864 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
866 gen_helper_v7m_bxns(cpu_env, var);
867 tcg_temp_free_i32(var);
868 s->base.is_jmp = DISAS_EXIT;
871 static inline void gen_blxns(DisasContext *s, int rm)
873 TCGv_i32 var = load_reg(s, rm);
875 /* We don't need to sync condexec state, for the same reason as bxns.
876 * We do however need to set the PC, because the blxns helper reads it.
877 * The blxns helper may throw an exception.
879 gen_set_pc_im(s, s->base.pc_next);
880 gen_helper_v7m_blxns(cpu_env, var);
881 tcg_temp_free_i32(var);
882 s->base.is_jmp = DISAS_EXIT;
885 /* Variant of store_reg which uses branch&exchange logic when storing
886 to r15 in ARM architecture v7 and above. The source must be a temporary
887 and will be marked as dead. */
888 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
890 if (reg == 15 && ENABLE_ARCH_7) {
891 gen_bx(s, var);
892 } else {
893 store_reg(s, reg, var);
897 /* Variant of store_reg which uses branch&exchange logic when storing
898 * to r15 in ARM architecture v5T and above. This is used for storing
899 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
900 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
901 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
903 if (reg == 15 && ENABLE_ARCH_5) {
904 gen_bx_excret(s, var);
905 } else {
906 store_reg(s, reg, var);
910 #ifdef CONFIG_USER_ONLY
911 #define IS_USER_ONLY 1
912 #else
913 #define IS_USER_ONLY 0
914 #endif
916 /* Abstractions of "generate code to do a guest load/store for
917 * AArch32", where a vaddr is always 32 bits (and is zero
918 * extended if we're a 64 bit core) and data is also
919 * 32 bits unless specifically doing a 64 bit access.
920 * These functions work like tcg_gen_qemu_{ld,st}* except
921 * that the address argument is TCGv_i32 rather than TCGv.
924 static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
926 TCGv addr = tcg_temp_new();
927 tcg_gen_extu_i32_tl(addr, a32);
929 /* Not needed for user-mode BE32, where we use MO_BE instead. */
930 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
931 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
933 return addr;
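/*
 * The XOR above implements the BE32 (SCTLR.B) byte-lane swap used in system
 * emulation: sub-word accesses are redirected within an aligned 32-bit
 * word, so with sctlr_b set a byte access XORs the address with 3 and a
 * halfword access XORs it with 2, while word and larger accesses pass
 * through unchanged.
 */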
936 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
937 int index, MemOp opc)
939 TCGv addr;
941 if (arm_dc_feature(s, ARM_FEATURE_M) &&
942 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
943 opc |= MO_ALIGN;
946 addr = gen_aa32_addr(s, a32, opc);
947 tcg_gen_qemu_ld_i32(val, addr, index, opc);
948 tcg_temp_free(addr);
951 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
952 int index, MemOp opc)
954 TCGv addr;
956 if (arm_dc_feature(s, ARM_FEATURE_M) &&
957 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
958 opc |= MO_ALIGN;
961 addr = gen_aa32_addr(s, a32, opc);
962 tcg_gen_qemu_st_i32(val, addr, index, opc);
963 tcg_temp_free(addr);
966 #define DO_GEN_LD(SUFF, OPC) \
967 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
968 TCGv_i32 a32, int index) \
970 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
973 #define DO_GEN_ST(SUFF, OPC) \
974 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
975 TCGv_i32 a32, int index) \
977 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
980 static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
982 /* Not needed for user-mode BE32, where we use MO_BE instead. */
983 if (!IS_USER_ONLY && s->sctlr_b) {
984 tcg_gen_rotri_i64(val, val, 32);
988 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
989 int index, MemOp opc)
991 TCGv addr = gen_aa32_addr(s, a32, opc);
992 tcg_gen_qemu_ld_i64(val, addr, index, opc);
993 gen_aa32_frob64(s, val);
994 tcg_temp_free(addr);
997 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
998 TCGv_i32 a32, int index)
1000 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1003 static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1004 int index, MemOp opc)
1006 TCGv addr = gen_aa32_addr(s, a32, opc);
1008 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1009 if (!IS_USER_ONLY && s->sctlr_b) {
1010 TCGv_i64 tmp = tcg_temp_new_i64();
1011 tcg_gen_rotri_i64(tmp, val, 32);
1012 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1013 tcg_temp_free_i64(tmp);
1014 } else {
1015 tcg_gen_qemu_st_i64(val, addr, index, opc);
1017 tcg_temp_free(addr);
1020 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1021 TCGv_i32 a32, int index)
1023 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1026 DO_GEN_LD(8u, MO_UB)
1027 DO_GEN_LD(16u, MO_UW)
1028 DO_GEN_LD(32u, MO_UL)
1029 DO_GEN_ST(8, MO_UB)
1030 DO_GEN_ST(16, MO_UW)
1031 DO_GEN_ST(32, MO_UL)
1033 static inline void gen_hvc(DisasContext *s, int imm16)
1035 /* The pre HVC helper handles cases when HVC gets trapped
1036 * as an undefined insn by runtime configuration (ie before
1037 * the insn really executes).
1039 gen_set_pc_im(s, s->pc_curr);
1040 gen_helper_pre_hvc(cpu_env);
1041 /* Otherwise we will treat this as a real exception which
1042 * happens after execution of the insn. (The distinction matters
1043 * for the PC value reported to the exception handler and also
1044 * for single stepping.)
1046 s->svc_imm = imm16;
1047 gen_set_pc_im(s, s->base.pc_next);
1048 s->base.is_jmp = DISAS_HVC;
1051 static inline void gen_smc(DisasContext *s)
1053 /* As with HVC, we may take an exception either before or after
1054 * the insn executes.
1056 TCGv_i32 tmp;
1058 gen_set_pc_im(s, s->pc_curr);
1059 tmp = tcg_const_i32(syn_aa32_smc());
1060 gen_helper_pre_smc(cpu_env, tmp);
1061 tcg_temp_free_i32(tmp);
1062 gen_set_pc_im(s, s->base.pc_next);
1063 s->base.is_jmp = DISAS_SMC;
1066 static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
1068 gen_set_condexec(s);
1069 gen_set_pc_im(s, pc);
1070 gen_exception_internal(excp);
1071 s->base.is_jmp = DISAS_NORETURN;
1074 static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
1075 int syn, uint32_t target_el)
1077 gen_set_condexec(s);
1078 gen_set_pc_im(s, pc);
1079 gen_exception(excp, syn, target_el);
1080 s->base.is_jmp = DISAS_NORETURN;
1083 static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
1085 TCGv_i32 tcg_syn;
1087 gen_set_condexec(s);
1088 gen_set_pc_im(s, s->pc_curr);
1089 tcg_syn = tcg_const_i32(syn);
1090 gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
1091 tcg_temp_free_i32(tcg_syn);
1092 s->base.is_jmp = DISAS_NORETURN;
1095 static void unallocated_encoding(DisasContext *s)
1097 /* Unallocated and reserved encodings are uncategorized */
1098 gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
1099 default_exception_el(s));
1102 /* Force a TB lookup after an instruction that changes the CPU state. */
1103 static inline void gen_lookup_tb(DisasContext *s)
1105 tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
1106 s->base.is_jmp = DISAS_EXIT;
1109 static inline void gen_hlt(DisasContext *s, int imm)
1111 /* HLT. This has two purposes.
1112 * Architecturally, it is an external halting debug instruction.
1113 * Since QEMU doesn't implement external debug, we treat this as
1114      * it is required to behave when halting debug is disabled: it will UNDEF.
1115 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1116 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1117 * must trigger semihosting even for ARMv7 and earlier, where
1118 * HLT was an undefined encoding.
1119 * In system mode, we don't allow userspace access to
1120 * semihosting, to provide some semblance of security
1121 * (and for consistency with our 32-bit semihosting).
1123 if (semihosting_enabled() &&
1124 #ifndef CONFIG_USER_ONLY
1125 s->current_el != 0 &&
1126 #endif
1127 (imm == (s->thumb ? 0x3c : 0xf000))) {
1128 gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
1129 return;
1132 unallocated_encoding(s);
1135 static TCGv_ptr get_fpstatus_ptr(int neon)
1137 TCGv_ptr statusptr = tcg_temp_new_ptr();
1138 int offset;
1139 if (neon) {
1140 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1141 } else {
1142 offset = offsetof(CPUARMState, vfp.fp_status);
1144 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1145 return statusptr;
1148 static inline long vfp_reg_offset(bool dp, unsigned reg)
1150 if (dp) {
1151 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
1152 } else {
1153 long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
1154 if (reg & 1) {
1155 ofs += offsetof(CPU_DoubleU, l.upper);
1156 } else {
1157 ofs += offsetof(CPU_DoubleU, l.lower);
1159 return ofs;
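/*
 * vfp_reg_offset() above maps the VFP registers onto the vfp.zregs array
 * shared with the AArch64/SVE state: each D register occupies one 64-bit
 * element, and two consecutive S registers are packed into the halves of a
 * D register, with l.lower/l.upper selecting the 32-bit half in a
 * host-endianness-aware way via the CPU_DoubleU layout.
 */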
1163 /* Return the offset of a 32-bit piece of a NEON register.
1164 zero is the least significant end of the register. */
1165 static inline long
1166 neon_reg_offset (int reg, int n)
1168 int sreg;
1169 sreg = reg * 2 + n;
1170 return vfp_reg_offset(0, sreg);
1173 /* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
1174 * where 0 is the least significant end of the register.
1176 static inline long
1177 neon_element_offset(int reg, int element, MemOp size)
1179 int element_size = 1 << size;
1180 int ofs = element * element_size;
1181 #ifdef HOST_WORDS_BIGENDIAN
1182 /* Calculate the offset assuming fully little-endian,
1183 * then XOR to account for the order of the 8-byte units.
1185 if (element_size < 8) {
1186 ofs ^= 8 - element_size;
1188 #endif
1189 return neon_reg_offset(reg, 0) + ofs;
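/*
 * Example of the big-endian fixup above: on a HOST_WORDS_BIGENDIAN build,
 * element 0 of a vector of bytes is stored at offset 7 within its 8-byte
 * unit (0 ^ (8 - 1)), so element indices still count from the least
 * significant end of the register regardless of host byte order.
 */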
1192 static TCGv_i32 neon_load_reg(int reg, int pass)
1194 TCGv_i32 tmp = tcg_temp_new_i32();
1195 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1196 return tmp;
1199 static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop)
1201 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1203 switch (mop) {
1204 case MO_UB:
1205 tcg_gen_ld8u_i32(var, cpu_env, offset);
1206 break;
1207 case MO_UW:
1208 tcg_gen_ld16u_i32(var, cpu_env, offset);
1209 break;
1210 case MO_UL:
1211 tcg_gen_ld_i32(var, cpu_env, offset);
1212 break;
1213 default:
1214 g_assert_not_reached();
1218 static void neon_load_element64(TCGv_i64 var, int reg, int ele, MemOp mop)
1220 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1222 switch (mop) {
1223 case MO_UB:
1224 tcg_gen_ld8u_i64(var, cpu_env, offset);
1225 break;
1226 case MO_UW:
1227 tcg_gen_ld16u_i64(var, cpu_env, offset);
1228 break;
1229 case MO_UL:
1230 tcg_gen_ld32u_i64(var, cpu_env, offset);
1231 break;
1232 case MO_Q:
1233 tcg_gen_ld_i64(var, cpu_env, offset);
1234 break;
1235 default:
1236 g_assert_not_reached();
1240 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1242 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1243 tcg_temp_free_i32(var);
1246 static void neon_store_element(int reg, int ele, MemOp size, TCGv_i32 var)
1248 long offset = neon_element_offset(reg, ele, size);
1250 switch (size) {
1251 case MO_8:
1252 tcg_gen_st8_i32(var, cpu_env, offset);
1253 break;
1254 case MO_16:
1255 tcg_gen_st16_i32(var, cpu_env, offset);
1256 break;
1257 case MO_32:
1258 tcg_gen_st_i32(var, cpu_env, offset);
1259 break;
1260 default:
1261 g_assert_not_reached();
1265 static void neon_store_element64(int reg, int ele, MemOp size, TCGv_i64 var)
1267 long offset = neon_element_offset(reg, ele, size);
1269 switch (size) {
1270 case MO_8:
1271 tcg_gen_st8_i64(var, cpu_env, offset);
1272 break;
1273 case MO_16:
1274 tcg_gen_st16_i64(var, cpu_env, offset);
1275 break;
1276 case MO_32:
1277 tcg_gen_st32_i64(var, cpu_env, offset);
1278 break;
1279 case MO_64:
1280 tcg_gen_st_i64(var, cpu_env, offset);
1281 break;
1282 default:
1283 g_assert_not_reached();
1287 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1289 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1292 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1294 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1297 static inline void neon_load_reg32(TCGv_i32 var, int reg)
1299 tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
1302 static inline void neon_store_reg32(TCGv_i32 var, int reg)
1304 tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
1307 static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1309 TCGv_ptr ret = tcg_temp_new_ptr();
1310 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1311 return ret;
1314 #define ARM_CP_RW_BIT (1 << 20)
1316 /* Include the VFP and Neon decoders */
1317 #include "translate-vfp.inc.c"
1318 #include "translate-neon.inc.c"
1320 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1322 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1325 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1327 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1330 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1332 TCGv_i32 var = tcg_temp_new_i32();
1333 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1334 return var;
1337 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1339 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1340 tcg_temp_free_i32(var);
1343 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1345 iwmmxt_store_reg(cpu_M0, rn);
1348 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1350 iwmmxt_load_reg(cpu_M0, rn);
1353 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1355 iwmmxt_load_reg(cpu_V1, rn);
1356 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1359 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1361 iwmmxt_load_reg(cpu_V1, rn);
1362 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1365 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1367 iwmmxt_load_reg(cpu_V1, rn);
1368 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1371 #define IWMMXT_OP(name) \
1372 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1374 iwmmxt_load_reg(cpu_V1, rn); \
1375 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1378 #define IWMMXT_OP_ENV(name) \
1379 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1381 iwmmxt_load_reg(cpu_V1, rn); \
1382 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1385 #define IWMMXT_OP_ENV_SIZE(name) \
1386 IWMMXT_OP_ENV(name##b) \
1387 IWMMXT_OP_ENV(name##w) \
1388 IWMMXT_OP_ENV(name##l)
1390 #define IWMMXT_OP_ENV1(name) \
1391 static inline void gen_op_iwmmxt_##name##_M0(void) \
1393 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1396 IWMMXT_OP(maddsq)
1397 IWMMXT_OP(madduq)
1398 IWMMXT_OP(sadb)
1399 IWMMXT_OP(sadw)
1400 IWMMXT_OP(mulslw)
1401 IWMMXT_OP(mulshw)
1402 IWMMXT_OP(mululw)
1403 IWMMXT_OP(muluhw)
1404 IWMMXT_OP(macsw)
1405 IWMMXT_OP(macuw)
1407 IWMMXT_OP_ENV_SIZE(unpackl)
1408 IWMMXT_OP_ENV_SIZE(unpackh)
1410 IWMMXT_OP_ENV1(unpacklub)
1411 IWMMXT_OP_ENV1(unpackluw)
1412 IWMMXT_OP_ENV1(unpacklul)
1413 IWMMXT_OP_ENV1(unpackhub)
1414 IWMMXT_OP_ENV1(unpackhuw)
1415 IWMMXT_OP_ENV1(unpackhul)
1416 IWMMXT_OP_ENV1(unpacklsb)
1417 IWMMXT_OP_ENV1(unpacklsw)
1418 IWMMXT_OP_ENV1(unpacklsl)
1419 IWMMXT_OP_ENV1(unpackhsb)
1420 IWMMXT_OP_ENV1(unpackhsw)
1421 IWMMXT_OP_ENV1(unpackhsl)
1423 IWMMXT_OP_ENV_SIZE(cmpeq)
1424 IWMMXT_OP_ENV_SIZE(cmpgtu)
1425 IWMMXT_OP_ENV_SIZE(cmpgts)
1427 IWMMXT_OP_ENV_SIZE(mins)
1428 IWMMXT_OP_ENV_SIZE(minu)
1429 IWMMXT_OP_ENV_SIZE(maxs)
1430 IWMMXT_OP_ENV_SIZE(maxu)
1432 IWMMXT_OP_ENV_SIZE(subn)
1433 IWMMXT_OP_ENV_SIZE(addn)
1434 IWMMXT_OP_ENV_SIZE(subu)
1435 IWMMXT_OP_ENV_SIZE(addu)
1436 IWMMXT_OP_ENV_SIZE(subs)
1437 IWMMXT_OP_ENV_SIZE(adds)
1439 IWMMXT_OP_ENV(avgb0)
1440 IWMMXT_OP_ENV(avgb1)
1441 IWMMXT_OP_ENV(avgw0)
1442 IWMMXT_OP_ENV(avgw1)
1444 IWMMXT_OP_ENV(packuw)
1445 IWMMXT_OP_ENV(packul)
1446 IWMMXT_OP_ENV(packuq)
1447 IWMMXT_OP_ENV(packsw)
1448 IWMMXT_OP_ENV(packsl)
1449 IWMMXT_OP_ENV(packsq)
1451 static void gen_op_iwmmxt_set_mup(void)
1453 TCGv_i32 tmp;
1454 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1455 tcg_gen_ori_i32(tmp, tmp, 2);
1456 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1459 static void gen_op_iwmmxt_set_cup(void)
1461 TCGv_i32 tmp;
1462 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1463 tcg_gen_ori_i32(tmp, tmp, 1);
1464 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1467 static void gen_op_iwmmxt_setpsr_nz(void)
1469 TCGv_i32 tmp = tcg_temp_new_i32();
1470 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1471 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1474 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1476 iwmmxt_load_reg(cpu_V1, rn);
1477 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1478 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1481 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1482 TCGv_i32 dest)
1484 int rd;
1485 uint32_t offset;
1486 TCGv_i32 tmp;
1488 rd = (insn >> 16) & 0xf;
1489 tmp = load_reg(s, rd);
1491 offset = (insn & 0xff) << ((insn >> 7) & 2);
1492 if (insn & (1 << 24)) {
1493 /* Pre indexed */
1494 if (insn & (1 << 23))
1495 tcg_gen_addi_i32(tmp, tmp, offset);
1496 else
1497 tcg_gen_addi_i32(tmp, tmp, -offset);
1498 tcg_gen_mov_i32(dest, tmp);
1499 if (insn & (1 << 21))
1500 store_reg(s, rd, tmp);
1501 else
1502 tcg_temp_free_i32(tmp);
1503 } else if (insn & (1 << 21)) {
1504 /* Post indexed */
1505 tcg_gen_mov_i32(dest, tmp);
1506 if (insn & (1 << 23))
1507 tcg_gen_addi_i32(tmp, tmp, offset);
1508 else
1509 tcg_gen_addi_i32(tmp, tmp, -offset);
1510 store_reg(s, rd, tmp);
1511 } else if (!(insn & (1 << 23)))
1512 return 1;
1513 return 0;
1516 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1518 int rd = (insn >> 0) & 0xf;
1519 TCGv_i32 tmp;
1521 if (insn & (1 << 8)) {
1522 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1523 return 1;
1524 } else {
1525 tmp = iwmmxt_load_creg(rd);
1527 } else {
1528 tmp = tcg_temp_new_i32();
1529 iwmmxt_load_reg(cpu_V0, rd);
1530 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1532 tcg_gen_andi_i32(tmp, tmp, mask);
1533 tcg_gen_mov_i32(dest, tmp);
1534 tcg_temp_free_i32(tmp);
1535 return 0;
1538 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1539 (ie. an undefined instruction). */
1540 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1542 int rd, wrd;
1543 int rdhi, rdlo, rd0, rd1, i;
1544 TCGv_i32 addr;
1545 TCGv_i32 tmp, tmp2, tmp3;
1547 if ((insn & 0x0e000e00) == 0x0c000000) {
1548 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1549 wrd = insn & 0xf;
1550 rdlo = (insn >> 12) & 0xf;
1551 rdhi = (insn >> 16) & 0xf;
1552 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1553 iwmmxt_load_reg(cpu_V0, wrd);
1554 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1555 tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
1556 } else { /* TMCRR */
1557 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1558 iwmmxt_store_reg(cpu_V0, wrd);
1559 gen_op_iwmmxt_set_mup();
1561 return 0;
1564 wrd = (insn >> 12) & 0xf;
1565 addr = tcg_temp_new_i32();
1566 if (gen_iwmmxt_address(s, insn, addr)) {
1567 tcg_temp_free_i32(addr);
1568 return 1;
1570 if (insn & ARM_CP_RW_BIT) {
1571 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1572 tmp = tcg_temp_new_i32();
1573 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1574 iwmmxt_store_creg(wrd, tmp);
1575 } else {
1576 i = 1;
1577 if (insn & (1 << 8)) {
1578 if (insn & (1 << 22)) { /* WLDRD */
1579 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1580 i = 0;
1581 } else { /* WLDRW wRd */
1582 tmp = tcg_temp_new_i32();
1583 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1585 } else {
1586 tmp = tcg_temp_new_i32();
1587 if (insn & (1 << 22)) { /* WLDRH */
1588 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1589 } else { /* WLDRB */
1590 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1593 if (i) {
1594 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1595 tcg_temp_free_i32(tmp);
1597 gen_op_iwmmxt_movq_wRn_M0(wrd);
1599 } else {
1600 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1601 tmp = iwmmxt_load_creg(wrd);
1602 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1603 } else {
1604 gen_op_iwmmxt_movq_M0_wRn(wrd);
1605 tmp = tcg_temp_new_i32();
1606 if (insn & (1 << 8)) {
1607 if (insn & (1 << 22)) { /* WSTRD */
1608 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1609 } else { /* WSTRW wRd */
1610 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1611 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1613 } else {
1614 if (insn & (1 << 22)) { /* WSTRH */
1615 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1616 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1617 } else { /* WSTRB */
1618 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1619 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1623 tcg_temp_free_i32(tmp);
1625 tcg_temp_free_i32(addr);
1626 return 0;
1629 if ((insn & 0x0f000000) != 0x0e000000)
1630 return 1;
1632 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1633 case 0x000: /* WOR */
1634 wrd = (insn >> 12) & 0xf;
1635 rd0 = (insn >> 0) & 0xf;
1636 rd1 = (insn >> 16) & 0xf;
1637 gen_op_iwmmxt_movq_M0_wRn(rd0);
1638 gen_op_iwmmxt_orq_M0_wRn(rd1);
1639 gen_op_iwmmxt_setpsr_nz();
1640 gen_op_iwmmxt_movq_wRn_M0(wrd);
1641 gen_op_iwmmxt_set_mup();
1642 gen_op_iwmmxt_set_cup();
1643 break;
1644 case 0x011: /* TMCR */
1645 if (insn & 0xf)
1646 return 1;
1647 rd = (insn >> 12) & 0xf;
1648 wrd = (insn >> 16) & 0xf;
1649 switch (wrd) {
1650 case ARM_IWMMXT_wCID:
1651 case ARM_IWMMXT_wCASF:
1652 break;
1653 case ARM_IWMMXT_wCon:
1654 gen_op_iwmmxt_set_cup();
1655 /* Fall through. */
1656 case ARM_IWMMXT_wCSSF:
1657 tmp = iwmmxt_load_creg(wrd);
1658 tmp2 = load_reg(s, rd);
1659 tcg_gen_andc_i32(tmp, tmp, tmp2);
1660 tcg_temp_free_i32(tmp2);
1661 iwmmxt_store_creg(wrd, tmp);
1662 break;
1663 case ARM_IWMMXT_wCGR0:
1664 case ARM_IWMMXT_wCGR1:
1665 case ARM_IWMMXT_wCGR2:
1666 case ARM_IWMMXT_wCGR3:
1667 gen_op_iwmmxt_set_cup();
1668 tmp = load_reg(s, rd);
1669 iwmmxt_store_creg(wrd, tmp);
1670 break;
1671 default:
1672 return 1;
1674 break;
1675 case 0x100: /* WXOR */
1676 wrd = (insn >> 12) & 0xf;
1677 rd0 = (insn >> 0) & 0xf;
1678 rd1 = (insn >> 16) & 0xf;
1679 gen_op_iwmmxt_movq_M0_wRn(rd0);
1680 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1681 gen_op_iwmmxt_setpsr_nz();
1682 gen_op_iwmmxt_movq_wRn_M0(wrd);
1683 gen_op_iwmmxt_set_mup();
1684 gen_op_iwmmxt_set_cup();
1685 break;
1686 case 0x111: /* TMRC */
1687 if (insn & 0xf)
1688 return 1;
1689 rd = (insn >> 12) & 0xf;
1690 wrd = (insn >> 16) & 0xf;
1691 tmp = iwmmxt_load_creg(wrd);
1692 store_reg(s, rd, tmp);
1693 break;
1694 case 0x300: /* WANDN */
1695 wrd = (insn >> 12) & 0xf;
1696 rd0 = (insn >> 0) & 0xf;
1697 rd1 = (insn >> 16) & 0xf;
1698 gen_op_iwmmxt_movq_M0_wRn(rd0);
1699 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1700 gen_op_iwmmxt_andq_M0_wRn(rd1);
1701 gen_op_iwmmxt_setpsr_nz();
1702 gen_op_iwmmxt_movq_wRn_M0(wrd);
1703 gen_op_iwmmxt_set_mup();
1704 gen_op_iwmmxt_set_cup();
1705 break;
1706 case 0x200: /* WAND */
1707 wrd = (insn >> 12) & 0xf;
1708 rd0 = (insn >> 0) & 0xf;
1709 rd1 = (insn >> 16) & 0xf;
1710 gen_op_iwmmxt_movq_M0_wRn(rd0);
1711 gen_op_iwmmxt_andq_M0_wRn(rd1);
1712 gen_op_iwmmxt_setpsr_nz();
1713 gen_op_iwmmxt_movq_wRn_M0(wrd);
1714 gen_op_iwmmxt_set_mup();
1715 gen_op_iwmmxt_set_cup();
1716 break;
1717 case 0x810: case 0xa10: /* WMADD */
1718 wrd = (insn >> 12) & 0xf;
1719 rd0 = (insn >> 0) & 0xf;
1720 rd1 = (insn >> 16) & 0xf;
1721 gen_op_iwmmxt_movq_M0_wRn(rd0);
1722 if (insn & (1 << 21))
1723 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1724 else
1725 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1726 gen_op_iwmmxt_movq_wRn_M0(wrd);
1727 gen_op_iwmmxt_set_mup();
1728 break;
1729 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1730 wrd = (insn >> 12) & 0xf;
1731 rd0 = (insn >> 16) & 0xf;
1732 rd1 = (insn >> 0) & 0xf;
1733 gen_op_iwmmxt_movq_M0_wRn(rd0);
1734 switch ((insn >> 22) & 3) {
1735 case 0:
1736 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1737 break;
1738 case 1:
1739 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1740 break;
1741 case 2:
1742 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1743 break;
1744 case 3:
1745 return 1;
1747 gen_op_iwmmxt_movq_wRn_M0(wrd);
1748 gen_op_iwmmxt_set_mup();
1749 gen_op_iwmmxt_set_cup();
1750 break;
1751 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1752 wrd = (insn >> 12) & 0xf;
1753 rd0 = (insn >> 16) & 0xf;
1754 rd1 = (insn >> 0) & 0xf;
1755 gen_op_iwmmxt_movq_M0_wRn(rd0);
1756 switch ((insn >> 22) & 3) {
1757 case 0:
1758 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1759 break;
1760 case 1:
1761 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1762 break;
1763 case 2:
1764 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1765 break;
1766 case 3:
1767 return 1;
1769 gen_op_iwmmxt_movq_wRn_M0(wrd);
1770 gen_op_iwmmxt_set_mup();
1771 gen_op_iwmmxt_set_cup();
1772 break;
1773 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1774 wrd = (insn >> 12) & 0xf;
1775 rd0 = (insn >> 16) & 0xf;
1776 rd1 = (insn >> 0) & 0xf;
1777 gen_op_iwmmxt_movq_M0_wRn(rd0);
1778 if (insn & (1 << 22))
1779 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1780 else
1781 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1782 if (!(insn & (1 << 20)))
1783 gen_op_iwmmxt_addl_M0_wRn(wrd);
1784 gen_op_iwmmxt_movq_wRn_M0(wrd);
1785 gen_op_iwmmxt_set_mup();
1786 break;
1787 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1788 wrd = (insn >> 12) & 0xf;
1789 rd0 = (insn >> 16) & 0xf;
1790 rd1 = (insn >> 0) & 0xf;
1791 gen_op_iwmmxt_movq_M0_wRn(rd0);
1792 if (insn & (1 << 21)) {
1793 if (insn & (1 << 20))
1794 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1795 else
1796 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1797 } else {
1798 if (insn & (1 << 20))
1799 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1800 else
1801 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1803 gen_op_iwmmxt_movq_wRn_M0(wrd);
1804 gen_op_iwmmxt_set_mup();
1805 break;
1806 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1807 wrd = (insn >> 12) & 0xf;
1808 rd0 = (insn >> 16) & 0xf;
1809 rd1 = (insn >> 0) & 0xf;
1810 gen_op_iwmmxt_movq_M0_wRn(rd0);
1811 if (insn & (1 << 21))
1812 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1813 else
1814 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1815 if (!(insn & (1 << 20))) {
1816 iwmmxt_load_reg(cpu_V1, wrd);
1817 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1819 gen_op_iwmmxt_movq_wRn_M0(wrd);
1820 gen_op_iwmmxt_set_mup();
1821 break;
1822 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1823 wrd = (insn >> 12) & 0xf;
1824 rd0 = (insn >> 16) & 0xf;
1825 rd1 = (insn >> 0) & 0xf;
1826 gen_op_iwmmxt_movq_M0_wRn(rd0);
1827 switch ((insn >> 22) & 3) {
1828 case 0:
1829 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1830 break;
1831 case 1:
1832 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1833 break;
1834 case 2:
1835 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1836 break;
1837 case 3:
1838 return 1;
1840 gen_op_iwmmxt_movq_wRn_M0(wrd);
1841 gen_op_iwmmxt_set_mup();
1842 gen_op_iwmmxt_set_cup();
1843 break;
1844 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1845 wrd = (insn >> 12) & 0xf;
1846 rd0 = (insn >> 16) & 0xf;
1847 rd1 = (insn >> 0) & 0xf;
1848 gen_op_iwmmxt_movq_M0_wRn(rd0);
1849 if (insn & (1 << 22)) {
1850 if (insn & (1 << 20))
1851 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1852 else
1853 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1854 } else {
1855 if (insn & (1 << 20))
1856 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1857 else
1858 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1860 gen_op_iwmmxt_movq_wRn_M0(wrd);
1861 gen_op_iwmmxt_set_mup();
1862 gen_op_iwmmxt_set_cup();
1863 break;
1864 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1865 wrd = (insn >> 12) & 0xf;
1866 rd0 = (insn >> 16) & 0xf;
1867 rd1 = (insn >> 0) & 0xf;
1868 gen_op_iwmmxt_movq_M0_wRn(rd0);
1869 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1870 tcg_gen_andi_i32(tmp, tmp, 7);
1871 iwmmxt_load_reg(cpu_V1, rd1);
1872 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1873 tcg_temp_free_i32(tmp);
1874 gen_op_iwmmxt_movq_wRn_M0(wrd);
1875 gen_op_iwmmxt_set_mup();
1876 break;
1877 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1878 if (((insn >> 6) & 3) == 3)
1879 return 1;
1880 rd = (insn >> 12) & 0xf;
1881 wrd = (insn >> 16) & 0xf;
1882 tmp = load_reg(s, rd);
1883 gen_op_iwmmxt_movq_M0_wRn(wrd);
1884 switch ((insn >> 6) & 3) {
1885 case 0:
1886 tmp2 = tcg_const_i32(0xff);
1887 tmp3 = tcg_const_i32((insn & 7) << 3);
1888 break;
1889 case 1:
1890 tmp2 = tcg_const_i32(0xffff);
1891 tmp3 = tcg_const_i32((insn & 3) << 4);
1892 break;
1893 case 2:
1894 tmp2 = tcg_const_i32(0xffffffff);
1895 tmp3 = tcg_const_i32((insn & 1) << 5);
1896 break;
1897 default:
1898 tmp2 = NULL;
1899 tmp3 = NULL;
1901 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1902 tcg_temp_free_i32(tmp3);
1903 tcg_temp_free_i32(tmp2);
1904 tcg_temp_free_i32(tmp);
1905 gen_op_iwmmxt_movq_wRn_M0(wrd);
1906 gen_op_iwmmxt_set_mup();
1907 break;
1908 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1909 rd = (insn >> 12) & 0xf;
1910 wrd = (insn >> 16) & 0xf;
1911 if (rd == 15 || ((insn >> 22) & 3) == 3)
1912 return 1;
1913 gen_op_iwmmxt_movq_M0_wRn(wrd);
1914 tmp = tcg_temp_new_i32();
1915 switch ((insn >> 22) & 3) {
1916 case 0:
1917 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1918 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1919 if (insn & 8) {
1920 tcg_gen_ext8s_i32(tmp, tmp);
1921 } else {
1922 tcg_gen_andi_i32(tmp, tmp, 0xff);
1924 break;
1925 case 1:
1926 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1927 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1928 if (insn & 8) {
1929 tcg_gen_ext16s_i32(tmp, tmp);
1930 } else {
1931 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1933 break;
1934 case 2:
1935 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1936 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1937 break;
1939 store_reg(s, rd, tmp);
1940 break;
1941 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1942 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1943 return 1;
1944 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1945 switch ((insn >> 22) & 3) {
1946 case 0:
1947 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1948 break;
1949 case 1:
1950 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1951 break;
1952 case 2:
1953 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1954 break;
1956 tcg_gen_shli_i32(tmp, tmp, 28);
1957 gen_set_nzcv(tmp);
1958 tcg_temp_free_i32(tmp);
1959 break;
1960 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1961 if (((insn >> 6) & 3) == 3)
1962 return 1;
1963 rd = (insn >> 12) & 0xf;
1964 wrd = (insn >> 16) & 0xf;
1965 tmp = load_reg(s, rd);
1966 switch ((insn >> 6) & 3) {
1967 case 0:
1968 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1969 break;
1970 case 1:
1971 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1972 break;
1973 case 2:
1974 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1975 break;
1977 tcg_temp_free_i32(tmp);
1978 gen_op_iwmmxt_movq_wRn_M0(wrd);
1979 gen_op_iwmmxt_set_mup();
1980 break;
1981 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1982 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1983 return 1;
1984 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1985 tmp2 = tcg_temp_new_i32();
1986 tcg_gen_mov_i32(tmp2, tmp);
1987 switch ((insn >> 22) & 3) {
1988 case 0:
1989 for (i = 0; i < 7; i ++) {
1990 tcg_gen_shli_i32(tmp2, tmp2, 4);
1991 tcg_gen_and_i32(tmp, tmp, tmp2);
1993 break;
1994 case 1:
1995 for (i = 0; i < 3; i ++) {
1996 tcg_gen_shli_i32(tmp2, tmp2, 8);
1997 tcg_gen_and_i32(tmp, tmp, tmp2);
1999 break;
2000 case 2:
2001 tcg_gen_shli_i32(tmp2, tmp2, 16);
2002 tcg_gen_and_i32(tmp, tmp, tmp2);
2003 break;
2005 gen_set_nzcv(tmp);
2006 tcg_temp_free_i32(tmp2);
2007 tcg_temp_free_i32(tmp);
2008 break;
2009 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2010 wrd = (insn >> 12) & 0xf;
2011 rd0 = (insn >> 16) & 0xf;
2012 gen_op_iwmmxt_movq_M0_wRn(rd0);
2013 switch ((insn >> 22) & 3) {
2014 case 0:
2015 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2016 break;
2017 case 1:
2018 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2019 break;
2020 case 2:
2021 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2022 break;
2023 case 3:
2024 return 1;
2026 gen_op_iwmmxt_movq_wRn_M0(wrd);
2027 gen_op_iwmmxt_set_mup();
2028 break;
2029 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2030 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2031 return 1;
2032 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2033 tmp2 = tcg_temp_new_i32();
2034 tcg_gen_mov_i32(tmp2, tmp);
2035 switch ((insn >> 22) & 3) {
2036 case 0:
2037 for (i = 0; i < 7; i ++) {
2038 tcg_gen_shli_i32(tmp2, tmp2, 4);
2039 tcg_gen_or_i32(tmp, tmp, tmp2);
2041 break;
2042 case 1:
2043 for (i = 0; i < 3; i ++) {
2044 tcg_gen_shli_i32(tmp2, tmp2, 8);
2045 tcg_gen_or_i32(tmp, tmp, tmp2);
2047 break;
2048 case 2:
2049 tcg_gen_shli_i32(tmp2, tmp2, 16);
2050 tcg_gen_or_i32(tmp, tmp, tmp2);
2051 break;
2053 gen_set_nzcv(tmp);
2054 tcg_temp_free_i32(tmp2);
2055 tcg_temp_free_i32(tmp);
2056 break;
2057 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2058 rd = (insn >> 12) & 0xf;
2059 rd0 = (insn >> 16) & 0xf;
2060 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2061 return 1;
2062 gen_op_iwmmxt_movq_M0_wRn(rd0);
2063 tmp = tcg_temp_new_i32();
2064 switch ((insn >> 22) & 3) {
2065 case 0:
2066 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2067 break;
2068 case 1:
2069 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2070 break;
2071 case 2:
2072 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2073 break;
2075 store_reg(s, rd, tmp);
2076 break;
2077 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2078 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2079 wrd = (insn >> 12) & 0xf;
2080 rd0 = (insn >> 16) & 0xf;
2081 rd1 = (insn >> 0) & 0xf;
2082 gen_op_iwmmxt_movq_M0_wRn(rd0);
2083 switch ((insn >> 22) & 3) {
2084 case 0:
2085 if (insn & (1 << 21))
2086 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2087 else
2088 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2089 break;
2090 case 1:
2091 if (insn & (1 << 21))
2092 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2093 else
2094 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2095 break;
2096 case 2:
2097 if (insn & (1 << 21))
2098 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2099 else
2100 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2101 break;
2102 case 3:
2103 return 1;
2105 gen_op_iwmmxt_movq_wRn_M0(wrd);
2106 gen_op_iwmmxt_set_mup();
2107 gen_op_iwmmxt_set_cup();
2108 break;
2109 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2110 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2111 wrd = (insn >> 12) & 0xf;
2112 rd0 = (insn >> 16) & 0xf;
2113 gen_op_iwmmxt_movq_M0_wRn(rd0);
2114 switch ((insn >> 22) & 3) {
2115 case 0:
2116 if (insn & (1 << 21))
2117 gen_op_iwmmxt_unpacklsb_M0();
2118 else
2119 gen_op_iwmmxt_unpacklub_M0();
2120 break;
2121 case 1:
2122 if (insn & (1 << 21))
2123 gen_op_iwmmxt_unpacklsw_M0();
2124 else
2125 gen_op_iwmmxt_unpackluw_M0();
2126 break;
2127 case 2:
2128 if (insn & (1 << 21))
2129 gen_op_iwmmxt_unpacklsl_M0();
2130 else
2131 gen_op_iwmmxt_unpacklul_M0();
2132 break;
2133 case 3:
2134 return 1;
2136 gen_op_iwmmxt_movq_wRn_M0(wrd);
2137 gen_op_iwmmxt_set_mup();
2138 gen_op_iwmmxt_set_cup();
2139 break;
2140 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2141 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2142 wrd = (insn >> 12) & 0xf;
2143 rd0 = (insn >> 16) & 0xf;
2144 gen_op_iwmmxt_movq_M0_wRn(rd0);
2145 switch ((insn >> 22) & 3) {
2146 case 0:
2147 if (insn & (1 << 21))
2148 gen_op_iwmmxt_unpackhsb_M0();
2149 else
2150 gen_op_iwmmxt_unpackhub_M0();
2151 break;
2152 case 1:
2153 if (insn & (1 << 21))
2154 gen_op_iwmmxt_unpackhsw_M0();
2155 else
2156 gen_op_iwmmxt_unpackhuw_M0();
2157 break;
2158 case 2:
2159 if (insn & (1 << 21))
2160 gen_op_iwmmxt_unpackhsl_M0();
2161 else
2162 gen_op_iwmmxt_unpackhul_M0();
2163 break;
2164 case 3:
2165 return 1;
2167 gen_op_iwmmxt_movq_wRn_M0(wrd);
2168 gen_op_iwmmxt_set_mup();
2169 gen_op_iwmmxt_set_cup();
2170 break;
2171 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2172 case 0x214: case 0x614: case 0xa14: case 0xe14:
2173 if (((insn >> 22) & 3) == 0)
2174 return 1;
2175 wrd = (insn >> 12) & 0xf;
2176 rd0 = (insn >> 16) & 0xf;
2177 gen_op_iwmmxt_movq_M0_wRn(rd0);
2178 tmp = tcg_temp_new_i32();
2179 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2180 tcg_temp_free_i32(tmp);
2181 return 1;
2183 switch ((insn >> 22) & 3) {
2184 case 1:
2185 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2186 break;
2187 case 2:
2188 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2189 break;
2190 case 3:
2191 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2192 break;
2194 tcg_temp_free_i32(tmp);
2195 gen_op_iwmmxt_movq_wRn_M0(wrd);
2196 gen_op_iwmmxt_set_mup();
2197 gen_op_iwmmxt_set_cup();
2198 break;
2199 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2200 case 0x014: case 0x414: case 0x814: case 0xc14:
2201 if (((insn >> 22) & 3) == 0)
2202 return 1;
2203 wrd = (insn >> 12) & 0xf;
2204 rd0 = (insn >> 16) & 0xf;
2205 gen_op_iwmmxt_movq_M0_wRn(rd0);
2206 tmp = tcg_temp_new_i32();
2207 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2208 tcg_temp_free_i32(tmp);
2209 return 1;
2211 switch ((insn >> 22) & 3) {
2212 case 1:
2213 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2214 break;
2215 case 2:
2216 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2217 break;
2218 case 3:
2219 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2220 break;
2222 tcg_temp_free_i32(tmp);
2223 gen_op_iwmmxt_movq_wRn_M0(wrd);
2224 gen_op_iwmmxt_set_mup();
2225 gen_op_iwmmxt_set_cup();
2226 break;
2227 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2228 case 0x114: case 0x514: case 0x914: case 0xd14:
2229 if (((insn >> 22) & 3) == 0)
2230 return 1;
2231 wrd = (insn >> 12) & 0xf;
2232 rd0 = (insn >> 16) & 0xf;
2233 gen_op_iwmmxt_movq_M0_wRn(rd0);
2234 tmp = tcg_temp_new_i32();
2235 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2236 tcg_temp_free_i32(tmp);
2237 return 1;
2239 switch ((insn >> 22) & 3) {
2240 case 1:
2241 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2242 break;
2243 case 2:
2244 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2245 break;
2246 case 3:
2247 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2248 break;
2250 tcg_temp_free_i32(tmp);
2251 gen_op_iwmmxt_movq_wRn_M0(wrd);
2252 gen_op_iwmmxt_set_mup();
2253 gen_op_iwmmxt_set_cup();
2254 break;
2255 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2256 case 0x314: case 0x714: case 0xb14: case 0xf14:
2257 if (((insn >> 22) & 3) == 0)
2258 return 1;
2259 wrd = (insn >> 12) & 0xf;
2260 rd0 = (insn >> 16) & 0xf;
2261 gen_op_iwmmxt_movq_M0_wRn(rd0);
2262 tmp = tcg_temp_new_i32();
2263 switch ((insn >> 22) & 3) {
2264 case 1:
2265 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2266 tcg_temp_free_i32(tmp);
2267 return 1;
2269 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2270 break;
2271 case 2:
2272 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2273 tcg_temp_free_i32(tmp);
2274 return 1;
2276 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2277 break;
2278 case 3:
2279 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2280 tcg_temp_free_i32(tmp);
2281 return 1;
2283 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2284 break;
2286 tcg_temp_free_i32(tmp);
2287 gen_op_iwmmxt_movq_wRn_M0(wrd);
2288 gen_op_iwmmxt_set_mup();
2289 gen_op_iwmmxt_set_cup();
2290 break;
2291 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2292 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2293 wrd = (insn >> 12) & 0xf;
2294 rd0 = (insn >> 16) & 0xf;
2295 rd1 = (insn >> 0) & 0xf;
2296 gen_op_iwmmxt_movq_M0_wRn(rd0);
2297 switch ((insn >> 22) & 3) {
2298 case 0:
2299 if (insn & (1 << 21))
2300 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2301 else
2302 gen_op_iwmmxt_minub_M0_wRn(rd1);
2303 break;
2304 case 1:
2305 if (insn & (1 << 21))
2306 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2307 else
2308 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2309 break;
2310 case 2:
2311 if (insn & (1 << 21))
2312 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2313 else
2314 gen_op_iwmmxt_minul_M0_wRn(rd1);
2315 break;
2316 case 3:
2317 return 1;
2319 gen_op_iwmmxt_movq_wRn_M0(wrd);
2320 gen_op_iwmmxt_set_mup();
2321 break;
2322 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2323 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2324 wrd = (insn >> 12) & 0xf;
2325 rd0 = (insn >> 16) & 0xf;
2326 rd1 = (insn >> 0) & 0xf;
2327 gen_op_iwmmxt_movq_M0_wRn(rd0);
2328 switch ((insn >> 22) & 3) {
2329 case 0:
2330 if (insn & (1 << 21))
2331 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2332 else
2333 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2334 break;
2335 case 1:
2336 if (insn & (1 << 21))
2337 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2338 else
2339 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2340 break;
2341 case 2:
2342 if (insn & (1 << 21))
2343 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2344 else
2345 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2346 break;
2347 case 3:
2348 return 1;
2350 gen_op_iwmmxt_movq_wRn_M0(wrd);
2351 gen_op_iwmmxt_set_mup();
2352 break;
2353 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2354 case 0x402: case 0x502: case 0x602: case 0x702:
2355 wrd = (insn >> 12) & 0xf;
2356 rd0 = (insn >> 16) & 0xf;
2357 rd1 = (insn >> 0) & 0xf;
2358 gen_op_iwmmxt_movq_M0_wRn(rd0);
2359 tmp = tcg_const_i32((insn >> 20) & 3);
2360 iwmmxt_load_reg(cpu_V1, rd1);
2361 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2362 tcg_temp_free_i32(tmp);
2363 gen_op_iwmmxt_movq_wRn_M0(wrd);
2364 gen_op_iwmmxt_set_mup();
2365 break;
2366 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2367 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2368 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2369 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2370 wrd = (insn >> 12) & 0xf;
2371 rd0 = (insn >> 16) & 0xf;
2372 rd1 = (insn >> 0) & 0xf;
2373 gen_op_iwmmxt_movq_M0_wRn(rd0);
2374 switch ((insn >> 20) & 0xf) {
2375 case 0x0:
2376 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2377 break;
2378 case 0x1:
2379 gen_op_iwmmxt_subub_M0_wRn(rd1);
2380 break;
2381 case 0x3:
2382 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2383 break;
2384 case 0x4:
2385 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2386 break;
2387 case 0x5:
2388 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2389 break;
2390 case 0x7:
2391 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2392 break;
2393 case 0x8:
2394 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2395 break;
2396 case 0x9:
2397 gen_op_iwmmxt_subul_M0_wRn(rd1);
2398 break;
2399 case 0xb:
2400 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2401 break;
2402 default:
2403 return 1;
2405 gen_op_iwmmxt_movq_wRn_M0(wrd);
2406 gen_op_iwmmxt_set_mup();
2407 gen_op_iwmmxt_set_cup();
2408 break;
2409 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2410 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2411 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2412 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2413 wrd = (insn >> 12) & 0xf;
2414 rd0 = (insn >> 16) & 0xf;
2415 gen_op_iwmmxt_movq_M0_wRn(rd0);
2416 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2417 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2418 tcg_temp_free_i32(tmp);
2419 gen_op_iwmmxt_movq_wRn_M0(wrd);
2420 gen_op_iwmmxt_set_mup();
2421 gen_op_iwmmxt_set_cup();
2422 break;
2423 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2424 case 0x418: case 0x518: case 0x618: case 0x718:
2425 case 0x818: case 0x918: case 0xa18: case 0xb18:
2426 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2427 wrd = (insn >> 12) & 0xf;
2428 rd0 = (insn >> 16) & 0xf;
2429 rd1 = (insn >> 0) & 0xf;
2430 gen_op_iwmmxt_movq_M0_wRn(rd0);
2431 switch ((insn >> 20) & 0xf) {
2432 case 0x0:
2433 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2434 break;
2435 case 0x1:
2436 gen_op_iwmmxt_addub_M0_wRn(rd1);
2437 break;
2438 case 0x3:
2439 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2440 break;
2441 case 0x4:
2442 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2443 break;
2444 case 0x5:
2445 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2446 break;
2447 case 0x7:
2448 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2449 break;
2450 case 0x8:
2451 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2452 break;
2453 case 0x9:
2454 gen_op_iwmmxt_addul_M0_wRn(rd1);
2455 break;
2456 case 0xb:
2457 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2458 break;
2459 default:
2460 return 1;
2462 gen_op_iwmmxt_movq_wRn_M0(wrd);
2463 gen_op_iwmmxt_set_mup();
2464 gen_op_iwmmxt_set_cup();
2465 break;
2466 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2467 case 0x408: case 0x508: case 0x608: case 0x708:
2468 case 0x808: case 0x908: case 0xa08: case 0xb08:
2469 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2470 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2471 return 1;
2472 wrd = (insn >> 12) & 0xf;
2473 rd0 = (insn >> 16) & 0xf;
2474 rd1 = (insn >> 0) & 0xf;
2475 gen_op_iwmmxt_movq_M0_wRn(rd0);
2476 switch ((insn >> 22) & 3) {
2477 case 1:
2478 if (insn & (1 << 21))
2479 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2480 else
2481 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2482 break;
2483 case 2:
2484 if (insn & (1 << 21))
2485 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2486 else
2487 gen_op_iwmmxt_packul_M0_wRn(rd1);
2488 break;
2489 case 3:
2490 if (insn & (1 << 21))
2491 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2492 else
2493 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2494 break;
2496 gen_op_iwmmxt_movq_wRn_M0(wrd);
2497 gen_op_iwmmxt_set_mup();
2498 gen_op_iwmmxt_set_cup();
2499 break;
2500 case 0x201: case 0x203: case 0x205: case 0x207:
2501 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2502 case 0x211: case 0x213: case 0x215: case 0x217:
2503 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2504 wrd = (insn >> 5) & 0xf;
2505 rd0 = (insn >> 12) & 0xf;
2506 rd1 = (insn >> 0) & 0xf;
2507 if (rd0 == 0xf || rd1 == 0xf)
2508 return 1;
2509 gen_op_iwmmxt_movq_M0_wRn(wrd);
2510 tmp = load_reg(s, rd0);
2511 tmp2 = load_reg(s, rd1);
2512 switch ((insn >> 16) & 0xf) {
2513 case 0x0: /* TMIA */
2514 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2515 break;
2516 case 0x8: /* TMIAPH */
2517 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2518 break;
2519 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2520 if (insn & (1 << 16))
2521 tcg_gen_shri_i32(tmp, tmp, 16);
2522 if (insn & (1 << 17))
2523 tcg_gen_shri_i32(tmp2, tmp2, 16);
2524 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2525 break;
2526 default:
2527 tcg_temp_free_i32(tmp2);
2528 tcg_temp_free_i32(tmp);
2529 return 1;
2531 tcg_temp_free_i32(tmp2);
2532 tcg_temp_free_i32(tmp);
2533 gen_op_iwmmxt_movq_wRn_M0(wrd);
2534 gen_op_iwmmxt_set_mup();
2535 break;
2536 default:
2537 return 1;
2540 return 0;
2543 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2544 (i.e. an undefined instruction). */
2545 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2547 int acc, rd0, rd1, rdhi, rdlo;
2548 TCGv_i32 tmp, tmp2;
2550 if ((insn & 0x0ff00f10) == 0x0e200010) {
2551 /* Multiply with Internal Accumulate Format */
2552 rd0 = (insn >> 12) & 0xf;
2553 rd1 = insn & 0xf;
2554 acc = (insn >> 5) & 7;
2556 if (acc != 0)
2557 return 1;
2559 tmp = load_reg(s, rd0);
2560 tmp2 = load_reg(s, rd1);
2561 switch ((insn >> 16) & 0xf) {
2562 case 0x0: /* MIA */
2563 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2564 break;
2565 case 0x8: /* MIAPH */
2566 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2567 break;
2568 case 0xc: /* MIABB */
2569 case 0xd: /* MIABT */
2570 case 0xe: /* MIATB */
2571 case 0xf: /* MIATT */
2572 if (insn & (1 << 16))
2573 tcg_gen_shri_i32(tmp, tmp, 16);
2574 if (insn & (1 << 17))
2575 tcg_gen_shri_i32(tmp2, tmp2, 16);
2576 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2577 break;
2578 default:
2579 return 1;
2581 tcg_temp_free_i32(tmp2);
2582 tcg_temp_free_i32(tmp);
2584 gen_op_iwmmxt_movq_wRn_M0(acc);
2585 return 0;
2588 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2589 /* Internal Accumulator Access Format */
2590 rdhi = (insn >> 16) & 0xf;
2591 rdlo = (insn >> 12) & 0xf;
2592 acc = insn & 7;
2594 if (acc != 0)
2595 return 1;
2597 if (insn & ARM_CP_RW_BIT) { /* MRA */
2598 iwmmxt_load_reg(cpu_V0, acc);
2599 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2600 tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
2601 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2602 } else { /* MAR */
2603 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2604 iwmmxt_store_reg(cpu_V0, acc);
2606 return 0;
2609 return 1;
2610 }
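/*
 * Note on the Internal Accumulator Access Format above (editorial
 * comment): acc0 is 40 bits wide, so MRA returns the low 32 bits in
 * RdLo and bits [39:32] in RdHi; the (1 << (40 - 32)) - 1 mask (0xff)
 * discards the unused high bits of RdHi.  MAR simply concatenates
 * RdLo/RdHi back into the 64-bit register.
 */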
2612 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2613 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2614 if (dc_isar_feature(aa32_simd_r32, s)) { \
2615 reg = (((insn) >> (bigbit)) & 0x0f) \
2616 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2617 } else { \
2618 if (insn & (1 << (smallbit))) \
2619 return 1; \
2620 reg = ((insn) >> (bigbit)) & 0x0f; \
2621 }} while (0)
2623 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2624 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2625 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
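/*
 * Editorial note on VFP_DREG: a D-register number is split across the
 * instruction, with the low four bits in the "bigbit" field and bit 4
 * in the single "smallbit".  E.g. for VFP_DREG_D, d17 (0b10001) has
 * insn[15:12] = 0001 and insn[22] = 1.  Without the 32-D-register
 * feature (aa32_simd_r32) a set high bit makes the macro return 1,
 * which callers treat as an illegal encoding.
 */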
2627 static void gen_neon_dup_low16(TCGv_i32 var)
2628 {
2629 TCGv_i32 tmp = tcg_temp_new_i32();
2630 tcg_gen_ext16u_i32(var, var);
2631 tcg_gen_shli_i32(tmp, var, 16);
2632 tcg_gen_or_i32(var, var, tmp);
2633 tcg_temp_free_i32(tmp);
2634 }
2636 static void gen_neon_dup_high16(TCGv_i32 var)
2637 {
2638 TCGv_i32 tmp = tcg_temp_new_i32();
2639 tcg_gen_andi_i32(var, var, 0xffff0000);
2640 tcg_gen_shri_i32(tmp, var, 16);
2641 tcg_gen_or_i32(var, var, tmp);
2642 tcg_temp_free_i32(tmp);
2643 }
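/*
 * Worked example for the two helpers above: with var = 0xABCD1234,
 * gen_neon_dup_low16() produces 0x12341234 (low halfword duplicated)
 * and gen_neon_dup_high16() produces 0xABCDABCD (high halfword
 * duplicated).
 */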
2645 static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
2646 {
2647 #ifndef CONFIG_USER_ONLY
2648 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
2649 ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
2650 #else
2651 return true;
2652 #endif
2653 }
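/*
 * Editorial note: use_goto_tb() only permits direct TB chaining when the
 * destination lies on the same guest page as either the start of this TB
 * or the current instruction; broadly, this keeps page-granularity TB
 * invalidation able to unlink everything that might jump into a modified
 * page.  User-mode emulation has no such constraint and always chains.
 */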
2655 static void gen_goto_ptr(void)
2656 {
2657 tcg_gen_lookup_and_goto_ptr();
2658 }
2660 /* This will end the TB but doesn't guarantee we'll return to
2661 * cpu_loop_exec. Any live exit_requests will be processed as we
2662 * enter the next TB.
2663 */
2664 static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
2665 {
2666 if (use_goto_tb(s, dest)) {
2667 tcg_gen_goto_tb(n);
2668 gen_set_pc_im(s, dest);
2669 tcg_gen_exit_tb(s->base.tb, n);
2670 } else {
2671 gen_set_pc_im(s, dest);
2672 gen_goto_ptr();
2673 }
2674 s->base.is_jmp = DISAS_NORETURN;
2675 }
2677 static inline void gen_jmp (DisasContext *s, uint32_t dest)
2678 {
2679 if (unlikely(is_singlestepping(s))) {
2680 /* An indirect jump so that we still trigger the debug exception. */
2681 gen_set_pc_im(s, dest);
2682 s->base.is_jmp = DISAS_JUMP;
2683 } else {
2684 gen_goto_tb(s, 0, dest);
2685 }
2686 }
2688 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
2689 {
2690 if (x)
2691 tcg_gen_sari_i32(t0, t0, 16);
2692 else
2693 gen_sxth(t0);
2694 if (y)
2695 tcg_gen_sari_i32(t1, t1, 16);
2696 else
2697 gen_sxth(t1);
2698 tcg_gen_mul_i32(t0, t0, t1);
2699 }
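/*
 * Editorial note: gen_mulxy() is the 16x16->32 signed multiply used by
 * the SMULxy-style DSP multiplies; x and y pick the top halfword
 * (arithmetic shift right by 16) or the bottom halfword (sign extend)
 * of each operand before the 32-bit multiply.
 */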
2701 /* Return the mask of PSR bits set by a MSR instruction. */
2702 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
2704 uint32_t mask = 0;
2706 if (flags & (1 << 0)) {
2707 mask |= 0xff;
2709 if (flags & (1 << 1)) {
2710 mask |= 0xff00;
2712 if (flags & (1 << 2)) {
2713 mask |= 0xff0000;
2715 if (flags & (1 << 3)) {
2716 mask |= 0xff000000;
2719 /* Mask out undefined and reserved bits. */
2720 mask &= aarch32_cpsr_valid_mask(s->features, s->isar);
2722 /* Mask out execution state. */
2723 if (!spsr) {
2724 mask &= ~CPSR_EXEC;
2727 /* Mask out privileged bits. */
2728 if (IS_USER(s)) {
2729 mask &= CPSR_USER;
2731 return mask;
2732 }
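/*
 * Editorial note: the msr_mask() flag bits correspond to the MSR field
 * specifier letters: bit 0 = 'c' (control, PSR[7:0]), bit 1 = 'x'
 * (extension, PSR[15:8]), bit 2 = 's' (status, PSR[23:16]) and
 * bit 3 = 'f' (flags, PSR[31:24]); the result is then trimmed to the
 * bits that are actually writable in the current context.
 */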
2734 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
2735 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
2737 TCGv_i32 tmp;
2738 if (spsr) {
2739 /* ??? This is also undefined in system mode. */
2740 if (IS_USER(s))
2741 return 1;
2743 tmp = load_cpu_field(spsr);
2744 tcg_gen_andi_i32(tmp, tmp, ~mask);
2745 tcg_gen_andi_i32(t0, t0, mask);
2746 tcg_gen_or_i32(tmp, tmp, t0);
2747 store_cpu_field(tmp, spsr);
2748 } else {
2749 gen_set_cpsr(t0, mask);
2751 tcg_temp_free_i32(t0);
2752 gen_lookup_tb(s);
2753 return 0;
2756 /* Returns nonzero if access to the PSR is not permitted. */
2757 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
2759 TCGv_i32 tmp;
2760 tmp = tcg_temp_new_i32();
2761 tcg_gen_movi_i32(tmp, val);
2762 return gen_set_psr(s, mask, spsr, tmp);
2765 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
2766 int *tgtmode, int *regno)
2768 /* Decode the r and sysm fields of MSR/MRS banked accesses into
2769 * the target mode and register number, and identify the various
2770 * unpredictable cases.
2771 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
2772 * + executed in user mode
2773 * + using R15 as the src/dest register
2774 * + accessing an unimplemented register
2775 * + accessing a register that's inaccessible at current PL/security state*
2776 * + accessing a register that you could access with a different insn
2777 * We choose to UNDEF in all these cases.
2778 * Since we don't know which of the various AArch32 modes we are in
2779 * we have to defer some checks to runtime.
2780 * Accesses to Monitor mode registers from Secure EL1 (which implies
2781 * that EL3 is AArch64) must trap to EL3.
2783 * If the access checks fail this function will emit code to take
2784 * an exception and return false. Otherwise it will return true,
2785 * and set *tgtmode and *regno appropriately.
2787 int exc_target = default_exception_el(s);
2789 /* These instructions are present only in ARMv8, or in ARMv7 with the
2790 * Virtualization Extensions.
2792 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
2793 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
2794 goto undef;
2797 if (IS_USER(s) || rn == 15) {
2798 goto undef;
2801 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
2802 * of registers into (r, sysm).
2804 if (r) {
2805 /* SPSRs for other modes */
2806 switch (sysm) {
2807 case 0xe: /* SPSR_fiq */
2808 *tgtmode = ARM_CPU_MODE_FIQ;
2809 break;
2810 case 0x10: /* SPSR_irq */
2811 *tgtmode = ARM_CPU_MODE_IRQ;
2812 break;
2813 case 0x12: /* SPSR_svc */
2814 *tgtmode = ARM_CPU_MODE_SVC;
2815 break;
2816 case 0x14: /* SPSR_abt */
2817 *tgtmode = ARM_CPU_MODE_ABT;
2818 break;
2819 case 0x16: /* SPSR_und */
2820 *tgtmode = ARM_CPU_MODE_UND;
2821 break;
2822 case 0x1c: /* SPSR_mon */
2823 *tgtmode = ARM_CPU_MODE_MON;
2824 break;
2825 case 0x1e: /* SPSR_hyp */
2826 *tgtmode = ARM_CPU_MODE_HYP;
2827 break;
2828 default: /* unallocated */
2829 goto undef;
2831 /* We arbitrarily assign SPSR a register number of 16. */
2832 *regno = 16;
2833 } else {
2834 /* general purpose registers for other modes */
2835 switch (sysm) {
2836 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
2837 *tgtmode = ARM_CPU_MODE_USR;
2838 *regno = sysm + 8;
2839 break;
2840 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
2841 *tgtmode = ARM_CPU_MODE_FIQ;
2842 *regno = sysm;
2843 break;
2844 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
2845 *tgtmode = ARM_CPU_MODE_IRQ;
2846 *regno = sysm & 1 ? 13 : 14;
2847 break;
2848 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
2849 *tgtmode = ARM_CPU_MODE_SVC;
2850 *regno = sysm & 1 ? 13 : 14;
2851 break;
2852 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
2853 *tgtmode = ARM_CPU_MODE_ABT;
2854 *regno = sysm & 1 ? 13 : 14;
2855 break;
2856 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
2857 *tgtmode = ARM_CPU_MODE_UND;
2858 *regno = sysm & 1 ? 13 : 14;
2859 break;
2860 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
2861 *tgtmode = ARM_CPU_MODE_MON;
2862 *regno = sysm & 1 ? 13 : 14;
2863 break;
2864 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
2865 *tgtmode = ARM_CPU_MODE_HYP;
2866 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
2867 *regno = sysm & 1 ? 13 : 17;
2868 break;
2869 default: /* unallocated */
2870 goto undef;
2874 /* Catch the 'accessing inaccessible register' cases we can detect
2875 * at translate time.
2877 switch (*tgtmode) {
2878 case ARM_CPU_MODE_MON:
2879 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
2880 goto undef;
2882 if (s->current_el == 1) {
2883 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
2884 * then accesses to Mon registers trap to EL3
2886 exc_target = 3;
2887 goto undef;
2889 break;
2890 case ARM_CPU_MODE_HYP:
2892 * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
2893 * (and so we can forbid accesses from EL2 or below). elr_hyp
2894 * can be accessed also from Hyp mode, so forbid accesses from
2895 * EL0 or EL1.
2897 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
2898 (s->current_el < 3 && *regno != 17)) {
2899 goto undef;
2901 break;
2902 default:
2903 break;
2906 return true;
2908 undef:
2909 /* If we get here then some access check did not pass */
2910 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
2911 syn_uncategorized(), exc_target);
2912 return false;
2913 }
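/*
 * Example encodings handled above (editorial comment): r=1, sysm=0x0e
 * selects SPSR_fiq, while r=0, sysm=0x00..0x06 selects r8_usr..r14_usr.
 * The synthetic register numbers 16 (SPSR) and 17 (ELR_hyp) are purely
 * internal conventions shared with the msr/mrs_banked helpers.
 */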
2915 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
2917 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
2918 int tgtmode = 0, regno = 0;
2920 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
2921 return;
2924 /* Sync state because msr_banked() can raise exceptions */
2925 gen_set_condexec(s);
2926 gen_set_pc_im(s, s->pc_curr);
2927 tcg_reg = load_reg(s, rn);
2928 tcg_tgtmode = tcg_const_i32(tgtmode);
2929 tcg_regno = tcg_const_i32(regno);
2930 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
2931 tcg_temp_free_i32(tcg_tgtmode);
2932 tcg_temp_free_i32(tcg_regno);
2933 tcg_temp_free_i32(tcg_reg);
2934 s->base.is_jmp = DISAS_UPDATE;
2937 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
2939 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
2940 int tgtmode = 0, regno = 0;
2942 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
2943 return;
2946 /* Sync state because mrs_banked() can raise exceptions */
2947 gen_set_condexec(s);
2948 gen_set_pc_im(s, s->pc_curr);
2949 tcg_reg = tcg_temp_new_i32();
2950 tcg_tgtmode = tcg_const_i32(tgtmode);
2951 tcg_regno = tcg_const_i32(regno);
2952 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
2953 tcg_temp_free_i32(tcg_tgtmode);
2954 tcg_temp_free_i32(tcg_regno);
2955 store_reg(s, rn, tcg_reg);
2956 s->base.is_jmp = DISAS_UPDATE;
2959 /* Store value to PC as for an exception return (i.e. don't
2960 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
2961 * will do the masking based on the new value of the Thumb bit.
2962 */
2963 static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
2965 tcg_gen_mov_i32(cpu_R[15], pc);
2966 tcg_temp_free_i32(pc);
2969 /* Generate a v6 exception return. Marks both values as dead. */
2970 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2972 store_pc_exc_ret(s, pc);
2973 /* The cpsr_write_eret helper will mask the low bits of PC
2974 * appropriately depending on the new Thumb bit, so it must
2975 * be called after storing the new PC.
2977 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
2978 gen_io_start();
2980 gen_helper_cpsr_write_eret(cpu_env, cpsr);
2981 tcg_temp_free_i32(cpsr);
2982 /* Must exit loop to check un-masked IRQs */
2983 s->base.is_jmp = DISAS_EXIT;
2986 /* Generate an old-style exception return. Marks pc as dead. */
2987 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
2989 gen_rfe(s, pc, load_cpu_field(spsr));
2992 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
2994 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
2996 switch (size) {
2997 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
2998 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
2999 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3000 default: abort();
3004 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
3006 switch (size) {
3007 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3008 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3009 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3010 default: return;
3014 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3015 #define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
3016 #define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
3017 #define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
3018 #define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
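/*
 * Editorial note on the #defines above: a 32-bit pairwise min/max over a
 * 64-bit input produces exactly one result per operand pair, so when the
 * caller passes the two elements of a pair as the two operands the
 * pairwise op degenerates to an ordinary elementwise smin/smax/umin/umax.
 */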
3020 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3021 switch ((size << 1) | u) { \
3022 case 0: \
3023 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3024 break; \
3025 case 1: \
3026 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3027 break; \
3028 case 2: \
3029 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3030 break; \
3031 case 3: \
3032 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3033 break; \
3034 case 4: \
3035 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3036 break; \
3037 case 5: \
3038 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3039 break; \
3040 default: return 1; \
3041 }} while (0)
3043 #define GEN_NEON_INTEGER_OP(name) do { \
3044 switch ((size << 1) | u) { \
3045 case 0: \
3046 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3047 break; \
3048 case 1: \
3049 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3050 break; \
3051 case 2: \
3052 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3053 break; \
3054 case 3: \
3055 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3056 break; \
3057 case 4: \
3058 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3059 break; \
3060 case 5: \
3061 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3062 break; \
3063 default: return 1; \
3064 }} while (0)
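/*
 * In both macros above the (size << 1) | u selector picks the element
 * type of the helper that is called:
 *   0 -> s8, 1 -> u8, 2 -> s16, 3 -> u16, 4 -> s32, 5 -> u32,
 * and any other value makes the enclosing function return 1 (UNDEF).
 */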
3066 static TCGv_i32 neon_load_scratch(int scratch)
3068 TCGv_i32 tmp = tcg_temp_new_i32();
3069 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3070 return tmp;
3073 static void neon_store_scratch(int scratch, TCGv_i32 var)
3075 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3076 tcg_temp_free_i32(var);
3079 static inline TCGv_i32 neon_get_scalar(int size, int reg)
3081 TCGv_i32 tmp;
3082 if (size == 1) {
3083 tmp = neon_load_reg(reg & 7, reg >> 4);
3084 if (reg & 8) {
3085 gen_neon_dup_high16(tmp);
3086 } else {
3087 gen_neon_dup_low16(tmp);
3089 } else {
3090 tmp = neon_load_reg(reg & 15, reg >> 4);
3092 return tmp;
3095 static int gen_neon_unzip(int rd, int rm, int size, int q)
3097 TCGv_ptr pd, pm;
3099 if (!q && size == 2) {
3100 return 1;
3102 pd = vfp_reg_ptr(true, rd);
3103 pm = vfp_reg_ptr(true, rm);
3104 if (q) {
3105 switch (size) {
3106 case 0:
3107 gen_helper_neon_qunzip8(pd, pm);
3108 break;
3109 case 1:
3110 gen_helper_neon_qunzip16(pd, pm);
3111 break;
3112 case 2:
3113 gen_helper_neon_qunzip32(pd, pm);
3114 break;
3115 default:
3116 abort();
3118 } else {
3119 switch (size) {
3120 case 0:
3121 gen_helper_neon_unzip8(pd, pm);
3122 break;
3123 case 1:
3124 gen_helper_neon_unzip16(pd, pm);
3125 break;
3126 default:
3127 abort();
3130 tcg_temp_free_ptr(pd);
3131 tcg_temp_free_ptr(pm);
3132 return 0;
3135 static int gen_neon_zip(int rd, int rm, int size, int q)
3137 TCGv_ptr pd, pm;
3139 if (!q && size == 2) {
3140 return 1;
3142 pd = vfp_reg_ptr(true, rd);
3143 pm = vfp_reg_ptr(true, rm);
3144 if (q) {
3145 switch (size) {
3146 case 0:
3147 gen_helper_neon_qzip8(pd, pm);
3148 break;
3149 case 1:
3150 gen_helper_neon_qzip16(pd, pm);
3151 break;
3152 case 2:
3153 gen_helper_neon_qzip32(pd, pm);
3154 break;
3155 default:
3156 abort();
3158 } else {
3159 switch (size) {
3160 case 0:
3161 gen_helper_neon_zip8(pd, pm);
3162 break;
3163 case 1:
3164 gen_helper_neon_zip16(pd, pm);
3165 break;
3166 default:
3167 abort();
3170 tcg_temp_free_ptr(pd);
3171 tcg_temp_free_ptr(pm);
3172 return 0;
3175 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
3177 TCGv_i32 rd, tmp;
3179 rd = tcg_temp_new_i32();
3180 tmp = tcg_temp_new_i32();
3182 tcg_gen_shli_i32(rd, t0, 8);
3183 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3184 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3185 tcg_gen_or_i32(rd, rd, tmp);
3187 tcg_gen_shri_i32(t1, t1, 8);
3188 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3189 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3190 tcg_gen_or_i32(t1, t1, tmp);
3191 tcg_gen_mov_i32(t0, rd);
3193 tcg_temp_free_i32(tmp);
3194 tcg_temp_free_i32(rd);
3197 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
3199 TCGv_i32 rd, tmp;
3201 rd = tcg_temp_new_i32();
3202 tmp = tcg_temp_new_i32();
3204 tcg_gen_shli_i32(rd, t0, 16);
3205 tcg_gen_andi_i32(tmp, t1, 0xffff);
3206 tcg_gen_or_i32(rd, rd, tmp);
3207 tcg_gen_shri_i32(t1, t1, 16);
3208 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3209 tcg_gen_or_i32(t1, t1, tmp);
3210 tcg_gen_mov_i32(t0, rd);
3212 tcg_temp_free_i32(tmp);
3213 tcg_temp_free_i32(rd);
3216 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
3218 switch (size) {
3219 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3220 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3221 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
3222 default: abort();
3226 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
3228 switch (size) {
3229 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3230 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3231 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3232 default: abort();
3236 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
3238 switch (size) {
3239 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3240 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3241 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3242 default: abort();
3246 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
3248 switch (size) {
3249 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
3250 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
3251 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
3252 default: abort();
3256 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
3257 int q, int u)
3259 if (q) {
3260 if (u) {
3261 switch (size) {
3262 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3263 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3264 default: abort();
3266 } else {
3267 switch (size) {
3268 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3269 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3270 default: abort();
3273 } else {
3274 if (u) {
3275 switch (size) {
3276 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
3277 case 2: gen_ushl_i32(var, var, shift); break;
3278 default: abort();
3280 } else {
3281 switch (size) {
3282 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3283 case 2: gen_sshl_i32(var, var, shift); break;
3284 default: abort();
3290 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
3292 if (u) {
3293 switch (size) {
3294 case 0: gen_helper_neon_widen_u8(dest, src); break;
3295 case 1: gen_helper_neon_widen_u16(dest, src); break;
3296 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3297 default: abort();
3299 } else {
3300 switch (size) {
3301 case 0: gen_helper_neon_widen_s8(dest, src); break;
3302 case 1: gen_helper_neon_widen_s16(dest, src); break;
3303 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3304 default: abort();
3307 tcg_temp_free_i32(src);
3310 static inline void gen_neon_addl(int size)
3312 switch (size) {
3313 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3314 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3315 case 2: tcg_gen_add_i64(CPU_V001); break;
3316 default: abort();
3320 static inline void gen_neon_subl(int size)
3322 switch (size) {
3323 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
3324 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
3325 case 2: tcg_gen_sub_i64(CPU_V001); break;
3326 default: abort();
3330 static inline void gen_neon_negl(TCGv_i64 var, int size)
3332 switch (size) {
3333 case 0: gen_helper_neon_negl_u16(var, var); break;
3334 case 1: gen_helper_neon_negl_u32(var, var); break;
3335 case 2:
3336 tcg_gen_neg_i64(var, var);
3337 break;
3338 default: abort();
3342 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
3344 switch (size) {
3345 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
3346 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
3347 default: abort();
3351 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
3352 int size, int u)
3354 TCGv_i64 tmp;
3356 switch ((size << 1) | u) {
3357 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
3358 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
3359 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
3360 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
3361 case 4:
3362 tmp = gen_muls_i64_i32(a, b);
3363 tcg_gen_mov_i64(dest, tmp);
3364 tcg_temp_free_i64(tmp);
3365 break;
3366 case 5:
3367 tmp = gen_mulu_i64_i32(a, b);
3368 tcg_gen_mov_i64(dest, tmp);
3369 tcg_temp_free_i64(tmp);
3370 break;
3371 default: abort();
3374 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters;
3375 free them here. */
3376 if (size < 2) {
3377 tcg_temp_free_i32(a);
3378 tcg_temp_free_i32(b);
3382 static void gen_neon_narrow_op(int op, int u, int size,
3383 TCGv_i32 dest, TCGv_i64 src)
3385 if (op) {
3386 if (u) {
3387 gen_neon_unarrow_sats(size, dest, src);
3388 } else {
3389 gen_neon_narrow(size, dest, src);
3391 } else {
3392 if (u) {
3393 gen_neon_narrow_satu(size, dest, src);
3394 } else {
3395 gen_neon_narrow_sats(size, dest, src);
3400 /* Symbolic constants for op fields for Neon 3-register same-length.
3401 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
3402 * table A7-9.
3404 #define NEON_3R_VHADD 0
3405 #define NEON_3R_VQADD 1
3406 #define NEON_3R_VRHADD 2
3407 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
3408 #define NEON_3R_VHSUB 4
3409 #define NEON_3R_VQSUB 5
3410 #define NEON_3R_VCGT 6
3411 #define NEON_3R_VCGE 7
3412 #define NEON_3R_VSHL 8
3413 #define NEON_3R_VQSHL 9
3414 #define NEON_3R_VRSHL 10
3415 #define NEON_3R_VQRSHL 11
3416 #define NEON_3R_VMAX 12
3417 #define NEON_3R_VMIN 13
3418 #define NEON_3R_VABD 14
3419 #define NEON_3R_VABA 15
3420 #define NEON_3R_VADD_VSUB 16
3421 #define NEON_3R_VTST_VCEQ 17
3422 #define NEON_3R_VML 18 /* VMLA, VMLS */
3423 #define NEON_3R_VMUL 19
3424 #define NEON_3R_VPMAX 20
3425 #define NEON_3R_VPMIN 21
3426 #define NEON_3R_VQDMULH_VQRDMULH 22
3427 #define NEON_3R_VPADD_VQRDMLAH 23
3428 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
3429 #define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
3430 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
3431 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
3432 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
3433 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
3434 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
3435 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
3437 static const uint8_t neon_3r_sizes[] = {
3438 [NEON_3R_VHADD] = 0x7,
3439 [NEON_3R_VQADD] = 0xf,
3440 [NEON_3R_VRHADD] = 0x7,
3441 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
3442 [NEON_3R_VHSUB] = 0x7,
3443 [NEON_3R_VQSUB] = 0xf,
3444 [NEON_3R_VCGT] = 0x7,
3445 [NEON_3R_VCGE] = 0x7,
3446 [NEON_3R_VSHL] = 0xf,
3447 [NEON_3R_VQSHL] = 0xf,
3448 [NEON_3R_VRSHL] = 0xf,
3449 [NEON_3R_VQRSHL] = 0xf,
3450 [NEON_3R_VMAX] = 0x7,
3451 [NEON_3R_VMIN] = 0x7,
3452 [NEON_3R_VABD] = 0x7,
3453 [NEON_3R_VABA] = 0x7,
3454 [NEON_3R_VADD_VSUB] = 0xf,
3455 [NEON_3R_VTST_VCEQ] = 0x7,
3456 [NEON_3R_VML] = 0x7,
3457 [NEON_3R_VMUL] = 0x7,
3458 [NEON_3R_VPMAX] = 0x7,
3459 [NEON_3R_VPMIN] = 0x7,
3460 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
3461 [NEON_3R_VPADD_VQRDMLAH] = 0x7,
3462 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
3463 [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
3464 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
3465 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
3466 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
3467 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
3468 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
3469 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
3470 };
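/*
 * Editorial note: as with neon_2rm_sizes below, each entry is a bitmap
 * of permitted size values: bit n set means size n is allowed, so 0x7
 * covers the 8/16/32-bit element sizes, 0xf also allows the 64-bit case,
 * and 0x5 allows only sizes 0 and 2 because size bit 1 encodes part of
 * the operation instead.
 */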
3472 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
3473 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
3474 * table A7-13.
3476 #define NEON_2RM_VREV64 0
3477 #define NEON_2RM_VREV32 1
3478 #define NEON_2RM_VREV16 2
3479 #define NEON_2RM_VPADDL 4
3480 #define NEON_2RM_VPADDL_U 5
3481 #define NEON_2RM_AESE 6 /* Includes AESD */
3482 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
3483 #define NEON_2RM_VCLS 8
3484 #define NEON_2RM_VCLZ 9
3485 #define NEON_2RM_VCNT 10
3486 #define NEON_2RM_VMVN 11
3487 #define NEON_2RM_VPADAL 12
3488 #define NEON_2RM_VPADAL_U 13
3489 #define NEON_2RM_VQABS 14
3490 #define NEON_2RM_VQNEG 15
3491 #define NEON_2RM_VCGT0 16
3492 #define NEON_2RM_VCGE0 17
3493 #define NEON_2RM_VCEQ0 18
3494 #define NEON_2RM_VCLE0 19
3495 #define NEON_2RM_VCLT0 20
3496 #define NEON_2RM_SHA1H 21
3497 #define NEON_2RM_VABS 22
3498 #define NEON_2RM_VNEG 23
3499 #define NEON_2RM_VCGT0_F 24
3500 #define NEON_2RM_VCGE0_F 25
3501 #define NEON_2RM_VCEQ0_F 26
3502 #define NEON_2RM_VCLE0_F 27
3503 #define NEON_2RM_VCLT0_F 28
3504 #define NEON_2RM_VABS_F 30
3505 #define NEON_2RM_VNEG_F 31
3506 #define NEON_2RM_VSWP 32
3507 #define NEON_2RM_VTRN 33
3508 #define NEON_2RM_VUZP 34
3509 #define NEON_2RM_VZIP 35
3510 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
3511 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
3512 #define NEON_2RM_VSHLL 38
3513 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
3514 #define NEON_2RM_VRINTN 40
3515 #define NEON_2RM_VRINTX 41
3516 #define NEON_2RM_VRINTA 42
3517 #define NEON_2RM_VRINTZ 43
3518 #define NEON_2RM_VCVT_F16_F32 44
3519 #define NEON_2RM_VRINTM 45
3520 #define NEON_2RM_VCVT_F32_F16 46
3521 #define NEON_2RM_VRINTP 47
3522 #define NEON_2RM_VCVTAU 48
3523 #define NEON_2RM_VCVTAS 49
3524 #define NEON_2RM_VCVTNU 50
3525 #define NEON_2RM_VCVTNS 51
3526 #define NEON_2RM_VCVTPU 52
3527 #define NEON_2RM_VCVTPS 53
3528 #define NEON_2RM_VCVTMU 54
3529 #define NEON_2RM_VCVTMS 55
3530 #define NEON_2RM_VRECPE 56
3531 #define NEON_2RM_VRSQRTE 57
3532 #define NEON_2RM_VRECPE_F 58
3533 #define NEON_2RM_VRSQRTE_F 59
3534 #define NEON_2RM_VCVT_FS 60
3535 #define NEON_2RM_VCVT_FU 61
3536 #define NEON_2RM_VCVT_SF 62
3537 #define NEON_2RM_VCVT_UF 63
3539 static bool neon_2rm_is_v8_op(int op)
3541 /* Return true if this neon 2reg-misc op is ARMv8 and up */
3542 switch (op) {
3543 case NEON_2RM_VRINTN:
3544 case NEON_2RM_VRINTA:
3545 case NEON_2RM_VRINTM:
3546 case NEON_2RM_VRINTP:
3547 case NEON_2RM_VRINTZ:
3548 case NEON_2RM_VRINTX:
3549 case NEON_2RM_VCVTAU:
3550 case NEON_2RM_VCVTAS:
3551 case NEON_2RM_VCVTNU:
3552 case NEON_2RM_VCVTNS:
3553 case NEON_2RM_VCVTPU:
3554 case NEON_2RM_VCVTPS:
3555 case NEON_2RM_VCVTMU:
3556 case NEON_2RM_VCVTMS:
3557 return true;
3558 default:
3559 return false;
3563 /* Each entry in this array has bit n set if the insn allows
3564 * size value n (otherwise it will UNDEF). Since unallocated
3565 * op values will have no bits set they always UNDEF.
3567 static const uint8_t neon_2rm_sizes[] = {
3568 [NEON_2RM_VREV64] = 0x7,
3569 [NEON_2RM_VREV32] = 0x3,
3570 [NEON_2RM_VREV16] = 0x1,
3571 [NEON_2RM_VPADDL] = 0x7,
3572 [NEON_2RM_VPADDL_U] = 0x7,
3573 [NEON_2RM_AESE] = 0x1,
3574 [NEON_2RM_AESMC] = 0x1,
3575 [NEON_2RM_VCLS] = 0x7,
3576 [NEON_2RM_VCLZ] = 0x7,
3577 [NEON_2RM_VCNT] = 0x1,
3578 [NEON_2RM_VMVN] = 0x1,
3579 [NEON_2RM_VPADAL] = 0x7,
3580 [NEON_2RM_VPADAL_U] = 0x7,
3581 [NEON_2RM_VQABS] = 0x7,
3582 [NEON_2RM_VQNEG] = 0x7,
3583 [NEON_2RM_VCGT0] = 0x7,
3584 [NEON_2RM_VCGE0] = 0x7,
3585 [NEON_2RM_VCEQ0] = 0x7,
3586 [NEON_2RM_VCLE0] = 0x7,
3587 [NEON_2RM_VCLT0] = 0x7,
3588 [NEON_2RM_SHA1H] = 0x4,
3589 [NEON_2RM_VABS] = 0x7,
3590 [NEON_2RM_VNEG] = 0x7,
3591 [NEON_2RM_VCGT0_F] = 0x4,
3592 [NEON_2RM_VCGE0_F] = 0x4,
3593 [NEON_2RM_VCEQ0_F] = 0x4,
3594 [NEON_2RM_VCLE0_F] = 0x4,
3595 [NEON_2RM_VCLT0_F] = 0x4,
3596 [NEON_2RM_VABS_F] = 0x4,
3597 [NEON_2RM_VNEG_F] = 0x4,
3598 [NEON_2RM_VSWP] = 0x1,
3599 [NEON_2RM_VTRN] = 0x7,
3600 [NEON_2RM_VUZP] = 0x7,
3601 [NEON_2RM_VZIP] = 0x7,
3602 [NEON_2RM_VMOVN] = 0x7,
3603 [NEON_2RM_VQMOVN] = 0x7,
3604 [NEON_2RM_VSHLL] = 0x7,
3605 [NEON_2RM_SHA1SU1] = 0x4,
3606 [NEON_2RM_VRINTN] = 0x4,
3607 [NEON_2RM_VRINTX] = 0x4,
3608 [NEON_2RM_VRINTA] = 0x4,
3609 [NEON_2RM_VRINTZ] = 0x4,
3610 [NEON_2RM_VCVT_F16_F32] = 0x2,
3611 [NEON_2RM_VRINTM] = 0x4,
3612 [NEON_2RM_VCVT_F32_F16] = 0x2,
3613 [NEON_2RM_VRINTP] = 0x4,
3614 [NEON_2RM_VCVTAU] = 0x4,
3615 [NEON_2RM_VCVTAS] = 0x4,
3616 [NEON_2RM_VCVTNU] = 0x4,
3617 [NEON_2RM_VCVTNS] = 0x4,
3618 [NEON_2RM_VCVTPU] = 0x4,
3619 [NEON_2RM_VCVTPS] = 0x4,
3620 [NEON_2RM_VCVTMU] = 0x4,
3621 [NEON_2RM_VCVTMS] = 0x4,
3622 [NEON_2RM_VRECPE] = 0x4,
3623 [NEON_2RM_VRSQRTE] = 0x4,
3624 [NEON_2RM_VRECPE_F] = 0x4,
3625 [NEON_2RM_VRSQRTE_F] = 0x4,
3626 [NEON_2RM_VCVT_FS] = 0x4,
3627 [NEON_2RM_VCVT_FU] = 0x4,
3628 [NEON_2RM_VCVT_SF] = 0x4,
3629 [NEON_2RM_VCVT_UF] = 0x4,
3633 /* Expand v8.1 simd helper. */
3634 static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
3635 int q, int rd, int rn, int rm)
3637 if (dc_isar_feature(aa32_rdm, s)) {
3638 int opr_sz = (1 + q) * 8;
3639 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
3640 vfp_reg_offset(1, rn),
3641 vfp_reg_offset(1, rm), cpu_env,
3642 opr_sz, opr_sz, 0, fn);
3643 return 0;
3645 return 1;
3646 }
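/*
 * Editorial note: in do_v81_helper(), opr_sz is (1 + q) * 8 bytes,
 * i.e. 8 for a D-register (64-bit) operation and 16 for a Q-register
 * (128-bit) one; it is passed as both the operation size and the
 * maximum size of the gvec expansion.
 */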
3648 static void gen_ceq0_i32(TCGv_i32 d, TCGv_i32 a)
3650 tcg_gen_setcondi_i32(TCG_COND_EQ, d, a, 0);
3651 tcg_gen_neg_i32(d, d);
3654 static void gen_ceq0_i64(TCGv_i64 d, TCGv_i64 a)
3656 tcg_gen_setcondi_i64(TCG_COND_EQ, d, a, 0);
3657 tcg_gen_neg_i64(d, d);
3660 static void gen_ceq0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
3662 TCGv_vec zero = tcg_const_zeros_vec_matching(d);
3663 tcg_gen_cmp_vec(TCG_COND_EQ, vece, d, a, zero);
3664 tcg_temp_free_vec(zero);
3667 static const TCGOpcode vecop_list_cmp[] = {
3668 INDEX_op_cmp_vec, 0
3671 const GVecGen2 ceq0_op[4] = {
3672 { .fno = gen_helper_gvec_ceq0_b,
3673 .fniv = gen_ceq0_vec,
3674 .opt_opc = vecop_list_cmp,
3675 .vece = MO_8 },
3676 { .fno = gen_helper_gvec_ceq0_h,
3677 .fniv = gen_ceq0_vec,
3678 .opt_opc = vecop_list_cmp,
3679 .vece = MO_16 },
3680 { .fni4 = gen_ceq0_i32,
3681 .fniv = gen_ceq0_vec,
3682 .opt_opc = vecop_list_cmp,
3683 .vece = MO_32 },
3684 { .fni8 = gen_ceq0_i64,
3685 .fniv = gen_ceq0_vec,
3686 .opt_opc = vecop_list_cmp,
3687 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3688 .vece = MO_64 },
3689 };
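/*
 * Editorial note: ceq0_op and the cle0/cge0/clt0/cgt0 tables that follow
 * share one pattern: the fni4/fni8 expanders compute a setcond against
 * zero and negate it so "true" becomes an all-ones mask, fniv does the
 * same with a vector compare, and fno names an out-of-line helper used
 * as a fallback when the required vector ops are not available.
 */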
3691 static void gen_cle0_i32(TCGv_i32 d, TCGv_i32 a)
3693 tcg_gen_setcondi_i32(TCG_COND_LE, d, a, 0);
3694 tcg_gen_neg_i32(d, d);
3697 static void gen_cle0_i64(TCGv_i64 d, TCGv_i64 a)
3699 tcg_gen_setcondi_i64(TCG_COND_LE, d, a, 0);
3700 tcg_gen_neg_i64(d, d);
3703 static void gen_cle0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
3705 TCGv_vec zero = tcg_const_zeros_vec_matching(d);
3706 tcg_gen_cmp_vec(TCG_COND_LE, vece, d, a, zero);
3707 tcg_temp_free_vec(zero);
3710 const GVecGen2 cle0_op[4] = {
3711 { .fno = gen_helper_gvec_cle0_b,
3712 .fniv = gen_cle0_vec,
3713 .opt_opc = vecop_list_cmp,
3714 .vece = MO_8 },
3715 { .fno = gen_helper_gvec_cle0_h,
3716 .fniv = gen_cle0_vec,
3717 .opt_opc = vecop_list_cmp,
3718 .vece = MO_16 },
3719 { .fni4 = gen_cle0_i32,
3720 .fniv = gen_cle0_vec,
3721 .opt_opc = vecop_list_cmp,
3722 .vece = MO_32 },
3723 { .fni8 = gen_cle0_i64,
3724 .fniv = gen_cle0_vec,
3725 .opt_opc = vecop_list_cmp,
3726 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3727 .vece = MO_64 },
3730 static void gen_cge0_i32(TCGv_i32 d, TCGv_i32 a)
3732 tcg_gen_setcondi_i32(TCG_COND_GE, d, a, 0);
3733 tcg_gen_neg_i32(d, d);
3736 static void gen_cge0_i64(TCGv_i64 d, TCGv_i64 a)
3738 tcg_gen_setcondi_i64(TCG_COND_GE, d, a, 0);
3739 tcg_gen_neg_i64(d, d);
3742 static void gen_cge0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
3744 TCGv_vec zero = tcg_const_zeros_vec_matching(d);
3745 tcg_gen_cmp_vec(TCG_COND_GE, vece, d, a, zero);
3746 tcg_temp_free_vec(zero);
3749 const GVecGen2 cge0_op[4] = {
3750 { .fno = gen_helper_gvec_cge0_b,
3751 .fniv = gen_cge0_vec,
3752 .opt_opc = vecop_list_cmp,
3753 .vece = MO_8 },
3754 { .fno = gen_helper_gvec_cge0_h,
3755 .fniv = gen_cge0_vec,
3756 .opt_opc = vecop_list_cmp,
3757 .vece = MO_16 },
3758 { .fni4 = gen_cge0_i32,
3759 .fniv = gen_cge0_vec,
3760 .opt_opc = vecop_list_cmp,
3761 .vece = MO_32 },
3762 { .fni8 = gen_cge0_i64,
3763 .fniv = gen_cge0_vec,
3764 .opt_opc = vecop_list_cmp,
3765 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3766 .vece = MO_64 },
3769 static void gen_clt0_i32(TCGv_i32 d, TCGv_i32 a)
3771 tcg_gen_setcondi_i32(TCG_COND_LT, d, a, 0);
3772 tcg_gen_neg_i32(d, d);
3775 static void gen_clt0_i64(TCGv_i64 d, TCGv_i64 a)
3777 tcg_gen_setcondi_i64(TCG_COND_LT, d, a, 0);
3778 tcg_gen_neg_i64(d, d);
3781 static void gen_clt0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
3783 TCGv_vec zero = tcg_const_zeros_vec_matching(d);
3784 tcg_gen_cmp_vec(TCG_COND_LT, vece, d, a, zero);
3785 tcg_temp_free_vec(zero);
3788 const GVecGen2 clt0_op[4] = {
3789 { .fno = gen_helper_gvec_clt0_b,
3790 .fniv = gen_clt0_vec,
3791 .opt_opc = vecop_list_cmp,
3792 .vece = MO_8 },
3793 { .fno = gen_helper_gvec_clt0_h,
3794 .fniv = gen_clt0_vec,
3795 .opt_opc = vecop_list_cmp,
3796 .vece = MO_16 },
3797 { .fni4 = gen_clt0_i32,
3798 .fniv = gen_clt0_vec,
3799 .opt_opc = vecop_list_cmp,
3800 .vece = MO_32 },
3801 { .fni8 = gen_clt0_i64,
3802 .fniv = gen_clt0_vec,
3803 .opt_opc = vecop_list_cmp,
3804 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3805 .vece = MO_64 },
3808 static void gen_cgt0_i32(TCGv_i32 d, TCGv_i32 a)
3810 tcg_gen_setcondi_i32(TCG_COND_GT, d, a, 0);
3811 tcg_gen_neg_i32(d, d);
3814 static void gen_cgt0_i64(TCGv_i64 d, TCGv_i64 a)
3816 tcg_gen_setcondi_i64(TCG_COND_GT, d, a, 0);
3817 tcg_gen_neg_i64(d, d);
3820 static void gen_cgt0_vec(unsigned vece, TCGv_vec d, TCGv_vec a)
3822 TCGv_vec zero = tcg_const_zeros_vec_matching(d);
3823 tcg_gen_cmp_vec(TCG_COND_GT, vece, d, a, zero);
3824 tcg_temp_free_vec(zero);
3827 const GVecGen2 cgt0_op[4] = {
3828 { .fno = gen_helper_gvec_cgt0_b,
3829 .fniv = gen_cgt0_vec,
3830 .opt_opc = vecop_list_cmp,
3831 .vece = MO_8 },
3832 { .fno = gen_helper_gvec_cgt0_h,
3833 .fniv = gen_cgt0_vec,
3834 .opt_opc = vecop_list_cmp,
3835 .vece = MO_16 },
3836 { .fni4 = gen_cgt0_i32,
3837 .fniv = gen_cgt0_vec,
3838 .opt_opc = vecop_list_cmp,
3839 .vece = MO_32 },
3840 { .fni8 = gen_cgt0_i64,
3841 .fniv = gen_cgt0_vec,
3842 .opt_opc = vecop_list_cmp,
3843 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3844 .vece = MO_64 },
3847 static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3849 tcg_gen_vec_sar8i_i64(a, a, shift);
3850 tcg_gen_vec_add8_i64(d, d, a);
3853 static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3855 tcg_gen_vec_sar16i_i64(a, a, shift);
3856 tcg_gen_vec_add16_i64(d, d, a);
3859 static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
3861 tcg_gen_sari_i32(a, a, shift);
3862 tcg_gen_add_i32(d, d, a);
3865 static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3867 tcg_gen_sari_i64(a, a, shift);
3868 tcg_gen_add_i64(d, d, a);
3871 static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
3873 tcg_gen_sari_vec(vece, a, a, sh);
3874 tcg_gen_add_vec(vece, d, d, a);
3877 static const TCGOpcode vecop_list_ssra[] = {
3878 INDEX_op_sari_vec, INDEX_op_add_vec, 0
3881 const GVecGen2i ssra_op[4] = {
3882 { .fni8 = gen_ssra8_i64,
3883 .fniv = gen_ssra_vec,
3884 .load_dest = true,
3885 .opt_opc = vecop_list_ssra,
3886 .vece = MO_8 },
3887 { .fni8 = gen_ssra16_i64,
3888 .fniv = gen_ssra_vec,
3889 .load_dest = true,
3890 .opt_opc = vecop_list_ssra,
3891 .vece = MO_16 },
3892 { .fni4 = gen_ssra32_i32,
3893 .fniv = gen_ssra_vec,
3894 .load_dest = true,
3895 .opt_opc = vecop_list_ssra,
3896 .vece = MO_32 },
3897 { .fni8 = gen_ssra64_i64,
3898 .fniv = gen_ssra_vec,
3899 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3900 .opt_opc = vecop_list_ssra,
3901 .load_dest = true,
3902 .vece = MO_64 },
3903 };
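/*
 * Editorial note: ssra_op implements "signed shift right and accumulate",
 * d[i] += (signed)a[i] >> shift for each element.  E.g. for MO_8 with
 * shift 1, a source byte of 0xFE (-2) contributes -1 to the destination
 * byte.
 */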
3905 static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3907 tcg_gen_vec_shr8i_i64(a, a, shift);
3908 tcg_gen_vec_add8_i64(d, d, a);
3911 static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3913 tcg_gen_vec_shr16i_i64(a, a, shift);
3914 tcg_gen_vec_add16_i64(d, d, a);
3917 static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
3919 tcg_gen_shri_i32(a, a, shift);
3920 tcg_gen_add_i32(d, d, a);
3923 static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3925 tcg_gen_shri_i64(a, a, shift);
3926 tcg_gen_add_i64(d, d, a);
3929 static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
3931 tcg_gen_shri_vec(vece, a, a, sh);
3932 tcg_gen_add_vec(vece, d, d, a);
3935 static const TCGOpcode vecop_list_usra[] = {
3936 INDEX_op_shri_vec, INDEX_op_add_vec, 0
3939 const GVecGen2i usra_op[4] = {
3940 { .fni8 = gen_usra8_i64,
3941 .fniv = gen_usra_vec,
3942 .load_dest = true,
3943 .opt_opc = vecop_list_usra,
3944 .vece = MO_8, },
3945 { .fni8 = gen_usra16_i64,
3946 .fniv = gen_usra_vec,
3947 .load_dest = true,
3948 .opt_opc = vecop_list_usra,
3949 .vece = MO_16, },
3950 { .fni4 = gen_usra32_i32,
3951 .fniv = gen_usra_vec,
3952 .load_dest = true,
3953 .opt_opc = vecop_list_usra,
3954 .vece = MO_32, },
3955 { .fni8 = gen_usra64_i64,
3956 .fniv = gen_usra_vec,
3957 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3958 .load_dest = true,
3959 .opt_opc = vecop_list_usra,
3960 .vece = MO_64, },
3963 static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3965 uint64_t mask = dup_const(MO_8, 0xff >> shift);
3966 TCGv_i64 t = tcg_temp_new_i64();
3968 tcg_gen_shri_i64(t, a, shift);
3969 tcg_gen_andi_i64(t, t, mask);
3970 tcg_gen_andi_i64(d, d, ~mask);
3971 tcg_gen_or_i64(d, d, t);
3972 tcg_temp_free_i64(t);
3975 static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3977 uint64_t mask = dup_const(MO_16, 0xffff >> shift);
3978 TCGv_i64 t = tcg_temp_new_i64();
3980 tcg_gen_shri_i64(t, a, shift);
3981 tcg_gen_andi_i64(t, t, mask);
3982 tcg_gen_andi_i64(d, d, ~mask);
3983 tcg_gen_or_i64(d, d, t);
3984 tcg_temp_free_i64(t);
3987 static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
3989 tcg_gen_shri_i32(a, a, shift);
3990 tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
3993 static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3995 tcg_gen_shri_i64(a, a, shift);
3996 tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
3999 static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4001 if (sh == 0) {
4002 tcg_gen_mov_vec(d, a);
4003 } else {
4004 TCGv_vec t = tcg_temp_new_vec_matching(d);
4005 TCGv_vec m = tcg_temp_new_vec_matching(d);
4007 tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
4008 tcg_gen_shri_vec(vece, t, a, sh);
4009 tcg_gen_and_vec(vece, d, d, m);
4010 tcg_gen_or_vec(vece, d, d, t);
4012 tcg_temp_free_vec(t);
4013 tcg_temp_free_vec(m);
4017 static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 };
4019 const GVecGen2i sri_op[4] = {
4020 { .fni8 = gen_shr8_ins_i64,
4021 .fniv = gen_shr_ins_vec,
4022 .load_dest = true,
4023 .opt_opc = vecop_list_sri,
4024 .vece = MO_8 },
4025 { .fni8 = gen_shr16_ins_i64,
4026 .fniv = gen_shr_ins_vec,
4027 .load_dest = true,
4028 .opt_opc = vecop_list_sri,
4029 .vece = MO_16 },
4030 { .fni4 = gen_shr32_ins_i32,
4031 .fniv = gen_shr_ins_vec,
4032 .load_dest = true,
4033 .opt_opc = vecop_list_sri,
4034 .vece = MO_32 },
4035 { .fni8 = gen_shr64_ins_i64,
4036 .fniv = gen_shr_ins_vec,
4037 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4038 .load_dest = true,
4039 .opt_opc = vecop_list_sri,
4040 .vece = MO_64 },
4041 };
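/*
 * Editorial note: sri_op implements "shift right and insert": each
 * destination element keeps its top 'shift' bits and has (a[i] >> shift)
 * inserted into the remaining low bits, which is what the mask/andi/or
 * sequences above and the deposit at bit position 0 express.
 */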
4043 static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4045 uint64_t mask = dup_const(MO_8, 0xff << shift);
4046 TCGv_i64 t = tcg_temp_new_i64();
4048 tcg_gen_shli_i64(t, a, shift);
4049 tcg_gen_andi_i64(t, t, mask);
4050 tcg_gen_andi_i64(d, d, ~mask);
4051 tcg_gen_or_i64(d, d, t);
4052 tcg_temp_free_i64(t);
4055 static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4057 uint64_t mask = dup_const(MO_16, 0xffff << shift);
4058 TCGv_i64 t = tcg_temp_new_i64();
4060 tcg_gen_shli_i64(t, a, shift);
4061 tcg_gen_andi_i64(t, t, mask);
4062 tcg_gen_andi_i64(d, d, ~mask);
4063 tcg_gen_or_i64(d, d, t);
4064 tcg_temp_free_i64(t);
4067 static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
4069 tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
4072 static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4074 tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
4077 static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4079 if (sh == 0) {
4080 tcg_gen_mov_vec(d, a);
4081 } else {
4082 TCGv_vec t = tcg_temp_new_vec_matching(d);
4083 TCGv_vec m = tcg_temp_new_vec_matching(d);
4085 tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
4086 tcg_gen_shli_vec(vece, t, a, sh);
4087 tcg_gen_and_vec(vece, d, d, m);
4088 tcg_gen_or_vec(vece, d, d, t);
4090 tcg_temp_free_vec(t);
4091 tcg_temp_free_vec(m);
4095 static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 };
4097 const GVecGen2i sli_op[4] = {
4098 { .fni8 = gen_shl8_ins_i64,
4099 .fniv = gen_shl_ins_vec,
4100 .load_dest = true,
4101 .opt_opc = vecop_list_sli,
4102 .vece = MO_8 },
4103 { .fni8 = gen_shl16_ins_i64,
4104 .fniv = gen_shl_ins_vec,
4105 .load_dest = true,
4106 .opt_opc = vecop_list_sli,
4107 .vece = MO_16 },
4108 { .fni4 = gen_shl32_ins_i32,
4109 .fniv = gen_shl_ins_vec,
4110 .load_dest = true,
4111 .opt_opc = vecop_list_sli,
4112 .vece = MO_32 },
4113 { .fni8 = gen_shl64_ins_i64,
4114 .fniv = gen_shl_ins_vec,
4115 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4116 .load_dest = true,
4117 .opt_opc = vecop_list_sli,
4118 .vece = MO_64 },
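/*
 * Multiply-accumulate and multiply-subtract expanders for VMLA/VMLS:
 * the product is formed in the clobbered 'a' operand and then added to
 * or subtracted from the destination, which .load_dest in the tables
 * below makes available as an input.
 */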
4121 static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4123 gen_helper_neon_mul_u8(a, a, b);
4124 gen_helper_neon_add_u8(d, d, a);
4127 static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4129 gen_helper_neon_mul_u8(a, a, b);
4130 gen_helper_neon_sub_u8(d, d, a);
4133 static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4135 gen_helper_neon_mul_u16(a, a, b);
4136 gen_helper_neon_add_u16(d, d, a);
4139 static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4141 gen_helper_neon_mul_u16(a, a, b);
4142 gen_helper_neon_sub_u16(d, d, a);
4145 static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4147 tcg_gen_mul_i32(a, a, b);
4148 tcg_gen_add_i32(d, d, a);
4151 static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4153 tcg_gen_mul_i32(a, a, b);
4154 tcg_gen_sub_i32(d, d, a);
4157 static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4159 tcg_gen_mul_i64(a, a, b);
4160 tcg_gen_add_i64(d, d, a);
4163 static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4165 tcg_gen_mul_i64(a, a, b);
4166 tcg_gen_sub_i64(d, d, a);
4169 static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4171 tcg_gen_mul_vec(vece, a, a, b);
4172 tcg_gen_add_vec(vece, d, d, a);
4175 static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4177 tcg_gen_mul_vec(vece, a, a, b);
4178 tcg_gen_sub_vec(vece, d, d, a);
4181 /* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
4182 * these tables are shared with AArch64 which does support them.
4185 static const TCGOpcode vecop_list_mla[] = {
4186 INDEX_op_mul_vec, INDEX_op_add_vec, 0
4189 static const TCGOpcode vecop_list_mls[] = {
4190 INDEX_op_mul_vec, INDEX_op_sub_vec, 0
4193 const GVecGen3 mla_op[4] = {
4194 { .fni4 = gen_mla8_i32,
4195 .fniv = gen_mla_vec,
4196 .load_dest = true,
4197 .opt_opc = vecop_list_mla,
4198 .vece = MO_8 },
4199 { .fni4 = gen_mla16_i32,
4200 .fniv = gen_mla_vec,
4201 .load_dest = true,
4202 .opt_opc = vecop_list_mla,
4203 .vece = MO_16 },
4204 { .fni4 = gen_mla32_i32,
4205 .fniv = gen_mla_vec,
4206 .load_dest = true,
4207 .opt_opc = vecop_list_mla,
4208 .vece = MO_32 },
4209 { .fni8 = gen_mla64_i64,
4210 .fniv = gen_mla_vec,
4211 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4212 .load_dest = true,
4213 .opt_opc = vecop_list_mla,
4214 .vece = MO_64 },
4217 const GVecGen3 mls_op[4] = {
4218 { .fni4 = gen_mls8_i32,
4219 .fniv = gen_mls_vec,
4220 .load_dest = true,
4221 .opt_opc = vecop_list_mls,
4222 .vece = MO_8 },
4223 { .fni4 = gen_mls16_i32,
4224 .fniv = gen_mls_vec,
4225 .load_dest = true,
4226 .opt_opc = vecop_list_mls,
4227 .vece = MO_16 },
4228 { .fni4 = gen_mls32_i32,
4229 .fniv = gen_mls_vec,
4230 .load_dest = true,
4231 .opt_opc = vecop_list_mls,
4232 .vece = MO_32 },
4233 { .fni8 = gen_mls64_i64,
4234 .fniv = gen_mls_vec,
4235 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4236 .load_dest = true,
4237 .opt_opc = vecop_list_mls,
4238 .vece = MO_64 },
4241 /* CMTST : test is "if ((X & Y) != 0)". */
4242 static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4244 tcg_gen_and_i32(d, a, b);
4245 tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
4246 tcg_gen_neg_i32(d, d);
4249 void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4251 tcg_gen_and_i64(d, a, b);
4252 tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
4253 tcg_gen_neg_i64(d, d);
4256 static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4258 tcg_gen_and_vec(vece, d, a, b);
4259 tcg_gen_dupi_vec(vece, a, 0);
4260 tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
4263 static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 };
4265 const GVecGen3 cmtst_op[4] = {
4266 { .fni4 = gen_helper_neon_tst_u8,
4267 .fniv = gen_cmtst_vec,
4268 .opt_opc = vecop_list_cmtst,
4269 .vece = MO_8 },
4270 { .fni4 = gen_helper_neon_tst_u16,
4271 .fniv = gen_cmtst_vec,
4272 .opt_opc = vecop_list_cmtst,
4273 .vece = MO_16 },
4274 { .fni4 = gen_cmtst_i32,
4275 .fniv = gen_cmtst_vec,
4276 .opt_opc = vecop_list_cmtst,
4277 .vece = MO_32 },
4278 { .fni8 = gen_cmtst_i64,
4279 .fniv = gen_cmtst_vec,
4280 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4281 .opt_opc = vecop_list_cmtst,
4282 .vece = MO_64 },
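/*
 * VSHL/USHL by register: the shift count is the signed low byte of each
 * element of the shift operand; a positive count shifts left, a negative
 * count shifts right, and any count whose magnitude reaches the element
 * width yields zero.  The helpers below compute both candidate shifts and
 * then discard the out-of-range results.
 */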
4285 void gen_ushl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
4287 TCGv_i32 lval = tcg_temp_new_i32();
4288 TCGv_i32 rval = tcg_temp_new_i32();
4289 TCGv_i32 lsh = tcg_temp_new_i32();
4290 TCGv_i32 rsh = tcg_temp_new_i32();
4291 TCGv_i32 zero = tcg_const_i32(0);
4292 TCGv_i32 max = tcg_const_i32(32);
4295 * Rely on the TCG guarantee that out of range shifts produce
4296 * unspecified results, not undefined behaviour (i.e. no trap).
4297 * Discard out-of-range results after the fact.
4299 tcg_gen_ext8s_i32(lsh, shift);
4300 tcg_gen_neg_i32(rsh, lsh);
4301 tcg_gen_shl_i32(lval, src, lsh);
4302 tcg_gen_shr_i32(rval, src, rsh);
4303 tcg_gen_movcond_i32(TCG_COND_LTU, dst, lsh, max, lval, zero);
4304 tcg_gen_movcond_i32(TCG_COND_LTU, dst, rsh, max, rval, dst);
4306 tcg_temp_free_i32(lval);
4307 tcg_temp_free_i32(rval);
4308 tcg_temp_free_i32(lsh);
4309 tcg_temp_free_i32(rsh);
4310 tcg_temp_free_i32(zero);
4311 tcg_temp_free_i32(max);
4314 void gen_ushl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift)
4316 TCGv_i64 lval = tcg_temp_new_i64();
4317 TCGv_i64 rval = tcg_temp_new_i64();
4318 TCGv_i64 lsh = tcg_temp_new_i64();
4319 TCGv_i64 rsh = tcg_temp_new_i64();
4320 TCGv_i64 zero = tcg_const_i64(0);
4321 TCGv_i64 max = tcg_const_i64(64);
4324 * Rely on the TCG guarantee that out of range shifts produce
4325 * unspecified results, not undefined behaviour (i.e. no trap).
4326 * Discard out-of-range results after the fact.
4328 tcg_gen_ext8s_i64(lsh, shift);
4329 tcg_gen_neg_i64(rsh, lsh);
4330 tcg_gen_shl_i64(lval, src, lsh);
4331 tcg_gen_shr_i64(rval, src, rsh);
4332 tcg_gen_movcond_i64(TCG_COND_LTU, dst, lsh, max, lval, zero);
4333 tcg_gen_movcond_i64(TCG_COND_LTU, dst, rsh, max, rval, dst);
4335 tcg_temp_free_i64(lval);
4336 tcg_temp_free_i64(rval);
4337 tcg_temp_free_i64(lsh);
4338 tcg_temp_free_i64(rsh);
4339 tcg_temp_free_i64(zero);
4340 tcg_temp_free_i64(max);
4343 static void gen_ushl_vec(unsigned vece, TCGv_vec dst,
4344 TCGv_vec src, TCGv_vec shift)
4346 TCGv_vec lval = tcg_temp_new_vec_matching(dst);
4347 TCGv_vec rval = tcg_temp_new_vec_matching(dst);
4348 TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
4349 TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
4350 TCGv_vec msk, max;
4352 tcg_gen_neg_vec(vece, rsh, shift);
4353 if (vece == MO_8) {
4354 tcg_gen_mov_vec(lsh, shift);
4355 } else {
4356 msk = tcg_temp_new_vec_matching(dst);
4357 tcg_gen_dupi_vec(vece, msk, 0xff);
4358 tcg_gen_and_vec(vece, lsh, shift, msk);
4359 tcg_gen_and_vec(vece, rsh, rsh, msk);
4360 tcg_temp_free_vec(msk);
4364 * Rely on the TCG guarantee that out of range shifts produce
4365 * unspecified results, not undefined behaviour (i.e. no trap).
4366 * Discard out-of-range results after the fact.
4368 tcg_gen_shlv_vec(vece, lval, src, lsh);
4369 tcg_gen_shrv_vec(vece, rval, src, rsh);
4371 max = tcg_temp_new_vec_matching(dst);
4372 tcg_gen_dupi_vec(vece, max, 8 << vece);
4375 * The choice of LT (signed) and GEU (unsigned) is biased toward
4376 * the instructions of the x86_64 host. For MO_8, the whole byte
4377 * is significant so we must use an unsigned compare; otherwise we
4378 * have already masked to a byte and so a signed compare works.
4379 * Other tcg hosts have a full set of comparisons and do not care.
4381 if (vece == MO_8) {
4382 tcg_gen_cmp_vec(TCG_COND_GEU, vece, lsh, lsh, max);
4383 tcg_gen_cmp_vec(TCG_COND_GEU, vece, rsh, rsh, max);
4384 tcg_gen_andc_vec(vece, lval, lval, lsh);
4385 tcg_gen_andc_vec(vece, rval, rval, rsh);
4386 } else {
4387 tcg_gen_cmp_vec(TCG_COND_LT, vece, lsh, lsh, max);
4388 tcg_gen_cmp_vec(TCG_COND_LT, vece, rsh, rsh, max);
4389 tcg_gen_and_vec(vece, lval, lval, lsh);
4390 tcg_gen_and_vec(vece, rval, rval, rsh);
4392 tcg_gen_or_vec(vece, dst, lval, rval);
4394 tcg_temp_free_vec(max);
4395 tcg_temp_free_vec(lval);
4396 tcg_temp_free_vec(rval);
4397 tcg_temp_free_vec(lsh);
4398 tcg_temp_free_vec(rsh);
4401 static const TCGOpcode ushl_list[] = {
4402 INDEX_op_neg_vec, INDEX_op_shlv_vec,
4403 INDEX_op_shrv_vec, INDEX_op_cmp_vec, 0
4406 const GVecGen3 ushl_op[4] = {
4407 { .fniv = gen_ushl_vec,
4408 .fno = gen_helper_gvec_ushl_b,
4409 .opt_opc = ushl_list,
4410 .vece = MO_8 },
4411 { .fniv = gen_ushl_vec,
4412 .fno = gen_helper_gvec_ushl_h,
4413 .opt_opc = ushl_list,
4414 .vece = MO_16 },
4415 { .fni4 = gen_ushl_i32,
4416 .fniv = gen_ushl_vec,
4417 .opt_opc = ushl_list,
4418 .vece = MO_32 },
4419 { .fni8 = gen_ushl_i64,
4420 .fniv = gen_ushl_vec,
4421 .opt_opc = ushl_list,
4422 .vece = MO_64 },
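/*
 * Signed VSHL (SSHL) by register: as above, but negative counts perform
 * an arithmetic right shift.  rsh is clamped so that an over-large right
 * shift still produces all sign bits, while an over-large left shift is
 * forced to zero before the final select between the two results.
 */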
4425 void gen_sshl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
4427 TCGv_i32 lval = tcg_temp_new_i32();
4428 TCGv_i32 rval = tcg_temp_new_i32();
4429 TCGv_i32 lsh = tcg_temp_new_i32();
4430 TCGv_i32 rsh = tcg_temp_new_i32();
4431 TCGv_i32 zero = tcg_const_i32(0);
4432 TCGv_i32 max = tcg_const_i32(31);
4435 * Rely on the TCG guarantee that out of range shifts produce
4436 * unspecified results, not undefined behaviour (i.e. no trap).
4437 * Discard out-of-range results after the fact.
4439 tcg_gen_ext8s_i32(lsh, shift);
4440 tcg_gen_neg_i32(rsh, lsh);
4441 tcg_gen_shl_i32(lval, src, lsh);
4442 tcg_gen_umin_i32(rsh, rsh, max);
4443 tcg_gen_sar_i32(rval, src, rsh);
4444 tcg_gen_movcond_i32(TCG_COND_LEU, lval, lsh, max, lval, zero);
4445 tcg_gen_movcond_i32(TCG_COND_LT, dst, lsh, zero, rval, lval);
4447 tcg_temp_free_i32(lval);
4448 tcg_temp_free_i32(rval);
4449 tcg_temp_free_i32(lsh);
4450 tcg_temp_free_i32(rsh);
4451 tcg_temp_free_i32(zero);
4452 tcg_temp_free_i32(max);
4455 void gen_sshl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift)
4457 TCGv_i64 lval = tcg_temp_new_i64();
4458 TCGv_i64 rval = tcg_temp_new_i64();
4459 TCGv_i64 lsh = tcg_temp_new_i64();
4460 TCGv_i64 rsh = tcg_temp_new_i64();
4461 TCGv_i64 zero = tcg_const_i64(0);
4462 TCGv_i64 max = tcg_const_i64(63);
4465 * Rely on the TCG guarantee that out of range shifts produce
4466 * unspecified results, not undefined behaviour (i.e. no trap).
4467 * Discard out-of-range results after the fact.
4469 tcg_gen_ext8s_i64(lsh, shift);
4470 tcg_gen_neg_i64(rsh, lsh);
4471 tcg_gen_shl_i64(lval, src, lsh);
4472 tcg_gen_umin_i64(rsh, rsh, max);
4473 tcg_gen_sar_i64(rval, src, rsh);
4474 tcg_gen_movcond_i64(TCG_COND_LEU, lval, lsh, max, lval, zero);
4475 tcg_gen_movcond_i64(TCG_COND_LT, dst, lsh, zero, rval, lval);
4477 tcg_temp_free_i64(lval);
4478 tcg_temp_free_i64(rval);
4479 tcg_temp_free_i64(lsh);
4480 tcg_temp_free_i64(rsh);
4481 tcg_temp_free_i64(zero);
4482 tcg_temp_free_i64(max);
4485 static void gen_sshl_vec(unsigned vece, TCGv_vec dst,
4486 TCGv_vec src, TCGv_vec shift)
4488 TCGv_vec lval = tcg_temp_new_vec_matching(dst);
4489 TCGv_vec rval = tcg_temp_new_vec_matching(dst);
4490 TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
4491 TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
4492 TCGv_vec tmp = tcg_temp_new_vec_matching(dst);
4495 * Rely on the TCG guarantee that out of range shifts produce
4496 * unspecified results, not undefined behaviour (i.e. no trap).
4497 * Discard out-of-range results after the fact.
4499 tcg_gen_neg_vec(vece, rsh, shift);
4500 if (vece == MO_8) {
4501 tcg_gen_mov_vec(lsh, shift);
4502 } else {
4503 tcg_gen_dupi_vec(vece, tmp, 0xff);
4504 tcg_gen_and_vec(vece, lsh, shift, tmp);
4505 tcg_gen_and_vec(vece, rsh, rsh, tmp);
4508 /* Bound rsh so that an out-of-range right shift still yields the sign bits. */
4509 tcg_gen_dupi_vec(vece, tmp, (8 << vece) - 1);
4510 tcg_gen_umin_vec(vece, rsh, rsh, tmp);
4511 tcg_gen_cmp_vec(TCG_COND_GT, vece, tmp, lsh, tmp);
4513 tcg_gen_shlv_vec(vece, lval, src, lsh);
4514 tcg_gen_sarv_vec(vece, rval, src, rsh);
4516 /* Select in-bound left shift. */
4517 tcg_gen_andc_vec(vece, lval, lval, tmp);
4519 /* Select between left and right shift. */
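    /*
     * For MO_8 the count occupies the whole element, so a signed compare
     * against zero detects right shifts; for wider elements the count has
     * already been masked to eight bits, so "negative" counts are those
     * with bit 7 set, i.e. >= 0x80, and the selected operands are swapped
     * accordingly.
     */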
4520 if (vece == MO_8) {
4521 tcg_gen_dupi_vec(vece, tmp, 0);
4522 tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, rval, lval);
4523 } else {
4524 tcg_gen_dupi_vec(vece, tmp, 0x80);
4525 tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, lval, rval);
4528 tcg_temp_free_vec(lval);
4529 tcg_temp_free_vec(rval);
4530 tcg_temp_free_vec(lsh);
4531 tcg_temp_free_vec(rsh);
4532 tcg_temp_free_vec(tmp);
4535 static const TCGOpcode sshl_list[] = {
4536 INDEX_op_neg_vec, INDEX_op_umin_vec, INDEX_op_shlv_vec,
4537 INDEX_op_sarv_vec, INDEX_op_cmp_vec, INDEX_op_cmpsel_vec, 0
4540 const GVecGen3 sshl_op[4] = {
4541 { .fniv = gen_sshl_vec,
4542 .fno = gen_helper_gvec_sshl_b,
4543 .opt_opc = sshl_list,
4544 .vece = MO_8 },
4545 { .fniv = gen_sshl_vec,
4546 .fno = gen_helper_gvec_sshl_h,
4547 .opt_opc = sshl_list,
4548 .vece = MO_16 },
4549 { .fni4 = gen_sshl_i32,
4550 .fniv = gen_sshl_vec,
4551 .opt_opc = sshl_list,
4552 .vece = MO_32 },
4553 { .fni8 = gen_sshl_i64,
4554 .fniv = gen_sshl_vec,
4555 .opt_opc = sshl_list,
4556 .vece = MO_64 },
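/*
 * Saturating add/subtract expanders: each computes both the wrapping and
 * the saturating result; lanes where the two differ have saturated, and
 * the all-ones compare result for those lanes is ORed into the QC flag
 * vector supplied as 'sat' (hence .write_aofs in the tables below).
 */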
4559 static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4560 TCGv_vec a, TCGv_vec b)
4562 TCGv_vec x = tcg_temp_new_vec_matching(t);
4563 tcg_gen_add_vec(vece, x, a, b);
4564 tcg_gen_usadd_vec(vece, t, a, b);
4565 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4566 tcg_gen_or_vec(vece, sat, sat, x);
4567 tcg_temp_free_vec(x);
4570 static const TCGOpcode vecop_list_uqadd[] = {
4571 INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
4574 const GVecGen4 uqadd_op[4] = {
4575 { .fniv = gen_uqadd_vec,
4576 .fno = gen_helper_gvec_uqadd_b,
4577 .write_aofs = true,
4578 .opt_opc = vecop_list_uqadd,
4579 .vece = MO_8 },
4580 { .fniv = gen_uqadd_vec,
4581 .fno = gen_helper_gvec_uqadd_h,
4582 .write_aofs = true,
4583 .opt_opc = vecop_list_uqadd,
4584 .vece = MO_16 },
4585 { .fniv = gen_uqadd_vec,
4586 .fno = gen_helper_gvec_uqadd_s,
4587 .write_aofs = true,
4588 .opt_opc = vecop_list_uqadd,
4589 .vece = MO_32 },
4590 { .fniv = gen_uqadd_vec,
4591 .fno = gen_helper_gvec_uqadd_d,
4592 .write_aofs = true,
4593 .opt_opc = vecop_list_uqadd,
4594 .vece = MO_64 },
4597 static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4598 TCGv_vec a, TCGv_vec b)
4600 TCGv_vec x = tcg_temp_new_vec_matching(t);
4601 tcg_gen_add_vec(vece, x, a, b);
4602 tcg_gen_ssadd_vec(vece, t, a, b);
4603 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4604 tcg_gen_or_vec(vece, sat, sat, x);
4605 tcg_temp_free_vec(x);
4608 static const TCGOpcode vecop_list_sqadd[] = {
4609 INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
4612 const GVecGen4 sqadd_op[4] = {
4613 { .fniv = gen_sqadd_vec,
4614 .fno = gen_helper_gvec_sqadd_b,
4615 .opt_opc = vecop_list_sqadd,
4616 .write_aofs = true,
4617 .vece = MO_8 },
4618 { .fniv = gen_sqadd_vec,
4619 .fno = gen_helper_gvec_sqadd_h,
4620 .opt_opc = vecop_list_sqadd,
4621 .write_aofs = true,
4622 .vece = MO_16 },
4623 { .fniv = gen_sqadd_vec,
4624 .fno = gen_helper_gvec_sqadd_s,
4625 .opt_opc = vecop_list_sqadd,
4626 .write_aofs = true,
4627 .vece = MO_32 },
4628 { .fniv = gen_sqadd_vec,
4629 .fno = gen_helper_gvec_sqadd_d,
4630 .opt_opc = vecop_list_sqadd,
4631 .write_aofs = true,
4632 .vece = MO_64 },
4635 static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4636 TCGv_vec a, TCGv_vec b)
4638 TCGv_vec x = tcg_temp_new_vec_matching(t);
4639 tcg_gen_sub_vec(vece, x, a, b);
4640 tcg_gen_ussub_vec(vece, t, a, b);
4641 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4642 tcg_gen_or_vec(vece, sat, sat, x);
4643 tcg_temp_free_vec(x);
4646 static const TCGOpcode vecop_list_uqsub[] = {
4647 INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
4650 const GVecGen4 uqsub_op[4] = {
4651 { .fniv = gen_uqsub_vec,
4652 .fno = gen_helper_gvec_uqsub_b,
4653 .opt_opc = vecop_list_uqsub,
4654 .write_aofs = true,
4655 .vece = MO_8 },
4656 { .fniv = gen_uqsub_vec,
4657 .fno = gen_helper_gvec_uqsub_h,
4658 .opt_opc = vecop_list_uqsub,
4659 .write_aofs = true,
4660 .vece = MO_16 },
4661 { .fniv = gen_uqsub_vec,
4662 .fno = gen_helper_gvec_uqsub_s,
4663 .opt_opc = vecop_list_uqsub,
4664 .write_aofs = true,
4665 .vece = MO_32 },
4666 { .fniv = gen_uqsub_vec,
4667 .fno = gen_helper_gvec_uqsub_d,
4668 .opt_opc = vecop_list_uqsub,
4669 .write_aofs = true,
4670 .vece = MO_64 },
4673 static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4674 TCGv_vec a, TCGv_vec b)
4676 TCGv_vec x = tcg_temp_new_vec_matching(t);
4677 tcg_gen_sub_vec(vece, x, a, b);
4678 tcg_gen_sssub_vec(vece, t, a, b);
4679 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4680 tcg_gen_or_vec(vece, sat, sat, x);
4681 tcg_temp_free_vec(x);
4684 static const TCGOpcode vecop_list_sqsub[] = {
4685 INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
4688 const GVecGen4 sqsub_op[4] = {
4689 { .fniv = gen_sqsub_vec,
4690 .fno = gen_helper_gvec_sqsub_b,
4691 .opt_opc = vecop_list_sqsub,
4692 .write_aofs = true,
4693 .vece = MO_8 },
4694 { .fniv = gen_sqsub_vec,
4695 .fno = gen_helper_gvec_sqsub_h,
4696 .opt_opc = vecop_list_sqsub,
4697 .write_aofs = true,
4698 .vece = MO_16 },
4699 { .fniv = gen_sqsub_vec,
4700 .fno = gen_helper_gvec_sqsub_s,
4701 .opt_opc = vecop_list_sqsub,
4702 .write_aofs = true,
4703 .vece = MO_32 },
4704 { .fniv = gen_sqsub_vec,
4705 .fno = gen_helper_gvec_sqsub_d,
4706 .opt_opc = vecop_list_sqsub,
4707 .write_aofs = true,
4708 .vece = MO_64 },
4711 /* Translate a NEON data processing instruction. Return nonzero if the
4712 instruction is invalid.
4713 We process data in a mixture of 32-bit and 64-bit chunks.
4714 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4716 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
4718 int op;
4719 int q;
4720 int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
4721 int size;
4722 int shift;
4723 int pass;
4724 int count;
4725 int pairwise;
4726 int u;
4727 int vec_size;
4728 uint32_t imm;
4729 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
4730 TCGv_ptr ptr1, ptr2, ptr3;
4731 TCGv_i64 tmp64;
4733 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
4734 return 1;
4737 /* FIXME: this access check should not take precedence over UNDEF
4738 * for invalid encodings; we will generate incorrect syndrome information
4739 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4741 if (s->fp_excp_el) {
4742 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
4743 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
4744 return 0;
4747 if (!s->vfp_enabled)
4748 return 1;
4749 q = (insn & (1 << 6)) != 0;
4750 u = (insn >> 24) & 1;
4751 VFP_DREG_D(rd, insn);
4752 VFP_DREG_N(rn, insn);
4753 VFP_DREG_M(rm, insn);
4754 size = (insn >> 20) & 3;
4755 vec_size = q ? 16 : 8;
4756 rd_ofs = neon_reg_offset(rd, 0);
4757 rn_ofs = neon_reg_offset(rn, 0);
4758 rm_ofs = neon_reg_offset(rm, 0);
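    /*
     * The decode below follows the Advanced SIMD encoding groups: bit 23
     * clear selects the three-registers-same-length group; otherwise bit 4
     * set selects the shift-by-immediate and one-register-and-immediate
     * forms, and bit 4 clear the different-length, scalar and remaining
     * size == 3 groups handled further down.
     */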
4760 if ((insn & (1 << 23)) == 0) {
4761 /* Three register same length. */
4762 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4763 /* Catch invalid op and bad size combinations: UNDEF */
4764 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4765 return 1;
4767 /* All insns of this form UNDEF for either this condition or the
4768 * superset of cases "Q==1"; we catch the latter later.
4770 if (q && ((rd | rn | rm) & 1)) {
4771 return 1;
4773 switch (op) {
4774 case NEON_3R_SHA:
4775 /* The SHA-1/SHA-256 3-register instructions require special
4776 * treatment here, as their size field is overloaded as an
4777 * op type selector, and they all consume their input in a
4778 * single pass.
4780 if (!q) {
4781 return 1;
4783 if (!u) { /* SHA-1 */
4784 if (!dc_isar_feature(aa32_sha1, s)) {
4785 return 1;
4787 ptr1 = vfp_reg_ptr(true, rd);
4788 ptr2 = vfp_reg_ptr(true, rn);
4789 ptr3 = vfp_reg_ptr(true, rm);
4790 tmp4 = tcg_const_i32(size);
4791 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
4792 tcg_temp_free_i32(tmp4);
4793 } else { /* SHA-256 */
4794 if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
4795 return 1;
4797 ptr1 = vfp_reg_ptr(true, rd);
4798 ptr2 = vfp_reg_ptr(true, rn);
4799 ptr3 = vfp_reg_ptr(true, rm);
4800 switch (size) {
4801 case 0:
4802 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
4803 break;
4804 case 1:
4805 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
4806 break;
4807 case 2:
4808 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
4809 break;
4812 tcg_temp_free_ptr(ptr1);
4813 tcg_temp_free_ptr(ptr2);
4814 tcg_temp_free_ptr(ptr3);
4815 return 0;
4817 case NEON_3R_VPADD_VQRDMLAH:
4818 if (!u) {
4819 break; /* VPADD */
4821 /* VQRDMLAH */
4822 switch (size) {
4823 case 1:
4824 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
4825 q, rd, rn, rm);
4826 case 2:
4827 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
4828 q, rd, rn, rm);
4830 return 1;
4832 case NEON_3R_VFM_VQRDMLSH:
4833 if (!u) {
4834 /* VFMA, VFMS */
4835 if (size == 1) {
4836 return 1;
4838 break;
4840 /* VQRDMLSH */
4841 switch (size) {
4842 case 1:
4843 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
4844 q, rd, rn, rm);
4845 case 2:
4846 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
4847 q, rd, rn, rm);
4849 return 1;
4851 case NEON_3R_VADD_VSUB:
4852 case NEON_3R_LOGIC:
4853 case NEON_3R_VMAX:
4854 case NEON_3R_VMIN:
4855 case NEON_3R_VTST_VCEQ:
4856 case NEON_3R_VCGT:
4857 case NEON_3R_VCGE:
4858 case NEON_3R_VQADD:
4859 case NEON_3R_VQSUB:
4860 case NEON_3R_VMUL:
4861 case NEON_3R_VML:
4862 case NEON_3R_VSHL:
4863 /* Already handled by decodetree */
4864 return 1;
4867 if (size == 3) {
4868 /* 64-bit element instructions. */
4869 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4870 neon_load_reg64(cpu_V0, rn + pass);
4871 neon_load_reg64(cpu_V1, rm + pass);
4872 switch (op) {
4873 case NEON_3R_VQSHL:
4874 if (u) {
4875 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4876 cpu_V1, cpu_V0);
4877 } else {
4878 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4879 cpu_V1, cpu_V0);
4881 break;
4882 case NEON_3R_VRSHL:
4883 if (u) {
4884 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4885 } else {
4886 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4888 break;
4889 case NEON_3R_VQRSHL:
4890 if (u) {
4891 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4892 cpu_V1, cpu_V0);
4893 } else {
4894 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4895 cpu_V1, cpu_V0);
4897 break;
4898 default:
4899 abort();
4901 neon_store_reg64(cpu_V0, rd + pass);
4903 return 0;
4905 pairwise = 0;
4906 switch (op) {
4907 case NEON_3R_VQSHL:
4908 case NEON_3R_VRSHL:
4909 case NEON_3R_VQRSHL:
4911 int rtmp;
4912 /* Shift instruction operands are reversed. */
4913 rtmp = rn;
4914 rn = rm;
4915 rm = rtmp;
4917 break;
4918 case NEON_3R_VPADD_VQRDMLAH:
4919 case NEON_3R_VPMAX:
4920 case NEON_3R_VPMIN:
4921 pairwise = 1;
4922 break;
4923 case NEON_3R_FLOAT_ARITH:
4924 pairwise = (u && size < 2); /* if VPADD (float) */
4925 break;
4926 case NEON_3R_FLOAT_MINMAX:
4927 pairwise = u; /* if VPMIN/VPMAX (float) */
4928 break;
4929 case NEON_3R_FLOAT_CMP:
4930 if (!u && size) {
4931 /* no encoding for U=0 C=1x */
4932 return 1;
4934 break;
4935 case NEON_3R_FLOAT_ACMP:
4936 if (!u) {
4937 return 1;
4939 break;
4940 case NEON_3R_FLOAT_MISC:
4941 /* VMAXNM/VMINNM in ARMv8 */
4942 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
4943 return 1;
4945 break;
4946 case NEON_3R_VFM_VQRDMLSH:
4947 if (!dc_isar_feature(aa32_simdfmac, s)) {
4948 return 1;
4950 break;
4951 default:
4952 break;
4955 if (pairwise && q) {
4956 /* All the pairwise insns UNDEF if Q is set */
4957 return 1;
4960 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4962 if (pairwise) {
4963 /* Pairwise. */
4964 if (pass < 1) {
4965 tmp = neon_load_reg(rn, 0);
4966 tmp2 = neon_load_reg(rn, 1);
4967 } else {
4968 tmp = neon_load_reg(rm, 0);
4969 tmp2 = neon_load_reg(rm, 1);
4971 } else {
4972 /* Elementwise. */
4973 tmp = neon_load_reg(rn, pass);
4974 tmp2 = neon_load_reg(rm, pass);
4976 switch (op) {
4977 case NEON_3R_VHADD:
4978 GEN_NEON_INTEGER_OP(hadd);
4979 break;
4980 case NEON_3R_VRHADD:
4981 GEN_NEON_INTEGER_OP(rhadd);
4982 break;
4983 case NEON_3R_VHSUB:
4984 GEN_NEON_INTEGER_OP(hsub);
4985 break;
4986 case NEON_3R_VQSHL:
4987 GEN_NEON_INTEGER_OP_ENV(qshl);
4988 break;
4989 case NEON_3R_VRSHL:
4990 GEN_NEON_INTEGER_OP(rshl);
4991 break;
4992 case NEON_3R_VQRSHL:
4993 GEN_NEON_INTEGER_OP_ENV(qrshl);
4994 break;
4995 case NEON_3R_VABD:
4996 GEN_NEON_INTEGER_OP(abd);
4997 break;
4998 case NEON_3R_VABA:
4999 GEN_NEON_INTEGER_OP(abd);
5000 tcg_temp_free_i32(tmp2);
5001 tmp2 = neon_load_reg(rd, pass);
5002 gen_neon_add(size, tmp, tmp2);
5003 break;
5004 case NEON_3R_VPMAX:
5005 GEN_NEON_INTEGER_OP(pmax);
5006 break;
5007 case NEON_3R_VPMIN:
5008 GEN_NEON_INTEGER_OP(pmin);
5009 break;
5010 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
5011 if (!u) { /* VQDMULH */
5012 switch (size) {
5013 case 1:
5014 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5015 break;
5016 case 2:
5017 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5018 break;
5019 default: abort();
5021 } else { /* VQRDMULH */
5022 switch (size) {
5023 case 1:
5024 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5025 break;
5026 case 2:
5027 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5028 break;
5029 default: abort();
5032 break;
5033 case NEON_3R_VPADD_VQRDMLAH:
5034 switch (size) {
5035 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5036 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5037 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
5038 default: abort();
5040 break;
5041 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
5043 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5044 switch ((u << 2) | size) {
5045 case 0: /* VADD */
5046 case 4: /* VPADD */
5047 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5048 break;
5049 case 2: /* VSUB */
5050 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
5051 break;
5052 case 6: /* VABD */
5053 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5054 break;
5055 default:
5056 abort();
5058 tcg_temp_free_ptr(fpstatus);
5059 break;
5061 case NEON_3R_FLOAT_MULTIPLY:
5063 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5064 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5065 if (!u) {
5066 tcg_temp_free_i32(tmp2);
5067 tmp2 = neon_load_reg(rd, pass);
5068 if (size == 0) {
5069 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5070 } else {
5071 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5074 tcg_temp_free_ptr(fpstatus);
5075 break;
5077 case NEON_3R_FLOAT_CMP:
5079 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5080 if (!u) {
5081 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
5082 } else {
5083 if (size == 0) {
5084 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5085 } else {
5086 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5089 tcg_temp_free_ptr(fpstatus);
5090 break;
5092 case NEON_3R_FLOAT_ACMP:
5094 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5095 if (size == 0) {
5096 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5097 } else {
5098 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5100 tcg_temp_free_ptr(fpstatus);
5101 break;
5103 case NEON_3R_FLOAT_MINMAX:
5105 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5106 if (size == 0) {
5107 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
5108 } else {
5109 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
5111 tcg_temp_free_ptr(fpstatus);
5112 break;
5114 case NEON_3R_FLOAT_MISC:
5115 if (u) {
5116 /* VMAXNM/VMINNM */
5117 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5118 if (size == 0) {
5119 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
5120 } else {
5121 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
5123 tcg_temp_free_ptr(fpstatus);
5124 } else {
5125 if (size == 0) {
5126 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5127 } else {
5128 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5131 break;
5132 case NEON_3R_VFM_VQRDMLSH:
5134 /* VFMA, VFMS: fused multiply-add */
5135 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5136 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5137 if (size) {
5138 /* VFMS */
5139 gen_helper_vfp_negs(tmp, tmp);
5141 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5142 tcg_temp_free_i32(tmp3);
5143 tcg_temp_free_ptr(fpstatus);
5144 break;
5146 default:
5147 abort();
5149 tcg_temp_free_i32(tmp2);
5151 /* Save the result. For elementwise operations we can put it
5152 straight into the destination register. For pairwise operations
5153 we have to be careful to avoid clobbering the source operands. */
5154 if (pairwise && rd == rm) {
5155 neon_store_scratch(pass, tmp);
5156 } else {
5157 neon_store_reg(rd, pass, tmp);
5160 } /* for pass */
5161 if (pairwise && rd == rm) {
5162 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5163 tmp = neon_load_scratch(pass);
5164 neon_store_reg(rd, pass, tmp);
5167 /* End of 3 register same size operations. */
5168 } else if (insn & (1 << 4)) {
5169 if ((insn & 0x00380080) != 0) {
5170 /* Two registers and shift. */
5171 op = (insn >> 8) & 0xf;
5172 if (insn & (1 << 7)) {
5173 /* 64-bit shift. */
5174 if (op > 7) {
5175 return 1;
5177 size = 3;
5178 } else {
5179 size = 2;
5180 while ((insn & (1 << (size + 19))) == 0)
5181 size--;
5183 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5184 if (op < 8) {
5185 /* Shift by immediate:
5186 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5187 if (q && ((rd | rm) & 1)) {
5188 return 1;
5190 if (!u && (op == 4 || op == 6)) {
5191 return 1;
5193 /* Right shifts are encoded as N - shift, where N is the
5194 element size in bits. */
5195 if (op <= 4) {
5196 shift = shift - (1 << (size + 3));
5199 switch (op) {
5200 case 0: /* VSHR */
5201 /* Right shift comes here negative. */
5202 shift = -shift;
5203 /* Shifts larger than the element size are architecturally
5204 * valid: an unsigned shift results in all zeros, a signed
5205 * shift in all sign bits.
5207 if (!u) {
5208 tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
5209 MIN(shift, (8 << size) - 1),
5210 vec_size, vec_size);
5211 } else if (shift >= 8 << size) {
5212 tcg_gen_gvec_dup_imm(MO_8, rd_ofs, vec_size,
5213 vec_size, 0);
5214 } else {
5215 tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
5216 vec_size, vec_size);
5218 return 0;
5220 case 1: /* VSRA */
5221 /* Right shift comes here negative. */
5222 shift = -shift;
5223 /* Shifts larger than the element size are architecturally
5224 * valid: an unsigned shift results in all zeros, a signed
5225 * shift in all sign bits.
5227 if (!u) {
5228 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5229 MIN(shift, (8 << size) - 1),
5230 &ssra_op[size]);
5231 } else if (shift >= 8 << size) {
5232 /* rd += 0 */
5233 } else {
5234 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5235 shift, &usra_op[size]);
5237 return 0;
5239 case 4: /* VSRI */
5240 if (!u) {
5241 return 1;
5243 /* Right shift comes here negative. */
5244 shift = -shift;
5245 /* Shift out of range leaves destination unchanged. */
5246 if (shift < 8 << size) {
5247 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5248 shift, &sri_op[size]);
5250 return 0;
5252 case 5: /* VSHL, VSLI */
5253 if (u) { /* VSLI */
5254 /* Shift out of range leaves destination unchanged. */
5255 if (shift < 8 << size) {
5256 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
5257 vec_size, shift, &sli_op[size]);
5259 } else { /* VSHL */
5260 /* Shifts larger than the element size are
5261 * architecturally valid and result in zero.
5263 if (shift >= 8 << size) {
5264 tcg_gen_gvec_dup_imm(size, rd_ofs,
5265 vec_size, vec_size, 0);
5266 } else {
5267 tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
5268 vec_size, vec_size);
5271 return 0;
5274 if (size == 3) {
5275 count = q + 1;
5276 } else {
5277 count = q ? 4: 2;
5280 /* To avoid excessive duplication of ops we implement shift
5281 * by immediate using the variable shift operations.
5283 imm = dup_const(size, shift);
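            /*
             * For example, a VRSHR by 3 on byte elements arrives here with
             * shift == -3, and dup_const(MO_8, -3) replicates the byte 0xfd
             * into every lane so that each element sees the same negative
             * (i.e. right) shift count in the variable-shift helpers.
             */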
5285 for (pass = 0; pass < count; pass++) {
5286 if (size == 3) {
5287 neon_load_reg64(cpu_V0, rm + pass);
5288 tcg_gen_movi_i64(cpu_V1, imm);
5289 switch (op) {
5290 case 2: /* VRSHR */
5291 case 3: /* VRSRA */
5292 if (u)
5293 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5294 else
5295 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5296 break;
5297 case 6: /* VQSHLU */
5298 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5299 cpu_V0, cpu_V1);
5300 break;
5301 case 7: /* VQSHL */
5302 if (u) {
5303 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5304 cpu_V0, cpu_V1);
5305 } else {
5306 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5307 cpu_V0, cpu_V1);
5309 break;
5310 default:
5311 g_assert_not_reached();
5313 if (op == 3) {
5314 /* Accumulate. */
5315 neon_load_reg64(cpu_V1, rd + pass);
5316 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5318 neon_store_reg64(cpu_V0, rd + pass);
5319 } else { /* size < 3 */
5320 /* Operands in T0 and T1. */
5321 tmp = neon_load_reg(rm, pass);
5322 tmp2 = tcg_temp_new_i32();
5323 tcg_gen_movi_i32(tmp2, imm);
5324 switch (op) {
5325 case 2: /* VRSHR */
5326 case 3: /* VRSRA */
5327 GEN_NEON_INTEGER_OP(rshl);
5328 break;
5329 case 6: /* VQSHLU */
5330 switch (size) {
5331 case 0:
5332 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5333 tmp, tmp2);
5334 break;
5335 case 1:
5336 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5337 tmp, tmp2);
5338 break;
5339 case 2:
5340 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5341 tmp, tmp2);
5342 break;
5343 default:
5344 abort();
5346 break;
5347 case 7: /* VQSHL */
5348 GEN_NEON_INTEGER_OP_ENV(qshl);
5349 break;
5350 default:
5351 g_assert_not_reached();
5353 tcg_temp_free_i32(tmp2);
5355 if (op == 3) {
5356 /* Accumulate. */
5357 tmp2 = neon_load_reg(rd, pass);
5358 gen_neon_add(size, tmp, tmp2);
5359 tcg_temp_free_i32(tmp2);
5361 neon_store_reg(rd, pass, tmp);
5363 } /* for pass */
5364 } else if (op < 10) {
5365 /* Shift by immediate and narrow:
5366 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5367 int input_unsigned = (op == 8) ? !u : u;
5368 if (rm & 1) {
5369 return 1;
5371 shift = shift - (1 << (size + 3));
5372 size++;
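            /*
             * As above, right shifts arrive encoded relative to the element
             * width: subtracting the destination element width leaves the
             * negative right-shift count used by the variable-shift helpers,
             * and size++ switches to the double-width source elements.
             */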
5373 if (size == 3) {
5374 tmp64 = tcg_const_i64(shift);
5375 neon_load_reg64(cpu_V0, rm);
5376 neon_load_reg64(cpu_V1, rm + 1);
5377 for (pass = 0; pass < 2; pass++) {
5378 TCGv_i64 in;
5379 if (pass == 0) {
5380 in = cpu_V0;
5381 } else {
5382 in = cpu_V1;
5384 if (q) {
5385 if (input_unsigned) {
5386 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5387 } else {
5388 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5390 } else {
5391 if (input_unsigned) {
5392 gen_ushl_i64(cpu_V0, in, tmp64);
5393 } else {
5394 gen_sshl_i64(cpu_V0, in, tmp64);
5397 tmp = tcg_temp_new_i32();
5398 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5399 neon_store_reg(rd, pass, tmp);
5400 } /* for pass */
5401 tcg_temp_free_i64(tmp64);
5402 } else {
5403 if (size == 1) {
5404 imm = (uint16_t)shift;
5405 imm |= imm << 16;
5406 } else {
5407 /* size == 2 */
5408 imm = (uint32_t)shift;
5410 tmp2 = tcg_const_i32(imm);
5411 tmp4 = neon_load_reg(rm + 1, 0);
5412 tmp5 = neon_load_reg(rm + 1, 1);
5413 for (pass = 0; pass < 2; pass++) {
5414 if (pass == 0) {
5415 tmp = neon_load_reg(rm, 0);
5416 } else {
5417 tmp = tmp4;
5419 gen_neon_shift_narrow(size, tmp, tmp2, q,
5420 input_unsigned);
5421 if (pass == 0) {
5422 tmp3 = neon_load_reg(rm, 1);
5423 } else {
5424 tmp3 = tmp5;
5426 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5427 input_unsigned);
5428 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5429 tcg_temp_free_i32(tmp);
5430 tcg_temp_free_i32(tmp3);
5431 tmp = tcg_temp_new_i32();
5432 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5433 neon_store_reg(rd, pass, tmp);
5434 } /* for pass */
5435 tcg_temp_free_i32(tmp2);
5437 } else if (op == 10) {
5438 /* VSHLL, VMOVL */
5439 if (q || (rd & 1)) {
5440 return 1;
5442 tmp = neon_load_reg(rm, 0);
5443 tmp2 = neon_load_reg(rm, 1);
5444 for (pass = 0; pass < 2; pass++) {
5445 if (pass == 1)
5446 tmp = tmp2;
5448 gen_neon_widen(cpu_V0, tmp, size, u);
5450 if (shift != 0) {
5451 /* The shift is less than the width of the source
5452 type, so we can just shift the whole register. */
5453 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5454 /* Widen the result of the shift: we need to clear
5455 * the potential overflow bits resulting from
5456 * left bits of the narrow input appearing as
5457 * right bits of the left neighbour narrow
5458 * input. */
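                    /*
                     * For example, with byte inputs (size == 0) and shift == 3
                     * the mask below is 0x0007000700070007: the low three bits
                     * of each widened 16-bit lane are exactly where bits from
                     * the lower neighbour land after the 64-bit shift, so they
                     * are cleared.
                     */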
5459 if (size < 2 || !u) {
5460 uint64_t imm64;
5461 if (size == 0) {
5462 imm = (0xffu >> (8 - shift));
5463 imm |= imm << 16;
5464 } else if (size == 1) {
5465 imm = 0xffff >> (16 - shift);
5466 } else {
5467 /* size == 2 */
5468 imm = 0xffffffff >> (32 - shift);
5470 if (size < 2) {
5471 imm64 = imm | (((uint64_t)imm) << 32);
5472 } else {
5473 imm64 = imm;
5475 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5478 neon_store_reg64(cpu_V0, rd + pass);
5480 } else if (op >= 14) {
5481 /* VCVT fixed-point. */
5482 TCGv_ptr fpst;
5483 TCGv_i32 shiftv;
5484 VFPGenFixPointFn *fn;
5486 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5487 return 1;
5490 if (!(op & 1)) {
5491 if (u) {
5492 fn = gen_helper_vfp_ultos;
5493 } else {
5494 fn = gen_helper_vfp_sltos;
5496 } else {
5497 if (u) {
5498 fn = gen_helper_vfp_touls_round_to_zero;
5499 } else {
5500 fn = gen_helper_vfp_tosls_round_to_zero;
5504 /* We have already masked out the must-be-1 top bit of imm6,
5505 * hence this 32-shift where the ARM ARM has 64-imm6.
5507 shift = 32 - shift;
5508 fpst = get_fpstatus_ptr(1);
5509 shiftv = tcg_const_i32(shift);
5510 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5511 TCGv_i32 tmpf = neon_load_reg(rm, pass);
5512 fn(tmpf, tmpf, shiftv, fpst);
5513 neon_store_reg(rd, pass, tmpf);
5515 tcg_temp_free_ptr(fpst);
5516 tcg_temp_free_i32(shiftv);
5517 } else {
5518 return 1;
5520 } else { /* (insn & 0x00380080) == 0 */
5521 int invert, reg_ofs, vec_size;
5523 if (q && (rd & 1)) {
5524 return 1;
5527 op = (insn >> 8) & 0xf;
5528 /* One register and immediate. */
5529 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5530 invert = (insn & (1 << 5)) != 0;
5531 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5532 * We choose to not special-case this and will behave as if a
5533 * valid constant encoding of 0 had been given.
5535 switch (op) {
5536 case 0: case 1:
5537 /* no-op */
5538 break;
5539 case 2: case 3:
5540 imm <<= 8;
5541 break;
5542 case 4: case 5:
5543 imm <<= 16;
5544 break;
5545 case 6: case 7:
5546 imm <<= 24;
5547 break;
5548 case 8: case 9:
5549 imm |= imm << 16;
5550 break;
5551 case 10: case 11:
5552 imm = (imm << 8) | (imm << 24);
5553 break;
5554 case 12:
5555 imm = (imm << 8) | 0xff;
5556 break;
5557 case 13:
5558 imm = (imm << 16) | 0xffff;
5559 break;
5560 case 14:
5561 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5562 if (invert) {
5563 imm = ~imm;
5565 break;
5566 case 15:
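                /*
                 * The floating-point form of VMOV (immediate): bit 7 supplies
                 * the sign, bit 6 selects the exponent pattern (0x1f << 25
                 * versus 1 << 30), and bits 5:0 provide the remaining exponent
                 * and fraction bits of the expanded 32-bit constant.
                 */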
5567 if (invert) {
5568 return 1;
5570 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5571 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5572 break;
5574 if (invert) {
5575 imm = ~imm;
5578 reg_ofs = neon_reg_offset(rd, 0);
5579 vec_size = q ? 16 : 8;
5581 if (op & 1 && op < 12) {
5582 if (invert) {
5583 /* The immediate value has already been inverted,
5584 * so BIC becomes AND.
5586 tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
5587 vec_size, vec_size);
5588 } else {
5589 tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
5590 vec_size, vec_size);
5592 } else {
5593 /* VMOV, VMVN. */
5594 if (op == 14 && invert) {
5595 TCGv_i64 t64 = tcg_temp_new_i64();
5597 for (pass = 0; pass <= q; ++pass) {
5598 uint64_t val = 0;
5599 int n;
5601 for (n = 0; n < 8; n++) {
5602 if (imm & (1 << (n + pass * 8))) {
5603 val |= 0xffull << (n * 8);
5606 tcg_gen_movi_i64(t64, val);
5607 neon_store_reg64(t64, rd + pass);
5609 tcg_temp_free_i64(t64);
5610 } else {
5611 tcg_gen_gvec_dup_imm(MO_32, reg_ofs, vec_size,
5612 vec_size, imm);
5616 } else { /* (insn & 0x00800010 == 0x00800000) */
5617 if (size != 3) {
5618 op = (insn >> 8) & 0xf;
5619 if ((insn & (1 << 6)) == 0) {
5620 /* Three registers of different lengths. */
5621 int src1_wide;
5622 int src2_wide;
5623 int prewiden;
5624 /* undefreq: bit 0 : UNDEF if size == 0
5625 * bit 1 : UNDEF if size == 1
5626 * bit 2 : UNDEF if size == 2
5627 * bit 3 : UNDEF if U == 1
5628 * Note that [2:0] set implies 'always UNDEF'
5630 int undefreq;
5631 /* prewiden, src1_wide, src2_wide, undefreq */
5632 static const int neon_3reg_wide[16][4] = {
5633 {1, 0, 0, 0}, /* VADDL */
5634 {1, 1, 0, 0}, /* VADDW */
5635 {1, 0, 0, 0}, /* VSUBL */
5636 {1, 1, 0, 0}, /* VSUBW */
5637 {0, 1, 1, 0}, /* VADDHN */
5638 {0, 0, 0, 0}, /* VABAL */
5639 {0, 1, 1, 0}, /* VSUBHN */
5640 {0, 0, 0, 0}, /* VABDL */
5641 {0, 0, 0, 0}, /* VMLAL */
5642 {0, 0, 0, 9}, /* VQDMLAL */
5643 {0, 0, 0, 0}, /* VMLSL */
5644 {0, 0, 0, 9}, /* VQDMLSL */
5645 {0, 0, 0, 0}, /* Integer VMULL */
5646 {0, 0, 0, 9}, /* VQDMULL */
5647 {0, 0, 0, 0xa}, /* Polynomial VMULL */
5648 {0, 0, 0, 7}, /* Reserved: always UNDEF */
5651 prewiden = neon_3reg_wide[op][0];
5652 src1_wide = neon_3reg_wide[op][1];
5653 src2_wide = neon_3reg_wide[op][2];
5654 undefreq = neon_3reg_wide[op][3];
5656 if ((undefreq & (1 << size)) ||
5657 ((undefreq & 8) && u)) {
5658 return 1;
5660 if ((src1_wide && (rn & 1)) ||
5661 (src2_wide && (rm & 1)) ||
5662 (!src2_wide && (rd & 1))) {
5663 return 1;
5666 /* Handle polynomial VMULL in a single pass. */
5667 if (op == 14) {
5668 if (size == 0) {
5669 /* VMULL.P8 */
5670 tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, 16, 16,
5671 0, gen_helper_neon_pmull_h);
5672 } else {
5673 /* VMULL.P64 */
5674 if (!dc_isar_feature(aa32_pmull, s)) {
5675 return 1;
5677 tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, 16, 16,
5678 0, gen_helper_gvec_pmull_q);
5680 return 0;
5683 /* Avoid overlapping operands. Wide source operands are
5684 always aligned so will never overlap with wide
5685 destinations in problematic ways. */
5686 if (rd == rm && !src2_wide) {
5687 tmp = neon_load_reg(rm, 1);
5688 neon_store_scratch(2, tmp);
5689 } else if (rd == rn && !src1_wide) {
5690 tmp = neon_load_reg(rn, 1);
5691 neon_store_scratch(2, tmp);
5693 tmp3 = NULL;
5694 for (pass = 0; pass < 2; pass++) {
5695 if (src1_wide) {
5696 neon_load_reg64(cpu_V0, rn + pass);
5697 tmp = NULL;
5698 } else {
5699 if (pass == 1 && rd == rn) {
5700 tmp = neon_load_scratch(2);
5701 } else {
5702 tmp = neon_load_reg(rn, pass);
5704 if (prewiden) {
5705 gen_neon_widen(cpu_V0, tmp, size, u);
5708 if (src2_wide) {
5709 neon_load_reg64(cpu_V1, rm + pass);
5710 tmp2 = NULL;
5711 } else {
5712 if (pass == 1 && rd == rm) {
5713 tmp2 = neon_load_scratch(2);
5714 } else {
5715 tmp2 = neon_load_reg(rm, pass);
5717 if (prewiden) {
5718 gen_neon_widen(cpu_V1, tmp2, size, u);
5721 switch (op) {
5722 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5723 gen_neon_addl(size);
5724 break;
5725 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5726 gen_neon_subl(size);
5727 break;
5728 case 5: case 7: /* VABAL, VABDL */
5729 switch ((size << 1) | u) {
5730 case 0:
5731 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5732 break;
5733 case 1:
5734 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5735 break;
5736 case 2:
5737 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5738 break;
5739 case 3:
5740 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5741 break;
5742 case 4:
5743 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5744 break;
5745 case 5:
5746 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5747 break;
5748 default: abort();
5750 tcg_temp_free_i32(tmp2);
5751 tcg_temp_free_i32(tmp);
5752 break;
5753 case 8: case 9: case 10: case 11: case 12: case 13:
5754 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5755 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5756 break;
5757 default: /* 15 is RESERVED: caught earlier */
5758 abort();
5760 if (op == 13) {
5761 /* VQDMULL */
5762 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5763 neon_store_reg64(cpu_V0, rd + pass);
5764 } else if (op == 5 || (op >= 8 && op <= 11)) {
5765 /* Accumulate. */
5766 neon_load_reg64(cpu_V1, rd + pass);
5767 switch (op) {
5768 case 10: /* VMLSL */
5769 gen_neon_negl(cpu_V0, size);
5770 /* Fall through */
5771 case 5: case 8: /* VABAL, VMLAL */
5772 gen_neon_addl(size);
5773 break;
5774 case 9: case 11: /* VQDMLAL, VQDMLSL */
5775 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5776 if (op == 11) {
5777 gen_neon_negl(cpu_V0, size);
5779 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5780 break;
5781 default:
5782 abort();
5784 neon_store_reg64(cpu_V0, rd + pass);
5785 } else if (op == 4 || op == 6) {
5786 /* Narrowing operation. */
5787 tmp = tcg_temp_new_i32();
5788 if (!u) {
5789 switch (size) {
5790 case 0:
5791 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5792 break;
5793 case 1:
5794 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5795 break;
5796 case 2:
5797 tcg_gen_extrh_i64_i32(tmp, cpu_V0);
5798 break;
5799 default: abort();
5801 } else {
5802 switch (size) {
5803 case 0:
5804 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5805 break;
5806 case 1:
5807 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5808 break;
5809 case 2:
5810 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5811 tcg_gen_extrh_i64_i32(tmp, cpu_V0);
5812 break;
5813 default: abort();
5816 if (pass == 0) {
5817 tmp3 = tmp;
5818 } else {
5819 neon_store_reg(rd, 0, tmp3);
5820 neon_store_reg(rd, 1, tmp);
5822 } else {
5823 /* Write back the result. */
5824 neon_store_reg64(cpu_V0, rd + pass);
5827 } else {
5828 /* Two registers and a scalar. NB that for ops of this form
5829 * the ARM ARM labels bit 24 as Q, but it is in our variable
5830 * 'u', not 'q'.
5832 if (size == 0) {
5833 return 1;
5835 switch (op) {
5836 case 1: /* Floating point VMLA scalar */
5837 case 5: /* Floating point VMLS scalar */
5838 case 9: /* Floating point VMUL scalar */
5839 if (size == 1) {
5840 return 1;
5842 /* fall through */
5843 case 0: /* Integer VMLA scalar */
5844 case 4: /* Integer VMLS scalar */
5845 case 8: /* Integer VMUL scalar */
5846 case 12: /* VQDMULH scalar */
5847 case 13: /* VQRDMULH scalar */
5848 if (u && ((rd | rn) & 1)) {
5849 return 1;
5851 tmp = neon_get_scalar(size, rm);
5852 neon_store_scratch(0, tmp);
5853 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5854 tmp = neon_load_scratch(0);
5855 tmp2 = neon_load_reg(rn, pass);
5856 if (op == 12) {
5857 if (size == 1) {
5858 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5859 } else {
5860 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5862 } else if (op == 13) {
5863 if (size == 1) {
5864 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5865 } else {
5866 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5868 } else if (op & 1) {
5869 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5870 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5871 tcg_temp_free_ptr(fpstatus);
5872 } else {
5873 switch (size) {
5874 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5875 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5876 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5877 default: abort();
5880 tcg_temp_free_i32(tmp2);
5881 if (op < 8) {
5882 /* Accumulate. */
5883 tmp2 = neon_load_reg(rd, pass);
5884 switch (op) {
5885 case 0:
5886 gen_neon_add(size, tmp, tmp2);
5887 break;
5888 case 1:
5890 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5891 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5892 tcg_temp_free_ptr(fpstatus);
5893 break;
5895 case 4:
5896 gen_neon_rsb(size, tmp, tmp2);
5897 break;
5898 case 5:
5900 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5901 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5902 tcg_temp_free_ptr(fpstatus);
5903 break;
5905 default:
5906 abort();
5908 tcg_temp_free_i32(tmp2);
5910 neon_store_reg(rd, pass, tmp);
5912 break;
5913 case 3: /* VQDMLAL scalar */
5914 case 7: /* VQDMLSL scalar */
5915 case 11: /* VQDMULL scalar */
5916 if (u == 1) {
5917 return 1;
5919 /* fall through */
5920 case 2: /* VMLAL scalar */
5921 case 6: /* VMLSL scalar */
5922 case 10: /* VMULL scalar */
5923 if (rd & 1) {
5924 return 1;
5926 tmp2 = neon_get_scalar(size, rm);
5927 /* We need a copy of tmp2 because gen_neon_mull
5928 * deletes it during pass 0. */
5929 tmp4 = tcg_temp_new_i32();
5930 tcg_gen_mov_i32(tmp4, tmp2);
5931 tmp3 = neon_load_reg(rn, 1);
5933 for (pass = 0; pass < 2; pass++) {
5934 if (pass == 0) {
5935 tmp = neon_load_reg(rn, 0);
5936 } else {
5937 tmp = tmp3;
5938 tmp2 = tmp4;
5940 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5941 if (op != 11) {
5942 neon_load_reg64(cpu_V1, rd + pass);
5944 switch (op) {
5945 case 6:
5946 gen_neon_negl(cpu_V0, size);
5947 /* Fall through */
5948 case 2:
5949 gen_neon_addl(size);
5950 break;
5951 case 3: case 7:
5952 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5953 if (op == 7) {
5954 gen_neon_negl(cpu_V0, size);
5956 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5957 break;
5958 case 10:
5959 /* no-op */
5960 break;
5961 case 11:
5962 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5963 break;
5964 default:
5965 abort();
5967 neon_store_reg64(cpu_V0, rd + pass);
5969 break;
5970 case 14: /* VQRDMLAH scalar */
5971 case 15: /* VQRDMLSH scalar */
5973 NeonGenThreeOpEnvFn *fn;
5975 if (!dc_isar_feature(aa32_rdm, s)) {
5976 return 1;
5978 if (u && ((rd | rn) & 1)) {
5979 return 1;
5981 if (op == 14) {
5982 if (size == 1) {
5983 fn = gen_helper_neon_qrdmlah_s16;
5984 } else {
5985 fn = gen_helper_neon_qrdmlah_s32;
5987 } else {
5988 if (size == 1) {
5989 fn = gen_helper_neon_qrdmlsh_s16;
5990 } else {
5991 fn = gen_helper_neon_qrdmlsh_s32;
5995 tmp2 = neon_get_scalar(size, rm);
5996 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5997 tmp = neon_load_reg(rn, pass);
5998 tmp3 = neon_load_reg(rd, pass);
5999 fn(tmp, cpu_env, tmp, tmp2, tmp3);
6000 tcg_temp_free_i32(tmp3);
6001 neon_store_reg(rd, pass, tmp);
6003 tcg_temp_free_i32(tmp2);
6005 break;
6006 default:
6007 g_assert_not_reached();
6010 } else { /* size == 3 */
6011 if (!u) {
6012 /* Extract. */
6013 imm = (insn >> 8) & 0xf;
6015 if (imm > 7 && !q)
6016 return 1;
6018 if (q && ((rd | rn | rm) & 1)) {
6019 return 1;
6022 if (imm == 0) {
6023 neon_load_reg64(cpu_V0, rn);
6024 if (q) {
6025 neon_load_reg64(cpu_V1, rn + 1);
6027 } else if (imm == 8) {
6028 neon_load_reg64(cpu_V0, rn + 1);
6029 if (q) {
6030 neon_load_reg64(cpu_V1, rm);
6032 } else if (q) {
6033 tmp64 = tcg_temp_new_i64();
6034 if (imm < 8) {
6035 neon_load_reg64(cpu_V0, rn);
6036 neon_load_reg64(tmp64, rn + 1);
6037 } else {
6038 neon_load_reg64(cpu_V0, rn + 1);
6039 neon_load_reg64(tmp64, rm);
6041 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
6042 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
6043 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6044 if (imm < 8) {
6045 neon_load_reg64(cpu_V1, rm);
6046 } else {
6047 neon_load_reg64(cpu_V1, rm + 1);
6048 imm -= 8;
6050 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6051 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6052 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
6053 tcg_temp_free_i64(tmp64);
6054 } else {
6055 /* BUGFIX */
6056 neon_load_reg64(cpu_V0, rn);
6057 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
6058 neon_load_reg64(cpu_V1, rm);
6059 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6060 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6062 neon_store_reg64(cpu_V0, rd);
6063 if (q) {
6064 neon_store_reg64(cpu_V1, rd + 1);
6066 } else if ((insn & (1 << 11)) == 0) {
6067 /* Two register misc. */
6068 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6069 size = (insn >> 18) & 3;
6070 /* UNDEF for unknown op values and bad op-size combinations */
6071 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6072 return 1;
6074 if (neon_2rm_is_v8_op(op) &&
6075 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6076 return 1;
6078 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6079 q && ((rm | rd) & 1)) {
6080 return 1;
6082 switch (op) {
6083 case NEON_2RM_VREV64:
6084 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6085 tmp = neon_load_reg(rm, pass * 2);
6086 tmp2 = neon_load_reg(rm, pass * 2 + 1);
6087 switch (size) {
6088 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6089 case 1: gen_swap_half(tmp); break;
6090 case 2: /* no-op */ break;
6091 default: abort();
6093 neon_store_reg(rd, pass * 2 + 1, tmp);
6094 if (size == 2) {
6095 neon_store_reg(rd, pass * 2, tmp2);
6096 } else {
6097 switch (size) {
6098 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6099 case 1: gen_swap_half(tmp2); break;
6100 default: abort();
6102 neon_store_reg(rd, pass * 2, tmp2);
6105 break;
6106 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6107 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
6108 for (pass = 0; pass < q + 1; pass++) {
6109 tmp = neon_load_reg(rm, pass * 2);
6110 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6111 tmp = neon_load_reg(rm, pass * 2 + 1);
6112 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6113 switch (size) {
6114 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6115 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6116 case 2: tcg_gen_add_i64(CPU_V001); break;
6117 default: abort();
6119 if (op >= NEON_2RM_VPADAL) {
6120 /* Accumulate. */
6121 neon_load_reg64(cpu_V1, rd + pass);
6122 gen_neon_addl(size);
6124 neon_store_reg64(cpu_V0, rd + pass);
6126 break;
6127 case NEON_2RM_VTRN:
6128 if (size == 2) {
6129 int n;
6130 for (n = 0; n < (q ? 4 : 2); n += 2) {
6131 tmp = neon_load_reg(rm, n);
6132 tmp2 = neon_load_reg(rd, n + 1);
6133 neon_store_reg(rm, n, tmp2);
6134 neon_store_reg(rd, n + 1, tmp);
6136 } else {
6137 goto elementwise;
6139 break;
6140 case NEON_2RM_VUZP:
6141 if (gen_neon_unzip(rd, rm, size, q)) {
6142 return 1;
6144 break;
6145 case NEON_2RM_VZIP:
6146 if (gen_neon_zip(rd, rm, size, q)) {
6147 return 1;
6149 break;
6150 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6151 /* also VQMOVUN; op field and mnemonics don't line up */
6152 if (rm & 1) {
6153 return 1;
6155 tmp2 = NULL;
6156 for (pass = 0; pass < 2; pass++) {
6157 neon_load_reg64(cpu_V0, rm + pass);
6158 tmp = tcg_temp_new_i32();
6159 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6160 tmp, cpu_V0);
6161 if (pass == 0) {
6162 tmp2 = tmp;
6163 } else {
6164 neon_store_reg(rd, 0, tmp2);
6165 neon_store_reg(rd, 1, tmp);
6168 break;
6169 case NEON_2RM_VSHLL:
6170 if (q || (rd & 1)) {
6171 return 1;
6173 tmp = neon_load_reg(rm, 0);
6174 tmp2 = neon_load_reg(rm, 1);
6175 for (pass = 0; pass < 2; pass++) {
6176 if (pass == 1)
6177 tmp = tmp2;
6178 gen_neon_widen(cpu_V0, tmp, size, 1);
6179 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
6180 neon_store_reg64(cpu_V0, rd + pass);
6182 break;
6183 case NEON_2RM_VCVT_F16_F32:
6185 TCGv_ptr fpst;
6186 TCGv_i32 ahp;
6188 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
6189 q || (rm & 1)) {
6190 return 1;
6192 fpst = get_fpstatus_ptr(true);
6193 ahp = get_ahp_flag();
6194 tmp = neon_load_reg(rm, 0);
6195 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6196 tmp2 = neon_load_reg(rm, 1);
6197 gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
6198 tcg_gen_shli_i32(tmp2, tmp2, 16);
6199 tcg_gen_or_i32(tmp2, tmp2, tmp);
6200 tcg_temp_free_i32(tmp);
6201 tmp = neon_load_reg(rm, 2);
6202 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6203 tmp3 = neon_load_reg(rm, 3);
6204 neon_store_reg(rd, 0, tmp2);
6205 gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
6206 tcg_gen_shli_i32(tmp3, tmp3, 16);
6207 tcg_gen_or_i32(tmp3, tmp3, tmp);
6208 neon_store_reg(rd, 1, tmp3);
6209 tcg_temp_free_i32(tmp);
6210 tcg_temp_free_i32(ahp);
6211 tcg_temp_free_ptr(fpst);
6212 break;
6214 case NEON_2RM_VCVT_F32_F16:
6216 TCGv_ptr fpst;
6217 TCGv_i32 ahp;
6218 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
6219 q || (rd & 1)) {
6220 return 1;
6222 fpst = get_fpstatus_ptr(true);
6223 ahp = get_ahp_flag();
6224 tmp3 = tcg_temp_new_i32();
6225 tmp = neon_load_reg(rm, 0);
6226 tmp2 = neon_load_reg(rm, 1);
6227 tcg_gen_ext16u_i32(tmp3, tmp);
6228 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6229 neon_store_reg(rd, 0, tmp3);
6230 tcg_gen_shri_i32(tmp, tmp, 16);
6231 gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
6232 neon_store_reg(rd, 1, tmp);
6233 tmp3 = tcg_temp_new_i32();
6234 tcg_gen_ext16u_i32(tmp3, tmp2);
6235 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6236 neon_store_reg(rd, 2, tmp3);
6237 tcg_gen_shri_i32(tmp2, tmp2, 16);
6238 gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
6239 neon_store_reg(rd, 3, tmp2);
6240 tcg_temp_free_i32(ahp);
6241 tcg_temp_free_ptr(fpst);
6242 break;
6244 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6245 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
6246 return 1;
6248 ptr1 = vfp_reg_ptr(true, rd);
6249 ptr2 = vfp_reg_ptr(true, rm);
6251 /* Bit 6 is the lowest opcode bit; it distinguishes between
6252 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6254 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6256 if (op == NEON_2RM_AESE) {
6257 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
6258 } else {
6259 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
6261 tcg_temp_free_ptr(ptr1);
6262 tcg_temp_free_ptr(ptr2);
6263 tcg_temp_free_i32(tmp3);
6264 break;
6265 case NEON_2RM_SHA1H:
6266 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
6267 return 1;
6269 ptr1 = vfp_reg_ptr(true, rd);
6270 ptr2 = vfp_reg_ptr(true, rm);
6272 gen_helper_crypto_sha1h(ptr1, ptr2);
6274 tcg_temp_free_ptr(ptr1);
6275 tcg_temp_free_ptr(ptr2);
6276 break;
6277 case NEON_2RM_SHA1SU1:
6278 if ((rm | rd) & 1) {
6279 return 1;
6281 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6282 if (q) {
6283 if (!dc_isar_feature(aa32_sha2, s)) {
6284 return 1;
6286 } else if (!dc_isar_feature(aa32_sha1, s)) {
6287 return 1;
6289 ptr1 = vfp_reg_ptr(true, rd);
6290 ptr2 = vfp_reg_ptr(true, rm);
6291 if (q) {
6292 gen_helper_crypto_sha256su0(ptr1, ptr2);
6293 } else {
6294 gen_helper_crypto_sha1su1(ptr1, ptr2);
6296 tcg_temp_free_ptr(ptr1);
6297 tcg_temp_free_ptr(ptr2);
6298 break;
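             /*
              * The cases below are expanded with the gvec API and operate on
              * the whole vector at rd_ofs/rm_ofs in one go, rather than using
              * the per-word "elementwise" loop further down.  The ceq0/cgt0/
              * cle0/cge0/clt0 op tables provide size-specific
              * compare-against-zero expanders.
              */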
6300 case NEON_2RM_VMVN:
6301 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
6302 break;
6303 case NEON_2RM_VNEG:
6304 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
6305 break;
6306 case NEON_2RM_VABS:
6307 tcg_gen_gvec_abs(size, rd_ofs, rm_ofs, vec_size, vec_size);
6308 break;
6310 case NEON_2RM_VCEQ0:
6311 tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
6312 vec_size, &ceq0_op[size]);
6313 break;
6314 case NEON_2RM_VCGT0:
6315 tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
6316 vec_size, &cgt0_op[size]);
6317 break;
6318 case NEON_2RM_VCLE0:
6319 tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
6320 vec_size, &cle0_op[size]);
6321 break;
6322 case NEON_2RM_VCGE0:
6323 tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
6324 vec_size, &cge0_op[size]);
6325 break;
6326 case NEON_2RM_VCLT0:
6327 tcg_gen_gvec_2(rd_ofs, rm_ofs, vec_size,
6328 vec_size, &clt0_op[size]);
6329 break;
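             /*
              * Everything else is handled one 32-bit word at a time in the
              * elementwise loop below: (q ? 4 : 2) passes, each loading a
              * word from rm and storing the result to the same word of rd.
              */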
6331 default:
6332 elementwise:
6333 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6334 tmp = neon_load_reg(rm, pass);
6335 switch (op) {
6336 case NEON_2RM_VREV32:
6337 switch (size) {
6338 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6339 case 1: gen_swap_half(tmp); break;
6340 default: abort();
6342 break;
6343 case NEON_2RM_VREV16:
6344 gen_rev16(tmp, tmp);
6345 break;
6346 case NEON_2RM_VCLS:
6347 switch (size) {
6348 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6349 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6350 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
6351 default: abort();
6353 break;
6354 case NEON_2RM_VCLZ:
6355 switch (size) {
6356 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6357 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6358 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
6359 default: abort();
6361 break;
6362 case NEON_2RM_VCNT:
6363 gen_helper_neon_cnt_u8(tmp, tmp);
6364 break;
6365 case NEON_2RM_VQABS:
6366 switch (size) {
6367 case 0:
6368 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6369 break;
6370 case 1:
6371 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6372 break;
6373 case 2:
6374 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6375 break;
6376 default: abort();
6378 break;
6379 case NEON_2RM_VQNEG:
6380 switch (size) {
6381 case 0:
6382 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6383 break;
6384 case 1:
6385 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6386 break;
6387 case 2:
6388 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6389 break;
6390 default: abort();
6392 break;
6393 case NEON_2RM_VCGT0_F:
6395 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6396 tmp2 = tcg_const_i32(0);
6397 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6398 tcg_temp_free_i32(tmp2);
6399 tcg_temp_free_ptr(fpstatus);
6400 break;
6402 case NEON_2RM_VCGE0_F:
6404 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6405 tmp2 = tcg_const_i32(0);
6406 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6407 tcg_temp_free_i32(tmp2);
6408 tcg_temp_free_ptr(fpstatus);
6409 break;
6411 case NEON_2RM_VCEQ0_F:
6413 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6414 tmp2 = tcg_const_i32(0);
6415 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6416 tcg_temp_free_i32(tmp2);
6417 tcg_temp_free_ptr(fpstatus);
6418 break;
6420 case NEON_2RM_VCLE0_F:
6422 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6423 tmp2 = tcg_const_i32(0);
6424 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6425 tcg_temp_free_i32(tmp2);
6426 tcg_temp_free_ptr(fpstatus);
6427 break;
6429 case NEON_2RM_VCLT0_F:
6431 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6432 tmp2 = tcg_const_i32(0);
6433 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6434 tcg_temp_free_i32(tmp2);
6435 tcg_temp_free_ptr(fpstatus);
6436 break;
6438 case NEON_2RM_VABS_F:
6439 gen_helper_vfp_abss(tmp, tmp);
6440 break;
6441 case NEON_2RM_VNEG_F:
6442 gen_helper_vfp_negs(tmp, tmp);
6443 break;
6444 case NEON_2RM_VSWP:
6445 tmp2 = neon_load_reg(rd, pass);
6446 neon_store_reg(rm, pass, tmp2);
6447 break;
6448 case NEON_2RM_VTRN:
6449 tmp2 = neon_load_reg(rd, pass);
6450 switch (size) {
6451 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6452 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6453 default: abort();
6455 neon_store_reg(rm, pass, tmp2);
6456 break;
6457 case NEON_2RM_VRINTN:
6458 case NEON_2RM_VRINTA:
6459 case NEON_2RM_VRINTM:
6460 case NEON_2RM_VRINTP:
6461 case NEON_2RM_VRINTZ:
6463 TCGv_i32 tcg_rmode;
6464 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6465 int rmode;
6467 if (op == NEON_2RM_VRINTZ) {
6468 rmode = FPROUNDING_ZERO;
6469 } else {
6470 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6473 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6474 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6475 cpu_env);
6476 gen_helper_rints(tmp, tmp, fpstatus);
6477 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6478 cpu_env);
6479 tcg_temp_free_ptr(fpstatus);
6480 tcg_temp_free_i32(tcg_rmode);
6481 break;
6483 case NEON_2RM_VRINTX:
6485 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6486 gen_helper_rints_exact(tmp, tmp, fpstatus);
6487 tcg_temp_free_ptr(fpstatus);
6488 break;
6490 case NEON_2RM_VCVTAU:
6491 case NEON_2RM_VCVTAS:
6492 case NEON_2RM_VCVTNU:
6493 case NEON_2RM_VCVTNS:
6494 case NEON_2RM_VCVTPU:
6495 case NEON_2RM_VCVTPS:
6496 case NEON_2RM_VCVTMU:
6497 case NEON_2RM_VCVTMS:
6499 bool is_signed = !extract32(insn, 7, 1);
6500 TCGv_ptr fpst = get_fpstatus_ptr(1);
6501 TCGv_i32 tcg_rmode, tcg_shift;
6502 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6504 tcg_shift = tcg_const_i32(0);
6505 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6506 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6507 cpu_env);
6509 if (is_signed) {
6510 gen_helper_vfp_tosls(tmp, tmp,
6511 tcg_shift, fpst);
6512 } else {
6513 gen_helper_vfp_touls(tmp, tmp,
6514 tcg_shift, fpst);
6517 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6518 cpu_env);
6519 tcg_temp_free_i32(tcg_rmode);
6520 tcg_temp_free_i32(tcg_shift);
6521 tcg_temp_free_ptr(fpst);
6522 break;
6524 case NEON_2RM_VRECPE:
6526 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6527 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6528 tcg_temp_free_ptr(fpstatus);
6529 break;
6531 case NEON_2RM_VRSQRTE:
6533 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6534 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6535 tcg_temp_free_ptr(fpstatus);
6536 break;
6538 case NEON_2RM_VRECPE_F:
6540 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6541 gen_helper_recpe_f32(tmp, tmp, fpstatus);
6542 tcg_temp_free_ptr(fpstatus);
6543 break;
6545 case NEON_2RM_VRSQRTE_F:
6547 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6548 gen_helper_rsqrte_f32(tmp, tmp, fpstatus);
6549 tcg_temp_free_ptr(fpstatus);
6550 break;
6552 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6554 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6555 gen_helper_vfp_sitos(tmp, tmp, fpstatus);
6556 tcg_temp_free_ptr(fpstatus);
6557 break;
6559 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6561 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6562 gen_helper_vfp_uitos(tmp, tmp, fpstatus);
6563 tcg_temp_free_ptr(fpstatus);
6564 break;
6566 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6568 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6569 gen_helper_vfp_tosizs(tmp, tmp, fpstatus);
6570 tcg_temp_free_ptr(fpstatus);
6571 break;
6573 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6575 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6576 gen_helper_vfp_touizs(tmp, tmp, fpstatus);
6577 tcg_temp_free_ptr(fpstatus);
6578 break;
6580 default:
6581                     /* Reserved op values were caught by the
6582                      * neon_2rm_sizes[] check earlier.
6583                      */
6584 abort();
6586 neon_store_reg(rd, pass, tmp);
6588 break;
6590 } else if ((insn & (1 << 10)) == 0) {
6591 /* VTBL, VTBX. */
6592 int n = ((insn >> 8) & 3) + 1;
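            /*
             * n is the number of D registers in the table (1..4); it is
             * scaled to a byte count (n <<= 3) before being passed to the
             * neon_tbl helper.
             */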
6593 if ((rn + n) > 32) {
6594                 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6595                  * helper function running off the end of the register file.
6596                  */
6597 return 1;
6599 n <<= 3;
6600 if (insn & (1 << 6)) {
6601 tmp = neon_load_reg(rd, 0);
6602 } else {
6603 tmp = tcg_temp_new_i32();
6604 tcg_gen_movi_i32(tmp, 0);
6606 tmp2 = neon_load_reg(rm, 0);
6607 ptr1 = vfp_reg_ptr(true, rn);
6608 tmp5 = tcg_const_i32(n);
6609 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
6610 tcg_temp_free_i32(tmp);
6611 if (insn & (1 << 6)) {
6612 tmp = neon_load_reg(rd, 1);
6613 } else {
6614 tmp = tcg_temp_new_i32();
6615 tcg_gen_movi_i32(tmp, 0);
6617 tmp3 = neon_load_reg(rm, 1);
6618 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
6619 tcg_temp_free_i32(tmp5);
6620 tcg_temp_free_ptr(ptr1);
6621 neon_store_reg(rd, 0, tmp2);
6622 neon_store_reg(rd, 1, tmp3);
6623 tcg_temp_free_i32(tmp);
6624 } else if ((insn & 0x380) == 0) {
6625 /* VDUP */
6626 int element;
6627 MemOp size;
6629 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6630 return 1;
6632 if (insn & (1 << 16)) {
6633 size = MO_8;
6634 element = (insn >> 17) & 7;
6635 } else if (insn & (1 << 17)) {
6636 size = MO_16;
6637 element = (insn >> 18) & 3;
6638 } else {
6639 size = MO_32;
6640 element = (insn >> 19) & 1;
6642 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
6643 neon_element_offset(rm, element, size),
6644 q ? 16 : 8, q ? 16 : 8);
6645 } else {
6646 return 1;
6650 return 0;
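/*
 * Decode a coprocessor/system register access: the MRC/MCR and MRRC/MCRR
 * forms (CDP-style encodings are rejected below).  Returns 0 if the access
 * was handled, or 1 if the insn should be treated as illegal/UNDEF.
 */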
6653 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
6655 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6656 const ARMCPRegInfo *ri;
6658 cpnum = (insn >> 8) & 0xf;
6660 /* First check for coprocessor space used for XScale/iwMMXt insns */
6661 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
6662 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
6663 return 1;
6665 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
6666 return disas_iwmmxt_insn(s, insn);
6667 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
6668 return disas_dsp_insn(s, insn);
6670 return 1;
6673 /* Otherwise treat as a generic register access */
6674 is64 = (insn & (1 << 25)) == 0;
6675 if (!is64 && ((insn & (1 << 4)) == 0)) {
6676 /* cdp */
6677 return 1;
6680 crm = insn & 0xf;
6681 if (is64) {
6682 crn = 0;
6683 opc1 = (insn >> 4) & 0xf;
6684 opc2 = 0;
6685 rt2 = (insn >> 16) & 0xf;
6686 } else {
6687 crn = (insn >> 16) & 0xf;
6688 opc1 = (insn >> 21) & 7;
6689 opc2 = (insn >> 5) & 7;
6690 rt2 = 0;
6692 isread = (insn >> 20) & 1;
6693 rt = (insn >> 12) & 0xf;
6695 ri = get_arm_cp_reginfo(s->cp_regs,
6696 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
6697 if (ri) {
6698 bool need_exit_tb;
6700 /* Check access permissions */
6701 if (!cp_access_ok(s->current_el, ri, isread)) {
6702 return 1;
6705 if (s->hstr_active || ri->accessfn ||
6706 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
6707 /* Emit code to perform further access permissions checks at
6708 * runtime; this may result in an exception.
6709              * Note that on XScale all cp0..cp13 registers do an access
6710              * check call in order to handle c15_cpar.
6711              */
6712 TCGv_ptr tmpptr;
6713 TCGv_i32 tcg_syn, tcg_isread;
6714 uint32_t syndrome;
6716 /* Note that since we are an implementation which takes an
6717 * exception on a trapped conditional instruction only if the
6718 * instruction passes its condition code check, we can take
6719 * advantage of the clause in the ARM ARM that allows us to set
6720 * the COND field in the instruction to 0xE in all cases.
6721 * We could fish the actual condition out of the insn (ARM)
6722              * or the condexec bits (Thumb) but it isn't necessary.
6723              */
6724 switch (cpnum) {
6725 case 14:
6726 if (is64) {
6727 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
6728 isread, false);
6729 } else {
6730 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
6731 rt, isread, false);
6733 break;
6734 case 15:
6735 if (is64) {
6736 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
6737 isread, false);
6738 } else {
6739 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
6740 rt, isread, false);
6742 break;
6743 default:
6744 /* ARMv8 defines that only coprocessors 14 and 15 exist,
6745 * so this can only happen if this is an ARMv7 or earlier CPU,
6746 * in which case the syndrome information won't actually be
6747                  * guest visible.
6748                  */
6749 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
6750 syndrome = syn_uncategorized();
6751 break;
6754 gen_set_condexec(s);
6755 gen_set_pc_im(s, s->pc_curr);
6756 tmpptr = tcg_const_ptr(ri);
6757 tcg_syn = tcg_const_i32(syndrome);
6758 tcg_isread = tcg_const_i32(isread);
6759 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
6760 tcg_isread);
6761 tcg_temp_free_ptr(tmpptr);
6762 tcg_temp_free_i32(tcg_syn);
6763 tcg_temp_free_i32(tcg_isread);
6764 } else if (ri->type & ARM_CP_RAISES_EXC) {
6765                 /*
6766                  * The readfn or writefn might raise an exception;
6767                  * synchronize the CPU state in case it does.
6768                  */
6769 gen_set_condexec(s);
6770 gen_set_pc_im(s, s->pc_curr);
6773 /* Handle special cases first */
6774 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6775 case ARM_CP_NOP:
6776 return 0;
6777 case ARM_CP_WFI:
6778 if (isread) {
6779 return 1;
6781 gen_set_pc_im(s, s->base.pc_next);
6782 s->base.is_jmp = DISAS_WFI;
6783 return 0;
6784 default:
6785 break;
6788 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
6789 gen_io_start();
6792 if (isread) {
6793 /* Read */
6794 if (is64) {
6795 TCGv_i64 tmp64;
6796 TCGv_i32 tmp;
6797 if (ri->type & ARM_CP_CONST) {
6798 tmp64 = tcg_const_i64(ri->resetvalue);
6799 } else if (ri->readfn) {
6800 TCGv_ptr tmpptr;
6801 tmp64 = tcg_temp_new_i64();
6802 tmpptr = tcg_const_ptr(ri);
6803 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6804 tcg_temp_free_ptr(tmpptr);
6805 } else {
6806 tmp64 = tcg_temp_new_i64();
6807 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6809 tmp = tcg_temp_new_i32();
6810 tcg_gen_extrl_i64_i32(tmp, tmp64);
6811 store_reg(s, rt, tmp);
6812 tmp = tcg_temp_new_i32();
6813 tcg_gen_extrh_i64_i32(tmp, tmp64);
6814 tcg_temp_free_i64(tmp64);
6815 store_reg(s, rt2, tmp);
6816 } else {
6817 TCGv_i32 tmp;
6818 if (ri->type & ARM_CP_CONST) {
6819 tmp = tcg_const_i32(ri->resetvalue);
6820 } else if (ri->readfn) {
6821 TCGv_ptr tmpptr;
6822 tmp = tcg_temp_new_i32();
6823 tmpptr = tcg_const_ptr(ri);
6824 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6825 tcg_temp_free_ptr(tmpptr);
6826 } else {
6827 tmp = load_cpu_offset(ri->fieldoffset);
6829 if (rt == 15) {
6830                     /* A destination register of r15 for 32-bit loads sets
6831                      * the condition codes from the high 4 bits of the value.
6832                      */
6833 gen_set_nzcv(tmp);
6834 tcg_temp_free_i32(tmp);
6835 } else {
6836 store_reg(s, rt, tmp);
6839 } else {
6840 /* Write */
6841 if (ri->type & ARM_CP_CONST) {
6842 /* If not forbidden by access permissions, treat as WI */
6843 return 0;
6846 if (is64) {
6847 TCGv_i32 tmplo, tmphi;
6848 TCGv_i64 tmp64 = tcg_temp_new_i64();
6849 tmplo = load_reg(s, rt);
6850 tmphi = load_reg(s, rt2);
6851 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6852 tcg_temp_free_i32(tmplo);
6853 tcg_temp_free_i32(tmphi);
6854 if (ri->writefn) {
6855 TCGv_ptr tmpptr = tcg_const_ptr(ri);
6856 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6857 tcg_temp_free_ptr(tmpptr);
6858 } else {
6859 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6861 tcg_temp_free_i64(tmp64);
6862 } else {
6863 if (ri->writefn) {
6864 TCGv_i32 tmp;
6865 TCGv_ptr tmpptr;
6866 tmp = load_reg(s, rt);
6867 tmpptr = tcg_const_ptr(ri);
6868 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6869 tcg_temp_free_ptr(tmpptr);
6870 tcg_temp_free_i32(tmp);
6871 } else {
6872 TCGv_i32 tmp = load_reg(s, rt);
6873 store_cpu_offset(tmp, ri->fieldoffset);
6878 /* I/O operations must end the TB here (whether read or write) */
6879 need_exit_tb = ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) &&
6880 (ri->type & ARM_CP_IO));
6882 if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
6883             /*
6884              * A write to any coprocessor register that ends a TB
6885              * must rebuild the hflags for the next TB.
6886              */
6887 TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
6888 if (arm_dc_feature(s, ARM_FEATURE_M)) {
6889 gen_helper_rebuild_hflags_m32(cpu_env, tcg_el);
6890 } else {
6891 if (ri->type & ARM_CP_NEWEL) {
6892 gen_helper_rebuild_hflags_a32_newel(cpu_env);
6893 } else {
6894 gen_helper_rebuild_hflags_a32(cpu_env, tcg_el);
6897 tcg_temp_free_i32(tcg_el);
6898             /*
6899              * We default to ending the TB on a coprocessor register write,
6900              * but allow this to be suppressed by the register definition
6901              * (usually only necessary to work around guest bugs).
6902              */
6903 need_exit_tb = true;
6905 if (need_exit_tb) {
6906 gen_lookup_tb(s);
6909 return 0;
6912 /* Unknown register; this might be a guest error or a QEMU
6913      * unimplemented feature.
6914      */
6915 if (is64) {
6916 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
6917 "64 bit system register cp:%d opc1: %d crm:%d "
6918 "(%s)\n",
6919 isread ? "read" : "write", cpnum, opc1, crm,
6920 s->ns ? "non-secure" : "secure");
6921 } else {
6922 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
6923 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
6924 "(%s)\n",
6925 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
6926 s->ns ? "non-secure" : "secure");
6929 return 1;
6933 /* Store a 64-bit value to a register pair. Clobbers val. */
6934 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6936 TCGv_i32 tmp;
6937 tmp = tcg_temp_new_i32();
6938 tcg_gen_extrl_i64_i32(tmp, val);
6939 store_reg(s, rlow, tmp);
6940 tmp = tcg_temp_new_i32();
6941 tcg_gen_extrh_i64_i32(tmp, val);
6942 store_reg(s, rhigh, tmp);
6945 /* load and add a 64-bit value from a register pair. */
6946 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6948 TCGv_i64 tmp;
6949 TCGv_i32 tmpl;
6950 TCGv_i32 tmph;
6952 /* Load 64-bit value rd:rn. */
6953 tmpl = load_reg(s, rlow);
6954 tmph = load_reg(s, rhigh);
6955 tmp = tcg_temp_new_i64();
6956 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6957 tcg_temp_free_i32(tmpl);
6958 tcg_temp_free_i32(tmph);
6959 tcg_gen_add_i64(val, val, tmp);
6960 tcg_temp_free_i64(tmp);
6963 /* Set N and Z flags from hi|lo. */
6964 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
6966 tcg_gen_mov_i32(cpu_NF, hi);
6967 tcg_gen_or_i32(cpu_ZF, lo, hi);
6970 /* Load/Store exclusive instructions are implemented by remembering
6971 the value/address loaded, and seeing if these are the same
6972 when the store is performed. This should be sufficient to implement
6973 the architecturally mandated semantics, and avoids having to monitor
6974 regular stores. The compare vs the remembered value is done during
6975 the cmpxchg operation, but we must compare the addresses manually. */
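/*
 * Illustrative guest sequence (not generated here), assuming a typical
 * atomic-increment loop:
 *   retry:
 *     ldrex   r0, [r1]        @ gen_load_exclusive: record address/value
 *     add     r0, r0, #1
 *     strex   r2, r0, [r1]    @ gen_store_exclusive: cmpxchg vs the record
 *     cmp     r2, #0
 *     bne     retry
 * A CLREX (gen_clrex below) resets cpu_exclusive_addr to -1, which
 * guarantees that the address comparison in the store-exclusive fails.
 */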
6976 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6977 TCGv_i32 addr, int size)
6979 TCGv_i32 tmp = tcg_temp_new_i32();
6980 MemOp opc = size | MO_ALIGN | s->be_data;
6982 s->is_ldex = true;
6984 if (size == 3) {
6985 TCGv_i32 tmp2 = tcg_temp_new_i32();
6986 TCGv_i64 t64 = tcg_temp_new_i64();
6988 /* For AArch32, architecturally the 32-bit word at the lowest
6989 * address is always Rt and the one at addr+4 is Rt2, even if
6990 * the CPU is big-endian. That means we don't want to do a
6991 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
6992 * for an architecturally 64-bit access, but instead do a
6993 * 64-bit access using MO_BE if appropriate and then split
6994 * the two halves.
6995 * This only makes a difference for BE32 user-mode, where
6996 * frob64() must not flip the two halves of the 64-bit data
6997          * but this code must treat BE32 user-mode like BE32 system.
6998          */
6999 TCGv taddr = gen_aa32_addr(s, addr, opc);
7001 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
7002 tcg_temp_free(taddr);
7003 tcg_gen_mov_i64(cpu_exclusive_val, t64);
7004 if (s->be_data == MO_BE) {
7005 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
7006 } else {
7007 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
7009 tcg_temp_free_i64(t64);
7011 store_reg(s, rt2, tmp2);
7012 } else {
7013 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
7014 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
7017 store_reg(s, rt, tmp);
7018 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
7021 static void gen_clrex(DisasContext *s)
7023 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7026 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7027 TCGv_i32 addr, int size)
7029 TCGv_i32 t0, t1, t2;
7030 TCGv_i64 extaddr;
7031 TCGv taddr;
7032 TCGLabel *done_label;
7033 TCGLabel *fail_label;
7034 MemOp opc = size | MO_ALIGN | s->be_data;
7036 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7037 [addr] = {Rt};
7038 {Rd} = 0;
7039 } else {
7040 {Rd} = 1;
7041 } */
7042 fail_label = gen_new_label();
7043 done_label = gen_new_label();
7044 extaddr = tcg_temp_new_i64();
7045 tcg_gen_extu_i32_i64(extaddr, addr);
7046 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7047 tcg_temp_free_i64(extaddr);
7049 taddr = gen_aa32_addr(s, addr, opc);
7050 t0 = tcg_temp_new_i32();
7051 t1 = load_reg(s, rt);
7052 if (size == 3) {
7053 TCGv_i64 o64 = tcg_temp_new_i64();
7054 TCGv_i64 n64 = tcg_temp_new_i64();
7056 t2 = load_reg(s, rt2);
7057 /* For AArch32, architecturally the 32-bit word at the lowest
7058 * address is always Rt and the one at addr+4 is Rt2, even if
7059 * the CPU is big-endian. Since we're going to treat this as a
7060 * single 64-bit BE store, we need to put the two halves in the
7061 * opposite order for BE to LE, so that they end up in the right
7062 * places.
7063 * We don't want gen_aa32_frob64() because that does the wrong
7064          * thing for BE32 usermode.
7065          */
7066 if (s->be_data == MO_BE) {
7067 tcg_gen_concat_i32_i64(n64, t2, t1);
7068 } else {
7069 tcg_gen_concat_i32_i64(n64, t1, t2);
7071 tcg_temp_free_i32(t2);
7073 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
7074 get_mem_index(s), opc);
7075 tcg_temp_free_i64(n64);
7077 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
7078 tcg_gen_extrl_i64_i32(t0, o64);
7080 tcg_temp_free_i64(o64);
7081 } else {
7082 t2 = tcg_temp_new_i32();
7083 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
7084 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
7085 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
7086 tcg_temp_free_i32(t2);
7088 tcg_temp_free_i32(t1);
7089 tcg_temp_free(taddr);
7090 tcg_gen_mov_i32(cpu_R[rd], t0);
7091 tcg_temp_free_i32(t0);
7092 tcg_gen_br(done_label);
7094 gen_set_label(fail_label);
7095 tcg_gen_movi_i32(cpu_R[rd], 1);
7096 gen_set_label(done_label);
7097 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
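    /*
     * Note: both the success path (via the branch to done_label) and the
     * failure path execute the final movi above, so the recorded exclusive
     * access is invalidated after every store-exclusive attempt, whatever
     * its outcome.
     */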
7100 /* gen_srs:
7101 * @env: CPUARMState
7102 * @s: DisasContext
7103 * @mode: mode field from insn (which stack to store to)
7104 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7105 * @writeback: true if writeback bit set
7107 * Generate code for the SRS (Store Return State) insn.
7109 static void gen_srs(DisasContext *s,
7110 uint32_t mode, uint32_t amode, bool writeback)
7112 int32_t offset;
7113 TCGv_i32 addr, tmp;
7114 bool undef = false;
7116 /* SRS is:
7117 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
7118 * and specified mode is monitor mode
7119 * - UNDEFINED in Hyp mode
7120 * - UNPREDICTABLE in User or System mode
7121 * - UNPREDICTABLE if the specified mode is:
7122 * -- not implemented
7123 * -- not a valid mode number
7124 * -- a mode that's at a higher exception level
7125 * -- Monitor, if we are Non-secure
7126 * For the UNPREDICTABLE cases we choose to UNDEF.
7128 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
7129 gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
7130 return;
7133 if (s->current_el == 0 || s->current_el == 2) {
7134 undef = true;
7137 switch (mode) {
7138 case ARM_CPU_MODE_USR:
7139 case ARM_CPU_MODE_FIQ:
7140 case ARM_CPU_MODE_IRQ:
7141 case ARM_CPU_MODE_SVC:
7142 case ARM_CPU_MODE_ABT:
7143 case ARM_CPU_MODE_UND:
7144 case ARM_CPU_MODE_SYS:
7145 break;
7146 case ARM_CPU_MODE_HYP:
7147 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
7148 undef = true;
7150 break;
7151 case ARM_CPU_MODE_MON:
7152 /* No need to check specifically for "are we non-secure" because
7153 * we've already made EL0 UNDEF and handled the trap for S-EL1;
7154 * so if this isn't EL3 then we must be non-secure.
7156 if (s->current_el != 3) {
7157 undef = true;
7159 break;
7160 default:
7161 undef = true;
7164 if (undef) {
7165 unallocated_encoding(s);
7166 return;
7169 addr = tcg_temp_new_i32();
7170 tmp = tcg_const_i32(mode);
7171 /* get_r13_banked() will raise an exception if called from System mode */
7172 gen_set_condexec(s);
7173 gen_set_pc_im(s, s->pc_curr);
7174 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7175 tcg_temp_free_i32(tmp);
7176 switch (amode) {
7177 case 0: /* DA */
7178 offset = -4;
7179 break;
7180 case 1: /* IA */
7181 offset = 0;
7182 break;
7183 case 2: /* DB */
7184 offset = -8;
7185 break;
7186 case 3: /* IB */
7187 offset = 4;
7188 break;
7189 default:
7190 abort();
7192 tcg_gen_addi_i32(addr, addr, offset);
7193 tmp = load_reg(s, 14);
7194 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7195 tcg_temp_free_i32(tmp);
7196 tmp = load_cpu_field(spsr);
7197 tcg_gen_addi_i32(addr, addr, 4);
7198 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7199 tcg_temp_free_i32(tmp);
7200 if (writeback) {
7201 switch (amode) {
7202 case 0:
7203 offset = -8;
7204 break;
7205 case 1:
7206 offset = 4;
7207 break;
7208 case 2:
7209 offset = -4;
7210 break;
7211 case 3:
7212 offset = 0;
7213 break;
7214 default:
7215 abort();
7217 tcg_gen_addi_i32(addr, addr, offset);
7218 tmp = tcg_const_i32(mode);
7219 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7220 tcg_temp_free_i32(tmp);
7222 tcg_temp_free_i32(addr);
7223 s->base.is_jmp = DISAS_UPDATE;
7226 /* Generate a label used for skipping this instruction */
7227 static void arm_gen_condlabel(DisasContext *s)
7229 if (!s->condjmp) {
7230 s->condlabel = gen_new_label();
7231 s->condjmp = 1;
7235 /* Skip this instruction if the ARM condition is false */
7236 static void arm_skip_unless(DisasContext *s, uint32_t cond)
7238 arm_gen_condlabel(s);
7239 arm_gen_test_cc(cond ^ 1, s->condlabel);
7244 * Constant expanders for the decoders.
7247 static int negate(DisasContext *s, int x)
7249 return -x;
7252 static int plus_2(DisasContext *s, int x)
7254 return x + 2;
7257 static int times_2(DisasContext *s, int x)
7259 return x * 2;
7262 static int times_4(DisasContext *s, int x)
7264 return x * 4;
7267 /* Return only the rotation part of T32ExpandImm. */
7268 static int t32_expandimm_rot(DisasContext *s, int x)
7270 return x & 0xc00 ? extract32(x, 7, 5) : 0;
7273 /* Return the unrotated immediate from T32ExpandImm. */
7274 static int t32_expandimm_imm(DisasContext *s, int x)
7276 int imm = extract32(x, 0, 8);
7278 switch (extract32(x, 8, 4)) {
7279 case 0: /* XY */
7280 /* Nothing to do. */
7281 break;
7282 case 1: /* 00XY00XY */
7283 imm *= 0x00010001;
7284 break;
7285 case 2: /* XY00XY00 */
7286 imm *= 0x01000100;
7287 break;
7288 case 3: /* XYXYXYXY */
7289 imm *= 0x01010101;
7290 break;
7291 default:
7292 /* Rotated constant. */
7293 imm |= 0x80;
7294 break;
7296 return imm;
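/*
 * Worked example (illustrative): imm12 = 0x1AB has bits [11:10] == 0 and
 * bits [9:8] == 1, so the expanded immediate is 0x00AB00AB with a zero
 * rotation.  imm12 = 0x425 is a rotated constant: the unrotated value is
 * 0x25 | 0x80 = 0xA5 and the rotation is bits [11:7] = 8, so the trans_*
 * functions later compute ror32(0xA5, 8) = 0xA5000000.
 */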
7299 static int t32_branch24(DisasContext *s, int x)
7301 /* Convert J1:J2 at x[22:21] to I2:I1, which involves I=J^~S. */
7302 x ^= !(x < 0) * (3 << 21);
7303 /* Append the final zero. */
7304 return x << 1;
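/*
 * The branch encoding stores I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S);
 * since x arrives sign-extended from S, inverting bits [22:21] only when
 * S == 0 recovers I1:I2 in place before the final left shift.
 */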
7307 static int t16_setflags(DisasContext *s)
7309 return s->condexec_mask == 0;
7312 static int t16_push_list(DisasContext *s, int x)
7314 return (x & 0xff) | (x & 0x100) << (14 - 8);
7317 static int t16_pop_list(DisasContext *s, int x)
7319 return (x & 0xff) | (x & 0x100) << (15 - 8);
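/*
 * Bit 8 of the T16 register list encodes the extra register: LR for PUSH
 * and PC for POP.  The two expanders above relocate it to bit 14 or 15 of
 * the full register list.
 */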
7322 /*
7323  * Include the generated decoders.
7324  */
7326 #include "decode-a32.inc.c"
7327 #include "decode-a32-uncond.inc.c"
7328 #include "decode-t32.inc.c"
7329 #include "decode-t16.inc.c"
7331 /* Helpers to swap operands for reverse-subtract. */
7332 static void gen_rsb(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
7334 tcg_gen_sub_i32(dst, b, a);
7337 static void gen_rsb_CC(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
7339 gen_sub_CC(dst, b, a);
7342 static void gen_rsc(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
7344 gen_sub_carry(dest, b, a);
7347 static void gen_rsc_CC(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
7349 gen_sbc_CC(dest, b, a);
7353 * Helpers for the data processing routines.
7355 * After the computation store the results back.
7356 * This may be suppressed altogether (STREG_NONE), require a runtime
7357 * check against the stack limits (STREG_SP_CHECK), or generate an
7358 * exception return. Oh, or store into a register.
7360 * Always return true, indicating success for a trans_* function.
7362 typedef enum {
7363 STREG_NONE,
7364 STREG_NORMAL,
7365 STREG_SP_CHECK,
7366 STREG_EXC_RET,
7367 } StoreRegKind;
7369 static bool store_reg_kind(DisasContext *s, int rd,
7370 TCGv_i32 val, StoreRegKind kind)
7372 switch (kind) {
7373 case STREG_NONE:
7374 tcg_temp_free_i32(val);
7375 return true;
7376 case STREG_NORMAL:
7377 /* See ALUWritePC: Interworking only from a32 mode. */
7378 if (s->thumb) {
7379 store_reg(s, rd, val);
7380 } else {
7381 store_reg_bx(s, rd, val);
7383 return true;
7384 case STREG_SP_CHECK:
7385 store_sp_checked(s, val);
7386 return true;
7387 case STREG_EXC_RET:
7388 gen_exception_return(s, val);
7389 return true;
7391 g_assert_not_reached();
7395 * Data Processing (register)
7397 * Operate, with set flags, one register source,
7398 * one immediate shifted register source, and a destination.
7400 static bool op_s_rrr_shi(DisasContext *s, arg_s_rrr_shi *a,
7401 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
7402 int logic_cc, StoreRegKind kind)
7404 TCGv_i32 tmp1, tmp2;
7406 tmp2 = load_reg(s, a->rm);
7407 gen_arm_shift_im(tmp2, a->shty, a->shim, logic_cc);
7408 tmp1 = load_reg(s, a->rn);
7410 gen(tmp1, tmp1, tmp2);
7411 tcg_temp_free_i32(tmp2);
7413 if (logic_cc) {
7414 gen_logic_CC(tmp1);
7416 return store_reg_kind(s, a->rd, tmp1, kind);
7419 static bool op_s_rxr_shi(DisasContext *s, arg_s_rrr_shi *a,
7420 void (*gen)(TCGv_i32, TCGv_i32),
7421 int logic_cc, StoreRegKind kind)
7423 TCGv_i32 tmp;
7425 tmp = load_reg(s, a->rm);
7426 gen_arm_shift_im(tmp, a->shty, a->shim, logic_cc);
7428 gen(tmp, tmp);
7429 if (logic_cc) {
7430 gen_logic_CC(tmp);
7432 return store_reg_kind(s, a->rd, tmp, kind);
7436 * Data-processing (register-shifted register)
7438 * Operate, with set flags, one register source,
7439 * one register shifted register source, and a destination.
7441 static bool op_s_rrr_shr(DisasContext *s, arg_s_rrr_shr *a,
7442 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
7443 int logic_cc, StoreRegKind kind)
7445 TCGv_i32 tmp1, tmp2;
7447 tmp1 = load_reg(s, a->rs);
7448 tmp2 = load_reg(s, a->rm);
7449 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
7450 tmp1 = load_reg(s, a->rn);
7452 gen(tmp1, tmp1, tmp2);
7453 tcg_temp_free_i32(tmp2);
7455 if (logic_cc) {
7456 gen_logic_CC(tmp1);
7458 return store_reg_kind(s, a->rd, tmp1, kind);
7461 static bool op_s_rxr_shr(DisasContext *s, arg_s_rrr_shr *a,
7462 void (*gen)(TCGv_i32, TCGv_i32),
7463 int logic_cc, StoreRegKind kind)
7465 TCGv_i32 tmp1, tmp2;
7467 tmp1 = load_reg(s, a->rs);
7468 tmp2 = load_reg(s, a->rm);
7469 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
7471 gen(tmp2, tmp2);
7472 if (logic_cc) {
7473 gen_logic_CC(tmp2);
7475 return store_reg_kind(s, a->rd, tmp2, kind);
7479 * Data-processing (immediate)
7481 * Operate, with set flags, one register source,
7482 * one rotated immediate, and a destination.
7484  * Note that, because logic_cc && a->rot decides whether CF is updated
7485  * from the msb of the rotated immediate, the decoders must pass in the
7486  * rotation and the unrotated form of the immediate separately.
7487  */
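/*
 * Illustrative example for the functions below: with a->imm = 0xA5 and
 * a->rot = 8 the expanded immediate is ror32(0xA5, 8) = 0xA5000000, so a
 * flag-setting logical operation sets CF to bit 31 of that value, i.e. 1.
 */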
7488 static bool op_s_rri_rot(DisasContext *s, arg_s_rri_rot *a,
7489 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
7490 int logic_cc, StoreRegKind kind)
7492 TCGv_i32 tmp1, tmp2;
7493 uint32_t imm;
7495 imm = ror32(a->imm, a->rot);
7496 if (logic_cc && a->rot) {
7497 tcg_gen_movi_i32(cpu_CF, imm >> 31);
7499 tmp2 = tcg_const_i32(imm);
7500 tmp1 = load_reg(s, a->rn);
7502 gen(tmp1, tmp1, tmp2);
7503 tcg_temp_free_i32(tmp2);
7505 if (logic_cc) {
7506 gen_logic_CC(tmp1);
7508 return store_reg_kind(s, a->rd, tmp1, kind);
7511 static bool op_s_rxi_rot(DisasContext *s, arg_s_rri_rot *a,
7512 void (*gen)(TCGv_i32, TCGv_i32),
7513 int logic_cc, StoreRegKind kind)
7515 TCGv_i32 tmp;
7516 uint32_t imm;
7518 imm = ror32(a->imm, a->rot);
7519 if (logic_cc && a->rot) {
7520 tcg_gen_movi_i32(cpu_CF, imm >> 31);
7522 tmp = tcg_const_i32(imm);
7524 gen(tmp, tmp);
7525 if (logic_cc) {
7526 gen_logic_CC(tmp);
7528 return store_reg_kind(s, a->rd, tmp, kind);
7531 #define DO_ANY3(NAME, OP, L, K) \
7532 static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a) \
7533 { StoreRegKind k = (K); return op_s_rrr_shi(s, a, OP, L, k); } \
7534 static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a) \
7535 { StoreRegKind k = (K); return op_s_rrr_shr(s, a, OP, L, k); } \
7536 static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a) \
7537 { StoreRegKind k = (K); return op_s_rri_rot(s, a, OP, L, k); }
7539 #define DO_ANY2(NAME, OP, L, K) \
7540 static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a) \
7541 { StoreRegKind k = (K); return op_s_rxr_shi(s, a, OP, L, k); } \
7542 static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a) \
7543 { StoreRegKind k = (K); return op_s_rxr_shr(s, a, OP, L, k); } \
7544 static bool trans_##NAME##_rxi(DisasContext *s, arg_s_rri_rot *a) \
7545 { StoreRegKind k = (K); return op_s_rxi_rot(s, a, OP, L, k); }
7547 #define DO_CMP2(NAME, OP, L) \
7548 static bool trans_##NAME##_xrri(DisasContext *s, arg_s_rrr_shi *a) \
7549 { return op_s_rrr_shi(s, a, OP, L, STREG_NONE); } \
7550 static bool trans_##NAME##_xrrr(DisasContext *s, arg_s_rrr_shr *a) \
7551 { return op_s_rrr_shr(s, a, OP, L, STREG_NONE); } \
7552 static bool trans_##NAME##_xri(DisasContext *s, arg_s_rri_rot *a) \
7553 { return op_s_rri_rot(s, a, OP, L, STREG_NONE); }
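/*
 * Naming convention for the generated trans_* functions: the suffix letters
 * describe the operand forms (destination, Rn, Rm or immediate, shift).  An
 * 'x' marks an operand the form does not have: the DO_ANY2 expansions
 * (MOV/MVN style) have no Rn, and the DO_CMP2 expansions (TST/TEQ/CMN/CMP)
 * have no destination.
 */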
7555 DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
7556 DO_ANY3(EOR, tcg_gen_xor_i32, a->s, STREG_NORMAL)
7557 DO_ANY3(ORR, tcg_gen_or_i32, a->s, STREG_NORMAL)
7558 DO_ANY3(BIC, tcg_gen_andc_i32, a->s, STREG_NORMAL)
7560 DO_ANY3(RSB, a->s ? gen_rsb_CC : gen_rsb, false, STREG_NORMAL)
7561 DO_ANY3(ADC, a->s ? gen_adc_CC : gen_add_carry, false, STREG_NORMAL)
7562 DO_ANY3(SBC, a->s ? gen_sbc_CC : gen_sub_carry, false, STREG_NORMAL)
7563 DO_ANY3(RSC, a->s ? gen_rsc_CC : gen_rsc, false, STREG_NORMAL)
7565 DO_CMP2(TST, tcg_gen_and_i32, true)
7566 DO_CMP2(TEQ, tcg_gen_xor_i32, true)
7567 DO_CMP2(CMN, gen_add_CC, false)
7568 DO_CMP2(CMP, gen_sub_CC, false)
7570 DO_ANY3(ADD, a->s ? gen_add_CC : tcg_gen_add_i32, false,
7571 a->rd == 13 && a->rn == 13 ? STREG_SP_CHECK : STREG_NORMAL)
7574 * Note for the computation of StoreRegKind we return out of the
7575 * middle of the functions that are expanded by DO_ANY3, and that
7576 * we modify a->s via that parameter before it is used by OP.
7578 DO_ANY3(SUB, a->s ? gen_sub_CC : tcg_gen_sub_i32, false,
7580 StoreRegKind ret = STREG_NORMAL;
7581 if (a->rd == 15 && a->s) {
7583 * See ALUExceptionReturn:
7584 * In User mode, UNPREDICTABLE; we choose UNDEF.
7585 * In Hyp mode, UNDEFINED.
7587 if (IS_USER(s) || s->current_el == 2) {
7588 unallocated_encoding(s);
7589 return true;
7591 /* There is no writeback of nzcv to PSTATE. */
7592 a->s = 0;
7593 ret = STREG_EXC_RET;
7594 } else if (a->rd == 13 && a->rn == 13) {
7595 ret = STREG_SP_CHECK;
7597 ret;
7600 DO_ANY2(MOV, tcg_gen_mov_i32, a->s,
7602 StoreRegKind ret = STREG_NORMAL;
7603 if (a->rd == 15 && a->s) {
7605 * See ALUExceptionReturn:
7606 * In User mode, UNPREDICTABLE; we choose UNDEF.
7607 * In Hyp mode, UNDEFINED.
7609 if (IS_USER(s) || s->current_el == 2) {
7610 unallocated_encoding(s);
7611 return true;
7613 /* There is no writeback of nzcv to PSTATE. */
7614 a->s = 0;
7615 ret = STREG_EXC_RET;
7616 } else if (a->rd == 13) {
7617 ret = STREG_SP_CHECK;
7619 ret;
7622 DO_ANY2(MVN, tcg_gen_not_i32, a->s, STREG_NORMAL)
7625 * ORN is only available with T32, so there is no register-shifted-register
7626 * form of the insn. Using the DO_ANY3 macro would create an unused function.
7628 static bool trans_ORN_rrri(DisasContext *s, arg_s_rrr_shi *a)
7630 return op_s_rrr_shi(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
7633 static bool trans_ORN_rri(DisasContext *s, arg_s_rri_rot *a)
7635 return op_s_rri_rot(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
7638 #undef DO_ANY3
7639 #undef DO_ANY2
7640 #undef DO_CMP2
7642 static bool trans_ADR(DisasContext *s, arg_ri *a)
7644 store_reg_bx(s, a->rd, add_reg_for_lit(s, 15, a->imm));
7645 return true;
7648 static bool trans_MOVW(DisasContext *s, arg_MOVW *a)
7650 TCGv_i32 tmp;
7652 if (!ENABLE_ARCH_6T2) {
7653 return false;
7656 tmp = tcg_const_i32(a->imm);
7657 store_reg(s, a->rd, tmp);
7658 return true;
7661 static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
7663 TCGv_i32 tmp;
7665 if (!ENABLE_ARCH_6T2) {
7666 return false;
7669 tmp = load_reg(s, a->rd);
7670 tcg_gen_ext16u_i32(tmp, tmp);
7671 tcg_gen_ori_i32(tmp, tmp, a->imm << 16);
7672 store_reg(s, a->rd, tmp);
7673 return true;
7677 * Multiply and multiply accumulate
7680 static bool op_mla(DisasContext *s, arg_s_rrrr *a, bool add)
7682 TCGv_i32 t1, t2;
7684 t1 = load_reg(s, a->rn);
7685 t2 = load_reg(s, a->rm);
7686 tcg_gen_mul_i32(t1, t1, t2);
7687 tcg_temp_free_i32(t2);
7688 if (add) {
7689 t2 = load_reg(s, a->ra);
7690 tcg_gen_add_i32(t1, t1, t2);
7691 tcg_temp_free_i32(t2);
7693 if (a->s) {
7694 gen_logic_CC(t1);
7696 store_reg(s, a->rd, t1);
7697 return true;
7700 static bool trans_MUL(DisasContext *s, arg_MUL *a)
7702 return op_mla(s, a, false);
7705 static bool trans_MLA(DisasContext *s, arg_MLA *a)
7707 return op_mla(s, a, true);
7710 static bool trans_MLS(DisasContext *s, arg_MLS *a)
7712 TCGv_i32 t1, t2;
7714 if (!ENABLE_ARCH_6T2) {
7715 return false;
7717 t1 = load_reg(s, a->rn);
7718 t2 = load_reg(s, a->rm);
7719 tcg_gen_mul_i32(t1, t1, t2);
7720 tcg_temp_free_i32(t2);
7721 t2 = load_reg(s, a->ra);
7722 tcg_gen_sub_i32(t1, t2, t1);
7723 tcg_temp_free_i32(t2);
7724 store_reg(s, a->rd, t1);
7725 return true;
7728 static bool op_mlal(DisasContext *s, arg_s_rrrr *a, bool uns, bool add)
7730 TCGv_i32 t0, t1, t2, t3;
7732 t0 = load_reg(s, a->rm);
7733 t1 = load_reg(s, a->rn);
7734 if (uns) {
7735 tcg_gen_mulu2_i32(t0, t1, t0, t1);
7736 } else {
7737 tcg_gen_muls2_i32(t0, t1, t0, t1);
7739 if (add) {
7740 t2 = load_reg(s, a->ra);
7741 t3 = load_reg(s, a->rd);
7742 tcg_gen_add2_i32(t0, t1, t0, t1, t2, t3);
7743 tcg_temp_free_i32(t2);
7744 tcg_temp_free_i32(t3);
7746 if (a->s) {
7747 gen_logicq_cc(t0, t1);
7749 store_reg(s, a->ra, t0);
7750 store_reg(s, a->rd, t1);
7751 return true;
7754 static bool trans_UMULL(DisasContext *s, arg_UMULL *a)
7756 return op_mlal(s, a, true, false);
7759 static bool trans_SMULL(DisasContext *s, arg_SMULL *a)
7761 return op_mlal(s, a, false, false);
7764 static bool trans_UMLAL(DisasContext *s, arg_UMLAL *a)
7766 return op_mlal(s, a, true, true);
7769 static bool trans_SMLAL(DisasContext *s, arg_SMLAL *a)
7771 return op_mlal(s, a, false, true);
7774 static bool trans_UMAAL(DisasContext *s, arg_UMAAL *a)
7776 TCGv_i32 t0, t1, t2, zero;
7778 if (s->thumb
7779 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
7780 : !ENABLE_ARCH_6) {
7781 return false;
7784 t0 = load_reg(s, a->rm);
7785 t1 = load_reg(s, a->rn);
7786 tcg_gen_mulu2_i32(t0, t1, t0, t1);
7787 zero = tcg_const_i32(0);
7788 t2 = load_reg(s, a->ra);
7789 tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
7790 tcg_temp_free_i32(t2);
7791 t2 = load_reg(s, a->rd);
7792 tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
7793 tcg_temp_free_i32(t2);
7794 tcg_temp_free_i32(zero);
7795 store_reg(s, a->ra, t0);
7796 store_reg(s, a->rd, t1);
7797 return true;
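/*
 * UMAAL above computes Rm * Rn + Ra + Rd as an unsigned 64-bit value.  This
 * cannot overflow, since (2^32 - 1)^2 + 2 * (2^32 - 1) == 2^64 - 1, so the
 * two add2 steps with a zero high word are sufficient.
 */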
7800 /*
7801  * Saturating addition and subtraction
7802  */
7804 static bool op_qaddsub(DisasContext *s, arg_rrr *a, bool add, bool doub)
7806 TCGv_i32 t0, t1;
7808 if (s->thumb
7809 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
7810 : !ENABLE_ARCH_5TE) {
7811 return false;
7814 t0 = load_reg(s, a->rm);
7815 t1 = load_reg(s, a->rn);
7816 if (doub) {
7817 gen_helper_add_saturate(t1, cpu_env, t1, t1);
7819 if (add) {
7820 gen_helper_add_saturate(t0, cpu_env, t0, t1);
7821 } else {
7822 gen_helper_sub_saturate(t0, cpu_env, t0, t1);
7824 tcg_temp_free_i32(t1);
7825 store_reg(s, a->rd, t0);
7826 return true;
7829 #define DO_QADDSUB(NAME, ADD, DOUB) \
7830 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
7832 return op_qaddsub(s, a, ADD, DOUB); \
7835 DO_QADDSUB(QADD, true, false)
7836 DO_QADDSUB(QSUB, false, false)
7837 DO_QADDSUB(QDADD, true, true)
7838 DO_QADDSUB(QDSUB, false, true)
7840 #undef DO_QADDSUB
7843 * Halfword multiply and multiply accumulate
7846 static bool op_smlaxxx(DisasContext *s, arg_rrrr *a,
7847 int add_long, bool nt, bool mt)
7849 TCGv_i32 t0, t1, tl, th;
7851 if (s->thumb
7852 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
7853 : !ENABLE_ARCH_5TE) {
7854 return false;
7857 t0 = load_reg(s, a->rn);
7858 t1 = load_reg(s, a->rm);
7859 gen_mulxy(t0, t1, nt, mt);
7860 tcg_temp_free_i32(t1);
7862 switch (add_long) {
7863 case 0:
7864 store_reg(s, a->rd, t0);
7865 break;
7866 case 1:
7867 t1 = load_reg(s, a->ra);
7868 gen_helper_add_setq(t0, cpu_env, t0, t1);
7869 tcg_temp_free_i32(t1);
7870 store_reg(s, a->rd, t0);
7871 break;
7872 case 2:
7873 tl = load_reg(s, a->ra);
7874 th = load_reg(s, a->rd);
7875 /* Sign-extend the 32-bit product to 64 bits. */
7876 t1 = tcg_temp_new_i32();
7877 tcg_gen_sari_i32(t1, t0, 31);
7878 tcg_gen_add2_i32(tl, th, tl, th, t0, t1);
7879 tcg_temp_free_i32(t0);
7880 tcg_temp_free_i32(t1);
7881 store_reg(s, a->ra, tl);
7882 store_reg(s, a->rd, th);
7883 break;
7884 default:
7885 g_assert_not_reached();
7887 return true;
7890 #define DO_SMLAX(NAME, add, nt, mt) \
7891 static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
7893 return op_smlaxxx(s, a, add, nt, mt); \
7896 DO_SMLAX(SMULBB, 0, 0, 0)
7897 DO_SMLAX(SMULBT, 0, 0, 1)
7898 DO_SMLAX(SMULTB, 0, 1, 0)
7899 DO_SMLAX(SMULTT, 0, 1, 1)
7901 DO_SMLAX(SMLABB, 1, 0, 0)
7902 DO_SMLAX(SMLABT, 1, 0, 1)
7903 DO_SMLAX(SMLATB, 1, 1, 0)
7904 DO_SMLAX(SMLATT, 1, 1, 1)
7906 DO_SMLAX(SMLALBB, 2, 0, 0)
7907 DO_SMLAX(SMLALBT, 2, 0, 1)
7908 DO_SMLAX(SMLALTB, 2, 1, 0)
7909 DO_SMLAX(SMLALTT, 2, 1, 1)
7911 #undef DO_SMLAX
7913 static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt)
7915 TCGv_i32 t0, t1;
7917 if (!ENABLE_ARCH_5TE) {
7918 return false;
7921 t0 = load_reg(s, a->rn);
7922 t1 = load_reg(s, a->rm);
7924 * Since the nominal result is product<47:16>, shift the 16-bit
7925 * input up by 16 bits, so that the result is at product<63:32>.
7927 if (mt) {
7928 tcg_gen_andi_i32(t1, t1, 0xffff0000);
7929 } else {
7930 tcg_gen_shli_i32(t1, t1, 16);
7932 tcg_gen_muls2_i32(t0, t1, t0, t1);
7933 tcg_temp_free_i32(t0);
7934 if (add) {
7935 t0 = load_reg(s, a->ra);
7936 gen_helper_add_setq(t1, cpu_env, t1, t0);
7937 tcg_temp_free_i32(t0);
7939 store_reg(s, a->rd, t1);
7940 return true;
7943 #define DO_SMLAWX(NAME, add, mt) \
7944 static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
7946 return op_smlawx(s, a, add, mt); \
7949 DO_SMLAWX(SMULWB, 0, 0)
7950 DO_SMLAWX(SMULWT, 0, 1)
7951 DO_SMLAWX(SMLAWB, 1, 0)
7952 DO_SMLAWX(SMLAWT, 1, 1)
7954 #undef DO_SMLAWX
7957 * MSR (immediate) and hints
7960 static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
7963 * When running single-threaded TCG code, use the helper to ensure that
7964 * the next round-robin scheduled vCPU gets a crack. When running in
7965 * MTTCG we don't generate jumps to the helper as it won't affect the
7966 * scheduling of other vCPUs.
7968 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
7969 gen_set_pc_im(s, s->base.pc_next);
7970 s->base.is_jmp = DISAS_YIELD;
7972 return true;
7975 static bool trans_WFE(DisasContext *s, arg_WFE *a)
7978 * When running single-threaded TCG code, use the helper to ensure that
7979 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
7980 * just skip this instruction. Currently the SEV/SEVL instructions,
7981 * which are *one* of many ways to wake the CPU from WFE, are not
7982 * implemented so we can't sleep like WFI does.
7984 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
7985 gen_set_pc_im(s, s->base.pc_next);
7986 s->base.is_jmp = DISAS_WFE;
7988 return true;
7991 static bool trans_WFI(DisasContext *s, arg_WFI *a)
7993 /* For WFI, halt the vCPU until an IRQ. */
7994 gen_set_pc_im(s, s->base.pc_next);
7995 s->base.is_jmp = DISAS_WFI;
7996 return true;
7999 static bool trans_NOP(DisasContext *s, arg_NOP *a)
8001 return true;
8004 static bool trans_MSR_imm(DisasContext *s, arg_MSR_imm *a)
8006 uint32_t val = ror32(a->imm, a->rot * 2);
8007 uint32_t mask = msr_mask(s, a->mask, a->r);
8009 if (gen_set_psr_im(s, mask, a->r, val)) {
8010 unallocated_encoding(s);
8012 return true;
8016 * Cyclic Redundancy Check
8019 static bool op_crc32(DisasContext *s, arg_rrr *a, bool c, MemOp sz)
8021 TCGv_i32 t1, t2, t3;
8023 if (!dc_isar_feature(aa32_crc32, s)) {
8024 return false;
8027 t1 = load_reg(s, a->rn);
8028 t2 = load_reg(s, a->rm);
8029 switch (sz) {
8030 case MO_8:
8031 gen_uxtb(t2);
8032 break;
8033 case MO_16:
8034 gen_uxth(t2);
8035 break;
8036 case MO_32:
8037 break;
8038 default:
8039 g_assert_not_reached();
8041 t3 = tcg_const_i32(1 << sz);
8042 if (c) {
8043 gen_helper_crc32c(t1, t1, t2, t3);
8044 } else {
8045 gen_helper_crc32(t1, t1, t2, t3);
8047 tcg_temp_free_i32(t2);
8048 tcg_temp_free_i32(t3);
8049 store_reg(s, a->rd, t1);
8050 return true;
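/*
 * The t3 constant passes the operand width in bytes (1 << sz) to the
 * crc32/crc32c helpers, so a single helper pair serves the B/H/W variants;
 * the operand is zero-extended to that width first.
 */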
8053 #define DO_CRC32(NAME, c, sz) \
8054 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
8055 { return op_crc32(s, a, c, sz); }
8057 DO_CRC32(CRC32B, false, MO_8)
8058 DO_CRC32(CRC32H, false, MO_16)
8059 DO_CRC32(CRC32W, false, MO_32)
8060 DO_CRC32(CRC32CB, true, MO_8)
8061 DO_CRC32(CRC32CH, true, MO_16)
8062 DO_CRC32(CRC32CW, true, MO_32)
8064 #undef DO_CRC32
8067 * Miscellaneous instructions
8070 static bool trans_MRS_bank(DisasContext *s, arg_MRS_bank *a)
8072 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8073 return false;
8075 gen_mrs_banked(s, a->r, a->sysm, a->rd);
8076 return true;
8079 static bool trans_MSR_bank(DisasContext *s, arg_MSR_bank *a)
8081 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8082 return false;
8084 gen_msr_banked(s, a->r, a->sysm, a->rn);
8085 return true;
8088 static bool trans_MRS_reg(DisasContext *s, arg_MRS_reg *a)
8090 TCGv_i32 tmp;
8092 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8093 return false;
8095 if (a->r) {
8096 if (IS_USER(s)) {
8097 unallocated_encoding(s);
8098 return true;
8100 tmp = load_cpu_field(spsr);
8101 } else {
8102 tmp = tcg_temp_new_i32();
8103 gen_helper_cpsr_read(tmp, cpu_env);
8105 store_reg(s, a->rd, tmp);
8106 return true;
8109 static bool trans_MSR_reg(DisasContext *s, arg_MSR_reg *a)
8111 TCGv_i32 tmp;
8112 uint32_t mask = msr_mask(s, a->mask, a->r);
8114 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8115 return false;
8117 tmp = load_reg(s, a->rn);
8118 if (gen_set_psr(s, mask, a->r, tmp)) {
8119 unallocated_encoding(s);
8121 return true;
8124 static bool trans_MRS_v7m(DisasContext *s, arg_MRS_v7m *a)
8126 TCGv_i32 tmp;
8128 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
8129 return false;
8131 tmp = tcg_const_i32(a->sysm);
8132 gen_helper_v7m_mrs(tmp, cpu_env, tmp);
8133 store_reg(s, a->rd, tmp);
8134 return true;
8137 static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a)
8139 TCGv_i32 addr, reg;
8141 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
8142 return false;
8144 addr = tcg_const_i32((a->mask << 10) | a->sysm);
8145 reg = load_reg(s, a->rn);
8146 gen_helper_v7m_msr(cpu_env, addr, reg);
8147 tcg_temp_free_i32(addr);
8148 tcg_temp_free_i32(reg);
8149 /* If we wrote to CONTROL, the EL might have changed */
8150 gen_helper_rebuild_hflags_m32_newel(cpu_env);
8151 gen_lookup_tb(s);
8152 return true;
8155 static bool trans_BX(DisasContext *s, arg_BX *a)
8157 if (!ENABLE_ARCH_4T) {
8158 return false;
8160 gen_bx_excret(s, load_reg(s, a->rm));
8161 return true;
8164 static bool trans_BXJ(DisasContext *s, arg_BXJ *a)
8166 if (!ENABLE_ARCH_5J || arm_dc_feature(s, ARM_FEATURE_M)) {
8167 return false;
8169 /* Trivial implementation equivalent to bx. */
8170 gen_bx(s, load_reg(s, a->rm));
8171 return true;
8174 static bool trans_BLX_r(DisasContext *s, arg_BLX_r *a)
8176 TCGv_i32 tmp;
8178 if (!ENABLE_ARCH_5) {
8179 return false;
8181 tmp = load_reg(s, a->rm);
8182 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
8183 gen_bx(s, tmp);
8184 return true;
8188 * BXNS/BLXNS: only exist for v8M with the security extensions,
8189 * and always UNDEF if NonSecure. We don't implement these in
8190 * the user-only mode either (in theory you can use them from
8191 * Secure User mode but they are too tied in to system emulation).
8193 static bool trans_BXNS(DisasContext *s, arg_BXNS *a)
8195 if (!s->v8m_secure || IS_USER_ONLY) {
8196 unallocated_encoding(s);
8197 } else {
8198 gen_bxns(s, a->rm);
8200 return true;
8203 static bool trans_BLXNS(DisasContext *s, arg_BLXNS *a)
8205 if (!s->v8m_secure || IS_USER_ONLY) {
8206 unallocated_encoding(s);
8207 } else {
8208 gen_blxns(s, a->rm);
8210 return true;
8213 static bool trans_CLZ(DisasContext *s, arg_CLZ *a)
8215 TCGv_i32 tmp;
8217 if (!ENABLE_ARCH_5) {
8218 return false;
8220 tmp = load_reg(s, a->rm);
8221 tcg_gen_clzi_i32(tmp, tmp, 32);
8222 store_reg(s, a->rd, tmp);
8223 return true;
8226 static bool trans_ERET(DisasContext *s, arg_ERET *a)
8228 TCGv_i32 tmp;
8230 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
8231 return false;
8233 if (IS_USER(s)) {
8234 unallocated_encoding(s);
8235 return true;
8237 if (s->current_el == 2) {
8238 /* ERET from Hyp uses ELR_Hyp, not LR */
8239 tmp = load_cpu_field(elr_el[2]);
8240 } else {
8241 tmp = load_reg(s, 14);
8243 gen_exception_return(s, tmp);
8244 return true;
8247 static bool trans_HLT(DisasContext *s, arg_HLT *a)
8249 gen_hlt(s, a->imm);
8250 return true;
8253 static bool trans_BKPT(DisasContext *s, arg_BKPT *a)
8255 if (!ENABLE_ARCH_5) {
8256 return false;
8258 if (arm_dc_feature(s, ARM_FEATURE_M) &&
8259 semihosting_enabled() &&
8260 #ifndef CONFIG_USER_ONLY
8261 !IS_USER(s) &&
8262 #endif
8263 (a->imm == 0xab)) {
8264 gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
8265 } else {
8266 gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false));
8268 return true;
8271 static bool trans_HVC(DisasContext *s, arg_HVC *a)
8273 if (!ENABLE_ARCH_7 || arm_dc_feature(s, ARM_FEATURE_M)) {
8274 return false;
8276 if (IS_USER(s)) {
8277 unallocated_encoding(s);
8278 } else {
8279 gen_hvc(s, a->imm);
8281 return true;
8284 static bool trans_SMC(DisasContext *s, arg_SMC *a)
8286 if (!ENABLE_ARCH_6K || arm_dc_feature(s, ARM_FEATURE_M)) {
8287 return false;
8289 if (IS_USER(s)) {
8290 unallocated_encoding(s);
8291 } else {
8292 gen_smc(s);
8294 return true;
8297 static bool trans_SG(DisasContext *s, arg_SG *a)
8299 if (!arm_dc_feature(s, ARM_FEATURE_M) ||
8300 !arm_dc_feature(s, ARM_FEATURE_V8)) {
8301 return false;
8304 * SG (v8M only)
8305 * The bulk of the behaviour for this instruction is implemented
8306 * in v7m_handle_execute_nsc(), which deals with the insn when
8307 * it is executed by a CPU in non-secure state from memory
8308 * which is Secure & NonSecure-Callable.
8309 * Here we only need to handle the remaining cases:
8310 * * in NS memory (including the "security extension not
8311 * implemented" case) : NOP
8312 * * in S memory but CPU already secure (clear IT bits)
8313 * We know that the attribute for the memory this insn is
8314 * in must match the current CPU state, because otherwise
8315 * get_phys_addr_pmsav8 would have generated an exception.
8317 if (s->v8m_secure) {
8318 /* Like the IT insn, we don't need to generate any code */
8319 s->condexec_cond = 0;
8320 s->condexec_mask = 0;
8322 return true;
8325 static bool trans_TT(DisasContext *s, arg_TT *a)
8327 TCGv_i32 addr, tmp;
8329 if (!arm_dc_feature(s, ARM_FEATURE_M) ||
8330 !arm_dc_feature(s, ARM_FEATURE_V8)) {
8331 return false;
8333 if (a->rd == 13 || a->rd == 15 || a->rn == 15) {
8334 /* We UNDEF for these UNPREDICTABLE cases */
8335 unallocated_encoding(s);
8336 return true;
8338 if (a->A && !s->v8m_secure) {
8339 /* This case is UNDEFINED. */
8340 unallocated_encoding(s);
8341 return true;
8344 addr = load_reg(s, a->rn);
8345 tmp = tcg_const_i32((a->A << 1) | a->T);
8346 gen_helper_v7m_tt(tmp, cpu_env, addr, tmp);
8347 tcg_temp_free_i32(addr);
8348 store_reg(s, a->rd, tmp);
8349 return true;
8353 * Load/store register index
8356 static ISSInfo make_issinfo(DisasContext *s, int rd, bool p, bool w)
8358 ISSInfo ret;
8360 /* ISS not valid if writeback */
8361 if (p && !w) {
8362 ret = rd;
8363 if (s->base.pc_next - s->pc_curr == 2) {
8364 ret |= ISSIs16Bit;
8366 } else {
8367 ret = ISSInvalid;
8369 return ret;
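/*
 * The ISSInfo computed above feeds the data abort syndrome (via
 * disas_set_da_iss in the load/store helpers): it records the transfer
 * register and whether this was a 16-bit encoding, but only for forms
 * without writeback, since the architecture does not report a valid ISS
 * for writeback forms.
 */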
8372 static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a)
8374 TCGv_i32 addr = load_reg(s, a->rn);
8376 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
8377 gen_helper_v8m_stackcheck(cpu_env, addr);
8380 if (a->p) {
8381 TCGv_i32 ofs = load_reg(s, a->rm);
8382 gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
8383 if (a->u) {
8384 tcg_gen_add_i32(addr, addr, ofs);
8385 } else {
8386 tcg_gen_sub_i32(addr, addr, ofs);
8388 tcg_temp_free_i32(ofs);
8390 return addr;
8393 static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a,
8394 TCGv_i32 addr, int address_offset)
8396 if (!a->p) {
8397 TCGv_i32 ofs = load_reg(s, a->rm);
8398 gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
8399 if (a->u) {
8400 tcg_gen_add_i32(addr, addr, ofs);
8401 } else {
8402 tcg_gen_sub_i32(addr, addr, ofs);
8404 tcg_temp_free_i32(ofs);
8405 } else if (!a->w) {
8406 tcg_temp_free_i32(addr);
8407 return;
8409 tcg_gen_addi_i32(addr, addr, address_offset);
8410 store_reg(s, a->rn, addr);
8413 static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
8414 MemOp mop, int mem_idx)
8416 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
8417 TCGv_i32 addr, tmp;
8419 addr = op_addr_rr_pre(s, a);
8421 tmp = tcg_temp_new_i32();
8422 gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
8423 disas_set_da_iss(s, mop, issinfo);
8426 * Perform base writeback before the loaded value to
8427 * ensure correct behavior with overlapping index registers.
8429 op_addr_rr_post(s, a, addr, 0);
8430 store_reg_from_load(s, a->rt, tmp);
8431 return true;
8434 static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
8435 MemOp mop, int mem_idx)
8437 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
8438 TCGv_i32 addr, tmp;
8440 addr = op_addr_rr_pre(s, a);
8442 tmp = load_reg(s, a->rt);
8443 gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
8444 disas_set_da_iss(s, mop, issinfo);
8445 tcg_temp_free_i32(tmp);
8447 op_addr_rr_post(s, a, addr, 0);
8448 return true;
8451 static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
8453 int mem_idx = get_mem_index(s);
8454 TCGv_i32 addr, tmp;
8456 if (!ENABLE_ARCH_5TE) {
8457 return false;
8459 if (a->rt & 1) {
8460 unallocated_encoding(s);
8461 return true;
8463 addr = op_addr_rr_pre(s, a);
8465 tmp = tcg_temp_new_i32();
8466 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8467 store_reg(s, a->rt, tmp);
8469 tcg_gen_addi_i32(addr, addr, 4);
8471 tmp = tcg_temp_new_i32();
8472 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8473 store_reg(s, a->rt + 1, tmp);
8475 /* LDRD w/ base writeback is undefined if the registers overlap. */
8476 op_addr_rr_post(s, a, addr, -4);
8477 return true;
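/*
 * The -4 passed to op_addr_rr_post() above undoes the advance to the
 * second word, so that any base register writeback sees the address
 * of the first transfer as the architecture requires.
 */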
8480 static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
8482 int mem_idx = get_mem_index(s);
8483 TCGv_i32 addr, tmp;
8485 if (!ENABLE_ARCH_5TE) {
8486 return false;
8488 if (a->rt & 1) {
8489 unallocated_encoding(s);
8490 return true;
8492 addr = op_addr_rr_pre(s, a);
8494 tmp = load_reg(s, a->rt);
8495 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8496 tcg_temp_free_i32(tmp);
8498 tcg_gen_addi_i32(addr, addr, 4);
8500 tmp = load_reg(s, a->rt + 1);
8501 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8502 tcg_temp_free_i32(tmp);
8504 op_addr_rr_post(s, a, addr, -4);
8505 return true;
8509 * Load/store immediate index
8512 static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a)
8514 int ofs = a->imm;
8516 if (!a->u) {
8517 ofs = -ofs;
8520 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
8522 * Stackcheck. Here we know 'addr' is the current SP;
8523 * U is set if we're moving SP up, else down. It is
8524 * UNKNOWN whether the limit check triggers when SP starts
8525 * below the limit and ends up above it; we chose to do so.
8527 if (!a->u) {
8528 TCGv_i32 newsp = tcg_temp_new_i32();
8529 tcg_gen_addi_i32(newsp, cpu_R[13], ofs);
8530 gen_helper_v8m_stackcheck(cpu_env, newsp);
8531 tcg_temp_free_i32(newsp);
8532 } else {
8533 gen_helper_v8m_stackcheck(cpu_env, cpu_R[13]);
8537 return add_reg_for_lit(s, a->rn, a->p ? ofs : 0);
8540 static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a,
8541 TCGv_i32 addr, int address_offset)
8543 if (!a->p) {
8544 if (a->u) {
8545 address_offset += a->imm;
8546 } else {
8547 address_offset -= a->imm;
8549 } else if (!a->w) {
8550 tcg_temp_free_i32(addr);
8551 return;
8553 tcg_gen_addi_i32(addr, addr, address_offset);
8554 store_reg(s, a->rn, addr);
8557 static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
8558 MemOp mop, int mem_idx)
8560 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
8561 TCGv_i32 addr, tmp;
8563 addr = op_addr_ri_pre(s, a);
8565 tmp = tcg_temp_new_i32();
8566 gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
8567 disas_set_da_iss(s, mop, issinfo);
8570 * Perform base writeback before the loaded value to
8571 * ensure correct behavior with overlapping index registers.
8573 op_addr_ri_post(s, a, addr, 0);
8574 store_reg_from_load(s, a->rt, tmp);
8575 return true;
8578 static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
8579 MemOp mop, int mem_idx)
8581 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
8582 TCGv_i32 addr, tmp;
8584 addr = op_addr_ri_pre(s, a);
8586 tmp = load_reg(s, a->rt);
8587 gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
8588 disas_set_da_iss(s, mop, issinfo);
8589 tcg_temp_free_i32(tmp);
8591 op_addr_ri_post(s, a, addr, 0);
8592 return true;
8595 static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
8597 int mem_idx = get_mem_index(s);
8598 TCGv_i32 addr, tmp;
8600 addr = op_addr_ri_pre(s, a);
8602 tmp = tcg_temp_new_i32();
8603 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8604 store_reg(s, a->rt, tmp);
8606 tcg_gen_addi_i32(addr, addr, 4);
8608 tmp = tcg_temp_new_i32();
8609 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8610 store_reg(s, rt2, tmp);
8612 /* LDRD w/ base writeback is undefined if the registers overlap. */
8613 op_addr_ri_post(s, a, addr, -4);
8614 return true;
8617 static bool trans_LDRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
8619 if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
8620 return false;
8622 return op_ldrd_ri(s, a, a->rt + 1);
8625 static bool trans_LDRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
8627 arg_ldst_ri b = {
8628 .u = a->u, .w = a->w, .p = a->p,
8629 .rn = a->rn, .rt = a->rt, .imm = a->imm
8631 return op_ldrd_ri(s, &b, a->rt2);
8634 static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
8636 int mem_idx = get_mem_index(s);
8637 TCGv_i32 addr, tmp;
8639 addr = op_addr_ri_pre(s, a);
8641 tmp = load_reg(s, a->rt);
8642 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8643 tcg_temp_free_i32(tmp);
8645 tcg_gen_addi_i32(addr, addr, 4);
8647 tmp = load_reg(s, rt2);
8648 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
8649 tcg_temp_free_i32(tmp);
8651 op_addr_ri_post(s, a, addr, -4);
8652 return true;
8655 static bool trans_STRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
8657 if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
8658 return false;
8660 return op_strd_ri(s, a, a->rt + 1);
8663 static bool trans_STRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
8665 arg_ldst_ri b = {
8666 .u = a->u, .w = a->w, .p = a->p,
8667 .rn = a->rn, .rt = a->rt, .imm = a->imm
8669 return op_strd_ri(s, &b, a->rt2);
8672 #define DO_LDST(NAME, WHICH, MEMOP) \
8673 static bool trans_##NAME##_ri(DisasContext *s, arg_ldst_ri *a) \
8675 return op_##WHICH##_ri(s, a, MEMOP, get_mem_index(s)); \
8677 static bool trans_##NAME##T_ri(DisasContext *s, arg_ldst_ri *a) \
8679 return op_##WHICH##_ri(s, a, MEMOP, get_a32_user_mem_index(s)); \
8681 static bool trans_##NAME##_rr(DisasContext *s, arg_ldst_rr *a) \
8683 return op_##WHICH##_rr(s, a, MEMOP, get_mem_index(s)); \
8685 static bool trans_##NAME##T_rr(DisasContext *s, arg_ldst_rr *a) \
8687 return op_##WHICH##_rr(s, a, MEMOP, get_a32_user_mem_index(s)); \
8690 DO_LDST(LDR, load, MO_UL)
8691 DO_LDST(LDRB, load, MO_UB)
8692 DO_LDST(LDRH, load, MO_UW)
8693 DO_LDST(LDRSB, load, MO_SB)
8694 DO_LDST(LDRSH, load, MO_SW)
8696 DO_LDST(STR, store, MO_UL)
8697 DO_LDST(STRB, store, MO_UB)
8698 DO_LDST(STRH, store, MO_UW)
8700 #undef DO_LDST
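/*
 * As an illustration, DO_LDST(LDR, load, MO_UL) above expands to the
 * four functions trans_LDR_ri, trans_LDRT_ri, trans_LDR_rr and
 * trans_LDRT_rr, where the LDRT/STRT "T" forms use the unprivileged
 * memory index from get_a32_user_mem_index() instead of the current
 * one.
 */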
8703 * Synchronization primitives
8706 static bool op_swp(DisasContext *s, arg_SWP *a, MemOp opc)
8708 TCGv_i32 addr, tmp;
8709 TCGv taddr;
8711 opc |= s->be_data;
8712 addr = load_reg(s, a->rn);
8713 taddr = gen_aa32_addr(s, addr, opc);
8714 tcg_temp_free_i32(addr);
8716 tmp = load_reg(s, a->rt2);
8717 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp, get_mem_index(s), opc);
8718 tcg_temp_free(taddr);
8720 store_reg(s, a->rt, tmp);
8721 return true;
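/*
 * SWP/SWPB are implemented as a single atomic exchange, so the load
 * and store are indivisible even under MTTCG: the old memory value
 * ends up in Rt and the value from Rt2 is stored to [Rn].
 */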
8724 static bool trans_SWP(DisasContext *s, arg_SWP *a)
8726 return op_swp(s, a, MO_UL | MO_ALIGN);
8729 static bool trans_SWPB(DisasContext *s, arg_SWP *a)
8731 return op_swp(s, a, MO_UB);
8735 * Load/Store Exclusive and Load-Acquire/Store-Release
8738 static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel)
8740 TCGv_i32 addr;
8741 /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
8742 bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);
8744 /* We UNDEF for these UNPREDICTABLE cases. */
8745 if (a->rd == 15 || a->rn == 15 || a->rt == 15
8746 || a->rd == a->rn || a->rd == a->rt
8747 || (!v8a && s->thumb && (a->rd == 13 || a->rt == 13))
8748 || (mop == MO_64
8749 && (a->rt2 == 15
8750 || a->rd == a->rt2
8751 || (!v8a && s->thumb && a->rt2 == 13)))) {
8752 unallocated_encoding(s);
8753 return true;
8756 if (rel) {
8757 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
8760 addr = tcg_temp_local_new_i32();
8761 load_reg_var(s, addr, a->rn);
8762 tcg_gen_addi_i32(addr, addr, a->imm);
8764 gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop);
8765 tcg_temp_free_i32(addr);
8766 return true;
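/*
 * For the store-release forms (rel == true) the barrier is emitted
 * before the store-exclusive, ordering all earlier accesses before
 * the release; gen_store_exclusive() writes the success/fail status
 * to Rd.
 */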
8769 static bool trans_STREX(DisasContext *s, arg_STREX *a)
8771 if (!ENABLE_ARCH_6) {
8772 return false;
8774 return op_strex(s, a, MO_32, false);
8777 static bool trans_STREXD_a32(DisasContext *s, arg_STREX *a)
8779 if (!ENABLE_ARCH_6K) {
8780 return false;
8782 /* We UNDEF for these UNPREDICTABLE cases. */
8783 if (a->rt & 1) {
8784 unallocated_encoding(s);
8785 return true;
8787 a->rt2 = a->rt + 1;
8788 return op_strex(s, a, MO_64, false);
8791 static bool trans_STREXD_t32(DisasContext *s, arg_STREX *a)
8793 return op_strex(s, a, MO_64, false);
8796 static bool trans_STREXB(DisasContext *s, arg_STREX *a)
8798 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
8799 return false;
8801 return op_strex(s, a, MO_8, false);
8804 static bool trans_STREXH(DisasContext *s, arg_STREX *a)
8806 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
8807 return false;
8809 return op_strex(s, a, MO_16, false);
8812 static bool trans_STLEX(DisasContext *s, arg_STREX *a)
8814 if (!ENABLE_ARCH_8) {
8815 return false;
8817 return op_strex(s, a, MO_32, true);
8820 static bool trans_STLEXD_a32(DisasContext *s, arg_STREX *a)
8822 if (!ENABLE_ARCH_8) {
8823 return false;
8825 /* We UNDEF for these UNPREDICTABLE cases. */
8826 if (a->rt & 1) {
8827 unallocated_encoding(s);
8828 return true;
8830 a->rt2 = a->rt + 1;
8831 return op_strex(s, a, MO_64, true);
8834 static bool trans_STLEXD_t32(DisasContext *s, arg_STREX *a)
8836 if (!ENABLE_ARCH_8) {
8837 return false;
8839 return op_strex(s, a, MO_64, true);
8842 static bool trans_STLEXB(DisasContext *s, arg_STREX *a)
8844 if (!ENABLE_ARCH_8) {
8845 return false;
8847 return op_strex(s, a, MO_8, true);
8850 static bool trans_STLEXH(DisasContext *s, arg_STREX *a)
8852 if (!ENABLE_ARCH_8) {
8853 return false;
8855 return op_strex(s, a, MO_16, true);
8858 static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
8860 TCGv_i32 addr, tmp;
8862 if (!ENABLE_ARCH_8) {
8863 return false;
8865 /* We UNDEF for these UNPREDICTABLE cases. */
8866 if (a->rn == 15 || a->rt == 15) {
8867 unallocated_encoding(s);
8868 return true;
8871 addr = load_reg(s, a->rn);
8872 tmp = load_reg(s, a->rt);
8873 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
8874 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
8875 disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);
8877 tcg_temp_free_i32(tmp);
8878 tcg_temp_free_i32(addr);
8879 return true;
8882 static bool trans_STL(DisasContext *s, arg_STL *a)
8884 return op_stl(s, a, MO_UL);
8887 static bool trans_STLB(DisasContext *s, arg_STL *a)
8889 return op_stl(s, a, MO_UB);
8892 static bool trans_STLH(DisasContext *s, arg_STL *a)
8894 return op_stl(s, a, MO_UW);
8897 static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq)
8899 TCGv_i32 addr;
8900 /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
8901 bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);
8903 /* We UNDEF for these UNPREDICTABLE cases. */
8904 if (a->rn == 15 || a->rt == 15
8905 || (!v8a && s->thumb && a->rt == 13)
8906 || (mop == MO_64
8907 && (a->rt2 == 15 || a->rt == a->rt2
8908 || (!v8a && s->thumb && a->rt2 == 13)))) {
8909 unallocated_encoding(s);
8910 return true;
8913 addr = tcg_temp_local_new_i32();
8914 load_reg_var(s, addr, a->rn);
8915 tcg_gen_addi_i32(addr, addr, a->imm);
8917 gen_load_exclusive(s, a->rt, a->rt2, addr, mop);
8918 tcg_temp_free_i32(addr);
8920 if (acq) {
8921 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
8923 return true;
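/*
 * For the load-acquire forms (acq == true) the barrier is emitted
 * after the exclusive load, giving acquire ordering: later accesses
 * cannot be reordered ahead of the load.
 */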
8926 static bool trans_LDREX(DisasContext *s, arg_LDREX *a)
8928 if (!ENABLE_ARCH_6) {
8929 return false;
8931 return op_ldrex(s, a, MO_32, false);
8934 static bool trans_LDREXD_a32(DisasContext *s, arg_LDREX *a)
8936 if (!ENABLE_ARCH_6K) {
8937 return false;
8939 /* We UNDEF for these UNPREDICTABLE cases. */
8940 if (a->rt & 1) {
8941 unallocated_encoding(s);
8942 return true;
8944 a->rt2 = a->rt + 1;
8945 return op_ldrex(s, a, MO_64, false);
8948 static bool trans_LDREXD_t32(DisasContext *s, arg_LDREX *a)
8950 return op_ldrex(s, a, MO_64, false);
8953 static bool trans_LDREXB(DisasContext *s, arg_LDREX *a)
8955 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
8956 return false;
8958 return op_ldrex(s, a, MO_8, false);
8961 static bool trans_LDREXH(DisasContext *s, arg_LDREX *a)
8963 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
8964 return false;
8966 return op_ldrex(s, a, MO_16, false);
8969 static bool trans_LDAEX(DisasContext *s, arg_LDREX *a)
8971 if (!ENABLE_ARCH_8) {
8972 return false;
8974 return op_ldrex(s, a, MO_32, true);
8977 static bool trans_LDAEXD_a32(DisasContext *s, arg_LDREX *a)
8979 if (!ENABLE_ARCH_8) {
8980 return false;
8982 /* We UNDEF for these UNPREDICTABLE cases. */
8983 if (a->rt & 1) {
8984 unallocated_encoding(s);
8985 return true;
8987 a->rt2 = a->rt + 1;
8988 return op_ldrex(s, a, MO_64, true);
8991 static bool trans_LDAEXD_t32(DisasContext *s, arg_LDREX *a)
8993 if (!ENABLE_ARCH_8) {
8994 return false;
8996 return op_ldrex(s, a, MO_64, true);
8999 static bool trans_LDAEXB(DisasContext *s, arg_LDREX *a)
9001 if (!ENABLE_ARCH_8) {
9002 return false;
9004 return op_ldrex(s, a, MO_8, true);
9007 static bool trans_LDAEXH(DisasContext *s, arg_LDREX *a)
9009 if (!ENABLE_ARCH_8) {
9010 return false;
9012 return op_ldrex(s, a, MO_16, true);
9015 static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)
9017 TCGv_i32 addr, tmp;
9019 if (!ENABLE_ARCH_8) {
9020 return false;
9022 /* We UNDEF for these UNPREDICTABLE cases. */
9023 if (a->rn == 15 || a->rt == 15) {
9024 unallocated_encoding(s);
9025 return true;
9028 addr = load_reg(s, a->rn);
9029 tmp = tcg_temp_new_i32();
9030 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
9031 disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
9032 tcg_temp_free_i32(addr);
9034 store_reg(s, a->rt, tmp);
9035 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
9036 return true;
9039 static bool trans_LDA(DisasContext *s, arg_LDA *a)
9041 return op_lda(s, a, MO_UL);
9044 static bool trans_LDAB(DisasContext *s, arg_LDA *a)
9046 return op_lda(s, a, MO_UB);
9049 static bool trans_LDAH(DisasContext *s, arg_LDA *a)
9051 return op_lda(s, a, MO_UW);
9055 * Media instructions
9058 static bool trans_USADA8(DisasContext *s, arg_USADA8 *a)
9060 TCGv_i32 t1, t2;
9062 if (!ENABLE_ARCH_6) {
9063 return false;
9066 t1 = load_reg(s, a->rn);
9067 t2 = load_reg(s, a->rm);
9068 gen_helper_usad8(t1, t1, t2);
9069 tcg_temp_free_i32(t2);
9070 if (a->ra != 15) {
9071 t2 = load_reg(s, a->ra);
9072 tcg_gen_add_i32(t1, t1, t2);
9073 tcg_temp_free_i32(t2);
9075 store_reg(s, a->rd, t1);
9076 return true;
9079 static bool op_bfx(DisasContext *s, arg_UBFX *a, bool u)
9081 TCGv_i32 tmp;
9082 int width = a->widthm1 + 1;
9083 int shift = a->lsb;
9085 if (!ENABLE_ARCH_6T2) {
9086 return false;
9088 if (shift + width > 32) {
9089 /* UNPREDICTABLE; we choose to UNDEF */
9090 unallocated_encoding(s);
9091 return true;
9094 tmp = load_reg(s, a->rn);
9095 if (u) {
9096 tcg_gen_extract_i32(tmp, tmp, shift, width);
9097 } else {
9098 tcg_gen_sextract_i32(tmp, tmp, shift, width);
9100 store_reg(s, a->rd, tmp);
9101 return true;
9104 static bool trans_SBFX(DisasContext *s, arg_SBFX *a)
9106 return op_bfx(s, a, false);
9109 static bool trans_UBFX(DisasContext *s, arg_UBFX *a)
9111 return op_bfx(s, a, true);
9114 static bool trans_BFCI(DisasContext *s, arg_BFCI *a)
9116 TCGv_i32 tmp;
9117 int msb = a->msb, lsb = a->lsb;
9118 int width;
9120 if (!ENABLE_ARCH_6T2) {
9121 return false;
9123 if (msb < lsb) {
9124 /* UNPREDICTABLE; we choose to UNDEF */
9125 unallocated_encoding(s);
9126 return true;
9129 width = msb + 1 - lsb;
9130 if (a->rn == 15) {
9131 /* BFC */
9132 tmp = tcg_const_i32(0);
9133 } else {
9134 /* BFI */
9135 tmp = load_reg(s, a->rn);
9137 if (width != 32) {
9138 TCGv_i32 tmp2 = load_reg(s, a->rd);
9139 tcg_gen_deposit_i32(tmp, tmp2, tmp, lsb, width);
9140 tcg_temp_free_i32(tmp2);
9142 store_reg(s, a->rd, tmp);
9143 return true;
9146 static bool trans_UDF(DisasContext *s, arg_UDF *a)
9148 unallocated_encoding(s);
9149 return true;
9153 * Parallel addition and subtraction
9156 static bool op_par_addsub(DisasContext *s, arg_rrr *a,
9157 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
9159 TCGv_i32 t0, t1;
9161 if (s->thumb
9162 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
9163 : !ENABLE_ARCH_6) {
9164 return false;
9167 t0 = load_reg(s, a->rn);
9168 t1 = load_reg(s, a->rm);
9170 gen(t0, t0, t1);
9172 tcg_temp_free_i32(t1);
9173 store_reg(s, a->rd, t0);
9174 return true;
9177 static bool op_par_addsub_ge(DisasContext *s, arg_rrr *a,
9178 void (*gen)(TCGv_i32, TCGv_i32,
9179 TCGv_i32, TCGv_ptr))
9181 TCGv_i32 t0, t1;
9182 TCGv_ptr ge;
9184 if (s->thumb
9185 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
9186 : !ENABLE_ARCH_6) {
9187 return false;
9190 t0 = load_reg(s, a->rn);
9191 t1 = load_reg(s, a->rm);
9193 ge = tcg_temp_new_ptr();
9194 tcg_gen_addi_ptr(ge, cpu_env, offsetof(CPUARMState, GE));
9195 gen(t0, t0, t1, ge);
9197 tcg_temp_free_ptr(ge);
9198 tcg_temp_free_i32(t1);
9199 store_reg(s, a->rd, t0);
9200 return true;
9203 #define DO_PAR_ADDSUB(NAME, helper) \
9204 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
9206 return op_par_addsub(s, a, helper); \
9209 #define DO_PAR_ADDSUB_GE(NAME, helper) \
9210 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
9212 return op_par_addsub_ge(s, a, helper); \
9215 DO_PAR_ADDSUB_GE(SADD16, gen_helper_sadd16)
9216 DO_PAR_ADDSUB_GE(SASX, gen_helper_saddsubx)
9217 DO_PAR_ADDSUB_GE(SSAX, gen_helper_ssubaddx)
9218 DO_PAR_ADDSUB_GE(SSUB16, gen_helper_ssub16)
9219 DO_PAR_ADDSUB_GE(SADD8, gen_helper_sadd8)
9220 DO_PAR_ADDSUB_GE(SSUB8, gen_helper_ssub8)
9222 DO_PAR_ADDSUB_GE(UADD16, gen_helper_uadd16)
9223 DO_PAR_ADDSUB_GE(UASX, gen_helper_uaddsubx)
9224 DO_PAR_ADDSUB_GE(USAX, gen_helper_usubaddx)
9225 DO_PAR_ADDSUB_GE(USUB16, gen_helper_usub16)
9226 DO_PAR_ADDSUB_GE(UADD8, gen_helper_uadd8)
9227 DO_PAR_ADDSUB_GE(USUB8, gen_helper_usub8)
9229 DO_PAR_ADDSUB(QADD16, gen_helper_qadd16)
9230 DO_PAR_ADDSUB(QASX, gen_helper_qaddsubx)
9231 DO_PAR_ADDSUB(QSAX, gen_helper_qsubaddx)
9232 DO_PAR_ADDSUB(QSUB16, gen_helper_qsub16)
9233 DO_PAR_ADDSUB(QADD8, gen_helper_qadd8)
9234 DO_PAR_ADDSUB(QSUB8, gen_helper_qsub8)
9236 DO_PAR_ADDSUB(UQADD16, gen_helper_uqadd16)
9237 DO_PAR_ADDSUB(UQASX, gen_helper_uqaddsubx)
9238 DO_PAR_ADDSUB(UQSAX, gen_helper_uqsubaddx)
9239 DO_PAR_ADDSUB(UQSUB16, gen_helper_uqsub16)
9240 DO_PAR_ADDSUB(UQADD8, gen_helper_uqadd8)
9241 DO_PAR_ADDSUB(UQSUB8, gen_helper_uqsub8)
9243 DO_PAR_ADDSUB(SHADD16, gen_helper_shadd16)
9244 DO_PAR_ADDSUB(SHASX, gen_helper_shaddsubx)
9245 DO_PAR_ADDSUB(SHSAX, gen_helper_shsubaddx)
9246 DO_PAR_ADDSUB(SHSUB16, gen_helper_shsub16)
9247 DO_PAR_ADDSUB(SHADD8, gen_helper_shadd8)
9248 DO_PAR_ADDSUB(SHSUB8, gen_helper_shsub8)
9250 DO_PAR_ADDSUB(UHADD16, gen_helper_uhadd16)
9251 DO_PAR_ADDSUB(UHASX, gen_helper_uhaddsubx)
9252 DO_PAR_ADDSUB(UHSAX, gen_helper_uhsubaddx)
9253 DO_PAR_ADDSUB(UHSUB16, gen_helper_uhsub16)
9254 DO_PAR_ADDSUB(UHADD8, gen_helper_uhadd8)
9255 DO_PAR_ADDSUB(UHSUB8, gen_helper_uhsub8)
9257 #undef DO_PAR_ADDSUB
9258 #undef DO_PAR_ADDSUB_GE
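/*
 * The helper naming above follows the instruction prefixes: the plain
 * signed/unsigned forms also produce the GE flags and therefore go
 * through op_par_addsub_ge() with a pointer to CPUARMState.GE, while
 * the Q/UQ (saturating) and SH/UH (halving) forms do not touch GE.
 */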
9261 * Packing, unpacking, saturation, and reversal
9264 static bool trans_PKH(DisasContext *s, arg_PKH *a)
9266 TCGv_i32 tn, tm;
9267 int shift = a->imm;
9269 if (s->thumb
9270 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
9271 : !ENABLE_ARCH_6) {
9272 return false;
9275 tn = load_reg(s, a->rn);
9276 tm = load_reg(s, a->rm);
9277 if (a->tb) {
9278 /* PKHTB */
9279 if (shift == 0) {
9280 shift = 31;
9282 tcg_gen_sari_i32(tm, tm, shift);
9283 tcg_gen_deposit_i32(tn, tn, tm, 0, 16);
9284 } else {
9285 /* PKHBT */
9286 tcg_gen_shli_i32(tm, tm, shift);
9287 tcg_gen_deposit_i32(tn, tm, tn, 0, 16);
9289 tcg_temp_free_i32(tm);
9290 store_reg(s, a->rd, tn);
9291 return true;
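/*
 * PKHBT packs the low half of Rn with the high half of the
 * left-shifted Rm; PKHTB packs the high half of Rn with the low half
 * of the arithmetically right-shifted Rm. For PKHTB a shift amount
 * of 0 encodes ASR #32, and the ASR #31 used here gives the same
 * low 16 bits.
 */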
9294 static bool op_sat(DisasContext *s, arg_sat *a,
9295 void (*gen)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
9297 TCGv_i32 tmp, satimm;
9298 int shift = a->imm;
9300 if (!ENABLE_ARCH_6) {
9301 return false;
9304 tmp = load_reg(s, a->rn);
9305 if (a->sh) {
9306 tcg_gen_sari_i32(tmp, tmp, shift ? shift : 31);
9307 } else {
9308 tcg_gen_shli_i32(tmp, tmp, shift);
9311 satimm = tcg_const_i32(a->satimm);
9312 gen(tmp, cpu_env, tmp, satimm);
9313 tcg_temp_free_i32(satimm);
9315 store_reg(s, a->rd, tmp);
9316 return true;
9319 static bool trans_SSAT(DisasContext *s, arg_sat *a)
9321 return op_sat(s, a, gen_helper_ssat);
9324 static bool trans_USAT(DisasContext *s, arg_sat *a)
9326 return op_sat(s, a, gen_helper_usat);
9329 static bool trans_SSAT16(DisasContext *s, arg_sat *a)
9331 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9332 return false;
9334 return op_sat(s, a, gen_helper_ssat16);
9337 static bool trans_USAT16(DisasContext *s, arg_sat *a)
9339 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9340 return false;
9342 return op_sat(s, a, gen_helper_usat16);
9345 static bool op_xta(DisasContext *s, arg_rrr_rot *a,
9346 void (*gen_extract)(TCGv_i32, TCGv_i32),
9347 void (*gen_add)(TCGv_i32, TCGv_i32, TCGv_i32))
9349 TCGv_i32 tmp;
9351 if (!ENABLE_ARCH_6) {
9352 return false;
9355 tmp = load_reg(s, a->rm);
9357 * TODO: In many cases we could do a shift instead of a rotate.
9358 * Combined with a simple extend, that becomes an extract.
9360 tcg_gen_rotri_i32(tmp, tmp, a->rot * 8);
9361 gen_extract(tmp, tmp);
9363 if (a->rn != 15) {
9364 TCGv_i32 tmp2 = load_reg(s, a->rn);
9365 gen_add(tmp, tmp, tmp2);
9366 tcg_temp_free_i32(tmp2);
9368 store_reg(s, a->rd, tmp);
9369 return true;
9372 static bool trans_SXTAB(DisasContext *s, arg_rrr_rot *a)
9374 return op_xta(s, a, tcg_gen_ext8s_i32, tcg_gen_add_i32);
9377 static bool trans_SXTAH(DisasContext *s, arg_rrr_rot *a)
9379 return op_xta(s, a, tcg_gen_ext16s_i32, tcg_gen_add_i32);
9382 static bool trans_SXTAB16(DisasContext *s, arg_rrr_rot *a)
9384 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9385 return false;
9387 return op_xta(s, a, gen_helper_sxtb16, gen_add16);
9390 static bool trans_UXTAB(DisasContext *s, arg_rrr_rot *a)
9392 return op_xta(s, a, tcg_gen_ext8u_i32, tcg_gen_add_i32);
9395 static bool trans_UXTAH(DisasContext *s, arg_rrr_rot *a)
9397 return op_xta(s, a, tcg_gen_ext16u_i32, tcg_gen_add_i32);
9400 static bool trans_UXTAB16(DisasContext *s, arg_rrr_rot *a)
9402 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9403 return false;
9405 return op_xta(s, a, gen_helper_uxtb16, gen_add16);
9408 static bool trans_SEL(DisasContext *s, arg_rrr *a)
9410 TCGv_i32 t1, t2, t3;
9412 if (s->thumb
9413 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
9414 : !ENABLE_ARCH_6) {
9415 return false;
9418 t1 = load_reg(s, a->rn);
9419 t2 = load_reg(s, a->rm);
9420 t3 = tcg_temp_new_i32();
9421 tcg_gen_ld_i32(t3, cpu_env, offsetof(CPUARMState, GE));
9422 gen_helper_sel_flags(t1, t3, t1, t2);
9423 tcg_temp_free_i32(t3);
9424 tcg_temp_free_i32(t2);
9425 store_reg(s, a->rd, t1);
9426 return true;
9429 static bool op_rr(DisasContext *s, arg_rr *a,
9430 void (*gen)(TCGv_i32, TCGv_i32))
9432 TCGv_i32 tmp;
9434 tmp = load_reg(s, a->rm);
9435 gen(tmp, tmp);
9436 store_reg(s, a->rd, tmp);
9437 return true;
9440 static bool trans_REV(DisasContext *s, arg_rr *a)
9442 if (!ENABLE_ARCH_6) {
9443 return false;
9445 return op_rr(s, a, tcg_gen_bswap32_i32);
9448 static bool trans_REV16(DisasContext *s, arg_rr *a)
9450 if (!ENABLE_ARCH_6) {
9451 return false;
9453 return op_rr(s, a, gen_rev16);
9456 static bool trans_REVSH(DisasContext *s, arg_rr *a)
9458 if (!ENABLE_ARCH_6) {
9459 return false;
9461 return op_rr(s, a, gen_revsh);
9464 static bool trans_RBIT(DisasContext *s, arg_rr *a)
9466 if (!ENABLE_ARCH_6T2) {
9467 return false;
9469 return op_rr(s, a, gen_helper_rbit);
9473 * Signed multiply, signed and unsigned divide
9476 static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
9478 TCGv_i32 t1, t2;
9480 if (!ENABLE_ARCH_6) {
9481 return false;
9484 t1 = load_reg(s, a->rn);
9485 t2 = load_reg(s, a->rm);
9486 if (m_swap) {
9487 gen_swap_half(t2);
9489 gen_smul_dual(t1, t2);
9491 if (sub) {
9492 /* This subtraction cannot overflow. */
9493 tcg_gen_sub_i32(t1, t1, t2);
9494 } else {
9496 * This addition cannot overflow 32 bits; however it may
9497 * overflow when considered as a signed operation, in which case
9498 * we must set the Q flag.
9500 gen_helper_add_setq(t1, cpu_env, t1, t2);
9502 tcg_temp_free_i32(t2);
9504 if (a->ra != 15) {
9505 t2 = load_reg(s, a->ra);
9506 gen_helper_add_setq(t1, cpu_env, t1, t2);
9507 tcg_temp_free_i32(t2);
9509 store_reg(s, a->rd, t1);
9510 return true;
9513 static bool trans_SMLAD(DisasContext *s, arg_rrrr *a)
9515 return op_smlad(s, a, false, false);
9518 static bool trans_SMLADX(DisasContext *s, arg_rrrr *a)
9520 return op_smlad(s, a, true, false);
9523 static bool trans_SMLSD(DisasContext *s, arg_rrrr *a)
9525 return op_smlad(s, a, false, true);
9528 static bool trans_SMLSDX(DisasContext *s, arg_rrrr *a)
9530 return op_smlad(s, a, true, true);
9533 static bool op_smlald(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
9535 TCGv_i32 t1, t2;
9536 TCGv_i64 l1, l2;
9538 if (!ENABLE_ARCH_6) {
9539 return false;
9542 t1 = load_reg(s, a->rn);
9543 t2 = load_reg(s, a->rm);
9544 if (m_swap) {
9545 gen_swap_half(t2);
9547 gen_smul_dual(t1, t2);
9549 l1 = tcg_temp_new_i64();
9550 l2 = tcg_temp_new_i64();
9551 tcg_gen_ext_i32_i64(l1, t1);
9552 tcg_gen_ext_i32_i64(l2, t2);
9553 tcg_temp_free_i32(t1);
9554 tcg_temp_free_i32(t2);
9556 if (sub) {
9557 tcg_gen_sub_i64(l1, l1, l2);
9558 } else {
9559 tcg_gen_add_i64(l1, l1, l2);
9561 tcg_temp_free_i64(l2);
9563 gen_addq(s, l1, a->ra, a->rd);
9564 gen_storeq_reg(s, a->ra, a->rd, l1);
9565 tcg_temp_free_i64(l1);
9566 return true;
9569 static bool trans_SMLALD(DisasContext *s, arg_rrrr *a)
9571 return op_smlald(s, a, false, false);
9574 static bool trans_SMLALDX(DisasContext *s, arg_rrrr *a)
9576 return op_smlald(s, a, true, false);
9579 static bool trans_SMLSLD(DisasContext *s, arg_rrrr *a)
9581 return op_smlald(s, a, false, true);
9584 static bool trans_SMLSLDX(DisasContext *s, arg_rrrr *a)
9586 return op_smlald(s, a, true, true);
9589 static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub)
9591 TCGv_i32 t1, t2;
9593 if (s->thumb
9594 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
9595 : !ENABLE_ARCH_6) {
9596 return false;
9599 t1 = load_reg(s, a->rn);
9600 t2 = load_reg(s, a->rm);
9601 tcg_gen_muls2_i32(t2, t1, t1, t2);
9603 if (a->ra != 15) {
9604 TCGv_i32 t3 = load_reg(s, a->ra);
9605 if (sub) {
9607 * For SMMLS, we need a 64-bit subtract: the borrow into the
9608 * high part is caused by a non-zero multiplicand lowpart, and
9609 * we also need the correct result lowpart for the rounding step.
9611 TCGv_i32 zero = tcg_const_i32(0);
9612 tcg_gen_sub2_i32(t2, t1, zero, t3, t2, t1);
9613 tcg_temp_free_i32(zero);
9614 } else {
9615 tcg_gen_add_i32(t1, t1, t3);
9617 tcg_temp_free_i32(t3);
9619 if (round) {
9621 * Adding 0x80000000 to the 64-bit quantity means that we have
9622 * carry in to the high word when the low word has the msb set.
9624 tcg_gen_shri_i32(t2, t2, 31);
9625 tcg_gen_add_i32(t1, t1, t2);
9627 tcg_temp_free_i32(t2);
9628 store_reg(s, a->rd, t1);
9629 return true;
9632 static bool trans_SMMLA(DisasContext *s, arg_rrrr *a)
9634 return op_smmla(s, a, false, false);
9637 static bool trans_SMMLAR(DisasContext *s, arg_rrrr *a)
9639 return op_smmla(s, a, true, false);
9642 static bool trans_SMMLS(DisasContext *s, arg_rrrr *a)
9644 return op_smmla(s, a, false, true);
9647 static bool trans_SMMLSR(DisasContext *s, arg_rrrr *a)
9649 return op_smmla(s, a, true, true);
9652 static bool op_div(DisasContext *s, arg_rrr *a, bool u)
9654 TCGv_i32 t1, t2;
9656 if (s->thumb
9657 ? !dc_isar_feature(aa32_thumb_div, s)
9658 : !dc_isar_feature(aa32_arm_div, s)) {
9659 return false;
9662 t1 = load_reg(s, a->rn);
9663 t2 = load_reg(s, a->rm);
9664 if (u) {
9665 gen_helper_udiv(t1, t1, t2);
9666 } else {
9667 gen_helper_sdiv(t1, t1, t2);
9669 tcg_temp_free_i32(t2);
9670 store_reg(s, a->rd, t1);
9671 return true;
9674 static bool trans_SDIV(DisasContext *s, arg_rrr *a)
9676 return op_div(s, a, false);
9679 static bool trans_UDIV(DisasContext *s, arg_rrr *a)
9681 return op_div(s, a, true);
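/*
 * The udiv/sdiv helpers provide the architected corner-case results
 * for UDIV/SDIV: division by zero yields zero, and the signed
 * overflow case INT_MIN / -1 yields INT_MIN.
 */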
9685 * Block data transfer
9688 static TCGv_i32 op_addr_block_pre(DisasContext *s, arg_ldst_block *a, int n)
9690 TCGv_i32 addr = load_reg(s, a->rn);
9692 if (a->b) {
9693 if (a->i) {
9694 /* pre increment */
9695 tcg_gen_addi_i32(addr, addr, 4);
9696 } else {
9697 /* pre decrement */
9698 tcg_gen_addi_i32(addr, addr, -(n * 4));
9700 } else if (!a->i && n != 1) {
9701 /* post decrement */
9702 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9705 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
9707 * If the writeback is incrementing SP rather than
9708 * decrementing it, and the initial SP is below the
9709 * stack limit but the final written-back SP would
9710 * be above, then we must not perform any memory
9711 * accesses, but it is IMPDEF whether we generate
9712 * an exception. We choose to do so in this case.
9713 * At this point 'addr' is the lowest address, so
9714 * either the original SP (if incrementing) or our
9715 * final SP (if decrementing), so that's what we check.
9717 gen_helper_v8m_stackcheck(cpu_env, addr);
9720 return addr;
9723 static void op_addr_block_post(DisasContext *s, arg_ldst_block *a,
9724 TCGv_i32 addr, int n)
9726 if (a->w) {
9727 /* write back */
9728 if (!a->b) {
9729 if (a->i) {
9730 /* post increment */
9731 tcg_gen_addi_i32(addr, addr, 4);
9732 } else {
9733 /* post decrement */
9734 tcg_gen_addi_i32(addr, addr, -(n * 4));
9736 } else if (!a->i && n != 1) {
9737 /* pre decrement */
9738 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9740 store_reg(s, a->rn, addr);
9741 } else {
9742 tcg_temp_free_i32(addr);
9746 static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
9748 int i, j, n, list, mem_idx;
9749 bool user = a->u;
9750 TCGv_i32 addr, tmp, tmp2;
9752 if (user) {
9753 /* STM (user) */
9754 if (IS_USER(s)) {
9755 /* Only usable in supervisor mode. */
9756 unallocated_encoding(s);
9757 return true;
9761 list = a->list;
9762 n = ctpop16(list);
9763 if (n < min_n || a->rn == 15) {
9764 unallocated_encoding(s);
9765 return true;
9768 addr = op_addr_block_pre(s, a, n);
9769 mem_idx = get_mem_index(s);
9771 for (i = j = 0; i < 16; i++) {
9772 if (!(list & (1 << i))) {
9773 continue;
9776 if (user && i != 15) {
9777 tmp = tcg_temp_new_i32();
9778 tmp2 = tcg_const_i32(i);
9779 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
9780 tcg_temp_free_i32(tmp2);
9781 } else {
9782 tmp = load_reg(s, i);
9784 gen_aa32_st32(s, tmp, addr, mem_idx);
9785 tcg_temp_free_i32(tmp);
9787 /* No need to add after the last transfer. */
9788 if (++j != n) {
9789 tcg_gen_addi_i32(addr, addr, 4);
9793 op_addr_block_post(s, a, addr, n);
9794 return true;
9797 static bool trans_STM(DisasContext *s, arg_ldst_block *a)
9799 /* BitCount(list) < 1 is UNPREDICTABLE */
9800 return op_stm(s, a, 1);
9803 static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a)
9805 /* Writeback register in register list is UNPREDICTABLE for T32. */
9806 if (a->w && (a->list & (1 << a->rn))) {
9807 unallocated_encoding(s);
9808 return true;
9810 /* BitCount(list) < 2 is UNPREDICTABLE */
9811 return op_stm(s, a, 2);
9814 static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
9816 int i, j, n, list, mem_idx;
9817 bool loaded_base;
9818 bool user = a->u;
9819 bool exc_return = false;
9820 TCGv_i32 addr, tmp, tmp2, loaded_var;
9822 if (user) {
9823 /* LDM (user), LDM (exception return) */
9824 if (IS_USER(s)) {
9825 /* Only usable in supervisor mode. */
9826 unallocated_encoding(s);
9827 return true;
9829 if (extract32(a->list, 15, 1)) {
9830 exc_return = true;
9831 user = false;
9832 } else {
9833 /* LDM (user) does not allow writeback. */
9834 if (a->w) {
9835 unallocated_encoding(s);
9836 return true;
9841 list = a->list;
9842 n = ctpop16(list);
9843 if (n < min_n || a->rn == 15) {
9844 unallocated_encoding(s);
9845 return true;
9848 addr = op_addr_block_pre(s, a, n);
9849 mem_idx = get_mem_index(s);
9850 loaded_base = false;
9851 loaded_var = NULL;
9853 for (i = j = 0; i < 16; i++) {
9854 if (!(list & (1 << i))) {
9855 continue;
9858 tmp = tcg_temp_new_i32();
9859 gen_aa32_ld32u(s, tmp, addr, mem_idx);
9860 if (user) {
9861 tmp2 = tcg_const_i32(i);
9862 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
9863 tcg_temp_free_i32(tmp2);
9864 tcg_temp_free_i32(tmp);
9865 } else if (i == a->rn) {
9866 loaded_var = tmp;
9867 loaded_base = true;
9868 } else if (i == 15 && exc_return) {
9869 store_pc_exc_ret(s, tmp);
9870 } else {
9871 store_reg_from_load(s, i, tmp);
9874 /* No need to add after the last transfer. */
9875 if (++j != n) {
9876 tcg_gen_addi_i32(addr, addr, 4);
9880 op_addr_block_post(s, a, addr, n);
9882 if (loaded_base) {
9883 /* Note that we reject base == pc above. */
9884 store_reg(s, a->rn, loaded_var);
9887 if (exc_return) {
9888 /* Restore CPSR from SPSR. */
9889 tmp = load_cpu_field(spsr);
9890 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9891 gen_io_start();
9893 gen_helper_cpsr_write_eret(cpu_env, tmp);
9894 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9895 gen_io_end();
9897 tcg_temp_free_i32(tmp);
9898 /* Must exit loop to check un-masked IRQs */
9899 s->base.is_jmp = DISAS_EXIT;
9901 return true;
9904 static bool trans_LDM_a32(DisasContext *s, arg_ldst_block *a)
9907 * Writeback register in register list is UNPREDICTABLE
9908 * for ArchVersion() >= 7. Prior to v7, A32 would write
9909 * an UNKNOWN value to the base register.
9911 if (ENABLE_ARCH_7 && a->w && (a->list & (1 << a->rn))) {
9912 unallocated_encoding(s);
9913 return true;
9915 /* BitCount(list) < 1 is UNPREDICTABLE */
9916 return do_ldm(s, a, 1);
9919 static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a)
9921 /* Writeback register in register list is UNPREDICTABLE for T32. */
9922 if (a->w && (a->list & (1 << a->rn))) {
9923 unallocated_encoding(s);
9924 return true;
9926 /* BitCount(list) < 2 is UNPREDICTABLE */
9927 return do_ldm(s, a, 2);
9930 static bool trans_LDM_t16(DisasContext *s, arg_ldst_block *a)
9932 /* Writeback is conditional on the base register not being loaded. */
9933 a->w = !(a->list & (1 << a->rn));
9934 /* BitCount(list) < 1 is UNPREDICTABLE */
9935 return do_ldm(s, a, 1);
9939 * Branch, branch with link
9942 static bool trans_B(DisasContext *s, arg_i *a)
9944 gen_jmp(s, read_pc(s) + a->imm);
9945 return true;
9948 static bool trans_B_cond_thumb(DisasContext *s, arg_ci *a)
9950 /* This has cond from encoding, required to be outside IT block. */
9951 if (a->cond >= 0xe) {
9952 return false;
9954 if (s->condexec_mask) {
9955 unallocated_encoding(s);
9956 return true;
9958 arm_skip_unless(s, a->cond);
9959 gen_jmp(s, read_pc(s) + a->imm);
9960 return true;
9963 static bool trans_BL(DisasContext *s, arg_i *a)
9965 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
9966 gen_jmp(s, read_pc(s) + a->imm);
9967 return true;
9970 static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
9972 TCGv_i32 tmp;
9974 /* For A32, ARCH(5) is checked near the start of the uncond block. */
9975 if (s->thumb && (a->imm & 2)) {
9976 return false;
9978 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
9979 tmp = tcg_const_i32(!s->thumb);
9980 store_cpu_field(tmp, thumb);
9981 gen_jmp(s, (read_pc(s) & ~3) + a->imm);
9982 return true;
9985 static bool trans_BL_BLX_prefix(DisasContext *s, arg_BL_BLX_prefix *a)
9987 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
9988 tcg_gen_movi_i32(cpu_R[14], read_pc(s) + (a->imm << 12));
9989 return true;
9992 static bool trans_BL_suffix(DisasContext *s, arg_BL_suffix *a)
9994 TCGv_i32 tmp = tcg_temp_new_i32();
9996 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
9997 tcg_gen_addi_i32(tmp, cpu_R[14], (a->imm << 1) | 1);
9998 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
9999 gen_bx(s, tmp);
10000 return true;
10003 static bool trans_BLX_suffix(DisasContext *s, arg_BLX_suffix *a)
10005 TCGv_i32 tmp;
10007 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
10008 if (!ENABLE_ARCH_5) {
10009 return false;
10011 tmp = tcg_temp_new_i32();
10012 tcg_gen_addi_i32(tmp, cpu_R[14], a->imm << 1);
10013 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
10014 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
10015 gen_bx(s, tmp);
10016 return true;
10019 static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
10021 TCGv_i32 addr, tmp;
10023 tmp = load_reg(s, a->rm);
10024 if (half) {
10025 tcg_gen_add_i32(tmp, tmp, tmp);
10027 addr = load_reg(s, a->rn);
10028 tcg_gen_add_i32(addr, addr, tmp);
10030 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
10031 half ? MO_UW | s->be_data : MO_UB);
10032 tcg_temp_free_i32(addr);
10034 tcg_gen_add_i32(tmp, tmp, tmp);
10035 tcg_gen_addi_i32(tmp, tmp, read_pc(s));
10036 store_reg(s, 15, tmp);
10037 return true;
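/*
 * TBB/TBH load an unsigned byte or halfword from the branch table at
 * Rn + Rm (Rm doubled for TBH), double the loaded value to get a byte
 * offset and add it to the PC; storing the result to r15 performs the
 * forward branch.
 */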
10040 static bool trans_TBB(DisasContext *s, arg_tbranch *a)
10042 return op_tbranch(s, a, false);
10045 static bool trans_TBH(DisasContext *s, arg_tbranch *a)
10047 return op_tbranch(s, a, true);
10050 static bool trans_CBZ(DisasContext *s, arg_CBZ *a)
10052 TCGv_i32 tmp = load_reg(s, a->rn);
10054 arm_gen_condlabel(s);
10055 tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE,
10056 tmp, 0, s->condlabel);
10057 tcg_temp_free_i32(tmp);
10058 gen_jmp(s, read_pc(s) + a->imm);
10059 return true;
10063 * Supervisor call - both T32 & A32 come here so we need to check
10064 * which mode we are in when checking for semihosting.
10067 static bool trans_SVC(DisasContext *s, arg_SVC *a)
10069 const uint32_t semihost_imm = s->thumb ? 0xab : 0x123456;
10071 if (!arm_dc_feature(s, ARM_FEATURE_M) && semihosting_enabled() &&
10072 #ifndef CONFIG_USER_ONLY
10073 !IS_USER(s) &&
10074 #endif
10075 (a->imm == semihost_imm)) {
10076 gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
10077 } else {
10078 gen_set_pc_im(s, s->base.pc_next);
10079 s->svc_imm = a->imm;
10080 s->base.is_jmp = DISAS_SWI;
10082 return true;
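/*
 * The semihosting immediates checked above are the conventional ABI
 * values: 0xab for Thumb and 0x123456 for A32. Any other immediate
 * falls through to a normal supervisor call exception via DISAS_SWI.
 */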
10086 * Unconditional system instructions
10089 static bool trans_RFE(DisasContext *s, arg_RFE *a)
10091 static const int8_t pre_offset[4] = {
10092 /* DA */ -4, /* IA */ 0, /* DB */ -8, /* IB */ 4
10094 static const int8_t post_offset[4] = {
10095 /* DA */ -8, /* IA */ 4, /* DB */ -4, /* IB */ 0
10097 TCGv_i32 addr, t1, t2;
10099 if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
10100 return false;
10102 if (IS_USER(s)) {
10103 unallocated_encoding(s);
10104 return true;
10107 addr = load_reg(s, a->rn);
10108 tcg_gen_addi_i32(addr, addr, pre_offset[a->pu]);
10110 /* Load PC into tmp and CPSR into tmp2. */
10111 t1 = tcg_temp_new_i32();
10112 gen_aa32_ld32u(s, t1, addr, get_mem_index(s));
10113 tcg_gen_addi_i32(addr, addr, 4);
10114 t2 = tcg_temp_new_i32();
10115 gen_aa32_ld32u(s, t2, addr, get_mem_index(s));
10117 if (a->w) {
10118 /* Base writeback. */
10119 tcg_gen_addi_i32(addr, addr, post_offset[a->pu]);
10120 store_reg(s, a->rn, addr);
10121 } else {
10122 tcg_temp_free_i32(addr);
10124 gen_rfe(s, t1, t2);
10125 return true;
10128 static bool trans_SRS(DisasContext *s, arg_SRS *a)
10130 if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
10131 return false;
10133 gen_srs(s, a->mode, a->pu, a->w);
10134 return true;
10137 static bool trans_CPS(DisasContext *s, arg_CPS *a)
10139 uint32_t mask, val;
10141 if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
10142 return false;
10144 if (IS_USER(s)) {
10145 /* Implemented as NOP in user mode. */
10146 return true;
10148 /* TODO: There are quite a lot of UNPREDICTABLE argument combinations. */
10150 mask = val = 0;
10151 if (a->imod & 2) {
10152 if (a->A) {
10153 mask |= CPSR_A;
10155 if (a->I) {
10156 mask |= CPSR_I;
10158 if (a->F) {
10159 mask |= CPSR_F;
10161 if (a->imod & 1) {
10162 val |= mask;
10165 if (a->M) {
10166 mask |= CPSR_M;
10167 val |= a->mode;
10169 if (mask) {
10170 gen_set_psr_im(s, mask, 0, val);
10172 return true;
10175 static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a)
10177 TCGv_i32 tmp, addr, el;
10179 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
10180 return false;
10182 if (IS_USER(s)) {
10183 /* Implemented as NOP in user mode. */
10184 return true;
10187 tmp = tcg_const_i32(a->im);
10188 /* FAULTMASK */
10189 if (a->F) {
10190 addr = tcg_const_i32(19);
10191 gen_helper_v7m_msr(cpu_env, addr, tmp);
10192 tcg_temp_free_i32(addr);
10194 /* PRIMASK */
10195 if (a->I) {
10196 addr = tcg_const_i32(16);
10197 gen_helper_v7m_msr(cpu_env, addr, tmp);
10198 tcg_temp_free_i32(addr);
10200 el = tcg_const_i32(s->current_el);
10201 gen_helper_rebuild_hflags_m32(cpu_env, el);
10202 tcg_temp_free_i32(el);
10203 tcg_temp_free_i32(tmp);
10204 gen_lookup_tb(s);
10205 return true;
10209 * Clear-Exclusive, Barriers
10212 static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
10214 if (s->thumb
10215 ? !ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)
10216 : !ENABLE_ARCH_6K) {
10217 return false;
10219 gen_clrex(s);
10220 return true;
10223 static bool trans_DSB(DisasContext *s, arg_DSB *a)
10225 if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
10226 return false;
10228 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
10229 return true;
10232 static bool trans_DMB(DisasContext *s, arg_DMB *a)
10234 return trans_DSB(s, NULL);
10237 static bool trans_ISB(DisasContext *s, arg_ISB *a)
10239 if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
10240 return false;
10243 * We need to break the TB after this insn to execute
10244 * self-modifying code correctly and also to take
10245 * any pending interrupts immediately.
10247 gen_goto_tb(s, 0, s->base.pc_next);
10248 return true;
10251 static bool trans_SB(DisasContext *s, arg_SB *a)
10253 if (!dc_isar_feature(aa32_sb, s)) {
10254 return false;
10257 * TODO: There is no speculation barrier opcode
10258 * for TCG; MB and end the TB instead.
10260 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
10261 gen_goto_tb(s, 0, s->base.pc_next);
10262 return true;
10265 static bool trans_SETEND(DisasContext *s, arg_SETEND *a)
10267 if (!ENABLE_ARCH_6) {
10268 return false;
10270 if (a->E != (s->be_data == MO_BE)) {
10271 gen_helper_setend(cpu_env);
10272 s->base.is_jmp = DISAS_UPDATE;
10274 return true;
10278 * Preload instructions
10279 * All are nops, contingent on the appropriate arch level.
10282 static bool trans_PLD(DisasContext *s, arg_PLD *a)
10284 return ENABLE_ARCH_5TE;
10287 static bool trans_PLDW(DisasContext *s, arg_PLD *a)
10289 return arm_dc_feature(s, ARM_FEATURE_V7MP);
10292 static bool trans_PLI(DisasContext *s, arg_PLD *a)
10294 return ENABLE_ARCH_7;
10298 * If-then
10301 static bool trans_IT(DisasContext *s, arg_IT *a)
10303 int cond_mask = a->cond_mask;
10306 * No actual code generated for this insn, just setup state.
10308 * Combinations of firstcond and mask which set up an 0b1111
10309 * condition are UNPREDICTABLE; we take the CONSTRAINED
10310 * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
10311 * i.e. both meaning "execute always".
10313 s->condexec_cond = (cond_mask >> 4) & 0xe;
10314 s->condexec_mask = cond_mask & 0x1f;
10315 return true;
10319 * Legacy decoder.
10322 static void disas_arm_insn(DisasContext *s, unsigned int insn)
10324 unsigned int cond = insn >> 28;
10326 /* M variants do not implement ARM mode; this must raise the INVSTATE
10327 * UsageFault exception.
10329 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10330 gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
10331 default_exception_el(s));
10332 return;
10335 if (cond == 0xf) {
10336 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
10337 * choose to UNDEF. In ARMv5 and above the space is used
10338 * for miscellaneous unconditional instructions.
10340 ARCH(5);
10342 /* Unconditional instructions. */
10343 /* TODO: Perhaps merge these into one decodetree output file. */
10344 if (disas_a32_uncond(s, insn) ||
10345 disas_vfp_uncond(s, insn) ||
10346 disas_neon_dp(s, insn) ||
10347 disas_neon_ls(s, insn) ||
10348 disas_neon_shared(s, insn)) {
10349 return;
10351 /* fall back to legacy decoder */
10353 if (((insn >> 25) & 7) == 1) {
10354 /* NEON Data processing. */
10355 if (disas_neon_data_insn(s, insn)) {
10356 goto illegal_op;
10358 return;
10360 if ((insn & 0x0e000f00) == 0x0c000100) {
10361 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
10362 /* iWMMXt register transfer. */
10363 if (extract32(s->c15_cpar, 1, 1)) {
10364 if (!disas_iwmmxt_insn(s, insn)) {
10365 return;
10370 goto illegal_op;
10372 if (cond != 0xe) {
10373 /* If the condition is not "always", generate a conditional jump
10374 to the next instruction. */
10375 arm_skip_unless(s, cond);
10378 /* TODO: Perhaps merge these into one decodetree output file. */
10379 if (disas_a32(s, insn) ||
10380 disas_vfp(s, insn)) {
10381 return;
10383 /* fall back to legacy decoder */
10385 switch ((insn >> 24) & 0xf) {
10386 case 0xc:
10387 case 0xd:
10388 case 0xe:
10389 if (((insn >> 8) & 0xe) == 10) {
10390 /* VFP, but failed disas_vfp. */
10391 goto illegal_op;
10393 if (disas_coproc_insn(s, insn)) {
10394 /* Coprocessor. */
10395 goto illegal_op;
10397 break;
10398 default:
10399 illegal_op:
10400 unallocated_encoding(s);
10401 break;
10405 static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
10408 * Return true if this is a 16 bit instruction. We must be precise
10409 * about this (matching the decode).
10411 if ((insn >> 11) < 0x1d) {
10412 /* Definitely a 16-bit instruction */
10413 return true;
10416 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
10417 * first half of a 32-bit Thumb insn. Thumb-1 cores might
10418 * end up actually treating this as two 16-bit insns, though,
10419 * if it's half of a bl/blx pair that might span a page boundary.
10421 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
10422 arm_dc_feature(s, ARM_FEATURE_M)) {
10423 /* Thumb2 cores (including all M profile ones) always treat
10424 * 32-bit insns as 32-bit.
10426 return false;
10429 if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
10430 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
10431 * is not on the next page; we merge this into a 32-bit
10432 * insn.
10434 return false;
10436 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
10437 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
10438 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
10439 * -- handle as single 16 bit insn
10441 return true;
10444 /* Translate a 32-bit thumb instruction. */
10445 static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
10448 * ARMv6-M supports a limited subset of Thumb2 instructions.
10449 * Other Thumb1 architectures allow only 32-bit
10450 * combined BL/BLX prefix and suffix.
10452 if (arm_dc_feature(s, ARM_FEATURE_M) &&
10453 !arm_dc_feature(s, ARM_FEATURE_V7)) {
10454 int i;
10455 bool found = false;
10456 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
10457 0xf3b08040 /* dsb */,
10458 0xf3b08050 /* dmb */,
10459 0xf3b08060 /* isb */,
10460 0xf3e08000 /* mrs */,
10461 0xf000d000 /* bl */};
10462 static const uint32_t armv6m_mask[] = {0xffe0d000,
10463 0xfff0d0f0,
10464 0xfff0d0f0,
10465 0xfff0d0f0,
10466 0xffe0d000,
10467 0xf800d000};
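/*
 * These insn/mask pairs list the only 32-bit encodings that ARMv6-M
 * provides (MSR, DSB, DMB, ISB, MRS and BL); any other 32-bit
 * encoding is rejected below before the normal T32 decode is tried.
 */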
10469 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
10470 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
10471 found = true;
10472 break;
10475 if (!found) {
10476 goto illegal_op;
10478 } else if ((insn & 0xf800e800) != 0xf000e800) {
10479 ARCH(6T2);
10482 if ((insn & 0xef000000) == 0xef000000) {
10484 * T32 encodings 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
10485 * transform into
10486 * A32 encodings 0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
10488 uint32_t a32_insn = (insn & 0xe2ffffff) |
10489 ((insn & (1 << 28)) >> 4) | (1 << 28);
10491 if (disas_neon_dp(s, a32_insn)) {
10492 return;
10496 if ((insn & 0xff100000) == 0xf9000000) {
10498 * T32 encodings 0b1111_1001_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
10499 * transform into
10500 * A32 encodings 0b1111_0100_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
10502 uint32_t a32_insn = (insn & 0x00ffffff) | 0xf4000000;
10504 if (disas_neon_ls(s, a32_insn)) {
10505 return;
10510 * TODO: Perhaps merge these into one decodetree output file.
10511 * Note disas_vfp is written for a32 with cond field in the
10512 * top nibble. The t32 encoding requires 0xe in the top nibble.
10514 if (disas_t32(s, insn) ||
10515 disas_vfp_uncond(s, insn) ||
10516 disas_neon_shared(s, insn) ||
10517 ((insn >> 28) == 0xe && disas_vfp(s, insn))) {
10518 return;
10520 /* fall back to legacy decoder */
10522 switch ((insn >> 25) & 0xf) {
10523 case 0: case 1: case 2: case 3:
10524 /* 16-bit instructions. Should never happen. */
10525 abort();
10526 case 6: case 7: case 14: case 15:
10527 /* Coprocessor. */
10528 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10529 /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
10530 if (extract32(insn, 24, 2) == 3) {
10531 goto illegal_op; /* op0 = 0b11 : unallocated */
10534 if (((insn >> 8) & 0xe) == 10 &&
10535 dc_isar_feature(aa32_fpsp_v2, s)) {
10536 /* FP, and the CPU supports it */
10537 goto illegal_op;
10538 } else {
10539 /* All other insns: NOCP */
10540 gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
10541 syn_uncategorized(),
10542 default_exception_el(s));
10544 break;
10546 if (((insn >> 24) & 3) == 3) {
10547 /* Translate into the equivalent ARM encoding. */
10548 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
10549 if (disas_neon_data_insn(s, insn)) {
10550 goto illegal_op;
10552 } else if (((insn >> 8) & 0xe) == 10) {
10553 /* VFP, but failed disas_vfp. */
10554 goto illegal_op;
10555 } else {
10556 if (insn & (1 << 28))
10557 goto illegal_op;
10558 if (disas_coproc_insn(s, insn)) {
10559 goto illegal_op;
10562 break;
10563 case 12:
10564 goto illegal_op;
10565 default:
10566 illegal_op:
10567 unallocated_encoding(s);
10571 static void disas_thumb_insn(DisasContext *s, uint32_t insn)
10573 if (!disas_t16(s, insn)) {
10574 unallocated_encoding(s);
10578 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
10580 /* Return true if the insn at dc->base.pc_next might cross a page boundary.
10581 * (False positives are OK, false negatives are not.)
10582 * We know this is a Thumb insn, and our caller ensures we are
10583 * only called if dc->base.pc_next is less than 4 bytes from the page
10584 * boundary, so we cross the page if the first 16 bits indicate
10585 * that this is a 32 bit insn.
10587 uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);
10589 return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
10592 static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
10594 DisasContext *dc = container_of(dcbase, DisasContext, base);
10595 CPUARMState *env = cs->env_ptr;
10596 ARMCPU *cpu = env_archcpu(env);
10597 uint32_t tb_flags = dc->base.tb->flags;
10598 uint32_t condexec, core_mmu_idx;
10600 dc->isar = &cpu->isar;
10601 dc->condjmp = 0;
10603 dc->aarch64 = 0;
10604 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
10605 * there is no secure EL1, so we route exceptions to EL3.
10607 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
10608 !arm_el_is_aa64(env, 3);
10609 dc->thumb = FIELD_EX32(tb_flags, TBFLAG_AM32, THUMB);
10610 dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
10611 condexec = FIELD_EX32(tb_flags, TBFLAG_AM32, CONDEXEC);
10612 dc->condexec_mask = (condexec & 0xf) << 1;
10613 dc->condexec_cond = condexec >> 4;
10615 core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
10616 dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
10617 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
10618 #if !defined(CONFIG_USER_ONLY)
10619 dc->user = (dc->current_el == 0);
10620 #endif
10621 dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
10623 if (arm_feature(env, ARM_FEATURE_M)) {
10624 dc->vfp_enabled = 1;
10625 dc->be_data = MO_TE;
10626 dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_M32, HANDLER);
10627 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
10628 regime_is_secure(env, dc->mmu_idx);
10629 dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_M32, STACKCHECK);
10630 dc->v8m_fpccr_s_wrong =
10631 FIELD_EX32(tb_flags, TBFLAG_M32, FPCCR_S_WRONG);
10632 dc->v7m_new_fp_ctxt_needed =
10633 FIELD_EX32(tb_flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED);
10634 dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_M32, LSPACT);
10635 } else {
10636 dc->be_data =
10637 FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
10638 dc->debug_target_el =
10639 FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
10640 dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
10641 dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
10642 dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
10643 dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
10644 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
10645 dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
10646 } else {
10647 dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
10648 dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
10651 dc->cp_regs = cpu->cp_regs;
10652 dc->features = env->features;
10654 /* Single step state. The code-generation logic here is:
10655 * SS_ACTIVE == 0:
10656 * generate code with no special handling for single-stepping (except
10657 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
10658 * this happens anyway because those changes are all system register or
10659 * PSTATE writes).
10660 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
10661 * emit code for one insn
10662 * emit code to clear PSTATE.SS
10663 * emit code to generate software step exception for completed step
10664 * end TB (as usual for having generated an exception)
10665 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
10666 * emit code to generate a software step exception
10667 * end the TB
10669 dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
10670 dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
10671 dc->is_ldex = false;
10673 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
10675 /* If architectural single step active, limit to 1. */
10676 if (is_singlestepping(dc)) {
10677 dc->base.max_insns = 1;
10680 /* ARM is a fixed-length ISA. Bound the number of insns to execute
10681 to those left on the page. */
10682 if (!dc->thumb) {
10683 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
10684 dc->base.max_insns = MIN(dc->base.max_insns, bound);
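/*
 * The bound above is the number of whole 4-byte A32 insns left on the
 * page: -(pc_first | TARGET_PAGE_MASK) is the byte count from
 * pc_first to the end of the page.
 */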
10687 cpu_V0 = tcg_temp_new_i64();
10688 cpu_V1 = tcg_temp_new_i64();
10689 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
10690 cpu_M0 = tcg_temp_new_i64();
10693 static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
10695 DisasContext *dc = container_of(dcbase, DisasContext, base);
10697 /* A note on handling of the condexec (IT) bits:
10699 * We want to avoid the overhead of having to write the updated condexec
10700 * bits back to the CPUARMState for every instruction in an IT block. So:
10701 * (1) if the condexec bits are not already zero then we write
10702 * zero back into the CPUARMState now. This avoids complications trying
10703 * to do it at the end of the block. (For example if we don't do this
10704 * it's hard to identify whether we can safely skip writing condexec
10705 * at the end of the TB, which we definitely want to do for the case
10706 * where a TB doesn't do anything with the IT state at all.)
10707 * (2) if we are going to leave the TB then we call gen_set_condexec()
10708 * which will write the correct value into CPUARMState if zero is wrong.
10709 * This is done both for leaving the TB at the end, and for leaving
10710 * it because of an exception we know will happen, which is done in
10711 * gen_exception_insn(). The latter is necessary because we need to
10712 * leave the TB with the PC/IT state just prior to execution of the
10713 * instruction which caused the exception.
10714 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
10715 * then the CPUARMState will be wrong and we need to reset it.
10716 * This is handled in the same way as restoration of the
10717 * PC in these situations; we save the value of the condexec bits
10718 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
10719 * then uses this to restore them after an exception.
10721 * Note that there are no instructions which can read the condexec
10722 * bits, and none which can write non-static values to them, so
10723 * we don't need to care about whether CPUARMState is correct in the
10724 * middle of a TB.
10727 /* Reset the conditional execution bits immediately. This avoids
10728 complications trying to do it at the end of the block. */
10729 if (dc->condexec_mask || dc->condexec_cond) {
10730 TCGv_i32 tmp = tcg_temp_new_i32();
10731 tcg_gen_movi_i32(tmp, 0);
10732 store_cpu_field(tmp, condexec_bits);
10736 static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
10738 DisasContext *dc = container_of(dcbase, DisasContext, base);
10740 tcg_gen_insn_start(dc->base.pc_next,
10741 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
10742 0);
10743 dc->insn_start = tcg_last_op();
10746 static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
10747 const CPUBreakpoint *bp)
10749 DisasContext *dc = container_of(dcbase, DisasContext, base);
10751 if (bp->flags & BP_CPU) {
10752 gen_set_condexec(dc);
10753 gen_set_pc_im(dc, dc->base.pc_next);
10754 gen_helper_check_breakpoints(cpu_env);
10755 /* End the TB early; it's likely not going to be executed */
10756 dc->base.is_jmp = DISAS_TOO_MANY;
10757 } else {
10758 gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
10759 /* The address covered by the breakpoint must be
10760 included in [tb->pc, tb->pc + tb->size) in order
10761 for it to be properly cleared -- thus we
10762 increment the PC here so that the logic setting
10763 tb->size below does the right thing. */
10764 /* TODO: Advance PC by correct instruction length to
10765 * avoid disassembler error messages */
10766 dc->base.pc_next += 2;
10767 dc->base.is_jmp = DISAS_NORETURN;
10768 }
10770 return true;
10771 }
10773 static bool arm_pre_translate_insn(DisasContext *dc)
10774 {
10775 #ifdef CONFIG_USER_ONLY
10776 /* Intercept jump to the magic kernel page. */
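/* 0xffff0000 is the Linux "commpage": the exception vectors and kuser
 * helpers such as __kuser_cmpxchg. EXCP_KERNEL_TRAP lets the user-mode main
 * loop emulate those helpers instead of executing guest code from this page.
 */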
10777 if (dc->base.pc_next >= 0xffff0000) {
10778 /* We always get here via a jump, so know we are not in a
10779 conditional execution block. */
10780 gen_exception_internal(EXCP_KERNEL_TRAP);
10781 dc->base.is_jmp = DISAS_NORETURN;
10782 return true;
10783 }
10784 #endif
10786 if (dc->ss_active && !dc->pstate_ss) {
10787 /* Singlestep state is Active-pending.
10788 * If we're in this state at the start of a TB then either
10789 * a) we just took an exception to an EL which is being debugged
10790 * and this is the first insn in the exception handler
10791 * b) debug exceptions were masked and we just unmasked them
10792 * without changing EL (eg by clearing PSTATE.D)
10793 * In either case we're going to take a swstep exception in the
10794 * "did not step an insn" case, and so the syndrome ISV and EX
10795 bits should be zero.
10796 */
10797 assert(dc->base.num_insns == 1);
10798 gen_swstep_exception(dc, 0, 0);
10799 dc->base.is_jmp = DISAS_NORETURN;
10800 return true;
10801 }
10803 return false;
10804 }
10806 static void arm_post_translate_insn(DisasContext *dc)
10807 {
10808 if (dc->condjmp && !dc->base.is_jmp) {
10809 gen_set_label(dc->condlabel);
10810 dc->condjmp = 0;
10811 }
10812 translator_loop_temp_check(&dc->base);
10813 }
10815 static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
10816 {
10817 DisasContext *dc = container_of(dcbase, DisasContext, base);
10818 CPUARMState *env = cpu->env_ptr;
10819 unsigned int insn;
10821 if (arm_pre_translate_insn(dc)) {
10822 return;
10823 }
10825 dc->pc_curr = dc->base.pc_next;
10826 insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
10827 dc->insn = insn;
10828 dc->base.pc_next += 4;
10829 disas_arm_insn(dc, insn);
10831 arm_post_translate_insn(dc);
10833 /* ARM is a fixed-length ISA. We performed the cross-page check
10834 in init_disas_context by adjusting max_insns. */
10835 }
10837 static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
10838 {
10839 /* Return true if this Thumb insn is always unconditional,
10840 * even inside an IT block. This is true of only a very few
10841 * instructions: BKPT, HLT, and SG.
10843 * A larger class of instructions are UNPREDICTABLE if used
10844 * inside an IT block; we do not need to detect those here, because
10845 * what we do by default (perform the cc check and update the IT
10846 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
10847 * choice for those situations.
10849 * insn is either a 16-bit or a 32-bit instruction; the two are
10850 * distinguishable because for the 16-bit case the top 16 bits
10851 are zeroes, and that isn't a valid 32-bit encoding.
10852 */
10853 if ((insn & 0xffffff00) == 0xbe00) {
10854 /* BKPT */
10855 return true;
10856 }
10858 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
10859 !arm_dc_feature(s, ARM_FEATURE_M)) {
10860 /* HLT: v8A only. This is unconditional even when it is going to
10861 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
10862 * For v7 cores this was a plain old undefined encoding and so
10863 * honours its cc check. (We might be using the encoding as
10864 * a semihosting trap, but we don't change the cc check behaviour
10865 * on that account, because a debugger connected to a real v7A
10866 * core and emulating semihosting traps by catching the UNDEF
10867 * exception would also only see cases where the cc check passed.
10868 * No guest code should be trying to do a HLT semihosting trap
10869 in an IT block anyway.
10870 */
10871 return true;
10872 }
10874 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
10875 arm_dc_feature(s, ARM_FEATURE_M)) {
10876 /* SG: v8M only */
10877 return true;
10878 }
10880 return false;
10881 }
10883 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
10884 {
10885 DisasContext *dc = container_of(dcbase, DisasContext, base);
10886 CPUARMState *env = cpu->env_ptr;
10887 uint32_t insn;
10888 bool is_16bit;
10890 if (arm_pre_translate_insn(dc)) {
10891 return;
10892 }
10894 dc->pc_curr = dc->base.pc_next;
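/* A Thumb insn is one or two halfwords; thumb_insn_is_16bit() decides from
 * the first halfword alone. For a 32-bit encoding the second halfword is
 * fetched below and combined as (hw1 << 16) | hw2, which is the layout
 * thumb_insn_is_unconditional() above expects.
 */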
10895 insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
10896 is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
10897 dc->base.pc_next += 2;
10898 if (!is_16bit) {
10899 uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
10901 insn = insn << 16 | insn2;
10902 dc->base.pc_next += 2;
10903 }
10904 dc->insn = insn;
10906 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
10907 uint32_t cond = dc->condexec_cond;
10909 /*
10910 * Conditionally skip the insn. Note that both 0xe and 0xf mean
10911 * "always"; 0xf is not "never".
10912 */
10913 if (cond < 0x0e) {
10914 arm_skip_unless(dc, cond);
10915 }
10916 }
10918 if (is_16bit) {
10919 disas_thumb_insn(dc, insn);
10920 } else {
10921 disas_thumb2_insn(dc, insn);
10922 }
10924 /* Advance the Thumb condexec condition. */
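/* dc->condexec_cond is the condition that applied to the insn just
 * translated. Each step replaces its low bit with bit 4 of the 5-bit mask
 * and shifts the mask left by one, so after the last insn of the IT block
 * the mask underflows to zero and the condition is cleared, marking the end
 * of the block.
 */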
10925 if (dc->condexec_mask) {
10926 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
10927 ((dc->condexec_mask >> 4) & 1));
10928 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
10929 if (dc->condexec_mask == 0) {
10930 dc->condexec_cond = 0;
10931 }
10932 }
10934 arm_post_translate_insn(dc);
10936 /* Thumb is a variable-length ISA. Stop translation when the next insn
10937 * will touch a new page. This ensures that prefetch aborts occur at
10938 * the right place.
10940 * We want to stop the TB if the next insn starts in a new page,
10941 * or if it spans between this page and the next. This means that
10942 * if we're looking at the last halfword in the page we need to
10943 * see if it's a 16-bit Thumb insn (which will fit in this TB)
10944 * or a 32-bit Thumb insn (which won't).
10945 * This is to avoid generating a silly TB with a single 16-bit insn
10946 * in it at the end of this page (which would execute correctly
10947 but isn't very efficient).
10948 */
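/* pc_next is always halfword aligned, so "offset >= TARGET_PAGE_SIZE - 3" is
 * equivalent to "the next insn starts in the final halfword of the page";
 * insn_crosses_page() then checks whether that insn is a 32-bit encoding.
 */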
10949 if (dc->base.is_jmp == DISAS_NEXT
10950 && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
10951 || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
10952 && insn_crosses_page(env, dc)))) {
10953 dc->base.is_jmp = DISAS_TOO_MANY;
10954 }
10955 }
10957 static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
10958 {
10959 DisasContext *dc = container_of(dcbase, DisasContext, base);
10961 if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
10962 /* FIXME: This can theoretically happen with self-modifying code. */
10963 cpu_abort(cpu, "IO on conditional branch instruction");
10964 }
10966 /* At this stage dc->condjmp will only be set when the skipped
10967 instruction was a conditional branch or trap, and the PC has
10968 already been written. */
10969 gen_set_condexec(dc);
10970 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
10971 /* Exception return branches need some special case code at the
10972 * end of the TB, which is complex enough that it has to
10973 * handle the single-step vs not and the condition-failed
10974 insn codepath itself.
10975 */
10976 gen_bx_excret_final_code(dc);
10977 } else if (unlikely(is_singlestepping(dc))) {
10978 /* Unconditional and "condition passed" instruction codepath. */
10979 switch (dc->base.is_jmp) {
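/* SWI/HVC/SMC must advance the single-step state before raising the
 * exception. HVC and SMC architecturally always target EL2 and EL3, hence
 * the hard-coded target ELs below; SVC uses default_exception_el() instead.
 */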
10980 case DISAS_SWI:
10981 gen_ss_advance(dc);
10982 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
10983 default_exception_el(dc));
10984 break;
10985 case DISAS_HVC:
10986 gen_ss_advance(dc);
10987 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
10988 break;
10989 case DISAS_SMC:
10990 gen_ss_advance(dc);
10991 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
10992 break;
10993 case DISAS_NEXT:
10994 case DISAS_TOO_MANY:
10995 case DISAS_UPDATE:
10996 gen_set_pc_im(dc, dc->base.pc_next);
10997 /* fall through */
10998 default:
10999 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
11000 gen_singlestep_exception(dc);
11001 break;
11002 case DISAS_NORETURN:
11003 break;
11004 }
11005 } else {
11006 /* While branches must always occur at the end of an IT block,
11007 there are a few other things that can cause us to terminate
11008 the TB in the middle of an IT block:
11009 - Exception generating instructions (bkpt, swi, undefined).
11010 - Page boundaries.
11011 - Hardware watchpoints.
11012 Hardware breakpoints have already been handled and skip this code.
11013 */
11014 switch(dc->base.is_jmp) {
11015 case DISAS_NEXT:
11016 case DISAS_TOO_MANY:
11017 gen_goto_tb(dc, 1, dc->base.pc_next);
11018 break;
11019 case DISAS_JUMP:
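/* The PC has already been written with a dynamic value, so we cannot chain
 * with goto_tb; gen_goto_ptr() looks up the TB for the new PC and jumps to
 * it, or falls back to the main loop.
 */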
11020 gen_goto_ptr();
11021 break;
11022 case DISAS_UPDATE:
11023 gen_set_pc_im(dc, dc->base.pc_next);
11024 /* fall through */
11025 default:
11026 /* indicate that the hash table must be used to find the next TB */
11027 tcg_gen_exit_tb(NULL, 0);
11028 break;
11029 case DISAS_NORETURN:
11030 /* nothing more to generate */
11031 break;
11032 case DISAS_WFI:
11033 {
11034 TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
11035 !(dc->insn & (1U << 31))) ? 2 : 4);
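/* The helper argument is the byte size of this WFI encoding (2 for a 16-bit
 * Thumb WFI, else 4), so a WFI that traps to a higher exception level can be
 * reported as taken on this instruction.
 */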
11037 gen_helper_wfi(cpu_env, tmp);
11038 tcg_temp_free_i32(tmp);
11039 /* The helper doesn't necessarily throw an exception, but we
11040 * must go back to the main loop to check for interrupts anyway.
11041 */
11042 tcg_gen_exit_tb(NULL, 0);
11043 break;
11044 }
11045 case DISAS_WFE:
11046 gen_helper_wfe(cpu_env);
11047 break;
11048 case DISAS_YIELD:
11049 gen_helper_yield(cpu_env);
11050 break;
11051 case DISAS_SWI:
11052 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11053 default_exception_el(dc));
11054 break;
11055 case DISAS_HVC:
11056 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
11057 break;
11058 case DISAS_SMC:
11059 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
11060 break;
11061 }
11062 }
11064 if (dc->condjmp) {
11065 /* "Condition failed" instruction codepath for the branch/trap insn */
11066 gen_set_label(dc->condlabel);
11067 gen_set_condexec(dc);
11068 if (unlikely(is_singlestepping(dc))) {
11069 gen_set_pc_im(dc, dc->base.pc_next);
11070 gen_singlestep_exception(dc);
11071 } else {
11072 gen_goto_tb(dc, 1, dc->base.pc_next);
11073 }
11074 }
11075 }
11077 static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
11078 {
11079 DisasContext *dc = container_of(dcbase, DisasContext, base);
11081 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
11082 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
11083 }
11085 static const TranslatorOps arm_translator_ops = {
11086 .init_disas_context = arm_tr_init_disas_context,
11087 .tb_start = arm_tr_tb_start,
11088 .insn_start = arm_tr_insn_start,
11089 .breakpoint_check = arm_tr_breakpoint_check,
11090 .translate_insn = arm_tr_translate_insn,
11091 .tb_stop = arm_tr_tb_stop,
11092 .disas_log = arm_tr_disas_log,
11093 };
11095 static const TranslatorOps thumb_translator_ops = {
11096 .init_disas_context = arm_tr_init_disas_context,
11097 .tb_start = arm_tr_tb_start,
11098 .insn_start = arm_tr_insn_start,
11099 .breakpoint_check = arm_tr_breakpoint_check,
11100 .translate_insn = thumb_tr_translate_insn,
11101 .tb_stop = arm_tr_tb_stop,
11102 .disas_log = arm_tr_disas_log,
11103 };
11105 /* generate intermediate code for basic block 'tb'. */
11106 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
11107 {
11108 DisasContext dc = { };
11109 const TranslatorOps *ops = &arm_translator_ops;
11111 if (FIELD_EX32(tb->flags, TBFLAG_AM32, THUMB)) {
11112 ops = &thumb_translator_ops;
11113 }
11114 #ifdef TARGET_AARCH64
11115 if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
11116 ops = &aarch64_translator_ops;
11117 }
11118 #endif
11120 translator_loop(ops, &dc.base, cpu, tb, max_insns);
11121 }
11123 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
11124 target_ulong *data)
11125 {
11126 if (is_a64(env)) {
11127 env->pc = data[0];
11128 env->condexec_bits = 0;
11129 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
11130 } else {
11131 env->regs[15] = data[0];
11132 env->condexec_bits = data[1];
11133 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;