target/arm: Use ror32 instead of open-coding the operation
[qemu/ar7.git] / target/arm/translate.c
blob: 02ce8d44fa12234afc5809e010942398f77356ef
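The commit in the title replaces open-coded 32-bit rotates with the ror32()
helper from "qemu/bitops.h". A minimal sketch of the difference (illustrative
only; the function names below are not from this file, and the exact call
sites the commit touches are not shown):

    #include "qemu/bitops.h"

    /* Open-coded rotate-right, as previously written at call sites
     * (note the undefined behaviour when shift == 0). */
    static inline uint32_t rotr32_open_coded(uint32_t x, unsigned shift)
    {
        return (x >> shift) | (x << (32 - shift));
    }

    /* The same operation using the helper the commit switches to. */
    static inline uint32_t rotr32_with_helper(uint32_t x, unsigned shift)
    {
        return ror32(x, shift);
    }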
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
27 #include "tcg-op.h"
28 #include "tcg-op-gvec.h"
29 #include "qemu/log.h"
30 #include "qemu/bitops.h"
31 #include "arm_ldst.h"
32 #include "hw/semihosting/semihost.h"
34 #include "exec/helper-proto.h"
35 #include "exec/helper-gen.h"
37 #include "trace-tcg.h"
38 #include "exec/log.h"
41 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
43 /* currently all emulated v5 cores are also v5TE, so don't bother */
44 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
45 #define ENABLE_ARCH_5J dc_isar_feature(jazelle, s)
46 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
52 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
54 #include "translate.h"
56 #if defined(CONFIG_USER_ONLY)
57 #define IS_USER(s) 1
58 #else
59 #define IS_USER(s) (s->user)
60 #endif
62 /* We reuse the same 64-bit temporaries for efficiency. */
63 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
64 static TCGv_i32 cpu_R[16];
65 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66 TCGv_i64 cpu_exclusive_addr;
67 TCGv_i64 cpu_exclusive_val;
69 #include "exec/gen-icount.h"
71 static const char * const regnames[] =
72 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
73 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
75 /* Function prototypes for gen_ functions calling Neon helpers. */
76 typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
77 TCGv_i32, TCGv_i32);
78 /* Function prototypes for gen_ functions for fixed point conversions */
79 typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
81 /* initialize TCG globals. */
82 void arm_translate_init(void)
84 int i;
86 for (i = 0; i < 16; i++) {
87 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
88 offsetof(CPUARMState, regs[i]),
89 regnames[i]);
91 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
92 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
93 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
94 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
96 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
97 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
98 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
99 offsetof(CPUARMState, exclusive_val), "exclusive_val");
101 a64_translate_init();
104 /* Flags for the disas_set_da_iss info argument:
105 * lower bits hold the Rt register number, higher bits are flags.
107 typedef enum ISSInfo {
108 ISSNone = 0,
109 ISSRegMask = 0x1f,
110 ISSInvalid = (1 << 5),
111 ISSIsAcqRel = (1 << 6),
112 ISSIsWrite = (1 << 7),
113 ISSIs16Bit = (1 << 8),
114 } ISSInfo;
116 /* Save the syndrome information for a Data Abort */
117 static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
119 uint32_t syn;
120 int sas = memop & MO_SIZE;
121 bool sse = memop & MO_SIGN;
122 bool is_acqrel = issinfo & ISSIsAcqRel;
123 bool is_write = issinfo & ISSIsWrite;
124 bool is_16bit = issinfo & ISSIs16Bit;
125 int srt = issinfo & ISSRegMask;
127 if (issinfo & ISSInvalid) {
128 /* Some callsites want to conditionally provide ISS info,
129 * eg "only if this was not a writeback"
131 return;
134 if (srt == 15) {
135 /* For AArch32, insns where the src/dest is R15 never generate
136 * ISS information. Catching that here saves checking at all
137 * the call sites.
139 return;
142 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
143 0, 0, 0, is_write, 0, is_16bit);
144 disas_set_insn_syndrome(s, syn);
147 static inline int get_a32_user_mem_index(DisasContext *s)
149 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
150 * insns:
151 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
152 * otherwise, access as if at PL0.
154 switch (s->mmu_idx) {
155 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
156 case ARMMMUIdx_S12NSE0:
157 case ARMMMUIdx_S12NSE1:
158 return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
159 case ARMMMUIdx_S1E3:
160 case ARMMMUIdx_S1SE0:
161 case ARMMMUIdx_S1SE1:
162 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
163 case ARMMMUIdx_MUser:
164 case ARMMMUIdx_MPriv:
165 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
166 case ARMMMUIdx_MUserNegPri:
167 case ARMMMUIdx_MPrivNegPri:
168 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
169 case ARMMMUIdx_MSUser:
170 case ARMMMUIdx_MSPriv:
171 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
172 case ARMMMUIdx_MSUserNegPri:
173 case ARMMMUIdx_MSPrivNegPri:
174 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
175 case ARMMMUIdx_S2NS:
176 default:
177 g_assert_not_reached();
181 static inline TCGv_i32 load_cpu_offset(int offset)
183 TCGv_i32 tmp = tcg_temp_new_i32();
184 tcg_gen_ld_i32(tmp, cpu_env, offset);
185 return tmp;
188 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
190 static inline void store_cpu_offset(TCGv_i32 var, int offset)
192 tcg_gen_st_i32(var, cpu_env, offset);
193 tcg_temp_free_i32(var);
196 #define store_cpu_field(var, name) \
197 store_cpu_offset(var, offsetof(CPUARMState, name))
199 /* The architectural value of PC. */
200 static uint32_t read_pc(DisasContext *s)
202 return s->pc_curr + (s->thumb ? 4 : 8);
205 /* Set a variable to the value of a CPU register. */
206 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
208 if (reg == 15) {
209 tcg_gen_movi_i32(var, read_pc(s));
210 } else {
211 tcg_gen_mov_i32(var, cpu_R[reg]);
215 /* Create a new temporary and set it to the value of a CPU register. */
216 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
218 TCGv_i32 tmp = tcg_temp_new_i32();
219 load_reg_var(s, tmp, reg);
220 return tmp;
224 * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
225 * This is used for load/store for which use of PC implies (literal),
226 * or ADD that implies ADR.
228 static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
230 TCGv_i32 tmp = tcg_temp_new_i32();
232 if (reg == 15) {
233 tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
234 } else {
235 tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
237 return tmp;
240 /* Set a CPU register. The source must be a temporary and will be
241 marked as dead. */
242 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
244 if (reg == 15) {
245 /* In Thumb mode, we must ignore bit 0.
246 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
247 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
248 * We choose to ignore [1:0] in ARM mode for all architecture versions.
250 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
251 s->base.is_jmp = DISAS_JUMP;
253 tcg_gen_mov_i32(cpu_R[reg], var);
254 tcg_temp_free_i32(var);
258 * Variant of store_reg which applies v8M stack-limit checks before updating
259 * SP. If the check fails this will result in an exception being taken.
260 * We disable the stack checks for CONFIG_USER_ONLY because we have
261 * no idea what the stack limits should be in that case.
262 * If stack checking is not being done this just acts like store_reg().
264 static void store_sp_checked(DisasContext *s, TCGv_i32 var)
266 #ifndef CONFIG_USER_ONLY
267 if (s->v8m_stackcheck) {
268 gen_helper_v8m_stackcheck(cpu_env, var);
270 #endif
271 store_reg(s, 13, var);
274 /* Value extensions. */
275 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
276 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
277 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
278 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
280 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
281 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
284 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
286 TCGv_i32 tmp_mask = tcg_const_i32(mask);
287 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
288 tcg_temp_free_i32(tmp_mask);
290 /* Set NZCV flags from the high 4 bits of var. */
291 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
293 static void gen_exception_internal(int excp)
295 TCGv_i32 tcg_excp = tcg_const_i32(excp);
297 assert(excp_is_internal(excp));
298 gen_helper_exception_internal(cpu_env, tcg_excp);
299 tcg_temp_free_i32(tcg_excp);
302 static void gen_step_complete_exception(DisasContext *s)
304 /* We just completed step of an insn. Move from Active-not-pending
305 * to Active-pending, and then also take the swstep exception.
306 * This corresponds to making the (IMPDEF) choice to prioritize
307 * swstep exceptions over asynchronous exceptions taken to an exception
308 * level where debug is disabled. This choice has the advantage that
309 * we do not need to maintain internal state corresponding to the
310 * ISV/EX syndrome bits between completion of the step and generation
311 * of the exception, and our syndrome information is always correct.
313 gen_ss_advance(s);
314 gen_swstep_exception(s, 1, s->is_ldex);
315 s->base.is_jmp = DISAS_NORETURN;
318 static void gen_singlestep_exception(DisasContext *s)
320 /* Generate the right kind of exception for singlestep, which is
321 * either the architectural singlestep or EXCP_DEBUG for QEMU's
322 * gdb singlestepping.
324 if (s->ss_active) {
325 gen_step_complete_exception(s);
326 } else {
327 gen_exception_internal(EXCP_DEBUG);
331 static inline bool is_singlestepping(DisasContext *s)
333 /* Return true if we are singlestepping either because of
334 * architectural singlestep or QEMU gdbstub singlestep. This does
335 * not include the command line '-singlestep' mode which is rather
336 * misnamed as it only means "one instruction per TB" and doesn't
337 * affect the code we generate.
339 return s->base.singlestep_enabled || s->ss_active;
342 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
344 TCGv_i32 tmp1 = tcg_temp_new_i32();
345 TCGv_i32 tmp2 = tcg_temp_new_i32();
346 tcg_gen_ext16s_i32(tmp1, a);
347 tcg_gen_ext16s_i32(tmp2, b);
348 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
349 tcg_temp_free_i32(tmp2);
350 tcg_gen_sari_i32(a, a, 16);
351 tcg_gen_sari_i32(b, b, 16);
352 tcg_gen_mul_i32(b, b, a);
353 tcg_gen_mov_i32(a, tmp1);
354 tcg_temp_free_i32(tmp1);
357 /* Byteswap each halfword. */
358 static void gen_rev16(TCGv_i32 var)
360 TCGv_i32 tmp = tcg_temp_new_i32();
361 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
362 tcg_gen_shri_i32(tmp, var, 8);
363 tcg_gen_and_i32(tmp, tmp, mask);
364 tcg_gen_and_i32(var, var, mask);
365 tcg_gen_shli_i32(var, var, 8);
366 tcg_gen_or_i32(var, var, tmp);
367 tcg_temp_free_i32(mask);
368 tcg_temp_free_i32(tmp);
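/*
 * Illustrative sketch, not part of the original file: the gen_rev16()
 * TCG sequence above is equivalent to this plain C per-halfword byteswap.
 */
static inline uint32_t rev16_sketch(uint32_t x)
{
    /* tmp = (x >> 8) & 0x00ff00ff; x = (x & 0x00ff00ff) << 8; x |= tmp */
    return ((x & 0x00ff00ffu) << 8) | ((x >> 8) & 0x00ff00ffu);
}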
371 /* Byteswap low halfword and sign extend. */
372 static void gen_revsh(TCGv_i32 var)
374 tcg_gen_ext16u_i32(var, var);
375 tcg_gen_bswap16_i32(var, var);
376 tcg_gen_ext16s_i32(var, var);
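/*
 * Illustrative sketch, not part of the original file: gen_revsh() above
 * byteswaps the low halfword of its input and sign-extends the result.
 */
static inline int32_t revsh_sketch(uint32_t x)
{
    uint16_t h = (uint16_t)x;                    /* low halfword */
    h = (uint16_t)((h << 8) | (h >> 8));         /* bswap16 */
    return (int32_t)(int16_t)h;                  /* sign-extend to 32 bits */
}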
379 /* Return (b << 32) + a. Mark inputs as dead */
380 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
382 TCGv_i64 tmp64 = tcg_temp_new_i64();
384 tcg_gen_extu_i32_i64(tmp64, b);
385 tcg_temp_free_i32(b);
386 tcg_gen_shli_i64(tmp64, tmp64, 32);
387 tcg_gen_add_i64(a, tmp64, a);
389 tcg_temp_free_i64(tmp64);
390 return a;
393 /* Return (b << 32) - a. Mark inputs as dead. */
394 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
396 TCGv_i64 tmp64 = tcg_temp_new_i64();
398 tcg_gen_extu_i32_i64(tmp64, b);
399 tcg_temp_free_i32(b);
400 tcg_gen_shli_i64(tmp64, tmp64, 32);
401 tcg_gen_sub_i64(a, tmp64, a);
403 tcg_temp_free_i64(tmp64);
404 return a;
407 /* 32x32->64 multiply. Marks inputs as dead. */
408 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
410 TCGv_i32 lo = tcg_temp_new_i32();
411 TCGv_i32 hi = tcg_temp_new_i32();
412 TCGv_i64 ret;
414 tcg_gen_mulu2_i32(lo, hi, a, b);
415 tcg_temp_free_i32(a);
416 tcg_temp_free_i32(b);
418 ret = tcg_temp_new_i64();
419 tcg_gen_concat_i32_i64(ret, lo, hi);
420 tcg_temp_free_i32(lo);
421 tcg_temp_free_i32(hi);
423 return ret;
426 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
428 TCGv_i32 lo = tcg_temp_new_i32();
429 TCGv_i32 hi = tcg_temp_new_i32();
430 TCGv_i64 ret;
432 tcg_gen_muls2_i32(lo, hi, a, b);
433 tcg_temp_free_i32(a);
434 tcg_temp_free_i32(b);
436 ret = tcg_temp_new_i64();
437 tcg_gen_concat_i32_i64(ret, lo, hi);
438 tcg_temp_free_i32(lo);
439 tcg_temp_free_i32(hi);
441 return ret;
444 /* Swap low and high halfwords. */
445 static void gen_swap_half(TCGv_i32 var)
447 TCGv_i32 tmp = tcg_temp_new_i32();
448 tcg_gen_shri_i32(tmp, var, 16);
449 tcg_gen_shli_i32(var, var, 16);
450 tcg_gen_or_i32(var, var, tmp);
451 tcg_temp_free_i32(tmp);
454 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
455 tmp = (t0 ^ t1) & 0x8000;
456 t0 &= ~0x8000;
457 t1 &= ~0x8000;
458 t0 = (t0 + t1) ^ tmp;
461 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
463 TCGv_i32 tmp = tcg_temp_new_i32();
464 tcg_gen_xor_i32(tmp, t0, t1);
465 tcg_gen_andi_i32(tmp, tmp, 0x8000);
466 tcg_gen_andi_i32(t0, t0, ~0x8000);
467 tcg_gen_andi_i32(t1, t1, ~0x8000);
468 tcg_gen_add_i32(t0, t0, t1);
469 tcg_gen_xor_i32(t0, t0, tmp);
470 tcg_temp_free_i32(tmp);
471 tcg_temp_free_i32(t1);
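/*
 * Illustrative sketch, not part of the original file: the pseudocode in
 * the comment above gen_add16(), written out as plain C. Clearing bit 15
 * of both operands stops a carry from the low halfword reaching the high
 * halfword; the XOR then restores the true value of bit 15 of the sum.
 */
static inline uint32_t add16_sketch(uint32_t t0, uint32_t t1)
{
    uint32_t tmp = (t0 ^ t1) & 0x8000;                 /* t0[15] ^ t1[15] */
    uint32_t sum = (t0 & ~0x8000u) + (t1 & ~0x8000u);  /* carry cannot cross bit 15 */
    return sum ^ tmp;                                  /* bit 15 = t0[15]^t1[15]^carry-in */
}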
474 /* Set CF to the top bit of var. */
475 static void gen_set_CF_bit31(TCGv_i32 var)
477 tcg_gen_shri_i32(cpu_CF, var, 31);
480 /* Set N and Z flags from var. */
481 static inline void gen_logic_CC(TCGv_i32 var)
483 tcg_gen_mov_i32(cpu_NF, var);
484 tcg_gen_mov_i32(cpu_ZF, var);
487 /* T0 += T1 + CF. */
488 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
490 tcg_gen_add_i32(t0, t0, t1);
491 tcg_gen_add_i32(t0, t0, cpu_CF);
494 /* dest = T0 + T1 + CF. */
495 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
497 tcg_gen_add_i32(dest, t0, t1);
498 tcg_gen_add_i32(dest, dest, cpu_CF);
501 /* dest = T0 - T1 + CF - 1. */
502 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
504 tcg_gen_sub_i32(dest, t0, t1);
505 tcg_gen_add_i32(dest, dest, cpu_CF);
506 tcg_gen_subi_i32(dest, dest, 1);
509 /* dest = T0 + T1. Compute C, N, V and Z flags */
510 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
512 TCGv_i32 tmp = tcg_temp_new_i32();
513 tcg_gen_movi_i32(tmp, 0);
514 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
515 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
516 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
517 tcg_gen_xor_i32(tmp, t0, t1);
518 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
519 tcg_temp_free_i32(tmp);
520 tcg_gen_mov_i32(dest, cpu_NF);
523 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
524 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
526 TCGv_i32 tmp = tcg_temp_new_i32();
527 if (TCG_TARGET_HAS_add2_i32) {
528 tcg_gen_movi_i32(tmp, 0);
529 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
530 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
531 } else {
532 TCGv_i64 q0 = tcg_temp_new_i64();
533 TCGv_i64 q1 = tcg_temp_new_i64();
534 tcg_gen_extu_i32_i64(q0, t0);
535 tcg_gen_extu_i32_i64(q1, t1);
536 tcg_gen_add_i64(q0, q0, q1);
537 tcg_gen_extu_i32_i64(q1, cpu_CF);
538 tcg_gen_add_i64(q0, q0, q1);
539 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
540 tcg_temp_free_i64(q0);
541 tcg_temp_free_i64(q1);
543 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
544 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
545 tcg_gen_xor_i32(tmp, t0, t1);
546 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
547 tcg_temp_free_i32(tmp);
548 tcg_gen_mov_i32(dest, cpu_NF);
551 /* dest = T0 - T1. Compute C, N, V and Z flags */
552 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
554 TCGv_i32 tmp;
555 tcg_gen_sub_i32(cpu_NF, t0, t1);
556 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
557 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
558 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
559 tmp = tcg_temp_new_i32();
560 tcg_gen_xor_i32(tmp, t0, t1);
561 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
562 tcg_temp_free_i32(tmp);
563 tcg_gen_mov_i32(dest, cpu_NF);
566 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
567 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
569 TCGv_i32 tmp = tcg_temp_new_i32();
570 tcg_gen_not_i32(tmp, t1);
571 gen_adc_CC(dest, t0, tmp);
572 tcg_temp_free_i32(tmp);
575 #define GEN_SHIFT(name) \
576 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
578 TCGv_i32 tmp1, tmp2, tmp3; \
579 tmp1 = tcg_temp_new_i32(); \
580 tcg_gen_andi_i32(tmp1, t1, 0xff); \
581 tmp2 = tcg_const_i32(0); \
582 tmp3 = tcg_const_i32(0x1f); \
583 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
584 tcg_temp_free_i32(tmp3); \
585 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
586 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
587 tcg_temp_free_i32(tmp2); \
588 tcg_temp_free_i32(tmp1); \
590 GEN_SHIFT(shl)
591 GEN_SHIFT(shr)
592 #undef GEN_SHIFT
594 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
596 TCGv_i32 tmp1, tmp2;
597 tmp1 = tcg_temp_new_i32();
598 tcg_gen_andi_i32(tmp1, t1, 0xff);
599 tmp2 = tcg_const_i32(0x1f);
600 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
601 tcg_temp_free_i32(tmp2);
602 tcg_gen_sar_i32(dest, t0, tmp1);
603 tcg_temp_free_i32(tmp1);
606 static void shifter_out_im(TCGv_i32 var, int shift)
608 tcg_gen_extract_i32(cpu_CF, var, shift, 1);
611 /* Shift by immediate. Includes special handling for shift == 0. */
612 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
613 int shift, int flags)
615 switch (shiftop) {
616 case 0: /* LSL */
617 if (shift != 0) {
618 if (flags)
619 shifter_out_im(var, 32 - shift);
620 tcg_gen_shli_i32(var, var, shift);
622 break;
623 case 1: /* LSR */
624 if (shift == 0) {
625 if (flags) {
626 tcg_gen_shri_i32(cpu_CF, var, 31);
628 tcg_gen_movi_i32(var, 0);
629 } else {
630 if (flags)
631 shifter_out_im(var, shift - 1);
632 tcg_gen_shri_i32(var, var, shift);
634 break;
635 case 2: /* ASR */
636 if (shift == 0)
637 shift = 32;
638 if (flags)
639 shifter_out_im(var, shift - 1);
640 if (shift == 32)
641 shift = 31;
642 tcg_gen_sari_i32(var, var, shift);
643 break;
644 case 3: /* ROR/RRX */
645 if (shift != 0) {
646 if (flags)
647 shifter_out_im(var, shift - 1);
648 tcg_gen_rotri_i32(var, var, shift); break;
649 } else {
650 TCGv_i32 tmp = tcg_temp_new_i32();
651 tcg_gen_shli_i32(tmp, cpu_CF, 31);
652 if (flags)
653 shifter_out_im(var, 0);
654 tcg_gen_shri_i32(var, var, 1);
655 tcg_gen_or_i32(var, var, tmp);
656 tcg_temp_free_i32(tmp);
661 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
662 TCGv_i32 shift, int flags)
664 if (flags) {
665 switch (shiftop) {
666 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
667 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
668 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
669 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
671 } else {
672 switch (shiftop) {
673 case 0:
674 gen_shl(var, var, shift);
675 break;
676 case 1:
677 gen_shr(var, var, shift);
678 break;
679 case 2:
680 gen_sar(var, var, shift);
681 break;
682 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
683 tcg_gen_rotr_i32(var, var, shift); break;
686 tcg_temp_free_i32(shift);
689 #define PAS_OP(pfx) \
690 switch (op2) { \
691 case 0: gen_pas_helper(glue(pfx,add16)); break; \
692 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
693 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
694 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
695 case 4: gen_pas_helper(glue(pfx,add8)); break; \
696 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
698 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
700 TCGv_ptr tmp;
702 switch (op1) {
703 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
704 case 1:
705 tmp = tcg_temp_new_ptr();
706 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
707 PAS_OP(s)
708 tcg_temp_free_ptr(tmp);
709 break;
710 case 5:
711 tmp = tcg_temp_new_ptr();
712 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
713 PAS_OP(u)
714 tcg_temp_free_ptr(tmp);
715 break;
716 #undef gen_pas_helper
717 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
718 case 2:
719 PAS_OP(q);
720 break;
721 case 3:
722 PAS_OP(sh);
723 break;
724 case 6:
725 PAS_OP(uq);
726 break;
727 case 7:
728 PAS_OP(uh);
729 break;
730 #undef gen_pas_helper
733 #undef PAS_OP
735 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
736 #define PAS_OP(pfx) \
737 switch (op1) { \
738 case 0: gen_pas_helper(glue(pfx,add8)); break; \
739 case 1: gen_pas_helper(glue(pfx,add16)); break; \
740 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
741 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
742 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
743 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
745 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
747 TCGv_ptr tmp;
749 switch (op2) {
750 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
751 case 0:
752 tmp = tcg_temp_new_ptr();
753 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
754 PAS_OP(s)
755 tcg_temp_free_ptr(tmp);
756 break;
757 case 4:
758 tmp = tcg_temp_new_ptr();
759 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
760 PAS_OP(u)
761 tcg_temp_free_ptr(tmp);
762 break;
763 #undef gen_pas_helper
764 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
765 case 1:
766 PAS_OP(q);
767 break;
768 case 2:
769 PAS_OP(sh);
770 break;
771 case 5:
772 PAS_OP(uq);
773 break;
774 case 6:
775 PAS_OP(uh);
776 break;
777 #undef gen_pas_helper
780 #undef PAS_OP
783 * Generate a conditional based on ARM condition code cc.
784 * This is common between ARM and Aarch64 targets.
786 void arm_test_cc(DisasCompare *cmp, int cc)
788 TCGv_i32 value;
789 TCGCond cond;
790 bool global = true;
792 switch (cc) {
793 case 0: /* eq: Z */
794 case 1: /* ne: !Z */
795 cond = TCG_COND_EQ;
796 value = cpu_ZF;
797 break;
799 case 2: /* cs: C */
800 case 3: /* cc: !C */
801 cond = TCG_COND_NE;
802 value = cpu_CF;
803 break;
805 case 4: /* mi: N */
806 case 5: /* pl: !N */
807 cond = TCG_COND_LT;
808 value = cpu_NF;
809 break;
811 case 6: /* vs: V */
812 case 7: /* vc: !V */
813 cond = TCG_COND_LT;
814 value = cpu_VF;
815 break;
817 case 8: /* hi: C && !Z */
818 case 9: /* ls: !C || Z -> !(C && !Z) */
819 cond = TCG_COND_NE;
820 value = tcg_temp_new_i32();
821 global = false;
822 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
823 ZF is non-zero for !Z; so AND the two subexpressions. */
824 tcg_gen_neg_i32(value, cpu_CF);
825 tcg_gen_and_i32(value, value, cpu_ZF);
826 break;
828 case 10: /* ge: N == V -> N ^ V == 0 */
829 case 11: /* lt: N != V -> N ^ V != 0 */
830 /* Since we're only interested in the sign bit, == 0 is >= 0. */
831 cond = TCG_COND_GE;
832 value = tcg_temp_new_i32();
833 global = false;
834 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
835 break;
837 case 12: /* gt: !Z && N == V */
838 case 13: /* le: Z || N != V */
839 cond = TCG_COND_NE;
840 value = tcg_temp_new_i32();
841 global = false;
842 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
843 * the sign bit then AND with ZF to yield the result. */
844 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
845 tcg_gen_sari_i32(value, value, 31);
846 tcg_gen_andc_i32(value, cpu_ZF, value);
847 break;
849 case 14: /* always */
850 case 15: /* always */
851 /* Use the ALWAYS condition, which will fold early.
852 * It doesn't matter what we use for the value. */
853 cond = TCG_COND_ALWAYS;
854 value = cpu_ZF;
855 goto no_invert;
857 default:
858 fprintf(stderr, "Bad condition code 0x%x\n", cc);
859 abort();
862 if (cc & 1) {
863 cond = tcg_invert_cond(cond);
866 no_invert:
867 cmp->cond = cond;
868 cmp->value = value;
869 cmp->value_global = global;
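/*
 * Illustrative sketch, not part of the original file: the branch-free
 * 'hi'/'ls' computation used in arm_test_cc() above. CF holds 0 or 1,
 * and ZF is zero exactly when the Z flag is set, so (-CF) & ZF is
 * non-zero exactly when C is set and Z is clear.
 */
static inline bool cond_hi_sketch(uint32_t CF, uint32_t ZF)
{
    return ((0u - CF) & ZF) != 0;   /* 0u - CF: all-ones mask when C is set */
}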
872 void arm_free_cc(DisasCompare *cmp)
874 if (!cmp->value_global) {
875 tcg_temp_free_i32(cmp->value);
879 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
881 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
884 void arm_gen_test_cc(int cc, TCGLabel *label)
886 DisasCompare cmp;
887 arm_test_cc(&cmp, cc);
888 arm_jump_cc(&cmp, label);
889 arm_free_cc(&cmp);
892 static const uint8_t table_logic_cc[16] = {
893 1, /* and */
894 1, /* xor */
895 0, /* sub */
896 0, /* rsb */
897 0, /* add */
898 0, /* adc */
899 0, /* sbc */
900 0, /* rsc */
901 1, /* andl */
902 1, /* xorl */
903 0, /* cmp */
904 0, /* cmn */
905 1, /* orr */
906 1, /* mov */
907 1, /* bic */
908 1, /* mvn */
911 static inline void gen_set_condexec(DisasContext *s)
913 if (s->condexec_mask) {
914 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
915 TCGv_i32 tmp = tcg_temp_new_i32();
916 tcg_gen_movi_i32(tmp, val);
917 store_cpu_field(tmp, condexec_bits);
921 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
923 tcg_gen_movi_i32(cpu_R[15], val);
926 /* Set PC and Thumb state from an immediate address. */
927 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
929 TCGv_i32 tmp;
931 s->base.is_jmp = DISAS_JUMP;
932 if (s->thumb != (addr & 1)) {
933 tmp = tcg_temp_new_i32();
934 tcg_gen_movi_i32(tmp, addr & 1);
935 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
936 tcg_temp_free_i32(tmp);
938 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
941 /* Set PC and Thumb state from var. var is marked as dead. */
942 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
944 s->base.is_jmp = DISAS_JUMP;
945 tcg_gen_andi_i32(cpu_R[15], var, ~1);
946 tcg_gen_andi_i32(var, var, 1);
947 store_cpu_field(var, thumb);
950 /* Set PC and Thumb state from var. var is marked as dead.
951 * For M-profile CPUs, include logic to detect exception-return
952 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
953 * and BX reg, and no others, and happens only for code in Handler mode.
955 static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
957 /* Generate the same code here as for a simple bx, but flag via
958 * s->base.is_jmp that we need to do the rest of the work later.
960 gen_bx(s, var);
961 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
962 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
963 s->base.is_jmp = DISAS_BX_EXCRET;
967 static inline void gen_bx_excret_final_code(DisasContext *s)
969 /* Generate the code to finish possible exception return and end the TB */
970 TCGLabel *excret_label = gen_new_label();
971 uint32_t min_magic;
973 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
974 /* Covers FNC_RETURN and EXC_RETURN magic */
975 min_magic = FNC_RETURN_MIN_MAGIC;
976 } else {
977 /* EXC_RETURN magic only */
978 min_magic = EXC_RETURN_MIN_MAGIC;
981 /* Is the new PC value in the magic range indicating exception return? */
982 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
983 /* No: end the TB as we would for a DISAS_JMP */
984 if (is_singlestepping(s)) {
985 gen_singlestep_exception(s);
986 } else {
987 tcg_gen_exit_tb(NULL, 0);
989 gen_set_label(excret_label);
990 /* Yes: this is an exception return.
991 * At this point in runtime env->regs[15] and env->thumb will hold
992 * the exception-return magic number, which do_v7m_exception_exit()
993 * will read. Nothing else will be able to see those values because
994 * the cpu-exec main loop guarantees that we will always go straight
995 * from raising the exception to the exception-handling code.
997 * gen_ss_advance(s) does nothing on M profile currently but
998 * calling it is conceptually the right thing as we have executed
999 * this instruction (compare SWI, HVC, SMC handling).
1001 gen_ss_advance(s);
1002 gen_exception_internal(EXCP_EXCEPTION_EXIT);
1005 static inline void gen_bxns(DisasContext *s, int rm)
1007 TCGv_i32 var = load_reg(s, rm);
1009 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1010 * we need to sync state before calling it, but:
1011 * - we don't need to do gen_set_pc_im() because the bxns helper will
1012 * always set the PC itself
1013 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1014 * unless it's outside an IT block or the last insn in an IT block,
1015 * so we know that condexec == 0 (already set at the top of the TB)
1016 * is correct in the non-UNPREDICTABLE cases, and we can choose
1017 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1019 gen_helper_v7m_bxns(cpu_env, var);
1020 tcg_temp_free_i32(var);
1021 s->base.is_jmp = DISAS_EXIT;
1024 static inline void gen_blxns(DisasContext *s, int rm)
1026 TCGv_i32 var = load_reg(s, rm);
1028 /* We don't need to sync condexec state, for the same reason as bxns.
1029 * We do however need to set the PC, because the blxns helper reads it.
1030 * The blxns helper may throw an exception.
1032 gen_set_pc_im(s, s->base.pc_next);
1033 gen_helper_v7m_blxns(cpu_env, var);
1034 tcg_temp_free_i32(var);
1035 s->base.is_jmp = DISAS_EXIT;
1038 /* Variant of store_reg which uses branch&exchange logic when storing
1039 to r15 in ARM architecture v7 and above. The source must be a temporary
1040 and will be marked as dead. */
1041 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
1043 if (reg == 15 && ENABLE_ARCH_7) {
1044 gen_bx(s, var);
1045 } else {
1046 store_reg(s, reg, var);
1050 /* Variant of store_reg which uses branch&exchange logic when storing
1051 * to r15 in ARM architecture v5T and above. This is used for storing
1052 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1053 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
1054 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
1056 if (reg == 15 && ENABLE_ARCH_5) {
1057 gen_bx_excret(s, var);
1058 } else {
1059 store_reg(s, reg, var);
1063 #ifdef CONFIG_USER_ONLY
1064 #define IS_USER_ONLY 1
1065 #else
1066 #define IS_USER_ONLY 0
1067 #endif
1069 /* Abstractions of "generate code to do a guest load/store for
1070 * AArch32", where a vaddr is always 32 bits (and is zero
1071 * extended if we're a 64 bit core) and data is also
1072 * 32 bits unless specifically doing a 64 bit access.
1073 * These functions work like tcg_gen_qemu_{ld,st}* except
1074 * that the address argument is TCGv_i32 rather than TCGv.
1077 static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
1079 TCGv addr = tcg_temp_new();
1080 tcg_gen_extu_i32_tl(addr, a32);
1082 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1083 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1084 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
1086 return addr;
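/*
 * Illustrative sketch, not part of the original file: the address
 * adjustment gen_aa32_addr() applies in the BE32 (SCTLR.B) case. For
 * accesses narrower than a word, the low address bits are XORed so that
 * the byte/halfword lands in the lane it would occupy in a big-endian
 * word layout.
 */
static inline uint32_t be32_xor_addr_sketch(uint32_t addr, unsigned size_bytes)
{
    return (size_bytes < 4) ? (addr ^ (4 - size_bytes)) : addr;
}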
1089 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1090 int index, TCGMemOp opc)
1092 TCGv addr;
1094 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1095 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1096 opc |= MO_ALIGN;
1099 addr = gen_aa32_addr(s, a32, opc);
1100 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1101 tcg_temp_free(addr);
1104 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1105 int index, TCGMemOp opc)
1107 TCGv addr;
1109 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1110 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1111 opc |= MO_ALIGN;
1114 addr = gen_aa32_addr(s, a32, opc);
1115 tcg_gen_qemu_st_i32(val, addr, index, opc);
1116 tcg_temp_free(addr);
1119 #define DO_GEN_LD(SUFF, OPC) \
1120 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
1121 TCGv_i32 a32, int index) \
1123 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
1125 static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1126 TCGv_i32 val, \
1127 TCGv_i32 a32, int index, \
1128 ISSInfo issinfo) \
1130 gen_aa32_ld##SUFF(s, val, a32, index); \
1131 disas_set_da_iss(s, OPC, issinfo); \
1134 #define DO_GEN_ST(SUFF, OPC) \
1135 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1136 TCGv_i32 a32, int index) \
1138 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
1140 static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1141 TCGv_i32 val, \
1142 TCGv_i32 a32, int index, \
1143 ISSInfo issinfo) \
1145 gen_aa32_st##SUFF(s, val, a32, index); \
1146 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
1149 static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
1151 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1152 if (!IS_USER_ONLY && s->sctlr_b) {
1153 tcg_gen_rotri_i64(val, val, 32);
1157 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1158 int index, TCGMemOp opc)
1160 TCGv addr = gen_aa32_addr(s, a32, opc);
1161 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1162 gen_aa32_frob64(s, val);
1163 tcg_temp_free(addr);
1166 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1167 TCGv_i32 a32, int index)
1169 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1172 static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1173 int index, TCGMemOp opc)
1175 TCGv addr = gen_aa32_addr(s, a32, opc);
1177 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1178 if (!IS_USER_ONLY && s->sctlr_b) {
1179 TCGv_i64 tmp = tcg_temp_new_i64();
1180 tcg_gen_rotri_i64(tmp, val, 32);
1181 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1182 tcg_temp_free_i64(tmp);
1183 } else {
1184 tcg_gen_qemu_st_i64(val, addr, index, opc);
1186 tcg_temp_free(addr);
1189 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1190 TCGv_i32 a32, int index)
1192 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1195 DO_GEN_LD(8s, MO_SB)
1196 DO_GEN_LD(8u, MO_UB)
1197 DO_GEN_LD(16s, MO_SW)
1198 DO_GEN_LD(16u, MO_UW)
1199 DO_GEN_LD(32u, MO_UL)
1200 DO_GEN_ST(8, MO_UB)
1201 DO_GEN_ST(16, MO_UW)
1202 DO_GEN_ST(32, MO_UL)
1204 static inline void gen_hvc(DisasContext *s, int imm16)
1206 /* The pre HVC helper handles cases when HVC gets trapped
1207 * as an undefined insn by runtime configuration (ie before
1208 * the insn really executes).
1210 gen_set_pc_im(s, s->pc_curr);
1211 gen_helper_pre_hvc(cpu_env);
1212 /* Otherwise we will treat this as a real exception which
1213 * happens after execution of the insn. (The distinction matters
1214 * for the PC value reported to the exception handler and also
1215 * for single stepping.)
1217 s->svc_imm = imm16;
1218 gen_set_pc_im(s, s->base.pc_next);
1219 s->base.is_jmp = DISAS_HVC;
1222 static inline void gen_smc(DisasContext *s)
1224 /* As with HVC, we may take an exception either before or after
1225 * the insn executes.
1227 TCGv_i32 tmp;
1229 gen_set_pc_im(s, s->pc_curr);
1230 tmp = tcg_const_i32(syn_aa32_smc());
1231 gen_helper_pre_smc(cpu_env, tmp);
1232 tcg_temp_free_i32(tmp);
1233 gen_set_pc_im(s, s->base.pc_next);
1234 s->base.is_jmp = DISAS_SMC;
1237 static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
1239 gen_set_condexec(s);
1240 gen_set_pc_im(s, pc);
1241 gen_exception_internal(excp);
1242 s->base.is_jmp = DISAS_NORETURN;
1245 static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
1246 int syn, uint32_t target_el)
1248 gen_set_condexec(s);
1249 gen_set_pc_im(s, pc);
1250 gen_exception(excp, syn, target_el);
1251 s->base.is_jmp = DISAS_NORETURN;
1254 static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
1256 TCGv_i32 tcg_syn;
1258 gen_set_condexec(s);
1259 gen_set_pc_im(s, s->pc_curr);
1260 tcg_syn = tcg_const_i32(syn);
1261 gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
1262 tcg_temp_free_i32(tcg_syn);
1263 s->base.is_jmp = DISAS_NORETURN;
1266 void unallocated_encoding(DisasContext *s)
1268 /* Unallocated and reserved encodings are uncategorized */
1269 gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
1270 default_exception_el(s));
1273 /* Force a TB lookup after an instruction that changes the CPU state. */
1274 static inline void gen_lookup_tb(DisasContext *s)
1276 tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
1277 s->base.is_jmp = DISAS_EXIT;
1280 static inline void gen_hlt(DisasContext *s, int imm)
1282 /* HLT. This has two purposes.
1283 * Architecturally, it is an external halting debug instruction.
1284 * Since QEMU doesn't implement external debug, we treat this as the
1285 * behaviour required when halting debug is disabled: it will UNDEF.
1286 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1287 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1288 * must trigger semihosting even for ARMv7 and earlier, where
1289 * HLT was an undefined encoding.
1290 * In system mode, we don't allow userspace access to
1291 * semihosting, to provide some semblance of security
1292 * (and for consistency with our 32-bit semihosting).
1294 if (semihosting_enabled() &&
1295 #ifndef CONFIG_USER_ONLY
1296 s->current_el != 0 &&
1297 #endif
1298 (imm == (s->thumb ? 0x3c : 0xf000))) {
1299 gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST);
1300 return;
1303 unallocated_encoding(s);
1306 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1307 TCGv_i32 var)
1309 int val, rm, shift, shiftop;
1310 TCGv_i32 offset;
1312 if (!(insn & (1 << 25))) {
1313 /* immediate */
1314 val = insn & 0xfff;
1315 if (!(insn & (1 << 23)))
1316 val = -val;
1317 if (val != 0)
1318 tcg_gen_addi_i32(var, var, val);
1319 } else {
1320 /* shift/register */
1321 rm = (insn) & 0xf;
1322 shift = (insn >> 7) & 0x1f;
1323 shiftop = (insn >> 5) & 3;
1324 offset = load_reg(s, rm);
1325 gen_arm_shift_im(offset, shiftop, shift, 0);
1326 if (!(insn & (1 << 23)))
1327 tcg_gen_sub_i32(var, var, offset);
1328 else
1329 tcg_gen_add_i32(var, var, offset);
1330 tcg_temp_free_i32(offset);
1334 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1335 int extra, TCGv_i32 var)
1337 int val, rm;
1338 TCGv_i32 offset;
1340 if (insn & (1 << 22)) {
1341 /* immediate */
1342 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1343 if (!(insn & (1 << 23)))
1344 val = -val;
1345 val += extra;
1346 if (val != 0)
1347 tcg_gen_addi_i32(var, var, val);
1348 } else {
1349 /* register */
1350 if (extra)
1351 tcg_gen_addi_i32(var, var, extra);
1352 rm = (insn) & 0xf;
1353 offset = load_reg(s, rm);
1354 if (!(insn & (1 << 23)))
1355 tcg_gen_sub_i32(var, var, offset);
1356 else
1357 tcg_gen_add_i32(var, var, offset);
1358 tcg_temp_free_i32(offset);
1362 static TCGv_ptr get_fpstatus_ptr(int neon)
1364 TCGv_ptr statusptr = tcg_temp_new_ptr();
1365 int offset;
1366 if (neon) {
1367 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1368 } else {
1369 offset = offsetof(CPUARMState, vfp.fp_status);
1371 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1372 return statusptr;
1375 static inline long vfp_reg_offset(bool dp, unsigned reg)
1377 if (dp) {
1378 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
1379 } else {
1380 long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
1381 if (reg & 1) {
1382 ofs += offsetof(CPU_DoubleU, l.upper);
1383 } else {
1384 ofs += offsetof(CPU_DoubleU, l.lower);
1386 return ofs;
1390 /* Return the offset of a 32-bit piece of a NEON register.
1391 zero is the least significant end of the register. */
1392 static inline long
1393 neon_reg_offset (int reg, int n)
1395 int sreg;
1396 sreg = reg * 2 + n;
1397 return vfp_reg_offset(0, sreg);
1400 /* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
1401 * where 0 is the least significant end of the register.
1403 static inline long
1404 neon_element_offset(int reg, int element, TCGMemOp size)
1406 int element_size = 1 << size;
1407 int ofs = element * element_size;
1408 #ifdef HOST_WORDS_BIGENDIAN
1409 /* Calculate the offset assuming fully little-endian,
1410 * then XOR to account for the order of the 8-byte units.
1412 if (element_size < 8) {
1413 ofs ^= 8 - element_size;
1415 #endif
1416 return neon_reg_offset(reg, 0) + ofs;
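/*
 * Illustrative sketch, not part of the original file: the host
 * big-endian correction performed by neon_element_offset() above.
 * Offsets are computed as if the host were little-endian and then
 * XORed so the element keeps its architectural position within each
 * 8-byte unit.
 */
static inline int neon_element_ofs_sketch(int element, int element_size,
                                          bool host_big_endian)
{
    int ofs = element * element_size;
    if (host_big_endian && element_size < 8) {
        ofs ^= 8 - element_size;
    }
    return ofs;
}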
1419 static TCGv_i32 neon_load_reg(int reg, int pass)
1421 TCGv_i32 tmp = tcg_temp_new_i32();
1422 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1423 return tmp;
1426 static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
1428 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1430 switch (mop) {
1431 case MO_UB:
1432 tcg_gen_ld8u_i32(var, cpu_env, offset);
1433 break;
1434 case MO_UW:
1435 tcg_gen_ld16u_i32(var, cpu_env, offset);
1436 break;
1437 case MO_UL:
1438 tcg_gen_ld_i32(var, cpu_env, offset);
1439 break;
1440 default:
1441 g_assert_not_reached();
1445 static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
1447 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1449 switch (mop) {
1450 case MO_UB:
1451 tcg_gen_ld8u_i64(var, cpu_env, offset);
1452 break;
1453 case MO_UW:
1454 tcg_gen_ld16u_i64(var, cpu_env, offset);
1455 break;
1456 case MO_UL:
1457 tcg_gen_ld32u_i64(var, cpu_env, offset);
1458 break;
1459 case MO_Q:
1460 tcg_gen_ld_i64(var, cpu_env, offset);
1461 break;
1462 default:
1463 g_assert_not_reached();
1467 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1469 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1470 tcg_temp_free_i32(var);
1473 static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
1475 long offset = neon_element_offset(reg, ele, size);
1477 switch (size) {
1478 case MO_8:
1479 tcg_gen_st8_i32(var, cpu_env, offset);
1480 break;
1481 case MO_16:
1482 tcg_gen_st16_i32(var, cpu_env, offset);
1483 break;
1484 case MO_32:
1485 tcg_gen_st_i32(var, cpu_env, offset);
1486 break;
1487 default:
1488 g_assert_not_reached();
1492 static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
1494 long offset = neon_element_offset(reg, ele, size);
1496 switch (size) {
1497 case MO_8:
1498 tcg_gen_st8_i64(var, cpu_env, offset);
1499 break;
1500 case MO_16:
1501 tcg_gen_st16_i64(var, cpu_env, offset);
1502 break;
1503 case MO_32:
1504 tcg_gen_st32_i64(var, cpu_env, offset);
1505 break;
1506 case MO_64:
1507 tcg_gen_st_i64(var, cpu_env, offset);
1508 break;
1509 default:
1510 g_assert_not_reached();
1514 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1516 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1519 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1521 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1524 static inline void neon_load_reg32(TCGv_i32 var, int reg)
1526 tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
1529 static inline void neon_store_reg32(TCGv_i32 var, int reg)
1531 tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
1534 static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1536 TCGv_ptr ret = tcg_temp_new_ptr();
1537 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1538 return ret;
1541 #define ARM_CP_RW_BIT (1 << 20)
1543 /* Include the VFP decoder */
1544 #include "translate-vfp.inc.c"
1546 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1548 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1551 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1553 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1556 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1558 TCGv_i32 var = tcg_temp_new_i32();
1559 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1560 return var;
1563 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1565 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1566 tcg_temp_free_i32(var);
1569 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1571 iwmmxt_store_reg(cpu_M0, rn);
1574 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1576 iwmmxt_load_reg(cpu_M0, rn);
1579 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1581 iwmmxt_load_reg(cpu_V1, rn);
1582 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1585 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1587 iwmmxt_load_reg(cpu_V1, rn);
1588 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1591 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1593 iwmmxt_load_reg(cpu_V1, rn);
1594 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1597 #define IWMMXT_OP(name) \
1598 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1600 iwmmxt_load_reg(cpu_V1, rn); \
1601 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1604 #define IWMMXT_OP_ENV(name) \
1605 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1607 iwmmxt_load_reg(cpu_V1, rn); \
1608 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1611 #define IWMMXT_OP_ENV_SIZE(name) \
1612 IWMMXT_OP_ENV(name##b) \
1613 IWMMXT_OP_ENV(name##w) \
1614 IWMMXT_OP_ENV(name##l)
1616 #define IWMMXT_OP_ENV1(name) \
1617 static inline void gen_op_iwmmxt_##name##_M0(void) \
1619 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1622 IWMMXT_OP(maddsq)
1623 IWMMXT_OP(madduq)
1624 IWMMXT_OP(sadb)
1625 IWMMXT_OP(sadw)
1626 IWMMXT_OP(mulslw)
1627 IWMMXT_OP(mulshw)
1628 IWMMXT_OP(mululw)
1629 IWMMXT_OP(muluhw)
1630 IWMMXT_OP(macsw)
1631 IWMMXT_OP(macuw)
1633 IWMMXT_OP_ENV_SIZE(unpackl)
1634 IWMMXT_OP_ENV_SIZE(unpackh)
1636 IWMMXT_OP_ENV1(unpacklub)
1637 IWMMXT_OP_ENV1(unpackluw)
1638 IWMMXT_OP_ENV1(unpacklul)
1639 IWMMXT_OP_ENV1(unpackhub)
1640 IWMMXT_OP_ENV1(unpackhuw)
1641 IWMMXT_OP_ENV1(unpackhul)
1642 IWMMXT_OP_ENV1(unpacklsb)
1643 IWMMXT_OP_ENV1(unpacklsw)
1644 IWMMXT_OP_ENV1(unpacklsl)
1645 IWMMXT_OP_ENV1(unpackhsb)
1646 IWMMXT_OP_ENV1(unpackhsw)
1647 IWMMXT_OP_ENV1(unpackhsl)
1649 IWMMXT_OP_ENV_SIZE(cmpeq)
1650 IWMMXT_OP_ENV_SIZE(cmpgtu)
1651 IWMMXT_OP_ENV_SIZE(cmpgts)
1653 IWMMXT_OP_ENV_SIZE(mins)
1654 IWMMXT_OP_ENV_SIZE(minu)
1655 IWMMXT_OP_ENV_SIZE(maxs)
1656 IWMMXT_OP_ENV_SIZE(maxu)
1658 IWMMXT_OP_ENV_SIZE(subn)
1659 IWMMXT_OP_ENV_SIZE(addn)
1660 IWMMXT_OP_ENV_SIZE(subu)
1661 IWMMXT_OP_ENV_SIZE(addu)
1662 IWMMXT_OP_ENV_SIZE(subs)
1663 IWMMXT_OP_ENV_SIZE(adds)
1665 IWMMXT_OP_ENV(avgb0)
1666 IWMMXT_OP_ENV(avgb1)
1667 IWMMXT_OP_ENV(avgw0)
1668 IWMMXT_OP_ENV(avgw1)
1670 IWMMXT_OP_ENV(packuw)
1671 IWMMXT_OP_ENV(packul)
1672 IWMMXT_OP_ENV(packuq)
1673 IWMMXT_OP_ENV(packsw)
1674 IWMMXT_OP_ENV(packsl)
1675 IWMMXT_OP_ENV(packsq)
1677 static void gen_op_iwmmxt_set_mup(void)
1679 TCGv_i32 tmp;
1680 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1681 tcg_gen_ori_i32(tmp, tmp, 2);
1682 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1685 static void gen_op_iwmmxt_set_cup(void)
1687 TCGv_i32 tmp;
1688 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1689 tcg_gen_ori_i32(tmp, tmp, 1);
1690 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1693 static void gen_op_iwmmxt_setpsr_nz(void)
1695 TCGv_i32 tmp = tcg_temp_new_i32();
1696 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1697 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1700 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1702 iwmmxt_load_reg(cpu_V1, rn);
1703 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1704 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1707 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1708 TCGv_i32 dest)
1710 int rd;
1711 uint32_t offset;
1712 TCGv_i32 tmp;
1714 rd = (insn >> 16) & 0xf;
1715 tmp = load_reg(s, rd);
1717 offset = (insn & 0xff) << ((insn >> 7) & 2);
1718 if (insn & (1 << 24)) {
1719 /* Pre indexed */
1720 if (insn & (1 << 23))
1721 tcg_gen_addi_i32(tmp, tmp, offset);
1722 else
1723 tcg_gen_addi_i32(tmp, tmp, -offset);
1724 tcg_gen_mov_i32(dest, tmp);
1725 if (insn & (1 << 21))
1726 store_reg(s, rd, tmp);
1727 else
1728 tcg_temp_free_i32(tmp);
1729 } else if (insn & (1 << 21)) {
1730 /* Post indexed */
1731 tcg_gen_mov_i32(dest, tmp);
1732 if (insn & (1 << 23))
1733 tcg_gen_addi_i32(tmp, tmp, offset);
1734 else
1735 tcg_gen_addi_i32(tmp, tmp, -offset);
1736 store_reg(s, rd, tmp);
1737 } else if (!(insn & (1 << 23)))
1738 return 1;
1739 return 0;
1742 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1744 int rd = (insn >> 0) & 0xf;
1745 TCGv_i32 tmp;
1747 if (insn & (1 << 8)) {
1748 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1749 return 1;
1750 } else {
1751 tmp = iwmmxt_load_creg(rd);
1753 } else {
1754 tmp = tcg_temp_new_i32();
1755 iwmmxt_load_reg(cpu_V0, rd);
1756 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1758 tcg_gen_andi_i32(tmp, tmp, mask);
1759 tcg_gen_mov_i32(dest, tmp);
1760 tcg_temp_free_i32(tmp);
1761 return 0;
1764 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1765 (ie. an undefined instruction). */
1766 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1768 int rd, wrd;
1769 int rdhi, rdlo, rd0, rd1, i;
1770 TCGv_i32 addr;
1771 TCGv_i32 tmp, tmp2, tmp3;
1773 if ((insn & 0x0e000e00) == 0x0c000000) {
1774 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1775 wrd = insn & 0xf;
1776 rdlo = (insn >> 12) & 0xf;
1777 rdhi = (insn >> 16) & 0xf;
1778 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1779 iwmmxt_load_reg(cpu_V0, wrd);
1780 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1781 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1782 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1783 } else { /* TMCRR */
1784 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1785 iwmmxt_store_reg(cpu_V0, wrd);
1786 gen_op_iwmmxt_set_mup();
1788 return 0;
1791 wrd = (insn >> 12) & 0xf;
1792 addr = tcg_temp_new_i32();
1793 if (gen_iwmmxt_address(s, insn, addr)) {
1794 tcg_temp_free_i32(addr);
1795 return 1;
1797 if (insn & ARM_CP_RW_BIT) {
1798 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1799 tmp = tcg_temp_new_i32();
1800 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1801 iwmmxt_store_creg(wrd, tmp);
1802 } else {
1803 i = 1;
1804 if (insn & (1 << 8)) {
1805 if (insn & (1 << 22)) { /* WLDRD */
1806 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1807 i = 0;
1808 } else { /* WLDRW wRd */
1809 tmp = tcg_temp_new_i32();
1810 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1812 } else {
1813 tmp = tcg_temp_new_i32();
1814 if (insn & (1 << 22)) { /* WLDRH */
1815 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1816 } else { /* WLDRB */
1817 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1820 if (i) {
1821 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1822 tcg_temp_free_i32(tmp);
1824 gen_op_iwmmxt_movq_wRn_M0(wrd);
1826 } else {
1827 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1828 tmp = iwmmxt_load_creg(wrd);
1829 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1830 } else {
1831 gen_op_iwmmxt_movq_M0_wRn(wrd);
1832 tmp = tcg_temp_new_i32();
1833 if (insn & (1 << 8)) {
1834 if (insn & (1 << 22)) { /* WSTRD */
1835 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1836 } else { /* WSTRW wRd */
1837 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1838 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1840 } else {
1841 if (insn & (1 << 22)) { /* WSTRH */
1842 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1843 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1844 } else { /* WSTRB */
1845 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1846 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1850 tcg_temp_free_i32(tmp);
1852 tcg_temp_free_i32(addr);
1853 return 0;
1856 if ((insn & 0x0f000000) != 0x0e000000)
1857 return 1;
1859 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1860 case 0x000: /* WOR */
1861 wrd = (insn >> 12) & 0xf;
1862 rd0 = (insn >> 0) & 0xf;
1863 rd1 = (insn >> 16) & 0xf;
1864 gen_op_iwmmxt_movq_M0_wRn(rd0);
1865 gen_op_iwmmxt_orq_M0_wRn(rd1);
1866 gen_op_iwmmxt_setpsr_nz();
1867 gen_op_iwmmxt_movq_wRn_M0(wrd);
1868 gen_op_iwmmxt_set_mup();
1869 gen_op_iwmmxt_set_cup();
1870 break;
1871 case 0x011: /* TMCR */
1872 if (insn & 0xf)
1873 return 1;
1874 rd = (insn >> 12) & 0xf;
1875 wrd = (insn >> 16) & 0xf;
1876 switch (wrd) {
1877 case ARM_IWMMXT_wCID:
1878 case ARM_IWMMXT_wCASF:
1879 break;
1880 case ARM_IWMMXT_wCon:
1881 gen_op_iwmmxt_set_cup();
1882 /* Fall through. */
1883 case ARM_IWMMXT_wCSSF:
1884 tmp = iwmmxt_load_creg(wrd);
1885 tmp2 = load_reg(s, rd);
1886 tcg_gen_andc_i32(tmp, tmp, tmp2);
1887 tcg_temp_free_i32(tmp2);
1888 iwmmxt_store_creg(wrd, tmp);
1889 break;
1890 case ARM_IWMMXT_wCGR0:
1891 case ARM_IWMMXT_wCGR1:
1892 case ARM_IWMMXT_wCGR2:
1893 case ARM_IWMMXT_wCGR3:
1894 gen_op_iwmmxt_set_cup();
1895 tmp = load_reg(s, rd);
1896 iwmmxt_store_creg(wrd, tmp);
1897 break;
1898 default:
1899 return 1;
1901 break;
1902 case 0x100: /* WXOR */
1903 wrd = (insn >> 12) & 0xf;
1904 rd0 = (insn >> 0) & 0xf;
1905 rd1 = (insn >> 16) & 0xf;
1906 gen_op_iwmmxt_movq_M0_wRn(rd0);
1907 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1908 gen_op_iwmmxt_setpsr_nz();
1909 gen_op_iwmmxt_movq_wRn_M0(wrd);
1910 gen_op_iwmmxt_set_mup();
1911 gen_op_iwmmxt_set_cup();
1912 break;
1913 case 0x111: /* TMRC */
1914 if (insn & 0xf)
1915 return 1;
1916 rd = (insn >> 12) & 0xf;
1917 wrd = (insn >> 16) & 0xf;
1918 tmp = iwmmxt_load_creg(wrd);
1919 store_reg(s, rd, tmp);
1920 break;
1921 case 0x300: /* WANDN */
1922 wrd = (insn >> 12) & 0xf;
1923 rd0 = (insn >> 0) & 0xf;
1924 rd1 = (insn >> 16) & 0xf;
1925 gen_op_iwmmxt_movq_M0_wRn(rd0);
1926 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1927 gen_op_iwmmxt_andq_M0_wRn(rd1);
1928 gen_op_iwmmxt_setpsr_nz();
1929 gen_op_iwmmxt_movq_wRn_M0(wrd);
1930 gen_op_iwmmxt_set_mup();
1931 gen_op_iwmmxt_set_cup();
1932 break;
1933 case 0x200: /* WAND */
1934 wrd = (insn >> 12) & 0xf;
1935 rd0 = (insn >> 0) & 0xf;
1936 rd1 = (insn >> 16) & 0xf;
1937 gen_op_iwmmxt_movq_M0_wRn(rd0);
1938 gen_op_iwmmxt_andq_M0_wRn(rd1);
1939 gen_op_iwmmxt_setpsr_nz();
1940 gen_op_iwmmxt_movq_wRn_M0(wrd);
1941 gen_op_iwmmxt_set_mup();
1942 gen_op_iwmmxt_set_cup();
1943 break;
1944 case 0x810: case 0xa10: /* WMADD */
1945 wrd = (insn >> 12) & 0xf;
1946 rd0 = (insn >> 0) & 0xf;
1947 rd1 = (insn >> 16) & 0xf;
1948 gen_op_iwmmxt_movq_M0_wRn(rd0);
1949 if (insn & (1 << 21))
1950 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1951 else
1952 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1953 gen_op_iwmmxt_movq_wRn_M0(wrd);
1954 gen_op_iwmmxt_set_mup();
1955 break;
1956 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1957 wrd = (insn >> 12) & 0xf;
1958 rd0 = (insn >> 16) & 0xf;
1959 rd1 = (insn >> 0) & 0xf;
1960 gen_op_iwmmxt_movq_M0_wRn(rd0);
1961 switch ((insn >> 22) & 3) {
1962 case 0:
1963 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1964 break;
1965 case 1:
1966 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1967 break;
1968 case 2:
1969 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1970 break;
1971 case 3:
1972 return 1;
1974 gen_op_iwmmxt_movq_wRn_M0(wrd);
1975 gen_op_iwmmxt_set_mup();
1976 gen_op_iwmmxt_set_cup();
1977 break;
1978 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1979 wrd = (insn >> 12) & 0xf;
1980 rd0 = (insn >> 16) & 0xf;
1981 rd1 = (insn >> 0) & 0xf;
1982 gen_op_iwmmxt_movq_M0_wRn(rd0);
1983 switch ((insn >> 22) & 3) {
1984 case 0:
1985 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1986 break;
1987 case 1:
1988 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1989 break;
1990 case 2:
1991 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1992 break;
1993 case 3:
1994 return 1;
1996 gen_op_iwmmxt_movq_wRn_M0(wrd);
1997 gen_op_iwmmxt_set_mup();
1998 gen_op_iwmmxt_set_cup();
1999 break;
2000 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2001 wrd = (insn >> 12) & 0xf;
2002 rd0 = (insn >> 16) & 0xf;
2003 rd1 = (insn >> 0) & 0xf;
2004 gen_op_iwmmxt_movq_M0_wRn(rd0);
2005 if (insn & (1 << 22))
2006 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2007 else
2008 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2009 if (!(insn & (1 << 20)))
2010 gen_op_iwmmxt_addl_M0_wRn(wrd);
2011 gen_op_iwmmxt_movq_wRn_M0(wrd);
2012 gen_op_iwmmxt_set_mup();
2013 break;
2014 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2015 wrd = (insn >> 12) & 0xf;
2016 rd0 = (insn >> 16) & 0xf;
2017 rd1 = (insn >> 0) & 0xf;
2018 gen_op_iwmmxt_movq_M0_wRn(rd0);
2019 if (insn & (1 << 21)) {
2020 if (insn & (1 << 20))
2021 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2022 else
2023 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2024 } else {
2025 if (insn & (1 << 20))
2026 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2027 else
2028 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2030 gen_op_iwmmxt_movq_wRn_M0(wrd);
2031 gen_op_iwmmxt_set_mup();
2032 break;
2033 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2034 wrd = (insn >> 12) & 0xf;
2035 rd0 = (insn >> 16) & 0xf;
2036 rd1 = (insn >> 0) & 0xf;
2037 gen_op_iwmmxt_movq_M0_wRn(rd0);
2038 if (insn & (1 << 21))
2039 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2040 else
2041 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2042 if (!(insn & (1 << 20))) {
2043 iwmmxt_load_reg(cpu_V1, wrd);
2044 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
2046 gen_op_iwmmxt_movq_wRn_M0(wrd);
2047 gen_op_iwmmxt_set_mup();
2048 break;
2049 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2050 wrd = (insn >> 12) & 0xf;
2051 rd0 = (insn >> 16) & 0xf;
2052 rd1 = (insn >> 0) & 0xf;
2053 gen_op_iwmmxt_movq_M0_wRn(rd0);
2054 switch ((insn >> 22) & 3) {
2055 case 0:
2056 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2057 break;
2058 case 1:
2059 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2060 break;
2061 case 2:
2062 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2063 break;
2064 case 3:
2065 return 1;
2067 gen_op_iwmmxt_movq_wRn_M0(wrd);
2068 gen_op_iwmmxt_set_mup();
2069 gen_op_iwmmxt_set_cup();
2070 break;
2071 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2072 wrd = (insn >> 12) & 0xf;
2073 rd0 = (insn >> 16) & 0xf;
2074 rd1 = (insn >> 0) & 0xf;
2075 gen_op_iwmmxt_movq_M0_wRn(rd0);
2076 if (insn & (1 << 22)) {
2077 if (insn & (1 << 20))
2078 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2079 else
2080 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2081 } else {
2082 if (insn & (1 << 20))
2083 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2084 else
2085 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2087 gen_op_iwmmxt_movq_wRn_M0(wrd);
2088 gen_op_iwmmxt_set_mup();
2089 gen_op_iwmmxt_set_cup();
2090 break;
2091 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2092 wrd = (insn >> 12) & 0xf;
2093 rd0 = (insn >> 16) & 0xf;
2094 rd1 = (insn >> 0) & 0xf;
2095 gen_op_iwmmxt_movq_M0_wRn(rd0);
2096 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2097 tcg_gen_andi_i32(tmp, tmp, 7);
2098 iwmmxt_load_reg(cpu_V1, rd1);
2099 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2100 tcg_temp_free_i32(tmp);
2101 gen_op_iwmmxt_movq_wRn_M0(wrd);
2102 gen_op_iwmmxt_set_mup();
2103 break;
2104 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2105 if (((insn >> 6) & 3) == 3)
2106 return 1;
2107 rd = (insn >> 12) & 0xf;
2108 wrd = (insn >> 16) & 0xf;
2109 tmp = load_reg(s, rd);
2110 gen_op_iwmmxt_movq_M0_wRn(wrd);
2111 switch ((insn >> 6) & 3) {
2112 case 0:
2113 tmp2 = tcg_const_i32(0xff);
2114 tmp3 = tcg_const_i32((insn & 7) << 3);
2115 break;
2116 case 1:
2117 tmp2 = tcg_const_i32(0xffff);
2118 tmp3 = tcg_const_i32((insn & 3) << 4);
2119 break;
2120 case 2:
2121 tmp2 = tcg_const_i32(0xffffffff);
2122 tmp3 = tcg_const_i32((insn & 1) << 5);
2123 break;
2124 default:
2125 tmp2 = NULL;
2126 tmp3 = NULL;
2128 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2129 tcg_temp_free_i32(tmp3);
2130 tcg_temp_free_i32(tmp2);
2131 tcg_temp_free_i32(tmp);
2132 gen_op_iwmmxt_movq_wRn_M0(wrd);
2133 gen_op_iwmmxt_set_mup();
2134 break;
2135 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2136 rd = (insn >> 12) & 0xf;
2137 wrd = (insn >> 16) & 0xf;
2138 if (rd == 15 || ((insn >> 22) & 3) == 3)
2139 return 1;
2140 gen_op_iwmmxt_movq_M0_wRn(wrd);
2141 tmp = tcg_temp_new_i32();
2142 switch ((insn >> 22) & 3) {
2143 case 0:
2144 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2145 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2146 if (insn & 8) {
2147 tcg_gen_ext8s_i32(tmp, tmp);
2148 } else {
2149 tcg_gen_andi_i32(tmp, tmp, 0xff);
2151 break;
2152 case 1:
2153 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2154 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2155 if (insn & 8) {
2156 tcg_gen_ext16s_i32(tmp, tmp);
2157 } else {
2158 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2160 break;
2161 case 2:
2162 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2163 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2164 break;
2166 store_reg(s, rd, tmp);
2167 break;
2168 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2169 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2170 return 1;
2171 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2172 switch ((insn >> 22) & 3) {
2173 case 0:
2174 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2175 break;
2176 case 1:
2177 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2178 break;
2179 case 2:
2180 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2181 break;
2183 tcg_gen_shli_i32(tmp, tmp, 28);
2184 gen_set_nzcv(tmp);
2185 tcg_temp_free_i32(tmp);
2186 break;
2187 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2188 if (((insn >> 6) & 3) == 3)
2189 return 1;
2190 rd = (insn >> 12) & 0xf;
2191 wrd = (insn >> 16) & 0xf;
2192 tmp = load_reg(s, rd);
2193 switch ((insn >> 6) & 3) {
2194 case 0:
2195 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2196 break;
2197 case 1:
2198 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2199 break;
2200 case 2:
2201 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2202 break;
2204 tcg_temp_free_i32(tmp);
2205 gen_op_iwmmxt_movq_wRn_M0(wrd);
2206 gen_op_iwmmxt_set_mup();
2207 break;
2208 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2209 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2210 return 1;
2211 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2212 tmp2 = tcg_temp_new_i32();
2213 tcg_gen_mov_i32(tmp2, tmp);
2214 switch ((insn >> 22) & 3) {
2215 case 0:
2216 for (i = 0; i < 7; i ++) {
2217 tcg_gen_shli_i32(tmp2, tmp2, 4);
2218 tcg_gen_and_i32(tmp, tmp, tmp2);
2220 break;
2221 case 1:
2222 for (i = 0; i < 3; i ++) {
2223 tcg_gen_shli_i32(tmp2, tmp2, 8);
2224 tcg_gen_and_i32(tmp, tmp, tmp2);
2226 break;
2227 case 2:
2228 tcg_gen_shli_i32(tmp2, tmp2, 16);
2229 tcg_gen_and_i32(tmp, tmp, tmp2);
2230 break;
2232 gen_set_nzcv(tmp);
2233 tcg_temp_free_i32(tmp2);
2234 tcg_temp_free_i32(tmp);
2235 break;
2236 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2237 wrd = (insn >> 12) & 0xf;
2238 rd0 = (insn >> 16) & 0xf;
2239 gen_op_iwmmxt_movq_M0_wRn(rd0);
2240 switch ((insn >> 22) & 3) {
2241 case 0:
2242 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2243 break;
2244 case 1:
2245 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2246 break;
2247 case 2:
2248 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2249 break;
2250 case 3:
2251 return 1;
2253 gen_op_iwmmxt_movq_wRn_M0(wrd);
2254 gen_op_iwmmxt_set_mup();
2255 break;
2256 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2257 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2258 return 1;
2259 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2260 tmp2 = tcg_temp_new_i32();
2261 tcg_gen_mov_i32(tmp2, tmp);
2262 switch ((insn >> 22) & 3) {
2263 case 0:
2264 for (i = 0; i < 7; i ++) {
2265 tcg_gen_shli_i32(tmp2, tmp2, 4);
2266 tcg_gen_or_i32(tmp, tmp, tmp2);
2268 break;
2269 case 1:
2270 for (i = 0; i < 3; i ++) {
2271 tcg_gen_shli_i32(tmp2, tmp2, 8);
2272 tcg_gen_or_i32(tmp, tmp, tmp2);
2274 break;
2275 case 2:
2276 tcg_gen_shli_i32(tmp2, tmp2, 16);
2277 tcg_gen_or_i32(tmp, tmp, tmp2);
2278 break;
2280 gen_set_nzcv(tmp);
2281 tcg_temp_free_i32(tmp2);
2282 tcg_temp_free_i32(tmp);
2283 break;
2284 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2285 rd = (insn >> 12) & 0xf;
2286 rd0 = (insn >> 16) & 0xf;
2287 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2288 return 1;
2289 gen_op_iwmmxt_movq_M0_wRn(rd0);
2290 tmp = tcg_temp_new_i32();
2291 switch ((insn >> 22) & 3) {
2292 case 0:
2293 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2294 break;
2295 case 1:
2296 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2297 break;
2298 case 2:
2299 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2300 break;
2302 store_reg(s, rd, tmp);
2303 break;
2304 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2305 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2306 wrd = (insn >> 12) & 0xf;
2307 rd0 = (insn >> 16) & 0xf;
2308 rd1 = (insn >> 0) & 0xf;
2309 gen_op_iwmmxt_movq_M0_wRn(rd0);
2310 switch ((insn >> 22) & 3) {
2311 case 0:
2312 if (insn & (1 << 21))
2313 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2314 else
2315 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2316 break;
2317 case 1:
2318 if (insn & (1 << 21))
2319 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2320 else
2321 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2322 break;
2323 case 2:
2324 if (insn & (1 << 21))
2325 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2326 else
2327 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2328 break;
2329 case 3:
2330 return 1;
2332 gen_op_iwmmxt_movq_wRn_M0(wrd);
2333 gen_op_iwmmxt_set_mup();
2334 gen_op_iwmmxt_set_cup();
2335 break;
2336 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2337 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2338 wrd = (insn >> 12) & 0xf;
2339 rd0 = (insn >> 16) & 0xf;
2340 gen_op_iwmmxt_movq_M0_wRn(rd0);
2341 switch ((insn >> 22) & 3) {
2342 case 0:
2343 if (insn & (1 << 21))
2344 gen_op_iwmmxt_unpacklsb_M0();
2345 else
2346 gen_op_iwmmxt_unpacklub_M0();
2347 break;
2348 case 1:
2349 if (insn & (1 << 21))
2350 gen_op_iwmmxt_unpacklsw_M0();
2351 else
2352 gen_op_iwmmxt_unpackluw_M0();
2353 break;
2354 case 2:
2355 if (insn & (1 << 21))
2356 gen_op_iwmmxt_unpacklsl_M0();
2357 else
2358 gen_op_iwmmxt_unpacklul_M0();
2359 break;
2360 case 3:
2361 return 1;
2363 gen_op_iwmmxt_movq_wRn_M0(wrd);
2364 gen_op_iwmmxt_set_mup();
2365 gen_op_iwmmxt_set_cup();
2366 break;
2367 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2368 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2369 wrd = (insn >> 12) & 0xf;
2370 rd0 = (insn >> 16) & 0xf;
2371 gen_op_iwmmxt_movq_M0_wRn(rd0);
2372 switch ((insn >> 22) & 3) {
2373 case 0:
2374 if (insn & (1 << 21))
2375 gen_op_iwmmxt_unpackhsb_M0();
2376 else
2377 gen_op_iwmmxt_unpackhub_M0();
2378 break;
2379 case 1:
2380 if (insn & (1 << 21))
2381 gen_op_iwmmxt_unpackhsw_M0();
2382 else
2383 gen_op_iwmmxt_unpackhuw_M0();
2384 break;
2385 case 2:
2386 if (insn & (1 << 21))
2387 gen_op_iwmmxt_unpackhsl_M0();
2388 else
2389 gen_op_iwmmxt_unpackhul_M0();
2390 break;
2391 case 3:
2392 return 1;
2394 gen_op_iwmmxt_movq_wRn_M0(wrd);
2395 gen_op_iwmmxt_set_mup();
2396 gen_op_iwmmxt_set_cup();
2397 break;
2398 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2399 case 0x214: case 0x614: case 0xa14: case 0xe14:
2400 if (((insn >> 22) & 3) == 0)
2401 return 1;
2402 wrd = (insn >> 12) & 0xf;
2403 rd0 = (insn >> 16) & 0xf;
2404 gen_op_iwmmxt_movq_M0_wRn(rd0);
2405 tmp = tcg_temp_new_i32();
2406 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2407 tcg_temp_free_i32(tmp);
2408 return 1;
2410 switch ((insn >> 22) & 3) {
2411 case 1:
2412 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2413 break;
2414 case 2:
2415 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2416 break;
2417 case 3:
2418 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2419 break;
2421 tcg_temp_free_i32(tmp);
2422 gen_op_iwmmxt_movq_wRn_M0(wrd);
2423 gen_op_iwmmxt_set_mup();
2424 gen_op_iwmmxt_set_cup();
2425 break;
2426 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2427 case 0x014: case 0x414: case 0x814: case 0xc14:
2428 if (((insn >> 22) & 3) == 0)
2429 return 1;
2430 wrd = (insn >> 12) & 0xf;
2431 rd0 = (insn >> 16) & 0xf;
2432 gen_op_iwmmxt_movq_M0_wRn(rd0);
2433 tmp = tcg_temp_new_i32();
2434 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2435 tcg_temp_free_i32(tmp);
2436 return 1;
2438 switch ((insn >> 22) & 3) {
2439 case 1:
2440 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2441 break;
2442 case 2:
2443 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2444 break;
2445 case 3:
2446 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2447 break;
2449 tcg_temp_free_i32(tmp);
2450 gen_op_iwmmxt_movq_wRn_M0(wrd);
2451 gen_op_iwmmxt_set_mup();
2452 gen_op_iwmmxt_set_cup();
2453 break;
2454 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2455 case 0x114: case 0x514: case 0x914: case 0xd14:
2456 if (((insn >> 22) & 3) == 0)
2457 return 1;
2458 wrd = (insn >> 12) & 0xf;
2459 rd0 = (insn >> 16) & 0xf;
2460 gen_op_iwmmxt_movq_M0_wRn(rd0);
2461 tmp = tcg_temp_new_i32();
2462 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2463 tcg_temp_free_i32(tmp);
2464 return 1;
2466 switch ((insn >> 22) & 3) {
2467 case 1:
2468 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2469 break;
2470 case 2:
2471 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2472 break;
2473 case 3:
2474 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2475 break;
2477 tcg_temp_free_i32(tmp);
2478 gen_op_iwmmxt_movq_wRn_M0(wrd);
2479 gen_op_iwmmxt_set_mup();
2480 gen_op_iwmmxt_set_cup();
2481 break;
2482 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2483 case 0x314: case 0x714: case 0xb14: case 0xf14:
2484 if (((insn >> 22) & 3) == 0)
2485 return 1;
2486 wrd = (insn >> 12) & 0xf;
2487 rd0 = (insn >> 16) & 0xf;
2488 gen_op_iwmmxt_movq_M0_wRn(rd0);
2489 tmp = tcg_temp_new_i32();
2490 switch ((insn >> 22) & 3) {
2491 case 1:
2492 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2493 tcg_temp_free_i32(tmp);
2494 return 1;
2496 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2497 break;
2498 case 2:
2499 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2500 tcg_temp_free_i32(tmp);
2501 return 1;
2503 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2504 break;
2505 case 3:
2506 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2507 tcg_temp_free_i32(tmp);
2508 return 1;
2510 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2511 break;
2513 tcg_temp_free_i32(tmp);
2514 gen_op_iwmmxt_movq_wRn_M0(wrd);
2515 gen_op_iwmmxt_set_mup();
2516 gen_op_iwmmxt_set_cup();
2517 break;
2518 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2519 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2520 wrd = (insn >> 12) & 0xf;
2521 rd0 = (insn >> 16) & 0xf;
2522 rd1 = (insn >> 0) & 0xf;
2523 gen_op_iwmmxt_movq_M0_wRn(rd0);
2524 switch ((insn >> 22) & 3) {
2525 case 0:
2526 if (insn & (1 << 21))
2527 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2528 else
2529 gen_op_iwmmxt_minub_M0_wRn(rd1);
2530 break;
2531 case 1:
2532 if (insn & (1 << 21))
2533 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2534 else
2535 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2536 break;
2537 case 2:
2538 if (insn & (1 << 21))
2539 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2540 else
2541 gen_op_iwmmxt_minul_M0_wRn(rd1);
2542 break;
2543 case 3:
2544 return 1;
2546 gen_op_iwmmxt_movq_wRn_M0(wrd);
2547 gen_op_iwmmxt_set_mup();
2548 break;
2549 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2550 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2551 wrd = (insn >> 12) & 0xf;
2552 rd0 = (insn >> 16) & 0xf;
2553 rd1 = (insn >> 0) & 0xf;
2554 gen_op_iwmmxt_movq_M0_wRn(rd0);
2555 switch ((insn >> 22) & 3) {
2556 case 0:
2557 if (insn & (1 << 21))
2558 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2559 else
2560 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2561 break;
2562 case 1:
2563 if (insn & (1 << 21))
2564 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2565 else
2566 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2567 break;
2568 case 2:
2569 if (insn & (1 << 21))
2570 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2571 else
2572 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2573 break;
2574 case 3:
2575 return 1;
2577 gen_op_iwmmxt_movq_wRn_M0(wrd);
2578 gen_op_iwmmxt_set_mup();
2579 break;
2580 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2581 case 0x402: case 0x502: case 0x602: case 0x702:
2582 wrd = (insn >> 12) & 0xf;
2583 rd0 = (insn >> 16) & 0xf;
2584 rd1 = (insn >> 0) & 0xf;
2585 gen_op_iwmmxt_movq_M0_wRn(rd0);
2586 tmp = tcg_const_i32((insn >> 20) & 3);
2587 iwmmxt_load_reg(cpu_V1, rd1);
2588 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2589 tcg_temp_free_i32(tmp);
2590 gen_op_iwmmxt_movq_wRn_M0(wrd);
2591 gen_op_iwmmxt_set_mup();
2592 break;
2593 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2594 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2595 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2596 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2597 wrd = (insn >> 12) & 0xf;
2598 rd0 = (insn >> 16) & 0xf;
2599 rd1 = (insn >> 0) & 0xf;
2600 gen_op_iwmmxt_movq_M0_wRn(rd0);
2601 switch ((insn >> 20) & 0xf) {
2602 case 0x0:
2603 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2604 break;
2605 case 0x1:
2606 gen_op_iwmmxt_subub_M0_wRn(rd1);
2607 break;
2608 case 0x3:
2609 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2610 break;
2611 case 0x4:
2612 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2613 break;
2614 case 0x5:
2615 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2616 break;
2617 case 0x7:
2618 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2619 break;
2620 case 0x8:
2621 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2622 break;
2623 case 0x9:
2624 gen_op_iwmmxt_subul_M0_wRn(rd1);
2625 break;
2626 case 0xb:
2627 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2628 break;
2629 default:
2630 return 1;
2632 gen_op_iwmmxt_movq_wRn_M0(wrd);
2633 gen_op_iwmmxt_set_mup();
2634 gen_op_iwmmxt_set_cup();
2635 break;
2636 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2637 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2638 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2639 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2640 wrd = (insn >> 12) & 0xf;
2641 rd0 = (insn >> 16) & 0xf;
2642 gen_op_iwmmxt_movq_M0_wRn(rd0);
2643 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2644 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2645 tcg_temp_free_i32(tmp);
2646 gen_op_iwmmxt_movq_wRn_M0(wrd);
2647 gen_op_iwmmxt_set_mup();
2648 gen_op_iwmmxt_set_cup();
2649 break;
2650 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2651 case 0x418: case 0x518: case 0x618: case 0x718:
2652 case 0x818: case 0x918: case 0xa18: case 0xb18:
2653 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2654 wrd = (insn >> 12) & 0xf;
2655 rd0 = (insn >> 16) & 0xf;
2656 rd1 = (insn >> 0) & 0xf;
2657 gen_op_iwmmxt_movq_M0_wRn(rd0);
2658 switch ((insn >> 20) & 0xf) {
2659 case 0x0:
2660 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2661 break;
2662 case 0x1:
2663 gen_op_iwmmxt_addub_M0_wRn(rd1);
2664 break;
2665 case 0x3:
2666 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2667 break;
2668 case 0x4:
2669 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2670 break;
2671 case 0x5:
2672 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2673 break;
2674 case 0x7:
2675 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2676 break;
2677 case 0x8:
2678 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2679 break;
2680 case 0x9:
2681 gen_op_iwmmxt_addul_M0_wRn(rd1);
2682 break;
2683 case 0xb:
2684 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2685 break;
2686 default:
2687 return 1;
2689 gen_op_iwmmxt_movq_wRn_M0(wrd);
2690 gen_op_iwmmxt_set_mup();
2691 gen_op_iwmmxt_set_cup();
2692 break;
2693 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2694 case 0x408: case 0x508: case 0x608: case 0x708:
2695 case 0x808: case 0x908: case 0xa08: case 0xb08:
2696 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2697 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2698 return 1;
2699 wrd = (insn >> 12) & 0xf;
2700 rd0 = (insn >> 16) & 0xf;
2701 rd1 = (insn >> 0) & 0xf;
2702 gen_op_iwmmxt_movq_M0_wRn(rd0);
2703 switch ((insn >> 22) & 3) {
2704 case 1:
2705 if (insn & (1 << 21))
2706 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2707 else
2708 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2709 break;
2710 case 2:
2711 if (insn & (1 << 21))
2712 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2713 else
2714 gen_op_iwmmxt_packul_M0_wRn(rd1);
2715 break;
2716 case 3:
2717 if (insn & (1 << 21))
2718 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2719 else
2720 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2721 break;
2723 gen_op_iwmmxt_movq_wRn_M0(wrd);
2724 gen_op_iwmmxt_set_mup();
2725 gen_op_iwmmxt_set_cup();
2726 break;
2727 case 0x201: case 0x203: case 0x205: case 0x207:
2728 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2729 case 0x211: case 0x213: case 0x215: case 0x217:
2730 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2731 wrd = (insn >> 5) & 0xf;
2732 rd0 = (insn >> 12) & 0xf;
2733 rd1 = (insn >> 0) & 0xf;
2734 if (rd0 == 0xf || rd1 == 0xf)
2735 return 1;
2736 gen_op_iwmmxt_movq_M0_wRn(wrd);
2737 tmp = load_reg(s, rd0);
2738 tmp2 = load_reg(s, rd1);
2739 switch ((insn >> 16) & 0xf) {
2740 case 0x0: /* TMIA */
2741 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2742 break;
2743 case 0x8: /* TMIAPH */
2744 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2745 break;
2746 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2747 if (insn & (1 << 16))
2748 tcg_gen_shri_i32(tmp, tmp, 16);
2749 if (insn & (1 << 17))
2750 tcg_gen_shri_i32(tmp2, tmp2, 16);
2751 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2752 break;
2753 default:
2754 tcg_temp_free_i32(tmp2);
2755 tcg_temp_free_i32(tmp);
2756 return 1;
2758 tcg_temp_free_i32(tmp2);
2759 tcg_temp_free_i32(tmp);
2760 gen_op_iwmmxt_movq_wRn_M0(wrd);
2761 gen_op_iwmmxt_set_mup();
2762 break;
2763 default:
2764 return 1;
2767 return 0;
2770 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2771 (i.e. an undefined instruction). */
2772 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2774 int acc, rd0, rd1, rdhi, rdlo;
2775 TCGv_i32 tmp, tmp2;
2777 if ((insn & 0x0ff00f10) == 0x0e200010) {
2778 /* Multiply with Internal Accumulate Format */
2779 rd0 = (insn >> 12) & 0xf;
2780 rd1 = insn & 0xf;
2781 acc = (insn >> 5) & 7;
2783 if (acc != 0)
2784 return 1;
2786 tmp = load_reg(s, rd0);
2787 tmp2 = load_reg(s, rd1);
2788 switch ((insn >> 16) & 0xf) {
2789 case 0x0: /* MIA */
2790 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2791 break;
2792 case 0x8: /* MIAPH */
2793 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2794 break;
2795 case 0xc: /* MIABB */
2796 case 0xd: /* MIABT */
2797 case 0xe: /* MIATB */
2798 case 0xf: /* MIATT */
2799 if (insn & (1 << 16))
2800 tcg_gen_shri_i32(tmp, tmp, 16);
2801 if (insn & (1 << 17))
2802 tcg_gen_shri_i32(tmp2, tmp2, 16);
2803 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2804 break;
2805 default:
2806 return 1;
2808 tcg_temp_free_i32(tmp2);
2809 tcg_temp_free_i32(tmp);
2811 gen_op_iwmmxt_movq_wRn_M0(acc);
2812 return 0;
2815 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2816 /* Internal Accumulator Access Format */
2817 rdhi = (insn >> 16) & 0xf;
2818 rdlo = (insn >> 12) & 0xf;
2819 acc = insn & 7;
2821 if (acc != 0)
2822 return 1;
2824 if (insn & ARM_CP_RW_BIT) { /* MRA */
2825 iwmmxt_load_reg(cpu_V0, acc);
2826 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2827 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2828 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
2829 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2830 } else { /* MAR */
2831 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2832 iwmmxt_store_reg(cpu_V0, acc);
2834 return 0;
2837 return 1;
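/*
 * Note that the XScale DSP accumulator acc0 is architecturally 40 bits
 * wide, which is why the MRA path above masks rdhi down to
 * (1 << (40 - 32)) - 1, i.e. keeps only the 8 significant high bits.
 */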
2840 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2841 #define VFP_SREG(insn, bigbit, smallbit) \
2842 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2843 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2844 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2845 reg = (((insn) >> (bigbit)) & 0x0f) \
2846 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2847 } else { \
2848 if (insn & (1 << (smallbit))) \
2849 return 1; \
2850 reg = ((insn) >> (bigbit)) & 0x0f; \
2851 }} while (0)
2853 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2854 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2855 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2856 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2857 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2858 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
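/*
 * To illustrate the macros above: for the "D" operand, VFP_SREG_D()
 * builds the single-precision register number as
 * (insn[15:12] << 1) | insn[22], and VFP_DREG_D() builds the
 * double-precision number as (insn[22] << 4) | insn[15:12].  Pre-VFP3
 * cores only have D0-D15, so VFP_DREG instead makes the decoder return 1
 * (UNDEF) if the "smallbit" is set.  VFP_REG_SHR exists because the "M"
 * operand has bigbit == 0, making the shift count (bigbit - 1) negative;
 * in that case it shifts left instead.
 */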
2860 static void gen_neon_dup_low16(TCGv_i32 var)
2862 TCGv_i32 tmp = tcg_temp_new_i32();
2863 tcg_gen_ext16u_i32(var, var);
2864 tcg_gen_shli_i32(tmp, var, 16);
2865 tcg_gen_or_i32(var, var, tmp);
2866 tcg_temp_free_i32(tmp);
2869 static void gen_neon_dup_high16(TCGv_i32 var)
2871 TCGv_i32 tmp = tcg_temp_new_i32();
2872 tcg_gen_andi_i32(var, var, 0xffff0000);
2873 tcg_gen_shri_i32(tmp, var, 16);
2874 tcg_gen_or_i32(var, var, tmp);
2875 tcg_temp_free_i32(tmp);
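/*
 * For example, gen_neon_dup_low16() turns 0x1234abcd into 0xabcdabcd and
 * gen_neon_dup_high16() turns it into 0x12341234: the selected 16-bit
 * half is replicated into both halves of the 32-bit value.
 */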
2879 * Disassemble a VFP instruction. Returns nonzero if an error occurred
2880 * (i.e. an undefined instruction).
2882 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
2884 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
2885 return 1;
2889 * If the decodetree decoder handles this insn it will always
2890 * emit code to either execute the insn or generate an appropriate
2891 * exception; so we don't need to ever return non-zero to tell
2892 * the calling code to emit an UNDEF exception.
2894 if (extract32(insn, 28, 4) == 0xf) {
2895 if (disas_vfp_uncond(s, insn)) {
2896 return 0;
2898 } else {
2899 if (disas_vfp(s, insn)) {
2900 return 0;
2903 /* If the decodetree decoder didn't handle this insn, it must be UNDEF */
2904 return 1;
2907 static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
2909 #ifndef CONFIG_USER_ONLY
2910 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
2911 ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
2912 #else
2913 return true;
2914 #endif
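/*
 * That is, direct TB chaining is only used when the branch destination
 * lies in the same guest page as either the start of this TB or the
 * current instruction; otherwise we fall back to the indirect
 * lookup_and_goto_ptr path below, since a cross-page direct link could
 * become stale if that page's mapping changes.  User-only mode has no
 * such restriction.
 */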
2917 static void gen_goto_ptr(void)
2919 tcg_gen_lookup_and_goto_ptr();
2922 /* This will end the TB but doesn't guarantee we'll return to
2923 * cpu_loop_exec. Any live exit_requests will be processed as we
2924 * enter the next TB.
2926 static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
2928 if (use_goto_tb(s, dest)) {
2929 tcg_gen_goto_tb(n);
2930 gen_set_pc_im(s, dest);
2931 tcg_gen_exit_tb(s->base.tb, n);
2932 } else {
2933 gen_set_pc_im(s, dest);
2934 gen_goto_ptr();
2936 s->base.is_jmp = DISAS_NORETURN;
2939 static inline void gen_jmp (DisasContext *s, uint32_t dest)
2941 if (unlikely(is_singlestepping(s))) {
2942 /* An indirect jump so that we still trigger the debug exception. */
2943 if (s->thumb)
2944 dest |= 1;
2945 gen_bx_im(s, dest);
2946 } else {
2947 gen_goto_tb(s, 0, dest);
2951 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
2953 if (x)
2954 tcg_gen_sari_i32(t0, t0, 16);
2955 else
2956 gen_sxth(t0);
2957 if (y)
2958 tcg_gen_sari_i32(t1, t1, 16);
2959 else
2960 gen_sxth(t1);
2961 tcg_gen_mul_i32(t0, t0, t1);
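/*
 * gen_mulxy() provides the 16x16->32 signed multiply used by the
 * SMUL<x><y>/SMLA<x><y> style operations: x and y select the top (1) or
 * bottom (0) halfword of t0 and t1 respectively, sign-extending it
 * before the 32-bit multiply.
 */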
2964 /* Return the mask of PSR bits set by a MSR instruction. */
2965 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
2967 uint32_t mask;
2969 mask = 0;
2970 if (flags & (1 << 0))
2971 mask |= 0xff;
2972 if (flags & (1 << 1))
2973 mask |= 0xff00;
2974 if (flags & (1 << 2))
2975 mask |= 0xff0000;
2976 if (flags & (1 << 3))
2977 mask |= 0xff000000;
2979 /* Mask out undefined bits. */
2980 mask &= ~CPSR_RESERVED;
2981 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
2982 mask &= ~CPSR_T;
2984 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
2985 mask &= ~CPSR_Q; /* V5TE in reality */
2987 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
2988 mask &= ~(CPSR_E | CPSR_GE);
2990 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
2991 mask &= ~CPSR_IT;
2993 /* Mask out execution state and reserved bits. */
2994 if (!spsr) {
2995 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
2997 /* Mask out privileged bits. */
2998 if (IS_USER(s))
2999 mask &= CPSR_USER;
3000 return mask;
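/*
 * The flags argument above is the MSR field mask: bit 0 selects the
 * control field PSR[7:0], bit 1 the extension field PSR[15:8], bit 2 the
 * status field PSR[23:16] and bit 3 the flags field PSR[31:24]
 * (the c/x/s/f field specifiers of the MSR instruction).
 */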
3003 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3004 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
3006 TCGv_i32 tmp;
3007 if (spsr) {
3008 /* ??? This is also undefined in system mode. */
3009 if (IS_USER(s))
3010 return 1;
3012 tmp = load_cpu_field(spsr);
3013 tcg_gen_andi_i32(tmp, tmp, ~mask);
3014 tcg_gen_andi_i32(t0, t0, mask);
3015 tcg_gen_or_i32(tmp, tmp, t0);
3016 store_cpu_field(tmp, spsr);
3017 } else {
3018 gen_set_cpsr(t0, mask);
3020 tcg_temp_free_i32(t0);
3021 gen_lookup_tb(s);
3022 return 0;
3025 /* Returns nonzero if access to the PSR is not permitted. */
3026 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3028 TCGv_i32 tmp;
3029 tmp = tcg_temp_new_i32();
3030 tcg_gen_movi_i32(tmp, val);
3031 return gen_set_psr(s, mask, spsr, tmp);
3034 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
3035 int *tgtmode, int *regno)
3037 /* Decode the r and sysm fields of MSR/MRS banked accesses into
3038 * the target mode and register number, and identify the various
3039 * unpredictable cases.
3040 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
3041 * + executed in user mode
3042 * + using R15 as the src/dest register
3043 * + accessing an unimplemented register
3044 * + accessing a register that's inaccessible at current PL/security state*
3045 * + accessing a register that you could access with a different insn
3046 * We choose to UNDEF in all these cases.
3047 * Since we don't know which of the various AArch32 modes we are in
3048 * we have to defer some checks to runtime.
3049 * Accesses to Monitor mode registers from Secure EL1 (which implies
3050 * that EL3 is AArch64) must trap to EL3.
3052 * If the access checks fail this function will emit code to take
3053 * an exception and return false. Otherwise it will return true,
3054 * and set *tgtmode and *regno appropriately.
3056 int exc_target = default_exception_el(s);
3058 /* These instructions are present only in ARMv8, or in ARMv7 with the
3059 * Virtualization Extensions.
3061 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
3062 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
3063 goto undef;
3066 if (IS_USER(s) || rn == 15) {
3067 goto undef;
3070 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
3071 * of registers into (r, sysm).
3073 if (r) {
3074 /* SPSRs for other modes */
3075 switch (sysm) {
3076 case 0xe: /* SPSR_fiq */
3077 *tgtmode = ARM_CPU_MODE_FIQ;
3078 break;
3079 case 0x10: /* SPSR_irq */
3080 *tgtmode = ARM_CPU_MODE_IRQ;
3081 break;
3082 case 0x12: /* SPSR_svc */
3083 *tgtmode = ARM_CPU_MODE_SVC;
3084 break;
3085 case 0x14: /* SPSR_abt */
3086 *tgtmode = ARM_CPU_MODE_ABT;
3087 break;
3088 case 0x16: /* SPSR_und */
3089 *tgtmode = ARM_CPU_MODE_UND;
3090 break;
3091 case 0x1c: /* SPSR_mon */
3092 *tgtmode = ARM_CPU_MODE_MON;
3093 break;
3094 case 0x1e: /* SPSR_hyp */
3095 *tgtmode = ARM_CPU_MODE_HYP;
3096 break;
3097 default: /* unallocated */
3098 goto undef;
3100 /* We arbitrarily assign SPSR a register number of 16. */
3101 *regno = 16;
3102 } else {
3103 /* general purpose registers for other modes */
3104 switch (sysm) {
3105 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
3106 *tgtmode = ARM_CPU_MODE_USR;
3107 *regno = sysm + 8;
3108 break;
3109 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
3110 *tgtmode = ARM_CPU_MODE_FIQ;
3111 *regno = sysm;
3112 break;
3113 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
3114 *tgtmode = ARM_CPU_MODE_IRQ;
3115 *regno = sysm & 1 ? 13 : 14;
3116 break;
3117 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
3118 *tgtmode = ARM_CPU_MODE_SVC;
3119 *regno = sysm & 1 ? 13 : 14;
3120 break;
3121 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
3122 *tgtmode = ARM_CPU_MODE_ABT;
3123 *regno = sysm & 1 ? 13 : 14;
3124 break;
3125 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
3126 *tgtmode = ARM_CPU_MODE_UND;
3127 *regno = sysm & 1 ? 13 : 14;
3128 break;
3129 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
3130 *tgtmode = ARM_CPU_MODE_MON;
3131 *regno = sysm & 1 ? 13 : 14;
3132 break;
3133 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
3134 *tgtmode = ARM_CPU_MODE_HYP;
3135 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
3136 *regno = sysm & 1 ? 13 : 17;
3137 break;
3138 default: /* unallocated */
3139 goto undef;
3143 /* Catch the 'accessing inaccessible register' cases we can detect
3144 * at translate time.
3146 switch (*tgtmode) {
3147 case ARM_CPU_MODE_MON:
3148 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
3149 goto undef;
3151 if (s->current_el == 1) {
3152 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
3153 * then accesses to Mon registers trap to EL3
3155 exc_target = 3;
3156 goto undef;
3158 break;
3159 case ARM_CPU_MODE_HYP:
3161 * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
3162 * (and so we can forbid accesses from EL2 or below). elr_hyp
3163 * can be accessed also from Hyp mode, so forbid accesses from
3164 * EL0 or EL1.
3166 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
3167 (s->current_el < 3 && *regno != 17)) {
3168 goto undef;
3170 break;
3171 default:
3172 break;
3175 return true;
3177 undef:
3178 /* If we get here then some access check did not pass */
3179 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
3180 syn_uncategorized(), exc_target);
3181 return false;
3184 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
3186 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
3187 int tgtmode = 0, regno = 0;
3189 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
3190 return;
3193 /* Sync state because msr_banked() can raise exceptions */
3194 gen_set_condexec(s);
3195 gen_set_pc_im(s, s->pc_curr);
3196 tcg_reg = load_reg(s, rn);
3197 tcg_tgtmode = tcg_const_i32(tgtmode);
3198 tcg_regno = tcg_const_i32(regno);
3199 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
3200 tcg_temp_free_i32(tcg_tgtmode);
3201 tcg_temp_free_i32(tcg_regno);
3202 tcg_temp_free_i32(tcg_reg);
3203 s->base.is_jmp = DISAS_UPDATE;
3206 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
3208 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
3209 int tgtmode = 0, regno = 0;
3211 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
3212 return;
3215 /* Sync state because mrs_banked() can raise exceptions */
3216 gen_set_condexec(s);
3217 gen_set_pc_im(s, s->pc_curr);
3218 tcg_reg = tcg_temp_new_i32();
3219 tcg_tgtmode = tcg_const_i32(tgtmode);
3220 tcg_regno = tcg_const_i32(regno);
3221 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
3222 tcg_temp_free_i32(tcg_tgtmode);
3223 tcg_temp_free_i32(tcg_regno);
3224 store_reg(s, rn, tcg_reg);
3225 s->base.is_jmp = DISAS_UPDATE;
3228 /* Store value to PC as for an exception return (i.e. don't
3229 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
3230 * will do the masking based on the new value of the Thumb bit.
3232 static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
3234 tcg_gen_mov_i32(cpu_R[15], pc);
3235 tcg_temp_free_i32(pc);
3238 /* Generate a v6 exception return. Marks both values as dead. */
3239 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
3241 store_pc_exc_ret(s, pc);
3242 /* The cpsr_write_eret helper will mask the low bits of PC
3243 * appropriately depending on the new Thumb bit, so it must
3244 * be called after storing the new PC.
3246 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
3247 gen_io_start();
3249 gen_helper_cpsr_write_eret(cpu_env, cpsr);
3250 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
3251 gen_io_end();
3253 tcg_temp_free_i32(cpsr);
3254 /* Must exit loop to check un-masked IRQs */
3255 s->base.is_jmp = DISAS_EXIT;
3258 /* Generate an old-style exception return. Marks pc as dead. */
3259 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
3261 gen_rfe(s, pc, load_cpu_field(spsr));
3265 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
3266 * only call the helper when running single threaded TCG code to ensure
3267 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
3268 * just skip this instruction. Currently the SEV/SEVL instructions
3269 * which are *one* of many ways to wake the CPU from WFE are not
3270 * implemented so we can't sleep like WFI does.
3272 static void gen_nop_hint(DisasContext *s, int val)
3274 switch (val) {
3275 /* When running in MTTCG we don't generate jumps to the yield and
3276 * WFE helpers as it won't affect the scheduling of other vCPUs.
3277 * If we wanted to more completely model WFE/SEV so we don't busy
3278 * spin unnecessarily we would need to do something more involved.
3280 case 1: /* yield */
3281 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3282 gen_set_pc_im(s, s->base.pc_next);
3283 s->base.is_jmp = DISAS_YIELD;
3285 break;
3286 case 3: /* wfi */
3287 gen_set_pc_im(s, s->base.pc_next);
3288 s->base.is_jmp = DISAS_WFI;
3289 break;
3290 case 2: /* wfe */
3291 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3292 gen_set_pc_im(s, s->base.pc_next);
3293 s->base.is_jmp = DISAS_WFE;
3295 break;
3296 case 4: /* sev */
3297 case 5: /* sevl */
3298 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
3299 default: /* nop */
3300 break;
3304 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
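/* CPU_V001 is shorthand for the argument list "cpu_V0, cpu_V0, cpu_V1",
 * i.e. a 64-bit op that combines cpu_V0 and cpu_V1 and writes the result
 * back into cpu_V0. */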
3306 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
3308 switch (size) {
3309 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3310 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3311 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3312 default: abort();
3316 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
3318 switch (size) {
3319 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3320 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3321 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3322 default: return;
3326 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3327 #define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
3328 #define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
3329 #define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
3330 #define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
3332 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3333 switch ((size << 1) | u) { \
3334 case 0: \
3335 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3336 break; \
3337 case 1: \
3338 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3339 break; \
3340 case 2: \
3341 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3342 break; \
3343 case 3: \
3344 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3345 break; \
3346 case 4: \
3347 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3348 break; \
3349 case 5: \
3350 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3351 break; \
3352 default: return 1; \
3353 }} while (0)
3355 #define GEN_NEON_INTEGER_OP(name) do { \
3356 switch ((size << 1) | u) { \
3357 case 0: \
3358 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3359 break; \
3360 case 1: \
3361 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3362 break; \
3363 case 2: \
3364 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3365 break; \
3366 case 3: \
3367 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3368 break; \
3369 case 4: \
3370 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3371 break; \
3372 case 5: \
3373 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3374 break; \
3375 default: return 1; \
3376 }} while (0)
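/*
 * Both macros above dispatch on ((size << 1) | u): for example size == 1,
 * u == 0 picks the _s16 helper and size == 2, u == 1 picks the _u32
 * helper.  64-bit element sizes are not handled here, so they hit the
 * default case and return 1 (UNDEF).
 */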
3378 static TCGv_i32 neon_load_scratch(int scratch)
3380 TCGv_i32 tmp = tcg_temp_new_i32();
3381 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3382 return tmp;
3385 static void neon_store_scratch(int scratch, TCGv_i32 var)
3387 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3388 tcg_temp_free_i32(var);
3391 static inline TCGv_i32 neon_get_scalar(int size, int reg)
3393 TCGv_i32 tmp;
3394 if (size == 1) {
3395 tmp = neon_load_reg(reg & 7, reg >> 4);
3396 if (reg & 8) {
3397 gen_neon_dup_high16(tmp);
3398 } else {
3399 gen_neon_dup_low16(tmp);
3401 } else {
3402 tmp = neon_load_reg(reg & 15, reg >> 4);
3404 return tmp;
3407 static int gen_neon_unzip(int rd, int rm, int size, int q)
3409 TCGv_ptr pd, pm;
3411 if (!q && size == 2) {
3412 return 1;
3414 pd = vfp_reg_ptr(true, rd);
3415 pm = vfp_reg_ptr(true, rm);
3416 if (q) {
3417 switch (size) {
3418 case 0:
3419 gen_helper_neon_qunzip8(pd, pm);
3420 break;
3421 case 1:
3422 gen_helper_neon_qunzip16(pd, pm);
3423 break;
3424 case 2:
3425 gen_helper_neon_qunzip32(pd, pm);
3426 break;
3427 default:
3428 abort();
3430 } else {
3431 switch (size) {
3432 case 0:
3433 gen_helper_neon_unzip8(pd, pm);
3434 break;
3435 case 1:
3436 gen_helper_neon_unzip16(pd, pm);
3437 break;
3438 default:
3439 abort();
3442 tcg_temp_free_ptr(pd);
3443 tcg_temp_free_ptr(pm);
3444 return 0;
3447 static int gen_neon_zip(int rd, int rm, int size, int q)
3449 TCGv_ptr pd, pm;
3451 if (!q && size == 2) {
3452 return 1;
3454 pd = vfp_reg_ptr(true, rd);
3455 pm = vfp_reg_ptr(true, rm);
3456 if (q) {
3457 switch (size) {
3458 case 0:
3459 gen_helper_neon_qzip8(pd, pm);
3460 break;
3461 case 1:
3462 gen_helper_neon_qzip16(pd, pm);
3463 break;
3464 case 2:
3465 gen_helper_neon_qzip32(pd, pm);
3466 break;
3467 default:
3468 abort();
3470 } else {
3471 switch (size) {
3472 case 0:
3473 gen_helper_neon_zip8(pd, pm);
3474 break;
3475 case 1:
3476 gen_helper_neon_zip16(pd, pm);
3477 break;
3478 default:
3479 abort();
3482 tcg_temp_free_ptr(pd);
3483 tcg_temp_free_ptr(pm);
3484 return 0;
3487 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
3489 TCGv_i32 rd, tmp;
3491 rd = tcg_temp_new_i32();
3492 tmp = tcg_temp_new_i32();
3494 tcg_gen_shli_i32(rd, t0, 8);
3495 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3496 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3497 tcg_gen_or_i32(rd, rd, tmp);
3499 tcg_gen_shri_i32(t1, t1, 8);
3500 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3501 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3502 tcg_gen_or_i32(t1, t1, tmp);
3503 tcg_gen_mov_i32(t0, rd);
3505 tcg_temp_free_i32(tmp);
3506 tcg_temp_free_i32(rd);
3509 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
3511 TCGv_i32 rd, tmp;
3513 rd = tcg_temp_new_i32();
3514 tmp = tcg_temp_new_i32();
3516 tcg_gen_shli_i32(rd, t0, 16);
3517 tcg_gen_andi_i32(tmp, t1, 0xffff);
3518 tcg_gen_or_i32(rd, rd, tmp);
3519 tcg_gen_shri_i32(t1, t1, 16);
3520 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3521 tcg_gen_or_i32(t1, t1, tmp);
3522 tcg_gen_mov_i32(t0, rd);
3524 tcg_temp_free_i32(tmp);
3525 tcg_temp_free_i32(rd);
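/*
 * These two helpers implement the element transpose used by VTRN for
 * 8-bit and 16-bit elements within a 32-bit chunk: adjacent element
 * pairs from t0 and t1 are treated as 2x2 matrices and transposed, so
 * alternate elements of each input are exchanged with the other.
 */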
3529 static struct {
3530 int nregs;
3531 int interleave;
3532 int spacing;
3533 } const neon_ls_element_type[11] = {
3534 {1, 4, 1},
3535 {1, 4, 2},
3536 {4, 1, 1},
3537 {2, 2, 2},
3538 {1, 3, 1},
3539 {1, 3, 2},
3540 {3, 1, 1},
3541 {1, 1, 1},
3542 {1, 2, 1},
3543 {1, 2, 2},
3544 {2, 1, 1}
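/*
 * The table above is indexed by the 'op' field (insn bits [11:8]) of the
 * VLDn/VSTn "multiple structures" encodings: the total number of D
 * registers transferred is nregs * interleave, and spacing selects
 * consecutive D registers (1) or every other one (2).
 */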
3547 /* Translate a NEON load/store element instruction. Return nonzero if the
3548 instruction is invalid. */
3549 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
3551 int rd, rn, rm;
3552 int op;
3553 int nregs;
3554 int interleave;
3555 int spacing;
3556 int stride;
3557 int size;
3558 int reg;
3559 int load;
3560 int n;
3561 int vec_size;
3562 int mmu_idx;
3563 TCGMemOp endian;
3564 TCGv_i32 addr;
3565 TCGv_i32 tmp;
3566 TCGv_i32 tmp2;
3567 TCGv_i64 tmp64;
3569 /* FIXME: this access check should not take precedence over UNDEF
3570 * for invalid encodings; we will generate incorrect syndrome information
3571 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3573 if (s->fp_excp_el) {
3574 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
3575 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
3576 return 0;
3579 if (!s->vfp_enabled)
3580 return 1;
3581 VFP_DREG_D(rd, insn);
3582 rn = (insn >> 16) & 0xf;
3583 rm = insn & 0xf;
3584 load = (insn & (1 << 21)) != 0;
3585 endian = s->be_data;
3586 mmu_idx = get_mem_index(s);
3587 if ((insn & (1 << 23)) == 0) {
3588 /* Load store all elements. */
3589 op = (insn >> 8) & 0xf;
3590 size = (insn >> 6) & 3;
3591 if (op > 10)
3592 return 1;
3593 /* Catch UNDEF cases for bad values of align field */
3594 switch (op & 0xc) {
3595 case 4:
3596 if (((insn >> 5) & 1) == 1) {
3597 return 1;
3599 break;
3600 case 8:
3601 if (((insn >> 4) & 3) == 3) {
3602 return 1;
3604 break;
3605 default:
3606 break;
3608 nregs = neon_ls_element_type[op].nregs;
3609 interleave = neon_ls_element_type[op].interleave;
3610 spacing = neon_ls_element_type[op].spacing;
3611 if (size == 3 && (interleave | spacing) != 1) {
3612 return 1;
3614 /* For our purposes, bytes are always little-endian. */
3615 if (size == 0) {
3616 endian = MO_LE;
3618 /* Consecutive little-endian elements from a single register
3619 * can be promoted to a larger little-endian operation.
3621 if (interleave == 1 && endian == MO_LE) {
3622 size = 3;
3624 tmp64 = tcg_temp_new_i64();
3625 addr = tcg_temp_new_i32();
3626 tmp2 = tcg_const_i32(1 << size);
3627 load_reg_var(s, addr, rn);
3628 for (reg = 0; reg < nregs; reg++) {
3629 for (n = 0; n < 8 >> size; n++) {
3630 int xs;
3631 for (xs = 0; xs < interleave; xs++) {
3632 int tt = rd + reg + spacing * xs;
3634 if (load) {
3635 gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
3636 neon_store_element64(tt, n, size, tmp64);
3637 } else {
3638 neon_load_element64(tmp64, tt, n, size);
3639 gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
3641 tcg_gen_add_i32(addr, addr, tmp2);
3645 tcg_temp_free_i32(addr);
3646 tcg_temp_free_i32(tmp2);
3647 tcg_temp_free_i64(tmp64);
3648 stride = nregs * interleave * 8;
3649 } else {
3650 size = (insn >> 10) & 3;
3651 if (size == 3) {
3652 /* Load single element to all lanes. */
3653 int a = (insn >> 4) & 1;
3654 if (!load) {
3655 return 1;
3657 size = (insn >> 6) & 3;
3658 nregs = ((insn >> 8) & 3) + 1;
3660 if (size == 3) {
3661 if (nregs != 4 || a == 0) {
3662 return 1;
3664 /* For VLD4, size == 3 with a == 1 means 32 bits at 16-byte alignment */
3665 size = 2;
3667 if (nregs == 1 && a == 1 && size == 0) {
3668 return 1;
3670 if (nregs == 3 && a == 1) {
3671 return 1;
3673 addr = tcg_temp_new_i32();
3674 load_reg_var(s, addr, rn);
3676 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
3677 * VLD2/3/4 to all lanes: bit 5 indicates register stride.
3679 stride = (insn & (1 << 5)) ? 2 : 1;
3680 vec_size = nregs == 1 ? stride * 8 : 8;
3682 tmp = tcg_temp_new_i32();
3683 for (reg = 0; reg < nregs; reg++) {
3684 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
3685 s->be_data | size);
3686 if ((rd & 1) && vec_size == 16) {
3687 /* We cannot write 16 bytes at once because the
3688 * destination is unaligned.
3690 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
3691 8, 8, tmp);
3692 tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
3693 neon_reg_offset(rd, 0), 8, 8);
3694 } else {
3695 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
3696 vec_size, vec_size, tmp);
3698 tcg_gen_addi_i32(addr, addr, 1 << size);
3699 rd += stride;
3701 tcg_temp_free_i32(tmp);
3702 tcg_temp_free_i32(addr);
3703 stride = (1 << size) * nregs;
3704 } else {
3705 /* Single element. */
3706 int idx = (insn >> 4) & 0xf;
3707 int reg_idx;
3708 switch (size) {
3709 case 0:
3710 reg_idx = (insn >> 5) & 7;
3711 stride = 1;
3712 break;
3713 case 1:
3714 reg_idx = (insn >> 6) & 3;
3715 stride = (insn & (1 << 5)) ? 2 : 1;
3716 break;
3717 case 2:
3718 reg_idx = (insn >> 7) & 1;
3719 stride = (insn & (1 << 6)) ? 2 : 1;
3720 break;
3721 default:
3722 abort();
3724 nregs = ((insn >> 8) & 3) + 1;
3725 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
3726 switch (nregs) {
3727 case 1:
3728 if (((idx & (1 << size)) != 0) ||
3729 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
3730 return 1;
3732 break;
3733 case 3:
3734 if ((idx & 1) != 0) {
3735 return 1;
3737 /* fall through */
3738 case 2:
3739 if (size == 2 && (idx & 2) != 0) {
3740 return 1;
3742 break;
3743 case 4:
3744 if ((size == 2) && ((idx & 3) == 3)) {
3745 return 1;
3747 break;
3748 default:
3749 abort();
3751 if ((rd + stride * (nregs - 1)) > 31) {
3752 /* Attempts to write off the end of the register file
3753 * are UNPREDICTABLE; we choose to UNDEF because otherwise
3754 * the neon_load_reg() would write off the end of the array.
3756 return 1;
3758 tmp = tcg_temp_new_i32();
3759 addr = tcg_temp_new_i32();
3760 load_reg_var(s, addr, rn);
3761 for (reg = 0; reg < nregs; reg++) {
3762 if (load) {
3763 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
3764 s->be_data | size);
3765 neon_store_element(rd, reg_idx, size, tmp);
3766 } else { /* Store */
3767 neon_load_element(tmp, rd, reg_idx, size);
3768 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
3769 s->be_data | size);
3771 rd += stride;
3772 tcg_gen_addi_i32(addr, addr, 1 << size);
3774 tcg_temp_free_i32(addr);
3775 tcg_temp_free_i32(tmp);
3776 stride = nregs * (1 << size);
3779 if (rm != 15) {
3780 TCGv_i32 base;
3782 base = load_reg(s, rn);
3783 if (rm == 13) {
3784 tcg_gen_addi_i32(base, base, stride);
3785 } else {
3786 TCGv_i32 index;
3787 index = load_reg(s, rm);
3788 tcg_gen_add_i32(base, base, index);
3789 tcg_temp_free_i32(index);
3791 store_reg(s, rn, base);
3793 return 0;
3796 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
3798 switch (size) {
3799 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3800 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3801 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
3802 default: abort();
3806 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
3808 switch (size) {
3809 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3810 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3811 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3812 default: abort();
3816 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
3818 switch (size) {
3819 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3820 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3821 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3822 default: abort();
3826 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
3828 switch (size) {
3829 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
3830 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
3831 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
3832 default: abort();
3836 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
3837 int q, int u)
3839 if (q) {
3840 if (u) {
3841 switch (size) {
3842 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3843 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3844 default: abort();
3846 } else {
3847 switch (size) {
3848 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3849 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3850 default: abort();
3853 } else {
3854 if (u) {
3855 switch (size) {
3856 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
3857 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
3858 default: abort();
3860 } else {
3861 switch (size) {
3862 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3863 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3864 default: abort();
3870 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
3872 if (u) {
3873 switch (size) {
3874 case 0: gen_helper_neon_widen_u8(dest, src); break;
3875 case 1: gen_helper_neon_widen_u16(dest, src); break;
3876 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3877 default: abort();
3879 } else {
3880 switch (size) {
3881 case 0: gen_helper_neon_widen_s8(dest, src); break;
3882 case 1: gen_helper_neon_widen_s16(dest, src); break;
3883 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3884 default: abort();
3887 tcg_temp_free_i32(src);
3890 static inline void gen_neon_addl(int size)
3892 switch (size) {
3893 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3894 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3895 case 2: tcg_gen_add_i64(CPU_V001); break;
3896 default: abort();
3900 static inline void gen_neon_subl(int size)
3902 switch (size) {
3903 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
3904 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
3905 case 2: tcg_gen_sub_i64(CPU_V001); break;
3906 default: abort();
3910 static inline void gen_neon_negl(TCGv_i64 var, int size)
3912 switch (size) {
3913 case 0: gen_helper_neon_negl_u16(var, var); break;
3914 case 1: gen_helper_neon_negl_u32(var, var); break;
3915 case 2:
3916 tcg_gen_neg_i64(var, var);
3917 break;
3918 default: abort();
3922 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
3924 switch (size) {
3925 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
3926 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
3927 default: abort();
3931 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
3932 int size, int u)
3934 TCGv_i64 tmp;
3936 switch ((size << 1) | u) {
3937 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
3938 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
3939 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
3940 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
3941 case 4:
3942 tmp = gen_muls_i64_i32(a, b);
3943 tcg_gen_mov_i64(dest, tmp);
3944 tcg_temp_free_i64(tmp);
3945 break;
3946 case 5:
3947 tmp = gen_mulu_i64_i32(a, b);
3948 tcg_gen_mov_i64(dest, tmp);
3949 tcg_temp_free_i64(tmp);
3950 break;
3951 default: abort();
3954 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
3955 Don't forget to free them here. */
3956 if (size < 2) {
3957 tcg_temp_free_i32(a);
3958 tcg_temp_free_i32(b);
3962 static void gen_neon_narrow_op(int op, int u, int size,
3963 TCGv_i32 dest, TCGv_i64 src)
3965 if (op) {
3966 if (u) {
3967 gen_neon_unarrow_sats(size, dest, src);
3968 } else {
3969 gen_neon_narrow(size, dest, src);
3971 } else {
3972 if (u) {
3973 gen_neon_narrow_satu(size, dest, src);
3974 } else {
3975 gen_neon_narrow_sats(size, dest, src);
3980 /* Symbolic constants for op fields for Neon 3-register same-length.
3981 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
3982 * table A7-9.
3984 #define NEON_3R_VHADD 0
3985 #define NEON_3R_VQADD 1
3986 #define NEON_3R_VRHADD 2
3987 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
3988 #define NEON_3R_VHSUB 4
3989 #define NEON_3R_VQSUB 5
3990 #define NEON_3R_VCGT 6
3991 #define NEON_3R_VCGE 7
3992 #define NEON_3R_VSHL 8
3993 #define NEON_3R_VQSHL 9
3994 #define NEON_3R_VRSHL 10
3995 #define NEON_3R_VQRSHL 11
3996 #define NEON_3R_VMAX 12
3997 #define NEON_3R_VMIN 13
3998 #define NEON_3R_VABD 14
3999 #define NEON_3R_VABA 15
4000 #define NEON_3R_VADD_VSUB 16
4001 #define NEON_3R_VTST_VCEQ 17
4002 #define NEON_3R_VML 18 /* VMLA, VMLS */
4003 #define NEON_3R_VMUL 19
4004 #define NEON_3R_VPMAX 20
4005 #define NEON_3R_VPMIN 21
4006 #define NEON_3R_VQDMULH_VQRDMULH 22
4007 #define NEON_3R_VPADD_VQRDMLAH 23
4008 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
4009 #define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
4010 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4011 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4012 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4013 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4014 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4015 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
4017 static const uint8_t neon_3r_sizes[] = {
4018 [NEON_3R_VHADD] = 0x7,
4019 [NEON_3R_VQADD] = 0xf,
4020 [NEON_3R_VRHADD] = 0x7,
4021 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4022 [NEON_3R_VHSUB] = 0x7,
4023 [NEON_3R_VQSUB] = 0xf,
4024 [NEON_3R_VCGT] = 0x7,
4025 [NEON_3R_VCGE] = 0x7,
4026 [NEON_3R_VSHL] = 0xf,
4027 [NEON_3R_VQSHL] = 0xf,
4028 [NEON_3R_VRSHL] = 0xf,
4029 [NEON_3R_VQRSHL] = 0xf,
4030 [NEON_3R_VMAX] = 0x7,
4031 [NEON_3R_VMIN] = 0x7,
4032 [NEON_3R_VABD] = 0x7,
4033 [NEON_3R_VABA] = 0x7,
4034 [NEON_3R_VADD_VSUB] = 0xf,
4035 [NEON_3R_VTST_VCEQ] = 0x7,
4036 [NEON_3R_VML] = 0x7,
4037 [NEON_3R_VMUL] = 0x7,
4038 [NEON_3R_VPMAX] = 0x7,
4039 [NEON_3R_VPMIN] = 0x7,
4040 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4041 [NEON_3R_VPADD_VQRDMLAH] = 0x7,
4042 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
4043 [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
4044 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4045 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4046 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4047 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4048 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4049 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
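/*
 * As with neon_2rm_sizes below, each entry above is a bitmask of the
 * size values (0..3) that the op accepts: 0x7 allows 8/16/32-bit
 * elements, 0xf also allows the 64-bit size, and values such as 0x5 or
 * 0x6 reflect cases where part of the size field encodes the operation,
 * as the per-entry comments note.
 */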
4052 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4053 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4054 * table A7-13.
4056 #define NEON_2RM_VREV64 0
4057 #define NEON_2RM_VREV32 1
4058 #define NEON_2RM_VREV16 2
4059 #define NEON_2RM_VPADDL 4
4060 #define NEON_2RM_VPADDL_U 5
4061 #define NEON_2RM_AESE 6 /* Includes AESD */
4062 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
4063 #define NEON_2RM_VCLS 8
4064 #define NEON_2RM_VCLZ 9
4065 #define NEON_2RM_VCNT 10
4066 #define NEON_2RM_VMVN 11
4067 #define NEON_2RM_VPADAL 12
4068 #define NEON_2RM_VPADAL_U 13
4069 #define NEON_2RM_VQABS 14
4070 #define NEON_2RM_VQNEG 15
4071 #define NEON_2RM_VCGT0 16
4072 #define NEON_2RM_VCGE0 17
4073 #define NEON_2RM_VCEQ0 18
4074 #define NEON_2RM_VCLE0 19
4075 #define NEON_2RM_VCLT0 20
4076 #define NEON_2RM_SHA1H 21
4077 #define NEON_2RM_VABS 22
4078 #define NEON_2RM_VNEG 23
4079 #define NEON_2RM_VCGT0_F 24
4080 #define NEON_2RM_VCGE0_F 25
4081 #define NEON_2RM_VCEQ0_F 26
4082 #define NEON_2RM_VCLE0_F 27
4083 #define NEON_2RM_VCLT0_F 28
4084 #define NEON_2RM_VABS_F 30
4085 #define NEON_2RM_VNEG_F 31
4086 #define NEON_2RM_VSWP 32
4087 #define NEON_2RM_VTRN 33
4088 #define NEON_2RM_VUZP 34
4089 #define NEON_2RM_VZIP 35
4090 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4091 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4092 #define NEON_2RM_VSHLL 38
4093 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
4094 #define NEON_2RM_VRINTN 40
4095 #define NEON_2RM_VRINTX 41
4096 #define NEON_2RM_VRINTA 42
4097 #define NEON_2RM_VRINTZ 43
4098 #define NEON_2RM_VCVT_F16_F32 44
4099 #define NEON_2RM_VRINTM 45
4100 #define NEON_2RM_VCVT_F32_F16 46
4101 #define NEON_2RM_VRINTP 47
4102 #define NEON_2RM_VCVTAU 48
4103 #define NEON_2RM_VCVTAS 49
4104 #define NEON_2RM_VCVTNU 50
4105 #define NEON_2RM_VCVTNS 51
4106 #define NEON_2RM_VCVTPU 52
4107 #define NEON_2RM_VCVTPS 53
4108 #define NEON_2RM_VCVTMU 54
4109 #define NEON_2RM_VCVTMS 55
4110 #define NEON_2RM_VRECPE 56
4111 #define NEON_2RM_VRSQRTE 57
4112 #define NEON_2RM_VRECPE_F 58
4113 #define NEON_2RM_VRSQRTE_F 59
4114 #define NEON_2RM_VCVT_FS 60
4115 #define NEON_2RM_VCVT_FU 61
4116 #define NEON_2RM_VCVT_SF 62
4117 #define NEON_2RM_VCVT_UF 63
4119 static bool neon_2rm_is_v8_op(int op)
4121 /* Return true if this neon 2reg-misc op is ARMv8 and up */
4122 switch (op) {
4123 case NEON_2RM_VRINTN:
4124 case NEON_2RM_VRINTA:
4125 case NEON_2RM_VRINTM:
4126 case NEON_2RM_VRINTP:
4127 case NEON_2RM_VRINTZ:
4128 case NEON_2RM_VRINTX:
4129 case NEON_2RM_VCVTAU:
4130 case NEON_2RM_VCVTAS:
4131 case NEON_2RM_VCVTNU:
4132 case NEON_2RM_VCVTNS:
4133 case NEON_2RM_VCVTPU:
4134 case NEON_2RM_VCVTPS:
4135 case NEON_2RM_VCVTMU:
4136 case NEON_2RM_VCVTMS:
4137 return true;
4138 default:
4139 return false;
4143 /* Each entry in this array has bit n set if the insn allows
4144 * size value n (otherwise it will UNDEF). Since unallocated
4145 * op values will have no bits set they always UNDEF.
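/* For example [NEON_2RM_VREV16] = 0x1 allows only byte elements,
 * the 0x4 entries are the single-precision float ops (size 2 only),
 * and the 0x2 entries are the f16<->f32 conversions (size 1 only).
 */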
4147 static const uint8_t neon_2rm_sizes[] = {
4148 [NEON_2RM_VREV64] = 0x7,
4149 [NEON_2RM_VREV32] = 0x3,
4150 [NEON_2RM_VREV16] = 0x1,
4151 [NEON_2RM_VPADDL] = 0x7,
4152 [NEON_2RM_VPADDL_U] = 0x7,
4153 [NEON_2RM_AESE] = 0x1,
4154 [NEON_2RM_AESMC] = 0x1,
4155 [NEON_2RM_VCLS] = 0x7,
4156 [NEON_2RM_VCLZ] = 0x7,
4157 [NEON_2RM_VCNT] = 0x1,
4158 [NEON_2RM_VMVN] = 0x1,
4159 [NEON_2RM_VPADAL] = 0x7,
4160 [NEON_2RM_VPADAL_U] = 0x7,
4161 [NEON_2RM_VQABS] = 0x7,
4162 [NEON_2RM_VQNEG] = 0x7,
4163 [NEON_2RM_VCGT0] = 0x7,
4164 [NEON_2RM_VCGE0] = 0x7,
4165 [NEON_2RM_VCEQ0] = 0x7,
4166 [NEON_2RM_VCLE0] = 0x7,
4167 [NEON_2RM_VCLT0] = 0x7,
4168 [NEON_2RM_SHA1H] = 0x4,
4169 [NEON_2RM_VABS] = 0x7,
4170 [NEON_2RM_VNEG] = 0x7,
4171 [NEON_2RM_VCGT0_F] = 0x4,
4172 [NEON_2RM_VCGE0_F] = 0x4,
4173 [NEON_2RM_VCEQ0_F] = 0x4,
4174 [NEON_2RM_VCLE0_F] = 0x4,
4175 [NEON_2RM_VCLT0_F] = 0x4,
4176 [NEON_2RM_VABS_F] = 0x4,
4177 [NEON_2RM_VNEG_F] = 0x4,
4178 [NEON_2RM_VSWP] = 0x1,
4179 [NEON_2RM_VTRN] = 0x7,
4180 [NEON_2RM_VUZP] = 0x7,
4181 [NEON_2RM_VZIP] = 0x7,
4182 [NEON_2RM_VMOVN] = 0x7,
4183 [NEON_2RM_VQMOVN] = 0x7,
4184 [NEON_2RM_VSHLL] = 0x7,
4185 [NEON_2RM_SHA1SU1] = 0x4,
4186 [NEON_2RM_VRINTN] = 0x4,
4187 [NEON_2RM_VRINTX] = 0x4,
4188 [NEON_2RM_VRINTA] = 0x4,
4189 [NEON_2RM_VRINTZ] = 0x4,
4190 [NEON_2RM_VCVT_F16_F32] = 0x2,
4191 [NEON_2RM_VRINTM] = 0x4,
4192 [NEON_2RM_VCVT_F32_F16] = 0x2,
4193 [NEON_2RM_VRINTP] = 0x4,
4194 [NEON_2RM_VCVTAU] = 0x4,
4195 [NEON_2RM_VCVTAS] = 0x4,
4196 [NEON_2RM_VCVTNU] = 0x4,
4197 [NEON_2RM_VCVTNS] = 0x4,
4198 [NEON_2RM_VCVTPU] = 0x4,
4199 [NEON_2RM_VCVTPS] = 0x4,
4200 [NEON_2RM_VCVTMU] = 0x4,
4201 [NEON_2RM_VCVTMS] = 0x4,
4202 [NEON_2RM_VRECPE] = 0x4,
4203 [NEON_2RM_VRSQRTE] = 0x4,
4204 [NEON_2RM_VRECPE_F] = 0x4,
4205 [NEON_2RM_VRSQRTE_F] = 0x4,
4206 [NEON_2RM_VCVT_FS] = 0x4,
4207 [NEON_2RM_VCVT_FU] = 0x4,
4208 [NEON_2RM_VCVT_SF] = 0x4,
4209 [NEON_2RM_VCVT_UF] = 0x4,
4213 /* Expand v8.1 simd helper. */
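/* Emits the given gvec helper over the whole D register (q == 0,
 * opr_sz = 8 bytes) or Q register (q == 1, 16 bytes) and returns 0;
 * returns 1 when FEAT_RDM (aa32_rdm) is not implemented, so the
 * caller can UNDEF the instruction.
 */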
4214 static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
4215 int q, int rd, int rn, int rm)
4217 if (dc_isar_feature(aa32_rdm, s)) {
4218 int opr_sz = (1 + q) * 8;
4219 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
4220 vfp_reg_offset(1, rn),
4221 vfp_reg_offset(1, rm), cpu_env,
4222 opr_sz, opr_sz, 0, fn);
4223 return 0;
4225 return 1;
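/* Expanders for the shift-right-and-accumulate insns (SSRA/USRA, used by
 * the VSRA case of disas_neon_data_insn): per lane, d += (a >> shift).
 * Each GVecGen2i entry provides a 64-bit or 32-bit scalar fallback
 * (.fni8/.fni4) plus a variable-length vector expander (.fniv), with
 * .load_dest set because the destination is read as well as written.
 */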
4228 static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4230 tcg_gen_vec_sar8i_i64(a, a, shift);
4231 tcg_gen_vec_add8_i64(d, d, a);
4234 static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4236 tcg_gen_vec_sar16i_i64(a, a, shift);
4237 tcg_gen_vec_add16_i64(d, d, a);
4240 static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
4242 tcg_gen_sari_i32(a, a, shift);
4243 tcg_gen_add_i32(d, d, a);
4246 static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4248 tcg_gen_sari_i64(a, a, shift);
4249 tcg_gen_add_i64(d, d, a);
4252 static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4254 tcg_gen_sari_vec(vece, a, a, sh);
4255 tcg_gen_add_vec(vece, d, d, a);
4258 static const TCGOpcode vecop_list_ssra[] = {
4259 INDEX_op_sari_vec, INDEX_op_add_vec, 0
4262 const GVecGen2i ssra_op[4] = {
4263 { .fni8 = gen_ssra8_i64,
4264 .fniv = gen_ssra_vec,
4265 .load_dest = true,
4266 .opt_opc = vecop_list_ssra,
4267 .vece = MO_8 },
4268 { .fni8 = gen_ssra16_i64,
4269 .fniv = gen_ssra_vec,
4270 .load_dest = true,
4271 .opt_opc = vecop_list_ssra,
4272 .vece = MO_16 },
4273 { .fni4 = gen_ssra32_i32,
4274 .fniv = gen_ssra_vec,
4275 .load_dest = true,
4276 .opt_opc = vecop_list_ssra,
4277 .vece = MO_32 },
4278 { .fni8 = gen_ssra64_i64,
4279 .fniv = gen_ssra_vec,
4280 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4281 .opt_opc = vecop_list_ssra,
4282 .load_dest = true,
4283 .vece = MO_64 },
4286 static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4288 tcg_gen_vec_shr8i_i64(a, a, shift);
4289 tcg_gen_vec_add8_i64(d, d, a);
4292 static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4294 tcg_gen_vec_shr16i_i64(a, a, shift);
4295 tcg_gen_vec_add16_i64(d, d, a);
4298 static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
4300 tcg_gen_shri_i32(a, a, shift);
4301 tcg_gen_add_i32(d, d, a);
4304 static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4306 tcg_gen_shri_i64(a, a, shift);
4307 tcg_gen_add_i64(d, d, a);
4310 static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4312 tcg_gen_shri_vec(vece, a, a, sh);
4313 tcg_gen_add_vec(vece, d, d, a);
4316 static const TCGOpcode vecop_list_usra[] = {
4317 INDEX_op_shri_vec, INDEX_op_add_vec, 0
4320 const GVecGen2i usra_op[4] = {
4321 { .fni8 = gen_usra8_i64,
4322 .fniv = gen_usra_vec,
4323 .load_dest = true,
4324 .opt_opc = vecop_list_usra,
4325 .vece = MO_8, },
4326 { .fni8 = gen_usra16_i64,
4327 .fniv = gen_usra_vec,
4328 .load_dest = true,
4329 .opt_opc = vecop_list_usra,
4330 .vece = MO_16, },
4331 { .fni4 = gen_usra32_i32,
4332 .fniv = gen_usra_vec,
4333 .load_dest = true,
4334 .opt_opc = vecop_list_usra,
4335 .vece = MO_32, },
4336 { .fni8 = gen_usra64_i64,
4337 .fniv = gen_usra_vec,
4338 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4339 .load_dest = true,
4340 .opt_opc = vecop_list_usra,
4341 .vece = MO_64, },
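/* Shift-right-and-insert (SRI): per lane the result keeps the top 'shift'
 * bits of d and replaces the rest with a >> shift, i.e.
 * d = (d & ~mask) | ((a >> shift) & mask). The 8/16-bit versions build the
 * mask with dup_const(), the 32/64-bit scalar versions use deposit, and the
 * vector expander special-cases shift == 0 as a plain move. The SLI
 * expanders further down do the same with a left shift, preserving the low
 * 'shift' bits of d instead.
 */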
4344 static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4346 uint64_t mask = dup_const(MO_8, 0xff >> shift);
4347 TCGv_i64 t = tcg_temp_new_i64();
4349 tcg_gen_shri_i64(t, a, shift);
4350 tcg_gen_andi_i64(t, t, mask);
4351 tcg_gen_andi_i64(d, d, ~mask);
4352 tcg_gen_or_i64(d, d, t);
4353 tcg_temp_free_i64(t);
4356 static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4358 uint64_t mask = dup_const(MO_16, 0xffff >> shift);
4359 TCGv_i64 t = tcg_temp_new_i64();
4361 tcg_gen_shri_i64(t, a, shift);
4362 tcg_gen_andi_i64(t, t, mask);
4363 tcg_gen_andi_i64(d, d, ~mask);
4364 tcg_gen_or_i64(d, d, t);
4365 tcg_temp_free_i64(t);
4368 static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
4370 tcg_gen_shri_i32(a, a, shift);
4371 tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
4374 static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4376 tcg_gen_shri_i64(a, a, shift);
4377 tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
4380 static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4382 if (sh == 0) {
4383 tcg_gen_mov_vec(d, a);
4384 } else {
4385 TCGv_vec t = tcg_temp_new_vec_matching(d);
4386 TCGv_vec m = tcg_temp_new_vec_matching(d);
4388 tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
4389 tcg_gen_shri_vec(vece, t, a, sh);
4390 tcg_gen_and_vec(vece, d, d, m);
4391 tcg_gen_or_vec(vece, d, d, t);
4393 tcg_temp_free_vec(t);
4394 tcg_temp_free_vec(m);
4398 static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 };
4400 const GVecGen2i sri_op[4] = {
4401 { .fni8 = gen_shr8_ins_i64,
4402 .fniv = gen_shr_ins_vec,
4403 .load_dest = true,
4404 .opt_opc = vecop_list_sri,
4405 .vece = MO_8 },
4406 { .fni8 = gen_shr16_ins_i64,
4407 .fniv = gen_shr_ins_vec,
4408 .load_dest = true,
4409 .opt_opc = vecop_list_sri,
4410 .vece = MO_16 },
4411 { .fni4 = gen_shr32_ins_i32,
4412 .fniv = gen_shr_ins_vec,
4413 .load_dest = true,
4414 .opt_opc = vecop_list_sri,
4415 .vece = MO_32 },
4416 { .fni8 = gen_shr64_ins_i64,
4417 .fniv = gen_shr_ins_vec,
4418 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4419 .load_dest = true,
4420 .opt_opc = vecop_list_sri,
4421 .vece = MO_64 },
4424 static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4426 uint64_t mask = dup_const(MO_8, 0xff << shift);
4427 TCGv_i64 t = tcg_temp_new_i64();
4429 tcg_gen_shli_i64(t, a, shift);
4430 tcg_gen_andi_i64(t, t, mask);
4431 tcg_gen_andi_i64(d, d, ~mask);
4432 tcg_gen_or_i64(d, d, t);
4433 tcg_temp_free_i64(t);
4436 static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4438 uint64_t mask = dup_const(MO_16, 0xffff << shift);
4439 TCGv_i64 t = tcg_temp_new_i64();
4441 tcg_gen_shli_i64(t, a, shift);
4442 tcg_gen_andi_i64(t, t, mask);
4443 tcg_gen_andi_i64(d, d, ~mask);
4444 tcg_gen_or_i64(d, d, t);
4445 tcg_temp_free_i64(t);
4448 static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
4450 tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
4453 static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4455 tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
4458 static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4460 if (sh == 0) {
4461 tcg_gen_mov_vec(d, a);
4462 } else {
4463 TCGv_vec t = tcg_temp_new_vec_matching(d);
4464 TCGv_vec m = tcg_temp_new_vec_matching(d);
4466 tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
4467 tcg_gen_shli_vec(vece, t, a, sh);
4468 tcg_gen_and_vec(vece, d, d, m);
4469 tcg_gen_or_vec(vece, d, d, t);
4471 tcg_temp_free_vec(t);
4472 tcg_temp_free_vec(m);
4476 static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 };
4478 const GVecGen2i sli_op[4] = {
4479 { .fni8 = gen_shl8_ins_i64,
4480 .fniv = gen_shl_ins_vec,
4481 .load_dest = true,
4482 .opt_opc = vecop_list_sli,
4483 .vece = MO_8 },
4484 { .fni8 = gen_shl16_ins_i64,
4485 .fniv = gen_shl_ins_vec,
4486 .load_dest = true,
4487 .opt_opc = vecop_list_sli,
4488 .vece = MO_16 },
4489 { .fni4 = gen_shl32_ins_i32,
4490 .fniv = gen_shl_ins_vec,
4491 .load_dest = true,
4492 .opt_opc = vecop_list_sli,
4493 .vece = MO_32 },
4494 { .fni8 = gen_shl64_ins_i64,
4495 .fniv = gen_shl_ins_vec,
4496 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4497 .load_dest = true,
4498 .opt_opc = vecop_list_sli,
4499 .vece = MO_64 },
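/* Multiply-accumulate/subtract expanders used by the VMLA/VMLS case:
 * per lane, d = d +/- a * b. The scalar fallbacks clobber 'a' with the
 * product, which is harmless because the gvec expansion operates on
 * per-element temporaries and only the destination is written back.
 */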
4502 static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4504 gen_helper_neon_mul_u8(a, a, b);
4505 gen_helper_neon_add_u8(d, d, a);
4508 static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4510 gen_helper_neon_mul_u8(a, a, b);
4511 gen_helper_neon_sub_u8(d, d, a);
4514 static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4516 gen_helper_neon_mul_u16(a, a, b);
4517 gen_helper_neon_add_u16(d, d, a);
4520 static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4522 gen_helper_neon_mul_u16(a, a, b);
4523 gen_helper_neon_sub_u16(d, d, a);
4526 static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4528 tcg_gen_mul_i32(a, a, b);
4529 tcg_gen_add_i32(d, d, a);
4532 static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4534 tcg_gen_mul_i32(a, a, b);
4535 tcg_gen_sub_i32(d, d, a);
4538 static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4540 tcg_gen_mul_i64(a, a, b);
4541 tcg_gen_add_i64(d, d, a);
4544 static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4546 tcg_gen_mul_i64(a, a, b);
4547 tcg_gen_sub_i64(d, d, a);
4550 static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4552 tcg_gen_mul_vec(vece, a, a, b);
4553 tcg_gen_add_vec(vece, d, d, a);
4556 static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4558 tcg_gen_mul_vec(vece, a, a, b);
4559 tcg_gen_sub_vec(vece, d, d, a);
4562 /* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
4563 * these tables are shared with AArch64 which does support them.
4566 static const TCGOpcode vecop_list_mla[] = {
4567 INDEX_op_mul_vec, INDEX_op_add_vec, 0
4570 static const TCGOpcode vecop_list_mls[] = {
4571 INDEX_op_mul_vec, INDEX_op_sub_vec, 0
4574 const GVecGen3 mla_op[4] = {
4575 { .fni4 = gen_mla8_i32,
4576 .fniv = gen_mla_vec,
4577 .load_dest = true,
4578 .opt_opc = vecop_list_mla,
4579 .vece = MO_8 },
4580 { .fni4 = gen_mla16_i32,
4581 .fniv = gen_mla_vec,
4582 .load_dest = true,
4583 .opt_opc = vecop_list_mla,
4584 .vece = MO_16 },
4585 { .fni4 = gen_mla32_i32,
4586 .fniv = gen_mla_vec,
4587 .load_dest = true,
4588 .opt_opc = vecop_list_mla,
4589 .vece = MO_32 },
4590 { .fni8 = gen_mla64_i64,
4591 .fniv = gen_mla_vec,
4592 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4593 .load_dest = true,
4594 .opt_opc = vecop_list_mla,
4595 .vece = MO_64 },
4598 const GVecGen3 mls_op[4] = {
4599 { .fni4 = gen_mls8_i32,
4600 .fniv = gen_mls_vec,
4601 .load_dest = true,
4602 .opt_opc = vecop_list_mls,
4603 .vece = MO_8 },
4604 { .fni4 = gen_mls16_i32,
4605 .fniv = gen_mls_vec,
4606 .load_dest = true,
4607 .opt_opc = vecop_list_mls,
4608 .vece = MO_16 },
4609 { .fni4 = gen_mls32_i32,
4610 .fniv = gen_mls_vec,
4611 .load_dest = true,
4612 .opt_opc = vecop_list_mls,
4613 .vece = MO_32 },
4614 { .fni8 = gen_mls64_i64,
4615 .fniv = gen_mls_vec,
4616 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4617 .load_dest = true,
4618 .opt_opc = vecop_list_mls,
4619 .vece = MO_64 },
4622 /* CMTST: test is "if ((X & Y) != 0)". */
4623 static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4625 tcg_gen_and_i32(d, a, b);
4626 tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
4627 tcg_gen_neg_i32(d, d);
4630 void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4632 tcg_gen_and_i64(d, a, b);
4633 tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
4634 tcg_gen_neg_i64(d, d);
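/* The scalar versions above build the all-ones/all-zeros result NEON
 * expects by negating the 0/1 value produced by setcond; the vector
 * expander below gets an all-ones mask directly from tcg_gen_cmp_vec
 * against zero.
 */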
4637 static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4639 tcg_gen_and_vec(vece, d, a, b);
4640 tcg_gen_dupi_vec(vece, a, 0);
4641 tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
4644 static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 };
4646 const GVecGen3 cmtst_op[4] = {
4647 { .fni4 = gen_helper_neon_tst_u8,
4648 .fniv = gen_cmtst_vec,
4649 .opt_opc = vecop_list_cmtst,
4650 .vece = MO_8 },
4651 { .fni4 = gen_helper_neon_tst_u16,
4652 .fniv = gen_cmtst_vec,
4653 .opt_opc = vecop_list_cmtst,
4654 .vece = MO_16 },
4655 { .fni4 = gen_cmtst_i32,
4656 .fniv = gen_cmtst_vec,
4657 .opt_opc = vecop_list_cmtst,
4658 .vece = MO_32 },
4659 { .fni8 = gen_cmtst_i64,
4660 .fniv = gen_cmtst_vec,
4661 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4662 .opt_opc = vecop_list_cmtst,
4663 .vece = MO_64 },
4666 static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4667 TCGv_vec a, TCGv_vec b)
4669 TCGv_vec x = tcg_temp_new_vec_matching(t);
4670 tcg_gen_add_vec(vece, x, a, b);
4671 tcg_gen_usadd_vec(vece, t, a, b);
4672 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4673 tcg_gen_or_vec(vece, sat, sat, x);
4674 tcg_temp_free_vec(x);
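/* Saturating-add with QC tracking: compute both the wrapping sum and the
 * saturating sum, compare them, and OR the all-ones-where-they-differ
 * mask into the 'sat' vector (vfp.qc in the callers). The signed add and
 * the subtract variants below follow the same pattern, and .write_aofs in
 * the op tables marks that second operand as read-modify-write.
 */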
4677 static const TCGOpcode vecop_list_uqadd[] = {
4678 INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
4681 const GVecGen4 uqadd_op[4] = {
4682 { .fniv = gen_uqadd_vec,
4683 .fno = gen_helper_gvec_uqadd_b,
4684 .write_aofs = true,
4685 .opt_opc = vecop_list_uqadd,
4686 .vece = MO_8 },
4687 { .fniv = gen_uqadd_vec,
4688 .fno = gen_helper_gvec_uqadd_h,
4689 .write_aofs = true,
4690 .opt_opc = vecop_list_uqadd,
4691 .vece = MO_16 },
4692 { .fniv = gen_uqadd_vec,
4693 .fno = gen_helper_gvec_uqadd_s,
4694 .write_aofs = true,
4695 .opt_opc = vecop_list_uqadd,
4696 .vece = MO_32 },
4697 { .fniv = gen_uqadd_vec,
4698 .fno = gen_helper_gvec_uqadd_d,
4699 .write_aofs = true,
4700 .opt_opc = vecop_list_uqadd,
4701 .vece = MO_64 },
4704 static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4705 TCGv_vec a, TCGv_vec b)
4707 TCGv_vec x = tcg_temp_new_vec_matching(t);
4708 tcg_gen_add_vec(vece, x, a, b);
4709 tcg_gen_ssadd_vec(vece, t, a, b);
4710 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4711 tcg_gen_or_vec(vece, sat, sat, x);
4712 tcg_temp_free_vec(x);
4715 static const TCGOpcode vecop_list_sqadd[] = {
4716 INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
4719 const GVecGen4 sqadd_op[4] = {
4720 { .fniv = gen_sqadd_vec,
4721 .fno = gen_helper_gvec_sqadd_b,
4722 .opt_opc = vecop_list_sqadd,
4723 .write_aofs = true,
4724 .vece = MO_8 },
4725 { .fniv = gen_sqadd_vec,
4726 .fno = gen_helper_gvec_sqadd_h,
4727 .opt_opc = vecop_list_sqadd,
4728 .write_aofs = true,
4729 .vece = MO_16 },
4730 { .fniv = gen_sqadd_vec,
4731 .fno = gen_helper_gvec_sqadd_s,
4732 .opt_opc = vecop_list_sqadd,
4733 .write_aofs = true,
4734 .vece = MO_32 },
4735 { .fniv = gen_sqadd_vec,
4736 .fno = gen_helper_gvec_sqadd_d,
4737 .opt_opc = vecop_list_sqadd,
4738 .write_aofs = true,
4739 .vece = MO_64 },
4742 static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4743 TCGv_vec a, TCGv_vec b)
4745 TCGv_vec x = tcg_temp_new_vec_matching(t);
4746 tcg_gen_sub_vec(vece, x, a, b);
4747 tcg_gen_ussub_vec(vece, t, a, b);
4748 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4749 tcg_gen_or_vec(vece, sat, sat, x);
4750 tcg_temp_free_vec(x);
4753 static const TCGOpcode vecop_list_uqsub[] = {
4754 INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
4757 const GVecGen4 uqsub_op[4] = {
4758 { .fniv = gen_uqsub_vec,
4759 .fno = gen_helper_gvec_uqsub_b,
4760 .opt_opc = vecop_list_uqsub,
4761 .write_aofs = true,
4762 .vece = MO_8 },
4763 { .fniv = gen_uqsub_vec,
4764 .fno = gen_helper_gvec_uqsub_h,
4765 .opt_opc = vecop_list_uqsub,
4766 .write_aofs = true,
4767 .vece = MO_16 },
4768 { .fniv = gen_uqsub_vec,
4769 .fno = gen_helper_gvec_uqsub_s,
4770 .opt_opc = vecop_list_uqsub,
4771 .write_aofs = true,
4772 .vece = MO_32 },
4773 { .fniv = gen_uqsub_vec,
4774 .fno = gen_helper_gvec_uqsub_d,
4775 .opt_opc = vecop_list_uqsub,
4776 .write_aofs = true,
4777 .vece = MO_64 },
4780 static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4781 TCGv_vec a, TCGv_vec b)
4783 TCGv_vec x = tcg_temp_new_vec_matching(t);
4784 tcg_gen_sub_vec(vece, x, a, b);
4785 tcg_gen_sssub_vec(vece, t, a, b);
4786 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4787 tcg_gen_or_vec(vece, sat, sat, x);
4788 tcg_temp_free_vec(x);
4791 static const TCGOpcode vecop_list_sqsub[] = {
4792 INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
4795 const GVecGen4 sqsub_op[4] = {
4796 { .fniv = gen_sqsub_vec,
4797 .fno = gen_helper_gvec_sqsub_b,
4798 .opt_opc = vecop_list_sqsub,
4799 .write_aofs = true,
4800 .vece = MO_8 },
4801 { .fniv = gen_sqsub_vec,
4802 .fno = gen_helper_gvec_sqsub_h,
4803 .opt_opc = vecop_list_sqsub,
4804 .write_aofs = true,
4805 .vece = MO_16 },
4806 { .fniv = gen_sqsub_vec,
4807 .fno = gen_helper_gvec_sqsub_s,
4808 .opt_opc = vecop_list_sqsub,
4809 .write_aofs = true,
4810 .vece = MO_32 },
4811 { .fniv = gen_sqsub_vec,
4812 .fno = gen_helper_gvec_sqsub_d,
4813 .opt_opc = vecop_list_sqsub,
4814 .write_aofs = true,
4815 .vece = MO_64 },
4818 /* Translate a NEON data processing instruction. Return nonzero if the
4819 instruction is invalid.
4820 We process data in a mixture of 32-bit and 64-bit chunks.
4821 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
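/* Decode structure, as handled below:
 *  - bit 23 clear: three registers of the same length
 *  - bit 23 set, bit 4 set: two registers and a shift amount, or
 *    one register and a modified immediate
 *  - bit 23 set, bit 4 clear, size != 3: three registers of different
 *    lengths, or two registers and a scalar
 *  - bit 23 set, bit 4 clear, size == 3: VEXT, two-register misc, etc.
 */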
4823 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
4825 int op;
4826 int q;
4827 int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
4828 int size;
4829 int shift;
4830 int pass;
4831 int count;
4832 int pairwise;
4833 int u;
4834 int vec_size;
4835 uint32_t imm;
4836 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
4837 TCGv_ptr ptr1, ptr2, ptr3;
4838 TCGv_i64 tmp64;
4840 /* FIXME: this access check should not take precedence over UNDEF
4841 * for invalid encodings; we will generate incorrect syndrome information
4842 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4844 if (s->fp_excp_el) {
4845 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
4846 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
4847 return 0;
4850 if (!s->vfp_enabled)
4851 return 1;
4852 q = (insn & (1 << 6)) != 0;
4853 u = (insn >> 24) & 1;
4854 VFP_DREG_D(rd, insn);
4855 VFP_DREG_N(rn, insn);
4856 VFP_DREG_M(rm, insn);
4857 size = (insn >> 20) & 3;
4858 vec_size = q ? 16 : 8;
4859 rd_ofs = neon_reg_offset(rd, 0);
4860 rn_ofs = neon_reg_offset(rn, 0);
4861 rm_ofs = neon_reg_offset(rm, 0);
4863 if ((insn & (1 << 23)) == 0) {
4864 /* Three register same length. */
4865 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4866 /* Catch invalid op and bad size combinations: UNDEF */
4867 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4868 return 1;
4870 /* All insns of this form UNDEF for either this condition or the
4871 * superset of cases "Q==1"; we catch the latter later.
4873 if (q && ((rd | rn | rm) & 1)) {
4874 return 1;
4876 switch (op) {
4877 case NEON_3R_SHA:
4878 /* The SHA-1/SHA-256 3-register instructions require special
4879 * treatment here, as their size field is overloaded as an
4880 * op type selector, and they all consume their input in a
4881 * single pass.
4883 if (!q) {
4884 return 1;
4886 if (!u) { /* SHA-1 */
4887 if (!dc_isar_feature(aa32_sha1, s)) {
4888 return 1;
4890 ptr1 = vfp_reg_ptr(true, rd);
4891 ptr2 = vfp_reg_ptr(true, rn);
4892 ptr3 = vfp_reg_ptr(true, rm);
4893 tmp4 = tcg_const_i32(size);
4894 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
4895 tcg_temp_free_i32(tmp4);
4896 } else { /* SHA-256 */
4897 if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
4898 return 1;
4900 ptr1 = vfp_reg_ptr(true, rd);
4901 ptr2 = vfp_reg_ptr(true, rn);
4902 ptr3 = vfp_reg_ptr(true, rm);
4903 switch (size) {
4904 case 0:
4905 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
4906 break;
4907 case 1:
4908 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
4909 break;
4910 case 2:
4911 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
4912 break;
4915 tcg_temp_free_ptr(ptr1);
4916 tcg_temp_free_ptr(ptr2);
4917 tcg_temp_free_ptr(ptr3);
4918 return 0;
4920 case NEON_3R_VPADD_VQRDMLAH:
4921 if (!u) {
4922 break; /* VPADD */
4924 /* VQRDMLAH */
4925 switch (size) {
4926 case 1:
4927 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
4928 q, rd, rn, rm);
4929 case 2:
4930 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
4931 q, rd, rn, rm);
4933 return 1;
4935 case NEON_3R_VFM_VQRDMLSH:
4936 if (!u) {
4937 /* VFMA, VFMS */
4938 if (size == 1) {
4939 return 1;
4941 break;
4943 /* VQRDMLSH */
4944 switch (size) {
4945 case 1:
4946 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
4947 q, rd, rn, rm);
4948 case 2:
4949 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
4950 q, rd, rn, rm);
4952 return 1;
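/* For the logic ops below the size field encodes the operation,
 * via (u << 2) | size. VBSL, VBIT and VBIF are all expanded with
 * tcg_gen_gvec_bitsel(), whose first source operand is the bit-select
 * mask: VBSL selects with the old destination, VBIT inserts Vn bits
 * where the corresponding Vm bit is 1, and VBIF where it is 0; only
 * the operand order differs.
 */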
4954 case NEON_3R_LOGIC: /* Logic ops. */
4955 switch ((u << 2) | size) {
4956 case 0: /* VAND */
4957 tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
4958 vec_size, vec_size);
4959 break;
4960 case 1: /* VBIC */
4961 tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
4962 vec_size, vec_size);
4963 break;
4964 case 2: /* VORR */
4965 tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
4966 vec_size, vec_size);
4967 break;
4968 case 3: /* VORN */
4969 tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
4970 vec_size, vec_size);
4971 break;
4972 case 4: /* VEOR */
4973 tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
4974 vec_size, vec_size);
4975 break;
4976 case 5: /* VBSL */
4977 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs,
4978 vec_size, vec_size);
4979 break;
4980 case 6: /* VBIT */
4981 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
4982 vec_size, vec_size);
4983 break;
4984 case 7: /* VBIF */
4985 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs,
4986 vec_size, vec_size);
4987 break;
4989 return 0;
4991 case NEON_3R_VADD_VSUB:
4992 if (u) {
4993 tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
4994 vec_size, vec_size);
4995 } else {
4996 tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
4997 vec_size, vec_size);
4999 return 0;
5001 case NEON_3R_VQADD:
5002 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
5003 rn_ofs, rm_ofs, vec_size, vec_size,
5004 (u ? uqadd_op : sqadd_op) + size);
5005 return 0;
5007 case NEON_3R_VQSUB:
5008 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
5009 rn_ofs, rm_ofs, vec_size, vec_size,
5010 (u ? uqsub_op : sqsub_op) + size);
5011 return 0;
5013 case NEON_3R_VMUL: /* VMUL */
5014 if (u) {
5015 /* Polynomial case allows only P8 and is handled below. */
5016 if (size != 0) {
5017 return 1;
5019 } else {
5020 tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
5021 vec_size, vec_size);
5022 return 0;
5024 break;
5026 case NEON_3R_VML: /* VMLA, VMLS */
5027 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
5028 u ? &mls_op[size] : &mla_op[size]);
5029 return 0;
5031 case NEON_3R_VTST_VCEQ:
5032 if (u) { /* VCEQ */
5033 tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
5034 vec_size, vec_size);
5035 } else { /* VTST */
5036 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
5037 vec_size, vec_size, &cmtst_op[size]);
5039 return 0;
5041 case NEON_3R_VCGT:
5042 tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
5043 rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
5044 return 0;
5046 case NEON_3R_VCGE:
5047 tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
5048 rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
5049 return 0;
5051 case NEON_3R_VMAX:
5052 if (u) {
5053 tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
5054 vec_size, vec_size);
5055 } else {
5056 tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
5057 vec_size, vec_size);
5059 return 0;
5060 case NEON_3R_VMIN:
5061 if (u) {
5062 tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
5063 vec_size, vec_size);
5064 } else {
5065 tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
5066 vec_size, vec_size);
5068 return 0;
5071 if (size == 3) {
5072 /* 64-bit element instructions. */
5073 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5074 neon_load_reg64(cpu_V0, rn + pass);
5075 neon_load_reg64(cpu_V1, rm + pass);
5076 switch (op) {
5077 case NEON_3R_VSHL:
5078 if (u) {
5079 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5080 } else {
5081 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5083 break;
5084 case NEON_3R_VQSHL:
5085 if (u) {
5086 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5087 cpu_V1, cpu_V0);
5088 } else {
5089 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5090 cpu_V1, cpu_V0);
5092 break;
5093 case NEON_3R_VRSHL:
5094 if (u) {
5095 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
5096 } else {
5097 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5099 break;
5100 case NEON_3R_VQRSHL:
5101 if (u) {
5102 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5103 cpu_V1, cpu_V0);
5104 } else {
5105 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5106 cpu_V1, cpu_V0);
5108 break;
5109 default:
5110 abort();
5112 neon_store_reg64(cpu_V0, rd + pass);
5114 return 0;
5116 pairwise = 0;
5117 switch (op) {
5118 case NEON_3R_VSHL:
5119 case NEON_3R_VQSHL:
5120 case NEON_3R_VRSHL:
5121 case NEON_3R_VQRSHL:
5123 int rtmp;
5124 /* Shift instruction operands are reversed: the shift count is in Vn and the data in Vm, so swap rn and rm here. */
5125 rtmp = rn;
5126 rn = rm;
5127 rm = rtmp;
5129 break;
5130 case NEON_3R_VPADD_VQRDMLAH:
5131 case NEON_3R_VPMAX:
5132 case NEON_3R_VPMIN:
5133 pairwise = 1;
5134 break;
5135 case NEON_3R_FLOAT_ARITH:
5136 pairwise = (u && size < 2); /* if VPADD (float) */
5137 break;
5138 case NEON_3R_FLOAT_MINMAX:
5139 pairwise = u; /* if VPMIN/VPMAX (float) */
5140 break;
5141 case NEON_3R_FLOAT_CMP:
5142 if (!u && size) {
5143 /* no encoding for U=0 C=1x */
5144 return 1;
5146 break;
5147 case NEON_3R_FLOAT_ACMP:
5148 if (!u) {
5149 return 1;
5151 break;
5152 case NEON_3R_FLOAT_MISC:
5153 /* VMAXNM/VMINNM in ARMv8 */
5154 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
5155 return 1;
5157 break;
5158 case NEON_3R_VFM_VQRDMLSH:
5159 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
5160 return 1;
5162 break;
5163 default:
5164 break;
5167 if (pairwise && q) {
5168 /* All the pairwise insns UNDEF if Q is set */
5169 return 1;
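/* Pairwise ops never have Q set (checked just above), so there are
 * exactly two 32-bit passes: pass 0 combines the two words of Vn,
 * pass 1 the two words of Vm. When rd == rm the per-pass results are
 * staged in the scratch area and only copied to Vd after the loop,
 * so the second source is not clobbered by the first result.
 */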
5172 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5174 if (pairwise) {
5175 /* Pairwise. */
5176 if (pass < 1) {
5177 tmp = neon_load_reg(rn, 0);
5178 tmp2 = neon_load_reg(rn, 1);
5179 } else {
5180 tmp = neon_load_reg(rm, 0);
5181 tmp2 = neon_load_reg(rm, 1);
5183 } else {
5184 /* Elementwise. */
5185 tmp = neon_load_reg(rn, pass);
5186 tmp2 = neon_load_reg(rm, pass);
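/* GEN_NEON_INTEGER_OP{,_ENV}, defined earlier in this file, expand
 * roughly to a switch on (size << 1) | u that calls the matching
 * gen_helper_neon_<op>_{s,u}{8,16,32} helper on tmp/tmp2 in place;
 * the _ENV variant additionally passes cpu_env for helpers that need
 * CPU state (e.g. to set the saturation flag).
 */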
5188 switch (op) {
5189 case NEON_3R_VHADD:
5190 GEN_NEON_INTEGER_OP(hadd);
5191 break;
5192 case NEON_3R_VRHADD:
5193 GEN_NEON_INTEGER_OP(rhadd);
5194 break;
5195 case NEON_3R_VHSUB:
5196 GEN_NEON_INTEGER_OP(hsub);
5197 break;
5198 case NEON_3R_VSHL:
5199 GEN_NEON_INTEGER_OP(shl);
5200 break;
5201 case NEON_3R_VQSHL:
5202 GEN_NEON_INTEGER_OP_ENV(qshl);
5203 break;
5204 case NEON_3R_VRSHL:
5205 GEN_NEON_INTEGER_OP(rshl);
5206 break;
5207 case NEON_3R_VQRSHL:
5208 GEN_NEON_INTEGER_OP_ENV(qrshl);
5209 break;
5210 case NEON_3R_VABD:
5211 GEN_NEON_INTEGER_OP(abd);
5212 break;
5213 case NEON_3R_VABA:
5214 GEN_NEON_INTEGER_OP(abd);
5215 tcg_temp_free_i32(tmp2);
5216 tmp2 = neon_load_reg(rd, pass);
5217 gen_neon_add(size, tmp, tmp2);
5218 break;
5219 case NEON_3R_VMUL:
5220 /* VMUL.P8; other cases already eliminated. */
5221 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
5222 break;
5223 case NEON_3R_VPMAX:
5224 GEN_NEON_INTEGER_OP(pmax);
5225 break;
5226 case NEON_3R_VPMIN:
5227 GEN_NEON_INTEGER_OP(pmin);
5228 break;
5229 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
5230 if (!u) { /* VQDMULH */
5231 switch (size) {
5232 case 1:
5233 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5234 break;
5235 case 2:
5236 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5237 break;
5238 default: abort();
5240 } else { /* VQRDMULH */
5241 switch (size) {
5242 case 1:
5243 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5244 break;
5245 case 2:
5246 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5247 break;
5248 default: abort();
5251 break;
5252 case NEON_3R_VPADD_VQRDMLAH:
5253 switch (size) {
5254 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5255 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5256 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
5257 default: abort();
5259 break;
5260 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
5262 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5263 switch ((u << 2) | size) {
5264 case 0: /* VADD */
5265 case 4: /* VPADD */
5266 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5267 break;
5268 case 2: /* VSUB */
5269 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
5270 break;
5271 case 6: /* VABD */
5272 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5273 break;
5274 default:
5275 abort();
5277 tcg_temp_free_ptr(fpstatus);
5278 break;
5280 case NEON_3R_FLOAT_MULTIPLY:
5282 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5283 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5284 if (!u) {
5285 tcg_temp_free_i32(tmp2);
5286 tmp2 = neon_load_reg(rd, pass);
5287 if (size == 0) {
5288 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5289 } else {
5290 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5293 tcg_temp_free_ptr(fpstatus);
5294 break;
5296 case NEON_3R_FLOAT_CMP:
5298 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5299 if (!u) {
5300 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
5301 } else {
5302 if (size == 0) {
5303 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5304 } else {
5305 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5308 tcg_temp_free_ptr(fpstatus);
5309 break;
5311 case NEON_3R_FLOAT_ACMP:
5313 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5314 if (size == 0) {
5315 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5316 } else {
5317 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5319 tcg_temp_free_ptr(fpstatus);
5320 break;
5322 case NEON_3R_FLOAT_MINMAX:
5324 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5325 if (size == 0) {
5326 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
5327 } else {
5328 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
5330 tcg_temp_free_ptr(fpstatus);
5331 break;
5333 case NEON_3R_FLOAT_MISC:
5334 if (u) {
5335 /* VMAXNM/VMINNM */
5336 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5337 if (size == 0) {
5338 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
5339 } else {
5340 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
5342 tcg_temp_free_ptr(fpstatus);
5343 } else {
5344 if (size == 0) {
5345 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5346 } else {
5347 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5350 break;
5351 case NEON_3R_VFM_VQRDMLSH:
5353 /* VFMA, VFMS: fused multiply-add */
5354 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5355 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5356 if (size) {
5357 /* VFMS */
5358 gen_helper_vfp_negs(tmp, tmp);
5360 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5361 tcg_temp_free_i32(tmp3);
5362 tcg_temp_free_ptr(fpstatus);
5363 break;
5365 default:
5366 abort();
5368 tcg_temp_free_i32(tmp2);
5370 /* Save the result. For elementwise operations we can put it
5371 straight into the destination register. For pairwise operations
5372 we have to be careful to avoid clobbering the source operands. */
5373 if (pairwise && rd == rm) {
5374 neon_store_scratch(pass, tmp);
5375 } else {
5376 neon_store_reg(rd, pass, tmp);
5379 } /* for pass */
5380 if (pairwise && rd == rm) {
5381 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5382 tmp = neon_load_scratch(pass);
5383 neon_store_reg(rd, pass, tmp);
5386 /* End of 3 register same size operations. */
5387 } else if (insn & (1 << 4)) {
5388 if ((insn & 0x00380080) != 0) {
5389 /* Two registers and shift. */
5390 op = (insn >> 8) & 0xf;
5391 if (insn & (1 << 7)) {
5392 /* 64-bit shift. */
5393 if (op > 7) {
5394 return 1;
5396 size = 3;
5397 } else {
5398 size = 2;
5399 while ((insn & (1 << (size + 19))) == 0)
5400 size--;
5402 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5403 if (op < 8) {
5404 /* Shift by immediate:
5405 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5406 if (q && ((rd | rm) & 1)) {
5407 return 1;
5409 if (!u && (op == 4 || op == 6)) {
5410 return 1;
5412 /* Right shifts are encoded as N - shift, where N is the
5413 element size in bits. */
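/* e.g. for 8-bit elements a field value of 1 encodes a shift of 7,
 * and 0 encodes the maximum shift of 8. */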
5414 if (op <= 4) {
5415 shift = shift - (1 << (size + 3));
5418 switch (op) {
5419 case 0: /* VSHR */
5420 /* Right shift comes here negative. */
5421 shift = -shift;
5422 /* Shifts larger than the element size are architecturally
5423 * valid. Unsigned results in all zeros; signed results
5424 * in all sign bits.
5426 if (!u) {
5427 tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
5428 MIN(shift, (8 << size) - 1),
5429 vec_size, vec_size);
5430 } else if (shift >= 8 << size) {
5431 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5432 } else {
5433 tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
5434 vec_size, vec_size);
5436 return 0;
5438 case 1: /* VSRA */
5439 /* Right shift comes here negative. */
5440 shift = -shift;
5441 /* Shifts larger than the element size are architecturally
5442 * valid. Unsigned results in all zeros; signed results
5443 * in all sign bits.
5445 if (!u) {
5446 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5447 MIN(shift, (8 << size) - 1),
5448 &ssra_op[size]);
5449 } else if (shift >= 8 << size) {
5450 /* rd += 0 */
5451 } else {
5452 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5453 shift, &usra_op[size]);
5455 return 0;
5457 case 4: /* VSRI */
5458 if (!u) {
5459 return 1;
5461 /* Right shift comes here negative. */
5462 shift = -shift;
5463 /* Shift out of range leaves destination unchanged. */
5464 if (shift < 8 << size) {
5465 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5466 shift, &sri_op[size]);
5468 return 0;
5470 case 5: /* VSHL, VSLI */
5471 if (u) { /* VSLI */
5472 /* Shift out of range leaves destination unchanged. */
5473 if (shift < 8 << size) {
5474 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
5475 vec_size, shift, &sli_op[size]);
5477 } else { /* VSHL */
5478 /* Shifts larger than the element size are
5479 * architecturally valid and result in zero.
5481 if (shift >= 8 << size) {
5482 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5483 } else {
5484 tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
5485 vec_size, vec_size);
5488 return 0;
5491 if (size == 3) {
5492 count = q + 1;
5493 } else {
5494 count = q ? 4: 2;
5497 /* To avoid excessive duplication of ops we implement shift
5498 * by immediate using the variable shift operations.
5500 imm = dup_const(size, shift);
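/* For VRSHR/VRSRA 'shift' is still negative at this point; replicating
 * it into every lane lets the variable-shift helpers do the work, since
 * they treat a negative per-lane count as a right shift.
 */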
5502 for (pass = 0; pass < count; pass++) {
5503 if (size == 3) {
5504 neon_load_reg64(cpu_V0, rm + pass);
5505 tcg_gen_movi_i64(cpu_V1, imm);
5506 switch (op) {
5507 case 2: /* VRSHR */
5508 case 3: /* VRSRA */
5509 if (u)
5510 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5511 else
5512 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5513 break;
5514 case 6: /* VQSHLU */
5515 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5516 cpu_V0, cpu_V1);
5517 break;
5518 case 7: /* VQSHL */
5519 if (u) {
5520 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5521 cpu_V0, cpu_V1);
5522 } else {
5523 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5524 cpu_V0, cpu_V1);
5526 break;
5527 default:
5528 g_assert_not_reached();
5530 if (op == 3) {
5531 /* Accumulate. */
5532 neon_load_reg64(cpu_V1, rd + pass);
5533 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5535 neon_store_reg64(cpu_V0, rd + pass);
5536 } else { /* size < 3 */
5537 /* Operands in T0 and T1. */
5538 tmp = neon_load_reg(rm, pass);
5539 tmp2 = tcg_temp_new_i32();
5540 tcg_gen_movi_i32(tmp2, imm);
5541 switch (op) {
5542 case 2: /* VRSHR */
5543 case 3: /* VRSRA */
5544 GEN_NEON_INTEGER_OP(rshl);
5545 break;
5546 case 6: /* VQSHLU */
5547 switch (size) {
5548 case 0:
5549 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5550 tmp, tmp2);
5551 break;
5552 case 1:
5553 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5554 tmp, tmp2);
5555 break;
5556 case 2:
5557 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5558 tmp, tmp2);
5559 break;
5560 default:
5561 abort();
5563 break;
5564 case 7: /* VQSHL */
5565 GEN_NEON_INTEGER_OP_ENV(qshl);
5566 break;
5567 default:
5568 g_assert_not_reached();
5570 tcg_temp_free_i32(tmp2);
5572 if (op == 3) {
5573 /* Accumulate. */
5574 tmp2 = neon_load_reg(rd, pass);
5575 gen_neon_add(size, tmp, tmp2);
5576 tcg_temp_free_i32(tmp2);
5578 neon_store_reg(rd, pass, tmp);
5580 } /* for pass */
5581 } else if (op < 10) {
5582 /* Shift by immediate and narrow:
5583 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5584 int input_unsigned = (op == 8) ? !u : u;
5585 if (rm & 1) {
5586 return 1;
5588 shift = shift - (1 << (size + 3));
5589 size++;
5590 if (size == 3) {
5591 tmp64 = tcg_const_i64(shift);
5592 neon_load_reg64(cpu_V0, rm);
5593 neon_load_reg64(cpu_V1, rm + 1);
5594 for (pass = 0; pass < 2; pass++) {
5595 TCGv_i64 in;
5596 if (pass == 0) {
5597 in = cpu_V0;
5598 } else {
5599 in = cpu_V1;
5601 if (q) {
5602 if (input_unsigned) {
5603 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5604 } else {
5605 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5607 } else {
5608 if (input_unsigned) {
5609 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5610 } else {
5611 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5614 tmp = tcg_temp_new_i32();
5615 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5616 neon_store_reg(rd, pass, tmp);
5617 } /* for pass */
5618 tcg_temp_free_i64(tmp64);
5619 } else {
5620 if (size == 1) {
5621 imm = (uint16_t)shift;
5622 imm |= imm << 16;
5623 } else {
5624 /* size == 2 */
5625 imm = (uint32_t)shift;
5627 tmp2 = tcg_const_i32(imm);
5628 tmp4 = neon_load_reg(rm + 1, 0);
5629 tmp5 = neon_load_reg(rm + 1, 1);
5630 for (pass = 0; pass < 2; pass++) {
5631 if (pass == 0) {
5632 tmp = neon_load_reg(rm, 0);
5633 } else {
5634 tmp = tmp4;
5636 gen_neon_shift_narrow(size, tmp, tmp2, q,
5637 input_unsigned);
5638 if (pass == 0) {
5639 tmp3 = neon_load_reg(rm, 1);
5640 } else {
5641 tmp3 = tmp5;
5643 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5644 input_unsigned);
5645 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5646 tcg_temp_free_i32(tmp);
5647 tcg_temp_free_i32(tmp3);
5648 tmp = tcg_temp_new_i32();
5649 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5650 neon_store_reg(rd, pass, tmp);
5651 } /* for pass */
5652 tcg_temp_free_i32(tmp2);
5654 } else if (op == 10) {
5655 /* VSHLL, VMOVL */
5656 if (q || (rd & 1)) {
5657 return 1;
5659 tmp = neon_load_reg(rm, 0);
5660 tmp2 = neon_load_reg(rm, 1);
5661 for (pass = 0; pass < 2; pass++) {
5662 if (pass == 1)
5663 tmp = tmp2;
5665 gen_neon_widen(cpu_V0, tmp, size, u);
5667 if (shift != 0) {
5668 /* The shift is less than the width of the source
5669 type, so we can just shift the whole register. */
5670 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5671 /* Widen the result of shift: we need to clear
5672 * the potential overflow bits resulting from
5673 * left bits of the narrow input appearing as
5674 * right bits of the left neighbour narrow
5675 * input. */
5676 if (size < 2 || !u) {
5677 uint64_t imm64;
5678 if (size == 0) {
5679 imm = (0xffu >> (8 - shift));
5680 imm |= imm << 16;
5681 } else if (size == 1) {
5682 imm = 0xffff >> (16 - shift);
5683 } else {
5684 /* size == 2 */
5685 imm = 0xffffffff >> (32 - shift);
5687 if (size < 2) {
5688 imm64 = imm | (((uint64_t)imm) << 32);
5689 } else {
5690 imm64 = imm;
5692 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5695 neon_store_reg64(cpu_V0, rd + pass);
5697 } else if (op >= 14) {
5698 /* VCVT fixed-point. */
5699 TCGv_ptr fpst;
5700 TCGv_i32 shiftv;
5701 VFPGenFixPointFn *fn;
5703 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5704 return 1;
5707 if (!(op & 1)) {
5708 if (u) {
5709 fn = gen_helper_vfp_ultos;
5710 } else {
5711 fn = gen_helper_vfp_sltos;
5713 } else {
5714 if (u) {
5715 fn = gen_helper_vfp_touls_round_to_zero;
5716 } else {
5717 fn = gen_helper_vfp_tosls_round_to_zero;
5721 /* We have already masked out the must-be-1 top bit of imm6,
5722 * hence this 32-shift where the ARM ARM has 64-imm6.
5724 shift = 32 - shift;
5725 fpst = get_fpstatus_ptr(1);
5726 shiftv = tcg_const_i32(shift);
5727 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5728 TCGv_i32 tmpf = neon_load_reg(rm, pass);
5729 fn(tmpf, tmpf, shiftv, fpst);
5730 neon_store_reg(rd, pass, tmpf);
5732 tcg_temp_free_ptr(fpst);
5733 tcg_temp_free_i32(shiftv);
5734 } else {
5735 return 1;
5737 } else { /* (insn & 0x00380080) == 0 */
5738 int invert, reg_ofs, vec_size;
5740 if (q && (rd & 1)) {
5741 return 1;
5744 op = (insn >> 8) & 0xf;
5745 /* One register and immediate. */
5746 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5747 invert = (insn & (1 << 5)) != 0;
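/* The switch below positions or replicates the 8-bit 'abcdefgh'
 * immediate according to the cmode encoding in 'op': 0..7 place the
 * byte in one of the four byte positions of a 32-bit word, 8..11 in
 * one of the two byte positions of each halfword, 12/13 are the
 * "shifted ones" forms, 14 is the byte splat (or the per-byte mask,
 * handled further down) and 15 is the floating-point form.
 */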
5748 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5749 * We choose not to special-case this and will behave as if a
5750 * valid constant encoding of 0 had been given.
5752 switch (op) {
5753 case 0: case 1:
5754 /* no-op */
5755 break;
5756 case 2: case 3:
5757 imm <<= 8;
5758 break;
5759 case 4: case 5:
5760 imm <<= 16;
5761 break;
5762 case 6: case 7:
5763 imm <<= 24;
5764 break;
5765 case 8: case 9:
5766 imm |= imm << 16;
5767 break;
5768 case 10: case 11:
5769 imm = (imm << 8) | (imm << 24);
5770 break;
5771 case 12:
5772 imm = (imm << 8) | 0xff;
5773 break;
5774 case 13:
5775 imm = (imm << 16) | 0xffff;
5776 break;
5777 case 14:
5778 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5779 if (invert) {
5780 imm = ~imm;
5782 break;
5783 case 15:
5784 if (invert) {
5785 return 1;
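/* Build the single-precision modified immediate: bit 31 from imm bit 7,
 * bit 30 = NOT(imm bit 6), bits 29:25 replicate imm bit 6, bits 24:19
 * take imm bits 5:0, and the remaining fraction bits are zero.
 */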
5787 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5788 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5789 break;
5791 if (invert) {
5792 imm = ~imm;
5795 reg_ofs = neon_reg_offset(rd, 0);
5796 vec_size = q ? 16 : 8;
5798 if (op & 1 && op < 12) {
5799 if (invert) {
5800 /* The immediate value has already been inverted,
5801 * so BIC becomes AND.
5803 tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
5804 vec_size, vec_size);
5805 } else {
5806 tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
5807 vec_size, vec_size);
5809 } else {
5810 /* VMOV, VMVN. */
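/* op == 14 with 'invert' set is the per-byte mask form: the inversion
 * inside case 14 above cancels the later "if (invert)" inversion that
 * follows the switch, so 'imm' still holds the original bit pattern
 * here, and each set bit selects an 0xff byte of the 64-bit result.
 * That value cannot be expressed as a 32-bit splat, hence the
 * per-doubleword expansion below instead of tcg_gen_gvec_dup32i().
 */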
5811 if (op == 14 && invert) {
5812 TCGv_i64 t64 = tcg_temp_new_i64();
5814 for (pass = 0; pass <= q; ++pass) {
5815 uint64_t val = 0;
5816 int n;
5818 for (n = 0; n < 8; n++) {
5819 if (imm & (1 << (n + pass * 8))) {
5820 val |= 0xffull << (n * 8);
5823 tcg_gen_movi_i64(t64, val);
5824 neon_store_reg64(t64, rd + pass);
5826 tcg_temp_free_i64(t64);
5827 } else {
5828 tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
5832 } else { /* (insn & 0x00800010 == 0x00800000) */
5833 if (size != 3) {
5834 op = (insn >> 8) & 0xf;
5835 if ((insn & (1 << 6)) == 0) {
5836 /* Three registers of different lengths. */
5837 int src1_wide;
5838 int src2_wide;
5839 int prewiden;
5840 /* undefreq: bit 0 : UNDEF if size == 0
5841 * bit 1 : UNDEF if size == 1
5842 * bit 2 : UNDEF if size == 2
5843 * bit 3 : UNDEF if U == 1
5844 * Note that [2:0] set implies 'always UNDEF'
5846 int undefreq;
5847 /* prewiden, src1_wide, src2_wide, undefreq */
5848 static const int neon_3reg_wide[16][4] = {
5849 {1, 0, 0, 0}, /* VADDL */
5850 {1, 1, 0, 0}, /* VADDW */
5851 {1, 0, 0, 0}, /* VSUBL */
5852 {1, 1, 0, 0}, /* VSUBW */
5853 {0, 1, 1, 0}, /* VADDHN */
5854 {0, 0, 0, 0}, /* VABAL */
5855 {0, 1, 1, 0}, /* VSUBHN */
5856 {0, 0, 0, 0}, /* VABDL */
5857 {0, 0, 0, 0}, /* VMLAL */
5858 {0, 0, 0, 9}, /* VQDMLAL */
5859 {0, 0, 0, 0}, /* VMLSL */
5860 {0, 0, 0, 9}, /* VQDMLSL */
5861 {0, 0, 0, 0}, /* Integer VMULL */
5862 {0, 0, 0, 1}, /* VQDMULL */
5863 {0, 0, 0, 0xa}, /* Polynomial VMULL */
5864 {0, 0, 0, 7}, /* Reserved: always UNDEF */
5867 prewiden = neon_3reg_wide[op][0];
5868 src1_wide = neon_3reg_wide[op][1];
5869 src2_wide = neon_3reg_wide[op][2];
5870 undefreq = neon_3reg_wide[op][3];
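/* undefreq examples: VQDMLAL/VQDMLSL use 9 (bits 0 and 3), so they UNDEF
 * for size == 0 or U == 1; polynomial VMULL uses 0xa (bits 1 and 3), so
 * it UNDEFs for size == 1 and for U == 1; 7 marks an always-UNDEF
 * encoding since every size bit is set.
 */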
5872 if ((undefreq & (1 << size)) ||
5873 ((undefreq & 8) && u)) {
5874 return 1;
5876 if ((src1_wide && (rn & 1)) ||
5877 (src2_wide && (rm & 1)) ||
5878 (!src2_wide && (rd & 1))) {
5879 return 1;
5882 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
5883 * outside the loop below as it only performs a single pass.
5885 if (op == 14 && size == 2) {
5886 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
5888 if (!dc_isar_feature(aa32_pmull, s)) {
5889 return 1;
5891 tcg_rn = tcg_temp_new_i64();
5892 tcg_rm = tcg_temp_new_i64();
5893 tcg_rd = tcg_temp_new_i64();
5894 neon_load_reg64(tcg_rn, rn);
5895 neon_load_reg64(tcg_rm, rm);
5896 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
5897 neon_store_reg64(tcg_rd, rd);
5898 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
5899 neon_store_reg64(tcg_rd, rd + 1);
5900 tcg_temp_free_i64(tcg_rn);
5901 tcg_temp_free_i64(tcg_rm);
5902 tcg_temp_free_i64(tcg_rd);
5903 return 0;
5906 /* Avoid overlapping operands. Wide source operands are
5907 always aligned so will never overlap with wide
5908 destinations in problematic ways. */
5909 if (rd == rm && !src2_wide) {
5910 tmp = neon_load_reg(rm, 1);
5911 neon_store_scratch(2, tmp);
5912 } else if (rd == rn && !src1_wide) {
5913 tmp = neon_load_reg(rn, 1);
5914 neon_store_scratch(2, tmp);
5916 tmp3 = NULL;
5917 for (pass = 0; pass < 2; pass++) {
5918 if (src1_wide) {
5919 neon_load_reg64(cpu_V0, rn + pass);
5920 tmp = NULL;
5921 } else {
5922 if (pass == 1 && rd == rn) {
5923 tmp = neon_load_scratch(2);
5924 } else {
5925 tmp = neon_load_reg(rn, pass);
5927 if (prewiden) {
5928 gen_neon_widen(cpu_V0, tmp, size, u);
5931 if (src2_wide) {
5932 neon_load_reg64(cpu_V1, rm + pass);
5933 tmp2 = NULL;
5934 } else {
5935 if (pass == 1 && rd == rm) {
5936 tmp2 = neon_load_scratch(2);
5937 } else {
5938 tmp2 = neon_load_reg(rm, pass);
5940 if (prewiden) {
5941 gen_neon_widen(cpu_V1, tmp2, size, u);
5944 switch (op) {
5945 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5946 gen_neon_addl(size);
5947 break;
5948 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5949 gen_neon_subl(size);
5950 break;
5951 case 5: case 7: /* VABAL, VABDL */
5952 switch ((size << 1) | u) {
5953 case 0:
5954 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5955 break;
5956 case 1:
5957 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5958 break;
5959 case 2:
5960 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5961 break;
5962 case 3:
5963 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5964 break;
5965 case 4:
5966 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5967 break;
5968 case 5:
5969 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5970 break;
5971 default: abort();
5973 tcg_temp_free_i32(tmp2);
5974 tcg_temp_free_i32(tmp);
5975 break;
5976 case 8: case 9: case 10: case 11: case 12: case 13:
5977 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5978 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5979 break;
5980 case 14: /* Polynomial VMULL */
5981 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5982 tcg_temp_free_i32(tmp2);
5983 tcg_temp_free_i32(tmp);
5984 break;
5985 default: /* 15 is RESERVED: caught earlier */
5986 abort();
5988 if (op == 13) {
5989 /* VQDMULL */
5990 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5991 neon_store_reg64(cpu_V0, rd + pass);
5992 } else if (op == 5 || (op >= 8 && op <= 11)) {
5993 /* Accumulate. */
5994 neon_load_reg64(cpu_V1, rd + pass);
5995 switch (op) {
5996 case 10: /* VMLSL */
5997 gen_neon_negl(cpu_V0, size);
5998 /* Fall through */
5999 case 5: case 8: /* VABAL, VMLAL */
6000 gen_neon_addl(size);
6001 break;
6002 case 9: case 11: /* VQDMLAL, VQDMLSL */
6003 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6004 if (op == 11) {
6005 gen_neon_negl(cpu_V0, size);
6007 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6008 break;
6009 default:
6010 abort();
6012 neon_store_reg64(cpu_V0, rd + pass);
6013 } else if (op == 4 || op == 6) {
6014 /* Narrowing operation. */
6015 tmp = tcg_temp_new_i32();
6016 if (!u) {
6017 switch (size) {
6018 case 0:
6019 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6020 break;
6021 case 1:
6022 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6023 break;
6024 case 2:
6025 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6026 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6027 break;
6028 default: abort();
6030 } else {
6031 switch (size) {
6032 case 0:
6033 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6034 break;
6035 case 1:
6036 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6037 break;
6038 case 2:
6039 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6040 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6041 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6042 break;
6043 default: abort();
6046 if (pass == 0) {
6047 tmp3 = tmp;
6048 } else {
6049 neon_store_reg(rd, 0, tmp3);
6050 neon_store_reg(rd, 1, tmp);
6052 } else {
6053 /* Write back the result. */
6054 neon_store_reg64(cpu_V0, rd + pass);
6057 } else {
6058 /* Two registers and a scalar. NB that for ops of this form
6059 * the ARM ARM labels bit 24 as Q, but it is in our variable
6060 * 'u', not 'q'.
6062 if (size == 0) {
6063 return 1;
6065 switch (op) {
6066 case 1: /* Floating point VMLA scalar */
6067 case 5: /* Floating point VMLS scalar */
6068 case 9: /* Floating point VMUL scalar */
6069 if (size == 1) {
6070 return 1;
6072 /* fall through */
6073 case 0: /* Integer VMLA scalar */
6074 case 4: /* Integer VMLS scalar */
6075 case 8: /* Integer VMUL scalar */
6076 case 12: /* VQDMULH scalar */
6077 case 13: /* VQRDMULH scalar */
6078 if (u && ((rd | rn) & 1)) {
6079 return 1;
6081 tmp = neon_get_scalar(size, rm);
6082 neon_store_scratch(0, tmp);
6083 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6084 tmp = neon_load_scratch(0);
6085 tmp2 = neon_load_reg(rn, pass);
6086 if (op == 12) {
6087 if (size == 1) {
6088 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6089 } else {
6090 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6092 } else if (op == 13) {
6093 if (size == 1) {
6094 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6095 } else {
6096 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6098 } else if (op & 1) {
6099 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6100 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6101 tcg_temp_free_ptr(fpstatus);
6102 } else {
6103 switch (size) {
6104 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6105 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6106 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6107 default: abort();
6110 tcg_temp_free_i32(tmp2);
6111 if (op < 8) {
6112 /* Accumulate. */
6113 tmp2 = neon_load_reg(rd, pass);
6114 switch (op) {
6115 case 0:
6116 gen_neon_add(size, tmp, tmp2);
6117 break;
6118 case 1:
6120 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6121 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6122 tcg_temp_free_ptr(fpstatus);
6123 break;
6125 case 4:
6126 gen_neon_rsb(size, tmp, tmp2);
6127 break;
6128 case 5:
6130 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6131 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6132 tcg_temp_free_ptr(fpstatus);
6133 break;
6135 default:
6136 abort();
6138 tcg_temp_free_i32(tmp2);
6140 neon_store_reg(rd, pass, tmp);
6142 break;
6143 case 3: /* VQDMLAL scalar */
6144 case 7: /* VQDMLSL scalar */
6145 case 11: /* VQDMULL scalar */
6146 if (u == 1) {
6147 return 1;
6149 /* fall through */
6150 case 2: /* VMLAL scalar */
6151 case 6: /* VMLSL scalar */
6152 case 10: /* VMULL scalar */
6153 if (rd & 1) {
6154 return 1;
6156 tmp2 = neon_get_scalar(size, rm);
6157 /* We need a copy of tmp2 because gen_neon_mull
6158 * deletes it during pass 0. */
6159 tmp4 = tcg_temp_new_i32();
6160 tcg_gen_mov_i32(tmp4, tmp2);
6161 tmp3 = neon_load_reg(rn, 1);
6163 for (pass = 0; pass < 2; pass++) {
6164 if (pass == 0) {
6165 tmp = neon_load_reg(rn, 0);
6166 } else {
6167 tmp = tmp3;
6168 tmp2 = tmp4;
6170 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6171 if (op != 11) {
6172 neon_load_reg64(cpu_V1, rd + pass);
6174 switch (op) {
6175 case 6:
6176 gen_neon_negl(cpu_V0, size);
6177 /* Fall through */
6178 case 2:
6179 gen_neon_addl(size);
6180 break;
6181 case 3: case 7:
6182 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6183 if (op == 7) {
6184 gen_neon_negl(cpu_V0, size);
6186 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6187 break;
6188 case 10:
6189 /* no-op */
6190 break;
6191 case 11:
6192 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6193 break;
6194 default:
6195 abort();
6197 neon_store_reg64(cpu_V0, rd + pass);
6199 break;
6200 case 14: /* VQRDMLAH scalar */
6201 case 15: /* VQRDMLSH scalar */
6203 NeonGenThreeOpEnvFn *fn;
6205 if (!dc_isar_feature(aa32_rdm, s)) {
6206 return 1;
6208 if (u && ((rd | rn) & 1)) {
6209 return 1;
6211 if (op == 14) {
6212 if (size == 1) {
6213 fn = gen_helper_neon_qrdmlah_s16;
6214 } else {
6215 fn = gen_helper_neon_qrdmlah_s32;
6217 } else {
6218 if (size == 1) {
6219 fn = gen_helper_neon_qrdmlsh_s16;
6220 } else {
6221 fn = gen_helper_neon_qrdmlsh_s32;
6225 tmp2 = neon_get_scalar(size, rm);
6226 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6227 tmp = neon_load_reg(rn, pass);
6228 tmp3 = neon_load_reg(rd, pass);
6229 fn(tmp, cpu_env, tmp, tmp2, tmp3);
6230 tcg_temp_free_i32(tmp3);
6231 neon_store_reg(rd, pass, tmp);
6233 tcg_temp_free_i32(tmp2);
6235 break;
6236 default:
6237 g_assert_not_reached();
6240 } else { /* size == 3 */
6241 if (!u) {
6242 /* Extract. */
6243 imm = (insn >> 8) & 0xf;
6245 if (imm > 7 && !q)
6246 return 1;
6248 if (q && ((rd | rn | rm) & 1)) {
6249 return 1;
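/* VEXT: 'imm' is the byte index of the first extracted element in the
 * Vm:Vn concatenation. imm == 0 is a plain copy of Vn (and Vn+1 for the
 * Q form), imm == 8 starts at the second doubleword, and the remaining
 * cases assemble the result by shifting adjacent doublewords by
 * (imm & 7) * 8 bits and ORing them together.
 */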
6252 if (imm == 0) {
6253 neon_load_reg64(cpu_V0, rn);
6254 if (q) {
6255 neon_load_reg64(cpu_V1, rn + 1);
6257 } else if (imm == 8) {
6258 neon_load_reg64(cpu_V0, rn + 1);
6259 if (q) {
6260 neon_load_reg64(cpu_V1, rm);
6262 } else if (q) {
6263 tmp64 = tcg_temp_new_i64();
6264 if (imm < 8) {
6265 neon_load_reg64(cpu_V0, rn);
6266 neon_load_reg64(tmp64, rn + 1);
6267 } else {
6268 neon_load_reg64(cpu_V0, rn + 1);
6269 neon_load_reg64(tmp64, rm);
6271 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
6272 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
6273 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6274 if (imm < 8) {
6275 neon_load_reg64(cpu_V1, rm);
6276 } else {
6277 neon_load_reg64(cpu_V1, rm + 1);
6278 imm -= 8;
6280 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6281 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6282 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
6283 tcg_temp_free_i64(tmp64);
6284 } else {
6285 /* BUGFIX */
6286 neon_load_reg64(cpu_V0, rn);
6287 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
6288 neon_load_reg64(cpu_V1, rm);
6289 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6290 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6292 neon_store_reg64(cpu_V0, rd);
6293 if (q) {
6294 neon_store_reg64(cpu_V1, rd + 1);
6296 } else if ((insn & (1 << 11)) == 0) {
6297 /* Two register misc. */
6298 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6299 size = (insn >> 18) & 3;
6300 /* UNDEF for unknown op values and bad op-size combinations */
6301 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6302 return 1;
6304 if (neon_2rm_is_v8_op(op) &&
6305 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6306 return 1;
6308 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6309 q && ((rm | rd) & 1)) {
6310 return 1;
6312 switch (op) {
6313 case NEON_2RM_VREV64:
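/* Reverse the elements within each 64-bit doubleword: swap the two
* 32-bit halves and, for 8-bit or 16-bit elements, also reverse
* within each word.
*/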
6314 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6315 tmp = neon_load_reg(rm, pass * 2);
6316 tmp2 = neon_load_reg(rm, pass * 2 + 1);
6317 switch (size) {
6318 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6319 case 1: gen_swap_half(tmp); break;
6320 case 2: /* no-op */ break;
6321 default: abort();
6323 neon_store_reg(rd, pass * 2 + 1, tmp);
6324 if (size == 2) {
6325 neon_store_reg(rd, pass * 2, tmp2);
6326 } else {
6327 switch (size) {
6328 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6329 case 1: gen_swap_half(tmp2); break;
6330 default: abort();
6332 neon_store_reg(rd, pass * 2, tmp2);
6335 break;
6336 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6337 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
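/* For each source D register, widen the two 32-bit halves, add the
* element pairs, and for VPADAL accumulate the result into the
* existing destination.
*/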
6338 for (pass = 0; pass < q + 1; pass++) {
6339 tmp = neon_load_reg(rm, pass * 2);
6340 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6341 tmp = neon_load_reg(rm, pass * 2 + 1);
6342 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6343 switch (size) {
6344 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6345 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6346 case 2: tcg_gen_add_i64(CPU_V001); break;
6347 default: abort();
6349 if (op >= NEON_2RM_VPADAL) {
6350 /* Accumulate. */
6351 neon_load_reg64(cpu_V1, rd + pass);
6352 gen_neon_addl(size);
6354 neon_store_reg64(cpu_V0, rd + pass);
6356 break;
6357 case NEON_2RM_VTRN:
6358 if (size == 2) {
6359 int n;
6360 for (n = 0; n < (q ? 4 : 2); n += 2) {
6361 tmp = neon_load_reg(rm, n);
6362 tmp2 = neon_load_reg(rd, n + 1);
6363 neon_store_reg(rm, n, tmp2);
6364 neon_store_reg(rd, n + 1, tmp);
6366 } else {
6367 goto elementwise;
6369 break;
6370 case NEON_2RM_VUZP:
6371 if (gen_neon_unzip(rd, rm, size, q)) {
6372 return 1;
6374 break;
6375 case NEON_2RM_VZIP:
6376 if (gen_neon_zip(rd, rm, size, q)) {
6377 return 1;
6379 break;
6380 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6381 /* also VQMOVUN; op field and mnemonics don't line up */
6382 if (rm & 1) {
6383 return 1;
6385 tmp2 = NULL;
6386 for (pass = 0; pass < 2; pass++) {
6387 neon_load_reg64(cpu_V0, rm + pass);
6388 tmp = tcg_temp_new_i32();
6389 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6390 tmp, cpu_V0);
6391 if (pass == 0) {
6392 tmp2 = tmp;
6393 } else {
6394 neon_store_reg(rd, 0, tmp2);
6395 neon_store_reg(rd, 1, tmp);
6398 break;
6399 case NEON_2RM_VSHLL:
6400 if (q || (rd & 1)) {
6401 return 1;
6403 tmp = neon_load_reg(rm, 0);
6404 tmp2 = neon_load_reg(rm, 1);
6405 for (pass = 0; pass < 2; pass++) {
6406 if (pass == 1)
6407 tmp = tmp2;
6408 gen_neon_widen(cpu_V0, tmp, size, 1);
6409 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
6410 neon_store_reg64(cpu_V0, rd + pass);
6412 break;
6413 case NEON_2RM_VCVT_F16_F32:
6415 TCGv_ptr fpst;
6416 TCGv_i32 ahp;
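/* VCVT.F16.F32: narrow four single-precision values from the Q-sized
* source (D registers rm and rm+1) and pack the halves pairwise into
* the destination D register; ahp is the FPSCR.AHP alternative-format flag.
*/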
6418 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
6419 q || (rm & 1)) {
6420 return 1;
6422 fpst = get_fpstatus_ptr(true);
6423 ahp = get_ahp_flag();
6424 tmp = neon_load_reg(rm, 0);
6425 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6426 tmp2 = neon_load_reg(rm, 1);
6427 gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
6428 tcg_gen_shli_i32(tmp2, tmp2, 16);
6429 tcg_gen_or_i32(tmp2, tmp2, tmp);
6430 tcg_temp_free_i32(tmp);
6431 tmp = neon_load_reg(rm, 2);
6432 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6433 tmp3 = neon_load_reg(rm, 3);
6434 neon_store_reg(rd, 0, tmp2);
6435 gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
6436 tcg_gen_shli_i32(tmp3, tmp3, 16);
6437 tcg_gen_or_i32(tmp3, tmp3, tmp);
6438 neon_store_reg(rd, 1, tmp3);
6439 tcg_temp_free_i32(tmp);
6440 tcg_temp_free_i32(ahp);
6441 tcg_temp_free_ptr(fpst);
6442 break;
6444 case NEON_2RM_VCVT_F32_F16:
6446 TCGv_ptr fpst;
6447 TCGv_i32 ahp;
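/* VCVT.F32.F16: widen the four half-precision values packed in the
* source D register into the destination Q pair (rd and rd+1), low
* halfword of each word first.
*/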
6448 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
6449 q || (rd & 1)) {
6450 return 1;
6452 fpst = get_fpstatus_ptr(true);
6453 ahp = get_ahp_flag();
6454 tmp3 = tcg_temp_new_i32();
6455 tmp = neon_load_reg(rm, 0);
6456 tmp2 = neon_load_reg(rm, 1);
6457 tcg_gen_ext16u_i32(tmp3, tmp);
6458 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6459 neon_store_reg(rd, 0, tmp3);
6460 tcg_gen_shri_i32(tmp, tmp, 16);
6461 gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
6462 neon_store_reg(rd, 1, tmp);
6463 tmp3 = tcg_temp_new_i32();
6464 tcg_gen_ext16u_i32(tmp3, tmp2);
6465 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6466 neon_store_reg(rd, 2, tmp3);
6467 tcg_gen_shri_i32(tmp2, tmp2, 16);
6468 gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
6469 neon_store_reg(rd, 3, tmp2);
6470 tcg_temp_free_i32(ahp);
6471 tcg_temp_free_ptr(fpst);
6472 break;
6474 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6475 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
6476 return 1;
6478 ptr1 = vfp_reg_ptr(true, rd);
6479 ptr2 = vfp_reg_ptr(true, rm);
6481 /* Bit 6 is the lowest opcode bit; it distinguishes between
6482 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6483 */
6484 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6486 if (op == NEON_2RM_AESE) {
6487 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
6488 } else {
6489 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
6491 tcg_temp_free_ptr(ptr1);
6492 tcg_temp_free_ptr(ptr2);
6493 tcg_temp_free_i32(tmp3);
6494 break;
6495 case NEON_2RM_SHA1H:
6496 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
6497 return 1;
6499 ptr1 = vfp_reg_ptr(true, rd);
6500 ptr2 = vfp_reg_ptr(true, rm);
6502 gen_helper_crypto_sha1h(ptr1, ptr2);
6504 tcg_temp_free_ptr(ptr1);
6505 tcg_temp_free_ptr(ptr2);
6506 break;
6507 case NEON_2RM_SHA1SU1:
6508 if ((rm | rd) & 1) {
6509 return 1;
6511 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6512 if (q) {
6513 if (!dc_isar_feature(aa32_sha2, s)) {
6514 return 1;
6516 } else if (!dc_isar_feature(aa32_sha1, s)) {
6517 return 1;
6519 ptr1 = vfp_reg_ptr(true, rd);
6520 ptr2 = vfp_reg_ptr(true, rm);
6521 if (q) {
6522 gen_helper_crypto_sha256su0(ptr1, ptr2);
6523 } else {
6524 gen_helper_crypto_sha1su1(ptr1, ptr2);
6526 tcg_temp_free_ptr(ptr1);
6527 tcg_temp_free_ptr(ptr2);
6528 break;
6530 case NEON_2RM_VMVN:
6531 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
6532 break;
6533 case NEON_2RM_VNEG:
6534 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
6535 break;
6536 case NEON_2RM_VABS:
6537 tcg_gen_gvec_abs(size, rd_ofs, rm_ofs, vec_size, vec_size);
6538 break;
6540 default:
6541 elementwise:
6542 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6543 tmp = neon_load_reg(rm, pass);
6544 switch (op) {
6545 case NEON_2RM_VREV32:
6546 switch (size) {
6547 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6548 case 1: gen_swap_half(tmp); break;
6549 default: abort();
6551 break;
6552 case NEON_2RM_VREV16:
6553 gen_rev16(tmp);
6554 break;
6555 case NEON_2RM_VCLS:
6556 switch (size) {
6557 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6558 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6559 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
6560 default: abort();
6562 break;
6563 case NEON_2RM_VCLZ:
6564 switch (size) {
6565 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6566 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6567 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
6568 default: abort();
6570 break;
6571 case NEON_2RM_VCNT:
6572 gen_helper_neon_cnt_u8(tmp, tmp);
6573 break;
6574 case NEON_2RM_VQABS:
6575 switch (size) {
6576 case 0:
6577 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6578 break;
6579 case 1:
6580 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6581 break;
6582 case 2:
6583 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6584 break;
6585 default: abort();
6587 break;
6588 case NEON_2RM_VQNEG:
6589 switch (size) {
6590 case 0:
6591 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6592 break;
6593 case 1:
6594 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6595 break;
6596 case 2:
6597 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6598 break;
6599 default: abort();
6601 break;
6602 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6603 tmp2 = tcg_const_i32(0);
6604 switch(size) {
6605 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6606 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6607 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6608 default: abort();
6610 tcg_temp_free_i32(tmp2);
6611 if (op == NEON_2RM_VCLE0) {
6612 tcg_gen_not_i32(tmp, tmp);
6614 break;
6615 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6616 tmp2 = tcg_const_i32(0);
6617 switch(size) {
6618 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6619 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6620 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6621 default: abort();
6623 tcg_temp_free_i32(tmp2);
6624 if (op == NEON_2RM_VCLT0) {
6625 tcg_gen_not_i32(tmp, tmp);
6627 break;
6628 case NEON_2RM_VCEQ0:
6629 tmp2 = tcg_const_i32(0);
6630 switch(size) {
6631 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6632 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6633 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6634 default: abort();
6636 tcg_temp_free_i32(tmp2);
6637 break;
6638 case NEON_2RM_VCGT0_F:
6640 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6641 tmp2 = tcg_const_i32(0);
6642 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6643 tcg_temp_free_i32(tmp2);
6644 tcg_temp_free_ptr(fpstatus);
6645 break;
6647 case NEON_2RM_VCGE0_F:
6649 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6650 tmp2 = tcg_const_i32(0);
6651 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6652 tcg_temp_free_i32(tmp2);
6653 tcg_temp_free_ptr(fpstatus);
6654 break;
6656 case NEON_2RM_VCEQ0_F:
6658 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6659 tmp2 = tcg_const_i32(0);
6660 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6661 tcg_temp_free_i32(tmp2);
6662 tcg_temp_free_ptr(fpstatus);
6663 break;
6665 case NEON_2RM_VCLE0_F:
6667 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6668 tmp2 = tcg_const_i32(0);
6669 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6670 tcg_temp_free_i32(tmp2);
6671 tcg_temp_free_ptr(fpstatus);
6672 break;
6674 case NEON_2RM_VCLT0_F:
6676 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6677 tmp2 = tcg_const_i32(0);
6678 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6679 tcg_temp_free_i32(tmp2);
6680 tcg_temp_free_ptr(fpstatus);
6681 break;
6683 case NEON_2RM_VABS_F:
6684 gen_helper_vfp_abss(tmp, tmp);
6685 break;
6686 case NEON_2RM_VNEG_F:
6687 gen_helper_vfp_negs(tmp, tmp);
6688 break;
6689 case NEON_2RM_VSWP:
6690 tmp2 = neon_load_reg(rd, pass);
6691 neon_store_reg(rm, pass, tmp2);
6692 break;
6693 case NEON_2RM_VTRN:
6694 tmp2 = neon_load_reg(rd, pass);
6695 switch (size) {
6696 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6697 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6698 default: abort();
6700 neon_store_reg(rm, pass, tmp2);
6701 break;
6702 case NEON_2RM_VRINTN:
6703 case NEON_2RM_VRINTA:
6704 case NEON_2RM_VRINTM:
6705 case NEON_2RM_VRINTP:
6706 case NEON_2RM_VRINTZ:
6708 TCGv_i32 tcg_rmode;
6709 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6710 int rmode;
6712 if (op == NEON_2RM_VRINTZ) {
6713 rmode = FPROUNDING_ZERO;
6714 } else {
6715 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6718 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6719 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6720 cpu_env);
6721 gen_helper_rints(tmp, tmp, fpstatus);
6722 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6723 cpu_env);
6724 tcg_temp_free_ptr(fpstatus);
6725 tcg_temp_free_i32(tcg_rmode);
6726 break;
6728 case NEON_2RM_VRINTX:
6730 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6731 gen_helper_rints_exact(tmp, tmp, fpstatus);
6732 tcg_temp_free_ptr(fpstatus);
6733 break;
6735 case NEON_2RM_VCVTAU:
6736 case NEON_2RM_VCVTAS:
6737 case NEON_2RM_VCVTNU:
6738 case NEON_2RM_VCVTNS:
6739 case NEON_2RM_VCVTPU:
6740 case NEON_2RM_VCVTPS:
6741 case NEON_2RM_VCVTMU:
6742 case NEON_2RM_VCVTMS:
6744 bool is_signed = !extract32(insn, 7, 1);
6745 TCGv_ptr fpst = get_fpstatus_ptr(1);
6746 TCGv_i32 tcg_rmode, tcg_shift;
6747 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6749 tcg_shift = tcg_const_i32(0);
6750 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6751 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6752 cpu_env);
6754 if (is_signed) {
6755 gen_helper_vfp_tosls(tmp, tmp,
6756 tcg_shift, fpst);
6757 } else {
6758 gen_helper_vfp_touls(tmp, tmp,
6759 tcg_shift, fpst);
6762 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6763 cpu_env);
6764 tcg_temp_free_i32(tcg_rmode);
6765 tcg_temp_free_i32(tcg_shift);
6766 tcg_temp_free_ptr(fpst);
6767 break;
6769 case NEON_2RM_VRECPE:
6771 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6772 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6773 tcg_temp_free_ptr(fpstatus);
6774 break;
6776 case NEON_2RM_VRSQRTE:
6778 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6779 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6780 tcg_temp_free_ptr(fpstatus);
6781 break;
6783 case NEON_2RM_VRECPE_F:
6785 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6786 gen_helper_recpe_f32(tmp, tmp, fpstatus);
6787 tcg_temp_free_ptr(fpstatus);
6788 break;
6790 case NEON_2RM_VRSQRTE_F:
6792 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6793 gen_helper_rsqrte_f32(tmp, tmp, fpstatus);
6794 tcg_temp_free_ptr(fpstatus);
6795 break;
6797 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6799 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6800 gen_helper_vfp_sitos(tmp, tmp, fpstatus);
6801 tcg_temp_free_ptr(fpstatus);
6802 break;
6804 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6806 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6807 gen_helper_vfp_uitos(tmp, tmp, fpstatus);
6808 tcg_temp_free_ptr(fpstatus);
6809 break;
6811 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6813 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6814 gen_helper_vfp_tosizs(tmp, tmp, fpstatus);
6815 tcg_temp_free_ptr(fpstatus);
6816 break;
6818 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6820 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6821 gen_helper_vfp_touizs(tmp, tmp, fpstatus);
6822 tcg_temp_free_ptr(fpstatus);
6823 break;
6825 default:
6826 /* Reserved op values were caught by the
6827 * neon_2rm_sizes[] check earlier.
6828 */
6829 abort();
6831 neon_store_reg(rd, pass, tmp);
6833 break;
6835 } else if ((insn & (1 << 10)) == 0) {
6836 /* VTBL, VTBX. */
6837 int n = ((insn >> 8) & 3) + 1;
6838 if ((rn + n) > 32) {
6839 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6840 * helper function running off the end of the register file.
6841 */
6842 return 1;
6844 n <<= 3;
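/* n is now the table length in bytes (8 per list register). For VTBX
* (bit 6 set) the current destination bytes are passed to the helper so
* out-of-range indices leave them unchanged; for VTBL they become zero.
*/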
6845 if (insn & (1 << 6)) {
6846 tmp = neon_load_reg(rd, 0);
6847 } else {
6848 tmp = tcg_temp_new_i32();
6849 tcg_gen_movi_i32(tmp, 0);
6851 tmp2 = neon_load_reg(rm, 0);
6852 ptr1 = vfp_reg_ptr(true, rn);
6853 tmp5 = tcg_const_i32(n);
6854 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
6855 tcg_temp_free_i32(tmp);
6856 if (insn & (1 << 6)) {
6857 tmp = neon_load_reg(rd, 1);
6858 } else {
6859 tmp = tcg_temp_new_i32();
6860 tcg_gen_movi_i32(tmp, 0);
6862 tmp3 = neon_load_reg(rm, 1);
6863 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
6864 tcg_temp_free_i32(tmp5);
6865 tcg_temp_free_ptr(ptr1);
6866 neon_store_reg(rd, 0, tmp2);
6867 neon_store_reg(rd, 1, tmp3);
6868 tcg_temp_free_i32(tmp);
6869 } else if ((insn & 0x380) == 0) {
6870 /* VDUP */
6871 int element;
6872 TCGMemOp size;
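/* VDUP (scalar): the imm4 field encodes both size and index - the
* position of the lowest set bit in bits [18:16] gives the element size
* and the bits above it give the element index.
*/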
6874 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6875 return 1;
6877 if (insn & (1 << 16)) {
6878 size = MO_8;
6879 element = (insn >> 17) & 7;
6880 } else if (insn & (1 << 17)) {
6881 size = MO_16;
6882 element = (insn >> 18) & 3;
6883 } else {
6884 size = MO_32;
6885 element = (insn >> 19) & 1;
6887 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
6888 neon_element_offset(rm, element, size),
6889 q ? 16 : 8, q ? 16 : 8);
6890 } else {
6891 return 1;
6895 return 0;
6898 /* Advanced SIMD three registers of the same length extension.
6899 * 31 25 23 22 20 16 12 11 10 9 8 3 0
6900 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
6901 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
6902 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
6903 */
6904 static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
6906 gen_helper_gvec_3 *fn_gvec = NULL;
6907 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
6908 int rd, rn, rm, opr_sz;
6909 int data = 0;
6910 int off_rn, off_rm;
6911 bool is_long = false, q = extract32(insn, 6, 1);
6912 bool ptr_is_env = false;
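/* Each recognised encoding selects either a plain out-of-line vector
* helper (fn_gvec) or one taking an extra pointer (fn_gvec_ptr): the FP
* status pointer, or cpu_env when ptr_is_env is set. is_long marks the
* widening ops, whose sources are single registers when Q is clear.
*/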
6914 if ((insn & 0xfe200f10) == 0xfc200800) {
6915 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
6916 int size = extract32(insn, 20, 1);
6917 data = extract32(insn, 23, 2); /* rot */
6918 if (!dc_isar_feature(aa32_vcma, s)
6919 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
6920 return 1;
6922 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
6923 } else if ((insn & 0xfea00f10) == 0xfc800800) {
6924 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
6925 int size = extract32(insn, 20, 1);
6926 data = extract32(insn, 24, 1); /* rot */
6927 if (!dc_isar_feature(aa32_vcma, s)
6928 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
6929 return 1;
6931 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
6932 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
6933 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
6934 bool u = extract32(insn, 4, 1);
6935 if (!dc_isar_feature(aa32_dp, s)) {
6936 return 1;
6938 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
6939 } else if ((insn & 0xff300f10) == 0xfc200810) {
6940 /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
6941 int is_s = extract32(insn, 23, 1);
6942 if (!dc_isar_feature(aa32_fhm, s)) {
6943 return 1;
6945 is_long = true;
6946 data = is_s; /* is_2 == 0 */
6947 fn_gvec_ptr = gen_helper_gvec_fmlal_a32;
6948 ptr_is_env = true;
6949 } else {
6950 return 1;
6953 VFP_DREG_D(rd, insn);
6954 if (rd & q) {
6955 return 1;
6957 if (q || !is_long) {
6958 VFP_DREG_N(rn, insn);
6959 VFP_DREG_M(rm, insn);
6960 if ((rn | rm) & q & !is_long) {
6961 return 1;
6963 off_rn = vfp_reg_offset(1, rn);
6964 off_rm = vfp_reg_offset(1, rm);
6965 } else {
6966 rn = VFP_SREG_N(insn);
6967 rm = VFP_SREG_M(insn);
6968 off_rn = vfp_reg_offset(0, rn);
6969 off_rm = vfp_reg_offset(0, rm);
6972 if (s->fp_excp_el) {
6973 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
6974 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
6975 return 0;
6977 if (!s->vfp_enabled) {
6978 return 1;
6981 opr_sz = (1 + q) * 8;
6982 if (fn_gvec_ptr) {
6983 TCGv_ptr ptr;
6984 if (ptr_is_env) {
6985 ptr = cpu_env;
6986 } else {
6987 ptr = get_fpstatus_ptr(1);
6989 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
6990 opr_sz, opr_sz, data, fn_gvec_ptr);
6991 if (!ptr_is_env) {
6992 tcg_temp_free_ptr(ptr);
6994 } else {
6995 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
6996 opr_sz, opr_sz, data, fn_gvec);
6998 return 0;
7001 /* Advanced SIMD two registers and a scalar extension.
7002 * 31 24 23 22 20 16 12 11 10 9 8 3 0
7003 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7004 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7005 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7006 */
7009 static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
7011 gen_helper_gvec_3 *fn_gvec = NULL;
7012 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
7013 int rd, rn, rm, opr_sz, data;
7014 int off_rn, off_rm;
7015 bool is_long = false, q = extract32(insn, 6, 1);
7016 bool ptr_is_env = false;
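/* As for the three-same extension group: fn_gvec vs fn_gvec_ptr selects
* whether the helper takes an extra FP-status or cpu_env pointer, and
* is_long marks the widening (F16 to F32) forms.
*/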
7018 if ((insn & 0xff000f10) == 0xfe000800) {
7019 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
7020 int rot = extract32(insn, 20, 2);
7021 int size = extract32(insn, 23, 1);
7022 int index;
7024 if (!dc_isar_feature(aa32_vcma, s)) {
7025 return 1;
7027 if (size == 0) {
7028 if (!dc_isar_feature(aa32_fp16_arith, s)) {
7029 return 1;
7031 /* For fp16, rm is just Vm, and index is M. */
7032 rm = extract32(insn, 0, 4);
7033 index = extract32(insn, 5, 1);
7034 } else {
7035 /* For fp32, rm is the usual M:Vm, and index is 0. */
7036 VFP_DREG_M(rm, insn);
7037 index = 0;
7039 data = (index << 2) | rot;
7040 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
7041 : gen_helper_gvec_fcmlah_idx);
7042 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
7043 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
7044 int u = extract32(insn, 4, 1);
7046 if (!dc_isar_feature(aa32_dp, s)) {
7047 return 1;
7049 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
7050 /* rm is just Vm, and index is M. */
7051 data = extract32(insn, 5, 1); /* index */
7052 rm = extract32(insn, 0, 4);
7053 } else if ((insn & 0xffa00f10) == 0xfe000810) {
7054 /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
7055 int is_s = extract32(insn, 20, 1);
7056 int vm20 = extract32(insn, 0, 3);
7057 int vm3 = extract32(insn, 3, 1);
7058 int m = extract32(insn, 5, 1);
7059 int index;
7061 if (!dc_isar_feature(aa32_fhm, s)) {
7062 return 1;
7064 if (q) {
7065 rm = vm20;
7066 index = m * 2 + vm3;
7067 } else {
7068 rm = vm20 * 2 + m;
7069 index = vm3;
7071 is_long = true;
7072 data = (index << 2) | is_s; /* is_2 == 0 */
7073 fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32;
7074 ptr_is_env = true;
7075 } else {
7076 return 1;
7079 VFP_DREG_D(rd, insn);
7080 if (rd & q) {
7081 return 1;
7083 if (q || !is_long) {
7084 VFP_DREG_N(rn, insn);
7085 if (rn & q & !is_long) {
7086 return 1;
7088 off_rn = vfp_reg_offset(1, rn);
7089 off_rm = vfp_reg_offset(1, rm);
7090 } else {
7091 rn = VFP_SREG_N(insn);
7092 off_rn = vfp_reg_offset(0, rn);
7093 off_rm = vfp_reg_offset(0, rm);
7095 if (s->fp_excp_el) {
7096 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
7097 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
7098 return 0;
7100 if (!s->vfp_enabled) {
7101 return 1;
7104 opr_sz = (1 + q) * 8;
7105 if (fn_gvec_ptr) {
7106 TCGv_ptr ptr;
7107 if (ptr_is_env) {
7108 ptr = cpu_env;
7109 } else {
7110 ptr = get_fpstatus_ptr(1);
7112 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
7113 opr_sz, opr_sz, data, fn_gvec_ptr);
7114 if (!ptr_is_env) {
7115 tcg_temp_free_ptr(ptr);
7117 } else {
7118 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
7119 opr_sz, opr_sz, data, fn_gvec);
7121 return 0;
7124 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
7126 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7127 const ARMCPRegInfo *ri;
7129 cpnum = (insn >> 8) & 0xf;
7131 /* First check for coprocessor space used for XScale/iwMMXt insns */
7132 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
7133 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7134 return 1;
7136 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7137 return disas_iwmmxt_insn(s, insn);
7138 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7139 return disas_dsp_insn(s, insn);
7141 return 1;
7144 /* Otherwise treat as a generic register access */
7145 is64 = (insn & (1 << 25)) == 0;
7146 if (!is64 && ((insn & (1 << 4)) == 0)) {
7147 /* cdp */
7148 return 1;
7151 crm = insn & 0xf;
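/* The field layout differs between the two forms: the 64-bit transfers
* (MCRR/MRRC) take opc1 from bits [7:4] and Rt2 from bits [19:16], while
* the 32-bit ones (MCR/MRC) take CRn from [19:16], opc1 from [23:21] and
* opc2 from [7:5].
*/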
7152 if (is64) {
7153 crn = 0;
7154 opc1 = (insn >> 4) & 0xf;
7155 opc2 = 0;
7156 rt2 = (insn >> 16) & 0xf;
7157 } else {
7158 crn = (insn >> 16) & 0xf;
7159 opc1 = (insn >> 21) & 7;
7160 opc2 = (insn >> 5) & 7;
7161 rt2 = 0;
7163 isread = (insn >> 20) & 1;
7164 rt = (insn >> 12) & 0xf;
7166 ri = get_arm_cp_reginfo(s->cp_regs,
7167 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
7168 if (ri) {
7169 /* Check access permissions */
7170 if (!cp_access_ok(s->current_el, ri, isread)) {
7171 return 1;
7174 if (ri->accessfn ||
7175 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
7176 /* Emit code to perform further access permissions checks at
7177 * runtime; this may result in an exception.
7178 * Note that on XScale all cp0..c13 registers do an access check
7179 * call in order to handle c15_cpar.
7180 */
7181 TCGv_ptr tmpptr;
7182 TCGv_i32 tcg_syn, tcg_isread;
7183 uint32_t syndrome;
7185 /* Note that since we are an implementation which takes an
7186 * exception on a trapped conditional instruction only if the
7187 * instruction passes its condition code check, we can take
7188 * advantage of the clause in the ARM ARM that allows us to set
7189 * the COND field in the instruction to 0xE in all cases.
7190 * We could fish the actual condition out of the insn (ARM)
7191 * or the condexec bits (Thumb) but it isn't necessary.
7192 */
7193 switch (cpnum) {
7194 case 14:
7195 if (is64) {
7196 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7197 isread, false);
7198 } else {
7199 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7200 rt, isread, false);
7202 break;
7203 case 15:
7204 if (is64) {
7205 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7206 isread, false);
7207 } else {
7208 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7209 rt, isread, false);
7211 break;
7212 default:
7213 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7214 * so this can only happen if this is an ARMv7 or earlier CPU,
7215 * in which case the syndrome information won't actually be
7216 * guest visible.
7217 */
7218 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
7219 syndrome = syn_uncategorized();
7220 break;
7223 gen_set_condexec(s);
7224 gen_set_pc_im(s, s->pc_curr);
7225 tmpptr = tcg_const_ptr(ri);
7226 tcg_syn = tcg_const_i32(syndrome);
7227 tcg_isread = tcg_const_i32(isread);
7228 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7229 tcg_isread);
7230 tcg_temp_free_ptr(tmpptr);
7231 tcg_temp_free_i32(tcg_syn);
7232 tcg_temp_free_i32(tcg_isread);
7235 /* Handle special cases first */
7236 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7237 case ARM_CP_NOP:
7238 return 0;
7239 case ARM_CP_WFI:
7240 if (isread) {
7241 return 1;
7243 gen_set_pc_im(s, s->base.pc_next);
7244 s->base.is_jmp = DISAS_WFI;
7245 return 0;
7246 default:
7247 break;
7250 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7251 gen_io_start();
7254 if (isread) {
7255 /* Read */
7256 if (is64) {
7257 TCGv_i64 tmp64;
7258 TCGv_i32 tmp;
7259 if (ri->type & ARM_CP_CONST) {
7260 tmp64 = tcg_const_i64(ri->resetvalue);
7261 } else if (ri->readfn) {
7262 TCGv_ptr tmpptr;
7263 tmp64 = tcg_temp_new_i64();
7264 tmpptr = tcg_const_ptr(ri);
7265 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7266 tcg_temp_free_ptr(tmpptr);
7267 } else {
7268 tmp64 = tcg_temp_new_i64();
7269 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7271 tmp = tcg_temp_new_i32();
7272 tcg_gen_extrl_i64_i32(tmp, tmp64);
7273 store_reg(s, rt, tmp);
7274 tcg_gen_shri_i64(tmp64, tmp64, 32);
7275 tmp = tcg_temp_new_i32();
7276 tcg_gen_extrl_i64_i32(tmp, tmp64);
7277 tcg_temp_free_i64(tmp64);
7278 store_reg(s, rt2, tmp);
7279 } else {
7280 TCGv_i32 tmp;
7281 if (ri->type & ARM_CP_CONST) {
7282 tmp = tcg_const_i32(ri->resetvalue);
7283 } else if (ri->readfn) {
7284 TCGv_ptr tmpptr;
7285 tmp = tcg_temp_new_i32();
7286 tmpptr = tcg_const_ptr(ri);
7287 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7288 tcg_temp_free_ptr(tmpptr);
7289 } else {
7290 tmp = load_cpu_offset(ri->fieldoffset);
7292 if (rt == 15) {
7293 /* Destination register of r15 for 32 bit loads sets
7294 * the condition codes from the high 4 bits of the value
7295 */
7296 gen_set_nzcv(tmp);
7297 tcg_temp_free_i32(tmp);
7298 } else {
7299 store_reg(s, rt, tmp);
7302 } else {
7303 /* Write */
7304 if (ri->type & ARM_CP_CONST) {
7305 /* If not forbidden by access permissions, treat as WI */
7306 return 0;
7309 if (is64) {
7310 TCGv_i32 tmplo, tmphi;
7311 TCGv_i64 tmp64 = tcg_temp_new_i64();
7312 tmplo = load_reg(s, rt);
7313 tmphi = load_reg(s, rt2);
7314 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7315 tcg_temp_free_i32(tmplo);
7316 tcg_temp_free_i32(tmphi);
7317 if (ri->writefn) {
7318 TCGv_ptr tmpptr = tcg_const_ptr(ri);
7319 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7320 tcg_temp_free_ptr(tmpptr);
7321 } else {
7322 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7324 tcg_temp_free_i64(tmp64);
7325 } else {
7326 if (ri->writefn) {
7327 TCGv_i32 tmp;
7328 TCGv_ptr tmpptr;
7329 tmp = load_reg(s, rt);
7330 tmpptr = tcg_const_ptr(ri);
7331 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7332 tcg_temp_free_ptr(tmpptr);
7333 tcg_temp_free_i32(tmp);
7334 } else {
7335 TCGv_i32 tmp = load_reg(s, rt);
7336 store_cpu_offset(tmp, ri->fieldoffset);
7341 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7342 /* I/O operations must end the TB here (whether read or write) */
7343 gen_io_end();
7344 gen_lookup_tb(s);
7345 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
7346 /* We default to ending the TB on a coprocessor register write,
7347 * but allow this to be suppressed by the register definition
7348 * (usually only necessary to work around guest bugs).
7349 */
7350 gen_lookup_tb(s);
7353 return 0;
7356 /* Unknown register; this might be a guest error or a QEMU
7357 * unimplemented feature.
7358 */
7359 if (is64) {
7360 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7361 "64 bit system register cp:%d opc1: %d crm:%d "
7362 "(%s)\n",
7363 isread ? "read" : "write", cpnum, opc1, crm,
7364 s->ns ? "non-secure" : "secure");
7365 } else {
7366 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7367 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7368 "(%s)\n",
7369 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7370 s->ns ? "non-secure" : "secure");
7373 return 1;
7377 /* Store a 64-bit value to a register pair. Clobbers val. */
7378 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
7380 TCGv_i32 tmp;
7381 tmp = tcg_temp_new_i32();
7382 tcg_gen_extrl_i64_i32(tmp, val);
7383 store_reg(s, rlow, tmp);
7384 tmp = tcg_temp_new_i32();
7385 tcg_gen_shri_i64(val, val, 32);
7386 tcg_gen_extrl_i64_i32(tmp, val);
7387 store_reg(s, rhigh, tmp);
7390 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
7391 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
7393 TCGv_i64 tmp;
7394 TCGv_i32 tmp2;
7396 /* Load value and extend to 64 bits. */
7397 tmp = tcg_temp_new_i64();
7398 tmp2 = load_reg(s, rlow);
7399 tcg_gen_extu_i32_i64(tmp, tmp2);
7400 tcg_temp_free_i32(tmp2);
7401 tcg_gen_add_i64(val, val, tmp);
7402 tcg_temp_free_i64(tmp);
7405 /* load and add a 64-bit value from a register pair. */
7406 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
7408 TCGv_i64 tmp;
7409 TCGv_i32 tmpl;
7410 TCGv_i32 tmph;
7412 /* Load 64-bit value rd:rn. */
7413 tmpl = load_reg(s, rlow);
7414 tmph = load_reg(s, rhigh);
7415 tmp = tcg_temp_new_i64();
7416 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7417 tcg_temp_free_i32(tmpl);
7418 tcg_temp_free_i32(tmph);
7419 tcg_gen_add_i64(val, val, tmp);
7420 tcg_temp_free_i64(tmp);
7423 /* Set N and Z flags from hi|lo. */
7424 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
7426 tcg_gen_mov_i32(cpu_NF, hi);
7427 tcg_gen_or_i32(cpu_ZF, lo, hi);
7430 /* Load/Store exclusive instructions are implemented by remembering
7431 the value/address loaded, and seeing if these are the same
7432 when the store is performed. This should be sufficient to implement
7433 the architecturally mandated semantics, and avoids having to monitor
7434 regular stores. The compare vs the remembered value is done during
7435 the cmpxchg operation, but we must compare the addresses manually. */
7436 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
7437 TCGv_i32 addr, int size)
7439 TCGv_i32 tmp = tcg_temp_new_i32();
7440 TCGMemOp opc = size | MO_ALIGN | s->be_data;
7442 s->is_ldex = true;
7444 if (size == 3) {
7445 TCGv_i32 tmp2 = tcg_temp_new_i32();
7446 TCGv_i64 t64 = tcg_temp_new_i64();
7448 /* For AArch32, architecturally the 32-bit word at the lowest
7449 * address is always Rt and the one at addr+4 is Rt2, even if
7450 * the CPU is big-endian. That means we don't want to do a
7451 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
7452 * for an architecturally 64-bit access, but instead do a
7453 * 64-bit access using MO_BE if appropriate and then split
7454 * the two halves.
7455 * This only makes a difference for BE32 user-mode, where
7456 * frob64() must not flip the two halves of the 64-bit data
7457 * but this code must treat BE32 user-mode like BE32 system.
7458 */
7459 TCGv taddr = gen_aa32_addr(s, addr, opc);
7461 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
7462 tcg_temp_free(taddr);
7463 tcg_gen_mov_i64(cpu_exclusive_val, t64);
7464 if (s->be_data == MO_BE) {
7465 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
7466 } else {
7467 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
7469 tcg_temp_free_i64(t64);
7471 store_reg(s, rt2, tmp2);
7472 } else {
7473 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
7474 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
7477 store_reg(s, rt, tmp);
7478 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
7481 static void gen_clrex(DisasContext *s)
7483 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7486 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7487 TCGv_i32 addr, int size)
7489 TCGv_i32 t0, t1, t2;
7490 TCGv_i64 extaddr;
7491 TCGv taddr;
7492 TCGLabel *done_label;
7493 TCGLabel *fail_label;
7494 TCGMemOp opc = size | MO_ALIGN | s->be_data;
7496 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7497 [addr] = {Rt};
7498 {Rd} = 0;
7499 } else {
7500 {Rd} = 1;
7501 } */
7502 fail_label = gen_new_label();
7503 done_label = gen_new_label();
7504 extaddr = tcg_temp_new_i64();
7505 tcg_gen_extu_i32_i64(extaddr, addr);
7506 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7507 tcg_temp_free_i64(extaddr);
7509 taddr = gen_aa32_addr(s, addr, opc);
7510 t0 = tcg_temp_new_i32();
7511 t1 = load_reg(s, rt);
7512 if (size == 3) {
7513 TCGv_i64 o64 = tcg_temp_new_i64();
7514 TCGv_i64 n64 = tcg_temp_new_i64();
7516 t2 = load_reg(s, rt2);
7517 /* For AArch32, architecturally the 32-bit word at the lowest
7518 * address is always Rt and the one at addr+4 is Rt2, even if
7519 * the CPU is big-endian. Since we're going to treat this as a
7520 * single 64-bit BE store, we need to put the two halves in the
7521 * opposite order for BE to LE, so that they end up in the right
7522 * places.
7523 * We don't want gen_aa32_frob64() because that does the wrong
7524 * thing for BE32 usermode.
7525 */
7526 if (s->be_data == MO_BE) {
7527 tcg_gen_concat_i32_i64(n64, t2, t1);
7528 } else {
7529 tcg_gen_concat_i32_i64(n64, t1, t2);
7531 tcg_temp_free_i32(t2);
7533 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
7534 get_mem_index(s), opc);
7535 tcg_temp_free_i64(n64);
7537 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
7538 tcg_gen_extrl_i64_i32(t0, o64);
7540 tcg_temp_free_i64(o64);
7541 } else {
7542 t2 = tcg_temp_new_i32();
7543 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
7544 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
7545 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
7546 tcg_temp_free_i32(t2);
7548 tcg_temp_free_i32(t1);
7549 tcg_temp_free(taddr);
7550 tcg_gen_mov_i32(cpu_R[rd], t0);
7551 tcg_temp_free_i32(t0);
7552 tcg_gen_br(done_label);
7554 gen_set_label(fail_label);
7555 tcg_gen_movi_i32(cpu_R[rd], 1);
7556 gen_set_label(done_label);
7557 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7560 /* gen_srs:
7561 * @env: CPUARMState
7562 * @s: DisasContext
7563 * @mode: mode field from insn (which stack to store to)
7564 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7565 * @writeback: true if writeback bit set
7567 * Generate code for the SRS (Store Return State) insn.
7568 */
7569 static void gen_srs(DisasContext *s,
7570 uint32_t mode, uint32_t amode, bool writeback)
7572 int32_t offset;
7573 TCGv_i32 addr, tmp;
7574 bool undef = false;
7576 /* SRS is:
7577 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
7578 * and specified mode is monitor mode
7579 * - UNDEFINED in Hyp mode
7580 * - UNPREDICTABLE in User or System mode
7581 * - UNPREDICTABLE if the specified mode is:
7582 * -- not implemented
7583 * -- not a valid mode number
7584 * -- a mode that's at a higher exception level
7585 * -- Monitor, if we are Non-secure
7586 * For the UNPREDICTABLE cases we choose to UNDEF.
7587 */
7588 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
7589 gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
7590 return;
7593 if (s->current_el == 0 || s->current_el == 2) {
7594 undef = true;
7597 switch (mode) {
7598 case ARM_CPU_MODE_USR:
7599 case ARM_CPU_MODE_FIQ:
7600 case ARM_CPU_MODE_IRQ:
7601 case ARM_CPU_MODE_SVC:
7602 case ARM_CPU_MODE_ABT:
7603 case ARM_CPU_MODE_UND:
7604 case ARM_CPU_MODE_SYS:
7605 break;
7606 case ARM_CPU_MODE_HYP:
7607 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
7608 undef = true;
7610 break;
7611 case ARM_CPU_MODE_MON:
7612 /* No need to check specifically for "are we non-secure" because
7613 * we've already made EL0 UNDEF and handled the trap for S-EL1;
7614 * so if this isn't EL3 then we must be non-secure.
7615 */
7616 if (s->current_el != 3) {
7617 undef = true;
7619 break;
7620 default:
7621 undef = true;
7624 if (undef) {
7625 unallocated_encoding(s);
7626 return;
7629 addr = tcg_temp_new_i32();
7630 tmp = tcg_const_i32(mode);
7631 /* get_r13_banked() will raise an exception if called from System mode */
7632 gen_set_condexec(s);
7633 gen_set_pc_im(s, s->pc_curr);
7634 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7635 tcg_temp_free_i32(tmp);
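/* Compute the address of the lower of the two words to be stored: SRS
* writes LR then SPSR, so decrement-after starts one word below the base,
* decrement-before two words below, and increment-before one word above.
*/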
7636 switch (amode) {
7637 case 0: /* DA */
7638 offset = -4;
7639 break;
7640 case 1: /* IA */
7641 offset = 0;
7642 break;
7643 case 2: /* DB */
7644 offset = -8;
7645 break;
7646 case 3: /* IB */
7647 offset = 4;
7648 break;
7649 default:
7650 abort();
7652 tcg_gen_addi_i32(addr, addr, offset);
7653 tmp = load_reg(s, 14);
7654 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7655 tcg_temp_free_i32(tmp);
7656 tmp = load_cpu_field(spsr);
7657 tcg_gen_addi_i32(addr, addr, 4);
7658 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7659 tcg_temp_free_i32(tmp);
7660 if (writeback) {
7661 switch (amode) {
7662 case 0:
7663 offset = -8;
7664 break;
7665 case 1:
7666 offset = 4;
7667 break;
7668 case 2:
7669 offset = -4;
7670 break;
7671 case 3:
7672 offset = 0;
7673 break;
7674 default:
7675 abort();
7677 tcg_gen_addi_i32(addr, addr, offset);
7678 tmp = tcg_const_i32(mode);
7679 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7680 tcg_temp_free_i32(tmp);
7682 tcg_temp_free_i32(addr);
7683 s->base.is_jmp = DISAS_UPDATE;
7686 /* Generate a label used for skipping this instruction */
7687 static void arm_gen_condlabel(DisasContext *s)
7689 if (!s->condjmp) {
7690 s->condlabel = gen_new_label();
7691 s->condjmp = 1;
7695 /* Skip this instruction if the ARM condition is false */
7696 static void arm_skip_unless(DisasContext *s, uint32_t cond)
7698 arm_gen_condlabel(s);
7699 arm_gen_test_cc(cond ^ 1, s->condlabel);
7702 static void disas_arm_insn(DisasContext *s, unsigned int insn)
7704 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
7705 TCGv_i32 tmp;
7706 TCGv_i32 tmp2;
7707 TCGv_i32 tmp3;
7708 TCGv_i32 addr;
7709 TCGv_i64 tmp64;
7711 /* M variants do not implement ARM mode; this must raise the INVSTATE
7712 * UsageFault exception.
7713 */
7714 if (arm_dc_feature(s, ARM_FEATURE_M)) {
7715 gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
7716 default_exception_el(s));
7717 return;
7719 cond = insn >> 28;
7720 if (cond == 0xf){
7721 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7722 * choose to UNDEF. In ARMv5 and above the space is used
7723 * for miscellaneous unconditional instructions.
7724 */
7725 ARCH(5);
7727 /* Unconditional instructions. */
7728 if (((insn >> 25) & 7) == 1) {
7729 /* NEON Data processing. */
7730 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7731 goto illegal_op;
7734 if (disas_neon_data_insn(s, insn)) {
7735 goto illegal_op;
7737 return;
7739 if ((insn & 0x0f100000) == 0x04000000) {
7740 /* NEON load/store. */
7741 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7742 goto illegal_op;
7745 if (disas_neon_ls_insn(s, insn)) {
7746 goto illegal_op;
7748 return;
7750 if ((insn & 0x0f000e10) == 0x0e000a00) {
7751 /* VFP. */
7752 if (disas_vfp_insn(s, insn)) {
7753 goto illegal_op;
7755 return;
7757 if (((insn & 0x0f30f000) == 0x0510f000) ||
7758 ((insn & 0x0f30f010) == 0x0710f000)) {
7759 if ((insn & (1 << 22)) == 0) {
7760 /* PLDW; v7MP */
7761 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7762 goto illegal_op;
7765 /* Otherwise PLD; v5TE+ */
7766 ARCH(5TE);
7767 return;
7769 if (((insn & 0x0f70f000) == 0x0450f000) ||
7770 ((insn & 0x0f70f010) == 0x0650f000)) {
7771 ARCH(7);
7772 return; /* PLI; V7 */
7774 if (((insn & 0x0f700000) == 0x04100000) ||
7775 ((insn & 0x0f700010) == 0x06100000)) {
7776 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7777 goto illegal_op;
7779 return; /* v7MP: Unallocated memory hint: must NOP */
7782 if ((insn & 0x0ffffdff) == 0x01010000) {
7783 ARCH(6);
7784 /* setend */
7785 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
7786 gen_helper_setend(cpu_env);
7787 s->base.is_jmp = DISAS_UPDATE;
7789 return;
7790 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7791 switch ((insn >> 4) & 0xf) {
7792 case 1: /* clrex */
7793 ARCH(6K);
7794 gen_clrex(s);
7795 return;
7796 case 4: /* dsb */
7797 case 5: /* dmb */
7798 ARCH(7);
7799 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
7800 return;
7801 case 6: /* isb */
7802 /* We need to break the TB after this insn to execute
7803 * self-modifying code correctly and also to take
7804 * any pending interrupts immediately.
7805 */
7806 gen_goto_tb(s, 0, s->base.pc_next);
7807 return;
7808 case 7: /* sb */
7809 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
7810 goto illegal_op;
7811 }
7812 /*
7813 * TODO: There is no speculation barrier opcode
7814 * for TCG; MB and end the TB instead.
7815 */
7816 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
7817 gen_goto_tb(s, 0, s->base.pc_next);
7818 return;
7819 default:
7820 goto illegal_op;
7822 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7823 /* srs */
7824 ARCH(6);
7825 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
7826 return;
7827 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
7828 /* rfe */
7829 int32_t offset;
7830 if (IS_USER(s))
7831 goto illegal_op;
7832 ARCH(6);
7833 rn = (insn >> 16) & 0xf;
7834 addr = load_reg(s, rn);
7835 i = (insn >> 23) & 3;
7836 switch (i) {
7837 case 0: offset = -4; break; /* DA */
7838 case 1: offset = 0; break; /* IA */
7839 case 2: offset = -8; break; /* DB */
7840 case 3: offset = 4; break; /* IB */
7841 default: abort();
7843 if (offset)
7844 tcg_gen_addi_i32(addr, addr, offset);
7845 /* Load PC into tmp and CPSR into tmp2. */
7846 tmp = tcg_temp_new_i32();
7847 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
7848 tcg_gen_addi_i32(addr, addr, 4);
7849 tmp2 = tcg_temp_new_i32();
7850 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
7851 if (insn & (1 << 21)) {
7852 /* Base writeback. */
7853 switch (i) {
7854 case 0: offset = -8; break;
7855 case 1: offset = 4; break;
7856 case 2: offset = -4; break;
7857 case 3: offset = 0; break;
7858 default: abort();
7860 if (offset)
7861 tcg_gen_addi_i32(addr, addr, offset);
7862 store_reg(s, rn, addr);
7863 } else {
7864 tcg_temp_free_i32(addr);
7866 gen_rfe(s, tmp, tmp2);
7867 return;
7868 } else if ((insn & 0x0e000000) == 0x0a000000) {
7869 /* branch link and change to thumb (blx <offset>) */
7870 int32_t offset;
7872 tmp = tcg_temp_new_i32();
7873 tcg_gen_movi_i32(tmp, s->base.pc_next);
7874 store_reg(s, 14, tmp);
7875 /* Sign-extend the 24-bit offset */
7876 offset = (((int32_t)insn) << 8) >> 8;
7877 val = read_pc(s);
7878 /* offset * 4 + bit24 * 2 + (thumb bit) */
7879 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7880 /* protected by ARCH(5); above, near the start of uncond block */
7881 gen_bx_im(s, val);
7882 return;
7883 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7884 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7885 /* iWMMXt register transfer. */
7886 if (extract32(s->c15_cpar, 1, 1)) {
7887 if (!disas_iwmmxt_insn(s, insn)) {
7888 return;
7892 } else if ((insn & 0x0e000a00) == 0x0c000800
7893 && arm_dc_feature(s, ARM_FEATURE_V8)) {
7894 if (disas_neon_insn_3same_ext(s, insn)) {
7895 goto illegal_op;
7897 return;
7898 } else if ((insn & 0x0f000a00) == 0x0e000800
7899 && arm_dc_feature(s, ARM_FEATURE_V8)) {
7900 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
7901 goto illegal_op;
7903 return;
7904 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7905 /* Coprocessor double register transfer. */
7906 ARCH(5TE);
7907 } else if ((insn & 0x0f000010) == 0x0e000010) {
7908 /* Additional coprocessor register transfer. */
7909 } else if ((insn & 0x0ff10020) == 0x01000000) {
7910 uint32_t mask;
7911 uint32_t val;
7912 /* cps (privileged) */
7913 if (IS_USER(s))
7914 return;
7915 mask = val = 0;
7916 if (insn & (1 << 19)) {
7917 if (insn & (1 << 8))
7918 mask |= CPSR_A;
7919 if (insn & (1 << 7))
7920 mask |= CPSR_I;
7921 if (insn & (1 << 6))
7922 mask |= CPSR_F;
7923 if (insn & (1 << 18))
7924 val |= mask;
7926 if (insn & (1 << 17)) {
7927 mask |= CPSR_M;
7928 val |= (insn & 0x1f);
7930 if (mask) {
7931 gen_set_psr_im(s, mask, 0, val);
7933 return;
7935 goto illegal_op;
7937 if (cond != 0xe) {
7938 /* if not always execute, we generate a conditional jump to the
7939 next instruction */
7940 arm_skip_unless(s, cond);
7942 if ((insn & 0x0f900000) == 0x03000000) {
7943 if ((insn & (1 << 21)) == 0) {
7944 ARCH(6T2);
7945 rd = (insn >> 12) & 0xf;
7946 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7947 if ((insn & (1 << 22)) == 0) {
7948 /* MOVW */
7949 tmp = tcg_temp_new_i32();
7950 tcg_gen_movi_i32(tmp, val);
7951 } else {
7952 /* MOVT */
7953 tmp = load_reg(s, rd);
7954 tcg_gen_ext16u_i32(tmp, tmp);
7955 tcg_gen_ori_i32(tmp, tmp, val << 16);
7957 store_reg(s, rd, tmp);
7958 } else {
7959 if (((insn >> 12) & 0xf) != 0xf)
7960 goto illegal_op;
7961 if (((insn >> 16) & 0xf) == 0) {
7962 gen_nop_hint(s, insn & 0xff);
7963 } else {
7964 /* CPSR = immediate */
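/* Standard ARM rotated immediate: an 8-bit value rotated right by twice
* the 4-bit rotate field, e.g. imm12 0x4FF yields ror32(0xFF, 8) == 0xFF000000.
*/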
7965 val = insn & 0xff;
7966 shift = ((insn >> 8) & 0xf) * 2;
7967 val = ror32(val, shift);
7968 i = ((insn & (1 << 22)) != 0);
7969 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
7970 i, val)) {
7971 goto illegal_op;
7975 } else if ((insn & 0x0f900000) == 0x01000000
7976 && (insn & 0x00000090) != 0x00000090) {
7977 /* miscellaneous instructions */
7978 op1 = (insn >> 21) & 3;
7979 sh = (insn >> 4) & 0xf;
7980 rm = insn & 0xf;
7981 switch (sh) {
7982 case 0x0: /* MSR, MRS */
7983 if (insn & (1 << 9)) {
7984 /* MSR (banked) and MRS (banked) */
7985 int sysm = extract32(insn, 16, 4) |
7986 (extract32(insn, 8, 1) << 4);
7987 int r = extract32(insn, 22, 1);
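/* SYSm is bits [19:16] with bit 8 as its top bit; R (bit 22) selects
* the banked SPSR rather than a banked register.
*/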
7989 if (op1 & 1) {
7990 /* MSR (banked) */
7991 gen_msr_banked(s, r, sysm, rm);
7992 } else {
7993 /* MRS (banked) */
7994 int rd = extract32(insn, 12, 4);
7996 gen_mrs_banked(s, r, sysm, rd);
7998 break;
8001 /* MSR, MRS (for PSRs) */
8002 if (op1 & 1) {
8003 /* PSR = reg */
8004 tmp = load_reg(s, rm);
8005 i = ((op1 & 2) != 0);
8006 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
8007 goto illegal_op;
8008 } else {
8009 /* reg = PSR */
8010 rd = (insn >> 12) & 0xf;
8011 if (op1 & 2) {
8012 if (IS_USER(s))
8013 goto illegal_op;
8014 tmp = load_cpu_field(spsr);
8015 } else {
8016 tmp = tcg_temp_new_i32();
8017 gen_helper_cpsr_read(tmp, cpu_env);
8019 store_reg(s, rd, tmp);
8021 break;
8022 case 0x1:
8023 if (op1 == 1) {
8024 /* branch/exchange thumb (bx). */
8025 ARCH(4T);
8026 tmp = load_reg(s, rm);
8027 gen_bx(s, tmp);
8028 } else if (op1 == 3) {
8029 /* clz */
8030 ARCH(5);
8031 rd = (insn >> 12) & 0xf;
8032 tmp = load_reg(s, rm);
8033 tcg_gen_clzi_i32(tmp, tmp, 32);
8034 store_reg(s, rd, tmp);
8035 } else {
8036 goto illegal_op;
8038 break;
8039 case 0x2:
8040 if (op1 == 1) {
8041 ARCH(5J); /* bxj */
8042 /* Trivial implementation equivalent to bx. */
8043 tmp = load_reg(s, rm);
8044 gen_bx(s, tmp);
8045 } else {
8046 goto illegal_op;
8048 break;
8049 case 0x3:
8050 if (op1 != 1)
8051 goto illegal_op;
8053 ARCH(5);
8054 /* branch link/exchange thumb (blx) */
8055 tmp = load_reg(s, rm);
8056 tmp2 = tcg_temp_new_i32();
8057 tcg_gen_movi_i32(tmp2, s->base.pc_next);
8058 store_reg(s, 14, tmp2);
8059 gen_bx(s, tmp);
8060 break;
8061 case 0x4:
8063 /* crc32/crc32c */
8064 uint32_t c = extract32(insn, 8, 4);
8066 /* Check this CPU supports ARMv8 CRC instructions.
8067 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8068 * Bits 8, 10 and 11 should be zero.
8069 */
8070 if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
8071 goto illegal_op;
8074 rn = extract32(insn, 16, 4);
8075 rd = extract32(insn, 12, 4);
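/* op1 selects the data size (1 << op1 bytes: byte, halfword or word);
* insn bit 9 selects the CRC32C (Castagnoli) polynomial instead of CRC32.
*/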
8077 tmp = load_reg(s, rn);
8078 tmp2 = load_reg(s, rm);
8079 if (op1 == 0) {
8080 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8081 } else if (op1 == 1) {
8082 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8084 tmp3 = tcg_const_i32(1 << op1);
8085 if (c & 0x2) {
8086 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8087 } else {
8088 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8090 tcg_temp_free_i32(tmp2);
8091 tcg_temp_free_i32(tmp3);
8092 store_reg(s, rd, tmp);
8093 break;
8095 case 0x5: /* saturating add/subtract */
8096 ARCH(5TE);
8097 rd = (insn >> 12) & 0xf;
8098 rn = (insn >> 16) & 0xf;
8099 tmp = load_reg(s, rm);
8100 tmp2 = load_reg(s, rn);
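/* QADD/QSUB/QDADD/QDSUB: bit 22 (op1 & 2) selects the doubling forms,
* which first saturate-double Rn; bit 21 (op1 & 1) selects subtract.
*/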
8101 if (op1 & 2)
8102 gen_helper_add_saturate(tmp2, cpu_env, tmp2, tmp2);
8103 if (op1 & 1)
8104 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
8105 else
8106 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8107 tcg_temp_free_i32(tmp2);
8108 store_reg(s, rd, tmp);
8109 break;
8110 case 0x6: /* ERET */
8111 if (op1 != 3) {
8112 goto illegal_op;
8114 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
8115 goto illegal_op;
8117 if ((insn & 0x000fff0f) != 0x0000000e) {
8118 /* UNPREDICTABLE; we choose to UNDEF */
8119 goto illegal_op;
8122 if (s->current_el == 2) {
8123 tmp = load_cpu_field(elr_el[2]);
8124 } else {
8125 tmp = load_reg(s, 14);
8127 gen_exception_return(s, tmp);
8128 break;
8129 case 7:
8131 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
8132 switch (op1) {
8133 case 0:
8134 /* HLT */
8135 gen_hlt(s, imm16);
8136 break;
8137 case 1:
8138 /* bkpt */
8139 ARCH(5);
8140 gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm16, false));
8141 break;
8142 case 2:
8143 /* Hypervisor call (v7) */
8144 ARCH(7);
8145 if (IS_USER(s)) {
8146 goto illegal_op;
8148 gen_hvc(s, imm16);
8149 break;
8150 case 3:
8151 /* Secure monitor call (v6+) */
8152 ARCH(6K);
8153 if (IS_USER(s)) {
8154 goto illegal_op;
8156 gen_smc(s);
8157 break;
8158 default:
8159 g_assert_not_reached();
8161 break;
8163 case 0x8: /* signed multiply */
8164 case 0xa:
8165 case 0xc:
8166 case 0xe:
8167 ARCH(5TE);
8168 rs = (insn >> 8) & 0xf;
8169 rn = (insn >> 12) & 0xf;
8170 rd = (insn >> 16) & 0xf;
8171 if (op1 == 1) {
8172 /* (32 * 16) >> 16 */
8173 tmp = load_reg(s, rm);
8174 tmp2 = load_reg(s, rs);
8175 if (sh & 4)
8176 tcg_gen_sari_i32(tmp2, tmp2, 16);
8177 else
8178 gen_sxth(tmp2);
8179 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8180 tcg_gen_shri_i64(tmp64, tmp64, 16);
8181 tmp = tcg_temp_new_i32();
8182 tcg_gen_extrl_i64_i32(tmp, tmp64);
8183 tcg_temp_free_i64(tmp64);
8184 if ((sh & 2) == 0) {
8185 tmp2 = load_reg(s, rn);
8186 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8187 tcg_temp_free_i32(tmp2);
8189 store_reg(s, rd, tmp);
8190 } else {
8191 /* 16 * 16 */
8192 tmp = load_reg(s, rm);
8193 tmp2 = load_reg(s, rs);
8194 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
8195 tcg_temp_free_i32(tmp2);
8196 if (op1 == 2) {
8197 tmp64 = tcg_temp_new_i64();
8198 tcg_gen_ext_i32_i64(tmp64, tmp);
8199 tcg_temp_free_i32(tmp);
8200 gen_addq(s, tmp64, rn, rd);
8201 gen_storeq_reg(s, rn, rd, tmp64);
8202 tcg_temp_free_i64(tmp64);
8203 } else {
8204 if (op1 == 0) {
8205 tmp2 = load_reg(s, rn);
8206 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8207 tcg_temp_free_i32(tmp2);
8209 store_reg(s, rd, tmp);
8212 break;
8213 default:
8214 goto illegal_op;
8216 } else if (((insn & 0x0e000000) == 0 &&
8217 (insn & 0x00000090) != 0x90) ||
8218 ((insn & 0x0e000000) == (1 << 25))) {
8219 int set_cc, logic_cc, shiftop;
8221 op1 = (insn >> 21) & 0xf;
8222 set_cc = (insn >> 20) & 1;
8223 logic_cc = table_logic_cc[op1] & set_cc;
8225 /* data processing instruction */
8226 if (insn & (1 << 25)) {
8227 /* immediate operand */
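/* Same rotated-immediate encoding as for MSR above; for logical ops
* with a non-zero rotation the shifter carry-out (bit 31 of the rotated
* value) is copied into the C flag.
*/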
8228 val = insn & 0xff;
8229 shift = ((insn >> 8) & 0xf) * 2;
8230 val = ror32(val, shift);
8231 tmp2 = tcg_temp_new_i32();
8232 tcg_gen_movi_i32(tmp2, val);
8233 if (logic_cc && shift) {
8234 gen_set_CF_bit31(tmp2);
8236 } else {
8237 /* register */
8238 rm = (insn) & 0xf;
8239 tmp2 = load_reg(s, rm);
8240 shiftop = (insn >> 5) & 3;
8241 if (!(insn & (1 << 4))) {
8242 shift = (insn >> 7) & 0x1f;
8243 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8244 } else {
8245 rs = (insn >> 8) & 0xf;
8246 tmp = load_reg(s, rs);
8247 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
8250 if (op1 != 0x0f && op1 != 0x0d) {
8251 rn = (insn >> 16) & 0xf;
8252 tmp = load_reg(s, rn);
8253 } else {
8254 tmp = NULL;
8256 rd = (insn >> 12) & 0xf;
8257 switch(op1) {
8258 case 0x00:
8259 tcg_gen_and_i32(tmp, tmp, tmp2);
8260 if (logic_cc) {
8261 gen_logic_CC(tmp);
8263 store_reg_bx(s, rd, tmp);
8264 break;
8265 case 0x01:
8266 tcg_gen_xor_i32(tmp, tmp, tmp2);
8267 if (logic_cc) {
8268 gen_logic_CC(tmp);
8270 store_reg_bx(s, rd, tmp);
8271 break;
8272 case 0x02:
8273 if (set_cc && rd == 15) {
8274 /* SUBS r15, ... is used for exception return. */
8275 if (IS_USER(s)) {
8276 goto illegal_op;
8278 gen_sub_CC(tmp, tmp, tmp2);
8279 gen_exception_return(s, tmp);
8280 } else {
8281 if (set_cc) {
8282 gen_sub_CC(tmp, tmp, tmp2);
8283 } else {
8284 tcg_gen_sub_i32(tmp, tmp, tmp2);
8286 store_reg_bx(s, rd, tmp);
8288 break;
8289 case 0x03:
8290 if (set_cc) {
8291 gen_sub_CC(tmp, tmp2, tmp);
8292 } else {
8293 tcg_gen_sub_i32(tmp, tmp2, tmp);
8295 store_reg_bx(s, rd, tmp);
8296 break;
8297 case 0x04:
8298 if (set_cc) {
8299 gen_add_CC(tmp, tmp, tmp2);
8300 } else {
8301 tcg_gen_add_i32(tmp, tmp, tmp2);
8303 store_reg_bx(s, rd, tmp);
8304 break;
8305 case 0x05:
8306 if (set_cc) {
8307 gen_adc_CC(tmp, tmp, tmp2);
8308 } else {
8309 gen_add_carry(tmp, tmp, tmp2);
8311 store_reg_bx(s, rd, tmp);
8312 break;
8313 case 0x06:
8314 if (set_cc) {
8315 gen_sbc_CC(tmp, tmp, tmp2);
8316 } else {
8317 gen_sub_carry(tmp, tmp, tmp2);
8319 store_reg_bx(s, rd, tmp);
8320 break;
8321 case 0x07:
8322 if (set_cc) {
8323 gen_sbc_CC(tmp, tmp2, tmp);
8324 } else {
8325 gen_sub_carry(tmp, tmp2, tmp);
8327 store_reg_bx(s, rd, tmp);
8328 break;
8329 case 0x08:
8330 if (set_cc) {
8331 tcg_gen_and_i32(tmp, tmp, tmp2);
8332 gen_logic_CC(tmp);
8334 tcg_temp_free_i32(tmp);
8335 break;
8336 case 0x09:
8337 if (set_cc) {
8338 tcg_gen_xor_i32(tmp, tmp, tmp2);
8339 gen_logic_CC(tmp);
8341 tcg_temp_free_i32(tmp);
8342 break;
8343 case 0x0a:
8344 if (set_cc) {
8345 gen_sub_CC(tmp, tmp, tmp2);
8347 tcg_temp_free_i32(tmp);
8348 break;
8349 case 0x0b:
8350 if (set_cc) {
8351 gen_add_CC(tmp, tmp, tmp2);
8353 tcg_temp_free_i32(tmp);
8354 break;
8355 case 0x0c:
8356 tcg_gen_or_i32(tmp, tmp, tmp2);
8357 if (logic_cc) {
8358 gen_logic_CC(tmp);
8360 store_reg_bx(s, rd, tmp);
8361 break;
8362 case 0x0d:
8363 if (logic_cc && rd == 15) {
8364 /* MOVS r15, ... is used for exception return. */
8365 if (IS_USER(s)) {
8366 goto illegal_op;
8368 gen_exception_return(s, tmp2);
8369 } else {
8370 if (logic_cc) {
8371 gen_logic_CC(tmp2);
8373 store_reg_bx(s, rd, tmp2);
8375 break;
8376 case 0x0e:
8377 tcg_gen_andc_i32(tmp, tmp, tmp2);
8378 if (logic_cc) {
8379 gen_logic_CC(tmp);
8381 store_reg_bx(s, rd, tmp);
8382 break;
8383 default:
8384 case 0x0f:
8385 tcg_gen_not_i32(tmp2, tmp2);
8386 if (logic_cc) {
8387 gen_logic_CC(tmp2);
8389 store_reg_bx(s, rd, tmp2);
8390 break;
8392 if (op1 != 0x0f && op1 != 0x0d) {
8393 tcg_temp_free_i32(tmp2);
8395 } else {
8396 /* other instructions */
8397 op1 = (insn >> 24) & 0xf;
8398 switch(op1) {
8399 case 0x0:
8400 case 0x1:
8401 /* multiplies, extra load/stores */
8402 sh = (insn >> 5) & 3;
8403 if (sh == 0) {
8404 if (op1 == 0x0) {
8405 rd = (insn >> 16) & 0xf;
8406 rn = (insn >> 12) & 0xf;
8407 rs = (insn >> 8) & 0xf;
8408 rm = (insn) & 0xf;
8409 op1 = (insn >> 20) & 0xf;
8410 switch (op1) {
8411 case 0: case 1: case 2: case 3: case 6:
8412 /* 32 bit mul */
8413 tmp = load_reg(s, rs);
8414 tmp2 = load_reg(s, rm);
8415 tcg_gen_mul_i32(tmp, tmp, tmp2);
8416 tcg_temp_free_i32(tmp2);
8417 if (insn & (1 << 22)) {
8418 /* Subtract (mls) */
8419 ARCH(6T2);
8420 tmp2 = load_reg(s, rn);
8421 tcg_gen_sub_i32(tmp, tmp2, tmp);
8422 tcg_temp_free_i32(tmp2);
8423 } else if (insn & (1 << 21)) {
8424 /* Add */
8425 tmp2 = load_reg(s, rn);
8426 tcg_gen_add_i32(tmp, tmp, tmp2);
8427 tcg_temp_free_i32(tmp2);
8429 if (insn & (1 << 20))
8430 gen_logic_CC(tmp);
8431 store_reg(s, rd, tmp);
8432 break;
8433 case 4:
8434 /* 64 bit mul double accumulate (UMAAL) */
8435 ARCH(6);
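/* UMAAL adds both 32-bit halves of the destination into the 64-bit product;
 * the result cannot overflow 64 bits, since
 * (2^32 - 1)^2 + 2 * (2^32 - 1) = 2^64 - 1.
 */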
8436 tmp = load_reg(s, rs);
8437 tmp2 = load_reg(s, rm);
8438 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8439 gen_addq_lo(s, tmp64, rn);
8440 gen_addq_lo(s, tmp64, rd);
8441 gen_storeq_reg(s, rn, rd, tmp64);
8442 tcg_temp_free_i64(tmp64);
8443 break;
8444 case 8: case 9: case 10: case 11:
8445 case 12: case 13: case 14: case 15:
8446 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8447 tmp = load_reg(s, rs);
8448 tmp2 = load_reg(s, rm);
8449 if (insn & (1 << 22)) {
8450 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8451 } else {
8452 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8454 if (insn & (1 << 21)) { /* mult accumulate */
8455 TCGv_i32 al = load_reg(s, rn);
8456 TCGv_i32 ah = load_reg(s, rd);
8457 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
8458 tcg_temp_free_i32(al);
8459 tcg_temp_free_i32(ah);
8461 if (insn & (1 << 20)) {
8462 gen_logicq_cc(tmp, tmp2);
8464 store_reg(s, rn, tmp);
8465 store_reg(s, rd, tmp2);
8466 break;
8467 default:
8468 goto illegal_op;
8470 } else {
8471 rn = (insn >> 16) & 0xf;
8472 rd = (insn >> 12) & 0xf;
8473 if (insn & (1 << 23)) {
8474 /* load/store exclusive */
8475 bool is_ld = extract32(insn, 20, 1);
8476 bool is_lasr = !extract32(insn, 8, 1);
8477 int op2 = (insn >> 8) & 3;
8478 op1 = (insn >> 21) & 0x3;
8480 switch (op2) {
8481 case 0: /* lda/stl */
8482 if (op1 == 1) {
8483 goto illegal_op;
8485 ARCH(8);
8486 break;
8487 case 1: /* reserved */
8488 goto illegal_op;
8489 case 2: /* ldaex/stlex */
8490 ARCH(8);
8491 break;
8492 case 3: /* ldrex/strex */
8493 if (op1) {
8494 ARCH(6K);
8495 } else {
8496 ARCH(6);
8497 }
8498 break;
8499 }
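/* The address below is held in a local temp because gen_store_exclusive()
 * emits conditional branches, and ordinary TCG temps are not guaranteed to
 * keep their value across a branch.
 */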
8501 addr = tcg_temp_local_new_i32();
8502 load_reg_var(s, addr, rn);
8504 if (is_lasr && !is_ld) {
8505 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
8508 if (op2 == 0) {
8509 if (is_ld) {
8510 tmp = tcg_temp_new_i32();
8511 switch (op1) {
8512 case 0: /* lda */
8513 gen_aa32_ld32u_iss(s, tmp, addr,
8514 get_mem_index(s),
8515 rd | ISSIsAcqRel);
8516 break;
8517 case 2: /* ldab */
8518 gen_aa32_ld8u_iss(s, tmp, addr,
8519 get_mem_index(s),
8520 rd | ISSIsAcqRel);
8521 break;
8522 case 3: /* ldah */
8523 gen_aa32_ld16u_iss(s, tmp, addr,
8524 get_mem_index(s),
8525 rd | ISSIsAcqRel);
8526 break;
8527 default:
8528 abort();
8530 store_reg(s, rd, tmp);
8531 } else {
8532 rm = insn & 0xf;
8533 tmp = load_reg(s, rm);
8534 switch (op1) {
8535 case 0: /* stl */
8536 gen_aa32_st32_iss(s, tmp, addr,
8537 get_mem_index(s),
8538 rm | ISSIsAcqRel);
8539 break;
8540 case 2: /* stlb */
8541 gen_aa32_st8_iss(s, tmp, addr,
8542 get_mem_index(s),
8543 rm | ISSIsAcqRel);
8544 break;
8545 case 3: /* stlh */
8546 gen_aa32_st16_iss(s, tmp, addr,
8547 get_mem_index(s),
8548 rm | ISSIsAcqRel);
8549 break;
8550 default:
8551 abort();
8553 tcg_temp_free_i32(tmp);
8555 } else if (is_ld) {
8556 switch (op1) {
8557 case 0: /* ldrex */
8558 gen_load_exclusive(s, rd, 15, addr, 2);
8559 break;
8560 case 1: /* ldrexd */
8561 gen_load_exclusive(s, rd, rd + 1, addr, 3);
8562 break;
8563 case 2: /* ldrexb */
8564 gen_load_exclusive(s, rd, 15, addr, 0);
8565 break;
8566 case 3: /* ldrexh */
8567 gen_load_exclusive(s, rd, 15, addr, 1);
8568 break;
8569 default:
8570 abort();
8572 } else {
8573 rm = insn & 0xf;
8574 switch (op1) {
8575 case 0: /* strex */
8576 gen_store_exclusive(s, rd, rm, 15, addr, 2);
8577 break;
8578 case 1: /* strexd */
8579 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
8580 break;
8581 case 2: /* strexb */
8582 gen_store_exclusive(s, rd, rm, 15, addr, 0);
8583 break;
8584 case 3: /* strexh */
8585 gen_store_exclusive(s, rd, rm, 15, addr, 1);
8586 break;
8587 default:
8588 abort();
8591 tcg_temp_free_i32(addr);
8593 if (is_lasr && is_ld) {
8594 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
8596 } else if ((insn & 0x00300f00) == 0) {
8597 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
8598 * - SWP, SWPB
8599 */
8601 TCGv taddr;
8602 TCGMemOp opc = s->be_data;
8604 rm = (insn) & 0xf;
8606 if (insn & (1 << 22)) {
8607 opc |= MO_UB;
8608 } else {
8609 opc |= MO_UL | MO_ALIGN;
8612 addr = load_reg(s, rn);
8613 taddr = gen_aa32_addr(s, addr, opc);
8614 tcg_temp_free_i32(addr);
8616 tmp = load_reg(s, rm);
8617 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
8618 get_mem_index(s), opc);
8619 tcg_temp_free(taddr);
8620 store_reg(s, rd, tmp);
8621 } else {
8622 goto illegal_op;
8625 } else {
8626 int address_offset;
8627 bool load = insn & (1 << 20);
8628 bool wbit = insn & (1 << 21);
8629 bool pbit = insn & (1 << 24);
8630 bool doubleword = false;
8631 ISSInfo issinfo;
8633 /* Misc load/store */
8634 rn = (insn >> 16) & 0xf;
8635 rd = (insn >> 12) & 0xf;
8637 /* ISS not valid if writeback */
8638 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
8640 if (!load && (sh & 2)) {
8641 /* doubleword */
8642 ARCH(5TE);
8643 if (rd & 1) {
8644 /* UNPREDICTABLE; we choose to UNDEF */
8645 goto illegal_op;
8647 load = (sh & 1) == 0;
8648 doubleword = true;
8651 addr = load_reg(s, rn);
8652 if (pbit) {
8653 gen_add_datah_offset(s, insn, 0, addr);
8655 address_offset = 0;
8657 if (doubleword) {
8658 if (!load) {
8659 /* store */
8660 tmp = load_reg(s, rd);
8661 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8662 tcg_temp_free_i32(tmp);
8663 tcg_gen_addi_i32(addr, addr, 4);
8664 tmp = load_reg(s, rd + 1);
8665 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8666 tcg_temp_free_i32(tmp);
8667 } else {
8668 /* load */
8669 tmp = tcg_temp_new_i32();
8670 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8671 store_reg(s, rd, tmp);
8672 tcg_gen_addi_i32(addr, addr, 4);
8673 tmp = tcg_temp_new_i32();
8674 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8675 rd++;
8677 address_offset = -4;
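/* The address was advanced by 4 for the second word, so compensate here so
 * that any base writeback below produces the architecturally correct value.
 */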
8678 } else if (load) {
8679 /* load */
8680 tmp = tcg_temp_new_i32();
8681 switch (sh) {
8682 case 1:
8683 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
8684 issinfo);
8685 break;
8686 case 2:
8687 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
8688 issinfo);
8689 break;
8690 default:
8691 case 3:
8692 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
8693 issinfo);
8694 break;
8696 } else {
8697 /* store */
8698 tmp = load_reg(s, rd);
8699 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
8700 tcg_temp_free_i32(tmp);
8702 /* Perform base writeback before the loaded value to
8703 ensure correct behavior with overlapping index registers.
8704 ldrd with base writeback is undefined if the
8705 destination and index registers overlap. */
8706 if (!pbit) {
8707 gen_add_datah_offset(s, insn, address_offset, addr);
8708 store_reg(s, rn, addr);
8709 } else if (wbit) {
8710 if (address_offset)
8711 tcg_gen_addi_i32(addr, addr, address_offset);
8712 store_reg(s, rn, addr);
8713 } else {
8714 tcg_temp_free_i32(addr);
8716 if (load) {
8717 /* Complete the load. */
8718 store_reg(s, rd, tmp);
8721 break;
8722 case 0x4:
8723 case 0x5:
8724 goto do_ldst;
8725 case 0x6:
8726 case 0x7:
8727 if (insn & (1 << 4)) {
8728 ARCH(6);
8729 /* Armv6 Media instructions. */
8730 rm = insn & 0xf;
8731 rn = (insn >> 16) & 0xf;
8732 rd = (insn >> 12) & 0xf;
8733 rs = (insn >> 8) & 0xf;
8734 switch ((insn >> 23) & 3) {
8735 case 0: /* Parallel add/subtract. */
8736 op1 = (insn >> 20) & 7;
8737 tmp = load_reg(s, rn);
8738 tmp2 = load_reg(s, rm);
8739 sh = (insn >> 5) & 7;
8740 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8741 goto illegal_op;
8742 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
8743 tcg_temp_free_i32(tmp2);
8744 store_reg(s, rd, tmp);
8745 break;
8746 case 1:
8747 if ((insn & 0x00700020) == 0) {
8748 /* Halfword pack. */
8749 tmp = load_reg(s, rn);
8750 tmp2 = load_reg(s, rm);
8751 shift = (insn >> 7) & 0x1f;
8752 if (insn & (1 << 6)) {
8753 /* pkhtb */
8754 if (shift == 0) {
8755 shift = 31;
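/* An immediate shift field of 0 encodes ASR #32; ASR #31 yields the same
 * low 16 bits, which are all that the deposit below uses.
 */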
8757 tcg_gen_sari_i32(tmp2, tmp2, shift);
8758 tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
8759 } else {
8760 /* pkhbt */
8761 tcg_gen_shli_i32(tmp2, tmp2, shift);
8762 tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
8764 tcg_temp_free_i32(tmp2);
8765 store_reg(s, rd, tmp);
8766 } else if ((insn & 0x00200020) == 0x00200000) {
8767 /* [us]sat */
8768 tmp = load_reg(s, rm);
8769 shift = (insn >> 7) & 0x1f;
8770 if (insn & (1 << 6)) {
8771 if (shift == 0)
8772 shift = 31;
8773 tcg_gen_sari_i32(tmp, tmp, shift);
8774 } else {
8775 tcg_gen_shli_i32(tmp, tmp, shift);
8777 sh = (insn >> 16) & 0x1f;
8778 tmp2 = tcg_const_i32(sh);
8779 if (insn & (1 << 22))
8780 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
8781 else
8782 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
8783 tcg_temp_free_i32(tmp2);
8784 store_reg(s, rd, tmp);
8785 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8786 /* [us]sat16 */
8787 tmp = load_reg(s, rm);
8788 sh = (insn >> 16) & 0x1f;
8789 tmp2 = tcg_const_i32(sh);
8790 if (insn & (1 << 22))
8791 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
8792 else
8793 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
8794 tcg_temp_free_i32(tmp2);
8795 store_reg(s, rd, tmp);
8796 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8797 /* Select bytes. */
8798 tmp = load_reg(s, rn);
8799 tmp2 = load_reg(s, rm);
8800 tmp3 = tcg_temp_new_i32();
8801 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8802 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8803 tcg_temp_free_i32(tmp3);
8804 tcg_temp_free_i32(tmp2);
8805 store_reg(s, rd, tmp);
8806 } else if ((insn & 0x000003e0) == 0x00000060) {
8807 tmp = load_reg(s, rm);
8808 shift = (insn >> 10) & 3;
8809 /* ??? In many cases it's not necessary to do a
8810 rotate, a shift is sufficient. */
8811 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8812 op1 = (insn >> 20) & 7;
8813 switch (op1) {
8814 case 0: gen_sxtb16(tmp); break;
8815 case 2: gen_sxtb(tmp); break;
8816 case 3: gen_sxth(tmp); break;
8817 case 4: gen_uxtb16(tmp); break;
8818 case 6: gen_uxtb(tmp); break;
8819 case 7: gen_uxth(tmp); break;
8820 default: goto illegal_op;
8822 if (rn != 15) {
8823 tmp2 = load_reg(s, rn);
8824 if ((op1 & 3) == 0) {
8825 gen_add16(tmp, tmp2);
8826 } else {
8827 tcg_gen_add_i32(tmp, tmp, tmp2);
8828 tcg_temp_free_i32(tmp2);
8831 store_reg(s, rd, tmp);
8832 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8833 /* rev */
8834 tmp = load_reg(s, rm);
8835 if (insn & (1 << 22)) {
8836 if (insn & (1 << 7)) {
8837 gen_revsh(tmp);
8838 } else {
8839 ARCH(6T2);
8840 gen_helper_rbit(tmp, tmp);
8842 } else {
8843 if (insn & (1 << 7))
8844 gen_rev16(tmp);
8845 else
8846 tcg_gen_bswap32_i32(tmp, tmp);
8848 store_reg(s, rd, tmp);
8849 } else {
8850 goto illegal_op;
8852 break;
8853 case 2: /* Multiplies (Type 3). */
8854 switch ((insn >> 20) & 0x7) {
8855 case 5:
8856 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8857 /* op2 not 00x or 11x : UNDEF */
8858 goto illegal_op;
8860 /* Signed multiply most significant [accumulate].
8861 (SMMUL, SMMLA, SMMLS) */
8862 tmp = load_reg(s, rm);
8863 tmp2 = load_reg(s, rs);
8864 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8866 if (rd != 15) {
8867 tmp = load_reg(s, rd);
8868 if (insn & (1 << 6)) {
8869 tmp64 = gen_subq_msw(tmp64, tmp);
8870 } else {
8871 tmp64 = gen_addq_msw(tmp64, tmp);
8874 if (insn & (1 << 5)) {
8875 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
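/* Bit 5 is the 'R' (round) bit of SMMUL/SMMLA/SMMLS: adding 0x80000000
 * before taking the top 32 bits rounds the result to nearest.
 */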
8877 tcg_gen_shri_i64(tmp64, tmp64, 32);
8878 tmp = tcg_temp_new_i32();
8879 tcg_gen_extrl_i64_i32(tmp, tmp64);
8880 tcg_temp_free_i64(tmp64);
8881 store_reg(s, rn, tmp);
8882 break;
8883 case 0:
8884 case 4:
8885 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8886 if (insn & (1 << 7)) {
8887 goto illegal_op;
8889 tmp = load_reg(s, rm);
8890 tmp2 = load_reg(s, rs);
8891 if (insn & (1 << 5))
8892 gen_swap_half(tmp2);
8893 gen_smul_dual(tmp, tmp2);
8894 if (insn & (1 << 22)) {
8895 /* smlald, smlsld */
8896 TCGv_i64 tmp64_2;
8898 tmp64 = tcg_temp_new_i64();
8899 tmp64_2 = tcg_temp_new_i64();
8900 tcg_gen_ext_i32_i64(tmp64, tmp);
8901 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
8902 tcg_temp_free_i32(tmp);
8903 tcg_temp_free_i32(tmp2);
8904 if (insn & (1 << 6)) {
8905 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8906 } else {
8907 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8909 tcg_temp_free_i64(tmp64_2);
8910 gen_addq(s, tmp64, rd, rn);
8911 gen_storeq_reg(s, rd, rn, tmp64);
8912 tcg_temp_free_i64(tmp64);
8913 } else {
8914 /* smuad, smusd, smlad, smlsd */
8915 if (insn & (1 << 6)) {
8916 /* This subtraction cannot overflow. */
8917 tcg_gen_sub_i32(tmp, tmp, tmp2);
8918 } else {
8919 /* This addition cannot overflow 32 bits;
8920 * however it may overflow considered as a
8921 * signed operation, in which case we must set
8922 * the Q flag.
8923 */
8924 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8926 tcg_temp_free_i32(tmp2);
8927 if (rd != 15)
8928 {
8929 tmp2 = load_reg(s, rd);
8930 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8931 tcg_temp_free_i32(tmp2);
8932 }
8933 store_reg(s, rn, tmp);
8935 break;
8936 case 1:
8937 case 3:
8938 /* SDIV, UDIV */
8939 if (!dc_isar_feature(arm_div, s)) {
8940 goto illegal_op;
8942 if (((insn >> 5) & 7) || (rd != 15)) {
8943 goto illegal_op;
8945 tmp = load_reg(s, rm);
8946 tmp2 = load_reg(s, rs);
8947 if (insn & (1 << 21)) {
8948 gen_helper_udiv(tmp, tmp, tmp2);
8949 } else {
8950 gen_helper_sdiv(tmp, tmp, tmp2);
8952 tcg_temp_free_i32(tmp2);
8953 store_reg(s, rn, tmp);
8954 break;
8955 default:
8956 goto illegal_op;
8958 break;
8959 case 3:
8960 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8961 switch (op1) {
8962 case 0: /* Unsigned sum of absolute differences. */
8963 ARCH(6);
8964 tmp = load_reg(s, rm);
8965 tmp2 = load_reg(s, rs);
8966 gen_helper_usad8(tmp, tmp, tmp2);
8967 tcg_temp_free_i32(tmp2);
8968 if (rd != 15) {
8969 tmp2 = load_reg(s, rd);
8970 tcg_gen_add_i32(tmp, tmp, tmp2);
8971 tcg_temp_free_i32(tmp2);
8973 store_reg(s, rn, tmp);
8974 break;
8975 case 0x20: case 0x24: case 0x28: case 0x2c:
8976 /* Bitfield insert/clear. */
8977 ARCH(6T2);
8978 shift = (insn >> 7) & 0x1f;
8979 i = (insn >> 16) & 0x1f;
8980 if (i < shift) {
8981 /* UNPREDICTABLE; we choose to UNDEF */
8982 goto illegal_op;
8984 i = i + 1 - shift;
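/* shift is the lsb of the field and i is now its width (msb - lsb + 1). */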
8985 if (rm == 15) {
8986 tmp = tcg_temp_new_i32();
8987 tcg_gen_movi_i32(tmp, 0);
8988 } else {
8989 tmp = load_reg(s, rm);
8991 if (i != 32) {
8992 tmp2 = load_reg(s, rd);
8993 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
8994 tcg_temp_free_i32(tmp2);
8996 store_reg(s, rd, tmp);
8997 break;
8998 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8999 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
9000 ARCH(6T2);
9001 tmp = load_reg(s, rm);
9002 shift = (insn >> 7) & 0x1f;
9003 i = ((insn >> 16) & 0x1f) + 1;
9004 if (shift + i > 32)
9005 goto illegal_op;
9006 if (i < 32) {
9007 if (op1 & 0x20) {
9008 tcg_gen_extract_i32(tmp, tmp, shift, i);
9009 } else {
9010 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9013 store_reg(s, rd, tmp);
9014 break;
9015 default:
9016 goto illegal_op;
9018 break;
9020 break;
9022 do_ldst:
9023 /* Check for undefined extension instructions
9024 * per the ARM Bible IE:
9025 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9026 */
9027 sh = (0xf << 20) | (0xf << 4);
9028 if (op1 == 0x7 && ((insn & sh) == sh))
9029 {
9030 goto illegal_op;
9031 }
9032 /* load/store byte/word */
9033 rn = (insn >> 16) & 0xf;
9034 rd = (insn >> 12) & 0xf;
9035 tmp2 = load_reg(s, rn);
9036 if ((insn & 0x01200000) == 0x00200000) {
9037 /* ldrt/strt */
9038 i = get_a32_user_mem_index(s);
9039 } else {
9040 i = get_mem_index(s);
9042 if (insn & (1 << 24))
9043 gen_add_data_offset(s, insn, tmp2);
9044 if (insn & (1 << 20)) {
9045 /* load */
9046 tmp = tcg_temp_new_i32();
9047 if (insn & (1 << 22)) {
9048 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9049 } else {
9050 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9052 } else {
9053 /* store */
9054 tmp = load_reg(s, rd);
9055 if (insn & (1 << 22)) {
9056 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
9057 } else {
9058 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
9060 tcg_temp_free_i32(tmp);
9062 if (!(insn & (1 << 24))) {
9063 gen_add_data_offset(s, insn, tmp2);
9064 store_reg(s, rn, tmp2);
9065 } else if (insn & (1 << 21)) {
9066 store_reg(s, rn, tmp2);
9067 } else {
9068 tcg_temp_free_i32(tmp2);
9070 if (insn & (1 << 20)) {
9071 /* Complete the load. */
9072 store_reg_from_load(s, rd, tmp);
9074 break;
9075 case 0x08:
9076 case 0x09:
9078 int j, n, loaded_base;
9079 bool exc_return = false;
9080 bool is_load = extract32(insn, 20, 1);
9081 bool user = false;
9082 TCGv_i32 loaded_var;
9083 /* load/store multiple words */
9084 /* XXX: store correct base if write back */
9085 if (insn & (1 << 22)) {
9086 /* LDM (user), LDM (exception return) and STM (user) */
9087 if (IS_USER(s))
9088 goto illegal_op; /* only usable in supervisor mode */
9090 if (is_load && extract32(insn, 15, 1)) {
9091 exc_return = true;
9092 } else {
9093 user = true;
9096 rn = (insn >> 16) & 0xf;
9097 addr = load_reg(s, rn);
9099 /* compute total size */
9100 loaded_base = 0;
9101 loaded_var = NULL;
9102 n = 0;
9103 for (i = 0; i < 16; i++) {
9104 if (insn & (1 << i))
9105 n++;
9107 /* XXX: test invalid n == 0 case ? */
9108 if (insn & (1 << 23)) {
9109 if (insn & (1 << 24)) {
9110 /* pre increment */
9111 tcg_gen_addi_i32(addr, addr, 4);
9112 } else {
9113 /* post increment */
9115 } else {
9116 if (insn & (1 << 24)) {
9117 /* pre decrement */
9118 tcg_gen_addi_i32(addr, addr, -(n * 4));
9119 } else {
9120 /* post decrement */
9121 if (n != 1)
9122 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9125 j = 0;
9126 for (i = 0; i < 16; i++) {
9127 if (insn & (1 << i)) {
9128 if (is_load) {
9129 /* load */
9130 tmp = tcg_temp_new_i32();
9131 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9132 if (user) {
9133 tmp2 = tcg_const_i32(i);
9134 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
9135 tcg_temp_free_i32(tmp2);
9136 tcg_temp_free_i32(tmp);
9137 } else if (i == rn) {
9138 loaded_var = tmp;
9139 loaded_base = 1;
9140 } else if (i == 15 && exc_return) {
9141 store_pc_exc_ret(s, tmp);
9142 } else {
9143 store_reg_from_load(s, i, tmp);
9145 } else {
9146 /* store */
9147 if (i == 15) {
9148 tmp = tcg_temp_new_i32();
9149 tcg_gen_movi_i32(tmp, read_pc(s));
9150 } else if (user) {
9151 tmp = tcg_temp_new_i32();
9152 tmp2 = tcg_const_i32(i);
9153 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
9154 tcg_temp_free_i32(tmp2);
9155 } else {
9156 tmp = load_reg(s, i);
9158 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9159 tcg_temp_free_i32(tmp);
9161 j++;
9162 /* no need to add after the last transfer */
9163 if (j != n)
9164 tcg_gen_addi_i32(addr, addr, 4);
9167 if (insn & (1 << 21)) {
9168 /* write back */
9169 if (insn & (1 << 23)) {
9170 if (insn & (1 << 24)) {
9171 /* pre increment */
9172 } else {
9173 /* post increment */
9174 tcg_gen_addi_i32(addr, addr, 4);
9176 } else {
9177 if (insn & (1 << 24)) {
9178 /* pre decrement */
9179 if (n != 1)
9180 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9181 } else {
9182 /* post decrement */
9183 tcg_gen_addi_i32(addr, addr, -(n * 4));
9186 store_reg(s, rn, addr);
9187 } else {
9188 tcg_temp_free_i32(addr);
9190 if (loaded_base) {
9191 store_reg(s, rn, loaded_var);
9193 if (exc_return) {
9194 /* Restore CPSR from SPSR. */
9195 tmp = load_cpu_field(spsr);
9196 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9197 gen_io_start();
9199 gen_helper_cpsr_write_eret(cpu_env, tmp);
9200 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9201 gen_io_end();
9203 tcg_temp_free_i32(tmp);
9204 /* Must exit loop to check un-masked IRQs */
9205 s->base.is_jmp = DISAS_EXIT;
9208 break;
9209 case 0xa:
9210 case 0xb:
9212 int32_t offset;
9214 /* branch (and link) */
9215 if (insn & (1 << 24)) {
9216 tmp = tcg_temp_new_i32();
9217 tcg_gen_movi_i32(tmp, s->base.pc_next);
9218 store_reg(s, 14, tmp);
9220 offset = sextract32(insn << 2, 0, 26);
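/* The 24-bit immediate is shifted left by 2 and sign-extended, giving a
 * byte offset of +/-32MB relative to the PC.
 */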
9221 gen_jmp(s, read_pc(s) + offset);
9223 break;
9224 case 0xc:
9225 case 0xd:
9226 case 0xe:
9227 if (((insn >> 8) & 0xe) == 10) {
9228 /* VFP. */
9229 if (disas_vfp_insn(s, insn)) {
9230 goto illegal_op;
9232 } else if (disas_coproc_insn(s, insn)) {
9233 /* Coprocessor. */
9234 goto illegal_op;
9236 break;
9237 case 0xf:
9238 /* swi */
9239 gen_set_pc_im(s, s->base.pc_next);
9240 s->svc_imm = extract32(insn, 0, 24);
9241 s->base.is_jmp = DISAS_SWI;
9242 break;
9243 default:
9244 illegal_op:
9245 unallocated_encoding(s);
9246 break;
9251 static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
9252 {
9253 /*
9254 * Return true if this is a 16 bit instruction. We must be precise
9255 * about this (matching the decode).
9256 */
9257 if ((insn >> 11) < 0x1d) {
9258 /* Definitely a 16-bit instruction */
9259 return true;
9260 }
9262 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
9263 * first half of a 32-bit Thumb insn. Thumb-1 cores might
9264 * end up actually treating this as two 16-bit insns, though,
9265 * if it's half of a bl/blx pair that might span a page boundary.
9266 */
9267 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
9268 arm_dc_feature(s, ARM_FEATURE_M)) {
9269 /* Thumb2 cores (including all M profile ones) always treat
9270 * 32-bit insns as 32-bit.
9271 */
9272 return false;
9273 }
9275 if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
9276 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
9277 * is not on the next page; we merge this into a 32-bit
9278 * insn.
9279 */
9280 return false;
9281 }
9282 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
9283 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
9284 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
9285 * -- handle as single 16 bit insn
9286 */
9287 return true;
9288 }
9290 /* Return true if this is a Thumb-2 logical op. */
9291 static int
9292 thumb2_logic_op(int op)
9293 {
9294 return (op < 8);
9295 }
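/* Opcodes below 8 are the logical operations (AND/TST, BIC, ORR/MOV,
 * ORN/MVN, EOR/TEQ); for these the carry flag comes from the shifter
 * rather than from the ALU.
 */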
9297 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9298 then set condition code flags based on the result of the operation.
9299 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9300 to the high bit of T1.
9301 Returns zero if the opcode is valid. */
9303 static int
9304 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9305 TCGv_i32 t0, TCGv_i32 t1)
9306 {
9307 int logic_cc;
9309 logic_cc = 0;
9310 switch (op) {
9311 case 0: /* and */
9312 tcg_gen_and_i32(t0, t0, t1);
9313 logic_cc = conds;
9314 break;
9315 case 1: /* bic */
9316 tcg_gen_andc_i32(t0, t0, t1);
9317 logic_cc = conds;
9318 break;
9319 case 2: /* orr */
9320 tcg_gen_or_i32(t0, t0, t1);
9321 logic_cc = conds;
9322 break;
9323 case 3: /* orn */
9324 tcg_gen_orc_i32(t0, t0, t1);
9325 logic_cc = conds;
9326 break;
9327 case 4: /* eor */
9328 tcg_gen_xor_i32(t0, t0, t1);
9329 logic_cc = conds;
9330 break;
9331 case 8: /* add */
9332 if (conds)
9333 gen_add_CC(t0, t0, t1);
9334 else
9335 tcg_gen_add_i32(t0, t0, t1);
9336 break;
9337 case 10: /* adc */
9338 if (conds)
9339 gen_adc_CC(t0, t0, t1);
9340 else
9341 gen_adc(t0, t1);
9342 break;
9343 case 11: /* sbc */
9344 if (conds) {
9345 gen_sbc_CC(t0, t0, t1);
9346 } else {
9347 gen_sub_carry(t0, t0, t1);
9349 break;
9350 case 13: /* sub */
9351 if (conds)
9352 gen_sub_CC(t0, t0, t1);
9353 else
9354 tcg_gen_sub_i32(t0, t0, t1);
9355 break;
9356 case 14: /* rsb */
9357 if (conds)
9358 gen_sub_CC(t0, t1, t0);
9359 else
9360 tcg_gen_sub_i32(t0, t1, t0);
9361 break;
9362 default: /* 5, 6, 7, 9, 12, 15. */
9363 return 1;
9365 if (logic_cc) {
9366 gen_logic_CC(t0);
9367 if (shifter_out)
9368 gen_set_CF_bit31(t1);
9369 }
9370 return 0;
9371 }
9373 /* Translate a 32-bit thumb instruction. */
9374 static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9375 {
9376 uint32_t imm, shift, offset;
9377 uint32_t rd, rn, rm, rs;
9378 TCGv_i32 tmp;
9379 TCGv_i32 tmp2;
9380 TCGv_i32 tmp3;
9381 TCGv_i32 addr;
9382 TCGv_i64 tmp64;
9383 int op;
9384 int shiftop;
9385 int conds;
9386 int logic_cc;
9388 /*
9389 * ARMv6-M supports a limited subset of Thumb2 instructions.
9390 * Other Thumb1 architectures allow only 32-bit
9391 * combined BL/BLX prefix and suffix.
9392 */
9393 if (arm_dc_feature(s, ARM_FEATURE_M) &&
9394 !arm_dc_feature(s, ARM_FEATURE_V7)) {
9395 int i;
9396 bool found = false;
9397 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
9398 0xf3b08040 /* dsb */,
9399 0xf3b08050 /* dmb */,
9400 0xf3b08060 /* isb */,
9401 0xf3e08000 /* mrs */,
9402 0xf000d000 /* bl */};
9403 static const uint32_t armv6m_mask[] = {0xffe0d000,
9404 0xfff0d0f0,
9405 0xfff0d0f0,
9406 0xfff0d0f0,
9407 0xffe0d000,
9408 0xf800d000};
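/* Each mask/value pair above matches one of the few 32-bit encodings that
 * ARMv6-M provides (MSR, DSB, DMB, ISB, MRS, BL); anything else in the
 * 32-bit Thumb space is treated as UNDEFINED here.
 */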
9410 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
9411 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
9412 found = true;
9413 break;
9416 if (!found) {
9417 goto illegal_op;
9419 } else if ((insn & 0xf800e800) != 0xf000e800) {
9420 ARCH(6T2);
9423 rn = (insn >> 16) & 0xf;
9424 rs = (insn >> 12) & 0xf;
9425 rd = (insn >> 8) & 0xf;
9426 rm = insn & 0xf;
9427 switch ((insn >> 25) & 0xf) {
9428 case 0: case 1: case 2: case 3:
9429 /* 16-bit instructions. Should never happen. */
9430 abort();
9431 case 4:
9432 if (insn & (1 << 22)) {
9433 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
9434 * - load/store doubleword, load/store exclusive, ldacq/strel,
9435 * table branch, TT.
9437 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
9438 arm_dc_feature(s, ARM_FEATURE_V8)) {
9439 /* 0b1110_1001_0111_1111_1110_1001_0111_1111
9440 * - SG (v8M only)
9441 * The bulk of the behaviour for this instruction is implemented
9442 * in v7m_handle_execute_nsc(), which deals with the insn when
9443 * it is executed by a CPU in non-secure state from memory
9444 * which is Secure & NonSecure-Callable.
9445 * Here we only need to handle the remaining cases:
9446 * * in NS memory (including the "security extension not
9447 * implemented" case) : NOP
9448 * * in S memory but CPU already secure (clear IT bits)
9449 * We know that the attribute for the memory this insn is
9450 * in must match the current CPU state, because otherwise
9451 * get_phys_addr_pmsav8 would have generated an exception.
9452 */
9453 if (s->v8m_secure) {
9454 /* Like the IT insn, we don't need to generate any code */
9455 s->condexec_cond = 0;
9456 s->condexec_mask = 0;
9458 } else if (insn & 0x01200000) {
9459 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9460 * - load/store dual (post-indexed)
9461 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
9462 * - load/store dual (literal and immediate)
9463 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9464 * - load/store dual (pre-indexed)
9466 bool wback = extract32(insn, 21, 1);
9468 if (rn == 15 && (insn & (1 << 21))) {
9469 /* UNPREDICTABLE */
9470 goto illegal_op;
9473 addr = add_reg_for_lit(s, rn, 0);
9474 offset = (insn & 0xff) * 4;
9475 if ((insn & (1 << 23)) == 0) {
9476 offset = -offset;
9479 if (s->v8m_stackcheck && rn == 13 && wback) {
9480 /*
9481 * Here 'addr' is the current SP; if offset is +ve we're
9482 * moving SP up, else down. It is UNKNOWN whether the limit
9483 * check triggers when SP starts below the limit and ends
9484 * up above it; check whichever of the current and final
9485 * SP is lower, so QEMU will trigger in that situation.
9486 */
9487 if ((int32_t)offset < 0) {
9488 TCGv_i32 newsp = tcg_temp_new_i32();
9490 tcg_gen_addi_i32(newsp, addr, offset);
9491 gen_helper_v8m_stackcheck(cpu_env, newsp);
9492 tcg_temp_free_i32(newsp);
9493 } else {
9494 gen_helper_v8m_stackcheck(cpu_env, addr);
9498 if (insn & (1 << 24)) {
9499 tcg_gen_addi_i32(addr, addr, offset);
9500 offset = 0;
9502 if (insn & (1 << 20)) {
9503 /* ldrd */
9504 tmp = tcg_temp_new_i32();
9505 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9506 store_reg(s, rs, tmp);
9507 tcg_gen_addi_i32(addr, addr, 4);
9508 tmp = tcg_temp_new_i32();
9509 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9510 store_reg(s, rd, tmp);
9511 } else {
9512 /* strd */
9513 tmp = load_reg(s, rs);
9514 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9515 tcg_temp_free_i32(tmp);
9516 tcg_gen_addi_i32(addr, addr, 4);
9517 tmp = load_reg(s, rd);
9518 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9519 tcg_temp_free_i32(tmp);
9521 if (wback) {
9522 /* Base writeback. */
9523 tcg_gen_addi_i32(addr, addr, offset - 4);
9524 store_reg(s, rn, addr);
9525 } else {
9526 tcg_temp_free_i32(addr);
9528 } else if ((insn & (1 << 23)) == 0) {
9529 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
9530 * - load/store exclusive word
9531 * - TT (v8M only)
9533 if (rs == 15) {
9534 if (!(insn & (1 << 20)) &&
9535 arm_dc_feature(s, ARM_FEATURE_M) &&
9536 arm_dc_feature(s, ARM_FEATURE_V8)) {
9537 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
9538 * - TT (v8M only)
9540 bool alt = insn & (1 << 7);
9541 TCGv_i32 addr, op, ttresp;
9543 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
9544 /* we UNDEF for these UNPREDICTABLE cases */
9545 goto illegal_op;
9548 if (alt && !s->v8m_secure) {
9549 goto illegal_op;
9552 addr = load_reg(s, rn);
9553 op = tcg_const_i32(extract32(insn, 6, 2));
9554 ttresp = tcg_temp_new_i32();
9555 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
9556 tcg_temp_free_i32(addr);
9557 tcg_temp_free_i32(op);
9558 store_reg(s, rd, ttresp);
9559 break;
9561 goto illegal_op;
9563 addr = tcg_temp_local_new_i32();
9564 load_reg_var(s, addr, rn);
9565 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
9566 if (insn & (1 << 20)) {
9567 gen_load_exclusive(s, rs, 15, addr, 2);
9568 } else {
9569 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9571 tcg_temp_free_i32(addr);
9572 } else if ((insn & (7 << 5)) == 0) {
9573 /* Table Branch. */
9574 addr = load_reg(s, rn);
9575 tmp = load_reg(s, rm);
9576 tcg_gen_add_i32(addr, addr, tmp);
9577 if (insn & (1 << 4)) {
9578 /* tbh */
9579 tcg_gen_add_i32(addr, addr, tmp);
9580 tcg_temp_free_i32(tmp);
9581 tmp = tcg_temp_new_i32();
9582 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9583 } else { /* tbb */
9584 tcg_temp_free_i32(tmp);
9585 tmp = tcg_temp_new_i32();
9586 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9588 tcg_temp_free_i32(addr);
9589 tcg_gen_shli_i32(tmp, tmp, 1);
9590 tcg_gen_addi_i32(tmp, tmp, read_pc(s));
9591 store_reg(s, 15, tmp);
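/* The TBB/TBH table entry is a halfword count, so it is doubled and added
 * to the PC (the address of the table-branch insn plus 4) to form the
 * branch target.
 */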
9592 } else {
9593 bool is_lasr = false;
9594 bool is_ld = extract32(insn, 20, 1);
9595 int op2 = (insn >> 6) & 0x3;
9596 op = (insn >> 4) & 0x3;
9597 switch (op2) {
9598 case 0:
9599 goto illegal_op;
9600 case 1:
9601 /* Load/store exclusive byte/halfword/doubleword */
9602 if (op == 2) {
9603 goto illegal_op;
9605 ARCH(7);
9606 break;
9607 case 2:
9608 /* Load-acquire/store-release */
9609 if (op == 3) {
9610 goto illegal_op;
9612 /* Fall through */
9613 case 3:
9614 /* Load-acquire/store-release exclusive */
9615 ARCH(8);
9616 is_lasr = true;
9617 break;
9620 if (is_lasr && !is_ld) {
9621 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
9624 addr = tcg_temp_local_new_i32();
9625 load_reg_var(s, addr, rn);
9626 if (!(op2 & 1)) {
9627 if (is_ld) {
9628 tmp = tcg_temp_new_i32();
9629 switch (op) {
9630 case 0: /* ldab */
9631 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
9632 rs | ISSIsAcqRel);
9633 break;
9634 case 1: /* ldah */
9635 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9636 rs | ISSIsAcqRel);
9637 break;
9638 case 2: /* lda */
9639 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
9640 rs | ISSIsAcqRel);
9641 break;
9642 default:
9643 abort();
9645 store_reg(s, rs, tmp);
9646 } else {
9647 tmp = load_reg(s, rs);
9648 switch (op) {
9649 case 0: /* stlb */
9650 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
9651 rs | ISSIsAcqRel);
9652 break;
9653 case 1: /* stlh */
9654 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
9655 rs | ISSIsAcqRel);
9656 break;
9657 case 2: /* stl */
9658 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
9659 rs | ISSIsAcqRel);
9660 break;
9661 default:
9662 abort();
9664 tcg_temp_free_i32(tmp);
9666 } else if (is_ld) {
9667 gen_load_exclusive(s, rs, rd, addr, op);
9668 } else {
9669 gen_store_exclusive(s, rm, rs, rd, addr, op);
9671 tcg_temp_free_i32(addr);
9673 if (is_lasr && is_ld) {
9674 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
9677 } else {
9678 /* Load/store multiple, RFE, SRS. */
9679 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
9680 /* RFE, SRS: not available in user mode or on M profile */
9681 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9682 goto illegal_op;
9684 if (insn & (1 << 20)) {
9685 /* rfe */
9686 addr = load_reg(s, rn);
9687 if ((insn & (1 << 24)) == 0)
9688 tcg_gen_addi_i32(addr, addr, -8);
9689 /* Load PC into tmp and CPSR into tmp2. */
9690 tmp = tcg_temp_new_i32();
9691 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9692 tcg_gen_addi_i32(addr, addr, 4);
9693 tmp2 = tcg_temp_new_i32();
9694 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9695 if (insn & (1 << 21)) {
9696 /* Base writeback. */
9697 if (insn & (1 << 24)) {
9698 tcg_gen_addi_i32(addr, addr, 4);
9699 } else {
9700 tcg_gen_addi_i32(addr, addr, -4);
9702 store_reg(s, rn, addr);
9703 } else {
9704 tcg_temp_free_i32(addr);
9706 gen_rfe(s, tmp, tmp2);
9707 } else {
9708 /* srs */
9709 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9710 insn & (1 << 21));
9712 } else {
9713 int i, loaded_base = 0;
9714 TCGv_i32 loaded_var;
9715 bool wback = extract32(insn, 21, 1);
9716 /* Load/store multiple. */
9717 addr = load_reg(s, rn);
9718 offset = 0;
9719 for (i = 0; i < 16; i++) {
9720 if (insn & (1 << i))
9721 offset += 4;
9724 if (insn & (1 << 24)) {
9725 tcg_gen_addi_i32(addr, addr, -offset);
9728 if (s->v8m_stackcheck && rn == 13 && wback) {
9729 /*
9730 * If the writeback is incrementing SP rather than
9731 * decrementing it, and the initial SP is below the
9732 * stack limit but the final written-back SP would
9733 * be above, then we must not perform any memory
9734 * accesses, but it is IMPDEF whether we generate
9735 * an exception. We choose to do so in this case.
9736 * At this point 'addr' is the lowest address, so
9737 * either the original SP (if incrementing) or our
9738 * final SP (if decrementing), so that's what we check.
9739 */
9740 gen_helper_v8m_stackcheck(cpu_env, addr);
9743 loaded_var = NULL;
9744 for (i = 0; i < 16; i++) {
9745 if ((insn & (1 << i)) == 0)
9746 continue;
9747 if (insn & (1 << 20)) {
9748 /* Load. */
9749 tmp = tcg_temp_new_i32();
9750 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9751 if (i == 15) {
9752 gen_bx_excret(s, tmp);
9753 } else if (i == rn) {
9754 loaded_var = tmp;
9755 loaded_base = 1;
9756 } else {
9757 store_reg(s, i, tmp);
9759 } else {
9760 /* Store. */
9761 tmp = load_reg(s, i);
9762 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9763 tcg_temp_free_i32(tmp);
9765 tcg_gen_addi_i32(addr, addr, 4);
9767 if (loaded_base) {
9768 store_reg(s, rn, loaded_var);
9770 if (wback) {
9771 /* Base register writeback. */
9772 if (insn & (1 << 24)) {
9773 tcg_gen_addi_i32(addr, addr, -offset);
9775 /* Fault if writeback register is in register list. */
9776 if (insn & (1 << rn))
9777 goto illegal_op;
9778 store_reg(s, rn, addr);
9779 } else {
9780 tcg_temp_free_i32(addr);
9784 break;
9785 case 5:
9787 op = (insn >> 21) & 0xf;
9788 if (op == 6) {
9789 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9790 goto illegal_op;
9792 /* Halfword pack. */
9793 tmp = load_reg(s, rn);
9794 tmp2 = load_reg(s, rm);
9795 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9796 if (insn & (1 << 5)) {
9797 /* pkhtb */
9798 if (shift == 0) {
9799 shift = 31;
9801 tcg_gen_sari_i32(tmp2, tmp2, shift);
9802 tcg_gen_deposit_i32(tmp, tmp, tmp2, 0, 16);
9803 } else {
9804 /* pkhbt */
9805 tcg_gen_shli_i32(tmp2, tmp2, shift);
9806 tcg_gen_deposit_i32(tmp, tmp2, tmp, 0, 16);
9808 tcg_temp_free_i32(tmp2);
9809 store_reg(s, rd, tmp);
9810 } else {
9811 /* Data processing register constant shift. */
9812 if (rn == 15) {
9813 tmp = tcg_temp_new_i32();
9814 tcg_gen_movi_i32(tmp, 0);
9815 } else {
9816 tmp = load_reg(s, rn);
9818 tmp2 = load_reg(s, rm);
9820 shiftop = (insn >> 4) & 3;
9821 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9822 conds = (insn & (1 << 20)) != 0;
9823 logic_cc = (conds && thumb2_logic_op(op));
9824 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9825 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9826 goto illegal_op;
9827 tcg_temp_free_i32(tmp2);
9828 if (rd == 13 &&
9829 ((op == 2 && rn == 15) ||
9830 (op == 8 && rn == 13) ||
9831 (op == 13 && rn == 13))) {
9832 /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
9833 store_sp_checked(s, tmp);
9834 } else if (rd != 15) {
9835 store_reg(s, rd, tmp);
9836 } else {
9837 tcg_temp_free_i32(tmp);
9840 break;
9841 case 13: /* Misc data processing. */
9842 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9843 if (op < 4 && (insn & 0xf000) != 0xf000)
9844 goto illegal_op;
9845 switch (op) {
9846 case 0: /* Register controlled shift. */
9847 tmp = load_reg(s, rn);
9848 tmp2 = load_reg(s, rm);
9849 if ((insn & 0x70) != 0)
9850 goto illegal_op;
9852 * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
9853 * - MOV, MOVS (register-shifted register), flagsetting
9855 op = (insn >> 21) & 3;
9856 logic_cc = (insn & (1 << 20)) != 0;
9857 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9858 if (logic_cc)
9859 gen_logic_CC(tmp);
9860 store_reg(s, rd, tmp);
9861 break;
9862 case 1: /* Sign/zero extend. */
9863 op = (insn >> 20) & 7;
9864 switch (op) {
9865 case 0: /* SXTAH, SXTH */
9866 case 1: /* UXTAH, UXTH */
9867 case 4: /* SXTAB, SXTB */
9868 case 5: /* UXTAB, UXTB */
9869 break;
9870 case 2: /* SXTAB16, SXTB16 */
9871 case 3: /* UXTAB16, UXTB16 */
9872 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9873 goto illegal_op;
9875 break;
9876 default:
9877 goto illegal_op;
9879 if (rn != 15) {
9880 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9881 goto illegal_op;
9884 tmp = load_reg(s, rm);
9885 shift = (insn >> 4) & 3;
9886 /* ??? In many cases it's not necessary to do a
9887 rotate, a shift is sufficient. */
9888 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9889 op = (insn >> 20) & 7;
9890 switch (op) {
9891 case 0: gen_sxth(tmp); break;
9892 case 1: gen_uxth(tmp); break;
9893 case 2: gen_sxtb16(tmp); break;
9894 case 3: gen_uxtb16(tmp); break;
9895 case 4: gen_sxtb(tmp); break;
9896 case 5: gen_uxtb(tmp); break;
9897 default:
9898 g_assert_not_reached();
9900 if (rn != 15) {
9901 tmp2 = load_reg(s, rn);
9902 if ((op >> 1) == 1) {
9903 gen_add16(tmp, tmp2);
9904 } else {
9905 tcg_gen_add_i32(tmp, tmp, tmp2);
9906 tcg_temp_free_i32(tmp2);
9909 store_reg(s, rd, tmp);
9910 break;
9911 case 2: /* SIMD add/subtract. */
9912 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9913 goto illegal_op;
9915 op = (insn >> 20) & 7;
9916 shift = (insn >> 4) & 7;
9917 if ((op & 3) == 3 || (shift & 3) == 3)
9918 goto illegal_op;
9919 tmp = load_reg(s, rn);
9920 tmp2 = load_reg(s, rm);
9921 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
9922 tcg_temp_free_i32(tmp2);
9923 store_reg(s, rd, tmp);
9924 break;
9925 case 3: /* Other data processing. */
9926 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9927 if (op < 4) {
9928 /* Saturating add/subtract. */
9929 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9930 goto illegal_op;
9932 tmp = load_reg(s, rn);
9933 tmp2 = load_reg(s, rm);
9934 if (op & 1)
9935 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp);
9936 if (op & 2)
9937 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9938 else
9939 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
9940 tcg_temp_free_i32(tmp2);
9941 } else {
9942 switch (op) {
9943 case 0x0a: /* rbit */
9944 case 0x08: /* rev */
9945 case 0x09: /* rev16 */
9946 case 0x0b: /* revsh */
9947 case 0x18: /* clz */
9948 break;
9949 case 0x10: /* sel */
9950 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9951 goto illegal_op;
9953 break;
9954 case 0x20: /* crc32/crc32c */
9955 case 0x21:
9956 case 0x22:
9957 case 0x28:
9958 case 0x29:
9959 case 0x2a:
9960 if (!dc_isar_feature(aa32_crc32, s)) {
9961 goto illegal_op;
9963 break;
9964 default:
9965 goto illegal_op;
9967 tmp = load_reg(s, rn);
9968 switch (op) {
9969 case 0x0a: /* rbit */
9970 gen_helper_rbit(tmp, tmp);
9971 break;
9972 case 0x08: /* rev */
9973 tcg_gen_bswap32_i32(tmp, tmp);
9974 break;
9975 case 0x09: /* rev16 */
9976 gen_rev16(tmp);
9977 break;
9978 case 0x0b: /* revsh */
9979 gen_revsh(tmp);
9980 break;
9981 case 0x10: /* sel */
9982 tmp2 = load_reg(s, rm);
9983 tmp3 = tcg_temp_new_i32();
9984 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
9985 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
9986 tcg_temp_free_i32(tmp3);
9987 tcg_temp_free_i32(tmp2);
9988 break;
9989 case 0x18: /* clz */
9990 tcg_gen_clzi_i32(tmp, tmp, 32);
9991 break;
9992 case 0x20:
9993 case 0x21:
9994 case 0x22:
9995 case 0x28:
9996 case 0x29:
9997 case 0x2a:
9999 /* crc32/crc32c */
10000 uint32_t sz = op & 0x3;
10001 uint32_t c = op & 0x8;
10003 tmp2 = load_reg(s, rm);
10004 if (sz == 0) {
10005 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10006 } else if (sz == 1) {
10007 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10009 tmp3 = tcg_const_i32(1 << sz);
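/* The crc32/crc32c helpers take the number of input bytes (1, 2 or 4) as
 * their third argument; the operand was masked to that width above.
 */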
10010 if (c) {
10011 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10012 } else {
10013 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10015 tcg_temp_free_i32(tmp2);
10016 tcg_temp_free_i32(tmp3);
10017 break;
10019 default:
10020 g_assert_not_reached();
10023 store_reg(s, rd, tmp);
10024 break;
10025 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
10026 switch ((insn >> 20) & 7) {
10027 case 0: /* 32 x 32 -> 32 */
10028 case 7: /* Unsigned sum of absolute differences. */
10029 break;
10030 case 1: /* 16 x 16 -> 32 */
10031 case 2: /* Dual multiply add. */
10032 case 3: /* 32 * 16 -> 32msb */
10033 case 4: /* Dual multiply subtract. */
10034 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10035 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10036 goto illegal_op;
10038 break;
10040 op = (insn >> 4) & 0xf;
10041 tmp = load_reg(s, rn);
10042 tmp2 = load_reg(s, rm);
10043 switch ((insn >> 20) & 7) {
10044 case 0: /* 32 x 32 -> 32 */
10045 tcg_gen_mul_i32(tmp, tmp, tmp2);
10046 tcg_temp_free_i32(tmp2);
10047 if (rs != 15) {
10048 tmp2 = load_reg(s, rs);
10049 if (op)
10050 tcg_gen_sub_i32(tmp, tmp2, tmp);
10051 else
10052 tcg_gen_add_i32(tmp, tmp, tmp2);
10053 tcg_temp_free_i32(tmp2);
10055 break;
10056 case 1: /* 16 x 16 -> 32 */
10057 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10058 tcg_temp_free_i32(tmp2);
10059 if (rs != 15) {
10060 tmp2 = load_reg(s, rs);
10061 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10062 tcg_temp_free_i32(tmp2);
10064 break;
10065 case 2: /* Dual multiply add. */
10066 case 4: /* Dual multiply subtract. */
10067 if (op)
10068 gen_swap_half(tmp2);
10069 gen_smul_dual(tmp, tmp2);
10070 if (insn & (1 << 22)) {
10071 /* This subtraction cannot overflow. */
10072 tcg_gen_sub_i32(tmp, tmp, tmp2);
10073 } else {
10074 /* This addition cannot overflow 32 bits;
10075 * however it may overflow considered as a signed
10076 * operation, in which case we must set the Q flag.
10078 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10080 tcg_temp_free_i32(tmp2);
10081 if (rs != 15)
10082 {
10083 tmp2 = load_reg(s, rs);
10084 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10085 tcg_temp_free_i32(tmp2);
10086 }
10087 break;
10088 case 3: /* 32 * 16 -> 32msb */
10089 if (op)
10090 tcg_gen_sari_i32(tmp2, tmp2, 16);
10091 else
10092 gen_sxth(tmp2);
10093 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10094 tcg_gen_shri_i64(tmp64, tmp64, 16);
10095 tmp = tcg_temp_new_i32();
10096 tcg_gen_extrl_i64_i32(tmp, tmp64);
10097 tcg_temp_free_i64(tmp64);
10098 if (rs != 15)
10099 {
10100 tmp2 = load_reg(s, rs);
10101 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10102 tcg_temp_free_i32(tmp2);
10103 }
10104 break;
10105 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10106 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10107 if (rs != 15) {
10108 tmp = load_reg(s, rs);
10109 if (insn & (1 << 20)) {
10110 tmp64 = gen_addq_msw(tmp64, tmp);
10111 } else {
10112 tmp64 = gen_subq_msw(tmp64, tmp);
10115 if (insn & (1 << 4)) {
10116 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
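/* Bit 4 is the rounding bit here (SMMULR/SMMLAR/SMMLSR), as bit 5 is in
 * the Arm encoding above.
 */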
10118 tcg_gen_shri_i64(tmp64, tmp64, 32);
10119 tmp = tcg_temp_new_i32();
10120 tcg_gen_extrl_i64_i32(tmp, tmp64);
10121 tcg_temp_free_i64(tmp64);
10122 break;
10123 case 7: /* Unsigned sum of absolute differences. */
10124 gen_helper_usad8(tmp, tmp, tmp2);
10125 tcg_temp_free_i32(tmp2);
10126 if (rs != 15) {
10127 tmp2 = load_reg(s, rs);
10128 tcg_gen_add_i32(tmp, tmp, tmp2);
10129 tcg_temp_free_i32(tmp2);
10131 break;
10133 store_reg(s, rd, tmp);
10134 break;
10135 case 6: case 7: /* 64-bit multiply, Divide. */
10136 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
10137 tmp = load_reg(s, rn);
10138 tmp2 = load_reg(s, rm);
10139 if ((op & 0x50) == 0x10) {
10140 /* sdiv, udiv */
10141 if (!dc_isar_feature(thumb_div, s)) {
10142 goto illegal_op;
10144 if (op & 0x20)
10145 gen_helper_udiv(tmp, tmp, tmp2);
10146 else
10147 gen_helper_sdiv(tmp, tmp, tmp2);
10148 tcg_temp_free_i32(tmp2);
10149 store_reg(s, rd, tmp);
10150 } else if ((op & 0xe) == 0xc) {
10151 /* Dual multiply accumulate long. */
10152 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10153 tcg_temp_free_i32(tmp);
10154 tcg_temp_free_i32(tmp2);
10155 goto illegal_op;
10157 if (op & 1)
10158 gen_swap_half(tmp2);
10159 gen_smul_dual(tmp, tmp2);
10160 if (op & 0x10) {
10161 tcg_gen_sub_i32(tmp, tmp, tmp2);
10162 } else {
10163 tcg_gen_add_i32(tmp, tmp, tmp2);
10165 tcg_temp_free_i32(tmp2);
10166 /* BUGFIX */
10167 tmp64 = tcg_temp_new_i64();
10168 tcg_gen_ext_i32_i64(tmp64, tmp);
10169 tcg_temp_free_i32(tmp);
10170 gen_addq(s, tmp64, rs, rd);
10171 gen_storeq_reg(s, rs, rd, tmp64);
10172 tcg_temp_free_i64(tmp64);
10173 } else {
10174 if (op & 0x20) {
10175 /* Unsigned 64-bit multiply */
10176 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
10177 } else {
10178 if (op & 8) {
10179 /* smlalxy */
10180 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10181 tcg_temp_free_i32(tmp2);
10182 tcg_temp_free_i32(tmp);
10183 goto illegal_op;
10185 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10186 tcg_temp_free_i32(tmp2);
10187 tmp64 = tcg_temp_new_i64();
10188 tcg_gen_ext_i32_i64(tmp64, tmp);
10189 tcg_temp_free_i32(tmp);
10190 } else {
10191 /* Signed 64-bit multiply */
10192 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10195 if (op & 4) {
10196 /* umaal */
10197 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10198 tcg_temp_free_i64(tmp64);
10199 goto illegal_op;
10201 gen_addq_lo(s, tmp64, rs);
10202 gen_addq_lo(s, tmp64, rd);
10203 } else if (op & 0x40) {
10204 /* 64-bit accumulate. */
10205 gen_addq(s, tmp64, rs, rd);
10207 gen_storeq_reg(s, rs, rd, tmp64);
10208 tcg_temp_free_i64(tmp64);
10210 break;
10212 break;
10213 case 6: case 7: case 14: case 15:
10214 /* Coprocessor. */
10215 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10216 /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
10217 if (extract32(insn, 24, 2) == 3) {
10218 goto illegal_op; /* op0 = 0b11 : unallocated */
10222 * Decode VLLDM and VLSTM first: these are nonstandard because:
10223 * * if there is no FPU then these insns must NOP in
10224 * Secure state and UNDEF in Nonsecure state
10225 * * if there is an FPU then these insns do not have
10226 * the usual behaviour that disas_vfp_insn() provides of
10227 * being controlled by CPACR/NSACR enable bits or the
10228 * lazy-stacking logic.
10230 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
10231 (insn & 0xffa00f00) == 0xec200a00) {
10232 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
10233 * - VLLDM, VLSTM
10234 * We choose to UNDEF if the RAZ bits are non-zero.
10236 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
10237 goto illegal_op;
10240 if (arm_dc_feature(s, ARM_FEATURE_VFP)) {
10241 TCGv_i32 fptr = load_reg(s, rn);
10243 if (extract32(insn, 20, 1)) {
10244 gen_helper_v7m_vlldm(cpu_env, fptr);
10245 } else {
10246 gen_helper_v7m_vlstm(cpu_env, fptr);
10248 tcg_temp_free_i32(fptr);
10250 /* End the TB, because we have updated FP control bits */
10251 s->base.is_jmp = DISAS_UPDATE;
10253 break;
10255 if (arm_dc_feature(s, ARM_FEATURE_VFP) &&
10256 ((insn >> 8) & 0xe) == 10) {
10257 /* FP, and the CPU supports it */
10258 if (disas_vfp_insn(s, insn)) {
10259 goto illegal_op;
10261 break;
10264 /* All other insns: NOCP */
10265 gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
10266 default_exception_el(s));
10267 break;
10269 if ((insn & 0xfe000a00) == 0xfc000800
10270 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10271 /* The Thumb2 and ARM encodings are identical. */
10272 if (disas_neon_insn_3same_ext(s, insn)) {
10273 goto illegal_op;
10275 } else if ((insn & 0xff000a00) == 0xfe000800
10276 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10277 /* The Thumb2 and ARM encodings are identical. */
10278 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
10279 goto illegal_op;
10281 } else if (((insn >> 24) & 3) == 3) {
10282 /* Translate into the equivalent ARM encoding. */
10283 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
10284 if (disas_neon_data_insn(s, insn)) {
10285 goto illegal_op;
10287 } else if (((insn >> 8) & 0xe) == 10) {
10288 if (disas_vfp_insn(s, insn)) {
10289 goto illegal_op;
10291 } else {
10292 if (insn & (1 << 28))
10293 goto illegal_op;
10294 if (disas_coproc_insn(s, insn)) {
10295 goto illegal_op;
10298 break;
10299 case 8: case 9: case 10: case 11:
10300 if (insn & (1 << 15)) {
10301 /* Branches, misc control. */
10302 if (insn & 0x5000) {
10303 /* Unconditional branch. */
10304 /* signextend(hw1[10:0]) -> offset[:12]. */
10305 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10306 /* hw1[10:0] -> offset[11:1]. */
10307 offset |= (insn & 0x7ff) << 1;
10308 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10309 offset[24:22] already have the same value because of the
10310 sign extension above. */
10311 offset ^= ((~insn) & (1 << 13)) << 10;
10312 offset ^= ((~insn) & (1 << 11)) << 11;
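/* Combined with the sign bits copied in above, these XORs compute
 * offset[23] = NOT(J1 XOR S) and offset[22] = NOT(J2 XOR S), as the T32
 * BL/BLX encoding specifies.
 */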
10314 if (insn & (1 << 14)) {
10315 /* Branch and link. */
10316 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
10319 offset += read_pc(s);
10320 if (insn & (1 << 12)) {
10321 /* b/bl */
10322 gen_jmp(s, offset);
10323 } else {
10324 /* blx */
10325 offset &= ~(uint32_t)2;
10326 /* thumb2 bx, no need to check */
10327 gen_bx_im(s, offset);
10329 } else if (((insn >> 23) & 7) == 7) {
10330 /* Misc control */
10331 if (insn & (1 << 13))
10332 goto illegal_op;
10334 if (insn & (1 << 26)) {
10335 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10336 goto illegal_op;
10338 if (!(insn & (1 << 20))) {
10339 /* Hypervisor call (v7) */
10340 int imm16 = extract32(insn, 16, 4) << 12
10341 | extract32(insn, 0, 12);
10342 ARCH(7);
10343 if (IS_USER(s)) {
10344 goto illegal_op;
10346 gen_hvc(s, imm16);
10347 } else {
10348 /* Secure monitor call (v6+) */
10349 ARCH(6K);
10350 if (IS_USER(s)) {
10351 goto illegal_op;
10353 gen_smc(s);
10355 } else {
10356 op = (insn >> 20) & 7;
10357 switch (op) {
10358 case 0: /* msr cpsr. */
10359 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10360 tmp = load_reg(s, rn);
10361 /* the constant is the mask and SYSm fields */
10362 addr = tcg_const_i32(insn & 0xfff);
10363 gen_helper_v7m_msr(cpu_env, addr, tmp);
10364 tcg_temp_free_i32(addr);
10365 tcg_temp_free_i32(tmp);
10366 gen_lookup_tb(s);
10367 break;
10369 /* fall through */
10370 case 1: /* msr spsr. */
10371 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10372 goto illegal_op;
10375 if (extract32(insn, 5, 1)) {
10376 /* MSR (banked) */
10377 int sysm = extract32(insn, 8, 4) |
10378 (extract32(insn, 4, 1) << 4);
10379 int r = op & 1;
10381 gen_msr_banked(s, r, sysm, rm);
10382 break;
10385 /* MSR (for PSRs) */
10386 tmp = load_reg(s, rn);
10387 if (gen_set_psr(s,
10388 msr_mask(s, (insn >> 8) & 0xf, op == 1),
10389 op == 1, tmp))
10390 goto illegal_op;
10391 break;
10392 case 2: /* cps, nop-hint. */
10393 if (((insn >> 8) & 7) == 0) {
10394 gen_nop_hint(s, insn & 0xff);
10396 /* Implemented as NOP in user mode. */
10397 if (IS_USER(s))
10398 break;
10399 offset = 0;
10400 imm = 0;
10401 if (insn & (1 << 10)) {
10402 if (insn & (1 << 7))
10403 offset |= CPSR_A;
10404 if (insn & (1 << 6))
10405 offset |= CPSR_I;
10406 if (insn & (1 << 5))
10407 offset |= CPSR_F;
10408 if (insn & (1 << 9))
10409 imm = CPSR_A | CPSR_I | CPSR_F;
10411 if (insn & (1 << 8)) {
10412 offset |= 0x1f;
10413 imm |= (insn & 0x1f);
10415 if (offset) {
10416 gen_set_psr_im(s, offset, 0, imm);
10418 break;
10419 case 3: /* Special control operations. */
10420 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
10421 !arm_dc_feature(s, ARM_FEATURE_M)) {
10422 goto illegal_op;
10424 op = (insn >> 4) & 0xf;
10425 switch (op) {
10426 case 2: /* clrex */
10427 gen_clrex(s);
10428 break;
10429 case 4: /* dsb */
10430 case 5: /* dmb */
10431 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
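/* DSB and DMB are both implemented as the same full barrier here; TCG does
 * not distinguish data synchronization from data memory barriers.
 */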
10432 break;
10433 case 6: /* isb */
10434 /* We need to break the TB after this insn
10435 * to execute self-modifying code correctly
10436 * and also to take any pending interrupts
10437 * immediately.
10439 gen_goto_tb(s, 0, s->base.pc_next);
10440 break;
10441 case 7: /* sb */
10442 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
10443 goto illegal_op;
10446 * TODO: There is no speculation barrier opcode
10447 * for TCG; MB and end the TB instead.
10449 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
10450 gen_goto_tb(s, 0, s->base.pc_next);
10451 break;
10452 default:
10453 goto illegal_op;
10455 break;
10456 case 4: /* bxj */
10457 /* Trivial implementation equivalent to bx.
10458 * This instruction doesn't exist at all for M-profile.
10460 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10461 goto illegal_op;
10463 tmp = load_reg(s, rn);
10464 gen_bx(s, tmp);
10465 break;
10466 case 5: /* Exception return. */
10467 if (IS_USER(s)) {
10468 goto illegal_op;
10470 if (rn != 14 || rd != 15) {
10471 goto illegal_op;
10473 if (s->current_el == 2) {
10474 /* ERET from Hyp uses ELR_Hyp, not LR */
10475 if (insn & 0xff) {
10476 goto illegal_op;
10478 tmp = load_cpu_field(elr_el[2]);
10479 } else {
10480 tmp = load_reg(s, rn);
10481 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10483 gen_exception_return(s, tmp);
10484 break;
10485 case 6: /* MRS */
10486 if (extract32(insn, 5, 1) &&
10487 !arm_dc_feature(s, ARM_FEATURE_M)) {
10488 /* MRS (banked) */
10489 int sysm = extract32(insn, 16, 4) |
10490 (extract32(insn, 4, 1) << 4);
10492 gen_mrs_banked(s, 0, sysm, rd);
10493 break;
10496 if (extract32(insn, 16, 4) != 0xf) {
10497 goto illegal_op;
10499 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
10500 extract32(insn, 0, 8) != 0) {
10501 goto illegal_op;
10504 /* mrs cpsr */
10505 tmp = tcg_temp_new_i32();
10506 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10507 addr = tcg_const_i32(insn & 0xff);
10508 gen_helper_v7m_mrs(tmp, cpu_env, addr);
10509 tcg_temp_free_i32(addr);
10510 } else {
10511 gen_helper_cpsr_read(tmp, cpu_env);
10513 store_reg(s, rd, tmp);
10514 break;
10515 case 7: /* MRS */
10516 if (extract32(insn, 5, 1) &&
10517 !arm_dc_feature(s, ARM_FEATURE_M)) {
10518 /* MRS (banked) */
10519 int sysm = extract32(insn, 16, 4) |
10520 (extract32(insn, 4, 1) << 4);
10522 gen_mrs_banked(s, 1, sysm, rd);
10523 break;
10526 /* mrs spsr. */
10527 /* Not accessible in user mode. */
10528 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
10529 goto illegal_op;
10532 if (extract32(insn, 16, 4) != 0xf ||
10533 extract32(insn, 0, 8) != 0) {
10534 goto illegal_op;
10537 tmp = load_cpu_field(spsr);
10538 store_reg(s, rd, tmp);
10539 break;
10542 } else {
10543 /* Conditional branch. */
10544 op = (insn >> 22) & 0xf;
10545 /* Generate a conditional jump to next instruction. */
10546 arm_skip_unless(s, op);
10548 /* offset[11:1] = insn[10:0] */
10549 offset = (insn & 0x7ff) << 1;
10550 /* offset[17:12] = insn[21:16]. */
10551 offset |= (insn & 0x003f0000) >> 4;
10552 /* offset[31:20] = insn[26]. */
10553 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10554 /* offset[18] = insn[13]. */
10555 offset |= (insn & (1 << 13)) << 5;
10556 /* offset[19] = insn[11]. */
10557 offset |= (insn & (1 << 11)) << 8;
10559 /* jump to the offset */
10560 gen_jmp(s, read_pc(s) + offset);
10562 } else {
10564 * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
10565 * - Data-processing (modified immediate, plain binary immediate)
10567 if (insn & (1 << 25)) {
10569 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
10570 * - Data-processing (plain binary immediate)
10572 if (insn & (1 << 24)) {
10573 if (insn & (1 << 20))
10574 goto illegal_op;
10575 /* Bitfield/Saturate. */
10576 op = (insn >> 21) & 7;
10577 imm = insn & 0x1f;
10578 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10579 if (rn == 15) {
10580 tmp = tcg_temp_new_i32();
10581 tcg_gen_movi_i32(tmp, 0);
10582 } else {
10583 tmp = load_reg(s, rn);
10585 switch (op) {
10586 case 2: /* Signed bitfield extract. */
10587 imm++;
10588 if (shift + imm > 32)
10589 goto illegal_op;
10590 if (imm < 32) {
10591 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
10593 break;
10594 case 6: /* Unsigned bitfield extract. */
10595 imm++;
10596 if (shift + imm > 32)
10597 goto illegal_op;
10598 if (imm < 32) {
10599 tcg_gen_extract_i32(tmp, tmp, shift, imm);
10601 break;
10602 case 3: /* Bitfield insert/clear. */
10603 if (imm < shift)
10604 goto illegal_op;
10605 imm = imm + 1 - shift;
10606 if (imm != 32) {
10607 tmp2 = load_reg(s, rd);
10608 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
10609 tcg_temp_free_i32(tmp2);
10611 break;
10612 case 7:
10613 goto illegal_op;
10614 default: /* Saturate. */
10615 if (op & 1) {
10616 tcg_gen_sari_i32(tmp, tmp, shift);
10617 } else {
10618 tcg_gen_shli_i32(tmp, tmp, shift);
10620 tmp2 = tcg_const_i32(imm);
10621 if (op & 4) {
10622 /* Unsigned. */
10623 if ((op & 1) && shift == 0) {
10624 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10625 tcg_temp_free_i32(tmp);
10626 tcg_temp_free_i32(tmp2);
10627 goto illegal_op;
10629 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
10630 } else {
10631 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
10633 } else {
10634 /* Signed. */
10635 if ((op & 1) && shift == 0) {
10636 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10637 tcg_temp_free_i32(tmp);
10638 tcg_temp_free_i32(tmp2);
10639 goto illegal_op;
10641 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
10642 } else {
10643 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
10646 tcg_temp_free_i32(tmp2);
10647 break;
10649 store_reg(s, rd, tmp);
10650 } else {
10651 imm = ((insn & 0x04000000) >> 15)
10652 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10653 if (insn & (1 << 22)) {
10654 /* 16-bit immediate. */
10655 imm |= (insn >> 4) & 0xf000;
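/* imm now holds the full 16-bit value imm4:i:imm3:imm8 for MOVW/MOVT. */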
10656 if (insn & (1 << 23)) {
10657 /* movt */
10658 tmp = load_reg(s, rd);
10659 tcg_gen_ext16u_i32(tmp, tmp);
10660 tcg_gen_ori_i32(tmp, tmp, imm << 16);
10661 } else {
10662 /* movw */
10663 tmp = tcg_temp_new_i32();
10664 tcg_gen_movi_i32(tmp, imm);
10666 store_reg(s, rd, tmp);
10667 } else {
10668 /* Add/sub 12-bit immediate. */
10669 if (insn & (1 << 23)) {
10670 imm = -imm;
10672 tmp = add_reg_for_lit(s, rn, imm);
10673 if (rn == 13 && rd == 13) {
10674 /* ADD SP, SP, imm or SUB SP, SP, imm */
10675 store_sp_checked(s, tmp);
10676 } else {
10677 store_reg(s, rd, tmp);
10681 } else {
10683 * 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
10684 * - Data-processing (modified immediate)
10686 int shifter_out = 0;
10687 /* modified 12-bit immediate. */
10688 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10689 imm = (insn & 0xff);
10690 switch (shift) {
10691 case 0: /* XY */
10692 /* Nothing to do. */
10693 break;
10694 case 1: /* 00XY00XY */
10695 imm |= imm << 16;
10696 break;
10697 case 2: /* XY00XY00 */
10698 imm |= imm << 16;
10699 imm <<= 8;
10700 break;
10701 case 3: /* XYXYXYXY */
10702 imm |= imm << 16;
10703 imm |= imm << 8;
10704 break;
10705 default: /* Rotated constant. */
10706 shift = (shift << 1) | (imm >> 7);
10707 imm |= 0x80;
10708 imm = imm << (32 - shift);
10709 shifter_out = 1;
10710 break;
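/*
 * Worked example of the rotated-constant case above: with a rotation
 * amount of 8 and an 8-bit immediate of 0x2a, forcing bit 7 gives 0xaa
 * and 0xaa << (32 - 8) == 0xaa000000, i.e. the 8-bit value rotated
 * right by 8. shifter_out records that this form also defines the
 * shifter carry-out used by the flag-setting logical ops below.
 */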
10712 tmp2 = tcg_temp_new_i32();
10713 tcg_gen_movi_i32(tmp2, imm);
10714 rn = (insn >> 16) & 0xf;
10715 if (rn == 15) {
10716 tmp = tcg_temp_new_i32();
10717 tcg_gen_movi_i32(tmp, 0);
10718 } else {
10719 tmp = load_reg(s, rn);
10721 op = (insn >> 21) & 0xf;
10722 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
10723 shifter_out, tmp, tmp2))
10724 goto illegal_op;
10725 tcg_temp_free_i32(tmp2);
10726 rd = (insn >> 8) & 0xf;
10727 if (rd == 13 && rn == 13
10728 && (op == 8 || op == 13)) {
10729 /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
10730 store_sp_checked(s, tmp);
10731 } else if (rd != 15) {
10732 store_reg(s, rd, tmp);
10733 } else {
10734 tcg_temp_free_i32(tmp);
10738 break;
10739 case 12: /* Load/store single data item. */
10741 int postinc = 0;
10742 int writeback = 0;
10743 int memidx;
10744 ISSInfo issinfo;
10746 if ((insn & 0x01100000) == 0x01000000) {
10747 if (disas_neon_ls_insn(s, insn)) {
10748 goto illegal_op;
10750 break;
10752 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10753 if (rs == 15) {
10754 if (!(insn & (1 << 20))) {
10755 goto illegal_op;
10757 if (op != 2) {
10758 /* Byte or halfword load space with dest == r15 : memory hints.
10759 * Catch them early so we don't emit pointless addressing code.
10760 * This space is a mix of:
10761 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10762 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10763 * cores)
10764 * unallocated hints, which must be treated as NOPs
10765 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10766 * which is easiest for the decoding logic
10767 * Some space which must UNDEF
10769 int op1 = (insn >> 23) & 3;
10770 int op2 = (insn >> 6) & 0x3f;
10771 if (op & 2) {
10772 goto illegal_op;
10774 if (rn == 15) {
10775 /* UNPREDICTABLE, unallocated hint or
10776 * PLD/PLDW/PLI (literal)
10778 return;
10780 if (op1 & 1) {
10781 return; /* PLD/PLDW/PLI or unallocated hint */
10783 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
10784 return; /* PLD/PLDW/PLI or unallocated hint */
10786 /* UNDEF space, or an UNPREDICTABLE */
10787 goto illegal_op;
10790 memidx = get_mem_index(s);
10791 imm = insn & 0xfff;
10792 if (insn & (1 << 23)) {
10793 /* PC relative or Positive offset. */
10794 addr = add_reg_for_lit(s, rn, imm);
10795 } else if (rn == 15) {
10796 /* PC relative with negative offset. */
10797 addr = add_reg_for_lit(s, rn, -imm);
10798 } else {
10799 addr = load_reg(s, rn);
10800 imm = insn & 0xff;
10801 switch ((insn >> 8) & 0xf) {
10802 case 0x0: /* Shifted Register. */
10803 shift = (insn >> 4) & 0xf;
10804 if (shift > 3) {
10805 tcg_temp_free_i32(addr);
10806 goto illegal_op;
10808 tmp = load_reg(s, rm);
10809 tcg_gen_shli_i32(tmp, tmp, shift);
10810 tcg_gen_add_i32(addr, addr, tmp);
10811 tcg_temp_free_i32(tmp);
10812 break;
10813 case 0xc: /* Negative offset. */
10814 tcg_gen_addi_i32(addr, addr, -imm);
10815 break;
10816 case 0xe: /* User privilege. */
10817 tcg_gen_addi_i32(addr, addr, imm);
10818 memidx = get_a32_user_mem_index(s);
10819 break;
10820 case 0x9: /* Post-decrement. */
10821 imm = -imm;
10822 /* Fall through. */
10823 case 0xb: /* Post-increment. */
10824 postinc = 1;
10825 writeback = 1;
10826 break;
10827 case 0xd: /* Pre-decrement. */
10828 imm = -imm;
10829 /* Fall through. */
10830 case 0xf: /* Pre-increment. */
10831 writeback = 1;
10832 break;
10833 default:
10834 tcg_temp_free_i32(addr);
10835 goto illegal_op;
10839 issinfo = writeback ? ISSInvalid : rs;
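/*
 * Forms with writeback cannot be described by a valid syndrome, so they
 * are marked ISSInvalid; otherwise the target register rs is recorded
 * so the load/store can report ISS information on a data abort.
 */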
10841 if (s->v8m_stackcheck && rn == 13 && writeback) {
10843 * Stackcheck. Here we know 'addr' is the current SP;
10844 * if imm is +ve we're moving SP up, else down. It is
10845 * UNKNOWN whether the limit check triggers when SP starts
10846 * below the limit and ends up above it; we choose to do so.
10848 if ((int32_t)imm < 0) {
10849 TCGv_i32 newsp = tcg_temp_new_i32();
10851 tcg_gen_addi_i32(newsp, addr, imm);
10852 gen_helper_v8m_stackcheck(cpu_env, newsp);
10853 tcg_temp_free_i32(newsp);
10854 } else {
10855 gen_helper_v8m_stackcheck(cpu_env, addr);
10859 if (writeback && !postinc) {
10860 tcg_gen_addi_i32(addr, addr, imm);
10863 if (insn & (1 << 20)) {
10864 /* Load. */
10865 tmp = tcg_temp_new_i32();
10866 switch (op) {
10867 case 0:
10868 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
10869 break;
10870 case 4:
10871 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
10872 break;
10873 case 1:
10874 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
10875 break;
10876 case 5:
10877 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
10878 break;
10879 case 2:
10880 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
10881 break;
10882 default:
10883 tcg_temp_free_i32(tmp);
10884 tcg_temp_free_i32(addr);
10885 goto illegal_op;
10887 if (rs == 15) {
10888 gen_bx_excret(s, tmp);
10889 } else {
10890 store_reg(s, rs, tmp);
10892 } else {
10893 /* Store. */
10894 tmp = load_reg(s, rs);
10895 switch (op) {
10896 case 0:
10897 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
10898 break;
10899 case 1:
10900 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
10901 break;
10902 case 2:
10903 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
10904 break;
10905 default:
10906 tcg_temp_free_i32(tmp);
10907 tcg_temp_free_i32(addr);
10908 goto illegal_op;
10910 tcg_temp_free_i32(tmp);
10912 if (postinc)
10913 tcg_gen_addi_i32(addr, addr, imm);
10914 if (writeback) {
10915 store_reg(s, rn, addr);
10916 } else {
10917 tcg_temp_free_i32(addr);
10920 break;
10921 default:
10922 goto illegal_op;
10924 return;
10925 illegal_op:
10926 unallocated_encoding(s);
10929 static void disas_thumb_insn(DisasContext *s, uint32_t insn)
10931 uint32_t val, op, rm, rn, rd, shift, cond;
10932 int32_t offset;
10933 int i;
10934 TCGv_i32 tmp;
10935 TCGv_i32 tmp2;
10936 TCGv_i32 addr;
10938 switch (insn >> 12) {
10939 case 0: case 1:
10941 rd = insn & 7;
10942 op = (insn >> 11) & 3;
10943 if (op == 3) {
10945 * 0b0001_1xxx_xxxx_xxxx
10946 * - Add, subtract (three low registers)
10947 * - Add, subtract (two low registers and immediate)
10949 rn = (insn >> 3) & 7;
10950 tmp = load_reg(s, rn);
10951 if (insn & (1 << 10)) {
10952 /* immediate */
10953 tmp2 = tcg_temp_new_i32();
10954 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
10955 } else {
10956 /* reg */
10957 rm = (insn >> 6) & 7;
10958 tmp2 = load_reg(s, rm);
10960 if (insn & (1 << 9)) {
10961 if (s->condexec_mask)
10962 tcg_gen_sub_i32(tmp, tmp, tmp2);
10963 else
10964 gen_sub_CC(tmp, tmp, tmp2);
10965 } else {
10966 if (s->condexec_mask)
10967 tcg_gen_add_i32(tmp, tmp, tmp2);
10968 else
10969 gen_add_CC(tmp, tmp, tmp2);
10971 tcg_temp_free_i32(tmp2);
10972 store_reg(s, rd, tmp);
10973 } else {
10974 /* shift immediate */
10975 rm = (insn >> 3) & 7;
10976 shift = (insn >> 6) & 0x1f;
10977 tmp = load_reg(s, rm);
10978 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10979 if (!s->condexec_mask)
10980 gen_logic_CC(tmp);
10981 store_reg(s, rd, tmp);
10983 break;
10984 case 2: case 3:
10986 * 0b001x_xxxx_xxxx_xxxx
10987 * - Add, subtract, compare, move (one low register and immediate)
10989 op = (insn >> 11) & 3;
10990 rd = (insn >> 8) & 0x7;
10991 if (op == 0) { /* mov */
10992 tmp = tcg_temp_new_i32();
10993 tcg_gen_movi_i32(tmp, insn & 0xff);
10994 if (!s->condexec_mask)
10995 gen_logic_CC(tmp);
10996 store_reg(s, rd, tmp);
10997 } else {
10998 tmp = load_reg(s, rd);
10999 tmp2 = tcg_temp_new_i32();
11000 tcg_gen_movi_i32(tmp2, insn & 0xff);
11001 switch (op) {
11002 case 1: /* cmp */
11003 gen_sub_CC(tmp, tmp, tmp2);
11004 tcg_temp_free_i32(tmp);
11005 tcg_temp_free_i32(tmp2);
11006 break;
11007 case 2: /* add */
11008 if (s->condexec_mask)
11009 tcg_gen_add_i32(tmp, tmp, tmp2);
11010 else
11011 gen_add_CC(tmp, tmp, tmp2);
11012 tcg_temp_free_i32(tmp2);
11013 store_reg(s, rd, tmp);
11014 break;
11015 case 3: /* sub */
11016 if (s->condexec_mask)
11017 tcg_gen_sub_i32(tmp, tmp, tmp2);
11018 else
11019 gen_sub_CC(tmp, tmp, tmp2);
11020 tcg_temp_free_i32(tmp2);
11021 store_reg(s, rd, tmp);
11022 break;
11025 break;
11026 case 4:
11027 if (insn & (1 << 11)) {
11028 rd = (insn >> 8) & 7;
11029 /* load pc-relative. Bit 1 of PC is ignored. */
11030 addr = add_reg_for_lit(s, 15, (insn & 0xff) * 4);
11031 tmp = tcg_temp_new_i32();
11032 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11033 rd | ISSIs16Bit);
11034 tcg_temp_free_i32(addr);
11035 store_reg(s, rd, tmp);
11036 break;
11038 if (insn & (1 << 10)) {
11039 /* 0b0100_01xx_xxxx_xxxx
11040 * - data processing extended, branch and exchange
11042 rd = (insn & 7) | ((insn >> 4) & 8);
11043 rm = (insn >> 3) & 0xf;
11044 op = (insn >> 8) & 3;
11045 switch (op) {
11046 case 0: /* add */
11047 tmp = load_reg(s, rd);
11048 tmp2 = load_reg(s, rm);
11049 tcg_gen_add_i32(tmp, tmp, tmp2);
11050 tcg_temp_free_i32(tmp2);
11051 if (rd == 13) {
11052 /* ADD SP, SP, reg */
11053 store_sp_checked(s, tmp);
11054 } else {
11055 store_reg(s, rd, tmp);
11057 break;
11058 case 1: /* cmp */
11059 tmp = load_reg(s, rd);
11060 tmp2 = load_reg(s, rm);
11061 gen_sub_CC(tmp, tmp, tmp2);
11062 tcg_temp_free_i32(tmp2);
11063 tcg_temp_free_i32(tmp);
11064 break;
11065 case 2: /* mov/cpy */
11066 tmp = load_reg(s, rm);
11067 if (rd == 13) {
11068 /* MOV SP, reg */
11069 store_sp_checked(s, tmp);
11070 } else {
11071 store_reg(s, rd, tmp);
11073 break;
11074 case 3:
11076 /* 0b0100_0111_xxxx_xxxx
11077 * - branch [and link] exchange thumb register
11079 bool link = insn & (1 << 7);
11081 if (insn & 3) {
11082 goto undef;
11084 if (link) {
11085 ARCH(5);
11087 if ((insn & 4)) {
11088 /* BXNS/BLXNS: only exists for v8M with the
11089 * security extensions, and always UNDEF if NonSecure.
11090 * We don't implement these in the user-only mode
11091 * either (in theory you can use them from Secure User
11092 * mode but they are too tied in to system emulation.)
11094 if (!s->v8m_secure || IS_USER_ONLY) {
11095 goto undef;
11097 if (link) {
11098 gen_blxns(s, rm);
11099 } else {
11100 gen_bxns(s, rm);
11102 break;
11104 /* BLX/BX */
11105 tmp = load_reg(s, rm);
11106 if (link) {
11107 val = (uint32_t)s->base.pc_next | 1;
11108 tmp2 = tcg_temp_new_i32();
11109 tcg_gen_movi_i32(tmp2, val);
11110 store_reg(s, 14, tmp2);
11111 gen_bx(s, tmp);
11112 } else {
11113 /* Only BX works as exception-return, not BLX */
11114 gen_bx_excret(s, tmp);
11116 break;
11119 break;
11123 * 0b0100_00xx_xxxx_xxxx
11124 * - Data-processing (two low registers)
11126 rd = insn & 7;
11127 rm = (insn >> 3) & 7;
11128 op = (insn >> 6) & 0xf;
11129 if (op == 2 || op == 3 || op == 4 || op == 7) {
11130 /* the shift/rotate ops want the operands backwards */
11131 val = rm;
11132 rm = rd;
11133 rd = val;
11134 val = 1;
11135 } else {
11136 val = 0;
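/*
 * val is reused as a flag meaning "the result is computed into tmp2
 * and the destination register index is in rm": the shift/rotate ops
 * set it here, MVN sets it below, and the store at the end of the
 * switch consumes it.
 */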
11139 if (op == 9) { /* neg */
11140 tmp = tcg_temp_new_i32();
11141 tcg_gen_movi_i32(tmp, 0);
11142 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11143 tmp = load_reg(s, rd);
11144 } else {
11145 tmp = NULL;
11148 tmp2 = load_reg(s, rm);
11149 switch (op) {
11150 case 0x0: /* and */
11151 tcg_gen_and_i32(tmp, tmp, tmp2);
11152 if (!s->condexec_mask)
11153 gen_logic_CC(tmp);
11154 break;
11155 case 0x1: /* eor */
11156 tcg_gen_xor_i32(tmp, tmp, tmp2);
11157 if (!s->condexec_mask)
11158 gen_logic_CC(tmp);
11159 break;
11160 case 0x2: /* lsl */
11161 if (s->condexec_mask) {
11162 gen_shl(tmp2, tmp2, tmp);
11163 } else {
11164 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
11165 gen_logic_CC(tmp2);
11167 break;
11168 case 0x3: /* lsr */
11169 if (s->condexec_mask) {
11170 gen_shr(tmp2, tmp2, tmp);
11171 } else {
11172 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
11173 gen_logic_CC(tmp2);
11175 break;
11176 case 0x4: /* asr */
11177 if (s->condexec_mask) {
11178 gen_sar(tmp2, tmp2, tmp);
11179 } else {
11180 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
11181 gen_logic_CC(tmp2);
11183 break;
11184 case 0x5: /* adc */
11185 if (s->condexec_mask) {
11186 gen_adc(tmp, tmp2);
11187 } else {
11188 gen_adc_CC(tmp, tmp, tmp2);
11190 break;
11191 case 0x6: /* sbc */
11192 if (s->condexec_mask) {
11193 gen_sub_carry(tmp, tmp, tmp2);
11194 } else {
11195 gen_sbc_CC(tmp, tmp, tmp2);
11197 break;
11198 case 0x7: /* ror */
11199 if (s->condexec_mask) {
11200 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11201 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
11202 } else {
11203 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
11204 gen_logic_CC(tmp2);
11206 break;
11207 case 0x8: /* tst */
11208 tcg_gen_and_i32(tmp, tmp, tmp2);
11209 gen_logic_CC(tmp);
11210 rd = 16;
11211 break;
11212 case 0x9: /* neg */
11213 if (s->condexec_mask)
11214 tcg_gen_neg_i32(tmp, tmp2);
11215 else
11216 gen_sub_CC(tmp, tmp, tmp2);
11217 break;
11218 case 0xa: /* cmp */
11219 gen_sub_CC(tmp, tmp, tmp2);
11220 rd = 16;
11221 break;
11222 case 0xb: /* cmn */
11223 gen_add_CC(tmp, tmp, tmp2);
11224 rd = 16;
11225 break;
11226 case 0xc: /* orr */
11227 tcg_gen_or_i32(tmp, tmp, tmp2);
11228 if (!s->condexec_mask)
11229 gen_logic_CC(tmp);
11230 break;
11231 case 0xd: /* mul */
11232 tcg_gen_mul_i32(tmp, tmp, tmp2);
11233 if (!s->condexec_mask)
11234 gen_logic_CC(tmp);
11235 break;
11236 case 0xe: /* bic */
11237 tcg_gen_andc_i32(tmp, tmp, tmp2);
11238 if (!s->condexec_mask)
11239 gen_logic_CC(tmp);
11240 break;
11241 case 0xf: /* mvn */
11242 tcg_gen_not_i32(tmp2, tmp2);
11243 if (!s->condexec_mask)
11244 gen_logic_CC(tmp2);
11245 val = 1;
11246 rm = rd;
11247 break;
11249 if (rd != 16) {
11250 if (val) {
11251 store_reg(s, rm, tmp2);
11252 if (op != 0xf)
11253 tcg_temp_free_i32(tmp);
11254 } else {
11255 store_reg(s, rd, tmp);
11256 tcg_temp_free_i32(tmp2);
11258 } else {
11259 tcg_temp_free_i32(tmp);
11260 tcg_temp_free_i32(tmp2);
11262 break;
11264 case 5:
11265 /* load/store register offset. */
11266 rd = insn & 7;
11267 rn = (insn >> 3) & 7;
11268 rm = (insn >> 6) & 7;
11269 op = (insn >> 9) & 7;
11270 addr = load_reg(s, rn);
11271 tmp = load_reg(s, rm);
11272 tcg_gen_add_i32(addr, addr, tmp);
11273 tcg_temp_free_i32(tmp);
11275 if (op < 3) { /* store */
11276 tmp = load_reg(s, rd);
11277 } else {
11278 tmp = tcg_temp_new_i32();
11281 switch (op) {
11282 case 0: /* str */
11283 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11284 break;
11285 case 1: /* strh */
11286 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11287 break;
11288 case 2: /* strb */
11289 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11290 break;
11291 case 3: /* ldrsb */
11292 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11293 break;
11294 case 4: /* ldr */
11295 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11296 break;
11297 case 5: /* ldrh */
11298 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11299 break;
11300 case 6: /* ldrb */
11301 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11302 break;
11303 case 7: /* ldrsh */
11304 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11305 break;
11307 if (op >= 3) { /* load */
11308 store_reg(s, rd, tmp);
11309 } else {
11310 tcg_temp_free_i32(tmp);
11312 tcg_temp_free_i32(addr);
11313 break;
11315 case 6:
11316 /* load/store word immediate offset */
11317 rd = insn & 7;
11318 rn = (insn >> 3) & 7;
11319 addr = load_reg(s, rn);
11320 val = (insn >> 4) & 0x7c;
11321 tcg_gen_addi_i32(addr, addr, val);
11323 if (insn & (1 << 11)) {
11324 /* load */
11325 tmp = tcg_temp_new_i32();
11326 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11327 store_reg(s, rd, tmp);
11328 } else {
11329 /* store */
11330 tmp = load_reg(s, rd);
11331 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11332 tcg_temp_free_i32(tmp);
11334 tcg_temp_free_i32(addr);
11335 break;
11337 case 7:
11338 /* load/store byte immediate offset */
11339 rd = insn & 7;
11340 rn = (insn >> 3) & 7;
11341 addr = load_reg(s, rn);
11342 val = (insn >> 6) & 0x1f;
11343 tcg_gen_addi_i32(addr, addr, val);
11345 if (insn & (1 << 11)) {
11346 /* load */
11347 tmp = tcg_temp_new_i32();
11348 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11349 store_reg(s, rd, tmp);
11350 } else {
11351 /* store */
11352 tmp = load_reg(s, rd);
11353 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11354 tcg_temp_free_i32(tmp);
11356 tcg_temp_free_i32(addr);
11357 break;
11359 case 8:
11360 /* load/store halfword immediate offset */
11361 rd = insn & 7;
11362 rn = (insn >> 3) & 7;
11363 addr = load_reg(s, rn);
11364 val = (insn >> 5) & 0x3e;
11365 tcg_gen_addi_i32(addr, addr, val);
11367 if (insn & (1 << 11)) {
11368 /* load */
11369 tmp = tcg_temp_new_i32();
11370 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11371 store_reg(s, rd, tmp);
11372 } else {
11373 /* store */
11374 tmp = load_reg(s, rd);
11375 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11376 tcg_temp_free_i32(tmp);
11378 tcg_temp_free_i32(addr);
11379 break;
11381 case 9:
11382 /* load/store from stack */
11383 rd = (insn >> 8) & 7;
11384 addr = load_reg(s, 13);
11385 val = (insn & 0xff) * 4;
11386 tcg_gen_addi_i32(addr, addr, val);
11388 if (insn & (1 << 11)) {
11389 /* load */
11390 tmp = tcg_temp_new_i32();
11391 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11392 store_reg(s, rd, tmp);
11393 } else {
11394 /* store */
11395 tmp = load_reg(s, rd);
11396 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11397 tcg_temp_free_i32(tmp);
11399 tcg_temp_free_i32(addr);
11400 break;
11402 case 10:
11404 * 0b1010_xxxx_xxxx_xxxx
11405 * - Add PC/SP (immediate)
11407 rd = (insn >> 8) & 7;
11408 val = (insn & 0xff) * 4;
11409 tmp = add_reg_for_lit(s, insn & (1 << 11) ? 13 : 15, val);
11410 store_reg(s, rd, tmp);
11411 break;
11413 case 11:
11414 /* misc */
11415 op = (insn >> 8) & 0xf;
11416 switch (op) {
11417 case 0:
11419 * 0b1011_0000_xxxx_xxxx
11420 * - ADD (SP plus immediate)
11421 * - SUB (SP minus immediate)
11423 tmp = load_reg(s, 13);
11424 val = (insn & 0x7f) * 4;
11425 if (insn & (1 << 7))
11426 val = -(int32_t)val;
11427 tcg_gen_addi_i32(tmp, tmp, val);
11428 store_sp_checked(s, tmp);
11429 break;
11431 case 2: /* sign/zero extend. */
11432 ARCH(6);
11433 rd = insn & 7;
11434 rm = (insn >> 3) & 7;
11435 tmp = load_reg(s, rm);
11436 switch ((insn >> 6) & 3) {
11437 case 0: gen_sxth(tmp); break;
11438 case 1: gen_sxtb(tmp); break;
11439 case 2: gen_uxth(tmp); break;
11440 case 3: gen_uxtb(tmp); break;
11442 store_reg(s, rd, tmp);
11443 break;
11444 case 4: case 5: case 0xc: case 0xd:
11446 * 0b1011_x10x_xxxx_xxxx
11447 * - push/pop
11449 addr = load_reg(s, 13);
11450 if (insn & (1 << 8))
11451 offset = 4;
11452 else
11453 offset = 0;
11454 for (i = 0; i < 8; i++) {
11455 if (insn & (1 << i))
11456 offset += 4;
11458 if ((insn & (1 << 11)) == 0) {
11459 tcg_gen_addi_i32(addr, addr, -offset);
11462 if (s->v8m_stackcheck) {
11464 * Here 'addr' is the lower of "old SP" and "new SP";
11465 * if this is a pop that starts below the limit and ends
11466 * above it, it is UNKNOWN whether the limit check triggers;
11467 * we choose to trigger.
11469 gen_helper_v8m_stackcheck(cpu_env, addr);
11472 for (i = 0; i < 8; i++) {
11473 if (insn & (1 << i)) {
11474 if (insn & (1 << 11)) {
11475 /* pop */
11476 tmp = tcg_temp_new_i32();
11477 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11478 store_reg(s, i, tmp);
11479 } else {
11480 /* push */
11481 tmp = load_reg(s, i);
11482 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11483 tcg_temp_free_i32(tmp);
11485 /* advance to the next address. */
11486 tcg_gen_addi_i32(addr, addr, 4);
11489 tmp = NULL;
11490 if (insn & (1 << 8)) {
11491 if (insn & (1 << 11)) {
11492 /* pop pc */
11493 tmp = tcg_temp_new_i32();
11494 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11495 /* don't set the pc until the rest of the instruction
11496 has completed */
11497 } else {
11498 /* push lr */
11499 tmp = load_reg(s, 14);
11500 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11501 tcg_temp_free_i32(tmp);
11503 tcg_gen_addi_i32(addr, addr, 4);
11505 if ((insn & (1 << 11)) == 0) {
11506 tcg_gen_addi_i32(addr, addr, -offset);
11508 /* write back the new stack pointer */
11509 store_reg(s, 13, addr);
11510 /* set the new PC value */
11511 if ((insn & 0x0900) == 0x0900) {
11512 store_reg_from_load(s, 15, tmp);
11514 break;
11516 case 1: case 3: case 9: case 11: /* CBZ, CBNZ */
11517 rm = insn & 7;
11518 tmp = load_reg(s, rm);
11519 arm_gen_condlabel(s);
11520 if (insn & (1 << 11))
11521 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
11522 else
11523 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
11524 tcg_temp_free_i32(tmp);
11525 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
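/* offset is i:imm5:'0' (insn[9] and insn[7:3]): a forward branch of 0..126 bytes. */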
11526 gen_jmp(s, read_pc(s) + offset);
11527 break;
11529 case 15: /* IT, nop-hint. */
11530 if ((insn & 0xf) == 0) {
11531 gen_nop_hint(s, (insn >> 4) & 0xf);
11532 break;
11535 * IT (If-Then)
11537 * Combinations of firstcond and mask which set up an 0b1111
11538 * condition are UNPREDICTABLE; we take the CONSTRAINED
11539 * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
11540 * i.e. both meaning "execute always".
11542 s->condexec_cond = (insn >> 4) & 0xe;
11543 s->condexec_mask = insn & 0x1f;
11544 /* No actual code generated for this insn, just setup state. */
11545 break;
11547 case 0xe: /* bkpt */
11549 int imm8 = extract32(insn, 0, 8);
11550 ARCH(5);
11551 gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm8, true));
11552 break;
11555 case 0xa: /* rev, and hlt */
11557 int op1 = extract32(insn, 6, 2);
11559 if (op1 == 2) {
11560 /* HLT */
11561 int imm6 = extract32(insn, 0, 6);
11563 gen_hlt(s, imm6);
11564 break;
11567 /* Otherwise this is rev */
11568 ARCH(6);
11569 rn = (insn >> 3) & 0x7;
11570 rd = insn & 0x7;
11571 tmp = load_reg(s, rn);
11572 switch (op1) {
11573 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
11574 case 1: gen_rev16(tmp); break;
11575 case 3: gen_revsh(tmp); break;
11576 default:
11577 g_assert_not_reached();
11579 store_reg(s, rd, tmp);
11580 break;
11583 case 6:
11584 switch ((insn >> 5) & 7) {
11585 case 2:
11586 /* setend */
11587 ARCH(6);
11588 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11589 gen_helper_setend(cpu_env);
11590 s->base.is_jmp = DISAS_UPDATE;
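/*
 * Only reached when SETEND actually changes the current data
 * endianness; a SETEND to the endianness already in effect is a NOP
 * and does not need to end the TB.
 */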
11592 break;
11593 case 3:
11594 /* cps */
11595 ARCH(6);
11596 if (IS_USER(s)) {
11597 break;
11599 if (arm_dc_feature(s, ARM_FEATURE_M)) {
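/*
 * On M profile, CPS controls FAULTMASK and PRIMASK via the v7M MSR
 * helper; the constants 19 and 16 below are their SYSm register
 * numbers.
 */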
11600 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11601 /* FAULTMASK */
11602 if (insn & 1) {
11603 addr = tcg_const_i32(19);
11604 gen_helper_v7m_msr(cpu_env, addr, tmp);
11605 tcg_temp_free_i32(addr);
11607 /* PRIMASK */
11608 if (insn & 2) {
11609 addr = tcg_const_i32(16);
11610 gen_helper_v7m_msr(cpu_env, addr, tmp);
11611 tcg_temp_free_i32(addr);
11613 tcg_temp_free_i32(tmp);
11614 gen_lookup_tb(s);
11615 } else {
11616 if (insn & (1 << 4)) {
11617 shift = CPSR_A | CPSR_I | CPSR_F;
11618 } else {
11619 shift = 0;
11621 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
11623 break;
11624 default:
11625 goto undef;
11627 break;
11629 default:
11630 goto undef;
11632 break;
11634 case 12:
11636 /* load/store multiple */
11637 TCGv_i32 loaded_var = NULL;
11638 rn = (insn >> 8) & 0x7;
11639 addr = load_reg(s, rn);
11640 for (i = 0; i < 8; i++) {
11641 if (insn & (1 << i)) {
11642 if (insn & (1 << 11)) {
11643 /* load */
11644 tmp = tcg_temp_new_i32();
11645 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11646 if (i == rn) {
11647 loaded_var = tmp;
11648 } else {
11649 store_reg(s, i, tmp);
11651 } else {
11652 /* store */
11653 tmp = load_reg(s, i);
11654 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11655 tcg_temp_free_i32(tmp);
11657 /* advance to the next address */
11658 tcg_gen_addi_i32(addr, addr, 4);
11661 if ((insn & (1 << rn)) == 0) {
11662 /* base reg not in list: base register writeback */
11663 store_reg(s, rn, addr);
11664 } else {
11665 /* base reg in list: if load, complete it now */
11666 if (insn & (1 << 11)) {
11667 store_reg(s, rn, loaded_var);
11669 tcg_temp_free_i32(addr);
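/*
 * Note that if the base register was in the load list, its new value
 * was kept in loaded_var and only written back after the loop, so the
 * address walk above still used the original base.
 */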
11671 break;
11673 case 13:
11674 /* conditional branch or swi */
11675 cond = (insn >> 8) & 0xf;
11676 if (cond == 0xe)
11677 goto undef;
11679 if (cond == 0xf) {
11680 /* swi */
11681 gen_set_pc_im(s, s->base.pc_next);
11682 s->svc_imm = extract32(insn, 0, 8);
11683 s->base.is_jmp = DISAS_SWI;
11684 break;
11686 /* generate a conditional jump to next instruction */
11687 arm_skip_unless(s, cond);
11689 /* jump to the offset */
11690 val = read_pc(s);
11691 offset = ((int32_t)insn << 24) >> 24;
11692 val += offset << 1;
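/* imm8 sign-extended and doubled: the branch range is -256..+254 bytes from read_pc(). */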
11693 gen_jmp(s, val);
11694 break;
11696 case 14:
11697 if (insn & (1 << 11)) {
11698 /* thumb_insn_is_16bit() ensures we can't get here for
11699 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
11700 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
11702 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11703 ARCH(5);
11704 offset = ((insn & 0x7ff) << 1);
11705 tmp = load_reg(s, 14);
11706 tcg_gen_addi_i32(tmp, tmp, offset);
11707 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
11709 tmp2 = tcg_temp_new_i32();
11710 tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
11711 store_reg(s, 14, tmp2);
11712 gen_bx(s, tmp);
11713 break;
11715 /* unconditional branch */
11716 val = read_pc(s);
11717 offset = ((int32_t)insn << 21) >> 21;
11718 val += offset << 1;
11719 gen_jmp(s, val);
11720 break;
11722 case 15:
11723 /* thumb_insn_is_16bit() ensures we can't get here for
11724 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
11726 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11728 if (insn & (1 << 11)) {
11729 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
11730 offset = ((insn & 0x7ff) << 1) | 1;
11731 tmp = load_reg(s, 14);
11732 tcg_gen_addi_i32(tmp, tmp, offset);
11734 tmp2 = tcg_temp_new_i32();
11735 tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
11736 store_reg(s, 14, tmp2);
11737 gen_bx(s, tmp);
11738 } else {
11739 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
11740 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
11742 tcg_gen_movi_i32(cpu_R[14], read_pc(s) + uoffset);
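/*
 * The BL/BLX prefix stashes read_pc() plus the sign-extended upper
 * offset bits (insn[10:0] << 12) in LR; the matching suffix, handled
 * above, adds the low offset bits and completes the call.
 */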
11744 break;
11746 return;
11747 illegal_op:
11748 undef:
11749 unallocated_encoding(s);
11752 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11754 /* Return true if the insn at dc->base.pc_next might cross a page boundary.
11755 * (False positives are OK, false negatives are not.)
11756 * We know this is a Thumb insn, and our caller ensures we are
11757 * only called if dc->base.pc_next is less than 4 bytes from the page
11758 * boundary, so we cross the page if the first 16 bits indicate
11759 * that this is a 32 bit insn.
11761 uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);
11763 return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
11766 static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
11768 DisasContext *dc = container_of(dcbase, DisasContext, base);
11769 CPUARMState *env = cs->env_ptr;
11770 ARMCPU *cpu = env_archcpu(env);
11771 uint32_t tb_flags = dc->base.tb->flags;
11772 uint32_t condexec, core_mmu_idx;
11774 dc->isar = &cpu->isar;
11775 dc->condjmp = 0;
11777 dc->aarch64 = 0;
11778 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11779 * there is no secure EL1, so we route exceptions to EL3.
11781 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11782 !arm_el_is_aa64(env, 3);
11783 dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
11784 dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
11785 dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
11786 condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
11787 dc->condexec_mask = (condexec & 0xf) << 1;
11788 dc->condexec_cond = condexec >> 4;
11789 core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
11790 dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
11791 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
11792 #if !defined(CONFIG_USER_ONLY)
11793 dc->user = (dc->current_el == 0);
11794 #endif
11795 dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
11796 dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
11797 dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
11798 dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
11799 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
11800 dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
11801 dc->vec_stride = 0;
11802 } else {
11803 dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
11804 dc->c15_cpar = 0;
11806 dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
11807 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
11808 regime_is_secure(env, dc->mmu_idx);
11809 dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
11810 dc->v8m_fpccr_s_wrong = FIELD_EX32(tb_flags, TBFLAG_A32, FPCCR_S_WRONG);
11811 dc->v7m_new_fp_ctxt_needed =
11812 FIELD_EX32(tb_flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED);
11813 dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_A32, LSPACT);
11814 dc->cp_regs = cpu->cp_regs;
11815 dc->features = env->features;
11817 /* Single step state. The code-generation logic here is:
11818 * SS_ACTIVE == 0:
11819 * generate code with no special handling for single-stepping (except
11820 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11821 * this happens anyway because those changes are all system register or
11822 * PSTATE writes).
11823 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11824 * emit code for one insn
11825 * emit code to clear PSTATE.SS
11826 * emit code to generate software step exception for completed step
11827 * end TB (as usual for having generated an exception)
11828 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11829 * emit code to generate a software step exception
11830 * end the TB
11832 dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
11833 dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
11834 dc->is_ldex = false;
11835 if (!arm_feature(env, ARM_FEATURE_M)) {
11836 dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
11839 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
11841 /* If architectural single step active, limit to 1. */
11842 if (is_singlestepping(dc)) {
11843 dc->base.max_insns = 1;
11846 /* ARM is a fixed-length ISA. Bound the number of insns to execute
11847 to those left on the page. */
11848 if (!dc->thumb) {
11849 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
11850 dc->base.max_insns = MIN(dc->base.max_insns, bound);
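/*
 * A note on the expression above: TARGET_PAGE_MASK is a negative mask,
 * so -(pc_first | TARGET_PAGE_MASK) is the number of bytes left on
 * this page, and dividing by 4 gives the number of remaining A32
 * instructions.
 */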
11853 cpu_V0 = tcg_temp_new_i64();
11854 cpu_V1 = tcg_temp_new_i64();
11855 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
11856 cpu_M0 = tcg_temp_new_i64();
11859 static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
11861 DisasContext *dc = container_of(dcbase, DisasContext, base);
11863 /* A note on handling of the condexec (IT) bits:
11865 * We want to avoid the overhead of having to write the updated condexec
11866 * bits back to the CPUARMState for every instruction in an IT block. So:
11867 * (1) if the condexec bits are not already zero then we write
11868 * zero back into the CPUARMState now. This avoids complications trying
11869 * to do it at the end of the block. (For example if we don't do this
11870 * it's hard to identify whether we can safely skip writing condexec
11871 * at the end of the TB, which we definitely want to do for the case
11872 * where a TB doesn't do anything with the IT state at all.)
11873 * (2) if we are going to leave the TB then we call gen_set_condexec()
11874 * which will write the correct value into CPUARMState if zero is wrong.
11875 * This is done both for leaving the TB at the end, and for leaving
11876 * it because of an exception we know will happen, which is done in
11877 * gen_exception_insn(). The latter is necessary because we need to
11878 * leave the TB with the PC/IT state just prior to execution of the
11879 * instruction which caused the exception.
11880 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
11881 * then the CPUARMState will be wrong and we need to reset it.
11882 * This is handled in the same way as restoration of the
11883 * PC in these situations; we save the value of the condexec bits
11884 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
11885 * then uses this to restore them after an exception.
11887 * Note that there are no instructions which can read the condexec
11888 * bits, and none which can write non-static values to them, so
11889 * we don't need to care about whether CPUARMState is correct in the
11890 * middle of a TB.
11893 /* Reset the conditional execution bits immediately. This avoids
11894 complications trying to do it at the end of the block. */
11895 if (dc->condexec_mask || dc->condexec_cond) {
11896 TCGv_i32 tmp = tcg_temp_new_i32();
11897 tcg_gen_movi_i32(tmp, 0);
11898 store_cpu_field(tmp, condexec_bits);
11902 static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
11904 DisasContext *dc = container_of(dcbase, DisasContext, base);
11906 tcg_gen_insn_start(dc->base.pc_next,
11907 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1), 0);
11909 dc->insn_start = tcg_last_op();
11912 static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
11913 const CPUBreakpoint *bp)
11915 DisasContext *dc = container_of(dcbase, DisasContext, base);
11917 if (bp->flags & BP_CPU) {
11918 gen_set_condexec(dc);
11919 gen_set_pc_im(dc, dc->base.pc_next);
11920 gen_helper_check_breakpoints(cpu_env);
11921 /* End the TB early; it's likely not going to be executed */
11922 dc->base.is_jmp = DISAS_TOO_MANY;
11923 } else {
11924 gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
11925 /* The address covered by the breakpoint must be
11926 included in [tb->pc, tb->pc + tb->size) in order
11927 for it to be properly cleared -- thus we
11928 increment the PC here so that the logic setting
11929 tb->size below does the right thing. */
11930 /* TODO: Advance PC by correct instruction length to
11931 * avoid disassembler error messages */
11932 dc->base.pc_next += 2;
11933 dc->base.is_jmp = DISAS_NORETURN;
11936 return true;
11939 static bool arm_pre_translate_insn(DisasContext *dc)
11941 #ifdef CONFIG_USER_ONLY
11942 /* Intercept jump to the magic kernel page. */
11943 if (dc->base.pc_next >= 0xffff0000) {
11944 /* We always get here via a jump, so know we are not in a
11945 conditional execution block. */
11946 gen_exception_internal(EXCP_KERNEL_TRAP);
11947 dc->base.is_jmp = DISAS_NORETURN;
11948 return true;
11950 #endif
11952 if (dc->ss_active && !dc->pstate_ss) {
11953 /* Singlestep state is Active-pending.
11954 * If we're in this state at the start of a TB then either
11955 * a) we just took an exception to an EL which is being debugged
11956 * and this is the first insn in the exception handler
11957 * b) debug exceptions were masked and we just unmasked them
11958 * without changing EL (eg by clearing PSTATE.D)
11959 * In either case we're going to take a swstep exception in the
11960 * "did not step an insn" case, and so the syndrome ISV and EX
11961 * bits should be zero.
11963 assert(dc->base.num_insns == 1);
11964 gen_swstep_exception(dc, 0, 0);
11965 dc->base.is_jmp = DISAS_NORETURN;
11966 return true;
11969 return false;
11972 static void arm_post_translate_insn(DisasContext *dc)
11974 if (dc->condjmp && !dc->base.is_jmp) {
11975 gen_set_label(dc->condlabel);
11976 dc->condjmp = 0;
11978 translator_loop_temp_check(&dc->base);
11981 static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
11983 DisasContext *dc = container_of(dcbase, DisasContext, base);
11984 CPUARMState *env = cpu->env_ptr;
11985 unsigned int insn;
11987 if (arm_pre_translate_insn(dc)) {
11988 return;
11991 dc->pc_curr = dc->base.pc_next;
11992 insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
11993 dc->insn = insn;
11994 dc->base.pc_next += 4;
11995 disas_arm_insn(dc, insn);
11997 arm_post_translate_insn(dc);
11999 /* ARM is a fixed-length ISA. We performed the cross-page check
12000 in init_disas_context by adjusting max_insns. */
12003 static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
12005 /* Return true if this Thumb insn is always unconditional,
12006 * even inside an IT block. This is true of only a very few
12007 * instructions: BKPT, HLT, and SG.
12009 * A larger class of instructions are UNPREDICTABLE if used
12010 * inside an IT block; we do not need to detect those here, because
12011 * what we do by default (perform the cc check and update the IT
12012 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12013 * choice for those situations.
12015 * insn is either a 16-bit or a 32-bit instruction; the two are
12016 * distinguishable because for the 16-bit case the top 16 bits
12017 * are zeroes, and that isn't a valid 32-bit encoding.
12019 if ((insn & 0xffffff00) == 0xbe00) {
12020 /* BKPT */
12021 return true;
12024 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
12025 !arm_dc_feature(s, ARM_FEATURE_M)) {
12026 /* HLT: v8A only. This is unconditional even when it is going to
12027 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12028 * For v7 cores this was a plain old undefined encoding and so
12029 * honours its cc check. (We might be using the encoding as
12030 * a semihosting trap, but we don't change the cc check behaviour
12031 * on that account, because a debugger connected to a real v7A
12032 * core and emulating semihosting traps by catching the UNDEF
12033 * exception would also only see cases where the cc check passed.
12034 * No guest code should be trying to do a HLT semihosting trap
12035 * in an IT block anyway.
12037 return true;
12040 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12041 arm_dc_feature(s, ARM_FEATURE_M)) {
12042 /* SG: v8M only */
12043 return true;
12046 return false;
12049 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12051 DisasContext *dc = container_of(dcbase, DisasContext, base);
12052 CPUARMState *env = cpu->env_ptr;
12053 uint32_t insn;
12054 bool is_16bit;
12056 if (arm_pre_translate_insn(dc)) {
12057 return;
12060 dc->pc_curr = dc->base.pc_next;
12061 insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
12062 is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
12063 dc->base.pc_next += 2;
12064 if (!is_16bit) {
12065 uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
12067 insn = insn << 16 | insn2;
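/*
 * 32-bit Thumb insns are held with the first halfword in the upper 16
 * bits; this is what lets thumb_insn_is_unconditional() and the WFI
 * length check in arm_tr_tb_stop() distinguish them by looking at the
 * top bits of dc->insn.
 */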
12068 dc->base.pc_next += 2;
12070 dc->insn = insn;
12072 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
12073 uint32_t cond = dc->condexec_cond;
12076 * Conditionally skip the insn. Note that both 0xe and 0xf mean
12077 * "always"; 0xf is not "never".
12079 if (cond < 0x0e) {
12080 arm_skip_unless(dc, cond);
12084 if (is_16bit) {
12085 disas_thumb_insn(dc, insn);
12086 } else {
12087 disas_thumb2_insn(dc, insn);
12090 /* Advance the Thumb condexec condition. */
12091 if (dc->condexec_mask) {
12092 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
12093 ((dc->condexec_mask >> 4) & 1));
12094 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
12095 if (dc->condexec_mask == 0) {
12096 dc->condexec_cond = 0;
12100 arm_post_translate_insn(dc);
12102 /* Thumb is a variable-length ISA. Stop translation when the next insn
12103 * will touch a new page. This ensures that prefetch aborts occur at
12104 * the right place.
12106 * We want to stop the TB if the next insn starts in a new page,
12107 * or if it spans between this page and the next. This means that
12108 * if we're looking at the last halfword in the page we need to
12109 * see if it's a 16-bit Thumb insn (which will fit in this TB)
12110 * or a 32-bit Thumb insn (which won't).
12111 * This is to avoid generating a silly TB with a single 16-bit insn
12112 * in it at the end of this page (which would execute correctly
12113 * but isn't very efficient).
12115 if (dc->base.is_jmp == DISAS_NEXT
12116 && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
12117 || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
12118 && insn_crosses_page(env, dc)))) {
12119 dc->base.is_jmp = DISAS_TOO_MANY;
12123 static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
12125 DisasContext *dc = container_of(dcbase, DisasContext, base);
12127 if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
12128 /* FIXME: This can theoretically happen with self-modifying code. */
12129 cpu_abort(cpu, "IO on conditional branch instruction");
12132 /* At this stage dc->condjmp will only be set when the skipped
12133 instruction was a conditional branch or trap, and the PC has
12134 already been written. */
12135 gen_set_condexec(dc);
12136 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
12137 /* Exception return branches need some special case code at the
12138 * end of the TB, which is complex enough that it has to
12139 * handle the single-step vs not and the condition-failed
12140 * insn codepath itself.
12142 gen_bx_excret_final_code(dc);
12143 } else if (unlikely(is_singlestepping(dc))) {
12144 /* Unconditional and "condition passed" instruction codepath. */
12145 switch (dc->base.is_jmp) {
12146 case DISAS_SWI:
12147 gen_ss_advance(dc);
12148 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12149 default_exception_el(dc));
12150 break;
12151 case DISAS_HVC:
12152 gen_ss_advance(dc);
12153 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
12154 break;
12155 case DISAS_SMC:
12156 gen_ss_advance(dc);
12157 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
12158 break;
12159 case DISAS_NEXT:
12160 case DISAS_TOO_MANY:
12161 case DISAS_UPDATE:
12162 gen_set_pc_im(dc, dc->base.pc_next);
12163 /* fall through */
12164 default:
12165 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
12166 gen_singlestep_exception(dc);
12167 break;
12168 case DISAS_NORETURN:
12169 break;
12171 } else {
12172 /* While branches must always occur at the end of an IT block,
12173 there are a few other things that can cause us to terminate
12174 the TB in the middle of an IT block:
12175 - Exception generating instructions (bkpt, swi, undefined).
12176 - Page boundaries.
12177 - Hardware watchpoints.
12178 Hardware breakpoints have already been handled and skip this code.
12180 switch(dc->base.is_jmp) {
12181 case DISAS_NEXT:
12182 case DISAS_TOO_MANY:
12183 gen_goto_tb(dc, 1, dc->base.pc_next);
12184 break;
12185 case DISAS_JUMP:
12186 gen_goto_ptr();
12187 break;
12188 case DISAS_UPDATE:
12189 gen_set_pc_im(dc, dc->base.pc_next);
12190 /* fall through */
12191 default:
12192 /* indicate that the hash table must be used to find the next TB */
12193 tcg_gen_exit_tb(NULL, 0);
12194 break;
12195 case DISAS_NORETURN:
12196 /* nothing more to generate */
12197 break;
12198 case DISAS_WFI:
12200 TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
12201 !(dc->insn & (1U << 31))) ? 2 : 4);
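/*
 * The WFI helper is passed the encoded length of the insn (2 bytes for
 * a 16-bit Thumb encoding, 4 otherwise); bit 31 being set means a
 * 32-bit Thumb insn because of how the two halfwords are packed into
 * dc->insn.
 */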
12203 gen_helper_wfi(cpu_env, tmp);
12204 tcg_temp_free_i32(tmp);
12205 /* The helper doesn't necessarily throw an exception, but we
12206 * must go back to the main loop to check for interrupts anyway.
12208 tcg_gen_exit_tb(NULL, 0);
12209 break;
12211 case DISAS_WFE:
12212 gen_helper_wfe(cpu_env);
12213 break;
12214 case DISAS_YIELD:
12215 gen_helper_yield(cpu_env);
12216 break;
12217 case DISAS_SWI:
12218 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12219 default_exception_el(dc));
12220 break;
12221 case DISAS_HVC:
12222 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
12223 break;
12224 case DISAS_SMC:
12225 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
12226 break;
12230 if (dc->condjmp) {
12231 /* "Condition failed" instruction codepath for the branch/trap insn */
12232 gen_set_label(dc->condlabel);
12233 gen_set_condexec(dc);
12234 if (unlikely(is_singlestepping(dc))) {
12235 gen_set_pc_im(dc, dc->base.pc_next);
12236 gen_singlestep_exception(dc);
12237 } else {
12238 gen_goto_tb(dc, 1, dc->base.pc_next);
12243 static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
12245 DisasContext *dc = container_of(dcbase, DisasContext, base);
12247 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
12248 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
12251 static const TranslatorOps arm_translator_ops = {
12252 .init_disas_context = arm_tr_init_disas_context,
12253 .tb_start = arm_tr_tb_start,
12254 .insn_start = arm_tr_insn_start,
12255 .breakpoint_check = arm_tr_breakpoint_check,
12256 .translate_insn = arm_tr_translate_insn,
12257 .tb_stop = arm_tr_tb_stop,
12258 .disas_log = arm_tr_disas_log,
12261 static const TranslatorOps thumb_translator_ops = {
12262 .init_disas_context = arm_tr_init_disas_context,
12263 .tb_start = arm_tr_tb_start,
12264 .insn_start = arm_tr_insn_start,
12265 .breakpoint_check = arm_tr_breakpoint_check,
12266 .translate_insn = thumb_tr_translate_insn,
12267 .tb_stop = arm_tr_tb_stop,
12268 .disas_log = arm_tr_disas_log,
12271 /* generate intermediate code for basic block 'tb'. */
12272 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
12274 DisasContext dc;
12275 const TranslatorOps *ops = &arm_translator_ops;
12277 if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
12278 ops = &thumb_translator_ops;
12280 #ifdef TARGET_AARCH64
12281 if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
12282 ops = &aarch64_translator_ops;
12284 #endif
12286 translator_loop(ops, &dc.base, cpu, tb, max_insns);
12289 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12290 target_ulong *data)
12292 if (is_a64(env)) {
12293 env->pc = data[0];
12294 env->condexec_bits = 0;
12295 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
12296 } else {
12297 env->regs[15] = data[0];
12298 env->condexec_bits = data[1];
12299 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;