target/arm: Remove helper_double_saturate
[qemu/ar7.git] / target / arm / translate.c (blob 34e65cd80c01d58b46cafeff228b29a47a4fc435)
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
27 #include "tcg-op.h"
28 #include "tcg-op-gvec.h"
29 #include "qemu/log.h"
30 #include "qemu/bitops.h"
31 #include "arm_ldst.h"
32 #include "hw/semihosting/semihost.h"
34 #include "exec/helper-proto.h"
35 #include "exec/helper-gen.h"
37 #include "trace-tcg.h"
38 #include "exec/log.h"
41 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
43 /* currently all emulated v5 cores are also v5TE, so don't bother */
44 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
45 #define ENABLE_ARCH_5J dc_isar_feature(jazelle, s)
46 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
52 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
54 #include "translate.h"
56 #if defined(CONFIG_USER_ONLY)
57 #define IS_USER(s) 1
58 #else
59 #define IS_USER(s) (s->user)
60 #endif
62 /* We reuse the same 64-bit temporaries for efficiency. */
63 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
64 static TCGv_i32 cpu_R[16];
65 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66 TCGv_i64 cpu_exclusive_addr;
67 TCGv_i64 cpu_exclusive_val;
69 #include "exec/gen-icount.h"
71 static const char * const regnames[] =
72 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
73 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
75 /* Function prototypes for gen_ functions calling Neon helpers. */
76 typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
77 TCGv_i32, TCGv_i32);
78 /* Function prototypes for gen_ functions for fix point conversions */
79 typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
81 /* initialize TCG globals. */
82 void arm_translate_init(void)
84 int i;
86 for (i = 0; i < 16; i++) {
87 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
88 offsetof(CPUARMState, regs[i]),
89 regnames[i]);
91 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
92 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
93 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
94 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
96 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
97 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
98 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
99 offsetof(CPUARMState, exclusive_val), "exclusive_val");
101 a64_translate_init();
104 /* Flags for the disas_set_da_iss info argument:
105 * lower bits hold the Rt register number, higher bits are flags.
107 typedef enum ISSInfo {
108 ISSNone = 0,
109 ISSRegMask = 0x1f,
110 ISSInvalid = (1 << 5),
111 ISSIsAcqRel = (1 << 6),
112 ISSIsWrite = (1 << 7),
113 ISSIs16Bit = (1 << 8),
114 } ISSInfo;
116 /* Save the syndrome information for a Data Abort */
117 static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
119 uint32_t syn;
120 int sas = memop & MO_SIZE;
121 bool sse = memop & MO_SIGN;
122 bool is_acqrel = issinfo & ISSIsAcqRel;
123 bool is_write = issinfo & ISSIsWrite;
124 bool is_16bit = issinfo & ISSIs16Bit;
125 int srt = issinfo & ISSRegMask;
127 if (issinfo & ISSInvalid) {
128 /* Some callsites want to conditionally provide ISS info,
129 * eg "only if this was not a writeback"
131 return;
134 if (srt == 15) {
135 /* For AArch32, insns where the src/dest is R15 never generate
136 * ISS information. Catching that here saves checking at all
137 * the call sites.
139 return;
142 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
143 0, 0, 0, is_write, 0, is_16bit);
144 disas_set_insn_syndrome(s, syn);
147 static inline int get_a32_user_mem_index(DisasContext *s)
149 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
150 * insns:
151 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
152 * otherwise, access as if at PL0.
154 switch (s->mmu_idx) {
155 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
156 case ARMMMUIdx_S12NSE0:
157 case ARMMMUIdx_S12NSE1:
158 return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
159 case ARMMMUIdx_S1E3:
160 case ARMMMUIdx_S1SE0:
161 case ARMMMUIdx_S1SE1:
162 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
163 case ARMMMUIdx_MUser:
164 case ARMMMUIdx_MPriv:
165 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
166 case ARMMMUIdx_MUserNegPri:
167 case ARMMMUIdx_MPrivNegPri:
168 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
169 case ARMMMUIdx_MSUser:
170 case ARMMMUIdx_MSPriv:
171 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
172 case ARMMMUIdx_MSUserNegPri:
173 case ARMMMUIdx_MSPrivNegPri:
174 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
175 case ARMMMUIdx_S2NS:
176 default:
177 g_assert_not_reached();
181 static inline TCGv_i32 load_cpu_offset(int offset)
183 TCGv_i32 tmp = tcg_temp_new_i32();
184 tcg_gen_ld_i32(tmp, cpu_env, offset);
185 return tmp;
188 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
190 static inline void store_cpu_offset(TCGv_i32 var, int offset)
192 tcg_gen_st_i32(var, cpu_env, offset);
193 tcg_temp_free_i32(var);
196 #define store_cpu_field(var, name) \
197 store_cpu_offset(var, offsetof(CPUARMState, name))
199 /* The architectural value of PC. */
200 static uint32_t read_pc(DisasContext *s)
202 return s->pc_curr + (s->thumb ? 4 : 8);
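/*
 * Illustrative note (not part of the original source): for an ARM-state
 * insn at 0x1000, read_pc() returns 0x1008, and for a Thumb-state insn
 * at 0x1000 it returns 0x1004, matching the architectural rule that a
 * read of the PC sees the current instruction address plus 8 (ARM)
 * or plus 4 (Thumb).
 */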
205 /* Set a variable to the value of a CPU register. */
206 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
208 if (reg == 15) {
209 tcg_gen_movi_i32(var, read_pc(s));
210 } else {
211 tcg_gen_mov_i32(var, cpu_R[reg]);
215 /* Create a new temporary and set it to the value of a CPU register. */
216 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
218 TCGv_i32 tmp = tcg_temp_new_i32();
219 load_reg_var(s, tmp, reg);
220 return tmp;
224 * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
225 * This is used for load/store for which use of PC implies (literal),
226 * or ADD that implies ADR.
228 static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
230 TCGv_i32 tmp = tcg_temp_new_i32();
232 if (reg == 15) {
233 tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
234 } else {
235 tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
237 return tmp;
240 /* Set a CPU register. The source must be a temporary and will be
241 marked as dead. */
242 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
244 if (reg == 15) {
245 /* In Thumb mode, we must ignore bit 0.
246 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
247 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
248 * We choose to ignore [1:0] in ARM mode for all architecture versions.
250 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
251 s->base.is_jmp = DISAS_JUMP;
253 tcg_gen_mov_i32(cpu_R[reg], var);
254 tcg_temp_free_i32(var);
258 * Variant of store_reg which applies v8M stack-limit checks before updating
259 * SP. If the check fails this will result in an exception being taken.
260 * We disable the stack checks for CONFIG_USER_ONLY because we have
261 * no idea what the stack limits should be in that case.
262 * If stack checking is not being done this just acts like store_reg().
264 static void store_sp_checked(DisasContext *s, TCGv_i32 var)
266 #ifndef CONFIG_USER_ONLY
267 if (s->v8m_stackcheck) {
268 gen_helper_v8m_stackcheck(cpu_env, var);
270 #endif
271 store_reg(s, 13, var);
274 /* Value extensions. */
275 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
276 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
277 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
278 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
280 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
281 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
284 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
286 TCGv_i32 tmp_mask = tcg_const_i32(mask);
287 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
288 tcg_temp_free_i32(tmp_mask);
290 /* Set NZCV flags from the high 4 bits of var. */
291 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
293 static void gen_exception_internal(int excp)
295 TCGv_i32 tcg_excp = tcg_const_i32(excp);
297 assert(excp_is_internal(excp));
298 gen_helper_exception_internal(cpu_env, tcg_excp);
299 tcg_temp_free_i32(tcg_excp);
302 static void gen_step_complete_exception(DisasContext *s)
 304 /* We have just completed a single step of an insn. Move from Active-not-pending
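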
305 * to Active-pending, and then also take the swstep exception.
306 * This corresponds to making the (IMPDEF) choice to prioritize
307 * swstep exceptions over asynchronous exceptions taken to an exception
308 * level where debug is disabled. This choice has the advantage that
309 * we do not need to maintain internal state corresponding to the
310 * ISV/EX syndrome bits between completion of the step and generation
311 * of the exception, and our syndrome information is always correct.
313 gen_ss_advance(s);
314 gen_swstep_exception(s, 1, s->is_ldex);
315 s->base.is_jmp = DISAS_NORETURN;
318 static void gen_singlestep_exception(DisasContext *s)
320 /* Generate the right kind of exception for singlestep, which is
321 * either the architectural singlestep or EXCP_DEBUG for QEMU's
322 * gdb singlestepping.
324 if (s->ss_active) {
325 gen_step_complete_exception(s);
326 } else {
327 gen_exception_internal(EXCP_DEBUG);
331 static inline bool is_singlestepping(DisasContext *s)
333 /* Return true if we are singlestepping either because of
334 * architectural singlestep or QEMU gdbstub singlestep. This does
335 * not include the command line '-singlestep' mode which is rather
336 * misnamed as it only means "one instruction per TB" and doesn't
337 * affect the code we generate.
339 return s->base.singlestep_enabled || s->ss_active;
342 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
344 TCGv_i32 tmp1 = tcg_temp_new_i32();
345 TCGv_i32 tmp2 = tcg_temp_new_i32();
346 tcg_gen_ext16s_i32(tmp1, a);
347 tcg_gen_ext16s_i32(tmp2, b);
348 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
349 tcg_temp_free_i32(tmp2);
350 tcg_gen_sari_i32(a, a, 16);
351 tcg_gen_sari_i32(b, b, 16);
352 tcg_gen_mul_i32(b, b, a);
353 tcg_gen_mov_i32(a, tmp1);
354 tcg_temp_free_i32(tmp1);
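/*
 * Illustrative note (not part of the original source): on return the
 * first operand holds lo16(a) * lo16(b) and the second holds
 * hi16(a) * hi16(b), both as full signed 32-bit products; callers
 * such as the SMUAD/SMUSD-style dual-multiply paths then add or
 * subtract the two halves.
 */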
357 /* Byteswap each halfword. */
358 static void gen_rev16(TCGv_i32 var)
360 TCGv_i32 tmp = tcg_temp_new_i32();
361 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
362 tcg_gen_shri_i32(tmp, var, 8);
363 tcg_gen_and_i32(tmp, tmp, mask);
364 tcg_gen_and_i32(var, var, mask);
365 tcg_gen_shli_i32(var, var, 8);
366 tcg_gen_or_i32(var, var, tmp);
367 tcg_temp_free_i32(mask);
368 tcg_temp_free_i32(tmp);
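/*
 * Worked example (illustrative, not from the original source): with
 * var = 0xAABBCCDD the code above computes
 *   tmp = (var >> 8) & 0x00ff00ff          = 0x00AA00CC
 *   var = ((var & 0x00ff00ff) << 8) | tmp  = 0xBBAADDCC
 * i.e. each 16-bit halfword is byteswapped independently.
 */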
371 /* Byteswap low halfword and sign extend. */
372 static void gen_revsh(TCGv_i32 var)
374 tcg_gen_ext16u_i32(var, var);
375 tcg_gen_bswap16_i32(var, var);
376 tcg_gen_ext16s_i32(var, var);
379 /* Return (b << 32) + a. Mark inputs as dead */
380 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
382 TCGv_i64 tmp64 = tcg_temp_new_i64();
384 tcg_gen_extu_i32_i64(tmp64, b);
385 tcg_temp_free_i32(b);
386 tcg_gen_shli_i64(tmp64, tmp64, 32);
387 tcg_gen_add_i64(a, tmp64, a);
389 tcg_temp_free_i64(tmp64);
390 return a;
393 /* Return (b << 32) - a. Mark inputs as dead. */
394 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
396 TCGv_i64 tmp64 = tcg_temp_new_i64();
398 tcg_gen_extu_i32_i64(tmp64, b);
399 tcg_temp_free_i32(b);
400 tcg_gen_shli_i64(tmp64, tmp64, 32);
401 tcg_gen_sub_i64(a, tmp64, a);
403 tcg_temp_free_i64(tmp64);
404 return a;
407 /* 32x32->64 multiply. Marks inputs as dead. */
408 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
410 TCGv_i32 lo = tcg_temp_new_i32();
411 TCGv_i32 hi = tcg_temp_new_i32();
412 TCGv_i64 ret;
414 tcg_gen_mulu2_i32(lo, hi, a, b);
415 tcg_temp_free_i32(a);
416 tcg_temp_free_i32(b);
418 ret = tcg_temp_new_i64();
419 tcg_gen_concat_i32_i64(ret, lo, hi);
420 tcg_temp_free_i32(lo);
421 tcg_temp_free_i32(hi);
423 return ret;
426 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
428 TCGv_i32 lo = tcg_temp_new_i32();
429 TCGv_i32 hi = tcg_temp_new_i32();
430 TCGv_i64 ret;
432 tcg_gen_muls2_i32(lo, hi, a, b);
433 tcg_temp_free_i32(a);
434 tcg_temp_free_i32(b);
436 ret = tcg_temp_new_i64();
437 tcg_gen_concat_i32_i64(ret, lo, hi);
438 tcg_temp_free_i32(lo);
439 tcg_temp_free_i32(hi);
441 return ret;
444 /* Swap low and high halfwords. */
445 static void gen_swap_half(TCGv_i32 var)
447 TCGv_i32 tmp = tcg_temp_new_i32();
448 tcg_gen_shri_i32(tmp, var, 16);
449 tcg_gen_shli_i32(var, var, 16);
450 tcg_gen_or_i32(var, var, tmp);
451 tcg_temp_free_i32(tmp);
454 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
455 tmp = (t0 ^ t1) & 0x8000;
456 t0 &= ~0x8000;
457 t1 &= ~0x8000;
458 t0 = (t0 + t1) ^ tmp;
461 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
463 TCGv_i32 tmp = tcg_temp_new_i32();
464 tcg_gen_xor_i32(tmp, t0, t1);
465 tcg_gen_andi_i32(tmp, tmp, 0x8000);
466 tcg_gen_andi_i32(t0, t0, ~0x8000);
467 tcg_gen_andi_i32(t1, t1, ~0x8000);
468 tcg_gen_add_i32(t0, t0, t1);
469 tcg_gen_xor_i32(t0, t0, tmp);
470 tcg_temp_free_i32(tmp);
471 tcg_temp_free_i32(t1);
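/*
 * Illustrative example (not in the original source): with t0 = 0x0000ffff
 * and t1 = 0x00000001 a plain 32-bit add would carry into the upper
 * halfword and give 0x00010000.  Masking bit 15 out of both inputs,
 * adding, and XOR-ing the saved bit back in instead gives 0x00000000:
 * the low halfword wraps to zero and no carry leaks into the high
 * halfword, which is the required independent dual-16-bit behaviour.
 */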
474 /* Set CF to the top bit of var. */
475 static void gen_set_CF_bit31(TCGv_i32 var)
477 tcg_gen_shri_i32(cpu_CF, var, 31);
480 /* Set N and Z flags from var. */
481 static inline void gen_logic_CC(TCGv_i32 var)
483 tcg_gen_mov_i32(cpu_NF, var);
484 tcg_gen_mov_i32(cpu_ZF, var);
487 /* T0 += T1 + CF. */
488 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
490 tcg_gen_add_i32(t0, t0, t1);
491 tcg_gen_add_i32(t0, t0, cpu_CF);
494 /* dest = T0 + T1 + CF. */
495 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
497 tcg_gen_add_i32(dest, t0, t1);
498 tcg_gen_add_i32(dest, dest, cpu_CF);
501 /* dest = T0 - T1 + CF - 1. */
502 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
504 tcg_gen_sub_i32(dest, t0, t1);
505 tcg_gen_add_i32(dest, dest, cpu_CF);
506 tcg_gen_subi_i32(dest, dest, 1);
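/*
 * Note (illustrative, not from the original source): T0 - T1 + CF - 1
 * is the A32 "subtract with carry" identity; it is equivalent to
 * T0 + ~T1 + CF because ~T1 == -T1 - 1 in two's complement.  The
 * flag-setting variant gen_sbc_CC below uses exactly that form.
 */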
509 /* dest = T0 + T1. Compute C, N, V and Z flags */
510 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
512 TCGv_i32 tmp = tcg_temp_new_i32();
513 tcg_gen_movi_i32(tmp, 0);
514 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
515 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
516 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
517 tcg_gen_xor_i32(tmp, t0, t1);
518 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
519 tcg_temp_free_i32(tmp);
520 tcg_gen_mov_i32(dest, cpu_NF);
523 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
524 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
526 TCGv_i32 tmp = tcg_temp_new_i32();
527 if (TCG_TARGET_HAS_add2_i32) {
528 tcg_gen_movi_i32(tmp, 0);
529 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
530 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
531 } else {
532 TCGv_i64 q0 = tcg_temp_new_i64();
533 TCGv_i64 q1 = tcg_temp_new_i64();
534 tcg_gen_extu_i32_i64(q0, t0);
535 tcg_gen_extu_i32_i64(q1, t1);
536 tcg_gen_add_i64(q0, q0, q1);
537 tcg_gen_extu_i32_i64(q1, cpu_CF);
538 tcg_gen_add_i64(q0, q0, q1);
539 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
540 tcg_temp_free_i64(q0);
541 tcg_temp_free_i64(q1);
543 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
544 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
545 tcg_gen_xor_i32(tmp, t0, t1);
546 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
547 tcg_temp_free_i32(tmp);
548 tcg_gen_mov_i32(dest, cpu_NF);
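/*
 * Illustrative note (not part of the original source): the signed
 * overflow flag is computed as VF = (result ^ t0) & ~(t0 ^ t1),
 * i.e. overflow occurred iff the operands had the same sign and the
 * result's sign differs from it.  Only bit 31 of cpu_VF is
 * architecturally meaningful; gen_add_CC above uses the same formula.
 */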
551 /* dest = T0 - T1. Compute C, N, V and Z flags */
552 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
554 TCGv_i32 tmp;
555 tcg_gen_sub_i32(cpu_NF, t0, t1);
556 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
557 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
558 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
559 tmp = tcg_temp_new_i32();
560 tcg_gen_xor_i32(tmp, t0, t1);
561 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
562 tcg_temp_free_i32(tmp);
563 tcg_gen_mov_i32(dest, cpu_NF);
566 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
567 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
569 TCGv_i32 tmp = tcg_temp_new_i32();
570 tcg_gen_not_i32(tmp, t1);
571 gen_adc_CC(dest, t0, tmp);
572 tcg_temp_free_i32(tmp);
575 #define GEN_SHIFT(name) \
576 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
578 TCGv_i32 tmp1, tmp2, tmp3; \
579 tmp1 = tcg_temp_new_i32(); \
580 tcg_gen_andi_i32(tmp1, t1, 0xff); \
581 tmp2 = tcg_const_i32(0); \
582 tmp3 = tcg_const_i32(0x1f); \
583 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
584 tcg_temp_free_i32(tmp3); \
585 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
586 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
587 tcg_temp_free_i32(tmp2); \
588 tcg_temp_free_i32(tmp1); \
590 GEN_SHIFT(shl)
591 GEN_SHIFT(shr)
592 #undef GEN_SHIFT
594 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
596 TCGv_i32 tmp1, tmp2;
597 tmp1 = tcg_temp_new_i32();
598 tcg_gen_andi_i32(tmp1, t1, 0xff);
599 tmp2 = tcg_const_i32(0x1f);
600 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
601 tcg_temp_free_i32(tmp2);
602 tcg_gen_sar_i32(dest, t0, tmp1);
603 tcg_temp_free_i32(tmp1);
606 static void shifter_out_im(TCGv_i32 var, int shift)
608 if (shift == 0) {
609 tcg_gen_andi_i32(cpu_CF, var, 1);
610 } else {
611 tcg_gen_shri_i32(cpu_CF, var, shift);
612 if (shift != 31) {
613 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
618 /* Shift by immediate. Includes special handling for shift == 0. */
619 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
620 int shift, int flags)
622 switch (shiftop) {
623 case 0: /* LSL */
624 if (shift != 0) {
625 if (flags)
626 shifter_out_im(var, 32 - shift);
627 tcg_gen_shli_i32(var, var, shift);
629 break;
630 case 1: /* LSR */
631 if (shift == 0) {
632 if (flags) {
633 tcg_gen_shri_i32(cpu_CF, var, 31);
635 tcg_gen_movi_i32(var, 0);
636 } else {
637 if (flags)
638 shifter_out_im(var, shift - 1);
639 tcg_gen_shri_i32(var, var, shift);
641 break;
642 case 2: /* ASR */
643 if (shift == 0)
644 shift = 32;
645 if (flags)
646 shifter_out_im(var, shift - 1);
647 if (shift == 32)
648 shift = 31;
649 tcg_gen_sari_i32(var, var, shift);
650 break;
651 case 3: /* ROR/RRX */
652 if (shift != 0) {
653 if (flags)
654 shifter_out_im(var, shift - 1);
655 tcg_gen_rotri_i32(var, var, shift); break;
656 } else {
657 TCGv_i32 tmp = tcg_temp_new_i32();
658 tcg_gen_shli_i32(tmp, cpu_CF, 31);
659 if (flags)
660 shifter_out_im(var, 0);
661 tcg_gen_shri_i32(var, var, 1);
662 tcg_gen_or_i32(var, var, tmp);
663 tcg_temp_free_i32(tmp);
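/*
 * Encoding note (illustrative, not from the original source): in the
 * A32 immediate shifter, shift == 0 is special-cased per shift type:
 * LSL #0 is a plain move, LSR #0 and ASR #0 encode a shift by 32,
 * and ROR #0 encodes RRX (rotate right through carry), which is the
 * "else" branch handled above.
 */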
668 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
669 TCGv_i32 shift, int flags)
671 if (flags) {
672 switch (shiftop) {
673 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
674 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
675 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
676 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
678 } else {
679 switch (shiftop) {
680 case 0:
681 gen_shl(var, var, shift);
682 break;
683 case 1:
684 gen_shr(var, var, shift);
685 break;
686 case 2:
687 gen_sar(var, var, shift);
688 break;
689 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
690 tcg_gen_rotr_i32(var, var, shift); break;
693 tcg_temp_free_i32(shift);
696 #define PAS_OP(pfx) \
697 switch (op2) { \
698 case 0: gen_pas_helper(glue(pfx,add16)); break; \
699 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
700 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
701 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
702 case 4: gen_pas_helper(glue(pfx,add8)); break; \
703 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
705 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
707 TCGv_ptr tmp;
709 switch (op1) {
710 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
711 case 1:
712 tmp = tcg_temp_new_ptr();
713 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
714 PAS_OP(s)
715 tcg_temp_free_ptr(tmp);
716 break;
717 case 5:
718 tmp = tcg_temp_new_ptr();
719 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
720 PAS_OP(u)
721 tcg_temp_free_ptr(tmp);
722 break;
723 #undef gen_pas_helper
724 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
725 case 2:
726 PAS_OP(q);
727 break;
728 case 3:
729 PAS_OP(sh);
730 break;
731 case 6:
732 PAS_OP(uq);
733 break;
734 case 7:
735 PAS_OP(uh);
736 break;
737 #undef gen_pas_helper
740 #undef PAS_OP
742 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
743 #define PAS_OP(pfx) \
744 switch (op1) { \
745 case 0: gen_pas_helper(glue(pfx,add8)); break; \
746 case 1: gen_pas_helper(glue(pfx,add16)); break; \
747 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
748 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
749 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
750 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
752 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
754 TCGv_ptr tmp;
756 switch (op2) {
757 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
758 case 0:
759 tmp = tcg_temp_new_ptr();
760 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
761 PAS_OP(s)
762 tcg_temp_free_ptr(tmp);
763 break;
764 case 4:
765 tmp = tcg_temp_new_ptr();
766 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
767 PAS_OP(u)
768 tcg_temp_free_ptr(tmp);
769 break;
770 #undef gen_pas_helper
771 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
772 case 1:
773 PAS_OP(q);
774 break;
775 case 2:
776 PAS_OP(sh);
777 break;
778 case 5:
779 PAS_OP(uq);
780 break;
781 case 6:
782 PAS_OP(uh);
783 break;
784 #undef gen_pas_helper
787 #undef PAS_OP
790 * Generate a conditional based on ARM condition code cc.
791 * This is common between ARM and Aarch64 targets.
793 void arm_test_cc(DisasCompare *cmp, int cc)
795 TCGv_i32 value;
796 TCGCond cond;
797 bool global = true;
799 switch (cc) {
800 case 0: /* eq: Z */
801 case 1: /* ne: !Z */
802 cond = TCG_COND_EQ;
803 value = cpu_ZF;
804 break;
806 case 2: /* cs: C */
807 case 3: /* cc: !C */
808 cond = TCG_COND_NE;
809 value = cpu_CF;
810 break;
812 case 4: /* mi: N */
813 case 5: /* pl: !N */
814 cond = TCG_COND_LT;
815 value = cpu_NF;
816 break;
818 case 6: /* vs: V */
819 case 7: /* vc: !V */
820 cond = TCG_COND_LT;
821 value = cpu_VF;
822 break;
824 case 8: /* hi: C && !Z */
825 case 9: /* ls: !C || Z -> !(C && !Z) */
826 cond = TCG_COND_NE;
827 value = tcg_temp_new_i32();
828 global = false;
829 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
830 ZF is non-zero for !Z; so AND the two subexpressions. */
831 tcg_gen_neg_i32(value, cpu_CF);
832 tcg_gen_and_i32(value, value, cpu_ZF);
833 break;
835 case 10: /* ge: N == V -> N ^ V == 0 */
836 case 11: /* lt: N != V -> N ^ V != 0 */
837 /* Since we're only interested in the sign bit, == 0 is >= 0. */
838 cond = TCG_COND_GE;
839 value = tcg_temp_new_i32();
840 global = false;
841 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
842 break;
844 case 12: /* gt: !Z && N == V */
845 case 13: /* le: Z || N != V */
846 cond = TCG_COND_NE;
847 value = tcg_temp_new_i32();
848 global = false;
849 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
850 * the sign bit then AND with ZF to yield the result. */
851 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
852 tcg_gen_sari_i32(value, value, 31);
853 tcg_gen_andc_i32(value, cpu_ZF, value);
854 break;
856 case 14: /* always */
857 case 15: /* always */
858 /* Use the ALWAYS condition, which will fold early.
859 * It doesn't matter what we use for the value. */
860 cond = TCG_COND_ALWAYS;
861 value = cpu_ZF;
862 goto no_invert;
864 default:
865 fprintf(stderr, "Bad condition code 0x%x\n", cc);
866 abort();
869 if (cc & 1) {
870 cond = tcg_invert_cond(cond);
873 no_invert:
874 cmp->cond = cond;
875 cmp->value = value;
876 cmp->value_global = global;
879 void arm_free_cc(DisasCompare *cmp)
881 if (!cmp->value_global) {
882 tcg_temp_free_i32(cmp->value);
886 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
888 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
891 void arm_gen_test_cc(int cc, TCGLabel *label)
893 DisasCompare cmp;
894 arm_test_cc(&cmp, cc);
895 arm_jump_cc(&cmp, label);
896 arm_free_cc(&cmp);
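/*
 * Usage sketch (illustrative, not part of the original source): a
 * caller that wants to skip some generated code when condition cc
 * fails can do
 *     TCGLabel *skip = gen_new_label();
 *     arm_gen_test_cc(cc ^ 1, skip);   // branch to skip if cc is false
 *     ... emit the conditionally executed ops ...
 *     gen_set_label(skip);
 * arm_test_cc()/arm_jump_cc() are the building blocks when the
 * comparison needs to be kept around or reused.
 */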
899 static const uint8_t table_logic_cc[16] = {
900 1, /* and */
901 1, /* xor */
902 0, /* sub */
903 0, /* rsb */
904 0, /* add */
905 0, /* adc */
906 0, /* sbc */
907 0, /* rsc */
908 1, /* andl */
909 1, /* xorl */
910 0, /* cmp */
911 0, /* cmn */
912 1, /* orr */
913 1, /* mov */
914 1, /* bic */
915 1, /* mvn */
918 static inline void gen_set_condexec(DisasContext *s)
920 if (s->condexec_mask) {
921 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
922 TCGv_i32 tmp = tcg_temp_new_i32();
923 tcg_gen_movi_i32(tmp, val);
924 store_cpu_field(tmp, condexec_bits);
928 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
930 tcg_gen_movi_i32(cpu_R[15], val);
933 /* Set PC and Thumb state from an immediate address. */
934 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
936 TCGv_i32 tmp;
938 s->base.is_jmp = DISAS_JUMP;
939 if (s->thumb != (addr & 1)) {
940 tmp = tcg_temp_new_i32();
941 tcg_gen_movi_i32(tmp, addr & 1);
942 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
943 tcg_temp_free_i32(tmp);
945 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
948 /* Set PC and Thumb state from var. var is marked as dead. */
949 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
951 s->base.is_jmp = DISAS_JUMP;
952 tcg_gen_andi_i32(cpu_R[15], var, ~1);
953 tcg_gen_andi_i32(var, var, 1);
954 store_cpu_field(var, thumb);
957 /* Set PC and Thumb state from var. var is marked as dead.
958 * For M-profile CPUs, include logic to detect exception-return
959 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
960 * and BX reg, and no others, and happens only for code in Handler mode.
962 static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
964 /* Generate the same code here as for a simple bx, but flag via
965 * s->base.is_jmp that we need to do the rest of the work later.
967 gen_bx(s, var);
968 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
969 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
970 s->base.is_jmp = DISAS_BX_EXCRET;
974 static inline void gen_bx_excret_final_code(DisasContext *s)
976 /* Generate the code to finish possible exception return and end the TB */
977 TCGLabel *excret_label = gen_new_label();
978 uint32_t min_magic;
980 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
981 /* Covers FNC_RETURN and EXC_RETURN magic */
982 min_magic = FNC_RETURN_MIN_MAGIC;
983 } else {
984 /* EXC_RETURN magic only */
985 min_magic = EXC_RETURN_MIN_MAGIC;
988 /* Is the new PC value in the magic range indicating exception return? */
989 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
990 /* No: end the TB as we would for a DISAS_JMP */
991 if (is_singlestepping(s)) {
992 gen_singlestep_exception(s);
993 } else {
994 tcg_gen_exit_tb(NULL, 0);
996 gen_set_label(excret_label);
997 /* Yes: this is an exception return.
998 * At this point in runtime env->regs[15] and env->thumb will hold
999 * the exception-return magic number, which do_v7m_exception_exit()
1000 * will read. Nothing else will be able to see those values because
1001 * the cpu-exec main loop guarantees that we will always go straight
1002 * from raising the exception to the exception-handling code.
1004 * gen_ss_advance(s) does nothing on M profile currently but
1005 * calling it is conceptually the right thing as we have executed
1006 * this instruction (compare SWI, HVC, SMC handling).
1008 gen_ss_advance(s);
1009 gen_exception_internal(EXCP_EXCEPTION_EXIT);
1012 static inline void gen_bxns(DisasContext *s, int rm)
1014 TCGv_i32 var = load_reg(s, rm);
1016 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1017 * we need to sync state before calling it, but:
1018 * - we don't need to do gen_set_pc_im() because the bxns helper will
1019 * always set the PC itself
1020 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1021 * unless it's outside an IT block or the last insn in an IT block,
1022 * so we know that condexec == 0 (already set at the top of the TB)
1023 * is correct in the non-UNPREDICTABLE cases, and we can choose
1024 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1026 gen_helper_v7m_bxns(cpu_env, var);
1027 tcg_temp_free_i32(var);
1028 s->base.is_jmp = DISAS_EXIT;
1031 static inline void gen_blxns(DisasContext *s, int rm)
1033 TCGv_i32 var = load_reg(s, rm);
1035 /* We don't need to sync condexec state, for the same reason as bxns.
1036 * We do however need to set the PC, because the blxns helper reads it.
1037 * The blxns helper may throw an exception.
1039 gen_set_pc_im(s, s->base.pc_next);
1040 gen_helper_v7m_blxns(cpu_env, var);
1041 tcg_temp_free_i32(var);
1042 s->base.is_jmp = DISAS_EXIT;
1045 /* Variant of store_reg which uses branch&exchange logic when storing
1046 to r15 in ARM architecture v7 and above. The source must be a temporary
1047 and will be marked as dead. */
1048 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
1050 if (reg == 15 && ENABLE_ARCH_7) {
1051 gen_bx(s, var);
1052 } else {
1053 store_reg(s, reg, var);
1057 /* Variant of store_reg which uses branch&exchange logic when storing
1058 * to r15 in ARM architecture v5T and above. This is used for storing
1059 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1060 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
1061 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
1063 if (reg == 15 && ENABLE_ARCH_5) {
1064 gen_bx_excret(s, var);
1065 } else {
1066 store_reg(s, reg, var);
1070 #ifdef CONFIG_USER_ONLY
1071 #define IS_USER_ONLY 1
1072 #else
1073 #define IS_USER_ONLY 0
1074 #endif
1076 /* Abstractions of "generate code to do a guest load/store for
1077 * AArch32", where a vaddr is always 32 bits (and is zero
1078 * extended if we're a 64 bit core) and data is also
1079 * 32 bits unless specifically doing a 64 bit access.
1080 * These functions work like tcg_gen_qemu_{ld,st}* except
1081 * that the address argument is TCGv_i32 rather than TCGv.
1084 static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
1086 TCGv addr = tcg_temp_new();
1087 tcg_gen_extu_i32_tl(addr, a32);
1089 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1090 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1091 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
1093 return addr;
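/*
 * Illustrative note (not in the original source): in the BE32
 * (SCTLR.B) case above, a byte access has its address XORed with 3
 * and a halfword access with 2 (i.e. 4 - (1 << size)), which
 * reproduces big-endian byte ordering within each word while the
 * underlying memory accesses stay little-endian.
 */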
1096 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1097 int index, TCGMemOp opc)
1099 TCGv addr;
1101 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1102 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1103 opc |= MO_ALIGN;
1106 addr = gen_aa32_addr(s, a32, opc);
1107 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1108 tcg_temp_free(addr);
1111 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1112 int index, TCGMemOp opc)
1114 TCGv addr;
1116 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1117 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1118 opc |= MO_ALIGN;
1121 addr = gen_aa32_addr(s, a32, opc);
1122 tcg_gen_qemu_st_i32(val, addr, index, opc);
1123 tcg_temp_free(addr);
1126 #define DO_GEN_LD(SUFF, OPC) \
1127 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
1128 TCGv_i32 a32, int index) \
1130 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
1132 static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1133 TCGv_i32 val, \
1134 TCGv_i32 a32, int index, \
1135 ISSInfo issinfo) \
1137 gen_aa32_ld##SUFF(s, val, a32, index); \
1138 disas_set_da_iss(s, OPC, issinfo); \
1141 #define DO_GEN_ST(SUFF, OPC) \
1142 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1143 TCGv_i32 a32, int index) \
1145 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
1147 static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1148 TCGv_i32 val, \
1149 TCGv_i32 a32, int index, \
1150 ISSInfo issinfo) \
1152 gen_aa32_st##SUFF(s, val, a32, index); \
1153 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
1156 static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
1158 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1159 if (!IS_USER_ONLY && s->sctlr_b) {
1160 tcg_gen_rotri_i64(val, val, 32);
1164 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1165 int index, TCGMemOp opc)
1167 TCGv addr = gen_aa32_addr(s, a32, opc);
1168 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1169 gen_aa32_frob64(s, val);
1170 tcg_temp_free(addr);
1173 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1174 TCGv_i32 a32, int index)
1176 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1179 static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1180 int index, TCGMemOp opc)
1182 TCGv addr = gen_aa32_addr(s, a32, opc);
1184 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1185 if (!IS_USER_ONLY && s->sctlr_b) {
1186 TCGv_i64 tmp = tcg_temp_new_i64();
1187 tcg_gen_rotri_i64(tmp, val, 32);
1188 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1189 tcg_temp_free_i64(tmp);
1190 } else {
1191 tcg_gen_qemu_st_i64(val, addr, index, opc);
1193 tcg_temp_free(addr);
1196 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1197 TCGv_i32 a32, int index)
1199 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1202 DO_GEN_LD(8s, MO_SB)
1203 DO_GEN_LD(8u, MO_UB)
1204 DO_GEN_LD(16s, MO_SW)
1205 DO_GEN_LD(16u, MO_UW)
1206 DO_GEN_LD(32u, MO_UL)
1207 DO_GEN_ST(8, MO_UB)
1208 DO_GEN_ST(16, MO_UW)
1209 DO_GEN_ST(32, MO_UL)
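/*
 * Expansion sketch (illustrative, not from the original source):
 * DO_GEN_LD(8u, MO_UB) above defines gen_aa32_ld8u(), which calls
 * gen_aa32_ld_i32(s, val, a32, index, MO_UB | s->be_data), plus a
 * gen_aa32_ld8u_iss() variant that additionally records ISS syndrome
 * information via disas_set_da_iss() for insns that report it.
 */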
1211 static inline void gen_hvc(DisasContext *s, int imm16)
1213 /* The pre HVC helper handles cases when HVC gets trapped
1214 * as an undefined insn by runtime configuration (ie before
1215 * the insn really executes).
1217 gen_set_pc_im(s, s->pc_curr);
1218 gen_helper_pre_hvc(cpu_env);
1219 /* Otherwise we will treat this as a real exception which
1220 * happens after execution of the insn. (The distinction matters
1221 * for the PC value reported to the exception handler and also
1222 * for single stepping.)
1224 s->svc_imm = imm16;
1225 gen_set_pc_im(s, s->base.pc_next);
1226 s->base.is_jmp = DISAS_HVC;
1229 static inline void gen_smc(DisasContext *s)
1231 /* As with HVC, we may take an exception either before or after
1232 * the insn executes.
1234 TCGv_i32 tmp;
1236 gen_set_pc_im(s, s->pc_curr);
1237 tmp = tcg_const_i32(syn_aa32_smc());
1238 gen_helper_pre_smc(cpu_env, tmp);
1239 tcg_temp_free_i32(tmp);
1240 gen_set_pc_im(s, s->base.pc_next);
1241 s->base.is_jmp = DISAS_SMC;
1244 static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
1246 gen_set_condexec(s);
1247 gen_set_pc_im(s, pc);
1248 gen_exception_internal(excp);
1249 s->base.is_jmp = DISAS_NORETURN;
1252 static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
1253 int syn, uint32_t target_el)
1255 gen_set_condexec(s);
1256 gen_set_pc_im(s, pc);
1257 gen_exception(excp, syn, target_el);
1258 s->base.is_jmp = DISAS_NORETURN;
1261 static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
1263 TCGv_i32 tcg_syn;
1265 gen_set_condexec(s);
1266 gen_set_pc_im(s, s->pc_curr);
1267 tcg_syn = tcg_const_i32(syn);
1268 gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
1269 tcg_temp_free_i32(tcg_syn);
1270 s->base.is_jmp = DISAS_NORETURN;
1273 void unallocated_encoding(DisasContext *s)
1275 /* Unallocated and reserved encodings are uncategorized */
1276 gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
1277 default_exception_el(s));
1280 /* Force a TB lookup after an instruction that changes the CPU state. */
1281 static inline void gen_lookup_tb(DisasContext *s)
1283 tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
1284 s->base.is_jmp = DISAS_EXIT;
1287 static inline void gen_hlt(DisasContext *s, int imm)
1289 /* HLT. This has two purposes.
1290 * Architecturally, it is an external halting debug instruction.
 1291      * Since QEMU doesn't implement external debug, we treat it as the
 1292      * architecture requires when halting debug is disabled: it will UNDEF.
1293 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1294 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1295 * must trigger semihosting even for ARMv7 and earlier, where
1296 * HLT was an undefined encoding.
1297 * In system mode, we don't allow userspace access to
1298 * semihosting, to provide some semblance of security
1299 * (and for consistency with our 32-bit semihosting).
1301 if (semihosting_enabled() &&
1302 #ifndef CONFIG_USER_ONLY
1303 s->current_el != 0 &&
1304 #endif
1305 (imm == (s->thumb ? 0x3c : 0xf000))) {
1306 gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST);
1307 return;
1310 unallocated_encoding(s);
1313 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1314 TCGv_i32 var)
1316 int val, rm, shift, shiftop;
1317 TCGv_i32 offset;
1319 if (!(insn & (1 << 25))) {
1320 /* immediate */
1321 val = insn & 0xfff;
1322 if (!(insn & (1 << 23)))
1323 val = -val;
1324 if (val != 0)
1325 tcg_gen_addi_i32(var, var, val);
1326 } else {
1327 /* shift/register */
1328 rm = (insn) & 0xf;
1329 shift = (insn >> 7) & 0x1f;
1330 shiftop = (insn >> 5) & 3;
1331 offset = load_reg(s, rm);
1332 gen_arm_shift_im(offset, shiftop, shift, 0);
1333 if (!(insn & (1 << 23)))
1334 tcg_gen_sub_i32(var, var, offset);
1335 else
1336 tcg_gen_add_i32(var, var, offset);
1337 tcg_temp_free_i32(offset);
1341 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1342 int extra, TCGv_i32 var)
1344 int val, rm;
1345 TCGv_i32 offset;
1347 if (insn & (1 << 22)) {
1348 /* immediate */
1349 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1350 if (!(insn & (1 << 23)))
1351 val = -val;
1352 val += extra;
1353 if (val != 0)
1354 tcg_gen_addi_i32(var, var, val);
1355 } else {
1356 /* register */
1357 if (extra)
1358 tcg_gen_addi_i32(var, var, extra);
1359 rm = (insn) & 0xf;
1360 offset = load_reg(s, rm);
1361 if (!(insn & (1 << 23)))
1362 tcg_gen_sub_i32(var, var, offset);
1363 else
1364 tcg_gen_add_i32(var, var, offset);
1365 tcg_temp_free_i32(offset);
1369 static TCGv_ptr get_fpstatus_ptr(int neon)
1371 TCGv_ptr statusptr = tcg_temp_new_ptr();
1372 int offset;
1373 if (neon) {
1374 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1375 } else {
1376 offset = offsetof(CPUARMState, vfp.fp_status);
1378 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1379 return statusptr;
1382 static inline long vfp_reg_offset(bool dp, unsigned reg)
1384 if (dp) {
1385 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
1386 } else {
1387 long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
1388 if (reg & 1) {
1389 ofs += offsetof(CPU_DoubleU, l.upper);
1390 } else {
1391 ofs += offsetof(CPU_DoubleU, l.lower);
1393 return ofs;
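/*
 * Worked example (illustrative, not part of the original source):
 * vfp_reg_offset(true, 3) addresses D3, stored as zregs[1].d[1];
 * vfp_reg_offset(false, 5) addresses S5, i.e. the upper 32-bit half
 * of zregs[1].d[0], because single-precision registers are packed
 * two per 64-bit double-precision slot.
 */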
1397 /* Return the offset of a 32-bit piece of a NEON register.
1398 zero is the least significant end of the register. */
1399 static inline long
1400 neon_reg_offset (int reg, int n)
1402 int sreg;
1403 sreg = reg * 2 + n;
1404 return vfp_reg_offset(0, sreg);
1407 /* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
1408 * where 0 is the least significant end of the register.
1410 static inline long
1411 neon_element_offset(int reg, int element, TCGMemOp size)
1413 int element_size = 1 << size;
1414 int ofs = element * element_size;
1415 #ifdef HOST_WORDS_BIGENDIAN
1416 /* Calculate the offset assuming fully little-endian,
1417 * then XOR to account for the order of the 8-byte units.
1419 if (element_size < 8) {
1420 ofs ^= 8 - element_size;
1422 #endif
1423 return neon_reg_offset(reg, 0) + ofs;
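/*
 * Illustrative example (not in the original source): for 16-bit
 * elements (element_size == 2), element 1 sits at byte offset 2 on a
 * little-endian host; on a big-endian host the XOR with
 * 8 - element_size == 6 moves it to offset 4, because each 8-byte
 * unit is stored in host byte order.
 */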
1426 static TCGv_i32 neon_load_reg(int reg, int pass)
1428 TCGv_i32 tmp = tcg_temp_new_i32();
1429 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1430 return tmp;
1433 static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
1435 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1437 switch (mop) {
1438 case MO_UB:
1439 tcg_gen_ld8u_i32(var, cpu_env, offset);
1440 break;
1441 case MO_UW:
1442 tcg_gen_ld16u_i32(var, cpu_env, offset);
1443 break;
1444 case MO_UL:
1445 tcg_gen_ld_i32(var, cpu_env, offset);
1446 break;
1447 default:
1448 g_assert_not_reached();
1452 static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
1454 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1456 switch (mop) {
1457 case MO_UB:
1458 tcg_gen_ld8u_i64(var, cpu_env, offset);
1459 break;
1460 case MO_UW:
1461 tcg_gen_ld16u_i64(var, cpu_env, offset);
1462 break;
1463 case MO_UL:
1464 tcg_gen_ld32u_i64(var, cpu_env, offset);
1465 break;
1466 case MO_Q:
1467 tcg_gen_ld_i64(var, cpu_env, offset);
1468 break;
1469 default:
1470 g_assert_not_reached();
1474 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1476 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1477 tcg_temp_free_i32(var);
1480 static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
1482 long offset = neon_element_offset(reg, ele, size);
1484 switch (size) {
1485 case MO_8:
1486 tcg_gen_st8_i32(var, cpu_env, offset);
1487 break;
1488 case MO_16:
1489 tcg_gen_st16_i32(var, cpu_env, offset);
1490 break;
1491 case MO_32:
1492 tcg_gen_st_i32(var, cpu_env, offset);
1493 break;
1494 default:
1495 g_assert_not_reached();
1499 static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
1501 long offset = neon_element_offset(reg, ele, size);
1503 switch (size) {
1504 case MO_8:
1505 tcg_gen_st8_i64(var, cpu_env, offset);
1506 break;
1507 case MO_16:
1508 tcg_gen_st16_i64(var, cpu_env, offset);
1509 break;
1510 case MO_32:
1511 tcg_gen_st32_i64(var, cpu_env, offset);
1512 break;
1513 case MO_64:
1514 tcg_gen_st_i64(var, cpu_env, offset);
1515 break;
1516 default:
1517 g_assert_not_reached();
1521 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1523 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1526 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1528 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1531 static inline void neon_load_reg32(TCGv_i32 var, int reg)
1533 tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
1536 static inline void neon_store_reg32(TCGv_i32 var, int reg)
1538 tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
1541 static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1543 TCGv_ptr ret = tcg_temp_new_ptr();
1544 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1545 return ret;
1548 #define ARM_CP_RW_BIT (1 << 20)
1550 /* Include the VFP decoder */
1551 #include "translate-vfp.inc.c"
1553 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1555 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1558 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1560 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1563 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1565 TCGv_i32 var = tcg_temp_new_i32();
1566 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1567 return var;
1570 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1572 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1573 tcg_temp_free_i32(var);
1576 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1578 iwmmxt_store_reg(cpu_M0, rn);
1581 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1583 iwmmxt_load_reg(cpu_M0, rn);
1586 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1588 iwmmxt_load_reg(cpu_V1, rn);
1589 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1592 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1594 iwmmxt_load_reg(cpu_V1, rn);
1595 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1598 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1600 iwmmxt_load_reg(cpu_V1, rn);
1601 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1604 #define IWMMXT_OP(name) \
1605 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1607 iwmmxt_load_reg(cpu_V1, rn); \
1608 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1611 #define IWMMXT_OP_ENV(name) \
1612 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1614 iwmmxt_load_reg(cpu_V1, rn); \
1615 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1618 #define IWMMXT_OP_ENV_SIZE(name) \
1619 IWMMXT_OP_ENV(name##b) \
1620 IWMMXT_OP_ENV(name##w) \
1621 IWMMXT_OP_ENV(name##l)
1623 #define IWMMXT_OP_ENV1(name) \
1624 static inline void gen_op_iwmmxt_##name##_M0(void) \
1626 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1629 IWMMXT_OP(maddsq)
1630 IWMMXT_OP(madduq)
1631 IWMMXT_OP(sadb)
1632 IWMMXT_OP(sadw)
1633 IWMMXT_OP(mulslw)
1634 IWMMXT_OP(mulshw)
1635 IWMMXT_OP(mululw)
1636 IWMMXT_OP(muluhw)
1637 IWMMXT_OP(macsw)
1638 IWMMXT_OP(macuw)
1640 IWMMXT_OP_ENV_SIZE(unpackl)
1641 IWMMXT_OP_ENV_SIZE(unpackh)
1643 IWMMXT_OP_ENV1(unpacklub)
1644 IWMMXT_OP_ENV1(unpackluw)
1645 IWMMXT_OP_ENV1(unpacklul)
1646 IWMMXT_OP_ENV1(unpackhub)
1647 IWMMXT_OP_ENV1(unpackhuw)
1648 IWMMXT_OP_ENV1(unpackhul)
1649 IWMMXT_OP_ENV1(unpacklsb)
1650 IWMMXT_OP_ENV1(unpacklsw)
1651 IWMMXT_OP_ENV1(unpacklsl)
1652 IWMMXT_OP_ENV1(unpackhsb)
1653 IWMMXT_OP_ENV1(unpackhsw)
1654 IWMMXT_OP_ENV1(unpackhsl)
1656 IWMMXT_OP_ENV_SIZE(cmpeq)
1657 IWMMXT_OP_ENV_SIZE(cmpgtu)
1658 IWMMXT_OP_ENV_SIZE(cmpgts)
1660 IWMMXT_OP_ENV_SIZE(mins)
1661 IWMMXT_OP_ENV_SIZE(minu)
1662 IWMMXT_OP_ENV_SIZE(maxs)
1663 IWMMXT_OP_ENV_SIZE(maxu)
1665 IWMMXT_OP_ENV_SIZE(subn)
1666 IWMMXT_OP_ENV_SIZE(addn)
1667 IWMMXT_OP_ENV_SIZE(subu)
1668 IWMMXT_OP_ENV_SIZE(addu)
1669 IWMMXT_OP_ENV_SIZE(subs)
1670 IWMMXT_OP_ENV_SIZE(adds)
1672 IWMMXT_OP_ENV(avgb0)
1673 IWMMXT_OP_ENV(avgb1)
1674 IWMMXT_OP_ENV(avgw0)
1675 IWMMXT_OP_ENV(avgw1)
1677 IWMMXT_OP_ENV(packuw)
1678 IWMMXT_OP_ENV(packul)
1679 IWMMXT_OP_ENV(packuq)
1680 IWMMXT_OP_ENV(packsw)
1681 IWMMXT_OP_ENV(packsl)
1682 IWMMXT_OP_ENV(packsq)
1684 static void gen_op_iwmmxt_set_mup(void)
1686 TCGv_i32 tmp;
1687 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1688 tcg_gen_ori_i32(tmp, tmp, 2);
1689 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1692 static void gen_op_iwmmxt_set_cup(void)
1694 TCGv_i32 tmp;
1695 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1696 tcg_gen_ori_i32(tmp, tmp, 1);
1697 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1700 static void gen_op_iwmmxt_setpsr_nz(void)
1702 TCGv_i32 tmp = tcg_temp_new_i32();
1703 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1704 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1707 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1709 iwmmxt_load_reg(cpu_V1, rn);
1710 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1711 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1714 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1715 TCGv_i32 dest)
1717 int rd;
1718 uint32_t offset;
1719 TCGv_i32 tmp;
1721 rd = (insn >> 16) & 0xf;
1722 tmp = load_reg(s, rd);
1724 offset = (insn & 0xff) << ((insn >> 7) & 2);
1725 if (insn & (1 << 24)) {
1726 /* Pre indexed */
1727 if (insn & (1 << 23))
1728 tcg_gen_addi_i32(tmp, tmp, offset);
1729 else
1730 tcg_gen_addi_i32(tmp, tmp, -offset);
1731 tcg_gen_mov_i32(dest, tmp);
1732 if (insn & (1 << 21))
1733 store_reg(s, rd, tmp);
1734 else
1735 tcg_temp_free_i32(tmp);
1736 } else if (insn & (1 << 21)) {
1737 /* Post indexed */
1738 tcg_gen_mov_i32(dest, tmp);
1739 if (insn & (1 << 23))
1740 tcg_gen_addi_i32(tmp, tmp, offset);
1741 else
1742 tcg_gen_addi_i32(tmp, tmp, -offset);
1743 store_reg(s, rd, tmp);
1744 } else if (!(insn & (1 << 23)))
1745 return 1;
1746 return 0;
1749 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1751 int rd = (insn >> 0) & 0xf;
1752 TCGv_i32 tmp;
1754 if (insn & (1 << 8)) {
1755 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1756 return 1;
1757 } else {
1758 tmp = iwmmxt_load_creg(rd);
1760 } else {
1761 tmp = tcg_temp_new_i32();
1762 iwmmxt_load_reg(cpu_V0, rd);
1763 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1765 tcg_gen_andi_i32(tmp, tmp, mask);
1766 tcg_gen_mov_i32(dest, tmp);
1767 tcg_temp_free_i32(tmp);
1768 return 0;
1771 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1772 (ie. an undefined instruction). */
1773 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1775 int rd, wrd;
1776 int rdhi, rdlo, rd0, rd1, i;
1777 TCGv_i32 addr;
1778 TCGv_i32 tmp, tmp2, tmp3;
1780 if ((insn & 0x0e000e00) == 0x0c000000) {
1781 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1782 wrd = insn & 0xf;
1783 rdlo = (insn >> 12) & 0xf;
1784 rdhi = (insn >> 16) & 0xf;
1785 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1786 iwmmxt_load_reg(cpu_V0, wrd);
1787 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1788 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1789 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1790 } else { /* TMCRR */
1791 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1792 iwmmxt_store_reg(cpu_V0, wrd);
1793 gen_op_iwmmxt_set_mup();
1795 return 0;
1798 wrd = (insn >> 12) & 0xf;
1799 addr = tcg_temp_new_i32();
1800 if (gen_iwmmxt_address(s, insn, addr)) {
1801 tcg_temp_free_i32(addr);
1802 return 1;
1804 if (insn & ARM_CP_RW_BIT) {
1805 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1806 tmp = tcg_temp_new_i32();
1807 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1808 iwmmxt_store_creg(wrd, tmp);
1809 } else {
1810 i = 1;
1811 if (insn & (1 << 8)) {
1812 if (insn & (1 << 22)) { /* WLDRD */
1813 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1814 i = 0;
1815 } else { /* WLDRW wRd */
1816 tmp = tcg_temp_new_i32();
1817 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1819 } else {
1820 tmp = tcg_temp_new_i32();
1821 if (insn & (1 << 22)) { /* WLDRH */
1822 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1823 } else { /* WLDRB */
1824 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1827 if (i) {
1828 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1829 tcg_temp_free_i32(tmp);
1831 gen_op_iwmmxt_movq_wRn_M0(wrd);
1833 } else {
1834 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1835 tmp = iwmmxt_load_creg(wrd);
1836 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1837 } else {
1838 gen_op_iwmmxt_movq_M0_wRn(wrd);
1839 tmp = tcg_temp_new_i32();
1840 if (insn & (1 << 8)) {
1841 if (insn & (1 << 22)) { /* WSTRD */
1842 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1843 } else { /* WSTRW wRd */
1844 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1845 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1847 } else {
1848 if (insn & (1 << 22)) { /* WSTRH */
1849 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1850 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1851 } else { /* WSTRB */
1852 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1853 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1857 tcg_temp_free_i32(tmp);
1859 tcg_temp_free_i32(addr);
1860 return 0;
1863 if ((insn & 0x0f000000) != 0x0e000000)
1864 return 1;
1866 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1867 case 0x000: /* WOR */
1868 wrd = (insn >> 12) & 0xf;
1869 rd0 = (insn >> 0) & 0xf;
1870 rd1 = (insn >> 16) & 0xf;
1871 gen_op_iwmmxt_movq_M0_wRn(rd0);
1872 gen_op_iwmmxt_orq_M0_wRn(rd1);
1873 gen_op_iwmmxt_setpsr_nz();
1874 gen_op_iwmmxt_movq_wRn_M0(wrd);
1875 gen_op_iwmmxt_set_mup();
1876 gen_op_iwmmxt_set_cup();
1877 break;
1878 case 0x011: /* TMCR */
1879 if (insn & 0xf)
1880 return 1;
1881 rd = (insn >> 12) & 0xf;
1882 wrd = (insn >> 16) & 0xf;
1883 switch (wrd) {
1884 case ARM_IWMMXT_wCID:
1885 case ARM_IWMMXT_wCASF:
1886 break;
1887 case ARM_IWMMXT_wCon:
1888 gen_op_iwmmxt_set_cup();
1889 /* Fall through. */
1890 case ARM_IWMMXT_wCSSF:
1891 tmp = iwmmxt_load_creg(wrd);
1892 tmp2 = load_reg(s, rd);
1893 tcg_gen_andc_i32(tmp, tmp, tmp2);
1894 tcg_temp_free_i32(tmp2);
1895 iwmmxt_store_creg(wrd, tmp);
1896 break;
1897 case ARM_IWMMXT_wCGR0:
1898 case ARM_IWMMXT_wCGR1:
1899 case ARM_IWMMXT_wCGR2:
1900 case ARM_IWMMXT_wCGR3:
1901 gen_op_iwmmxt_set_cup();
1902 tmp = load_reg(s, rd);
1903 iwmmxt_store_creg(wrd, tmp);
1904 break;
1905 default:
1906 return 1;
1908 break;
1909 case 0x100: /* WXOR */
1910 wrd = (insn >> 12) & 0xf;
1911 rd0 = (insn >> 0) & 0xf;
1912 rd1 = (insn >> 16) & 0xf;
1913 gen_op_iwmmxt_movq_M0_wRn(rd0);
1914 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1915 gen_op_iwmmxt_setpsr_nz();
1916 gen_op_iwmmxt_movq_wRn_M0(wrd);
1917 gen_op_iwmmxt_set_mup();
1918 gen_op_iwmmxt_set_cup();
1919 break;
1920 case 0x111: /* TMRC */
1921 if (insn & 0xf)
1922 return 1;
1923 rd = (insn >> 12) & 0xf;
1924 wrd = (insn >> 16) & 0xf;
1925 tmp = iwmmxt_load_creg(wrd);
1926 store_reg(s, rd, tmp);
1927 break;
1928 case 0x300: /* WANDN */
1929 wrd = (insn >> 12) & 0xf;
1930 rd0 = (insn >> 0) & 0xf;
1931 rd1 = (insn >> 16) & 0xf;
1932 gen_op_iwmmxt_movq_M0_wRn(rd0);
1933 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1934 gen_op_iwmmxt_andq_M0_wRn(rd1);
1935 gen_op_iwmmxt_setpsr_nz();
1936 gen_op_iwmmxt_movq_wRn_M0(wrd);
1937 gen_op_iwmmxt_set_mup();
1938 gen_op_iwmmxt_set_cup();
1939 break;
1940 case 0x200: /* WAND */
1941 wrd = (insn >> 12) & 0xf;
1942 rd0 = (insn >> 0) & 0xf;
1943 rd1 = (insn >> 16) & 0xf;
1944 gen_op_iwmmxt_movq_M0_wRn(rd0);
1945 gen_op_iwmmxt_andq_M0_wRn(rd1);
1946 gen_op_iwmmxt_setpsr_nz();
1947 gen_op_iwmmxt_movq_wRn_M0(wrd);
1948 gen_op_iwmmxt_set_mup();
1949 gen_op_iwmmxt_set_cup();
1950 break;
1951 case 0x810: case 0xa10: /* WMADD */
1952 wrd = (insn >> 12) & 0xf;
1953 rd0 = (insn >> 0) & 0xf;
1954 rd1 = (insn >> 16) & 0xf;
1955 gen_op_iwmmxt_movq_M0_wRn(rd0);
1956 if (insn & (1 << 21))
1957 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1958 else
1959 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1960 gen_op_iwmmxt_movq_wRn_M0(wrd);
1961 gen_op_iwmmxt_set_mup();
1962 break;
1963 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1964 wrd = (insn >> 12) & 0xf;
1965 rd0 = (insn >> 16) & 0xf;
1966 rd1 = (insn >> 0) & 0xf;
1967 gen_op_iwmmxt_movq_M0_wRn(rd0);
1968 switch ((insn >> 22) & 3) {
1969 case 0:
1970 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1971 break;
1972 case 1:
1973 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1974 break;
1975 case 2:
1976 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1977 break;
1978 case 3:
1979 return 1;
1981 gen_op_iwmmxt_movq_wRn_M0(wrd);
1982 gen_op_iwmmxt_set_mup();
1983 gen_op_iwmmxt_set_cup();
1984 break;
1985 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1986 wrd = (insn >> 12) & 0xf;
1987 rd0 = (insn >> 16) & 0xf;
1988 rd1 = (insn >> 0) & 0xf;
1989 gen_op_iwmmxt_movq_M0_wRn(rd0);
1990 switch ((insn >> 22) & 3) {
1991 case 0:
1992 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1993 break;
1994 case 1:
1995 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1996 break;
1997 case 2:
1998 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1999 break;
2000 case 3:
2001 return 1;
2003 gen_op_iwmmxt_movq_wRn_M0(wrd);
2004 gen_op_iwmmxt_set_mup();
2005 gen_op_iwmmxt_set_cup();
2006 break;
2007 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2008 wrd = (insn >> 12) & 0xf;
2009 rd0 = (insn >> 16) & 0xf;
2010 rd1 = (insn >> 0) & 0xf;
2011 gen_op_iwmmxt_movq_M0_wRn(rd0);
2012 if (insn & (1 << 22))
2013 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2014 else
2015 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2016 if (!(insn & (1 << 20)))
2017 gen_op_iwmmxt_addl_M0_wRn(wrd);
2018 gen_op_iwmmxt_movq_wRn_M0(wrd);
2019 gen_op_iwmmxt_set_mup();
2020 break;
2021 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2022 wrd = (insn >> 12) & 0xf;
2023 rd0 = (insn >> 16) & 0xf;
2024 rd1 = (insn >> 0) & 0xf;
2025 gen_op_iwmmxt_movq_M0_wRn(rd0);
2026 if (insn & (1 << 21)) {
2027 if (insn & (1 << 20))
2028 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2029 else
2030 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2031 } else {
2032 if (insn & (1 << 20))
2033 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2034 else
2035 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2037 gen_op_iwmmxt_movq_wRn_M0(wrd);
2038 gen_op_iwmmxt_set_mup();
2039 break;
2040 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2041 wrd = (insn >> 12) & 0xf;
2042 rd0 = (insn >> 16) & 0xf;
2043 rd1 = (insn >> 0) & 0xf;
2044 gen_op_iwmmxt_movq_M0_wRn(rd0);
2045 if (insn & (1 << 21))
2046 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2047 else
2048 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2049 if (!(insn & (1 << 20))) {
2050 iwmmxt_load_reg(cpu_V1, wrd);
2051 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
2053 gen_op_iwmmxt_movq_wRn_M0(wrd);
2054 gen_op_iwmmxt_set_mup();
2055 break;
2056 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2057 wrd = (insn >> 12) & 0xf;
2058 rd0 = (insn >> 16) & 0xf;
2059 rd1 = (insn >> 0) & 0xf;
2060 gen_op_iwmmxt_movq_M0_wRn(rd0);
2061 switch ((insn >> 22) & 3) {
2062 case 0:
2063 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2064 break;
2065 case 1:
2066 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2067 break;
2068 case 2:
2069 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2070 break;
2071 case 3:
2072 return 1;
2074 gen_op_iwmmxt_movq_wRn_M0(wrd);
2075 gen_op_iwmmxt_set_mup();
2076 gen_op_iwmmxt_set_cup();
2077 break;
2078 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2079 wrd = (insn >> 12) & 0xf;
2080 rd0 = (insn >> 16) & 0xf;
2081 rd1 = (insn >> 0) & 0xf;
2082 gen_op_iwmmxt_movq_M0_wRn(rd0);
2083 if (insn & (1 << 22)) {
2084 if (insn & (1 << 20))
2085 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2086 else
2087 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2088 } else {
2089 if (insn & (1 << 20))
2090 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2091 else
2092 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2094 gen_op_iwmmxt_movq_wRn_M0(wrd);
2095 gen_op_iwmmxt_set_mup();
2096 gen_op_iwmmxt_set_cup();
2097 break;
2098 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2099 wrd = (insn >> 12) & 0xf;
2100 rd0 = (insn >> 16) & 0xf;
2101 rd1 = (insn >> 0) & 0xf;
2102 gen_op_iwmmxt_movq_M0_wRn(rd0);
2103 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2104 tcg_gen_andi_i32(tmp, tmp, 7);
2105 iwmmxt_load_reg(cpu_V1, rd1);
2106 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2107 tcg_temp_free_i32(tmp);
2108 gen_op_iwmmxt_movq_wRn_M0(wrd);
2109 gen_op_iwmmxt_set_mup();
2110 break;
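            /*
             * WALIGNR (above) takes its byte offset from one of the
             * wCGR0-wCGR3 control registers, selected by bits [21:20];
             * only the low three bits of that register are used, so the
             * align helper extracts eight consecutive bytes starting 0-7
             * bytes into the concatenation of the two source registers.
             */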
2111 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2112 if (((insn >> 6) & 3) == 3)
2113 return 1;
2114 rd = (insn >> 12) & 0xf;
2115 wrd = (insn >> 16) & 0xf;
2116 tmp = load_reg(s, rd);
2117 gen_op_iwmmxt_movq_M0_wRn(wrd);
2118 switch ((insn >> 6) & 3) {
2119 case 0:
2120 tmp2 = tcg_const_i32(0xff);
2121 tmp3 = tcg_const_i32((insn & 7) << 3);
2122 break;
2123 case 1:
2124 tmp2 = tcg_const_i32(0xffff);
2125 tmp3 = tcg_const_i32((insn & 3) << 4);
2126 break;
2127 case 2:
2128 tmp2 = tcg_const_i32(0xffffffff);
2129 tmp3 = tcg_const_i32((insn & 1) << 5);
2130 break;
2131 default:
2132 tmp2 = NULL;
2133 tmp3 = NULL;
2135 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2136 tcg_temp_free_i32(tmp3);
2137 tcg_temp_free_i32(tmp2);
2138 tcg_temp_free_i32(tmp);
2139 gen_op_iwmmxt_movq_wRn_M0(wrd);
2140 gen_op_iwmmxt_set_mup();
2141 break;
2142 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2143 rd = (insn >> 12) & 0xf;
2144 wrd = (insn >> 16) & 0xf;
2145 if (rd == 15 || ((insn >> 22) & 3) == 3)
2146 return 1;
2147 gen_op_iwmmxt_movq_M0_wRn(wrd);
2148 tmp = tcg_temp_new_i32();
2149 switch ((insn >> 22) & 3) {
2150 case 0:
2151 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2152 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2153 if (insn & 8) {
2154 tcg_gen_ext8s_i32(tmp, tmp);
2155 } else {
2156 tcg_gen_andi_i32(tmp, tmp, 0xff);
2158 break;
2159 case 1:
2160 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2161 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2162 if (insn & 8) {
2163 tcg_gen_ext16s_i32(tmp, tmp);
2164 } else {
2165 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2167 break;
2168 case 2:
2169 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2170 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2171 break;
2173 store_reg(s, rd, tmp);
2174 break;
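            /*
             * TEXTRM (above) moves a single element of the wMMX register
             * into an ARM core register: bits [23:22] give the element
             * size, the low insn bits give the element index, and bit 3
             * chooses sign extension versus zero extension of the
             * extracted byte or halfword.
             */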
2175 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2176 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2177 return 1;
2178 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2179 switch ((insn >> 22) & 3) {
2180 case 0:
2181 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2182 break;
2183 case 1:
2184 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2185 break;
2186 case 2:
2187 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2188 break;
2190 tcg_gen_shli_i32(tmp, tmp, 28);
2191 gen_set_nzcv(tmp);
2192 tcg_temp_free_i32(tmp);
2193 break;
2194 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2195 if (((insn >> 6) & 3) == 3)
2196 return 1;
2197 rd = (insn >> 12) & 0xf;
2198 wrd = (insn >> 16) & 0xf;
2199 tmp = load_reg(s, rd);
2200 switch ((insn >> 6) & 3) {
2201 case 0:
2202 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2203 break;
2204 case 1:
2205 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2206 break;
2207 case 2:
2208 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2209 break;
2211 tcg_temp_free_i32(tmp);
2212 gen_op_iwmmxt_movq_wRn_M0(wrd);
2213 gen_op_iwmmxt_set_mup();
2214 break;
2215 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2216 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2217 return 1;
2218 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2219 tmp2 = tcg_temp_new_i32();
2220 tcg_gen_mov_i32(tmp2, tmp);
2221 switch ((insn >> 22) & 3) {
2222 case 0:
2223 for (i = 0; i < 7; i ++) {
2224 tcg_gen_shli_i32(tmp2, tmp2, 4);
2225 tcg_gen_and_i32(tmp, tmp, tmp2);
2227 break;
2228 case 1:
2229 for (i = 0; i < 3; i ++) {
2230 tcg_gen_shli_i32(tmp2, tmp2, 8);
2231 tcg_gen_and_i32(tmp, tmp, tmp2);
2233 break;
2234 case 2:
2235 tcg_gen_shli_i32(tmp2, tmp2, 16);
2236 tcg_gen_and_i32(tmp, tmp, tmp2);
2237 break;
2239 gen_set_nzcv(tmp);
2240 tcg_temp_free_i32(tmp2);
2241 tcg_temp_free_i32(tmp);
2242 break;
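            /*
             * TANDC (above) folds the per-element flag fields of wCASF
             * together with AND (eight 4-bit groups for bytes, four 8-bit
             * groups for halfwords, two 16-bit groups for words) so that
             * bits [31:28] end up holding the combined N/Z/C/V, which
             * gen_set_nzcv() then copies into the ARM flags; TORC below
             * does the same using OR.
             */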
2243 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2244 wrd = (insn >> 12) & 0xf;
2245 rd0 = (insn >> 16) & 0xf;
2246 gen_op_iwmmxt_movq_M0_wRn(rd0);
2247 switch ((insn >> 22) & 3) {
2248 case 0:
2249 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2250 break;
2251 case 1:
2252 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2253 break;
2254 case 2:
2255 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2256 break;
2257 case 3:
2258 return 1;
2260 gen_op_iwmmxt_movq_wRn_M0(wrd);
2261 gen_op_iwmmxt_set_mup();
2262 break;
2263 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2264 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2265 return 1;
2266 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2267 tmp2 = tcg_temp_new_i32();
2268 tcg_gen_mov_i32(tmp2, tmp);
2269 switch ((insn >> 22) & 3) {
2270 case 0:
2271 for (i = 0; i < 7; i ++) {
2272 tcg_gen_shli_i32(tmp2, tmp2, 4);
2273 tcg_gen_or_i32(tmp, tmp, tmp2);
2275 break;
2276 case 1:
2277 for (i = 0; i < 3; i ++) {
2278 tcg_gen_shli_i32(tmp2, tmp2, 8);
2279 tcg_gen_or_i32(tmp, tmp, tmp2);
2281 break;
2282 case 2:
2283 tcg_gen_shli_i32(tmp2, tmp2, 16);
2284 tcg_gen_or_i32(tmp, tmp, tmp2);
2285 break;
2287 gen_set_nzcv(tmp);
2288 tcg_temp_free_i32(tmp2);
2289 tcg_temp_free_i32(tmp);
2290 break;
2291 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2292 rd = (insn >> 12) & 0xf;
2293 rd0 = (insn >> 16) & 0xf;
2294 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2295 return 1;
2296 gen_op_iwmmxt_movq_M0_wRn(rd0);
2297 tmp = tcg_temp_new_i32();
2298 switch ((insn >> 22) & 3) {
2299 case 0:
2300 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2301 break;
2302 case 1:
2303 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2304 break;
2305 case 2:
2306 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2307 break;
2309 store_reg(s, rd, tmp);
2310 break;
2311 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2312 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2313 wrd = (insn >> 12) & 0xf;
2314 rd0 = (insn >> 16) & 0xf;
2315 rd1 = (insn >> 0) & 0xf;
2316 gen_op_iwmmxt_movq_M0_wRn(rd0);
2317 switch ((insn >> 22) & 3) {
2318 case 0:
2319 if (insn & (1 << 21))
2320 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2321 else
2322 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2323 break;
2324 case 1:
2325 if (insn & (1 << 21))
2326 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2327 else
2328 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2329 break;
2330 case 2:
2331 if (insn & (1 << 21))
2332 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2333 else
2334 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2335 break;
2336 case 3:
2337 return 1;
2339 gen_op_iwmmxt_movq_wRn_M0(wrd);
2340 gen_op_iwmmxt_set_mup();
2341 gen_op_iwmmxt_set_cup();
2342 break;
2343 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2344 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2345 wrd = (insn >> 12) & 0xf;
2346 rd0 = (insn >> 16) & 0xf;
2347 gen_op_iwmmxt_movq_M0_wRn(rd0);
2348 switch ((insn >> 22) & 3) {
2349 case 0:
2350 if (insn & (1 << 21))
2351 gen_op_iwmmxt_unpacklsb_M0();
2352 else
2353 gen_op_iwmmxt_unpacklub_M0();
2354 break;
2355 case 1:
2356 if (insn & (1 << 21))
2357 gen_op_iwmmxt_unpacklsw_M0();
2358 else
2359 gen_op_iwmmxt_unpackluw_M0();
2360 break;
2361 case 2:
2362 if (insn & (1 << 21))
2363 gen_op_iwmmxt_unpacklsl_M0();
2364 else
2365 gen_op_iwmmxt_unpacklul_M0();
2366 break;
2367 case 3:
2368 return 1;
2370 gen_op_iwmmxt_movq_wRn_M0(wrd);
2371 gen_op_iwmmxt_set_mup();
2372 gen_op_iwmmxt_set_cup();
2373 break;
2374 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2375 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2376 wrd = (insn >> 12) & 0xf;
2377 rd0 = (insn >> 16) & 0xf;
2378 gen_op_iwmmxt_movq_M0_wRn(rd0);
2379 switch ((insn >> 22) & 3) {
2380 case 0:
2381 if (insn & (1 << 21))
2382 gen_op_iwmmxt_unpackhsb_M0();
2383 else
2384 gen_op_iwmmxt_unpackhub_M0();
2385 break;
2386 case 1:
2387 if (insn & (1 << 21))
2388 gen_op_iwmmxt_unpackhsw_M0();
2389 else
2390 gen_op_iwmmxt_unpackhuw_M0();
2391 break;
2392 case 2:
2393 if (insn & (1 << 21))
2394 gen_op_iwmmxt_unpackhsl_M0();
2395 else
2396 gen_op_iwmmxt_unpackhul_M0();
2397 break;
2398 case 3:
2399 return 1;
2401 gen_op_iwmmxt_movq_wRn_M0(wrd);
2402 gen_op_iwmmxt_set_mup();
2403 gen_op_iwmmxt_set_cup();
2404 break;
2405 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2406 case 0x214: case 0x614: case 0xa14: case 0xe14:
2407 if (((insn >> 22) & 3) == 0)
2408 return 1;
2409 wrd = (insn >> 12) & 0xf;
2410 rd0 = (insn >> 16) & 0xf;
2411 gen_op_iwmmxt_movq_M0_wRn(rd0);
2412 tmp = tcg_temp_new_i32();
2413 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2414 tcg_temp_free_i32(tmp);
2415 return 1;
2417 switch ((insn >> 22) & 3) {
2418 case 1:
2419 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2420 break;
2421 case 2:
2422 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2423 break;
2424 case 3:
2425 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2426 break;
2428 tcg_temp_free_i32(tmp);
2429 gen_op_iwmmxt_movq_wRn_M0(wrd);
2430 gen_op_iwmmxt_set_mup();
2431 gen_op_iwmmxt_set_cup();
2432 break;
2433 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2434 case 0x014: case 0x414: case 0x814: case 0xc14:
2435 if (((insn >> 22) & 3) == 0)
2436 return 1;
2437 wrd = (insn >> 12) & 0xf;
2438 rd0 = (insn >> 16) & 0xf;
2439 gen_op_iwmmxt_movq_M0_wRn(rd0);
2440 tmp = tcg_temp_new_i32();
2441 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2442 tcg_temp_free_i32(tmp);
2443 return 1;
2445 switch ((insn >> 22) & 3) {
2446 case 1:
2447 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2448 break;
2449 case 2:
2450 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2451 break;
2452 case 3:
2453 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2454 break;
2456 tcg_temp_free_i32(tmp);
2457 gen_op_iwmmxt_movq_wRn_M0(wrd);
2458 gen_op_iwmmxt_set_mup();
2459 gen_op_iwmmxt_set_cup();
2460 break;
2461 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2462 case 0x114: case 0x514: case 0x914: case 0xd14:
2463 if (((insn >> 22) & 3) == 0)
2464 return 1;
2465 wrd = (insn >> 12) & 0xf;
2466 rd0 = (insn >> 16) & 0xf;
2467 gen_op_iwmmxt_movq_M0_wRn(rd0);
2468 tmp = tcg_temp_new_i32();
2469 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2470 tcg_temp_free_i32(tmp);
2471 return 1;
2473 switch ((insn >> 22) & 3) {
2474 case 1:
2475 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2476 break;
2477 case 2:
2478 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2479 break;
2480 case 3:
2481 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2482 break;
2484 tcg_temp_free_i32(tmp);
2485 gen_op_iwmmxt_movq_wRn_M0(wrd);
2486 gen_op_iwmmxt_set_mup();
2487 gen_op_iwmmxt_set_cup();
2488 break;
2489 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2490 case 0x314: case 0x714: case 0xb14: case 0xf14:
2491 if (((insn >> 22) & 3) == 0)
2492 return 1;
2493 wrd = (insn >> 12) & 0xf;
2494 rd0 = (insn >> 16) & 0xf;
2495 gen_op_iwmmxt_movq_M0_wRn(rd0);
2496 tmp = tcg_temp_new_i32();
2497 switch ((insn >> 22) & 3) {
2498 case 1:
2499 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2500 tcg_temp_free_i32(tmp);
2501 return 1;
2503 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2504 break;
2505 case 2:
2506 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2507 tcg_temp_free_i32(tmp);
2508 return 1;
2510 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2511 break;
2512 case 3:
2513 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2514 tcg_temp_free_i32(tmp);
2515 return 1;
2517 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2518 break;
2520 tcg_temp_free_i32(tmp);
2521 gen_op_iwmmxt_movq_wRn_M0(wrd);
2522 gen_op_iwmmxt_set_mup();
2523 gen_op_iwmmxt_set_cup();
2524 break;
2525 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2526 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2527 wrd = (insn >> 12) & 0xf;
2528 rd0 = (insn >> 16) & 0xf;
2529 rd1 = (insn >> 0) & 0xf;
2530 gen_op_iwmmxt_movq_M0_wRn(rd0);
2531 switch ((insn >> 22) & 3) {
2532 case 0:
2533 if (insn & (1 << 21))
2534 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2535 else
2536 gen_op_iwmmxt_minub_M0_wRn(rd1);
2537 break;
2538 case 1:
2539 if (insn & (1 << 21))
2540 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2541 else
2542 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2543 break;
2544 case 2:
2545 if (insn & (1 << 21))
2546 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2547 else
2548 gen_op_iwmmxt_minul_M0_wRn(rd1);
2549 break;
2550 case 3:
2551 return 1;
2553 gen_op_iwmmxt_movq_wRn_M0(wrd);
2554 gen_op_iwmmxt_set_mup();
2555 break;
2556 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2557 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2558 wrd = (insn >> 12) & 0xf;
2559 rd0 = (insn >> 16) & 0xf;
2560 rd1 = (insn >> 0) & 0xf;
2561 gen_op_iwmmxt_movq_M0_wRn(rd0);
2562 switch ((insn >> 22) & 3) {
2563 case 0:
2564 if (insn & (1 << 21))
2565 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2566 else
2567 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2568 break;
2569 case 1:
2570 if (insn & (1 << 21))
2571 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2572 else
2573 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2574 break;
2575 case 2:
2576 if (insn & (1 << 21))
2577 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2578 else
2579 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2580 break;
2581 case 3:
2582 return 1;
2584 gen_op_iwmmxt_movq_wRn_M0(wrd);
2585 gen_op_iwmmxt_set_mup();
2586 break;
2587 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2588 case 0x402: case 0x502: case 0x602: case 0x702:
2589 wrd = (insn >> 12) & 0xf;
2590 rd0 = (insn >> 16) & 0xf;
2591 rd1 = (insn >> 0) & 0xf;
2592 gen_op_iwmmxt_movq_M0_wRn(rd0);
2593 tmp = tcg_const_i32((insn >> 20) & 3);
2594 iwmmxt_load_reg(cpu_V1, rd1);
2595 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2596 tcg_temp_free_i32(tmp);
2597 gen_op_iwmmxt_movq_wRn_M0(wrd);
2598 gen_op_iwmmxt_set_mup();
2599 break;
2600 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2601 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2602 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2603 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2604 wrd = (insn >> 12) & 0xf;
2605 rd0 = (insn >> 16) & 0xf;
2606 rd1 = (insn >> 0) & 0xf;
2607 gen_op_iwmmxt_movq_M0_wRn(rd0);
2608 switch ((insn >> 20) & 0xf) {
2609 case 0x0:
2610 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2611 break;
2612 case 0x1:
2613 gen_op_iwmmxt_subub_M0_wRn(rd1);
2614 break;
2615 case 0x3:
2616 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2617 break;
2618 case 0x4:
2619 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2620 break;
2621 case 0x5:
2622 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2623 break;
2624 case 0x7:
2625 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2626 break;
2627 case 0x8:
2628 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2629 break;
2630 case 0x9:
2631 gen_op_iwmmxt_subul_M0_wRn(rd1);
2632 break;
2633 case 0xb:
2634 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2635 break;
2636 default:
2637 return 1;
2639 gen_op_iwmmxt_movq_wRn_M0(wrd);
2640 gen_op_iwmmxt_set_mup();
2641 gen_op_iwmmxt_set_cup();
2642 break;
2643 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2644 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2645 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2646 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2647 wrd = (insn >> 12) & 0xf;
2648 rd0 = (insn >> 16) & 0xf;
2649 gen_op_iwmmxt_movq_M0_wRn(rd0);
2650 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2651 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2652 tcg_temp_free_i32(tmp);
2653 gen_op_iwmmxt_movq_wRn_M0(wrd);
2654 gen_op_iwmmxt_set_mup();
2655 gen_op_iwmmxt_set_cup();
2656 break;
2657 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2658 case 0x418: case 0x518: case 0x618: case 0x718:
2659 case 0x818: case 0x918: case 0xa18: case 0xb18:
2660 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2661 wrd = (insn >> 12) & 0xf;
2662 rd0 = (insn >> 16) & 0xf;
2663 rd1 = (insn >> 0) & 0xf;
2664 gen_op_iwmmxt_movq_M0_wRn(rd0);
2665 switch ((insn >> 20) & 0xf) {
2666 case 0x0:
2667 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2668 break;
2669 case 0x1:
2670 gen_op_iwmmxt_addub_M0_wRn(rd1);
2671 break;
2672 case 0x3:
2673 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2674 break;
2675 case 0x4:
2676 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2677 break;
2678 case 0x5:
2679 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2680 break;
2681 case 0x7:
2682 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2683 break;
2684 case 0x8:
2685 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2686 break;
2687 case 0x9:
2688 gen_op_iwmmxt_addul_M0_wRn(rd1);
2689 break;
2690 case 0xb:
2691 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2692 break;
2693 default:
2694 return 1;
2696 gen_op_iwmmxt_movq_wRn_M0(wrd);
2697 gen_op_iwmmxt_set_mup();
2698 gen_op_iwmmxt_set_cup();
2699 break;
2700 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2701 case 0x408: case 0x508: case 0x608: case 0x708:
2702 case 0x808: case 0x908: case 0xa08: case 0xb08:
2703 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2704 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2705 return 1;
2706 wrd = (insn >> 12) & 0xf;
2707 rd0 = (insn >> 16) & 0xf;
2708 rd1 = (insn >> 0) & 0xf;
2709 gen_op_iwmmxt_movq_M0_wRn(rd0);
2710 switch ((insn >> 22) & 3) {
2711 case 1:
2712 if (insn & (1 << 21))
2713 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2714 else
2715 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2716 break;
2717 case 2:
2718 if (insn & (1 << 21))
2719 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2720 else
2721 gen_op_iwmmxt_packul_M0_wRn(rd1);
2722 break;
2723 case 3:
2724 if (insn & (1 << 21))
2725 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2726 else
2727 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2728 break;
2730 gen_op_iwmmxt_movq_wRn_M0(wrd);
2731 gen_op_iwmmxt_set_mup();
2732 gen_op_iwmmxt_set_cup();
2733 break;
2734 case 0x201: case 0x203: case 0x205: case 0x207:
2735 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2736 case 0x211: case 0x213: case 0x215: case 0x217:
2737 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2738 wrd = (insn >> 5) & 0xf;
2739 rd0 = (insn >> 12) & 0xf;
2740 rd1 = (insn >> 0) & 0xf;
2741 if (rd0 == 0xf || rd1 == 0xf)
2742 return 1;
2743 gen_op_iwmmxt_movq_M0_wRn(wrd);
2744 tmp = load_reg(s, rd0);
2745 tmp2 = load_reg(s, rd1);
2746 switch ((insn >> 16) & 0xf) {
2747 case 0x0: /* TMIA */
2748 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2749 break;
2750 case 0x8: /* TMIAPH */
2751 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2752 break;
2753 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2754 if (insn & (1 << 16))
2755 tcg_gen_shri_i32(tmp, tmp, 16);
2756 if (insn & (1 << 17))
2757 tcg_gen_shri_i32(tmp2, tmp2, 16);
2758 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2759 break;
2760 default:
2761 tcg_temp_free_i32(tmp2);
2762 tcg_temp_free_i32(tmp);
2763 return 1;
2765 tcg_temp_free_i32(tmp2);
2766 tcg_temp_free_i32(tmp);
2767 gen_op_iwmmxt_movq_wRn_M0(wrd);
2768 gen_op_iwmmxt_set_mup();
2769 break;
2770 default:
2771 return 1;
2774 return 0;
2777 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2778 (i.e. an undefined instruction). */
2779 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2781 int acc, rd0, rd1, rdhi, rdlo;
2782 TCGv_i32 tmp, tmp2;
2784 if ((insn & 0x0ff00f10) == 0x0e200010) {
2785 /* Multiply with Internal Accumulate Format */
2786 rd0 = (insn >> 12) & 0xf;
2787 rd1 = insn & 0xf;
2788 acc = (insn >> 5) & 7;
2790 if (acc != 0)
2791 return 1;
2793 tmp = load_reg(s, rd0);
2794 tmp2 = load_reg(s, rd1);
2795 switch ((insn >> 16) & 0xf) {
2796 case 0x0: /* MIA */
2797 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2798 break;
2799 case 0x8: /* MIAPH */
2800 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2801 break;
2802 case 0xc: /* MIABB */
2803 case 0xd: /* MIABT */
2804 case 0xe: /* MIATB */
2805 case 0xf: /* MIATT */
2806 if (insn & (1 << 16))
2807 tcg_gen_shri_i32(tmp, tmp, 16);
2808 if (insn & (1 << 17))
2809 tcg_gen_shri_i32(tmp2, tmp2, 16);
2810 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2811 break;
2812 default:
2813 return 1;
2815 tcg_temp_free_i32(tmp2);
2816 tcg_temp_free_i32(tmp);
2818 gen_op_iwmmxt_movq_wRn_M0(acc);
2819 return 0;
2822 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2823 /* Internal Accumulator Access Format */
2824 rdhi = (insn >> 16) & 0xf;
2825 rdlo = (insn >> 12) & 0xf;
2826 acc = insn & 7;
2828 if (acc != 0)
2829 return 1;
2831 if (insn & ARM_CP_RW_BIT) { /* MRA */
2832 iwmmxt_load_reg(cpu_V0, acc);
2833 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2834 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2835 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
2836 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2837 } else { /* MAR */
2838 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2839 iwmmxt_store_reg(cpu_V0, acc);
2841 return 0;
2844 return 1;
2847 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2848 #define VFP_SREG(insn, bigbit, smallbit) \
2849 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2850 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2851 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2852 reg = (((insn) >> (bigbit)) & 0x0f) \
2853 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2854 } else { \
2855 if (insn & (1 << (smallbit))) \
2856 return 1; \
2857 reg = ((insn) >> (bigbit)) & 0x0f; \
2858 }} while (0)
2860 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2861 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2862 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2863 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2864 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2865 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
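/*
 * Worked example for the macros above: for a single-precision operand the
 * register number is the four-bit Vx field with the D/N/M bit appended as
 * the least significant bit, e.g. VFP_SREG_D(insn) is
 * (extract32(insn, 12, 4) << 1) | extract32(insn, 22, 1).  For doubles the
 * extra bit becomes the most significant bit instead, giving access to
 * D16-D31 on VFP3 and later; on older VFP a set extra bit makes the macro
 * return 1 from the enclosing decoder (UNDEF).
 */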
2867 static void gen_neon_dup_low16(TCGv_i32 var)
2869 TCGv_i32 tmp = tcg_temp_new_i32();
2870 tcg_gen_ext16u_i32(var, var);
2871 tcg_gen_shli_i32(tmp, var, 16);
2872 tcg_gen_or_i32(var, var, tmp);
2873 tcg_temp_free_i32(tmp);
2876 static void gen_neon_dup_high16(TCGv_i32 var)
2878 TCGv_i32 tmp = tcg_temp_new_i32();
2879 tcg_gen_andi_i32(var, var, 0xffff0000);
2880 tcg_gen_shri_i32(tmp, var, 16);
2881 tcg_gen_or_i32(var, var, tmp);
2882 tcg_temp_free_i32(tmp);
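/*
 * The two helpers above broadcast one 16-bit lane across a 32-bit value:
 * gen_neon_dup_low16() replicates bits [15:0] and gen_neon_dup_high16()
 * replicates bits [31:16].  They are used below to splat a scalar operand
 * before a Neon by-scalar operation.
 */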
2886 * Disassemble a VFP instruction. Returns nonzero if an error occurred
2887 * (i.e. an undefined instruction).
2889 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
2891 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
2892 return 1;
2896 * If the decodetree decoder handles this insn it will always
2897 * emit code to either execute the insn or generate an appropriate
2898 * exception; so we don't need to ever return non-zero to tell
2899 * the calling code to emit an UNDEF exception.
2901 if (extract32(insn, 28, 4) == 0xf) {
2902 if (disas_vfp_uncond(s, insn)) {
2903 return 0;
2905 } else {
2906 if (disas_vfp(s, insn)) {
2907 return 0;
2910 /* If the decodetree decoder didn't handle this insn, it must be UNDEF */
2911 return 1;
2914 static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
2916 #ifndef CONFIG_USER_ONLY
2917 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
2918 ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
2919 #else
2920 return true;
2921 #endif
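/*
 * Direct block chaining (goto_tb) is only considered safe when the branch
 * destination lies on the same guest page as this TB or as the code being
 * translated, since a cross-page link could go stale if that page is
 * remapped; user-mode emulation has no such restriction.
 */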
2924 static void gen_goto_ptr(void)
2926 tcg_gen_lookup_and_goto_ptr();
2929 /* This will end the TB but doesn't guarantee we'll return to
2930 * cpu_loop_exec. Any live exit_requests will be processed as we
2931 * enter the next TB.
2933 static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
2935 if (use_goto_tb(s, dest)) {
2936 tcg_gen_goto_tb(n);
2937 gen_set_pc_im(s, dest);
2938 tcg_gen_exit_tb(s->base.tb, n);
2939 } else {
2940 gen_set_pc_im(s, dest);
2941 gen_goto_ptr();
2943 s->base.is_jmp = DISAS_NORETURN;
2946 static inline void gen_jmp (DisasContext *s, uint32_t dest)
2948 if (unlikely(is_singlestepping(s))) {
2949 /* An indirect jump so that we still trigger the debug exception. */
2950 if (s->thumb)
2951 dest |= 1;
2952 gen_bx_im(s, dest);
2953 } else {
2954 gen_goto_tb(s, 0, dest);
2958 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
2960 if (x)
2961 tcg_gen_sari_i32(t0, t0, 16);
2962 else
2963 gen_sxth(t0);
2964 if (y)
2965 tcg_gen_sari_i32(t1, t1, 16);
2966 else
2967 gen_sxth(t1);
2968 tcg_gen_mul_i32(t0, t0, t1);
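/*
 * gen_mulxy() is the 16x16->32 signed multiply used by SMULxy and related
 * insns: x and y select the top or bottom halfword of each operand, the
 * chosen halfwords are sign-extended to 32 bits, and the 32-bit product is
 * exact because the inputs fit in 16 bits.
 */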
2971 /* Return the mask of PSR bits set by a MSR instruction. */
2972 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
2974 uint32_t mask;
2976 mask = 0;
2977 if (flags & (1 << 0))
2978 mask |= 0xff;
2979 if (flags & (1 << 1))
2980 mask |= 0xff00;
2981 if (flags & (1 << 2))
2982 mask |= 0xff0000;
2983 if (flags & (1 << 3))
2984 mask |= 0xff000000;
2986 /* Mask out undefined bits. */
2987 mask &= ~CPSR_RESERVED;
2988 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
2989 mask &= ~CPSR_T;
2991 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
2992 mask &= ~CPSR_Q; /* V5TE in reality */
2994 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
2995 mask &= ~(CPSR_E | CPSR_GE);
2997 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
2998 mask &= ~CPSR_IT;
3000 /* Mask out execution state and reserved bits. */
3001 if (!spsr) {
3002 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
3004 /* Mask out privileged bits. */
3005 if (IS_USER(s))
3006 mask &= CPSR_USER;
3007 return mask;
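/*
 * The four flag bits of msr_mask() mirror the MSR field mask: bit 0 selects
 * PSR[7:0] (c), bit 1 PSR[15:8] (x), bit 2 PSR[23:16] (s) and bit 3
 * PSR[31:24] (f); bits that the emulated architecture version does not
 * implement are then masked out so writes to them are silently ignored.
 */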
3010 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3011 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
3013 TCGv_i32 tmp;
3014 if (spsr) {
3015 /* ??? This is also undefined in system mode. */
3016 if (IS_USER(s))
3017 return 1;
3019 tmp = load_cpu_field(spsr);
3020 tcg_gen_andi_i32(tmp, tmp, ~mask);
3021 tcg_gen_andi_i32(t0, t0, mask);
3022 tcg_gen_or_i32(tmp, tmp, t0);
3023 store_cpu_field(tmp, spsr);
3024 } else {
3025 gen_set_cpsr(t0, mask);
3027 tcg_temp_free_i32(t0);
3028 gen_lookup_tb(s);
3029 return 0;
3032 /* Returns nonzero if access to the PSR is not permitted. */
3033 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3035 TCGv_i32 tmp;
3036 tmp = tcg_temp_new_i32();
3037 tcg_gen_movi_i32(tmp, val);
3038 return gen_set_psr(s, mask, spsr, tmp);
3041 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
3042 int *tgtmode, int *regno)
3044 /* Decode the r and sysm fields of MSR/MRS banked accesses into
3045 * the target mode and register number, and identify the various
3046 * unpredictable cases.
3047 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
3048 * + executed in user mode
3049 * + using R15 as the src/dest register
3050 * + accessing an unimplemented register
3051 * + accessing a register that's inaccessible at current PL/security state*
3052 * + accessing a register that you could access with a different insn
3053 * We choose to UNDEF in all these cases.
3054 * Since we don't know which of the various AArch32 modes we are in
3055 * we have to defer some checks to runtime.
3056 * Accesses to Monitor mode registers from Secure EL1 (which implies
3057 * that EL3 is AArch64) must trap to EL3.
3059 * If the access checks fail this function will emit code to take
3060 * an exception and return false. Otherwise it will return true,
3061 * and set *tgtmode and *regno appropriately.
3063 int exc_target = default_exception_el(s);
3065 /* These instructions are present only in ARMv8, or in ARMv7 with the
3066 * Virtualization Extensions.
3068 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
3069 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
3070 goto undef;
3073 if (IS_USER(s) || rn == 15) {
3074 goto undef;
3077 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
3078 * of registers into (r, sysm).
3080 if (r) {
3081 /* SPSRs for other modes */
3082 switch (sysm) {
3083 case 0xe: /* SPSR_fiq */
3084 *tgtmode = ARM_CPU_MODE_FIQ;
3085 break;
3086 case 0x10: /* SPSR_irq */
3087 *tgtmode = ARM_CPU_MODE_IRQ;
3088 break;
3089 case 0x12: /* SPSR_svc */
3090 *tgtmode = ARM_CPU_MODE_SVC;
3091 break;
3092 case 0x14: /* SPSR_abt */
3093 *tgtmode = ARM_CPU_MODE_ABT;
3094 break;
3095 case 0x16: /* SPSR_und */
3096 *tgtmode = ARM_CPU_MODE_UND;
3097 break;
3098 case 0x1c: /* SPSR_mon */
3099 *tgtmode = ARM_CPU_MODE_MON;
3100 break;
3101 case 0x1e: /* SPSR_hyp */
3102 *tgtmode = ARM_CPU_MODE_HYP;
3103 break;
3104 default: /* unallocated */
3105 goto undef;
3107 /* We arbitrarily assign SPSR a register number of 16. */
3108 *regno = 16;
3109 } else {
3110 /* general purpose registers for other modes */
3111 switch (sysm) {
3112 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
3113 *tgtmode = ARM_CPU_MODE_USR;
3114 *regno = sysm + 8;
3115 break;
3116 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
3117 *tgtmode = ARM_CPU_MODE_FIQ;
3118 *regno = sysm;
3119 break;
3120 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
3121 *tgtmode = ARM_CPU_MODE_IRQ;
3122 *regno = sysm & 1 ? 13 : 14;
3123 break;
3124 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
3125 *tgtmode = ARM_CPU_MODE_SVC;
3126 *regno = sysm & 1 ? 13 : 14;
3127 break;
3128 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
3129 *tgtmode = ARM_CPU_MODE_ABT;
3130 *regno = sysm & 1 ? 13 : 14;
3131 break;
3132 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
3133 *tgtmode = ARM_CPU_MODE_UND;
3134 *regno = sysm & 1 ? 13 : 14;
3135 break;
3136 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
3137 *tgtmode = ARM_CPU_MODE_MON;
3138 *regno = sysm & 1 ? 13 : 14;
3139 break;
3140 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
3141 *tgtmode = ARM_CPU_MODE_HYP;
3142 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
3143 *regno = sysm & 1 ? 13 : 17;
3144 break;
3145 default: /* unallocated */
3146 goto undef;
3150 /* Catch the 'accessing inaccessible register' cases we can detect
3151 * at translate time.
3153 switch (*tgtmode) {
3154 case ARM_CPU_MODE_MON:
3155 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
3156 goto undef;
3158 if (s->current_el == 1) {
3159 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
3160 * then accesses to Mon registers trap to EL3
3162 exc_target = 3;
3163 goto undef;
3165 break;
3166 case ARM_CPU_MODE_HYP:
3168 * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
3169 * (and so we can forbid accesses from EL2 or below). elr_hyp
3170 * can be accessed also from Hyp mode, so forbid accesses from
3171 * EL0 or EL1.
3173 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
3174 (s->current_el < 3 && *regno != 17)) {
3175 goto undef;
3177 break;
3178 default:
3179 break;
3182 return true;
3184 undef:
3185 /* If we get here then some access check did not pass */
3186 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
3187 syn_uncategorized(), exc_target);
3188 return false;
3191 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
3193 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
3194 int tgtmode = 0, regno = 0;
3196 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
3197 return;
3200 /* Sync state because msr_banked() can raise exceptions */
3201 gen_set_condexec(s);
3202 gen_set_pc_im(s, s->pc_curr);
3203 tcg_reg = load_reg(s, rn);
3204 tcg_tgtmode = tcg_const_i32(tgtmode);
3205 tcg_regno = tcg_const_i32(regno);
3206 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
3207 tcg_temp_free_i32(tcg_tgtmode);
3208 tcg_temp_free_i32(tcg_regno);
3209 tcg_temp_free_i32(tcg_reg);
3210 s->base.is_jmp = DISAS_UPDATE;
3213 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
3215 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
3216 int tgtmode = 0, regno = 0;
3218 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
3219 return;
3222 /* Sync state because mrs_banked() can raise exceptions */
3223 gen_set_condexec(s);
3224 gen_set_pc_im(s, s->pc_curr);
3225 tcg_reg = tcg_temp_new_i32();
3226 tcg_tgtmode = tcg_const_i32(tgtmode);
3227 tcg_regno = tcg_const_i32(regno);
3228 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
3229 tcg_temp_free_i32(tcg_tgtmode);
3230 tcg_temp_free_i32(tcg_regno);
3231 store_reg(s, rn, tcg_reg);
3232 s->base.is_jmp = DISAS_UPDATE;
3235 /* Store value to PC as for an exception return (i.e. don't
3236 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
3237 * will do the masking based on the new value of the Thumb bit.
3239 static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
3241 tcg_gen_mov_i32(cpu_R[15], pc);
3242 tcg_temp_free_i32(pc);
3245 /* Generate a v6 exception return. Marks both values as dead. */
3246 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
3248 store_pc_exc_ret(s, pc);
3249 /* The cpsr_write_eret helper will mask the low bits of PC
3250 * appropriately depending on the new Thumb bit, so it must
3251 * be called after storing the new PC.
3253 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
3254 gen_io_start();
3256 gen_helper_cpsr_write_eret(cpu_env, cpsr);
3257 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
3258 gen_io_end();
3260 tcg_temp_free_i32(cpsr);
3261 /* Must exit loop to check un-masked IRQs */
3262 s->base.is_jmp = DISAS_EXIT;
3265 /* Generate an old-style exception return. Marks pc as dead. */
3266 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
3268 gen_rfe(s, pc, load_cpu_field(spsr));
3272 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
3273 * only call the helper when running single threaded TCG code to ensure
3274 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
3275 * just skip this instruction. Currently the SEV/SEVL instructions
3276 * which are *one* of many ways to wake the CPU from WFE are not
3277 * implemented so we can't sleep like WFI does.
3279 static void gen_nop_hint(DisasContext *s, int val)
3281 switch (val) {
3282 /* When running in MTTCG we don't generate jumps to the yield and
3283 * WFE helpers as it won't affect the scheduling of other vCPUs.
3284 * If we wanted to more completely model WFE/SEV so we don't busy
3285 * spin unnecessarily we would need to do something more involved.
3287 case 1: /* yield */
3288 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3289 gen_set_pc_im(s, s->base.pc_next);
3290 s->base.is_jmp = DISAS_YIELD;
3292 break;
3293 case 3: /* wfi */
3294 gen_set_pc_im(s, s->base.pc_next);
3295 s->base.is_jmp = DISAS_WFI;
3296 break;
3297 case 2: /* wfe */
3298 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3299 gen_set_pc_im(s, s->base.pc_next);
3300 s->base.is_jmp = DISAS_WFE;
3302 break;
3303 case 4: /* sev */
3304 case 5: /* sevl */
3305 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
3306 default: /* nop */
3307 break;
3311 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3313 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
3315 switch (size) {
3316 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3317 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3318 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3319 default: abort();
3323 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
3325 switch (size) {
3326 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3327 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3328 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3329 default: return;
3333 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3334 #define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
3335 #define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
3336 #define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
3337 #define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
3339 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3340 switch ((size << 1) | u) { \
3341 case 0: \
3342 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3343 break; \
3344 case 1: \
3345 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3346 break; \
3347 case 2: \
3348 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3349 break; \
3350 case 3: \
3351 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3352 break; \
3353 case 4: \
3354 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3355 break; \
3356 case 5: \
3357 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3358 break; \
3359 default: return 1; \
3360 }} while (0)
3362 #define GEN_NEON_INTEGER_OP(name) do { \
3363 switch ((size << 1) | u) { \
3364 case 0: \
3365 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3366 break; \
3367 case 1: \
3368 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3369 break; \
3370 case 2: \
3371 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3372 break; \
3373 case 3: \
3374 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3375 break; \
3376 case 4: \
3377 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3378 break; \
3379 case 5: \
3380 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3381 break; \
3382 default: return 1; \
3383 }} while (0)
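/*
 * In both macros above the switch index is (size << 1) | u, so the even
 * cases dispatch to the signed helper and the odd cases to the unsigned
 * helper for element sizes 8, 16 and 32; a size value of 3 lands in the
 * default "return 1" path and UNDEFs in the caller.
 */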
3385 static TCGv_i32 neon_load_scratch(int scratch)
3387 TCGv_i32 tmp = tcg_temp_new_i32();
3388 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3389 return tmp;
3392 static void neon_store_scratch(int scratch, TCGv_i32 var)
3394 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3395 tcg_temp_free_i32(var);
3398 static inline TCGv_i32 neon_get_scalar(int size, int reg)
3400 TCGv_i32 tmp;
3401 if (size == 1) {
3402 tmp = neon_load_reg(reg & 7, reg >> 4);
3403 if (reg & 8) {
3404 gen_neon_dup_high16(tmp);
3405 } else {
3406 gen_neon_dup_low16(tmp);
3408 } else {
3409 tmp = neon_load_reg(reg & 15, reg >> 4);
3411 return tmp;
3414 static int gen_neon_unzip(int rd, int rm, int size, int q)
3416 TCGv_ptr pd, pm;
3418 if (!q && size == 2) {
3419 return 1;
3421 pd = vfp_reg_ptr(true, rd);
3422 pm = vfp_reg_ptr(true, rm);
3423 if (q) {
3424 switch (size) {
3425 case 0:
3426 gen_helper_neon_qunzip8(pd, pm);
3427 break;
3428 case 1:
3429 gen_helper_neon_qunzip16(pd, pm);
3430 break;
3431 case 2:
3432 gen_helper_neon_qunzip32(pd, pm);
3433 break;
3434 default:
3435 abort();
3437 } else {
3438 switch (size) {
3439 case 0:
3440 gen_helper_neon_unzip8(pd, pm);
3441 break;
3442 case 1:
3443 gen_helper_neon_unzip16(pd, pm);
3444 break;
3445 default:
3446 abort();
3449 tcg_temp_free_ptr(pd);
3450 tcg_temp_free_ptr(pm);
3451 return 0;
3454 static int gen_neon_zip(int rd, int rm, int size, int q)
3456 TCGv_ptr pd, pm;
3458 if (!q && size == 2) {
3459 return 1;
3461 pd = vfp_reg_ptr(true, rd);
3462 pm = vfp_reg_ptr(true, rm);
3463 if (q) {
3464 switch (size) {
3465 case 0:
3466 gen_helper_neon_qzip8(pd, pm);
3467 break;
3468 case 1:
3469 gen_helper_neon_qzip16(pd, pm);
3470 break;
3471 case 2:
3472 gen_helper_neon_qzip32(pd, pm);
3473 break;
3474 default:
3475 abort();
3477 } else {
3478 switch (size) {
3479 case 0:
3480 gen_helper_neon_zip8(pd, pm);
3481 break;
3482 case 1:
3483 gen_helper_neon_zip16(pd, pm);
3484 break;
3485 default:
3486 abort();
3489 tcg_temp_free_ptr(pd);
3490 tcg_temp_free_ptr(pm);
3491 return 0;
3494 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
3496 TCGv_i32 rd, tmp;
3498 rd = tcg_temp_new_i32();
3499 tmp = tcg_temp_new_i32();
3501 tcg_gen_shli_i32(rd, t0, 8);
3502 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3503 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3504 tcg_gen_or_i32(rd, rd, tmp);
3506 tcg_gen_shri_i32(t1, t1, 8);
3507 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3508 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3509 tcg_gen_or_i32(t1, t1, tmp);
3510 tcg_gen_mov_i32(t0, rd);
3512 tcg_temp_free_i32(tmp);
3513 tcg_temp_free_i32(rd);
3516 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
3518 TCGv_i32 rd, tmp;
3520 rd = tcg_temp_new_i32();
3521 tmp = tcg_temp_new_i32();
3523 tcg_gen_shli_i32(rd, t0, 16);
3524 tcg_gen_andi_i32(tmp, t1, 0xffff);
3525 tcg_gen_or_i32(rd, rd, tmp);
3526 tcg_gen_shri_i32(t1, t1, 16);
3527 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3528 tcg_gen_or_i32(t1, t1, tmp);
3529 tcg_gen_mov_i32(t0, rd);
3531 tcg_temp_free_i32(tmp);
3532 tcg_temp_free_i32(rd);
3536 static struct {
3537 int nregs;
3538 int interleave;
3539 int spacing;
3540 } const neon_ls_element_type[11] = {
3541 {1, 4, 1},
3542 {1, 4, 2},
3543 {4, 1, 1},
3544 {2, 2, 2},
3545 {1, 3, 1},
3546 {1, 3, 2},
3547 {3, 1, 1},
3548 {1, 1, 1},
3549 {1, 2, 1},
3550 {1, 2, 2},
3551 {2, 1, 1}
3554 /* Translate a NEON load/store element instruction. Return nonzero if the
3555 instruction is invalid. */
3556 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
3558 int rd, rn, rm;
3559 int op;
3560 int nregs;
3561 int interleave;
3562 int spacing;
3563 int stride;
3564 int size;
3565 int reg;
3566 int load;
3567 int n;
3568 int vec_size;
3569 int mmu_idx;
3570 TCGMemOp endian;
3571 TCGv_i32 addr;
3572 TCGv_i32 tmp;
3573 TCGv_i32 tmp2;
3574 TCGv_i64 tmp64;
3576 /* FIXME: this access check should not take precedence over UNDEF
3577 * for invalid encodings; we will generate incorrect syndrome information
3578 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3580 if (s->fp_excp_el) {
3581 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
3582 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
3583 return 0;
3586 if (!s->vfp_enabled)
3587 return 1;
3588 VFP_DREG_D(rd, insn);
3589 rn = (insn >> 16) & 0xf;
3590 rm = insn & 0xf;
3591 load = (insn & (1 << 21)) != 0;
3592 endian = s->be_data;
3593 mmu_idx = get_mem_index(s);
3594 if ((insn & (1 << 23)) == 0) {
3595 /* Load store all elements. */
3596 op = (insn >> 8) & 0xf;
3597 size = (insn >> 6) & 3;
3598 if (op > 10)
3599 return 1;
3600 /* Catch UNDEF cases for bad values of align field */
3601 switch (op & 0xc) {
3602 case 4:
3603 if (((insn >> 5) & 1) == 1) {
3604 return 1;
3606 break;
3607 case 8:
3608 if (((insn >> 4) & 3) == 3) {
3609 return 1;
3611 break;
3612 default:
3613 break;
3615 nregs = neon_ls_element_type[op].nregs;
3616 interleave = neon_ls_element_type[op].interleave;
3617 spacing = neon_ls_element_type[op].spacing;
3618 if (size == 3 && (interleave | spacing) != 1) {
3619 return 1;
3621 /* For our purposes, bytes are always little-endian. */
3622 if (size == 0) {
3623 endian = MO_LE;
3625 /* Consecutive little-endian elements from a single register
3626 * can be promoted to a larger little-endian operation.
3628 if (interleave == 1 && endian == MO_LE) {
3629 size = 3;
3631 tmp64 = tcg_temp_new_i64();
3632 addr = tcg_temp_new_i32();
3633 tmp2 = tcg_const_i32(1 << size);
3634 load_reg_var(s, addr, rn);
3635 for (reg = 0; reg < nregs; reg++) {
3636 for (n = 0; n < 8 >> size; n++) {
3637 int xs;
3638 for (xs = 0; xs < interleave; xs++) {
3639 int tt = rd + reg + spacing * xs;
3641 if (load) {
3642 gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
3643 neon_store_element64(tt, n, size, tmp64);
3644 } else {
3645 neon_load_element64(tmp64, tt, n, size);
3646 gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
3648 tcg_gen_add_i32(addr, addr, tmp2);
3652 tcg_temp_free_i32(addr);
3653 tcg_temp_free_i32(tmp2);
3654 tcg_temp_free_i64(tmp64);
3655 stride = nregs * interleave * 8;
3656 } else {
3657 size = (insn >> 10) & 3;
3658 if (size == 3) {
3659 /* Load single element to all lanes. */
3660 int a = (insn >> 4) & 1;
3661 if (!load) {
3662 return 1;
3664 size = (insn >> 6) & 3;
3665 nregs = ((insn >> 8) & 3) + 1;
3667 if (size == 3) {
3668 if (nregs != 4 || a == 0) {
3669 return 1;
3671 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3672 size = 2;
3674 if (nregs == 1 && a == 1 && size == 0) {
3675 return 1;
3677 if (nregs == 3 && a == 1) {
3678 return 1;
3680 addr = tcg_temp_new_i32();
3681 load_reg_var(s, addr, rn);
3683 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
3684 * VLD2/3/4 to all lanes: bit 5 indicates register stride.
3686 stride = (insn & (1 << 5)) ? 2 : 1;
3687 vec_size = nregs == 1 ? stride * 8 : 8;
3689 tmp = tcg_temp_new_i32();
3690 for (reg = 0; reg < nregs; reg++) {
3691 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
3692 s->be_data | size);
3693 if ((rd & 1) && vec_size == 16) {
3694 /* We cannot write 16 bytes at once because the
3695 * destination is unaligned.
3697 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
3698 8, 8, tmp);
3699 tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
3700 neon_reg_offset(rd, 0), 8, 8);
3701 } else {
3702 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
3703 vec_size, vec_size, tmp);
3705 tcg_gen_addi_i32(addr, addr, 1 << size);
3706 rd += stride;
3708 tcg_temp_free_i32(tmp);
3709 tcg_temp_free_i32(addr);
3710 stride = (1 << size) * nregs;
3711 } else {
3712 /* Single element. */
3713 int idx = (insn >> 4) & 0xf;
3714 int reg_idx;
3715 switch (size) {
3716 case 0:
3717 reg_idx = (insn >> 5) & 7;
3718 stride = 1;
3719 break;
3720 case 1:
3721 reg_idx = (insn >> 6) & 3;
3722 stride = (insn & (1 << 5)) ? 2 : 1;
3723 break;
3724 case 2:
3725 reg_idx = (insn >> 7) & 1;
3726 stride = (insn & (1 << 6)) ? 2 : 1;
3727 break;
3728 default:
3729 abort();
3731 nregs = ((insn >> 8) & 3) + 1;
3732 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
3733 switch (nregs) {
3734 case 1:
3735 if (((idx & (1 << size)) != 0) ||
3736 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
3737 return 1;
3739 break;
3740 case 3:
3741 if ((idx & 1) != 0) {
3742 return 1;
3744 /* fall through */
3745 case 2:
3746 if (size == 2 && (idx & 2) != 0) {
3747 return 1;
3749 break;
3750 case 4:
3751 if ((size == 2) && ((idx & 3) == 3)) {
3752 return 1;
3754 break;
3755 default:
3756 abort();
3758 if ((rd + stride * (nregs - 1)) > 31) {
3759 /* Attempts to write off the end of the register file
3760 * are UNPREDICTABLE; we choose to UNDEF because otherwise
3761 * the neon_load_reg() would write off the end of the array.
3763 return 1;
3765 tmp = tcg_temp_new_i32();
3766 addr = tcg_temp_new_i32();
3767 load_reg_var(s, addr, rn);
3768 for (reg = 0; reg < nregs; reg++) {
3769 if (load) {
3770 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
3771 s->be_data | size);
3772 neon_store_element(rd, reg_idx, size, tmp);
3773 } else { /* Store */
3774 neon_load_element(tmp, rd, reg_idx, size);
3775 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
3776 s->be_data | size);
3778 rd += stride;
3779 tcg_gen_addi_i32(addr, addr, 1 << size);
3781 tcg_temp_free_i32(addr);
3782 tcg_temp_free_i32(tmp);
3783 stride = nregs * (1 << size);
3786 if (rm != 15) {
3787 TCGv_i32 base;
3789 base = load_reg(s, rn);
3790 if (rm == 13) {
3791 tcg_gen_addi_i32(base, base, stride);
3792 } else {
3793 TCGv_i32 index;
3794 index = load_reg(s, rm);
3795 tcg_gen_add_i32(base, base, index);
3796 tcg_temp_free_i32(index);
3798 store_reg(s, rn, base);
3800 return 0;
3803 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
3805 switch (size) {
3806 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3807 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3808 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
3809 default: abort();
3813 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
3815 switch (size) {
3816 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3817 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3818 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3819 default: abort();
3823 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
3825 switch (size) {
3826 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3827 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3828 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3829 default: abort();
3833 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
3835 switch (size) {
3836 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
3837 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
3838 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
3839 default: abort();
3843 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
3844 int q, int u)
3846 if (q) {
3847 if (u) {
3848 switch (size) {
3849 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3850 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3851 default: abort();
3853 } else {
3854 switch (size) {
3855 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3856 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3857 default: abort();
3860 } else {
3861 if (u) {
3862 switch (size) {
3863 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
3864 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
3865 default: abort();
3867 } else {
3868 switch (size) {
3869 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3870 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3871 default: abort();
3877 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
3879 if (u) {
3880 switch (size) {
3881 case 0: gen_helper_neon_widen_u8(dest, src); break;
3882 case 1: gen_helper_neon_widen_u16(dest, src); break;
3883 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3884 default: abort();
3886 } else {
3887 switch (size) {
3888 case 0: gen_helper_neon_widen_s8(dest, src); break;
3889 case 1: gen_helper_neon_widen_s16(dest, src); break;
3890 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3891 default: abort();
3894 tcg_temp_free_i32(src);
3897 static inline void gen_neon_addl(int size)
3899 switch (size) {
3900 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3901 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3902 case 2: tcg_gen_add_i64(CPU_V001); break;
3903 default: abort();
3907 static inline void gen_neon_subl(int size)
3909 switch (size) {
3910 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
3911 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
3912 case 2: tcg_gen_sub_i64(CPU_V001); break;
3913 default: abort();
3917 static inline void gen_neon_negl(TCGv_i64 var, int size)
3919 switch (size) {
3920 case 0: gen_helper_neon_negl_u16(var, var); break;
3921 case 1: gen_helper_neon_negl_u32(var, var); break;
3922 case 2:
3923 tcg_gen_neg_i64(var, var);
3924 break;
3925 default: abort();
3929 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
3931 switch (size) {
3932 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
3933 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
3934 default: abort();
3938 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
3939 int size, int u)
3941 TCGv_i64 tmp;
3943 switch ((size << 1) | u) {
3944 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
3945 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
3946 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
3947 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
3948 case 4:
3949 tmp = gen_muls_i64_i32(a, b);
3950 tcg_gen_mov_i64(dest, tmp);
3951 tcg_temp_free_i64(tmp);
3952 break;
3953 case 5:
3954 tmp = gen_mulu_i64_i32(a, b);
3955 tcg_gen_mov_i64(dest, tmp);
3956 tcg_temp_free_i64(tmp);
3957 break;
3958 default: abort();
3961 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
3962 Don't forget to clean them now. */
3963 if (size < 2) {
3964 tcg_temp_free_i32(a);
3965 tcg_temp_free_i32(b);
3969 static void gen_neon_narrow_op(int op, int u, int size,
3970 TCGv_i32 dest, TCGv_i64 src)
3972 if (op) {
3973 if (u) {
3974 gen_neon_unarrow_sats(size, dest, src);
3975 } else {
3976 gen_neon_narrow(size, dest, src);
3978 } else {
3979 if (u) {
3980 gen_neon_narrow_satu(size, dest, src);
3981 } else {
3982 gen_neon_narrow_sats(size, dest, src);
3987 /* Symbolic constants for op fields for Neon 3-register same-length.
3988 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
3989 * table A7-9.
3991 #define NEON_3R_VHADD 0
3992 #define NEON_3R_VQADD 1
3993 #define NEON_3R_VRHADD 2
3994 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
3995 #define NEON_3R_VHSUB 4
3996 #define NEON_3R_VQSUB 5
3997 #define NEON_3R_VCGT 6
3998 #define NEON_3R_VCGE 7
3999 #define NEON_3R_VSHL 8
4000 #define NEON_3R_VQSHL 9
4001 #define NEON_3R_VRSHL 10
4002 #define NEON_3R_VQRSHL 11
4003 #define NEON_3R_VMAX 12
4004 #define NEON_3R_VMIN 13
4005 #define NEON_3R_VABD 14
4006 #define NEON_3R_VABA 15
4007 #define NEON_3R_VADD_VSUB 16
4008 #define NEON_3R_VTST_VCEQ 17
4009 #define NEON_3R_VML 18 /* VMLA, VMLS */
4010 #define NEON_3R_VMUL 19
4011 #define NEON_3R_VPMAX 20
4012 #define NEON_3R_VPMIN 21
4013 #define NEON_3R_VQDMULH_VQRDMULH 22
4014 #define NEON_3R_VPADD_VQRDMLAH 23
4015 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
4016 #define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
4017 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4018 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4019 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4020 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4021 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4022 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
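/*
 * Like neon_2rm_sizes further down, each entry of neon_3r_sizes has bit n
 * set if the op accepts size value n: 0xf also admits the 64-bit element
 * forms, while 0x5 allows only size values 0 and 2 because size bit 1 is
 * reused to select the operation for the float ops.
 */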
4024 static const uint8_t neon_3r_sizes[] = {
4025 [NEON_3R_VHADD] = 0x7,
4026 [NEON_3R_VQADD] = 0xf,
4027 [NEON_3R_VRHADD] = 0x7,
4028 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4029 [NEON_3R_VHSUB] = 0x7,
4030 [NEON_3R_VQSUB] = 0xf,
4031 [NEON_3R_VCGT] = 0x7,
4032 [NEON_3R_VCGE] = 0x7,
4033 [NEON_3R_VSHL] = 0xf,
4034 [NEON_3R_VQSHL] = 0xf,
4035 [NEON_3R_VRSHL] = 0xf,
4036 [NEON_3R_VQRSHL] = 0xf,
4037 [NEON_3R_VMAX] = 0x7,
4038 [NEON_3R_VMIN] = 0x7,
4039 [NEON_3R_VABD] = 0x7,
4040 [NEON_3R_VABA] = 0x7,
4041 [NEON_3R_VADD_VSUB] = 0xf,
4042 [NEON_3R_VTST_VCEQ] = 0x7,
4043 [NEON_3R_VML] = 0x7,
4044 [NEON_3R_VMUL] = 0x7,
4045 [NEON_3R_VPMAX] = 0x7,
4046 [NEON_3R_VPMIN] = 0x7,
4047 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4048 [NEON_3R_VPADD_VQRDMLAH] = 0x7,
4049 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
4050 [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
4051 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4052 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4053 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4054 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4055 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4056 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
4059 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4060 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4061 * table A7-13.
4063 #define NEON_2RM_VREV64 0
4064 #define NEON_2RM_VREV32 1
4065 #define NEON_2RM_VREV16 2
4066 #define NEON_2RM_VPADDL 4
4067 #define NEON_2RM_VPADDL_U 5
4068 #define NEON_2RM_AESE 6 /* Includes AESD */
4069 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
4070 #define NEON_2RM_VCLS 8
4071 #define NEON_2RM_VCLZ 9
4072 #define NEON_2RM_VCNT 10
4073 #define NEON_2RM_VMVN 11
4074 #define NEON_2RM_VPADAL 12
4075 #define NEON_2RM_VPADAL_U 13
4076 #define NEON_2RM_VQABS 14
4077 #define NEON_2RM_VQNEG 15
4078 #define NEON_2RM_VCGT0 16
4079 #define NEON_2RM_VCGE0 17
4080 #define NEON_2RM_VCEQ0 18
4081 #define NEON_2RM_VCLE0 19
4082 #define NEON_2RM_VCLT0 20
4083 #define NEON_2RM_SHA1H 21
4084 #define NEON_2RM_VABS 22
4085 #define NEON_2RM_VNEG 23
4086 #define NEON_2RM_VCGT0_F 24
4087 #define NEON_2RM_VCGE0_F 25
4088 #define NEON_2RM_VCEQ0_F 26
4089 #define NEON_2RM_VCLE0_F 27
4090 #define NEON_2RM_VCLT0_F 28
4091 #define NEON_2RM_VABS_F 30
4092 #define NEON_2RM_VNEG_F 31
4093 #define NEON_2RM_VSWP 32
4094 #define NEON_2RM_VTRN 33
4095 #define NEON_2RM_VUZP 34
4096 #define NEON_2RM_VZIP 35
4097 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4098 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4099 #define NEON_2RM_VSHLL 38
4100 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
4101 #define NEON_2RM_VRINTN 40
4102 #define NEON_2RM_VRINTX 41
4103 #define NEON_2RM_VRINTA 42
4104 #define NEON_2RM_VRINTZ 43
4105 #define NEON_2RM_VCVT_F16_F32 44
4106 #define NEON_2RM_VRINTM 45
4107 #define NEON_2RM_VCVT_F32_F16 46
4108 #define NEON_2RM_VRINTP 47
4109 #define NEON_2RM_VCVTAU 48
4110 #define NEON_2RM_VCVTAS 49
4111 #define NEON_2RM_VCVTNU 50
4112 #define NEON_2RM_VCVTNS 51
4113 #define NEON_2RM_VCVTPU 52
4114 #define NEON_2RM_VCVTPS 53
4115 #define NEON_2RM_VCVTMU 54
4116 #define NEON_2RM_VCVTMS 55
4117 #define NEON_2RM_VRECPE 56
4118 #define NEON_2RM_VRSQRTE 57
4119 #define NEON_2RM_VRECPE_F 58
4120 #define NEON_2RM_VRSQRTE_F 59
4121 #define NEON_2RM_VCVT_FS 60
4122 #define NEON_2RM_VCVT_FU 61
4123 #define NEON_2RM_VCVT_SF 62
4124 #define NEON_2RM_VCVT_UF 63
4126 static bool neon_2rm_is_v8_op(int op)
4128 /* Return true if this neon 2reg-misc op is ARMv8 and up */
4129 switch (op) {
4130 case NEON_2RM_VRINTN:
4131 case NEON_2RM_VRINTA:
4132 case NEON_2RM_VRINTM:
4133 case NEON_2RM_VRINTP:
4134 case NEON_2RM_VRINTZ:
4135 case NEON_2RM_VRINTX:
4136 case NEON_2RM_VCVTAU:
4137 case NEON_2RM_VCVTAS:
4138 case NEON_2RM_VCVTNU:
4139 case NEON_2RM_VCVTNS:
4140 case NEON_2RM_VCVTPU:
4141 case NEON_2RM_VCVTPS:
4142 case NEON_2RM_VCVTMU:
4143 case NEON_2RM_VCVTMS:
4144 return true;
4145 default:
4146 return false;
4150 /* Each entry in this array has bit n set if the insn allows
4151 * size value n (otherwise it will UNDEF). Since unallocated
4152 * op values will have no bits set they always UNDEF.
4154 static const uint8_t neon_2rm_sizes[] = {
4155 [NEON_2RM_VREV64] = 0x7,
4156 [NEON_2RM_VREV32] = 0x3,
4157 [NEON_2RM_VREV16] = 0x1,
4158 [NEON_2RM_VPADDL] = 0x7,
4159 [NEON_2RM_VPADDL_U] = 0x7,
4160 [NEON_2RM_AESE] = 0x1,
4161 [NEON_2RM_AESMC] = 0x1,
4162 [NEON_2RM_VCLS] = 0x7,
4163 [NEON_2RM_VCLZ] = 0x7,
4164 [NEON_2RM_VCNT] = 0x1,
4165 [NEON_2RM_VMVN] = 0x1,
4166 [NEON_2RM_VPADAL] = 0x7,
4167 [NEON_2RM_VPADAL_U] = 0x7,
4168 [NEON_2RM_VQABS] = 0x7,
4169 [NEON_2RM_VQNEG] = 0x7,
4170 [NEON_2RM_VCGT0] = 0x7,
4171 [NEON_2RM_VCGE0] = 0x7,
4172 [NEON_2RM_VCEQ0] = 0x7,
4173 [NEON_2RM_VCLE0] = 0x7,
4174 [NEON_2RM_VCLT0] = 0x7,
4175 [NEON_2RM_SHA1H] = 0x4,
4176 [NEON_2RM_VABS] = 0x7,
4177 [NEON_2RM_VNEG] = 0x7,
4178 [NEON_2RM_VCGT0_F] = 0x4,
4179 [NEON_2RM_VCGE0_F] = 0x4,
4180 [NEON_2RM_VCEQ0_F] = 0x4,
4181 [NEON_2RM_VCLE0_F] = 0x4,
4182 [NEON_2RM_VCLT0_F] = 0x4,
4183 [NEON_2RM_VABS_F] = 0x4,
4184 [NEON_2RM_VNEG_F] = 0x4,
4185 [NEON_2RM_VSWP] = 0x1,
4186 [NEON_2RM_VTRN] = 0x7,
4187 [NEON_2RM_VUZP] = 0x7,
4188 [NEON_2RM_VZIP] = 0x7,
4189 [NEON_2RM_VMOVN] = 0x7,
4190 [NEON_2RM_VQMOVN] = 0x7,
4191 [NEON_2RM_VSHLL] = 0x7,
4192 [NEON_2RM_SHA1SU1] = 0x4,
4193 [NEON_2RM_VRINTN] = 0x4,
4194 [NEON_2RM_VRINTX] = 0x4,
4195 [NEON_2RM_VRINTA] = 0x4,
4196 [NEON_2RM_VRINTZ] = 0x4,
4197 [NEON_2RM_VCVT_F16_F32] = 0x2,
4198 [NEON_2RM_VRINTM] = 0x4,
4199 [NEON_2RM_VCVT_F32_F16] = 0x2,
4200 [NEON_2RM_VRINTP] = 0x4,
4201 [NEON_2RM_VCVTAU] = 0x4,
4202 [NEON_2RM_VCVTAS] = 0x4,
4203 [NEON_2RM_VCVTNU] = 0x4,
4204 [NEON_2RM_VCVTNS] = 0x4,
4205 [NEON_2RM_VCVTPU] = 0x4,
4206 [NEON_2RM_VCVTPS] = 0x4,
4207 [NEON_2RM_VCVTMU] = 0x4,
4208 [NEON_2RM_VCVTMS] = 0x4,
4209 [NEON_2RM_VRECPE] = 0x4,
4210 [NEON_2RM_VRSQRTE] = 0x4,
4211 [NEON_2RM_VRECPE_F] = 0x4,
4212 [NEON_2RM_VRSQRTE_F] = 0x4,
4213 [NEON_2RM_VCVT_FS] = 0x4,
4214 [NEON_2RM_VCVT_FU] = 0x4,
4215 [NEON_2RM_VCVT_SF] = 0x4,
4216 [NEON_2RM_VCVT_UF] = 0x4,
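     /* Illustrative note: bit n of an entry above permits element size n,
      * so e.g. neon_2rm_sizes[NEON_2RM_VREV16] == 0x1 decodes only for
      * size == 0 and neon_2rm_sizes[NEON_2RM_VCVT_F16_F32] == 0x2 only for
      * size == 1.  The decoder later UNDEFs whenever
      * (neon_2rm_sizes[op] & (1 << size)) == 0, so unallocated ops, whose
      * entries are all-zero, always UNDEF.
      */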
4220 /* Expand v8.1 simd helper. */
4221 static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
4222 int q, int rd, int rn, int rm)
4224 if (dc_isar_feature(aa32_rdm, s)) {
4225 int opr_sz = (1 + q) * 8;
4226 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
4227 vfp_reg_offset(1, rn),
4228 vfp_reg_offset(1, rm), cpu_env,
4229 opr_sz, opr_sz, 0, fn);
4230 return 0;
4232 return 1;
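 /* Usage sketch (illustrative): the decoder below calls, for example,
  *     do_v81_helper(s, gen_helper_gvec_qrdmlah_s16, q, rd, rn, rm);
  * for VQRDMLAH.  With q == 1 the operation covers (1 + 1) * 8 == 16 bytes,
  * i.e. a full Q register; without the v8.1-RDM feature the function
  * returns 1 and the caller treats the encoding as UNDEF.
  */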
4235 static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4237 tcg_gen_vec_sar8i_i64(a, a, shift);
4238 tcg_gen_vec_add8_i64(d, d, a);
4241 static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4243 tcg_gen_vec_sar16i_i64(a, a, shift);
4244 tcg_gen_vec_add16_i64(d, d, a);
4247 static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
4249 tcg_gen_sari_i32(a, a, shift);
4250 tcg_gen_add_i32(d, d, a);
4253 static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4255 tcg_gen_sari_i64(a, a, shift);
4256 tcg_gen_add_i64(d, d, a);
4259 static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4261 tcg_gen_sari_vec(vece, a, a, sh);
4262 tcg_gen_add_vec(vece, d, d, a);
4265 static const TCGOpcode vecop_list_ssra[] = {
4266 INDEX_op_sari_vec, INDEX_op_add_vec, 0
4269 const GVecGen2i ssra_op[4] = {
4270 { .fni8 = gen_ssra8_i64,
4271 .fniv = gen_ssra_vec,
4272 .load_dest = true,
4273 .opt_opc = vecop_list_ssra,
4274 .vece = MO_8 },
4275 { .fni8 = gen_ssra16_i64,
4276 .fniv = gen_ssra_vec,
4277 .load_dest = true,
4278 .opt_opc = vecop_list_ssra,
4279 .vece = MO_16 },
4280 { .fni4 = gen_ssra32_i32,
4281 .fniv = gen_ssra_vec,
4282 .load_dest = true,
4283 .opt_opc = vecop_list_ssra,
4284 .vece = MO_32 },
4285 { .fni8 = gen_ssra64_i64,
4286 .fniv = gen_ssra_vec,
4287 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4288 .opt_opc = vecop_list_ssra,
4289 .load_dest = true,
4290 .vece = MO_64 },
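 /* Illustrative note on the table layout: a VSRA.S8 #3 uses ssra_op[MO_8];
  * per 64-bit chunk gen_ssra8_i64 does a per-byte arithmetic shift right by
  * 3 and then d += a.  .load_dest = true makes the gvec expander load the
  * destination as an accumulator, and .opt_opc lists the vector opcodes a
  * host must support for the .fniv form; otherwise the scalar .fni8/.fni4
  * fallbacks are used.  (Stated here as a reading aid for the GVecGen2i
  * tables, not taken from the original file.)
  */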
4293 static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4295 tcg_gen_vec_shr8i_i64(a, a, shift);
4296 tcg_gen_vec_add8_i64(d, d, a);
4299 static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4301 tcg_gen_vec_shr16i_i64(a, a, shift);
4302 tcg_gen_vec_add16_i64(d, d, a);
4305 static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
4307 tcg_gen_shri_i32(a, a, shift);
4308 tcg_gen_add_i32(d, d, a);
4311 static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4313 tcg_gen_shri_i64(a, a, shift);
4314 tcg_gen_add_i64(d, d, a);
4317 static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4319 tcg_gen_shri_vec(vece, a, a, sh);
4320 tcg_gen_add_vec(vece, d, d, a);
4323 static const TCGOpcode vecop_list_usra[] = {
4324 INDEX_op_shri_vec, INDEX_op_add_vec, 0
4327 const GVecGen2i usra_op[4] = {
4328 { .fni8 = gen_usra8_i64,
4329 .fniv = gen_usra_vec,
4330 .load_dest = true,
4331 .opt_opc = vecop_list_usra,
4332 .vece = MO_8, },
4333 { .fni8 = gen_usra16_i64,
4334 .fniv = gen_usra_vec,
4335 .load_dest = true,
4336 .opt_opc = vecop_list_usra,
4337 .vece = MO_16, },
4338 { .fni4 = gen_usra32_i32,
4339 .fniv = gen_usra_vec,
4340 .load_dest = true,
4341 .opt_opc = vecop_list_usra,
4342 .vece = MO_32, },
4343 { .fni8 = gen_usra64_i64,
4344 .fniv = gen_usra_vec,
4345 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4346 .load_dest = true,
4347 .opt_opc = vecop_list_usra,
4348 .vece = MO_64, },
4351 static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4353 uint64_t mask = dup_const(MO_8, 0xff >> shift);
4354 TCGv_i64 t = tcg_temp_new_i64();
4356 tcg_gen_shri_i64(t, a, shift);
4357 tcg_gen_andi_i64(t, t, mask);
4358 tcg_gen_andi_i64(d, d, ~mask);
4359 tcg_gen_or_i64(d, d, t);
4360 tcg_temp_free_i64(t);
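     /* Worked example (illustrative): for an 8-bit VSRI #3,
      * mask == dup_const(MO_8, 0xff >> 3) == 0x1f1f1f1f1f1f1f1f.
      * (a >> 3) & mask keeps only the bits that belong to each byte after
      * the whole-register shift, and (d & ~mask) | t leaves the top three
      * bits of every destination byte untouched, which is the
      * shift-right-and-insert semantic.
      */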
4363 static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4365 uint64_t mask = dup_const(MO_16, 0xffff >> shift);
4366 TCGv_i64 t = tcg_temp_new_i64();
4368 tcg_gen_shri_i64(t, a, shift);
4369 tcg_gen_andi_i64(t, t, mask);
4370 tcg_gen_andi_i64(d, d, ~mask);
4371 tcg_gen_or_i64(d, d, t);
4372 tcg_temp_free_i64(t);
4375 static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
4377 tcg_gen_shri_i32(a, a, shift);
4378 tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
4381 static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4383 tcg_gen_shri_i64(a, a, shift);
4384 tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
4387 static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4389 if (sh == 0) {
4390 tcg_gen_mov_vec(d, a);
4391 } else {
4392 TCGv_vec t = tcg_temp_new_vec_matching(d);
4393 TCGv_vec m = tcg_temp_new_vec_matching(d);
4395 tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
4396 tcg_gen_shri_vec(vece, t, a, sh);
4397 tcg_gen_and_vec(vece, d, d, m);
4398 tcg_gen_or_vec(vece, d, d, t);
4400 tcg_temp_free_vec(t);
4401 tcg_temp_free_vec(m);
4405 static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 };
4407 const GVecGen2i sri_op[4] = {
4408 { .fni8 = gen_shr8_ins_i64,
4409 .fniv = gen_shr_ins_vec,
4410 .load_dest = true,
4411 .opt_opc = vecop_list_sri,
4412 .vece = MO_8 },
4413 { .fni8 = gen_shr16_ins_i64,
4414 .fniv = gen_shr_ins_vec,
4415 .load_dest = true,
4416 .opt_opc = vecop_list_sri,
4417 .vece = MO_16 },
4418 { .fni4 = gen_shr32_ins_i32,
4419 .fniv = gen_shr_ins_vec,
4420 .load_dest = true,
4421 .opt_opc = vecop_list_sri,
4422 .vece = MO_32 },
4423 { .fni8 = gen_shr64_ins_i64,
4424 .fniv = gen_shr_ins_vec,
4425 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4426 .load_dest = true,
4427 .opt_opc = vecop_list_sri,
4428 .vece = MO_64 },
4431 static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4433 uint64_t mask = dup_const(MO_8, 0xff << shift);
4434 TCGv_i64 t = tcg_temp_new_i64();
4436 tcg_gen_shli_i64(t, a, shift);
4437 tcg_gen_andi_i64(t, t, mask);
4438 tcg_gen_andi_i64(d, d, ~mask);
4439 tcg_gen_or_i64(d, d, t);
4440 tcg_temp_free_i64(t);
4443 static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4445 uint64_t mask = dup_const(MO_16, 0xffff << shift);
4446 TCGv_i64 t = tcg_temp_new_i64();
4448 tcg_gen_shli_i64(t, a, shift);
4449 tcg_gen_andi_i64(t, t, mask);
4450 tcg_gen_andi_i64(d, d, ~mask);
4451 tcg_gen_or_i64(d, d, t);
4452 tcg_temp_free_i64(t);
4455 static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
4457 tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
4460 static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
4462 tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
4465 static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
4467 if (sh == 0) {
4468 tcg_gen_mov_vec(d, a);
4469 } else {
4470 TCGv_vec t = tcg_temp_new_vec_matching(d);
4471 TCGv_vec m = tcg_temp_new_vec_matching(d);
4473 tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
4474 tcg_gen_shli_vec(vece, t, a, sh);
4475 tcg_gen_and_vec(vece, d, d, m);
4476 tcg_gen_or_vec(vece, d, d, t);
4478 tcg_temp_free_vec(t);
4479 tcg_temp_free_vec(m);
4483 static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 };
4485 const GVecGen2i sli_op[4] = {
4486 { .fni8 = gen_shl8_ins_i64,
4487 .fniv = gen_shl_ins_vec,
4488 .load_dest = true,
4489 .opt_opc = vecop_list_sli,
4490 .vece = MO_8 },
4491 { .fni8 = gen_shl16_ins_i64,
4492 .fniv = gen_shl_ins_vec,
4493 .load_dest = true,
4494 .opt_opc = vecop_list_sli,
4495 .vece = MO_16 },
4496 { .fni4 = gen_shl32_ins_i32,
4497 .fniv = gen_shl_ins_vec,
4498 .load_dest = true,
4499 .opt_opc = vecop_list_sli,
4500 .vece = MO_32 },
4501 { .fni8 = gen_shl64_ins_i64,
4502 .fniv = gen_shl_ins_vec,
4503 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4504 .load_dest = true,
4505 .opt_opc = vecop_list_sli,
4506 .vece = MO_64 },
4509 static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4511 gen_helper_neon_mul_u8(a, a, b);
4512 gen_helper_neon_add_u8(d, d, a);
4515 static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4517 gen_helper_neon_mul_u8(a, a, b);
4518 gen_helper_neon_sub_u8(d, d, a);
4521 static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4523 gen_helper_neon_mul_u16(a, a, b);
4524 gen_helper_neon_add_u16(d, d, a);
4527 static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4529 gen_helper_neon_mul_u16(a, a, b);
4530 gen_helper_neon_sub_u16(d, d, a);
4533 static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4535 tcg_gen_mul_i32(a, a, b);
4536 tcg_gen_add_i32(d, d, a);
4539 static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4541 tcg_gen_mul_i32(a, a, b);
4542 tcg_gen_sub_i32(d, d, a);
4545 static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4547 tcg_gen_mul_i64(a, a, b);
4548 tcg_gen_add_i64(d, d, a);
4551 static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4553 tcg_gen_mul_i64(a, a, b);
4554 tcg_gen_sub_i64(d, d, a);
4557 static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4559 tcg_gen_mul_vec(vece, a, a, b);
4560 tcg_gen_add_vec(vece, d, d, a);
4563 static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4565 tcg_gen_mul_vec(vece, a, a, b);
4566 tcg_gen_sub_vec(vece, d, d, a);
4569 /* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
4570 * these tables are shared with AArch64 which does support them.
4573 static const TCGOpcode vecop_list_mla[] = {
4574 INDEX_op_mul_vec, INDEX_op_add_vec, 0
4577 static const TCGOpcode vecop_list_mls[] = {
4578 INDEX_op_mul_vec, INDEX_op_sub_vec, 0
4581 const GVecGen3 mla_op[4] = {
4582 { .fni4 = gen_mla8_i32,
4583 .fniv = gen_mla_vec,
4584 .load_dest = true,
4585 .opt_opc = vecop_list_mla,
4586 .vece = MO_8 },
4587 { .fni4 = gen_mla16_i32,
4588 .fniv = gen_mla_vec,
4589 .load_dest = true,
4590 .opt_opc = vecop_list_mla,
4591 .vece = MO_16 },
4592 { .fni4 = gen_mla32_i32,
4593 .fniv = gen_mla_vec,
4594 .load_dest = true,
4595 .opt_opc = vecop_list_mla,
4596 .vece = MO_32 },
4597 { .fni8 = gen_mla64_i64,
4598 .fniv = gen_mla_vec,
4599 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4600 .load_dest = true,
4601 .opt_opc = vecop_list_mla,
4602 .vece = MO_64 },
4605 const GVecGen3 mls_op[4] = {
4606 { .fni4 = gen_mls8_i32,
4607 .fniv = gen_mls_vec,
4608 .load_dest = true,
4609 .opt_opc = vecop_list_mls,
4610 .vece = MO_8 },
4611 { .fni4 = gen_mls16_i32,
4612 .fniv = gen_mls_vec,
4613 .load_dest = true,
4614 .opt_opc = vecop_list_mls,
4615 .vece = MO_16 },
4616 { .fni4 = gen_mls32_i32,
4617 .fniv = gen_mls_vec,
4618 .load_dest = true,
4619 .opt_opc = vecop_list_mls,
4620 .vece = MO_32 },
4621 { .fni8 = gen_mls64_i64,
4622 .fniv = gen_mls_vec,
4623 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4624 .load_dest = true,
4625 .opt_opc = vecop_list_mls,
4626 .vece = MO_64 },
4629 /* CMTST : test is "if ((X & Y) != 0)". */
4630 static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4632 tcg_gen_and_i32(d, a, b);
4633 tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
4634 tcg_gen_neg_i32(d, d);
4637 void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4639 tcg_gen_and_i64(d, a, b);
4640 tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
4641 tcg_gen_neg_i64(d, d);
4644 static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4646 tcg_gen_and_vec(vece, d, a, b);
4647 tcg_gen_dupi_vec(vece, a, 0);
4648 tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
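 /* Worked example (illustrative): with 32-bit elements, a == 0x10 and
  * b == 0x30 give (a & b) == 0x10, which is nonzero, so the setcond/neg
  * pair in the scalar versions above (and the cmp_vec here) produce
  * 0xffffffff; a zero intersection produces 0.
  */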
4651 static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 };
4653 const GVecGen3 cmtst_op[4] = {
4654 { .fni4 = gen_helper_neon_tst_u8,
4655 .fniv = gen_cmtst_vec,
4656 .opt_opc = vecop_list_cmtst,
4657 .vece = MO_8 },
4658 { .fni4 = gen_helper_neon_tst_u16,
4659 .fniv = gen_cmtst_vec,
4660 .opt_opc = vecop_list_cmtst,
4661 .vece = MO_16 },
4662 { .fni4 = gen_cmtst_i32,
4663 .fniv = gen_cmtst_vec,
4664 .opt_opc = vecop_list_cmtst,
4665 .vece = MO_32 },
4666 { .fni8 = gen_cmtst_i64,
4667 .fniv = gen_cmtst_vec,
4668 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4669 .opt_opc = vecop_list_cmtst,
4670 .vece = MO_64 },
4673 static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4674 TCGv_vec a, TCGv_vec b)
4676 TCGv_vec x = tcg_temp_new_vec_matching(t);
4677 tcg_gen_add_vec(vece, x, a, b);
4678 tcg_gen_usadd_vec(vece, t, a, b);
4679 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4680 tcg_gen_or_vec(vece, sat, sat, x);
4681 tcg_temp_free_vec(x);
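 /* Worked example (illustrative): for 8-bit lanes with a == 0xf0 and
  * b == 0x20, the wrapping add gives 0x10 while the saturating add gives
  * 0xff; the lanes differ, so the compare writes all-ones into x and that
  * is ORed into 'sat' (the vfp.qc vector), which is how QC is recorded
  * for UQADD.
  */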
4684 static const TCGOpcode vecop_list_uqadd[] = {
4685 INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
4688 const GVecGen4 uqadd_op[4] = {
4689 { .fniv = gen_uqadd_vec,
4690 .fno = gen_helper_gvec_uqadd_b,
4691 .write_aofs = true,
4692 .opt_opc = vecop_list_uqadd,
4693 .vece = MO_8 },
4694 { .fniv = gen_uqadd_vec,
4695 .fno = gen_helper_gvec_uqadd_h,
4696 .write_aofs = true,
4697 .opt_opc = vecop_list_uqadd,
4698 .vece = MO_16 },
4699 { .fniv = gen_uqadd_vec,
4700 .fno = gen_helper_gvec_uqadd_s,
4701 .write_aofs = true,
4702 .opt_opc = vecop_list_uqadd,
4703 .vece = MO_32 },
4704 { .fniv = gen_uqadd_vec,
4705 .fno = gen_helper_gvec_uqadd_d,
4706 .write_aofs = true,
4707 .opt_opc = vecop_list_uqadd,
4708 .vece = MO_64 },
4711 static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4712 TCGv_vec a, TCGv_vec b)
4714 TCGv_vec x = tcg_temp_new_vec_matching(t);
4715 tcg_gen_add_vec(vece, x, a, b);
4716 tcg_gen_ssadd_vec(vece, t, a, b);
4717 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4718 tcg_gen_or_vec(vece, sat, sat, x);
4719 tcg_temp_free_vec(x);
4722 static const TCGOpcode vecop_list_sqadd[] = {
4723 INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
4726 const GVecGen4 sqadd_op[4] = {
4727 { .fniv = gen_sqadd_vec,
4728 .fno = gen_helper_gvec_sqadd_b,
4729 .opt_opc = vecop_list_sqadd,
4730 .write_aofs = true,
4731 .vece = MO_8 },
4732 { .fniv = gen_sqadd_vec,
4733 .fno = gen_helper_gvec_sqadd_h,
4734 .opt_opc = vecop_list_sqadd,
4735 .write_aofs = true,
4736 .vece = MO_16 },
4737 { .fniv = gen_sqadd_vec,
4738 .fno = gen_helper_gvec_sqadd_s,
4739 .opt_opc = vecop_list_sqadd,
4740 .write_aofs = true,
4741 .vece = MO_32 },
4742 { .fniv = gen_sqadd_vec,
4743 .fno = gen_helper_gvec_sqadd_d,
4744 .opt_opc = vecop_list_sqadd,
4745 .write_aofs = true,
4746 .vece = MO_64 },
4749 static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4750 TCGv_vec a, TCGv_vec b)
4752 TCGv_vec x = tcg_temp_new_vec_matching(t);
4753 tcg_gen_sub_vec(vece, x, a, b);
4754 tcg_gen_ussub_vec(vece, t, a, b);
4755 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4756 tcg_gen_or_vec(vece, sat, sat, x);
4757 tcg_temp_free_vec(x);
4760 static const TCGOpcode vecop_list_uqsub[] = {
4761 INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
4764 const GVecGen4 uqsub_op[4] = {
4765 { .fniv = gen_uqsub_vec,
4766 .fno = gen_helper_gvec_uqsub_b,
4767 .opt_opc = vecop_list_uqsub,
4768 .write_aofs = true,
4769 .vece = MO_8 },
4770 { .fniv = gen_uqsub_vec,
4771 .fno = gen_helper_gvec_uqsub_h,
4772 .opt_opc = vecop_list_uqsub,
4773 .write_aofs = true,
4774 .vece = MO_16 },
4775 { .fniv = gen_uqsub_vec,
4776 .fno = gen_helper_gvec_uqsub_s,
4777 .opt_opc = vecop_list_uqsub,
4778 .write_aofs = true,
4779 .vece = MO_32 },
4780 { .fniv = gen_uqsub_vec,
4781 .fno = gen_helper_gvec_uqsub_d,
4782 .opt_opc = vecop_list_uqsub,
4783 .write_aofs = true,
4784 .vece = MO_64 },
4787 static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4788 TCGv_vec a, TCGv_vec b)
4790 TCGv_vec x = tcg_temp_new_vec_matching(t);
4791 tcg_gen_sub_vec(vece, x, a, b);
4792 tcg_gen_sssub_vec(vece, t, a, b);
4793 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4794 tcg_gen_or_vec(vece, sat, sat, x);
4795 tcg_temp_free_vec(x);
4798 static const TCGOpcode vecop_list_sqsub[] = {
4799 INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
4802 const GVecGen4 sqsub_op[4] = {
4803 { .fniv = gen_sqsub_vec,
4804 .fno = gen_helper_gvec_sqsub_b,
4805 .opt_opc = vecop_list_sqsub,
4806 .write_aofs = true,
4807 .vece = MO_8 },
4808 { .fniv = gen_sqsub_vec,
4809 .fno = gen_helper_gvec_sqsub_h,
4810 .opt_opc = vecop_list_sqsub,
4811 .write_aofs = true,
4812 .vece = MO_16 },
4813 { .fniv = gen_sqsub_vec,
4814 .fno = gen_helper_gvec_sqsub_s,
4815 .opt_opc = vecop_list_sqsub,
4816 .write_aofs = true,
4817 .vece = MO_32 },
4818 { .fniv = gen_sqsub_vec,
4819 .fno = gen_helper_gvec_sqsub_d,
4820 .opt_opc = vecop_list_sqsub,
4821 .write_aofs = true,
4822 .vece = MO_64 },
4825 /* Translate a NEON data processing instruction. Return nonzero if the
4826 instruction is invalid.
4827 We process data in a mixture of 32-bit and 64-bit chunks.
4828 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4830 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
4832 int op;
4833 int q;
4834 int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
4835 int size;
4836 int shift;
4837 int pass;
4838 int count;
4839 int pairwise;
4840 int u;
4841 int vec_size;
4842 uint32_t imm;
4843 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
4844 TCGv_ptr ptr1, ptr2, ptr3;
4845 TCGv_i64 tmp64;
4847 /* FIXME: this access check should not take precedence over UNDEF
4848 * for invalid encodings; we will generate incorrect syndrome information
4849 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4851 if (s->fp_excp_el) {
4852 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
4853 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
4854 return 0;
4857 if (!s->vfp_enabled)
4858 return 1;
4859 q = (insn & (1 << 6)) != 0;
4860 u = (insn >> 24) & 1;
4861 VFP_DREG_D(rd, insn);
4862 VFP_DREG_N(rn, insn);
4863 VFP_DREG_M(rm, insn);
4864 size = (insn >> 20) & 3;
4865 vec_size = q ? 16 : 8;
4866 rd_ofs = neon_reg_offset(rd, 0);
4867 rn_ofs = neon_reg_offset(rn, 0);
4868 rm_ofs = neon_reg_offset(rm, 0);
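     /* Decode example (illustrative): for VADD.I32 q2, q1, q0 we get
      * q == 1, u == 0 (VSUB would have u == 1), size == 2, and rd == 4,
      * rn == 2, rm == 0 as D-register indices, with vec_size == 16 bytes.
      */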
4870 if ((insn & (1 << 23)) == 0) {
4871 /* Three register same length. */
4872 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4873 /* Catch invalid op and bad size combinations: UNDEF */
4874 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4875 return 1;
4877 /* All insns of this form UNDEF for either this condition or the
4878 * superset of cases "Q==1"; we catch the latter later.
4880 if (q && ((rd | rn | rm) & 1)) {
4881 return 1;
4883 switch (op) {
4884 case NEON_3R_SHA:
4885 /* The SHA-1/SHA-256 3-register instructions require special
4886 * treatment here, as their size field is overloaded as an
4887 * op type selector, and they all consume their input in a
4888 * single pass.
4890 if (!q) {
4891 return 1;
4893 if (!u) { /* SHA-1 */
4894 if (!dc_isar_feature(aa32_sha1, s)) {
4895 return 1;
4897 ptr1 = vfp_reg_ptr(true, rd);
4898 ptr2 = vfp_reg_ptr(true, rn);
4899 ptr3 = vfp_reg_ptr(true, rm);
4900 tmp4 = tcg_const_i32(size);
4901 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
4902 tcg_temp_free_i32(tmp4);
4903 } else { /* SHA-256 */
4904 if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
4905 return 1;
4907 ptr1 = vfp_reg_ptr(true, rd);
4908 ptr2 = vfp_reg_ptr(true, rn);
4909 ptr3 = vfp_reg_ptr(true, rm);
4910 switch (size) {
4911 case 0:
4912 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
4913 break;
4914 case 1:
4915 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
4916 break;
4917 case 2:
4918 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
4919 break;
4922 tcg_temp_free_ptr(ptr1);
4923 tcg_temp_free_ptr(ptr2);
4924 tcg_temp_free_ptr(ptr3);
4925 return 0;
4927 case NEON_3R_VPADD_VQRDMLAH:
4928 if (!u) {
4929 break; /* VPADD */
4931 /* VQRDMLAH */
4932 switch (size) {
4933 case 1:
4934 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
4935 q, rd, rn, rm);
4936 case 2:
4937 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
4938 q, rd, rn, rm);
4940 return 1;
4942 case NEON_3R_VFM_VQRDMLSH:
4943 if (!u) {
4944                 /* VFMA, VFMS */
4945 if (size == 1) {
4946 return 1;
4948 break;
4950 /* VQRDMLSH */
4951 switch (size) {
4952 case 1:
4953 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
4954 q, rd, rn, rm);
4955 case 2:
4956 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
4957 q, rd, rn, rm);
4959 return 1;
4961 case NEON_3R_LOGIC: /* Logic ops. */
4962 switch ((u << 2) | size) {
4963 case 0: /* VAND */
4964 tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
4965 vec_size, vec_size);
4966 break;
4967 case 1: /* VBIC */
4968 tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
4969 vec_size, vec_size);
4970 break;
4971 case 2: /* VORR */
4972 tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
4973 vec_size, vec_size);
4974 break;
4975 case 3: /* VORN */
4976 tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
4977 vec_size, vec_size);
4978 break;
4979 case 4: /* VEOR */
4980 tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
4981 vec_size, vec_size);
4982 break;
4983 case 5: /* VBSL */
4984 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs,
4985 vec_size, vec_size);
4986 break;
4987 case 6: /* VBIT */
4988 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
4989 vec_size, vec_size);
4990 break;
4991 case 7: /* VBIF */
4992 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs,
4993 vec_size, vec_size);
4994 break;
4996 return 0;
4998 case NEON_3R_VADD_VSUB:
4999 if (u) {
5000 tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
5001 vec_size, vec_size);
5002 } else {
5003 tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
5004 vec_size, vec_size);
5006 return 0;
5008 case NEON_3R_VQADD:
5009 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
5010 rn_ofs, rm_ofs, vec_size, vec_size,
5011 (u ? uqadd_op : sqadd_op) + size);
5012 return 0;
5014 case NEON_3R_VQSUB:
5015 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
5016 rn_ofs, rm_ofs, vec_size, vec_size,
5017 (u ? uqsub_op : sqsub_op) + size);
5018 return 0;
5020 case NEON_3R_VMUL: /* VMUL */
5021 if (u) {
5022 /* Polynomial case allows only P8 and is handled below. */
5023 if (size != 0) {
5024 return 1;
5026 } else {
5027 tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
5028 vec_size, vec_size);
5029 return 0;
5031 break;
5033 case NEON_3R_VML: /* VMLA, VMLS */
5034 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
5035 u ? &mls_op[size] : &mla_op[size]);
5036 return 0;
5038 case NEON_3R_VTST_VCEQ:
5039 if (u) { /* VCEQ */
5040 tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
5041 vec_size, vec_size);
5042 } else { /* VTST */
5043 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
5044 vec_size, vec_size, &cmtst_op[size]);
5046 return 0;
5048 case NEON_3R_VCGT:
5049 tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
5050 rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
5051 return 0;
5053 case NEON_3R_VCGE:
5054 tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
5055 rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
5056 return 0;
5058 case NEON_3R_VMAX:
5059 if (u) {
5060 tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
5061 vec_size, vec_size);
5062 } else {
5063 tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
5064 vec_size, vec_size);
5066 return 0;
5067 case NEON_3R_VMIN:
5068 if (u) {
5069 tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
5070 vec_size, vec_size);
5071 } else {
5072 tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
5073 vec_size, vec_size);
5075 return 0;
5078 if (size == 3) {
5079 /* 64-bit element instructions. */
5080 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5081 neon_load_reg64(cpu_V0, rn + pass);
5082 neon_load_reg64(cpu_V1, rm + pass);
5083 switch (op) {
5084 case NEON_3R_VSHL:
5085 if (u) {
5086 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5087 } else {
5088 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5090 break;
5091 case NEON_3R_VQSHL:
5092 if (u) {
5093 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5094 cpu_V1, cpu_V0);
5095 } else {
5096 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5097 cpu_V1, cpu_V0);
5099 break;
5100 case NEON_3R_VRSHL:
5101 if (u) {
5102 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
5103 } else {
5104 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5106 break;
5107 case NEON_3R_VQRSHL:
5108 if (u) {
5109 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5110 cpu_V1, cpu_V0);
5111 } else {
5112 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5113 cpu_V1, cpu_V0);
5115 break;
5116 default:
5117 abort();
5119 neon_store_reg64(cpu_V0, rd + pass);
5121 return 0;
5123 pairwise = 0;
5124 switch (op) {
5125 case NEON_3R_VSHL:
5126 case NEON_3R_VQSHL:
5127 case NEON_3R_VRSHL:
5128 case NEON_3R_VQRSHL:
5130 int rtmp;
5131 /* Shift instruction operands are reversed. */
5132 rtmp = rn;
5133 rn = rm;
5134 rm = rtmp;
5136 break;
5137 case NEON_3R_VPADD_VQRDMLAH:
5138 case NEON_3R_VPMAX:
5139 case NEON_3R_VPMIN:
5140 pairwise = 1;
5141 break;
5142 case NEON_3R_FLOAT_ARITH:
5143 pairwise = (u && size < 2); /* if VPADD (float) */
5144 break;
5145 case NEON_3R_FLOAT_MINMAX:
5146 pairwise = u; /* if VPMIN/VPMAX (float) */
5147 break;
5148 case NEON_3R_FLOAT_CMP:
5149 if (!u && size) {
5150 /* no encoding for U=0 C=1x */
5151 return 1;
5153 break;
5154 case NEON_3R_FLOAT_ACMP:
5155 if (!u) {
5156 return 1;
5158 break;
5159 case NEON_3R_FLOAT_MISC:
5160 /* VMAXNM/VMINNM in ARMv8 */
5161 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
5162 return 1;
5164 break;
5165 case NEON_3R_VFM_VQRDMLSH:
5166 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
5167 return 1;
5169 break;
5170 default:
5171 break;
5174 if (pairwise && q) {
5175 /* All the pairwise insns UNDEF if Q is set */
5176 return 1;
5179 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5181 if (pairwise) {
5182 /* Pairwise. */
5183 if (pass < 1) {
5184 tmp = neon_load_reg(rn, 0);
5185 tmp2 = neon_load_reg(rn, 1);
5186 } else {
5187 tmp = neon_load_reg(rm, 0);
5188 tmp2 = neon_load_reg(rm, 1);
5190 } else {
5191 /* Elementwise. */
5192 tmp = neon_load_reg(rn, pass);
5193 tmp2 = neon_load_reg(rm, pass);
5195 switch (op) {
5196 case NEON_3R_VHADD:
5197 GEN_NEON_INTEGER_OP(hadd);
5198 break;
5199 case NEON_3R_VRHADD:
5200 GEN_NEON_INTEGER_OP(rhadd);
5201 break;
5202 case NEON_3R_VHSUB:
5203 GEN_NEON_INTEGER_OP(hsub);
5204 break;
5205 case NEON_3R_VSHL:
5206 GEN_NEON_INTEGER_OP(shl);
5207 break;
5208 case NEON_3R_VQSHL:
5209 GEN_NEON_INTEGER_OP_ENV(qshl);
5210 break;
5211 case NEON_3R_VRSHL:
5212 GEN_NEON_INTEGER_OP(rshl);
5213 break;
5214 case NEON_3R_VQRSHL:
5215 GEN_NEON_INTEGER_OP_ENV(qrshl);
5216 break;
5217 case NEON_3R_VABD:
5218 GEN_NEON_INTEGER_OP(abd);
5219 break;
5220 case NEON_3R_VABA:
5221 GEN_NEON_INTEGER_OP(abd);
5222 tcg_temp_free_i32(tmp2);
5223 tmp2 = neon_load_reg(rd, pass);
5224 gen_neon_add(size, tmp, tmp2);
5225 break;
5226 case NEON_3R_VMUL:
5227 /* VMUL.P8; other cases already eliminated. */
5228 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
5229 break;
5230 case NEON_3R_VPMAX:
5231 GEN_NEON_INTEGER_OP(pmax);
5232 break;
5233 case NEON_3R_VPMIN:
5234 GEN_NEON_INTEGER_OP(pmin);
5235 break;
5236 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
5237 if (!u) { /* VQDMULH */
5238 switch (size) {
5239 case 1:
5240 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5241 break;
5242 case 2:
5243 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5244 break;
5245 default: abort();
5247 } else { /* VQRDMULH */
5248 switch (size) {
5249 case 1:
5250 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5251 break;
5252 case 2:
5253 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5254 break;
5255 default: abort();
5258 break;
5259 case NEON_3R_VPADD_VQRDMLAH:
5260 switch (size) {
5261 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5262 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5263 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
5264 default: abort();
5266 break;
5267 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
5269 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5270 switch ((u << 2) | size) {
5271 case 0: /* VADD */
5272 case 4: /* VPADD */
5273 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5274 break;
5275 case 2: /* VSUB */
5276 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
5277 break;
5278 case 6: /* VABD */
5279 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5280 break;
5281 default:
5282 abort();
5284 tcg_temp_free_ptr(fpstatus);
5285 break;
5287 case NEON_3R_FLOAT_MULTIPLY:
5289 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5290 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5291 if (!u) {
5292 tcg_temp_free_i32(tmp2);
5293 tmp2 = neon_load_reg(rd, pass);
5294 if (size == 0) {
5295 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5296 } else {
5297 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5300 tcg_temp_free_ptr(fpstatus);
5301 break;
5303 case NEON_3R_FLOAT_CMP:
5305 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5306 if (!u) {
5307 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
5308 } else {
5309 if (size == 0) {
5310 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5311 } else {
5312 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5315 tcg_temp_free_ptr(fpstatus);
5316 break;
5318 case NEON_3R_FLOAT_ACMP:
5320 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5321 if (size == 0) {
5322 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5323 } else {
5324 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5326 tcg_temp_free_ptr(fpstatus);
5327 break;
5329 case NEON_3R_FLOAT_MINMAX:
5331 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5332 if (size == 0) {
5333 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
5334 } else {
5335 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
5337 tcg_temp_free_ptr(fpstatus);
5338 break;
5340 case NEON_3R_FLOAT_MISC:
5341 if (u) {
5342 /* VMAXNM/VMINNM */
5343 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5344 if (size == 0) {
5345 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
5346 } else {
5347 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
5349 tcg_temp_free_ptr(fpstatus);
5350 } else {
5351 if (size == 0) {
5352 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5353 } else {
5354 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5357 break;
5358 case NEON_3R_VFM_VQRDMLSH:
5360 /* VFMA, VFMS: fused multiply-add */
5361 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5362 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5363 if (size) {
5364 /* VFMS */
5365 gen_helper_vfp_negs(tmp, tmp);
5367 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5368 tcg_temp_free_i32(tmp3);
5369 tcg_temp_free_ptr(fpstatus);
5370 break;
5372 default:
5373 abort();
5375 tcg_temp_free_i32(tmp2);
5377 /* Save the result. For elementwise operations we can put it
5378 straight into the destination register. For pairwise operations
5379 we have to be careful to avoid clobbering the source operands. */
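             /* Illustrative example: VPADD.I8 d1, d3, d1 has rd == rm, so
              * writing pass 0's result straight to d1 would corrupt the rm
              * operand still needed in pass 1; hence the staging below.
              */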
5380 if (pairwise && rd == rm) {
5381 neon_store_scratch(pass, tmp);
5382 } else {
5383 neon_store_reg(rd, pass, tmp);
5386 } /* for pass */
5387 if (pairwise && rd == rm) {
5388 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5389 tmp = neon_load_scratch(pass);
5390 neon_store_reg(rd, pass, tmp);
5393 /* End of 3 register same size operations. */
5394 } else if (insn & (1 << 4)) {
5395 if ((insn & 0x00380080) != 0) {
5396 /* Two registers and shift. */
5397 op = (insn >> 8) & 0xf;
5398 if (insn & (1 << 7)) {
5399 /* 64-bit shift. */
5400 if (op > 7) {
5401 return 1;
5403 size = 3;
5404 } else {
5405 size = 2;
5406 while ((insn & (1 << (size + 19))) == 0)
5407 size--;
5409 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5410 if (op < 8) {
5411 /* Shift by immediate:
5412 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5413 if (q && ((rd | rm) & 1)) {
5414 return 1;
5416 if (!u && (op == 4 || op == 6)) {
5417 return 1;
5419 /* Right shifts are encoded as N - shift, where N is the
5420 element size in bits. */
5421 if (op <= 4) {
5422 shift = shift - (1 << (size + 3));
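                /* Worked example (illustrative): for 8-bit elements,
                 * imm6 == 0b001101 encodes an architectural right shift of
                 * 16 - 13 == 3; the masked field gives shift == 5, the
                 * subtraction above leaves -3, and the right-shift cases
                 * below negate it back to 3.
                 */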
5425 switch (op) {
5426 case 0: /* VSHR */
5427 /* Right shift comes here negative. */
5428 shift = -shift;
5429 /* Shifts larger than the element size are architecturally
5430 * valid. Unsigned results in all zeros; signed results
5431 * in all sign bits.
5433 if (!u) {
5434 tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
5435 MIN(shift, (8 << size) - 1),
5436 vec_size, vec_size);
5437 } else if (shift >= 8 << size) {
5438 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5439 } else {
5440 tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
5441 vec_size, vec_size);
5443 return 0;
5445 case 1: /* VSRA */
5446 /* Right shift comes here negative. */
5447 shift = -shift;
5448 /* Shifts larger than the element size are architecturally
5449 * valid. Unsigned results in all zeros; signed results
5450 * in all sign bits.
5452 if (!u) {
5453 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5454 MIN(shift, (8 << size) - 1),
5455 &ssra_op[size]);
5456 } else if (shift >= 8 << size) {
5457 /* rd += 0 */
5458 } else {
5459 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5460 shift, &usra_op[size]);
5462 return 0;
5464 case 4: /* VSRI */
5465 if (!u) {
5466 return 1;
5468 /* Right shift comes here negative. */
5469 shift = -shift;
5470 /* Shift out of range leaves destination unchanged. */
5471 if (shift < 8 << size) {
5472 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5473 shift, &sri_op[size]);
5475 return 0;
5477 case 5: /* VSHL, VSLI */
5478 if (u) { /* VSLI */
5479 /* Shift out of range leaves destination unchanged. */
5480 if (shift < 8 << size) {
5481 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
5482 vec_size, shift, &sli_op[size]);
5484 } else { /* VSHL */
5485 /* Shifts larger than the element size are
5486                      * architecturally valid and result in zero.
5488 if (shift >= 8 << size) {
5489 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5490 } else {
5491 tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
5492 vec_size, vec_size);
5495 return 0;
5498 if (size == 3) {
5499 count = q + 1;
5500 } else {
5501 count = q ? 4: 2;
5504 /* To avoid excessive duplication of ops we implement shift
5505 * by immediate using the variable shift operations.
5507 imm = dup_const(size, shift);
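            /* Illustrative example: for 16-bit elements and an architectural
             * right shift of 3, shift is -3 here and dup_const(MO_16, -3)
             * replicates 0xfffd into every 16-bit lane; the variable-shift
             * helpers read a signed per-element shift count from the bottom
             * byte, so the negative value produces a right shift.
             */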
5509 for (pass = 0; pass < count; pass++) {
5510 if (size == 3) {
5511 neon_load_reg64(cpu_V0, rm + pass);
5512 tcg_gen_movi_i64(cpu_V1, imm);
5513 switch (op) {
5514 case 2: /* VRSHR */
5515 case 3: /* VRSRA */
5516 if (u)
5517 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5518 else
5519 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5520 break;
5521 case 6: /* VQSHLU */
5522 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5523 cpu_V0, cpu_V1);
5524 break;
5525 case 7: /* VQSHL */
5526 if (u) {
5527 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5528 cpu_V0, cpu_V1);
5529 } else {
5530 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5531 cpu_V0, cpu_V1);
5533 break;
5534 default:
5535 g_assert_not_reached();
5537 if (op == 3) {
5538 /* Accumulate. */
5539 neon_load_reg64(cpu_V1, rd + pass);
5540 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5542 neon_store_reg64(cpu_V0, rd + pass);
5543 } else { /* size < 3 */
5544 /* Operands in T0 and T1. */
5545 tmp = neon_load_reg(rm, pass);
5546 tmp2 = tcg_temp_new_i32();
5547 tcg_gen_movi_i32(tmp2, imm);
5548 switch (op) {
5549 case 2: /* VRSHR */
5550 case 3: /* VRSRA */
5551 GEN_NEON_INTEGER_OP(rshl);
5552 break;
5553 case 6: /* VQSHLU */
5554 switch (size) {
5555 case 0:
5556 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5557 tmp, tmp2);
5558 break;
5559 case 1:
5560 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5561 tmp, tmp2);
5562 break;
5563 case 2:
5564 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5565 tmp, tmp2);
5566 break;
5567 default:
5568 abort();
5570 break;
5571 case 7: /* VQSHL */
5572 GEN_NEON_INTEGER_OP_ENV(qshl);
5573 break;
5574 default:
5575 g_assert_not_reached();
5577 tcg_temp_free_i32(tmp2);
5579 if (op == 3) {
5580 /* Accumulate. */
5581 tmp2 = neon_load_reg(rd, pass);
5582 gen_neon_add(size, tmp, tmp2);
5583 tcg_temp_free_i32(tmp2);
5585 neon_store_reg(rd, pass, tmp);
5587 } /* for pass */
5588 } else if (op < 10) {
5589 /* Shift by immediate and narrow:
5590 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5591 int input_unsigned = (op == 8) ? !u : u;
5592 if (rm & 1) {
5593 return 1;
5595 shift = shift - (1 << (size + 3));
5596 size++;
5597 if (size == 3) {
5598 tmp64 = tcg_const_i64(shift);
5599 neon_load_reg64(cpu_V0, rm);
5600 neon_load_reg64(cpu_V1, rm + 1);
5601 for (pass = 0; pass < 2; pass++) {
5602 TCGv_i64 in;
5603 if (pass == 0) {
5604 in = cpu_V0;
5605 } else {
5606 in = cpu_V1;
5608 if (q) {
5609 if (input_unsigned) {
5610 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5611 } else {
5612 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5614 } else {
5615 if (input_unsigned) {
5616 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5617 } else {
5618 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5621 tmp = tcg_temp_new_i32();
5622 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5623 neon_store_reg(rd, pass, tmp);
5624 } /* for pass */
5625 tcg_temp_free_i64(tmp64);
5626 } else {
5627 if (size == 1) {
5628 imm = (uint16_t)shift;
5629 imm |= imm << 16;
5630 } else {
5631 /* size == 2 */
5632 imm = (uint32_t)shift;
5634 tmp2 = tcg_const_i32(imm);
5635 tmp4 = neon_load_reg(rm + 1, 0);
5636 tmp5 = neon_load_reg(rm + 1, 1);
5637 for (pass = 0; pass < 2; pass++) {
5638 if (pass == 0) {
5639 tmp = neon_load_reg(rm, 0);
5640 } else {
5641 tmp = tmp4;
5643 gen_neon_shift_narrow(size, tmp, tmp2, q,
5644 input_unsigned);
5645 if (pass == 0) {
5646 tmp3 = neon_load_reg(rm, 1);
5647 } else {
5648 tmp3 = tmp5;
5650 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5651 input_unsigned);
5652 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5653 tcg_temp_free_i32(tmp);
5654 tcg_temp_free_i32(tmp3);
5655 tmp = tcg_temp_new_i32();
5656 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5657 neon_store_reg(rd, pass, tmp);
5658 } /* for pass */
5659 tcg_temp_free_i32(tmp2);
5661 } else if (op == 10) {
5662 /* VSHLL, VMOVL */
5663 if (q || (rd & 1)) {
5664 return 1;
5666 tmp = neon_load_reg(rm, 0);
5667 tmp2 = neon_load_reg(rm, 1);
5668 for (pass = 0; pass < 2; pass++) {
5669 if (pass == 1)
5670 tmp = tmp2;
5672 gen_neon_widen(cpu_V0, tmp, size, u);
5674 if (shift != 0) {
5675 /* The shift is less than the width of the source
5676 type, so we can just shift the whole register. */
5677 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5678                     /* Widen the result of the shift: we need to clear
5679                      * the potential overflow bits resulting from
5680                      * left bits of the narrow input appearing as
5681                      * right bits of the left neighbour narrow
5682                      * input. */
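                    /* Worked example (illustrative): for size == 0 and
                     * shift == 3, imm becomes 0xff >> 5 == 0x07 and
                     * imm64 == 0x0007000700070007; ANDing with ~imm64
                     * clears the low three bits of each 16-bit lane,
                     * exactly where the lower neighbour's sign bits land
                     * after the 64-bit left shift.
                     */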
5683 if (size < 2 || !u) {
5684 uint64_t imm64;
5685 if (size == 0) {
5686 imm = (0xffu >> (8 - shift));
5687 imm |= imm << 16;
5688 } else if (size == 1) {
5689 imm = 0xffff >> (16 - shift);
5690 } else {
5691 /* size == 2 */
5692 imm = 0xffffffff >> (32 - shift);
5694 if (size < 2) {
5695 imm64 = imm | (((uint64_t)imm) << 32);
5696 } else {
5697 imm64 = imm;
5699 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5702 neon_store_reg64(cpu_V0, rd + pass);
5704 } else if (op >= 14) {
5705 /* VCVT fixed-point. */
5706 TCGv_ptr fpst;
5707 TCGv_i32 shiftv;
5708 VFPGenFixPointFn *fn;
5710 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5711 return 1;
5714 if (!(op & 1)) {
5715 if (u) {
5716 fn = gen_helper_vfp_ultos;
5717 } else {
5718 fn = gen_helper_vfp_sltos;
5720 } else {
5721 if (u) {
5722 fn = gen_helper_vfp_touls_round_to_zero;
5723 } else {
5724 fn = gen_helper_vfp_tosls_round_to_zero;
5728 /* We have already masked out the must-be-1 top bit of imm6,
5729 * hence this 32-shift where the ARM ARM has 64-imm6.
5731 shift = 32 - shift;
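            /* Worked example (illustrative): imm6 == 0b111100 (60) means
             * 64 - 60 == 4 fraction bits in the ARM ARM; with the
             * must-be-1 top bit already masked off, shift arrives as 28
             * and the line above recovers 32 - 28 == 4.
             */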
5732 fpst = get_fpstatus_ptr(1);
5733 shiftv = tcg_const_i32(shift);
5734 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5735 TCGv_i32 tmpf = neon_load_reg(rm, pass);
5736 fn(tmpf, tmpf, shiftv, fpst);
5737 neon_store_reg(rd, pass, tmpf);
5739 tcg_temp_free_ptr(fpst);
5740 tcg_temp_free_i32(shiftv);
5741 } else {
5742 return 1;
5744 } else { /* (insn & 0x00380080) == 0 */
5745 int invert, reg_ofs, vec_size;
5747 if (q && (rd & 1)) {
5748 return 1;
5751 op = (insn >> 8) & 0xf;
5752 /* One register and immediate. */
5753 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5754 invert = (insn & (1 << 5)) != 0;
5755 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5756 * We choose to not special-case this and will behave as if a
5757 * valid constant encoding of 0 had been given.
5759 switch (op) {
5760 case 0: case 1:
5761 /* no-op */
5762 break;
5763 case 2: case 3:
5764 imm <<= 8;
5765 break;
5766 case 4: case 5:
5767 imm <<= 16;
5768 break;
5769 case 6: case 7:
5770 imm <<= 24;
5771 break;
5772 case 8: case 9:
5773 imm |= imm << 16;
5774 break;
5775 case 10: case 11:
5776 imm = (imm << 8) | (imm << 24);
5777 break;
5778 case 12:
5779 imm = (imm << 8) | 0xff;
5780 break;
5781 case 13:
5782 imm = (imm << 16) | 0xffff;
5783 break;
5784 case 14:
5785 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5786 if (invert) {
5787 imm = ~imm;
5789 break;
5790 case 15:
5791 if (invert) {
5792 return 1;
5794 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5795 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5796 break;
5798 if (invert) {
5799 imm = ~imm;
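        /* Worked example (illustrative): op == 12 (cmode 0b1100) with an
         * 8-bit immediate of 0xab yields imm == 0x0000abff, or 0xffff5400
         * once inverted; odd op values below 12 are ORed/ANDed into the
         * register (VORR/VBIC forms) while the rest are broadcast as
         * VMOV/VMVN, with op == 14 plus invert taking the per-byte
         * expansion path further down.
         */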
5802 reg_ofs = neon_reg_offset(rd, 0);
5803 vec_size = q ? 16 : 8;
5805 if (op & 1 && op < 12) {
5806 if (invert) {
5807 /* The immediate value has already been inverted,
5808 * so BIC becomes AND.
5810 tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
5811 vec_size, vec_size);
5812 } else {
5813 tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
5814 vec_size, vec_size);
5816 } else {
5817 /* VMOV, VMVN. */
5818 if (op == 14 && invert) {
5819 TCGv_i64 t64 = tcg_temp_new_i64();
5821 for (pass = 0; pass <= q; ++pass) {
5822 uint64_t val = 0;
5823 int n;
5825 for (n = 0; n < 8; n++) {
5826 if (imm & (1 << (n + pass * 8))) {
5827 val |= 0xffull << (n * 8);
5830 tcg_gen_movi_i64(t64, val);
5831 neon_store_reg64(t64, rd + pass);
5833 tcg_temp_free_i64(t64);
5834 } else {
5835 tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
5839 } else { /* (insn & 0x00800010 == 0x00800000) */
5840 if (size != 3) {
5841 op = (insn >> 8) & 0xf;
5842 if ((insn & (1 << 6)) == 0) {
5843 /* Three registers of different lengths. */
5844 int src1_wide;
5845 int src2_wide;
5846 int prewiden;
5847 /* undefreq: bit 0 : UNDEF if size == 0
5848 * bit 1 : UNDEF if size == 1
5849 * bit 2 : UNDEF if size == 2
5850 * bit 3 : UNDEF if U == 1
5851 * Note that [2:0] set implies 'always UNDEF'
5853 int undefreq;
5854 /* prewiden, src1_wide, src2_wide, undefreq */
5855 static const int neon_3reg_wide[16][4] = {
5856 {1, 0, 0, 0}, /* VADDL */
5857 {1, 1, 0, 0}, /* VADDW */
5858 {1, 0, 0, 0}, /* VSUBL */
5859 {1, 1, 0, 0}, /* VSUBW */
5860 {0, 1, 1, 0}, /* VADDHN */
5861 {0, 0, 0, 0}, /* VABAL */
5862 {0, 1, 1, 0}, /* VSUBHN */
5863 {0, 0, 0, 0}, /* VABDL */
5864 {0, 0, 0, 0}, /* VMLAL */
5865 {0, 0, 0, 9}, /* VQDMLAL */
5866 {0, 0, 0, 0}, /* VMLSL */
5867 {0, 0, 0, 9}, /* VQDMLSL */
5868 {0, 0, 0, 0}, /* Integer VMULL */
5869 {0, 0, 0, 1}, /* VQDMULL */
5870 {0, 0, 0, 0xa}, /* Polynomial VMULL */
5871 {0, 0, 0, 7}, /* Reserved: always UNDEF */
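            /* Illustrative reading of undefreq: the VQDMLAL row's 9 sets
             * bits 0 and 3, i.e. UNDEF when size == 0 or U == 1; the
             * polynomial VMULL row's 0xa sets bits 1 and 3, i.e. UNDEF
             * when size == 1 or U == 1, matching the check just below.
             */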
5874 prewiden = neon_3reg_wide[op][0];
5875 src1_wide = neon_3reg_wide[op][1];
5876 src2_wide = neon_3reg_wide[op][2];
5877 undefreq = neon_3reg_wide[op][3];
5879 if ((undefreq & (1 << size)) ||
5880 ((undefreq & 8) && u)) {
5881 return 1;
5883 if ((src1_wide && (rn & 1)) ||
5884 (src2_wide && (rm & 1)) ||
5885 (!src2_wide && (rd & 1))) {
5886 return 1;
5889 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
5890 * outside the loop below as it only performs a single pass.
5892 if (op == 14 && size == 2) {
5893 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
5895 if (!dc_isar_feature(aa32_pmull, s)) {
5896 return 1;
5898 tcg_rn = tcg_temp_new_i64();
5899 tcg_rm = tcg_temp_new_i64();
5900 tcg_rd = tcg_temp_new_i64();
5901 neon_load_reg64(tcg_rn, rn);
5902 neon_load_reg64(tcg_rm, rm);
5903 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
5904 neon_store_reg64(tcg_rd, rd);
5905 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
5906 neon_store_reg64(tcg_rd, rd + 1);
5907 tcg_temp_free_i64(tcg_rn);
5908 tcg_temp_free_i64(tcg_rm);
5909 tcg_temp_free_i64(tcg_rd);
5910 return 0;
5913 /* Avoid overlapping operands. Wide source operands are
5914 always aligned so will never overlap with wide
5915 destinations in problematic ways. */
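            /* Illustrative example: VABAL q0, d0, d2 has rd == rn with a
             * narrow first source; pass 0's 64-bit store clobbers d0, so
             * its second 32-bit half is parked in scratch slot 2 below and
             * reloaded for pass 1.
             */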
5916 if (rd == rm && !src2_wide) {
5917 tmp = neon_load_reg(rm, 1);
5918 neon_store_scratch(2, tmp);
5919 } else if (rd == rn && !src1_wide) {
5920 tmp = neon_load_reg(rn, 1);
5921 neon_store_scratch(2, tmp);
5923 tmp3 = NULL;
5924 for (pass = 0; pass < 2; pass++) {
5925 if (src1_wide) {
5926 neon_load_reg64(cpu_V0, rn + pass);
5927 tmp = NULL;
5928 } else {
5929 if (pass == 1 && rd == rn) {
5930 tmp = neon_load_scratch(2);
5931 } else {
5932 tmp = neon_load_reg(rn, pass);
5934 if (prewiden) {
5935 gen_neon_widen(cpu_V0, tmp, size, u);
5938 if (src2_wide) {
5939 neon_load_reg64(cpu_V1, rm + pass);
5940 tmp2 = NULL;
5941 } else {
5942 if (pass == 1 && rd == rm) {
5943 tmp2 = neon_load_scratch(2);
5944 } else {
5945 tmp2 = neon_load_reg(rm, pass);
5947 if (prewiden) {
5948 gen_neon_widen(cpu_V1, tmp2, size, u);
5951 switch (op) {
5952 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5953 gen_neon_addl(size);
5954 break;
5955 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5956 gen_neon_subl(size);
5957 break;
5958 case 5: case 7: /* VABAL, VABDL */
5959 switch ((size << 1) | u) {
5960 case 0:
5961 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5962 break;
5963 case 1:
5964 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5965 break;
5966 case 2:
5967 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5968 break;
5969 case 3:
5970 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5971 break;
5972 case 4:
5973 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5974 break;
5975 case 5:
5976 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5977 break;
5978 default: abort();
5980 tcg_temp_free_i32(tmp2);
5981 tcg_temp_free_i32(tmp);
5982 break;
5983 case 8: case 9: case 10: case 11: case 12: case 13:
5984 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5985 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5986 break;
5987 case 14: /* Polynomial VMULL */
5988 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5989 tcg_temp_free_i32(tmp2);
5990 tcg_temp_free_i32(tmp);
5991 break;
5992 default: /* 15 is RESERVED: caught earlier */
5993 abort();
5995 if (op == 13) {
5996 /* VQDMULL */
5997 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5998 neon_store_reg64(cpu_V0, rd + pass);
5999 } else if (op == 5 || (op >= 8 && op <= 11)) {
6000 /* Accumulate. */
6001 neon_load_reg64(cpu_V1, rd + pass);
6002 switch (op) {
6003 case 10: /* VMLSL */
6004 gen_neon_negl(cpu_V0, size);
6005 /* Fall through */
6006 case 5: case 8: /* VABAL, VMLAL */
6007 gen_neon_addl(size);
6008 break;
6009 case 9: case 11: /* VQDMLAL, VQDMLSL */
6010 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6011 if (op == 11) {
6012 gen_neon_negl(cpu_V0, size);
6014 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6015 break;
6016 default:
6017 abort();
6019 neon_store_reg64(cpu_V0, rd + pass);
6020 } else if (op == 4 || op == 6) {
6021 /* Narrowing operation. */
6022 tmp = tcg_temp_new_i32();
6023 if (!u) {
6024 switch (size) {
6025 case 0:
6026 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6027 break;
6028 case 1:
6029 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6030 break;
6031 case 2:
6032 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6033 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6034 break;
6035 default: abort();
6037 } else {
6038 switch (size) {
6039 case 0:
6040 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6041 break;
6042 case 1:
6043 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6044 break;
6045 case 2:
6046 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6047 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6048 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6049 break;
6050 default: abort();
6053 if (pass == 0) {
6054 tmp3 = tmp;
6055 } else {
6056 neon_store_reg(rd, 0, tmp3);
6057 neon_store_reg(rd, 1, tmp);
6059 } else {
6060 /* Write back the result. */
6061 neon_store_reg64(cpu_V0, rd + pass);
6064 } else {
6065 /* Two registers and a scalar. NB that for ops of this form
6066 * the ARM ARM labels bit 24 as Q, but it is in our variable
6067 * 'u', not 'q'.
6069 if (size == 0) {
6070 return 1;
6072 switch (op) {
6073             case 1: /* Floating point VMLA scalar */
6074 case 5: /* Floating point VMLS scalar */
6075 case 9: /* Floating point VMUL scalar */
6076 if (size == 1) {
6077 return 1;
6079 /* fall through */
6080 case 0: /* Integer VMLA scalar */
6081 case 4: /* Integer VMLS scalar */
6082 case 8: /* Integer VMUL scalar */
6083 case 12: /* VQDMULH scalar */
6084 case 13: /* VQRDMULH scalar */
6085 if (u && ((rd | rn) & 1)) {
6086 return 1;
6088 tmp = neon_get_scalar(size, rm);
6089 neon_store_scratch(0, tmp);
6090 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6091 tmp = neon_load_scratch(0);
6092 tmp2 = neon_load_reg(rn, pass);
6093 if (op == 12) {
6094 if (size == 1) {
6095 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6096 } else {
6097 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6099 } else if (op == 13) {
6100 if (size == 1) {
6101 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6102 } else {
6103 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6105 } else if (op & 1) {
6106 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6107 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6108 tcg_temp_free_ptr(fpstatus);
6109 } else {
6110 switch (size) {
6111 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6112 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6113 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6114 default: abort();
6117 tcg_temp_free_i32(tmp2);
6118 if (op < 8) {
6119 /* Accumulate. */
6120 tmp2 = neon_load_reg(rd, pass);
6121 switch (op) {
6122 case 0:
6123 gen_neon_add(size, tmp, tmp2);
6124 break;
6125 case 1:
6127 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6128 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6129 tcg_temp_free_ptr(fpstatus);
6130 break;
6132 case 4:
6133 gen_neon_rsb(size, tmp, tmp2);
6134 break;
6135 case 5:
6137 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6138 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6139 tcg_temp_free_ptr(fpstatus);
6140 break;
6142 default:
6143 abort();
6145 tcg_temp_free_i32(tmp2);
6147 neon_store_reg(rd, pass, tmp);
6149 break;
6150 case 3: /* VQDMLAL scalar */
6151 case 7: /* VQDMLSL scalar */
6152 case 11: /* VQDMULL scalar */
6153 if (u == 1) {
6154 return 1;
6156 /* fall through */
6157             case 2: /* VMLAL scalar */
6158 case 6: /* VMLSL scalar */
6159 case 10: /* VMULL scalar */
6160 if (rd & 1) {
6161 return 1;
6163 tmp2 = neon_get_scalar(size, rm);
6164 /* We need a copy of tmp2 because gen_neon_mull
6165 * deletes it during pass 0. */
6166 tmp4 = tcg_temp_new_i32();
6167 tcg_gen_mov_i32(tmp4, tmp2);
6168 tmp3 = neon_load_reg(rn, 1);
6170 for (pass = 0; pass < 2; pass++) {
6171 if (pass == 0) {
6172 tmp = neon_load_reg(rn, 0);
6173 } else {
6174 tmp = tmp3;
6175 tmp2 = tmp4;
6177 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6178 if (op != 11) {
6179 neon_load_reg64(cpu_V1, rd + pass);
6181 switch (op) {
6182 case 6:
6183 gen_neon_negl(cpu_V0, size);
6184 /* Fall through */
6185 case 2:
6186 gen_neon_addl(size);
6187 break;
6188 case 3: case 7:
6189 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6190 if (op == 7) {
6191 gen_neon_negl(cpu_V0, size);
6193 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6194 break;
6195 case 10:
6196 /* no-op */
6197 break;
6198 case 11:
6199 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6200 break;
6201 default:
6202 abort();
6204 neon_store_reg64(cpu_V0, rd + pass);
6206 break;
6207 case 14: /* VQRDMLAH scalar */
6208 case 15: /* VQRDMLSH scalar */
6210 NeonGenThreeOpEnvFn *fn;
6212 if (!dc_isar_feature(aa32_rdm, s)) {
6213 return 1;
6215 if (u && ((rd | rn) & 1)) {
6216 return 1;
6218 if (op == 14) {
6219 if (size == 1) {
6220 fn = gen_helper_neon_qrdmlah_s16;
6221 } else {
6222 fn = gen_helper_neon_qrdmlah_s32;
6224 } else {
6225 if (size == 1) {
6226 fn = gen_helper_neon_qrdmlsh_s16;
6227 } else {
6228 fn = gen_helper_neon_qrdmlsh_s32;
6232 tmp2 = neon_get_scalar(size, rm);
6233 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6234 tmp = neon_load_reg(rn, pass);
6235 tmp3 = neon_load_reg(rd, pass);
6236 fn(tmp, cpu_env, tmp, tmp2, tmp3);
6237 tcg_temp_free_i32(tmp3);
6238 neon_store_reg(rd, pass, tmp);
6240 tcg_temp_free_i32(tmp2);
6242 break;
6243 default:
6244 g_assert_not_reached();
6247 } else { /* size == 3 */
6248 if (!u) {
6249 /* Extract. */
6250 imm = (insn >> 8) & 0xf;
6252 if (imm > 7 && !q)
6253 return 1;
6255 if (q && ((rd | rn | rm) & 1)) {
6256 return 1;
6259 if (imm == 0) {
6260 neon_load_reg64(cpu_V0, rn);
6261 if (q) {
6262 neon_load_reg64(cpu_V1, rn + 1);
6264 } else if (imm == 8) {
6265 neon_load_reg64(cpu_V0, rn + 1);
6266 if (q) {
6267 neon_load_reg64(cpu_V1, rm);
6269 } else if (q) {
6270 tmp64 = tcg_temp_new_i64();
6271 if (imm < 8) {
6272 neon_load_reg64(cpu_V0, rn);
6273 neon_load_reg64(tmp64, rn + 1);
6274 } else {
6275 neon_load_reg64(cpu_V0, rn + 1);
6276 neon_load_reg64(tmp64, rm);
6278 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
6279 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
6280 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6281 if (imm < 8) {
6282 neon_load_reg64(cpu_V1, rm);
6283 } else {
6284 neon_load_reg64(cpu_V1, rm + 1);
6285 imm -= 8;
6287 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6288 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6289 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
6290 tcg_temp_free_i64(tmp64);
6291 } else {
6292 /* BUGFIX */
6293 neon_load_reg64(cpu_V0, rn);
6294 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
6295 neon_load_reg64(cpu_V1, rm);
6296 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6297 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6299 neon_store_reg64(cpu_V0, rd);
6300 if (q) {
6301 neon_store_reg64(cpu_V1, rd + 1);
6303 } else if ((insn & (1 << 11)) == 0) {
6304 /* Two register misc. */
6305 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6306 size = (insn >> 18) & 3;
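/* op is a 6-bit index assembled from insn[17:16] (top two bits) and
 * insn[10:7] (low four bits); it selects one of the NEON_2RM_*
 * operations and indexes the neon_2rm_sizes[] validity table used
 * just below.
 */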
6307 /* UNDEF for unknown op values and bad op-size combinations */
6308 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6309 return 1;
6311 if (neon_2rm_is_v8_op(op) &&
6312 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6313 return 1;
6315 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6316 q && ((rm | rd) & 1)) {
6317 return 1;
6319 switch (op) {
6320 case NEON_2RM_VREV64:
6321 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6322 tmp = neon_load_reg(rm, pass * 2);
6323 tmp2 = neon_load_reg(rm, pass * 2 + 1);
6324 switch (size) {
6325 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6326 case 1: gen_swap_half(tmp); break;
6327 case 2: /* no-op */ break;
6328 default: abort();
6330 neon_store_reg(rd, pass * 2 + 1, tmp);
6331 if (size == 2) {
6332 neon_store_reg(rd, pass * 2, tmp2);
6333 } else {
6334 switch (size) {
6335 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6336 case 1: gen_swap_half(tmp2); break;
6337 default: abort();
6339 neon_store_reg(rd, pass * 2, tmp2);
6342 break;
6343 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6344 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
6345 for (pass = 0; pass < q + 1; pass++) {
6346 tmp = neon_load_reg(rm, pass * 2);
6347 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6348 tmp = neon_load_reg(rm, pass * 2 + 1);
6349 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6350 switch (size) {
6351 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6352 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6353 case 2: tcg_gen_add_i64(CPU_V001); break;
6354 default: abort();
6356 if (op >= NEON_2RM_VPADAL) {
6357 /* Accumulate. */
6358 neon_load_reg64(cpu_V1, rd + pass);
6359 gen_neon_addl(size);
6361 neon_store_reg64(cpu_V0, rd + pass);
6363 break;
6364 case NEON_2RM_VTRN:
6365 if (size == 2) {
6366 int n;
6367 for (n = 0; n < (q ? 4 : 2); n += 2) {
6368 tmp = neon_load_reg(rm, n);
6369 tmp2 = neon_load_reg(rd, n + 1);
6370 neon_store_reg(rm, n, tmp2);
6371 neon_store_reg(rd, n + 1, tmp);
6373 } else {
6374 goto elementwise;
6376 break;
6377 case NEON_2RM_VUZP:
6378 if (gen_neon_unzip(rd, rm, size, q)) {
6379 return 1;
6381 break;
6382 case NEON_2RM_VZIP:
6383 if (gen_neon_zip(rd, rm, size, q)) {
6384 return 1;
6386 break;
6387 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6388 /* also VQMOVUN; op field and mnemonics don't line up */
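/* Each pass narrows one 64-bit half of the Q-sized source into a
 * 32-bit result.  The pass-0 result is buffered in tmp2 and both
 * halves are stored only after pass 1, so the source pair is not
 * clobbered before pass 1 reads it when rd overlaps rm:rm+1.
 */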
6389 if (rm & 1) {
6390 return 1;
6392 tmp2 = NULL;
6393 for (pass = 0; pass < 2; pass++) {
6394 neon_load_reg64(cpu_V0, rm + pass);
6395 tmp = tcg_temp_new_i32();
6396 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6397 tmp, cpu_V0);
6398 if (pass == 0) {
6399 tmp2 = tmp;
6400 } else {
6401 neon_store_reg(rd, 0, tmp2);
6402 neon_store_reg(rd, 1, tmp);
6405 break;
6406 case NEON_2RM_VSHLL:
6407 if (q || (rd & 1)) {
6408 return 1;
6410 tmp = neon_load_reg(rm, 0);
6411 tmp2 = neon_load_reg(rm, 1);
6412 for (pass = 0; pass < 2; pass++) {
6413 if (pass == 1)
6414 tmp = tmp2;
6415 gen_neon_widen(cpu_V0, tmp, size, 1);
6416 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
6417 neon_store_reg64(cpu_V0, rd + pass);
6419 break;
6420 case NEON_2RM_VCVT_F16_F32:
6422 TCGv_ptr fpst;
6423 TCGv_i32 ahp;
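/* Convert the four single-precision lanes of rm to half precision
 * and pack them pairwise: rd[0] = f16(rm[1]) << 16 | f16(rm[0]) and
 * rd[1] = f16(rm[3]) << 16 | f16(rm[2]).  ahp carries the FPSCR
 * alternative-half-precision flag for the conversion helpers.
 */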
6425 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
6426 q || (rm & 1)) {
6427 return 1;
6429 fpst = get_fpstatus_ptr(true);
6430 ahp = get_ahp_flag();
6431 tmp = neon_load_reg(rm, 0);
6432 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6433 tmp2 = neon_load_reg(rm, 1);
6434 gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
6435 tcg_gen_shli_i32(tmp2, tmp2, 16);
6436 tcg_gen_or_i32(tmp2, tmp2, tmp);
6437 tcg_temp_free_i32(tmp);
6438 tmp = neon_load_reg(rm, 2);
6439 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6440 tmp3 = neon_load_reg(rm, 3);
6441 neon_store_reg(rd, 0, tmp2);
6442 gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
6443 tcg_gen_shli_i32(tmp3, tmp3, 16);
6444 tcg_gen_or_i32(tmp3, tmp3, tmp);
6445 neon_store_reg(rd, 1, tmp3);
6446 tcg_temp_free_i32(tmp);
6447 tcg_temp_free_i32(ahp);
6448 tcg_temp_free_ptr(fpst);
6449 break;
6451 case NEON_2RM_VCVT_F32_F16:
6453 TCGv_ptr fpst;
6454 TCGv_i32 ahp;
6455 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
6456 q || (rd & 1)) {
6457 return 1;
6459 fpst = get_fpstatus_ptr(true);
6460 ahp = get_ahp_flag();
6461 tmp3 = tcg_temp_new_i32();
6462 tmp = neon_load_reg(rm, 0);
6463 tmp2 = neon_load_reg(rm, 1);
6464 tcg_gen_ext16u_i32(tmp3, tmp);
6465 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6466 neon_store_reg(rd, 0, tmp3);
6467 tcg_gen_shri_i32(tmp, tmp, 16);
6468 gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
6469 neon_store_reg(rd, 1, tmp);
6470 tmp3 = tcg_temp_new_i32();
6471 tcg_gen_ext16u_i32(tmp3, tmp2);
6472 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6473 neon_store_reg(rd, 2, tmp3);
6474 tcg_gen_shri_i32(tmp2, tmp2, 16);
6475 gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
6476 neon_store_reg(rd, 3, tmp2);
6477 tcg_temp_free_i32(ahp);
6478 tcg_temp_free_ptr(fpst);
6479 break;
6481 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6482 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
6483 return 1;
6485 ptr1 = vfp_reg_ptr(true, rd);
6486 ptr2 = vfp_reg_ptr(true, rm);
6488 /* Bit 6 is the lowest opcode bit; it distinguishes between
6489 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6491 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6493 if (op == NEON_2RM_AESE) {
6494 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
6495 } else {
6496 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
6498 tcg_temp_free_ptr(ptr1);
6499 tcg_temp_free_ptr(ptr2);
6500 tcg_temp_free_i32(tmp3);
6501 break;
6502 case NEON_2RM_SHA1H:
6503 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
6504 return 1;
6506 ptr1 = vfp_reg_ptr(true, rd);
6507 ptr2 = vfp_reg_ptr(true, rm);
6509 gen_helper_crypto_sha1h(ptr1, ptr2);
6511 tcg_temp_free_ptr(ptr1);
6512 tcg_temp_free_ptr(ptr2);
6513 break;
6514 case NEON_2RM_SHA1SU1:
6515 if ((rm | rd) & 1) {
6516 return 1;
6518 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6519 if (q) {
6520 if (!dc_isar_feature(aa32_sha2, s)) {
6521 return 1;
6523 } else if (!dc_isar_feature(aa32_sha1, s)) {
6524 return 1;
6526 ptr1 = vfp_reg_ptr(true, rd);
6527 ptr2 = vfp_reg_ptr(true, rm);
6528 if (q) {
6529 gen_helper_crypto_sha256su0(ptr1, ptr2);
6530 } else {
6531 gen_helper_crypto_sha1su1(ptr1, ptr2);
6533 tcg_temp_free_ptr(ptr1);
6534 tcg_temp_free_ptr(ptr2);
6535 break;
6537 case NEON_2RM_VMVN:
6538 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
6539 break;
6540 case NEON_2RM_VNEG:
6541 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
6542 break;
6543 case NEON_2RM_VABS:
6544 tcg_gen_gvec_abs(size, rd_ofs, rm_ofs, vec_size, vec_size);
6545 break;
6547 default:
6548 elementwise:
6549 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6550 tmp = neon_load_reg(rm, pass);
6551 switch (op) {
6552 case NEON_2RM_VREV32:
6553 switch (size) {
6554 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6555 case 1: gen_swap_half(tmp); break;
6556 default: abort();
6558 break;
6559 case NEON_2RM_VREV16:
6560 gen_rev16(tmp);
6561 break;
6562 case NEON_2RM_VCLS:
6563 switch (size) {
6564 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6565 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6566 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
6567 default: abort();
6569 break;
6570 case NEON_2RM_VCLZ:
6571 switch (size) {
6572 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6573 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6574 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
6575 default: abort();
6577 break;
6578 case NEON_2RM_VCNT:
6579 gen_helper_neon_cnt_u8(tmp, tmp);
6580 break;
6581 case NEON_2RM_VQABS:
6582 switch (size) {
6583 case 0:
6584 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6585 break;
6586 case 1:
6587 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6588 break;
6589 case 2:
6590 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6591 break;
6592 default: abort();
6594 break;
6595 case NEON_2RM_VQNEG:
6596 switch (size) {
6597 case 0:
6598 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6599 break;
6600 case 1:
6601 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6602 break;
6603 case 2:
6604 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6605 break;
6606 default: abort();
6608 break;
6609 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6610 tmp2 = tcg_const_i32(0);
6611 switch(size) {
6612 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6613 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6614 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6615 default: abort();
6617 tcg_temp_free_i32(tmp2);
6618 if (op == NEON_2RM_VCLE0) {
6619 tcg_gen_not_i32(tmp, tmp);
6621 break;
6622 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6623 tmp2 = tcg_const_i32(0);
6624 switch(size) {
6625 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6626 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6627 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6628 default: abort();
6630 tcg_temp_free_i32(tmp2);
6631 if (op == NEON_2RM_VCLT0) {
6632 tcg_gen_not_i32(tmp, tmp);
6634 break;
6635 case NEON_2RM_VCEQ0:
6636 tmp2 = tcg_const_i32(0);
6637 switch(size) {
6638 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6639 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6640 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6641 default: abort();
6643 tcg_temp_free_i32(tmp2);
6644 break;
6645 case NEON_2RM_VCGT0_F:
6647 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6648 tmp2 = tcg_const_i32(0);
6649 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6650 tcg_temp_free_i32(tmp2);
6651 tcg_temp_free_ptr(fpstatus);
6652 break;
6654 case NEON_2RM_VCGE0_F:
6656 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6657 tmp2 = tcg_const_i32(0);
6658 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6659 tcg_temp_free_i32(tmp2);
6660 tcg_temp_free_ptr(fpstatus);
6661 break;
6663 case NEON_2RM_VCEQ0_F:
6665 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6666 tmp2 = tcg_const_i32(0);
6667 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6668 tcg_temp_free_i32(tmp2);
6669 tcg_temp_free_ptr(fpstatus);
6670 break;
6672 case NEON_2RM_VCLE0_F:
6674 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6675 tmp2 = tcg_const_i32(0);
6676 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6677 tcg_temp_free_i32(tmp2);
6678 tcg_temp_free_ptr(fpstatus);
6679 break;
6681 case NEON_2RM_VCLT0_F:
6683 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6684 tmp2 = tcg_const_i32(0);
6685 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6686 tcg_temp_free_i32(tmp2);
6687 tcg_temp_free_ptr(fpstatus);
6688 break;
6690 case NEON_2RM_VABS_F:
6691 gen_helper_vfp_abss(tmp, tmp);
6692 break;
6693 case NEON_2RM_VNEG_F:
6694 gen_helper_vfp_negs(tmp, tmp);
6695 break;
6696 case NEON_2RM_VSWP:
6697 tmp2 = neon_load_reg(rd, pass);
6698 neon_store_reg(rm, pass, tmp2);
6699 break;
6700 case NEON_2RM_VTRN:
6701 tmp2 = neon_load_reg(rd, pass);
6702 switch (size) {
6703 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6704 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6705 default: abort();
6707 neon_store_reg(rm, pass, tmp2);
6708 break;
6709 case NEON_2RM_VRINTN:
6710 case NEON_2RM_VRINTA:
6711 case NEON_2RM_VRINTM:
6712 case NEON_2RM_VRINTP:
6713 case NEON_2RM_VRINTZ:
6715 TCGv_i32 tcg_rmode;
6716 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6717 int rmode;
6719 if (op == NEON_2RM_VRINTZ) {
6720 rmode = FPROUNDING_ZERO;
6721 } else {
6722 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6725 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6726 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6727 cpu_env);
6728 gen_helper_rints(tmp, tmp, fpstatus);
6729 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6730 cpu_env);
6731 tcg_temp_free_ptr(fpstatus);
6732 tcg_temp_free_i32(tcg_rmode);
6733 break;
6735 case NEON_2RM_VRINTX:
6737 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6738 gen_helper_rints_exact(tmp, tmp, fpstatus);
6739 tcg_temp_free_ptr(fpstatus);
6740 break;
6742 case NEON_2RM_VCVTAU:
6743 case NEON_2RM_VCVTAS:
6744 case NEON_2RM_VCVTNU:
6745 case NEON_2RM_VCVTNS:
6746 case NEON_2RM_VCVTPU:
6747 case NEON_2RM_VCVTPS:
6748 case NEON_2RM_VCVTMU:
6749 case NEON_2RM_VCVTMS:
6751 bool is_signed = !extract32(insn, 7, 1);
6752 TCGv_ptr fpst = get_fpstatus_ptr(1);
6753 TCGv_i32 tcg_rmode, tcg_shift;
6754 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
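/* Bits [9:8] of the insn select the rounding mode (via
 * fp_decode_rm[]) and bit 7 clear means a signed result.  The FP
 * rounding mode is switched before the conversion and restored
 * afterwards by the paired set_neon_rmode calls.
 */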
6756 tcg_shift = tcg_const_i32(0);
6757 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6758 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6759 cpu_env);
6761 if (is_signed) {
6762 gen_helper_vfp_tosls(tmp, tmp,
6763 tcg_shift, fpst);
6764 } else {
6765 gen_helper_vfp_touls(tmp, tmp,
6766 tcg_shift, fpst);
6769 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6770 cpu_env);
6771 tcg_temp_free_i32(tcg_rmode);
6772 tcg_temp_free_i32(tcg_shift);
6773 tcg_temp_free_ptr(fpst);
6774 break;
6776 case NEON_2RM_VRECPE:
6778 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6779 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6780 tcg_temp_free_ptr(fpstatus);
6781 break;
6783 case NEON_2RM_VRSQRTE:
6785 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6786 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6787 tcg_temp_free_ptr(fpstatus);
6788 break;
6790 case NEON_2RM_VRECPE_F:
6792 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6793 gen_helper_recpe_f32(tmp, tmp, fpstatus);
6794 tcg_temp_free_ptr(fpstatus);
6795 break;
6797 case NEON_2RM_VRSQRTE_F:
6799 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6800 gen_helper_rsqrte_f32(tmp, tmp, fpstatus);
6801 tcg_temp_free_ptr(fpstatus);
6802 break;
6804 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6806 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6807 gen_helper_vfp_sitos(tmp, tmp, fpstatus);
6808 tcg_temp_free_ptr(fpstatus);
6809 break;
6811 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6813 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6814 gen_helper_vfp_uitos(tmp, tmp, fpstatus);
6815 tcg_temp_free_ptr(fpstatus);
6816 break;
6818 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6820 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6821 gen_helper_vfp_tosizs(tmp, tmp, fpstatus);
6822 tcg_temp_free_ptr(fpstatus);
6823 break;
6825 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6827 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6828 gen_helper_vfp_touizs(tmp, tmp, fpstatus);
6829 tcg_temp_free_ptr(fpstatus);
6830 break;
6832 default:
6833 /* Reserved op values were caught by the
6834 * neon_2rm_sizes[] check earlier.
6836 abort();
6838 neon_store_reg(rd, pass, tmp);
6840 break;
6842 } else if ((insn & (1 << 10)) == 0) {
6843 /* VTBL, VTBX. */
6844 int n = ((insn >> 8) & 3) + 1;
6845 if ((rn + n) > 32) {
6846 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6847 * helper function running off the end of the register file.
6849 return 1;
6851 n <<= 3;
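/* n is now the table length in bytes (1..4 D registers of 8 bytes
 * each).  Bit 6 distinguishes VTBX, where out-of-range indexes keep
 * the existing destination bytes (hence rd is loaded as the
 * fallback value passed to the helper), from VTBL, where the
 * fallback is zero.
 */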
6852 if (insn & (1 << 6)) {
6853 tmp = neon_load_reg(rd, 0);
6854 } else {
6855 tmp = tcg_temp_new_i32();
6856 tcg_gen_movi_i32(tmp, 0);
6858 tmp2 = neon_load_reg(rm, 0);
6859 ptr1 = vfp_reg_ptr(true, rn);
6860 tmp5 = tcg_const_i32(n);
6861 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
6862 tcg_temp_free_i32(tmp);
6863 if (insn & (1 << 6)) {
6864 tmp = neon_load_reg(rd, 1);
6865 } else {
6866 tmp = tcg_temp_new_i32();
6867 tcg_gen_movi_i32(tmp, 0);
6869 tmp3 = neon_load_reg(rm, 1);
6870 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
6871 tcg_temp_free_i32(tmp5);
6872 tcg_temp_free_ptr(ptr1);
6873 neon_store_reg(rd, 0, tmp2);
6874 neon_store_reg(rd, 1, tmp3);
6875 tcg_temp_free_i32(tmp);
6876 } else if ((insn & 0x380) == 0) {
6877 /* VDUP */
6878 int element;
6879 TCGMemOp size;
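/* The lowest set bit of insn[18:16] encodes the element size
 * (bit 16: bytes, bit 17: halfwords, bit 18: words) and the bits
 * above it give the element index; tcg_gen_gvec_dup_mem then
 * replicates that element of rm across the whole destination
 * (8 bytes for a D register, 16 for a Q register).
 */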
6881 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6882 return 1;
6884 if (insn & (1 << 16)) {
6885 size = MO_8;
6886 element = (insn >> 17) & 7;
6887 } else if (insn & (1 << 17)) {
6888 size = MO_16;
6889 element = (insn >> 18) & 3;
6890 } else {
6891 size = MO_32;
6892 element = (insn >> 19) & 1;
6894 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
6895 neon_element_offset(rm, element, size),
6896 q ? 16 : 8, q ? 16 : 8);
6897 } else {
6898 return 1;
6902 return 0;
6905 /* Advanced SIMD three registers of the same length extension.
6906 * 31 25 23 22 20 16 12 11 10 9 8 3 0
6907 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
6908 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
6909 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
6911 static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
6913 gen_helper_gvec_3 *fn_gvec = NULL;
6914 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
6915 int rd, rn, rm, opr_sz;
6916 int data = 0;
6917 int off_rn, off_rm;
6918 bool is_long = false, q = extract32(insn, 6, 1);
6919 bool ptr_is_env = false;
6921 if ((insn & 0xfe200f10) == 0xfc200800) {
6922 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
6923 int size = extract32(insn, 20, 1);
6924 data = extract32(insn, 23, 2); /* rot */
6925 if (!dc_isar_feature(aa32_vcma, s)
6926 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
6927 return 1;
6929 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
6930 } else if ((insn & 0xfea00f10) == 0xfc800800) {
6931 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
6932 int size = extract32(insn, 20, 1);
6933 data = extract32(insn, 24, 1); /* rot */
6934 if (!dc_isar_feature(aa32_vcma, s)
6935 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
6936 return 1;
6938 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
6939 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
6940 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
6941 bool u = extract32(insn, 4, 1);
6942 if (!dc_isar_feature(aa32_dp, s)) {
6943 return 1;
6945 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
6946 } else if ((insn & 0xff300f10) == 0xfc200810) {
6947 /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
6948 int is_s = extract32(insn, 23, 1);
6949 if (!dc_isar_feature(aa32_fhm, s)) {
6950 return 1;
6952 is_long = true;
6953 data = is_s; /* is_2 == 0 */
6954 fn_gvec_ptr = gen_helper_gvec_fmlal_a32;
6955 ptr_is_env = true;
6956 } else {
6957 return 1;
6960 VFP_DREG_D(rd, insn);
6961 if (rd & q) {
6962 return 1;
6964 if (q || !is_long) {
6965 VFP_DREG_N(rn, insn);
6966 VFP_DREG_M(rm, insn);
6967 if ((rn | rm) & q & !is_long) {
6968 return 1;
6970 off_rn = vfp_reg_offset(1, rn);
6971 off_rm = vfp_reg_offset(1, rm);
6972 } else {
6973 rn = VFP_SREG_N(insn);
6974 rm = VFP_SREG_M(insn);
6975 off_rn = vfp_reg_offset(0, rn);
6976 off_rm = vfp_reg_offset(0, rm);
6979 if (s->fp_excp_el) {
6980 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
6981 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
6982 return 0;
6984 if (!s->vfp_enabled) {
6985 return 1;
6988 opr_sz = (1 + q) * 8;
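/* opr_sz is the vector length in bytes: 8 for a D-register (64-bit)
 * operation, 16 for a Q-register one.  Ops that need an extra
 * pointer argument (fp_status or cpu_env) go through the _ptr gvec
 * expansion; the rest use the plain three-operand out-of-line form.
 */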
6989 if (fn_gvec_ptr) {
6990 TCGv_ptr ptr;
6991 if (ptr_is_env) {
6992 ptr = cpu_env;
6993 } else {
6994 ptr = get_fpstatus_ptr(1);
6996 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
6997 opr_sz, opr_sz, data, fn_gvec_ptr);
6998 if (!ptr_is_env) {
6999 tcg_temp_free_ptr(ptr);
7001 } else {
7002 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
7003 opr_sz, opr_sz, data, fn_gvec);
7005 return 0;
7008 /* Advanced SIMD two registers and a scalar extension.
7009 * 31 24 23 22 20 16 12 11 10 9 8 3 0
7010 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7011 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7012 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7016 static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
7018 gen_helper_gvec_3 *fn_gvec = NULL;
7019 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
7020 int rd, rn, rm, opr_sz, data;
7021 int off_rn, off_rm;
7022 bool is_long = false, q = extract32(insn, 6, 1);
7023 bool ptr_is_env = false;
7025 if ((insn & 0xff000f10) == 0xfe000800) {
7026 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
7027 int rot = extract32(insn, 20, 2);
7028 int size = extract32(insn, 23, 1);
7029 int index;
7031 if (!dc_isar_feature(aa32_vcma, s)) {
7032 return 1;
7034 if (size == 0) {
7035 if (!dc_isar_feature(aa32_fp16_arith, s)) {
7036 return 1;
7038 /* For fp16, rm is just Vm, and index is M. */
7039 rm = extract32(insn, 0, 4);
7040 index = extract32(insn, 5, 1);
7041 } else {
7042 /* For fp32, rm is the usual M:Vm, and index is 0. */
7043 VFP_DREG_M(rm, insn);
7044 index = 0;
7046 data = (index << 2) | rot;
7047 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
7048 : gen_helper_gvec_fcmlah_idx);
7049 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
7050 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
7051 int u = extract32(insn, 4, 1);
7053 if (!dc_isar_feature(aa32_dp, s)) {
7054 return 1;
7056 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
7057 /* rm is just Vm, and index is M. */
7058 data = extract32(insn, 5, 1); /* index */
7059 rm = extract32(insn, 0, 4);
7060 } else if ((insn & 0xffa00f10) == 0xfe000810) {
7061 /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
7062 int is_s = extract32(insn, 20, 1);
7063 int vm20 = extract32(insn, 0, 3);
7064 int vm3 = extract32(insn, 3, 1);
7065 int m = extract32(insn, 5, 1);
7066 int index;
7068 if (!dc_isar_feature(aa32_fhm, s)) {
7069 return 1;
7071 if (q) {
7072 rm = vm20;
7073 index = m * 2 + vm3;
7074 } else {
7075 rm = vm20 * 2 + m;
7076 index = vm3;
7078 is_long = true;
7079 data = (index << 2) | is_s; /* is_2 == 0 */
7080 fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32;
7081 ptr_is_env = true;
7082 } else {
7083 return 1;
7086 VFP_DREG_D(rd, insn);
7087 if (rd & q) {
7088 return 1;
7090 if (q || !is_long) {
7091 VFP_DREG_N(rn, insn);
7092 if (rn & q & !is_long) {
7093 return 1;
7095 off_rn = vfp_reg_offset(1, rn);
7096 off_rm = vfp_reg_offset(1, rm);
7097 } else {
7098 rn = VFP_SREG_N(insn);
7099 off_rn = vfp_reg_offset(0, rn);
7100 off_rm = vfp_reg_offset(0, rm);
7102 if (s->fp_excp_el) {
7103 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
7104 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
7105 return 0;
7107 if (!s->vfp_enabled) {
7108 return 1;
7111 opr_sz = (1 + q) * 8;
7112 if (fn_gvec_ptr) {
7113 TCGv_ptr ptr;
7114 if (ptr_is_env) {
7115 ptr = cpu_env;
7116 } else {
7117 ptr = get_fpstatus_ptr(1);
7119 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
7120 opr_sz, opr_sz, data, fn_gvec_ptr);
7121 if (!ptr_is_env) {
7122 tcg_temp_free_ptr(ptr);
7124 } else {
7125 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
7126 opr_sz, opr_sz, data, fn_gvec);
7128 return 0;
7131 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
7133 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7134 const ARMCPRegInfo *ri;
7136 cpnum = (insn >> 8) & 0xf;
7138 /* First check for coprocessor space used for XScale/iwMMXt insns */
7139 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
7140 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7141 return 1;
7143 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7144 return disas_iwmmxt_insn(s, insn);
7145 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7146 return disas_dsp_insn(s, insn);
7148 return 1;
7151 /* Otherwise treat as a generic register access */
7152 is64 = (insn & (1 << 25)) == 0;
7153 if (!is64 && ((insn & (1 << 4)) == 0)) {
7154 /* cdp */
7155 return 1;
7158 crm = insn & 0xf;
7159 if (is64) {
7160 crn = 0;
7161 opc1 = (insn >> 4) & 0xf;
7162 opc2 = 0;
7163 rt2 = (insn >> 16) & 0xf;
7164 } else {
7165 crn = (insn >> 16) & 0xf;
7166 opc1 = (insn >> 21) & 7;
7167 opc2 = (insn >> 5) & 7;
7168 rt2 = 0;
7170 isread = (insn >> 20) & 1;
7171 rt = (insn >> 12) & 0xf;
7173 ri = get_arm_cp_reginfo(s->cp_regs,
7174 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
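/* The lookup key packs the coprocessor number, the 64-bit flag, the
 * security state and crn/crm/opc1/opc2; e.g. an MRC to p15 with
 * crn=1, crm=0, opc1=0, opc2=0 (the SCTLR on most cores) resolves
 * to its ARMCPRegInfo entry.  A failed lookup falls through to the
 * LOG_UNIMP path at the end of this function and UNDEFs.
 */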
7175 if (ri) {
7176 /* Check access permissions */
7177 if (!cp_access_ok(s->current_el, ri, isread)) {
7178 return 1;
7181 if (ri->accessfn ||
7182 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
7183 /* Emit code to perform further access permissions checks at
7184 * runtime; this may result in an exception.
7185 * Note that on XScale all cp0..c13 registers do an access check
7186 * call in order to handle c15_cpar.
7188 TCGv_ptr tmpptr;
7189 TCGv_i32 tcg_syn, tcg_isread;
7190 uint32_t syndrome;
7192 /* Note that since we are an implementation which takes an
7193 * exception on a trapped conditional instruction only if the
7194 * instruction passes its condition code check, we can take
7195 * advantage of the clause in the ARM ARM that allows us to set
7196 * the COND field in the instruction to 0xE in all cases.
7197 * We could fish the actual condition out of the insn (ARM)
7198 * or the condexec bits (Thumb) but it isn't necessary.
7200 switch (cpnum) {
7201 case 14:
7202 if (is64) {
7203 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7204 isread, false);
7205 } else {
7206 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7207 rt, isread, false);
7209 break;
7210 case 15:
7211 if (is64) {
7212 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7213 isread, false);
7214 } else {
7215 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7216 rt, isread, false);
7218 break;
7219 default:
7220 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7221 * so this can only happen if this is an ARMv7 or earlier CPU,
7222 * in which case the syndrome information won't actually be
7223 * guest visible.
7225 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
7226 syndrome = syn_uncategorized();
7227 break;
7230 gen_set_condexec(s);
7231 gen_set_pc_im(s, s->pc_curr);
7232 tmpptr = tcg_const_ptr(ri);
7233 tcg_syn = tcg_const_i32(syndrome);
7234 tcg_isread = tcg_const_i32(isread);
7235 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7236 tcg_isread);
7237 tcg_temp_free_ptr(tmpptr);
7238 tcg_temp_free_i32(tcg_syn);
7239 tcg_temp_free_i32(tcg_isread);
7242 /* Handle special cases first */
7243 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7244 case ARM_CP_NOP:
7245 return 0;
7246 case ARM_CP_WFI:
7247 if (isread) {
7248 return 1;
7250 gen_set_pc_im(s, s->base.pc_next);
7251 s->base.is_jmp = DISAS_WFI;
7252 return 0;
7253 default:
7254 break;
7257 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7258 gen_io_start();
7261 if (isread) {
7262 /* Read */
7263 if (is64) {
7264 TCGv_i64 tmp64;
7265 TCGv_i32 tmp;
7266 if (ri->type & ARM_CP_CONST) {
7267 tmp64 = tcg_const_i64(ri->resetvalue);
7268 } else if (ri->readfn) {
7269 TCGv_ptr tmpptr;
7270 tmp64 = tcg_temp_new_i64();
7271 tmpptr = tcg_const_ptr(ri);
7272 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7273 tcg_temp_free_ptr(tmpptr);
7274 } else {
7275 tmp64 = tcg_temp_new_i64();
7276 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7278 tmp = tcg_temp_new_i32();
7279 tcg_gen_extrl_i64_i32(tmp, tmp64);
7280 store_reg(s, rt, tmp);
7281 tcg_gen_shri_i64(tmp64, tmp64, 32);
7282 tmp = tcg_temp_new_i32();
7283 tcg_gen_extrl_i64_i32(tmp, tmp64);
7284 tcg_temp_free_i64(tmp64);
7285 store_reg(s, rt2, tmp);
7286 } else {
7287 TCGv_i32 tmp;
7288 if (ri->type & ARM_CP_CONST) {
7289 tmp = tcg_const_i32(ri->resetvalue);
7290 } else if (ri->readfn) {
7291 TCGv_ptr tmpptr;
7292 tmp = tcg_temp_new_i32();
7293 tmpptr = tcg_const_ptr(ri);
7294 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7295 tcg_temp_free_ptr(tmpptr);
7296 } else {
7297 tmp = load_cpu_offset(ri->fieldoffset);
7299 if (rt == 15) {
7300 /* A destination register of r15 for 32-bit loads sets
7301 * the condition codes from the high 4 bits of the value
7303 gen_set_nzcv(tmp);
7304 tcg_temp_free_i32(tmp);
7305 } else {
7306 store_reg(s, rt, tmp);
7309 } else {
7310 /* Write */
7311 if (ri->type & ARM_CP_CONST) {
7312 /* If not forbidden by access permissions, treat as WI */
7313 return 0;
7316 if (is64) {
7317 TCGv_i32 tmplo, tmphi;
7318 TCGv_i64 tmp64 = tcg_temp_new_i64();
7319 tmplo = load_reg(s, rt);
7320 tmphi = load_reg(s, rt2);
7321 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7322 tcg_temp_free_i32(tmplo);
7323 tcg_temp_free_i32(tmphi);
7324 if (ri->writefn) {
7325 TCGv_ptr tmpptr = tcg_const_ptr(ri);
7326 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7327 tcg_temp_free_ptr(tmpptr);
7328 } else {
7329 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7331 tcg_temp_free_i64(tmp64);
7332 } else {
7333 if (ri->writefn) {
7334 TCGv_i32 tmp;
7335 TCGv_ptr tmpptr;
7336 tmp = load_reg(s, rt);
7337 tmpptr = tcg_const_ptr(ri);
7338 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7339 tcg_temp_free_ptr(tmpptr);
7340 tcg_temp_free_i32(tmp);
7341 } else {
7342 TCGv_i32 tmp = load_reg(s, rt);
7343 store_cpu_offset(tmp, ri->fieldoffset);
7348 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7349 /* I/O operations must end the TB here (whether read or write) */
7350 gen_io_end();
7351 gen_lookup_tb(s);
7352 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
7353 /* We default to ending the TB on a coprocessor register write,
7354 * but allow this to be suppressed by the register definition
7355 * (usually only necessary to work around guest bugs).
7357 gen_lookup_tb(s);
7360 return 0;
7363 /* Unknown register; this might be a guest error or a QEMU
7364 * unimplemented feature.
7366 if (is64) {
7367 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7368 "64 bit system register cp:%d opc1: %d crm:%d "
7369 "(%s)\n",
7370 isread ? "read" : "write", cpnum, opc1, crm,
7371 s->ns ? "non-secure" : "secure");
7372 } else {
7373 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7374 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7375 "(%s)\n",
7376 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7377 s->ns ? "non-secure" : "secure");
7380 return 1;
7384 /* Store a 64-bit value to a register pair. Clobbers val. */
7385 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
7387 TCGv_i32 tmp;
7388 tmp = tcg_temp_new_i32();
7389 tcg_gen_extrl_i64_i32(tmp, val);
7390 store_reg(s, rlow, tmp);
7391 tmp = tcg_temp_new_i32();
7392 tcg_gen_shri_i64(val, val, 32);
7393 tcg_gen_extrl_i64_i32(tmp, val);
7394 store_reg(s, rhigh, tmp);
7397 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
7398 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
7400 TCGv_i64 tmp;
7401 TCGv_i32 tmp2;
7403 /* Load value and extend to 64 bits. */
7404 tmp = tcg_temp_new_i64();
7405 tmp2 = load_reg(s, rlow);
7406 tcg_gen_extu_i32_i64(tmp, tmp2);
7407 tcg_temp_free_i32(tmp2);
7408 tcg_gen_add_i64(val, val, tmp);
7409 tcg_temp_free_i64(tmp);
7412 /* load and add a 64-bit value from a register pair. */
7413 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
7415 TCGv_i64 tmp;
7416 TCGv_i32 tmpl;
7417 TCGv_i32 tmph;
7419 /* Load 64-bit value rd:rn. */
7420 tmpl = load_reg(s, rlow);
7421 tmph = load_reg(s, rhigh);
7422 tmp = tcg_temp_new_i64();
7423 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7424 tcg_temp_free_i32(tmpl);
7425 tcg_temp_free_i32(tmph);
7426 tcg_gen_add_i64(val, val, tmp);
7427 tcg_temp_free_i64(tmp);
7430 /* Set N and Z flags from hi|lo. */
7431 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
7433 tcg_gen_mov_i32(cpu_NF, hi);
7434 tcg_gen_or_i32(cpu_ZF, lo, hi);
7437 /* Load/Store exclusive instructions are implemented by remembering
7438 the value/address loaded, and seeing if these are the same
7439 when the store is performed. This should be sufficient to implement
7440 the architecturally mandated semantics, and avoids having to monitor
7441 regular stores. The compare vs the remembered value is done during
7442 the cmpxchg operation, but we must compare the addresses manually. */
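/* A typical guest sequence this supports is an atomic increment:
 *
 *   retry:  ldrex   r1, [r0]
 *           add     r1, r1, #1
 *           strex   r2, r1, [r0]
 *           cmp     r2, #0
 *           bne     retry
 *
 * gen_load_exclusive() records the address and loaded value in
 * cpu_exclusive_addr/val; gen_store_exclusive() then compares the
 * address explicitly and the value via a cmpxchg on the memory word.
 */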
7443 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
7444 TCGv_i32 addr, int size)
7446 TCGv_i32 tmp = tcg_temp_new_i32();
7447 TCGMemOp opc = size | MO_ALIGN | s->be_data;
7449 s->is_ldex = true;
7451 if (size == 3) {
7452 TCGv_i32 tmp2 = tcg_temp_new_i32();
7453 TCGv_i64 t64 = tcg_temp_new_i64();
7455 /* For AArch32, architecturally the 32-bit word at the lowest
7456 * address is always Rt and the one at addr+4 is Rt2, even if
7457 * the CPU is big-endian. That means we don't want to do a
7458 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
7459 * for an architecturally 64-bit access, but instead do a
7460 * 64-bit access using MO_BE if appropriate and then split
7461 * the two halves.
7462 * This only makes a difference for BE32 user-mode, where
7463 * frob64() must not flip the two halves of the 64-bit data
7464 * but this code must treat BE32 user-mode like BE32 system.
7466 TCGv taddr = gen_aa32_addr(s, addr, opc);
7468 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
7469 tcg_temp_free(taddr);
7470 tcg_gen_mov_i64(cpu_exclusive_val, t64);
7471 if (s->be_data == MO_BE) {
7472 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
7473 } else {
7474 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
7476 tcg_temp_free_i64(t64);
7478 store_reg(s, rt2, tmp2);
7479 } else {
7480 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
7481 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
7484 store_reg(s, rt, tmp);
7485 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
7488 static void gen_clrex(DisasContext *s)
7490 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7493 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7494 TCGv_i32 addr, int size)
7496 TCGv_i32 t0, t1, t2;
7497 TCGv_i64 extaddr;
7498 TCGv taddr;
7499 TCGLabel *done_label;
7500 TCGLabel *fail_label;
7501 TCGMemOp opc = size | MO_ALIGN | s->be_data;
7503 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7504 [addr] = {Rt};
7505 {Rd} = 0;
7506 } else {
7507 {Rd} = 1;
7508 } */
7509 fail_label = gen_new_label();
7510 done_label = gen_new_label();
7511 extaddr = tcg_temp_new_i64();
7512 tcg_gen_extu_i32_i64(extaddr, addr);
7513 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7514 tcg_temp_free_i64(extaddr);
7516 taddr = gen_aa32_addr(s, addr, opc);
7517 t0 = tcg_temp_new_i32();
7518 t1 = load_reg(s, rt);
7519 if (size == 3) {
7520 TCGv_i64 o64 = tcg_temp_new_i64();
7521 TCGv_i64 n64 = tcg_temp_new_i64();
7523 t2 = load_reg(s, rt2);
7524 /* For AArch32, architecturally the 32-bit word at the lowest
7525 * address is always Rt and the one at addr+4 is Rt2, even if
7526 * the CPU is big-endian. Since we're going to treat this as a
7527 * single 64-bit BE store, we need to put the two halves in the
7528 * opposite order for BE to LE, so that they end up in the right
7529 * places.
7530 * We don't want gen_aa32_frob64() because that does the wrong
7531 * thing for BE32 usermode.
7533 if (s->be_data == MO_BE) {
7534 tcg_gen_concat_i32_i64(n64, t2, t1);
7535 } else {
7536 tcg_gen_concat_i32_i64(n64, t1, t2);
7538 tcg_temp_free_i32(t2);
7540 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
7541 get_mem_index(s), opc);
7542 tcg_temp_free_i64(n64);
7544 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
7545 tcg_gen_extrl_i64_i32(t0, o64);
7547 tcg_temp_free_i64(o64);
7548 } else {
7549 t2 = tcg_temp_new_i32();
7550 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
7551 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
7552 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
7553 tcg_temp_free_i32(t2);
7555 tcg_temp_free_i32(t1);
7556 tcg_temp_free(taddr);
7557 tcg_gen_mov_i32(cpu_R[rd], t0);
7558 tcg_temp_free_i32(t0);
7559 tcg_gen_br(done_label);
7561 gen_set_label(fail_label);
7562 tcg_gen_movi_i32(cpu_R[rd], 1);
7563 gen_set_label(done_label);
7564 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7567 /* gen_srs:
7568 * @env: CPUARMState
7569 * @s: DisasContext
7570 * @mode: mode field from insn (which stack to store to)
7571 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7572 * @writeback: true if writeback bit set
7574 * Generate code for the SRS (Store Return State) insn.
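 *
 * SRS stores the current mode's LR and the SPSR to the stack of the
 * mode specified in the instruction.  The amode switch below picks
 * the offset of the lower stored word (DA: -4, IA: 0, DB: -8, IB: +4);
 * LR is stored there and the SPSR at the next word up.  The writeback
 * offsets then move the banked SP by the size of the two-word transfer.
 */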
7576 static void gen_srs(DisasContext *s,
7577 uint32_t mode, uint32_t amode, bool writeback)
7579 int32_t offset;
7580 TCGv_i32 addr, tmp;
7581 bool undef = false;
7583 /* SRS is:
7584 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
7585 * and specified mode is monitor mode
7586 * - UNDEFINED in Hyp mode
7587 * - UNPREDICTABLE in User or System mode
7588 * - UNPREDICTABLE if the specified mode is:
7589 * -- not implemented
7590 * -- not a valid mode number
7591 * -- a mode that's at a higher exception level
7592 * -- Monitor, if we are Non-secure
7593 * For the UNPREDICTABLE cases we choose to UNDEF.
7595 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
7596 gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
7597 return;
7600 if (s->current_el == 0 || s->current_el == 2) {
7601 undef = true;
7604 switch (mode) {
7605 case ARM_CPU_MODE_USR:
7606 case ARM_CPU_MODE_FIQ:
7607 case ARM_CPU_MODE_IRQ:
7608 case ARM_CPU_MODE_SVC:
7609 case ARM_CPU_MODE_ABT:
7610 case ARM_CPU_MODE_UND:
7611 case ARM_CPU_MODE_SYS:
7612 break;
7613 case ARM_CPU_MODE_HYP:
7614 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
7615 undef = true;
7617 break;
7618 case ARM_CPU_MODE_MON:
7619 /* No need to check specifically for "are we non-secure" because
7620 * we've already made EL0 UNDEF and handled the trap for S-EL1;
7621 * so if this isn't EL3 then we must be non-secure.
7623 if (s->current_el != 3) {
7624 undef = true;
7626 break;
7627 default:
7628 undef = true;
7631 if (undef) {
7632 unallocated_encoding(s);
7633 return;
7636 addr = tcg_temp_new_i32();
7637 tmp = tcg_const_i32(mode);
7638 /* get_r13_banked() will raise an exception if called from System mode */
7639 gen_set_condexec(s);
7640 gen_set_pc_im(s, s->pc_curr);
7641 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7642 tcg_temp_free_i32(tmp);
7643 switch (amode) {
7644 case 0: /* DA */
7645 offset = -4;
7646 break;
7647 case 1: /* IA */
7648 offset = 0;
7649 break;
7650 case 2: /* DB */
7651 offset = -8;
7652 break;
7653 case 3: /* IB */
7654 offset = 4;
7655 break;
7656 default:
7657 abort();
7659 tcg_gen_addi_i32(addr, addr, offset);
7660 tmp = load_reg(s, 14);
7661 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7662 tcg_temp_free_i32(tmp);
7663 tmp = load_cpu_field(spsr);
7664 tcg_gen_addi_i32(addr, addr, 4);
7665 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7666 tcg_temp_free_i32(tmp);
7667 if (writeback) {
7668 switch (amode) {
7669 case 0:
7670 offset = -8;
7671 break;
7672 case 1:
7673 offset = 4;
7674 break;
7675 case 2:
7676 offset = -4;
7677 break;
7678 case 3:
7679 offset = 0;
7680 break;
7681 default:
7682 abort();
7684 tcg_gen_addi_i32(addr, addr, offset);
7685 tmp = tcg_const_i32(mode);
7686 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7687 tcg_temp_free_i32(tmp);
7689 tcg_temp_free_i32(addr);
7690 s->base.is_jmp = DISAS_UPDATE;
7693 /* Generate a label used for skipping this instruction */
7694 static void arm_gen_condlabel(DisasContext *s)
7696 if (!s->condjmp) {
7697 s->condlabel = gen_new_label();
7698 s->condjmp = 1;
7702 /* Skip this instruction if the ARM condition is false */
7703 static void arm_skip_unless(DisasContext *s, uint32_t cond)
7705 arm_gen_condlabel(s);
7706 arm_gen_test_cc(cond ^ 1, s->condlabel);
7709 static void disas_arm_insn(DisasContext *s, unsigned int insn)
7711 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
7712 TCGv_i32 tmp;
7713 TCGv_i32 tmp2;
7714 TCGv_i32 tmp3;
7715 TCGv_i32 addr;
7716 TCGv_i64 tmp64;
7718 /* M variants do not implement ARM mode; this must raise the INVSTATE
7719 * UsageFault exception.
7721 if (arm_dc_feature(s, ARM_FEATURE_M)) {
7722 gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
7723 default_exception_el(s));
7724 return;
7726 cond = insn >> 28;
7727 if (cond == 0xf){
7728 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7729 * choose to UNDEF. In ARMv5 and above the space is used
7730 * for miscellaneous unconditional instructions.
7732 ARCH(5);
7734 /* Unconditional instructions. */
7735 if (((insn >> 25) & 7) == 1) {
7736 /* NEON Data processing. */
7737 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7738 goto illegal_op;
7741 if (disas_neon_data_insn(s, insn)) {
7742 goto illegal_op;
7744 return;
7746 if ((insn & 0x0f100000) == 0x04000000) {
7747 /* NEON load/store. */
7748 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7749 goto illegal_op;
7752 if (disas_neon_ls_insn(s, insn)) {
7753 goto illegal_op;
7755 return;
7757 if ((insn & 0x0f000e10) == 0x0e000a00) {
7758 /* VFP. */
7759 if (disas_vfp_insn(s, insn)) {
7760 goto illegal_op;
7762 return;
7764 if (((insn & 0x0f30f000) == 0x0510f000) ||
7765 ((insn & 0x0f30f010) == 0x0710f000)) {
7766 if ((insn & (1 << 22)) == 0) {
7767 /* PLDW; v7MP */
7768 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7769 goto illegal_op;
7772 /* Otherwise PLD; v5TE+ */
7773 ARCH(5TE);
7774 return;
7776 if (((insn & 0x0f70f000) == 0x0450f000) ||
7777 ((insn & 0x0f70f010) == 0x0650f000)) {
7778 ARCH(7);
7779 return; /* PLI; V7 */
7781 if (((insn & 0x0f700000) == 0x04100000) ||
7782 ((insn & 0x0f700010) == 0x06100000)) {
7783 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7784 goto illegal_op;
7786 return; /* v7MP: Unallocated memory hint: must NOP */
7789 if ((insn & 0x0ffffdff) == 0x01010000) {
7790 ARCH(6);
7791 /* setend */
7792 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
7793 gen_helper_setend(cpu_env);
7794 s->base.is_jmp = DISAS_UPDATE;
7796 return;
7797 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7798 switch ((insn >> 4) & 0xf) {
7799 case 1: /* clrex */
7800 ARCH(6K);
7801 gen_clrex(s);
7802 return;
7803 case 4: /* dsb */
7804 case 5: /* dmb */
7805 ARCH(7);
7806 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
7807 return;
7808 case 6: /* isb */
7809 /* We need to break the TB after this insn to execute
7810 * self-modifying code correctly and also to take
7811 * any pending interrupts immediately.
7813 gen_goto_tb(s, 0, s->base.pc_next);
7814 return;
7815 case 7: /* sb */
7816 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
7817 goto illegal_op;
7820 * TODO: There is no speculation barrier opcode
7821 * for TCG; MB and end the TB instead.
7823 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
7824 gen_goto_tb(s, 0, s->base.pc_next);
7825 return;
7826 default:
7827 goto illegal_op;
7829 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7830 /* srs */
7831 ARCH(6);
7832 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
7833 return;
7834 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
7835 /* rfe */
7836 int32_t offset;
7837 if (IS_USER(s))
7838 goto illegal_op;
7839 ARCH(6);
7840 rn = (insn >> 16) & 0xf;
7841 addr = load_reg(s, rn);
7842 i = (insn >> 23) & 3;
7843 switch (i) {
7844 case 0: offset = -4; break; /* DA */
7845 case 1: offset = 0; break; /* IA */
7846 case 2: offset = -8; break; /* DB */
7847 case 3: offset = 4; break; /* IB */
7848 default: abort();
7850 if (offset)
7851 tcg_gen_addi_i32(addr, addr, offset);
7852 /* Load PC into tmp and CPSR into tmp2. */
7853 tmp = tcg_temp_new_i32();
7854 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
7855 tcg_gen_addi_i32(addr, addr, 4);
7856 tmp2 = tcg_temp_new_i32();
7857 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
7858 if (insn & (1 << 21)) {
7859 /* Base writeback. */
7860 switch (i) {
7861 case 0: offset = -8; break;
7862 case 1: offset = 4; break;
7863 case 2: offset = -4; break;
7864 case 3: offset = 0; break;
7865 default: abort();
7867 if (offset)
7868 tcg_gen_addi_i32(addr, addr, offset);
7869 store_reg(s, rn, addr);
7870 } else {
7871 tcg_temp_free_i32(addr);
7873 gen_rfe(s, tmp, tmp2);
7874 return;
7875 } else if ((insn & 0x0e000000) == 0x0a000000) {
7876 /* branch link and change to thumb (blx <offset>) */
7877 int32_t offset;
7879 tmp = tcg_temp_new_i32();
7880 tcg_gen_movi_i32(tmp, s->base.pc_next);
7881 store_reg(s, 14, tmp);
7882 /* Sign-extend the 24-bit offset */
7883 offset = (((int32_t)insn) << 8) >> 8;
7884 val = read_pc(s);
7885 /* offset * 4 + bit24 * 2 + (thumb bit) */
7886 val += (offset << 2) | ((insn >> 23) & 2) | 1;
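/* With H (insn bit 24) set an extra 2 is added, making
 * halfword-aligned Thumb targets reachable; the low bit of val is
 * set so that gen_bx_im switches to Thumb state for the branch.
 */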
7887 /* protected by ARCH(5); above, near the start of uncond block */
7888 gen_bx_im(s, val);
7889 return;
7890 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7891 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7892 /* iWMMXt register transfer. */
7893 if (extract32(s->c15_cpar, 1, 1)) {
7894 if (!disas_iwmmxt_insn(s, insn)) {
7895 return;
7899 } else if ((insn & 0x0e000a00) == 0x0c000800
7900 && arm_dc_feature(s, ARM_FEATURE_V8)) {
7901 if (disas_neon_insn_3same_ext(s, insn)) {
7902 goto illegal_op;
7904 return;
7905 } else if ((insn & 0x0f000a00) == 0x0e000800
7906 && arm_dc_feature(s, ARM_FEATURE_V8)) {
7907 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
7908 goto illegal_op;
7910 return;
7911 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7912 /* Coprocessor double register transfer. */
7913 ARCH(5TE);
7914 } else if ((insn & 0x0f000010) == 0x0e000010) {
7915 /* Additional coprocessor register transfer. */
7916 } else if ((insn & 0x0ff10020) == 0x01000000) {
7917 uint32_t mask;
7918 uint32_t val;
7919 /* cps (privileged) */
7920 if (IS_USER(s))
7921 return;
7922 mask = val = 0;
7923 if (insn & (1 << 19)) {
7924 if (insn & (1 << 8))
7925 mask |= CPSR_A;
7926 if (insn & (1 << 7))
7927 mask |= CPSR_I;
7928 if (insn & (1 << 6))
7929 mask |= CPSR_F;
7930 if (insn & (1 << 18))
7931 val |= mask;
7933 if (insn & (1 << 17)) {
7934 mask |= CPSR_M;
7935 val |= (insn & 0x1f);
7937 if (mask) {
7938 gen_set_psr_im(s, mask, 0, val);
7940 return;
7942 goto illegal_op;
7944 if (cond != 0xe) {
7945 /* if not always-execute, generate a conditional jump to the
7946 next instruction */
7947 arm_skip_unless(s, cond);
7949 if ((insn & 0x0f900000) == 0x03000000) {
7950 if ((insn & (1 << 21)) == 0) {
7951 ARCH(6T2);
7952 rd = (insn >> 12) & 0xf;
7953 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
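/* val is the 16-bit immediate assembled from imm4:imm12
 * (insn[19:16] and insn[11:0]).  MOVW writes it zero-extended;
 * MOVT keeps the low halfword of rd and replaces the top half, so
 * a MOVW/MOVT pair builds a full 32-bit constant.
 */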
7954 if ((insn & (1 << 22)) == 0) {
7955 /* MOVW */
7956 tmp = tcg_temp_new_i32();
7957 tcg_gen_movi_i32(tmp, val);
7958 } else {
7959 /* MOVT */
7960 tmp = load_reg(s, rd);
7961 tcg_gen_ext16u_i32(tmp, tmp);
7962 tcg_gen_ori_i32(tmp, tmp, val << 16);
7964 store_reg(s, rd, tmp);
7965 } else {
7966 if (((insn >> 12) & 0xf) != 0xf)
7967 goto illegal_op;
7968 if (((insn >> 16) & 0xf) == 0) {
7969 gen_nop_hint(s, insn & 0xff);
7970 } else {
7971 /* CPSR = immediate */
7972 val = insn & 0xff;
7973 shift = ((insn >> 8) & 0xf) * 2;
7974 if (shift)
7975 val = (val >> shift) | (val << (32 - shift));
7976 i = ((insn & (1 << 22)) != 0);
7977 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
7978 i, val)) {
7979 goto illegal_op;
7983 } else if ((insn & 0x0f900000) == 0x01000000
7984 && (insn & 0x00000090) != 0x00000090) {
7985 /* miscellaneous instructions */
7986 op1 = (insn >> 21) & 3;
7987 sh = (insn >> 4) & 0xf;
7988 rm = insn & 0xf;
7989 switch (sh) {
7990 case 0x0: /* MSR, MRS */
7991 if (insn & (1 << 9)) {
7992 /* MSR (banked) and MRS (banked) */
7993 int sysm = extract32(insn, 16, 4) |
7994 (extract32(insn, 8, 1) << 4);
7995 int r = extract32(insn, 22, 1);
7997 if (op1 & 1) {
7998 /* MSR (banked) */
7999 gen_msr_banked(s, r, sysm, rm);
8000 } else {
8001 /* MRS (banked) */
8002 int rd = extract32(insn, 12, 4);
8004 gen_mrs_banked(s, r, sysm, rd);
8006 break;
8009 /* MSR, MRS (for PSRs) */
8010 if (op1 & 1) {
8011 /* PSR = reg */
8012 tmp = load_reg(s, rm);
8013 i = ((op1 & 2) != 0);
8014 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
8015 goto illegal_op;
8016 } else {
8017 /* reg = PSR */
8018 rd = (insn >> 12) & 0xf;
8019 if (op1 & 2) {
8020 if (IS_USER(s))
8021 goto illegal_op;
8022 tmp = load_cpu_field(spsr);
8023 } else {
8024 tmp = tcg_temp_new_i32();
8025 gen_helper_cpsr_read(tmp, cpu_env);
8027 store_reg(s, rd, tmp);
8029 break;
8030 case 0x1:
8031 if (op1 == 1) {
8032 /* branch/exchange thumb (bx). */
8033 ARCH(4T);
8034 tmp = load_reg(s, rm);
8035 gen_bx(s, tmp);
8036 } else if (op1 == 3) {
8037 /* clz */
8038 ARCH(5);
8039 rd = (insn >> 12) & 0xf;
8040 tmp = load_reg(s, rm);
8041 tcg_gen_clzi_i32(tmp, tmp, 32);
8042 store_reg(s, rd, tmp);
8043 } else {
8044 goto illegal_op;
8046 break;
8047 case 0x2:
8048 if (op1 == 1) {
8049 ARCH(5J); /* bxj */
8050 /* Trivial implementation equivalent to bx. */
8051 tmp = load_reg(s, rm);
8052 gen_bx(s, tmp);
8053 } else {
8054 goto illegal_op;
8056 break;
8057 case 0x3:
8058 if (op1 != 1)
8059 goto illegal_op;
8061 ARCH(5);
8062 /* branch link/exchange thumb (blx) */
8063 tmp = load_reg(s, rm);
8064 tmp2 = tcg_temp_new_i32();
8065 tcg_gen_movi_i32(tmp2, s->base.pc_next);
8066 store_reg(s, 14, tmp2);
8067 gen_bx(s, tmp);
8068 break;
8069 case 0x4:
8071 /* crc32/crc32c */
8072 uint32_t c = extract32(insn, 8, 4);
8074 /* Check this CPU supports ARMv8 CRC instructions.
8075 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8076 * Bits 8, 10 and 11 should be zero.
8078 if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
8079 goto illegal_op;
8082 rn = extract32(insn, 16, 4);
8083 rd = extract32(insn, 12, 4);
8085 tmp = load_reg(s, rn);
8086 tmp2 = load_reg(s, rm);
8087 if (op1 == 0) {
8088 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8089 } else if (op1 == 1) {
8090 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8092 tmp3 = tcg_const_i32(1 << op1);
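/* The helper takes the number of bytes being accumulated:
 * 1 << op1 is 1, 2 or 4 for the B, H and W forms, matching the
 * operand masking above.  Bit 1 of c selects the CRC32C
 * (Castagnoli) polynomial, otherwise plain CRC32.
 */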
8093 if (c & 0x2) {
8094 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8095 } else {
8096 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8098 tcg_temp_free_i32(tmp2);
8099 tcg_temp_free_i32(tmp3);
8100 store_reg(s, rd, tmp);
8101 break;
8103 case 0x5: /* saturating add/subtract */
8104 ARCH(5TE);
8105 rd = (insn >> 12) & 0xf;
8106 rn = (insn >> 16) & 0xf;
8107 tmp = load_reg(s, rm);
8108 tmp2 = load_reg(s, rn);
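/* Bit 1 of op1 selects the doubling forms (QDADD/QDSUB): the
 * doubling is done as a saturating add of the operand to itself,
 * so the Q flag is set if 2*Rn overflows.  Bit 0 then selects
 * subtract (QSUB/QDSUB) versus add (QADD/QDADD).
 */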
8109 if (op1 & 2)
8110 gen_helper_add_saturate(tmp2, cpu_env, tmp2, tmp2);
8111 if (op1 & 1)
8112 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
8113 else
8114 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8115 tcg_temp_free_i32(tmp2);
8116 store_reg(s, rd, tmp);
8117 break;
8118 case 0x6: /* ERET */
8119 if (op1 != 3) {
8120 goto illegal_op;
8122 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
8123 goto illegal_op;
8125 if ((insn & 0x000fff0f) != 0x0000000e) {
8126 /* UNPREDICTABLE; we choose to UNDEF */
8127 goto illegal_op;
8130 if (s->current_el == 2) {
8131 tmp = load_cpu_field(elr_el[2]);
8132 } else {
8133 tmp = load_reg(s, 14);
8135 gen_exception_return(s, tmp);
8136 break;
8137 case 7:
8139 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
8140 switch (op1) {
8141 case 0:
8142 /* HLT */
8143 gen_hlt(s, imm16);
8144 break;
8145 case 1:
8146 /* bkpt */
8147 ARCH(5);
8148 gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm16, false));
8149 break;
8150 case 2:
8151 /* Hypervisor call (v7) */
8152 ARCH(7);
8153 if (IS_USER(s)) {
8154 goto illegal_op;
8156 gen_hvc(s, imm16);
8157 break;
8158 case 3:
8159 /* Secure monitor call (v6+) */
8160 ARCH(6K);
8161 if (IS_USER(s)) {
8162 goto illegal_op;
8164 gen_smc(s);
8165 break;
8166 default:
8167 g_assert_not_reached();
8169 break;
8171 case 0x8: /* signed multiply */
8172 case 0xa:
8173 case 0xc:
8174 case 0xe:
8175 ARCH(5TE);
8176 rs = (insn >> 8) & 0xf;
8177 rn = (insn >> 12) & 0xf;
8178 rd = (insn >> 16) & 0xf;
8179 if (op1 == 1) {
8180 /* (32 * 16) >> 16 */
8181 tmp = load_reg(s, rm);
8182 tmp2 = load_reg(s, rs);
8183 if (sh & 4)
8184 tcg_gen_sari_i32(tmp2, tmp2, 16);
8185 else
8186 gen_sxth(tmp2);
8187 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8188 tcg_gen_shri_i64(tmp64, tmp64, 16);
8189 tmp = tcg_temp_new_i32();
8190 tcg_gen_extrl_i64_i32(tmp, tmp64);
8191 tcg_temp_free_i64(tmp64);
8192 if ((sh & 2) == 0) {
8193 tmp2 = load_reg(s, rn);
8194 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8195 tcg_temp_free_i32(tmp2);
8197 store_reg(s, rd, tmp);
8198 } else {
8199 /* 16 * 16 */
8200 tmp = load_reg(s, rm);
8201 tmp2 = load_reg(s, rs);
8202 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
8203 tcg_temp_free_i32(tmp2);
8204 if (op1 == 2) {
8205 tmp64 = tcg_temp_new_i64();
8206 tcg_gen_ext_i32_i64(tmp64, tmp);
8207 tcg_temp_free_i32(tmp);
8208 gen_addq(s, tmp64, rn, rd);
8209 gen_storeq_reg(s, rn, rd, tmp64);
8210 tcg_temp_free_i64(tmp64);
8211 } else {
8212 if (op1 == 0) {
8213 tmp2 = load_reg(s, rn);
8214 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8215 tcg_temp_free_i32(tmp2);
8217 store_reg(s, rd, tmp);
8220 break;
8221 default:
8222 goto illegal_op;
8224 } else if (((insn & 0x0e000000) == 0 &&
8225 (insn & 0x00000090) != 0x90) ||
8226 ((insn & 0x0e000000) == (1 << 25))) {
8227 int set_cc, logic_cc, shiftop;
8229 op1 = (insn >> 21) & 0xf;
8230 set_cc = (insn >> 20) & 1;
8231 logic_cc = table_logic_cc[op1] & set_cc;
8233 /* data processing instruction */
8234 if (insn & (1 << 25)) {
8235 /* immediate operand */
8236 val = insn & 0xff;
8237 shift = ((insn >> 8) & 0xf) * 2;
8238 if (shift) {
8239 val = (val >> shift) | (val << (32 - shift));
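/* The 8-bit immediate is rotated right by twice the 4-bit rotate
 * field; e.g. imm8 0x3f with rotate 0xe gives a shift of 28 and a
 * value of 0x000003f0.  For flag-setting logical ops with a
 * non-zero rotation, bit 31 of the rotated immediate also becomes
 * the carry flag (gen_set_CF_bit31 below).
 */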
8241 tmp2 = tcg_temp_new_i32();
8242 tcg_gen_movi_i32(tmp2, val);
8243 if (logic_cc && shift) {
8244 gen_set_CF_bit31(tmp2);
8246 } else {
8247 /* register */
8248 rm = (insn) & 0xf;
8249 tmp2 = load_reg(s, rm);
8250 shiftop = (insn >> 5) & 3;
8251 if (!(insn & (1 << 4))) {
8252 shift = (insn >> 7) & 0x1f;
8253 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8254 } else {
8255 rs = (insn >> 8) & 0xf;
8256 tmp = load_reg(s, rs);
8257 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
8260 if (op1 != 0x0f && op1 != 0x0d) {
8261 rn = (insn >> 16) & 0xf;
8262 tmp = load_reg(s, rn);
8263 } else {
8264 tmp = NULL;
8266 rd = (insn >> 12) & 0xf;
8267 switch(op1) {
8268 case 0x00:
8269 tcg_gen_and_i32(tmp, tmp, tmp2);
8270 if (logic_cc) {
8271 gen_logic_CC(tmp);
8273 store_reg_bx(s, rd, tmp);
8274 break;
8275 case 0x01:
8276 tcg_gen_xor_i32(tmp, tmp, tmp2);
8277 if (logic_cc) {
8278 gen_logic_CC(tmp);
8280 store_reg_bx(s, rd, tmp);
8281 break;
8282 case 0x02:
8283 if (set_cc && rd == 15) {
8284 /* SUBS r15, ... is used for exception return. */
8285 if (IS_USER(s)) {
8286 goto illegal_op;
8288 gen_sub_CC(tmp, tmp, tmp2);
8289 gen_exception_return(s, tmp);
8290 } else {
8291 if (set_cc) {
8292 gen_sub_CC(tmp, tmp, tmp2);
8293 } else {
8294 tcg_gen_sub_i32(tmp, tmp, tmp2);
8296 store_reg_bx(s, rd, tmp);
8298 break;
8299 case 0x03:
8300 if (set_cc) {
8301 gen_sub_CC(tmp, tmp2, tmp);
8302 } else {
8303 tcg_gen_sub_i32(tmp, tmp2, tmp);
8305 store_reg_bx(s, rd, tmp);
8306 break;
8307 case 0x04:
8308 if (set_cc) {
8309 gen_add_CC(tmp, tmp, tmp2);
8310 } else {
8311 tcg_gen_add_i32(tmp, tmp, tmp2);
8313 store_reg_bx(s, rd, tmp);
8314 break;
8315 case 0x05:
8316 if (set_cc) {
8317 gen_adc_CC(tmp, tmp, tmp2);
8318 } else {
8319 gen_add_carry(tmp, tmp, tmp2);
8321 store_reg_bx(s, rd, tmp);
8322 break;
8323 case 0x06:
8324 if (set_cc) {
8325 gen_sbc_CC(tmp, tmp, tmp2);
8326 } else {
8327 gen_sub_carry(tmp, tmp, tmp2);
8329 store_reg_bx(s, rd, tmp);
8330 break;
8331 case 0x07:
8332 if (set_cc) {
8333 gen_sbc_CC(tmp, tmp2, tmp);
8334 } else {
8335 gen_sub_carry(tmp, tmp2, tmp);
8337 store_reg_bx(s, rd, tmp);
8338 break;
8339 case 0x08:
8340 if (set_cc) {
8341 tcg_gen_and_i32(tmp, tmp, tmp2);
8342 gen_logic_CC(tmp);
8344 tcg_temp_free_i32(tmp);
8345 break;
8346 case 0x09:
8347 if (set_cc) {
8348 tcg_gen_xor_i32(tmp, tmp, tmp2);
8349 gen_logic_CC(tmp);
8351 tcg_temp_free_i32(tmp);
8352 break;
8353 case 0x0a:
8354 if (set_cc) {
8355 gen_sub_CC(tmp, tmp, tmp2);
8357 tcg_temp_free_i32(tmp);
8358 break;
8359 case 0x0b:
8360 if (set_cc) {
8361 gen_add_CC(tmp, tmp, tmp2);
8363 tcg_temp_free_i32(tmp);
8364 break;
8365 case 0x0c:
8366 tcg_gen_or_i32(tmp, tmp, tmp2);
8367 if (logic_cc) {
8368 gen_logic_CC(tmp);
8370 store_reg_bx(s, rd, tmp);
8371 break;
8372 case 0x0d:
8373 if (logic_cc && rd == 15) {
8374 /* MOVS r15, ... is used for exception return. */
8375 if (IS_USER(s)) {
8376 goto illegal_op;
8378 gen_exception_return(s, tmp2);
8379 } else {
8380 if (logic_cc) {
8381 gen_logic_CC(tmp2);
8383 store_reg_bx(s, rd, tmp2);
8385 break;
8386 case 0x0e:
8387 tcg_gen_andc_i32(tmp, tmp, tmp2);
8388 if (logic_cc) {
8389 gen_logic_CC(tmp);
8391 store_reg_bx(s, rd, tmp);
8392 break;
8393 default:
8394 case 0x0f:
8395 tcg_gen_not_i32(tmp2, tmp2);
8396 if (logic_cc) {
8397 gen_logic_CC(tmp2);
8399 store_reg_bx(s, rd, tmp2);
8400 break;
8402 if (op1 != 0x0f && op1 != 0x0d) {
8403 tcg_temp_free_i32(tmp2);
8405 } else {
8406 /* other instructions */
8407 op1 = (insn >> 24) & 0xf;
8408 switch(op1) {
8409 case 0x0:
8410 case 0x1:
8411 /* multiplies, extra load/stores */
8412 sh = (insn >> 5) & 3;
8413 if (sh == 0) {
8414 if (op1 == 0x0) {
8415 rd = (insn >> 16) & 0xf;
8416 rn = (insn >> 12) & 0xf;
8417 rs = (insn >> 8) & 0xf;
8418 rm = (insn) & 0xf;
8419 op1 = (insn >> 20) & 0xf;
8420 switch (op1) {
8421 case 0: case 1: case 2: case 3: case 6:
8422 /* 32 bit mul */
8423 tmp = load_reg(s, rs);
8424 tmp2 = load_reg(s, rm);
8425 tcg_gen_mul_i32(tmp, tmp, tmp2);
8426 tcg_temp_free_i32(tmp2);
8427 if (insn & (1 << 22)) {
8428 /* Subtract (mls) */
8429 ARCH(6T2);
8430 tmp2 = load_reg(s, rn);
8431 tcg_gen_sub_i32(tmp, tmp2, tmp);
8432 tcg_temp_free_i32(tmp2);
8433 } else if (insn & (1 << 21)) {
8434 /* Add */
8435 tmp2 = load_reg(s, rn);
8436 tcg_gen_add_i32(tmp, tmp, tmp2);
8437 tcg_temp_free_i32(tmp2);
8439 if (insn & (1 << 20))
8440 gen_logic_CC(tmp);
8441 store_reg(s, rd, tmp);
8442 break;
8443 case 4:
8444 /* 64 bit mul double accumulate (UMAAL) */
8445 ARCH(6);
8446 tmp = load_reg(s, rs);
8447 tmp2 = load_reg(s, rm);
8448 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8449 gen_addq_lo(s, tmp64, rn);
8450 gen_addq_lo(s, tmp64, rd);
8451 gen_storeq_reg(s, rn, rd, tmp64);
8452 tcg_temp_free_i64(tmp64);
8453 break;
8454 case 8: case 9: case 10: case 11:
8455 case 12: case 13: case 14: case 15:
8456 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8457 tmp = load_reg(s, rs);
8458 tmp2 = load_reg(s, rm);
8459 if (insn & (1 << 22)) {
8460 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8461 } else {
8462 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8464 if (insn & (1 << 21)) { /* mult accumulate */
8465 TCGv_i32 al = load_reg(s, rn);
8466 TCGv_i32 ah = load_reg(s, rd);
8467 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
8468 tcg_temp_free_i32(al);
8469 tcg_temp_free_i32(ah);
8471 if (insn & (1 << 20)) {
8472 gen_logicq_cc(tmp, tmp2);
8474 store_reg(s, rn, tmp);
8475 store_reg(s, rd, tmp2);
8476 break;
8477 default:
8478 goto illegal_op;
8480 } else {
8481 rn = (insn >> 16) & 0xf;
8482 rd = (insn >> 12) & 0xf;
8483 if (insn & (1 << 23)) {
8484 /* load/store exclusive */
8485 bool is_ld = extract32(insn, 20, 1);
8486 bool is_lasr = !extract32(insn, 8, 1);
8487 int op2 = (insn >> 8) & 3;
8488 op1 = (insn >> 21) & 0x3;
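                        /*
                         * op2 (bits [9:8]): 0 is LDA/STL, 1 is reserved,
                         * 2 is LDAEX/STLEX, 3 is LDREX/STREX.  op1 (bits
                         * [22:21]) selects the access size: word,
                         * doubleword (exclusive forms only), byte, halfword.
                         */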
8490 switch (op2) {
8491 case 0: /* lda/stl */
8492 if (op1 == 1) {
8493 goto illegal_op;
8495 ARCH(8);
8496 break;
8497 case 1: /* reserved */
8498 goto illegal_op;
8499 case 2: /* ldaex/stlex */
8500 ARCH(8);
8501 break;
8502 case 3: /* ldrex/strex */
8503 if (op1) {
8504 ARCH(6K);
8505 } else {
8506 ARCH(6);
8508 break;
8511 addr = tcg_temp_local_new_i32();
8512 load_reg_var(s, addr, rn);
8514 if (is_lasr && !is_ld) {
8515 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
8518 if (op2 == 0) {
8519 if (is_ld) {
8520 tmp = tcg_temp_new_i32();
8521 switch (op1) {
8522 case 0: /* lda */
8523 gen_aa32_ld32u_iss(s, tmp, addr,
8524 get_mem_index(s),
8525 rd | ISSIsAcqRel);
8526 break;
8527 case 2: /* ldab */
8528 gen_aa32_ld8u_iss(s, tmp, addr,
8529 get_mem_index(s),
8530 rd | ISSIsAcqRel);
8531 break;
8532 case 3: /* ldah */
8533 gen_aa32_ld16u_iss(s, tmp, addr,
8534 get_mem_index(s),
8535 rd | ISSIsAcqRel);
8536 break;
8537 default:
8538 abort();
8540 store_reg(s, rd, tmp);
8541 } else {
8542 rm = insn & 0xf;
8543 tmp = load_reg(s, rm);
8544 switch (op1) {
8545 case 0: /* stl */
8546 gen_aa32_st32_iss(s, tmp, addr,
8547 get_mem_index(s),
8548 rm | ISSIsAcqRel);
8549 break;
8550 case 2: /* stlb */
8551 gen_aa32_st8_iss(s, tmp, addr,
8552 get_mem_index(s),
8553 rm | ISSIsAcqRel);
8554 break;
8555 case 3: /* stlh */
8556 gen_aa32_st16_iss(s, tmp, addr,
8557 get_mem_index(s),
8558 rm | ISSIsAcqRel);
8559 break;
8560 default:
8561 abort();
8563 tcg_temp_free_i32(tmp);
8565 } else if (is_ld) {
8566 switch (op1) {
8567 case 0: /* ldrex */
8568 gen_load_exclusive(s, rd, 15, addr, 2);
8569 break;
8570 case 1: /* ldrexd */
8571 gen_load_exclusive(s, rd, rd + 1, addr, 3);
8572 break;
8573 case 2: /* ldrexb */
8574 gen_load_exclusive(s, rd, 15, addr, 0);
8575 break;
8576 case 3: /* ldrexh */
8577 gen_load_exclusive(s, rd, 15, addr, 1);
8578 break;
8579 default:
8580 abort();
8582 } else {
8583 rm = insn & 0xf;
8584 switch (op1) {
8585 case 0: /* strex */
8586 gen_store_exclusive(s, rd, rm, 15, addr, 2);
8587 break;
8588 case 1: /* strexd */
8589 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
8590 break;
8591 case 2: /* strexb */
8592 gen_store_exclusive(s, rd, rm, 15, addr, 0);
8593 break;
8594 case 3: /* strexh */
8595 gen_store_exclusive(s, rd, rm, 15, addr, 1);
8596 break;
8597 default:
8598 abort();
8601 tcg_temp_free_i32(addr);
8603 if (is_lasr && is_ld) {
8604 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
8606 } else if ((insn & 0x00300f00) == 0) {
8607 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
8608 * - SWP, SWPB
8611 TCGv taddr;
8612 TCGMemOp opc = s->be_data;
8614 rm = (insn) & 0xf;
8616 if (insn & (1 << 22)) {
8617 opc |= MO_UB;
8618 } else {
8619 opc |= MO_UL | MO_ALIGN;
8622 addr = load_reg(s, rn);
8623 taddr = gen_aa32_addr(s, addr, opc);
8624 tcg_temp_free_i32(addr);
8626 tmp = load_reg(s, rm);
8627 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
8628 get_mem_index(s), opc);
8629 tcg_temp_free(taddr);
8630 store_reg(s, rd, tmp);
8631 } else {
8632 goto illegal_op;
8635 } else {
8636 int address_offset;
8637 bool load = insn & (1 << 20);
8638 bool wbit = insn & (1 << 21);
8639 bool pbit = insn & (1 << 24);
8640 bool doubleword = false;
8641 ISSInfo issinfo;
8643 /* Misc load/store */
8644 rn = (insn >> 16) & 0xf;
8645 rd = (insn >> 12) & 0xf;
8647 /* ISS not valid if writeback */
8648 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
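                    /*
                     * sh (insn bits [6:5]) is 1, 2 or 3 here: loads are
                     * LDRH, LDRSB or LDRSH respectively, a store with
                     * sh == 1 is STRH, and sh & 2 with the L bit clear
                     * encodes LDRD/STRD, handled as 'doubleword' below.
                     */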
8650 if (!load && (sh & 2)) {
8651 /* doubleword */
8652 ARCH(5TE);
8653 if (rd & 1) {
8654 /* UNPREDICTABLE; we choose to UNDEF */
8655 goto illegal_op;
8657 load = (sh & 1) == 0;
8658 doubleword = true;
8661 addr = load_reg(s, rn);
8662 if (pbit) {
8663 gen_add_datah_offset(s, insn, 0, addr);
8665 address_offset = 0;
8667 if (doubleword) {
8668 if (!load) {
8669 /* store */
8670 tmp = load_reg(s, rd);
8671 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8672 tcg_temp_free_i32(tmp);
8673 tcg_gen_addi_i32(addr, addr, 4);
8674 tmp = load_reg(s, rd + 1);
8675 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8676 tcg_temp_free_i32(tmp);
8677 } else {
8678 /* load */
8679 tmp = tcg_temp_new_i32();
8680 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8681 store_reg(s, rd, tmp);
8682 tcg_gen_addi_i32(addr, addr, 4);
8683 tmp = tcg_temp_new_i32();
8684 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8685 rd++;
8687 address_offset = -4;
8688 } else if (load) {
8689 /* load */
8690 tmp = tcg_temp_new_i32();
8691 switch (sh) {
8692 case 1:
8693 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
8694 issinfo);
8695 break;
8696 case 2:
8697 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
8698 issinfo);
8699 break;
8700 default:
8701 case 3:
8702 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
8703 issinfo);
8704 break;
8706 } else {
8707 /* store */
8708 tmp = load_reg(s, rd);
8709 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
8710 tcg_temp_free_i32(tmp);
8712 /* Perform base writeback before the loaded value to
8713 ensure correct behavior with overlapping index registers.
8714 ldrd with base writeback is undefined if the
8715 destination and index registers overlap. */
8716 if (!pbit) {
8717 gen_add_datah_offset(s, insn, address_offset, addr);
8718 store_reg(s, rn, addr);
8719 } else if (wbit) {
8720 if (address_offset)
8721 tcg_gen_addi_i32(addr, addr, address_offset);
8722 store_reg(s, rn, addr);
8723 } else {
8724 tcg_temp_free_i32(addr);
8726 if (load) {
8727 /* Complete the load. */
8728 store_reg(s, rd, tmp);
8731 break;
8732 case 0x4:
8733 case 0x5:
8734 goto do_ldst;
8735 case 0x6:
8736 case 0x7:
8737 if (insn & (1 << 4)) {
8738 ARCH(6);
8739 /* Armv6 Media instructions. */
8740 rm = insn & 0xf;
8741 rn = (insn >> 16) & 0xf;
8742 rd = (insn >> 12) & 0xf;
8743 rs = (insn >> 8) & 0xf;
8744 switch ((insn >> 23) & 3) {
8745 case 0: /* Parallel add/subtract. */
8746 op1 = (insn >> 20) & 7;
8747 tmp = load_reg(s, rn);
8748 tmp2 = load_reg(s, rm);
8749 sh = (insn >> 5) & 7;
8750 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8751 goto illegal_op;
8752 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
8753 tcg_temp_free_i32(tmp2);
8754 store_reg(s, rd, tmp);
8755 break;
8756 case 1:
8757 if ((insn & 0x00700020) == 0) {
8758 /* Halfword pack. */
8759 tmp = load_reg(s, rn);
8760 tmp2 = load_reg(s, rm);
8761 shift = (insn >> 7) & 0x1f;
8762 if (insn & (1 << 6)) {
8763 /* pkhtb */
8764 if (shift == 0)
8765 shift = 31;
8766 tcg_gen_sari_i32(tmp2, tmp2, shift);
8767 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8768 tcg_gen_ext16u_i32(tmp2, tmp2);
8769 } else {
8770 /* pkhbt */
8771 if (shift)
8772 tcg_gen_shli_i32(tmp2, tmp2, shift);
8773 tcg_gen_ext16u_i32(tmp, tmp);
8774 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8776 tcg_gen_or_i32(tmp, tmp, tmp2);
8777 tcg_temp_free_i32(tmp2);
8778 store_reg(s, rd, tmp);
8779 } else if ((insn & 0x00200020) == 0x00200000) {
8780 /* [us]sat */
8781 tmp = load_reg(s, rm);
8782 shift = (insn >> 7) & 0x1f;
8783 if (insn & (1 << 6)) {
8784 if (shift == 0)
8785 shift = 31;
8786 tcg_gen_sari_i32(tmp, tmp, shift);
8787 } else {
8788 tcg_gen_shli_i32(tmp, tmp, shift);
8790 sh = (insn >> 16) & 0x1f;
8791 tmp2 = tcg_const_i32(sh);
8792 if (insn & (1 << 22))
8793 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
8794 else
8795 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
8796 tcg_temp_free_i32(tmp2);
8797 store_reg(s, rd, tmp);
8798 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8799 /* [us]sat16 */
8800 tmp = load_reg(s, rm);
8801 sh = (insn >> 16) & 0x1f;
8802 tmp2 = tcg_const_i32(sh);
8803 if (insn & (1 << 22))
8804 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
8805 else
8806 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
8807 tcg_temp_free_i32(tmp2);
8808 store_reg(s, rd, tmp);
8809 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8810 /* Select bytes. */
8811 tmp = load_reg(s, rn);
8812 tmp2 = load_reg(s, rm);
8813 tmp3 = tcg_temp_new_i32();
8814 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8815 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8816 tcg_temp_free_i32(tmp3);
8817 tcg_temp_free_i32(tmp2);
8818 store_reg(s, rd, tmp);
8819 } else if ((insn & 0x000003e0) == 0x00000060) {
8820 tmp = load_reg(s, rm);
8821 shift = (insn >> 10) & 3;
8822 /* ??? In many cases it's not necessary to do a
8823 rotate, a shift is sufficient. */
8824 if (shift != 0)
8825 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8826 op1 = (insn >> 20) & 7;
8827 switch (op1) {
8828 case 0: gen_sxtb16(tmp); break;
8829 case 2: gen_sxtb(tmp); break;
8830 case 3: gen_sxth(tmp); break;
8831 case 4: gen_uxtb16(tmp); break;
8832 case 6: gen_uxtb(tmp); break;
8833 case 7: gen_uxth(tmp); break;
8834 default: goto illegal_op;
8836 if (rn != 15) {
8837 tmp2 = load_reg(s, rn);
8838 if ((op1 & 3) == 0) {
8839 gen_add16(tmp, tmp2);
8840 } else {
8841 tcg_gen_add_i32(tmp, tmp, tmp2);
8842 tcg_temp_free_i32(tmp2);
8845 store_reg(s, rd, tmp);
8846 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8847 /* rev */
8848 tmp = load_reg(s, rm);
8849 if (insn & (1 << 22)) {
8850 if (insn & (1 << 7)) {
8851 gen_revsh(tmp);
8852 } else {
8853 ARCH(6T2);
8854 gen_helper_rbit(tmp, tmp);
8856 } else {
8857 if (insn & (1 << 7))
8858 gen_rev16(tmp);
8859 else
8860 tcg_gen_bswap32_i32(tmp, tmp);
8862 store_reg(s, rd, tmp);
8863 } else {
8864 goto illegal_op;
8866 break;
8867 case 2: /* Multiplies (Type 3). */
8868 switch ((insn >> 20) & 0x7) {
8869 case 5:
8870 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8871 /* op2 not 00x or 11x : UNDEF */
8872 goto illegal_op;
8874 /* Signed multiply most significant [accumulate].
8875 (SMMUL, SMMLA, SMMLS) */
8876 tmp = load_reg(s, rm);
8877 tmp2 = load_reg(s, rs);
8878 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8880 if (rd != 15) {
8881 tmp = load_reg(s, rd);
8882 if (insn & (1 << 6)) {
8883 tmp64 = gen_subq_msw(tmp64, tmp);
8884 } else {
8885 tmp64 = gen_addq_msw(tmp64, tmp);
8888 if (insn & (1 << 5)) {
8889 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8891 tcg_gen_shri_i64(tmp64, tmp64, 32);
8892 tmp = tcg_temp_new_i32();
8893 tcg_gen_extrl_i64_i32(tmp, tmp64);
8894 tcg_temp_free_i64(tmp64);
8895 store_reg(s, rn, tmp);
8896 break;
8897 case 0:
8898 case 4:
8899 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8900 if (insn & (1 << 7)) {
8901 goto illegal_op;
8903 tmp = load_reg(s, rm);
8904 tmp2 = load_reg(s, rs);
8905 if (insn & (1 << 5))
8906 gen_swap_half(tmp2);
8907 gen_smul_dual(tmp, tmp2);
8908 if (insn & (1 << 22)) {
8909 /* smlald, smlsld */
8910 TCGv_i64 tmp64_2;
8912 tmp64 = tcg_temp_new_i64();
8913 tmp64_2 = tcg_temp_new_i64();
8914 tcg_gen_ext_i32_i64(tmp64, tmp);
8915 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
8916 tcg_temp_free_i32(tmp);
8917 tcg_temp_free_i32(tmp2);
8918 if (insn & (1 << 6)) {
8919 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8920 } else {
8921 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8923 tcg_temp_free_i64(tmp64_2);
8924 gen_addq(s, tmp64, rd, rn);
8925 gen_storeq_reg(s, rd, rn, tmp64);
8926 tcg_temp_free_i64(tmp64);
8927 } else {
8928 /* smuad, smusd, smlad, smlsd */
8929 if (insn & (1 << 6)) {
8930 /* This subtraction cannot overflow. */
8931 tcg_gen_sub_i32(tmp, tmp, tmp2);
8932 } else {
8933 /* This addition cannot overflow 32 bits;
8934 * however it may overflow considered as a
8935 * signed operation, in which case we must set
8936 * the Q flag.
8938 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8940 tcg_temp_free_i32(tmp2);
8941 if (rd != 15)
8943 tmp2 = load_reg(s, rd);
8944 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8945 tcg_temp_free_i32(tmp2);
8947 store_reg(s, rn, tmp);
8949 break;
8950 case 1:
8951 case 3:
8952 /* SDIV, UDIV */
8953 if (!dc_isar_feature(arm_div, s)) {
8954 goto illegal_op;
8956 if (((insn >> 5) & 7) || (rd != 15)) {
8957 goto illegal_op;
8959 tmp = load_reg(s, rm);
8960 tmp2 = load_reg(s, rs);
8961 if (insn & (1 << 21)) {
8962 gen_helper_udiv(tmp, tmp, tmp2);
8963 } else {
8964 gen_helper_sdiv(tmp, tmp, tmp2);
8966 tcg_temp_free_i32(tmp2);
8967 store_reg(s, rn, tmp);
8968 break;
8969 default:
8970 goto illegal_op;
8972 break;
8973 case 3:
8974 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
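                /*
                 * op1 packs insn bits [22:20] and [7:5]: 0 is USAD8/USADA8,
                 * the 0x2x values below are BFI/BFC, and the 0x12/0x32
                 * groups are SBFX/UBFX.
                 */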
8975 switch (op1) {
8976 case 0: /* Unsigned sum of absolute differences. */
8977 ARCH(6);
8978 tmp = load_reg(s, rm);
8979 tmp2 = load_reg(s, rs);
8980 gen_helper_usad8(tmp, tmp, tmp2);
8981 tcg_temp_free_i32(tmp2);
8982 if (rd != 15) {
8983 tmp2 = load_reg(s, rd);
8984 tcg_gen_add_i32(tmp, tmp, tmp2);
8985 tcg_temp_free_i32(tmp2);
8987 store_reg(s, rn, tmp);
8988 break;
8989 case 0x20: case 0x24: case 0x28: case 0x2c:
8990 /* Bitfield insert/clear. */
8991 ARCH(6T2);
8992 shift = (insn >> 7) & 0x1f;
8993 i = (insn >> 16) & 0x1f;
8994 if (i < shift) {
8995 /* UNPREDICTABLE; we choose to UNDEF */
8996 goto illegal_op;
8998 i = i + 1 - shift;
8999 if (rm == 15) {
9000 tmp = tcg_temp_new_i32();
9001 tcg_gen_movi_i32(tmp, 0);
9002 } else {
9003 tmp = load_reg(s, rm);
9005 if (i != 32) {
9006 tmp2 = load_reg(s, rd);
9007 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
9008 tcg_temp_free_i32(tmp2);
9010 store_reg(s, rd, tmp);
9011 break;
9012 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9013 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
9014 ARCH(6T2);
9015 tmp = load_reg(s, rm);
9016 shift = (insn >> 7) & 0x1f;
9017 i = ((insn >> 16) & 0x1f) + 1;
9018 if (shift + i > 32)
9019 goto illegal_op;
9020 if (i < 32) {
9021 if (op1 & 0x20) {
9022 tcg_gen_extract_i32(tmp, tmp, shift, i);
9023 } else {
9024 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9027 store_reg(s, rd, tmp);
9028 break;
9029 default:
9030 goto illegal_op;
9032 break;
9034 break;
9036 do_ldst:
9037 /* Check for undefined extension instructions
 9038              * per the ARM Bible, i.e.:
9039 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9041 sh = (0xf << 20) | (0xf << 4);
9042 if (op1 == 0x7 && ((insn & sh) == sh))
9044 goto illegal_op;
9046 /* load/store byte/word */
9047 rn = (insn >> 16) & 0xf;
9048 rd = (insn >> 12) & 0xf;
9049 tmp2 = load_reg(s, rn);
9050 if ((insn & 0x01200000) == 0x00200000) {
9051 /* ldrt/strt */
9052 i = get_a32_user_mem_index(s);
9053 } else {
9054 i = get_mem_index(s);
9056 if (insn & (1 << 24))
9057 gen_add_data_offset(s, insn, tmp2);
9058 if (insn & (1 << 20)) {
9059 /* load */
9060 tmp = tcg_temp_new_i32();
9061 if (insn & (1 << 22)) {
9062 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9063 } else {
9064 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9066 } else {
9067 /* store */
9068 tmp = load_reg(s, rd);
9069 if (insn & (1 << 22)) {
9070 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
9071 } else {
9072 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
9074 tcg_temp_free_i32(tmp);
9076 if (!(insn & (1 << 24))) {
9077 gen_add_data_offset(s, insn, tmp2);
9078 store_reg(s, rn, tmp2);
9079 } else if (insn & (1 << 21)) {
9080 store_reg(s, rn, tmp2);
9081 } else {
9082 tcg_temp_free_i32(tmp2);
9084 if (insn & (1 << 20)) {
9085 /* Complete the load. */
9086 store_reg_from_load(s, rd, tmp);
9088 break;
9089 case 0x08:
9090 case 0x09:
9092 int j, n, loaded_base;
9093 bool exc_return = false;
9094 bool is_load = extract32(insn, 20, 1);
9095 bool user = false;
9096 TCGv_i32 loaded_var;
9097 /* load/store multiple words */
9098 /* XXX: store correct base if write back */
9099 if (insn & (1 << 22)) {
9100 /* LDM (user), LDM (exception return) and STM (user) */
9101 if (IS_USER(s))
9102 goto illegal_op; /* only usable in supervisor mode */
9104 if (is_load && extract32(insn, 15, 1)) {
9105 exc_return = true;
9106 } else {
9107 user = true;
9110 rn = (insn >> 16) & 0xf;
9111 addr = load_reg(s, rn);
9113 /* compute total size */
9114 loaded_base = 0;
9115 loaded_var = NULL;
9116 n = 0;
9117 for (i = 0; i < 16; i++) {
9118 if (insn & (1 << i))
9119 n++;
9121 /* XXX: test invalid n == 0 case ? */
9122 if (insn & (1 << 23)) {
9123 if (insn & (1 << 24)) {
9124 /* pre increment */
9125 tcg_gen_addi_i32(addr, addr, 4);
9126 } else {
9127 /* post increment */
9129 } else {
9130 if (insn & (1 << 24)) {
9131 /* pre decrement */
9132 tcg_gen_addi_i32(addr, addr, -(n * 4));
9133 } else {
9134 /* post decrement */
9135 if (n != 1)
9136 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9139 j = 0;
9140 for (i = 0; i < 16; i++) {
9141 if (insn & (1 << i)) {
9142 if (is_load) {
9143 /* load */
9144 tmp = tcg_temp_new_i32();
9145 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9146 if (user) {
9147 tmp2 = tcg_const_i32(i);
9148 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
9149 tcg_temp_free_i32(tmp2);
9150 tcg_temp_free_i32(tmp);
9151 } else if (i == rn) {
9152 loaded_var = tmp;
9153 loaded_base = 1;
9154 } else if (i == 15 && exc_return) {
9155 store_pc_exc_ret(s, tmp);
9156 } else {
9157 store_reg_from_load(s, i, tmp);
9159 } else {
9160 /* store */
9161 if (i == 15) {
9162 tmp = tcg_temp_new_i32();
9163 tcg_gen_movi_i32(tmp, read_pc(s));
9164 } else if (user) {
9165 tmp = tcg_temp_new_i32();
9166 tmp2 = tcg_const_i32(i);
9167 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
9168 tcg_temp_free_i32(tmp2);
9169 } else {
9170 tmp = load_reg(s, i);
9172 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9173 tcg_temp_free_i32(tmp);
9175 j++;
9176 /* no need to add after the last transfer */
9177 if (j != n)
9178 tcg_gen_addi_i32(addr, addr, 4);
9181 if (insn & (1 << 21)) {
9182 /* write back */
9183 if (insn & (1 << 23)) {
9184 if (insn & (1 << 24)) {
9185 /* pre increment */
9186 } else {
9187 /* post increment */
9188 tcg_gen_addi_i32(addr, addr, 4);
9190 } else {
9191 if (insn & (1 << 24)) {
9192 /* pre decrement */
9193 if (n != 1)
9194 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9195 } else {
9196 /* post decrement */
9197 tcg_gen_addi_i32(addr, addr, -(n * 4));
9200 store_reg(s, rn, addr);
9201 } else {
9202 tcg_temp_free_i32(addr);
9204 if (loaded_base) {
9205 store_reg(s, rn, loaded_var);
9207 if (exc_return) {
9208 /* Restore CPSR from SPSR. */
9209 tmp = load_cpu_field(spsr);
9210 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9211 gen_io_start();
9213 gen_helper_cpsr_write_eret(cpu_env, tmp);
9214 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9215 gen_io_end();
9217 tcg_temp_free_i32(tmp);
9218 /* Must exit loop to check un-masked IRQs */
9219 s->base.is_jmp = DISAS_EXIT;
9222 break;
9223 case 0xa:
9224 case 0xb:
9226 int32_t offset;
9228 /* branch (and link) */
9229 if (insn & (1 << 24)) {
9230 tmp = tcg_temp_new_i32();
9231 tcg_gen_movi_i32(tmp, s->base.pc_next);
9232 store_reg(s, 14, tmp);
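                /*
                 * The 24-bit immediate is shifted left two bits and
                 * sign-extended, giving a byte offset of up to +/-32MB
                 * from the PC.
                 */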
9234 offset = sextract32(insn << 2, 0, 26);
9235 gen_jmp(s, read_pc(s) + offset);
9237 break;
9238 case 0xc:
9239 case 0xd:
9240 case 0xe:
9241 if (((insn >> 8) & 0xe) == 10) {
9242 /* VFP. */
9243 if (disas_vfp_insn(s, insn)) {
9244 goto illegal_op;
9246 } else if (disas_coproc_insn(s, insn)) {
9247 /* Coprocessor. */
9248 goto illegal_op;
9250 break;
9251 case 0xf:
9252 /* swi */
9253 gen_set_pc_im(s, s->base.pc_next);
9254 s->svc_imm = extract32(insn, 0, 24);
9255 s->base.is_jmp = DISAS_SWI;
9256 break;
9257 default:
9258 illegal_op:
9259 unallocated_encoding(s);
9260 break;
9265 static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
9268 * Return true if this is a 16 bit instruction. We must be precise
9269 * about this (matching the decode).
9271 if ((insn >> 11) < 0x1d) {
9272 /* Definitely a 16-bit instruction */
9273 return true;
9276 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
9277 * first half of a 32-bit Thumb insn. Thumb-1 cores might
9278 * end up actually treating this as two 16-bit insns, though,
9279 * if it's half of a bl/blx pair that might span a page boundary.
9281 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
9282 arm_dc_feature(s, ARM_FEATURE_M)) {
9283 /* Thumb2 cores (including all M profile ones) always treat
9284 * 32-bit insns as 32-bit.
9286 return false;
9289 if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
9290 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
9291 * is not on the next page; we merge this into a 32-bit
9292 * insn.
9294 return false;
9296 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
9297 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
9298 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
9299 * -- handle as single 16 bit insn
9301 return true;
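    /*
     * For example, a first halfword of 0xb500 (top five bits 0b10110) is a
     * complete 16-bit insn, while 0xe92d (top five bits 0b11101) is the
     * first half of a 32-bit insn on a Thumb-2 core.
     */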
9304 /* Return true if this is a Thumb-2 logical op. */
9305 static int
9306 thumb2_logic_op(int op)
9308 return (op < 8);
9311 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9312 then set condition code flags based on the result of the operation.
9313 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9314 to the high bit of T1.
9315 Returns zero if the opcode is valid. */
9317 static int
9318 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9319 TCGv_i32 t0, TCGv_i32 t1)
9321 int logic_cc;
9323 logic_cc = 0;
9324 switch (op) {
9325 case 0: /* and */
9326 tcg_gen_and_i32(t0, t0, t1);
9327 logic_cc = conds;
9328 break;
9329 case 1: /* bic */
9330 tcg_gen_andc_i32(t0, t0, t1);
9331 logic_cc = conds;
9332 break;
9333 case 2: /* orr */
9334 tcg_gen_or_i32(t0, t0, t1);
9335 logic_cc = conds;
9336 break;
9337 case 3: /* orn */
9338 tcg_gen_orc_i32(t0, t0, t1);
9339 logic_cc = conds;
9340 break;
9341 case 4: /* eor */
9342 tcg_gen_xor_i32(t0, t0, t1);
9343 logic_cc = conds;
9344 break;
9345 case 8: /* add */
9346 if (conds)
9347 gen_add_CC(t0, t0, t1);
9348 else
9349 tcg_gen_add_i32(t0, t0, t1);
9350 break;
9351 case 10: /* adc */
9352 if (conds)
9353 gen_adc_CC(t0, t0, t1);
9354 else
9355 gen_adc(t0, t1);
9356 break;
9357 case 11: /* sbc */
9358 if (conds) {
9359 gen_sbc_CC(t0, t0, t1);
9360 } else {
9361 gen_sub_carry(t0, t0, t1);
9363 break;
9364 case 13: /* sub */
9365 if (conds)
9366 gen_sub_CC(t0, t0, t1);
9367 else
9368 tcg_gen_sub_i32(t0, t0, t1);
9369 break;
9370 case 14: /* rsb */
9371 if (conds)
9372 gen_sub_CC(t0, t1, t0);
9373 else
9374 tcg_gen_sub_i32(t0, t1, t0);
9375 break;
9376 default: /* 5, 6, 7, 9, 12, 15. */
9377 return 1;
9379 if (logic_cc) {
9380 gen_logic_CC(t0);
9381 if (shifter_out)
9382 gen_set_CF_bit31(t1);
9384 return 0;
9387 /* Translate a 32-bit thumb instruction. */
9388 static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9390 uint32_t imm, shift, offset;
9391 uint32_t rd, rn, rm, rs;
9392 TCGv_i32 tmp;
9393 TCGv_i32 tmp2;
9394 TCGv_i32 tmp3;
9395 TCGv_i32 addr;
9396 TCGv_i64 tmp64;
9397 int op;
9398 int shiftop;
9399 int conds;
9400 int logic_cc;
9403 * ARMv6-M supports a limited subset of Thumb2 instructions.
9404 * Other Thumb1 architectures allow only 32-bit
9405 * combined BL/BLX prefix and suffix.
9407 if (arm_dc_feature(s, ARM_FEATURE_M) &&
9408 !arm_dc_feature(s, ARM_FEATURE_V7)) {
9409 int i;
9410 bool found = false;
9411 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
9412 0xf3b08040 /* dsb */,
9413 0xf3b08050 /* dmb */,
9414 0xf3b08060 /* isb */,
9415 0xf3e08000 /* mrs */,
9416 0xf000d000 /* bl */};
9417 static const uint32_t armv6m_mask[] = {0xffe0d000,
9418 0xfff0d0f0,
9419 0xfff0d0f0,
9420 0xfff0d0f0,
9421 0xffe0d000,
9422 0xf800d000};
9424 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
9425 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
9426 found = true;
9427 break;
9430 if (!found) {
9431 goto illegal_op;
9433 } else if ((insn & 0xf800e800) != 0xf000e800) {
9434 ARCH(6T2);
9437 rn = (insn >> 16) & 0xf;
9438 rs = (insn >> 12) & 0xf;
9439 rd = (insn >> 8) & 0xf;
9440 rm = insn & 0xf;
9441 switch ((insn >> 25) & 0xf) {
9442 case 0: case 1: case 2: case 3:
9443 /* 16-bit instructions. Should never happen. */
9444 abort();
9445 case 4:
9446 if (insn & (1 << 22)) {
9447 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
9448 * - load/store doubleword, load/store exclusive, ldacq/strel,
9449 * table branch, TT.
9451 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
9452 arm_dc_feature(s, ARM_FEATURE_V8)) {
 9453                 /* 0b1110_1001_0111_1111_1110_1001_0111_1111
9454 * - SG (v8M only)
9455 * The bulk of the behaviour for this instruction is implemented
9456 * in v7m_handle_execute_nsc(), which deals with the insn when
9457 * it is executed by a CPU in non-secure state from memory
9458 * which is Secure & NonSecure-Callable.
9459 * Here we only need to handle the remaining cases:
9460 * * in NS memory (including the "security extension not
9461 * implemented" case) : NOP
9462 * * in S memory but CPU already secure (clear IT bits)
9463 * We know that the attribute for the memory this insn is
9464 * in must match the current CPU state, because otherwise
9465 * get_phys_addr_pmsav8 would have generated an exception.
9467 if (s->v8m_secure) {
9468 /* Like the IT insn, we don't need to generate any code */
9469 s->condexec_cond = 0;
9470 s->condexec_mask = 0;
9472 } else if (insn & 0x01200000) {
9473 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9474 * - load/store dual (post-indexed)
9475 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
9476 * - load/store dual (literal and immediate)
9477 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9478 * - load/store dual (pre-indexed)
9480 bool wback = extract32(insn, 21, 1);
9482 if (rn == 15 && (insn & (1 << 21))) {
9483 /* UNPREDICTABLE */
9484 goto illegal_op;
9487 addr = add_reg_for_lit(s, rn, 0);
9488 offset = (insn & 0xff) * 4;
9489 if ((insn & (1 << 23)) == 0) {
9490 offset = -offset;
9493 if (s->v8m_stackcheck && rn == 13 && wback) {
9495 * Here 'addr' is the current SP; if offset is +ve we're
9496 * moving SP up, else down. It is UNKNOWN whether the limit
9497 * check triggers when SP starts below the limit and ends
9498 * up above it; check whichever of the current and final
9499 * SP is lower, so QEMU will trigger in that situation.
9501 if ((int32_t)offset < 0) {
9502 TCGv_i32 newsp = tcg_temp_new_i32();
9504 tcg_gen_addi_i32(newsp, addr, offset);
9505 gen_helper_v8m_stackcheck(cpu_env, newsp);
9506 tcg_temp_free_i32(newsp);
9507 } else {
9508 gen_helper_v8m_stackcheck(cpu_env, addr);
9512 if (insn & (1 << 24)) {
9513 tcg_gen_addi_i32(addr, addr, offset);
9514 offset = 0;
9516 if (insn & (1 << 20)) {
9517 /* ldrd */
9518 tmp = tcg_temp_new_i32();
9519 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9520 store_reg(s, rs, tmp);
9521 tcg_gen_addi_i32(addr, addr, 4);
9522 tmp = tcg_temp_new_i32();
9523 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9524 store_reg(s, rd, tmp);
9525 } else {
9526 /* strd */
9527 tmp = load_reg(s, rs);
9528 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9529 tcg_temp_free_i32(tmp);
9530 tcg_gen_addi_i32(addr, addr, 4);
9531 tmp = load_reg(s, rd);
9532 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9533 tcg_temp_free_i32(tmp);
9535 if (wback) {
9536 /* Base writeback. */
9537 tcg_gen_addi_i32(addr, addr, offset - 4);
9538 store_reg(s, rn, addr);
9539 } else {
9540 tcg_temp_free_i32(addr);
9542 } else if ((insn & (1 << 23)) == 0) {
9543 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
9544 * - load/store exclusive word
9545 * - TT (v8M only)
9547 if (rs == 15) {
9548 if (!(insn & (1 << 20)) &&
9549 arm_dc_feature(s, ARM_FEATURE_M) &&
9550 arm_dc_feature(s, ARM_FEATURE_V8)) {
9551 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
9552 * - TT (v8M only)
9554 bool alt = insn & (1 << 7);
9555 TCGv_i32 addr, op, ttresp;
9557 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
9558 /* we UNDEF for these UNPREDICTABLE cases */
9559 goto illegal_op;
9562 if (alt && !s->v8m_secure) {
9563 goto illegal_op;
9566 addr = load_reg(s, rn);
9567 op = tcg_const_i32(extract32(insn, 6, 2));
9568 ttresp = tcg_temp_new_i32();
9569 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
9570 tcg_temp_free_i32(addr);
9571 tcg_temp_free_i32(op);
9572 store_reg(s, rd, ttresp);
9573 break;
9575 goto illegal_op;
9577 addr = tcg_temp_local_new_i32();
9578 load_reg_var(s, addr, rn);
9579 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
9580 if (insn & (1 << 20)) {
9581 gen_load_exclusive(s, rs, 15, addr, 2);
9582 } else {
9583 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9585 tcg_temp_free_i32(addr);
9586 } else if ((insn & (7 << 5)) == 0) {
9587 /* Table Branch. */
9588 addr = load_reg(s, rn);
9589 tmp = load_reg(s, rm);
9590 tcg_gen_add_i32(addr, addr, tmp);
9591 if (insn & (1 << 4)) {
9592 /* tbh */
9593 tcg_gen_add_i32(addr, addr, tmp);
9594 tcg_temp_free_i32(tmp);
9595 tmp = tcg_temp_new_i32();
9596 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9597 } else { /* tbb */
9598 tcg_temp_free_i32(tmp);
9599 tmp = tcg_temp_new_i32();
9600 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9602 tcg_temp_free_i32(addr);
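                /*
                 * The loaded table entry is a halfword count: double it
                 * and add it to the PC to form the branch target.
                 */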
9603 tcg_gen_shli_i32(tmp, tmp, 1);
9604 tcg_gen_addi_i32(tmp, tmp, read_pc(s));
9605 store_reg(s, 15, tmp);
9606 } else {
9607 bool is_lasr = false;
9608 bool is_ld = extract32(insn, 20, 1);
9609 int op2 = (insn >> 6) & 0x3;
9610 op = (insn >> 4) & 0x3;
9611 switch (op2) {
9612 case 0:
9613 goto illegal_op;
9614 case 1:
9615 /* Load/store exclusive byte/halfword/doubleword */
9616 if (op == 2) {
9617 goto illegal_op;
9619 ARCH(7);
9620 break;
9621 case 2:
9622 /* Load-acquire/store-release */
9623 if (op == 3) {
9624 goto illegal_op;
9626 /* Fall through */
9627 case 3:
9628 /* Load-acquire/store-release exclusive */
9629 ARCH(8);
9630 is_lasr = true;
9631 break;
9634 if (is_lasr && !is_ld) {
9635 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
9638 addr = tcg_temp_local_new_i32();
9639 load_reg_var(s, addr, rn);
9640 if (!(op2 & 1)) {
9641 if (is_ld) {
9642 tmp = tcg_temp_new_i32();
9643 switch (op) {
9644 case 0: /* ldab */
9645 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
9646 rs | ISSIsAcqRel);
9647 break;
9648 case 1: /* ldah */
9649 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9650 rs | ISSIsAcqRel);
9651 break;
9652 case 2: /* lda */
9653 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
9654 rs | ISSIsAcqRel);
9655 break;
9656 default:
9657 abort();
9659 store_reg(s, rs, tmp);
9660 } else {
9661 tmp = load_reg(s, rs);
9662 switch (op) {
9663 case 0: /* stlb */
9664 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
9665 rs | ISSIsAcqRel);
9666 break;
9667 case 1: /* stlh */
9668 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
9669 rs | ISSIsAcqRel);
9670 break;
9671 case 2: /* stl */
9672 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
9673 rs | ISSIsAcqRel);
9674 break;
9675 default:
9676 abort();
9678 tcg_temp_free_i32(tmp);
9680 } else if (is_ld) {
9681 gen_load_exclusive(s, rs, rd, addr, op);
9682 } else {
9683 gen_store_exclusive(s, rm, rs, rd, addr, op);
9685 tcg_temp_free_i32(addr);
9687 if (is_lasr && is_ld) {
9688 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
9691 } else {
9692 /* Load/store multiple, RFE, SRS. */
9693 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
9694 /* RFE, SRS: not available in user mode or on M profile */
9695 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9696 goto illegal_op;
9698 if (insn & (1 << 20)) {
9699 /* rfe */
9700 addr = load_reg(s, rn);
9701 if ((insn & (1 << 24)) == 0)
9702 tcg_gen_addi_i32(addr, addr, -8);
9703 /* Load PC into tmp and CPSR into tmp2. */
9704 tmp = tcg_temp_new_i32();
9705 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9706 tcg_gen_addi_i32(addr, addr, 4);
9707 tmp2 = tcg_temp_new_i32();
9708 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9709 if (insn & (1 << 21)) {
9710 /* Base writeback. */
9711 if (insn & (1 << 24)) {
9712 tcg_gen_addi_i32(addr, addr, 4);
9713 } else {
9714 tcg_gen_addi_i32(addr, addr, -4);
9716 store_reg(s, rn, addr);
9717 } else {
9718 tcg_temp_free_i32(addr);
9720 gen_rfe(s, tmp, tmp2);
9721 } else {
9722 /* srs */
9723 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9724 insn & (1 << 21));
9726 } else {
9727 int i, loaded_base = 0;
9728 TCGv_i32 loaded_var;
9729 bool wback = extract32(insn, 21, 1);
9730 /* Load/store multiple. */
9731 addr = load_reg(s, rn);
9732 offset = 0;
9733 for (i = 0; i < 16; i++) {
9734 if (insn & (1 << i))
9735 offset += 4;
9738 if (insn & (1 << 24)) {
9739 tcg_gen_addi_i32(addr, addr, -offset);
9742 if (s->v8m_stackcheck && rn == 13 && wback) {
9744 * If the writeback is incrementing SP rather than
9745 * decrementing it, and the initial SP is below the
9746 * stack limit but the final written-back SP would
 9747                      * be above, then we must not perform any memory
9748 * accesses, but it is IMPDEF whether we generate
9749 * an exception. We choose to do so in this case.
9750 * At this point 'addr' is the lowest address, so
9751 * either the original SP (if incrementing) or our
9752 * final SP (if decrementing), so that's what we check.
9754 gen_helper_v8m_stackcheck(cpu_env, addr);
9757 loaded_var = NULL;
9758 for (i = 0; i < 16; i++) {
9759 if ((insn & (1 << i)) == 0)
9760 continue;
9761 if (insn & (1 << 20)) {
9762 /* Load. */
9763 tmp = tcg_temp_new_i32();
9764 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9765 if (i == 15) {
9766 gen_bx_excret(s, tmp);
9767 } else if (i == rn) {
9768 loaded_var = tmp;
9769 loaded_base = 1;
9770 } else {
9771 store_reg(s, i, tmp);
9773 } else {
9774 /* Store. */
9775 tmp = load_reg(s, i);
9776 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9777 tcg_temp_free_i32(tmp);
9779 tcg_gen_addi_i32(addr, addr, 4);
9781 if (loaded_base) {
9782 store_reg(s, rn, loaded_var);
9784 if (wback) {
9785 /* Base register writeback. */
9786 if (insn & (1 << 24)) {
9787 tcg_gen_addi_i32(addr, addr, -offset);
9789 /* Fault if writeback register is in register list. */
9790 if (insn & (1 << rn))
9791 goto illegal_op;
9792 store_reg(s, rn, addr);
9793 } else {
9794 tcg_temp_free_i32(addr);
9798 break;
9799 case 5:
9801 op = (insn >> 21) & 0xf;
9802 if (op == 6) {
9803 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9804 goto illegal_op;
9806 /* Halfword pack. */
9807 tmp = load_reg(s, rn);
9808 tmp2 = load_reg(s, rm);
9809 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9810 if (insn & (1 << 5)) {
9811 /* pkhtb */
9812 if (shift == 0)
9813 shift = 31;
9814 tcg_gen_sari_i32(tmp2, tmp2, shift);
9815 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9816 tcg_gen_ext16u_i32(tmp2, tmp2);
9817 } else {
9818 /* pkhbt */
9819 if (shift)
9820 tcg_gen_shli_i32(tmp2, tmp2, shift);
9821 tcg_gen_ext16u_i32(tmp, tmp);
9822 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9824 tcg_gen_or_i32(tmp, tmp, tmp2);
9825 tcg_temp_free_i32(tmp2);
9826 store_reg(s, rd, tmp);
9827 } else {
9828 /* Data processing register constant shift. */
9829 if (rn == 15) {
9830 tmp = tcg_temp_new_i32();
9831 tcg_gen_movi_i32(tmp, 0);
9832 } else {
9833 tmp = load_reg(s, rn);
9835 tmp2 = load_reg(s, rm);
9837 shiftop = (insn >> 4) & 3;
9838 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9839 conds = (insn & (1 << 20)) != 0;
9840 logic_cc = (conds && thumb2_logic_op(op));
9841 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9842 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9843 goto illegal_op;
9844 tcg_temp_free_i32(tmp2);
9845 if (rd == 13 &&
9846 ((op == 2 && rn == 15) ||
9847 (op == 8 && rn == 13) ||
9848 (op == 13 && rn == 13))) {
9849 /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
9850 store_sp_checked(s, tmp);
9851 } else if (rd != 15) {
9852 store_reg(s, rd, tmp);
9853 } else {
9854 tcg_temp_free_i32(tmp);
9857 break;
9858 case 13: /* Misc data processing. */
9859 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9860 if (op < 4 && (insn & 0xf000) != 0xf000)
9861 goto illegal_op;
9862 switch (op) {
9863 case 0: /* Register controlled shift. */
9864 tmp = load_reg(s, rn);
9865 tmp2 = load_reg(s, rm);
9866 if ((insn & 0x70) != 0)
9867 goto illegal_op;
9869 * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
9870 * - MOV, MOVS (register-shifted register), flagsetting
9872 op = (insn >> 21) & 3;
9873 logic_cc = (insn & (1 << 20)) != 0;
9874 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9875 if (logic_cc)
9876 gen_logic_CC(tmp);
9877 store_reg(s, rd, tmp);
9878 break;
9879 case 1: /* Sign/zero extend. */
9880 op = (insn >> 20) & 7;
9881 switch (op) {
9882 case 0: /* SXTAH, SXTH */
9883 case 1: /* UXTAH, UXTH */
9884 case 4: /* SXTAB, SXTB */
9885 case 5: /* UXTAB, UXTB */
9886 break;
9887 case 2: /* SXTAB16, SXTB16 */
9888 case 3: /* UXTAB16, UXTB16 */
9889 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9890 goto illegal_op;
9892 break;
9893 default:
9894 goto illegal_op;
9896 if (rn != 15) {
9897 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9898 goto illegal_op;
9901 tmp = load_reg(s, rm);
9902 shift = (insn >> 4) & 3;
9903 /* ??? In many cases it's not necessary to do a
9904 rotate, a shift is sufficient. */
9905 if (shift != 0)
9906 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9907 op = (insn >> 20) & 7;
9908 switch (op) {
9909 case 0: gen_sxth(tmp); break;
9910 case 1: gen_uxth(tmp); break;
9911 case 2: gen_sxtb16(tmp); break;
9912 case 3: gen_uxtb16(tmp); break;
9913 case 4: gen_sxtb(tmp); break;
9914 case 5: gen_uxtb(tmp); break;
9915 default:
9916 g_assert_not_reached();
9918 if (rn != 15) {
9919 tmp2 = load_reg(s, rn);
9920 if ((op >> 1) == 1) {
9921 gen_add16(tmp, tmp2);
9922 } else {
9923 tcg_gen_add_i32(tmp, tmp, tmp2);
9924 tcg_temp_free_i32(tmp2);
9927 store_reg(s, rd, tmp);
9928 break;
9929 case 2: /* SIMD add/subtract. */
9930 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9931 goto illegal_op;
9933 op = (insn >> 20) & 7;
9934 shift = (insn >> 4) & 7;
9935 if ((op & 3) == 3 || (shift & 3) == 3)
9936 goto illegal_op;
9937 tmp = load_reg(s, rn);
9938 tmp2 = load_reg(s, rm);
9939 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
9940 tcg_temp_free_i32(tmp2);
9941 store_reg(s, rd, tmp);
9942 break;
9943 case 3: /* Other data processing. */
9944 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9945 if (op < 4) {
9946 /* Saturating add/subtract. */
9947 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9948 goto illegal_op;
9950 tmp = load_reg(s, rn);
9951 tmp2 = load_reg(s, rm);
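                /*
                 * QADD, QSUB, QDADD, QDSUB: bit 1 of op selects subtract
                 * and bit 0 the doubling forms, where rn is first
                 * saturatingly added to itself rather than going through
                 * a separate doubling helper.
                 */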
9952 if (op & 1)
9953 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp);
9954 if (op & 2)
9955 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9956 else
9957 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
9958 tcg_temp_free_i32(tmp2);
9959 } else {
9960 switch (op) {
9961 case 0x0a: /* rbit */
9962 case 0x08: /* rev */
9963 case 0x09: /* rev16 */
9964 case 0x0b: /* revsh */
9965 case 0x18: /* clz */
9966 break;
9967 case 0x10: /* sel */
9968 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9969 goto illegal_op;
9971 break;
9972 case 0x20: /* crc32/crc32c */
9973 case 0x21:
9974 case 0x22:
9975 case 0x28:
9976 case 0x29:
9977 case 0x2a:
9978 if (!dc_isar_feature(aa32_crc32, s)) {
9979 goto illegal_op;
9981 break;
9982 default:
9983 goto illegal_op;
9985 tmp = load_reg(s, rn);
9986 switch (op) {
9987 case 0x0a: /* rbit */
9988 gen_helper_rbit(tmp, tmp);
9989 break;
9990 case 0x08: /* rev */
9991 tcg_gen_bswap32_i32(tmp, tmp);
9992 break;
9993 case 0x09: /* rev16 */
9994 gen_rev16(tmp);
9995 break;
9996 case 0x0b: /* revsh */
9997 gen_revsh(tmp);
9998 break;
9999 case 0x10: /* sel */
10000 tmp2 = load_reg(s, rm);
10001 tmp3 = tcg_temp_new_i32();
10002 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
10003 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
10004 tcg_temp_free_i32(tmp3);
10005 tcg_temp_free_i32(tmp2);
10006 break;
10007 case 0x18: /* clz */
10008 tcg_gen_clzi_i32(tmp, tmp, 32);
10009 break;
10010 case 0x20:
10011 case 0x21:
10012 case 0x22:
10013 case 0x28:
10014 case 0x29:
10015 case 0x2a:
10017 /* crc32/crc32c */
10018 uint32_t sz = op & 0x3;
10019 uint32_t c = op & 0x8;
10021 tmp2 = load_reg(s, rm);
10022 if (sz == 0) {
10023 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10024 } else if (sz == 1) {
10025 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10027 tmp3 = tcg_const_i32(1 << sz);
10028 if (c) {
10029 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10030 } else {
10031 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10033 tcg_temp_free_i32(tmp2);
10034 tcg_temp_free_i32(tmp3);
10035 break;
10037 default:
10038 g_assert_not_reached();
10041 store_reg(s, rd, tmp);
10042 break;
10043 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
10044 switch ((insn >> 20) & 7) {
10045 case 0: /* 32 x 32 -> 32 */
10046 case 7: /* Unsigned sum of absolute differences. */
10047 break;
10048 case 1: /* 16 x 16 -> 32 */
10049 case 2: /* Dual multiply add. */
10050 case 3: /* 32 * 16 -> 32msb */
10051 case 4: /* Dual multiply subtract. */
10052 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10053 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10054 goto illegal_op;
10056 break;
10058 op = (insn >> 4) & 0xf;
10059 tmp = load_reg(s, rn);
10060 tmp2 = load_reg(s, rm);
10061 switch ((insn >> 20) & 7) {
10062 case 0: /* 32 x 32 -> 32 */
10063 tcg_gen_mul_i32(tmp, tmp, tmp2);
10064 tcg_temp_free_i32(tmp2);
10065 if (rs != 15) {
10066 tmp2 = load_reg(s, rs);
10067 if (op)
10068 tcg_gen_sub_i32(tmp, tmp2, tmp);
10069 else
10070 tcg_gen_add_i32(tmp, tmp, tmp2);
10071 tcg_temp_free_i32(tmp2);
10073 break;
10074 case 1: /* 16 x 16 -> 32 */
10075 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10076 tcg_temp_free_i32(tmp2);
10077 if (rs != 15) {
10078 tmp2 = load_reg(s, rs);
10079 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10080 tcg_temp_free_i32(tmp2);
10082 break;
10083 case 2: /* Dual multiply add. */
10084 case 4: /* Dual multiply subtract. */
10085 if (op)
10086 gen_swap_half(tmp2);
10087 gen_smul_dual(tmp, tmp2);
10088 if (insn & (1 << 22)) {
10089 /* This subtraction cannot overflow. */
10090 tcg_gen_sub_i32(tmp, tmp, tmp2);
10091 } else {
10092 /* This addition cannot overflow 32 bits;
10093 * however it may overflow considered as a signed
10094 * operation, in which case we must set the Q flag.
10096 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10098 tcg_temp_free_i32(tmp2);
10099 if (rs != 15)
10101 tmp2 = load_reg(s, rs);
10102 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10103 tcg_temp_free_i32(tmp2);
10105 break;
10106 case 3: /* 32 * 16 -> 32msb */
10107 if (op)
10108 tcg_gen_sari_i32(tmp2, tmp2, 16);
10109 else
10110 gen_sxth(tmp2);
10111 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10112 tcg_gen_shri_i64(tmp64, tmp64, 16);
10113 tmp = tcg_temp_new_i32();
10114 tcg_gen_extrl_i64_i32(tmp, tmp64);
10115 tcg_temp_free_i64(tmp64);
10116 if (rs != 15)
10118 tmp2 = load_reg(s, rs);
10119 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10120 tcg_temp_free_i32(tmp2);
10122 break;
10123 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10124 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10125 if (rs != 15) {
10126 tmp = load_reg(s, rs);
10127 if (insn & (1 << 20)) {
10128 tmp64 = gen_addq_msw(tmp64, tmp);
10129 } else {
10130 tmp64 = gen_subq_msw(tmp64, tmp);
10133 if (insn & (1 << 4)) {
10134 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10136 tcg_gen_shri_i64(tmp64, tmp64, 32);
10137 tmp = tcg_temp_new_i32();
10138 tcg_gen_extrl_i64_i32(tmp, tmp64);
10139 tcg_temp_free_i64(tmp64);
10140 break;
10141 case 7: /* Unsigned sum of absolute differences. */
10142 gen_helper_usad8(tmp, tmp, tmp2);
10143 tcg_temp_free_i32(tmp2);
10144 if (rs != 15) {
10145 tmp2 = load_reg(s, rs);
10146 tcg_gen_add_i32(tmp, tmp, tmp2);
10147 tcg_temp_free_i32(tmp2);
10149 break;
10151 store_reg(s, rd, tmp);
10152 break;
10153 case 6: case 7: /* 64-bit multiply, Divide. */
10154 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
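            /*
             * op packs insn bits [22:20] into [6:4] and [7:4] into [3:0]:
             * (op & 0x50) == 0x10 is SDIV/UDIV, (op & 0xe) == 0xc is
             * SMLALD/SMLSLD, and the rest are the 64-bit multiplies
             * (UMULL/SMULL, UMLAL/SMLAL, SMLALxy, UMAAL).
             */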
10155 tmp = load_reg(s, rn);
10156 tmp2 = load_reg(s, rm);
10157 if ((op & 0x50) == 0x10) {
10158 /* sdiv, udiv */
10159 if (!dc_isar_feature(thumb_div, s)) {
10160 goto illegal_op;
10162 if (op & 0x20)
10163 gen_helper_udiv(tmp, tmp, tmp2);
10164 else
10165 gen_helper_sdiv(tmp, tmp, tmp2);
10166 tcg_temp_free_i32(tmp2);
10167 store_reg(s, rd, tmp);
10168 } else if ((op & 0xe) == 0xc) {
10169 /* Dual multiply accumulate long. */
10170 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10171 tcg_temp_free_i32(tmp);
10172 tcg_temp_free_i32(tmp2);
10173 goto illegal_op;
10175 if (op & 1)
10176 gen_swap_half(tmp2);
10177 gen_smul_dual(tmp, tmp2);
10178 if (op & 0x10) {
10179 tcg_gen_sub_i32(tmp, tmp, tmp2);
10180 } else {
10181 tcg_gen_add_i32(tmp, tmp, tmp2);
10183 tcg_temp_free_i32(tmp2);
10184 /* BUGFIX */
10185 tmp64 = tcg_temp_new_i64();
10186 tcg_gen_ext_i32_i64(tmp64, tmp);
10187 tcg_temp_free_i32(tmp);
10188 gen_addq(s, tmp64, rs, rd);
10189 gen_storeq_reg(s, rs, rd, tmp64);
10190 tcg_temp_free_i64(tmp64);
10191 } else {
10192 if (op & 0x20) {
10193 /* Unsigned 64-bit multiply */
10194 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
10195 } else {
10196 if (op & 8) {
10197 /* smlalxy */
10198 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10199 tcg_temp_free_i32(tmp2);
10200 tcg_temp_free_i32(tmp);
10201 goto illegal_op;
10203 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10204 tcg_temp_free_i32(tmp2);
10205 tmp64 = tcg_temp_new_i64();
10206 tcg_gen_ext_i32_i64(tmp64, tmp);
10207 tcg_temp_free_i32(tmp);
10208 } else {
10209 /* Signed 64-bit multiply */
10210 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10213 if (op & 4) {
10214 /* umaal */
10215 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10216 tcg_temp_free_i64(tmp64);
10217 goto illegal_op;
10219 gen_addq_lo(s, tmp64, rs);
10220 gen_addq_lo(s, tmp64, rd);
10221 } else if (op & 0x40) {
10222 /* 64-bit accumulate. */
10223 gen_addq(s, tmp64, rs, rd);
10225 gen_storeq_reg(s, rs, rd, tmp64);
10226 tcg_temp_free_i64(tmp64);
10228 break;
10230 break;
10231 case 6: case 7: case 14: case 15:
10232 /* Coprocessor. */
10233 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10234 /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
10235 if (extract32(insn, 24, 2) == 3) {
10236 goto illegal_op; /* op0 = 0b11 : unallocated */
10240 * Decode VLLDM and VLSTM first: these are nonstandard because:
10241 * * if there is no FPU then these insns must NOP in
10242 * Secure state and UNDEF in Nonsecure state
10243 * * if there is an FPU then these insns do not have
10244 * the usual behaviour that disas_vfp_insn() provides of
10245 * being controlled by CPACR/NSACR enable bits or the
10246 * lazy-stacking logic.
10248 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
10249 (insn & 0xffa00f00) == 0xec200a00) {
10250 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
10251 * - VLLDM, VLSTM
10252 * We choose to UNDEF if the RAZ bits are non-zero.
10254 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
10255 goto illegal_op;
10258 if (arm_dc_feature(s, ARM_FEATURE_VFP)) {
10259 TCGv_i32 fptr = load_reg(s, rn);
10261 if (extract32(insn, 20, 1)) {
10262 gen_helper_v7m_vlldm(cpu_env, fptr);
10263 } else {
10264 gen_helper_v7m_vlstm(cpu_env, fptr);
10266 tcg_temp_free_i32(fptr);
10268 /* End the TB, because we have updated FP control bits */
10269 s->base.is_jmp = DISAS_UPDATE;
10271 break;
10273 if (arm_dc_feature(s, ARM_FEATURE_VFP) &&
10274 ((insn >> 8) & 0xe) == 10) {
10275 /* FP, and the CPU supports it */
10276 if (disas_vfp_insn(s, insn)) {
10277 goto illegal_op;
10279 break;
10282 /* All other insns: NOCP */
10283 gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
10284 default_exception_el(s));
10285 break;
10287 if ((insn & 0xfe000a00) == 0xfc000800
10288 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10289 /* The Thumb2 and ARM encodings are identical. */
10290 if (disas_neon_insn_3same_ext(s, insn)) {
10291 goto illegal_op;
10293 } else if ((insn & 0xff000a00) == 0xfe000800
10294 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10295 /* The Thumb2 and ARM encodings are identical. */
10296 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
10297 goto illegal_op;
10299 } else if (((insn >> 24) & 3) == 3) {
10300 /* Translate into the equivalent ARM encoding. */
10301 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
10302 if (disas_neon_data_insn(s, insn)) {
10303 goto illegal_op;
10305 } else if (((insn >> 8) & 0xe) == 10) {
10306 if (disas_vfp_insn(s, insn)) {
10307 goto illegal_op;
10309 } else {
10310 if (insn & (1 << 28))
10311 goto illegal_op;
10312 if (disas_coproc_insn(s, insn)) {
10313 goto illegal_op;
10316 break;
10317 case 8: case 9: case 10: case 11:
10318 if (insn & (1 << 15)) {
10319 /* Branches, misc control. */
10320 if (insn & 0x5000) {
10321 /* Unconditional branch. */
10322 /* signextend(hw1[10:0]) -> offset[:12]. */
10323 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10324 /* hw1[10:0] -> offset[11:1]. */
10325 offset |= (insn & 0x7ff) << 1;
10326 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10327 offset[24:22] already have the same value because of the
10328 sign extension above. */
10329 offset ^= ((~insn) & (1 << 13)) << 10;
10330 offset ^= ((~insn) & (1 << 11)) << 11;
10332 if (insn & (1 << 14)) {
10333 /* Branch and link. */
10334 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
10337 offset += read_pc(s);
10338 if (insn & (1 << 12)) {
10339 /* b/bl */
10340 gen_jmp(s, offset);
10341 } else {
10342 /* blx */
10343 offset &= ~(uint32_t)2;
10344 /* thumb2 bx, no need to check */
10345 gen_bx_im(s, offset);
10347 } else if (((insn >> 23) & 7) == 7) {
10348 /* Misc control */
10349 if (insn & (1 << 13))
10350 goto illegal_op;
10352 if (insn & (1 << 26)) {
10353 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10354 goto illegal_op;
10356 if (!(insn & (1 << 20))) {
10357 /* Hypervisor call (v7) */
10358 int imm16 = extract32(insn, 16, 4) << 12
10359 | extract32(insn, 0, 12);
10360 ARCH(7);
10361 if (IS_USER(s)) {
10362 goto illegal_op;
10364 gen_hvc(s, imm16);
10365 } else {
10366 /* Secure monitor call (v6+) */
10367 ARCH(6K);
10368 if (IS_USER(s)) {
10369 goto illegal_op;
10371 gen_smc(s);
10373 } else {
10374 op = (insn >> 20) & 7;
10375 switch (op) {
10376 case 0: /* msr cpsr. */
10377 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10378 tmp = load_reg(s, rn);
10379 /* the constant is the mask and SYSm fields */
10380 addr = tcg_const_i32(insn & 0xfff);
10381 gen_helper_v7m_msr(cpu_env, addr, tmp);
10382 tcg_temp_free_i32(addr);
10383 tcg_temp_free_i32(tmp);
10384 gen_lookup_tb(s);
10385 break;
10387 /* fall through */
10388 case 1: /* msr spsr. */
10389 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10390 goto illegal_op;
10393 if (extract32(insn, 5, 1)) {
10394 /* MSR (banked) */
10395 int sysm = extract32(insn, 8, 4) |
10396 (extract32(insn, 4, 1) << 4);
10397 int r = op & 1;
10399 gen_msr_banked(s, r, sysm, rm);
10400 break;
10403 /* MSR (for PSRs) */
10404 tmp = load_reg(s, rn);
10405 if (gen_set_psr(s,
10406 msr_mask(s, (insn >> 8) & 0xf, op == 1),
10407 op == 1, tmp))
10408 goto illegal_op;
10409 break;
10410 case 2: /* cps, nop-hint. */
10411 if (((insn >> 8) & 7) == 0) {
10412 gen_nop_hint(s, insn & 0xff);
10414 /* Implemented as NOP in user mode. */
10415 if (IS_USER(s))
10416 break;
10417 offset = 0;
10418 imm = 0;
10419 if (insn & (1 << 10)) {
10420 if (insn & (1 << 7))
10421 offset |= CPSR_A;
10422 if (insn & (1 << 6))
10423 offset |= CPSR_I;
10424 if (insn & (1 << 5))
10425 offset |= CPSR_F;
10426 if (insn & (1 << 9))
10427 imm = CPSR_A | CPSR_I | CPSR_F;
10429 if (insn & (1 << 8)) {
10430 offset |= 0x1f;
10431 imm |= (insn & 0x1f);
10433 if (offset) {
10434 gen_set_psr_im(s, offset, 0, imm);
10436 break;
10437 case 3: /* Special control operations. */
10438 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
10439 !arm_dc_feature(s, ARM_FEATURE_M)) {
10440 goto illegal_op;
10442 op = (insn >> 4) & 0xf;
10443 switch (op) {
10444 case 2: /* clrex */
10445 gen_clrex(s);
10446 break;
10447 case 4: /* dsb */
10448 case 5: /* dmb */
10449 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
10450 break;
10451 case 6: /* isb */
10452 /* We need to break the TB after this insn
10453 * to execute self-modifying code correctly
10454 * and also to take any pending interrupts
10455 * immediately.
10457 gen_goto_tb(s, 0, s->base.pc_next);
10458 break;
10459 case 7: /* sb */
10460 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
10461 goto illegal_op;
10464 * TODO: There is no speculation barrier opcode
10465 * for TCG; MB and end the TB instead.
10467 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
10468 gen_goto_tb(s, 0, s->base.pc_next);
10469 break;
10470 default:
10471 goto illegal_op;
10473 break;
10474 case 4: /* bxj */
10475 /* Trivial implementation equivalent to bx.
10476 * This instruction doesn't exist at all for M-profile.
10478 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10479 goto illegal_op;
10481 tmp = load_reg(s, rn);
10482 gen_bx(s, tmp);
10483 break;
10484 case 5: /* Exception return. */
10485 if (IS_USER(s)) {
10486 goto illegal_op;
10488 if (rn != 14 || rd != 15) {
10489 goto illegal_op;
10491 if (s->current_el == 2) {
10492 /* ERET from Hyp uses ELR_Hyp, not LR */
10493 if (insn & 0xff) {
10494 goto illegal_op;
10496 tmp = load_cpu_field(elr_el[2]);
10497 } else {
10498 tmp = load_reg(s, rn);
10499 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10501 gen_exception_return(s, tmp);
10502 break;
10503 case 6: /* MRS */
10504 if (extract32(insn, 5, 1) &&
10505 !arm_dc_feature(s, ARM_FEATURE_M)) {
10506 /* MRS (banked) */
10507 int sysm = extract32(insn, 16, 4) |
10508 (extract32(insn, 4, 1) << 4);
10510 gen_mrs_banked(s, 0, sysm, rd);
10511 break;
10514 if (extract32(insn, 16, 4) != 0xf) {
10515 goto illegal_op;
10517 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
10518 extract32(insn, 0, 8) != 0) {
10519 goto illegal_op;
10522 /* mrs cpsr */
10523 tmp = tcg_temp_new_i32();
10524 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10525 addr = tcg_const_i32(insn & 0xff);
10526 gen_helper_v7m_mrs(tmp, cpu_env, addr);
10527 tcg_temp_free_i32(addr);
10528 } else {
10529 gen_helper_cpsr_read(tmp, cpu_env);
10531 store_reg(s, rd, tmp);
10532 break;
10533 case 7: /* MRS */
10534 if (extract32(insn, 5, 1) &&
10535 !arm_dc_feature(s, ARM_FEATURE_M)) {
10536 /* MRS (banked) */
10537 int sysm = extract32(insn, 16, 4) |
10538 (extract32(insn, 4, 1) << 4);
10540 gen_mrs_banked(s, 1, sysm, rd);
10541 break;
10544 /* mrs spsr. */
10545 /* Not accessible in user mode. */
10546 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
10547 goto illegal_op;
10550 if (extract32(insn, 16, 4) != 0xf ||
10551 extract32(insn, 0, 8) != 0) {
10552 goto illegal_op;
10555 tmp = load_cpu_field(spsr);
10556 store_reg(s, rd, tmp);
10557 break;
10560 } else {
10561 /* Conditional branch. */
10562 op = (insn >> 22) & 0xf;
10563 /* Generate a conditional jump to next instruction. */
10564 arm_skip_unless(s, op);
10566 /* offset[11:1] = insn[10:0] */
10567 offset = (insn & 0x7ff) << 1;
10568 /* offset[17:12] = insn[21:16]. */
10569 offset |= (insn & 0x003f0000) >> 4;
10570 /* offset[31:20] = insn[26]. */
10571 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10572 /* offset[18] = insn[13]. */
10573 offset |= (insn & (1 << 13)) << 5;
10574 /* offset[19] = insn[11]. */
10575 offset |= (insn & (1 << 11)) << 8;
10577 /* jump to the offset */
10578 gen_jmp(s, read_pc(s) + offset);
10580 } else {
10582 * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
10583 * - Data-processing (modified immediate, plain binary immediate)
10585 if (insn & (1 << 25)) {
10587 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
10588 * - Data-processing (plain binary immediate)
10590 if (insn & (1 << 24)) {
10591 if (insn & (1 << 20))
10592 goto illegal_op;
10593 /* Bitfield/Saturate. */
10594 op = (insn >> 21) & 7;
10595 imm = insn & 0x1f;
10596 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10597 if (rn == 15) {
10598 tmp = tcg_temp_new_i32();
10599 tcg_gen_movi_i32(tmp, 0);
10600 } else {
10601 tmp = load_reg(s, rn);
10603 switch (op) {
10604 case 2: /* Signed bitfield extract. */
10605 imm++;
10606 if (shift + imm > 32)
10607 goto illegal_op;
10608 if (imm < 32) {
10609 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
10611 break;
10612 case 6: /* Unsigned bitfield extract. */
10613 imm++;
10614 if (shift + imm > 32)
10615 goto illegal_op;
10616 if (imm < 32) {
10617 tcg_gen_extract_i32(tmp, tmp, shift, imm);
10619 break;
10620 case 3: /* Bitfield insert/clear. */
10621 if (imm < shift)
10622 goto illegal_op;
10623 imm = imm + 1 - shift;
10624 if (imm != 32) {
10625 tmp2 = load_reg(s, rd);
10626 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
10627 tcg_temp_free_i32(tmp2);
10629 break;
10630 case 7:
10631 goto illegal_op;
10632 default: /* Saturate. */
10633 if (shift) {
10634 if (op & 1)
10635 tcg_gen_sari_i32(tmp, tmp, shift);
10636 else
10637 tcg_gen_shli_i32(tmp, tmp, shift);
10639 tmp2 = tcg_const_i32(imm);
10640 if (op & 4) {
10641 /* Unsigned. */
10642 if ((op & 1) && shift == 0) {
10643 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10644 tcg_temp_free_i32(tmp);
10645 tcg_temp_free_i32(tmp2);
10646 goto illegal_op;
10648 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
10649 } else {
10650 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
10652 } else {
10653 /* Signed. */
10654 if ((op & 1) && shift == 0) {
10655 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10656 tcg_temp_free_i32(tmp);
10657 tcg_temp_free_i32(tmp2);
10658 goto illegal_op;
10660 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
10661 } else {
10662 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
10665 tcg_temp_free_i32(tmp2);
10666 break;
10668 store_reg(s, rd, tmp);
10669 } else {
10670 imm = ((insn & 0x04000000) >> 15)
10671 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10672 if (insn & (1 << 22)) {
10673 /* 16-bit immediate. */
10674 imm |= (insn >> 4) & 0xf000;
10675 if (insn & (1 << 23)) {
10676 /* movt */
10677 tmp = load_reg(s, rd);
10678 tcg_gen_ext16u_i32(tmp, tmp);
10679 tcg_gen_ori_i32(tmp, tmp, imm << 16);
10680 } else {
10681 /* movw */
10682 tmp = tcg_temp_new_i32();
10683 tcg_gen_movi_i32(tmp, imm);
10685 store_reg(s, rd, tmp);
10686 } else {
10687 /* Add/sub 12-bit immediate. */
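/* add_reg_for_lit() below uses the word-aligned PC when rn == 15,
 * which is how the T32 ADR (add/sub from PC) forms are handled. */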
10688 if (insn & (1 << 23)) {
10689 imm = -imm;
10691 tmp = add_reg_for_lit(s, rn, imm);
10692 if (rn == 13 && rd == 13) {
10693 /* ADD SP, SP, imm or SUB SP, SP, imm */
10694 store_sp_checked(s, tmp);
10695 } else {
10696 store_reg(s, rd, tmp);
10700 } else {
10702 * 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
10703 * - Data-processing (modified immediate)
10705 int shifter_out = 0;
10706 /* modified 12-bit immediate. */
10707 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10708 imm = (insn & 0xff);
10709 switch (shift) {
10710 case 0: /* XY */
10711 /* Nothing to do. */
10712 break;
10713 case 1: /* 00XY00XY */
10714 imm |= imm << 16;
10715 break;
10716 case 2: /* XY00XY00 */
10717 imm |= imm << 16;
10718 imm <<= 8;
10719 break;
10720 case 3: /* XYXYXYXY */
10721 imm |= imm << 16;
10722 imm |= imm << 8;
10723 break;
10724 default: /* Rotated constant. */
10725 shift = (shift << 1) | (imm >> 7);
10726 imm |= 0x80;
10727 imm = imm << (32 - shift);
10728 shifter_out = 1;
10729 break;
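/*
 * This implements ThumbExpandImm(): shift values 0..3 replicate the
 * byte as commented above; shift >= 4 forces bit 7 of the byte to 1
 * and rotates it right by the 5-bit amount i:imm3:imm8<7>.
 * shifter_out records that the rotation's carry-out matters if the
 * op sets flags.
 */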
10731 tmp2 = tcg_temp_new_i32();
10732 tcg_gen_movi_i32(tmp2, imm);
10733 rn = (insn >> 16) & 0xf;
10734 if (rn == 15) {
10735 tmp = tcg_temp_new_i32();
10736 tcg_gen_movi_i32(tmp, 0);
10737 } else {
10738 tmp = load_reg(s, rn);
10740 op = (insn >> 21) & 0xf;
10741 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
10742 shifter_out, tmp, tmp2))
10743 goto illegal_op;
10744 tcg_temp_free_i32(tmp2);
10745 rd = (insn >> 8) & 0xf;
10746 if (rd == 13 && rn == 13
10747 && (op == 8 || op == 13)) {
10748 /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
10749 store_sp_checked(s, tmp);
10750 } else if (rd != 15) {
10751 store_reg(s, rd, tmp);
10752 } else {
10753 tcg_temp_free_i32(tmp);
10757 break;
10758 case 12: /* Load/store single data item. */
10760 int postinc = 0;
10761 int writeback = 0;
10762 int memidx;
10763 ISSInfo issinfo;
10765 if ((insn & 0x01100000) == 0x01000000) {
10766 if (disas_neon_ls_insn(s, insn)) {
10767 goto illegal_op;
10769 break;
10771 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10772 if (rs == 15) {
10773 if (!(insn & (1 << 20))) {
10774 goto illegal_op;
10776 if (op != 2) {
10777 /* Byte or halfword load space with dest == r15: memory hints.
10778 * Catch them early so we don't emit pointless addressing code.
10779 * This space is a mix of:
10780 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10781 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10782 * cores)
10783 * unallocated hints, which must be treated as NOPs
10784 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10785 * which is easiest for the decoding logic
10786 * Some space which must UNDEF
10788 int op1 = (insn >> 23) & 3;
10789 int op2 = (insn >> 6) & 0x3f;
10790 if (op & 2) {
10791 goto illegal_op;
10793 if (rn == 15) {
10794 /* UNPREDICTABLE, unallocated hint or
10795 * PLD/PLDW/PLI (literal)
10797 return;
10799 if (op1 & 1) {
10800 return; /* PLD/PLDW/PLI or unallocated hint */
10802 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
10803 return; /* PLD/PLDW/PLI or unallocated hint */
10805 /* UNDEF space, or an UNPREDICTABLE encoding */
10806 goto illegal_op;
10809 memidx = get_mem_index(s);
10810 imm = insn & 0xfff;
10811 if (insn & (1 << 23)) {
10812 /* PC relative or Positive offset. */
10813 addr = add_reg_for_lit(s, rn, imm);
10814 } else if (rn == 15) {
10815 /* PC relative with negative offset. */
10816 addr = add_reg_for_lit(s, rn, -imm);
10817 } else {
10818 addr = load_reg(s, rn);
10819 imm = insn & 0xff;
10820 switch ((insn >> 8) & 0xf) {
10821 case 0x0: /* Shifted Register. */
10822 shift = (insn >> 4) & 0xf;
10823 if (shift > 3) {
10824 tcg_temp_free_i32(addr);
10825 goto illegal_op;
10827 tmp = load_reg(s, rm);
10828 if (shift) {
10829 tcg_gen_shli_i32(tmp, tmp, shift);
10831 tcg_gen_add_i32(addr, addr, tmp);
10832 tcg_temp_free_i32(tmp);
10833 break;
10834 case 0xc: /* Negative offset. */
10835 tcg_gen_addi_i32(addr, addr, -imm);
10836 break;
10837 case 0xe: /* User privilege. */
10838 tcg_gen_addi_i32(addr, addr, imm);
10839 memidx = get_a32_user_mem_index(s);
10840 break;
10841 case 0x9: /* Post-decrement. */
10842 imm = -imm;
10843 /* Fall through. */
10844 case 0xb: /* Post-increment. */
10845 postinc = 1;
10846 writeback = 1;
10847 break;
10848 case 0xd: /* Pre-decrement. */
10849 imm = -imm;
10850 /* Fall through. */
10851 case 0xf: /* Pre-increment. */
10852 writeback = 1;
10853 break;
10854 default:
10855 tcg_temp_free_i32(addr);
10856 goto illegal_op;
10860 issinfo = writeback ? ISSInvalid : rs;
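/* Forms with base writeback do not report a valid load/store
 * syndrome, hence ISSInvalid above; otherwise the syndrome records
 * the target register rs. */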
10862 if (s->v8m_stackcheck && rn == 13 && writeback) {
10864 * Stackcheck. Here we know 'addr' is the current SP;
10865 * if imm is +ve we're moving SP up, else down. It is
10866 * UNKNOWN whether the limit check triggers when SP starts
10867 * below the limit and ends up above it; we choose to trigger.
10869 if ((int32_t)imm < 0) {
10870 TCGv_i32 newsp = tcg_temp_new_i32();
10872 tcg_gen_addi_i32(newsp, addr, imm);
10873 gen_helper_v8m_stackcheck(cpu_env, newsp);
10874 tcg_temp_free_i32(newsp);
10875 } else {
10876 gen_helper_v8m_stackcheck(cpu_env, addr);
10880 if (writeback && !postinc) {
10881 tcg_gen_addi_i32(addr, addr, imm);
10884 if (insn & (1 << 20)) {
10885 /* Load. */
10886 tmp = tcg_temp_new_i32();
10887 switch (op) {
10888 case 0:
10889 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
10890 break;
10891 case 4:
10892 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
10893 break;
10894 case 1:
10895 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
10896 break;
10897 case 5:
10898 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
10899 break;
10900 case 2:
10901 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
10902 break;
10903 default:
10904 tcg_temp_free_i32(tmp);
10905 tcg_temp_free_i32(addr);
10906 goto illegal_op;
10908 if (rs == 15) {
10909 gen_bx_excret(s, tmp);
10910 } else {
10911 store_reg(s, rs, tmp);
10913 } else {
10914 /* Store. */
10915 tmp = load_reg(s, rs);
10916 switch (op) {
10917 case 0:
10918 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
10919 break;
10920 case 1:
10921 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
10922 break;
10923 case 2:
10924 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
10925 break;
10926 default:
10927 tcg_temp_free_i32(tmp);
10928 tcg_temp_free_i32(addr);
10929 goto illegal_op;
10931 tcg_temp_free_i32(tmp);
10933 if (postinc)
10934 tcg_gen_addi_i32(addr, addr, imm);
10935 if (writeback) {
10936 store_reg(s, rn, addr);
10937 } else {
10938 tcg_temp_free_i32(addr);
10941 break;
10942 default:
10943 goto illegal_op;
10945 return;
10946 illegal_op:
10947 unallocated_encoding(s);
10950 static void disas_thumb_insn(DisasContext *s, uint32_t insn)
10952 uint32_t val, op, rm, rn, rd, shift, cond;
10953 int32_t offset;
10954 int i;
10955 TCGv_i32 tmp;
10956 TCGv_i32 tmp2;
10957 TCGv_i32 addr;
10959 switch (insn >> 12) {
10960 case 0: case 1:
10962 rd = insn & 7;
10963 op = (insn >> 11) & 3;
10964 if (op == 3) {
10966 * 0b0001_1xxx_xxxx_xxxx
10967 * - Add, subtract (three low registers)
10968 * - Add, subtract (two low registers and immediate)
10970 rn = (insn >> 3) & 7;
10971 tmp = load_reg(s, rn);
10972 if (insn & (1 << 10)) {
10973 /* immediate */
10974 tmp2 = tcg_temp_new_i32();
10975 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
10976 } else {
10977 /* reg */
10978 rm = (insn >> 6) & 7;
10979 tmp2 = load_reg(s, rm);
10981 if (insn & (1 << 9)) {
10982 if (s->condexec_mask)
10983 tcg_gen_sub_i32(tmp, tmp, tmp2);
10984 else
10985 gen_sub_CC(tmp, tmp, tmp2);
10986 } else {
10987 if (s->condexec_mask)
10988 tcg_gen_add_i32(tmp, tmp, tmp2);
10989 else
10990 gen_add_CC(tmp, tmp, tmp2);
10992 tcg_temp_free_i32(tmp2);
10993 store_reg(s, rd, tmp);
10994 } else {
10995 /* shift immediate */
10996 rm = (insn >> 3) & 7;
10997 shift = (insn >> 6) & 0x1f;
10998 tmp = load_reg(s, rm);
10999 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11000 if (!s->condexec_mask)
11001 gen_logic_CC(tmp);
11002 store_reg(s, rd, tmp);
11004 break;
11005 case 2: case 3:
11007 * 0b001x_xxxx_xxxx_xxxx
11008 * - Add, subtract, compare, move (one low register and immediate)
11010 op = (insn >> 11) & 3;
11011 rd = (insn >> 8) & 0x7;
11012 if (op == 0) { /* mov */
11013 tmp = tcg_temp_new_i32();
11014 tcg_gen_movi_i32(tmp, insn & 0xff);
11015 if (!s->condexec_mask)
11016 gen_logic_CC(tmp);
11017 store_reg(s, rd, tmp);
11018 } else {
11019 tmp = load_reg(s, rd);
11020 tmp2 = tcg_temp_new_i32();
11021 tcg_gen_movi_i32(tmp2, insn & 0xff);
11022 switch (op) {
11023 case 1: /* cmp */
11024 gen_sub_CC(tmp, tmp, tmp2);
11025 tcg_temp_free_i32(tmp);
11026 tcg_temp_free_i32(tmp2);
11027 break;
11028 case 2: /* add */
11029 if (s->condexec_mask)
11030 tcg_gen_add_i32(tmp, tmp, tmp2);
11031 else
11032 gen_add_CC(tmp, tmp, tmp2);
11033 tcg_temp_free_i32(tmp2);
11034 store_reg(s, rd, tmp);
11035 break;
11036 case 3: /* sub */
11037 if (s->condexec_mask)
11038 tcg_gen_sub_i32(tmp, tmp, tmp2);
11039 else
11040 gen_sub_CC(tmp, tmp, tmp2);
11041 tcg_temp_free_i32(tmp2);
11042 store_reg(s, rd, tmp);
11043 break;
11046 break;
11047 case 4:
11048 if (insn & (1 << 11)) {
11049 rd = (insn >> 8) & 7;
11050 /* load pc-relative. Bit 1 of PC is ignored. */
11051 addr = add_reg_for_lit(s, 15, (insn & 0xff) * 4);
11052 tmp = tcg_temp_new_i32();
11053 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11054 rd | ISSIs16Bit);
11055 tcg_temp_free_i32(addr);
11056 store_reg(s, rd, tmp);
11057 break;
11059 if (insn & (1 << 10)) {
11060 /* 0b0100_01xx_xxxx_xxxx
11061 * - data processing extended, branch and exchange
11063 rd = (insn & 7) | ((insn >> 4) & 8);
11064 rm = (insn >> 3) & 0xf;
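/* rd is DN:Rdn (insn[7]:insn[2:0]) and rm is insn[6:3], so these
 * forms can reach the high registers, unlike most 16-bit insns. */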
11065 op = (insn >> 8) & 3;
11066 switch (op) {
11067 case 0: /* add */
11068 tmp = load_reg(s, rd);
11069 tmp2 = load_reg(s, rm);
11070 tcg_gen_add_i32(tmp, tmp, tmp2);
11071 tcg_temp_free_i32(tmp2);
11072 if (rd == 13) {
11073 /* ADD SP, SP, reg */
11074 store_sp_checked(s, tmp);
11075 } else {
11076 store_reg(s, rd, tmp);
11078 break;
11079 case 1: /* cmp */
11080 tmp = load_reg(s, rd);
11081 tmp2 = load_reg(s, rm);
11082 gen_sub_CC(tmp, tmp, tmp2);
11083 tcg_temp_free_i32(tmp2);
11084 tcg_temp_free_i32(tmp);
11085 break;
11086 case 2: /* mov/cpy */
11087 tmp = load_reg(s, rm);
11088 if (rd == 13) {
11089 /* MOV SP, reg */
11090 store_sp_checked(s, tmp);
11091 } else {
11092 store_reg(s, rd, tmp);
11094 break;
11095 case 3:
11097 /* 0b0100_0111_xxxx_xxxx
11098 * - branch [and link] exchange thumb register
11100 bool link = insn & (1 << 7);
11102 if (insn & 3) {
11103 goto undef;
11105 if (link) {
11106 ARCH(5);
11108 if ((insn & 4)) {
11109 /* BXNS/BLXNS: only exists for v8M with the
11110 * security extensions, and always UNDEF if NonSecure.
11111 * We don't implement these in the user-only mode
11112 * either (in theory you can use them from Secure User
11113 * mode but they are too tied in to system emulation.)
11115 if (!s->v8m_secure || IS_USER_ONLY) {
11116 goto undef;
11118 if (link) {
11119 gen_blxns(s, rm);
11120 } else {
11121 gen_bxns(s, rm);
11123 break;
11125 /* BLX/BX */
11126 tmp = load_reg(s, rm);
11127 if (link) {
11128 val = (uint32_t)s->base.pc_next | 1;
11129 tmp2 = tcg_temp_new_i32();
11130 tcg_gen_movi_i32(tmp2, val);
11131 store_reg(s, 14, tmp2);
11132 gen_bx(s, tmp);
11133 } else {
11134 /* Only BX works as exception-return, not BLX */
11135 gen_bx_excret(s, tmp);
11137 break;
11140 break;
11144 * 0b0100_00xx_xxxx_xxxx
11145 * - Data-processing (two low registers)
11147 rd = insn & 7;
11148 rm = (insn >> 3) & 7;
11149 op = (insn >> 6) & 0xf;
11150 if (op == 2 || op == 3 || op == 4 || op == 7) {
11151 /* the shift/rotate ops want the operands backwards */
11152 val = rm;
11153 rm = rd;
11154 rd = val;
11155 val = 1;
11156 } else {
11157 val = 0;
11160 if (op == 9) { /* neg */
11161 tmp = tcg_temp_new_i32();
11162 tcg_gen_movi_i32(tmp, 0);
11163 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11164 tmp = load_reg(s, rd);
11165 } else {
11166 tmp = NULL;
11169 tmp2 = load_reg(s, rm);
11170 switch (op) {
11171 case 0x0: /* and */
11172 tcg_gen_and_i32(tmp, tmp, tmp2);
11173 if (!s->condexec_mask)
11174 gen_logic_CC(tmp);
11175 break;
11176 case 0x1: /* eor */
11177 tcg_gen_xor_i32(tmp, tmp, tmp2);
11178 if (!s->condexec_mask)
11179 gen_logic_CC(tmp);
11180 break;
11181 case 0x2: /* lsl */
11182 if (s->condexec_mask) {
11183 gen_shl(tmp2, tmp2, tmp);
11184 } else {
11185 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
11186 gen_logic_CC(tmp2);
11188 break;
11189 case 0x3: /* lsr */
11190 if (s->condexec_mask) {
11191 gen_shr(tmp2, tmp2, tmp);
11192 } else {
11193 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
11194 gen_logic_CC(tmp2);
11196 break;
11197 case 0x4: /* asr */
11198 if (s->condexec_mask) {
11199 gen_sar(tmp2, tmp2, tmp);
11200 } else {
11201 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
11202 gen_logic_CC(tmp2);
11204 break;
11205 case 0x5: /* adc */
11206 if (s->condexec_mask) {
11207 gen_adc(tmp, tmp2);
11208 } else {
11209 gen_adc_CC(tmp, tmp, tmp2);
11211 break;
11212 case 0x6: /* sbc */
11213 if (s->condexec_mask) {
11214 gen_sub_carry(tmp, tmp, tmp2);
11215 } else {
11216 gen_sbc_CC(tmp, tmp, tmp2);
11218 break;
11219 case 0x7: /* ror */
11220 if (s->condexec_mask) {
11221 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11222 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
11223 } else {
11224 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
11225 gen_logic_CC(tmp2);
11227 break;
11228 case 0x8: /* tst */
11229 tcg_gen_and_i32(tmp, tmp, tmp2);
11230 gen_logic_CC(tmp);
11231 rd = 16;
11232 break;
11233 case 0x9: /* neg */
11234 if (s->condexec_mask)
11235 tcg_gen_neg_i32(tmp, tmp2);
11236 else
11237 gen_sub_CC(tmp, tmp, tmp2);
11238 break;
11239 case 0xa: /* cmp */
11240 gen_sub_CC(tmp, tmp, tmp2);
11241 rd = 16;
11242 break;
11243 case 0xb: /* cmn */
11244 gen_add_CC(tmp, tmp, tmp2);
11245 rd = 16;
11246 break;
11247 case 0xc: /* orr */
11248 tcg_gen_or_i32(tmp, tmp, tmp2);
11249 if (!s->condexec_mask)
11250 gen_logic_CC(tmp);
11251 break;
11252 case 0xd: /* mul */
11253 tcg_gen_mul_i32(tmp, tmp, tmp2);
11254 if (!s->condexec_mask)
11255 gen_logic_CC(tmp);
11256 break;
11257 case 0xe: /* bic */
11258 tcg_gen_andc_i32(tmp, tmp, tmp2);
11259 if (!s->condexec_mask)
11260 gen_logic_CC(tmp);
11261 break;
11262 case 0xf: /* mvn */
11263 tcg_gen_not_i32(tmp2, tmp2);
11264 if (!s->condexec_mask)
11265 gen_logic_CC(tmp2);
11266 val = 1;
11267 rm = rd;
11268 break;
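/*
 * Write back the result: rd == 16 marks the compare-style ops
 * (tst/cmp/cmn) with no destination; val == 1 means the result is in
 * tmp2 and goes to rm (the shift/rotate cases swapped rd/rm above,
 * and mvn set rm = rd); otherwise the result is in tmp and goes to rd.
 */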
11270 if (rd != 16) {
11271 if (val) {
11272 store_reg(s, rm, tmp2);
11273 if (op != 0xf)
11274 tcg_temp_free_i32(tmp);
11275 } else {
11276 store_reg(s, rd, tmp);
11277 tcg_temp_free_i32(tmp2);
11279 } else {
11280 tcg_temp_free_i32(tmp);
11281 tcg_temp_free_i32(tmp2);
11283 break;
11285 case 5:
11286 /* load/store register offset. */
11287 rd = insn & 7;
11288 rn = (insn >> 3) & 7;
11289 rm = (insn >> 6) & 7;
11290 op = (insn >> 9) & 7;
11291 addr = load_reg(s, rn);
11292 tmp = load_reg(s, rm);
11293 tcg_gen_add_i32(addr, addr, tmp);
11294 tcg_temp_free_i32(tmp);
11296 if (op < 3) { /* store */
11297 tmp = load_reg(s, rd);
11298 } else {
11299 tmp = tcg_temp_new_i32();
11302 switch (op) {
11303 case 0: /* str */
11304 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11305 break;
11306 case 1: /* strh */
11307 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11308 break;
11309 case 2: /* strb */
11310 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11311 break;
11312 case 3: /* ldrsb */
11313 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11314 break;
11315 case 4: /* ldr */
11316 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11317 break;
11318 case 5: /* ldrh */
11319 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11320 break;
11321 case 6: /* ldrb */
11322 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11323 break;
11324 case 7: /* ldrsh */
11325 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11326 break;
11328 if (op >= 3) { /* load */
11329 store_reg(s, rd, tmp);
11330 } else {
11331 tcg_temp_free_i32(tmp);
11333 tcg_temp_free_i32(addr);
11334 break;
11336 case 6:
11337 /* load/store word immediate offset */
11338 rd = insn & 7;
11339 rn = (insn >> 3) & 7;
11340 addr = load_reg(s, rn);
11341 val = (insn >> 4) & 0x7c;
11342 tcg_gen_addi_i32(addr, addr, val);
11344 if (insn & (1 << 11)) {
11345 /* load */
11346 tmp = tcg_temp_new_i32();
11347 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11348 store_reg(s, rd, tmp);
11349 } else {
11350 /* store */
11351 tmp = load_reg(s, rd);
11352 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11353 tcg_temp_free_i32(tmp);
11355 tcg_temp_free_i32(addr);
11356 break;
11358 case 7:
11359 /* load/store byte immediate offset */
11360 rd = insn & 7;
11361 rn = (insn >> 3) & 7;
11362 addr = load_reg(s, rn);
11363 val = (insn >> 6) & 0x1f;
11364 tcg_gen_addi_i32(addr, addr, val);
11366 if (insn & (1 << 11)) {
11367 /* load */
11368 tmp = tcg_temp_new_i32();
11369 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11370 store_reg(s, rd, tmp);
11371 } else {
11372 /* store */
11373 tmp = load_reg(s, rd);
11374 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11375 tcg_temp_free_i32(tmp);
11377 tcg_temp_free_i32(addr);
11378 break;
11380 case 8:
11381 /* load/store halfword immediate offset */
11382 rd = insn & 7;
11383 rn = (insn >> 3) & 7;
11384 addr = load_reg(s, rn);
11385 val = (insn >> 5) & 0x3e;
11386 tcg_gen_addi_i32(addr, addr, val);
11388 if (insn & (1 << 11)) {
11389 /* load */
11390 tmp = tcg_temp_new_i32();
11391 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11392 store_reg(s, rd, tmp);
11393 } else {
11394 /* store */
11395 tmp = load_reg(s, rd);
11396 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11397 tcg_temp_free_i32(tmp);
11399 tcg_temp_free_i32(addr);
11400 break;
11402 case 9:
11403 /* load/store from stack */
11404 rd = (insn >> 8) & 7;
11405 addr = load_reg(s, 13);
11406 val = (insn & 0xff) * 4;
11407 tcg_gen_addi_i32(addr, addr, val);
11409 if (insn & (1 << 11)) {
11410 /* load */
11411 tmp = tcg_temp_new_i32();
11412 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11413 store_reg(s, rd, tmp);
11414 } else {
11415 /* store */
11416 tmp = load_reg(s, rd);
11417 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11418 tcg_temp_free_i32(tmp);
11420 tcg_temp_free_i32(addr);
11421 break;
11423 case 10:
11425 * 0b1010_xxxx_xxxx_xxxx
11426 * - Add PC/SP (immediate)
11428 rd = (insn >> 8) & 7;
11429 val = (insn & 0xff) * 4;
11430 tmp = add_reg_for_lit(s, insn & (1 << 11) ? 13 : 15, val);
11431 store_reg(s, rd, tmp);
11432 break;
11434 case 11:
11435 /* misc */
11436 op = (insn >> 8) & 0xf;
11437 switch (op) {
11438 case 0:
11440 * 0b1011_0000_xxxx_xxxx
11441 * - ADD (SP plus immediate)
11442 * - SUB (SP minus immediate)
11444 tmp = load_reg(s, 13);
11445 val = (insn & 0x7f) * 4;
11446 if (insn & (1 << 7))
11447 val = -(int32_t)val;
11448 tcg_gen_addi_i32(tmp, tmp, val);
11449 store_sp_checked(s, tmp);
11450 break;
11452 case 2: /* sign/zero extend. */
11453 ARCH(6);
11454 rd = insn & 7;
11455 rm = (insn >> 3) & 7;
11456 tmp = load_reg(s, rm);
11457 switch ((insn >> 6) & 3) {
11458 case 0: gen_sxth(tmp); break;
11459 case 1: gen_sxtb(tmp); break;
11460 case 2: gen_uxth(tmp); break;
11461 case 3: gen_uxtb(tmp); break;
11463 store_reg(s, rd, tmp);
11464 break;
11465 case 4: case 5: case 0xc: case 0xd:
11467 * 0b1011_x10x_xxxx_xxxx
11468 * - push/pop
11470 addr = load_reg(s, 13);
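/* First pass: add up the transfer size (4 bytes per listed register,
 * plus 4 for LR/PC if bit 8 is set) so a push can pre-decrement SP
 * and the v8M stack-limit check can use the lowest address. */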
11471 if (insn & (1 << 8))
11472 offset = 4;
11473 else
11474 offset = 0;
11475 for (i = 0; i < 8; i++) {
11476 if (insn & (1 << i))
11477 offset += 4;
11479 if ((insn & (1 << 11)) == 0) {
11480 tcg_gen_addi_i32(addr, addr, -offset);
11483 if (s->v8m_stackcheck) {
11485 * Here 'addr' is the lower of "old SP" and "new SP";
11486 * if this is a pop that starts below the limit and ends
11487 * above it, it is UNKNOWN whether the limit check triggers;
11488 * we choose to trigger.
11490 gen_helper_v8m_stackcheck(cpu_env, addr);
11493 for (i = 0; i < 8; i++) {
11494 if (insn & (1 << i)) {
11495 if (insn & (1 << 11)) {
11496 /* pop */
11497 tmp = tcg_temp_new_i32();
11498 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11499 store_reg(s, i, tmp);
11500 } else {
11501 /* push */
11502 tmp = load_reg(s, i);
11503 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11504 tcg_temp_free_i32(tmp);
11506 /* advance to the next address. */
11507 tcg_gen_addi_i32(addr, addr, 4);
11510 tmp = NULL;
11511 if (insn & (1 << 8)) {
11512 if (insn & (1 << 11)) {
11513 /* pop pc */
11514 tmp = tcg_temp_new_i32();
11515 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11516 /* don't set the pc until the rest of the instruction
11517 has completed */
11518 } else {
11519 /* push lr */
11520 tmp = load_reg(s, 14);
11521 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11522 tcg_temp_free_i32(tmp);
11524 tcg_gen_addi_i32(addr, addr, 4);
11526 if ((insn & (1 << 11)) == 0) {
11527 tcg_gen_addi_i32(addr, addr, -offset);
11529 /* write back the new stack pointer */
11530 store_reg(s, 13, addr);
11531 /* set the new PC value */
11532 if ((insn & 0x0900) == 0x0900) {
11533 store_reg_from_load(s, 15, tmp);
11535 break;
11537 case 1: case 3: case 9: case 11: /* czb */
11538 rm = insn & 7;
11539 tmp = load_reg(s, rm);
11540 arm_gen_condlabel(s);
11541 if (insn & (1 << 11))
11542 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
11543 else
11544 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
11545 tcg_temp_free_i32(tmp);
11546 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
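/* The offset is i:imm5:'0' (insn[9] and insn[7:3]), zero-extended,
 * so CBZ/CBNZ always branch forwards. */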
11547 gen_jmp(s, read_pc(s) + offset);
11548 break;
11550 case 15: /* IT, nop-hint. */
11551 if ((insn & 0xf) == 0) {
11552 gen_nop_hint(s, (insn >> 4) & 0xf);
11553 break;
11556 * IT (If-Then)
11558 * Combinations of firstcond and mask which set up an 0b1111
11559 * condition are UNPREDICTABLE; we take the CONSTRAINED
11560 * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
11561 * i.e. both meaning "execute always".
11563 s->condexec_cond = (insn >> 4) & 0xe;
11564 s->condexec_mask = insn & 0x1f;
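/*
 * ITSTATE is kept split: condexec_cond is the base condition with
 * bit 0 cleared, and condexec_mask is a 5-bit shift register holding
 * firstcond[0] followed by the 4-bit mask.  Each completed insn in
 * the block shifts the next bit into bit 0 of the condition; see
 * thumb_tr_translate_insn.
 */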
11565 /* No actual code generated for this insn, just setup state. */
11566 break;
11568 case 0xe: /* bkpt */
11570 int imm8 = extract32(insn, 0, 8);
11571 ARCH(5);
11572 gen_exception_bkpt_insn(s, syn_aa32_bkpt(imm8, true));
11573 break;
11576 case 0xa: /* rev, and hlt */
11578 int op1 = extract32(insn, 6, 2);
11580 if (op1 == 2) {
11581 /* HLT */
11582 int imm6 = extract32(insn, 0, 6);
11584 gen_hlt(s, imm6);
11585 break;
11588 /* Otherwise this is rev */
11589 ARCH(6);
11590 rn = (insn >> 3) & 0x7;
11591 rd = insn & 0x7;
11592 tmp = load_reg(s, rn);
11593 switch (op1) {
11594 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
11595 case 1: gen_rev16(tmp); break;
11596 case 3: gen_revsh(tmp); break;
11597 default:
11598 g_assert_not_reached();
11600 store_reg(s, rd, tmp);
11601 break;
11604 case 6:
11605 switch ((insn >> 5) & 7) {
11606 case 2:
11607 /* setend */
11608 ARCH(6);
11609 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11610 gen_helper_setend(cpu_env);
11611 s->base.is_jmp = DISAS_UPDATE;
11613 break;
11614 case 3:
11615 /* cps */
11616 ARCH(6);
11617 if (IS_USER(s)) {
11618 break;
11620 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11621 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11622 /* FAULTMASK */
11623 if (insn & 1) {
11624 addr = tcg_const_i32(19);
11625 gen_helper_v7m_msr(cpu_env, addr, tmp);
11626 tcg_temp_free_i32(addr);
11628 /* PRIMASK */
11629 if (insn & 2) {
11630 addr = tcg_const_i32(16);
11631 gen_helper_v7m_msr(cpu_env, addr, tmp);
11632 tcg_temp_free_i32(addr);
11634 tcg_temp_free_i32(tmp);
11635 gen_lookup_tb(s);
11636 } else {
11637 if (insn & (1 << 4)) {
11638 shift = CPSR_A | CPSR_I | CPSR_F;
11639 } else {
11640 shift = 0;
11642 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
11644 break;
11645 default:
11646 goto undef;
11648 break;
11650 default:
11651 goto undef;
11653 break;
11655 case 12:
11657 /* load/store multiple */
11658 TCGv_i32 loaded_var = NULL;
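/* If the base register is in a load list, its final value is the
 * loaded one rather than the incremented address, so hold it in
 * loaded_var and only write it back after the loop. */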
11659 rn = (insn >> 8) & 0x7;
11660 addr = load_reg(s, rn);
11661 for (i = 0; i < 8; i++) {
11662 if (insn & (1 << i)) {
11663 if (insn & (1 << 11)) {
11664 /* load */
11665 tmp = tcg_temp_new_i32();
11666 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11667 if (i == rn) {
11668 loaded_var = tmp;
11669 } else {
11670 store_reg(s, i, tmp);
11672 } else {
11673 /* store */
11674 tmp = load_reg(s, i);
11675 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11676 tcg_temp_free_i32(tmp);
11678 /* advance to the next address */
11679 tcg_gen_addi_i32(addr, addr, 4);
11682 if ((insn & (1 << rn)) == 0) {
11683 /* base reg not in list: base register writeback */
11684 store_reg(s, rn, addr);
11685 } else {
11686 /* base reg in list: if load, complete it now */
11687 if (insn & (1 << 11)) {
11688 store_reg(s, rn, loaded_var);
11690 tcg_temp_free_i32(addr);
11692 break;
11694 case 13:
11695 /* conditional branch or swi */
11696 cond = (insn >> 8) & 0xf;
11697 if (cond == 0xe)
11698 goto undef;
11700 if (cond == 0xf) {
11701 /* swi */
11702 gen_set_pc_im(s, s->base.pc_next);
11703 s->svc_imm = extract32(insn, 0, 8);
11704 s->base.is_jmp = DISAS_SWI;
11705 break;
11707 /* generate a conditional jump to next instruction */
11708 arm_skip_unless(s, cond);
11710 /* jump to the offset */
11711 val = read_pc(s);
11712 offset = ((int32_t)insn << 24) >> 24;
11713 val += offset << 1;
11714 gen_jmp(s, val);
11715 break;
11717 case 14:
11718 if (insn & (1 << 11)) {
11719 /* thumb_insn_is_16bit() ensures we can't get here for
11720 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
11721 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
11723 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11724 ARCH(5);
11725 offset = ((insn & 0x7ff) << 1);
11726 tmp = load_reg(s, 14);
11727 tcg_gen_addi_i32(tmp, tmp, offset);
11728 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
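/* BLX suffix: target = (LR + imm11:'0') with bits [1:0] cleared,
 * since the destination is ARM state. */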
11730 tmp2 = tcg_temp_new_i32();
11731 tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
11732 store_reg(s, 14, tmp2);
11733 gen_bx(s, tmp);
11734 break;
11736 /* unconditional branch */
11737 val = read_pc(s);
11738 offset = ((int32_t)insn << 21) >> 21;
11739 val += offset << 1;
11740 gen_jmp(s, val);
11741 break;
11743 case 15:
11744 /* thumb_insn_is_16bit() ensures we can't get here for
11745 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
11747 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11749 if (insn & (1 << 11)) {
11750 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
11751 offset = ((insn & 0x7ff) << 1) | 1;
11752 tmp = load_reg(s, 14);
11753 tcg_gen_addi_i32(tmp, tmp, offset);
11755 tmp2 = tcg_temp_new_i32();
11756 tcg_gen_movi_i32(tmp2, s->base.pc_next | 1);
11757 store_reg(s, 14, tmp2);
11758 gen_bx(s, tmp);
11759 } else {
11760 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
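/* The prefix stashes PC + (SignExtend(imm11) << 12) in LR; the
 * following suffix insn adds its own imm11:'0' and completes the
 * branch and link. */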
11761 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
11763 tcg_gen_movi_i32(cpu_R[14], read_pc(s) + uoffset);
11765 break;
11767 return;
11768 illegal_op:
11769 undef:
11770 unallocated_encoding(s);
11773 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11775 /* Return true if the insn at dc->base.pc_next might cross a page boundary.
11776 * (False positives are OK, false negatives are not.)
11777 * We know this is a Thumb insn, and our caller ensures we are
11778 * only called if dc->base.pc_next is less than 4 bytes from the page
11779 * boundary, so we cross the page if the first 16 bits indicate
11780 * that this is a 32 bit insn.
11782 uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);
11784 return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
11787 static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
11789 DisasContext *dc = container_of(dcbase, DisasContext, base);
11790 CPUARMState *env = cs->env_ptr;
11791 ARMCPU *cpu = env_archcpu(env);
11792 uint32_t tb_flags = dc->base.tb->flags;
11793 uint32_t condexec, core_mmu_idx;
11795 dc->isar = &cpu->isar;
11796 dc->condjmp = 0;
11798 dc->aarch64 = 0;
11799 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11800 * there is no secure EL1, so we route exceptions to EL3.
11802 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11803 !arm_el_is_aa64(env, 3);
11804 dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
11805 dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
11806 dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
11807 condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
11808 dc->condexec_mask = (condexec & 0xf) << 1;
11809 dc->condexec_cond = condexec >> 4;
11810 core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
11811 dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
11812 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
11813 #if !defined(CONFIG_USER_ONLY)
11814 dc->user = (dc->current_el == 0);
11815 #endif
11816 dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
11817 dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
11818 dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
11819 dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
11820 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
11821 dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
11822 dc->vec_stride = 0;
11823 } else {
11824 dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
11825 dc->c15_cpar = 0;
11827 dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
11828 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
11829 regime_is_secure(env, dc->mmu_idx);
11830 dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
11831 dc->v8m_fpccr_s_wrong = FIELD_EX32(tb_flags, TBFLAG_A32, FPCCR_S_WRONG);
11832 dc->v7m_new_fp_ctxt_needed =
11833 FIELD_EX32(tb_flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED);
11834 dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_A32, LSPACT);
11835 dc->cp_regs = cpu->cp_regs;
11836 dc->features = env->features;
11838 /* Single step state. The code-generation logic here is:
11839 * SS_ACTIVE == 0:
11840 * generate code with no special handling for single-stepping (except
11841 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11842 * this happens anyway because those changes are all system register or
11843 * PSTATE writes).
11844 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11845 * emit code for one insn
11846 * emit code to clear PSTATE.SS
11847 * emit code to generate software step exception for completed step
11848 * end TB (as usual for having generated an exception)
11849 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11850 * emit code to generate a software step exception
11851 * end the TB
11853 dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
11854 dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
11855 dc->is_ldex = false;
11856 if (!arm_feature(env, ARM_FEATURE_M)) {
11857 dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
11860 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
11862 /* If architectural single step active, limit to 1. */
11863 if (is_singlestepping(dc)) {
11864 dc->base.max_insns = 1;
11867 /* ARM is a fixed-length ISA. Bound the number of insns to execute
11868 to those left on the page. */
11869 if (!dc->thumb) {
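/* -(pc_first | TARGET_PAGE_MASK) is the number of bytes left on
 * this page (TARGET_PAGE_MASK is negative when treated as signed);
 * dividing by 4 converts that into A32 instruction slots. */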
11870 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
11871 dc->base.max_insns = MIN(dc->base.max_insns, bound);
11874 cpu_V0 = tcg_temp_new_i64();
11875 cpu_V1 = tcg_temp_new_i64();
11876 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
11877 cpu_M0 = tcg_temp_new_i64();
11880 static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
11882 DisasContext *dc = container_of(dcbase, DisasContext, base);
11884 /* A note on handling of the condexec (IT) bits:
11886 * We want to avoid the overhead of having to write the updated condexec
11887 * bits back to the CPUARMState for every instruction in an IT block. So:
11888 * (1) if the condexec bits are not already zero then we write
11889 * zero back into the CPUARMState now. This avoids complications trying
11890 * to do it at the end of the block. (For example if we don't do this
11891 * it's hard to identify whether we can safely skip writing condexec
11892 * at the end of the TB, which we definitely want to do for the case
11893 * where a TB doesn't do anything with the IT state at all.)
11894 * (2) if we are going to leave the TB then we call gen_set_condexec()
11895 * which will write the correct value into CPUARMState if zero is wrong.
11896 * This is done both for leaving the TB at the end, and for leaving
11897 * it because of an exception we know will happen, which is done in
11898 * gen_exception_insn(). The latter is necessary because we need to
11899 * leave the TB with the PC/IT state just prior to execution of the
11900 * instruction which caused the exception.
11901 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
11902 * then the CPUARMState will be wrong and we need to reset it.
11903 * This is handled in the same way as restoration of the
11904 * PC in these situations; we save the value of the condexec bits
11905 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
11906 * then uses this to restore them after an exception.
11908 * Note that there are no instructions which can read the condexec
11909 * bits, and none which can write non-static values to them, so
11910 * we don't need to care about whether CPUARMState is correct in the
11911 * middle of a TB.
11914 /* Reset the conditional execution bits immediately. This avoids
11915 complications trying to do it at the end of the block. */
11916 if (dc->condexec_mask || dc->condexec_cond) {
11917 TCGv_i32 tmp = tcg_temp_new_i32();
11918 tcg_gen_movi_i32(tmp, 0);
11919 store_cpu_field(tmp, condexec_bits);
11923 static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
11925 DisasContext *dc = container_of(dcbase, DisasContext, base);
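/*
 * Record the per-insn start state: the PC, the packed condexec (IT)
 * bits, and a third word reserved for the exception syndrome; these
 * are the words restore_state_to_opc() consumes after a fault.
 */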
11927 tcg_gen_insn_start(dc->base.pc_next,
11928 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1), 0);
11930 dc->insn_start = tcg_last_op();
11933 static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
11934 const CPUBreakpoint *bp)
11936 DisasContext *dc = container_of(dcbase, DisasContext, base);
11938 if (bp->flags & BP_CPU) {
11939 gen_set_condexec(dc);
11940 gen_set_pc_im(dc, dc->base.pc_next);
11941 gen_helper_check_breakpoints(cpu_env);
11942 /* End the TB early; it's likely not going to be executed */
11943 dc->base.is_jmp = DISAS_TOO_MANY;
11944 } else {
11945 gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
11946 /* The address covered by the breakpoint must be
11947 included in [tb->pc, tb->pc + tb->size) in order
11948 for it to be properly cleared -- thus we
11949 increment the PC here so that the logic setting
11950 tb->size below does the right thing. */
11951 /* TODO: Advance PC by correct instruction length to
11952 * avoid disassembler error messages */
11953 dc->base.pc_next += 2;
11954 dc->base.is_jmp = DISAS_NORETURN;
11957 return true;
11960 static bool arm_pre_translate_insn(DisasContext *dc)
11962 #ifdef CONFIG_USER_ONLY
11963 /* Intercept jump to the magic kernel page. */
11964 if (dc->base.pc_next >= 0xffff0000) {
11965 /* We always get here via a jump, so know we are not in a
11966 conditional execution block. */
11967 gen_exception_internal(EXCP_KERNEL_TRAP);
11968 dc->base.is_jmp = DISAS_NORETURN;
11969 return true;
11971 #endif
11973 if (dc->ss_active && !dc->pstate_ss) {
11974 /* Singlestep state is Active-pending.
11975 * If we're in this state at the start of a TB then either
11976 * a) we just took an exception to an EL which is being debugged
11977 * and this is the first insn in the exception handler
11978 * b) debug exceptions were masked and we just unmasked them
11979 * without changing EL (eg by clearing PSTATE.D)
11980 * In either case we're going to take a swstep exception in the
11981 * "did not step an insn" case, and so the syndrome ISV and EX
11982 * bits should be zero.
11984 assert(dc->base.num_insns == 1);
11985 gen_swstep_exception(dc, 0, 0);
11986 dc->base.is_jmp = DISAS_NORETURN;
11987 return true;
11990 return false;
11993 static void arm_post_translate_insn(DisasContext *dc)
11995 if (dc->condjmp && !dc->base.is_jmp) {
11996 gen_set_label(dc->condlabel);
11997 dc->condjmp = 0;
11999 translator_loop_temp_check(&dc->base);
12002 static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12004 DisasContext *dc = container_of(dcbase, DisasContext, base);
12005 CPUARMState *env = cpu->env_ptr;
12006 unsigned int insn;
12008 if (arm_pre_translate_insn(dc)) {
12009 return;
12012 dc->pc_curr = dc->base.pc_next;
12013 insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
12014 dc->insn = insn;
12015 dc->base.pc_next += 4;
12016 disas_arm_insn(dc, insn);
12018 arm_post_translate_insn(dc);
12020 /* ARM is a fixed-length ISA. We performed the cross-page check
12021 in init_disas_context by adjusting max_insns. */
12024 static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
12026 /* Return true if this Thumb insn is always unconditional,
12027 * even inside an IT block. This is true of only a very few
12028 * instructions: BKPT, HLT, and SG.
12030 * A larger class of instructions are UNPREDICTABLE if used
12031 * inside an IT block; we do not need to detect those here, because
12032 * what we do by default (perform the cc check and update the IT
12033 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12034 * choice for those situations.
12036 * insn is either a 16-bit or a 32-bit instruction; the two are
12037 * distinguishable because for the 16-bit case the top 16 bits
12038 * are zeroes, and that isn't a valid 32-bit encoding.
12040 if ((insn & 0xffffff00) == 0xbe00) {
12041 /* BKPT */
12042 return true;
12045 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
12046 !arm_dc_feature(s, ARM_FEATURE_M)) {
12047 /* HLT: v8A only. This is unconditional even when it is going to
12048 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12049 * For v7 cores this was a plain old undefined encoding and so
12050 * honours its cc check. (We might be using the encoding as
12051 * a semihosting trap, but we don't change the cc check behaviour
12052 * on that account, because a debugger connected to a real v7A
12053 * core and emulating semihosting traps by catching the UNDEF
12054 * exception would also only see cases where the cc check passed.
12055 * No guest code should be trying to do a HLT semihosting trap
12056 * in an IT block anyway.
12058 return true;
12061 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12062 arm_dc_feature(s, ARM_FEATURE_M)) {
12063 /* SG: v8M only */
12064 return true;
12067 return false;
12070 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12072 DisasContext *dc = container_of(dcbase, DisasContext, base);
12073 CPUARMState *env = cpu->env_ptr;
12074 uint32_t insn;
12075 bool is_16bit;
12077 if (arm_pre_translate_insn(dc)) {
12078 return;
12081 dc->pc_curr = dc->base.pc_next;
12082 insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
12083 is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
12084 dc->base.pc_next += 2;
12085 if (!is_16bit) {
12086 uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
12088 insn = insn << 16 | insn2;
12089 dc->base.pc_next += 2;
12091 dc->insn = insn;
12093 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
12094 uint32_t cond = dc->condexec_cond;
12097 * Conditionally skip the insn. Note that both 0xe and 0xf mean
12098 * "always"; 0xf is not "never".
12100 if (cond < 0x0e) {
12101 arm_skip_unless(dc, cond);
12105 if (is_16bit) {
12106 disas_thumb_insn(dc, insn);
12107 } else {
12108 disas_thumb2_insn(dc, insn);
12111 /* Advance the Thumb condexec condition. */
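/* Shift the next mask bit into bit 0 of the condition; once the
 * 5-bit mask empties, the IT block is finished and the condition
 * resets to 0 (not in an IT block). */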
12112 if (dc->condexec_mask) {
12113 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
12114 ((dc->condexec_mask >> 4) & 1));
12115 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
12116 if (dc->condexec_mask == 0) {
12117 dc->condexec_cond = 0;
12121 arm_post_translate_insn(dc);
12123 /* Thumb is a variable-length ISA. Stop translation when the next insn
12124 * will touch a new page. This ensures that prefetch aborts occur at
12125 * the right place.
12127 * We want to stop the TB if the next insn starts in a new page,
12128 * or if it spans between this page and the next. This means that
12129 * if we're looking at the last halfword in the page we need to
12130 * see if it's a 16-bit Thumb insn (which will fit in this TB)
12131 * or a 32-bit Thumb insn (which won't).
12132 * This is to avoid generating a silly TB with a single 16-bit insn
12133 * in it at the end of this page (which would execute correctly
12134 * but isn't very efficient).
12136 if (dc->base.is_jmp == DISAS_NEXT
12137 && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
12138 || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
12139 && insn_crosses_page(env, dc)))) {
12140 dc->base.is_jmp = DISAS_TOO_MANY;
12144 static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
12146 DisasContext *dc = container_of(dcbase, DisasContext, base);
12148 if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
12149 /* FIXME: This can theoretically happen with self-modifying code. */
12150 cpu_abort(cpu, "IO on conditional branch instruction");
12153 /* At this stage dc->condjmp will only be set when the skipped
12154 instruction was a conditional branch or trap, and the PC has
12155 already been written. */
12156 gen_set_condexec(dc);
12157 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
12158 /* Exception return branches need some special case code at the
12159 * end of the TB, which is complex enough that it has to
12160 * handle the single-step vs not and the condition-failed
12161 * insn codepath itself.
12163 gen_bx_excret_final_code(dc);
12164 } else if (unlikely(is_singlestepping(dc))) {
12165 /* Unconditional and "condition passed" instruction codepath. */
12166 switch (dc->base.is_jmp) {
12167 case DISAS_SWI:
12168 gen_ss_advance(dc);
12169 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12170 default_exception_el(dc));
12171 break;
12172 case DISAS_HVC:
12173 gen_ss_advance(dc);
12174 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
12175 break;
12176 case DISAS_SMC:
12177 gen_ss_advance(dc);
12178 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
12179 break;
12180 case DISAS_NEXT:
12181 case DISAS_TOO_MANY:
12182 case DISAS_UPDATE:
12183 gen_set_pc_im(dc, dc->base.pc_next);
12184 /* fall through */
12185 default:
12186 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
12187 gen_singlestep_exception(dc);
12188 break;
12189 case DISAS_NORETURN:
12190 break;
12192 } else {
12193 /* While branches must always occur at the end of an IT block,
12194 there are a few other things that can cause us to terminate
12195 the TB in the middle of an IT block:
12196 - Exception generating instructions (bkpt, swi, undefined).
12197 - Page boundaries.
12198 - Hardware watchpoints.
12199 Hardware breakpoints have already been handled and skip this code.
12201 switch(dc->base.is_jmp) {
12202 case DISAS_NEXT:
12203 case DISAS_TOO_MANY:
12204 gen_goto_tb(dc, 1, dc->base.pc_next);
12205 break;
12206 case DISAS_JUMP:
12207 gen_goto_ptr();
12208 break;
12209 case DISAS_UPDATE:
12210 gen_set_pc_im(dc, dc->base.pc_next);
12211 /* fall through */
12212 default:
12213 /* indicate that the hash table must be used to find the next TB */
12214 tcg_gen_exit_tb(NULL, 0);
12215 break;
12216 case DISAS_NORETURN:
12217 /* nothing more to generate */
12218 break;
12219 case DISAS_WFI:
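/* The wfi helper is passed the encoding length (2 for a 16-bit Thumb
 * insn, whose upper halfword in dc->insn is zero, else 4) so it can
 * back the PC up to the WFI if the insn has to trap to a higher EL. */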
12221 TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
12222 !(dc->insn & (1U << 31))) ? 2 : 4);
12224 gen_helper_wfi(cpu_env, tmp);
12225 tcg_temp_free_i32(tmp);
12226 /* The helper doesn't necessarily throw an exception, but we
12227 * must go back to the main loop to check for interrupts anyway.
12229 tcg_gen_exit_tb(NULL, 0);
12230 break;
12232 case DISAS_WFE:
12233 gen_helper_wfe(cpu_env);
12234 break;
12235 case DISAS_YIELD:
12236 gen_helper_yield(cpu_env);
12237 break;
12238 case DISAS_SWI:
12239 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12240 default_exception_el(dc));
12241 break;
12242 case DISAS_HVC:
12243 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
12244 break;
12245 case DISAS_SMC:
12246 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
12247 break;
12251 if (dc->condjmp) {
12252 /* "Condition failed" instruction codepath for the branch/trap insn */
12253 gen_set_label(dc->condlabel);
12254 gen_set_condexec(dc);
12255 if (unlikely(is_singlestepping(dc))) {
12256 gen_set_pc_im(dc, dc->base.pc_next);
12257 gen_singlestep_exception(dc);
12258 } else {
12259 gen_goto_tb(dc, 1, dc->base.pc_next);
12264 static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
12266 DisasContext *dc = container_of(dcbase, DisasContext, base);
12268 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
12269 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
12272 static const TranslatorOps arm_translator_ops = {
12273 .init_disas_context = arm_tr_init_disas_context,
12274 .tb_start = arm_tr_tb_start,
12275 .insn_start = arm_tr_insn_start,
12276 .breakpoint_check = arm_tr_breakpoint_check,
12277 .translate_insn = arm_tr_translate_insn,
12278 .tb_stop = arm_tr_tb_stop,
12279 .disas_log = arm_tr_disas_log,
12282 static const TranslatorOps thumb_translator_ops = {
12283 .init_disas_context = arm_tr_init_disas_context,
12284 .tb_start = arm_tr_tb_start,
12285 .insn_start = arm_tr_insn_start,
12286 .breakpoint_check = arm_tr_breakpoint_check,
12287 .translate_insn = thumb_tr_translate_insn,
12288 .tb_stop = arm_tr_tb_stop,
12289 .disas_log = arm_tr_disas_log,
12292 /* generate intermediate code for basic block 'tb'. */
12293 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
12295 DisasContext dc;
12296 const TranslatorOps *ops = &arm_translator_ops;
12298 if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
12299 ops = &thumb_translator_ops;
12301 #ifdef TARGET_AARCH64
12302 if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
12303 ops = &aarch64_translator_ops;
12305 #endif
12307 translator_loop(ops, &dc.base, cpu, tb, max_insns);
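/*
 * The data[] words below mirror what arm_tr_insn_start() emitted for
 * each insn: data[0] is the PC, data[1] the packed condexec (IT) bits
 * (ignored for AArch64, where condexec_bits is simply reset), and
 * data[2] the syndrome word.
 */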
12310 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12311 target_ulong *data)
12313 if (is_a64(env)) {
12314 env->pc = data[0];
12315 env->condexec_bits = 0;
12316 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
12317 } else {
12318 env->regs[15] = data[0];
12319 env->condexec_bits = data[1];
12320 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;