/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "arm_ldst.h"
#include "exec/semihost.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    dc_isar_feature(jazelle, s)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Function prototypes for gen_ functions calling Neon helpers.  */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}
/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;
/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}
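
/*
 * Illustrative examples (assumed, not quoted from the original source): a
 * caller that has just emitted a 32-bit load into r3 would record its
 * syndrome as
 *
 *     disas_set_da_iss(s, MO_UL, 3);
 *
 * while a halfword store from r5 using a 16-bit Thumb encoding would use
 *
 *     disas_set_da_iss(s, MO_UW, 5 | ISSIsWrite | ISSIs16Bit);
 *
 * The gen_aa32_ld*_iss()/gen_aa32_st*_iss() wrappers defined further down
 * make these calls on behalf of the load/store decoders.
 */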
static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    case ARMMMUIdx_S2NS:
    default:
        g_assert_not_reached();
    }
}
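
/*
 * Worked example (illustrative, following the switch above): an LDRT
 * executed while s->mmu_idx is ARMMMUIdx_S1SE1 (Secure PL1) performs its
 * access with the ARMMMUIdx_S1SE0 index, i.e. exactly as an unprivileged
 * Secure EL0 load would; at Hyp (S1E2), where LDRT is UNPREDICTABLE, we
 * likewise fall back to the Non-secure EL0 index.
 */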
183 static inline TCGv_i32 load_cpu_offset(int offset)
185 TCGv_i32 tmp = tcg_temp_new_i32();
186 tcg_gen_ld_i32(tmp, cpu_env, offset);
187 return tmp;
190 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
192 static inline void store_cpu_offset(TCGv_i32 var, int offset)
194 tcg_gen_st_i32(var, cpu_env, offset);
195 tcg_temp_free_i32(var);
198 #define store_cpu_field(var, name) \
199 store_cpu_offset(var, offsetof(CPUARMState, name))
201 /* Set a variable to the value of a CPU register. */
202 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
204 if (reg == 15) {
205 uint32_t addr;
206 /* normally, since we updated PC, we need only to add one insn */
207 if (s->thumb)
208 addr = (long)s->pc + 2;
209 else
210 addr = (long)s->pc + 4;
211 tcg_gen_movi_i32(var, addr);
212 } else {
213 tcg_gen_mov_i32(var, cpu_R[reg]);
217 /* Create a new temporary and set it to the value of a CPU register. */
218 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
220 TCGv_i32 tmp = tcg_temp_new_i32();
221 load_reg_var(s, tmp, reg);
222 return tmp;
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
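
/*
 * Illustrative sketch (not part of the original source): the usual decoder
 * pattern built on these helpers is, e.g. for "ADD Rd, Rn, Rm":
 *
 *     TCGv_i32 t1 = load_reg(s, rn);
 *     TCGv_i32 t2 = load_reg(s, rm);
 *     tcg_gen_add_i32(t1, t1, t2);
 *     tcg_temp_free_i32(t2);
 *     store_reg(s, rd, t1);
 *
 * load_reg() yields the architecturally visible PC value for r15, and
 * store_reg() turns a write to r15 into a branch (DISAS_JUMP).
 */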
/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    store_reg(s, 13, var);
}
259 /* Value extensions. */
260 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
261 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
262 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
263 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
265 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
266 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
269 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
271 TCGv_i32 tmp_mask = tcg_const_i32(mask);
272 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
273 tcg_temp_free_i32(tmp_mask);
275 /* Set NZCV flags from the high 4 bits of var. */
276 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
278 static void gen_exception_internal(int excp)
280 TCGv_i32 tcg_excp = tcg_const_i32(excp);
282 assert(excp_is_internal(excp));
283 gen_helper_exception_internal(cpu_env, tcg_excp);
284 tcg_temp_free_i32(tcg_excp);
287 static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
289 TCGv_i32 tcg_excp = tcg_const_i32(excp);
290 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
291 TCGv_i32 tcg_el = tcg_const_i32(target_el);
293 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
294 tcg_syn, tcg_el);
296 tcg_temp_free_i32(tcg_el);
297 tcg_temp_free_i32(tcg_syn);
298 tcg_temp_free_i32(tcg_excp);
301 static void gen_ss_advance(DisasContext *s)
303 /* If the singlestep state is Active-not-pending, advance to
304 * Active-pending.
306 if (s->ss_active) {
307 s->pstate_ss = 0;
308 gen_helper_clear_pstate_ss(cpu_env);
312 static void gen_step_complete_exception(DisasContext *s)
314 /* We just completed step of an insn. Move from Active-not-pending
315 * to Active-pending, and then also take the swstep exception.
316 * This corresponds to making the (IMPDEF) choice to prioritize
317 * swstep exceptions over asynchronous exceptions taken to an exception
318 * level where debug is disabled. This choice has the advantage that
319 * we do not need to maintain internal state corresponding to the
320 * ISV/EX syndrome bits between completion of the step and generation
321 * of the exception, and our syndrome information is always correct.
323 gen_ss_advance(s);
324 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
325 default_exception_el(s));
326 s->base.is_jmp = DISAS_NORETURN;
329 static void gen_singlestep_exception(DisasContext *s)
331 /* Generate the right kind of exception for singlestep, which is
332 * either the architectural singlestep or EXCP_DEBUG for QEMU's
333 * gdb singlestepping.
335 if (s->ss_active) {
336 gen_step_complete_exception(s);
337 } else {
338 gen_exception_internal(EXCP_DEBUG);
342 static inline bool is_singlestepping(DisasContext *s)
344 /* Return true if we are singlestepping either because of
345 * architectural singlestep or QEMU gdbstub singlestep. This does
346 * not include the command line '-singlestep' mode which is rather
347 * misnamed as it only means "one instruction per TB" and doesn't
348 * affect the code we generate.
350 return s->base.singlestep_enabled || s->ss_active;
353 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
355 TCGv_i32 tmp1 = tcg_temp_new_i32();
356 TCGv_i32 tmp2 = tcg_temp_new_i32();
357 tcg_gen_ext16s_i32(tmp1, a);
358 tcg_gen_ext16s_i32(tmp2, b);
359 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
360 tcg_temp_free_i32(tmp2);
361 tcg_gen_sari_i32(a, a, 16);
362 tcg_gen_sari_i32(b, b, 16);
363 tcg_gen_mul_i32(b, b, a);
364 tcg_gen_mov_i32(a, tmp1);
365 tcg_temp_free_i32(tmp1);
368 /* Byteswap each halfword. */
369 static void gen_rev16(TCGv_i32 var)
371 TCGv_i32 tmp = tcg_temp_new_i32();
372 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
373 tcg_gen_shri_i32(tmp, var, 8);
374 tcg_gen_and_i32(tmp, tmp, mask);
375 tcg_gen_and_i32(var, var, mask);
376 tcg_gen_shli_i32(var, var, 8);
377 tcg_gen_or_i32(var, var, tmp);
378 tcg_temp_free_i32(mask);
379 tcg_temp_free_i32(tmp);
382 /* Byteswap low halfword and sign extend. */
383 static void gen_revsh(TCGv_i32 var)
385 tcg_gen_ext16u_i32(var, var);
386 tcg_gen_bswap16_i32(var, var);
387 tcg_gen_ext16s_i32(var, var);
390 /* Return (b << 32) + a. Mark inputs as dead */
391 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
393 TCGv_i64 tmp64 = tcg_temp_new_i64();
395 tcg_gen_extu_i32_i64(tmp64, b);
396 tcg_temp_free_i32(b);
397 tcg_gen_shli_i64(tmp64, tmp64, 32);
398 tcg_gen_add_i64(a, tmp64, a);
400 tcg_temp_free_i64(tmp64);
401 return a;
404 /* Return (b << 32) - a. Mark inputs as dead. */
405 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
407 TCGv_i64 tmp64 = tcg_temp_new_i64();
409 tcg_gen_extu_i32_i64(tmp64, b);
410 tcg_temp_free_i32(b);
411 tcg_gen_shli_i64(tmp64, tmp64, 32);
412 tcg_gen_sub_i64(a, tmp64, a);
414 tcg_temp_free_i64(tmp64);
415 return a;
418 /* 32x32->64 multiply. Marks inputs as dead. */
419 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
421 TCGv_i32 lo = tcg_temp_new_i32();
422 TCGv_i32 hi = tcg_temp_new_i32();
423 TCGv_i64 ret;
425 tcg_gen_mulu2_i32(lo, hi, a, b);
426 tcg_temp_free_i32(a);
427 tcg_temp_free_i32(b);
429 ret = tcg_temp_new_i64();
430 tcg_gen_concat_i32_i64(ret, lo, hi);
431 tcg_temp_free_i32(lo);
432 tcg_temp_free_i32(hi);
434 return ret;
437 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
439 TCGv_i32 lo = tcg_temp_new_i32();
440 TCGv_i32 hi = tcg_temp_new_i32();
441 TCGv_i64 ret;
443 tcg_gen_muls2_i32(lo, hi, a, b);
444 tcg_temp_free_i32(a);
445 tcg_temp_free_i32(b);
447 ret = tcg_temp_new_i64();
448 tcg_gen_concat_i32_i64(ret, lo, hi);
449 tcg_temp_free_i32(lo);
450 tcg_temp_free_i32(hi);
452 return ret;
455 /* Swap low and high halfwords. */
456 static void gen_swap_half(TCGv_i32 var)
458 TCGv_i32 tmp = tcg_temp_new_i32();
459 tcg_gen_shri_i32(tmp, var, 16);
460 tcg_gen_shli_i32(var, var, 16);
461 tcg_gen_or_i32(var, var, tmp);
462 tcg_temp_free_i32(tmp);
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
472 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
474 TCGv_i32 tmp = tcg_temp_new_i32();
475 tcg_gen_xor_i32(tmp, t0, t1);
476 tcg_gen_andi_i32(tmp, tmp, 0x8000);
477 tcg_gen_andi_i32(t0, t0, ~0x8000);
478 tcg_gen_andi_i32(t1, t1, ~0x8000);
479 tcg_gen_add_i32(t0, t0, t1);
480 tcg_gen_xor_i32(t0, t0, tmp);
481 tcg_temp_free_i32(tmp);
482 tcg_temp_free_i32(t1);
485 /* Set CF to the top bit of var. */
486 static void gen_set_CF_bit31(TCGv_i32 var)
488 tcg_gen_shri_i32(cpu_CF, var, 31);
491 /* Set N and Z flags from var. */
492 static inline void gen_logic_CC(TCGv_i32 var)
494 tcg_gen_mov_i32(cpu_NF, var);
495 tcg_gen_mov_i32(cpu_ZF, var);
498 /* T0 += T1 + CF. */
499 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
501 tcg_gen_add_i32(t0, t0, t1);
502 tcg_gen_add_i32(t0, t0, cpu_CF);
505 /* dest = T0 + T1 + CF. */
506 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
508 tcg_gen_add_i32(dest, t0, t1);
509 tcg_gen_add_i32(dest, dest, cpu_CF);
512 /* dest = T0 - T1 + CF - 1. */
513 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
515 tcg_gen_sub_i32(dest, t0, t1);
516 tcg_gen_add_i32(dest, dest, cpu_CF);
517 tcg_gen_subi_i32(dest, dest, 1);
520 /* dest = T0 + T1. Compute C, N, V and Z flags */
521 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
523 TCGv_i32 tmp = tcg_temp_new_i32();
524 tcg_gen_movi_i32(tmp, 0);
525 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
526 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
527 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
528 tcg_gen_xor_i32(tmp, t0, t1);
529 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
530 tcg_temp_free_i32(tmp);
531 tcg_gen_mov_i32(dest, cpu_NF);
534 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
535 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
537 TCGv_i32 tmp = tcg_temp_new_i32();
538 if (TCG_TARGET_HAS_add2_i32) {
539 tcg_gen_movi_i32(tmp, 0);
540 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
541 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
542 } else {
543 TCGv_i64 q0 = tcg_temp_new_i64();
544 TCGv_i64 q1 = tcg_temp_new_i64();
545 tcg_gen_extu_i32_i64(q0, t0);
546 tcg_gen_extu_i32_i64(q1, t1);
547 tcg_gen_add_i64(q0, q0, q1);
548 tcg_gen_extu_i32_i64(q1, cpu_CF);
549 tcg_gen_add_i64(q0, q0, q1);
550 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
551 tcg_temp_free_i64(q0);
552 tcg_temp_free_i64(q1);
554 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
555 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
556 tcg_gen_xor_i32(tmp, t0, t1);
557 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
558 tcg_temp_free_i32(tmp);
559 tcg_gen_mov_i32(dest, cpu_NF);
562 /* dest = T0 - T1. Compute C, N, V and Z flags */
563 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
565 TCGv_i32 tmp;
566 tcg_gen_sub_i32(cpu_NF, t0, t1);
567 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
568 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
569 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
570 tmp = tcg_temp_new_i32();
571 tcg_gen_xor_i32(tmp, t0, t1);
572 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
573 tcg_temp_free_i32(tmp);
574 tcg_gen_mov_i32(dest, cpu_NF);
577 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
578 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
580 TCGv_i32 tmp = tcg_temp_new_i32();
581 tcg_gen_not_i32(tmp, t1);
582 gen_adc_CC(dest, t0, tmp);
583 tcg_temp_free_i32(tmp);
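
/*
 * Worked example (illustrative): ARM's C flag after a subtraction is
 * "NOT borrow", which is why gen_sub_CC() uses TCG_COND_GEU: 7 - 5 sets
 * CF = 1, while 5 - 7 sets CF = 0.  gen_sbc_CC() relies on the identity
 * t0 + ~t1 + CF == t0 - t1 - (1 - CF), so SBC and RSC can reuse the ADC
 * flag logic unchanged.
 */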
586 #define GEN_SHIFT(name) \
587 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
589 TCGv_i32 tmp1, tmp2, tmp3; \
590 tmp1 = tcg_temp_new_i32(); \
591 tcg_gen_andi_i32(tmp1, t1, 0xff); \
592 tmp2 = tcg_const_i32(0); \
593 tmp3 = tcg_const_i32(0x1f); \
594 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
595 tcg_temp_free_i32(tmp3); \
596 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
597 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
598 tcg_temp_free_i32(tmp2); \
599 tcg_temp_free_i32(tmp1); \
601 GEN_SHIFT(shl)
602 GEN_SHIFT(shr)
603 #undef GEN_SHIFT
605 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
607 TCGv_i32 tmp1, tmp2;
608 tmp1 = tcg_temp_new_i32();
609 tcg_gen_andi_i32(tmp1, t1, 0xff);
610 tmp2 = tcg_const_i32(0x1f);
611 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
612 tcg_temp_free_i32(tmp2);
613 tcg_gen_sar_i32(dest, t0, tmp1);
614 tcg_temp_free_i32(tmp1);
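
/*
 * Illustrative note (not from the original source): these register-shift
 * helpers implement the A32 rule that only the bottom byte of the shift
 * register counts and that amounts of 32 or more drain the value, e.g.
 * with r2 = 40, "LSR r0, r1, r2" yields 0 while "ASR r0, r1, r2" yields
 * r1 arithmetically shifted by 31 (all sign bits), which is what the
 * movcond clamps above encode.
 */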
617 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
619 TCGv_i32 c0 = tcg_const_i32(0);
620 TCGv_i32 tmp = tcg_temp_new_i32();
621 tcg_gen_neg_i32(tmp, src);
622 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
623 tcg_temp_free_i32(c0);
624 tcg_temp_free_i32(tmp);
627 static void shifter_out_im(TCGv_i32 var, int shift)
629 if (shift == 0) {
630 tcg_gen_andi_i32(cpu_CF, var, 1);
631 } else {
632 tcg_gen_shri_i32(cpu_CF, var, shift);
633 if (shift != 31) {
634 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
639 /* Shift by immediate. Includes special handling for shift == 0. */
640 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
641 int shift, int flags)
643 switch (shiftop) {
644 case 0: /* LSL */
645 if (shift != 0) {
646 if (flags)
647 shifter_out_im(var, 32 - shift);
648 tcg_gen_shli_i32(var, var, shift);
650 break;
651 case 1: /* LSR */
652 if (shift == 0) {
653 if (flags) {
654 tcg_gen_shri_i32(cpu_CF, var, 31);
656 tcg_gen_movi_i32(var, 0);
657 } else {
658 if (flags)
659 shifter_out_im(var, shift - 1);
660 tcg_gen_shri_i32(var, var, shift);
662 break;
663 case 2: /* ASR */
664 if (shift == 0)
665 shift = 32;
666 if (flags)
667 shifter_out_im(var, shift - 1);
668 if (shift == 32)
669 shift = 31;
670 tcg_gen_sari_i32(var, var, shift);
671 break;
672 case 3: /* ROR/RRX */
673 if (shift != 0) {
674 if (flags)
675 shifter_out_im(var, shift - 1);
676 tcg_gen_rotri_i32(var, var, shift); break;
677 } else {
678 TCGv_i32 tmp = tcg_temp_new_i32();
679 tcg_gen_shli_i32(tmp, cpu_CF, 31);
680 if (flags)
681 shifter_out_im(var, 0);
682 tcg_gen_shri_i32(var, var, 1);
683 tcg_gen_or_i32(var, var, tmp);
684 tcg_temp_free_i32(tmp);
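
/*
 * Illustrative note (assumption about intent, consistent with the code
 * above): the immediate-shift encodings reuse shift == 0 for special
 * cases, so "LSR #0" really means LSR #32 (result 0, C = bit 31),
 * "ASR #0" means ASR #32 (result is 32 copies of the sign bit), and
 * "ROR #0" is RRX, rotating through the carry flag by one bit.
 */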
689 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
690 TCGv_i32 shift, int flags)
692 if (flags) {
693 switch (shiftop) {
694 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
695 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
696 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
697 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
699 } else {
700 switch (shiftop) {
701 case 0:
702 gen_shl(var, var, shift);
703 break;
704 case 1:
705 gen_shr(var, var, shift);
706 break;
707 case 2:
708 gen_sar(var, var, shift);
709 break;
710 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
711 tcg_gen_rotr_i32(var, var, shift); break;
714 tcg_temp_free_i32(shift);
717 #define PAS_OP(pfx) \
718 switch (op2) { \
719 case 0: gen_pas_helper(glue(pfx,add16)); break; \
720 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
721 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
722 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
723 case 4: gen_pas_helper(glue(pfx,add8)); break; \
724 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
726 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
728 TCGv_ptr tmp;
730 switch (op1) {
731 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
732 case 1:
733 tmp = tcg_temp_new_ptr();
734 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
735 PAS_OP(s)
736 tcg_temp_free_ptr(tmp);
737 break;
738 case 5:
739 tmp = tcg_temp_new_ptr();
740 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
741 PAS_OP(u)
742 tcg_temp_free_ptr(tmp);
743 break;
744 #undef gen_pas_helper
745 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
746 case 2:
747 PAS_OP(q);
748 break;
749 case 3:
750 PAS_OP(sh);
751 break;
752 case 6:
753 PAS_OP(uq);
754 break;
755 case 7:
756 PAS_OP(uh);
757 break;
758 #undef gen_pas_helper
761 #undef PAS_OP
763 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
764 #define PAS_OP(pfx) \
765 switch (op1) { \
766 case 0: gen_pas_helper(glue(pfx,add8)); break; \
767 case 1: gen_pas_helper(glue(pfx,add16)); break; \
768 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
769 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
770 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
771 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
773 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
775 TCGv_ptr tmp;
777 switch (op2) {
778 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
779 case 0:
780 tmp = tcg_temp_new_ptr();
781 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
782 PAS_OP(s)
783 tcg_temp_free_ptr(tmp);
784 break;
785 case 4:
786 tmp = tcg_temp_new_ptr();
787 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
788 PAS_OP(u)
789 tcg_temp_free_ptr(tmp);
790 break;
791 #undef gen_pas_helper
792 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
793 case 1:
794 PAS_OP(q);
795 break;
796 case 2:
797 PAS_OP(sh);
798 break;
799 case 5:
800 PAS_OP(uq);
801 break;
802 case 6:
803 PAS_OP(uh);
804 break;
805 #undef gen_pas_helper
808 #undef PAS_OP
811 * Generate a conditional based on ARM condition code cc.
812 * This is common between ARM and Aarch64 targets.
814 void arm_test_cc(DisasCompare *cmp, int cc)
816 TCGv_i32 value;
817 TCGCond cond;
818 bool global = true;
820 switch (cc) {
821 case 0: /* eq: Z */
822 case 1: /* ne: !Z */
823 cond = TCG_COND_EQ;
824 value = cpu_ZF;
825 break;
827 case 2: /* cs: C */
828 case 3: /* cc: !C */
829 cond = TCG_COND_NE;
830 value = cpu_CF;
831 break;
833 case 4: /* mi: N */
834 case 5: /* pl: !N */
835 cond = TCG_COND_LT;
836 value = cpu_NF;
837 break;
839 case 6: /* vs: V */
840 case 7: /* vc: !V */
841 cond = TCG_COND_LT;
842 value = cpu_VF;
843 break;
845 case 8: /* hi: C && !Z */
846 case 9: /* ls: !C || Z -> !(C && !Z) */
847 cond = TCG_COND_NE;
848 value = tcg_temp_new_i32();
849 global = false;
850 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
851 ZF is non-zero for !Z; so AND the two subexpressions. */
852 tcg_gen_neg_i32(value, cpu_CF);
853 tcg_gen_and_i32(value, value, cpu_ZF);
854 break;
856 case 10: /* ge: N == V -> N ^ V == 0 */
857 case 11: /* lt: N != V -> N ^ V != 0 */
858 /* Since we're only interested in the sign bit, == 0 is >= 0. */
859 cond = TCG_COND_GE;
860 value = tcg_temp_new_i32();
861 global = false;
862 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
863 break;
865 case 12: /* gt: !Z && N == V */
866 case 13: /* le: Z || N != V */
867 cond = TCG_COND_NE;
868 value = tcg_temp_new_i32();
869 global = false;
870 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
871 * the sign bit then AND with ZF to yield the result. */
872 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
873 tcg_gen_sari_i32(value, value, 31);
874 tcg_gen_andc_i32(value, cpu_ZF, value);
875 break;
877 case 14: /* always */
878 case 15: /* always */
879 /* Use the ALWAYS condition, which will fold early.
880 * It doesn't matter what we use for the value. */
881 cond = TCG_COND_ALWAYS;
882 value = cpu_ZF;
883 goto no_invert;
885 default:
886 fprintf(stderr, "Bad condition code 0x%x\n", cc);
887 abort();
890 if (cc & 1) {
891 cond = tcg_invert_cond(cond);
894 no_invert:
895 cmp->cond = cond;
896 cmp->value = value;
897 cmp->value_global = global;
900 void arm_free_cc(DisasCompare *cmp)
902 if (!cmp->value_global) {
903 tcg_temp_free_i32(cmp->value);
907 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
909 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
912 void arm_gen_test_cc(int cc, TCGLabel *label)
914 DisasCompare cmp;
915 arm_test_cc(&cmp, cc);
916 arm_jump_cc(&cmp, label);
917 arm_free_cc(&cmp);
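
/*
 * Illustrative usage (assumed, not quoted from this file): a decoder can
 * skip over a conditional instruction whose condition fails with
 *
 *     TCGLabel *over = gen_new_label();
 *     arm_gen_test_cc(cond ^ 1, over);
 *     ... emit the instruction body ...
 *     gen_set_label(over);
 *
 * i.e. branch to the label when the inverse condition holds.
 */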
920 static const uint8_t table_logic_cc[16] = {
921 1, /* and */
922 1, /* xor */
923 0, /* sub */
924 0, /* rsb */
925 0, /* add */
926 0, /* adc */
927 0, /* sbc */
928 0, /* rsc */
929 1, /* andl */
930 1, /* xorl */
931 0, /* cmp */
932 0, /* cmn */
933 1, /* orr */
934 1, /* mov */
935 1, /* bic */
936 1, /* mvn */
939 static inline void gen_set_condexec(DisasContext *s)
941 if (s->condexec_mask) {
942 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
943 TCGv_i32 tmp = tcg_temp_new_i32();
944 tcg_gen_movi_i32(tmp, val);
945 store_cpu_field(tmp, condexec_bits);
949 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
951 tcg_gen_movi_i32(cpu_R[15], val);
954 /* Set PC and Thumb state from an immediate address. */
955 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
957 TCGv_i32 tmp;
959 s->base.is_jmp = DISAS_JUMP;
960 if (s->thumb != (addr & 1)) {
961 tmp = tcg_temp_new_i32();
962 tcg_gen_movi_i32(tmp, addr & 1);
963 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
964 tcg_temp_free_i32(tmp);
966 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
969 /* Set PC and Thumb state from var. var is marked as dead. */
970 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
972 s->base.is_jmp = DISAS_JUMP;
973 tcg_gen_andi_i32(cpu_R[15], var, ~1);
974 tcg_gen_andi_i32(var, var, 1);
975 store_cpu_field(var, thumb);
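
/*
 * Worked example (illustrative): BX to a register holding 0x8001 ends up
 * with cpu_R[15] = 0x8000 and env->thumb = 1 (interworking branch into
 * Thumb state), while 0x8000 would clear the Thumb bit and continue in
 * ARM state.
 */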
/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
983 static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
985 /* Generate the same code here as for a simple bx, but flag via
986 * s->base.is_jmp that we need to do the rest of the work later.
988 gen_bx(s, var);
989 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
990 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
991 s->base.is_jmp = DISAS_BX_EXCRET;
995 static inline void gen_bx_excret_final_code(DisasContext *s)
997 /* Generate the code to finish possible exception return and end the TB */
998 TCGLabel *excret_label = gen_new_label();
999 uint32_t min_magic;
1001 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
1002 /* Covers FNC_RETURN and EXC_RETURN magic */
1003 min_magic = FNC_RETURN_MIN_MAGIC;
1004 } else {
1005 /* EXC_RETURN magic only */
1006 min_magic = EXC_RETURN_MIN_MAGIC;
1009 /* Is the new PC value in the magic range indicating exception return? */
1010 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
1011 /* No: end the TB as we would for a DISAS_JMP */
1012 if (is_singlestepping(s)) {
1013 gen_singlestep_exception(s);
1014 } else {
1015 tcg_gen_exit_tb(NULL, 0);
1017 gen_set_label(excret_label);
1018 /* Yes: this is an exception return.
1019 * At this point in runtime env->regs[15] and env->thumb will hold
1020 * the exception-return magic number, which do_v7m_exception_exit()
1021 * will read. Nothing else will be able to see those values because
1022 * the cpu-exec main loop guarantees that we will always go straight
1023 * from raising the exception to the exception-handling code.
1025 * gen_ss_advance(s) does nothing on M profile currently but
1026 * calling it is conceptually the right thing as we have executed
1027 * this instruction (compare SWI, HVC, SMC handling).
1029 gen_ss_advance(s);
1030 gen_exception_internal(EXCP_EXCEPTION_EXIT);
1033 static inline void gen_bxns(DisasContext *s, int rm)
1035 TCGv_i32 var = load_reg(s, rm);
1037 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1038 * we need to sync state before calling it, but:
1039 * - we don't need to do gen_set_pc_im() because the bxns helper will
1040 * always set the PC itself
1041 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1042 * unless it's outside an IT block or the last insn in an IT block,
1043 * so we know that condexec == 0 (already set at the top of the TB)
1044 * is correct in the non-UNPREDICTABLE cases, and we can choose
1045 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1047 gen_helper_v7m_bxns(cpu_env, var);
1048 tcg_temp_free_i32(var);
1049 s->base.is_jmp = DISAS_EXIT;
1052 static inline void gen_blxns(DisasContext *s, int rm)
1054 TCGv_i32 var = load_reg(s, rm);
1056 /* We don't need to sync condexec state, for the same reason as bxns.
1057 * We do however need to set the PC, because the blxns helper reads it.
1058 * The blxns helper may throw an exception.
1060 gen_set_pc_im(s, s->pc);
1061 gen_helper_v7m_blxns(cpu_env, var);
1062 tcg_temp_free_i32(var);
1063 s->base.is_jmp = DISAS_EXIT;
1066 /* Variant of store_reg which uses branch&exchange logic when storing
1067 to r15 in ARM architecture v7 and above. The source must be a temporary
1068 and will be marked as dead. */
1069 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
1071 if (reg == 15 && ENABLE_ARCH_7) {
1072 gen_bx(s, var);
1073 } else {
1074 store_reg(s, reg, var);
1078 /* Variant of store_reg which uses branch&exchange logic when storing
1079 * to r15 in ARM architecture v5T and above. This is used for storing
1080 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1081 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
1082 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
1084 if (reg == 15 && ENABLE_ARCH_5) {
1085 gen_bx_excret(s, var);
1086 } else {
1087 store_reg(s, reg, var);
1091 #ifdef CONFIG_USER_ONLY
1092 #define IS_USER_ONLY 1
1093 #else
1094 #define IS_USER_ONLY 0
1095 #endif
/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */
1105 static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
1107 TCGv addr = tcg_temp_new();
1108 tcg_gen_extu_i32_tl(addr, a32);
1110 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1111 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1112 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
1114 return addr;
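
/*
 * Worked example (illustrative): with SCTLR.B set (legacy BE32), the
 * address is xored with 4 - (1 << size), so a byte load from 0x1000 is
 * performed at 0x1003 and a halfword load from 0x1000 at 0x1002, while
 * word and larger accesses are left unchanged.
 */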
1117 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1118 int index, TCGMemOp opc)
1120 TCGv addr;
1122 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1123 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1124 opc |= MO_ALIGN;
1127 addr = gen_aa32_addr(s, a32, opc);
1128 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1129 tcg_temp_free(addr);
1132 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1133 int index, TCGMemOp opc)
1135 TCGv addr;
1137 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1138 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1139 opc |= MO_ALIGN;
1142 addr = gen_aa32_addr(s, a32, opc);
1143 tcg_gen_qemu_st_i32(val, addr, index, opc);
1144 tcg_temp_free(addr);
1147 #define DO_GEN_LD(SUFF, OPC) \
1148 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
1149 TCGv_i32 a32, int index) \
1151 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
1153 static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1154 TCGv_i32 val, \
1155 TCGv_i32 a32, int index, \
1156 ISSInfo issinfo) \
1158 gen_aa32_ld##SUFF(s, val, a32, index); \
1159 disas_set_da_iss(s, OPC, issinfo); \
1162 #define DO_GEN_ST(SUFF, OPC) \
1163 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1164 TCGv_i32 a32, int index) \
1166 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
1168 static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1169 TCGv_i32 val, \
1170 TCGv_i32 a32, int index, \
1171 ISSInfo issinfo) \
1173 gen_aa32_st##SUFF(s, val, a32, index); \
1174 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
1177 static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
1179 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1180 if (!IS_USER_ONLY && s->sctlr_b) {
1181 tcg_gen_rotri_i64(val, val, 32);
1185 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1186 int index, TCGMemOp opc)
1188 TCGv addr = gen_aa32_addr(s, a32, opc);
1189 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1190 gen_aa32_frob64(s, val);
1191 tcg_temp_free(addr);
1194 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1195 TCGv_i32 a32, int index)
1197 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1200 static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1201 int index, TCGMemOp opc)
1203 TCGv addr = gen_aa32_addr(s, a32, opc);
1205 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1206 if (!IS_USER_ONLY && s->sctlr_b) {
1207 TCGv_i64 tmp = tcg_temp_new_i64();
1208 tcg_gen_rotri_i64(tmp, val, 32);
1209 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1210 tcg_temp_free_i64(tmp);
1211 } else {
1212 tcg_gen_qemu_st_i64(val, addr, index, opc);
1214 tcg_temp_free(addr);
1217 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1218 TCGv_i32 a32, int index)
1220 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1223 DO_GEN_LD(8s, MO_SB)
1224 DO_GEN_LD(8u, MO_UB)
1225 DO_GEN_LD(16s, MO_SW)
1226 DO_GEN_LD(16u, MO_UW)
1227 DO_GEN_LD(32u, MO_UL)
1228 DO_GEN_ST(8, MO_UB)
1229 DO_GEN_ST(16, MO_UW)
1230 DO_GEN_ST(32, MO_UL)
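
/*
 * Illustrative expansion (not part of the original file): DO_GEN_LD(16u,
 * MO_UW) above produces gen_aa32_ld16u() and gen_aa32_ld16u_iss(), so a
 * decoder can emit a halfword load and record its ISS syndrome with e.g.
 *
 *     gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd);
 *
 * where the plain gen_aa32_ld16u() variant skips the syndrome bookkeeping.
 */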
1232 static inline void gen_hvc(DisasContext *s, int imm16)
1234 /* The pre HVC helper handles cases when HVC gets trapped
1235 * as an undefined insn by runtime configuration (ie before
1236 * the insn really executes).
1238 gen_set_pc_im(s, s->pc - 4);
1239 gen_helper_pre_hvc(cpu_env);
1240 /* Otherwise we will treat this as a real exception which
1241 * happens after execution of the insn. (The distinction matters
1242 * for the PC value reported to the exception handler and also
1243 * for single stepping.)
1245 s->svc_imm = imm16;
1246 gen_set_pc_im(s, s->pc);
1247 s->base.is_jmp = DISAS_HVC;
1250 static inline void gen_smc(DisasContext *s)
1252 /* As with HVC, we may take an exception either before or after
1253 * the insn executes.
1255 TCGv_i32 tmp;
1257 gen_set_pc_im(s, s->pc - 4);
1258 tmp = tcg_const_i32(syn_aa32_smc());
1259 gen_helper_pre_smc(cpu_env, tmp);
1260 tcg_temp_free_i32(tmp);
1261 gen_set_pc_im(s, s->pc);
1262 s->base.is_jmp = DISAS_SMC;
1265 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1267 gen_set_condexec(s);
1268 gen_set_pc_im(s, s->pc - offset);
1269 gen_exception_internal(excp);
1270 s->base.is_jmp = DISAS_NORETURN;
1273 static void gen_exception_insn(DisasContext *s, int offset, int excp,
1274 int syn, uint32_t target_el)
1276 gen_set_condexec(s);
1277 gen_set_pc_im(s, s->pc - offset);
1278 gen_exception(excp, syn, target_el);
1279 s->base.is_jmp = DISAS_NORETURN;
1282 static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
1284 TCGv_i32 tcg_syn;
1286 gen_set_condexec(s);
1287 gen_set_pc_im(s, s->pc - offset);
1288 tcg_syn = tcg_const_i32(syn);
1289 gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
1290 tcg_temp_free_i32(tcg_syn);
1291 s->base.is_jmp = DISAS_NORETURN;
1294 /* Force a TB lookup after an instruction that changes the CPU state. */
1295 static inline void gen_lookup_tb(DisasContext *s)
1297 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
1298 s->base.is_jmp = DISAS_EXIT;
1301 static inline void gen_hlt(DisasContext *s, int imm)
1303 /* HLT. This has two purposes.
1304 * Architecturally, it is an external halting debug instruction.
1305 * Since QEMU doesn't implement external debug, we treat this as
1306 * it is required for halting debug disabled: it will UNDEF.
1307 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1308 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1309 * must trigger semihosting even for ARMv7 and earlier, where
1310 * HLT was an undefined encoding.
1311 * In system mode, we don't allow userspace access to
1312 * semihosting, to provide some semblance of security
1313 * (and for consistency with our 32-bit semihosting).
1315 if (semihosting_enabled() &&
1316 #ifndef CONFIG_USER_ONLY
1317 s->current_el != 0 &&
1318 #endif
1319 (imm == (s->thumb ? 0x3c : 0xf000))) {
1320 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1321 return;
1324 gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
1325 default_exception_el(s));
1328 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1329 TCGv_i32 var)
1331 int val, rm, shift, shiftop;
1332 TCGv_i32 offset;
1334 if (!(insn & (1 << 25))) {
1335 /* immediate */
1336 val = insn & 0xfff;
1337 if (!(insn & (1 << 23)))
1338 val = -val;
1339 if (val != 0)
1340 tcg_gen_addi_i32(var, var, val);
1341 } else {
1342 /* shift/register */
1343 rm = (insn) & 0xf;
1344 shift = (insn >> 7) & 0x1f;
1345 shiftop = (insn >> 5) & 3;
1346 offset = load_reg(s, rm);
1347 gen_arm_shift_im(offset, shiftop, shift, 0);
1348 if (!(insn & (1 << 23)))
1349 tcg_gen_sub_i32(var, var, offset);
1350 else
1351 tcg_gen_add_i32(var, var, offset);
1352 tcg_temp_free_i32(offset);
1356 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1357 int extra, TCGv_i32 var)
1359 int val, rm;
1360 TCGv_i32 offset;
1362 if (insn & (1 << 22)) {
1363 /* immediate */
1364 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1365 if (!(insn & (1 << 23)))
1366 val = -val;
1367 val += extra;
1368 if (val != 0)
1369 tcg_gen_addi_i32(var, var, val);
1370 } else {
1371 /* register */
1372 if (extra)
1373 tcg_gen_addi_i32(var, var, extra);
1374 rm = (insn) & 0xf;
1375 offset = load_reg(s, rm);
1376 if (!(insn & (1 << 23)))
1377 tcg_gen_sub_i32(var, var, offset);
1378 else
1379 tcg_gen_add_i32(var, var, offset);
1380 tcg_temp_free_i32(offset);
1384 static TCGv_ptr get_fpstatus_ptr(int neon)
1386 TCGv_ptr statusptr = tcg_temp_new_ptr();
1387 int offset;
1388 if (neon) {
1389 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1390 } else {
1391 offset = offsetof(CPUARMState, vfp.fp_status);
1393 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1394 return statusptr;
1397 #define VFP_OP2(name) \
1398 static inline void gen_vfp_##name(int dp) \
1400 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1401 if (dp) { \
1402 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1403 } else { \
1404 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1406 tcg_temp_free_ptr(fpst); \
1409 VFP_OP2(add)
1410 VFP_OP2(sub)
1411 VFP_OP2(mul)
1412 VFP_OP2(div)
1414 #undef VFP_OP2
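
/*
 * Illustrative expansion (not from the original source): VFP_OP2(add)
 * above defines gen_vfp_add(dp), which adds cpu_F0d/cpu_F1d (double) or
 * cpu_F0s/cpu_F1s (single) into cpu_F0d/cpu_F0s using the normal VFP
 * fp_status, so the VFP data-processing decoder only has to load F0/F1
 * and choose double or single precision.
 */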
1416 static inline void gen_vfp_F1_mul(int dp)
1418 /* Like gen_vfp_mul() but put result in F1 */
1419 TCGv_ptr fpst = get_fpstatus_ptr(0);
1420 if (dp) {
1421 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1422 } else {
1423 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1425 tcg_temp_free_ptr(fpst);
1428 static inline void gen_vfp_F1_neg(int dp)
1430 /* Like gen_vfp_neg() but put result in F1 */
1431 if (dp) {
1432 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1433 } else {
1434 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1438 static inline void gen_vfp_abs(int dp)
1440 if (dp)
1441 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1442 else
1443 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1446 static inline void gen_vfp_neg(int dp)
1448 if (dp)
1449 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1450 else
1451 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1454 static inline void gen_vfp_sqrt(int dp)
1456 if (dp)
1457 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1458 else
1459 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1462 static inline void gen_vfp_cmp(int dp)
1464 if (dp)
1465 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1466 else
1467 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1470 static inline void gen_vfp_cmpe(int dp)
1472 if (dp)
1473 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1474 else
1475 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1478 static inline void gen_vfp_F1_ld0(int dp)
1480 if (dp)
1481 tcg_gen_movi_i64(cpu_F1d, 0);
1482 else
1483 tcg_gen_movi_i32(cpu_F1s, 0);
1486 #define VFP_GEN_ITOF(name) \
1487 static inline void gen_vfp_##name(int dp, int neon) \
1489 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1490 if (dp) { \
1491 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1492 } else { \
1493 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1495 tcg_temp_free_ptr(statusptr); \
1498 VFP_GEN_ITOF(uito)
1499 VFP_GEN_ITOF(sito)
1500 #undef VFP_GEN_ITOF
1502 #define VFP_GEN_FTOI(name) \
1503 static inline void gen_vfp_##name(int dp, int neon) \
1505 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1506 if (dp) { \
1507 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1508 } else { \
1509 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1511 tcg_temp_free_ptr(statusptr); \
1514 VFP_GEN_FTOI(toui)
1515 VFP_GEN_FTOI(touiz)
1516 VFP_GEN_FTOI(tosi)
1517 VFP_GEN_FTOI(tosiz)
1518 #undef VFP_GEN_FTOI
1520 #define VFP_GEN_FIX(name, round) \
1521 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1523 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1524 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1525 if (dp) { \
1526 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1527 statusptr); \
1528 } else { \
1529 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1530 statusptr); \
1532 tcg_temp_free_i32(tmp_shift); \
1533 tcg_temp_free_ptr(statusptr); \
1535 VFP_GEN_FIX(tosh, _round_to_zero)
1536 VFP_GEN_FIX(tosl, _round_to_zero)
1537 VFP_GEN_FIX(touh, _round_to_zero)
1538 VFP_GEN_FIX(toul, _round_to_zero)
1539 VFP_GEN_FIX(shto, )
1540 VFP_GEN_FIX(slto, )
1541 VFP_GEN_FIX(uhto, )
1542 VFP_GEN_FIX(ulto, )
1543 #undef VFP_GEN_FIX
1545 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1547 if (dp) {
1548 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
1549 } else {
1550 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
1554 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1556 if (dp) {
1557 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
1558 } else {
1559 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
1563 static inline long vfp_reg_offset(bool dp, unsigned reg)
1565 if (dp) {
1566 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
1567 } else {
1568 long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
1569 if (reg & 1) {
1570 ofs += offsetof(CPU_DoubleU, l.upper);
1571 } else {
1572 ofs += offsetof(CPU_DoubleU, l.lower);
1574 return ofs;
1578 /* Return the offset of a 32-bit piece of a NEON register.
1579 zero is the least significant end of the register. */
1580 static inline long
1581 neon_reg_offset (int reg, int n)
1583 int sreg;
1584 sreg = reg * 2 + n;
1585 return vfp_reg_offset(0, sreg);
/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
 * where 0 is the least significant end of the register.
 */
1591 static inline long
1592 neon_element_offset(int reg, int element, TCGMemOp size)
1594 int element_size = 1 << size;
1595 int ofs = element * element_size;
1596 #ifdef HOST_WORDS_BIGENDIAN
1597 /* Calculate the offset assuming fully little-endian,
1598 * then XOR to account for the order of the 8-byte units.
1600 if (element_size < 8) {
1601 ofs ^= 8 - element_size;
1603 #endif
1604 return neon_reg_offset(reg, 0) + ofs;
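
/*
 * Worked example (illustrative): with the layout above, the single-
 * precision register s7 is vfp_reg_offset(false, 7), i.e. the upper
 * 32-bit half of zregs[1].d[1] (the top half of d3), and
 * neon_element_offset(0, 2, MO_16) addresses the third 16-bit element
 * of d0 (byte offset 4 on a little-endian host, adjusted by the XOR
 * above on big-endian hosts).
 */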
1607 static TCGv_i32 neon_load_reg(int reg, int pass)
1609 TCGv_i32 tmp = tcg_temp_new_i32();
1610 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1611 return tmp;
1614 static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
1616 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1618 switch (mop) {
1619 case MO_UB:
1620 tcg_gen_ld8u_i32(var, cpu_env, offset);
1621 break;
1622 case MO_UW:
1623 tcg_gen_ld16u_i32(var, cpu_env, offset);
1624 break;
1625 case MO_UL:
1626 tcg_gen_ld_i32(var, cpu_env, offset);
1627 break;
1628 default:
1629 g_assert_not_reached();
1633 static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
1635 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1637 switch (mop) {
1638 case MO_UB:
1639 tcg_gen_ld8u_i64(var, cpu_env, offset);
1640 break;
1641 case MO_UW:
1642 tcg_gen_ld16u_i64(var, cpu_env, offset);
1643 break;
1644 case MO_UL:
1645 tcg_gen_ld32u_i64(var, cpu_env, offset);
1646 break;
1647 case MO_Q:
1648 tcg_gen_ld_i64(var, cpu_env, offset);
1649 break;
1650 default:
1651 g_assert_not_reached();
1655 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1657 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1658 tcg_temp_free_i32(var);
1661 static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
1663 long offset = neon_element_offset(reg, ele, size);
1665 switch (size) {
1666 case MO_8:
1667 tcg_gen_st8_i32(var, cpu_env, offset);
1668 break;
1669 case MO_16:
1670 tcg_gen_st16_i32(var, cpu_env, offset);
1671 break;
1672 case MO_32:
1673 tcg_gen_st_i32(var, cpu_env, offset);
1674 break;
1675 default:
1676 g_assert_not_reached();
1680 static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
1682 long offset = neon_element_offset(reg, ele, size);
1684 switch (size) {
1685 case MO_8:
1686 tcg_gen_st8_i64(var, cpu_env, offset);
1687 break;
1688 case MO_16:
1689 tcg_gen_st16_i64(var, cpu_env, offset);
1690 break;
1691 case MO_32:
1692 tcg_gen_st32_i64(var, cpu_env, offset);
1693 break;
1694 case MO_64:
1695 tcg_gen_st_i64(var, cpu_env, offset);
1696 break;
1697 default:
1698 g_assert_not_reached();
1702 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1704 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1707 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1709 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1712 static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1714 TCGv_ptr ret = tcg_temp_new_ptr();
1715 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1716 return ret;
1719 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1720 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1721 #define tcg_gen_st_f32 tcg_gen_st_i32
1722 #define tcg_gen_st_f64 tcg_gen_st_i64
1724 static inline void gen_mov_F0_vreg(int dp, int reg)
1726 if (dp)
1727 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1728 else
1729 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1732 static inline void gen_mov_F1_vreg(int dp, int reg)
1734 if (dp)
1735 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1736 else
1737 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1740 static inline void gen_mov_vreg_F0(int dp, int reg)
1742 if (dp)
1743 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1744 else
1745 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1748 #define ARM_CP_RW_BIT (1 << 20)
1750 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1752 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1755 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1757 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1760 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1762 TCGv_i32 var = tcg_temp_new_i32();
1763 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1764 return var;
1767 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1769 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1770 tcg_temp_free_i32(var);
1773 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1775 iwmmxt_store_reg(cpu_M0, rn);
1778 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1780 iwmmxt_load_reg(cpu_M0, rn);
1783 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1785 iwmmxt_load_reg(cpu_V1, rn);
1786 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1789 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1791 iwmmxt_load_reg(cpu_V1, rn);
1792 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1795 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1797 iwmmxt_load_reg(cpu_V1, rn);
1798 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1801 #define IWMMXT_OP(name) \
1802 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1804 iwmmxt_load_reg(cpu_V1, rn); \
1805 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1808 #define IWMMXT_OP_ENV(name) \
1809 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1811 iwmmxt_load_reg(cpu_V1, rn); \
1812 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1815 #define IWMMXT_OP_ENV_SIZE(name) \
1816 IWMMXT_OP_ENV(name##b) \
1817 IWMMXT_OP_ENV(name##w) \
1818 IWMMXT_OP_ENV(name##l)
1820 #define IWMMXT_OP_ENV1(name) \
1821 static inline void gen_op_iwmmxt_##name##_M0(void) \
1823 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1826 IWMMXT_OP(maddsq)
1827 IWMMXT_OP(madduq)
1828 IWMMXT_OP(sadb)
1829 IWMMXT_OP(sadw)
1830 IWMMXT_OP(mulslw)
1831 IWMMXT_OP(mulshw)
1832 IWMMXT_OP(mululw)
1833 IWMMXT_OP(muluhw)
1834 IWMMXT_OP(macsw)
1835 IWMMXT_OP(macuw)
1837 IWMMXT_OP_ENV_SIZE(unpackl)
1838 IWMMXT_OP_ENV_SIZE(unpackh)
1840 IWMMXT_OP_ENV1(unpacklub)
1841 IWMMXT_OP_ENV1(unpackluw)
1842 IWMMXT_OP_ENV1(unpacklul)
1843 IWMMXT_OP_ENV1(unpackhub)
1844 IWMMXT_OP_ENV1(unpackhuw)
1845 IWMMXT_OP_ENV1(unpackhul)
1846 IWMMXT_OP_ENV1(unpacklsb)
1847 IWMMXT_OP_ENV1(unpacklsw)
1848 IWMMXT_OP_ENV1(unpacklsl)
1849 IWMMXT_OP_ENV1(unpackhsb)
1850 IWMMXT_OP_ENV1(unpackhsw)
1851 IWMMXT_OP_ENV1(unpackhsl)
1853 IWMMXT_OP_ENV_SIZE(cmpeq)
1854 IWMMXT_OP_ENV_SIZE(cmpgtu)
1855 IWMMXT_OP_ENV_SIZE(cmpgts)
1857 IWMMXT_OP_ENV_SIZE(mins)
1858 IWMMXT_OP_ENV_SIZE(minu)
1859 IWMMXT_OP_ENV_SIZE(maxs)
1860 IWMMXT_OP_ENV_SIZE(maxu)
1862 IWMMXT_OP_ENV_SIZE(subn)
1863 IWMMXT_OP_ENV_SIZE(addn)
1864 IWMMXT_OP_ENV_SIZE(subu)
1865 IWMMXT_OP_ENV_SIZE(addu)
1866 IWMMXT_OP_ENV_SIZE(subs)
1867 IWMMXT_OP_ENV_SIZE(adds)
1869 IWMMXT_OP_ENV(avgb0)
1870 IWMMXT_OP_ENV(avgb1)
1871 IWMMXT_OP_ENV(avgw0)
1872 IWMMXT_OP_ENV(avgw1)
1874 IWMMXT_OP_ENV(packuw)
1875 IWMMXT_OP_ENV(packul)
1876 IWMMXT_OP_ENV(packuq)
1877 IWMMXT_OP_ENV(packsw)
1878 IWMMXT_OP_ENV(packsl)
1879 IWMMXT_OP_ENV(packsq)
1881 static void gen_op_iwmmxt_set_mup(void)
1883 TCGv_i32 tmp;
1884 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1885 tcg_gen_ori_i32(tmp, tmp, 2);
1886 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1889 static void gen_op_iwmmxt_set_cup(void)
1891 TCGv_i32 tmp;
1892 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1893 tcg_gen_ori_i32(tmp, tmp, 1);
1894 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1897 static void gen_op_iwmmxt_setpsr_nz(void)
1899 TCGv_i32 tmp = tcg_temp_new_i32();
1900 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1901 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1904 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1906 iwmmxt_load_reg(cpu_V1, rn);
1907 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1908 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1911 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1912 TCGv_i32 dest)
1914 int rd;
1915 uint32_t offset;
1916 TCGv_i32 tmp;
1918 rd = (insn >> 16) & 0xf;
1919 tmp = load_reg(s, rd);
1921 offset = (insn & 0xff) << ((insn >> 7) & 2);
1922 if (insn & (1 << 24)) {
1923 /* Pre indexed */
1924 if (insn & (1 << 23))
1925 tcg_gen_addi_i32(tmp, tmp, offset);
1926 else
1927 tcg_gen_addi_i32(tmp, tmp, -offset);
1928 tcg_gen_mov_i32(dest, tmp);
1929 if (insn & (1 << 21))
1930 store_reg(s, rd, tmp);
1931 else
1932 tcg_temp_free_i32(tmp);
1933 } else if (insn & (1 << 21)) {
1934 /* Post indexed */
1935 tcg_gen_mov_i32(dest, tmp);
1936 if (insn & (1 << 23))
1937 tcg_gen_addi_i32(tmp, tmp, offset);
1938 else
1939 tcg_gen_addi_i32(tmp, tmp, -offset);
1940 store_reg(s, rd, tmp);
1941 } else if (!(insn & (1 << 23)))
1942 return 1;
1943 return 0;
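
/*
 * Illustrative example (assumed encoding behaviour, matching the bit
 * tests above): for a pre-indexed form such as "WLDRW wR0, [r1, #4]!"
 * the offset is applied to r1 first, the sum becomes the transfer
 * address, and r1 is written back because bit 21 is set; the
 * post-indexed form uses the old r1 as the address and updates r1
 * afterwards.
 */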
1946 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1948 int rd = (insn >> 0) & 0xf;
1949 TCGv_i32 tmp;
1951 if (insn & (1 << 8)) {
1952 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1953 return 1;
1954 } else {
1955 tmp = iwmmxt_load_creg(rd);
1957 } else {
1958 tmp = tcg_temp_new_i32();
1959 iwmmxt_load_reg(cpu_V0, rd);
1960 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1962 tcg_gen_andi_i32(tmp, tmp, mask);
1963 tcg_gen_mov_i32(dest, tmp);
1964 tcg_temp_free_i32(tmp);
1965 return 0;
1968 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1969 (ie. an undefined instruction). */
1970 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1972 int rd, wrd;
1973 int rdhi, rdlo, rd0, rd1, i;
1974 TCGv_i32 addr;
1975 TCGv_i32 tmp, tmp2, tmp3;
1977 if ((insn & 0x0e000e00) == 0x0c000000) {
1978 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1979 wrd = insn & 0xf;
1980 rdlo = (insn >> 12) & 0xf;
1981 rdhi = (insn >> 16) & 0xf;
1982 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1983 iwmmxt_load_reg(cpu_V0, wrd);
1984 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1985 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1986 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1987 } else { /* TMCRR */
1988 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1989 iwmmxt_store_reg(cpu_V0, wrd);
1990 gen_op_iwmmxt_set_mup();
1992 return 0;
1995 wrd = (insn >> 12) & 0xf;
1996 addr = tcg_temp_new_i32();
1997 if (gen_iwmmxt_address(s, insn, addr)) {
1998 tcg_temp_free_i32(addr);
1999 return 1;
2001 if (insn & ARM_CP_RW_BIT) {
2002 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
2003 tmp = tcg_temp_new_i32();
2004 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2005 iwmmxt_store_creg(wrd, tmp);
2006 } else {
2007 i = 1;
2008 if (insn & (1 << 8)) {
2009 if (insn & (1 << 22)) { /* WLDRD */
2010 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
2011 i = 0;
2012 } else { /* WLDRW wRd */
2013 tmp = tcg_temp_new_i32();
2014 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2016 } else {
2017 tmp = tcg_temp_new_i32();
2018 if (insn & (1 << 22)) { /* WLDRH */
2019 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
2020 } else { /* WLDRB */
2021 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2024 if (i) {
2025 tcg_gen_extu_i32_i64(cpu_M0, tmp);
2026 tcg_temp_free_i32(tmp);
2028 gen_op_iwmmxt_movq_wRn_M0(wrd);
2030 } else {
2031 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
2032 tmp = iwmmxt_load_creg(wrd);
2033 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
2034 } else {
2035 gen_op_iwmmxt_movq_M0_wRn(wrd);
2036 tmp = tcg_temp_new_i32();
2037 if (insn & (1 << 8)) {
2038 if (insn & (1 << 22)) { /* WSTRD */
2039 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
2040 } else { /* WSTRW wRd */
2041 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2042 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
2044 } else {
2045 if (insn & (1 << 22)) { /* WSTRH */
2046 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2047 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
2048 } else { /* WSTRB */
2049 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2050 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
2054 tcg_temp_free_i32(tmp);
2056 tcg_temp_free_i32(addr);
2057 return 0;
2060 if ((insn & 0x0f000000) != 0x0e000000)
2061 return 1;
2063 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
2064 case 0x000: /* WOR */
2065 wrd = (insn >> 12) & 0xf;
2066 rd0 = (insn >> 0) & 0xf;
2067 rd1 = (insn >> 16) & 0xf;
2068 gen_op_iwmmxt_movq_M0_wRn(rd0);
2069 gen_op_iwmmxt_orq_M0_wRn(rd1);
2070 gen_op_iwmmxt_setpsr_nz();
2071 gen_op_iwmmxt_movq_wRn_M0(wrd);
2072 gen_op_iwmmxt_set_mup();
2073 gen_op_iwmmxt_set_cup();
2074 break;
2075 case 0x011: /* TMCR */
2076 if (insn & 0xf)
2077 return 1;
2078 rd = (insn >> 12) & 0xf;
2079 wrd = (insn >> 16) & 0xf;
2080 switch (wrd) {
2081 case ARM_IWMMXT_wCID:
2082 case ARM_IWMMXT_wCASF:
2083 break;
2084 case ARM_IWMMXT_wCon:
2085 gen_op_iwmmxt_set_cup();
2086 /* Fall through. */
2087 case ARM_IWMMXT_wCSSF:
2088 tmp = iwmmxt_load_creg(wrd);
2089 tmp2 = load_reg(s, rd);
2090 tcg_gen_andc_i32(tmp, tmp, tmp2);
2091 tcg_temp_free_i32(tmp2);
2092 iwmmxt_store_creg(wrd, tmp);
2093 break;
2094 case ARM_IWMMXT_wCGR0:
2095 case ARM_IWMMXT_wCGR1:
2096 case ARM_IWMMXT_wCGR2:
2097 case ARM_IWMMXT_wCGR3:
2098 gen_op_iwmmxt_set_cup();
2099 tmp = load_reg(s, rd);
2100 iwmmxt_store_creg(wrd, tmp);
2101 break;
2102 default:
2103 return 1;
2105 break;
2106 case 0x100: /* WXOR */
2107 wrd = (insn >> 12) & 0xf;
2108 rd0 = (insn >> 0) & 0xf;
2109 rd1 = (insn >> 16) & 0xf;
2110 gen_op_iwmmxt_movq_M0_wRn(rd0);
2111 gen_op_iwmmxt_xorq_M0_wRn(rd1);
2112 gen_op_iwmmxt_setpsr_nz();
2113 gen_op_iwmmxt_movq_wRn_M0(wrd);
2114 gen_op_iwmmxt_set_mup();
2115 gen_op_iwmmxt_set_cup();
2116 break;
2117 case 0x111: /* TMRC */
2118 if (insn & 0xf)
2119 return 1;
2120 rd = (insn >> 12) & 0xf;
2121 wrd = (insn >> 16) & 0xf;
2122 tmp = iwmmxt_load_creg(wrd);
2123 store_reg(s, rd, tmp);
2124 break;
2125 case 0x300: /* WANDN */
2126 wrd = (insn >> 12) & 0xf;
2127 rd0 = (insn >> 0) & 0xf;
2128 rd1 = (insn >> 16) & 0xf;
2129 gen_op_iwmmxt_movq_M0_wRn(rd0);
2130 tcg_gen_neg_i64(cpu_M0, cpu_M0);
2131 gen_op_iwmmxt_andq_M0_wRn(rd1);
2132 gen_op_iwmmxt_setpsr_nz();
2133 gen_op_iwmmxt_movq_wRn_M0(wrd);
2134 gen_op_iwmmxt_set_mup();
2135 gen_op_iwmmxt_set_cup();
2136 break;
2137 case 0x200: /* WAND */
2138 wrd = (insn >> 12) & 0xf;
2139 rd0 = (insn >> 0) & 0xf;
2140 rd1 = (insn >> 16) & 0xf;
2141 gen_op_iwmmxt_movq_M0_wRn(rd0);
2142 gen_op_iwmmxt_andq_M0_wRn(rd1);
2143 gen_op_iwmmxt_setpsr_nz();
2144 gen_op_iwmmxt_movq_wRn_M0(wrd);
2145 gen_op_iwmmxt_set_mup();
2146 gen_op_iwmmxt_set_cup();
2147 break;
2148 case 0x810: case 0xa10: /* WMADD */
2149 wrd = (insn >> 12) & 0xf;
2150 rd0 = (insn >> 0) & 0xf;
2151 rd1 = (insn >> 16) & 0xf;
2152 gen_op_iwmmxt_movq_M0_wRn(rd0);
2153 if (insn & (1 << 21))
2154 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
2155 else
2156 gen_op_iwmmxt_madduq_M0_wRn(rd1);
2157 gen_op_iwmmxt_movq_wRn_M0(wrd);
2158 gen_op_iwmmxt_set_mup();
2159 break;
2160 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
2161 wrd = (insn >> 12) & 0xf;
2162 rd0 = (insn >> 16) & 0xf;
2163 rd1 = (insn >> 0) & 0xf;
2164 gen_op_iwmmxt_movq_M0_wRn(rd0);
2165 switch ((insn >> 22) & 3) {
2166 case 0:
2167 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2168 break;
2169 case 1:
2170 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2171 break;
2172 case 2:
2173 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2174 break;
2175 case 3:
2176 return 1;
2178 gen_op_iwmmxt_movq_wRn_M0(wrd);
2179 gen_op_iwmmxt_set_mup();
2180 gen_op_iwmmxt_set_cup();
2181 break;
2182 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
2183 wrd = (insn >> 12) & 0xf;
2184 rd0 = (insn >> 16) & 0xf;
2185 rd1 = (insn >> 0) & 0xf;
2186 gen_op_iwmmxt_movq_M0_wRn(rd0);
2187 switch ((insn >> 22) & 3) {
2188 case 0:
2189 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2190 break;
2191 case 1:
2192 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2193 break;
2194 case 2:
2195 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2196 break;
2197 case 3:
2198 return 1;
2200 gen_op_iwmmxt_movq_wRn_M0(wrd);
2201 gen_op_iwmmxt_set_mup();
2202 gen_op_iwmmxt_set_cup();
2203 break;
2204 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2205 wrd = (insn >> 12) & 0xf;
2206 rd0 = (insn >> 16) & 0xf;
2207 rd1 = (insn >> 0) & 0xf;
2208 gen_op_iwmmxt_movq_M0_wRn(rd0);
2209 if (insn & (1 << 22))
2210 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2211 else
2212 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2213 if (!(insn & (1 << 20)))
2214 gen_op_iwmmxt_addl_M0_wRn(wrd);
2215 gen_op_iwmmxt_movq_wRn_M0(wrd);
2216 gen_op_iwmmxt_set_mup();
2217 break;
2218 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2219 wrd = (insn >> 12) & 0xf;
2220 rd0 = (insn >> 16) & 0xf;
2221 rd1 = (insn >> 0) & 0xf;
2222 gen_op_iwmmxt_movq_M0_wRn(rd0);
2223 if (insn & (1 << 21)) {
2224 if (insn & (1 << 20))
2225 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2226 else
2227 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2228 } else {
2229 if (insn & (1 << 20))
2230 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2231 else
2232 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2234 gen_op_iwmmxt_movq_wRn_M0(wrd);
2235 gen_op_iwmmxt_set_mup();
2236 break;
2237 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2238 wrd = (insn >> 12) & 0xf;
2239 rd0 = (insn >> 16) & 0xf;
2240 rd1 = (insn >> 0) & 0xf;
2241 gen_op_iwmmxt_movq_M0_wRn(rd0);
2242 if (insn & (1 << 21))
2243 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2244 else
2245 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2246 if (!(insn & (1 << 20))) {
2247 iwmmxt_load_reg(cpu_V1, wrd);
2248 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
2250 gen_op_iwmmxt_movq_wRn_M0(wrd);
2251 gen_op_iwmmxt_set_mup();
2252 break;
2253 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2254 wrd = (insn >> 12) & 0xf;
2255 rd0 = (insn >> 16) & 0xf;
2256 rd1 = (insn >> 0) & 0xf;
2257 gen_op_iwmmxt_movq_M0_wRn(rd0);
2258 switch ((insn >> 22) & 3) {
2259 case 0:
2260 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2261 break;
2262 case 1:
2263 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2264 break;
2265 case 2:
2266 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2267 break;
2268 case 3:
2269 return 1;
2271 gen_op_iwmmxt_movq_wRn_M0(wrd);
2272 gen_op_iwmmxt_set_mup();
2273 gen_op_iwmmxt_set_cup();
2274 break;
2275 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2276 wrd = (insn >> 12) & 0xf;
2277 rd0 = (insn >> 16) & 0xf;
2278 rd1 = (insn >> 0) & 0xf;
2279 gen_op_iwmmxt_movq_M0_wRn(rd0);
2280 if (insn & (1 << 22)) {
2281 if (insn & (1 << 20))
2282 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2283 else
2284 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2285 } else {
2286 if (insn & (1 << 20))
2287 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2288 else
2289 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2294 break;
2295 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2296 wrd = (insn >> 12) & 0xf;
2297 rd0 = (insn >> 16) & 0xf;
2298 rd1 = (insn >> 0) & 0xf;
2299 gen_op_iwmmxt_movq_M0_wRn(rd0);
2300 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2301 tcg_gen_andi_i32(tmp, tmp, 7);
2302 iwmmxt_load_reg(cpu_V1, rd1);
2303 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2304 tcg_temp_free_i32(tmp);
2305 gen_op_iwmmxt_movq_wRn_M0(wrd);
2306 gen_op_iwmmxt_set_mup();
2307 break;
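/*
 * TINSR inserts the low byte/halfword/word of Rn into the field of wRd
 * selected by the immediate: tmp2 is the field mask and tmp3 the bit
 * offset handed to the insr helper.
 */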
2308 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2309 if (((insn >> 6) & 3) == 3)
2310 return 1;
2311 rd = (insn >> 12) & 0xf;
2312 wrd = (insn >> 16) & 0xf;
2313 tmp = load_reg(s, rd);
2314 gen_op_iwmmxt_movq_M0_wRn(wrd);
2315 switch ((insn >> 6) & 3) {
2316 case 0:
2317 tmp2 = tcg_const_i32(0xff);
2318 tmp3 = tcg_const_i32((insn & 7) << 3);
2319 break;
2320 case 1:
2321 tmp2 = tcg_const_i32(0xffff);
2322 tmp3 = tcg_const_i32((insn & 3) << 4);
2323 break;
2324 case 2:
2325 tmp2 = tcg_const_i32(0xffffffff);
2326 tmp3 = tcg_const_i32((insn & 1) << 5);
2327 break;
2328 default:
2329 tmp2 = NULL;
2330 tmp3 = NULL;
2332 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2333 tcg_temp_free_i32(tmp3);
2334 tcg_temp_free_i32(tmp2);
2335 tcg_temp_free_i32(tmp);
2336 gen_op_iwmmxt_movq_wRn_M0(wrd);
2337 gen_op_iwmmxt_set_mup();
2338 break;
2339 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2340 rd = (insn >> 12) & 0xf;
2341 wrd = (insn >> 16) & 0xf;
2342 if (rd == 15 || ((insn >> 22) & 3) == 3)
2343 return 1;
2344 gen_op_iwmmxt_movq_M0_wRn(wrd);
2345 tmp = tcg_temp_new_i32();
2346 switch ((insn >> 22) & 3) {
2347 case 0:
2348 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2349 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2350 if (insn & 8) {
2351 tcg_gen_ext8s_i32(tmp, tmp);
2352 } else {
2353 tcg_gen_andi_i32(tmp, tmp, 0xff);
2355 break;
2356 case 1:
2357 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2358 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2359 if (insn & 8) {
2360 tcg_gen_ext16s_i32(tmp, tmp);
2361 } else {
2362 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2364 break;
2365 case 2:
2366 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2367 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2368 break;
2370 store_reg(s, rd, tmp);
2371 break;
2372 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2373 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2374 return 1;
2375 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2376 switch ((insn >> 22) & 3) {
2377 case 0:
2378 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2379 break;
2380 case 1:
2381 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2382 break;
2383 case 2:
2384 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2385 break;
2387 tcg_gen_shli_i32(tmp, tmp, 28);
2388 gen_set_nzcv(tmp);
2389 tcg_temp_free_i32(tmp);
2390 break;
2391 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2392 if (((insn >> 6) & 3) == 3)
2393 return 1;
2394 rd = (insn >> 12) & 0xf;
2395 wrd = (insn >> 16) & 0xf;
2396 tmp = load_reg(s, rd);
2397 switch ((insn >> 6) & 3) {
2398 case 0:
2399 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2400 break;
2401 case 1:
2402 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2403 break;
2404 case 2:
2405 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2406 break;
2408 tcg_temp_free_i32(tmp);
2409 gen_op_iwmmxt_movq_wRn_M0(wrd);
2410 gen_op_iwmmxt_set_mup();
2411 break;
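/*
 * TANDC (and TORC further down) fold the per-field flag nibbles of
 * wCASF together with AND (resp. OR): after the shift-and-accumulate
 * loop the combined flags end up in bits [31:28], which gen_set_nzcv()
 * then transfers into the CPSR flags.
 */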
2412 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2413 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2414 return 1;
2415 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2416 tmp2 = tcg_temp_new_i32();
2417 tcg_gen_mov_i32(tmp2, tmp);
2418 switch ((insn >> 22) & 3) {
2419 case 0:
2420 for (i = 0; i < 7; i ++) {
2421 tcg_gen_shli_i32(tmp2, tmp2, 4);
2422 tcg_gen_and_i32(tmp, tmp, tmp2);
2424 break;
2425 case 1:
2426 for (i = 0; i < 3; i ++) {
2427 tcg_gen_shli_i32(tmp2, tmp2, 8);
2428 tcg_gen_and_i32(tmp, tmp, tmp2);
2430 break;
2431 case 2:
2432 tcg_gen_shli_i32(tmp2, tmp2, 16);
2433 tcg_gen_and_i32(tmp, tmp, tmp2);
2434 break;
2436 gen_set_nzcv(tmp);
2437 tcg_temp_free_i32(tmp2);
2438 tcg_temp_free_i32(tmp);
2439 break;
2440 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2441 wrd = (insn >> 12) & 0xf;
2442 rd0 = (insn >> 16) & 0xf;
2443 gen_op_iwmmxt_movq_M0_wRn(rd0);
2444 switch ((insn >> 22) & 3) {
2445 case 0:
2446 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2447 break;
2448 case 1:
2449 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2450 break;
2451 case 2:
2452 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2453 break;
2454 case 3:
2455 return 1;
2457 gen_op_iwmmxt_movq_wRn_M0(wrd);
2458 gen_op_iwmmxt_set_mup();
2459 break;
2460 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2461 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2462 return 1;
2463 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2464 tmp2 = tcg_temp_new_i32();
2465 tcg_gen_mov_i32(tmp2, tmp);
2466 switch ((insn >> 22) & 3) {
2467 case 0:
2468 for (i = 0; i < 7; i ++) {
2469 tcg_gen_shli_i32(tmp2, tmp2, 4);
2470 tcg_gen_or_i32(tmp, tmp, tmp2);
2472 break;
2473 case 1:
2474 for (i = 0; i < 3; i ++) {
2475 tcg_gen_shli_i32(tmp2, tmp2, 8);
2476 tcg_gen_or_i32(tmp, tmp, tmp2);
2478 break;
2479 case 2:
2480 tcg_gen_shli_i32(tmp2, tmp2, 16);
2481 tcg_gen_or_i32(tmp, tmp, tmp2);
2482 break;
2484 gen_set_nzcv(tmp);
2485 tcg_temp_free_i32(tmp2);
2486 tcg_temp_free_i32(tmp);
2487 break;
2488 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2489 rd = (insn >> 12) & 0xf;
2490 rd0 = (insn >> 16) & 0xf;
2491 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2492 return 1;
2493 gen_op_iwmmxt_movq_M0_wRn(rd0);
2494 tmp = tcg_temp_new_i32();
2495 switch ((insn >> 22) & 3) {
2496 case 0:
2497 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2498 break;
2499 case 1:
2500 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2501 break;
2502 case 2:
2503 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2504 break;
2506 store_reg(s, rd, tmp);
2507 break;
2508 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2509 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2510 wrd = (insn >> 12) & 0xf;
2511 rd0 = (insn >> 16) & 0xf;
2512 rd1 = (insn >> 0) & 0xf;
2513 gen_op_iwmmxt_movq_M0_wRn(rd0);
2514 switch ((insn >> 22) & 3) {
2515 case 0:
2516 if (insn & (1 << 21))
2517 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2518 else
2519 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2520 break;
2521 case 1:
2522 if (insn & (1 << 21))
2523 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2524 else
2525 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2526 break;
2527 case 2:
2528 if (insn & (1 << 21))
2529 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2530 else
2531 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2532 break;
2533 case 3:
2534 return 1;
2536 gen_op_iwmmxt_movq_wRn_M0(wrd);
2537 gen_op_iwmmxt_set_mup();
2538 gen_op_iwmmxt_set_cup();
2539 break;
2540 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2541 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2542 wrd = (insn >> 12) & 0xf;
2543 rd0 = (insn >> 16) & 0xf;
2544 gen_op_iwmmxt_movq_M0_wRn(rd0);
2545 switch ((insn >> 22) & 3) {
2546 case 0:
2547 if (insn & (1 << 21))
2548 gen_op_iwmmxt_unpacklsb_M0();
2549 else
2550 gen_op_iwmmxt_unpacklub_M0();
2551 break;
2552 case 1:
2553 if (insn & (1 << 21))
2554 gen_op_iwmmxt_unpacklsw_M0();
2555 else
2556 gen_op_iwmmxt_unpackluw_M0();
2557 break;
2558 case 2:
2559 if (insn & (1 << 21))
2560 gen_op_iwmmxt_unpacklsl_M0();
2561 else
2562 gen_op_iwmmxt_unpacklul_M0();
2563 break;
2564 case 3:
2565 return 1;
2567 gen_op_iwmmxt_movq_wRn_M0(wrd);
2568 gen_op_iwmmxt_set_mup();
2569 gen_op_iwmmxt_set_cup();
2570 break;
2571 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2572 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2573 wrd = (insn >> 12) & 0xf;
2574 rd0 = (insn >> 16) & 0xf;
2575 gen_op_iwmmxt_movq_M0_wRn(rd0);
2576 switch ((insn >> 22) & 3) {
2577 case 0:
2578 if (insn & (1 << 21))
2579 gen_op_iwmmxt_unpackhsb_M0();
2580 else
2581 gen_op_iwmmxt_unpackhub_M0();
2582 break;
2583 case 1:
2584 if (insn & (1 << 21))
2585 gen_op_iwmmxt_unpackhsw_M0();
2586 else
2587 gen_op_iwmmxt_unpackhuw_M0();
2588 break;
2589 case 2:
2590 if (insn & (1 << 21))
2591 gen_op_iwmmxt_unpackhsl_M0();
2592 else
2593 gen_op_iwmmxt_unpackhul_M0();
2594 break;
2595 case 3:
2596 return 1;
2598 gen_op_iwmmxt_movq_wRn_M0(wrd);
2599 gen_op_iwmmxt_set_mup();
2600 gen_op_iwmmxt_set_cup();
2601 break;
2602 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2603 case 0x214: case 0x614: case 0xa14: case 0xe14:
2604 if (((insn >> 22) & 3) == 0)
2605 return 1;
2606 wrd = (insn >> 12) & 0xf;
2607 rd0 = (insn >> 16) & 0xf;
2608 gen_op_iwmmxt_movq_M0_wRn(rd0);
2609 tmp = tcg_temp_new_i32();
2610 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2611 tcg_temp_free_i32(tmp);
2612 return 1;
2614 switch ((insn >> 22) & 3) {
2615 case 1:
2616 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2617 break;
2618 case 2:
2619 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2620 break;
2621 case 3:
2622 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2623 break;
2625 tcg_temp_free_i32(tmp);
2626 gen_op_iwmmxt_movq_wRn_M0(wrd);
2627 gen_op_iwmmxt_set_mup();
2628 gen_op_iwmmxt_set_cup();
2629 break;
2630 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2631 case 0x014: case 0x414: case 0x814: case 0xc14:
2632 if (((insn >> 22) & 3) == 0)
2633 return 1;
2634 wrd = (insn >> 12) & 0xf;
2635 rd0 = (insn >> 16) & 0xf;
2636 gen_op_iwmmxt_movq_M0_wRn(rd0);
2637 tmp = tcg_temp_new_i32();
2638 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2639 tcg_temp_free_i32(tmp);
2640 return 1;
2642 switch ((insn >> 22) & 3) {
2643 case 1:
2644 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2645 break;
2646 case 2:
2647 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2648 break;
2649 case 3:
2650 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2651 break;
2653 tcg_temp_free_i32(tmp);
2654 gen_op_iwmmxt_movq_wRn_M0(wrd);
2655 gen_op_iwmmxt_set_mup();
2656 gen_op_iwmmxt_set_cup();
2657 break;
2658 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2659 case 0x114: case 0x514: case 0x914: case 0xd14:
2660 if (((insn >> 22) & 3) == 0)
2661 return 1;
2662 wrd = (insn >> 12) & 0xf;
2663 rd0 = (insn >> 16) & 0xf;
2664 gen_op_iwmmxt_movq_M0_wRn(rd0);
2665 tmp = tcg_temp_new_i32();
2666 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2667 tcg_temp_free_i32(tmp);
2668 return 1;
2670 switch ((insn >> 22) & 3) {
2671 case 1:
2672 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2673 break;
2674 case 2:
2675 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2676 break;
2677 case 3:
2678 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2679 break;
2681 tcg_temp_free_i32(tmp);
2682 gen_op_iwmmxt_movq_wRn_M0(wrd);
2683 gen_op_iwmmxt_set_mup();
2684 gen_op_iwmmxt_set_cup();
2685 break;
2686 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2687 case 0x314: case 0x714: case 0xb14: case 0xf14:
2688 if (((insn >> 22) & 3) == 0)
2689 return 1;
2690 wrd = (insn >> 12) & 0xf;
2691 rd0 = (insn >> 16) & 0xf;
2692 gen_op_iwmmxt_movq_M0_wRn(rd0);
2693 tmp = tcg_temp_new_i32();
2694 switch ((insn >> 22) & 3) {
2695 case 1:
2696 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2697 tcg_temp_free_i32(tmp);
2698 return 1;
2700 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2701 break;
2702 case 2:
2703 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2704 tcg_temp_free_i32(tmp);
2705 return 1;
2707 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2708 break;
2709 case 3:
2710 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2711 tcg_temp_free_i32(tmp);
2712 return 1;
2714 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2715 break;
2717 tcg_temp_free_i32(tmp);
2718 gen_op_iwmmxt_movq_wRn_M0(wrd);
2719 gen_op_iwmmxt_set_mup();
2720 gen_op_iwmmxt_set_cup();
2721 break;
2722 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2723 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2724 wrd = (insn >> 12) & 0xf;
2725 rd0 = (insn >> 16) & 0xf;
2726 rd1 = (insn >> 0) & 0xf;
2727 gen_op_iwmmxt_movq_M0_wRn(rd0);
2728 switch ((insn >> 22) & 3) {
2729 case 0:
2730 if (insn & (1 << 21))
2731 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2732 else
2733 gen_op_iwmmxt_minub_M0_wRn(rd1);
2734 break;
2735 case 1:
2736 if (insn & (1 << 21))
2737 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2738 else
2739 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2740 break;
2741 case 2:
2742 if (insn & (1 << 21))
2743 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2744 else
2745 gen_op_iwmmxt_minul_M0_wRn(rd1);
2746 break;
2747 case 3:
2748 return 1;
2750 gen_op_iwmmxt_movq_wRn_M0(wrd);
2751 gen_op_iwmmxt_set_mup();
2752 break;
2753 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2754 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2755 wrd = (insn >> 12) & 0xf;
2756 rd0 = (insn >> 16) & 0xf;
2757 rd1 = (insn >> 0) & 0xf;
2758 gen_op_iwmmxt_movq_M0_wRn(rd0);
2759 switch ((insn >> 22) & 3) {
2760 case 0:
2761 if (insn & (1 << 21))
2762 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2763 else
2764 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2765 break;
2766 case 1:
2767 if (insn & (1 << 21))
2768 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2769 else
2770 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2771 break;
2772 case 2:
2773 if (insn & (1 << 21))
2774 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2775 else
2776 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2777 break;
2778 case 3:
2779 return 1;
2781 gen_op_iwmmxt_movq_wRn_M0(wrd);
2782 gen_op_iwmmxt_set_mup();
2783 break;
2784 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2785 case 0x402: case 0x502: case 0x602: case 0x702:
2786 wrd = (insn >> 12) & 0xf;
2787 rd0 = (insn >> 16) & 0xf;
2788 rd1 = (insn >> 0) & 0xf;
2789 gen_op_iwmmxt_movq_M0_wRn(rd0);
2790 tmp = tcg_const_i32((insn >> 20) & 3);
2791 iwmmxt_load_reg(cpu_V1, rd1);
2792 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2793 tcg_temp_free_i32(tmp);
2794 gen_op_iwmmxt_movq_wRn_M0(wrd);
2795 gen_op_iwmmxt_set_mup();
2796 break;
2797 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2798 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2799 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2800 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2801 wrd = (insn >> 12) & 0xf;
2802 rd0 = (insn >> 16) & 0xf;
2803 rd1 = (insn >> 0) & 0xf;
2804 gen_op_iwmmxt_movq_M0_wRn(rd0);
2805 switch ((insn >> 20) & 0xf) {
2806 case 0x0:
2807 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2808 break;
2809 case 0x1:
2810 gen_op_iwmmxt_subub_M0_wRn(rd1);
2811 break;
2812 case 0x3:
2813 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2814 break;
2815 case 0x4:
2816 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2817 break;
2818 case 0x5:
2819 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2820 break;
2821 case 0x7:
2822 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2823 break;
2824 case 0x8:
2825 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2826 break;
2827 case 0x9:
2828 gen_op_iwmmxt_subul_M0_wRn(rd1);
2829 break;
2830 case 0xb:
2831 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2832 break;
2833 default:
2834 return 1;
2836 gen_op_iwmmxt_movq_wRn_M0(wrd);
2837 gen_op_iwmmxt_set_mup();
2838 gen_op_iwmmxt_set_cup();
2839 break;
2840 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2841 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2842 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2843 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2844 wrd = (insn >> 12) & 0xf;
2845 rd0 = (insn >> 16) & 0xf;
2846 gen_op_iwmmxt_movq_M0_wRn(rd0);
2847 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2848 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2849 tcg_temp_free_i32(tmp);
2850 gen_op_iwmmxt_movq_wRn_M0(wrd);
2851 gen_op_iwmmxt_set_mup();
2852 gen_op_iwmmxt_set_cup();
2853 break;
2854 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2855 case 0x418: case 0x518: case 0x618: case 0x718:
2856 case 0x818: case 0x918: case 0xa18: case 0xb18:
2857 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2858 wrd = (insn >> 12) & 0xf;
2859 rd0 = (insn >> 16) & 0xf;
2860 rd1 = (insn >> 0) & 0xf;
2861 gen_op_iwmmxt_movq_M0_wRn(rd0);
2862 switch ((insn >> 20) & 0xf) {
2863 case 0x0:
2864 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2865 break;
2866 case 0x1:
2867 gen_op_iwmmxt_addub_M0_wRn(rd1);
2868 break;
2869 case 0x3:
2870 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2871 break;
2872 case 0x4:
2873 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2874 break;
2875 case 0x5:
2876 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2877 break;
2878 case 0x7:
2879 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2880 break;
2881 case 0x8:
2882 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2883 break;
2884 case 0x9:
2885 gen_op_iwmmxt_addul_M0_wRn(rd1);
2886 break;
2887 case 0xb:
2888 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2889 break;
2890 default:
2891 return 1;
2893 gen_op_iwmmxt_movq_wRn_M0(wrd);
2894 gen_op_iwmmxt_set_mup();
2895 gen_op_iwmmxt_set_cup();
2896 break;
2897 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2898 case 0x408: case 0x508: case 0x608: case 0x708:
2899 case 0x808: case 0x908: case 0xa08: case 0xb08:
2900 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2901 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2902 return 1;
2903 wrd = (insn >> 12) & 0xf;
2904 rd0 = (insn >> 16) & 0xf;
2905 rd1 = (insn >> 0) & 0xf;
2906 gen_op_iwmmxt_movq_M0_wRn(rd0);
2907 switch ((insn >> 22) & 3) {
2908 case 1:
2909 if (insn & (1 << 21))
2910 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2911 else
2912 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2913 break;
2914 case 2:
2915 if (insn & (1 << 21))
2916 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2917 else
2918 gen_op_iwmmxt_packul_M0_wRn(rd1);
2919 break;
2920 case 3:
2921 if (insn & (1 << 21))
2922 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2923 else
2924 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2925 break;
2927 gen_op_iwmmxt_movq_wRn_M0(wrd);
2928 gen_op_iwmmxt_set_mup();
2929 gen_op_iwmmxt_set_cup();
2930 break;
2931 case 0x201: case 0x203: case 0x205: case 0x207:
2932 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2933 case 0x211: case 0x213: case 0x215: case 0x217:
2934 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2935 wrd = (insn >> 5) & 0xf;
2936 rd0 = (insn >> 12) & 0xf;
2937 rd1 = (insn >> 0) & 0xf;
2938 if (rd0 == 0xf || rd1 == 0xf)
2939 return 1;
2940 gen_op_iwmmxt_movq_M0_wRn(wrd);
2941 tmp = load_reg(s, rd0);
2942 tmp2 = load_reg(s, rd1);
2943 switch ((insn >> 16) & 0xf) {
2944 case 0x0: /* TMIA */
2945 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2946 break;
2947 case 0x8: /* TMIAPH */
2948 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2949 break;
2950 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2951 if (insn & (1 << 16))
2952 tcg_gen_shri_i32(tmp, tmp, 16);
2953 if (insn & (1 << 17))
2954 tcg_gen_shri_i32(tmp2, tmp2, 16);
2955 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2956 break;
2957 default:
2958 tcg_temp_free_i32(tmp2);
2959 tcg_temp_free_i32(tmp);
2960 return 1;
2962 tcg_temp_free_i32(tmp2);
2963 tcg_temp_free_i32(tmp);
2964 gen_op_iwmmxt_movq_wRn_M0(wrd);
2965 gen_op_iwmmxt_set_mup();
2966 break;
2967 default:
2968 return 1;
2971 return 0;
2974 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2975 (i.e. an undefined instruction). */
2976 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2978 int acc, rd0, rd1, rdhi, rdlo;
2979 TCGv_i32 tmp, tmp2;
2981 if ((insn & 0x0ff00f10) == 0x0e200010) {
2982 /* Multiply with Internal Accumulate Format */
2983 rd0 = (insn >> 12) & 0xf;
2984 rd1 = insn & 0xf;
2985 acc = (insn >> 5) & 7;
2987 if (acc != 0)
2988 return 1;
2990 tmp = load_reg(s, rd0);
2991 tmp2 = load_reg(s, rd1);
2992 switch ((insn >> 16) & 0xf) {
2993 case 0x0: /* MIA */
2994 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2995 break;
2996 case 0x8: /* MIAPH */
2997 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2998 break;
2999 case 0xc: /* MIABB */
3000 case 0xd: /* MIABT */
3001 case 0xe: /* MIATB */
3002 case 0xf: /* MIATT */
3003 if (insn & (1 << 16))
3004 tcg_gen_shri_i32(tmp, tmp, 16);
3005 if (insn & (1 << 17))
3006 tcg_gen_shri_i32(tmp2, tmp2, 16);
3007 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
3008 break;
3009 default:
3010 return 1;
3012 tcg_temp_free_i32(tmp2);
3013 tcg_temp_free_i32(tmp);
3015 gen_op_iwmmxt_movq_wRn_M0(acc);
3016 return 0;
3019 if ((insn & 0x0fe00ff8) == 0x0c400000) {
3020 /* Internal Accumulator Access Format */
3021 rdhi = (insn >> 16) & 0xf;
3022 rdlo = (insn >> 12) & 0xf;
3023 acc = insn & 7;
3025 if (acc != 0)
3026 return 1;
3028 if (insn & ARM_CP_RW_BIT) { /* MRA */
3029 iwmmxt_load_reg(cpu_V0, acc);
3030 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3031 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
3032 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3033 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
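/* acc0 is only 40 bits wide, so MRA returns just bits [39:32] in RdHi. */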
3034 } else { /* MAR */
3035 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
3036 iwmmxt_store_reg(cpu_V0, acc);
3038 return 0;
3041 return 1;
3044 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
3045 #define VFP_SREG(insn, bigbit, smallbit) \
3046 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
3047 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
3048 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
3049 reg = (((insn) >> (bigbit)) & 0x0f) \
3050 | (((insn) >> ((smallbit) - 4)) & 0x10); \
3051 } else { \
3052 if (insn & (1 << (smallbit))) \
3053 return 1; \
3054 reg = ((insn) >> (bigbit)) & 0x0f; \
3055 }} while (0)
3057 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
3058 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
3059 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
3060 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
3061 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
3062 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
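/*
 * Worked example for the decode above: with Vd bits [15:12] = 0x3 and
 * the D bit (22) set, VFP_DREG_D yields 0x3 | 0x10 = 19, i.e. d19,
 * while the single-precision VFP_SREG_D yields (0x3 << 1) | 1 = 7,
 * i.e. s7.  Pre-VFP3 cores only have d0-d15, so a set 'smallbit' is
 * rejected for them.
 */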
3064 /* Move between integer and VFP cores. */
3065 static TCGv_i32 gen_vfp_mrs(void)
3067 TCGv_i32 tmp = tcg_temp_new_i32();
3068 tcg_gen_mov_i32(tmp, cpu_F0s);
3069 return tmp;
3072 static void gen_vfp_msr(TCGv_i32 tmp)
3074 tcg_gen_mov_i32(cpu_F0s, tmp);
3075 tcg_temp_free_i32(tmp);
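/*
 * Broadcast one 16-bit half of 'var' into both halves of the 32-bit
 * value: _low16 duplicates bits [15:0], _high16 duplicates bits [31:16].
 */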
3078 static void gen_neon_dup_low16(TCGv_i32 var)
3080 TCGv_i32 tmp = tcg_temp_new_i32();
3081 tcg_gen_ext16u_i32(var, var);
3082 tcg_gen_shli_i32(tmp, var, 16);
3083 tcg_gen_or_i32(var, var, tmp);
3084 tcg_temp_free_i32(tmp);
3087 static void gen_neon_dup_high16(TCGv_i32 var)
3089 TCGv_i32 tmp = tcg_temp_new_i32();
3090 tcg_gen_andi_i32(var, var, 0xffff0000);
3091 tcg_gen_shri_i32(tmp, var, 16);
3092 tcg_gen_or_i32(var, var, tmp);
3093 tcg_temp_free_i32(tmp);
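/*
 * VSEL: bits [21:20] select the condition (0 eq, 1 vs, 2 ge, 3 gt).
 * The destination is written with Sn/Dn when the condition holds and
 * with Sm/Dm otherwise, implemented as movcond on the cached NZCV flags.
 */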
3096 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
3097 uint32_t dp)
3099 uint32_t cc = extract32(insn, 20, 2);
3101 if (dp) {
3102 TCGv_i64 frn, frm, dest;
3103 TCGv_i64 tmp, zero, zf, nf, vf;
3105 zero = tcg_const_i64(0);
3107 frn = tcg_temp_new_i64();
3108 frm = tcg_temp_new_i64();
3109 dest = tcg_temp_new_i64();
3111 zf = tcg_temp_new_i64();
3112 nf = tcg_temp_new_i64();
3113 vf = tcg_temp_new_i64();
3115 tcg_gen_extu_i32_i64(zf, cpu_ZF);
3116 tcg_gen_ext_i32_i64(nf, cpu_NF);
3117 tcg_gen_ext_i32_i64(vf, cpu_VF);
3119 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3120 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3121 switch (cc) {
3122 case 0: /* eq: Z */
3123 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
3124 frn, frm);
3125 break;
3126 case 1: /* vs: V */
3127 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
3128 frn, frm);
3129 break;
3130 case 2: /* ge: N == V -> N ^ V == 0 */
3131 tmp = tcg_temp_new_i64();
3132 tcg_gen_xor_i64(tmp, vf, nf);
3133 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3134 frn, frm);
3135 tcg_temp_free_i64(tmp);
3136 break;
3137 case 3: /* gt: !Z && N == V */
3138 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
3139 frn, frm);
3140 tmp = tcg_temp_new_i64();
3141 tcg_gen_xor_i64(tmp, vf, nf);
3142 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3143 dest, frm);
3144 tcg_temp_free_i64(tmp);
3145 break;
3147 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3148 tcg_temp_free_i64(frn);
3149 tcg_temp_free_i64(frm);
3150 tcg_temp_free_i64(dest);
3152 tcg_temp_free_i64(zf);
3153 tcg_temp_free_i64(nf);
3154 tcg_temp_free_i64(vf);
3156 tcg_temp_free_i64(zero);
3157 } else {
3158 TCGv_i32 frn, frm, dest;
3159 TCGv_i32 tmp, zero;
3161 zero = tcg_const_i32(0);
3163 frn = tcg_temp_new_i32();
3164 frm = tcg_temp_new_i32();
3165 dest = tcg_temp_new_i32();
3166 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3167 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3168 switch (cc) {
3169 case 0: /* eq: Z */
3170 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
3171 frn, frm);
3172 break;
3173 case 1: /* vs: V */
3174 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
3175 frn, frm);
3176 break;
3177 case 2: /* ge: N == V -> N ^ V == 0 */
3178 tmp = tcg_temp_new_i32();
3179 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3180 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3181 frn, frm);
3182 tcg_temp_free_i32(tmp);
3183 break;
3184 case 3: /* gt: !Z && N == V */
3185 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
3186 frn, frm);
3187 tmp = tcg_temp_new_i32();
3188 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3189 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3190 dest, frm);
3191 tcg_temp_free_i32(tmp);
3192 break;
3194 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3195 tcg_temp_free_i32(frn);
3196 tcg_temp_free_i32(frm);
3197 tcg_temp_free_i32(dest);
3199 tcg_temp_free_i32(zero);
3202 return 0;
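/*
 * VMAXNM/VMINNM: bit 6 selects minimum vs maximum.  These use the
 * minNum/maxNum semantics (a quiet-NaN operand is replaced by the
 * other operand), matching the vfp_minnum and vfp_maxnum helpers.
 */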
3205 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3206 uint32_t rm, uint32_t dp)
3208 uint32_t vmin = extract32(insn, 6, 1);
3209 TCGv_ptr fpst = get_fpstatus_ptr(0);
3211 if (dp) {
3212 TCGv_i64 frn, frm, dest;
3214 frn = tcg_temp_new_i64();
3215 frm = tcg_temp_new_i64();
3216 dest = tcg_temp_new_i64();
3218 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3219 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3220 if (vmin) {
3221 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
3222 } else {
3223 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
3225 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3226 tcg_temp_free_i64(frn);
3227 tcg_temp_free_i64(frm);
3228 tcg_temp_free_i64(dest);
3229 } else {
3230 TCGv_i32 frn, frm, dest;
3232 frn = tcg_temp_new_i32();
3233 frm = tcg_temp_new_i32();
3234 dest = tcg_temp_new_i32();
3236 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3237 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3238 if (vmin) {
3239 gen_helper_vfp_minnums(dest, frn, frm, fpst);
3240 } else {
3241 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
3243 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3244 tcg_temp_free_i32(frn);
3245 tcg_temp_free_i32(frm);
3246 tcg_temp_free_i32(dest);
3249 tcg_temp_free_ptr(fpst);
3250 return 0;
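/*
 * VRINTA/VRINTN/VRINTP/VRINTM: round to integral in floating-point
 * using the rounding mode taken from the instruction.  set_rmode hands
 * back the previous mode, which is why it is called a second time
 * afterwards to restore the original FPSCR rounding mode.
 */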
3253 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3254 int rounding)
3256 TCGv_ptr fpst = get_fpstatus_ptr(0);
3257 TCGv_i32 tcg_rmode;
3259 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3260 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3262 if (dp) {
3263 TCGv_i64 tcg_op;
3264 TCGv_i64 tcg_res;
3265 tcg_op = tcg_temp_new_i64();
3266 tcg_res = tcg_temp_new_i64();
3267 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3268 gen_helper_rintd(tcg_res, tcg_op, fpst);
3269 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3270 tcg_temp_free_i64(tcg_op);
3271 tcg_temp_free_i64(tcg_res);
3272 } else {
3273 TCGv_i32 tcg_op;
3274 TCGv_i32 tcg_res;
3275 tcg_op = tcg_temp_new_i32();
3276 tcg_res = tcg_temp_new_i32();
3277 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3278 gen_helper_rints(tcg_res, tcg_op, fpst);
3279 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3280 tcg_temp_free_i32(tcg_op);
3281 tcg_temp_free_i32(tcg_res);
3284 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3285 tcg_temp_free_i32(tcg_rmode);
3287 tcg_temp_free_ptr(fpst);
3288 return 0;
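/*
 * VCVTA/VCVTN/VCVTP/VCVTM: float to 32-bit integer using an explicit
 * rounding mode and a zero fixed-point shift.  The result always goes
 * to a single-precision register, even for a double-precision source,
 * hence the re-encoding of rd in the dp path below.
 */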
3291 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3292 int rounding)
3294 bool is_signed = extract32(insn, 7, 1);
3295 TCGv_ptr fpst = get_fpstatus_ptr(0);
3296 TCGv_i32 tcg_rmode, tcg_shift;
3298 tcg_shift = tcg_const_i32(0);
3300 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3301 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3303 if (dp) {
3304 TCGv_i64 tcg_double, tcg_res;
3305 TCGv_i32 tcg_tmp;
3306 /* Rd is encoded as a single precision register even when the source
3307 * is double precision.
3309 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3310 tcg_double = tcg_temp_new_i64();
3311 tcg_res = tcg_temp_new_i64();
3312 tcg_tmp = tcg_temp_new_i32();
3313 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3314 if (is_signed) {
3315 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3316 } else {
3317 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3319 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
3320 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3321 tcg_temp_free_i32(tcg_tmp);
3322 tcg_temp_free_i64(tcg_res);
3323 tcg_temp_free_i64(tcg_double);
3324 } else {
3325 TCGv_i32 tcg_single, tcg_res;
3326 tcg_single = tcg_temp_new_i32();
3327 tcg_res = tcg_temp_new_i32();
3328 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3329 if (is_signed) {
3330 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3331 } else {
3332 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3334 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3335 tcg_temp_free_i32(tcg_res);
3336 tcg_temp_free_i32(tcg_single);
3339 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3340 tcg_temp_free_i32(tcg_rmode);
3342 tcg_temp_free_i32(tcg_shift);
3344 tcg_temp_free_ptr(fpst);
3346 return 0;
3349 /* Table for converting the most common AArch32 encoding of
3350 * rounding mode to arm_fprounding order (which matches the
3351 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3353 static const uint8_t fp_decode_rm[] = {
3354 FPROUNDING_TIEAWAY,
3355 FPROUNDING_TIEEVEN,
3356 FPROUNDING_POSINF,
3357 FPROUNDING_NEGINF,
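/*
 * i.e. an RM field of 00 selects FPROUNDING_TIEAWAY (the 'A' forms),
 * 01 TIEEVEN ('N'), 10 POSINF ('P') and 11 NEGINF ('M').
 */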
3360 static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
3362 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3364 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3365 return 1;
3368 if (dp) {
3369 VFP_DREG_D(rd, insn);
3370 VFP_DREG_N(rn, insn);
3371 VFP_DREG_M(rm, insn);
3372 } else {
3373 rd = VFP_SREG_D(insn);
3374 rn = VFP_SREG_N(insn);
3375 rm = VFP_SREG_M(insn);
3378 if ((insn & 0x0f800e50) == 0x0e000a00) {
3379 return handle_vsel(insn, rd, rn, rm, dp);
3380 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3381 return handle_vminmaxnm(insn, rd, rn, rm, dp);
3382 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3383 /* VRINTA, VRINTN, VRINTP, VRINTM */
3384 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3385 return handle_vrint(insn, rd, rm, dp, rounding);
3386 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3387 /* VCVTA, VCVTN, VCVTP, VCVTM */
3388 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3389 return handle_vcvt(insn, rd, rm, dp, rounding);
3391 return 1;
3394 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3395 (i.e. an undefined instruction). */
3396 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3398 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3399 int dp, veclen;
3400 TCGv_i32 addr;
3401 TCGv_i32 tmp;
3402 TCGv_i32 tmp2;
3404 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
3405 return 1;
3408 /* FIXME: this access check should not take precedence over UNDEF
3409 * for invalid encodings; we will generate incorrect syndrome information
3410 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3412 if (s->fp_excp_el) {
3413 gen_exception_insn(s, 4, EXCP_UDEF,
3414 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
3415 return 0;
3418 if (!s->vfp_enabled) {
3419 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3420 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3421 return 1;
3422 rn = (insn >> 16) & 0xf;
3423 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3424 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
3425 return 1;
3429 if (extract32(insn, 28, 4) == 0xf) {
3430 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3431 * only used in v8 and above.
3433 return disas_vfp_v8_insn(s, insn);
3436 dp = ((insn & 0xf00) == 0xb00);
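/* Coprocessor 11 (0xb00) encodings are double precision, cp10 single. */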
3437 switch ((insn >> 24) & 0xf) {
3438 case 0xe:
3439 if (insn & (1 << 4)) {
3440 /* single register transfer */
3441 rd = (insn >> 12) & 0xf;
3442 if (dp) {
3443 int size;
3444 int pass;
3446 VFP_DREG_N(rn, insn);
3447 if (insn & 0xf)
3448 return 1;
3449 if (insn & 0x00c00060
3450 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
3451 return 1;
3454 pass = (insn >> 21) & 1;
3455 if (insn & (1 << 22)) {
3456 size = 0;
3457 offset = ((insn >> 5) & 3) * 8;
3458 } else if (insn & (1 << 5)) {
3459 size = 1;
3460 offset = (insn & (1 << 6)) ? 16 : 0;
3461 } else {
3462 size = 2;
3463 offset = 0;
3465 if (insn & ARM_CP_RW_BIT) {
3466 /* vfp->arm */
3467 tmp = neon_load_reg(rn, pass);
3468 switch (size) {
3469 case 0:
3470 if (offset)
3471 tcg_gen_shri_i32(tmp, tmp, offset);
3472 if (insn & (1 << 23))
3473 gen_uxtb(tmp);
3474 else
3475 gen_sxtb(tmp);
3476 break;
3477 case 1:
3478 if (insn & (1 << 23)) {
3479 if (offset) {
3480 tcg_gen_shri_i32(tmp, tmp, 16);
3481 } else {
3482 gen_uxth(tmp);
3484 } else {
3485 if (offset) {
3486 tcg_gen_sari_i32(tmp, tmp, 16);
3487 } else {
3488 gen_sxth(tmp);
3491 break;
3492 case 2:
3493 break;
3495 store_reg(s, rd, tmp);
3496 } else {
3497 /* arm->vfp */
3498 tmp = load_reg(s, rd);
3499 if (insn & (1 << 23)) {
3500 /* VDUP */
3501 int vec_size = pass ? 16 : 8;
3502 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rn, 0),
3503 vec_size, vec_size, tmp);
3504 tcg_temp_free_i32(tmp);
3505 } else {
3506 /* VMOV */
3507 switch (size) {
3508 case 0:
3509 tmp2 = neon_load_reg(rn, pass);
3510 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
3511 tcg_temp_free_i32(tmp2);
3512 break;
3513 case 1:
3514 tmp2 = neon_load_reg(rn, pass);
3515 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
3516 tcg_temp_free_i32(tmp2);
3517 break;
3518 case 2:
3519 break;
3521 neon_store_reg(rn, pass, tmp);
3524 } else { /* !dp */
3525 if ((insn & 0x6f) != 0x00)
3526 return 1;
3527 rn = VFP_SREG_N(insn);
3528 if (insn & ARM_CP_RW_BIT) {
3529 /* vfp->arm */
3530 if (insn & (1 << 21)) {
3531 /* system register */
3532 rn >>= 1;
3534 switch (rn) {
3535 case ARM_VFP_FPSID:
3536 /* VFP2 allows access to FPSID from userspace.
3537 VFP3 restricts all id registers to privileged
3538 accesses. */
3539 if (IS_USER(s)
3540 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3541 return 1;
3543 tmp = load_cpu_field(vfp.xregs[rn]);
3544 break;
3545 case ARM_VFP_FPEXC:
3546 if (IS_USER(s))
3547 return 1;
3548 tmp = load_cpu_field(vfp.xregs[rn]);
3549 break;
3550 case ARM_VFP_FPINST:
3551 case ARM_VFP_FPINST2:
3552 /* Not present in VFP3. */
3553 if (IS_USER(s)
3554 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3555 return 1;
3557 tmp = load_cpu_field(vfp.xregs[rn]);
3558 break;
3559 case ARM_VFP_FPSCR:
3560 if (rd == 15) {
3561 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3562 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3563 } else {
3564 tmp = tcg_temp_new_i32();
3565 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3567 break;
3568 case ARM_VFP_MVFR2:
3569 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3570 return 1;
3572 /* fall through */
3573 case ARM_VFP_MVFR0:
3574 case ARM_VFP_MVFR1:
3575 if (IS_USER(s)
3576 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
3577 return 1;
3579 tmp = load_cpu_field(vfp.xregs[rn]);
3580 break;
3581 default:
3582 return 1;
3584 } else {
3585 gen_mov_F0_vreg(0, rn);
3586 tmp = gen_vfp_mrs();
3588 if (rd == 15) {
3589 /* Set the 4 flag bits in the CPSR. */
3590 gen_set_nzcv(tmp);
3591 tcg_temp_free_i32(tmp);
3592 } else {
3593 store_reg(s, rd, tmp);
3595 } else {
3596 /* arm->vfp */
3597 if (insn & (1 << 21)) {
3598 rn >>= 1;
3599 /* system register */
3600 switch (rn) {
3601 case ARM_VFP_FPSID:
3602 case ARM_VFP_MVFR0:
3603 case ARM_VFP_MVFR1:
3604 /* Writes are ignored. */
3605 break;
3606 case ARM_VFP_FPSCR:
3607 tmp = load_reg(s, rd);
3608 gen_helper_vfp_set_fpscr(cpu_env, tmp);
3609 tcg_temp_free_i32(tmp);
3610 gen_lookup_tb(s);
3611 break;
3612 case ARM_VFP_FPEXC:
3613 if (IS_USER(s))
3614 return 1;
3615 /* TODO: VFP subarchitecture support.
3616 * For now, keep the EN bit only */
3617 tmp = load_reg(s, rd);
3618 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
3619 store_cpu_field(tmp, vfp.xregs[rn]);
3620 gen_lookup_tb(s);
3621 break;
3622 case ARM_VFP_FPINST:
3623 case ARM_VFP_FPINST2:
3624 if (IS_USER(s)) {
3625 return 1;
3627 tmp = load_reg(s, rd);
3628 store_cpu_field(tmp, vfp.xregs[rn]);
3629 break;
3630 default:
3631 return 1;
3633 } else {
3634 tmp = load_reg(s, rd);
3635 gen_vfp_msr(tmp);
3636 gen_mov_vreg_F0(0, rn);
3640 } else {
3641 /* data processing */
3642 /* The opcode is in bits 23, 21, 20 and 6. */
3643 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3644 if (dp) {
3645 if (op == 15) {
3646 /* rn is opcode */
3647 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3648 } else {
3649 /* rn is register number */
3650 VFP_DREG_N(rn, insn);
3653 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3654 ((rn & 0x1e) == 0x6))) {
3655 /* Integer or single/half precision destination. */
3656 rd = VFP_SREG_D(insn);
3657 } else {
3658 VFP_DREG_D(rd, insn);
3660 if (op == 15 &&
3661 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3662 ((rn & 0x1e) == 0x4))) {
3663 /* VCVT from int or half precision is always from S reg
3664 * regardless of dp bit. VCVT with immediate frac_bits
3665 * has same format as SREG_M.
3667 rm = VFP_SREG_M(insn);
3668 } else {
3669 VFP_DREG_M(rm, insn);
3671 } else {
3672 rn = VFP_SREG_N(insn);
3673 if (op == 15 && rn == 15) {
3674 /* Double precision destination. */
3675 VFP_DREG_D(rd, insn);
3676 } else {
3677 rd = VFP_SREG_D(insn);
3679 /* NB that we implicitly rely on the encoding for the frac_bits
3680 * in VCVT of fixed to float being the same as that of an SREG_M
3682 rm = VFP_SREG_M(insn);
3685 veclen = s->vec_len;
3686 if (op == 15 && rn > 3)
3687 veclen = 0;
3689 /* Shut up compiler warnings. */
3690 delta_m = 0;
3691 delta_d = 0;
3692 bank_mask = 0;
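/*
 * VFP short vectors: an operand in the first register bank (s0-s7 or
 * d0-d3) is a scalar; otherwise the operation is repeated veclen more
 * times, stepping the vector operands through their banks using the
 * FPSCR stride.
 */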
3694 if (veclen > 0) {
3695 if (dp)
3696 bank_mask = 0xc;
3697 else
3698 bank_mask = 0x18;
3700 /* Figure out what type of vector operation this is. */
3701 if ((rd & bank_mask) == 0) {
3702 /* scalar */
3703 veclen = 0;
3704 } else {
3705 if (dp)
3706 delta_d = (s->vec_stride >> 1) + 1;
3707 else
3708 delta_d = s->vec_stride + 1;
3710 if ((rm & bank_mask) == 0) {
3711 /* mixed scalar/vector */
3712 delta_m = 0;
3713 } else {
3714 /* vector */
3715 delta_m = delta_d;
3720 /* Load the initial operands. */
3721 if (op == 15) {
3722 switch (rn) {
3723 case 16:
3724 case 17:
3725 /* Integer source */
3726 gen_mov_F0_vreg(0, rm);
3727 break;
3728 case 8:
3729 case 9:
3730 /* Compare */
3731 gen_mov_F0_vreg(dp, rd);
3732 gen_mov_F1_vreg(dp, rm);
3733 break;
3734 case 10:
3735 case 11:
3736 /* Compare with zero */
3737 gen_mov_F0_vreg(dp, rd);
3738 gen_vfp_F1_ld0(dp);
3739 break;
3740 case 20:
3741 case 21:
3742 case 22:
3743 case 23:
3744 case 28:
3745 case 29:
3746 case 30:
3747 case 31:
3748 /* Source and destination the same. */
3749 gen_mov_F0_vreg(dp, rd);
3750 break;
3751 case 4:
3752 case 5:
3753 case 6:
3754 case 7:
3755 /* VCVTB, VCVTT: only present with the halfprec extension
3756 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3757 * (we choose to UNDEF)
3759 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3760 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
3761 return 1;
3763 if (!extract32(rn, 1, 1)) {
3764 /* Half precision source. */
3765 gen_mov_F0_vreg(0, rm);
3766 break;
3768 /* Otherwise fall through */
3769 default:
3770 /* One source operand. */
3771 gen_mov_F0_vreg(dp, rm);
3772 break;
3774 } else {
3775 /* Two source operands. */
3776 gen_mov_F0_vreg(dp, rn);
3777 gen_mov_F1_vreg(dp, rm);
3780 for (;;) {
3781 /* Perform the calculation. */
3782 switch (op) {
3783 case 0: /* VMLA: fd + (fn * fm) */
3784 /* Note that order of inputs to the add matters for NaNs */
3785 gen_vfp_F1_mul(dp);
3786 gen_mov_F0_vreg(dp, rd);
3787 gen_vfp_add(dp);
3788 break;
3789 case 1: /* VMLS: fd + -(fn * fm) */
3790 gen_vfp_mul(dp);
3791 gen_vfp_F1_neg(dp);
3792 gen_mov_F0_vreg(dp, rd);
3793 gen_vfp_add(dp);
3794 break;
3795 case 2: /* VNMLS: -fd + (fn * fm) */
3796 /* Note that it isn't valid to replace (-A + B) with (B - A)
3797 * or similar plausible-looking simplifications
3798 * because this will give wrong results for NaNs.
3800 gen_vfp_F1_mul(dp);
3801 gen_mov_F0_vreg(dp, rd);
3802 gen_vfp_neg(dp);
3803 gen_vfp_add(dp);
3804 break;
3805 case 3: /* VNMLA: -fd + -(fn * fm) */
3806 gen_vfp_mul(dp);
3807 gen_vfp_F1_neg(dp);
3808 gen_mov_F0_vreg(dp, rd);
3809 gen_vfp_neg(dp);
3810 gen_vfp_add(dp);
3811 break;
3812 case 4: /* mul: fn * fm */
3813 gen_vfp_mul(dp);
3814 break;
3815 case 5: /* nmul: -(fn * fm) */
3816 gen_vfp_mul(dp);
3817 gen_vfp_neg(dp);
3818 break;
3819 case 6: /* add: fn + fm */
3820 gen_vfp_add(dp);
3821 break;
3822 case 7: /* sub: fn - fm */
3823 gen_vfp_sub(dp);
3824 break;
3825 case 8: /* div: fn / fm */
3826 gen_vfp_div(dp);
3827 break;
3828 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3829 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3830 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3831 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3832 /* These are fused multiply-add, and must be done as one
3833 * floating point operation with no rounding between the
3834 * multiplication and addition steps.
3835 * NB that doing the negations here as separate steps is
3836 * correct: an input NaN should come out with its sign bit
3837 * flipped if it is a negated input.
3839 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
3840 return 1;
3842 if (dp) {
3843 TCGv_ptr fpst;
3844 TCGv_i64 frd;
3845 if (op & 1) {
3846 /* VFNMS, VFMS */
3847 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3849 frd = tcg_temp_new_i64();
3850 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3851 if (op & 2) {
3852 /* VFNMA, VFNMS */
3853 gen_helper_vfp_negd(frd, frd);
3855 fpst = get_fpstatus_ptr(0);
3856 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3857 cpu_F1d, frd, fpst);
3858 tcg_temp_free_ptr(fpst);
3859 tcg_temp_free_i64(frd);
3860 } else {
3861 TCGv_ptr fpst;
3862 TCGv_i32 frd;
3863 if (op & 1) {
3864 /* VFNMS, VFMS */
3865 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3867 frd = tcg_temp_new_i32();
3868 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3869 if (op & 2) {
3870 gen_helper_vfp_negs(frd, frd);
3872 fpst = get_fpstatus_ptr(0);
3873 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3874 cpu_F1s, frd, fpst);
3875 tcg_temp_free_ptr(fpst);
3876 tcg_temp_free_i32(frd);
3878 break;
3879 case 14: /* fconst */
3880 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3881 return 1;
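/*
 * Expand the 8-bit VFP modified immediate: the sign comes from insn
 * bit 19, imm[6:4] from bits [18:16] and imm[3:0] from bits [3:0],
 * roughly following the ARM ARM VFPExpandImm() pseudocode (for doubles
 * only the top 32 bits are non-zero).
 */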
3884 n = (insn << 12) & 0x80000000;
3885 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3886 if (dp) {
3887 if (i & 0x40)
3888 i |= 0x3f80;
3889 else
3890 i |= 0x4000;
3891 n |= i << 16;
3892 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3893 } else {
3894 if (i & 0x40)
3895 i |= 0x780;
3896 else
3897 i |= 0x800;
3898 n |= i << 19;
3899 tcg_gen_movi_i32(cpu_F0s, n);
3901 break;
3902 case 15: /* extension space */
3903 switch (rn) {
3904 case 0: /* cpy */
3905 /* no-op */
3906 break;
3907 case 1: /* abs */
3908 gen_vfp_abs(dp);
3909 break;
3910 case 2: /* neg */
3911 gen_vfp_neg(dp);
3912 break;
3913 case 3: /* sqrt */
3914 gen_vfp_sqrt(dp);
3915 break;
3916 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3918 TCGv_ptr fpst = get_fpstatus_ptr(false);
3919 TCGv_i32 ahp_mode = get_ahp_flag();
3920 tmp = gen_vfp_mrs();
3921 tcg_gen_ext16u_i32(tmp, tmp);
3922 if (dp) {
3923 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3924 fpst, ahp_mode);
3925 } else {
3926 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3927 fpst, ahp_mode);
3929 tcg_temp_free_i32(ahp_mode);
3930 tcg_temp_free_ptr(fpst);
3931 tcg_temp_free_i32(tmp);
3932 break;
3934 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3936 TCGv_ptr fpst = get_fpstatus_ptr(false);
3937 TCGv_i32 ahp = get_ahp_flag();
3938 tmp = gen_vfp_mrs();
3939 tcg_gen_shri_i32(tmp, tmp, 16);
3940 if (dp) {
3941 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3942 fpst, ahp);
3943 } else {
3944 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3945 fpst, ahp);
3947 tcg_temp_free_i32(tmp);
3948 tcg_temp_free_i32(ahp);
3949 tcg_temp_free_ptr(fpst);
3950 break;
3952 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3954 TCGv_ptr fpst = get_fpstatus_ptr(false);
3955 TCGv_i32 ahp = get_ahp_flag();
3956 tmp = tcg_temp_new_i32();
3958 if (dp) {
3959 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3960 fpst, ahp);
3961 } else {
3962 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3963 fpst, ahp);
3965 tcg_temp_free_i32(ahp);
3966 tcg_temp_free_ptr(fpst);
3967 gen_mov_F0_vreg(0, rd);
3968 tmp2 = gen_vfp_mrs();
3969 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3970 tcg_gen_or_i32(tmp, tmp, tmp2);
3971 tcg_temp_free_i32(tmp2);
3972 gen_vfp_msr(tmp);
3973 break;
3975 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3977 TCGv_ptr fpst = get_fpstatus_ptr(false);
3978 TCGv_i32 ahp = get_ahp_flag();
3979 tmp = tcg_temp_new_i32();
3980 if (dp) {
3981 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3982 fpst, ahp);
3983 } else {
3984 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3985 fpst, ahp);
3987 tcg_temp_free_i32(ahp);
3988 tcg_temp_free_ptr(fpst);
3989 tcg_gen_shli_i32(tmp, tmp, 16);
3990 gen_mov_F0_vreg(0, rd);
3991 tmp2 = gen_vfp_mrs();
3992 tcg_gen_ext16u_i32(tmp2, tmp2);
3993 tcg_gen_or_i32(tmp, tmp, tmp2);
3994 tcg_temp_free_i32(tmp2);
3995 gen_vfp_msr(tmp);
3996 break;
3998 case 8: /* cmp */
3999 gen_vfp_cmp(dp);
4000 break;
4001 case 9: /* cmpe */
4002 gen_vfp_cmpe(dp);
4003 break;
4004 case 10: /* cmpz */
4005 gen_vfp_cmp(dp);
4006 break;
4007 case 11: /* cmpez */
4008 gen_vfp_F1_ld0(dp);
4009 gen_vfp_cmpe(dp);
4010 break;
4011 case 12: /* vrintr */
4013 TCGv_ptr fpst = get_fpstatus_ptr(0);
4014 if (dp) {
4015 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
4016 } else {
4017 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
4019 tcg_temp_free_ptr(fpst);
4020 break;
4022 case 13: /* vrintz */
4024 TCGv_ptr fpst = get_fpstatus_ptr(0);
4025 TCGv_i32 tcg_rmode;
4026 tcg_rmode = tcg_const_i32(float_round_to_zero);
4027 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
4028 if (dp) {
4029 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
4030 } else {
4031 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
4033 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
4034 tcg_temp_free_i32(tcg_rmode);
4035 tcg_temp_free_ptr(fpst);
4036 break;
4038 case 14: /* vrintx */
4040 TCGv_ptr fpst = get_fpstatus_ptr(0);
4041 if (dp) {
4042 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
4043 } else {
4044 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
4046 tcg_temp_free_ptr(fpst);
4047 break;
4049 case 15: /* single<->double conversion */
4050 if (dp)
4051 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
4052 else
4053 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
4054 break;
4055 case 16: /* fuito */
4056 gen_vfp_uito(dp, 0);
4057 break;
4058 case 17: /* fsito */
4059 gen_vfp_sito(dp, 0);
4060 break;
4061 case 20: /* fshto */
4062 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4063 return 1;
4065 gen_vfp_shto(dp, 16 - rm, 0);
4066 break;
4067 case 21: /* fslto */
4068 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4069 return 1;
4071 gen_vfp_slto(dp, 32 - rm, 0);
4072 break;
4073 case 22: /* fuhto */
4074 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4075 return 1;
4077 gen_vfp_uhto(dp, 16 - rm, 0);
4078 break;
4079 case 23: /* fulto */
4080 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4081 return 1;
4083 gen_vfp_ulto(dp, 32 - rm, 0);
4084 break;
4085 case 24: /* ftoui */
4086 gen_vfp_toui(dp, 0);
4087 break;
4088 case 25: /* ftouiz */
4089 gen_vfp_touiz(dp, 0);
4090 break;
4091 case 26: /* ftosi */
4092 gen_vfp_tosi(dp, 0);
4093 break;
4094 case 27: /* ftosiz */
4095 gen_vfp_tosiz(dp, 0);
4096 break;
4097 case 28: /* ftosh */
4098 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4099 return 1;
4101 gen_vfp_tosh(dp, 16 - rm, 0);
4102 break;
4103 case 29: /* ftosl */
4104 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4105 return 1;
4107 gen_vfp_tosl(dp, 32 - rm, 0);
4108 break;
4109 case 30: /* ftouh */
4110 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4111 return 1;
4113 gen_vfp_touh(dp, 16 - rm, 0);
4114 break;
4115 case 31: /* ftoul */
4116 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4117 return 1;
4119 gen_vfp_toul(dp, 32 - rm, 0);
4120 break;
4121 default: /* undefined */
4122 return 1;
4124 break;
4125 default: /* undefined */
4126 return 1;
4129 /* Write back the result. */
4130 if (op == 15 && (rn >= 8 && rn <= 11)) {
4131 /* Comparison, do nothing. */
4132 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
4133 (rn & 0x1e) == 0x6)) {
4134 /* VCVT double to int: always integer result.
4135 * VCVT double to half precision is always a single
4136 * precision result.
4138 gen_mov_vreg_F0(0, rd);
4139 } else if (op == 15 && rn == 15) {
4140 /* conversion */
4141 gen_mov_vreg_F0(!dp, rd);
4142 } else {
4143 gen_mov_vreg_F0(dp, rd);
4146 /* break out of the loop if we have finished */
4147 if (veclen == 0)
4148 break;
4150 if (op == 15 && delta_m == 0) {
4151 /* single source one-many */
4152 while (veclen--) {
4153 rd = ((rd + delta_d) & (bank_mask - 1))
4154 | (rd & bank_mask);
4155 gen_mov_vreg_F0(dp, rd);
4157 break;
4159 /* Set up the next operands. */
4160 veclen--;
4161 rd = ((rd + delta_d) & (bank_mask - 1))
4162 | (rd & bank_mask);
4164 if (op == 15) {
4165 /* One source operand. */
4166 rm = ((rm + delta_m) & (bank_mask - 1))
4167 | (rm & bank_mask);
4168 gen_mov_F0_vreg(dp, rm);
4169 } else {
4170 /* Two source operands. */
4171 rn = ((rn + delta_d) & (bank_mask - 1))
4172 | (rn & bank_mask);
4173 gen_mov_F0_vreg(dp, rn);
4174 if (delta_m) {
4175 rm = ((rm + delta_m) & (bank_mask - 1))
4176 | (rm & bank_mask);
4177 gen_mov_F1_vreg(dp, rm);
4182 break;
4183 case 0xc:
4184 case 0xd:
4185 if ((insn & 0x03e00000) == 0x00400000) {
4186 /* two-register transfer */
4187 rn = (insn >> 16) & 0xf;
4188 rd = (insn >> 12) & 0xf;
4189 if (dp) {
4190 VFP_DREG_M(rm, insn);
4191 } else {
4192 rm = VFP_SREG_M(insn);
4195 if (insn & ARM_CP_RW_BIT) {
4196 /* vfp->arm */
4197 if (dp) {
4198 gen_mov_F0_vreg(0, rm * 2);
4199 tmp = gen_vfp_mrs();
4200 store_reg(s, rd, tmp);
4201 gen_mov_F0_vreg(0, rm * 2 + 1);
4202 tmp = gen_vfp_mrs();
4203 store_reg(s, rn, tmp);
4204 } else {
4205 gen_mov_F0_vreg(0, rm);
4206 tmp = gen_vfp_mrs();
4207 store_reg(s, rd, tmp);
4208 gen_mov_F0_vreg(0, rm + 1);
4209 tmp = gen_vfp_mrs();
4210 store_reg(s, rn, tmp);
4212 } else {
4213 /* arm->vfp */
4214 if (dp) {
4215 tmp = load_reg(s, rd);
4216 gen_vfp_msr(tmp);
4217 gen_mov_vreg_F0(0, rm * 2);
4218 tmp = load_reg(s, rn);
4219 gen_vfp_msr(tmp);
4220 gen_mov_vreg_F0(0, rm * 2 + 1);
4221 } else {
4222 tmp = load_reg(s, rd);
4223 gen_vfp_msr(tmp);
4224 gen_mov_vreg_F0(0, rm);
4225 tmp = load_reg(s, rn);
4226 gen_vfp_msr(tmp);
4227 gen_mov_vreg_F0(0, rm + 1);
4230 } else {
4231 /* Load/store */
4232 rn = (insn >> 16) & 0xf;
4233 if (dp)
4234 VFP_DREG_D(rd, insn);
4235 else
4236 rd = VFP_SREG_D(insn);
4237 if ((insn & 0x01200000) == 0x01000000) {
4238 /* Single load/store */
4239 offset = (insn & 0xff) << 2;
4240 if ((insn & (1 << 23)) == 0)
4241 offset = -offset;
4242 if (s->thumb && rn == 15) {
4243 /* This is actually UNPREDICTABLE */
4244 addr = tcg_temp_new_i32();
4245 tcg_gen_movi_i32(addr, s->pc & ~2);
4246 } else {
4247 addr = load_reg(s, rn);
4249 tcg_gen_addi_i32(addr, addr, offset);
4250 if (insn & (1 << 20)) {
4251 gen_vfp_ld(s, dp, addr);
4252 gen_mov_vreg_F0(dp, rd);
4253 } else {
4254 gen_mov_F0_vreg(dp, rd);
4255 gen_vfp_st(s, dp, addr);
4257 tcg_temp_free_i32(addr);
4258 } else {
4259 /* load/store multiple */
4260 int w = insn & (1 << 21);
4261 if (dp)
4262 n = (insn >> 1) & 0x7f;
4263 else
4264 n = insn & 0xff;
4266 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4267 /* P == U , W == 1 => UNDEF */
4268 return 1;
4270 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4271 /* UNPREDICTABLE cases for bad immediates: we choose to
4272 * UNDEF to avoid generating huge numbers of TCG ops
4273 */
4274 return 1;
4275 }
4276 if (rn == 15 && w) {
4277 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4278 return 1;
4279 }
4281 if (s->thumb && rn == 15) {
4282 /* This is actually UNPREDICTABLE */
4283 addr = tcg_temp_new_i32();
4284 tcg_gen_movi_i32(addr, s->pc & ~2);
4285 } else {
4286 addr = load_reg(s, rn);
4288 if (insn & (1 << 24)) /* pre-decrement */
4289 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
4291 if (s->v8m_stackcheck && rn == 13 && w) {
4292 /*
4293 * Here 'addr' is the lowest address we will store to,
4294 * and is either the old SP (if post-increment) or
4295 * the new SP (if pre-decrement). For post-increment
4296 * where the old value is below the limit and the new
4297 * value is above, it is UNKNOWN whether the limit check
4298 * triggers; we choose to trigger.
4299 */
4300 gen_helper_v8m_stackcheck(cpu_env, addr);
4301 }
4303 if (dp)
4304 offset = 8;
4305 else
4306 offset = 4;
4307 for (i = 0; i < n; i++) {
4308 if (insn & ARM_CP_RW_BIT) {
4309 /* load */
4310 gen_vfp_ld(s, dp, addr);
4311 gen_mov_vreg_F0(dp, rd + i);
4312 } else {
4313 /* store */
4314 gen_mov_F0_vreg(dp, rd + i);
4315 gen_vfp_st(s, dp, addr);
4317 tcg_gen_addi_i32(addr, addr, offset);
4319 if (w) {
4320 /* writeback */
4321 if (insn & (1 << 24))
4322 offset = -offset * n;
4323 else if (dp && (insn & 1))
4324 offset = 4;
4325 else
4326 offset = 0;
4328 if (offset != 0)
4329 tcg_gen_addi_i32(addr, addr, offset);
4330 store_reg(s, rn, addr);
4331 } else {
4332 tcg_temp_free_i32(addr);
4336 break;
4337 default:
4338 /* Should never happen. */
4339 return 1;
4341 return 0;
4344 static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
4345 {
4346 #ifndef CONFIG_USER_ONLY
4347 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
4348 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4349 #else
4350 return true;
4351 #endif
4352 }
4354 static void gen_goto_ptr(void)
4355 {
4356 tcg_gen_lookup_and_goto_ptr();
4357 }
4359 /* This will end the TB but doesn't guarantee we'll return to
4360 * cpu_loop_exec. Any live exit_requests will be processed as we
4361 * enter the next TB.
4362 */
4363 static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
4364 {
4365 if (use_goto_tb(s, dest)) {
4366 tcg_gen_goto_tb(n);
4367 gen_set_pc_im(s, dest);
4368 tcg_gen_exit_tb(s->base.tb, n);
4369 } else {
4370 gen_set_pc_im(s, dest);
4371 gen_goto_ptr();
4372 }
4373 s->base.is_jmp = DISAS_NORETURN;
4374 }
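/* Illustrative sketch of the use_goto_tb() / gen_goto_tb() split above
 * (values are hypothetical, assuming 4 KiB target pages, i.e.
 * TARGET_PAGE_MASK == ~0xfff):
 *
 *   TB start 0x00010004, dest 0x00010ffc
 *     -> same page 0x00010000, so direct block chaining is emitted via
 *        tcg_gen_goto_tb()/tcg_gen_exit_tb().
 *
 *   TB start 0x00010ffc, dest 0x00011000
 *     -> the TB start and the end of the current insn are on page
 *        0x00010000 while dest is on 0x00011000, so we fall back to
 *        gen_goto_ptr(), which looks the destination TB up at run time.
 */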
4376 static inline void gen_jmp (DisasContext *s, uint32_t dest)
4377 {
4378 if (unlikely(is_singlestepping(s))) {
4379 /* An indirect jump so that we still trigger the debug exception. */
4380 if (s->thumb)
4381 dest |= 1;
4382 gen_bx_im(s, dest);
4383 } else {
4384 gen_goto_tb(s, 0, dest);
4385 }
4386 }
4388 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
4390 if (x)
4391 tcg_gen_sari_i32(t0, t0, 16);
4392 else
4393 gen_sxth(t0);
4394 if (y)
4395 tcg_gen_sari_i32(t1, t1, 16);
4396 else
4397 gen_sxth(t1);
4398 tcg_gen_mul_i32(t0, t0, t1);
4401 /* Return the mask of PSR bits set by a MSR instruction. */
4402 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4403 {
4404 uint32_t mask;
4406 mask = 0;
4407 if (flags & (1 << 0))
4408 mask |= 0xff;
4409 if (flags & (1 << 1))
4410 mask |= 0xff00;
4411 if (flags & (1 << 2))
4412 mask |= 0xff0000;
4413 if (flags & (1 << 3))
4414 mask |= 0xff000000;
4416 /* Mask out undefined bits. */
4417 mask &= ~CPSR_RESERVED;
4418 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
4419 mask &= ~CPSR_T;
4420 }
4421 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
4422 mask &= ~CPSR_Q; /* V5TE in reality */
4423 }
4424 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
4425 mask &= ~(CPSR_E | CPSR_GE);
4426 }
4427 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
4428 mask &= ~CPSR_IT;
4429 }
4430 /* Mask out execution state and reserved bits. */
4431 if (!spsr) {
4432 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4433 }
4434 /* Mask out privileged bits. */
4435 if (IS_USER(s))
4436 mask &= CPSR_USER;
4437 return mask;
4438 }
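/* Worked example for msr_mask() above (illustrative): an MSR whose field
 * mask is flags == 0b1001 selects the control byte (bit 0 -> 0x000000ff)
 * and the flags byte (bit 3 -> 0xff000000), giving mask = 0xff0000ff before
 * the feature-dependent bits are cleared. In user mode the final
 * "mask &= CPSR_USER" step then restricts the write to roughly the NZCV, Q
 * and GE bits.
 */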
4440 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
4441 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
4443 TCGv_i32 tmp;
4444 if (spsr) {
4445 /* ??? This is also undefined in system mode. */
4446 if (IS_USER(s))
4447 return 1;
4449 tmp = load_cpu_field(spsr);
4450 tcg_gen_andi_i32(tmp, tmp, ~mask);
4451 tcg_gen_andi_i32(t0, t0, mask);
4452 tcg_gen_or_i32(tmp, tmp, t0);
4453 store_cpu_field(tmp, spsr);
4454 } else {
4455 gen_set_cpsr(t0, mask);
4457 tcg_temp_free_i32(t0);
4458 gen_lookup_tb(s);
4459 return 0;
4462 /* Returns nonzero if access to the PSR is not permitted. */
4463 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4465 TCGv_i32 tmp;
4466 tmp = tcg_temp_new_i32();
4467 tcg_gen_movi_i32(tmp, val);
4468 return gen_set_psr(s, mask, spsr, tmp);
4471 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
4472 int *tgtmode, int *regno)
4474 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4475 * the target mode and register number, and identify the various
4476 * unpredictable cases.
4477 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4478 * + executed in user mode
4479 * + using R15 as the src/dest register
4480 * + accessing an unimplemented register
4481 * + accessing a register that's inaccessible at current PL/security state*
4482 * + accessing a register that you could access with a different insn
4483 * We choose to UNDEF in all these cases.
4484 * Since we don't know which of the various AArch32 modes we are in
4485 * we have to defer some checks to runtime.
4486 * Accesses to Monitor mode registers from Secure EL1 (which implies
4487 * that EL3 is AArch64) must trap to EL3.
4489 * If the access checks fail this function will emit code to take
4490 * an exception and return false. Otherwise it will return true,
4491 * and set *tgtmode and *regno appropriately.
4493 int exc_target = default_exception_el(s);
4495 /* These instructions are present only in ARMv8, or in ARMv7 with the
4496 * Virtualization Extensions.
4498 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4499 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
4500 goto undef;
4503 if (IS_USER(s) || rn == 15) {
4504 goto undef;
4507 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4508 * of registers into (r, sysm).
4510 if (r) {
4511 /* SPSRs for other modes */
4512 switch (sysm) {
4513 case 0xe: /* SPSR_fiq */
4514 *tgtmode = ARM_CPU_MODE_FIQ;
4515 break;
4516 case 0x10: /* SPSR_irq */
4517 *tgtmode = ARM_CPU_MODE_IRQ;
4518 break;
4519 case 0x12: /* SPSR_svc */
4520 *tgtmode = ARM_CPU_MODE_SVC;
4521 break;
4522 case 0x14: /* SPSR_abt */
4523 *tgtmode = ARM_CPU_MODE_ABT;
4524 break;
4525 case 0x16: /* SPSR_und */
4526 *tgtmode = ARM_CPU_MODE_UND;
4527 break;
4528 case 0x1c: /* SPSR_mon */
4529 *tgtmode = ARM_CPU_MODE_MON;
4530 break;
4531 case 0x1e: /* SPSR_hyp */
4532 *tgtmode = ARM_CPU_MODE_HYP;
4533 break;
4534 default: /* unallocated */
4535 goto undef;
4537 /* We arbitrarily assign SPSR a register number of 16. */
4538 *regno = 16;
4539 } else {
4540 /* general purpose registers for other modes */
4541 switch (sysm) {
4542 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4543 *tgtmode = ARM_CPU_MODE_USR;
4544 *regno = sysm + 8;
4545 break;
4546 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4547 *tgtmode = ARM_CPU_MODE_FIQ;
4548 *regno = sysm;
4549 break;
4550 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4551 *tgtmode = ARM_CPU_MODE_IRQ;
4552 *regno = sysm & 1 ? 13 : 14;
4553 break;
4554 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4555 *tgtmode = ARM_CPU_MODE_SVC;
4556 *regno = sysm & 1 ? 13 : 14;
4557 break;
4558 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4559 *tgtmode = ARM_CPU_MODE_ABT;
4560 *regno = sysm & 1 ? 13 : 14;
4561 break;
4562 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4563 *tgtmode = ARM_CPU_MODE_UND;
4564 *regno = sysm & 1 ? 13 : 14;
4565 break;
4566 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4567 *tgtmode = ARM_CPU_MODE_MON;
4568 *regno = sysm & 1 ? 13 : 14;
4569 break;
4570 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4571 *tgtmode = ARM_CPU_MODE_HYP;
4572 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4573 *regno = sysm & 1 ? 13 : 17;
4574 break;
4575 default: /* unallocated */
4576 goto undef;
4580 /* Catch the 'accessing inaccessible register' cases we can detect
4581 * at translate time.
4583 switch (*tgtmode) {
4584 case ARM_CPU_MODE_MON:
4585 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
4586 goto undef;
4588 if (s->current_el == 1) {
4589 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4590 * then accesses to Mon registers trap to EL3
4592 exc_target = 3;
4593 goto undef;
4595 break;
4596 case ARM_CPU_MODE_HYP:
4598 * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
4599 * (and so we can forbid accesses from EL2 or below). elr_hyp
4600 * can be accessed also from Hyp mode, so forbid accesses from
4601 * EL0 or EL1.
4603 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
4604 (s->current_el < 3 && *regno != 17)) {
4605 goto undef;
4607 break;
4608 default:
4609 break;
4612 return true;
4614 undef:
4615 /* If we get here then some access check did not pass */
4616 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
4617 return false;
4618 }
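/* Illustrative decodes for msr_banked_access_decode() above, read straight
 * off the (r, sysm) switch it implements (worked examples, not new
 * behaviour):
 *   r == 1, sysm == 0x10 -> SPSR_irq: *tgtmode = ARM_CPU_MODE_IRQ, *regno = 16
 *   r == 0, sysm == 0x11 -> r13_irq:  *tgtmode = ARM_CPU_MODE_IRQ, *regno = 13
 *   r == 0, sysm == 0x1e -> elr_hyp:  *tgtmode = ARM_CPU_MODE_HYP, *regno = 17
 */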
4620 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4622 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4623 int tgtmode = 0, regno = 0;
4625 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4626 return;
4629 /* Sync state because msr_banked() can raise exceptions */
4630 gen_set_condexec(s);
4631 gen_set_pc_im(s, s->pc - 4);
4632 tcg_reg = load_reg(s, rn);
4633 tcg_tgtmode = tcg_const_i32(tgtmode);
4634 tcg_regno = tcg_const_i32(regno);
4635 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4636 tcg_temp_free_i32(tcg_tgtmode);
4637 tcg_temp_free_i32(tcg_regno);
4638 tcg_temp_free_i32(tcg_reg);
4639 s->base.is_jmp = DISAS_UPDATE;
4642 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4644 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4645 int tgtmode = 0, regno = 0;
4647 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4648 return;
4651 /* Sync state because mrs_banked() can raise exceptions */
4652 gen_set_condexec(s);
4653 gen_set_pc_im(s, s->pc - 4);
4654 tcg_reg = tcg_temp_new_i32();
4655 tcg_tgtmode = tcg_const_i32(tgtmode);
4656 tcg_regno = tcg_const_i32(regno);
4657 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4658 tcg_temp_free_i32(tcg_tgtmode);
4659 tcg_temp_free_i32(tcg_regno);
4660 store_reg(s, rn, tcg_reg);
4661 s->base.is_jmp = DISAS_UPDATE;
4664 /* Store value to PC as for an exception return (ie don't
4665 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
4666 * will do the masking based on the new value of the Thumb bit.
4667 */
4668 static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
4669 {
4670 tcg_gen_mov_i32(cpu_R[15], pc);
4671 tcg_temp_free_i32(pc);
4672 }
4674 /* Generate a v6 exception return. Marks both values as dead. */
4675 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
4677 store_pc_exc_ret(s, pc);
4678 /* The cpsr_write_eret helper will mask the low bits of PC
4679 * appropriately depending on the new Thumb bit, so it must
4680 * be called after storing the new PC.
4682 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4683 gen_io_start();
4685 gen_helper_cpsr_write_eret(cpu_env, cpsr);
4686 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4687 gen_io_end();
4689 tcg_temp_free_i32(cpsr);
4690 /* Must exit loop to check un-masked IRQs */
4691 s->base.is_jmp = DISAS_EXIT;
4694 /* Generate an old-style exception return. Marks pc as dead. */
4695 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4696 {
4697 gen_rfe(s, pc, load_cpu_field(spsr));
4698 }
4700 /*
4701 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
4702 * only call the helper when running single threaded TCG code to ensure
4703 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4704 * just skip this instruction. Currently the SEV/SEVL instructions
4705 * which are *one* of many ways to wake the CPU from WFE are not
4706 * implemented so we can't sleep like WFI does.
4707 */
4708 static void gen_nop_hint(DisasContext *s, int val)
4709 {
4710 switch (val) {
4711 /* When running in MTTCG we don't generate jumps to the yield and
4712 * WFE helpers as it won't affect the scheduling of other vCPUs.
4713 * If we wanted to more completely model WFE/SEV so we don't busy
4714 * spin unnecessarily we would need to do something more involved.
4716 case 1: /* yield */
4717 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4718 gen_set_pc_im(s, s->pc);
4719 s->base.is_jmp = DISAS_YIELD;
4721 break;
4722 case 3: /* wfi */
4723 gen_set_pc_im(s, s->pc);
4724 s->base.is_jmp = DISAS_WFI;
4725 break;
4726 case 2: /* wfe */
4727 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4728 gen_set_pc_im(s, s->pc);
4729 s->base.is_jmp = DISAS_WFE;
4731 break;
4732 case 4: /* sev */
4733 case 5: /* sevl */
4734 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
4735 default: /* nop */
4736 break;
4740 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
4742 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
4744 switch (size) {
4745 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4746 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4747 case 2: tcg_gen_add_i32(t0, t0, t1); break;
4748 default: abort();
4752 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
4754 switch (size) {
4755 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4756 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4757 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
4758 default: return;
4762 /* 32-bit pairwise ops end up the same as the elementwise versions. */
4763 #define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
4764 #define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
4765 #define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
4766 #define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
4768 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
4769 switch ((size << 1) | u) { \
4770 case 0: \
4771 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
4772 break; \
4773 case 1: \
4774 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
4775 break; \
4776 case 2: \
4777 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
4778 break; \
4779 case 3: \
4780 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
4781 break; \
4782 case 4: \
4783 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
4784 break; \
4785 case 5: \
4786 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
4787 break; \
4788 default: return 1; \
4789 }} while (0)
4791 #define GEN_NEON_INTEGER_OP(name) do { \
4792 switch ((size << 1) | u) { \
4793 case 0: \
4794 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
4795 break; \
4796 case 1: \
4797 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
4798 break; \
4799 case 2: \
4800 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
4801 break; \
4802 case 3: \
4803 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
4804 break; \
4805 case 4: \
4806 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
4807 break; \
4808 case 5: \
4809 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
4810 break; \
4811 default: return 1; \
4812 }} while (0)
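/* Expansion sketch for the macro above (illustrative): with size == 1 and
 * u == 0 we have (size << 1) | u == 2, so a use such as
 * GEN_NEON_INTEGER_OP(hadd) expands to
 *
 *     gen_helper_neon_hadd_s16(tmp, tmp, tmp2);
 *
 * while size == 2, u == 1 selects case 5 and calls
 * gen_helper_neon_hadd_u32(tmp, tmp, tmp2). Any size/u combination not
 * listed hits the default case and returns 1 (UNDEF).
 */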
4814 static TCGv_i32 neon_load_scratch(int scratch)
4816 TCGv_i32 tmp = tcg_temp_new_i32();
4817 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4818 return tmp;
4821 static void neon_store_scratch(int scratch, TCGv_i32 var)
4823 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4824 tcg_temp_free_i32(var);
4827 static inline TCGv_i32 neon_get_scalar(int size, int reg)
4829 TCGv_i32 tmp;
4830 if (size == 1) {
4831 tmp = neon_load_reg(reg & 7, reg >> 4);
4832 if (reg & 8) {
4833 gen_neon_dup_high16(tmp);
4834 } else {
4835 gen_neon_dup_low16(tmp);
4837 } else {
4838 tmp = neon_load_reg(reg & 15, reg >> 4);
4840 return tmp;
4843 static int gen_neon_unzip(int rd, int rm, int size, int q)
4845 TCGv_ptr pd, pm;
4847 if (!q && size == 2) {
4848 return 1;
4850 pd = vfp_reg_ptr(true, rd);
4851 pm = vfp_reg_ptr(true, rm);
4852 if (q) {
4853 switch (size) {
4854 case 0:
4855 gen_helper_neon_qunzip8(pd, pm);
4856 break;
4857 case 1:
4858 gen_helper_neon_qunzip16(pd, pm);
4859 break;
4860 case 2:
4861 gen_helper_neon_qunzip32(pd, pm);
4862 break;
4863 default:
4864 abort();
4866 } else {
4867 switch (size) {
4868 case 0:
4869 gen_helper_neon_unzip8(pd, pm);
4870 break;
4871 case 1:
4872 gen_helper_neon_unzip16(pd, pm);
4873 break;
4874 default:
4875 abort();
4878 tcg_temp_free_ptr(pd);
4879 tcg_temp_free_ptr(pm);
4880 return 0;
4883 static int gen_neon_zip(int rd, int rm, int size, int q)
4885 TCGv_ptr pd, pm;
4887 if (!q && size == 2) {
4888 return 1;
4890 pd = vfp_reg_ptr(true, rd);
4891 pm = vfp_reg_ptr(true, rm);
4892 if (q) {
4893 switch (size) {
4894 case 0:
4895 gen_helper_neon_qzip8(pd, pm);
4896 break;
4897 case 1:
4898 gen_helper_neon_qzip16(pd, pm);
4899 break;
4900 case 2:
4901 gen_helper_neon_qzip32(pd, pm);
4902 break;
4903 default:
4904 abort();
4906 } else {
4907 switch (size) {
4908 case 0:
4909 gen_helper_neon_zip8(pd, pm);
4910 break;
4911 case 1:
4912 gen_helper_neon_zip16(pd, pm);
4913 break;
4914 default:
4915 abort();
4918 tcg_temp_free_ptr(pd);
4919 tcg_temp_free_ptr(pm);
4920 return 0;
4923 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
4925 TCGv_i32 rd, tmp;
4927 rd = tcg_temp_new_i32();
4928 tmp = tcg_temp_new_i32();
4930 tcg_gen_shli_i32(rd, t0, 8);
4931 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4932 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4933 tcg_gen_or_i32(rd, rd, tmp);
4935 tcg_gen_shri_i32(t1, t1, 8);
4936 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4937 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4938 tcg_gen_or_i32(t1, t1, tmp);
4939 tcg_gen_mov_i32(t0, rd);
4941 tcg_temp_free_i32(tmp);
4942 tcg_temp_free_i32(rd);
4943 }
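/* Worked example for gen_neon_trn_u8() above (purely arithmetic, values
 * hypothetical): with t0 = 0xA3A2A1A0 and t1 = 0xB3B2B1B0 on entry,
 *   rd = ((t0 << 8) & 0xff00ff00) | (t1 & 0x00ff00ff) = 0xA2B2A0B0
 *   t1 = ((t1 >> 8) & 0x00ff00ff) | (t0 & 0xff00ff00) = 0xA3B3A1B1
 *   t0 = rd                                           = 0xA2B2A0B0
 * so t0 ends up holding the even-indexed bytes of both inputs and t1 the
 * odd-indexed ones, which is the per-word piece of VTRN.8.
 */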
4945 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
4947 TCGv_i32 rd, tmp;
4949 rd = tcg_temp_new_i32();
4950 tmp = tcg_temp_new_i32();
4952 tcg_gen_shli_i32(rd, t0, 16);
4953 tcg_gen_andi_i32(tmp, t1, 0xffff);
4954 tcg_gen_or_i32(rd, rd, tmp);
4955 tcg_gen_shri_i32(t1, t1, 16);
4956 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4957 tcg_gen_or_i32(t1, t1, tmp);
4958 tcg_gen_mov_i32(t0, rd);
4960 tcg_temp_free_i32(tmp);
4961 tcg_temp_free_i32(rd);
4965 static struct {
4966 int nregs;
4967 int interleave;
4968 int spacing;
4969 } const neon_ls_element_type[11] = {
4970 {1, 4, 1},
4971 {1, 4, 2},
4972 {4, 1, 1},
4973 {2, 2, 2},
4974 {1, 3, 1},
4975 {1, 3, 2},
4976 {3, 1, 1},
4977 {1, 1, 1},
4978 {1, 2, 1},
4979 {1, 2, 2},
4980 {2, 1, 1}
4981 };
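/* Illustrative reading of the table above (the op -> instruction mapping is
 * assumed from the ARM ARM "multiple structures" encodings, so treat this as
 * a sketch): an entry {nregs, interleave, spacing} = {2, 2, 2} makes the loop
 * in disas_neon_ls_insn() below walk two register groups of two interleaved
 * D registers with a spacing of 2, i.e. for a base register d0 the element
 * destinations come out of
 *
 *     tt = rd + reg + spacing * xs   ->   d0, d2 (reg = 0),  d1, d3 (reg = 1)
 *
 * which matches a VLD2/VST2 form touching four D registers.
 */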
4983 /* Translate a NEON load/store element instruction. Return nonzero if the
4984 instruction is invalid. */
4985 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
4987 int rd, rn, rm;
4988 int op;
4989 int nregs;
4990 int interleave;
4991 int spacing;
4992 int stride;
4993 int size;
4994 int reg;
4995 int load;
4996 int n;
4997 int vec_size;
4998 int mmu_idx;
4999 TCGMemOp endian;
5000 TCGv_i32 addr;
5001 TCGv_i32 tmp;
5002 TCGv_i32 tmp2;
5003 TCGv_i64 tmp64;
5005 /* FIXME: this access check should not take precedence over UNDEF
5006 * for invalid encodings; we will generate incorrect syndrome information
5007 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5009 if (s->fp_excp_el) {
5010 gen_exception_insn(s, 4, EXCP_UDEF,
5011 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
5012 return 0;
5015 if (!s->vfp_enabled)
5016 return 1;
5017 VFP_DREG_D(rd, insn);
5018 rn = (insn >> 16) & 0xf;
5019 rm = insn & 0xf;
5020 load = (insn & (1 << 21)) != 0;
5021 endian = s->be_data;
5022 mmu_idx = get_mem_index(s);
5023 if ((insn & (1 << 23)) == 0) {
5024 /* Load store all elements. */
5025 op = (insn >> 8) & 0xf;
5026 size = (insn >> 6) & 3;
5027 if (op > 10)
5028 return 1;
5029 /* Catch UNDEF cases for bad values of align field */
5030 switch (op & 0xc) {
5031 case 4:
5032 if (((insn >> 5) & 1) == 1) {
5033 return 1;
5035 break;
5036 case 8:
5037 if (((insn >> 4) & 3) == 3) {
5038 return 1;
5040 break;
5041 default:
5042 break;
5044 nregs = neon_ls_element_type[op].nregs;
5045 interleave = neon_ls_element_type[op].interleave;
5046 spacing = neon_ls_element_type[op].spacing;
5047 if (size == 3 && (interleave | spacing) != 1) {
5048 return 1;
5050 /* For our purposes, bytes are always little-endian. */
5051 if (size == 0) {
5052 endian = MO_LE;
5054 /* Consecutive little-endian elements from a single register
5055 * can be promoted to a larger little-endian operation.
5057 if (interleave == 1 && endian == MO_LE) {
5058 size = 3;
5060 tmp64 = tcg_temp_new_i64();
5061 addr = tcg_temp_new_i32();
5062 tmp2 = tcg_const_i32(1 << size);
5063 load_reg_var(s, addr, rn);
5064 for (reg = 0; reg < nregs; reg++) {
5065 for (n = 0; n < 8 >> size; n++) {
5066 int xs;
5067 for (xs = 0; xs < interleave; xs++) {
5068 int tt = rd + reg + spacing * xs;
5070 if (load) {
5071 gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
5072 neon_store_element64(tt, n, size, tmp64);
5073 } else {
5074 neon_load_element64(tmp64, tt, n, size);
5075 gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
5077 tcg_gen_add_i32(addr, addr, tmp2);
5081 tcg_temp_free_i32(addr);
5082 tcg_temp_free_i32(tmp2);
5083 tcg_temp_free_i64(tmp64);
5084 stride = nregs * interleave * 8;
5085 } else {
5086 size = (insn >> 10) & 3;
5087 if (size == 3) {
5088 /* Load single element to all lanes. */
5089 int a = (insn >> 4) & 1;
5090 if (!load) {
5091 return 1;
5093 size = (insn >> 6) & 3;
5094 nregs = ((insn >> 8) & 3) + 1;
5096 if (size == 3) {
5097 if (nregs != 4 || a == 0) {
5098 return 1;
5100 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
5101 size = 2;
5103 if (nregs == 1 && a == 1 && size == 0) {
5104 return 1;
5106 if (nregs == 3 && a == 1) {
5107 return 1;
5109 addr = tcg_temp_new_i32();
5110 load_reg_var(s, addr, rn);
5112 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
5113 * VLD2/3/4 to all lanes: bit 5 indicates register stride.
5115 stride = (insn & (1 << 5)) ? 2 : 1;
5116 vec_size = nregs == 1 ? stride * 8 : 8;
5118 tmp = tcg_temp_new_i32();
5119 for (reg = 0; reg < nregs; reg++) {
5120 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
5121 s->be_data | size);
5122 if ((rd & 1) && vec_size == 16) {
5123 /* We cannot write 16 bytes at once because the
5124 * destination is unaligned.
5126 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
5127 8, 8, tmp);
5128 tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
5129 neon_reg_offset(rd, 0), 8, 8);
5130 } else {
5131 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
5132 vec_size, vec_size, tmp);
5134 tcg_gen_addi_i32(addr, addr, 1 << size);
5135 rd += stride;
5137 tcg_temp_free_i32(tmp);
5138 tcg_temp_free_i32(addr);
5139 stride = (1 << size) * nregs;
5140 } else {
5141 /* Single element. */
5142 int idx = (insn >> 4) & 0xf;
5143 int reg_idx;
5144 switch (size) {
5145 case 0:
5146 reg_idx = (insn >> 5) & 7;
5147 stride = 1;
5148 break;
5149 case 1:
5150 reg_idx = (insn >> 6) & 3;
5151 stride = (insn & (1 << 5)) ? 2 : 1;
5152 break;
5153 case 2:
5154 reg_idx = (insn >> 7) & 1;
5155 stride = (insn & (1 << 6)) ? 2 : 1;
5156 break;
5157 default:
5158 abort();
5160 nregs = ((insn >> 8) & 3) + 1;
5161 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5162 switch (nregs) {
5163 case 1:
5164 if (((idx & (1 << size)) != 0) ||
5165 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5166 return 1;
5168 break;
5169 case 3:
5170 if ((idx & 1) != 0) {
5171 return 1;
5173 /* fall through */
5174 case 2:
5175 if (size == 2 && (idx & 2) != 0) {
5176 return 1;
5178 break;
5179 case 4:
5180 if ((size == 2) && ((idx & 3) == 3)) {
5181 return 1;
5183 break;
5184 default:
5185 abort();
5187 if ((rd + stride * (nregs - 1)) > 31) {
5188 /* Attempts to write off the end of the register file
5189 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5190 * the neon_load_reg() would write off the end of the array.
5192 return 1;
5194 tmp = tcg_temp_new_i32();
5195 addr = tcg_temp_new_i32();
5196 load_reg_var(s, addr, rn);
5197 for (reg = 0; reg < nregs; reg++) {
5198 if (load) {
5199 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
5200 s->be_data | size);
5201 neon_store_element(rd, reg_idx, size, tmp);
5202 } else { /* Store */
5203 neon_load_element(tmp, rd, reg_idx, size);
5204 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
5205 s->be_data | size);
5207 rd += stride;
5208 tcg_gen_addi_i32(addr, addr, 1 << size);
5210 tcg_temp_free_i32(addr);
5211 tcg_temp_free_i32(tmp);
5212 stride = nregs * (1 << size);
5215 if (rm != 15) {
5216 TCGv_i32 base;
5218 base = load_reg(s, rn);
5219 if (rm == 13) {
5220 tcg_gen_addi_i32(base, base, stride);
5221 } else {
5222 TCGv_i32 index;
5223 index = load_reg(s, rm);
5224 tcg_gen_add_i32(base, base, index);
5225 tcg_temp_free_i32(index);
5227 store_reg(s, rn, base);
5229 return 0;
5232 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
5234 switch (size) {
5235 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5236 case 1: gen_helper_neon_narrow_u16(dest, src); break;
5237 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
5238 default: abort();
5242 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
5244 switch (size) {
5245 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5246 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5247 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
5248 default: abort();
5252 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
5254 switch (size) {
5255 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5256 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5257 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
5258 default: abort();
5262 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
5264 switch (size) {
5265 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5266 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5267 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
5268 default: abort();
5272 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
5273 int q, int u)
5275 if (q) {
5276 if (u) {
5277 switch (size) {
5278 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5279 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5280 default: abort();
5282 } else {
5283 switch (size) {
5284 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5285 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5286 default: abort();
5289 } else {
5290 if (u) {
5291 switch (size) {
5292 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5293 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
5294 default: abort();
5296 } else {
5297 switch (size) {
5298 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5299 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5300 default: abort();
5306 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
5308 if (u) {
5309 switch (size) {
5310 case 0: gen_helper_neon_widen_u8(dest, src); break;
5311 case 1: gen_helper_neon_widen_u16(dest, src); break;
5312 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5313 default: abort();
5315 } else {
5316 switch (size) {
5317 case 0: gen_helper_neon_widen_s8(dest, src); break;
5318 case 1: gen_helper_neon_widen_s16(dest, src); break;
5319 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5320 default: abort();
5323 tcg_temp_free_i32(src);
5326 static inline void gen_neon_addl(int size)
5328 switch (size) {
5329 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5330 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5331 case 2: tcg_gen_add_i64(CPU_V001); break;
5332 default: abort();
5336 static inline void gen_neon_subl(int size)
5338 switch (size) {
5339 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5340 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5341 case 2: tcg_gen_sub_i64(CPU_V001); break;
5342 default: abort();
5346 static inline void gen_neon_negl(TCGv_i64 var, int size)
5348 switch (size) {
5349 case 0: gen_helper_neon_negl_u16(var, var); break;
5350 case 1: gen_helper_neon_negl_u32(var, var); break;
5351 case 2:
5352 tcg_gen_neg_i64(var, var);
5353 break;
5354 default: abort();
5358 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
5360 switch (size) {
5361 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5362 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
5363 default: abort();
5367 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5368 int size, int u)
5370 TCGv_i64 tmp;
5372 switch ((size << 1) | u) {
5373 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5374 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5375 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5376 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5377 case 4:
5378 tmp = gen_muls_i64_i32(a, b);
5379 tcg_gen_mov_i64(dest, tmp);
5380 tcg_temp_free_i64(tmp);
5381 break;
5382 case 5:
5383 tmp = gen_mulu_i64_i32(a, b);
5384 tcg_gen_mov_i64(dest, tmp);
5385 tcg_temp_free_i64(tmp);
5386 break;
5387 default: abort();
5390 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5391 Don't forget to clean them now. */
5392 if (size < 2) {
5393 tcg_temp_free_i32(a);
5394 tcg_temp_free_i32(b);
5398 static void gen_neon_narrow_op(int op, int u, int size,
5399 TCGv_i32 dest, TCGv_i64 src)
5401 if (op) {
5402 if (u) {
5403 gen_neon_unarrow_sats(size, dest, src);
5404 } else {
5405 gen_neon_narrow(size, dest, src);
5407 } else {
5408 if (u) {
5409 gen_neon_narrow_satu(size, dest, src);
5410 } else {
5411 gen_neon_narrow_sats(size, dest, src);
5416 /* Symbolic constants for op fields for Neon 3-register same-length.
5417 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
5418 * table A7-9.
5420 #define NEON_3R_VHADD 0
5421 #define NEON_3R_VQADD 1
5422 #define NEON_3R_VRHADD 2
5423 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
5424 #define NEON_3R_VHSUB 4
5425 #define NEON_3R_VQSUB 5
5426 #define NEON_3R_VCGT 6
5427 #define NEON_3R_VCGE 7
5428 #define NEON_3R_VSHL 8
5429 #define NEON_3R_VQSHL 9
5430 #define NEON_3R_VRSHL 10
5431 #define NEON_3R_VQRSHL 11
5432 #define NEON_3R_VMAX 12
5433 #define NEON_3R_VMIN 13
5434 #define NEON_3R_VABD 14
5435 #define NEON_3R_VABA 15
5436 #define NEON_3R_VADD_VSUB 16
5437 #define NEON_3R_VTST_VCEQ 17
5438 #define NEON_3R_VML 18 /* VMLA, VMLS */
5439 #define NEON_3R_VMUL 19
5440 #define NEON_3R_VPMAX 20
5441 #define NEON_3R_VPMIN 21
5442 #define NEON_3R_VQDMULH_VQRDMULH 22
5443 #define NEON_3R_VPADD_VQRDMLAH 23
5444 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
5445 #define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
5446 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
5447 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
5448 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
5449 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
5450 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
5451 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
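/* Decode sketch for the constants above (illustrative): the op value is insn
 * bits [11:8] concatenated with bit [4], so e.g. bits [11:8] = 0b1000 with
 * bit [4] = 0 gives op = 0b10000 = 16 = NEON_3R_VADD_VSUB, and
 * bits [11:8] = 0b1101 with bit [4] = 1 gives op = 0b11011 = 27 =
 * NEON_3R_FLOAT_MULTIPLY. The U bit and the size field then pick the
 * concrete operation within each group, as neon_3r_sizes below encodes.
 */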
5453 static const uint8_t neon_3r_sizes[] = {
5454 [NEON_3R_VHADD] = 0x7,
5455 [NEON_3R_VQADD] = 0xf,
5456 [NEON_3R_VRHADD] = 0x7,
5457 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
5458 [NEON_3R_VHSUB] = 0x7,
5459 [NEON_3R_VQSUB] = 0xf,
5460 [NEON_3R_VCGT] = 0x7,
5461 [NEON_3R_VCGE] = 0x7,
5462 [NEON_3R_VSHL] = 0xf,
5463 [NEON_3R_VQSHL] = 0xf,
5464 [NEON_3R_VRSHL] = 0xf,
5465 [NEON_3R_VQRSHL] = 0xf,
5466 [NEON_3R_VMAX] = 0x7,
5467 [NEON_3R_VMIN] = 0x7,
5468 [NEON_3R_VABD] = 0x7,
5469 [NEON_3R_VABA] = 0x7,
5470 [NEON_3R_VADD_VSUB] = 0xf,
5471 [NEON_3R_VTST_VCEQ] = 0x7,
5472 [NEON_3R_VML] = 0x7,
5473 [NEON_3R_VMUL] = 0x7,
5474 [NEON_3R_VPMAX] = 0x7,
5475 [NEON_3R_VPMIN] = 0x7,
5476 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
5477 [NEON_3R_VPADD_VQRDMLAH] = 0x7,
5478 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
5479 [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
5480 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
5481 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
5482 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
5483 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
5484 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
5485 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
5488 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
5489 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
5490 * table A7-13.
5492 #define NEON_2RM_VREV64 0
5493 #define NEON_2RM_VREV32 1
5494 #define NEON_2RM_VREV16 2
5495 #define NEON_2RM_VPADDL 4
5496 #define NEON_2RM_VPADDL_U 5
5497 #define NEON_2RM_AESE 6 /* Includes AESD */
5498 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
5499 #define NEON_2RM_VCLS 8
5500 #define NEON_2RM_VCLZ 9
5501 #define NEON_2RM_VCNT 10
5502 #define NEON_2RM_VMVN 11
5503 #define NEON_2RM_VPADAL 12
5504 #define NEON_2RM_VPADAL_U 13
5505 #define NEON_2RM_VQABS 14
5506 #define NEON_2RM_VQNEG 15
5507 #define NEON_2RM_VCGT0 16
5508 #define NEON_2RM_VCGE0 17
5509 #define NEON_2RM_VCEQ0 18
5510 #define NEON_2RM_VCLE0 19
5511 #define NEON_2RM_VCLT0 20
5512 #define NEON_2RM_SHA1H 21
5513 #define NEON_2RM_VABS 22
5514 #define NEON_2RM_VNEG 23
5515 #define NEON_2RM_VCGT0_F 24
5516 #define NEON_2RM_VCGE0_F 25
5517 #define NEON_2RM_VCEQ0_F 26
5518 #define NEON_2RM_VCLE0_F 27
5519 #define NEON_2RM_VCLT0_F 28
5520 #define NEON_2RM_VABS_F 30
5521 #define NEON_2RM_VNEG_F 31
5522 #define NEON_2RM_VSWP 32
5523 #define NEON_2RM_VTRN 33
5524 #define NEON_2RM_VUZP 34
5525 #define NEON_2RM_VZIP 35
5526 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
5527 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
5528 #define NEON_2RM_VSHLL 38
5529 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
5530 #define NEON_2RM_VRINTN 40
5531 #define NEON_2RM_VRINTX 41
5532 #define NEON_2RM_VRINTA 42
5533 #define NEON_2RM_VRINTZ 43
5534 #define NEON_2RM_VCVT_F16_F32 44
5535 #define NEON_2RM_VRINTM 45
5536 #define NEON_2RM_VCVT_F32_F16 46
5537 #define NEON_2RM_VRINTP 47
5538 #define NEON_2RM_VCVTAU 48
5539 #define NEON_2RM_VCVTAS 49
5540 #define NEON_2RM_VCVTNU 50
5541 #define NEON_2RM_VCVTNS 51
5542 #define NEON_2RM_VCVTPU 52
5543 #define NEON_2RM_VCVTPS 53
5544 #define NEON_2RM_VCVTMU 54
5545 #define NEON_2RM_VCVTMS 55
5546 #define NEON_2RM_VRECPE 56
5547 #define NEON_2RM_VRSQRTE 57
5548 #define NEON_2RM_VRECPE_F 58
5549 #define NEON_2RM_VRSQRTE_F 59
5550 #define NEON_2RM_VCVT_FS 60
5551 #define NEON_2RM_VCVT_FU 61
5552 #define NEON_2RM_VCVT_SF 62
5553 #define NEON_2RM_VCVT_UF 63
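/* Decode sketch for the 2-register-misc constants above (illustrative): the
 * op value is insn bits [17:16] concatenated with bits [10:7], so e.g.
 * bits [17:16] = 0b10 and bits [10:7] = 0b0110 give
 * op = (0b10 << 4) | 0b0110 = 38 = NEON_2RM_VSHLL, while
 * bits [17:16] = 0b11 and bits [10:7] = 0b1100 give op = 60 =
 * NEON_2RM_VCVT_FS.
 */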
5555 static int neon_2rm_is_float_op(int op)
5557 /* Return true if this neon 2reg-misc op is float-to-float */
5558 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
5559 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
5560 op == NEON_2RM_VRINTM ||
5561 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
5562 op >= NEON_2RM_VRECPE_F);
5565 static bool neon_2rm_is_v8_op(int op)
5567 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5568 switch (op) {
5569 case NEON_2RM_VRINTN:
5570 case NEON_2RM_VRINTA:
5571 case NEON_2RM_VRINTM:
5572 case NEON_2RM_VRINTP:
5573 case NEON_2RM_VRINTZ:
5574 case NEON_2RM_VRINTX:
5575 case NEON_2RM_VCVTAU:
5576 case NEON_2RM_VCVTAS:
5577 case NEON_2RM_VCVTNU:
5578 case NEON_2RM_VCVTNS:
5579 case NEON_2RM_VCVTPU:
5580 case NEON_2RM_VCVTPS:
5581 case NEON_2RM_VCVTMU:
5582 case NEON_2RM_VCVTMS:
5583 return true;
5584 default:
5585 return false;
5589 /* Each entry in this array has bit n set if the insn allows
5590 * size value n (otherwise it will UNDEF). Since unallocated
5591 * op values will have no bits set they always UNDEF.
5593 static const uint8_t neon_2rm_sizes[] = {
5594 [NEON_2RM_VREV64] = 0x7,
5595 [NEON_2RM_VREV32] = 0x3,
5596 [NEON_2RM_VREV16] = 0x1,
5597 [NEON_2RM_VPADDL] = 0x7,
5598 [NEON_2RM_VPADDL_U] = 0x7,
5599 [NEON_2RM_AESE] = 0x1,
5600 [NEON_2RM_AESMC] = 0x1,
5601 [NEON_2RM_VCLS] = 0x7,
5602 [NEON_2RM_VCLZ] = 0x7,
5603 [NEON_2RM_VCNT] = 0x1,
5604 [NEON_2RM_VMVN] = 0x1,
5605 [NEON_2RM_VPADAL] = 0x7,
5606 [NEON_2RM_VPADAL_U] = 0x7,
5607 [NEON_2RM_VQABS] = 0x7,
5608 [NEON_2RM_VQNEG] = 0x7,
5609 [NEON_2RM_VCGT0] = 0x7,
5610 [NEON_2RM_VCGE0] = 0x7,
5611 [NEON_2RM_VCEQ0] = 0x7,
5612 [NEON_2RM_VCLE0] = 0x7,
5613 [NEON_2RM_VCLT0] = 0x7,
5614 [NEON_2RM_SHA1H] = 0x4,
5615 [NEON_2RM_VABS] = 0x7,
5616 [NEON_2RM_VNEG] = 0x7,
5617 [NEON_2RM_VCGT0_F] = 0x4,
5618 [NEON_2RM_VCGE0_F] = 0x4,
5619 [NEON_2RM_VCEQ0_F] = 0x4,
5620 [NEON_2RM_VCLE0_F] = 0x4,
5621 [NEON_2RM_VCLT0_F] = 0x4,
5622 [NEON_2RM_VABS_F] = 0x4,
5623 [NEON_2RM_VNEG_F] = 0x4,
5624 [NEON_2RM_VSWP] = 0x1,
5625 [NEON_2RM_VTRN] = 0x7,
5626 [NEON_2RM_VUZP] = 0x7,
5627 [NEON_2RM_VZIP] = 0x7,
5628 [NEON_2RM_VMOVN] = 0x7,
5629 [NEON_2RM_VQMOVN] = 0x7,
5630 [NEON_2RM_VSHLL] = 0x7,
5631 [NEON_2RM_SHA1SU1] = 0x4,
5632 [NEON_2RM_VRINTN] = 0x4,
5633 [NEON_2RM_VRINTX] = 0x4,
5634 [NEON_2RM_VRINTA] = 0x4,
5635 [NEON_2RM_VRINTZ] = 0x4,
5636 [NEON_2RM_VCVT_F16_F32] = 0x2,
5637 [NEON_2RM_VRINTM] = 0x4,
5638 [NEON_2RM_VCVT_F32_F16] = 0x2,
5639 [NEON_2RM_VRINTP] = 0x4,
5640 [NEON_2RM_VCVTAU] = 0x4,
5641 [NEON_2RM_VCVTAS] = 0x4,
5642 [NEON_2RM_VCVTNU] = 0x4,
5643 [NEON_2RM_VCVTNS] = 0x4,
5644 [NEON_2RM_VCVTPU] = 0x4,
5645 [NEON_2RM_VCVTPS] = 0x4,
5646 [NEON_2RM_VCVTMU] = 0x4,
5647 [NEON_2RM_VCVTMS] = 0x4,
5648 [NEON_2RM_VRECPE] = 0x4,
5649 [NEON_2RM_VRSQRTE] = 0x4,
5650 [NEON_2RM_VRECPE_F] = 0x4,
5651 [NEON_2RM_VRSQRTE_F] = 0x4,
5652 [NEON_2RM_VCVT_FS] = 0x4,
5653 [NEON_2RM_VCVT_FU] = 0x4,
5654 [NEON_2RM_VCVT_SF] = 0x4,
5655 [NEON_2RM_VCVT_UF] = 0x4,
5659 /* Expand v8.1 simd helper. */
5660 static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
5661 int q, int rd, int rn, int rm)
5663 if (dc_isar_feature(aa32_rdm, s)) {
5664 int opr_sz = (1 + q) * 8;
5665 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
5666 vfp_reg_offset(1, rn),
5667 vfp_reg_offset(1, rm), cpu_env,
5668 opr_sz, opr_sz, 0, fn);
5669 return 0;
5670 }
5671 return 1;
5672 }
5674 /*
5675 * Expanders for VBitOps_VBIF, VBIT, VBSL.
5676 */
5677 static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
5678 {
5679 tcg_gen_xor_i64(rn, rn, rm);
5680 tcg_gen_and_i64(rn, rn, rd);
5681 tcg_gen_xor_i64(rd, rm, rn);
5682 }
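/* Sketch of the identity used by the expander above (illustrative): VBSL is
 * a bitwise select, result = (rd & rn) | (~rd & rm), computed branch-free as
 * rm ^ ((rn ^ rm) & rd). For example, with rd = 0xF0 (the select mask),
 * rn = 0xAA, rm = 0x55:
 *   rn ^ rm        = 0xFF
 *   (rn ^ rm) & rd = 0xF0
 *   rm ^ 0xF0      = 0xA5  ==  (0xAA & 0xF0) | (0x55 & 0x0F)
 * VBIT and VBIF below use the same xor/and/xor trick with the operands in
 * different roles (rm, or its complement, supplies the select mask).
 */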
5684 static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
5686 tcg_gen_xor_i64(rn, rn, rd);
5687 tcg_gen_and_i64(rn, rn, rm);
5688 tcg_gen_xor_i64(rd, rd, rn);
5691 static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
5693 tcg_gen_xor_i64(rn, rn, rd);
5694 tcg_gen_andc_i64(rn, rn, rm);
5695 tcg_gen_xor_i64(rd, rd, rn);
5698 static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
5700 tcg_gen_xor_vec(vece, rn, rn, rm);
5701 tcg_gen_and_vec(vece, rn, rn, rd);
5702 tcg_gen_xor_vec(vece, rd, rm, rn);
5705 static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
5707 tcg_gen_xor_vec(vece, rn, rn, rd);
5708 tcg_gen_and_vec(vece, rn, rn, rm);
5709 tcg_gen_xor_vec(vece, rd, rd, rn);
5712 static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
5714 tcg_gen_xor_vec(vece, rn, rn, rd);
5715 tcg_gen_andc_vec(vece, rn, rn, rm);
5716 tcg_gen_xor_vec(vece, rd, rd, rn);
5719 const GVecGen3 bsl_op = {
5720 .fni8 = gen_bsl_i64,
5721 .fniv = gen_bsl_vec,
5722 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
5723 .load_dest = true
5726 const GVecGen3 bit_op = {
5727 .fni8 = gen_bit_i64,
5728 .fniv = gen_bit_vec,
5729 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
5730 .load_dest = true
5733 const GVecGen3 bif_op = {
5734 .fni8 = gen_bif_i64,
5735 .fniv = gen_bif_vec,
5736 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
5737 .load_dest = true
5740 static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5742 tcg_gen_vec_sar8i_i64(a, a, shift);
5743 tcg_gen_vec_add8_i64(d, d, a);
5746 static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5748 tcg_gen_vec_sar16i_i64(a, a, shift);
5749 tcg_gen_vec_add16_i64(d, d, a);
5752 static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
5754 tcg_gen_sari_i32(a, a, shift);
5755 tcg_gen_add_i32(d, d, a);
5758 static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5760 tcg_gen_sari_i64(a, a, shift);
5761 tcg_gen_add_i64(d, d, a);
5764 static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
5766 tcg_gen_sari_vec(vece, a, a, sh);
5767 tcg_gen_add_vec(vece, d, d, a);
5770 const GVecGen2i ssra_op[4] = {
5771 { .fni8 = gen_ssra8_i64,
5772 .fniv = gen_ssra_vec,
5773 .load_dest = true,
5774 .opc = INDEX_op_sari_vec,
5775 .vece = MO_8 },
5776 { .fni8 = gen_ssra16_i64,
5777 .fniv = gen_ssra_vec,
5778 .load_dest = true,
5779 .opc = INDEX_op_sari_vec,
5780 .vece = MO_16 },
5781 { .fni4 = gen_ssra32_i32,
5782 .fniv = gen_ssra_vec,
5783 .load_dest = true,
5784 .opc = INDEX_op_sari_vec,
5785 .vece = MO_32 },
5786 { .fni8 = gen_ssra64_i64,
5787 .fniv = gen_ssra_vec,
5788 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
5789 .load_dest = true,
5790 .opc = INDEX_op_sari_vec,
5791 .vece = MO_64 },
5794 static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5796 tcg_gen_vec_shr8i_i64(a, a, shift);
5797 tcg_gen_vec_add8_i64(d, d, a);
5800 static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5802 tcg_gen_vec_shr16i_i64(a, a, shift);
5803 tcg_gen_vec_add16_i64(d, d, a);
5806 static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
5808 tcg_gen_shri_i32(a, a, shift);
5809 tcg_gen_add_i32(d, d, a);
5812 static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5814 tcg_gen_shri_i64(a, a, shift);
5815 tcg_gen_add_i64(d, d, a);
5818 static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
5820 tcg_gen_shri_vec(vece, a, a, sh);
5821 tcg_gen_add_vec(vece, d, d, a);
5824 const GVecGen2i usra_op[4] = {
5825 { .fni8 = gen_usra8_i64,
5826 .fniv = gen_usra_vec,
5827 .load_dest = true,
5828 .opc = INDEX_op_shri_vec,
5829 .vece = MO_8, },
5830 { .fni8 = gen_usra16_i64,
5831 .fniv = gen_usra_vec,
5832 .load_dest = true,
5833 .opc = INDEX_op_shri_vec,
5834 .vece = MO_16, },
5835 { .fni4 = gen_usra32_i32,
5836 .fniv = gen_usra_vec,
5837 .load_dest = true,
5838 .opc = INDEX_op_shri_vec,
5839 .vece = MO_32, },
5840 { .fni8 = gen_usra64_i64,
5841 .fniv = gen_usra_vec,
5842 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
5843 .load_dest = true,
5844 .opc = INDEX_op_shri_vec,
5845 .vece = MO_64, },
5848 static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5850 uint64_t mask = dup_const(MO_8, 0xff >> shift);
5851 TCGv_i64 t = tcg_temp_new_i64();
5853 tcg_gen_shri_i64(t, a, shift);
5854 tcg_gen_andi_i64(t, t, mask);
5855 tcg_gen_andi_i64(d, d, ~mask);
5856 tcg_gen_or_i64(d, d, t);
5857 tcg_temp_free_i64(t);
5860 static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5862 uint64_t mask = dup_const(MO_16, 0xffff >> shift);
5863 TCGv_i64 t = tcg_temp_new_i64();
5865 tcg_gen_shri_i64(t, a, shift);
5866 tcg_gen_andi_i64(t, t, mask);
5867 tcg_gen_andi_i64(d, d, ~mask);
5868 tcg_gen_or_i64(d, d, t);
5869 tcg_temp_free_i64(t);
5872 static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
5874 tcg_gen_shri_i32(a, a, shift);
5875 tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
5878 static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5880 tcg_gen_shri_i64(a, a, shift);
5881 tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
5884 static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
5886 if (sh == 0) {
5887 tcg_gen_mov_vec(d, a);
5888 } else {
5889 TCGv_vec t = tcg_temp_new_vec_matching(d);
5890 TCGv_vec m = tcg_temp_new_vec_matching(d);
5892 tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
5893 tcg_gen_shri_vec(vece, t, a, sh);
5894 tcg_gen_and_vec(vece, d, d, m);
5895 tcg_gen_or_vec(vece, d, d, t);
5897 tcg_temp_free_vec(t);
5898 tcg_temp_free_vec(m);
5899 }
5900 }
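/* Worked example for gen_shr_ins_vec() above (illustrative): with
 * vece = MO_8 and sh = 3, MAKE_64BIT_MASK((8 << vece) - sh, sh) sets the top
 * three bits of an 8-bit element and dupi replicates it, so m = 0xE0 per
 * byte. Each result byte is then (d & 0xE0) | (a >> 3); e.g. d = 0xFF,
 * a = 0x80 gives 0xE0 | 0x10 = 0xF0, i.e. the top "shift" bits of the
 * destination are preserved and the shifted source is inserted below them,
 * which is the VSRI behaviour.
 */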
5902 const GVecGen2i sri_op[4] = {
5903 { .fni8 = gen_shr8_ins_i64,
5904 .fniv = gen_shr_ins_vec,
5905 .load_dest = true,
5906 .opc = INDEX_op_shri_vec,
5907 .vece = MO_8 },
5908 { .fni8 = gen_shr16_ins_i64,
5909 .fniv = gen_shr_ins_vec,
5910 .load_dest = true,
5911 .opc = INDEX_op_shri_vec,
5912 .vece = MO_16 },
5913 { .fni4 = gen_shr32_ins_i32,
5914 .fniv = gen_shr_ins_vec,
5915 .load_dest = true,
5916 .opc = INDEX_op_shri_vec,
5917 .vece = MO_32 },
5918 { .fni8 = gen_shr64_ins_i64,
5919 .fniv = gen_shr_ins_vec,
5920 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
5921 .load_dest = true,
5922 .opc = INDEX_op_shri_vec,
5923 .vece = MO_64 },
5926 static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5928 uint64_t mask = dup_const(MO_8, 0xff << shift);
5929 TCGv_i64 t = tcg_temp_new_i64();
5931 tcg_gen_shli_i64(t, a, shift);
5932 tcg_gen_andi_i64(t, t, mask);
5933 tcg_gen_andi_i64(d, d, ~mask);
5934 tcg_gen_or_i64(d, d, t);
5935 tcg_temp_free_i64(t);
5938 static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5940 uint64_t mask = dup_const(MO_16, 0xffff << shift);
5941 TCGv_i64 t = tcg_temp_new_i64();
5943 tcg_gen_shli_i64(t, a, shift);
5944 tcg_gen_andi_i64(t, t, mask);
5945 tcg_gen_andi_i64(d, d, ~mask);
5946 tcg_gen_or_i64(d, d, t);
5947 tcg_temp_free_i64(t);
5950 static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
5952 tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
5955 static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5957 tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
5960 static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
5962 if (sh == 0) {
5963 tcg_gen_mov_vec(d, a);
5964 } else {
5965 TCGv_vec t = tcg_temp_new_vec_matching(d);
5966 TCGv_vec m = tcg_temp_new_vec_matching(d);
5968 tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
5969 tcg_gen_shli_vec(vece, t, a, sh);
5970 tcg_gen_and_vec(vece, d, d, m);
5971 tcg_gen_or_vec(vece, d, d, t);
5973 tcg_temp_free_vec(t);
5974 tcg_temp_free_vec(m);
5978 const GVecGen2i sli_op[4] = {
5979 { .fni8 = gen_shl8_ins_i64,
5980 .fniv = gen_shl_ins_vec,
5981 .load_dest = true,
5982 .opc = INDEX_op_shli_vec,
5983 .vece = MO_8 },
5984 { .fni8 = gen_shl16_ins_i64,
5985 .fniv = gen_shl_ins_vec,
5986 .load_dest = true,
5987 .opc = INDEX_op_shli_vec,
5988 .vece = MO_16 },
5989 { .fni4 = gen_shl32_ins_i32,
5990 .fniv = gen_shl_ins_vec,
5991 .load_dest = true,
5992 .opc = INDEX_op_shli_vec,
5993 .vece = MO_32 },
5994 { .fni8 = gen_shl64_ins_i64,
5995 .fniv = gen_shl_ins_vec,
5996 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
5997 .load_dest = true,
5998 .opc = INDEX_op_shli_vec,
5999 .vece = MO_64 },
6002 static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
6004 gen_helper_neon_mul_u8(a, a, b);
6005 gen_helper_neon_add_u8(d, d, a);
6008 static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
6010 gen_helper_neon_mul_u8(a, a, b);
6011 gen_helper_neon_sub_u8(d, d, a);
6014 static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
6016 gen_helper_neon_mul_u16(a, a, b);
6017 gen_helper_neon_add_u16(d, d, a);
6020 static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
6022 gen_helper_neon_mul_u16(a, a, b);
6023 gen_helper_neon_sub_u16(d, d, a);
6026 static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
6028 tcg_gen_mul_i32(a, a, b);
6029 tcg_gen_add_i32(d, d, a);
6032 static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
6034 tcg_gen_mul_i32(a, a, b);
6035 tcg_gen_sub_i32(d, d, a);
6038 static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
6040 tcg_gen_mul_i64(a, a, b);
6041 tcg_gen_add_i64(d, d, a);
6044 static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
6046 tcg_gen_mul_i64(a, a, b);
6047 tcg_gen_sub_i64(d, d, a);
6050 static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
6052 tcg_gen_mul_vec(vece, a, a, b);
6053 tcg_gen_add_vec(vece, d, d, a);
6056 static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
6058 tcg_gen_mul_vec(vece, a, a, b);
6059 tcg_gen_sub_vec(vece, d, d, a);
6062 /* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
6063 * these tables are shared with AArch64 which does support them.
6065 const GVecGen3 mla_op[4] = {
6066 { .fni4 = gen_mla8_i32,
6067 .fniv = gen_mla_vec,
6068 .opc = INDEX_op_mul_vec,
6069 .load_dest = true,
6070 .vece = MO_8 },
6071 { .fni4 = gen_mla16_i32,
6072 .fniv = gen_mla_vec,
6073 .opc = INDEX_op_mul_vec,
6074 .load_dest = true,
6075 .vece = MO_16 },
6076 { .fni4 = gen_mla32_i32,
6077 .fniv = gen_mla_vec,
6078 .opc = INDEX_op_mul_vec,
6079 .load_dest = true,
6080 .vece = MO_32 },
6081 { .fni8 = gen_mla64_i64,
6082 .fniv = gen_mla_vec,
6083 .opc = INDEX_op_mul_vec,
6084 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
6085 .load_dest = true,
6086 .vece = MO_64 },
6089 const GVecGen3 mls_op[4] = {
6090 { .fni4 = gen_mls8_i32,
6091 .fniv = gen_mls_vec,
6092 .opc = INDEX_op_mul_vec,
6093 .load_dest = true,
6094 .vece = MO_8 },
6095 { .fni4 = gen_mls16_i32,
6096 .fniv = gen_mls_vec,
6097 .opc = INDEX_op_mul_vec,
6098 .load_dest = true,
6099 .vece = MO_16 },
6100 { .fni4 = gen_mls32_i32,
6101 .fniv = gen_mls_vec,
6102 .opc = INDEX_op_mul_vec,
6103 .load_dest = true,
6104 .vece = MO_32 },
6105 { .fni8 = gen_mls64_i64,
6106 .fniv = gen_mls_vec,
6107 .opc = INDEX_op_mul_vec,
6108 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
6109 .load_dest = true,
6110 .vece = MO_64 },
6113 /* CMTST : test is "if (X & Y != 0)". */
6114 static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
6115 {
6116 tcg_gen_and_i32(d, a, b);
6117 tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
6118 tcg_gen_neg_i32(d, d);
6119 }
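/* Worked example for gen_cmtst_i32() above (illustrative): CMTST yields an
 * all-ones lane when the operands share any set bit and all-zeroes
 * otherwise:
 *   a = 0x0000000F, b = 0x00000010 -> a & b = 0 -> setcond 0 -> d = 0x00000000
 *   a = 0x0000000F, b = 0x00000011 -> a & b != 0 -> setcond 1 -> d = 0xFFFFFFFF
 * The negation turns the 0/1 comparison result into a lane mask.
 */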
6121 void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
6123 tcg_gen_and_i64(d, a, b);
6124 tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
6125 tcg_gen_neg_i64(d, d);
6128 static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
6130 tcg_gen_and_vec(vece, d, a, b);
6131 tcg_gen_dupi_vec(vece, a, 0);
6132 tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
6135 const GVecGen3 cmtst_op[4] = {
6136 { .fni4 = gen_helper_neon_tst_u8,
6137 .fniv = gen_cmtst_vec,
6138 .vece = MO_8 },
6139 { .fni4 = gen_helper_neon_tst_u16,
6140 .fniv = gen_cmtst_vec,
6141 .vece = MO_16 },
6142 { .fni4 = gen_cmtst_i32,
6143 .fniv = gen_cmtst_vec,
6144 .vece = MO_32 },
6145 { .fni8 = gen_cmtst_i64,
6146 .fniv = gen_cmtst_vec,
6147 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
6148 .vece = MO_64 },
6151 /* Translate a NEON data processing instruction. Return nonzero if the
6152 instruction is invalid.
6153 We process data in a mixture of 32-bit and 64-bit chunks.
6154 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
6156 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
6158 int op;
6159 int q;
6160 int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
6161 int size;
6162 int shift;
6163 int pass;
6164 int count;
6165 int pairwise;
6166 int u;
6167 int vec_size;
6168 uint32_t imm;
6169 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
6170 TCGv_ptr ptr1, ptr2, ptr3;
6171 TCGv_i64 tmp64;
6173 /* FIXME: this access check should not take precedence over UNDEF
6174 * for invalid encodings; we will generate incorrect syndrome information
6175 * for attempts to execute invalid vfp/neon encodings with FP disabled.
6177 if (s->fp_excp_el) {
6178 gen_exception_insn(s, 4, EXCP_UDEF,
6179 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
6180 return 0;
6183 if (!s->vfp_enabled)
6184 return 1;
6185 q = (insn & (1 << 6)) != 0;
6186 u = (insn >> 24) & 1;
6187 VFP_DREG_D(rd, insn);
6188 VFP_DREG_N(rn, insn);
6189 VFP_DREG_M(rm, insn);
6190 size = (insn >> 20) & 3;
6191 vec_size = q ? 16 : 8;
6192 rd_ofs = neon_reg_offset(rd, 0);
6193 rn_ofs = neon_reg_offset(rn, 0);
6194 rm_ofs = neon_reg_offset(rm, 0);
6196 if ((insn & (1 << 23)) == 0) {
6197 /* Three register same length. */
6198 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
6199 /* Catch invalid op and bad size combinations: UNDEF */
6200 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
6201 return 1;
6203 /* All insns of this form UNDEF for either this condition or the
6204 * superset of cases "Q==1"; we catch the latter later.
6206 if (q && ((rd | rn | rm) & 1)) {
6207 return 1;
6209 switch (op) {
6210 case NEON_3R_SHA:
6211 /* The SHA-1/SHA-256 3-register instructions require special
6212 * treatment here, as their size field is overloaded as an
6213 * op type selector, and they all consume their input in a
6214 * single pass.
6216 if (!q) {
6217 return 1;
6219 if (!u) { /* SHA-1 */
6220 if (!dc_isar_feature(aa32_sha1, s)) {
6221 return 1;
6223 ptr1 = vfp_reg_ptr(true, rd);
6224 ptr2 = vfp_reg_ptr(true, rn);
6225 ptr3 = vfp_reg_ptr(true, rm);
6226 tmp4 = tcg_const_i32(size);
6227 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
6228 tcg_temp_free_i32(tmp4);
6229 } else { /* SHA-256 */
6230 if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
6231 return 1;
6233 ptr1 = vfp_reg_ptr(true, rd);
6234 ptr2 = vfp_reg_ptr(true, rn);
6235 ptr3 = vfp_reg_ptr(true, rm);
6236 switch (size) {
6237 case 0:
6238 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
6239 break;
6240 case 1:
6241 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
6242 break;
6243 case 2:
6244 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
6245 break;
6248 tcg_temp_free_ptr(ptr1);
6249 tcg_temp_free_ptr(ptr2);
6250 tcg_temp_free_ptr(ptr3);
6251 return 0;
6253 case NEON_3R_VPADD_VQRDMLAH:
6254 if (!u) {
6255 break; /* VPADD */
6257 /* VQRDMLAH */
6258 switch (size) {
6259 case 1:
6260 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
6261 q, rd, rn, rm);
6262 case 2:
6263 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
6264 q, rd, rn, rm);
6266 return 1;
6268 case NEON_3R_VFM_VQRDMLSH:
6269 if (!u) {
6270 /* VFM, VFMS */
6271 if (size == 1) {
6272 return 1;
6274 break;
6276 /* VQRDMLSH */
6277 switch (size) {
6278 case 1:
6279 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
6280 q, rd, rn, rm);
6281 case 2:
6282 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
6283 q, rd, rn, rm);
6285 return 1;
6287 case NEON_3R_LOGIC: /* Logic ops. */
6288 switch ((u << 2) | size) {
6289 case 0: /* VAND */
6290 tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
6291 vec_size, vec_size);
6292 break;
6293 case 1: /* VBIC */
6294 tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
6295 vec_size, vec_size);
6296 break;
6297 case 2: /* VORR */
6298 tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
6299 vec_size, vec_size);
6300 break;
6301 case 3: /* VORN */
6302 tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
6303 vec_size, vec_size);
6304 break;
6305 case 4: /* VEOR */
6306 tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
6307 vec_size, vec_size);
6308 break;
6309 case 5: /* VBSL */
6310 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
6311 vec_size, vec_size, &bsl_op);
6312 break;
6313 case 6: /* VBIT */
6314 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
6315 vec_size, vec_size, &bit_op);
6316 break;
6317 case 7: /* VBIF */
6318 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
6319 vec_size, vec_size, &bif_op);
6320 break;
6322 return 0;
6324 case NEON_3R_VADD_VSUB:
6325 if (u) {
6326 tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
6327 vec_size, vec_size);
6328 } else {
6329 tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
6330 vec_size, vec_size);
6332 return 0;
6334 case NEON_3R_VMUL: /* VMUL */
6335 if (u) {
6336 /* Polynomial case allows only P8 and is handled below. */
6337 if (size != 0) {
6338 return 1;
6340 } else {
6341 tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
6342 vec_size, vec_size);
6343 return 0;
6345 break;
6347 case NEON_3R_VML: /* VMLA, VMLS */
6348 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
6349 u ? &mls_op[size] : &mla_op[size]);
6350 return 0;
6352 case NEON_3R_VTST_VCEQ:
6353 if (u) { /* VCEQ */
6354 tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
6355 vec_size, vec_size);
6356 } else { /* VTST */
6357 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
6358 vec_size, vec_size, &cmtst_op[size]);
6360 return 0;
6362 case NEON_3R_VCGT:
6363 tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
6364 rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
6365 return 0;
6367 case NEON_3R_VCGE:
6368 tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
6369 rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
6370 return 0;
6372 case NEON_3R_VMAX:
6373 if (u) {
6374 tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
6375 vec_size, vec_size);
6376 } else {
6377 tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
6378 vec_size, vec_size);
6380 return 0;
6381 case NEON_3R_VMIN:
6382 if (u) {
6383 tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
6384 vec_size, vec_size);
6385 } else {
6386 tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
6387 vec_size, vec_size);
6389 return 0;
6392 if (size == 3) {
6393 /* 64-bit element instructions. */
6394 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6395 neon_load_reg64(cpu_V0, rn + pass);
6396 neon_load_reg64(cpu_V1, rm + pass);
6397 switch (op) {
6398 case NEON_3R_VQADD:
6399 if (u) {
6400 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
6401 cpu_V0, cpu_V1);
6402 } else {
6403 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
6404 cpu_V0, cpu_V1);
6406 break;
6407 case NEON_3R_VQSUB:
6408 if (u) {
6409 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
6410 cpu_V0, cpu_V1);
6411 } else {
6412 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
6413 cpu_V0, cpu_V1);
6415 break;
6416 case NEON_3R_VSHL:
6417 if (u) {
6418 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
6419 } else {
6420 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
6422 break;
6423 case NEON_3R_VQSHL:
6424 if (u) {
6425 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
6426 cpu_V1, cpu_V0);
6427 } else {
6428 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
6429 cpu_V1, cpu_V0);
6431 break;
6432 case NEON_3R_VRSHL:
6433 if (u) {
6434 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
6435 } else {
6436 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
6438 break;
6439 case NEON_3R_VQRSHL:
6440 if (u) {
6441 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
6442 cpu_V1, cpu_V0);
6443 } else {
6444 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
6445 cpu_V1, cpu_V0);
6447 break;
6448 default:
6449 abort();
6451 neon_store_reg64(cpu_V0, rd + pass);
6453 return 0;
6455 pairwise = 0;
6456 switch (op) {
6457 case NEON_3R_VSHL:
6458 case NEON_3R_VQSHL:
6459 case NEON_3R_VRSHL:
6460 case NEON_3R_VQRSHL:
6462 int rtmp;
6463 /* Shift instruction operands are reversed. */
6464 rtmp = rn;
6465 rn = rm;
6466 rm = rtmp;
6468 break;
6469 case NEON_3R_VPADD_VQRDMLAH:
6470 case NEON_3R_VPMAX:
6471 case NEON_3R_VPMIN:
6472 pairwise = 1;
6473 break;
6474 case NEON_3R_FLOAT_ARITH:
6475 pairwise = (u && size < 2); /* if VPADD (float) */
6476 break;
6477 case NEON_3R_FLOAT_MINMAX:
6478 pairwise = u; /* if VPMIN/VPMAX (float) */
6479 break;
6480 case NEON_3R_FLOAT_CMP:
6481 if (!u && size) {
6482 /* no encoding for U=0 C=1x */
6483 return 1;
6485 break;
6486 case NEON_3R_FLOAT_ACMP:
6487 if (!u) {
6488 return 1;
6490 break;
6491 case NEON_3R_FLOAT_MISC:
6492 /* VMAXNM/VMINNM in ARMv8 */
6493 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
6494 return 1;
6496 break;
6497 case NEON_3R_VFM_VQRDMLSH:
6498 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
6499 return 1;
6501 break;
6502 default:
6503 break;
6506 if (pairwise && q) {
6507 /* All the pairwise insns UNDEF if Q is set */
6508 return 1;
6511 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6513 if (pairwise) {
6514 /* Pairwise. */
6515 if (pass < 1) {
6516 tmp = neon_load_reg(rn, 0);
6517 tmp2 = neon_load_reg(rn, 1);
6518 } else {
6519 tmp = neon_load_reg(rm, 0);
6520 tmp2 = neon_load_reg(rm, 1);
6522 } else {
6523 /* Elementwise. */
6524 tmp = neon_load_reg(rn, pass);
6525 tmp2 = neon_load_reg(rm, pass);
6527 switch (op) {
6528 case NEON_3R_VHADD:
6529 GEN_NEON_INTEGER_OP(hadd);
6530 break;
6531 case NEON_3R_VQADD:
6532 GEN_NEON_INTEGER_OP_ENV(qadd);
6533 break;
6534 case NEON_3R_VRHADD:
6535 GEN_NEON_INTEGER_OP(rhadd);
6536 break;
6537 case NEON_3R_VHSUB:
6538 GEN_NEON_INTEGER_OP(hsub);
6539 break;
6540 case NEON_3R_VQSUB:
6541 GEN_NEON_INTEGER_OP_ENV(qsub);
6542 break;
6543 case NEON_3R_VSHL:
6544 GEN_NEON_INTEGER_OP(shl);
6545 break;
6546 case NEON_3R_VQSHL:
6547 GEN_NEON_INTEGER_OP_ENV(qshl);
6548 break;
6549 case NEON_3R_VRSHL:
6550 GEN_NEON_INTEGER_OP(rshl);
6551 break;
6552 case NEON_3R_VQRSHL:
6553 GEN_NEON_INTEGER_OP_ENV(qrshl);
6554 break;
6555 case NEON_3R_VABD:
6556 GEN_NEON_INTEGER_OP(abd);
6557 break;
6558 case NEON_3R_VABA:
6559 GEN_NEON_INTEGER_OP(abd);
6560 tcg_temp_free_i32(tmp2);
6561 tmp2 = neon_load_reg(rd, pass);
6562 gen_neon_add(size, tmp, tmp2);
6563 break;
6564 case NEON_3R_VMUL:
6565 /* VMUL.P8; other cases already eliminated. */
6566 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
6567 break;
6568 case NEON_3R_VPMAX:
6569 GEN_NEON_INTEGER_OP(pmax);
6570 break;
6571 case NEON_3R_VPMIN:
6572 GEN_NEON_INTEGER_OP(pmin);
6573 break;
6574 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
6575 if (!u) { /* VQDMULH */
6576 switch (size) {
6577 case 1:
6578 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6579 break;
6580 case 2:
6581 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6582 break;
6583 default: abort();
6585 } else { /* VQRDMULH */
6586 switch (size) {
6587 case 1:
6588 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6589 break;
6590 case 2:
6591 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6592 break;
6593 default: abort();
6596 break;
6597 case NEON_3R_VPADD_VQRDMLAH:
6598 switch (size) {
6599 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
6600 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
6601 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
6602 default: abort();
6604 break;
6605 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
6607 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6608 switch ((u << 2) | size) {
6609 case 0: /* VADD */
6610 case 4: /* VPADD */
6611 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6612 break;
6613 case 2: /* VSUB */
6614 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
6615 break;
6616 case 6: /* VABD */
6617 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
6618 break;
6619 default:
6620 abort();
6622 tcg_temp_free_ptr(fpstatus);
6623 break;
6625 case NEON_3R_FLOAT_MULTIPLY:
6627 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6628 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6629 if (!u) {
6630 tcg_temp_free_i32(tmp2);
6631 tmp2 = neon_load_reg(rd, pass);
6632 if (size == 0) {
6633 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6634 } else {
6635 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6638 tcg_temp_free_ptr(fpstatus);
6639 break;
6641 case NEON_3R_FLOAT_CMP:
6643 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6644 if (!u) {
6645 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6646 } else {
6647 if (size == 0) {
6648 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6649 } else {
6650 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6653 tcg_temp_free_ptr(fpstatus);
6654 break;
6656 case NEON_3R_FLOAT_ACMP:
6658 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6659 if (size == 0) {
6660 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
6661 } else {
6662 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
6664 tcg_temp_free_ptr(fpstatus);
6665 break;
6667 case NEON_3R_FLOAT_MINMAX:
6669 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6670 if (size == 0) {
6671 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
6672 } else {
6673 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
6675 tcg_temp_free_ptr(fpstatus);
6676 break;
6678 case NEON_3R_FLOAT_MISC:
6679 if (u) {
6680 /* VMAXNM/VMINNM */
6681 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6682 if (size == 0) {
6683 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
6684 } else {
6685 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
6687 tcg_temp_free_ptr(fpstatus);
6688 } else {
6689 if (size == 0) {
6690 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6691 } else {
6692 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6695 break;
6696 case NEON_3R_VFM_VQRDMLSH:
6698 /* VFMA, VFMS: fused multiply-add */
6699 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6700 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6701 if (size) {
6702 /* VFMS */
6703 gen_helper_vfp_negs(tmp, tmp);
6705 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6706 tcg_temp_free_i32(tmp3);
6707 tcg_temp_free_ptr(fpstatus);
6708 break;
6710 default:
6711 abort();
6713 tcg_temp_free_i32(tmp2);
6715 /* Save the result. For elementwise operations we can put it
6716 straight into the destination register. For pairwise operations
6717 we have to be careful to avoid clobbering the source operands. */
6718 if (pairwise && rd == rm) {
6719 neon_store_scratch(pass, tmp);
6720 } else {
6721 neon_store_reg(rd, pass, tmp);
6724 } /* for pass */
6725 if (pairwise && rd == rm) {
6726 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6727 tmp = neon_load_scratch(pass);
6728 neon_store_reg(rd, pass, tmp);
6731 /* End of 3 register same size operations. */
6732 } else if (insn & (1 << 4)) {
6733 if ((insn & 0x00380080) != 0) {
6734 /* Two registers and shift. */
6735 op = (insn >> 8) & 0xf;
6736 if (insn & (1 << 7)) {
6737 /* 64-bit shift. */
6738 if (op > 7) {
6739 return 1;
6741 size = 3;
6742 } else {
6743 size = 2;
6744 while ((insn & (1 << (size + 19))) == 0)
6745 size--;
6747 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
6748 if (op < 8) {
6749 /* Shift by immediate:
6750 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
6751 if (q && ((rd | rm) & 1)) {
6752 return 1;
6754 if (!u && (op == 4 || op == 6)) {
6755 return 1;
6757 /* Right shifts are encoded as N - shift, where N is the
6758 element size in bits. */
6759 if (op <= 4) {
6760 shift = shift - (1 << (size + 3));
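/*
 * Worked example (illustrative, not from the original source): for
 * 16-bit elements (size == 1) an encoded shift field of 12 means a
 * right shift by 16 - 12 = 4.  After the subtraction above, shift holds
 * -4, and the VSHR/VSRA/VSRI cases below negate it back to the positive
 * shift count.
 */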
6763 switch (op) {
6764 case 0: /* VSHR */
6765 /* Right shift comes here negative. */
6766 shift = -shift;
6767 /* Shifts larger than the element size are architecturally
6768 * valid. An unsigned shift results in all zeros; a signed
6769 * shift results in all sign bits.
6771 if (!u) {
6772 tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
6773 MIN(shift, (8 << size) - 1),
6774 vec_size, vec_size);
6775 } else if (shift >= 8 << size) {
6776 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
6777 } else {
6778 tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
6779 vec_size, vec_size);
6781 return 0;
6783 case 1: /* VSRA */
6784 /* Right shift comes here negative. */
6785 shift = -shift;
6786 /* Shifts larger than the element size are architecturally
6787 * valid. An unsigned shift results in all zeros; a signed
6788 * shift results in all sign bits.
6790 if (!u) {
6791 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
6792 MIN(shift, (8 << size) - 1),
6793 &ssra_op[size]);
6794 } else if (shift >= 8 << size) {
6795 /* rd += 0 */
6796 } else {
6797 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
6798 shift, &usra_op[size]);
6800 return 0;
6802 case 4: /* VSRI */
6803 if (!u) {
6804 return 1;
6806 /* Right shift comes here negative. */
6807 shift = -shift;
6808 /* Shift out of range leaves destination unchanged. */
6809 if (shift < 8 << size) {
6810 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
6811 shift, &sri_op[size]);
6813 return 0;
6815 case 5: /* VSHL, VSLI */
6816 if (u) { /* VSLI */
6817 /* Shift out of range leaves destination unchanged. */
6818 if (shift < 8 << size) {
6819 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
6820 vec_size, shift, &sli_op[size]);
6822 } else { /* VSHL */
6823 /* Shifts larger than the element size are
6824 * architecturally valid and result in zero.
6826 if (shift >= 8 << size) {
6827 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
6828 } else {
6829 tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
6830 vec_size, vec_size);
6833 return 0;
6836 if (size == 3) {
6837 count = q + 1;
6838 } else {
6839 count = q ? 4 : 2;
6842 /* To avoid excessive duplication of ops we implement shift
6843 * by immediate using the variable shift operations.
6845 imm = dup_const(size, shift);
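/*
 * Illustrative sketch (example values assumed, names per the TCG API):
 * dup_const() replicates the per-element shift count across a 64-bit
 * constant, so one variable-shift helper call handles every lane with
 * the same (possibly negative, i.e. rightward) shift.  Conceptually,
 * for 8-bit elements:
 *
 *     uint64_t dup8(uint8_t v) { return v * 0x0101010101010101ULL; }
 *
 *     dup_const(MO_8, 3)  == 0x0303030303030303
 *     dup_const(MO_8, -2) == 0xfefefefefefefefe   (right shift by 2)
 */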
6847 for (pass = 0; pass < count; pass++) {
6848 if (size == 3) {
6849 neon_load_reg64(cpu_V0, rm + pass);
6850 tcg_gen_movi_i64(cpu_V1, imm);
6851 switch (op) {
6852 case 2: /* VRSHR */
6853 case 3: /* VRSRA */
6854 if (u)
6855 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
6856 else
6857 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
6858 break;
6859 case 6: /* VQSHLU */
6860 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6861 cpu_V0, cpu_V1);
6862 break;
6863 case 7: /* VQSHL */
6864 if (u) {
6865 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
6866 cpu_V0, cpu_V1);
6867 } else {
6868 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
6869 cpu_V0, cpu_V1);
6871 break;
6872 default:
6873 g_assert_not_reached();
6875 if (op == 3) {
6876 /* Accumulate. */
6877 neon_load_reg64(cpu_V1, rd + pass);
6878 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6880 neon_store_reg64(cpu_V0, rd + pass);
6881 } else { /* size < 3 */
6882 /* Operands in tmp and tmp2. */
6883 tmp = neon_load_reg(rm, pass);
6884 tmp2 = tcg_temp_new_i32();
6885 tcg_gen_movi_i32(tmp2, imm);
6886 switch (op) {
6887 case 2: /* VRSHR */
6888 case 3: /* VRSRA */
6889 GEN_NEON_INTEGER_OP(rshl);
6890 break;
6891 case 6: /* VQSHLU */
6892 switch (size) {
6893 case 0:
6894 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6895 tmp, tmp2);
6896 break;
6897 case 1:
6898 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6899 tmp, tmp2);
6900 break;
6901 case 2:
6902 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6903 tmp, tmp2);
6904 break;
6905 default:
6906 abort();
6908 break;
6909 case 7: /* VQSHL */
6910 GEN_NEON_INTEGER_OP_ENV(qshl);
6911 break;
6912 default:
6913 g_assert_not_reached();
6915 tcg_temp_free_i32(tmp2);
6917 if (op == 3) {
6918 /* Accumulate. */
6919 tmp2 = neon_load_reg(rd, pass);
6920 gen_neon_add(size, tmp, tmp2);
6921 tcg_temp_free_i32(tmp2);
6923 neon_store_reg(rd, pass, tmp);
6925 } /* for pass */
6926 } else if (op < 10) {
6927 /* Shift by immediate and narrow:
6928 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
6929 int input_unsigned = (op == 8) ? !u : u;
6930 if (rm & 1) {
6931 return 1;
6933 shift = shift - (1 << (size + 3));
6934 size++;
6935 if (size == 3) {
6936 tmp64 = tcg_const_i64(shift);
6937 neon_load_reg64(cpu_V0, rm);
6938 neon_load_reg64(cpu_V1, rm + 1);
6939 for (pass = 0; pass < 2; pass++) {
6940 TCGv_i64 in;
6941 if (pass == 0) {
6942 in = cpu_V0;
6943 } else {
6944 in = cpu_V1;
6946 if (q) {
6947 if (input_unsigned) {
6948 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
6949 } else {
6950 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
6952 } else {
6953 if (input_unsigned) {
6954 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
6955 } else {
6956 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
6959 tmp = tcg_temp_new_i32();
6960 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6961 neon_store_reg(rd, pass, tmp);
6962 } /* for pass */
6963 tcg_temp_free_i64(tmp64);
6964 } else {
6965 if (size == 1) {
6966 imm = (uint16_t)shift;
6967 imm |= imm << 16;
6968 } else {
6969 /* size == 2 */
6970 imm = (uint32_t)shift;
6972 tmp2 = tcg_const_i32(imm);
6973 tmp4 = neon_load_reg(rm + 1, 0);
6974 tmp5 = neon_load_reg(rm + 1, 1);
6975 for (pass = 0; pass < 2; pass++) {
6976 if (pass == 0) {
6977 tmp = neon_load_reg(rm, 0);
6978 } else {
6979 tmp = tmp4;
6981 gen_neon_shift_narrow(size, tmp, tmp2, q,
6982 input_unsigned);
6983 if (pass == 0) {
6984 tmp3 = neon_load_reg(rm, 1);
6985 } else {
6986 tmp3 = tmp5;
6988 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6989 input_unsigned);
6990 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
6991 tcg_temp_free_i32(tmp);
6992 tcg_temp_free_i32(tmp3);
6993 tmp = tcg_temp_new_i32();
6994 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6995 neon_store_reg(rd, pass, tmp);
6996 } /* for pass */
6997 tcg_temp_free_i32(tmp2);
6999 } else if (op == 10) {
7000 /* VSHLL, VMOVL */
7001 if (q || (rd & 1)) {
7002 return 1;
7004 tmp = neon_load_reg(rm, 0);
7005 tmp2 = neon_load_reg(rm, 1);
7006 for (pass = 0; pass < 2; pass++) {
7007 if (pass == 1)
7008 tmp = tmp2;
7010 gen_neon_widen(cpu_V0, tmp, size, u);
7012 if (shift != 0) {
7013 /* The shift is less than the width of the source
7014 type, so we can just shift the whole register. */
7015 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
7016 /* Widen the result of shift: we need to clear
7017 * the potential overflow bits resulting from
7018 * left bits of the narrow input appearing as
7019 * right bits of the left-hand neighbouring narrow
7020 * input. */
7021 if (size < 2 || !u) {
7022 uint64_t imm64;
7023 if (size == 0) {
7024 imm = (0xffu >> (8 - shift));
7025 imm |= imm << 16;
7026 } else if (size == 1) {
7027 imm = 0xffff >> (16 - shift);
7028 } else {
7029 /* size == 2 */
7030 imm = 0xffffffff >> (32 - shift);
7032 if (size < 2) {
7033 imm64 = imm | (((uint64_t)imm) << 32);
7034 } else {
7035 imm64 = imm;
7037 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
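/*
 * Worked example (illustrative): for size == 0 and shift == 3 the code
 * above computes imm = 0xff >> 5 = 0x07, replicates it to 0x00070007
 * and then to imm64 = 0x0007000700070007, so the AND with ~imm64
 * clears the low three bits of every 16-bit lane -- exactly the bits
 * that leaked in from the neighbouring narrow element during the
 * 64-bit shift.
 */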
7040 neon_store_reg64(cpu_V0, rd + pass);
7042 } else if (op >= 14) {
7043 /* VCVT fixed-point. */
7044 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
7045 return 1;
7047 /* We have already masked out the must-be-1 top bit of imm6,
7048 * hence this 32-shift where the ARM ARM has 64-imm6.
7050 shift = 32 - shift;
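/*
 * Worked example (illustrative): the ARM ARM computes the number of
 * fraction bits as 64 - imm6, with the top bit of imm6 required to be 1.
 * That top bit was dropped when the shift field was extracted, so e.g.
 * imm6 = 0b111000 (56, i.e. 8 fraction bits) arrives here as shift = 24,
 * and 32 - 24 = 8 recovers the same value.
 */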
7051 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7052 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
7053 if (!(op & 1)) {
7054 if (u)
7055 gen_vfp_ulto(0, shift, 1);
7056 else
7057 gen_vfp_slto(0, shift, 1);
7058 } else {
7059 if (u)
7060 gen_vfp_toul(0, shift, 1);
7061 else
7062 gen_vfp_tosl(0, shift, 1);
7064 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
7066 } else {
7067 return 1;
7069 } else { /* (insn & 0x00380080) == 0 */
7070 int invert, reg_ofs, vec_size;
7072 if (q && (rd & 1)) {
7073 return 1;
7076 op = (insn >> 8) & 0xf;
7077 /* One register and immediate. */
7078 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
7079 invert = (insn & (1 << 5)) != 0;
7080 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
7081 * We choose to not special-case this and will behave as if a
7082 * valid constant encoding of 0 had been given.
7084 switch (op) {
7085 case 0: case 1:
7086 /* no-op */
7087 break;
7088 case 2: case 3:
7089 imm <<= 8;
7090 break;
7091 case 4: case 5:
7092 imm <<= 16;
7093 break;
7094 case 6: case 7:
7095 imm <<= 24;
7096 break;
7097 case 8: case 9:
7098 imm |= imm << 16;
7099 break;
7100 case 10: case 11:
7101 imm = (imm << 8) | (imm << 24);
7102 break;
7103 case 12:
7104 imm = (imm << 8) | 0xff;
7105 break;
7106 case 13:
7107 imm = (imm << 16) | 0xffff;
7108 break;
7109 case 14:
7110 imm |= (imm << 8) | (imm << 16) | (imm << 24);
7111 if (invert) {
7112 imm = ~imm;
7114 break;
7115 case 15:
7116 if (invert) {
7117 return 1;
7119 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
7120 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
7121 break;
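/*
 * Worked example (illustrative): the case 15 expansion above builds the
 * VFP "modified immediate".  For imm = 0x70 it yields
 * (0x70 & 0x3f) << 19 = 0x01800000 plus (bit 6 set) 0x1f << 25 =
 * 0x3e000000, i.e. 0x3f800000, which is 1.0 as an IEEE single.
 */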
7123 if (invert) {
7124 imm = ~imm;
7127 reg_ofs = neon_reg_offset(rd, 0);
7128 vec_size = q ? 16 : 8;
7130 if (op & 1 && op < 12) {
7131 if (invert) {
7132 /* The immediate value has already been inverted,
7133 * so BIC becomes AND.
7135 tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
7136 vec_size, vec_size);
7137 } else {
7138 tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
7139 vec_size, vec_size);
7141 } else {
7142 /* VMOV, VMVN. */
7143 if (op == 14 && invert) {
7144 TCGv_i64 t64 = tcg_temp_new_i64();
7146 for (pass = 0; pass <= q; ++pass) {
7147 uint64_t val = 0;
7148 int n;
7150 for (n = 0; n < 8; n++) {
7151 if (imm & (1 << (n + pass * 8))) {
7152 val |= 0xffull << (n * 8);
7155 tcg_gen_movi_i64(t64, val);
7156 neon_store_reg64(t64, rd + pass);
7158 tcg_temp_free_i64(t64);
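/*
 * Worked example (illustrative): in this op == 14 && invert form each
 * bit of the immediate selects whether the corresponding byte of the
 * 64-bit result is 0xff or 0x00, 8 bits per pass; e.g. bits 0-3 set
 * give val = 0x00000000ffffffff for that D register.
 */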
7159 } else {
7160 tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
7164 } else { /* (insn & 0x00800010 == 0x00800000) */
7165 if (size != 3) {
7166 op = (insn >> 8) & 0xf;
7167 if ((insn & (1 << 6)) == 0) {
7168 /* Three registers of different lengths. */
7169 int src1_wide;
7170 int src2_wide;
7171 int prewiden;
7172 /* undefreq: bit 0 : UNDEF if size == 0
7173 * bit 1 : UNDEF if size == 1
7174 * bit 2 : UNDEF if size == 2
7175 * bit 3 : UNDEF if U == 1
7176 * Note that [2:0] set implies 'always UNDEF'
7178 int undefreq;
7179 /* prewiden, src1_wide, src2_wide, undefreq */
7180 static const int neon_3reg_wide[16][4] = {
7181 {1, 0, 0, 0}, /* VADDL */
7182 {1, 1, 0, 0}, /* VADDW */
7183 {1, 0, 0, 0}, /* VSUBL */
7184 {1, 1, 0, 0}, /* VSUBW */
7185 {0, 1, 1, 0}, /* VADDHN */
7186 {0, 0, 0, 0}, /* VABAL */
7187 {0, 1, 1, 0}, /* VSUBHN */
7188 {0, 0, 0, 0}, /* VABDL */
7189 {0, 0, 0, 0}, /* VMLAL */
7190 {0, 0, 0, 9}, /* VQDMLAL */
7191 {0, 0, 0, 0}, /* VMLSL */
7192 {0, 0, 0, 9}, /* VQDMLSL */
7193 {0, 0, 0, 0}, /* Integer VMULL */
7194 {0, 0, 0, 1}, /* VQDMULL */
7195 {0, 0, 0, 0xa}, /* Polynomial VMULL */
7196 {0, 0, 0, 7}, /* Reserved: always UNDEF */
7199 prewiden = neon_3reg_wide[op][0];
7200 src1_wide = neon_3reg_wide[op][1];
7201 src2_wide = neon_3reg_wide[op][2];
7202 undefreq = neon_3reg_wide[op][3];
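/*
 * Worked example (illustrative): VQDMLAL's table entry is {0, 0, 0, 9};
 * undefreq 9 = 0b1001 makes the check below UNDEF the insn for
 * size == 0 (bit 0) or U == 1 (bit 3).  Polynomial VMULL's 0xa
 * similarly rejects size == 1 and U == 1.
 */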
7204 if ((undefreq & (1 << size)) ||
7205 ((undefreq & 8) && u)) {
7206 return 1;
7208 if ((src1_wide && (rn & 1)) ||
7209 (src2_wide && (rm & 1)) ||
7210 (!src2_wide && (rd & 1))) {
7211 return 1;
7214 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
7215 * outside the loop below as it only performs a single pass.
7217 if (op == 14 && size == 2) {
7218 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
7220 if (!dc_isar_feature(aa32_pmull, s)) {
7221 return 1;
7223 tcg_rn = tcg_temp_new_i64();
7224 tcg_rm = tcg_temp_new_i64();
7225 tcg_rd = tcg_temp_new_i64();
7226 neon_load_reg64(tcg_rn, rn);
7227 neon_load_reg64(tcg_rm, rm);
7228 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
7229 neon_store_reg64(tcg_rd, rd);
7230 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
7231 neon_store_reg64(tcg_rd, rd + 1);
7232 tcg_temp_free_i64(tcg_rn);
7233 tcg_temp_free_i64(tcg_rm);
7234 tcg_temp_free_i64(tcg_rd);
7235 return 0;
7238 /* Avoid overlapping operands. Wide source operands are
7239 always aligned so will never overlap with wide
7240 destinations in problematic ways. */
7241 if (rd == rm && !src2_wide) {
7242 tmp = neon_load_reg(rm, 1);
7243 neon_store_scratch(2, tmp);
7244 } else if (rd == rn && !src1_wide) {
7245 tmp = neon_load_reg(rn, 1);
7246 neon_store_scratch(2, tmp);
7248 tmp3 = NULL;
7249 for (pass = 0; pass < 2; pass++) {
7250 if (src1_wide) {
7251 neon_load_reg64(cpu_V0, rn + pass);
7252 tmp = NULL;
7253 } else {
7254 if (pass == 1 && rd == rn) {
7255 tmp = neon_load_scratch(2);
7256 } else {
7257 tmp = neon_load_reg(rn, pass);
7259 if (prewiden) {
7260 gen_neon_widen(cpu_V0, tmp, size, u);
7263 if (src2_wide) {
7264 neon_load_reg64(cpu_V1, rm + pass);
7265 tmp2 = NULL;
7266 } else {
7267 if (pass == 1 && rd == rm) {
7268 tmp2 = neon_load_scratch(2);
7269 } else {
7270 tmp2 = neon_load_reg(rm, pass);
7272 if (prewiden) {
7273 gen_neon_widen(cpu_V1, tmp2, size, u);
7276 switch (op) {
7277 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
7278 gen_neon_addl(size);
7279 break;
7280 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
7281 gen_neon_subl(size);
7282 break;
7283 case 5: case 7: /* VABAL, VABDL */
7284 switch ((size << 1) | u) {
7285 case 0:
7286 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
7287 break;
7288 case 1:
7289 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
7290 break;
7291 case 2:
7292 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
7293 break;
7294 case 3:
7295 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
7296 break;
7297 case 4:
7298 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
7299 break;
7300 case 5:
7301 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
7302 break;
7303 default: abort();
7305 tcg_temp_free_i32(tmp2);
7306 tcg_temp_free_i32(tmp);
7307 break;
7308 case 8: case 9: case 10: case 11: case 12: case 13:
7309 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
7310 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
7311 break;
7312 case 14: /* Polynomial VMULL */
7313 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7314 tcg_temp_free_i32(tmp2);
7315 tcg_temp_free_i32(tmp);
7316 break;
7317 default: /* 15 is RESERVED: caught earlier */
7318 abort();
7320 if (op == 13) {
7321 /* VQDMULL */
7322 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7323 neon_store_reg64(cpu_V0, rd + pass);
7324 } else if (op == 5 || (op >= 8 && op <= 11)) {
7325 /* Accumulate. */
7326 neon_load_reg64(cpu_V1, rd + pass);
7327 switch (op) {
7328 case 10: /* VMLSL */
7329 gen_neon_negl(cpu_V0, size);
7330 /* Fall through */
7331 case 5: case 8: /* VABAL, VMLAL */
7332 gen_neon_addl(size);
7333 break;
7334 case 9: case 11: /* VQDMLAL, VQDMLSL */
7335 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7336 if (op == 11) {
7337 gen_neon_negl(cpu_V0, size);
7339 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
7340 break;
7341 default:
7342 abort();
7344 neon_store_reg64(cpu_V0, rd + pass);
7345 } else if (op == 4 || op == 6) {
7346 /* Narrowing operation. */
7347 tmp = tcg_temp_new_i32();
7348 if (!u) {
7349 switch (size) {
7350 case 0:
7351 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
7352 break;
7353 case 1:
7354 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
7355 break;
7356 case 2:
7357 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
7358 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
7359 break;
7360 default: abort();
7362 } else {
7363 switch (size) {
7364 case 0:
7365 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
7366 break;
7367 case 1:
7368 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
7369 break;
7370 case 2:
7371 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
7372 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
7373 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
7374 break;
7375 default: abort();
7378 if (pass == 0) {
7379 tmp3 = tmp;
7380 } else {
7381 neon_store_reg(rd, 0, tmp3);
7382 neon_store_reg(rd, 1, tmp);
7384 } else {
7385 /* Write back the result. */
7386 neon_store_reg64(cpu_V0, rd + pass);
7389 } else {
7390 /* Two registers and a scalar. NB that for ops of this form
7391 * the ARM ARM labels bit 24 as Q, but it is in our variable
7392 * 'u', not 'q'.
7394 if (size == 0) {
7395 return 1;
7397 switch (op) {
7398 case 1: /* Float VMLA scalar */
7399 case 5: /* Floating point VMLS scalar */
7400 case 9: /* Floating point VMUL scalar */
7401 if (size == 1) {
7402 return 1;
7404 /* fall through */
7405 case 0: /* Integer VMLA scalar */
7406 case 4: /* Integer VMLS scalar */
7407 case 8: /* Integer VMUL scalar */
7408 case 12: /* VQDMULH scalar */
7409 case 13: /* VQRDMULH scalar */
7410 if (u && ((rd | rn) & 1)) {
7411 return 1;
7413 tmp = neon_get_scalar(size, rm);
7414 neon_store_scratch(0, tmp);
7415 for (pass = 0; pass < (u ? 4 : 2); pass++) {
7416 tmp = neon_load_scratch(0);
7417 tmp2 = neon_load_reg(rn, pass);
7418 if (op == 12) {
7419 if (size == 1) {
7420 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
7421 } else {
7422 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
7424 } else if (op == 13) {
7425 if (size == 1) {
7426 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
7427 } else {
7428 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
7430 } else if (op & 1) {
7431 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7432 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
7433 tcg_temp_free_ptr(fpstatus);
7434 } else {
7435 switch (size) {
7436 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
7437 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
7438 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
7439 default: abort();
7442 tcg_temp_free_i32(tmp2);
7443 if (op < 8) {
7444 /* Accumulate. */
7445 tmp2 = neon_load_reg(rd, pass);
7446 switch (op) {
7447 case 0:
7448 gen_neon_add(size, tmp, tmp2);
7449 break;
7450 case 1:
7452 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7453 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
7454 tcg_temp_free_ptr(fpstatus);
7455 break;
7457 case 4:
7458 gen_neon_rsb(size, tmp, tmp2);
7459 break;
7460 case 5:
7462 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7463 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
7464 tcg_temp_free_ptr(fpstatus);
7465 break;
7467 default:
7468 abort();
7470 tcg_temp_free_i32(tmp2);
7472 neon_store_reg(rd, pass, tmp);
7474 break;
7475 case 3: /* VQDMLAL scalar */
7476 case 7: /* VQDMLSL scalar */
7477 case 11: /* VQDMULL scalar */
7478 if (u == 1) {
7479 return 1;
7481 /* fall through */
7482 case 2: /* VMLAL scalar */
7483 case 6: /* VMLSL scalar */
7484 case 10: /* VMULL scalar */
7485 if (rd & 1) {
7486 return 1;
7488 tmp2 = neon_get_scalar(size, rm);
7489 /* We need a copy of tmp2 because gen_neon_mull
7490 * deletes it during pass 0. */
7491 tmp4 = tcg_temp_new_i32();
7492 tcg_gen_mov_i32(tmp4, tmp2);
7493 tmp3 = neon_load_reg(rn, 1);
7495 for (pass = 0; pass < 2; pass++) {
7496 if (pass == 0) {
7497 tmp = neon_load_reg(rn, 0);
7498 } else {
7499 tmp = tmp3;
7500 tmp2 = tmp4;
7502 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
7503 if (op != 11) {
7504 neon_load_reg64(cpu_V1, rd + pass);
7506 switch (op) {
7507 case 6:
7508 gen_neon_negl(cpu_V0, size);
7509 /* Fall through */
7510 case 2:
7511 gen_neon_addl(size);
7512 break;
7513 case 3: case 7:
7514 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7515 if (op == 7) {
7516 gen_neon_negl(cpu_V0, size);
7518 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
7519 break;
7520 case 10:
7521 /* no-op */
7522 break;
7523 case 11:
7524 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7525 break;
7526 default:
7527 abort();
7529 neon_store_reg64(cpu_V0, rd + pass);
7531 break;
7532 case 14: /* VQRDMLAH scalar */
7533 case 15: /* VQRDMLSH scalar */
7535 NeonGenThreeOpEnvFn *fn;
7537 if (!dc_isar_feature(aa32_rdm, s)) {
7538 return 1;
7540 if (u && ((rd | rn) & 1)) {
7541 return 1;
7543 if (op == 14) {
7544 if (size == 1) {
7545 fn = gen_helper_neon_qrdmlah_s16;
7546 } else {
7547 fn = gen_helper_neon_qrdmlah_s32;
7549 } else {
7550 if (size == 1) {
7551 fn = gen_helper_neon_qrdmlsh_s16;
7552 } else {
7553 fn = gen_helper_neon_qrdmlsh_s32;
7557 tmp2 = neon_get_scalar(size, rm);
7558 for (pass = 0; pass < (u ? 4 : 2); pass++) {
7559 tmp = neon_load_reg(rn, pass);
7560 tmp3 = neon_load_reg(rd, pass);
7561 fn(tmp, cpu_env, tmp, tmp2, tmp3);
7562 tcg_temp_free_i32(tmp3);
7563 neon_store_reg(rd, pass, tmp);
7565 tcg_temp_free_i32(tmp2);
7567 break;
7568 default:
7569 g_assert_not_reached();
7572 } else { /* size == 3 */
7573 if (!u) {
7574 /* Extract. */
7575 imm = (insn >> 8) & 0xf;
7577 if (imm > 7 && !q)
7578 return 1;
7580 if (q && ((rd | rn | rm) & 1)) {
7581 return 1;
7584 if (imm == 0) {
7585 neon_load_reg64(cpu_V0, rn);
7586 if (q) {
7587 neon_load_reg64(cpu_V1, rn + 1);
7589 } else if (imm == 8) {
7590 neon_load_reg64(cpu_V0, rn + 1);
7591 if (q) {
7592 neon_load_reg64(cpu_V1, rm);
7594 } else if (q) {
7595 tmp64 = tcg_temp_new_i64();
7596 if (imm < 8) {
7597 neon_load_reg64(cpu_V0, rn);
7598 neon_load_reg64(tmp64, rn + 1);
7599 } else {
7600 neon_load_reg64(cpu_V0, rn + 1);
7601 neon_load_reg64(tmp64, rm);
7603 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
7604 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
7605 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7606 if (imm < 8) {
7607 neon_load_reg64(cpu_V1, rm);
7608 } else {
7609 neon_load_reg64(cpu_V1, rm + 1);
7610 imm -= 8;
7612 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
7613 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
7614 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
7615 tcg_temp_free_i64(tmp64);
7616 } else {
7617 /* BUGFIX */
7618 neon_load_reg64(cpu_V0, rn);
7619 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
7620 neon_load_reg64(cpu_V1, rm);
7621 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
7622 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7624 neon_store_reg64(cpu_V0, rd);
7625 if (q) {
7626 neon_store_reg64(cpu_V1, rd + 1);
7628 } else if ((insn & (1 << 11)) == 0) {
7629 /* Two register misc. */
7630 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
7631 size = (insn >> 18) & 3;
7632 /* UNDEF for unknown op values and bad op-size combinations */
7633 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
7634 return 1;
7636 if (neon_2rm_is_v8_op(op) &&
7637 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7638 return 1;
7640 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7641 q && ((rm | rd) & 1)) {
7642 return 1;
7644 switch (op) {
7645 case NEON_2RM_VREV64:
7646 for (pass = 0; pass < (q ? 2 : 1); pass++) {
7647 tmp = neon_load_reg(rm, pass * 2);
7648 tmp2 = neon_load_reg(rm, pass * 2 + 1);
7649 switch (size) {
7650 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7651 case 1: gen_swap_half(tmp); break;
7652 case 2: /* no-op */ break;
7653 default: abort();
7655 neon_store_reg(rd, pass * 2 + 1, tmp);
7656 if (size == 2) {
7657 neon_store_reg(rd, pass * 2, tmp2);
7658 } else {
7659 switch (size) {
7660 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7661 case 1: gen_swap_half(tmp2); break;
7662 default: abort();
7664 neon_store_reg(rd, pass * 2, tmp2);
7667 break;
7668 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7669 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
7670 for (pass = 0; pass < q + 1; pass++) {
7671 tmp = neon_load_reg(rm, pass * 2);
7672 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7673 tmp = neon_load_reg(rm, pass * 2 + 1);
7674 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7675 switch (size) {
7676 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7677 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7678 case 2: tcg_gen_add_i64(CPU_V001); break;
7679 default: abort();
7681 if (op >= NEON_2RM_VPADAL) {
7682 /* Accumulate. */
7683 neon_load_reg64(cpu_V1, rd + pass);
7684 gen_neon_addl(size);
7686 neon_store_reg64(cpu_V0, rd + pass);
7688 break;
7689 case NEON_2RM_VTRN:
7690 if (size == 2) {
7691 int n;
7692 for (n = 0; n < (q ? 4 : 2); n += 2) {
7693 tmp = neon_load_reg(rm, n);
7694 tmp2 = neon_load_reg(rd, n + 1);
7695 neon_store_reg(rm, n, tmp2);
7696 neon_store_reg(rd, n + 1, tmp);
7698 } else {
7699 goto elementwise;
7701 break;
7702 case NEON_2RM_VUZP:
7703 if (gen_neon_unzip(rd, rm, size, q)) {
7704 return 1;
7706 break;
7707 case NEON_2RM_VZIP:
7708 if (gen_neon_zip(rd, rm, size, q)) {
7709 return 1;
7711 break;
7712 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7713 /* also VQMOVUN; op field and mnemonics don't line up */
7714 if (rm & 1) {
7715 return 1;
7717 tmp2 = NULL;
7718 for (pass = 0; pass < 2; pass++) {
7719 neon_load_reg64(cpu_V0, rm + pass);
7720 tmp = tcg_temp_new_i32();
7721 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7722 tmp, cpu_V0);
7723 if (pass == 0) {
7724 tmp2 = tmp;
7725 } else {
7726 neon_store_reg(rd, 0, tmp2);
7727 neon_store_reg(rd, 1, tmp);
7730 break;
7731 case NEON_2RM_VSHLL:
7732 if (q || (rd & 1)) {
7733 return 1;
7735 tmp = neon_load_reg(rm, 0);
7736 tmp2 = neon_load_reg(rm, 1);
7737 for (pass = 0; pass < 2; pass++) {
7738 if (pass == 1)
7739 tmp = tmp2;
7740 gen_neon_widen(cpu_V0, tmp, size, 1);
7741 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
7742 neon_store_reg64(cpu_V0, rd + pass);
7744 break;
7745 case NEON_2RM_VCVT_F16_F32:
7747 TCGv_ptr fpst;
7748 TCGv_i32 ahp;
7750 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
7751 q || (rm & 1)) {
7752 return 1;
7754 tmp = tcg_temp_new_i32();
7755 tmp2 = tcg_temp_new_i32();
7756 fpst = get_fpstatus_ptr(true);
7757 ahp = get_ahp_flag();
7758 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
7759 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
7760 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
7761 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
7762 tcg_gen_shli_i32(tmp2, tmp2, 16);
7763 tcg_gen_or_i32(tmp2, tmp2, tmp);
7764 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
7765 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
7766 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7767 neon_store_reg(rd, 0, tmp2);
7768 tmp2 = tcg_temp_new_i32();
7769 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
7770 tcg_gen_shli_i32(tmp2, tmp2, 16);
7771 tcg_gen_or_i32(tmp2, tmp2, tmp);
7772 neon_store_reg(rd, 1, tmp2);
7773 tcg_temp_free_i32(tmp);
7774 tcg_temp_free_i32(ahp);
7775 tcg_temp_free_ptr(fpst);
7776 break;
7778 case NEON_2RM_VCVT_F32_F16:
7780 TCGv_ptr fpst;
7781 TCGv_i32 ahp;
7782 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
7783 q || (rd & 1)) {
7784 return 1;
7786 fpst = get_fpstatus_ptr(true);
7787 ahp = get_ahp_flag();
7788 tmp3 = tcg_temp_new_i32();
7789 tmp = neon_load_reg(rm, 0);
7790 tmp2 = neon_load_reg(rm, 1);
7791 tcg_gen_ext16u_i32(tmp3, tmp);
7792 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7793 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7794 tcg_gen_shri_i32(tmp3, tmp, 16);
7795 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7796 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7797 tcg_temp_free_i32(tmp);
7798 tcg_gen_ext16u_i32(tmp3, tmp2);
7799 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7800 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7801 tcg_gen_shri_i32(tmp3, tmp2, 16);
7802 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7803 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7804 tcg_temp_free_i32(tmp2);
7805 tcg_temp_free_i32(tmp3);
7806 tcg_temp_free_i32(ahp);
7807 tcg_temp_free_ptr(fpst);
7808 break;
7810 case NEON_2RM_AESE: case NEON_2RM_AESMC:
7811 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
7812 return 1;
7814 ptr1 = vfp_reg_ptr(true, rd);
7815 ptr2 = vfp_reg_ptr(true, rm);
7817 /* Bit 6 is the lowest opcode bit; it distinguishes between
7818 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7820 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7822 if (op == NEON_2RM_AESE) {
7823 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
7824 } else {
7825 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
7827 tcg_temp_free_ptr(ptr1);
7828 tcg_temp_free_ptr(ptr2);
7829 tcg_temp_free_i32(tmp3);
7830 break;
7831 case NEON_2RM_SHA1H:
7832 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
7833 return 1;
7835 ptr1 = vfp_reg_ptr(true, rd);
7836 ptr2 = vfp_reg_ptr(true, rm);
7838 gen_helper_crypto_sha1h(ptr1, ptr2);
7840 tcg_temp_free_ptr(ptr1);
7841 tcg_temp_free_ptr(ptr2);
7842 break;
7843 case NEON_2RM_SHA1SU1:
7844 if ((rm | rd) & 1) {
7845 return 1;
7847 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7848 if (q) {
7849 if (!dc_isar_feature(aa32_sha2, s)) {
7850 return 1;
7852 } else if (!dc_isar_feature(aa32_sha1, s)) {
7853 return 1;
7855 ptr1 = vfp_reg_ptr(true, rd);
7856 ptr2 = vfp_reg_ptr(true, rm);
7857 if (q) {
7858 gen_helper_crypto_sha256su0(ptr1, ptr2);
7859 } else {
7860 gen_helper_crypto_sha1su1(ptr1, ptr2);
7862 tcg_temp_free_ptr(ptr1);
7863 tcg_temp_free_ptr(ptr2);
7864 break;
7866 case NEON_2RM_VMVN:
7867 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
7868 break;
7869 case NEON_2RM_VNEG:
7870 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
7871 break;
7873 default:
7874 elementwise:
7875 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7876 if (neon_2rm_is_float_op(op)) {
7877 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7878 neon_reg_offset(rm, pass));
7879 tmp = NULL;
7880 } else {
7881 tmp = neon_load_reg(rm, pass);
7883 switch (op) {
7884 case NEON_2RM_VREV32:
7885 switch (size) {
7886 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7887 case 1: gen_swap_half(tmp); break;
7888 default: abort();
7890 break;
7891 case NEON_2RM_VREV16:
7892 gen_rev16(tmp);
7893 break;
7894 case NEON_2RM_VCLS:
7895 switch (size) {
7896 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7897 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7898 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
7899 default: abort();
7901 break;
7902 case NEON_2RM_VCLZ:
7903 switch (size) {
7904 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7905 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7906 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
7907 default: abort();
7909 break;
7910 case NEON_2RM_VCNT:
7911 gen_helper_neon_cnt_u8(tmp, tmp);
7912 break;
7913 case NEON_2RM_VQABS:
7914 switch (size) {
7915 case 0:
7916 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7917 break;
7918 case 1:
7919 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7920 break;
7921 case 2:
7922 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7923 break;
7924 default: abort();
7926 break;
7927 case NEON_2RM_VQNEG:
7928 switch (size) {
7929 case 0:
7930 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7931 break;
7932 case 1:
7933 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7934 break;
7935 case 2:
7936 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7937 break;
7938 default: abort();
7940 break;
7941 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
7942 tmp2 = tcg_const_i32(0);
7943 switch (size) {
7944 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7945 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7946 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
7947 default: abort();
7949 tcg_temp_free_i32(tmp2);
7950 if (op == NEON_2RM_VCLE0) {
7951 tcg_gen_not_i32(tmp, tmp);
7953 break;
7954 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
7955 tmp2 = tcg_const_i32(0);
7956 switch (size) {
7957 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7958 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7959 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
7960 default: abort();
7962 tcg_temp_free_i32(tmp2);
7963 if (op == NEON_2RM_VCLT0) {
7964 tcg_gen_not_i32(tmp, tmp);
7966 break;
7967 case NEON_2RM_VCEQ0:
7968 tmp2 = tcg_const_i32(0);
7969 switch (size) {
7970 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7971 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7972 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
7973 default: abort();
7975 tcg_temp_free_i32(tmp2);
7976 break;
7977 case NEON_2RM_VABS:
7978 switch (size) {
7979 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7980 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7981 case 2: tcg_gen_abs_i32(tmp, tmp); break;
7982 default: abort();
7984 break;
7985 case NEON_2RM_VCGT0_F:
7987 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7988 tmp2 = tcg_const_i32(0);
7989 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
7990 tcg_temp_free_i32(tmp2);
7991 tcg_temp_free_ptr(fpstatus);
7992 break;
7994 case NEON_2RM_VCGE0_F:
7996 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7997 tmp2 = tcg_const_i32(0);
7998 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
7999 tcg_temp_free_i32(tmp2);
8000 tcg_temp_free_ptr(fpstatus);
8001 break;
8003 case NEON_2RM_VCEQ0_F:
8005 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8006 tmp2 = tcg_const_i32(0);
8007 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
8008 tcg_temp_free_i32(tmp2);
8009 tcg_temp_free_ptr(fpstatus);
8010 break;
8012 case NEON_2RM_VCLE0_F:
8014 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8015 tmp2 = tcg_const_i32(0);
8016 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
8017 tcg_temp_free_i32(tmp2);
8018 tcg_temp_free_ptr(fpstatus);
8019 break;
8021 case NEON_2RM_VCLT0_F:
8023 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8024 tmp2 = tcg_const_i32(0);
8025 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
8026 tcg_temp_free_i32(tmp2);
8027 tcg_temp_free_ptr(fpstatus);
8028 break;
8030 case NEON_2RM_VABS_F:
8031 gen_vfp_abs(0);
8032 break;
8033 case NEON_2RM_VNEG_F:
8034 gen_vfp_neg(0);
8035 break;
8036 case NEON_2RM_VSWP:
8037 tmp2 = neon_load_reg(rd, pass);
8038 neon_store_reg(rm, pass, tmp2);
8039 break;
8040 case NEON_2RM_VTRN:
8041 tmp2 = neon_load_reg(rd, pass);
8042 switch (size) {
8043 case 0: gen_neon_trn_u8(tmp, tmp2); break;
8044 case 1: gen_neon_trn_u16(tmp, tmp2); break;
8045 default: abort();
8047 neon_store_reg(rm, pass, tmp2);
8048 break;
8049 case NEON_2RM_VRINTN:
8050 case NEON_2RM_VRINTA:
8051 case NEON_2RM_VRINTM:
8052 case NEON_2RM_VRINTP:
8053 case NEON_2RM_VRINTZ:
8055 TCGv_i32 tcg_rmode;
8056 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8057 int rmode;
8059 if (op == NEON_2RM_VRINTZ) {
8060 rmode = FPROUNDING_ZERO;
8061 } else {
8062 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
8065 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
8066 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8067 cpu_env);
8068 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
8069 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8070 cpu_env);
8071 tcg_temp_free_ptr(fpstatus);
8072 tcg_temp_free_i32(tcg_rmode);
8073 break;
8075 case NEON_2RM_VRINTX:
8077 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8078 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
8079 tcg_temp_free_ptr(fpstatus);
8080 break;
8082 case NEON_2RM_VCVTAU:
8083 case NEON_2RM_VCVTAS:
8084 case NEON_2RM_VCVTNU:
8085 case NEON_2RM_VCVTNS:
8086 case NEON_2RM_VCVTPU:
8087 case NEON_2RM_VCVTPS:
8088 case NEON_2RM_VCVTMU:
8089 case NEON_2RM_VCVTMS:
8091 bool is_signed = !extract32(insn, 7, 1);
8092 TCGv_ptr fpst = get_fpstatus_ptr(1);
8093 TCGv_i32 tcg_rmode, tcg_shift;
8094 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
8096 tcg_shift = tcg_const_i32(0);
8097 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
8098 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8099 cpu_env);
8101 if (is_signed) {
8102 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
8103 tcg_shift, fpst);
8104 } else {
8105 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
8106 tcg_shift, fpst);
8109 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8110 cpu_env);
8111 tcg_temp_free_i32(tcg_rmode);
8112 tcg_temp_free_i32(tcg_shift);
8113 tcg_temp_free_ptr(fpst);
8114 break;
8116 case NEON_2RM_VRECPE:
8118 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8119 gen_helper_recpe_u32(tmp, tmp, fpstatus);
8120 tcg_temp_free_ptr(fpstatus);
8121 break;
8123 case NEON_2RM_VRSQRTE:
8125 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8126 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
8127 tcg_temp_free_ptr(fpstatus);
8128 break;
8130 case NEON_2RM_VRECPE_F:
8132 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8133 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
8134 tcg_temp_free_ptr(fpstatus);
8135 break;
8137 case NEON_2RM_VRSQRTE_F:
8139 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8140 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
8141 tcg_temp_free_ptr(fpstatus);
8142 break;
8144 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
8145 gen_vfp_sito(0, 1);
8146 break;
8147 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
8148 gen_vfp_uito(0, 1);
8149 break;
8150 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
8151 gen_vfp_tosiz(0, 1);
8152 break;
8153 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
8154 gen_vfp_touiz(0, 1);
8155 break;
8156 default:
8157 /* Reserved op values were caught by the
8158 * neon_2rm_sizes[] check earlier.
8160 abort();
8162 if (neon_2rm_is_float_op(op)) {
8163 tcg_gen_st_f32(cpu_F0s, cpu_env,
8164 neon_reg_offset(rd, pass));
8165 } else {
8166 neon_store_reg(rd, pass, tmp);
8169 break;
8171 } else if ((insn & (1 << 10)) == 0) {
8172 /* VTBL, VTBX. */
8173 int n = ((insn >> 8) & 3) + 1;
8174 if ((rn + n) > 32) {
8175 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
8176 * helper function running off the end of the register file.
8178 return 1;
8180 n <<= 3;
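/*
 * Illustrative note: n starts out as the number of list registers
 * (bits [9:8] plus one) and the shift converts it to a byte count for
 * the helper, 8 bytes per D register; e.g. a length field of 0b10
 * gives 3 registers, i.e. n = 24.
 */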
8181 if (insn & (1 << 6)) {
8182 tmp = neon_load_reg(rd, 0);
8183 } else {
8184 tmp = tcg_temp_new_i32();
8185 tcg_gen_movi_i32(tmp, 0);
8187 tmp2 = neon_load_reg(rm, 0);
8188 ptr1 = vfp_reg_ptr(true, rn);
8189 tmp5 = tcg_const_i32(n);
8190 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
8191 tcg_temp_free_i32(tmp);
8192 if (insn & (1 << 6)) {
8193 tmp = neon_load_reg(rd, 1);
8194 } else {
8195 tmp = tcg_temp_new_i32();
8196 tcg_gen_movi_i32(tmp, 0);
8198 tmp3 = neon_load_reg(rm, 1);
8199 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
8200 tcg_temp_free_i32(tmp5);
8201 tcg_temp_free_ptr(ptr1);
8202 neon_store_reg(rd, 0, tmp2);
8203 neon_store_reg(rd, 1, tmp3);
8204 tcg_temp_free_i32(tmp);
8205 } else if ((insn & 0x380) == 0) {
8206 /* VDUP */
8207 int element;
8208 TCGMemOp size;
8210 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
8211 return 1;
8213 if (insn & (1 << 16)) {
8214 size = MO_8;
8215 element = (insn >> 17) & 7;
8216 } else if (insn & (1 << 17)) {
8217 size = MO_16;
8218 element = (insn >> 18) & 3;
8219 } else {
8220 size = MO_32;
8221 element = (insn >> 19) & 1;
8223 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
8224 neon_element_offset(rm, element, size),
8225 q ? 16 : 8, q ? 16 : 8);
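/*
 * Worked example (illustrative): the imm4 field is decoded as a
 * priority encoder on its low bits, so e.g. imm4 = 0b0110 takes the
 * "bit 17 set" branch above: 16-bit elements with the index taken
 * from bits [19:18], i.e. index 1.
 */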
8226 } else {
8227 return 1;
8231 return 0;
8234 /* Advanced SIMD three registers of the same length extension.
8235 * 31 25 23 22 20 16 12 11 10 9 8 3 0
8236 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
8237 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
8238 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
8240 static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
8242 gen_helper_gvec_3 *fn_gvec = NULL;
8243 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
8244 int rd, rn, rm, opr_sz;
8245 int data = 0;
8246 bool q;
8248 q = extract32(insn, 6, 1);
8249 VFP_DREG_D(rd, insn);
8250 VFP_DREG_N(rn, insn);
8251 VFP_DREG_M(rm, insn);
8252 if ((rd | rn | rm) & q) {
8253 return 1;
8256 if ((insn & 0xfe200f10) == 0xfc200800) {
8257 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
8258 int size = extract32(insn, 20, 1);
8259 data = extract32(insn, 23, 2); /* rot */
8260 if (!dc_isar_feature(aa32_vcma, s)
8261 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8262 return 1;
8264 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
8265 } else if ((insn & 0xfea00f10) == 0xfc800800) {
8266 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
8267 int size = extract32(insn, 20, 1);
8268 data = extract32(insn, 24, 1); /* rot */
8269 if (!dc_isar_feature(aa32_vcma, s)
8270 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8271 return 1;
8273 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
8274 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
8275 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
8276 bool u = extract32(insn, 4, 1);
8277 if (!dc_isar_feature(aa32_dp, s)) {
8278 return 1;
8280 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
8281 } else {
8282 return 1;
8285 if (s->fp_excp_el) {
8286 gen_exception_insn(s, 4, EXCP_UDEF,
8287 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
8288 return 0;
8290 if (!s->vfp_enabled) {
8291 return 1;
8294 opr_sz = (1 + q) * 8;
8295 if (fn_gvec_ptr) {
8296 TCGv_ptr fpst = get_fpstatus_ptr(1);
8297 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
8298 vfp_reg_offset(1, rn),
8299 vfp_reg_offset(1, rm), fpst,
8300 opr_sz, opr_sz, data, fn_gvec_ptr);
8301 tcg_temp_free_ptr(fpst);
8302 } else {
8303 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
8304 vfp_reg_offset(1, rn),
8305 vfp_reg_offset(1, rm),
8306 opr_sz, opr_sz, data, fn_gvec);
8308 return 0;
8311 /* Advanced SIMD two registers and a scalar extension.
8312 * 31 24 23 22 20 16 12 11 10 9 8 3 0
8313 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
8314 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
8315 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
8319 static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
8321 gen_helper_gvec_3 *fn_gvec = NULL;
8322 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
8323 int rd, rn, rm, opr_sz, data;
8324 bool q;
8326 q = extract32(insn, 6, 1);
8327 VFP_DREG_D(rd, insn);
8328 VFP_DREG_N(rn, insn);
8329 if ((rd | rn) & q) {
8330 return 1;
8333 if ((insn & 0xff000f10) == 0xfe000800) {
8334 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
8335 int rot = extract32(insn, 20, 2);
8336 int size = extract32(insn, 23, 1);
8337 int index;
8339 if (!dc_isar_feature(aa32_vcma, s)) {
8340 return 1;
8342 if (size == 0) {
8343 if (!dc_isar_feature(aa32_fp16_arith, s)) {
8344 return 1;
8346 /* For fp16, rm is just Vm, and index is M. */
8347 rm = extract32(insn, 0, 4);
8348 index = extract32(insn, 5, 1);
8349 } else {
8350 /* For fp32, rm is the usual M:Vm, and index is 0. */
8351 VFP_DREG_M(rm, insn);
8352 index = 0;
8354 data = (index << 2) | rot;
8355 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
8356 : gen_helper_gvec_fcmlah_idx);
8357 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
8358 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
8359 int u = extract32(insn, 4, 1);
8360 if (!dc_isar_feature(aa32_dp, s)) {
8361 return 1;
8363 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
8364 /* rm is just Vm, and index is M. */
8365 data = extract32(insn, 5, 1); /* index */
8366 rm = extract32(insn, 0, 4);
8367 } else {
8368 return 1;
8371 if (s->fp_excp_el) {
8372 gen_exception_insn(s, 4, EXCP_UDEF,
8373 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
8374 return 0;
8376 if (!s->vfp_enabled) {
8377 return 1;
8380 opr_sz = (1 + q) * 8;
8381 if (fn_gvec_ptr) {
8382 TCGv_ptr fpst = get_fpstatus_ptr(1);
8383 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
8384 vfp_reg_offset(1, rn),
8385 vfp_reg_offset(1, rm), fpst,
8386 opr_sz, opr_sz, data, fn_gvec_ptr);
8387 tcg_temp_free_ptr(fpst);
8388 } else {
8389 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
8390 vfp_reg_offset(1, rn),
8391 vfp_reg_offset(1, rm),
8392 opr_sz, opr_sz, data, fn_gvec);
8394 return 0;
8397 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
8399 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
8400 const ARMCPRegInfo *ri;
8402 cpnum = (insn >> 8) & 0xf;
8404 /* First check for coprocessor space used for XScale/iwMMXt insns */
8405 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
8406 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
8407 return 1;
8409 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
8410 return disas_iwmmxt_insn(s, insn);
8411 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
8412 return disas_dsp_insn(s, insn);
8414 return 1;
8417 /* Otherwise treat as a generic register access */
8418 is64 = (insn & (1 << 25)) == 0;
8419 if (!is64 && ((insn & (1 << 4)) == 0)) {
8420 /* cdp */
8421 return 1;
8424 crm = insn & 0xf;
8425 if (is64) {
8426 crn = 0;
8427 opc1 = (insn >> 4) & 0xf;
8428 opc2 = 0;
8429 rt2 = (insn >> 16) & 0xf;
8430 } else {
8431 crn = (insn >> 16) & 0xf;
8432 opc1 = (insn >> 21) & 7;
8433 opc2 = (insn >> 5) & 7;
8434 rt2 = 0;
8436 isread = (insn >> 20) & 1;
8437 rt = (insn >> 12) & 0xf;
8439 ri = get_arm_cp_reginfo(s->cp_regs,
8440 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
8441 if (ri) {
8442 /* Check access permissions */
8443 if (!cp_access_ok(s->current_el, ri, isread)) {
8444 return 1;
8447 if (ri->accessfn ||
8448 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
8449 /* Emit code to perform further access permissions checks at
8450 * runtime; this may result in an exception.
8451 * Note that on XScale all cp0..c13 registers do an access check
8452 * call in order to handle c15_cpar.
8454 TCGv_ptr tmpptr;
8455 TCGv_i32 tcg_syn, tcg_isread;
8456 uint32_t syndrome;
8458 /* Note that since we are an implementation which takes an
8459 * exception on a trapped conditional instruction only if the
8460 * instruction passes its condition code check, we can take
8461 * advantage of the clause in the ARM ARM that allows us to set
8462 * the COND field in the instruction to 0xE in all cases.
8463 * We could fish the actual condition out of the insn (ARM)
8464 * or the condexec bits (Thumb) but it isn't necessary.
8466 switch (cpnum) {
8467 case 14:
8468 if (is64) {
8469 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
8470 isread, false);
8471 } else {
8472 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
8473 rt, isread, false);
8475 break;
8476 case 15:
8477 if (is64) {
8478 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
8479 isread, false);
8480 } else {
8481 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
8482 rt, isread, false);
8484 break;
8485 default:
8486 /* ARMv8 defines that only coprocessors 14 and 15 exist,
8487 * so this can only happen if this is an ARMv7 or earlier CPU,
8488 * in which case the syndrome information won't actually be
8489 * guest visible.
8491 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8492 syndrome = syn_uncategorized();
8493 break;
8496 gen_set_condexec(s);
8497 gen_set_pc_im(s, s->pc - 4);
8498 tmpptr = tcg_const_ptr(ri);
8499 tcg_syn = tcg_const_i32(syndrome);
8500 tcg_isread = tcg_const_i32(isread);
8501 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
8502 tcg_isread);
8503 tcg_temp_free_ptr(tmpptr);
8504 tcg_temp_free_i32(tcg_syn);
8505 tcg_temp_free_i32(tcg_isread);
8508 /* Handle special cases first */
8509 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
8510 case ARM_CP_NOP:
8511 return 0;
8512 case ARM_CP_WFI:
8513 if (isread) {
8514 return 1;
8516 gen_set_pc_im(s, s->pc);
8517 s->base.is_jmp = DISAS_WFI;
8518 return 0;
8519 default:
8520 break;
8523 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
8524 gen_io_start();
8527 if (isread) {
8528 /* Read */
8529 if (is64) {
8530 TCGv_i64 tmp64;
8531 TCGv_i32 tmp;
8532 if (ri->type & ARM_CP_CONST) {
8533 tmp64 = tcg_const_i64(ri->resetvalue);
8534 } else if (ri->readfn) {
8535 TCGv_ptr tmpptr;
8536 tmp64 = tcg_temp_new_i64();
8537 tmpptr = tcg_const_ptr(ri);
8538 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
8539 tcg_temp_free_ptr(tmpptr);
8540 } else {
8541 tmp64 = tcg_temp_new_i64();
8542 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
8544 tmp = tcg_temp_new_i32();
8545 tcg_gen_extrl_i64_i32(tmp, tmp64);
8546 store_reg(s, rt, tmp);
8547 tcg_gen_shri_i64(tmp64, tmp64, 32);
8548 tmp = tcg_temp_new_i32();
8549 tcg_gen_extrl_i64_i32(tmp, tmp64);
8550 tcg_temp_free_i64(tmp64);
8551 store_reg(s, rt2, tmp);
8552 } else {
8553 TCGv_i32 tmp;
8554 if (ri->type & ARM_CP_CONST) {
8555 tmp = tcg_const_i32(ri->resetvalue);
8556 } else if (ri->readfn) {
8557 TCGv_ptr tmpptr;
8558 tmp = tcg_temp_new_i32();
8559 tmpptr = tcg_const_ptr(ri);
8560 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
8561 tcg_temp_free_ptr(tmpptr);
8562 } else {
8563 tmp = load_cpu_offset(ri->fieldoffset);
8565 if (rt == 15) {
8566 /* A destination register of r15 for a 32-bit coprocessor read sets
8567 * the condition codes (NZCV) from the top 4 bits of the value read.
8569 gen_set_nzcv(tmp);
8570 tcg_temp_free_i32(tmp);
8571 } else {
8572 store_reg(s, rt, tmp);
8575 } else {
8576 /* Write */
8577 if (ri->type & ARM_CP_CONST) {
8578 /* If not forbidden by access permissions, treat as write-ignored (WI) */
8579 return 0;
8582 if (is64) {
8583 TCGv_i32 tmplo, tmphi;
8584 TCGv_i64 tmp64 = tcg_temp_new_i64();
8585 tmplo = load_reg(s, rt);
8586 tmphi = load_reg(s, rt2);
8587 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
8588 tcg_temp_free_i32(tmplo);
8589 tcg_temp_free_i32(tmphi);
8590 if (ri->writefn) {
8591 TCGv_ptr tmpptr = tcg_const_ptr(ri);
8592 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
8593 tcg_temp_free_ptr(tmpptr);
8594 } else {
8595 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
8597 tcg_temp_free_i64(tmp64);
8598 } else {
8599 if (ri->writefn) {
8600 TCGv_i32 tmp;
8601 TCGv_ptr tmpptr;
8602 tmp = load_reg(s, rt);
8603 tmpptr = tcg_const_ptr(ri);
8604 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
8605 tcg_temp_free_ptr(tmpptr);
8606 tcg_temp_free_i32(tmp);
8607 } else {
8608 TCGv_i32 tmp = load_reg(s, rt);
8609 store_cpu_offset(tmp, ri->fieldoffset);
8614 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
8615 /* I/O operations must end the TB here (whether read or write) */
8616 gen_io_end();
8617 gen_lookup_tb(s);
8618 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
8619 /* We default to ending the TB on a coprocessor register write,
8620 * but allow this to be suppressed by the register definition
8621 * (usually only necessary to work around guest bugs).
8623 gen_lookup_tb(s);
8626 return 0;
8629 /* Unknown register; this might be a guest error or a QEMU
8630 * unimplemented feature.
8632 if (is64) {
8633 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
8634 "64 bit system register cp:%d opc1: %d crm:%d "
8635 "(%s)\n",
8636 isread ? "read" : "write", cpnum, opc1, crm,
8637 s->ns ? "non-secure" : "secure");
8638 } else {
8639 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
8640 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
8641 "(%s)\n",
8642 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
8643 s->ns ? "non-secure" : "secure");
8646 return 1;
8650 /* Store a 64-bit value to a register pair. Clobbers val. */
8651 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
8653 TCGv_i32 tmp;
8654 tmp = tcg_temp_new_i32();
8655 tcg_gen_extrl_i64_i32(tmp, val);
8656 store_reg(s, rlow, tmp);
8657 tmp = tcg_temp_new_i32();
8658 tcg_gen_shri_i64(val, val, 32);
8659 tcg_gen_extrl_i64_i32(tmp, val);
8660 store_reg(s, rhigh, tmp);
8663 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
8664 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
8666 TCGv_i64 tmp;
8667 TCGv_i32 tmp2;
8669 /* Load value and extend to 64 bits. */
8670 tmp = tcg_temp_new_i64();
8671 tmp2 = load_reg(s, rlow);
8672 tcg_gen_extu_i32_i64(tmp, tmp2);
8673 tcg_temp_free_i32(tmp2);
8674 tcg_gen_add_i64(val, val, tmp);
8675 tcg_temp_free_i64(tmp);
8678 /* load and add a 64-bit value from a register pair. */
8679 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
8681 TCGv_i64 tmp;
8682 TCGv_i32 tmpl;
8683 TCGv_i32 tmph;
8685 /* Load 64-bit value rd:rn. */
8686 tmpl = load_reg(s, rlow);
8687 tmph = load_reg(s, rhigh);
8688 tmp = tcg_temp_new_i64();
8689 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
8690 tcg_temp_free_i32(tmpl);
8691 tcg_temp_free_i32(tmph);
8692 tcg_gen_add_i64(val, val, tmp);
8693 tcg_temp_free_i64(tmp);
8696 /* Set N and Z flags from hi|lo. */
8697 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
8699 tcg_gen_mov_i32(cpu_NF, hi);
8700 tcg_gen_or_i32(cpu_ZF, lo, hi);
8703 /* Load/Store exclusive instructions are implemented by remembering
8704 the value/address loaded, and seeing if these are the same
8705 when the store is performed. This should be sufficient to implement
8706 the architecturally mandated semantics, and avoids having to monitor
8707 regular stores. The compare vs the remembered value is done during
8708 the cmpxchg operation, but we must compare the addresses manually. */
8709 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
8710 TCGv_i32 addr, int size)
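/* Note: 'size' is log2 of the access width (0 = byte, 1 = halfword,
 * 2 = word, 3 = the 64-bit LDREXD pair); MO_ALIGN below adds the alignment
 * check that the architecture requires for exclusive accesses.
 */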
8712 TCGv_i32 tmp = tcg_temp_new_i32();
8713 TCGMemOp opc = size | MO_ALIGN | s->be_data;
8715 s->is_ldex = true;
8717 if (size == 3) {
8718 TCGv_i32 tmp2 = tcg_temp_new_i32();
8719 TCGv_i64 t64 = tcg_temp_new_i64();
8721 /* For AArch32, architecturally the 32-bit word at the lowest
8722 * address is always Rt and the one at addr+4 is Rt2, even if
8723 * the CPU is big-endian. That means we don't want to do a
8724 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
8725 * for an architecturally 64-bit access, but instead do a
8726 * 64-bit access using MO_BE if appropriate and then split
8727 * the two halves.
8728 * This only makes a difference for BE32 user-mode, where
8729 * frob64() must not flip the two halves of the 64-bit data
8730 * but this code must treat BE32 user-mode like BE32 system.
8732 TCGv taddr = gen_aa32_addr(s, addr, opc);
8734 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
8735 tcg_temp_free(taddr);
8736 tcg_gen_mov_i64(cpu_exclusive_val, t64);
8737 if (s->be_data == MO_BE) {
8738 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
8739 } else {
8740 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
8742 tcg_temp_free_i64(t64);
8744 store_reg(s, rt2, tmp2);
8745 } else {
8746 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
8747 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
8750 store_reg(s, rt, tmp);
8751 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
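/* CLREX clears the local exclusive monitor. The monitor is modelled by
 * cpu_exclusive_addr/cpu_exclusive_val, so clearing it is just setting the
 * recorded address to -1, which can never match a (zero-extended 32-bit)
 * address in the comparison made by gen_store_exclusive().
 */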
8754 static void gen_clrex(DisasContext *s)
8756 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
8759 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
8760 TCGv_i32 addr, int size)
8762 TCGv_i32 t0, t1, t2;
8763 TCGv_i64 extaddr;
8764 TCGv taddr;
8765 TCGLabel *done_label;
8766 TCGLabel *fail_label;
8767 TCGMemOp opc = size | MO_ALIGN | s->be_data;
8769 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
8770 [addr] = {Rt};
8771 {Rd} = 0;
8772 } else {
8773 {Rd} = 1;
8774 } */
8775 fail_label = gen_new_label();
8776 done_label = gen_new_label();
8777 extaddr = tcg_temp_new_i64();
8778 tcg_gen_extu_i32_i64(extaddr, addr);
8779 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
8780 tcg_temp_free_i64(extaddr);
8782 taddr = gen_aa32_addr(s, addr, opc);
8783 t0 = tcg_temp_new_i32();
8784 t1 = load_reg(s, rt);
8785 if (size == 3) {
8786 TCGv_i64 o64 = tcg_temp_new_i64();
8787 TCGv_i64 n64 = tcg_temp_new_i64();
8789 t2 = load_reg(s, rt2);
8790 /* For AArch32, architecturally the 32-bit word at the lowest
8791 * address is always Rt and the one at addr+4 is Rt2, even if
8792 * the CPU is big-endian. Since we're going to treat this as a
8793 * single 64-bit BE store, we need to put the two halves in the
8794 * opposite order for BE to LE, so that they end up in the right
8795 * places.
8796 * We don't want gen_aa32_frob64() because that does the wrong
8797 * thing for BE32 usermode.
8799 if (s->be_data == MO_BE) {
8800 tcg_gen_concat_i32_i64(n64, t2, t1);
8801 } else {
8802 tcg_gen_concat_i32_i64(n64, t1, t2);
8804 tcg_temp_free_i32(t2);
8806 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
8807 get_mem_index(s), opc);
8808 tcg_temp_free_i64(n64);
8810 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
8811 tcg_gen_extrl_i64_i32(t0, o64);
8813 tcg_temp_free_i64(o64);
8814 } else {
8815 t2 = tcg_temp_new_i32();
8816 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
8817 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
8818 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
8819 tcg_temp_free_i32(t2);
8821 tcg_temp_free_i32(t1);
8822 tcg_temp_free(taddr);
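/* t0 is now 0 if the cmpxchg saw the expected old value (the store was
 * performed) and 1 if it did not, which is exactly the status value that
 * STREX must return in Rd.
 */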
8823 tcg_gen_mov_i32(cpu_R[rd], t0);
8824 tcg_temp_free_i32(t0);
8825 tcg_gen_br(done_label);
8827 gen_set_label(fail_label);
8828 tcg_gen_movi_i32(cpu_R[rd], 1);
8829 gen_set_label(done_label);
8830 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
8833 /* gen_srs:
8835 * @s: DisasContext
8836 * @mode: mode field from insn (which stack to store to)
8837 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
8838 * @writeback: true if writeback bit set
8840 * Generate code for the SRS (Store Return State) insn.
8842 static void gen_srs(DisasContext *s,
8843 uint32_t mode, uint32_t amode, bool writeback)
8845 int32_t offset;
8846 TCGv_i32 addr, tmp;
8847 bool undef = false;
8849 /* SRS is:
8850 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
8851 * and specified mode is monitor mode
8852 * - UNDEFINED in Hyp mode
8853 * - UNPREDICTABLE in User or System mode
8854 * - UNPREDICTABLE if the specified mode is:
8855 * -- not implemented
8856 * -- not a valid mode number
8857 * -- a mode that's at a higher exception level
8858 * -- Monitor, if we are Non-secure
8859 * For the UNPREDICTABLE cases we choose to UNDEF.
8861 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
8862 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
8863 return;
8866 if (s->current_el == 0 || s->current_el == 2) {
8867 undef = true;
8870 switch (mode) {
8871 case ARM_CPU_MODE_USR:
8872 case ARM_CPU_MODE_FIQ:
8873 case ARM_CPU_MODE_IRQ:
8874 case ARM_CPU_MODE_SVC:
8875 case ARM_CPU_MODE_ABT:
8876 case ARM_CPU_MODE_UND:
8877 case ARM_CPU_MODE_SYS:
8878 break;
8879 case ARM_CPU_MODE_HYP:
8880 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
8881 undef = true;
8883 break;
8884 case ARM_CPU_MODE_MON:
8885 /* No need to check specifically for "are we non-secure" because
8886 * we've already made EL0 UNDEF and handled the trap for S-EL1;
8887 * so if this isn't EL3 then we must be non-secure.
8889 if (s->current_el != 3) {
8890 undef = true;
8892 break;
8893 default:
8894 undef = true;
8897 if (undef) {
8898 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
8899 default_exception_el(s));
8900 return;
8903 addr = tcg_temp_new_i32();
8904 tmp = tcg_const_i32(mode);
8905 /* get_r13_banked() will raise an exception if called from System mode */
8906 gen_set_condexec(s);
8907 gen_set_pc_im(s, s->pc - 4);
8908 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8909 tcg_temp_free_i32(tmp);
8910 switch (amode) {
8911 case 0: /* DA */
8912 offset = -4;
8913 break;
8914 case 1: /* IA */
8915 offset = 0;
8916 break;
8917 case 2: /* DB */
8918 offset = -8;
8919 break;
8920 case 3: /* IB */
8921 offset = 4;
8922 break;
8923 default:
8924 abort();
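/* The offset chosen above makes addr point at the lower of the two words
 * that SRS stores (LR at [addr], SPSR at [addr + 4]); e.g. for DB, a full
 * descending stack, the pair goes at SP-8/SP-4, while for IA it goes at
 * SP/SP+4.
 */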
8926 tcg_gen_addi_i32(addr, addr, offset);
8927 tmp = load_reg(s, 14);
8928 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8929 tcg_temp_free_i32(tmp);
8930 tmp = load_cpu_field(spsr);
8931 tcg_gen_addi_i32(addr, addr, 4);
8932 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8933 tcg_temp_free_i32(tmp);
8934 if (writeback) {
8935 switch (amode) {
8936 case 0:
8937 offset = -8;
8938 break;
8939 case 1:
8940 offset = 4;
8941 break;
8942 case 2:
8943 offset = -4;
8944 break;
8945 case 3:
8946 offset = 0;
8947 break;
8948 default:
8949 abort();
8951 tcg_gen_addi_i32(addr, addr, offset);
8952 tmp = tcg_const_i32(mode);
8953 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8954 tcg_temp_free_i32(tmp);
8956 tcg_temp_free_i32(addr);
8957 s->base.is_jmp = DISAS_UPDATE;
8960 /* Generate a label used for skipping this instruction */
8961 static void arm_gen_condlabel(DisasContext *s)
8963 if (!s->condjmp) {
8964 s->condlabel = gen_new_label();
8965 s->condjmp = 1;
8969 /* Skip this instruction if the ARM condition is false */
8970 static void arm_skip_unless(DisasContext *s, uint32_t cond)
8972 arm_gen_condlabel(s);
8973 arm_gen_test_cc(cond ^ 1, s->condlabel);
8976 static void disas_arm_insn(DisasContext *s, unsigned int insn)
8978 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
8979 TCGv_i32 tmp;
8980 TCGv_i32 tmp2;
8981 TCGv_i32 tmp3;
8982 TCGv_i32 addr;
8983 TCGv_i64 tmp64;
8985 /* M variants do not implement ARM mode; this must raise the INVSTATE
8986 * UsageFault exception.
8988 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8989 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8990 default_exception_el(s));
8991 return;
8993 cond = insn >> 28;
8994 if (cond == 0xf) {
8995 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8996 * choose to UNDEF. In ARMv5 and above the space is used
8997 * for miscellaneous unconditional instructions.
8999 ARCH(5);
9001 /* Unconditional instructions. */
9002 if (((insn >> 25) & 7) == 1) {
9003 /* NEON Data processing. */
9004 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9005 goto illegal_op;
9008 if (disas_neon_data_insn(s, insn)) {
9009 goto illegal_op;
9011 return;
9013 if ((insn & 0x0f100000) == 0x04000000) {
9014 /* NEON load/store. */
9015 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9016 goto illegal_op;
9019 if (disas_neon_ls_insn(s, insn)) {
9020 goto illegal_op;
9022 return;
9024 if ((insn & 0x0f000e10) == 0x0e000a00) {
9025 /* VFP. */
9026 if (disas_vfp_insn(s, insn)) {
9027 goto illegal_op;
9029 return;
9031 if (((insn & 0x0f30f000) == 0x0510f000) ||
9032 ((insn & 0x0f30f010) == 0x0710f000)) {
9033 if ((insn & (1 << 22)) == 0) {
9034 /* PLDW; v7MP */
9035 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
9036 goto illegal_op;
9039 /* Otherwise PLD; v5TE+ */
9040 ARCH(5TE);
9041 return;
9043 if (((insn & 0x0f70f000) == 0x0450f000) ||
9044 ((insn & 0x0f70f010) == 0x0650f000)) {
9045 ARCH(7);
9046 return; /* PLI; V7 */
9048 if (((insn & 0x0f700000) == 0x04100000) ||
9049 ((insn & 0x0f700010) == 0x06100000)) {
9050 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
9051 goto illegal_op;
9053 return; /* v7MP: Unallocated memory hint: must NOP */
9056 if ((insn & 0x0ffffdff) == 0x01010000) {
9057 ARCH(6);
9058 /* setend */
9059 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
9060 gen_helper_setend(cpu_env);
9061 s->base.is_jmp = DISAS_UPDATE;
9063 return;
9064 } else if ((insn & 0x0fffff00) == 0x057ff000) {
9065 switch ((insn >> 4) & 0xf) {
9066 case 1: /* clrex */
9067 ARCH(6K);
9068 gen_clrex(s);
9069 return;
9070 case 4: /* dsb */
9071 case 5: /* dmb */
9072 ARCH(7);
9073 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9074 return;
9075 case 6: /* isb */
9076 /* We need to break the TB after this insn to execute
9077 * self-modifying code correctly and also to take
9078 * any pending interrupts immediately.
9080 gen_goto_tb(s, 0, s->pc & ~1);
9081 return;
9082 default:
9083 goto illegal_op;
9085 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
9086 /* srs */
9087 ARCH(6);
9088 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
9089 return;
9090 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9091 /* rfe */
9092 int32_t offset;
9093 if (IS_USER(s))
9094 goto illegal_op;
9095 ARCH(6);
9096 rn = (insn >> 16) & 0xf;
9097 addr = load_reg(s, rn);
9098 i = (insn >> 23) & 3;
9099 switch (i) {
9100 case 0: offset = -4; break; /* DA */
9101 case 1: offset = 0; break; /* IA */
9102 case 2: offset = -8; break; /* DB */
9103 case 3: offset = 4; break; /* IB */
9104 default: abort();
9106 if (offset)
9107 tcg_gen_addi_i32(addr, addr, offset);
9108 /* Load PC into tmp and CPSR into tmp2. */
9109 tmp = tcg_temp_new_i32();
9110 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9111 tcg_gen_addi_i32(addr, addr, 4);
9112 tmp2 = tcg_temp_new_i32();
9113 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9114 if (insn & (1 << 21)) {
9115 /* Base writeback. */
9116 switch (i) {
9117 case 0: offset = -8; break;
9118 case 1: offset = 4; break;
9119 case 2: offset = -4; break;
9120 case 3: offset = 0; break;
9121 default: abort();
9123 if (offset)
9124 tcg_gen_addi_i32(addr, addr, offset);
9125 store_reg(s, rn, addr);
9126 } else {
9127 tcg_temp_free_i32(addr);
9129 gen_rfe(s, tmp, tmp2);
9130 return;
9131 } else if ((insn & 0x0e000000) == 0x0a000000) {
9132 /* branch link and change to thumb (blx <offset>) */
9133 int32_t offset;
9135 val = (uint32_t)s->pc;
9136 tmp = tcg_temp_new_i32();
9137 tcg_gen_movi_i32(tmp, val);
9138 store_reg(s, 14, tmp);
9139 /* Sign-extend the 24-bit offset */
9140 offset = (((int32_t)insn) << 8) >> 8;
9141 /* offset * 4 + bit24 * 2 + (thumb bit) */
9142 val += (offset << 2) | ((insn >> 23) & 2) | 1;
9143 /* pipeline offset */
9144 val += 4;
9145 /* protected by the ARCH(5) check above, near the start of the uncond block */
9146 gen_bx_im(s, val);
9147 return;
9148 } else if ((insn & 0x0e000f00) == 0x0c000100) {
9149 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9150 /* iWMMXt register transfer. */
9151 if (extract32(s->c15_cpar, 1, 1)) {
9152 if (!disas_iwmmxt_insn(s, insn)) {
9153 return;
9157 } else if ((insn & 0x0e000a00) == 0x0c000800
9158 && arm_dc_feature(s, ARM_FEATURE_V8)) {
9159 if (disas_neon_insn_3same_ext(s, insn)) {
9160 goto illegal_op;
9162 return;
9163 } else if ((insn & 0x0f000a00) == 0x0e000800
9164 && arm_dc_feature(s, ARM_FEATURE_V8)) {
9165 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
9166 goto illegal_op;
9168 return;
9169 } else if ((insn & 0x0fe00000) == 0x0c400000) {
9170 /* Coprocessor double register transfer. */
9171 ARCH(5TE);
9172 } else if ((insn & 0x0f000010) == 0x0e000010) {
9173 /* Additional coprocessor register transfer. */
9174 } else if ((insn & 0x0ff10020) == 0x01000000) {
9175 uint32_t mask;
9176 uint32_t val;
9177 /* cps (privileged) */
9178 if (IS_USER(s))
9179 return;
9180 mask = val = 0;
9181 if (insn & (1 << 19)) {
9182 if (insn & (1 << 8))
9183 mask |= CPSR_A;
9184 if (insn & (1 << 7))
9185 mask |= CPSR_I;
9186 if (insn & (1 << 6))
9187 mask |= CPSR_F;
9188 if (insn & (1 << 18))
9189 val |= mask;
9191 if (insn & (1 << 17)) {
9192 mask |= CPSR_M;
9193 val |= (insn & 0x1f);
9195 if (mask) {
9196 gen_set_psr_im(s, mask, 0, val);
9198 return;
9200 goto illegal_op;
9202 if (cond != 0xe) {
9203 /* If the condition is not 'always', generate a conditional branch
9204 that skips over this instruction when the condition fails */
9205 arm_skip_unless(s, cond);
9207 if ((insn & 0x0f900000) == 0x03000000) {
9208 if ((insn & (1 << 21)) == 0) {
9209 ARCH(6T2);
9210 rd = (insn >> 12) & 0xf;
9211 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
9212 if ((insn & (1 << 22)) == 0) {
9213 /* MOVW */
9214 tmp = tcg_temp_new_i32();
9215 tcg_gen_movi_i32(tmp, val);
9216 } else {
9217 /* MOVT */
9218 tmp = load_reg(s, rd);
9219 tcg_gen_ext16u_i32(tmp, tmp);
9220 tcg_gen_ori_i32(tmp, tmp, val << 16);
9222 store_reg(s, rd, tmp);
9223 } else {
9224 if (((insn >> 12) & 0xf) != 0xf)
9225 goto illegal_op;
9226 if (((insn >> 16) & 0xf) == 0) {
9227 gen_nop_hint(s, insn & 0xff);
9228 } else {
9229 /* CPSR = immediate */
9230 val = insn & 0xff;
9231 shift = ((insn >> 8) & 0xf) * 2;
9232 if (shift)
9233 val = (val >> shift) | (val << (32 - shift));
9234 i = ((insn & (1 << 22)) != 0);
9235 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
9236 i, val)) {
9237 goto illegal_op;
9241 } else if ((insn & 0x0f900000) == 0x01000000
9242 && (insn & 0x00000090) != 0x00000090) {
9243 /* miscellaneous instructions */
9244 op1 = (insn >> 21) & 3;
9245 sh = (insn >> 4) & 0xf;
9246 rm = insn & 0xf;
9247 switch (sh) {
9248 case 0x0: /* MSR, MRS */
9249 if (insn & (1 << 9)) {
9250 /* MSR (banked) and MRS (banked) */
9251 int sysm = extract32(insn, 16, 4) |
9252 (extract32(insn, 8, 1) << 4);
9253 int r = extract32(insn, 22, 1);
9255 if (op1 & 1) {
9256 /* MSR (banked) */
9257 gen_msr_banked(s, r, sysm, rm);
9258 } else {
9259 /* MRS (banked) */
9260 int rd = extract32(insn, 12, 4);
9262 gen_mrs_banked(s, r, sysm, rd);
9264 break;
9267 /* MSR, MRS (for PSRs) */
9268 if (op1 & 1) {
9269 /* PSR = reg */
9270 tmp = load_reg(s, rm);
9271 i = ((op1 & 2) != 0);
9272 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9273 goto illegal_op;
9274 } else {
9275 /* reg = PSR */
9276 rd = (insn >> 12) & 0xf;
9277 if (op1 & 2) {
9278 if (IS_USER(s))
9279 goto illegal_op;
9280 tmp = load_cpu_field(spsr);
9281 } else {
9282 tmp = tcg_temp_new_i32();
9283 gen_helper_cpsr_read(tmp, cpu_env);
9285 store_reg(s, rd, tmp);
9287 break;
9288 case 0x1:
9289 if (op1 == 1) {
9290 /* branch/exchange thumb (bx). */
9291 ARCH(4T);
9292 tmp = load_reg(s, rm);
9293 gen_bx(s, tmp);
9294 } else if (op1 == 3) {
9295 /* clz */
9296 ARCH(5);
9297 rd = (insn >> 12) & 0xf;
9298 tmp = load_reg(s, rm);
9299 tcg_gen_clzi_i32(tmp, tmp, 32);
9300 store_reg(s, rd, tmp);
9301 } else {
9302 goto illegal_op;
9304 break;
9305 case 0x2:
9306 if (op1 == 1) {
9307 ARCH(5J); /* bxj */
9308 /* Trivial implementation equivalent to bx. */
9309 tmp = load_reg(s, rm);
9310 gen_bx(s, tmp);
9311 } else {
9312 goto illegal_op;
9314 break;
9315 case 0x3:
9316 if (op1 != 1)
9317 goto illegal_op;
9319 ARCH(5);
9320 /* branch link/exchange thumb (blx) */
9321 tmp = load_reg(s, rm);
9322 tmp2 = tcg_temp_new_i32();
9323 tcg_gen_movi_i32(tmp2, s->pc);
9324 store_reg(s, 14, tmp2);
9325 gen_bx(s, tmp);
9326 break;
9327 case 0x4:
9329 /* crc32/crc32c */
9330 uint32_t c = extract32(insn, 8, 4);
9332 /* Check that this CPU supports the ARMv8 CRC instructions.
9333 * op1 == 3 is UNPREDICTABLE; we handle it as UNDEFINED.
9334 * Bits 8, 10 and 11 should be zero.
9336 if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
9337 goto illegal_op;
9340 rn = extract32(insn, 16, 4);
9341 rd = extract32(insn, 12, 4);
9343 tmp = load_reg(s, rn);
9344 tmp2 = load_reg(s, rm);
9345 if (op1 == 0) {
9346 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9347 } else if (op1 == 1) {
9348 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
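/* 1 << op1 is the width in bytes of the value being folded into the CRC
 * (1, 2 or 4 for the byte/halfword/word forms); the masking above narrows
 * the operand for the byte and halfword variants.
 */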
9350 tmp3 = tcg_const_i32(1 << op1);
9351 if (c & 0x2) {
9352 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9353 } else {
9354 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9356 tcg_temp_free_i32(tmp2);
9357 tcg_temp_free_i32(tmp3);
9358 store_reg(s, rd, tmp);
9359 break;
9361 case 0x5: /* saturating add/subtract */
9362 ARCH(5TE);
9363 rd = (insn >> 12) & 0xf;
9364 rn = (insn >> 16) & 0xf;
9365 tmp = load_reg(s, rm);
9366 tmp2 = load_reg(s, rn);
9367 if (op1 & 2)
9368 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9369 if (op1 & 1)
9370 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9371 else
9372 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
9373 tcg_temp_free_i32(tmp2);
9374 store_reg(s, rd, tmp);
9375 break;
9376 case 0x6: /* ERET */
9377 if (op1 != 3) {
9378 goto illegal_op;
9380 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
9381 goto illegal_op;
9383 if ((insn & 0x000fff0f) != 0x0000000e) {
9384 /* UNPREDICTABLE; we choose to UNDEF */
9385 goto illegal_op;
9388 if (s->current_el == 2) {
9389 tmp = load_cpu_field(elr_el[2]);
9390 } else {
9391 tmp = load_reg(s, 14);
9393 gen_exception_return(s, tmp);
9394 break;
9395 case 7:
9397 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
9398 switch (op1) {
9399 case 0:
9400 /* HLT */
9401 gen_hlt(s, imm16);
9402 break;
9403 case 1:
9404 /* bkpt */
9405 ARCH(5);
9406 gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
9407 break;
9408 case 2:
9409 /* Hypervisor call (v7) */
9410 ARCH(7);
9411 if (IS_USER(s)) {
9412 goto illegal_op;
9414 gen_hvc(s, imm16);
9415 break;
9416 case 3:
9417 /* Secure monitor call (v6+) */
9418 ARCH(6K);
9419 if (IS_USER(s)) {
9420 goto illegal_op;
9422 gen_smc(s);
9423 break;
9424 default:
9425 g_assert_not_reached();
9427 break;
9429 case 0x8: /* signed multiply */
9430 case 0xa:
9431 case 0xc:
9432 case 0xe:
9433 ARCH(5TE);
9434 rs = (insn >> 8) & 0xf;
9435 rn = (insn >> 12) & 0xf;
9436 rd = (insn >> 16) & 0xf;
9437 if (op1 == 1) {
9438 /* (32 * 16) >> 16 */
9439 tmp = load_reg(s, rm);
9440 tmp2 = load_reg(s, rs);
9441 if (sh & 4)
9442 tcg_gen_sari_i32(tmp2, tmp2, 16);
9443 else
9444 gen_sxth(tmp2);
9445 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9446 tcg_gen_shri_i64(tmp64, tmp64, 16);
9447 tmp = tcg_temp_new_i32();
9448 tcg_gen_extrl_i64_i32(tmp, tmp64);
9449 tcg_temp_free_i64(tmp64);
9450 if ((sh & 2) == 0) {
9451 tmp2 = load_reg(s, rn);
9452 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9453 tcg_temp_free_i32(tmp2);
9455 store_reg(s, rd, tmp);
9456 } else {
9457 /* 16 * 16 */
9458 tmp = load_reg(s, rm);
9459 tmp2 = load_reg(s, rs);
9460 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
9461 tcg_temp_free_i32(tmp2);
9462 if (op1 == 2) {
9463 tmp64 = tcg_temp_new_i64();
9464 tcg_gen_ext_i32_i64(tmp64, tmp);
9465 tcg_temp_free_i32(tmp);
9466 gen_addq(s, tmp64, rn, rd);
9467 gen_storeq_reg(s, rn, rd, tmp64);
9468 tcg_temp_free_i64(tmp64);
9469 } else {
9470 if (op1 == 0) {
9471 tmp2 = load_reg(s, rn);
9472 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9473 tcg_temp_free_i32(tmp2);
9475 store_reg(s, rd, tmp);
9478 break;
9479 default:
9480 goto illegal_op;
9482 } else if (((insn & 0x0e000000) == 0 &&
9483 (insn & 0x00000090) != 0x90) ||
9484 ((insn & 0x0e000000) == (1 << 25))) {
9485 int set_cc, logic_cc, shiftop;
9487 op1 = (insn >> 21) & 0xf;
9488 set_cc = (insn >> 20) & 1;
9489 logic_cc = table_logic_cc[op1] & set_cc;
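/* table_logic_cc (a lookup table defined earlier in this file) marks which
 * of the 16 data-processing opcodes set the flags by the logical rules
 * (N and Z from the result, C from the shifter carry-out) rather than from
 * an add/subtract.
 */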
9491 /* data processing instruction */
9492 if (insn & (1 << 25)) {
9493 /* immediate operand */
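/* A32 modified immediate: an 8-bit value rotated right by twice the 4-bit
 * rotate field; e.g. imm12 = 0x4FF is 0xFF rotated right by 8, giving
 * 0xFF000000.
 */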
9494 val = insn & 0xff;
9495 shift = ((insn >> 8) & 0xf) * 2;
9496 if (shift) {
9497 val = (val >> shift) | (val << (32 - shift));
9499 tmp2 = tcg_temp_new_i32();
9500 tcg_gen_movi_i32(tmp2, val);
9501 if (logic_cc && shift) {
9502 gen_set_CF_bit31(tmp2);
9504 } else {
9505 /* register */
9506 rm = (insn) & 0xf;
9507 tmp2 = load_reg(s, rm);
9508 shiftop = (insn >> 5) & 3;
9509 if (!(insn & (1 << 4))) {
9510 shift = (insn >> 7) & 0x1f;
9511 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9512 } else {
9513 rs = (insn >> 8) & 0xf;
9514 tmp = load_reg(s, rs);
9515 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9518 if (op1 != 0x0f && op1 != 0x0d) {
9519 rn = (insn >> 16) & 0xf;
9520 tmp = load_reg(s, rn);
9521 } else {
9522 tmp = NULL;
9524 rd = (insn >> 12) & 0xf;
9525 switch (op1) {
9526 case 0x00:
9527 tcg_gen_and_i32(tmp, tmp, tmp2);
9528 if (logic_cc) {
9529 gen_logic_CC(tmp);
9531 store_reg_bx(s, rd, tmp);
9532 break;
9533 case 0x01:
9534 tcg_gen_xor_i32(tmp, tmp, tmp2);
9535 if (logic_cc) {
9536 gen_logic_CC(tmp);
9538 store_reg_bx(s, rd, tmp);
9539 break;
9540 case 0x02:
9541 if (set_cc && rd == 15) {
9542 /* SUBS r15, ... is used for exception return. */
9543 if (IS_USER(s)) {
9544 goto illegal_op;
9546 gen_sub_CC(tmp, tmp, tmp2);
9547 gen_exception_return(s, tmp);
9548 } else {
9549 if (set_cc) {
9550 gen_sub_CC(tmp, tmp, tmp2);
9551 } else {
9552 tcg_gen_sub_i32(tmp, tmp, tmp2);
9554 store_reg_bx(s, rd, tmp);
9556 break;
9557 case 0x03:
9558 if (set_cc) {
9559 gen_sub_CC(tmp, tmp2, tmp);
9560 } else {
9561 tcg_gen_sub_i32(tmp, tmp2, tmp);
9563 store_reg_bx(s, rd, tmp);
9564 break;
9565 case 0x04:
9566 if (set_cc) {
9567 gen_add_CC(tmp, tmp, tmp2);
9568 } else {
9569 tcg_gen_add_i32(tmp, tmp, tmp2);
9571 store_reg_bx(s, rd, tmp);
9572 break;
9573 case 0x05:
9574 if (set_cc) {
9575 gen_adc_CC(tmp, tmp, tmp2);
9576 } else {
9577 gen_add_carry(tmp, tmp, tmp2);
9579 store_reg_bx(s, rd, tmp);
9580 break;
9581 case 0x06:
9582 if (set_cc) {
9583 gen_sbc_CC(tmp, tmp, tmp2);
9584 } else {
9585 gen_sub_carry(tmp, tmp, tmp2);
9587 store_reg_bx(s, rd, tmp);
9588 break;
9589 case 0x07:
9590 if (set_cc) {
9591 gen_sbc_CC(tmp, tmp2, tmp);
9592 } else {
9593 gen_sub_carry(tmp, tmp2, tmp);
9595 store_reg_bx(s, rd, tmp);
9596 break;
9597 case 0x08:
9598 if (set_cc) {
9599 tcg_gen_and_i32(tmp, tmp, tmp2);
9600 gen_logic_CC(tmp);
9602 tcg_temp_free_i32(tmp);
9603 break;
9604 case 0x09:
9605 if (set_cc) {
9606 tcg_gen_xor_i32(tmp, tmp, tmp2);
9607 gen_logic_CC(tmp);
9609 tcg_temp_free_i32(tmp);
9610 break;
9611 case 0x0a:
9612 if (set_cc) {
9613 gen_sub_CC(tmp, tmp, tmp2);
9615 tcg_temp_free_i32(tmp);
9616 break;
9617 case 0x0b:
9618 if (set_cc) {
9619 gen_add_CC(tmp, tmp, tmp2);
9621 tcg_temp_free_i32(tmp);
9622 break;
9623 case 0x0c:
9624 tcg_gen_or_i32(tmp, tmp, tmp2);
9625 if (logic_cc) {
9626 gen_logic_CC(tmp);
9628 store_reg_bx(s, rd, tmp);
9629 break;
9630 case 0x0d:
9631 if (logic_cc && rd == 15) {
9632 /* MOVS r15, ... is used for exception return. */
9633 if (IS_USER(s)) {
9634 goto illegal_op;
9636 gen_exception_return(s, tmp2);
9637 } else {
9638 if (logic_cc) {
9639 gen_logic_CC(tmp2);
9641 store_reg_bx(s, rd, tmp2);
9643 break;
9644 case 0x0e:
9645 tcg_gen_andc_i32(tmp, tmp, tmp2);
9646 if (logic_cc) {
9647 gen_logic_CC(tmp);
9649 store_reg_bx(s, rd, tmp);
9650 break;
9651 default:
9652 case 0x0f:
9653 tcg_gen_not_i32(tmp2, tmp2);
9654 if (logic_cc) {
9655 gen_logic_CC(tmp2);
9657 store_reg_bx(s, rd, tmp2);
9658 break;
9660 if (op1 != 0x0f && op1 != 0x0d) {
9661 tcg_temp_free_i32(tmp2);
9663 } else {
9664 /* other instructions */
9665 op1 = (insn >> 24) & 0xf;
9666 switch (op1) {
9667 case 0x0:
9668 case 0x1:
9669 /* multiplies, extra load/stores */
9670 sh = (insn >> 5) & 3;
9671 if (sh == 0) {
9672 if (op1 == 0x0) {
9673 rd = (insn >> 16) & 0xf;
9674 rn = (insn >> 12) & 0xf;
9675 rs = (insn >> 8) & 0xf;
9676 rm = (insn) & 0xf;
9677 op1 = (insn >> 20) & 0xf;
9678 switch (op1) {
9679 case 0: case 1: case 2: case 3: case 6:
9680 /* 32 bit mul */
9681 tmp = load_reg(s, rs);
9682 tmp2 = load_reg(s, rm);
9683 tcg_gen_mul_i32(tmp, tmp, tmp2);
9684 tcg_temp_free_i32(tmp2);
9685 if (insn & (1 << 22)) {
9686 /* Subtract (mls) */
9687 ARCH(6T2);
9688 tmp2 = load_reg(s, rn);
9689 tcg_gen_sub_i32(tmp, tmp2, tmp);
9690 tcg_temp_free_i32(tmp2);
9691 } else if (insn & (1 << 21)) {
9692 /* Add */
9693 tmp2 = load_reg(s, rn);
9694 tcg_gen_add_i32(tmp, tmp, tmp2);
9695 tcg_temp_free_i32(tmp2);
9697 if (insn & (1 << 20))
9698 gen_logic_CC(tmp);
9699 store_reg(s, rd, tmp);
9700 break;
9701 case 4:
9702 /* 64 bit mul double accumulate (UMAAL) */
9703 ARCH(6);
9704 tmp = load_reg(s, rs);
9705 tmp2 = load_reg(s, rm);
9706 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9707 gen_addq_lo(s, tmp64, rn);
9708 gen_addq_lo(s, tmp64, rd);
9709 gen_storeq_reg(s, rn, rd, tmp64);
9710 tcg_temp_free_i64(tmp64);
9711 break;
9712 case 8: case 9: case 10: case 11:
9713 case 12: case 13: case 14: case 15:
9714 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
9715 tmp = load_reg(s, rs);
9716 tmp2 = load_reg(s, rm);
9717 if (insn & (1 << 22)) {
9718 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
9719 } else {
9720 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
9722 if (insn & (1 << 21)) { /* mult accumulate */
9723 TCGv_i32 al = load_reg(s, rn);
9724 TCGv_i32 ah = load_reg(s, rd);
9725 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
9726 tcg_temp_free_i32(al);
9727 tcg_temp_free_i32(ah);
9729 if (insn & (1 << 20)) {
9730 gen_logicq_cc(tmp, tmp2);
9732 store_reg(s, rn, tmp);
9733 store_reg(s, rd, tmp2);
9734 break;
9735 default:
9736 goto illegal_op;
9738 } else {
9739 rn = (insn >> 16) & 0xf;
9740 rd = (insn >> 12) & 0xf;
9741 if (insn & (1 << 23)) {
9742 /* load/store exclusive */
9743 bool is_ld = extract32(insn, 20, 1);
9744 bool is_lasr = !extract32(insn, 8, 1);
9745 int op2 = (insn >> 8) & 3;
9746 op1 = (insn >> 21) & 0x3;
9748 switch (op2) {
9749 case 0: /* lda/stl */
9750 if (op1 == 1) {
9751 goto illegal_op;
9753 ARCH(8);
9754 break;
9755 case 1: /* reserved */
9756 goto illegal_op;
9757 case 2: /* ldaex/stlex */
9758 ARCH(8);
9759 break;
9760 case 3: /* ldrex/strex */
9761 if (op1) {
9762 ARCH(6K);
9763 } else {
9764 ARCH(6);
9766 break;
9769 addr = tcg_temp_local_new_i32();
9770 load_reg_var(s, addr, rn);
9772 if (is_lasr && !is_ld) {
9773 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
9776 if (op2 == 0) {
9777 if (is_ld) {
9778 tmp = tcg_temp_new_i32();
9779 switch (op1) {
9780 case 0: /* lda */
9781 gen_aa32_ld32u_iss(s, tmp, addr,
9782 get_mem_index(s),
9783 rd | ISSIsAcqRel);
9784 break;
9785 case 2: /* ldab */
9786 gen_aa32_ld8u_iss(s, tmp, addr,
9787 get_mem_index(s),
9788 rd | ISSIsAcqRel);
9789 break;
9790 case 3: /* ldah */
9791 gen_aa32_ld16u_iss(s, tmp, addr,
9792 get_mem_index(s),
9793 rd | ISSIsAcqRel);
9794 break;
9795 default:
9796 abort();
9798 store_reg(s, rd, tmp);
9799 } else {
9800 rm = insn & 0xf;
9801 tmp = load_reg(s, rm);
9802 switch (op1) {
9803 case 0: /* stl */
9804 gen_aa32_st32_iss(s, tmp, addr,
9805 get_mem_index(s),
9806 rm | ISSIsAcqRel);
9807 break;
9808 case 2: /* stlb */
9809 gen_aa32_st8_iss(s, tmp, addr,
9810 get_mem_index(s),
9811 rm | ISSIsAcqRel);
9812 break;
9813 case 3: /* stlh */
9814 gen_aa32_st16_iss(s, tmp, addr,
9815 get_mem_index(s),
9816 rm | ISSIsAcqRel);
9817 break;
9818 default:
9819 abort();
9821 tcg_temp_free_i32(tmp);
9823 } else if (is_ld) {
9824 switch (op1) {
9825 case 0: /* ldrex */
9826 gen_load_exclusive(s, rd, 15, addr, 2);
9827 break;
9828 case 1: /* ldrexd */
9829 gen_load_exclusive(s, rd, rd + 1, addr, 3);
9830 break;
9831 case 2: /* ldrexb */
9832 gen_load_exclusive(s, rd, 15, addr, 0);
9833 break;
9834 case 3: /* ldrexh */
9835 gen_load_exclusive(s, rd, 15, addr, 1);
9836 break;
9837 default:
9838 abort();
9840 } else {
9841 rm = insn & 0xf;
9842 switch (op1) {
9843 case 0: /* strex */
9844 gen_store_exclusive(s, rd, rm, 15, addr, 2);
9845 break;
9846 case 1: /* strexd */
9847 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
9848 break;
9849 case 2: /* strexb */
9850 gen_store_exclusive(s, rd, rm, 15, addr, 0);
9851 break;
9852 case 3: /* strexh */
9853 gen_store_exclusive(s, rd, rm, 15, addr, 1);
9854 break;
9855 default:
9856 abort();
9859 tcg_temp_free_i32(addr);
9861 if (is_lasr && is_ld) {
9862 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
9864 } else if ((insn & 0x00300f00) == 0) {
9865 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
9866 * - SWP, SWPB
9869 TCGv taddr;
9870 TCGMemOp opc = s->be_data;
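/* SWP/SWPB are modelled with a single atomic exchange, so the load and the
 * store of the memory location happen atomically even under parallel
 * (MTTCG) execution.
 */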
9872 rm = (insn) & 0xf;
9874 if (insn & (1 << 22)) {
9875 opc |= MO_UB;
9876 } else {
9877 opc |= MO_UL | MO_ALIGN;
9880 addr = load_reg(s, rn);
9881 taddr = gen_aa32_addr(s, addr, opc);
9882 tcg_temp_free_i32(addr);
9884 tmp = load_reg(s, rm);
9885 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
9886 get_mem_index(s), opc);
9887 tcg_temp_free(taddr);
9888 store_reg(s, rd, tmp);
9889 } else {
9890 goto illegal_op;
9893 } else {
9894 int address_offset;
9895 bool load = insn & (1 << 20);
9896 bool wbit = insn & (1 << 21);
9897 bool pbit = insn & (1 << 24);
9898 bool doubleword = false;
9899 ISSInfo issinfo;
9901 /* Misc load/store */
9902 rn = (insn >> 16) & 0xf;
9903 rd = (insn >> 12) & 0xf;
9905 /* ISS not valid if writeback */
9906 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
9908 if (!load && (sh & 2)) {
9909 /* doubleword */
9910 ARCH(5TE);
9911 if (rd & 1) {
9912 /* UNPREDICTABLE; we choose to UNDEF */
9913 goto illegal_op;
9915 load = (sh & 1) == 0;
9916 doubleword = true;
9919 addr = load_reg(s, rn);
9920 if (pbit) {
9921 gen_add_datah_offset(s, insn, 0, addr);
9923 address_offset = 0;
9925 if (doubleword) {
9926 if (!load) {
9927 /* store */
9928 tmp = load_reg(s, rd);
9929 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9930 tcg_temp_free_i32(tmp);
9931 tcg_gen_addi_i32(addr, addr, 4);
9932 tmp = load_reg(s, rd + 1);
9933 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9934 tcg_temp_free_i32(tmp);
9935 } else {
9936 /* load */
9937 tmp = tcg_temp_new_i32();
9938 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9939 store_reg(s, rd, tmp);
9940 tcg_gen_addi_i32(addr, addr, 4);
9941 tmp = tcg_temp_new_i32();
9942 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9943 rd++;
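/* The second loaded word is written to rd + 1 by the common 'Complete the
 * load' path below (hence the rd++); address_offset = -4 below cancels the
 * addr += 4 so that any base writeback uses the right address.
 */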
9945 address_offset = -4;
9946 } else if (load) {
9947 /* load */
9948 tmp = tcg_temp_new_i32();
9949 switch (sh) {
9950 case 1:
9951 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9952 issinfo);
9953 break;
9954 case 2:
9955 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9956 issinfo);
9957 break;
9958 default:
9959 case 3:
9960 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9961 issinfo);
9962 break;
9964 } else {
9965 /* store */
9966 tmp = load_reg(s, rd);
9967 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
9968 tcg_temp_free_i32(tmp);
9970 /* Perform base writeback before storing the loaded value, to
9971 ensure correct behavior with overlapping index registers;
9972 ldrd with base writeback is undefined if the
9973 destination and index registers overlap. */
9974 if (!pbit) {
9975 gen_add_datah_offset(s, insn, address_offset, addr);
9976 store_reg(s, rn, addr);
9977 } else if (wbit) {
9978 if (address_offset)
9979 tcg_gen_addi_i32(addr, addr, address_offset);
9980 store_reg(s, rn, addr);
9981 } else {
9982 tcg_temp_free_i32(addr);
9984 if (load) {
9985 /* Complete the load. */
9986 store_reg(s, rd, tmp);
9989 break;
9990 case 0x4:
9991 case 0x5:
9992 goto do_ldst;
9993 case 0x6:
9994 case 0x7:
9995 if (insn & (1 << 4)) {
9996 ARCH(6);
9997 /* Armv6 Media instructions. */
9998 rm = insn & 0xf;
9999 rn = (insn >> 16) & 0xf;
10000 rd = (insn >> 12) & 0xf;
10001 rs = (insn >> 8) & 0xf;
10002 switch ((insn >> 23) & 3) {
10003 case 0: /* Parallel add/subtract. */
10004 op1 = (insn >> 20) & 7;
10005 tmp = load_reg(s, rn);
10006 tmp2 = load_reg(s, rm);
10007 sh = (insn >> 5) & 7;
10008 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
10009 goto illegal_op;
10010 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
10011 tcg_temp_free_i32(tmp2);
10012 store_reg(s, rd, tmp);
10013 break;
10014 case 1:
10015 if ((insn & 0x00700020) == 0) {
10016 /* Halfword pack. */
10017 tmp = load_reg(s, rn);
10018 tmp2 = load_reg(s, rm);
10019 shift = (insn >> 7) & 0x1f;
10020 if (insn & (1 << 6)) {
10021 /* pkhtb */
10022 if (shift == 0)
10023 shift = 31;
10024 tcg_gen_sari_i32(tmp2, tmp2, shift);
10025 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
10026 tcg_gen_ext16u_i32(tmp2, tmp2);
10027 } else {
10028 /* pkhbt */
10029 if (shift)
10030 tcg_gen_shli_i32(tmp2, tmp2, shift);
10031 tcg_gen_ext16u_i32(tmp, tmp);
10032 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10034 tcg_gen_or_i32(tmp, tmp, tmp2);
10035 tcg_temp_free_i32(tmp2);
10036 store_reg(s, rd, tmp);
10037 } else if ((insn & 0x00200020) == 0x00200000) {
10038 /* [us]sat */
10039 tmp = load_reg(s, rm);
10040 shift = (insn >> 7) & 0x1f;
10041 if (insn & (1 << 6)) {
10042 if (shift == 0)
10043 shift = 31;
10044 tcg_gen_sari_i32(tmp, tmp, shift);
10045 } else {
10046 tcg_gen_shli_i32(tmp, tmp, shift);
10048 sh = (insn >> 16) & 0x1f;
10049 tmp2 = tcg_const_i32(sh);
10050 if (insn & (1 << 22))
10051 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
10052 else
10053 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
10054 tcg_temp_free_i32(tmp2);
10055 store_reg(s, rd, tmp);
10056 } else if ((insn & 0x00300fe0) == 0x00200f20) {
10057 /* [us]sat16 */
10058 tmp = load_reg(s, rm);
10059 sh = (insn >> 16) & 0x1f;
10060 tmp2 = tcg_const_i32(sh);
10061 if (insn & (1 << 22))
10062 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
10063 else
10064 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
10065 tcg_temp_free_i32(tmp2);
10066 store_reg(s, rd, tmp);
10067 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
10068 /* Select bytes. */
10069 tmp = load_reg(s, rn);
10070 tmp2 = load_reg(s, rm);
10071 tmp3 = tcg_temp_new_i32();
10072 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
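/* SEL: each byte of the result is picked from one of the two operands
 * according to the corresponding GE flag, which the helper takes via tmp3.
 */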
10073 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
10074 tcg_temp_free_i32(tmp3);
10075 tcg_temp_free_i32(tmp2);
10076 store_reg(s, rd, tmp);
10077 } else if ((insn & 0x000003e0) == 0x00000060) {
10078 tmp = load_reg(s, rm);
10079 shift = (insn >> 10) & 3;
10080 /* ??? In many cases it's not necessary to do a
10081 rotate; a shift would be sufficient. */
10082 if (shift != 0)
10083 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
10084 op1 = (insn >> 20) & 7;
10085 switch (op1) {
10086 case 0: gen_sxtb16(tmp); break;
10087 case 2: gen_sxtb(tmp); break;
10088 case 3: gen_sxth(tmp); break;
10089 case 4: gen_uxtb16(tmp); break;
10090 case 6: gen_uxtb(tmp); break;
10091 case 7: gen_uxth(tmp); break;
10092 default: goto illegal_op;
10094 if (rn != 15) {
10095 tmp2 = load_reg(s, rn);
10096 if ((op1 & 3) == 0) {
10097 gen_add16(tmp, tmp2);
10098 } else {
10099 tcg_gen_add_i32(tmp, tmp, tmp2);
10100 tcg_temp_free_i32(tmp2);
10103 store_reg(s, rd, tmp);
10104 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
10105 /* rev */
10106 tmp = load_reg(s, rm);
10107 if (insn & (1 << 22)) {
10108 if (insn & (1 << 7)) {
10109 gen_revsh(tmp);
10110 } else {
10111 ARCH(6T2);
10112 gen_helper_rbit(tmp, tmp);
10114 } else {
10115 if (insn & (1 << 7))
10116 gen_rev16(tmp);
10117 else
10118 tcg_gen_bswap32_i32(tmp, tmp);
10120 store_reg(s, rd, tmp);
10121 } else {
10122 goto illegal_op;
10124 break;
10125 case 2: /* Multiplies (Type 3). */
10126 switch ((insn >> 20) & 0x7) {
10127 case 5:
10128 if (((insn >> 6) ^ (insn >> 7)) & 1) {
10129 /* op2 not 00x or 11x : UNDEF */
10130 goto illegal_op;
10132 /* Signed multiply most significant [accumulate].
10133 (SMMUL, SMMLA, SMMLS) */
10134 tmp = load_reg(s, rm);
10135 tmp2 = load_reg(s, rs);
10136 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10138 if (rd != 15) {
10139 tmp = load_reg(s, rd);
10140 if (insn & (1 << 6)) {
10141 tmp64 = gen_subq_msw(tmp64, tmp);
10142 } else {
10143 tmp64 = gen_addq_msw(tmp64, tmp);
10146 if (insn & (1 << 5)) {
10147 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10149 tcg_gen_shri_i64(tmp64, tmp64, 32);
10150 tmp = tcg_temp_new_i32();
10151 tcg_gen_extrl_i64_i32(tmp, tmp64);
10152 tcg_temp_free_i64(tmp64);
10153 store_reg(s, rn, tmp);
10154 break;
10155 case 0:
10156 case 4:
10157 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
10158 if (insn & (1 << 7)) {
10159 goto illegal_op;
10161 tmp = load_reg(s, rm);
10162 tmp2 = load_reg(s, rs);
10163 if (insn & (1 << 5))
10164 gen_swap_half(tmp2);
10165 gen_smul_dual(tmp, tmp2);
10166 if (insn & (1 << 22)) {
10167 /* smlald, smlsld */
10168 TCGv_i64 tmp64_2;
10170 tmp64 = tcg_temp_new_i64();
10171 tmp64_2 = tcg_temp_new_i64();
10172 tcg_gen_ext_i32_i64(tmp64, tmp);
10173 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
10174 tcg_temp_free_i32(tmp);
10175 tcg_temp_free_i32(tmp2);
10176 if (insn & (1 << 6)) {
10177 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
10178 } else {
10179 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
10181 tcg_temp_free_i64(tmp64_2);
10182 gen_addq(s, tmp64, rd, rn);
10183 gen_storeq_reg(s, rd, rn, tmp64);
10184 tcg_temp_free_i64(tmp64);
10185 } else {
10186 /* smuad, smusd, smlad, smlsd */
10187 if (insn & (1 << 6)) {
10188 /* This subtraction cannot overflow. */
10189 tcg_gen_sub_i32(tmp, tmp, tmp2);
10190 } else {
10191 /* This addition cannot overflow 32 bits;
10192 * however, it may overflow when considered as a
10193 * signed operation, in which case we must set
10194 * the Q flag.
10196 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10198 tcg_temp_free_i32(tmp2);
10199 if (rd != 15)
10201 tmp2 = load_reg(s, rd);
10202 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10203 tcg_temp_free_i32(tmp2);
10205 store_reg(s, rn, tmp);
10207 break;
10208 case 1:
10209 case 3:
10210 /* SDIV, UDIV */
10211 if (!dc_isar_feature(arm_div, s)) {
10212 goto illegal_op;
10214 if (((insn >> 5) & 7) || (rd != 15)) {
10215 goto illegal_op;
10217 tmp = load_reg(s, rm);
10218 tmp2 = load_reg(s, rs);
10219 if (insn & (1 << 21)) {
10220 gen_helper_udiv(tmp, tmp, tmp2);
10221 } else {
10222 gen_helper_sdiv(tmp, tmp, tmp2);
10224 tcg_temp_free_i32(tmp2);
10225 store_reg(s, rn, tmp);
10226 break;
10227 default:
10228 goto illegal_op;
10230 break;
10231 case 3:
10232 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
10233 switch (op1) {
10234 case 0: /* Unsigned sum of absolute differences. */
10235 ARCH(6);
10236 tmp = load_reg(s, rm);
10237 tmp2 = load_reg(s, rs);
10238 gen_helper_usad8(tmp, tmp, tmp2);
10239 tcg_temp_free_i32(tmp2);
10240 if (rd != 15) {
10241 tmp2 = load_reg(s, rd);
10242 tcg_gen_add_i32(tmp, tmp, tmp2);
10243 tcg_temp_free_i32(tmp2);
10245 store_reg(s, rn, tmp);
10246 break;
10247 case 0x20: case 0x24: case 0x28: case 0x2c:
10248 /* Bitfield insert/clear. */
10249 ARCH(6T2);
10250 shift = (insn >> 7) & 0x1f;
10251 i = (insn >> 16) & 0x1f;
10252 if (i < shift) {
10253 /* UNPREDICTABLE; we choose to UNDEF */
10254 goto illegal_op;
10256 i = i + 1 - shift;
10257 if (rm == 15) {
10258 tmp = tcg_temp_new_i32();
10259 tcg_gen_movi_i32(tmp, 0);
10260 } else {
10261 tmp = load_reg(s, rm);
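/* BFI: deposit the low 'i' bits of tmp into Rd at bit position 'shift';
 * with rm == 15 the zeroed source gives BFC. When i == 32 the whole
 * register is replaced, so the deposit is skipped.
 */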
10263 if (i != 32) {
10264 tmp2 = load_reg(s, rd);
10265 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
10266 tcg_temp_free_i32(tmp2);
10268 store_reg(s, rd, tmp);
10269 break;
10270 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
10271 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
10272 ARCH(6T2);
10273 tmp = load_reg(s, rm);
10274 shift = (insn >> 7) & 0x1f;
10275 i = ((insn >> 16) & 0x1f) + 1;
10276 if (shift + i > 32)
10277 goto illegal_op;
10278 if (i < 32) {
10279 if (op1 & 0x20) {
10280 tcg_gen_extract_i32(tmp, tmp, shift, i);
10281 } else {
10282 tcg_gen_sextract_i32(tmp, tmp, shift, i);
10285 store_reg(s, rd, tmp);
10286 break;
10287 default:
10288 goto illegal_op;
10290 break;
10292 break;
10294 do_ldst:
10295 /* Check for undefined extension instructions
10296 * per the ARM ARM (architecture reference manual), i.e.:
10297 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
10299 sh = (0xf << 20) | (0xf << 4);
10300 if (op1 == 0x7 && ((insn & sh) == sh))
10302 goto illegal_op;
10304 /* load/store byte/word */
10305 rn = (insn >> 16) & 0xf;
10306 rd = (insn >> 12) & 0xf;
10307 tmp2 = load_reg(s, rn);
10308 if ((insn & 0x01200000) == 0x00200000) {
10309 /* ldrt/strt */
10310 i = get_a32_user_mem_index(s);
10311 } else {
10312 i = get_mem_index(s);
10314 if (insn & (1 << 24))
10315 gen_add_data_offset(s, insn, tmp2);
10316 if (insn & (1 << 20)) {
10317 /* load */
10318 tmp = tcg_temp_new_i32();
10319 if (insn & (1 << 22)) {
10320 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
10321 } else {
10322 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
10324 } else {
10325 /* store */
10326 tmp = load_reg(s, rd);
10327 if (insn & (1 << 22)) {
10328 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
10329 } else {
10330 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
10332 tcg_temp_free_i32(tmp);
10334 if (!(insn & (1 << 24))) {
10335 gen_add_data_offset(s, insn, tmp2);
10336 store_reg(s, rn, tmp2);
10337 } else if (insn & (1 << 21)) {
10338 store_reg(s, rn, tmp2);
10339 } else {
10340 tcg_temp_free_i32(tmp2);
10342 if (insn & (1 << 20)) {
10343 /* Complete the load. */
10344 store_reg_from_load(s, rd, tmp);
10346 break;
10347 case 0x08:
10348 case 0x09:
10350 int j, n, loaded_base;
10351 bool exc_return = false;
10352 bool is_load = extract32(insn, 20, 1);
10353 bool user = false;
10354 TCGv_i32 loaded_var;
10355 /* load/store multiple words */
10356 /* XXX: store correct base if write back */
10357 if (insn & (1 << 22)) {
10358 /* LDM (user), LDM (exception return) and STM (user) */
10359 if (IS_USER(s))
10360 goto illegal_op; /* only usable in supervisor mode */
10362 if (is_load && extract32(insn, 15, 1)) {
10363 exc_return = true;
10364 } else {
10365 user = true;
10368 rn = (insn >> 16) & 0xf;
10369 addr = load_reg(s, rn);
10371 /* compute total size */
10372 loaded_base = 0;
10373 loaded_var = NULL;
10374 n = 0;
10375 for (i = 0; i < 16; i++) {
10376 if (insn & (1 << i))
10377 n++;
10379 /* XXX: test invalid n == 0 case ? */
10380 if (insn & (1 << 23)) {
10381 if (insn & (1 << 24)) {
10382 /* pre increment */
10383 tcg_gen_addi_i32(addr, addr, 4);
10384 } else {
10385 /* post increment */
10387 } else {
10388 if (insn & (1 << 24)) {
10389 /* pre decrement */
10390 tcg_gen_addi_i32(addr, addr, -(n * 4));
10391 } else {
10392 /* post decrement */
10393 if (n != 1)
10394 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
10397 j = 0;
10398 for (i = 0; i < 16; i++) {
10399 if (insn & (1 << i)) {
10400 if (is_load) {
10401 /* load */
10402 tmp = tcg_temp_new_i32();
10403 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10404 if (user) {
10405 tmp2 = tcg_const_i32(i);
10406 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
10407 tcg_temp_free_i32(tmp2);
10408 tcg_temp_free_i32(tmp);
10409 } else if (i == rn) {
10410 loaded_var = tmp;
10411 loaded_base = 1;
10412 } else if (rn == 15 && exc_return) {
10413 store_pc_exc_ret(s, tmp);
10414 } else {
10415 store_reg_from_load(s, i, tmp);
10417 } else {
10418 /* store */
10419 if (i == 15) {
10420 /* special case: r15 = PC + 8 */
10421 val = (long)s->pc + 4;
10422 tmp = tcg_temp_new_i32();
10423 tcg_gen_movi_i32(tmp, val);
10424 } else if (user) {
10425 tmp = tcg_temp_new_i32();
10426 tmp2 = tcg_const_i32(i);
10427 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
10428 tcg_temp_free_i32(tmp2);
10429 } else {
10430 tmp = load_reg(s, i);
10432 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
10433 tcg_temp_free_i32(tmp);
10435 j++;
10436 /* no need to add after the last transfer */
10437 if (j != n)
10438 tcg_gen_addi_i32(addr, addr, 4);
10441 if (insn & (1 << 21)) {
10442 /* write back */
10443 if (insn & (1 << 23)) {
10444 if (insn & (1 << 24)) {
10445 /* pre increment */
10446 } else {
10447 /* post increment */
10448 tcg_gen_addi_i32(addr, addr, 4);
10450 } else {
10451 if (insn & (1 << 24)) {
10452 /* pre decrement */
10453 if (n != 1)
10454 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
10455 } else {
10456 /* post decrement */
10457 tcg_gen_addi_i32(addr, addr, -(n * 4));
10460 store_reg(s, rn, addr);
10461 } else {
10462 tcg_temp_free_i32(addr);
10464 if (loaded_base) {
10465 store_reg(s, rn, loaded_var);
10467 if (exc_return) {
10468 /* Restore CPSR from SPSR. */
10469 tmp = load_cpu_field(spsr);
10470 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10471 gen_io_start();
10473 gen_helper_cpsr_write_eret(cpu_env, tmp);
10474 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10475 gen_io_end();
10477 tcg_temp_free_i32(tmp);
10478 /* Must exit loop to check un-masked IRQs */
10479 s->base.is_jmp = DISAS_EXIT;
10482 break;
10483 case 0xa:
10484 case 0xb:
10486 int32_t offset;
10488 /* branch (and link) */
10489 val = (int32_t)s->pc;
10490 if (insn & (1 << 24)) {
10491 tmp = tcg_temp_new_i32();
10492 tcg_gen_movi_i32(tmp, val);
10493 store_reg(s, 14, tmp);
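/* The branch offset is the signed 24-bit immediate times 4; the extra +4
 * below accounts for s->pc pointing 4 bytes past this insn while the
 * architectural PC reads as the insn address plus 8.
 */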
10495 offset = sextract32(insn << 2, 0, 26);
10496 val += offset + 4;
10497 gen_jmp(s, val);
10499 break;
10500 case 0xc:
10501 case 0xd:
10502 case 0xe:
10503 if (((insn >> 8) & 0xe) == 0xa) {
10504 /* VFP. */
10505 if (disas_vfp_insn(s, insn)) {
10506 goto illegal_op;
10508 } else if (disas_coproc_insn(s, insn)) {
10509 /* Coprocessor. */
10510 goto illegal_op;
10512 break;
10513 case 0xf:
10514 /* swi */
10515 gen_set_pc_im(s, s->pc);
10516 s->svc_imm = extract32(insn, 0, 24);
10517 s->base.is_jmp = DISAS_SWI;
10518 break;
10519 default:
10520 illegal_op:
10521 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
10522 default_exception_el(s));
10523 break;
10528 static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
10530 /* Return true if this is a 16 bit instruction. We must be precise
10531 * about this (matching the decode). We assume that s->pc still
10532 * points to the first 16 bits of the insn.
10534 if ((insn >> 11) < 0x1d) {
10535 /* Definitely a 16-bit instruction */
10536 return true;
10539 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
10540 * first half of a 32-bit Thumb insn. Thumb-1 cores might
10541 * end up actually treating this as two 16-bit insns, though,
10542 * if it's half of a bl/blx pair that might span a page boundary.
10544 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
10545 arm_dc_feature(s, ARM_FEATURE_M)) {
10546 /* Thumb2 cores (including all M profile ones) always treat
10547 * 32-bit insns as 32-bit.
10549 return false;
10552 if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
10553 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
10554 * is not on the next page; we merge this into a 32-bit
10555 * insn.
10557 return false;
10559 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
10560 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
10561 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
10562 * -- handle as single 16 bit insn
10564 return true;
10567 /* Return true if this is a Thumb-2 logical op. */
10568 static int
10569 thumb2_logic_op(int op)
10571 return (op < 8);
10574 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
10575 then set condition code flags based on the result of the operation.
10576 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
10577 to the high bit of T1.
10578 Returns zero if the opcode is valid. */
10580 static int
10581 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
10582 TCGv_i32 t0, TCGv_i32 t1)
10584 int logic_cc;
10586 logic_cc = 0;
10587 switch (op) {
10588 case 0: /* and */
10589 tcg_gen_and_i32(t0, t0, t1);
10590 logic_cc = conds;
10591 break;
10592 case 1: /* bic */
10593 tcg_gen_andc_i32(t0, t0, t1);
10594 logic_cc = conds;
10595 break;
10596 case 2: /* orr */
10597 tcg_gen_or_i32(t0, t0, t1);
10598 logic_cc = conds;
10599 break;
10600 case 3: /* orn */
10601 tcg_gen_orc_i32(t0, t0, t1);
10602 logic_cc = conds;
10603 break;
10604 case 4: /* eor */
10605 tcg_gen_xor_i32(t0, t0, t1);
10606 logic_cc = conds;
10607 break;
10608 case 8: /* add */
10609 if (conds)
10610 gen_add_CC(t0, t0, t1);
10611 else
10612 tcg_gen_add_i32(t0, t0, t1);
10613 break;
10614 case 10: /* adc */
10615 if (conds)
10616 gen_adc_CC(t0, t0, t1);
10617 else
10618 gen_adc(t0, t1);
10619 break;
10620 case 11: /* sbc */
10621 if (conds) {
10622 gen_sbc_CC(t0, t0, t1);
10623 } else {
10624 gen_sub_carry(t0, t0, t1);
10626 break;
10627 case 13: /* sub */
10628 if (conds)
10629 gen_sub_CC(t0, t0, t1);
10630 else
10631 tcg_gen_sub_i32(t0, t0, t1);
10632 break;
10633 case 14: /* rsb */
10634 if (conds)
10635 gen_sub_CC(t0, t1, t0);
10636 else
10637 tcg_gen_sub_i32(t0, t1, t0);
10638 break;
10639 default: /* 5, 6, 7, 9, 12, 15. */
10640 return 1;
10642 if (logic_cc) {
10643 gen_logic_CC(t0);
10644 if (shifter_out)
10645 gen_set_CF_bit31(t1);
10647 return 0;
10650 /* Translate a 32-bit thumb instruction. */
10651 static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
10653 uint32_t imm, shift, offset;
10654 uint32_t rd, rn, rm, rs;
10655 TCGv_i32 tmp;
10656 TCGv_i32 tmp2;
10657 TCGv_i32 tmp3;
10658 TCGv_i32 addr;
10659 TCGv_i64 tmp64;
10660 int op;
10661 int shiftop;
10662 int conds;
10663 int logic_cc;
10666 * ARMv6-M supports a limited subset of Thumb2 instructions.
10667 * Other Thumb1 architectures allow only 32-bit
10668 * combined BL/BLX prefix and suffix.
10670 if (arm_dc_feature(s, ARM_FEATURE_M) &&
10671 !arm_dc_feature(s, ARM_FEATURE_V7)) {
10672 int i;
10673 bool found = false;
10674 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
10675 0xf3b08040 /* dsb */,
10676 0xf3b08050 /* dmb */,
10677 0xf3b08060 /* isb */,
10678 0xf3e08000 /* mrs */,
10679 0xf000d000 /* bl */};
10680 static const uint32_t armv6m_mask[] = {0xffe0d000,
10681 0xfff0d0f0,
10682 0xfff0d0f0,
10683 0xfff0d0f0,
10684 0xffe0d000,
10685 0xf800d000};
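/* Each insn/mask pair above selects one of the few 32-bit encodings that
 * ARMv6-M implements (MSR, DSB/DMB/ISB, MRS and BL); any other 32-bit
 * encoding falls through to the UNDEF below.
 */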
10687 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
10688 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
10689 found = true;
10690 break;
10693 if (!found) {
10694 goto illegal_op;
10696 } else if ((insn & 0xf800e800) != 0xf000e800) {
10697 ARCH(6T2);
10700 rn = (insn >> 16) & 0xf;
10701 rs = (insn >> 12) & 0xf;
10702 rd = (insn >> 8) & 0xf;
10703 rm = insn & 0xf;
10704 switch ((insn >> 25) & 0xf) {
10705 case 0: case 1: case 2: case 3:
10706 /* 16-bit instructions. Should never happen. */
10707 abort();
10708 case 4:
10709 if (insn & (1 << 22)) {
10710 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
10711 * - load/store doubleword, load/store exclusive, load-acquire/store-release,
10712 * table branch, TT.
10714 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
10715 arm_dc_feature(s, ARM_FEATURE_V8)) {
10716 /* 0b1110_1001_0111_1111_1110_1001_0111_1111
10717 * - SG (v8M only)
10718 * The bulk of the behaviour for this instruction is implemented
10719 * in v7m_handle_execute_nsc(), which deals with the insn when
10720 * it is executed by a CPU in non-secure state from memory
10721 * which is Secure & NonSecure-Callable.
10722 * Here we only need to handle the remaining cases:
10723 * * in NS memory (including the "security extension not
10724 * implemented" case) : NOP
10725 * * in S memory but CPU already secure (clear IT bits)
10726 * We know that the attribute for the memory this insn is
10727 * in must match the current CPU state, because otherwise
10728 * get_phys_addr_pmsav8 would have generated an exception.
10730 if (s->v8m_secure) {
10731 /* Like the IT insn, we don't need to generate any code */
10732 s->condexec_cond = 0;
10733 s->condexec_mask = 0;
10735 } else if (insn & 0x01200000) {
10736 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10737 * - load/store dual (post-indexed)
10738 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
10739 * - load/store dual (literal and immediate)
10740 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10741 * - load/store dual (pre-indexed)
10743 bool wback = extract32(insn, 21, 1);
10745 if (rn == 15) {
10746 if (insn & (1 << 21)) {
10747 /* UNPREDICTABLE */
10748 goto illegal_op;
10750 addr = tcg_temp_new_i32();
10751 tcg_gen_movi_i32(addr, s->pc & ~3);
10752 } else {
10753 addr = load_reg(s, rn);
10755 offset = (insn & 0xff) * 4;
10756 if ((insn & (1 << 23)) == 0) {
10757 offset = -offset;
10760 if (s->v8m_stackcheck && rn == 13 && wback) {
10762 * Here 'addr' is the current SP; if offset is +ve we're
10763 * moving SP up, else down. It is UNKNOWN whether the limit
10764 * check triggers when SP starts below the limit and ends
10765 * up above it; check whichever of the current and final
10766 * SP is lower, so QEMU will trigger in that situation.
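/* (gen_helper_v8m_stackcheck() raises the v8M stack-overflow (STKOF)
 * UsageFault if the value passed is below the current stack pointer
 * limit, so the memory accesses below are not reached in that case.) */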
10768 if ((int32_t)offset < 0) {
10769 TCGv_i32 newsp = tcg_temp_new_i32();
10771 tcg_gen_addi_i32(newsp, addr, offset);
10772 gen_helper_v8m_stackcheck(cpu_env, newsp);
10773 tcg_temp_free_i32(newsp);
10774 } else {
10775 gen_helper_v8m_stackcheck(cpu_env, addr);
10779 if (insn & (1 << 24)) {
10780 tcg_gen_addi_i32(addr, addr, offset);
10781 offset = 0;
10783 if (insn & (1 << 20)) {
10784 /* ldrd */
10785 tmp = tcg_temp_new_i32();
10786 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10787 store_reg(s, rs, tmp);
10788 tcg_gen_addi_i32(addr, addr, 4);
10789 tmp = tcg_temp_new_i32();
10790 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10791 store_reg(s, rd, tmp);
10792 } else {
10793 /* strd */
10794 tmp = load_reg(s, rs);
10795 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
10796 tcg_temp_free_i32(tmp);
10797 tcg_gen_addi_i32(addr, addr, 4);
10798 tmp = load_reg(s, rd);
10799 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
10800 tcg_temp_free_i32(tmp);
10802 if (wback) {
10803 /* Base writeback. */
10804 tcg_gen_addi_i32(addr, addr, offset - 4);
10805 store_reg(s, rn, addr);
10806 } else {
10807 tcg_temp_free_i32(addr);
10809 } else if ((insn & (1 << 23)) == 0) {
10810 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
10811 * - load/store exclusive word
10812 * - TT (v8M only)
10814 if (rs == 15) {
10815 if (!(insn & (1 << 20)) &&
10816 arm_dc_feature(s, ARM_FEATURE_M) &&
10817 arm_dc_feature(s, ARM_FEATURE_V8)) {
10818 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
10819 * - TT (v8M only)
10821 bool alt = insn & (1 << 7);
10822 TCGv_i32 addr, op, ttresp;
10824 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
10825 /* we UNDEF for these UNPREDICTABLE cases */
10826 goto illegal_op;
10829 if (alt && !s->v8m_secure) {
10830 goto illegal_op;
10833 addr = load_reg(s, rn);
10834 op = tcg_const_i32(extract32(insn, 6, 2));
10835 ttresp = tcg_temp_new_i32();
10836 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
10837 tcg_temp_free_i32(addr);
10838 tcg_temp_free_i32(op);
10839 store_reg(s, rd, ttresp);
10840 break;
10842 goto illegal_op;
10844 addr = tcg_temp_local_new_i32();
10845 load_reg_var(s, addr, rn);
10846 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
10847 if (insn & (1 << 20)) {
10848 gen_load_exclusive(s, rs, 15, addr, 2);
10849 } else {
10850 gen_store_exclusive(s, rd, rs, 15, addr, 2);
10852 tcg_temp_free_i32(addr);
10853 } else if ((insn & (7 << 5)) == 0) {
10854 /* Table Branch. */
10855 if (rn == 15) {
10856 addr = tcg_temp_new_i32();
10857 tcg_gen_movi_i32(addr, s->pc);
10858 } else {
10859 addr = load_reg(s, rn);
10861 tmp = load_reg(s, rm);
10862 tcg_gen_add_i32(addr, addr, tmp);
10863 if (insn & (1 << 4)) {
10864 /* tbh */
10865 tcg_gen_add_i32(addr, addr, tmp);
10866 tcg_temp_free_i32(tmp);
10867 tmp = tcg_temp_new_i32();
10868 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
10869 } else { /* tbb */
10870 tcg_temp_free_i32(tmp);
10871 tmp = tcg_temp_new_i32();
10872 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
10874 tcg_temp_free_i32(addr);
10875 tcg_gen_shli_i32(tmp, tmp, 1);
10876 tcg_gen_addi_i32(tmp, tmp, s->pc);
10877 store_reg(s, 15, tmp);
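/* Worked example: for TBB with a table entry of 0x20 the code above
 * loads 0x20, doubles it and adds it to s->pc (the address of the
 * TB{B,H} insn plus 4), so the branch target is PC + 0x40.  TBH does
 * the same with halfword-sized table entries. */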
10878 } else {
10879 bool is_lasr = false;
10880 bool is_ld = extract32(insn, 20, 1);
10881 int op2 = (insn >> 6) & 0x3;
10882 op = (insn >> 4) & 0x3;
10883 switch (op2) {
10884 case 0:
10885 goto illegal_op;
10886 case 1:
10887 /* Load/store exclusive byte/halfword/doubleword */
10888 if (op == 2) {
10889 goto illegal_op;
10891 ARCH(7);
10892 break;
10893 case 2:
10894 /* Load-acquire/store-release */
10895 if (op == 3) {
10896 goto illegal_op;
10898 /* Fall through */
10899 case 3:
10900 /* Load-acquire/store-release exclusive */
10901 ARCH(8);
10902 is_lasr = true;
10903 break;
10906 if (is_lasr && !is_ld) {
10907 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
10910 addr = tcg_temp_local_new_i32();
10911 load_reg_var(s, addr, rn);
10912 if (!(op2 & 1)) {
10913 if (is_ld) {
10914 tmp = tcg_temp_new_i32();
10915 switch (op) {
10916 case 0: /* ldab */
10917 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
10918 rs | ISSIsAcqRel);
10919 break;
10920 case 1: /* ldah */
10921 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
10922 rs | ISSIsAcqRel);
10923 break;
10924 case 2: /* lda */
10925 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
10926 rs | ISSIsAcqRel);
10927 break;
10928 default:
10929 abort();
10931 store_reg(s, rs, tmp);
10932 } else {
10933 tmp = load_reg(s, rs);
10934 switch (op) {
10935 case 0: /* stlb */
10936 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
10937 rs | ISSIsAcqRel);
10938 break;
10939 case 1: /* stlh */
10940 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
10941 rs | ISSIsAcqRel);
10942 break;
10943 case 2: /* stl */
10944 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
10945 rs | ISSIsAcqRel);
10946 break;
10947 default:
10948 abort();
10950 tcg_temp_free_i32(tmp);
10952 } else if (is_ld) {
10953 gen_load_exclusive(s, rs, rd, addr, op);
10954 } else {
10955 gen_store_exclusive(s, rm, rs, rd, addr, op);
10957 tcg_temp_free_i32(addr);
10959 if (is_lasr && is_ld) {
10960 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
10963 } else {
10964 /* Load/store multiple, RFE, SRS. */
10965 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
10966 /* RFE, SRS: not available in user mode or on M profile */
10967 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
10968 goto illegal_op;
10970 if (insn & (1 << 20)) {
10971 /* rfe */
10972 addr = load_reg(s, rn);
10973 if ((insn & (1 << 24)) == 0)
10974 tcg_gen_addi_i32(addr, addr, -8);
10975 /* Load PC into tmp and CPSR into tmp2. */
10976 tmp = tcg_temp_new_i32();
10977 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10978 tcg_gen_addi_i32(addr, addr, 4);
10979 tmp2 = tcg_temp_new_i32();
10980 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
10981 if (insn & (1 << 21)) {
10982 /* Base writeback. */
10983 if (insn & (1 << 24)) {
10984 tcg_gen_addi_i32(addr, addr, 4);
10985 } else {
10986 tcg_gen_addi_i32(addr, addr, -4);
10988 store_reg(s, rn, addr);
10989 } else {
10990 tcg_temp_free_i32(addr);
10992 gen_rfe(s, tmp, tmp2);
10993 } else {
10994 /* srs */
10995 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
10996 insn & (1 << 21));
10998 } else {
10999 int i, loaded_base = 0;
11000 TCGv_i32 loaded_var;
11001 bool wback = extract32(insn, 21, 1);
11002 /* Load/store multiple. */
11003 addr = load_reg(s, rn);
11004 offset = 0;
11005 for (i = 0; i < 16; i++) {
11006 if (insn & (1 << i))
11007 offset += 4;
11010 if (insn & (1 << 24)) {
11011 tcg_gen_addi_i32(addr, addr, -offset);
11014 if (s->v8m_stackcheck && rn == 13 && wback) {
11016 * If the writeback is incrementing SP rather than
11017 * decrementing it, and the initial SP is below the
11018 * stack limit but the final written-back SP would
11019 * be above, then we must not perform any memory
11020 * accesses, but it is IMPDEF whether we generate
11021 * an exception. We choose to do so in this case.
11022 * At this point 'addr' is the lowest address, so
11023 * either the original SP (if incrementing) or our
11024 * final SP (if decrementing), so that's what we check.
11026 gen_helper_v8m_stackcheck(cpu_env, addr);
11029 loaded_var = NULL;
11030 for (i = 0; i < 16; i++) {
11031 if ((insn & (1 << i)) == 0)
11032 continue;
11033 if (insn & (1 << 20)) {
11034 /* Load. */
11035 tmp = tcg_temp_new_i32();
11036 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11037 if (i == 15) {
11038 gen_bx_excret(s, tmp);
11039 } else if (i == rn) {
11040 loaded_var = tmp;
11041 loaded_base = 1;
11042 } else {
11043 store_reg(s, i, tmp);
11045 } else {
11046 /* Store. */
11047 tmp = load_reg(s, i);
11048 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11049 tcg_temp_free_i32(tmp);
11051 tcg_gen_addi_i32(addr, addr, 4);
11053 if (loaded_base) {
11054 store_reg(s, rn, loaded_var);
11056 if (wback) {
11057 /* Base register writeback. */
11058 if (insn & (1 << 24)) {
11059 tcg_gen_addi_i32(addr, addr, -offset);
11061 /* Fault if writeback register is in register list. */
11062 if (insn & (1 << rn))
11063 goto illegal_op;
11064 store_reg(s, rn, addr);
11065 } else {
11066 tcg_temp_free_i32(addr);
11070 break;
11071 case 5:
11073 op = (insn >> 21) & 0xf;
11074 if (op == 6) {
11075 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11076 goto illegal_op;
11078 /* Halfword pack. */
11079 tmp = load_reg(s, rn);
11080 tmp2 = load_reg(s, rm);
11081 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
11082 if (insn & (1 << 5)) {
11083 /* pkhtb */
11084 if (shift == 0)
11085 shift = 31;
11086 tcg_gen_sari_i32(tmp2, tmp2, shift);
11087 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
11088 tcg_gen_ext16u_i32(tmp2, tmp2);
11089 } else {
11090 /* pkhbt */
11091 if (shift)
11092 tcg_gen_shli_i32(tmp2, tmp2, shift);
11093 tcg_gen_ext16u_i32(tmp, tmp);
11094 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
11096 tcg_gen_or_i32(tmp, tmp, tmp2);
11097 tcg_temp_free_i32(tmp2);
11098 store_reg(s, rd, tmp);
11099 } else {
11100 /* Data processing register constant shift. */
11101 if (rn == 15) {
11102 tmp = tcg_temp_new_i32();
11103 tcg_gen_movi_i32(tmp, 0);
11104 } else {
11105 tmp = load_reg(s, rn);
11107 tmp2 = load_reg(s, rm);
11109 shiftop = (insn >> 4) & 3;
11110 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
11111 conds = (insn & (1 << 20)) != 0;
11112 logic_cc = (conds && thumb2_logic_op(op));
11113 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
11114 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
11115 goto illegal_op;
11116 tcg_temp_free_i32(tmp2);
11117 if (rd == 13 &&
11118 ((op == 2 && rn == 15) ||
11119 (op == 8 && rn == 13) ||
11120 (op == 13 && rn == 13))) {
11121 /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
11122 store_sp_checked(s, tmp);
11123 } else if (rd != 15) {
11124 store_reg(s, rd, tmp);
11125 } else {
11126 tcg_temp_free_i32(tmp);
11129 break;
11130 case 13: /* Misc data processing. */
11131 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
11132 if (op < 4 && (insn & 0xf000) != 0xf000)
11133 goto illegal_op;
11134 switch (op) {
11135 case 0: /* Register controlled shift. */
11136 tmp = load_reg(s, rn);
11137 tmp2 = load_reg(s, rm);
11138 if ((insn & 0x70) != 0)
11139 goto illegal_op;
11141 * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
11142 * - MOV, MOVS (register-shifted register), flagsetting
11144 op = (insn >> 21) & 3;
11145 logic_cc = (insn & (1 << 20)) != 0;
11146 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
11147 if (logic_cc)
11148 gen_logic_CC(tmp);
11149 store_reg(s, rd, tmp);
11150 break;
11151 case 1: /* Sign/zero extend. */
11152 op = (insn >> 20) & 7;
11153 switch (op) {
11154 case 0: /* SXTAH, SXTH */
11155 case 1: /* UXTAH, UXTH */
11156 case 4: /* SXTAB, SXTB */
11157 case 5: /* UXTAB, UXTB */
11158 break;
11159 case 2: /* SXTAB16, SXTB16 */
11160 case 3: /* UXTAB16, UXTB16 */
11161 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11162 goto illegal_op;
11164 break;
11165 default:
11166 goto illegal_op;
11168 if (rn != 15) {
11169 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11170 goto illegal_op;
11173 tmp = load_reg(s, rm);
11174 shift = (insn >> 4) & 3;
11175 /* ??? In many cases it's not necessary to do a
11176 rotate, a shift is sufficient. */
11177 if (shift != 0)
11178 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
11179 op = (insn >> 20) & 7;
11180 switch (op) {
11181 case 0: gen_sxth(tmp); break;
11182 case 1: gen_uxth(tmp); break;
11183 case 2: gen_sxtb16(tmp); break;
11184 case 3: gen_uxtb16(tmp); break;
11185 case 4: gen_sxtb(tmp); break;
11186 case 5: gen_uxtb(tmp); break;
11187 default:
11188 g_assert_not_reached();
11190 if (rn != 15) {
11191 tmp2 = load_reg(s, rn);
11192 if ((op >> 1) == 1) {
11193 gen_add16(tmp, tmp2);
11194 } else {
11195 tcg_gen_add_i32(tmp, tmp, tmp2);
11196 tcg_temp_free_i32(tmp2);
11199 store_reg(s, rd, tmp);
11200 break;
11201 case 2: /* SIMD add/subtract. */
11202 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11203 goto illegal_op;
11205 op = (insn >> 20) & 7;
11206 shift = (insn >> 4) & 7;
11207 if ((op & 3) == 3 || (shift & 3) == 3)
11208 goto illegal_op;
11209 tmp = load_reg(s, rn);
11210 tmp2 = load_reg(s, rm);
11211 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
11212 tcg_temp_free_i32(tmp2);
11213 store_reg(s, rd, tmp);
11214 break;
11215 case 3: /* Other data processing. */
11216 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
11217 if (op < 4) {
11218 /* Saturating add/subtract. */
11219 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11220 goto illegal_op;
11222 tmp = load_reg(s, rn);
11223 tmp2 = load_reg(s, rm);
11224 if (op & 1)
11225 gen_helper_double_saturate(tmp, cpu_env, tmp);
11226 if (op & 2)
11227 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
11228 else
11229 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
11230 tcg_temp_free_i32(tmp2);
11231 } else {
11232 switch (op) {
11233 case 0x0a: /* rbit */
11234 case 0x08: /* rev */
11235 case 0x09: /* rev16 */
11236 case 0x0b: /* revsh */
11237 case 0x18: /* clz */
11238 break;
11239 case 0x10: /* sel */
11240 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11241 goto illegal_op;
11243 break;
11244 case 0x20: /* crc32/crc32c */
11245 case 0x21:
11246 case 0x22:
11247 case 0x28:
11248 case 0x29:
11249 case 0x2a:
11250 if (!dc_isar_feature(aa32_crc32, s)) {
11251 goto illegal_op;
11253 break;
11254 default:
11255 goto illegal_op;
11257 tmp = load_reg(s, rn);
11258 switch (op) {
11259 case 0x0a: /* rbit */
11260 gen_helper_rbit(tmp, tmp);
11261 break;
11262 case 0x08: /* rev */
11263 tcg_gen_bswap32_i32(tmp, tmp);
11264 break;
11265 case 0x09: /* rev16 */
11266 gen_rev16(tmp);
11267 break;
11268 case 0x0b: /* revsh */
11269 gen_revsh(tmp);
11270 break;
11271 case 0x10: /* sel */
11272 tmp2 = load_reg(s, rm);
11273 tmp3 = tcg_temp_new_i32();
11274 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
11275 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
11276 tcg_temp_free_i32(tmp3);
11277 tcg_temp_free_i32(tmp2);
11278 break;
11279 case 0x18: /* clz */
11280 tcg_gen_clzi_i32(tmp, tmp, 32);
11281 break;
11282 case 0x20:
11283 case 0x21:
11284 case 0x22:
11285 case 0x28:
11286 case 0x29:
11287 case 0x2a:
11289 /* crc32/crc32c */
11290 uint32_t sz = op & 0x3;
11291 uint32_t c = op & 0x8;
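/* sz selects the source operand width (0 = byte, 1 = halfword,
 * 2 = word) and c selects CRC32C (Castagnoli polynomial 0x1EDC6F41)
 * rather than CRC32 (polynomial 0x04C11DB7); tmp3 below passes the
 * operand size in bytes to the helper. */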
11293 tmp2 = load_reg(s, rm);
11294 if (sz == 0) {
11295 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
11296 } else if (sz == 1) {
11297 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
11299 tmp3 = tcg_const_i32(1 << sz);
11300 if (c) {
11301 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
11302 } else {
11303 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
11305 tcg_temp_free_i32(tmp2);
11306 tcg_temp_free_i32(tmp3);
11307 break;
11309 default:
11310 g_assert_not_reached();
11313 store_reg(s, rd, tmp);
11314 break;
11315 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
11316 switch ((insn >> 20) & 7) {
11317 case 0: /* 32 x 32 -> 32 */
11318 case 7: /* Unsigned sum of absolute differences. */
11319 break;
11320 case 1: /* 16 x 16 -> 32 */
11321 case 2: /* Dual multiply add. */
11322 case 3: /* 32 * 16 -> 32msb */
11323 case 4: /* Dual multiply subtract. */
11324 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
11325 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11326 goto illegal_op;
11328 break;
11330 op = (insn >> 4) & 0xf;
11331 tmp = load_reg(s, rn);
11332 tmp2 = load_reg(s, rm);
11333 switch ((insn >> 20) & 7) {
11334 case 0: /* 32 x 32 -> 32 */
11335 tcg_gen_mul_i32(tmp, tmp, tmp2);
11336 tcg_temp_free_i32(tmp2);
11337 if (rs != 15) {
11338 tmp2 = load_reg(s, rs);
11339 if (op)
11340 tcg_gen_sub_i32(tmp, tmp2, tmp);
11341 else
11342 tcg_gen_add_i32(tmp, tmp, tmp2);
11343 tcg_temp_free_i32(tmp2);
11345 break;
11346 case 1: /* 16 x 16 -> 32 */
11347 gen_mulxy(tmp, tmp2, op & 2, op & 1);
11348 tcg_temp_free_i32(tmp2);
11349 if (rs != 15) {
11350 tmp2 = load_reg(s, rs);
11351 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
11352 tcg_temp_free_i32(tmp2);
11354 break;
11355 case 2: /* Dual multiply add. */
11356 case 4: /* Dual multiply subtract. */
11357 if (op)
11358 gen_swap_half(tmp2);
11359 gen_smul_dual(tmp, tmp2);
11360 if (insn & (1 << 22)) {
11361 /* This subtraction cannot overflow. */
11362 tcg_gen_sub_i32(tmp, tmp, tmp2);
11363 } else {
11364 /* This addition cannot overflow 32 bits;
11365 * however it may overflow when considered as a signed
11366 * operation, in which case we must set the Q flag.
11368 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
11370 tcg_temp_free_i32(tmp2);
11371 if (rs != 15)
11373 tmp2 = load_reg(s, rs);
11374 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
11375 tcg_temp_free_i32(tmp2);
11377 break;
11378 case 3: /* 32 * 16 -> 32msb */
11379 if (op)
11380 tcg_gen_sari_i32(tmp2, tmp2, 16);
11381 else
11382 gen_sxth(tmp2);
11383 tmp64 = gen_muls_i64_i32(tmp, tmp2);
11384 tcg_gen_shri_i64(tmp64, tmp64, 16);
11385 tmp = tcg_temp_new_i32();
11386 tcg_gen_extrl_i64_i32(tmp, tmp64);
11387 tcg_temp_free_i64(tmp64);
11388 if (rs != 15)
11390 tmp2 = load_reg(s, rs);
11391 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
11392 tcg_temp_free_i32(tmp2);
11394 break;
11395 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
11396 tmp64 = gen_muls_i64_i32(tmp, tmp2);
11397 if (rs != 15) {
11398 tmp = load_reg(s, rs);
11399 if (insn & (1 << 20)) {
11400 tmp64 = gen_addq_msw(tmp64, tmp);
11401 } else {
11402 tmp64 = gen_subq_msw(tmp64, tmp);
11405 if (insn & (1 << 4)) {
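/* Rounding forms (SMMULR/SMMLAR/SMMLSR): adding 0x80000000 before
 * taking the high 32 bits rounds to nearest instead of truncating. */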
11406 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
11408 tcg_gen_shri_i64(tmp64, tmp64, 32);
11409 tmp = tcg_temp_new_i32();
11410 tcg_gen_extrl_i64_i32(tmp, tmp64);
11411 tcg_temp_free_i64(tmp64);
11412 break;
11413 case 7: /* Unsigned sum of absolute differences. */
11414 gen_helper_usad8(tmp, tmp, tmp2);
11415 tcg_temp_free_i32(tmp2);
11416 if (rs != 15) {
11417 tmp2 = load_reg(s, rs);
11418 tcg_gen_add_i32(tmp, tmp, tmp2);
11419 tcg_temp_free_i32(tmp2);
11421 break;
11423 store_reg(s, rd, tmp);
11424 break;
11425 case 6: case 7: /* 64-bit multiply, Divide. */
11426 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
11427 tmp = load_reg(s, rn);
11428 tmp2 = load_reg(s, rm);
11429 if ((op & 0x50) == 0x10) {
11430 /* sdiv, udiv */
11431 if (!dc_isar_feature(thumb_div, s)) {
11432 goto illegal_op;
11434 if (op & 0x20)
11435 gen_helper_udiv(tmp, tmp, tmp2);
11436 else
11437 gen_helper_sdiv(tmp, tmp, tmp2);
11438 tcg_temp_free_i32(tmp2);
11439 store_reg(s, rd, tmp);
11440 } else if ((op & 0xe) == 0xc) {
11441 /* Dual multiply accumulate long. */
11442 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11443 tcg_temp_free_i32(tmp);
11444 tcg_temp_free_i32(tmp2);
11445 goto illegal_op;
11447 if (op & 1)
11448 gen_swap_half(tmp2);
11449 gen_smul_dual(tmp, tmp2);
11450 if (op & 0x10) {
11451 tcg_gen_sub_i32(tmp, tmp, tmp2);
11452 } else {
11453 tcg_gen_add_i32(tmp, tmp, tmp2);
11455 tcg_temp_free_i32(tmp2);
11456 /* BUGFIX */
11457 tmp64 = tcg_temp_new_i64();
11458 tcg_gen_ext_i32_i64(tmp64, tmp);
11459 tcg_temp_free_i32(tmp);
11460 gen_addq(s, tmp64, rs, rd);
11461 gen_storeq_reg(s, rs, rd, tmp64);
11462 tcg_temp_free_i64(tmp64);
11463 } else {
11464 if (op & 0x20) {
11465 /* Unsigned 64-bit multiply */
11466 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
11467 } else {
11468 if (op & 8) {
11469 /* smlalxy */
11470 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11471 tcg_temp_free_i32(tmp2);
11472 tcg_temp_free_i32(tmp);
11473 goto illegal_op;
11475 gen_mulxy(tmp, tmp2, op & 2, op & 1);
11476 tcg_temp_free_i32(tmp2);
11477 tmp64 = tcg_temp_new_i64();
11478 tcg_gen_ext_i32_i64(tmp64, tmp);
11479 tcg_temp_free_i32(tmp);
11480 } else {
11481 /* Signed 64-bit multiply */
11482 tmp64 = gen_muls_i64_i32(tmp, tmp2);
11485 if (op & 4) {
11486 /* umaal */
11487 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11488 tcg_temp_free_i64(tmp64);
11489 goto illegal_op;
11491 gen_addq_lo(s, tmp64, rs);
11492 gen_addq_lo(s, tmp64, rd);
11493 } else if (op & 0x40) {
11494 /* 64-bit accumulate. */
11495 gen_addq(s, tmp64, rs, rd);
11497 gen_storeq_reg(s, rs, rd, tmp64);
11498 tcg_temp_free_i64(tmp64);
11500 break;
11502 break;
11503 case 6: case 7: case 14: case 15:
11504 /* Coprocessor. */
11505 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11506 /* We don't currently implement M profile FP support,
11507 * so this entire space should give a NOCP fault, with
11508 * the exception of the v8M VLLDM and VLSTM insns, which
11509 * must be NOPs in Secure state and UNDEF in Nonsecure state.
11511 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
11512 (insn & 0xffa00f00) == 0xec200a00) {
11513 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
11514 * - VLLDM, VLSTM
11515 * We choose to UNDEF if the RAZ bits are non-zero.
11517 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
11518 goto illegal_op;
11520 /* Just NOP since FP support is not implemented */
11521 break;
11523 /* All other insns: NOCP */
11524 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
11525 default_exception_el(s));
11526 break;
11528 if ((insn & 0xfe000a00) == 0xfc000800
11529 && arm_dc_feature(s, ARM_FEATURE_V8)) {
11530 /* The Thumb2 and ARM encodings are identical. */
11531 if (disas_neon_insn_3same_ext(s, insn)) {
11532 goto illegal_op;
11534 } else if ((insn & 0xff000a00) == 0xfe000800
11535 && arm_dc_feature(s, ARM_FEATURE_V8)) {
11536 /* The Thumb2 and ARM encodings are identical. */
11537 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
11538 goto illegal_op;
11540 } else if (((insn >> 24) & 3) == 3) {
11541 /* Translate into the equivalent ARM encoding. */
11542 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
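/* This moves the Thumb "U" bit (bit 28) down to bit 24 and forces
 * bits 31:25 to 0b1111001, i.e. it rewrites the Thumb Neon
 * data-processing encoding 111U_1111_... as the equivalent ARM
 * encoding 1111_001U_... so the shared decoder below handles both. */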
11543 if (disas_neon_data_insn(s, insn)) {
11544 goto illegal_op;
11546 } else if (((insn >> 8) & 0xe) == 10) {
11547 if (disas_vfp_insn(s, insn)) {
11548 goto illegal_op;
11550 } else {
11551 if (insn & (1 << 28))
11552 goto illegal_op;
11553 if (disas_coproc_insn(s, insn)) {
11554 goto illegal_op;
11557 break;
11558 case 8: case 9: case 10: case 11:
11559 if (insn & (1 << 15)) {
11560 /* Branches, misc control. */
11561 if (insn & 0x5000) {
11562 /* Unconditional branch. */
11563 /* signextend(hw1[10:0]) -> offset[:12]. */
11564 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
11565 /* hw1[10:0] -> offset[11:1]. */
11566 offset |= (insn & 0x7ff) << 1;
11567 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
11568 offset[24:22] already have the same value because of the
11569 sign extension above. */
11570 offset ^= ((~insn) & (1 << 13)) << 10;
11571 offset ^= ((~insn) & (1 << 11)) << 11;
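/* The two XORs implement I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S)
 * from the BL/BLX encoding, i.e. offset becomes
 * SignExtend(S:I1:I2:imm10:imm11:'0').  For example, with S = 1 and
 * J1 = J2 = 1 (a typical backwards branch), I1 = I2 = 1 and the XORs
 * leave the sign-extended bits 23:22 unchanged. */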
11573 if (insn & (1 << 14)) {
11574 /* Branch and link. */
11575 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
11578 offset += s->pc;
11579 if (insn & (1 << 12)) {
11580 /* b/bl */
11581 gen_jmp(s, offset);
11582 } else {
11583 /* blx */
11584 offset &= ~(uint32_t)2;
11585 /* thumb2 bx, no need to check */
11586 gen_bx_im(s, offset);
11588 } else if (((insn >> 23) & 7) == 7) {
11589 /* Misc control */
11590 if (insn & (1 << 13))
11591 goto illegal_op;
11593 if (insn & (1 << 26)) {
11594 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11595 goto illegal_op;
11597 if (!(insn & (1 << 20))) {
11598 /* Hypervisor call (v7) */
11599 int imm16 = extract32(insn, 16, 4) << 12
11600 | extract32(insn, 0, 12);
11601 ARCH(7);
11602 if (IS_USER(s)) {
11603 goto illegal_op;
11605 gen_hvc(s, imm16);
11606 } else {
11607 /* Secure monitor call (v6+) */
11608 ARCH(6K);
11609 if (IS_USER(s)) {
11610 goto illegal_op;
11612 gen_smc(s);
11614 } else {
11615 op = (insn >> 20) & 7;
11616 switch (op) {
11617 case 0: /* msr cpsr. */
11618 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11619 tmp = load_reg(s, rn);
11620 /* the constant is the mask and SYSm fields */
11621 addr = tcg_const_i32(insn & 0xfff);
11622 gen_helper_v7m_msr(cpu_env, addr, tmp);
11623 tcg_temp_free_i32(addr);
11624 tcg_temp_free_i32(tmp);
11625 gen_lookup_tb(s);
11626 break;
11628 /* fall through */
11629 case 1: /* msr spsr. */
11630 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11631 goto illegal_op;
11634 if (extract32(insn, 5, 1)) {
11635 /* MSR (banked) */
11636 int sysm = extract32(insn, 8, 4) |
11637 (extract32(insn, 4, 1) << 4);
11638 int r = op & 1;
11640 gen_msr_banked(s, r, sysm, rm);
11641 break;
11644 /* MSR (for PSRs) */
11645 tmp = load_reg(s, rn);
11646 if (gen_set_psr(s,
11647 msr_mask(s, (insn >> 8) & 0xf, op == 1),
11648 op == 1, tmp))
11649 goto illegal_op;
11650 break;
11651 case 2: /* cps, nop-hint. */
11652 if (((insn >> 8) & 7) == 0) {
11653 gen_nop_hint(s, insn & 0xff);
11655 /* Implemented as NOP in user mode. */
11656 if (IS_USER(s))
11657 break;
11658 offset = 0;
11659 imm = 0;
11660 if (insn & (1 << 10)) {
11661 if (insn & (1 << 7))
11662 offset |= CPSR_A;
11663 if (insn & (1 << 6))
11664 offset |= CPSR_I;
11665 if (insn & (1 << 5))
11666 offset |= CPSR_F;
11667 if (insn & (1 << 9))
11668 imm = CPSR_A | CPSR_I | CPSR_F;
11670 if (insn & (1 << 8)) {
11671 offset |= 0x1f;
11672 imm |= (insn & 0x1f);
11674 if (offset) {
11675 gen_set_psr_im(s, offset, 0, imm);
11677 break;
11678 case 3: /* Special control operations. */
11679 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
11680 !arm_dc_feature(s, ARM_FEATURE_M)) {
11681 goto illegal_op;
11683 op = (insn >> 4) & 0xf;
11684 switch (op) {
11685 case 2: /* clrex */
11686 gen_clrex(s);
11687 break;
11688 case 4: /* dsb */
11689 case 5: /* dmb */
11690 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
11691 break;
11692 case 6: /* isb */
11693 /* We need to break the TB after this insn
11694 * to execute self-modifying code correctly
11695 * and also to take any pending interrupts
11696 * immediately.
11698 gen_goto_tb(s, 0, s->pc & ~1);
11699 break;
11700 default:
11701 goto illegal_op;
11703 break;
11704 case 4: /* bxj */
11705 /* Trivial implementation equivalent to bx.
11706 * This instruction doesn't exist at all for M-profile.
11708 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11709 goto illegal_op;
11711 tmp = load_reg(s, rn);
11712 gen_bx(s, tmp);
11713 break;
11714 case 5: /* Exception return. */
11715 if (IS_USER(s)) {
11716 goto illegal_op;
11718 if (rn != 14 || rd != 15) {
11719 goto illegal_op;
11721 if (s->current_el == 2) {
11722 /* ERET from Hyp uses ELR_Hyp, not LR */
11723 if (insn & 0xff) {
11724 goto illegal_op;
11726 tmp = load_cpu_field(elr_el[2]);
11727 } else {
11728 tmp = load_reg(s, rn);
11729 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
11731 gen_exception_return(s, tmp);
11732 break;
11733 case 6: /* MRS */
11734 if (extract32(insn, 5, 1) &&
11735 !arm_dc_feature(s, ARM_FEATURE_M)) {
11736 /* MRS (banked) */
11737 int sysm = extract32(insn, 16, 4) |
11738 (extract32(insn, 4, 1) << 4);
11740 gen_mrs_banked(s, 0, sysm, rd);
11741 break;
11744 if (extract32(insn, 16, 4) != 0xf) {
11745 goto illegal_op;
11747 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
11748 extract32(insn, 0, 8) != 0) {
11749 goto illegal_op;
11752 /* mrs cpsr */
11753 tmp = tcg_temp_new_i32();
11754 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11755 addr = tcg_const_i32(insn & 0xff);
11756 gen_helper_v7m_mrs(tmp, cpu_env, addr);
11757 tcg_temp_free_i32(addr);
11758 } else {
11759 gen_helper_cpsr_read(tmp, cpu_env);
11761 store_reg(s, rd, tmp);
11762 break;
11763 case 7: /* MRS */
11764 if (extract32(insn, 5, 1) &&
11765 !arm_dc_feature(s, ARM_FEATURE_M)) {
11766 /* MRS (banked) */
11767 int sysm = extract32(insn, 16, 4) |
11768 (extract32(insn, 4, 1) << 4);
11770 gen_mrs_banked(s, 1, sysm, rd);
11771 break;
11774 /* mrs spsr. */
11775 /* Not accessible in user mode. */
11776 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
11777 goto illegal_op;
11780 if (extract32(insn, 16, 4) != 0xf ||
11781 extract32(insn, 0, 8) != 0) {
11782 goto illegal_op;
11785 tmp = load_cpu_field(spsr);
11786 store_reg(s, rd, tmp);
11787 break;
11790 } else {
11791 /* Conditional branch. */
11792 op = (insn >> 22) & 0xf;
11793 /* Generate a conditional jump to next instruction. */
11794 arm_skip_unless(s, op);
11796 /* offset[11:1] = insn[10:0] */
11797 offset = (insn & 0x7ff) << 1;
11798 /* offset[17:12] = insn[21:16]. */
11799 offset |= (insn & 0x003f0000) >> 4;
11800 /* offset[31:20] = insn[26]. */
11801 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
11802 /* offset[18] = insn[13]. */
11803 offset |= (insn & (1 << 13)) << 5;
11804 /* offset[19] = insn[11]. */
11805 offset |= (insn & (1 << 11)) << 8;
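/* i.e. the T3 conditional branch immediate is
 * SignExtend(S:J2:J1:imm6:imm11:'0'), giving a branch range of
 * about +/-1MB. */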
11807 /* jump to the offset */
11808 gen_jmp(s, s->pc + offset);
11810 } else {
11812 * 0b1111_0xxx_xxxx_xxxx_0xxx_xxxx_xxxx_xxxx
11813 * - Data-processing (modified immediate, plain binary immediate)
11815 if (insn & (1 << 25)) {
11817 * 0b1111_0x1x_xxxx_xxxx_0xxx_xxxx_xxxx_xxxx
11818 * - Data-processing (plain binary immediate)
11820 if (insn & (1 << 24)) {
11821 if (insn & (1 << 20))
11822 goto illegal_op;
11823 /* Bitfield/Saturate. */
11824 op = (insn >> 21) & 7;
11825 imm = insn & 0x1f;
11826 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
11827 if (rn == 15) {
11828 tmp = tcg_temp_new_i32();
11829 tcg_gen_movi_i32(tmp, 0);
11830 } else {
11831 tmp = load_reg(s, rn);
11833 switch (op) {
11834 case 2: /* Signed bitfield extract. */
11835 imm++;
11836 if (shift + imm > 32)
11837 goto illegal_op;
11838 if (imm < 32) {
11839 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
11841 break;
11842 case 6: /* Unsigned bitfield extract. */
11843 imm++;
11844 if (shift + imm > 32)
11845 goto illegal_op;
11846 if (imm < 32) {
11847 tcg_gen_extract_i32(tmp, tmp, shift, imm);
11849 break;
11850 case 3: /* Bitfield insert/clear. */
11851 if (imm < shift)
11852 goto illegal_op;
11853 imm = imm + 1 - shift;
11854 if (imm != 32) {
11855 tmp2 = load_reg(s, rd);
11856 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
11857 tcg_temp_free_i32(tmp2);
11859 break;
11860 case 7:
11861 goto illegal_op;
11862 default: /* Saturate. */
11863 if (shift) {
11864 if (op & 1)
11865 tcg_gen_sari_i32(tmp, tmp, shift);
11866 else
11867 tcg_gen_shli_i32(tmp, tmp, shift);
11869 tmp2 = tcg_const_i32(imm);
11870 if (op & 4) {
11871 /* Unsigned. */
11872 if ((op & 1) && shift == 0) {
11873 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11874 tcg_temp_free_i32(tmp);
11875 tcg_temp_free_i32(tmp2);
11876 goto illegal_op;
11878 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
11879 } else {
11880 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
11882 } else {
11883 /* Signed. */
11884 if ((op & 1) && shift == 0) {
11885 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11886 tcg_temp_free_i32(tmp);
11887 tcg_temp_free_i32(tmp2);
11888 goto illegal_op;
11890 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
11891 } else {
11892 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
11895 tcg_temp_free_i32(tmp2);
11896 break;
11898 store_reg(s, rd, tmp);
11899 } else {
11900 imm = ((insn & 0x04000000) >> 15)
11901 | ((insn & 0x7000) >> 4) | (insn & 0xff);
11902 if (insn & (1 << 22)) {
11903 /* 16-bit immediate. */
11904 imm |= (insn >> 4) & 0xf000;
11905 if (insn & (1 << 23)) {
11906 /* movt */
11907 tmp = load_reg(s, rd);
11908 tcg_gen_ext16u_i32(tmp, tmp);
11909 tcg_gen_ori_i32(tmp, tmp, imm << 16);
11910 } else {
11911 /* movw */
11912 tmp = tcg_temp_new_i32();
11913 tcg_gen_movi_i32(tmp, imm);
11915 store_reg(s, rd, tmp);
11916 } else {
11917 /* Add/sub 12-bit immediate. */
11918 if (rn == 15) {
11919 offset = s->pc & ~(uint32_t)3;
11920 if (insn & (1 << 23))
11921 offset -= imm;
11922 else
11923 offset += imm;
11924 tmp = tcg_temp_new_i32();
11925 tcg_gen_movi_i32(tmp, offset);
11926 store_reg(s, rd, tmp);
11927 } else {
11928 tmp = load_reg(s, rn);
11929 if (insn & (1 << 23))
11930 tcg_gen_subi_i32(tmp, tmp, imm);
11931 else
11932 tcg_gen_addi_i32(tmp, tmp, imm);
11933 if (rn == 13 && rd == 13) {
11934 /* ADD SP, SP, imm or SUB SP, SP, imm */
11935 store_sp_checked(s, tmp);
11936 } else {
11937 store_reg(s, rd, tmp);
11942 } else {
11944 * 0b1111_0x0x_xxxx_xxxx_0xxx_xxxx_xxxx_xxxx
11945 * - Data-processing (modified immediate)
11947 int shifter_out = 0;
11948 /* modified 12-bit immediate. */
11949 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
11950 imm = (insn & 0xff);
11951 switch (shift) {
11952 case 0: /* XY */
11953 /* Nothing to do. */
11954 break;
11955 case 1: /* 00XY00XY */
11956 imm |= imm << 16;
11957 break;
11958 case 2: /* XY00XY00 */
11959 imm |= imm << 16;
11960 imm <<= 8;
11961 break;
11962 case 3: /* XYXYXYXY */
11963 imm |= imm << 16;
11964 imm |= imm << 8;
11965 break;
11966 default: /* Rotated constant. */
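/* For example i:imm3:imm8 = 0b0100_11010110: the 5-bit rotation
 * is i:imm3:imm8<7> = 0b01001 = 9 and the constant is
 * (0x80 | imm8<6:0>) rotated right by 9, i.e. 0xD6 ROR 9 =
 * 0x6B000000, which is exactly what the shift by (32 - shift)
 * below computes. */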
11967 shift = (shift << 1) | (imm >> 7);
11968 imm |= 0x80;
11969 imm = imm << (32 - shift);
11970 shifter_out = 1;
11971 break;
11973 tmp2 = tcg_temp_new_i32();
11974 tcg_gen_movi_i32(tmp2, imm);
11975 rn = (insn >> 16) & 0xf;
11976 if (rn == 15) {
11977 tmp = tcg_temp_new_i32();
11978 tcg_gen_movi_i32(tmp, 0);
11979 } else {
11980 tmp = load_reg(s, rn);
11982 op = (insn >> 21) & 0xf;
11983 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
11984 shifter_out, tmp, tmp2))
11985 goto illegal_op;
11986 tcg_temp_free_i32(tmp2);
11987 rd = (insn >> 8) & 0xf;
11988 if (rd == 13 && rn == 13
11989 && (op == 8 || op == 13)) {
11990 /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
11991 store_sp_checked(s, tmp);
11992 } else if (rd != 15) {
11993 store_reg(s, rd, tmp);
11994 } else {
11995 tcg_temp_free_i32(tmp);
11999 break;
12000 case 12: /* Load/store single data item. */
12002 int postinc = 0;
12003 int writeback = 0;
12004 int memidx;
12005 ISSInfo issinfo;
12007 if ((insn & 0x01100000) == 0x01000000) {
12008 if (disas_neon_ls_insn(s, insn)) {
12009 goto illegal_op;
12011 break;
12013 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
12014 if (rs == 15) {
12015 if (!(insn & (1 << 20))) {
12016 goto illegal_op;
12018 if (op != 2) {
12019 /* Byte or halfword load space with dest == r15 : memory hints.
12020 * Catch them early so we don't emit pointless addressing code.
12021 * This space is a mix of:
12022 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
12023 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
12024 * cores)
12025 * unallocated hints, which must be treated as NOPs
12026 * UNPREDICTABLE space, which we NOP or UNDEF depending on
12027 * which is easiest for the decoding logic
12028 * Some space which must UNDEF
12030 int op1 = (insn >> 23) & 3;
12031 int op2 = (insn >> 6) & 0x3f;
12032 if (op & 2) {
12033 goto illegal_op;
12035 if (rn == 15) {
12036 /* UNPREDICTABLE, unallocated hint or
12037 * PLD/PLDW/PLI (literal)
12039 return;
12041 if (op1 & 1) {
12042 return; /* PLD/PLDW/PLI or unallocated hint */
12044 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
12045 return; /* PLD/PLDW/PLI or unallocated hint */
12047 /* UNDEF space, or an UNPREDICTABLE */
12048 goto illegal_op;
12051 memidx = get_mem_index(s);
12052 if (rn == 15) {
12053 addr = tcg_temp_new_i32();
12054 /* PC relative. */
12055 /* s->pc has already been incremented by 4. */
12056 imm = s->pc & 0xfffffffc;
12057 if (insn & (1 << 23))
12058 imm += insn & 0xfff;
12059 else
12060 imm -= insn & 0xfff;
12061 tcg_gen_movi_i32(addr, imm);
12062 } else {
12063 addr = load_reg(s, rn);
12064 if (insn & (1 << 23)) {
12065 /* Positive offset. */
12066 imm = insn & 0xfff;
12067 tcg_gen_addi_i32(addr, addr, imm);
12068 } else {
12069 imm = insn & 0xff;
12070 switch ((insn >> 8) & 0xf) {
12071 case 0x0: /* Shifted Register. */
12072 shift = (insn >> 4) & 0xf;
12073 if (shift > 3) {
12074 tcg_temp_free_i32(addr);
12075 goto illegal_op;
12077 tmp = load_reg(s, rm);
12078 if (shift)
12079 tcg_gen_shli_i32(tmp, tmp, shift);
12080 tcg_gen_add_i32(addr, addr, tmp);
12081 tcg_temp_free_i32(tmp);
12082 break;
12083 case 0xc: /* Negative offset. */
12084 tcg_gen_addi_i32(addr, addr, -imm);
12085 break;
12086 case 0xe: /* User privilege. */
12087 tcg_gen_addi_i32(addr, addr, imm);
12088 memidx = get_a32_user_mem_index(s);
12089 break;
12090 case 0x9: /* Post-decrement. */
12091 imm = -imm;
12092 /* Fall through. */
12093 case 0xb: /* Post-increment. */
12094 postinc = 1;
12095 writeback = 1;
12096 break;
12097 case 0xd: /* Pre-decrement. */
12098 imm = -imm;
12099 /* Fall through. */
12100 case 0xf: /* Pre-increment. */
12101 writeback = 1;
12102 break;
12103 default:
12104 tcg_temp_free_i32(addr);
12105 goto illegal_op;
12110 issinfo = writeback ? ISSInvalid : rs;
12112 if (s->v8m_stackcheck && rn == 13 && writeback) {
12114 * Stackcheck. Here we know 'addr' is the current SP;
12115 * if imm is +ve we're moving SP up, else down. It is
12116 * UNKNOWN whether the limit check triggers when SP starts
12117 * below the limit and ends up above it; we choose to trigger the check in that case.
12119 if ((int32_t)imm < 0) {
12120 TCGv_i32 newsp = tcg_temp_new_i32();
12122 tcg_gen_addi_i32(newsp, addr, imm);
12123 gen_helper_v8m_stackcheck(cpu_env, newsp);
12124 tcg_temp_free_i32(newsp);
12125 } else {
12126 gen_helper_v8m_stackcheck(cpu_env, addr);
12130 if (writeback && !postinc) {
12131 tcg_gen_addi_i32(addr, addr, imm);
12134 if (insn & (1 << 20)) {
12135 /* Load. */
12136 tmp = tcg_temp_new_i32();
12137 switch (op) {
12138 case 0:
12139 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
12140 break;
12141 case 4:
12142 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
12143 break;
12144 case 1:
12145 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
12146 break;
12147 case 5:
12148 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
12149 break;
12150 case 2:
12151 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
12152 break;
12153 default:
12154 tcg_temp_free_i32(tmp);
12155 tcg_temp_free_i32(addr);
12156 goto illegal_op;
12158 if (rs == 15) {
12159 gen_bx_excret(s, tmp);
12160 } else {
12161 store_reg(s, rs, tmp);
12163 } else {
12164 /* Store. */
12165 tmp = load_reg(s, rs);
12166 switch (op) {
12167 case 0:
12168 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
12169 break;
12170 case 1:
12171 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
12172 break;
12173 case 2:
12174 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
12175 break;
12176 default:
12177 tcg_temp_free_i32(tmp);
12178 tcg_temp_free_i32(addr);
12179 goto illegal_op;
12181 tcg_temp_free_i32(tmp);
12183 if (postinc)
12184 tcg_gen_addi_i32(addr, addr, imm);
12185 if (writeback) {
12186 store_reg(s, rn, addr);
12187 } else {
12188 tcg_temp_free_i32(addr);
12191 break;
12192 default:
12193 goto illegal_op;
12195 return;
12196 illegal_op:
12197 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
12198 default_exception_el(s));
12201 static void disas_thumb_insn(DisasContext *s, uint32_t insn)
12203 uint32_t val, op, rm, rn, rd, shift, cond;
12204 int32_t offset;
12205 int i;
12206 TCGv_i32 tmp;
12207 TCGv_i32 tmp2;
12208 TCGv_i32 addr;
12210 switch (insn >> 12) {
12211 case 0: case 1:
12213 rd = insn & 7;
12214 op = (insn >> 11) & 3;
12215 if (op == 3) {
12217 * 0b0001_1xxx_xxxx_xxxx
12218 * - Add, subtract (three low registers)
12219 * - Add, subtract (two low registers and immediate)
12221 rn = (insn >> 3) & 7;
12222 tmp = load_reg(s, rn);
12223 if (insn & (1 << 10)) {
12224 /* immediate */
12225 tmp2 = tcg_temp_new_i32();
12226 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
12227 } else {
12228 /* reg */
12229 rm = (insn >> 6) & 7;
12230 tmp2 = load_reg(s, rm);
12232 if (insn & (1 << 9)) {
12233 if (s->condexec_mask)
12234 tcg_gen_sub_i32(tmp, tmp, tmp2);
12235 else
12236 gen_sub_CC(tmp, tmp, tmp2);
12237 } else {
12238 if (s->condexec_mask)
12239 tcg_gen_add_i32(tmp, tmp, tmp2);
12240 else
12241 gen_add_CC(tmp, tmp, tmp2);
12243 tcg_temp_free_i32(tmp2);
12244 store_reg(s, rd, tmp);
12245 } else {
12246 /* shift immediate */
12247 rm = (insn >> 3) & 7;
12248 shift = (insn >> 6) & 0x1f;
12249 tmp = load_reg(s, rm);
12250 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
12251 if (!s->condexec_mask)
12252 gen_logic_CC(tmp);
12253 store_reg(s, rd, tmp);
12255 break;
12256 case 2: case 3:
12258 * 0b001x_xxxx_xxxx_xxxx
12259 * - Add, subtract, compare, move (one low register and immediate)
12261 op = (insn >> 11) & 3;
12262 rd = (insn >> 8) & 0x7;
12263 if (op == 0) { /* mov */
12264 tmp = tcg_temp_new_i32();
12265 tcg_gen_movi_i32(tmp, insn & 0xff);
12266 if (!s->condexec_mask)
12267 gen_logic_CC(tmp);
12268 store_reg(s, rd, tmp);
12269 } else {
12270 tmp = load_reg(s, rd);
12271 tmp2 = tcg_temp_new_i32();
12272 tcg_gen_movi_i32(tmp2, insn & 0xff);
12273 switch (op) {
12274 case 1: /* cmp */
12275 gen_sub_CC(tmp, tmp, tmp2);
12276 tcg_temp_free_i32(tmp);
12277 tcg_temp_free_i32(tmp2);
12278 break;
12279 case 2: /* add */
12280 if (s->condexec_mask)
12281 tcg_gen_add_i32(tmp, tmp, tmp2);
12282 else
12283 gen_add_CC(tmp, tmp, tmp2);
12284 tcg_temp_free_i32(tmp2);
12285 store_reg(s, rd, tmp);
12286 break;
12287 case 3: /* sub */
12288 if (s->condexec_mask)
12289 tcg_gen_sub_i32(tmp, tmp, tmp2);
12290 else
12291 gen_sub_CC(tmp, tmp, tmp2);
12292 tcg_temp_free_i32(tmp2);
12293 store_reg(s, rd, tmp);
12294 break;
12297 break;
12298 case 4:
12299 if (insn & (1 << 11)) {
12300 rd = (insn >> 8) & 7;
12301 /* load pc-relative. Bit 1 of PC is ignored. */
12302 val = s->pc + 2 + ((insn & 0xff) * 4);
12303 val &= ~(uint32_t)2;
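/* i.e. the literal address is Align(PC, 4) + imm8 * 4, where PC is
 * the address of this LDR (literal) insn plus 4. */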
12304 addr = tcg_temp_new_i32();
12305 tcg_gen_movi_i32(addr, val);
12306 tmp = tcg_temp_new_i32();
12307 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
12308 rd | ISSIs16Bit);
12309 tcg_temp_free_i32(addr);
12310 store_reg(s, rd, tmp);
12311 break;
12313 if (insn & (1 << 10)) {
12314 /* 0b0100_01xx_xxxx_xxxx
12315 * - data processing extended, branch and exchange
12317 rd = (insn & 7) | ((insn >> 4) & 8);
12318 rm = (insn >> 3) & 0xf;
12319 op = (insn >> 8) & 3;
12320 switch (op) {
12321 case 0: /* add */
12322 tmp = load_reg(s, rd);
12323 tmp2 = load_reg(s, rm);
12324 tcg_gen_add_i32(tmp, tmp, tmp2);
12325 tcg_temp_free_i32(tmp2);
12326 if (rd == 13) {
12327 /* ADD SP, SP, reg */
12328 store_sp_checked(s, tmp);
12329 } else {
12330 store_reg(s, rd, tmp);
12332 break;
12333 case 1: /* cmp */
12334 tmp = load_reg(s, rd);
12335 tmp2 = load_reg(s, rm);
12336 gen_sub_CC(tmp, tmp, tmp2);
12337 tcg_temp_free_i32(tmp2);
12338 tcg_temp_free_i32(tmp);
12339 break;
12340 case 2: /* mov/cpy */
12341 tmp = load_reg(s, rm);
12342 if (rd == 13) {
12343 /* MOV SP, reg */
12344 store_sp_checked(s, tmp);
12345 } else {
12346 store_reg(s, rd, tmp);
12348 break;
12349 case 3:
12351 /* 0b0100_0111_xxxx_xxxx
12352 * - branch [and link] exchange thumb register
12354 bool link = insn & (1 << 7);
12356 if (insn & 3) {
12357 goto undef;
12359 if (link) {
12360 ARCH(5);
12362 if ((insn & 4)) {
12363 /* BXNS/BLXNS: only exists for v8M with the
12364 * security extensions, and always UNDEF if NonSecure.
12365 * We don't implement these in the user-only mode
12366 * either (in theory you can use them from Secure User
12367 * mode but they are too tied in to system emulation.)
12369 if (!s->v8m_secure || IS_USER_ONLY) {
12370 goto undef;
12372 if (link) {
12373 gen_blxns(s, rm);
12374 } else {
12375 gen_bxns(s, rm);
12377 break;
12379 /* BLX/BX */
12380 tmp = load_reg(s, rm);
12381 if (link) {
12382 val = (uint32_t)s->pc | 1;
12383 tmp2 = tcg_temp_new_i32();
12384 tcg_gen_movi_i32(tmp2, val);
12385 store_reg(s, 14, tmp2);
12386 gen_bx(s, tmp);
12387 } else {
12388 /* Only BX works as exception-return, not BLX */
12389 gen_bx_excret(s, tmp);
12391 break;
12394 break;
12398 * 0b0100_00xx_xxxx_xxxx
12399 * - Data-processing (two low registers)
12401 rd = insn & 7;
12402 rm = (insn >> 3) & 7;
12403 op = (insn >> 6) & 0xf;
12404 if (op == 2 || op == 3 || op == 4 || op == 7) {
12405 /* the shift/rotate ops want the operands backwards */
12406 val = rm;
12407 rm = rd;
12408 rd = val;
12409 val = 1;
12410 } else {
12411 val = 0;
12414 if (op == 9) { /* neg */
12415 tmp = tcg_temp_new_i32();
12416 tcg_gen_movi_i32(tmp, 0);
12417 } else if (op != 0xf) { /* mvn doesn't read its first operand */
12418 tmp = load_reg(s, rd);
12419 } else {
12420 tmp = NULL;
12423 tmp2 = load_reg(s, rm);
12424 switch (op) {
12425 case 0x0: /* and */
12426 tcg_gen_and_i32(tmp, tmp, tmp2);
12427 if (!s->condexec_mask)
12428 gen_logic_CC(tmp);
12429 break;
12430 case 0x1: /* eor */
12431 tcg_gen_xor_i32(tmp, tmp, tmp2);
12432 if (!s->condexec_mask)
12433 gen_logic_CC(tmp);
12434 break;
12435 case 0x2: /* lsl */
12436 if (s->condexec_mask) {
12437 gen_shl(tmp2, tmp2, tmp);
12438 } else {
12439 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
12440 gen_logic_CC(tmp2);
12442 break;
12443 case 0x3: /* lsr */
12444 if (s->condexec_mask) {
12445 gen_shr(tmp2, tmp2, tmp);
12446 } else {
12447 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
12448 gen_logic_CC(tmp2);
12450 break;
12451 case 0x4: /* asr */
12452 if (s->condexec_mask) {
12453 gen_sar(tmp2, tmp2, tmp);
12454 } else {
12455 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
12456 gen_logic_CC(tmp2);
12458 break;
12459 case 0x5: /* adc */
12460 if (s->condexec_mask) {
12461 gen_adc(tmp, tmp2);
12462 } else {
12463 gen_adc_CC(tmp, tmp, tmp2);
12465 break;
12466 case 0x6: /* sbc */
12467 if (s->condexec_mask) {
12468 gen_sub_carry(tmp, tmp, tmp2);
12469 } else {
12470 gen_sbc_CC(tmp, tmp, tmp2);
12472 break;
12473 case 0x7: /* ror */
12474 if (s->condexec_mask) {
12475 tcg_gen_andi_i32(tmp, tmp, 0x1f);
12476 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
12477 } else {
12478 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
12479 gen_logic_CC(tmp2);
12481 break;
12482 case 0x8: /* tst */
12483 tcg_gen_and_i32(tmp, tmp, tmp2);
12484 gen_logic_CC(tmp);
12485 rd = 16;
12486 break;
12487 case 0x9: /* neg */
12488 if (s->condexec_mask)
12489 tcg_gen_neg_i32(tmp, tmp2);
12490 else
12491 gen_sub_CC(tmp, tmp, tmp2);
12492 break;
12493 case 0xa: /* cmp */
12494 gen_sub_CC(tmp, tmp, tmp2);
12495 rd = 16;
12496 break;
12497 case 0xb: /* cmn */
12498 gen_add_CC(tmp, tmp, tmp2);
12499 rd = 16;
12500 break;
12501 case 0xc: /* orr */
12502 tcg_gen_or_i32(tmp, tmp, tmp2);
12503 if (!s->condexec_mask)
12504 gen_logic_CC(tmp);
12505 break;
12506 case 0xd: /* mul */
12507 tcg_gen_mul_i32(tmp, tmp, tmp2);
12508 if (!s->condexec_mask)
12509 gen_logic_CC(tmp);
12510 break;
12511 case 0xe: /* bic */
12512 tcg_gen_andc_i32(tmp, tmp, tmp2);
12513 if (!s->condexec_mask)
12514 gen_logic_CC(tmp);
12515 break;
12516 case 0xf: /* mvn */
12517 tcg_gen_not_i32(tmp2, tmp2);
12518 if (!s->condexec_mask)
12519 gen_logic_CC(tmp2);
12520 val = 1;
12521 rm = rd;
12522 break;
12524 if (rd != 16) {
12525 if (val) {
12526 store_reg(s, rm, tmp2);
12527 if (op != 0xf)
12528 tcg_temp_free_i32(tmp);
12529 } else {
12530 store_reg(s, rd, tmp);
12531 tcg_temp_free_i32(tmp2);
12533 } else {
12534 tcg_temp_free_i32(tmp);
12535 tcg_temp_free_i32(tmp2);
12537 break;
12539 case 5:
12540 /* load/store register offset. */
12541 rd = insn & 7;
12542 rn = (insn >> 3) & 7;
12543 rm = (insn >> 6) & 7;
12544 op = (insn >> 9) & 7;
12545 addr = load_reg(s, rn);
12546 tmp = load_reg(s, rm);
12547 tcg_gen_add_i32(addr, addr, tmp);
12548 tcg_temp_free_i32(tmp);
12550 if (op < 3) { /* store */
12551 tmp = load_reg(s, rd);
12552 } else {
12553 tmp = tcg_temp_new_i32();
12556 switch (op) {
12557 case 0: /* str */
12558 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12559 break;
12560 case 1: /* strh */
12561 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12562 break;
12563 case 2: /* strb */
12564 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12565 break;
12566 case 3: /* ldrsb */
12567 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12568 break;
12569 case 4: /* ldr */
12570 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12571 break;
12572 case 5: /* ldrh */
12573 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12574 break;
12575 case 6: /* ldrb */
12576 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12577 break;
12578 case 7: /* ldrsh */
12579 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12580 break;
12582 if (op >= 3) { /* load */
12583 store_reg(s, rd, tmp);
12584 } else {
12585 tcg_temp_free_i32(tmp);
12587 tcg_temp_free_i32(addr);
12588 break;
12590 case 6:
12591 /* load/store word immediate offset */
12592 rd = insn & 7;
12593 rn = (insn >> 3) & 7;
12594 addr = load_reg(s, rn);
12595 val = (insn >> 4) & 0x7c;
12596 tcg_gen_addi_i32(addr, addr, val);
12598 if (insn & (1 << 11)) {
12599 /* load */
12600 tmp = tcg_temp_new_i32();
12601 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
12602 store_reg(s, rd, tmp);
12603 } else {
12604 /* store */
12605 tmp = load_reg(s, rd);
12606 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
12607 tcg_temp_free_i32(tmp);
12609 tcg_temp_free_i32(addr);
12610 break;
12612 case 7:
12613 /* load/store byte immediate offset */
12614 rd = insn & 7;
12615 rn = (insn >> 3) & 7;
12616 addr = load_reg(s, rn);
12617 val = (insn >> 6) & 0x1f;
12618 tcg_gen_addi_i32(addr, addr, val);
12620 if (insn & (1 << 11)) {
12621 /* load */
12622 tmp = tcg_temp_new_i32();
12623 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12624 store_reg(s, rd, tmp);
12625 } else {
12626 /* store */
12627 tmp = load_reg(s, rd);
12628 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12629 tcg_temp_free_i32(tmp);
12631 tcg_temp_free_i32(addr);
12632 break;
12634 case 8:
12635 /* load/store halfword immediate offset */
12636 rd = insn & 7;
12637 rn = (insn >> 3) & 7;
12638 addr = load_reg(s, rn);
12639 val = (insn >> 5) & 0x3e;
12640 tcg_gen_addi_i32(addr, addr, val);
12642 if (insn & (1 << 11)) {
12643 /* load */
12644 tmp = tcg_temp_new_i32();
12645 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12646 store_reg(s, rd, tmp);
12647 } else {
12648 /* store */
12649 tmp = load_reg(s, rd);
12650 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12651 tcg_temp_free_i32(tmp);
12653 tcg_temp_free_i32(addr);
12654 break;
12656 case 9:
12657 /* load/store from stack */
12658 rd = (insn >> 8) & 7;
12659 addr = load_reg(s, 13);
12660 val = (insn & 0xff) * 4;
12661 tcg_gen_addi_i32(addr, addr, val);
12663 if (insn & (1 << 11)) {
12664 /* load */
12665 tmp = tcg_temp_new_i32();
12666 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12667 store_reg(s, rd, tmp);
12668 } else {
12669 /* store */
12670 tmp = load_reg(s, rd);
12671 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12672 tcg_temp_free_i32(tmp);
12674 tcg_temp_free_i32(addr);
12675 break;
12677 case 10:
12679 * 0b1010_xxxx_xxxx_xxxx
12680 * - Add PC/SP (immediate)
12682 rd = (insn >> 8) & 7;
12683 if (insn & (1 << 11)) {
12684 /* SP */
12685 tmp = load_reg(s, 13);
12686 } else {
12687 /* PC. bit 1 is ignored. */
12688 tmp = tcg_temp_new_i32();
12689 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
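/* i.e. ADR reads the PC as Align(insn address + 4, 4), as Thumb
 * PC-relative address generation requires. */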
12691 val = (insn & 0xff) * 4;
12692 tcg_gen_addi_i32(tmp, tmp, val);
12693 store_reg(s, rd, tmp);
12694 break;
12696 case 11:
12697 /* misc */
12698 op = (insn >> 8) & 0xf;
12699 switch (op) {
12700 case 0:
12702 * 0b1011_0000_xxxx_xxxx
12703 * - ADD (SP plus immediate)
12704 * - SUB (SP minus immediate)
12706 tmp = load_reg(s, 13);
12707 val = (insn & 0x7f) * 4;
12708 if (insn & (1 << 7))
12709 val = -(int32_t)val;
12710 tcg_gen_addi_i32(tmp, tmp, val);
12711 store_sp_checked(s, tmp);
12712 break;
12714 case 2: /* sign/zero extend. */
12715 ARCH(6);
12716 rd = insn & 7;
12717 rm = (insn >> 3) & 7;
12718 tmp = load_reg(s, rm);
12719 switch ((insn >> 6) & 3) {
12720 case 0: gen_sxth(tmp); break;
12721 case 1: gen_sxtb(tmp); break;
12722 case 2: gen_uxth(tmp); break;
12723 case 3: gen_uxtb(tmp); break;
12725 store_reg(s, rd, tmp);
12726 break;
12727 case 4: case 5: case 0xc: case 0xd:
12729 * 0b1011_x10x_xxxx_xxxx
12730 * - push/pop
12732 addr = load_reg(s, 13);
12733 if (insn & (1 << 8))
12734 offset = 4;
12735 else
12736 offset = 0;
12737 for (i = 0; i < 8; i++) {
12738 if (insn & (1 << i))
12739 offset += 4;
12741 if ((insn & (1 << 11)) == 0) {
12742 tcg_gen_addi_i32(addr, addr, -offset);
12745 if (s->v8m_stackcheck) {
12747 * Here 'addr' is the lower of "old SP" and "new SP";
12748 * if this is a pop that starts below the limit and ends
12749 * above it, it is UNKNOWN whether the limit check triggers;
12750 * we choose to trigger.
12752 gen_helper_v8m_stackcheck(cpu_env, addr);
12755 for (i = 0; i < 8; i++) {
12756 if (insn & (1 << i)) {
12757 if (insn & (1 << 11)) {
12758 /* pop */
12759 tmp = tcg_temp_new_i32();
12760 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
12761 store_reg(s, i, tmp);
12762 } else {
12763 /* push */
12764 tmp = load_reg(s, i);
12765 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
12766 tcg_temp_free_i32(tmp);
12768 /* advance to the next address. */
12769 tcg_gen_addi_i32(addr, addr, 4);
12772 tmp = NULL;
12773 if (insn & (1 << 8)) {
12774 if (insn & (1 << 11)) {
12775 /* pop pc */
12776 tmp = tcg_temp_new_i32();
12777 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
12778 /* don't set the pc until the rest of the instruction
12779 has completed */
12780 } else {
12781 /* push lr */
12782 tmp = load_reg(s, 14);
12783 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
12784 tcg_temp_free_i32(tmp);
12786 tcg_gen_addi_i32(addr, addr, 4);
12788 if ((insn & (1 << 11)) == 0) {
12789 tcg_gen_addi_i32(addr, addr, -offset);
12791 /* write back the new stack pointer */
12792 store_reg(s, 13, addr);
12793 /* set the new PC value */
12794 if ((insn & 0x0900) == 0x0900) {
12795 store_reg_from_load(s, 15, tmp);
12797 break;
12799 case 1: case 3: case 9: case 11: /* czb */
12800 rm = insn & 7;
12801 tmp = load_reg(s, rm);
12802 arm_gen_condlabel(s);
12803 if (insn & (1 << 11))
12804 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
12805 else
12806 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
12807 tcg_temp_free_i32(tmp);
12808 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
12809 val = (uint32_t)s->pc + 2;
12810 val += offset;
12811 gen_jmp(s, val);
12812 break;
12814 case 15: /* IT, nop-hint. */
12815 if ((insn & 0xf) == 0) {
12816 gen_nop_hint(s, (insn >> 4) & 0xf);
12817 break;
12819 /* If Then. */
12820 s->condexec_cond = (insn >> 4) & 0xe;
12821 s->condexec_mask = insn & 0x1f;
12822 /* No actual code generated for this insn, just setup state. */
12823 break;
12825 case 0xe: /* bkpt */
12827 int imm8 = extract32(insn, 0, 8);
12828 ARCH(5);
12829 gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
12830 break;
12833 case 0xa: /* rev, and hlt */
12835 int op1 = extract32(insn, 6, 2);
12837 if (op1 == 2) {
12838 /* HLT */
12839 int imm6 = extract32(insn, 0, 6);
12841 gen_hlt(s, imm6);
12842 break;
12845 /* Otherwise this is rev */
12846 ARCH(6);
12847 rn = (insn >> 3) & 0x7;
12848 rd = insn & 0x7;
12849 tmp = load_reg(s, rn);
12850 switch (op1) {
12851 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
12852 case 1: gen_rev16(tmp); break;
12853 case 3: gen_revsh(tmp); break;
12854 default:
12855 g_assert_not_reached();
12857 store_reg(s, rd, tmp);
12858 break;
        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
                    gen_helper_setend(cpu_env);
                    s->base.is_jmp = DISAS_UPDATE;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    break;
                }
                if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;

        default:
            goto undef;
        }
        break;

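    /*
     * LDMIA/STMIA (16-bit encoding): transfers are always increment-after
     * from Rn; base writeback and the "base register in the register list"
     * special cases are handled after the transfer loop below.
     */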
    case 12:
    {
        /* load/store multiple */
        TCGv_i32 loaded_var = NULL;
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    if (i == rn) {
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
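    /*
     * Conditional branch / SVC: cond 0b1110 is a permanently UNDEFINED
     * encoding and cond 0b1111 is SVC; otherwise branch by a signed 8-bit
     * immediate scaled by 2, relative to the address of this insn + 4.
     */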
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe) {
            goto undef;
        }

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 8);
            s->base.is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        arm_skip_unless(s, cond);

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;

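    /*
     * Unconditional branch (B), or on Thumb1-only CPUs the BLX-suffix half
     * of a split 32-bit BL/BLX pair (see the comments below).
     */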
    case 14:
        if (insn & (1 << 11)) {
            /* thumb_insn_is_16bit() ensures we can't get here for
             * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
             * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
             */
            assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
            ARCH(5);
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        /* thumb_insn_is_16bit() ensures we can't get here for
         * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
         */
        assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));

        if (insn & (1 << 11)) {
            /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
        } else {
            /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
            uint32_t uoffset = ((int32_t)insn << 21) >> 9;

            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
        }
        break;
    }
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
    /* Return true if the insn at dc->pc might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     * We know this is a Thumb insn, and our caller ensures we are
     * only called if dc->pc is less than 4 bytes from the page
     * boundary, so we cross the page if the first 16 bits indicate
     * that this is a 32 bit insn.
     */
    uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);

    return !thumb_insn_is_16bit(s, insn);
}

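/*
 * Translator hook: unpack the TB flags (Thumb/ARM mode, IT-block state,
 * MMU index, FP/vector configuration, M-profile bits, ...) into the
 * DisasContext before translation of the block begins.
 */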
static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t tb_flags = dc->base.tb->flags;
    uint32_t condexec, core_mmu_idx;

    dc->isar = &cpu->isar;
    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
    dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
    dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
    condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
    dc->condexec_mask = (condexec & 0xf) << 1;
    dc->condexec_cond = condexec >> 4;
    core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
    dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
    dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
    dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
    dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
    dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
    dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
    dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                     regime_is_secure(env, dc->mmu_idx);
    dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
    dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;

    /* If architectural single step active, limit to 1. */
    if (is_singlestepping(dc)) {
        dc->base.max_insns = 1;
    }

    /* ARM is a fixed-length ISA. Bound the number of insns to execute
     * to those left on the page. ((pc_first | TARGET_PAGE_MASK) sets every
     * bit above the page offset, so its negation is the number of bytes
     * remaining on this page, and each ARM insn is 4 bytes.)
     */
    if (!dc->thumb) {
        int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
        dc->base.max_insns = MIN(dc->base.max_insns, bound);
    }

    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
    cpu_M0 = tcg_temp_new_i64();
}

static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
}

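/*
 * Per-insn hook: record the guest PC and the packed IT-block state as
 * insn_start arguments so restore_state_to_opc() can recover them after an
 * exception; the third word is reserved for the instruction syndrome, which
 * the load/store code fills in later via disas_set_insn_syndrome().
 */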
static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->pc,
                       (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                       0);
    dc->insn_start = tcg_last_op();
}

static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->pc);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing. */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->pc += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}

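/*
 * Common per-insn preamble for the ARM and Thumb translators: returns true
 * if translation of the insn should be skipped because an exception has
 * already been generated (user-mode kernel-page trap, or a pending
 * software-step exception in the Active-pending state).
 */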
static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page. */
    if (dc->pc >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block. */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}

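/*
 * Common per-insn epilogue: emit the label for a pending "condition failed"
 * branch and advance pc_next past the insn just translated.
 */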
static void arm_post_translate_insn(DisasContext *dc)
{
    if (dc->condjmp && !dc->base.is_jmp) {
        gen_set_label(dc->condlabel);
        dc->condjmp = 0;
    }
    dc->base.pc_next = dc->pc;
    translator_loop_temp_check(&dc->base);
}

static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    unsigned int insn;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
    dc->insn = insn;
    dc->pc += 4;
    disas_arm_insn(dc, insn);

    arm_post_translate_insn(dc);

    /* ARM is a fixed-length ISA. We performed the cross-page check
       in init_disas_context by adjusting max_insns. */
}

static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
{
    /* Return true if this Thumb insn is always unconditional,
     * even inside an IT block. This is true of only a very few
     * instructions: BKPT, HLT, and SG.
     *
     * A larger class of instructions are UNPREDICTABLE if used
     * inside an IT block; we do not need to detect those here, because
     * what we do by default (perform the cc check and update the IT
     * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
     * choice for those situations.
     *
     * insn is either a 16-bit or a 32-bit instruction; the two are
     * distinguishable because for the 16-bit case the top 16 bits
     * are zeroes, and that isn't a valid 32-bit encoding.
     */
    if ((insn & 0xffffff00) == 0xbe00) {
        /* BKPT */
        return true;
    }

    if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_M)) {
        /* HLT: v8A only. This is unconditional even when it is going to
         * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
         * For v7 cores this was a plain old undefined encoding and so
         * honours its cc check. (We might be using the encoding as
         * a semihosting trap, but we don't change the cc check behaviour
         * on that account, because a debugger connected to a real v7A
         * core and emulating semihosting traps by catching the UNDEF
         * exception would also only see cases where the cc check passed.)
         * No guest code should be trying to do a HLT semihosting trap
         * in an IT block anyway.
         */
        return true;
    }

    if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* SG: v8M only */
        return true;
    }

    return false;
}

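/*
 * Thumb translator hook: fetch one halfword, widen to a 32-bit insn if it
 * is the first half of a 32-bit encoding, apply the IT-block condition
 * check, translate the insn, and then advance the IT state machine.
 */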
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, insn);
    dc->pc += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->pc += 2;
    }
    dc->insn = insn;

    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition. */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA. Stop translation when the next insn
     * will touch a new page. This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

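/*
 * TB epilogue: emit whatever code is needed to end the block for the reason
 * translation stopped (goto_tb chaining, exception, WFI/WFE/YIELD, exception
 * return, or a single-step exception), plus the "condition failed" path for
 * a trailing conditional branch or trap.
 */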
static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->pc);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->pc);
        }
    }

    /* Functions above can change dc->pc, so re-align dc->base.pc_next */
    dc->base.pc_next = dc->pc;
}

static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}

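/*
 * Hook tables for the generic translator_loop(); the ARM and Thumb
 * translators share every hook except translate_insn.
 */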
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
{
    DisasContext dc;
    const TranslatorOps *ops = &arm_translator_ops;

    if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
        ops = &thumb_translator_ops;
    }
#ifdef TARGET_AARCH64
    if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
        ops = &aarch64_translator_ops;
    }
#endif

    translator_loop(ops, &dc.base, cpu, tb);
}

void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
        return;
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                    xpsr,
                    xpsr & XPSR_N ? 'N' : '-',
                    xpsr & XPSR_Z ? 'Z' : '-',
                    xpsr & XPSR_C ? 'C' : '-',
                    xpsr & XPSR_V ? 'V' : '-',
                    xpsr & XPSR_T ? 'T' : 'A',
                    ns_status,
                    mode);
    } else {
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                    psr,
                    psr & CPSR_N ? 'N' : '-',
                    psr & CPSR_Z ? 'Z' : '-',
                    psr & CPSR_C ? 'C' : '-',
                    psr & CPSR_V ? 'V' : '-',
                    psr & CPSR_T ? 'T' : 'A',
                    ns_status,
                    aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = *aa32_vfp_dreg(env, i);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}

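/*
 * Inverse of the insn_start bookkeeping above: data[] holds the per-insn
 * start words recorded at translation time (PC, packed IT-block bits,
 * syndrome) and is used to restore CPU state after a mid-TB exception.
 */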
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}