target/arm/translate.c
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
27 #include "tcg-op.h"
28 #include "tcg-op-gvec.h"
29 #include "qemu/log.h"
30 #include "qemu/bitops.h"
31 #include "arm_ldst.h"
32 #include "exec/semihost.h"
34 #include "exec/helper-proto.h"
35 #include "exec/helper-gen.h"
37 #include "trace-tcg.h"
38 #include "exec/log.h"
41 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
43 /* currently all emulated v5 cores are also v5TE, so don't bother */
44 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
45 #define ENABLE_ARCH_5J dc_isar_feature(jazelle, s)
46 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
52 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
54 #include "translate.h"
56 #if defined(CONFIG_USER_ONLY)
57 #define IS_USER(s) 1
58 #else
59 #define IS_USER(s) (s->user)
60 #endif
62 /* We reuse the same 64-bit temporaries for efficiency. */
63 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
64 static TCGv_i32 cpu_R[16];
65 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66 TCGv_i64 cpu_exclusive_addr;
67 TCGv_i64 cpu_exclusive_val;
69 /* FIXME: These should be removed. */
70 static TCGv_i32 cpu_F0s, cpu_F1s;
71 static TCGv_i64 cpu_F0d, cpu_F1d;
73 #include "exec/gen-icount.h"
75 static const char * const regnames[] =
76 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
77 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
79 /* Function prototypes for gen_ functions calling Neon helpers. */
80 typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
81 TCGv_i32, TCGv_i32);
83 /* initialize TCG globals. */
84 void arm_translate_init(void)
86 int i;
88 for (i = 0; i < 16; i++) {
89 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
90 offsetof(CPUARMState, regs[i]),
91 regnames[i]);
93 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
94 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
95 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
96 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
98 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
99 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
100 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
101 offsetof(CPUARMState, exclusive_val), "exclusive_val");
103 a64_translate_init();
106 /* Flags for the disas_set_da_iss info argument:
107 * lower bits hold the Rt register number, higher bits are flags.
109 typedef enum ISSInfo {
110 ISSNone = 0,
111 ISSRegMask = 0x1f,
112 ISSInvalid = (1 << 5),
113 ISSIsAcqRel = (1 << 6),
114 ISSIsWrite = (1 << 7),
115 ISSIs16Bit = (1 << 8),
116 } ISSInfo;
118 /* Save the syndrome information for a Data Abort */
119 static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
121 uint32_t syn;
122 int sas = memop & MO_SIZE;
123 bool sse = memop & MO_SIGN;
124 bool is_acqrel = issinfo & ISSIsAcqRel;
125 bool is_write = issinfo & ISSIsWrite;
126 bool is_16bit = issinfo & ISSIs16Bit;
127 int srt = issinfo & ISSRegMask;
129 if (issinfo & ISSInvalid) {
130 /* Some callsites want to conditionally provide ISS info,
131 * eg "only if this was not a writeback"
133 return;
136 if (srt == 15) {
137 /* For AArch32, insns where the src/dest is R15 never generate
138 * ISS information. Catching that here saves checking at all
139 * the call sites.
141 return;
144 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
145 0, 0, 0, is_write, 0, is_16bit);
146 disas_set_insn_syndrome(s, syn);
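/*
 * Illustrative example (not part of the original source): a load/store
 * callsite that wants ISS reporting passes the transfer register plus
 * flags, e.g. a 16-bit-encoded word store to register rt might look like:
 *
 *     gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rt | ISSIs16Bit);
 *
 * The DO_GEN_LD/DO_GEN_ST wrappers further down OR in ISSIsWrite for
 * stores and forward everything here.
 */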
149 static inline int get_a32_user_mem_index(DisasContext *s)
151 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
152 * insns:
153 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
154 * otherwise, access as if at PL0.
156 switch (s->mmu_idx) {
157 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
158 case ARMMMUIdx_S12NSE0:
159 case ARMMMUIdx_S12NSE1:
160 return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
161 case ARMMMUIdx_S1E3:
162 case ARMMMUIdx_S1SE0:
163 case ARMMMUIdx_S1SE1:
164 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
165 case ARMMMUIdx_MUser:
166 case ARMMMUIdx_MPriv:
167 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
168 case ARMMMUIdx_MUserNegPri:
169 case ARMMMUIdx_MPrivNegPri:
170 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
171 case ARMMMUIdx_MSUser:
172 case ARMMMUIdx_MSPriv:
173 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
174 case ARMMMUIdx_MSUserNegPri:
175 case ARMMMUIdx_MSPrivNegPri:
176 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
177 case ARMMMUIdx_S2NS:
178 default:
179 g_assert_not_reached();
183 static inline TCGv_i32 load_cpu_offset(int offset)
185 TCGv_i32 tmp = tcg_temp_new_i32();
186 tcg_gen_ld_i32(tmp, cpu_env, offset);
187 return tmp;
190 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
192 static inline void store_cpu_offset(TCGv_i32 var, int offset)
194 tcg_gen_st_i32(var, cpu_env, offset);
195 tcg_temp_free_i32(var);
198 #define store_cpu_field(var, name) \
199 store_cpu_offset(var, offsetof(CPUARMState, name))
201 /* Set a variable to the value of a CPU register. */
202 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
204 if (reg == 15) {
205 uint32_t addr;
206 /* normally, since we updated PC, we need only to add one insn */
207 if (s->thumb)
208 addr = (long)s->pc + 2;
209 else
210 addr = (long)s->pc + 4;
211 tcg_gen_movi_i32(var, addr);
212 } else {
213 tcg_gen_mov_i32(var, cpu_R[reg]);
217 /* Create a new temporary and set it to the value of a CPU register. */
218 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
220 TCGv_i32 tmp = tcg_temp_new_i32();
221 load_reg_var(s, tmp, reg);
222 return tmp;
225 /* Set a CPU register. The source must be a temporary and will be
226 marked as dead. */
227 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
229 if (reg == 15) {
230 /* In Thumb mode, we must ignore bit 0.
231 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
232 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
233 * We choose to ignore [1:0] in ARM mode for all architecture versions.
235 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
236 s->base.is_jmp = DISAS_JUMP;
238 tcg_gen_mov_i32(cpu_R[reg], var);
239 tcg_temp_free_i32(var);
243 * Variant of store_reg which applies v8M stack-limit checks before updating
244 * SP. If the check fails this will result in an exception being taken.
245 * We disable the stack checks for CONFIG_USER_ONLY because we have
246 * no idea what the stack limits should be in that case.
247 * If stack checking is not being done this just acts like store_reg().
249 static void store_sp_checked(DisasContext *s, TCGv_i32 var)
251 #ifndef CONFIG_USER_ONLY
252 if (s->v8m_stackcheck) {
253 gen_helper_v8m_stackcheck(cpu_env, var);
255 #endif
256 store_reg(s, 13, var);
259 /* Value extensions. */
260 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
261 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
262 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
263 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
265 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
266 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
269 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
271 TCGv_i32 tmp_mask = tcg_const_i32(mask);
272 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
273 tcg_temp_free_i32(tmp_mask);
275 /* Set NZCV flags from the high 4 bits of var. */
276 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
278 static void gen_exception_internal(int excp)
280 TCGv_i32 tcg_excp = tcg_const_i32(excp);
282 assert(excp_is_internal(excp));
283 gen_helper_exception_internal(cpu_env, tcg_excp);
284 tcg_temp_free_i32(tcg_excp);
287 static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
289 TCGv_i32 tcg_excp = tcg_const_i32(excp);
290 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
291 TCGv_i32 tcg_el = tcg_const_i32(target_el);
293 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
294 tcg_syn, tcg_el);
296 tcg_temp_free_i32(tcg_el);
297 tcg_temp_free_i32(tcg_syn);
298 tcg_temp_free_i32(tcg_excp);
301 static void gen_ss_advance(DisasContext *s)
303 /* If the singlestep state is Active-not-pending, advance to
304 * Active-pending.
306 if (s->ss_active) {
307 s->pstate_ss = 0;
308 gen_helper_clear_pstate_ss(cpu_env);
312 static void gen_step_complete_exception(DisasContext *s)
314 /* We just completed step of an insn. Move from Active-not-pending
315 * to Active-pending, and then also take the swstep exception.
316 * This corresponds to making the (IMPDEF) choice to prioritize
317 * swstep exceptions over asynchronous exceptions taken to an exception
318 * level where debug is disabled. This choice has the advantage that
319 * we do not need to maintain internal state corresponding to the
320 * ISV/EX syndrome bits between completion of the step and generation
321 * of the exception, and our syndrome information is always correct.
323 gen_ss_advance(s);
324 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
325 default_exception_el(s));
326 s->base.is_jmp = DISAS_NORETURN;
329 static void gen_singlestep_exception(DisasContext *s)
331 /* Generate the right kind of exception for singlestep, which is
332 * either the architectural singlestep or EXCP_DEBUG for QEMU's
333 * gdb singlestepping.
335 if (s->ss_active) {
336 gen_step_complete_exception(s);
337 } else {
338 gen_exception_internal(EXCP_DEBUG);
342 static inline bool is_singlestepping(DisasContext *s)
344 /* Return true if we are singlestepping either because of
345 * architectural singlestep or QEMU gdbstub singlestep. This does
346 * not include the command line '-singlestep' mode which is rather
347 * misnamed as it only means "one instruction per TB" and doesn't
348 * affect the code we generate.
350 return s->base.singlestep_enabled || s->ss_active;
353 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
355 TCGv_i32 tmp1 = tcg_temp_new_i32();
356 TCGv_i32 tmp2 = tcg_temp_new_i32();
357 tcg_gen_ext16s_i32(tmp1, a);
358 tcg_gen_ext16s_i32(tmp2, b);
359 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
360 tcg_temp_free_i32(tmp2);
361 tcg_gen_sari_i32(a, a, 16);
362 tcg_gen_sari_i32(b, b, 16);
363 tcg_gen_mul_i32(b, b, a);
364 tcg_gen_mov_i32(a, tmp1);
365 tcg_temp_free_i32(tmp1);
368 /* Byteswap each halfword. */
369 static void gen_rev16(TCGv_i32 var)
371 TCGv_i32 tmp = tcg_temp_new_i32();
372 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
373 tcg_gen_shri_i32(tmp, var, 8);
374 tcg_gen_and_i32(tmp, tmp, mask);
375 tcg_gen_and_i32(var, var, mask);
376 tcg_gen_shli_i32(var, var, 8);
377 tcg_gen_or_i32(var, var, tmp);
378 tcg_temp_free_i32(mask);
379 tcg_temp_free_i32(tmp);
382 /* Byteswap low halfword and sign extend. */
383 static void gen_revsh(TCGv_i32 var)
385 tcg_gen_ext16u_i32(var, var);
386 tcg_gen_bswap16_i32(var, var);
387 tcg_gen_ext16s_i32(var, var);
390 /* Return (b << 32) + a. Mark inputs as dead */
391 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
393 TCGv_i64 tmp64 = tcg_temp_new_i64();
395 tcg_gen_extu_i32_i64(tmp64, b);
396 tcg_temp_free_i32(b);
397 tcg_gen_shli_i64(tmp64, tmp64, 32);
398 tcg_gen_add_i64(a, tmp64, a);
400 tcg_temp_free_i64(tmp64);
401 return a;
404 /* Return (b << 32) - a. Mark inputs as dead. */
405 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
407 TCGv_i64 tmp64 = tcg_temp_new_i64();
409 tcg_gen_extu_i32_i64(tmp64, b);
410 tcg_temp_free_i32(b);
411 tcg_gen_shli_i64(tmp64, tmp64, 32);
412 tcg_gen_sub_i64(a, tmp64, a);
414 tcg_temp_free_i64(tmp64);
415 return a;
418 /* 32x32->64 multiply. Marks inputs as dead. */
419 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
421 TCGv_i32 lo = tcg_temp_new_i32();
422 TCGv_i32 hi = tcg_temp_new_i32();
423 TCGv_i64 ret;
425 tcg_gen_mulu2_i32(lo, hi, a, b);
426 tcg_temp_free_i32(a);
427 tcg_temp_free_i32(b);
429 ret = tcg_temp_new_i64();
430 tcg_gen_concat_i32_i64(ret, lo, hi);
431 tcg_temp_free_i32(lo);
432 tcg_temp_free_i32(hi);
434 return ret;
437 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
439 TCGv_i32 lo = tcg_temp_new_i32();
440 TCGv_i32 hi = tcg_temp_new_i32();
441 TCGv_i64 ret;
443 tcg_gen_muls2_i32(lo, hi, a, b);
444 tcg_temp_free_i32(a);
445 tcg_temp_free_i32(b);
447 ret = tcg_temp_new_i64();
448 tcg_gen_concat_i32_i64(ret, lo, hi);
449 tcg_temp_free_i32(lo);
450 tcg_temp_free_i32(hi);
452 return ret;
455 /* Swap low and high halfwords. */
456 static void gen_swap_half(TCGv_i32 var)
458 TCGv_i32 tmp = tcg_temp_new_i32();
459 tcg_gen_shri_i32(tmp, var, 16);
460 tcg_gen_shli_i32(var, var, 16);
461 tcg_gen_or_i32(var, var, tmp);
462 tcg_temp_free_i32(tmp);
465 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
466 tmp = (t0 ^ t1) & 0x8000;
467 t0 &= ~0x8000;
468 t1 &= ~0x8000;
469 t0 = (t0 + t1) ^ tmp;
472 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
474 TCGv_i32 tmp = tcg_temp_new_i32();
475 tcg_gen_xor_i32(tmp, t0, t1);
476 tcg_gen_andi_i32(tmp, tmp, 0x8000);
477 tcg_gen_andi_i32(t0, t0, ~0x8000);
478 tcg_gen_andi_i32(t1, t1, ~0x8000);
479 tcg_gen_add_i32(t0, t0, t1);
480 tcg_gen_xor_i32(t0, t0, tmp);
481 tcg_temp_free_i32(tmp);
482 tcg_temp_free_i32(t1);
485 /* Set CF to the top bit of var. */
486 static void gen_set_CF_bit31(TCGv_i32 var)
488 tcg_gen_shri_i32(cpu_CF, var, 31);
491 /* Set N and Z flags from var. */
492 static inline void gen_logic_CC(TCGv_i32 var)
494 tcg_gen_mov_i32(cpu_NF, var);
495 tcg_gen_mov_i32(cpu_ZF, var);
498 /* T0 += T1 + CF. */
499 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
501 tcg_gen_add_i32(t0, t0, t1);
502 tcg_gen_add_i32(t0, t0, cpu_CF);
505 /* dest = T0 + T1 + CF. */
506 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
508 tcg_gen_add_i32(dest, t0, t1);
509 tcg_gen_add_i32(dest, dest, cpu_CF);
512 /* dest = T0 - T1 + CF - 1. */
513 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
515 tcg_gen_sub_i32(dest, t0, t1);
516 tcg_gen_add_i32(dest, dest, cpu_CF);
517 tcg_gen_subi_i32(dest, dest, 1);
520 /* dest = T0 + T1. Compute C, N, V and Z flags */
521 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
523 TCGv_i32 tmp = tcg_temp_new_i32();
524 tcg_gen_movi_i32(tmp, 0);
525 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
526 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
527 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
528 tcg_gen_xor_i32(tmp, t0, t1);
529 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
530 tcg_temp_free_i32(tmp);
531 tcg_gen_mov_i32(dest, cpu_NF);
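/*
 * Note on the flag computation above: after the add2, cpu_NF holds the
 * 32-bit result and cpu_CF the carry-out.  Signed overflow is
 * (result ^ t0) & ~(t0 ^ t1): the operands had the same sign but the
 * result's sign differs; only bit 31 of cpu_VF is architecturally
 * meaningful.  Worked example: 0x7fffffff + 1 = 0x80000000 flips the sign
 * while the inputs agree, so V is set.  gen_sub_CC below uses
 * (result ^ t0) & (t0 ^ t1) instead, because subtraction can only
 * overflow when the operand signs differ.
 */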
534 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
535 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
537 TCGv_i32 tmp = tcg_temp_new_i32();
538 if (TCG_TARGET_HAS_add2_i32) {
539 tcg_gen_movi_i32(tmp, 0);
540 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
541 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
542 } else {
543 TCGv_i64 q0 = tcg_temp_new_i64();
544 TCGv_i64 q1 = tcg_temp_new_i64();
545 tcg_gen_extu_i32_i64(q0, t0);
546 tcg_gen_extu_i32_i64(q1, t1);
547 tcg_gen_add_i64(q0, q0, q1);
548 tcg_gen_extu_i32_i64(q1, cpu_CF);
549 tcg_gen_add_i64(q0, q0, q1);
550 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
551 tcg_temp_free_i64(q0);
552 tcg_temp_free_i64(q1);
554 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
555 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
556 tcg_gen_xor_i32(tmp, t0, t1);
557 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
558 tcg_temp_free_i32(tmp);
559 tcg_gen_mov_i32(dest, cpu_NF);
562 /* dest = T0 - T1. Compute C, N, V and Z flags */
563 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
565 TCGv_i32 tmp;
566 tcg_gen_sub_i32(cpu_NF, t0, t1);
567 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
568 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
569 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
570 tmp = tcg_temp_new_i32();
571 tcg_gen_xor_i32(tmp, t0, t1);
572 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
573 tcg_temp_free_i32(tmp);
574 tcg_gen_mov_i32(dest, cpu_NF);
577 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
578 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
580 TCGv_i32 tmp = tcg_temp_new_i32();
581 tcg_gen_not_i32(tmp, t1);
582 gen_adc_CC(dest, t0, tmp);
583 tcg_temp_free_i32(tmp);
586 #define GEN_SHIFT(name) \
587 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
589 TCGv_i32 tmp1, tmp2, tmp3; \
590 tmp1 = tcg_temp_new_i32(); \
591 tcg_gen_andi_i32(tmp1, t1, 0xff); \
592 tmp2 = tcg_const_i32(0); \
593 tmp3 = tcg_const_i32(0x1f); \
594 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
595 tcg_temp_free_i32(tmp3); \
596 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
597 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
598 tcg_temp_free_i32(tmp2); \
599 tcg_temp_free_i32(tmp1); \
601 GEN_SHIFT(shl)
602 GEN_SHIFT(shr)
603 #undef GEN_SHIFT
605 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
607 TCGv_i32 tmp1, tmp2;
608 tmp1 = tcg_temp_new_i32();
609 tcg_gen_andi_i32(tmp1, t1, 0xff);
610 tmp2 = tcg_const_i32(0x1f);
611 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
612 tcg_temp_free_i32(tmp2);
613 tcg_gen_sar_i32(dest, t0, tmp1);
614 tcg_temp_free_i32(tmp1);
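/*
 * The variable-shift helpers above follow the ARM shift-by-register rules:
 * only the bottom byte of the shift register is used, LSL/LSR by more than
 * 31 produce 0 (the movcond substitutes a zero operand), and ASR clamps
 * the amount to 31 so that shifts of 32 or more replicate the sign bit.
 * When flags are wanted, gen_arm_shift_reg() below calls the *_cc helpers
 * instead so the carry-out is computed as well.
 */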
617 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
619 TCGv_i32 c0 = tcg_const_i32(0);
620 TCGv_i32 tmp = tcg_temp_new_i32();
621 tcg_gen_neg_i32(tmp, src);
622 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
623 tcg_temp_free_i32(c0);
624 tcg_temp_free_i32(tmp);
627 static void shifter_out_im(TCGv_i32 var, int shift)
629 if (shift == 0) {
630 tcg_gen_andi_i32(cpu_CF, var, 1);
631 } else {
632 tcg_gen_shri_i32(cpu_CF, var, shift);
633 if (shift != 31) {
634 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
639 /* Shift by immediate. Includes special handling for shift == 0. */
640 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
641 int shift, int flags)
643 switch (shiftop) {
644 case 0: /* LSL */
645 if (shift != 0) {
646 if (flags)
647 shifter_out_im(var, 32 - shift);
648 tcg_gen_shli_i32(var, var, shift);
650 break;
651 case 1: /* LSR */
652 if (shift == 0) {
653 if (flags) {
654 tcg_gen_shri_i32(cpu_CF, var, 31);
656 tcg_gen_movi_i32(var, 0);
657 } else {
658 if (flags)
659 shifter_out_im(var, shift - 1);
660 tcg_gen_shri_i32(var, var, shift);
662 break;
663 case 2: /* ASR */
664 if (shift == 0)
665 shift = 32;
666 if (flags)
667 shifter_out_im(var, shift - 1);
668 if (shift == 32)
669 shift = 31;
670 tcg_gen_sari_i32(var, var, shift);
671 break;
672 case 3: /* ROR/RRX */
673 if (shift != 0) {
674 if (flags)
675 shifter_out_im(var, shift - 1);
676 tcg_gen_rotri_i32(var, var, shift); break;
677 } else {
678 TCGv_i32 tmp = tcg_temp_new_i32();
679 tcg_gen_shli_i32(tmp, cpu_CF, 31);
680 if (flags)
681 shifter_out_im(var, 0);
682 tcg_gen_shri_i32(var, var, 1);
683 tcg_gen_or_i32(var, var, tmp);
684 tcg_temp_free_i32(tmp);
689 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
690 TCGv_i32 shift, int flags)
692 if (flags) {
693 switch (shiftop) {
694 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
695 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
696 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
697 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
699 } else {
700 switch (shiftop) {
701 case 0:
702 gen_shl(var, var, shift);
703 break;
704 case 1:
705 gen_shr(var, var, shift);
706 break;
707 case 2:
708 gen_sar(var, var, shift);
709 break;
710 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
711 tcg_gen_rotr_i32(var, var, shift); break;
714 tcg_temp_free_i32(shift);
717 #define PAS_OP(pfx) \
718 switch (op2) { \
719 case 0: gen_pas_helper(glue(pfx,add16)); break; \
720 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
721 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
722 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
723 case 4: gen_pas_helper(glue(pfx,add8)); break; \
724 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
726 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
728 TCGv_ptr tmp;
730 switch (op1) {
731 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
732 case 1:
733 tmp = tcg_temp_new_ptr();
734 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
735 PAS_OP(s)
736 tcg_temp_free_ptr(tmp);
737 break;
738 case 5:
739 tmp = tcg_temp_new_ptr();
740 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
741 PAS_OP(u)
742 tcg_temp_free_ptr(tmp);
743 break;
744 #undef gen_pas_helper
745 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
746 case 2:
747 PAS_OP(q);
748 break;
749 case 3:
750 PAS_OP(sh);
751 break;
752 case 6:
753 PAS_OP(uq);
754 break;
755 case 7:
756 PAS_OP(uh);
757 break;
758 #undef gen_pas_helper
761 #undef PAS_OP
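/*
 * In the ARM encoding handled above, op1 selects the flavour of parallel
 * add/subtract (1: signed, GE-setting; 2: signed saturating; 3: signed
 * halving; 5: unsigned, GE-setting; 6: unsigned saturating; 7: unsigned
 * halving) and op2 selects the operation (add16, addsubx, subaddx, sub16,
 * add8, sub8).  Only the GE-setting forms need the pointer to the GE
 * flags, which is why the other cases use the helper variant without it.
 */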
763 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
764 #define PAS_OP(pfx) \
765 switch (op1) { \
766 case 0: gen_pas_helper(glue(pfx,add8)); break; \
767 case 1: gen_pas_helper(glue(pfx,add16)); break; \
768 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
769 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
770 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
771 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
773 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
775 TCGv_ptr tmp;
777 switch (op2) {
778 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
779 case 0:
780 tmp = tcg_temp_new_ptr();
781 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
782 PAS_OP(s)
783 tcg_temp_free_ptr(tmp);
784 break;
785 case 4:
786 tmp = tcg_temp_new_ptr();
787 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
788 PAS_OP(u)
789 tcg_temp_free_ptr(tmp);
790 break;
791 #undef gen_pas_helper
792 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
793 case 1:
794 PAS_OP(q);
795 break;
796 case 2:
797 PAS_OP(sh);
798 break;
799 case 5:
800 PAS_OP(uq);
801 break;
802 case 6:
803 PAS_OP(uh);
804 break;
805 #undef gen_pas_helper
808 #undef PAS_OP
811 * Generate a conditional based on ARM condition code cc.
812 * This is common between ARM and Aarch64 targets.
814 void arm_test_cc(DisasCompare *cmp, int cc)
816 TCGv_i32 value;
817 TCGCond cond;
818 bool global = true;
820 switch (cc) {
821 case 0: /* eq: Z */
822 case 1: /* ne: !Z */
823 cond = TCG_COND_EQ;
824 value = cpu_ZF;
825 break;
827 case 2: /* cs: C */
828 case 3: /* cc: !C */
829 cond = TCG_COND_NE;
830 value = cpu_CF;
831 break;
833 case 4: /* mi: N */
834 case 5: /* pl: !N */
835 cond = TCG_COND_LT;
836 value = cpu_NF;
837 break;
839 case 6: /* vs: V */
840 case 7: /* vc: !V */
841 cond = TCG_COND_LT;
842 value = cpu_VF;
843 break;
845 case 8: /* hi: C && !Z */
846 case 9: /* ls: !C || Z -> !(C && !Z) */
847 cond = TCG_COND_NE;
848 value = tcg_temp_new_i32();
849 global = false;
850 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
851 ZF is non-zero for !Z; so AND the two subexpressions. */
852 tcg_gen_neg_i32(value, cpu_CF);
853 tcg_gen_and_i32(value, value, cpu_ZF);
854 break;
856 case 10: /* ge: N == V -> N ^ V == 0 */
857 case 11: /* lt: N != V -> N ^ V != 0 */
858 /* Since we're only interested in the sign bit, == 0 is >= 0. */
859 cond = TCG_COND_GE;
860 value = tcg_temp_new_i32();
861 global = false;
862 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
863 break;
865 case 12: /* gt: !Z && N == V */
866 case 13: /* le: Z || N != V */
867 cond = TCG_COND_NE;
868 value = tcg_temp_new_i32();
869 global = false;
870 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
871 * the sign bit then AND with ZF to yield the result. */
872 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
873 tcg_gen_sari_i32(value, value, 31);
874 tcg_gen_andc_i32(value, cpu_ZF, value);
875 break;
877 case 14: /* always */
878 case 15: /* always */
879 /* Use the ALWAYS condition, which will fold early.
880 * It doesn't matter what we use for the value. */
881 cond = TCG_COND_ALWAYS;
882 value = cpu_ZF;
883 goto no_invert;
885 default:
886 fprintf(stderr, "Bad condition code 0x%x\n", cc);
887 abort();
890 if (cc & 1) {
891 cond = tcg_invert_cond(cond);
894 no_invert:
895 cmp->cond = cond;
896 cmp->value = value;
897 cmp->value_global = global;
900 void arm_free_cc(DisasCompare *cmp)
902 if (!cmp->value_global) {
903 tcg_temp_free_i32(cmp->value);
907 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
909 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
912 void arm_gen_test_cc(int cc, TCGLabel *label)
914 DisasCompare cmp;
915 arm_test_cc(&cmp, cc);
916 arm_jump_cc(&cmp, label);
917 arm_free_cc(&cmp);
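/*
 * Typical use (illustrative): to conditionally skip an instruction the
 * decoder can branch over it when the condition fails, roughly:
 *
 *     TCGLabel *skip = gen_new_label();
 *     arm_gen_test_cc(cond ^ 1, skip);   // taken if cond is false
 *     ... emit the conditional instruction ...
 *     gen_set_label(skip);
 *
 * This is the pattern the A32/T32 condition handling later in this file
 * relies on.
 */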
920 static const uint8_t table_logic_cc[16] = {
921 1, /* and */
922 1, /* xor */
923 0, /* sub */
924 0, /* rsb */
925 0, /* add */
926 0, /* adc */
927 0, /* sbc */
928 0, /* rsc */
929 1, /* andl */
930 1, /* xorl */
931 0, /* cmp */
932 0, /* cmn */
933 1, /* orr */
934 1, /* mov */
935 1, /* bic */
936 1, /* mvn */
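/*
 * table_logic_cc is indexed by the 4-bit data-processing opcode (insn bits
 * [24:21]); a 1 marks the logical operations, whose flag-setting forms
 * take C from the shifter output rather than from an arithmetic carry.
 */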
939 static inline void gen_set_condexec(DisasContext *s)
941 if (s->condexec_mask) {
942 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
943 TCGv_i32 tmp = tcg_temp_new_i32();
944 tcg_gen_movi_i32(tmp, val);
945 store_cpu_field(tmp, condexec_bits);
949 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
951 tcg_gen_movi_i32(cpu_R[15], val);
954 /* Set PC and Thumb state from an immediate address. */
955 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
957 TCGv_i32 tmp;
959 s->base.is_jmp = DISAS_JUMP;
960 if (s->thumb != (addr & 1)) {
961 tmp = tcg_temp_new_i32();
962 tcg_gen_movi_i32(tmp, addr & 1);
963 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
964 tcg_temp_free_i32(tmp);
966 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
969 /* Set PC and Thumb state from var. var is marked as dead. */
970 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
972 s->base.is_jmp = DISAS_JUMP;
973 tcg_gen_andi_i32(cpu_R[15], var, ~1);
974 tcg_gen_andi_i32(var, var, 1);
975 store_cpu_field(var, thumb);
978 /* Set PC and Thumb state from var. var is marked as dead.
979 * For M-profile CPUs, include logic to detect exception-return
980 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
981 * and BX reg, and no others, and happens only for code in Handler mode.
983 static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
985 /* Generate the same code here as for a simple bx, but flag via
986 * s->base.is_jmp that we need to do the rest of the work later.
988 gen_bx(s, var);
989 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
990 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
991 s->base.is_jmp = DISAS_BX_EXCRET;
995 static inline void gen_bx_excret_final_code(DisasContext *s)
997 /* Generate the code to finish possible exception return and end the TB */
998 TCGLabel *excret_label = gen_new_label();
999 uint32_t min_magic;
1001 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
1002 /* Covers FNC_RETURN and EXC_RETURN magic */
1003 min_magic = FNC_RETURN_MIN_MAGIC;
1004 } else {
1005 /* EXC_RETURN magic only */
1006 min_magic = EXC_RETURN_MIN_MAGIC;
1009 /* Is the new PC value in the magic range indicating exception return? */
1010 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
1011 /* No: end the TB as we would for a DISAS_JMP */
1012 if (is_singlestepping(s)) {
1013 gen_singlestep_exception(s);
1014 } else {
1015 tcg_gen_exit_tb(NULL, 0);
1017 gen_set_label(excret_label);
1018 /* Yes: this is an exception return.
1019 * At this point in runtime env->regs[15] and env->thumb will hold
1020 * the exception-return magic number, which do_v7m_exception_exit()
1021 * will read. Nothing else will be able to see those values because
1022 * the cpu-exec main loop guarantees that we will always go straight
1023 * from raising the exception to the exception-handling code.
1025 * gen_ss_advance(s) does nothing on M profile currently but
1026 * calling it is conceptually the right thing as we have executed
1027 * this instruction (compare SWI, HVC, SMC handling).
1029 gen_ss_advance(s);
1030 gen_exception_internal(EXCP_EXCEPTION_EXIT);
1033 static inline void gen_bxns(DisasContext *s, int rm)
1035 TCGv_i32 var = load_reg(s, rm);
1037 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1038 * we need to sync state before calling it, but:
1039 * - we don't need to do gen_set_pc_im() because the bxns helper will
1040 * always set the PC itself
1041 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1042 * unless it's outside an IT block or the last insn in an IT block,
1043 * so we know that condexec == 0 (already set at the top of the TB)
1044 * is correct in the non-UNPREDICTABLE cases, and we can choose
1045 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1047 gen_helper_v7m_bxns(cpu_env, var);
1048 tcg_temp_free_i32(var);
1049 s->base.is_jmp = DISAS_EXIT;
1052 static inline void gen_blxns(DisasContext *s, int rm)
1054 TCGv_i32 var = load_reg(s, rm);
1056 /* We don't need to sync condexec state, for the same reason as bxns.
1057 * We do however need to set the PC, because the blxns helper reads it.
1058 * The blxns helper may throw an exception.
1060 gen_set_pc_im(s, s->pc);
1061 gen_helper_v7m_blxns(cpu_env, var);
1062 tcg_temp_free_i32(var);
1063 s->base.is_jmp = DISAS_EXIT;
1066 /* Variant of store_reg which uses branch&exchange logic when storing
1067 to r15 in ARM architecture v7 and above. The source must be a temporary
1068 and will be marked as dead. */
1069 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
1071 if (reg == 15 && ENABLE_ARCH_7) {
1072 gen_bx(s, var);
1073 } else {
1074 store_reg(s, reg, var);
1078 /* Variant of store_reg which uses branch&exchange logic when storing
1079 * to r15 in ARM architecture v5T and above. This is used for storing
1080 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1081 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
1082 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
1084 if (reg == 15 && ENABLE_ARCH_5) {
1085 gen_bx_excret(s, var);
1086 } else {
1087 store_reg(s, reg, var);
1091 #ifdef CONFIG_USER_ONLY
1092 #define IS_USER_ONLY 1
1093 #else
1094 #define IS_USER_ONLY 0
1095 #endif
1097 /* Abstractions of "generate code to do a guest load/store for
1098 * AArch32", where a vaddr is always 32 bits (and is zero
1099 * extended if we're a 64 bit core) and data is also
1100 * 32 bits unless specifically doing a 64 bit access.
1101 * These functions work like tcg_gen_qemu_{ld,st}* except
1102 * that the address argument is TCGv_i32 rather than TCGv.
1105 static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
1107 TCGv addr = tcg_temp_new();
1108 tcg_gen_extu_i32_tl(addr, a32);
1110 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1111 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1112 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
1114 return addr;
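/*
 * Worked example of the XOR above: in legacy BE32 mode (SCTLR.B set) the
 * byte lanes within each aligned word are reversed, so a byte access at
 * word offset 1 is redirected to offset 2 (1 ^ 3), and a halfword access
 * at offset 2 goes to offset 0 (2 ^ 2).  Word-sized and larger accesses
 * are left alone.
 */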
1117 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1118 int index, TCGMemOp opc)
1120 TCGv addr;
1122 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1123 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1124 opc |= MO_ALIGN;
1127 addr = gen_aa32_addr(s, a32, opc);
1128 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1129 tcg_temp_free(addr);
1132 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1133 int index, TCGMemOp opc)
1135 TCGv addr;
1137 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1138 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1139 opc |= MO_ALIGN;
1142 addr = gen_aa32_addr(s, a32, opc);
1143 tcg_gen_qemu_st_i32(val, addr, index, opc);
1144 tcg_temp_free(addr);
1147 #define DO_GEN_LD(SUFF, OPC) \
1148 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
1149 TCGv_i32 a32, int index) \
1151 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
1153 static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1154 TCGv_i32 val, \
1155 TCGv_i32 a32, int index, \
1156 ISSInfo issinfo) \
1158 gen_aa32_ld##SUFF(s, val, a32, index); \
1159 disas_set_da_iss(s, OPC, issinfo); \
1162 #define DO_GEN_ST(SUFF, OPC) \
1163 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1164 TCGv_i32 a32, int index) \
1166 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
1168 static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1169 TCGv_i32 val, \
1170 TCGv_i32 a32, int index, \
1171 ISSInfo issinfo) \
1173 gen_aa32_st##SUFF(s, val, a32, index); \
1174 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
1177 static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
1179 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1180 if (!IS_USER_ONLY && s->sctlr_b) {
1181 tcg_gen_rotri_i64(val, val, 32);
1185 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1186 int index, TCGMemOp opc)
1188 TCGv addr = gen_aa32_addr(s, a32, opc);
1189 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1190 gen_aa32_frob64(s, val);
1191 tcg_temp_free(addr);
1194 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1195 TCGv_i32 a32, int index)
1197 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1200 static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1201 int index, TCGMemOp opc)
1203 TCGv addr = gen_aa32_addr(s, a32, opc);
1205 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1206 if (!IS_USER_ONLY && s->sctlr_b) {
1207 TCGv_i64 tmp = tcg_temp_new_i64();
1208 tcg_gen_rotri_i64(tmp, val, 32);
1209 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1210 tcg_temp_free_i64(tmp);
1211 } else {
1212 tcg_gen_qemu_st_i64(val, addr, index, opc);
1214 tcg_temp_free(addr);
1217 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1218 TCGv_i32 a32, int index)
1220 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1223 DO_GEN_LD(8s, MO_SB)
1224 DO_GEN_LD(8u, MO_UB)
1225 DO_GEN_LD(16s, MO_SW)
1226 DO_GEN_LD(16u, MO_UW)
1227 DO_GEN_LD(32u, MO_UL)
1228 DO_GEN_ST(8, MO_UB)
1229 DO_GEN_ST(16, MO_UW)
1230 DO_GEN_ST(32, MO_UL)
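/*
 * For reference, DO_GEN_LD(8u, MO_UB) above expands (modulo whitespace) to:
 *
 *     static inline void gen_aa32_ld8u(DisasContext *s, TCGv_i32 val,
 *                                      TCGv_i32 a32, int index)
 *     {
 *         gen_aa32_ld_i32(s, val, a32, index, MO_UB | s->be_data);
 *     }
 *
 * plus a gen_aa32_ld8u_iss() wrapper that additionally records the ISS
 * syndrome via disas_set_da_iss().
 */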
1232 static inline void gen_hvc(DisasContext *s, int imm16)
1234 /* The pre HVC helper handles cases when HVC gets trapped
1235 * as an undefined insn by runtime configuration (ie before
1236 * the insn really executes).
1238 gen_set_pc_im(s, s->pc - 4);
1239 gen_helper_pre_hvc(cpu_env);
1240 /* Otherwise we will treat this as a real exception which
1241 * happens after execution of the insn. (The distinction matters
1242 * for the PC value reported to the exception handler and also
1243 * for single stepping.)
1245 s->svc_imm = imm16;
1246 gen_set_pc_im(s, s->pc);
1247 s->base.is_jmp = DISAS_HVC;
1250 static inline void gen_smc(DisasContext *s)
1252 /* As with HVC, we may take an exception either before or after
1253 * the insn executes.
1255 TCGv_i32 tmp;
1257 gen_set_pc_im(s, s->pc - 4);
1258 tmp = tcg_const_i32(syn_aa32_smc());
1259 gen_helper_pre_smc(cpu_env, tmp);
1260 tcg_temp_free_i32(tmp);
1261 gen_set_pc_im(s, s->pc);
1262 s->base.is_jmp = DISAS_SMC;
1265 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1267 gen_set_condexec(s);
1268 gen_set_pc_im(s, s->pc - offset);
1269 gen_exception_internal(excp);
1270 s->base.is_jmp = DISAS_NORETURN;
1273 static void gen_exception_insn(DisasContext *s, int offset, int excp,
1274 int syn, uint32_t target_el)
1276 gen_set_condexec(s);
1277 gen_set_pc_im(s, s->pc - offset);
1278 gen_exception(excp, syn, target_el);
1279 s->base.is_jmp = DISAS_NORETURN;
1282 static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
1284 TCGv_i32 tcg_syn;
1286 gen_set_condexec(s);
1287 gen_set_pc_im(s, s->pc - offset);
1288 tcg_syn = tcg_const_i32(syn);
1289 gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
1290 tcg_temp_free_i32(tcg_syn);
1291 s->base.is_jmp = DISAS_NORETURN;
1294 /* Force a TB lookup after an instruction that changes the CPU state. */
1295 static inline void gen_lookup_tb(DisasContext *s)
1297 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
1298 s->base.is_jmp = DISAS_EXIT;
1301 static inline void gen_hlt(DisasContext *s, int imm)
1303 /* HLT. This has two purposes.
1304 * Architecturally, it is an external halting debug instruction.
1305 * Since QEMU doesn't implement external debug, we treat this as
1306 * it is required for halting debug disabled: it will UNDEF.
1307 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1308 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1309 * must trigger semihosting even for ARMv7 and earlier, where
1310 * HLT was an undefined encoding.
1311 * In system mode, we don't allow userspace access to
1312 * semihosting, to provide some semblance of security
1313 * (and for consistency with our 32-bit semihosting).
1315 if (semihosting_enabled() &&
1316 #ifndef CONFIG_USER_ONLY
1317 s->current_el != 0 &&
1318 #endif
1319 (imm == (s->thumb ? 0x3c : 0xf000))) {
1320 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1321 return;
1324 gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
1325 default_exception_el(s));
1328 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1329 TCGv_i32 var)
1331 int val, rm, shift, shiftop;
1332 TCGv_i32 offset;
1334 if (!(insn & (1 << 25))) {
1335 /* immediate */
1336 val = insn & 0xfff;
1337 if (!(insn & (1 << 23)))
1338 val = -val;
1339 if (val != 0)
1340 tcg_gen_addi_i32(var, var, val);
1341 } else {
1342 /* shift/register */
1343 rm = (insn) & 0xf;
1344 shift = (insn >> 7) & 0x1f;
1345 shiftop = (insn >> 5) & 3;
1346 offset = load_reg(s, rm);
1347 gen_arm_shift_im(offset, shiftop, shift, 0);
1348 if (!(insn & (1 << 23)))
1349 tcg_gen_sub_i32(var, var, offset);
1350 else
1351 tcg_gen_add_i32(var, var, offset);
1352 tcg_temp_free_i32(offset);
1356 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1357 int extra, TCGv_i32 var)
1359 int val, rm;
1360 TCGv_i32 offset;
1362 if (insn & (1 << 22)) {
1363 /* immediate */
1364 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1365 if (!(insn & (1 << 23)))
1366 val = -val;
1367 val += extra;
1368 if (val != 0)
1369 tcg_gen_addi_i32(var, var, val);
1370 } else {
1371 /* register */
1372 if (extra)
1373 tcg_gen_addi_i32(var, var, extra);
1374 rm = (insn) & 0xf;
1375 offset = load_reg(s, rm);
1376 if (!(insn & (1 << 23)))
1377 tcg_gen_sub_i32(var, var, offset);
1378 else
1379 tcg_gen_add_i32(var, var, offset);
1380 tcg_temp_free_i32(offset);
1384 static TCGv_ptr get_fpstatus_ptr(int neon)
1386 TCGv_ptr statusptr = tcg_temp_new_ptr();
1387 int offset;
1388 if (neon) {
1389 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1390 } else {
1391 offset = offsetof(CPUARMState, vfp.fp_status);
1393 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1394 return statusptr;
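/*
 * The "neon" flag selects vfp.standard_fp_status, i.e. the fixed
 * "standard FPSCR value" semantics (flush-to-zero, default NaN,
 * round-to-nearest) that AArch32 Neon arithmetic uses regardless of the
 * FPSCR control bits, while VFP proper uses vfp.fp_status.
 */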
1397 #define VFP_OP2(name) \
1398 static inline void gen_vfp_##name(int dp) \
1400 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1401 if (dp) { \
1402 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1403 } else { \
1404 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1406 tcg_temp_free_ptr(fpst); \
1409 VFP_OP2(add)
1410 VFP_OP2(sub)
1411 VFP_OP2(mul)
1412 VFP_OP2(div)
1414 #undef VFP_OP2
1416 static inline void gen_vfp_F1_mul(int dp)
1418 /* Like gen_vfp_mul() but put result in F1 */
1419 TCGv_ptr fpst = get_fpstatus_ptr(0);
1420 if (dp) {
1421 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1422 } else {
1423 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1425 tcg_temp_free_ptr(fpst);
1428 static inline void gen_vfp_F1_neg(int dp)
1430 /* Like gen_vfp_neg() but put result in F1 */
1431 if (dp) {
1432 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1433 } else {
1434 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1438 static inline void gen_vfp_abs(int dp)
1440 if (dp)
1441 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1442 else
1443 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1446 static inline void gen_vfp_neg(int dp)
1448 if (dp)
1449 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1450 else
1451 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1454 static inline void gen_vfp_sqrt(int dp)
1456 if (dp)
1457 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1458 else
1459 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1462 static inline void gen_vfp_cmp(int dp)
1464 if (dp)
1465 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1466 else
1467 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1470 static inline void gen_vfp_cmpe(int dp)
1472 if (dp)
1473 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1474 else
1475 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1478 static inline void gen_vfp_F1_ld0(int dp)
1480 if (dp)
1481 tcg_gen_movi_i64(cpu_F1d, 0);
1482 else
1483 tcg_gen_movi_i32(cpu_F1s, 0);
1486 #define VFP_GEN_ITOF(name) \
1487 static inline void gen_vfp_##name(int dp, int neon) \
1489 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1490 if (dp) { \
1491 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1492 } else { \
1493 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1495 tcg_temp_free_ptr(statusptr); \
1498 VFP_GEN_ITOF(uito)
1499 VFP_GEN_ITOF(sito)
1500 #undef VFP_GEN_ITOF
1502 #define VFP_GEN_FTOI(name) \
1503 static inline void gen_vfp_##name(int dp, int neon) \
1505 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1506 if (dp) { \
1507 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1508 } else { \
1509 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1511 tcg_temp_free_ptr(statusptr); \
1514 VFP_GEN_FTOI(toui)
1515 VFP_GEN_FTOI(touiz)
1516 VFP_GEN_FTOI(tosi)
1517 VFP_GEN_FTOI(tosiz)
1518 #undef VFP_GEN_FTOI
1520 #define VFP_GEN_FIX(name, round) \
1521 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1523 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1524 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1525 if (dp) { \
1526 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1527 statusptr); \
1528 } else { \
1529 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1530 statusptr); \
1532 tcg_temp_free_i32(tmp_shift); \
1533 tcg_temp_free_ptr(statusptr); \
1535 VFP_GEN_FIX(tosh, _round_to_zero)
1536 VFP_GEN_FIX(tosl, _round_to_zero)
1537 VFP_GEN_FIX(touh, _round_to_zero)
1538 VFP_GEN_FIX(toul, _round_to_zero)
1539 VFP_GEN_FIX(shto, )
1540 VFP_GEN_FIX(slto, )
1541 VFP_GEN_FIX(uhto, )
1542 VFP_GEN_FIX(ulto, )
1543 #undef VFP_GEN_FIX
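/*
 * Each VFP_GEN_FIX instantiation above builds a fixed-point conversion
 * wrapper; for example VFP_GEN_FIX(tosh, _round_to_zero) defines
 * gen_vfp_tosh(dp, shift, neon), which calls
 * gen_helper_vfp_toshd_round_to_zero() or
 * gen_helper_vfp_toshs_round_to_zero() with the shift (the number of
 * fraction bits) passed as a constant.
 */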
1545 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1547 if (dp) {
1548 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
1549 } else {
1550 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
1554 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1556 if (dp) {
1557 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
1558 } else {
1559 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
1563 static inline long vfp_reg_offset(bool dp, unsigned reg)
1565 if (dp) {
1566 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
1567 } else {
1568 long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
1569 if (reg & 1) {
1570 ofs += offsetof(CPU_DoubleU, l.upper);
1571 } else {
1572 ofs += offsetof(CPU_DoubleU, l.lower);
1574 return ofs;
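/*
 * Worked example of the mapping above: the AArch32 D registers live in the
 * low half of the AArch64/SVE zregs array, so D5 is zregs[2].d[1], and a
 * single-precision register such as S5 is the upper 32 bits of D2, i.e.
 * one half of zregs[1].d[0] (which half in memory depends on host
 * endianness, hence the CPU_DoubleU upper/lower selection).
 */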
1578 /* Return the offset of a 32-bit piece of a NEON register.
1579 zero is the least significant end of the register. */
1580 static inline long
1581 neon_reg_offset (int reg, int n)
1583 int sreg;
1584 sreg = reg * 2 + n;
1585 return vfp_reg_offset(0, sreg);
1588 /* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
1589 * where 0 is the least significant end of the register.
1591 static inline long
1592 neon_element_offset(int reg, int element, TCGMemOp size)
1594 int element_size = 1 << size;
1595 int ofs = element * element_size;
1596 #ifdef HOST_WORDS_BIGENDIAN
1597 /* Calculate the offset assuming fully little-endian,
1598 * then XOR to account for the order of the 8-byte units.
1600 if (element_size < 8) {
1601 ofs ^= 8 - element_size;
1603 #endif
1604 return neon_reg_offset(reg, 0) + ofs;
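/*
 * Example: on a big-endian host each 64-bit unit of the register file is
 * stored most-significant-byte first, so byte element 0 (architecturally
 * the least significant byte) sits at offset 7 within its unit, which is
 * exactly what the XOR with (8 - element_size) produces: 0 ^ 7 == 7.
 */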
1607 static TCGv_i32 neon_load_reg(int reg, int pass)
1609 TCGv_i32 tmp = tcg_temp_new_i32();
1610 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1611 return tmp;
1614 static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
1616 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1618 switch (mop) {
1619 case MO_UB:
1620 tcg_gen_ld8u_i32(var, cpu_env, offset);
1621 break;
1622 case MO_UW:
1623 tcg_gen_ld16u_i32(var, cpu_env, offset);
1624 break;
1625 case MO_UL:
1626 tcg_gen_ld_i32(var, cpu_env, offset);
1627 break;
1628 default:
1629 g_assert_not_reached();
1633 static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
1635 long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
1637 switch (mop) {
1638 case MO_UB:
1639 tcg_gen_ld8u_i64(var, cpu_env, offset);
1640 break;
1641 case MO_UW:
1642 tcg_gen_ld16u_i64(var, cpu_env, offset);
1643 break;
1644 case MO_UL:
1645 tcg_gen_ld32u_i64(var, cpu_env, offset);
1646 break;
1647 case MO_Q:
1648 tcg_gen_ld_i64(var, cpu_env, offset);
1649 break;
1650 default:
1651 g_assert_not_reached();
1655 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1657 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1658 tcg_temp_free_i32(var);
1661 static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
1663 long offset = neon_element_offset(reg, ele, size);
1665 switch (size) {
1666 case MO_8:
1667 tcg_gen_st8_i32(var, cpu_env, offset);
1668 break;
1669 case MO_16:
1670 tcg_gen_st16_i32(var, cpu_env, offset);
1671 break;
1672 case MO_32:
1673 tcg_gen_st_i32(var, cpu_env, offset);
1674 break;
1675 default:
1676 g_assert_not_reached();
1680 static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
1682 long offset = neon_element_offset(reg, ele, size);
1684 switch (size) {
1685 case MO_8:
1686 tcg_gen_st8_i64(var, cpu_env, offset);
1687 break;
1688 case MO_16:
1689 tcg_gen_st16_i64(var, cpu_env, offset);
1690 break;
1691 case MO_32:
1692 tcg_gen_st32_i64(var, cpu_env, offset);
1693 break;
1694 case MO_64:
1695 tcg_gen_st_i64(var, cpu_env, offset);
1696 break;
1697 default:
1698 g_assert_not_reached();
1702 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1704 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1707 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1709 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1712 static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1714 TCGv_ptr ret = tcg_temp_new_ptr();
1715 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1716 return ret;
1719 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1720 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1721 #define tcg_gen_st_f32 tcg_gen_st_i32
1722 #define tcg_gen_st_f64 tcg_gen_st_i64
1724 static inline void gen_mov_F0_vreg(int dp, int reg)
1726 if (dp)
1727 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1728 else
1729 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1732 static inline void gen_mov_F1_vreg(int dp, int reg)
1734 if (dp)
1735 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1736 else
1737 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1740 static inline void gen_mov_vreg_F0(int dp, int reg)
1742 if (dp)
1743 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1744 else
1745 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1748 #define ARM_CP_RW_BIT (1 << 20)
1750 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1752 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1755 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1757 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1760 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1762 TCGv_i32 var = tcg_temp_new_i32();
1763 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1764 return var;
1767 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1769 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1770 tcg_temp_free_i32(var);
1773 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1775 iwmmxt_store_reg(cpu_M0, rn);
1778 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1780 iwmmxt_load_reg(cpu_M0, rn);
1783 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1785 iwmmxt_load_reg(cpu_V1, rn);
1786 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1789 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1791 iwmmxt_load_reg(cpu_V1, rn);
1792 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1795 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1797 iwmmxt_load_reg(cpu_V1, rn);
1798 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1801 #define IWMMXT_OP(name) \
1802 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1804 iwmmxt_load_reg(cpu_V1, rn); \
1805 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1808 #define IWMMXT_OP_ENV(name) \
1809 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1811 iwmmxt_load_reg(cpu_V1, rn); \
1812 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1815 #define IWMMXT_OP_ENV_SIZE(name) \
1816 IWMMXT_OP_ENV(name##b) \
1817 IWMMXT_OP_ENV(name##w) \
1818 IWMMXT_OP_ENV(name##l)
1820 #define IWMMXT_OP_ENV1(name) \
1821 static inline void gen_op_iwmmxt_##name##_M0(void) \
1823 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1826 IWMMXT_OP(maddsq)
1827 IWMMXT_OP(madduq)
1828 IWMMXT_OP(sadb)
1829 IWMMXT_OP(sadw)
1830 IWMMXT_OP(mulslw)
1831 IWMMXT_OP(mulshw)
1832 IWMMXT_OP(mululw)
1833 IWMMXT_OP(muluhw)
1834 IWMMXT_OP(macsw)
1835 IWMMXT_OP(macuw)
1837 IWMMXT_OP_ENV_SIZE(unpackl)
1838 IWMMXT_OP_ENV_SIZE(unpackh)
1840 IWMMXT_OP_ENV1(unpacklub)
1841 IWMMXT_OP_ENV1(unpackluw)
1842 IWMMXT_OP_ENV1(unpacklul)
1843 IWMMXT_OP_ENV1(unpackhub)
1844 IWMMXT_OP_ENV1(unpackhuw)
1845 IWMMXT_OP_ENV1(unpackhul)
1846 IWMMXT_OP_ENV1(unpacklsb)
1847 IWMMXT_OP_ENV1(unpacklsw)
1848 IWMMXT_OP_ENV1(unpacklsl)
1849 IWMMXT_OP_ENV1(unpackhsb)
1850 IWMMXT_OP_ENV1(unpackhsw)
1851 IWMMXT_OP_ENV1(unpackhsl)
1853 IWMMXT_OP_ENV_SIZE(cmpeq)
1854 IWMMXT_OP_ENV_SIZE(cmpgtu)
1855 IWMMXT_OP_ENV_SIZE(cmpgts)
1857 IWMMXT_OP_ENV_SIZE(mins)
1858 IWMMXT_OP_ENV_SIZE(minu)
1859 IWMMXT_OP_ENV_SIZE(maxs)
1860 IWMMXT_OP_ENV_SIZE(maxu)
1862 IWMMXT_OP_ENV_SIZE(subn)
1863 IWMMXT_OP_ENV_SIZE(addn)
1864 IWMMXT_OP_ENV_SIZE(subu)
1865 IWMMXT_OP_ENV_SIZE(addu)
1866 IWMMXT_OP_ENV_SIZE(subs)
1867 IWMMXT_OP_ENV_SIZE(adds)
1869 IWMMXT_OP_ENV(avgb0)
1870 IWMMXT_OP_ENV(avgb1)
1871 IWMMXT_OP_ENV(avgw0)
1872 IWMMXT_OP_ENV(avgw1)
1874 IWMMXT_OP_ENV(packuw)
1875 IWMMXT_OP_ENV(packul)
1876 IWMMXT_OP_ENV(packuq)
1877 IWMMXT_OP_ENV(packsw)
1878 IWMMXT_OP_ENV(packsl)
1879 IWMMXT_OP_ENV(packsq)
1881 static void gen_op_iwmmxt_set_mup(void)
1883 TCGv_i32 tmp;
1884 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1885 tcg_gen_ori_i32(tmp, tmp, 2);
1886 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1889 static void gen_op_iwmmxt_set_cup(void)
1891 TCGv_i32 tmp;
1892 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1893 tcg_gen_ori_i32(tmp, tmp, 1);
1894 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1897 static void gen_op_iwmmxt_setpsr_nz(void)
1899 TCGv_i32 tmp = tcg_temp_new_i32();
1900 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1901 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1904 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1906 iwmmxt_load_reg(cpu_V1, rn);
1907 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1908 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1911 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1912 TCGv_i32 dest)
1914 int rd;
1915 uint32_t offset;
1916 TCGv_i32 tmp;
1918 rd = (insn >> 16) & 0xf;
1919 tmp = load_reg(s, rd);
1921 offset = (insn & 0xff) << ((insn >> 7) & 2);
1922 if (insn & (1 << 24)) {
1923 /* Pre indexed */
1924 if (insn & (1 << 23))
1925 tcg_gen_addi_i32(tmp, tmp, offset);
1926 else
1927 tcg_gen_addi_i32(tmp, tmp, -offset);
1928 tcg_gen_mov_i32(dest, tmp);
1929 if (insn & (1 << 21))
1930 store_reg(s, rd, tmp);
1931 else
1932 tcg_temp_free_i32(tmp);
1933 } else if (insn & (1 << 21)) {
1934 /* Post indexed */
1935 tcg_gen_mov_i32(dest, tmp);
1936 if (insn & (1 << 23))
1937 tcg_gen_addi_i32(tmp, tmp, offset);
1938 else
1939 tcg_gen_addi_i32(tmp, tmp, -offset);
1940 store_reg(s, rd, tmp);
1941 } else if (!(insn & (1 << 23)))
1942 return 1;
1943 return 0;
1946 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1948 int rd = (insn >> 0) & 0xf;
1949 TCGv_i32 tmp;
1951 if (insn & (1 << 8)) {
1952 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1953 return 1;
1954 } else {
1955 tmp = iwmmxt_load_creg(rd);
1957 } else {
1958 tmp = tcg_temp_new_i32();
1959 iwmmxt_load_reg(cpu_V0, rd);
1960 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1962 tcg_gen_andi_i32(tmp, tmp, mask);
1963 tcg_gen_mov_i32(dest, tmp);
1964 tcg_temp_free_i32(tmp);
1965 return 0;
1968 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1969 (ie. an undefined instruction). */
1970 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1972 int rd, wrd;
1973 int rdhi, rdlo, rd0, rd1, i;
1974 TCGv_i32 addr;
1975 TCGv_i32 tmp, tmp2, tmp3;
1977 if ((insn & 0x0e000e00) == 0x0c000000) {
1978 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1979 wrd = insn & 0xf;
1980 rdlo = (insn >> 12) & 0xf;
1981 rdhi = (insn >> 16) & 0xf;
1982 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1983 iwmmxt_load_reg(cpu_V0, wrd);
1984 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1985 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1986 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1987 } else { /* TMCRR */
1988 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1989 iwmmxt_store_reg(cpu_V0, wrd);
1990 gen_op_iwmmxt_set_mup();
1992 return 0;
1995 wrd = (insn >> 12) & 0xf;
1996 addr = tcg_temp_new_i32();
1997 if (gen_iwmmxt_address(s, insn, addr)) {
1998 tcg_temp_free_i32(addr);
1999 return 1;
2001 if (insn & ARM_CP_RW_BIT) {
2002 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
2003 tmp = tcg_temp_new_i32();
2004 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2005 iwmmxt_store_creg(wrd, tmp);
2006 } else {
2007 i = 1;
2008 if (insn & (1 << 8)) {
2009 if (insn & (1 << 22)) { /* WLDRD */
2010 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
2011 i = 0;
2012 } else { /* WLDRW wRd */
2013 tmp = tcg_temp_new_i32();
2014 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2016 } else {
2017 tmp = tcg_temp_new_i32();
2018 if (insn & (1 << 22)) { /* WLDRH */
2019 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
2020 } else { /* WLDRB */
2021 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2024 if (i) {
2025 tcg_gen_extu_i32_i64(cpu_M0, tmp);
2026 tcg_temp_free_i32(tmp);
2028 gen_op_iwmmxt_movq_wRn_M0(wrd);
2030 } else {
2031 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
2032 tmp = iwmmxt_load_creg(wrd);
2033 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
2034 } else {
2035 gen_op_iwmmxt_movq_M0_wRn(wrd);
2036 tmp = tcg_temp_new_i32();
2037 if (insn & (1 << 8)) {
2038 if (insn & (1 << 22)) { /* WSTRD */
2039 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
2040 } else { /* WSTRW wRd */
2041 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2042 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
2044 } else {
2045 if (insn & (1 << 22)) { /* WSTRH */
2046 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2047 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
2048 } else { /* WSTRB */
2049 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2050 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
2054 tcg_temp_free_i32(tmp);
2056 tcg_temp_free_i32(addr);
2057 return 0;
2060 if ((insn & 0x0f000000) != 0x0e000000)
2061 return 1;
2063 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
2064 case 0x000: /* WOR */
2065 wrd = (insn >> 12) & 0xf;
2066 rd0 = (insn >> 0) & 0xf;
2067 rd1 = (insn >> 16) & 0xf;
2068 gen_op_iwmmxt_movq_M0_wRn(rd0);
2069 gen_op_iwmmxt_orq_M0_wRn(rd1);
2070 gen_op_iwmmxt_setpsr_nz();
2071 gen_op_iwmmxt_movq_wRn_M0(wrd);
2072 gen_op_iwmmxt_set_mup();
2073 gen_op_iwmmxt_set_cup();
2074 break;
2075 case 0x011: /* TMCR */
2076 if (insn & 0xf)
2077 return 1;
2078 rd = (insn >> 12) & 0xf;
2079 wrd = (insn >> 16) & 0xf;
2080 switch (wrd) {
2081 case ARM_IWMMXT_wCID:
2082 case ARM_IWMMXT_wCASF:
2083 break;
2084 case ARM_IWMMXT_wCon:
2085 gen_op_iwmmxt_set_cup();
2086 /* Fall through. */
2087 case ARM_IWMMXT_wCSSF:
2088 tmp = iwmmxt_load_creg(wrd);
2089 tmp2 = load_reg(s, rd);
2090 tcg_gen_andc_i32(tmp, tmp, tmp2);
2091 tcg_temp_free_i32(tmp2);
2092 iwmmxt_store_creg(wrd, tmp);
2093 break;
2094 case ARM_IWMMXT_wCGR0:
2095 case ARM_IWMMXT_wCGR1:
2096 case ARM_IWMMXT_wCGR2:
2097 case ARM_IWMMXT_wCGR3:
2098 gen_op_iwmmxt_set_cup();
2099 tmp = load_reg(s, rd);
2100 iwmmxt_store_creg(wrd, tmp);
2101 break;
2102 default:
2103 return 1;
2105 break;
2106 case 0x100: /* WXOR */
2107 wrd = (insn >> 12) & 0xf;
2108 rd0 = (insn >> 0) & 0xf;
2109 rd1 = (insn >> 16) & 0xf;
2110 gen_op_iwmmxt_movq_M0_wRn(rd0);
2111 gen_op_iwmmxt_xorq_M0_wRn(rd1);
2112 gen_op_iwmmxt_setpsr_nz();
2113 gen_op_iwmmxt_movq_wRn_M0(wrd);
2114 gen_op_iwmmxt_set_mup();
2115 gen_op_iwmmxt_set_cup();
2116 break;
2117 case 0x111: /* TMRC */
2118 if (insn & 0xf)
2119 return 1;
2120 rd = (insn >> 12) & 0xf;
2121 wrd = (insn >> 16) & 0xf;
2122 tmp = iwmmxt_load_creg(wrd);
2123 store_reg(s, rd, tmp);
2124 break;
2125 case 0x300: /* WANDN */
2126 wrd = (insn >> 12) & 0xf;
2127 rd0 = (insn >> 0) & 0xf;
2128 rd1 = (insn >> 16) & 0xf;
2129 gen_op_iwmmxt_movq_M0_wRn(rd0);
2130 tcg_gen_neg_i64(cpu_M0, cpu_M0);
2131 gen_op_iwmmxt_andq_M0_wRn(rd1);
2132 gen_op_iwmmxt_setpsr_nz();
2133 gen_op_iwmmxt_movq_wRn_M0(wrd);
2134 gen_op_iwmmxt_set_mup();
2135 gen_op_iwmmxt_set_cup();
2136 break;
2137 case 0x200: /* WAND */
2138 wrd = (insn >> 12) & 0xf;
2139 rd0 = (insn >> 0) & 0xf;
2140 rd1 = (insn >> 16) & 0xf;
2141 gen_op_iwmmxt_movq_M0_wRn(rd0);
2142 gen_op_iwmmxt_andq_M0_wRn(rd1);
2143 gen_op_iwmmxt_setpsr_nz();
2144 gen_op_iwmmxt_movq_wRn_M0(wrd);
2145 gen_op_iwmmxt_set_mup();
2146 gen_op_iwmmxt_set_cup();
2147 break;
2148 case 0x810: case 0xa10: /* WMADD */
2149 wrd = (insn >> 12) & 0xf;
2150 rd0 = (insn >> 0) & 0xf;
2151 rd1 = (insn >> 16) & 0xf;
2152 gen_op_iwmmxt_movq_M0_wRn(rd0);
2153 if (insn & (1 << 21))
2154 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
2155 else
2156 gen_op_iwmmxt_madduq_M0_wRn(rd1);
2157 gen_op_iwmmxt_movq_wRn_M0(wrd);
2158 gen_op_iwmmxt_set_mup();
2159 break;
2160 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
2161 wrd = (insn >> 12) & 0xf;
2162 rd0 = (insn >> 16) & 0xf;
2163 rd1 = (insn >> 0) & 0xf;
2164 gen_op_iwmmxt_movq_M0_wRn(rd0);
2165 switch ((insn >> 22) & 3) {
2166 case 0:
2167 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2168 break;
2169 case 1:
2170 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2171 break;
2172 case 2:
2173 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2174 break;
2175 case 3:
2176 return 1;
2178 gen_op_iwmmxt_movq_wRn_M0(wrd);
2179 gen_op_iwmmxt_set_mup();
2180 gen_op_iwmmxt_set_cup();
2181 break;
2182 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
2183 wrd = (insn >> 12) & 0xf;
2184 rd0 = (insn >> 16) & 0xf;
2185 rd1 = (insn >> 0) & 0xf;
2186 gen_op_iwmmxt_movq_M0_wRn(rd0);
2187 switch ((insn >> 22) & 3) {
2188 case 0:
2189 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2190 break;
2191 case 1:
2192 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2193 break;
2194 case 2:
2195 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2196 break;
2197 case 3:
2198 return 1;
2200 gen_op_iwmmxt_movq_wRn_M0(wrd);
2201 gen_op_iwmmxt_set_mup();
2202 gen_op_iwmmxt_set_cup();
2203 break;
2204 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2205 wrd = (insn >> 12) & 0xf;
2206 rd0 = (insn >> 16) & 0xf;
2207 rd1 = (insn >> 0) & 0xf;
2208 gen_op_iwmmxt_movq_M0_wRn(rd0);
2209 if (insn & (1 << 22))
2210 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2211 else
2212 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2213 if (!(insn & (1 << 20)))
2214 gen_op_iwmmxt_addl_M0_wRn(wrd);
2215 gen_op_iwmmxt_movq_wRn_M0(wrd);
2216 gen_op_iwmmxt_set_mup();
2217 break;
2218 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2219 wrd = (insn >> 12) & 0xf;
2220 rd0 = (insn >> 16) & 0xf;
2221 rd1 = (insn >> 0) & 0xf;
2222 gen_op_iwmmxt_movq_M0_wRn(rd0);
2223 if (insn & (1 << 21)) {
2224 if (insn & (1 << 20))
2225 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2226 else
2227 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2228 } else {
2229 if (insn & (1 << 20))
2230 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2231 else
2232 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2234 gen_op_iwmmxt_movq_wRn_M0(wrd);
2235 gen_op_iwmmxt_set_mup();
2236 break;
2237 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2238 wrd = (insn >> 12) & 0xf;
2239 rd0 = (insn >> 16) & 0xf;
2240 rd1 = (insn >> 0) & 0xf;
2241 gen_op_iwmmxt_movq_M0_wRn(rd0);
2242 if (insn & (1 << 21))
2243 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2244 else
2245 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2246 if (!(insn & (1 << 20))) {
2247 iwmmxt_load_reg(cpu_V1, wrd);
2248 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
2250 gen_op_iwmmxt_movq_wRn_M0(wrd);
2251 gen_op_iwmmxt_set_mup();
2252 break;
2253 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2254 wrd = (insn >> 12) & 0xf;
2255 rd0 = (insn >> 16) & 0xf;
2256 rd1 = (insn >> 0) & 0xf;
2257 gen_op_iwmmxt_movq_M0_wRn(rd0);
2258 switch ((insn >> 22) & 3) {
2259 case 0:
2260 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2261 break;
2262 case 1:
2263 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2264 break;
2265 case 2:
2266 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2267 break;
2268 case 3:
2269 return 1;
2271 gen_op_iwmmxt_movq_wRn_M0(wrd);
2272 gen_op_iwmmxt_set_mup();
2273 gen_op_iwmmxt_set_cup();
2274 break;
2275 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2276 wrd = (insn >> 12) & 0xf;
2277 rd0 = (insn >> 16) & 0xf;
2278 rd1 = (insn >> 0) & 0xf;
2279 gen_op_iwmmxt_movq_M0_wRn(rd0);
2280 if (insn & (1 << 22)) {
2281 if (insn & (1 << 20))
2282 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2283 else
2284 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2285 } else {
2286 if (insn & (1 << 20))
2287 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2288 else
2289 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2294 break;
2295 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2296 wrd = (insn >> 12) & 0xf;
2297 rd0 = (insn >> 16) & 0xf;
2298 rd1 = (insn >> 0) & 0xf;
2299 gen_op_iwmmxt_movq_M0_wRn(rd0);
2300 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2301 tcg_gen_andi_i32(tmp, tmp, 7);
2302 iwmmxt_load_reg(cpu_V1, rd1);
2303 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2304 tcg_temp_free_i32(tmp);
2305 gen_op_iwmmxt_movq_wRn_M0(wrd);
2306 gen_op_iwmmxt_set_mup();
2307 break;
2308 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
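            /* TINSR wRd, Rn, #imm: insert the low byte/halfword/word of the
             * ARM register into the lane of wRd selected by the immediate;
             * tmp2 and tmp3 carry the lane mask and bit offset to the helper.
             */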
2309 if (((insn >> 6) & 3) == 3)
2310 return 1;
2311 rd = (insn >> 12) & 0xf;
2312 wrd = (insn >> 16) & 0xf;
2313 tmp = load_reg(s, rd);
2314 gen_op_iwmmxt_movq_M0_wRn(wrd);
2315 switch ((insn >> 6) & 3) {
2316 case 0:
2317 tmp2 = tcg_const_i32(0xff);
2318 tmp3 = tcg_const_i32((insn & 7) << 3);
2319 break;
2320 case 1:
2321 tmp2 = tcg_const_i32(0xffff);
2322 tmp3 = tcg_const_i32((insn & 3) << 4);
2323 break;
2324 case 2:
2325 tmp2 = tcg_const_i32(0xffffffff);
2326 tmp3 = tcg_const_i32((insn & 1) << 5);
2327 break;
2328 default:
2329 tmp2 = NULL;
2330 tmp3 = NULL;
2332 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2333 tcg_temp_free_i32(tmp3);
2334 tcg_temp_free_i32(tmp2);
2335 tcg_temp_free_i32(tmp);
2336 gen_op_iwmmxt_movq_wRn_M0(wrd);
2337 gen_op_iwmmxt_set_mup();
2338 break;
2339 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
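            /* TEXTRM Rd, wRn, #imm: extract the selected byte/halfword/word
             * lane of wRn into an ARM register, sign- or zero-extending
             * byte and halfword lanes according to bit 3 of the instruction.
             */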
2340 rd = (insn >> 12) & 0xf;
2341 wrd = (insn >> 16) & 0xf;
2342 if (rd == 15 || ((insn >> 22) & 3) == 3)
2343 return 1;
2344 gen_op_iwmmxt_movq_M0_wRn(wrd);
2345 tmp = tcg_temp_new_i32();
2346 switch ((insn >> 22) & 3) {
2347 case 0:
2348 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2349 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2350 if (insn & 8) {
2351 tcg_gen_ext8s_i32(tmp, tmp);
2352 } else {
2353 tcg_gen_andi_i32(tmp, tmp, 0xff);
2355 break;
2356 case 1:
2357 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2358 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2359 if (insn & 8) {
2360 tcg_gen_ext16s_i32(tmp, tmp);
2361 } else {
2362 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2364 break;
2365 case 2:
2366 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2367 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2368 break;
2370 store_reg(s, rd, tmp);
2371 break;
2372 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2373 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2374 return 1;
2375 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2376 switch ((insn >> 22) & 3) {
2377 case 0:
2378 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2379 break;
2380 case 1:
2381 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2382 break;
2383 case 2:
2384 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2385 break;
2387 tcg_gen_shli_i32(tmp, tmp, 28);
2388 gen_set_nzcv(tmp);
2389 tcg_temp_free_i32(tmp);
2390 break;
2391 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2392 if (((insn >> 6) & 3) == 3)
2393 return 1;
2394 rd = (insn >> 12) & 0xf;
2395 wrd = (insn >> 16) & 0xf;
2396 tmp = load_reg(s, rd);
2397 switch ((insn >> 6) & 3) {
2398 case 0:
2399 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2400 break;
2401 case 1:
2402 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2403 break;
2404 case 2:
2405 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2406 break;
2408 tcg_temp_free_i32(tmp);
2409 gen_op_iwmmxt_movq_wRn_M0(wrd);
2410 gen_op_iwmmxt_set_mup();
2411 break;
2412 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2413 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2414 return 1;
2415 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2416 tmp2 = tcg_temp_new_i32();
2417 tcg_gen_mov_i32(tmp2, tmp);
2418 switch ((insn >> 22) & 3) {
2419 case 0:
2420 for (i = 0; i < 7; i ++) {
2421 tcg_gen_shli_i32(tmp2, tmp2, 4);
2422 tcg_gen_and_i32(tmp, tmp, tmp2);
2424 break;
2425 case 1:
2426 for (i = 0; i < 3; i ++) {
2427 tcg_gen_shli_i32(tmp2, tmp2, 8);
2428 tcg_gen_and_i32(tmp, tmp, tmp2);
2430 break;
2431 case 2:
2432 tcg_gen_shli_i32(tmp2, tmp2, 16);
2433 tcg_gen_and_i32(tmp, tmp, tmp2);
2434 break;
2436 gen_set_nzcv(tmp);
2437 tcg_temp_free_i32(tmp2);
2438 tcg_temp_free_i32(tmp);
2439 break;
2440 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2441 wrd = (insn >> 12) & 0xf;
2442 rd0 = (insn >> 16) & 0xf;
2443 gen_op_iwmmxt_movq_M0_wRn(rd0);
2444 switch ((insn >> 22) & 3) {
2445 case 0:
2446 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2447 break;
2448 case 1:
2449 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2450 break;
2451 case 2:
2452 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2453 break;
2454 case 3:
2455 return 1;
2457 gen_op_iwmmxt_movq_wRn_M0(wrd);
2458 gen_op_iwmmxt_set_mup();
2459 break;
2460 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2461 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2462 return 1;
2463 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2464 tmp2 = tcg_temp_new_i32();
2465 tcg_gen_mov_i32(tmp2, tmp);
2466 switch ((insn >> 22) & 3) {
2467 case 0:
2468 for (i = 0; i < 7; i ++) {
2469 tcg_gen_shli_i32(tmp2, tmp2, 4);
2470 tcg_gen_or_i32(tmp, tmp, tmp2);
2472 break;
2473 case 1:
2474 for (i = 0; i < 3; i ++) {
2475 tcg_gen_shli_i32(tmp2, tmp2, 8);
2476 tcg_gen_or_i32(tmp, tmp, tmp2);
2478 break;
2479 case 2:
2480 tcg_gen_shli_i32(tmp2, tmp2, 16);
2481 tcg_gen_or_i32(tmp, tmp, tmp2);
2482 break;
2484 gen_set_nzcv(tmp);
2485 tcg_temp_free_i32(tmp2);
2486 tcg_temp_free_i32(tmp);
2487 break;
2488 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2489 rd = (insn >> 12) & 0xf;
2490 rd0 = (insn >> 16) & 0xf;
2491 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2492 return 1;
2493 gen_op_iwmmxt_movq_M0_wRn(rd0);
2494 tmp = tcg_temp_new_i32();
2495 switch ((insn >> 22) & 3) {
2496 case 0:
2497 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2498 break;
2499 case 1:
2500 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2501 break;
2502 case 2:
2503 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2504 break;
2506 store_reg(s, rd, tmp);
2507 break;
2508 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2509 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2510 wrd = (insn >> 12) & 0xf;
2511 rd0 = (insn >> 16) & 0xf;
2512 rd1 = (insn >> 0) & 0xf;
2513 gen_op_iwmmxt_movq_M0_wRn(rd0);
2514 switch ((insn >> 22) & 3) {
2515 case 0:
2516 if (insn & (1 << 21))
2517 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2518 else
2519 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2520 break;
2521 case 1:
2522 if (insn & (1 << 21))
2523 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2524 else
2525 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2526 break;
2527 case 2:
2528 if (insn & (1 << 21))
2529 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2530 else
2531 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2532 break;
2533 case 3:
2534 return 1;
2536 gen_op_iwmmxt_movq_wRn_M0(wrd);
2537 gen_op_iwmmxt_set_mup();
2538 gen_op_iwmmxt_set_cup();
2539 break;
2540 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2541 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2542 wrd = (insn >> 12) & 0xf;
2543 rd0 = (insn >> 16) & 0xf;
2544 gen_op_iwmmxt_movq_M0_wRn(rd0);
2545 switch ((insn >> 22) & 3) {
2546 case 0:
2547 if (insn & (1 << 21))
2548 gen_op_iwmmxt_unpacklsb_M0();
2549 else
2550 gen_op_iwmmxt_unpacklub_M0();
2551 break;
2552 case 1:
2553 if (insn & (1 << 21))
2554 gen_op_iwmmxt_unpacklsw_M0();
2555 else
2556 gen_op_iwmmxt_unpackluw_M0();
2557 break;
2558 case 2:
2559 if (insn & (1 << 21))
2560 gen_op_iwmmxt_unpacklsl_M0();
2561 else
2562 gen_op_iwmmxt_unpacklul_M0();
2563 break;
2564 case 3:
2565 return 1;
2567 gen_op_iwmmxt_movq_wRn_M0(wrd);
2568 gen_op_iwmmxt_set_mup();
2569 gen_op_iwmmxt_set_cup();
2570 break;
2571 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2572 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2573 wrd = (insn >> 12) & 0xf;
2574 rd0 = (insn >> 16) & 0xf;
2575 gen_op_iwmmxt_movq_M0_wRn(rd0);
2576 switch ((insn >> 22) & 3) {
2577 case 0:
2578 if (insn & (1 << 21))
2579 gen_op_iwmmxt_unpackhsb_M0();
2580 else
2581 gen_op_iwmmxt_unpackhub_M0();
2582 break;
2583 case 1:
2584 if (insn & (1 << 21))
2585 gen_op_iwmmxt_unpackhsw_M0();
2586 else
2587 gen_op_iwmmxt_unpackhuw_M0();
2588 break;
2589 case 2:
2590 if (insn & (1 << 21))
2591 gen_op_iwmmxt_unpackhsl_M0();
2592 else
2593 gen_op_iwmmxt_unpackhul_M0();
2594 break;
2595 case 3:
2596 return 1;
2598 gen_op_iwmmxt_movq_wRn_M0(wrd);
2599 gen_op_iwmmxt_set_mup();
2600 gen_op_iwmmxt_set_cup();
2601 break;
2602 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2603 case 0x214: case 0x614: case 0xa14: case 0xe14:
2604 if (((insn >> 22) & 3) == 0)
2605 return 1;
2606 wrd = (insn >> 12) & 0xf;
2607 rd0 = (insn >> 16) & 0xf;
2608 gen_op_iwmmxt_movq_M0_wRn(rd0);
2609 tmp = tcg_temp_new_i32();
2610 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2611 tcg_temp_free_i32(tmp);
2612 return 1;
2614 switch ((insn >> 22) & 3) {
2615 case 1:
2616 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2617 break;
2618 case 2:
2619 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2620 break;
2621 case 3:
2622 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2623 break;
2625 tcg_temp_free_i32(tmp);
2626 gen_op_iwmmxt_movq_wRn_M0(wrd);
2627 gen_op_iwmmxt_set_mup();
2628 gen_op_iwmmxt_set_cup();
2629 break;
2630 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2631 case 0x014: case 0x414: case 0x814: case 0xc14:
2632 if (((insn >> 22) & 3) == 0)
2633 return 1;
2634 wrd = (insn >> 12) & 0xf;
2635 rd0 = (insn >> 16) & 0xf;
2636 gen_op_iwmmxt_movq_M0_wRn(rd0);
2637 tmp = tcg_temp_new_i32();
2638 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2639 tcg_temp_free_i32(tmp);
2640 return 1;
2642 switch ((insn >> 22) & 3) {
2643 case 1:
2644 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2645 break;
2646 case 2:
2647 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2648 break;
2649 case 3:
2650 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2651 break;
2653 tcg_temp_free_i32(tmp);
2654 gen_op_iwmmxt_movq_wRn_M0(wrd);
2655 gen_op_iwmmxt_set_mup();
2656 gen_op_iwmmxt_set_cup();
2657 break;
2658 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2659 case 0x114: case 0x514: case 0x914: case 0xd14:
2660 if (((insn >> 22) & 3) == 0)
2661 return 1;
2662 wrd = (insn >> 12) & 0xf;
2663 rd0 = (insn >> 16) & 0xf;
2664 gen_op_iwmmxt_movq_M0_wRn(rd0);
2665 tmp = tcg_temp_new_i32();
2666 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2667 tcg_temp_free_i32(tmp);
2668 return 1;
2670 switch ((insn >> 22) & 3) {
2671 case 1:
2672 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2673 break;
2674 case 2:
2675 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2676 break;
2677 case 3:
2678 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2679 break;
2681 tcg_temp_free_i32(tmp);
2682 gen_op_iwmmxt_movq_wRn_M0(wrd);
2683 gen_op_iwmmxt_set_mup();
2684 gen_op_iwmmxt_set_cup();
2685 break;
2686 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2687 case 0x314: case 0x714: case 0xb14: case 0xf14:
2688 if (((insn >> 22) & 3) == 0)
2689 return 1;
2690 wrd = (insn >> 12) & 0xf;
2691 rd0 = (insn >> 16) & 0xf;
2692 gen_op_iwmmxt_movq_M0_wRn(rd0);
2693 tmp = tcg_temp_new_i32();
2694 switch ((insn >> 22) & 3) {
2695 case 1:
2696 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2697 tcg_temp_free_i32(tmp);
2698 return 1;
2700 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2701 break;
2702 case 2:
2703 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2704 tcg_temp_free_i32(tmp);
2705 return 1;
2707 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2708 break;
2709 case 3:
2710 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2711 tcg_temp_free_i32(tmp);
2712 return 1;
2714 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2715 break;
2717 tcg_temp_free_i32(tmp);
2718 gen_op_iwmmxt_movq_wRn_M0(wrd);
2719 gen_op_iwmmxt_set_mup();
2720 gen_op_iwmmxt_set_cup();
2721 break;
2722 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2723 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2724 wrd = (insn >> 12) & 0xf;
2725 rd0 = (insn >> 16) & 0xf;
2726 rd1 = (insn >> 0) & 0xf;
2727 gen_op_iwmmxt_movq_M0_wRn(rd0);
2728 switch ((insn >> 22) & 3) {
2729 case 0:
2730 if (insn & (1 << 21))
2731 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2732 else
2733 gen_op_iwmmxt_minub_M0_wRn(rd1);
2734 break;
2735 case 1:
2736 if (insn & (1 << 21))
2737 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2738 else
2739 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2740 break;
2741 case 2:
2742 if (insn & (1 << 21))
2743 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2744 else
2745 gen_op_iwmmxt_minul_M0_wRn(rd1);
2746 break;
2747 case 3:
2748 return 1;
2750 gen_op_iwmmxt_movq_wRn_M0(wrd);
2751 gen_op_iwmmxt_set_mup();
2752 break;
2753 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2754 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2755 wrd = (insn >> 12) & 0xf;
2756 rd0 = (insn >> 16) & 0xf;
2757 rd1 = (insn >> 0) & 0xf;
2758 gen_op_iwmmxt_movq_M0_wRn(rd0);
2759 switch ((insn >> 22) & 3) {
2760 case 0:
2761 if (insn & (1 << 21))
2762 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2763 else
2764 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2765 break;
2766 case 1:
2767 if (insn & (1 << 21))
2768 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2769 else
2770 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2771 break;
2772 case 2:
2773 if (insn & (1 << 21))
2774 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2775 else
2776 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2777 break;
2778 case 3:
2779 return 1;
2781 gen_op_iwmmxt_movq_wRn_M0(wrd);
2782 gen_op_iwmmxt_set_mup();
2783 break;
2784 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2785 case 0x402: case 0x502: case 0x602: case 0x702:
2786 wrd = (insn >> 12) & 0xf;
2787 rd0 = (insn >> 16) & 0xf;
2788 rd1 = (insn >> 0) & 0xf;
2789 gen_op_iwmmxt_movq_M0_wRn(rd0);
2790 tmp = tcg_const_i32((insn >> 20) & 3);
2791 iwmmxt_load_reg(cpu_V1, rd1);
2792 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2793 tcg_temp_free_i32(tmp);
2794 gen_op_iwmmxt_movq_wRn_M0(wrd);
2795 gen_op_iwmmxt_set_mup();
2796 break;
2797 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2798 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2799 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2800 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2801 wrd = (insn >> 12) & 0xf;
2802 rd0 = (insn >> 16) & 0xf;
2803 rd1 = (insn >> 0) & 0xf;
2804 gen_op_iwmmxt_movq_M0_wRn(rd0);
2805 switch ((insn >> 20) & 0xf) {
2806 case 0x0:
2807 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2808 break;
2809 case 0x1:
2810 gen_op_iwmmxt_subub_M0_wRn(rd1);
2811 break;
2812 case 0x3:
2813 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2814 break;
2815 case 0x4:
2816 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2817 break;
2818 case 0x5:
2819 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2820 break;
2821 case 0x7:
2822 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2823 break;
2824 case 0x8:
2825 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2826 break;
2827 case 0x9:
2828 gen_op_iwmmxt_subul_M0_wRn(rd1);
2829 break;
2830 case 0xb:
2831 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2832 break;
2833 default:
2834 return 1;
2836 gen_op_iwmmxt_movq_wRn_M0(wrd);
2837 gen_op_iwmmxt_set_mup();
2838 gen_op_iwmmxt_set_cup();
2839 break;
2840 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2841 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2842 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2843 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2844 wrd = (insn >> 12) & 0xf;
2845 rd0 = (insn >> 16) & 0xf;
2846 gen_op_iwmmxt_movq_M0_wRn(rd0);
2847 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2848 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2849 tcg_temp_free_i32(tmp);
2850 gen_op_iwmmxt_movq_wRn_M0(wrd);
2851 gen_op_iwmmxt_set_mup();
2852 gen_op_iwmmxt_set_cup();
2853 break;
2854 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2855 case 0x418: case 0x518: case 0x618: case 0x718:
2856 case 0x818: case 0x918: case 0xa18: case 0xb18:
2857 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2858 wrd = (insn >> 12) & 0xf;
2859 rd0 = (insn >> 16) & 0xf;
2860 rd1 = (insn >> 0) & 0xf;
2861 gen_op_iwmmxt_movq_M0_wRn(rd0);
2862 switch ((insn >> 20) & 0xf) {
2863 case 0x0:
2864 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2865 break;
2866 case 0x1:
2867 gen_op_iwmmxt_addub_M0_wRn(rd1);
2868 break;
2869 case 0x3:
2870 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2871 break;
2872 case 0x4:
2873 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2874 break;
2875 case 0x5:
2876 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2877 break;
2878 case 0x7:
2879 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2880 break;
2881 case 0x8:
2882 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2883 break;
2884 case 0x9:
2885 gen_op_iwmmxt_addul_M0_wRn(rd1);
2886 break;
2887 case 0xb:
2888 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2889 break;
2890 default:
2891 return 1;
2893 gen_op_iwmmxt_movq_wRn_M0(wrd);
2894 gen_op_iwmmxt_set_mup();
2895 gen_op_iwmmxt_set_cup();
2896 break;
2897 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2898 case 0x408: case 0x508: case 0x608: case 0x708:
2899 case 0x808: case 0x908: case 0xa08: case 0xb08:
2900 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2901 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2902 return 1;
2903 wrd = (insn >> 12) & 0xf;
2904 rd0 = (insn >> 16) & 0xf;
2905 rd1 = (insn >> 0) & 0xf;
2906 gen_op_iwmmxt_movq_M0_wRn(rd0);
2907 switch ((insn >> 22) & 3) {
2908 case 1:
2909 if (insn & (1 << 21))
2910 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2911 else
2912 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2913 break;
2914 case 2:
2915 if (insn & (1 << 21))
2916 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2917 else
2918 gen_op_iwmmxt_packul_M0_wRn(rd1);
2919 break;
2920 case 3:
2921 if (insn & (1 << 21))
2922 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2923 else
2924 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2925 break;
2927 gen_op_iwmmxt_movq_wRn_M0(wrd);
2928 gen_op_iwmmxt_set_mup();
2929 gen_op_iwmmxt_set_cup();
2930 break;
2931 case 0x201: case 0x203: case 0x205: case 0x207:
2932 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2933 case 0x211: case 0x213: case 0x215: case 0x217:
2934 case 0x219: case 0x21b: case 0x21d: case 0x21f:
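            /* TMIA, TMIAPH, TMIABB/BT/TB/TT: multiply two ARM registers
             * (optionally taking only the top halfwords, selected by bits
             * 16/17) and accumulate the product into wRd; using PC as
             * either source operand is rejected.
             */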
2935 wrd = (insn >> 5) & 0xf;
2936 rd0 = (insn >> 12) & 0xf;
2937 rd1 = (insn >> 0) & 0xf;
2938 if (rd0 == 0xf || rd1 == 0xf)
2939 return 1;
2940 gen_op_iwmmxt_movq_M0_wRn(wrd);
2941 tmp = load_reg(s, rd0);
2942 tmp2 = load_reg(s, rd1);
2943 switch ((insn >> 16) & 0xf) {
2944 case 0x0: /* TMIA */
2945 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2946 break;
2947 case 0x8: /* TMIAPH */
2948 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2949 break;
2950 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2951 if (insn & (1 << 16))
2952 tcg_gen_shri_i32(tmp, tmp, 16);
2953 if (insn & (1 << 17))
2954 tcg_gen_shri_i32(tmp2, tmp2, 16);
2955 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2956 break;
2957 default:
2958 tcg_temp_free_i32(tmp2);
2959 tcg_temp_free_i32(tmp);
2960 return 1;
2962 tcg_temp_free_i32(tmp2);
2963 tcg_temp_free_i32(tmp);
2964 gen_op_iwmmxt_movq_wRn_M0(wrd);
2965 gen_op_iwmmxt_set_mup();
2966 break;
2967 default:
2968 return 1;
2971 return 0;
2974 /* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
2975    (i.e. an undefined instruction). */
2976 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2978 int acc, rd0, rd1, rdhi, rdlo;
2979 TCGv_i32 tmp, tmp2;
2981 if ((insn & 0x0ff00f10) == 0x0e200010) {
2982 /* Multiply with Internal Accumulate Format */
2983 rd0 = (insn >> 12) & 0xf;
2984 rd1 = insn & 0xf;
2985 acc = (insn >> 5) & 7;
2987 if (acc != 0)
2988 return 1;
2990 tmp = load_reg(s, rd0);
2991 tmp2 = load_reg(s, rd1);
2992 switch ((insn >> 16) & 0xf) {
2993 case 0x0: /* MIA */
2994 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2995 break;
2996 case 0x8: /* MIAPH */
2997 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2998 break;
2999 case 0xc: /* MIABB */
3000 case 0xd: /* MIABT */
3001 case 0xe: /* MIATB */
3002 case 0xf: /* MIATT */
3003 if (insn & (1 << 16))
3004 tcg_gen_shri_i32(tmp, tmp, 16);
3005 if (insn & (1 << 17))
3006 tcg_gen_shri_i32(tmp2, tmp2, 16);
3007 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
3008 break;
3009 default:
3010 return 1;
3012 tcg_temp_free_i32(tmp2);
3013 tcg_temp_free_i32(tmp);
3015 gen_op_iwmmxt_movq_wRn_M0(acc);
3016 return 0;
3019 if ((insn & 0x0fe00ff8) == 0x0c400000) {
3020 /* Internal Accumulator Access Format */
3021 rdhi = (insn >> 16) & 0xf;
3022 rdlo = (insn >> 12) & 0xf;
3023 acc = insn & 7;
3025 if (acc != 0)
3026 return 1;
3028 if (insn & ARM_CP_RW_BIT) { /* MRA */
3029 iwmmxt_load_reg(cpu_V0, acc);
3030 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3031 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
3032 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3033 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
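            /* The XScale internal accumulator is 40 bits wide, so only
             * bits [39:32] are transferred to RdHi; hence the 0xff mask above.
             */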
3034 } else { /* MAR */
3035 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
3036 iwmmxt_store_reg(cpu_V0, acc);
3038 return 0;
3041 return 1;
3044 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
3045 #define VFP_SREG(insn, bigbit, smallbit) \
3046 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
3047 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
3048 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
3049 reg = (((insn) >> (bigbit)) & 0x0f) \
3050 | (((insn) >> ((smallbit) - 4)) & 0x10); \
3051 } else { \
3052 if (insn & (1 << (smallbit))) \
3053 return 1; \
3054 reg = ((insn) >> (bigbit)) & 0x0f; \
3055 }} while (0)
3057 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
3058 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
3059 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
3060 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
3061 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
3062 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
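/* A worked example of the decode macros above (illustrative values):
 * with VFP3/D32 present, VFP_DREG_D(reg, insn) assembles the 5-bit
 * register number D:Vd from insn[22] and insn[15:12], so Vd = 0x3 with
 * D = 1 gives reg = 0x13.  Without VFP3 only 16 double registers exist,
 * and a set D bit makes the instruction UNDEF (the macro returns 1).
 * VFP_SREG_D(insn) builds the single-precision number as Vd:D instead,
 * i.e. the extra bit becomes the least significant bit.
 */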
3064 /* Move between integer and VFP cores. */
3065 static TCGv_i32 gen_vfp_mrs(void)
3067 TCGv_i32 tmp = tcg_temp_new_i32();
3068 tcg_gen_mov_i32(tmp, cpu_F0s);
3069 return tmp;
3072 static void gen_vfp_msr(TCGv_i32 tmp)
3074 tcg_gen_mov_i32(cpu_F0s, tmp);
3075 tcg_temp_free_i32(tmp);
3078 static void gen_neon_dup_low16(TCGv_i32 var)
3080 TCGv_i32 tmp = tcg_temp_new_i32();
3081 tcg_gen_ext16u_i32(var, var);
3082 tcg_gen_shli_i32(tmp, var, 16);
3083 tcg_gen_or_i32(var, var, tmp);
3084 tcg_temp_free_i32(tmp);
3087 static void gen_neon_dup_high16(TCGv_i32 var)
3089 TCGv_i32 tmp = tcg_temp_new_i32();
3090 tcg_gen_andi_i32(var, var, 0xffff0000);
3091 tcg_gen_shri_i32(tmp, var, 16);
3092 tcg_gen_or_i32(var, var, tmp);
3093 tcg_temp_free_i32(tmp);
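/* VSEL (ARMv8 VFP): copy either the Sn/Dn or the Sm/Dm source into the
 * destination according to the condition encoded in insn[21:20]
 * (eq, vs, ge, gt).  The condition is evaluated with movcond on the
 * cached NZCV flag values rather than with a branch.
 */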
3096 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
3097 uint32_t dp)
3099 uint32_t cc = extract32(insn, 20, 2);
3101 if (dp) {
3102 TCGv_i64 frn, frm, dest;
3103 TCGv_i64 tmp, zero, zf, nf, vf;
3105 zero = tcg_const_i64(0);
3107 frn = tcg_temp_new_i64();
3108 frm = tcg_temp_new_i64();
3109 dest = tcg_temp_new_i64();
3111 zf = tcg_temp_new_i64();
3112 nf = tcg_temp_new_i64();
3113 vf = tcg_temp_new_i64();
3115 tcg_gen_extu_i32_i64(zf, cpu_ZF);
3116 tcg_gen_ext_i32_i64(nf, cpu_NF);
3117 tcg_gen_ext_i32_i64(vf, cpu_VF);
3119 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3120 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3121 switch (cc) {
3122 case 0: /* eq: Z */
3123 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
3124 frn, frm);
3125 break;
3126 case 1: /* vs: V */
3127 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
3128 frn, frm);
3129 break;
3130 case 2: /* ge: N == V -> N ^ V == 0 */
3131 tmp = tcg_temp_new_i64();
3132 tcg_gen_xor_i64(tmp, vf, nf);
3133 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3134 frn, frm);
3135 tcg_temp_free_i64(tmp);
3136 break;
3137 case 3: /* gt: !Z && N == V */
3138 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
3139 frn, frm);
3140 tmp = tcg_temp_new_i64();
3141 tcg_gen_xor_i64(tmp, vf, nf);
3142 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3143 dest, frm);
3144 tcg_temp_free_i64(tmp);
3145 break;
3147 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3148 tcg_temp_free_i64(frn);
3149 tcg_temp_free_i64(frm);
3150 tcg_temp_free_i64(dest);
3152 tcg_temp_free_i64(zf);
3153 tcg_temp_free_i64(nf);
3154 tcg_temp_free_i64(vf);
3156 tcg_temp_free_i64(zero);
3157 } else {
3158 TCGv_i32 frn, frm, dest;
3159 TCGv_i32 tmp, zero;
3161 zero = tcg_const_i32(0);
3163 frn = tcg_temp_new_i32();
3164 frm = tcg_temp_new_i32();
3165 dest = tcg_temp_new_i32();
3166 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3167 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3168 switch (cc) {
3169 case 0: /* eq: Z */
3170 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
3171 frn, frm);
3172 break;
3173 case 1: /* vs: V */
3174 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
3175 frn, frm);
3176 break;
3177 case 2: /* ge: N == V -> N ^ V == 0 */
3178 tmp = tcg_temp_new_i32();
3179 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3180 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3181 frn, frm);
3182 tcg_temp_free_i32(tmp);
3183 break;
3184 case 3: /* gt: !Z && N == V */
3185 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
3186 frn, frm);
3187 tmp = tcg_temp_new_i32();
3188 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3189 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3190 dest, frm);
3191 tcg_temp_free_i32(tmp);
3192 break;
3194 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3195 tcg_temp_free_i32(frn);
3196 tcg_temp_free_i32(frm);
3197 tcg_temp_free_i32(dest);
3199 tcg_temp_free_i32(zero);
3202 return 0;
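/* VMAXNM/VMINNM (ARMv8 VFP): IEEE 754-2008 maxNum/minNum of the two
 * source registers; insn bit 6 selects the minimum form.
 */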
3205 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3206 uint32_t rm, uint32_t dp)
3208 uint32_t vmin = extract32(insn, 6, 1);
3209 TCGv_ptr fpst = get_fpstatus_ptr(0);
3211 if (dp) {
3212 TCGv_i64 frn, frm, dest;
3214 frn = tcg_temp_new_i64();
3215 frm = tcg_temp_new_i64();
3216 dest = tcg_temp_new_i64();
3218 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3219 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3220 if (vmin) {
3221 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
3222 } else {
3223 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
3225 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3226 tcg_temp_free_i64(frn);
3227 tcg_temp_free_i64(frm);
3228 tcg_temp_free_i64(dest);
3229 } else {
3230 TCGv_i32 frn, frm, dest;
3232 frn = tcg_temp_new_i32();
3233 frm = tcg_temp_new_i32();
3234 dest = tcg_temp_new_i32();
3236 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3237 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3238 if (vmin) {
3239 gen_helper_vfp_minnums(dest, frn, frm, fpst);
3240 } else {
3241 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
3243 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3244 tcg_temp_free_i32(frn);
3245 tcg_temp_free_i32(frm);
3246 tcg_temp_free_i32(dest);
3249 tcg_temp_free_ptr(fpst);
3250 return 0;
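/* VRINTA/VRINTN/VRINTP/VRINTM: round to an integral value in
 * floating-point format using an explicitly encoded rounding mode.
 * The mode is installed in the FP status, rintd/rints is applied, and
 * the second set_rmode call restores the previous mode (the helper
 * hands back the old mode in tcg_rmode).
 */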
3253 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3254 int rounding)
3256 TCGv_ptr fpst = get_fpstatus_ptr(0);
3257 TCGv_i32 tcg_rmode;
3259 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3260 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3262 if (dp) {
3263 TCGv_i64 tcg_op;
3264 TCGv_i64 tcg_res;
3265 tcg_op = tcg_temp_new_i64();
3266 tcg_res = tcg_temp_new_i64();
3267 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3268 gen_helper_rintd(tcg_res, tcg_op, fpst);
3269 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3270 tcg_temp_free_i64(tcg_op);
3271 tcg_temp_free_i64(tcg_res);
3272 } else {
3273 TCGv_i32 tcg_op;
3274 TCGv_i32 tcg_res;
3275 tcg_op = tcg_temp_new_i32();
3276 tcg_res = tcg_temp_new_i32();
3277 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3278 gen_helper_rints(tcg_res, tcg_op, fpst);
3279 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3280 tcg_temp_free_i32(tcg_op);
3281 tcg_temp_free_i32(tcg_res);
3284 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3285 tcg_temp_free_i32(tcg_rmode);
3287 tcg_temp_free_ptr(fpst);
3288 return 0;
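/* VCVTA/VCVTN/VCVTP/VCVTM: float-to-integer conversion with an
 * explicitly encoded rounding mode; insn bit 7 selects signed vs
 * unsigned.  The integer result always goes to a single-precision
 * register, so for a double-precision source rd is re-encoded below.
 */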
3291 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3292 int rounding)
3294 bool is_signed = extract32(insn, 7, 1);
3295 TCGv_ptr fpst = get_fpstatus_ptr(0);
3296 TCGv_i32 tcg_rmode, tcg_shift;
3298 tcg_shift = tcg_const_i32(0);
3300 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3301 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3303 if (dp) {
3304 TCGv_i64 tcg_double, tcg_res;
3305 TCGv_i32 tcg_tmp;
3306 /* Rd is encoded as a single precision register even when the source
3307      * is double precision.
3308      */
3309 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3310 tcg_double = tcg_temp_new_i64();
3311 tcg_res = tcg_temp_new_i64();
3312 tcg_tmp = tcg_temp_new_i32();
3313 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3314 if (is_signed) {
3315 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3316 } else {
3317 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3319 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
3320 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3321 tcg_temp_free_i32(tcg_tmp);
3322 tcg_temp_free_i64(tcg_res);
3323 tcg_temp_free_i64(tcg_double);
3324 } else {
3325 TCGv_i32 tcg_single, tcg_res;
3326 tcg_single = tcg_temp_new_i32();
3327 tcg_res = tcg_temp_new_i32();
3328 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3329 if (is_signed) {
3330 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3331 } else {
3332 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3334 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3335 tcg_temp_free_i32(tcg_res);
3336 tcg_temp_free_i32(tcg_single);
3339 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3340 tcg_temp_free_i32(tcg_rmode);
3342 tcg_temp_free_i32(tcg_shift);
3344 tcg_temp_free_ptr(fpst);
3346 return 0;
3349 /* Table for converting the most common AArch32 encoding of
3350 * rounding mode to arm_fprounding order (which matches the
3351  * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3352  */
3353 static const uint8_t fp_decode_rm[] = {
3354 FPROUNDING_TIEAWAY,
3355 FPROUNDING_TIEEVEN,
3356 FPROUNDING_POSINF,
3357     FPROUNDING_NEGINF,
3358 };
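/* For example, VRINTP/VCVTP encode RM = 0b10, which indexes
 * FPROUNDING_POSINF here, while VRINTA/VCVTA encode RM = 0b00 and
 * select FPROUNDING_TIEAWAY.
 */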
3360 static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
3362 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3364 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3365 return 1;
3368 if (dp) {
3369 VFP_DREG_D(rd, insn);
3370 VFP_DREG_N(rn, insn);
3371 VFP_DREG_M(rm, insn);
3372 } else {
3373 rd = VFP_SREG_D(insn);
3374 rn = VFP_SREG_N(insn);
3375 rm = VFP_SREG_M(insn);
3378 if ((insn & 0x0f800e50) == 0x0e000a00) {
3379 return handle_vsel(insn, rd, rn, rm, dp);
3380 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3381 return handle_vminmaxnm(insn, rd, rn, rm, dp);
3382 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3383 /* VRINTA, VRINTN, VRINTP, VRINTM */
3384 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3385 return handle_vrint(insn, rd, rm, dp, rounding);
3386 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3387 /* VCVTA, VCVTN, VCVTP, VCVTM */
3388 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3389 return handle_vcvt(insn, rd, rm, dp, rounding);
3391 return 1;
3394 /* Disassemble a VFP instruction.  Returns nonzero if an error occurred
3395    (i.e. an undefined instruction). */
3396 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3398 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3399 int dp, veclen;
3400 TCGv_i32 addr;
3401 TCGv_i32 tmp;
3402 TCGv_i32 tmp2;
3404 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
3405 return 1;
3408 /* FIXME: this access check should not take precedence over UNDEF
3409 * for invalid encodings; we will generate incorrect syndrome information
3410 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3412 if (s->fp_excp_el) {
3413 gen_exception_insn(s, 4, EXCP_UDEF,
3414 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
3415 return 0;
3418 if (!s->vfp_enabled) {
3419 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3420 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3421 return 1;
3422 rn = (insn >> 16) & 0xf;
3423 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3424 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
3425 return 1;
3429 if (extract32(insn, 28, 4) == 0xf) {
3430 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3431 * only used in v8 and above.
3433 return disas_vfp_v8_insn(s, insn);
3436 dp = ((insn & 0xf00) == 0xb00);
3437 switch ((insn >> 24) & 0xf) {
3438 case 0xe:
3439 if (insn & (1 << 4)) {
3440 /* single register transfer */
3441 rd = (insn >> 12) & 0xf;
3442 if (dp) {
3443 int size;
3444 int pass;
3446 VFP_DREG_N(rn, insn);
3447 if (insn & 0xf)
3448 return 1;
3449 if (insn & 0x00c00060
3450 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
3451 return 1;
3454 pass = (insn >> 21) & 1;
3455 if (insn & (1 << 22)) {
3456 size = 0;
3457 offset = ((insn >> 5) & 3) * 8;
3458 } else if (insn & (1 << 5)) {
3459 size = 1;
3460 offset = (insn & (1 << 6)) ? 16 : 0;
3461 } else {
3462 size = 2;
3463 offset = 0;
3465 if (insn & ARM_CP_RW_BIT) {
3466 /* vfp->arm */
3467 tmp = neon_load_reg(rn, pass);
3468 switch (size) {
3469 case 0:
3470 if (offset)
3471 tcg_gen_shri_i32(tmp, tmp, offset);
3472 if (insn & (1 << 23))
3473 gen_uxtb(tmp);
3474 else
3475 gen_sxtb(tmp);
3476 break;
3477 case 1:
3478 if (insn & (1 << 23)) {
3479 if (offset) {
3480 tcg_gen_shri_i32(tmp, tmp, 16);
3481 } else {
3482 gen_uxth(tmp);
3484 } else {
3485 if (offset) {
3486 tcg_gen_sari_i32(tmp, tmp, 16);
3487 } else {
3488 gen_sxth(tmp);
3491 break;
3492 case 2:
3493 break;
3495 store_reg(s, rd, tmp);
3496 } else {
3497 /* arm->vfp */
3498 tmp = load_reg(s, rd);
3499 if (insn & (1 << 23)) {
3500 /* VDUP */
3501 int vec_size = pass ? 16 : 8;
3502 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rn, 0),
3503 vec_size, vec_size, tmp);
3504 tcg_temp_free_i32(tmp);
3505 } else {
3506 /* VMOV */
3507 switch (size) {
3508 case 0:
3509 tmp2 = neon_load_reg(rn, pass);
3510 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
3511 tcg_temp_free_i32(tmp2);
3512 break;
3513 case 1:
3514 tmp2 = neon_load_reg(rn, pass);
3515 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
3516 tcg_temp_free_i32(tmp2);
3517 break;
3518 case 2:
3519 break;
3521 neon_store_reg(rn, pass, tmp);
3524 } else { /* !dp */
3525 if ((insn & 0x6f) != 0x00)
3526 return 1;
3527 rn = VFP_SREG_N(insn);
3528 if (insn & ARM_CP_RW_BIT) {
3529 /* vfp->arm */
3530 if (insn & (1 << 21)) {
3531 /* system register */
3532 rn >>= 1;
3534 switch (rn) {
3535 case ARM_VFP_FPSID:
3536 /* VFP2 allows access to FPSID from userspace.
3537 VFP3 restricts all id registers to privileged
3538 accesses. */
3539 if (IS_USER(s)
3540 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3541 return 1;
3543 tmp = load_cpu_field(vfp.xregs[rn]);
3544 break;
3545 case ARM_VFP_FPEXC:
3546 if (IS_USER(s))
3547 return 1;
3548 tmp = load_cpu_field(vfp.xregs[rn]);
3549 break;
3550 case ARM_VFP_FPINST:
3551 case ARM_VFP_FPINST2:
3552 /* Not present in VFP3. */
3553 if (IS_USER(s)
3554 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3555 return 1;
3557 tmp = load_cpu_field(vfp.xregs[rn]);
3558 break;
3559 case ARM_VFP_FPSCR:
3560 if (rd == 15) {
3561 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3562 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3563 } else {
3564 tmp = tcg_temp_new_i32();
3565 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3567 break;
3568 case ARM_VFP_MVFR2:
3569 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3570 return 1;
3572 /* fall through */
3573 case ARM_VFP_MVFR0:
3574 case ARM_VFP_MVFR1:
3575 if (IS_USER(s)
3576 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
3577 return 1;
3579 tmp = load_cpu_field(vfp.xregs[rn]);
3580 break;
3581 default:
3582 return 1;
3584 } else {
3585 gen_mov_F0_vreg(0, rn);
3586 tmp = gen_vfp_mrs();
3588 if (rd == 15) {
3589 /* Set the 4 flag bits in the CPSR. */
3590 gen_set_nzcv(tmp);
3591 tcg_temp_free_i32(tmp);
3592 } else {
3593 store_reg(s, rd, tmp);
3595 } else {
3596 /* arm->vfp */
3597 if (insn & (1 << 21)) {
3598 rn >>= 1;
3599 /* system register */
3600 switch (rn) {
3601 case ARM_VFP_FPSID:
3602 case ARM_VFP_MVFR0:
3603 case ARM_VFP_MVFR1:
3604 /* Writes are ignored. */
3605 break;
3606 case ARM_VFP_FPSCR:
3607 tmp = load_reg(s, rd);
3608 gen_helper_vfp_set_fpscr(cpu_env, tmp);
3609 tcg_temp_free_i32(tmp);
3610 gen_lookup_tb(s);
3611 break;
3612 case ARM_VFP_FPEXC:
3613 if (IS_USER(s))
3614 return 1;
3615 /* TODO: VFP subarchitecture support.
3616 * For now, keep the EN bit only */
3617 tmp = load_reg(s, rd);
3618 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
3619 store_cpu_field(tmp, vfp.xregs[rn]);
3620 gen_lookup_tb(s);
3621 break;
3622 case ARM_VFP_FPINST:
3623 case ARM_VFP_FPINST2:
3624 if (IS_USER(s)) {
3625 return 1;
3627 tmp = load_reg(s, rd);
3628 store_cpu_field(tmp, vfp.xregs[rn]);
3629 break;
3630 default:
3631 return 1;
3633 } else {
3634 tmp = load_reg(s, rd);
3635 gen_vfp_msr(tmp);
3636 gen_mov_vreg_F0(0, rn);
3640 } else {
3641 /* data processing */
3642 /* The opcode is in bits 23, 21, 20 and 6. */
3643 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3644 if (dp) {
3645 if (op == 15) {
3646 /* rn is opcode */
3647 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3648 } else {
3649 /* rn is register number */
3650 VFP_DREG_N(rn, insn);
3653 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3654 ((rn & 0x1e) == 0x6))) {
3655 /* Integer or single/half precision destination. */
3656 rd = VFP_SREG_D(insn);
3657 } else {
3658 VFP_DREG_D(rd, insn);
3660 if (op == 15 &&
3661 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3662 ((rn & 0x1e) == 0x4))) {
3663 /* VCVT from int or half precision is always from S reg
3664 * regardless of dp bit. VCVT with immediate frac_bits
3665 * has same format as SREG_M.
3667 rm = VFP_SREG_M(insn);
3668 } else {
3669 VFP_DREG_M(rm, insn);
3671 } else {
3672 rn = VFP_SREG_N(insn);
3673 if (op == 15 && rn == 15) {
3674 /* Double precision destination. */
3675 VFP_DREG_D(rd, insn);
3676 } else {
3677 rd = VFP_SREG_D(insn);
3679 /* NB that we implicitly rely on the encoding for the frac_bits
3680 * in VCVT of fixed to float being the same as that of an SREG_M
3682 rm = VFP_SREG_M(insn);
3685 veclen = s->vec_len;
3686 if (op == 15 && rn > 3)
3687 veclen = 0;
3689 /* Shut up compiler warnings. */
3690 delta_m = 0;
3691 delta_d = 0;
3692 bank_mask = 0;
3694 if (veclen > 0) {
3695 if (dp)
3696 bank_mask = 0xc;
3697 else
3698 bank_mask = 0x18;
3700 /* Figure out what type of vector operation this is. */
3701 if ((rd & bank_mask) == 0) {
3702 /* scalar */
3703 veclen = 0;
3704 } else {
3705 if (dp)
3706 delta_d = (s->vec_stride >> 1) + 1;
3707 else
3708 delta_d = s->vec_stride + 1;
3710 if ((rm & bank_mask) == 0) {
3711 /* mixed scalar/vector */
3712 delta_m = 0;
3713 } else {
3714 /* vector */
3715 delta_m = delta_d;
3720 /* Load the initial operands. */
3721 if (op == 15) {
3722 switch (rn) {
3723 case 16:
3724 case 17:
3725 /* Integer source */
3726 gen_mov_F0_vreg(0, rm);
3727 break;
3728 case 8:
3729 case 9:
3730 /* Compare */
3731 gen_mov_F0_vreg(dp, rd);
3732 gen_mov_F1_vreg(dp, rm);
3733 break;
3734 case 10:
3735 case 11:
3736 /* Compare with zero */
3737 gen_mov_F0_vreg(dp, rd);
3738 gen_vfp_F1_ld0(dp);
3739 break;
3740 case 20:
3741 case 21:
3742 case 22:
3743 case 23:
3744 case 28:
3745 case 29:
3746 case 30:
3747 case 31:
3748 /* Source and destination the same. */
3749 gen_mov_F0_vreg(dp, rd);
3750 break;
3751 case 4:
3752 case 5:
3753 case 6:
3754 case 7:
3755 /* VCVTB, VCVTT: only present with the halfprec extension
3756 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3757 * (we choose to UNDEF)
3759 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3760 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
3761 return 1;
3763 if (!extract32(rn, 1, 1)) {
3764 /* Half precision source. */
3765 gen_mov_F0_vreg(0, rm);
3766 break;
3768 /* Otherwise fall through */
3769 default:
3770 /* One source operand. */
3771 gen_mov_F0_vreg(dp, rm);
3772 break;
3774 } else {
3775 /* Two source operands. */
3776 gen_mov_F0_vreg(dp, rn);
3777 gen_mov_F1_vreg(dp, rm);
3780 for (;;) {
3781 /* Perform the calculation. */
3782 switch (op) {
3783 case 0: /* VMLA: fd + (fn * fm) */
3784 /* Note that order of inputs to the add matters for NaNs */
3785 gen_vfp_F1_mul(dp);
3786 gen_mov_F0_vreg(dp, rd);
3787 gen_vfp_add(dp);
3788 break;
3789 case 1: /* VMLS: fd + -(fn * fm) */
3790 gen_vfp_mul(dp);
3791 gen_vfp_F1_neg(dp);
3792 gen_mov_F0_vreg(dp, rd);
3793 gen_vfp_add(dp);
3794 break;
3795 case 2: /* VNMLS: -fd + (fn * fm) */
3796 /* Note that it isn't valid to replace (-A + B) with (B - A)
3797 * or similar plausible looking simplifications
3798 * because this will give wrong results for NaNs.
3800 gen_vfp_F1_mul(dp);
3801 gen_mov_F0_vreg(dp, rd);
3802 gen_vfp_neg(dp);
3803 gen_vfp_add(dp);
3804 break;
3805 case 3: /* VNMLA: -fd + -(fn * fm) */
3806 gen_vfp_mul(dp);
3807 gen_vfp_F1_neg(dp);
3808 gen_mov_F0_vreg(dp, rd);
3809 gen_vfp_neg(dp);
3810 gen_vfp_add(dp);
3811 break;
3812 case 4: /* mul: fn * fm */
3813 gen_vfp_mul(dp);
3814 break;
3815 case 5: /* nmul: -(fn * fm) */
3816 gen_vfp_mul(dp);
3817 gen_vfp_neg(dp);
3818 break;
3819 case 6: /* add: fn + fm */
3820 gen_vfp_add(dp);
3821 break;
3822 case 7: /* sub: fn - fm */
3823 gen_vfp_sub(dp);
3824 break;
3825 case 8: /* div: fn / fm */
3826 gen_vfp_div(dp);
3827 break;
3828 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3829 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3830 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3831 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3832 /* These are fused multiply-add, and must be done as one
3833 * floating point operation with no rounding between the
3834 * multiplication and addition steps.
3835 * NB that doing the negations here as separate steps is
3836 * correct: an input NaN should come out with its sign bit
3837 * flipped if it is a negated input.
3838 */
3839 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
3840 return 1;
3842 if (dp) {
3843 TCGv_ptr fpst;
3844 TCGv_i64 frd;
3845 if (op & 1) {
3846 /* VFNMS, VFMS */
3847 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3849 frd = tcg_temp_new_i64();
3850 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3851 if (op & 2) {
3852 /* VFNMA, VFNMS */
3853 gen_helper_vfp_negd(frd, frd);
3855 fpst = get_fpstatus_ptr(0);
3856 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3857 cpu_F1d, frd, fpst);
3858 tcg_temp_free_ptr(fpst);
3859 tcg_temp_free_i64(frd);
3860 } else {
3861 TCGv_ptr fpst;
3862 TCGv_i32 frd;
3863 if (op & 1) {
3864 /* VFNMS, VFMS */
3865 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3867 frd = tcg_temp_new_i32();
3868 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3869 if (op & 2) {
3870 gen_helper_vfp_negs(frd, frd);
3872 fpst = get_fpstatus_ptr(0);
3873 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3874 cpu_F1s, frd, fpst);
3875 tcg_temp_free_ptr(fpst);
3876 tcg_temp_free_i32(frd);
3878 break;
3879 case 14: /* fconst */
3880 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3881 return 1;
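                /* VMOV immediate: the 8-bit encoded constant abcdefgh expands
                 * to the bit pattern a : NOT(b) : b...b : cdefgh followed by
                 * zeroes (5 or 8 copies of b for single/double; VFPExpandImm
                 * in the ARM ARM).  The code below rebuilds that pattern in
                 * the top bits of the single or double value.
                 */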
3884 n = (insn << 12) & 0x80000000;
3885 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3886 if (dp) {
3887 if (i & 0x40)
3888 i |= 0x3f80;
3889 else
3890 i |= 0x4000;
3891 n |= i << 16;
3892 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3893 } else {
3894 if (i & 0x40)
3895 i |= 0x780;
3896 else
3897 i |= 0x800;
3898 n |= i << 19;
3899 tcg_gen_movi_i32(cpu_F0s, n);
3901 break;
3902 case 15: /* extension space */
3903 switch (rn) {
3904 case 0: /* cpy */
3905 /* no-op */
3906 break;
3907 case 1: /* abs */
3908 gen_vfp_abs(dp);
3909 break;
3910 case 2: /* neg */
3911 gen_vfp_neg(dp);
3912 break;
3913 case 3: /* sqrt */
3914 gen_vfp_sqrt(dp);
3915 break;
3916 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3918 TCGv_ptr fpst = get_fpstatus_ptr(false);
3919 TCGv_i32 ahp_mode = get_ahp_flag();
3920 tmp = gen_vfp_mrs();
3921 tcg_gen_ext16u_i32(tmp, tmp);
3922 if (dp) {
3923 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3924 fpst, ahp_mode);
3925 } else {
3926 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3927 fpst, ahp_mode);
3929 tcg_temp_free_i32(ahp_mode);
3930 tcg_temp_free_ptr(fpst);
3931 tcg_temp_free_i32(tmp);
3932 break;
3934 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3936 TCGv_ptr fpst = get_fpstatus_ptr(false);
3937 TCGv_i32 ahp = get_ahp_flag();
3938 tmp = gen_vfp_mrs();
3939 tcg_gen_shri_i32(tmp, tmp, 16);
3940 if (dp) {
3941 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3942 fpst, ahp);
3943 } else {
3944 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3945 fpst, ahp);
3947 tcg_temp_free_i32(tmp);
3948 tcg_temp_free_i32(ahp);
3949 tcg_temp_free_ptr(fpst);
3950 break;
3952 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3954 TCGv_ptr fpst = get_fpstatus_ptr(false);
3955 TCGv_i32 ahp = get_ahp_flag();
3956 tmp = tcg_temp_new_i32();
3958 if (dp) {
3959 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3960 fpst, ahp);
3961 } else {
3962 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3963 fpst, ahp);
3965 tcg_temp_free_i32(ahp);
3966 tcg_temp_free_ptr(fpst);
3967 gen_mov_F0_vreg(0, rd);
3968 tmp2 = gen_vfp_mrs();
3969 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3970 tcg_gen_or_i32(tmp, tmp, tmp2);
3971 tcg_temp_free_i32(tmp2);
3972 gen_vfp_msr(tmp);
3973 break;
3975 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3977 TCGv_ptr fpst = get_fpstatus_ptr(false);
3978 TCGv_i32 ahp = get_ahp_flag();
3979 tmp = tcg_temp_new_i32();
3980 if (dp) {
3981 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3982 fpst, ahp);
3983 } else {
3984 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3985 fpst, ahp);
3987 tcg_temp_free_i32(ahp);
3988 tcg_temp_free_ptr(fpst);
3989 tcg_gen_shli_i32(tmp, tmp, 16);
3990 gen_mov_F0_vreg(0, rd);
3991 tmp2 = gen_vfp_mrs();
3992 tcg_gen_ext16u_i32(tmp2, tmp2);
3993 tcg_gen_or_i32(tmp, tmp, tmp2);
3994 tcg_temp_free_i32(tmp2);
3995 gen_vfp_msr(tmp);
3996 break;
3998 case 8: /* cmp */
3999 gen_vfp_cmp(dp);
4000 break;
4001 case 9: /* cmpe */
4002 gen_vfp_cmpe(dp);
4003 break;
4004 case 10: /* cmpz */
4005 gen_vfp_cmp(dp);
4006 break;
4007 case 11: /* cmpez */
4008 gen_vfp_F1_ld0(dp);
4009 gen_vfp_cmpe(dp);
4010 break;
4011 case 12: /* vrintr */
4013 TCGv_ptr fpst = get_fpstatus_ptr(0);
4014 if (dp) {
4015 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
4016 } else {
4017 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
4019 tcg_temp_free_ptr(fpst);
4020 break;
4022 case 13: /* vrintz */
4024 TCGv_ptr fpst = get_fpstatus_ptr(0);
4025 TCGv_i32 tcg_rmode;
4026 tcg_rmode = tcg_const_i32(float_round_to_zero);
4027 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
4028 if (dp) {
4029 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
4030 } else {
4031 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
4033 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
4034 tcg_temp_free_i32(tcg_rmode);
4035 tcg_temp_free_ptr(fpst);
4036 break;
4038 case 14: /* vrintx */
4040 TCGv_ptr fpst = get_fpstatus_ptr(0);
4041 if (dp) {
4042 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
4043 } else {
4044 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
4046 tcg_temp_free_ptr(fpst);
4047 break;
4049 case 15: /* single<->double conversion */
4050 if (dp)
4051 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
4052 else
4053 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
4054 break;
4055 case 16: /* fuito */
4056 gen_vfp_uito(dp, 0);
4057 break;
4058 case 17: /* fsito */
4059 gen_vfp_sito(dp, 0);
4060 break;
4061 case 20: /* fshto */
4062 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4063 return 1;
4065 gen_vfp_shto(dp, 16 - rm, 0);
4066 break;
4067 case 21: /* fslto */
4068 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4069 return 1;
4071 gen_vfp_slto(dp, 32 - rm, 0);
4072 break;
4073 case 22: /* fuhto */
4074 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4075 return 1;
4077 gen_vfp_uhto(dp, 16 - rm, 0);
4078 break;
4079 case 23: /* fulto */
4080 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4081 return 1;
4083 gen_vfp_ulto(dp, 32 - rm, 0);
4084 break;
4085 case 24: /* ftoui */
4086 gen_vfp_toui(dp, 0);
4087 break;
4088 case 25: /* ftouiz */
4089 gen_vfp_touiz(dp, 0);
4090 break;
4091 case 26: /* ftosi */
4092 gen_vfp_tosi(dp, 0);
4093 break;
4094 case 27: /* ftosiz */
4095 gen_vfp_tosiz(dp, 0);
4096 break;
4097 case 28: /* ftosh */
4098 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4099 return 1;
4101 gen_vfp_tosh(dp, 16 - rm, 0);
4102 break;
4103 case 29: /* ftosl */
4104 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4105 return 1;
4107 gen_vfp_tosl(dp, 32 - rm, 0);
4108 break;
4109 case 30: /* ftouh */
4110 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4111 return 1;
4113 gen_vfp_touh(dp, 16 - rm, 0);
4114 break;
4115 case 31: /* ftoul */
4116 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4117 return 1;
4119 gen_vfp_toul(dp, 32 - rm, 0);
4120 break;
4121 default: /* undefined */
4122 return 1;
4124 break;
4125 default: /* undefined */
4126 return 1;
4129 /* Write back the result. */
4130 if (op == 15 && (rn >= 8 && rn <= 11)) {
4131 /* Comparison, do nothing. */
4132 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
4133 (rn & 0x1e) == 0x6)) {
4134 /* VCVT double to int: always integer result.
4135 * VCVT double to half precision is always a single
4136 * precision result.
4138 gen_mov_vreg_F0(0, rd);
4139 } else if (op == 15 && rn == 15) {
4140 /* conversion */
4141 gen_mov_vreg_F0(!dp, rd);
4142 } else {
4143 gen_mov_vreg_F0(dp, rd);
4146 /* break out of the loop if we have finished */
4147 if (veclen == 0)
4148 break;
4150 if (op == 15 && delta_m == 0) {
4151 /* single source one-many */
4152 while (veclen--) {
4153 rd = ((rd + delta_d) & (bank_mask - 1))
4154 | (rd & bank_mask);
4155 gen_mov_vreg_F0(dp, rd);
4157 break;
4159 /* Setup the next operands. */
4160 veclen--;
4161 rd = ((rd + delta_d) & (bank_mask - 1))
4162 | (rd & bank_mask);
4164 if (op == 15) {
4165 /* One source operand. */
4166 rm = ((rm + delta_m) & (bank_mask - 1))
4167 | (rm & bank_mask);
4168 gen_mov_F0_vreg(dp, rm);
4169 } else {
4170 /* Two source operands. */
4171 rn = ((rn + delta_d) & (bank_mask - 1))
4172 | (rn & bank_mask);
4173 gen_mov_F0_vreg(dp, rn);
4174 if (delta_m) {
4175 rm = ((rm + delta_m) & (bank_mask - 1))
4176 | (rm & bank_mask);
4177 gen_mov_F1_vreg(dp, rm);
4182 break;
4183 case 0xc:
4184 case 0xd:
4185 if ((insn & 0x03e00000) == 0x00400000) {
4186 /* two-register transfer */
4187 rn = (insn >> 16) & 0xf;
4188 rd = (insn >> 12) & 0xf;
4189 if (dp) {
4190 VFP_DREG_M(rm, insn);
4191 } else {
4192 rm = VFP_SREG_M(insn);
4195 if (insn & ARM_CP_RW_BIT) {
4196 /* vfp->arm */
4197 if (dp) {
4198 gen_mov_F0_vreg(0, rm * 2);
4199 tmp = gen_vfp_mrs();
4200 store_reg(s, rd, tmp);
4201 gen_mov_F0_vreg(0, rm * 2 + 1);
4202 tmp = gen_vfp_mrs();
4203 store_reg(s, rn, tmp);
4204 } else {
4205 gen_mov_F0_vreg(0, rm);
4206 tmp = gen_vfp_mrs();
4207 store_reg(s, rd, tmp);
4208 gen_mov_F0_vreg(0, rm + 1);
4209 tmp = gen_vfp_mrs();
4210 store_reg(s, rn, tmp);
4212 } else {
4213 /* arm->vfp */
4214 if (dp) {
4215 tmp = load_reg(s, rd);
4216 gen_vfp_msr(tmp);
4217 gen_mov_vreg_F0(0, rm * 2);
4218 tmp = load_reg(s, rn);
4219 gen_vfp_msr(tmp);
4220 gen_mov_vreg_F0(0, rm * 2 + 1);
4221 } else {
4222 tmp = load_reg(s, rd);
4223 gen_vfp_msr(tmp);
4224 gen_mov_vreg_F0(0, rm);
4225 tmp = load_reg(s, rn);
4226 gen_vfp_msr(tmp);
4227 gen_mov_vreg_F0(0, rm + 1);
4230 } else {
4231 /* Load/store */
4232 rn = (insn >> 16) & 0xf;
4233 if (dp)
4234 VFP_DREG_D(rd, insn);
4235 else
4236 rd = VFP_SREG_D(insn);
4237 if ((insn & 0x01200000) == 0x01000000) {
4238 /* Single load/store */
4239 offset = (insn & 0xff) << 2;
4240 if ((insn & (1 << 23)) == 0)
4241 offset = -offset;
4242 if (s->thumb && rn == 15) {
4243 /* This is actually UNPREDICTABLE */
4244 addr = tcg_temp_new_i32();
4245 tcg_gen_movi_i32(addr, s->pc & ~2);
4246 } else {
4247 addr = load_reg(s, rn);
4249 tcg_gen_addi_i32(addr, addr, offset);
4250 if (insn & (1 << 20)) {
4251 gen_vfp_ld(s, dp, addr);
4252 gen_mov_vreg_F0(dp, rd);
4253 } else {
4254 gen_mov_F0_vreg(dp, rd);
4255 gen_vfp_st(s, dp, addr);
4257 tcg_temp_free_i32(addr);
4258 } else {
4259 /* load/store multiple */
4260 int w = insn & (1 << 21);
4261 if (dp)
4262 n = (insn >> 1) & 0x7f;
4263 else
4264 n = insn & 0xff;
4266 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4267 /* P == U , W == 1 => UNDEF */
4268 return 1;
4270 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4271 /* UNPREDICTABLE cases for bad immediates: we choose to
4272 * UNDEF to avoid generating huge numbers of TCG ops */
4274 return 1;
4276 if (rn == 15 && w) {
4277 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4278 return 1;
4281 if (s->thumb && rn == 15) {
4282 /* This is actually UNPREDICTABLE */
4283 addr = tcg_temp_new_i32();
4284 tcg_gen_movi_i32(addr, s->pc & ~2);
4285 } else {
4286 addr = load_reg(s, rn);
4288 if (insn & (1 << 24)) /* pre-decrement */
4289 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
4291 if (s->v8m_stackcheck && rn == 13 && w) {
4293 /* Here 'addr' is the lowest address we will store to,
4294 * and is either the old SP (if post-increment) or
4295 * the new SP (if pre-decrement). For post-increment
4296 * where the old value is below the limit and the new
4297 * value is above, it is UNKNOWN whether the limit check
4298 * triggers; we choose to trigger. */
4300 gen_helper_v8m_stackcheck(cpu_env, addr);
4303 if (dp)
4304 offset = 8;
4305 else
4306 offset = 4;
4307 for (i = 0; i < n; i++) {
4308 if (insn & ARM_CP_RW_BIT) {
4309 /* load */
4310 gen_vfp_ld(s, dp, addr);
4311 gen_mov_vreg_F0(dp, rd + i);
4312 } else {
4313 /* store */
4314 gen_mov_F0_vreg(dp, rd + i);
4315 gen_vfp_st(s, dp, addr);
4317 tcg_gen_addi_i32(addr, addr, offset);
4319 if (w) {
4320 /* writeback */
4321 if (insn & (1 << 24))
4322 offset = -offset * n;
4323 else if (dp && (insn & 1))
4324 offset = 4;
4325 else
4326 offset = 0;
4328 if (offset != 0)
4329 tcg_gen_addi_i32(addr, addr, offset);
4330 store_reg(s, rn, addr);
4331 } else {
4332 tcg_temp_free_i32(addr);
4336 break;
4337 default:
4338 /* Should never happen. */
4339 return 1;
4341 return 0;
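/* use_goto_tb: decide whether we may chain directly to 'dest' with
 * tcg_gen_goto_tb.  Direct chaining is only used when the destination
 * lies in the same guest page as the start of this TB or as the current
 * instruction, since TB invalidation and page protection are handled at
 * page granularity; otherwise gen_goto_tb falls back to the indirect
 * jump via gen_goto_ptr().
 */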
4344 static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
4346 #ifndef CONFIG_USER_ONLY
4347 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
4348 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4349 #else
4350 return true;
4351 #endif
4354 static void gen_goto_ptr(void)
4356 tcg_gen_lookup_and_goto_ptr();
4359 /* This will end the TB but doesn't guarantee we'll return to
4360 * cpu_loop_exec. Any live exit_requests will be processed as we
4361 * enter the next TB. */
4363 static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
4365 if (use_goto_tb(s, dest)) {
4366 tcg_gen_goto_tb(n);
4367 gen_set_pc_im(s, dest);
4368 tcg_gen_exit_tb(s->base.tb, n);
4369 } else {
4370 gen_set_pc_im(s, dest);
4371 gen_goto_ptr();
4373 s->base.is_jmp = DISAS_NORETURN;
4376 static inline void gen_jmp (DisasContext *s, uint32_t dest)
4378 if (unlikely(is_singlestepping(s))) {
4379 /* An indirect jump so that we still trigger the debug exception. */
4380 if (s->thumb)
4381 dest |= 1;
4382 gen_bx_im(s, dest);
4383 } else {
4384 gen_goto_tb(s, 0, dest);
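/* gen_mulxy: the 16x16->32 signed multiply step used by the SMULxy /
 * SMLAxy family: x and y select the top (1) or bottom (0) halfword of
 * t0 and t1 respectively; the chosen halves are sign-extended and their
 * product is left in t0.
 */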
4388 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
4390 if (x)
4391 tcg_gen_sari_i32(t0, t0, 16);
4392 else
4393 gen_sxth(t0);
4394 if (y)
4395 tcg_gen_sari_i32(t1, t1, 16);
4396 else
4397 gen_sxth(t1);
4398 tcg_gen_mul_i32(t0, t0, t1);
4401 /* Return the mask of PSR bits set by a MSR instruction. */
4402 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4404 uint32_t mask;
4406 mask = 0;
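    /* The four low bits of 'flags' are the MSR field mask from the
     * instruction: bit 0 selects the control byte PSR[7:0] (c), bit 1
     * the extension byte PSR[15:8] (x), bit 2 the status byte
     * PSR[23:16] (s) and bit 3 the flags byte PSR[31:24] (f).
     */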
4407 if (flags & (1 << 0))
4408 mask |= 0xff;
4409 if (flags & (1 << 1))
4410 mask |= 0xff00;
4411 if (flags & (1 << 2))
4412 mask |= 0xff0000;
4413 if (flags & (1 << 3))
4414 mask |= 0xff000000;
4416 /* Mask out undefined bits. */
4417 mask &= ~CPSR_RESERVED;
4418 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
4419 mask &= ~CPSR_T;
4421 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
4422 mask &= ~CPSR_Q; /* V5TE in reality */
4424 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
4425 mask &= ~(CPSR_E | CPSR_GE);
4427 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
4428 mask &= ~CPSR_IT;
4430 /* Mask out execution state and reserved bits. */
4431 if (!spsr) {
4432 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4434 /* Mask out privileged bits. */
4435 if (IS_USER(s))
4436 mask &= CPSR_USER;
4437 return mask;
4440 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
4441 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
4443 TCGv_i32 tmp;
4444 if (spsr) {
4445 /* ??? This is also undefined in system mode. */
4446 if (IS_USER(s))
4447 return 1;
4449 tmp = load_cpu_field(spsr);
4450 tcg_gen_andi_i32(tmp, tmp, ~mask);
4451 tcg_gen_andi_i32(t0, t0, mask);
4452 tcg_gen_or_i32(tmp, tmp, t0);
4453 store_cpu_field(tmp, spsr);
4454 } else {
4455 gen_set_cpsr(t0, mask);
4457 tcg_temp_free_i32(t0);
4458 gen_lookup_tb(s);
4459 return 0;
4462 /* Returns nonzero if access to the PSR is not permitted. */
4463 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4465 TCGv_i32 tmp;
4466 tmp = tcg_temp_new_i32();
4467 tcg_gen_movi_i32(tmp, val);
4468 return gen_set_psr(s, mask, spsr, tmp);
4471 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
4472 int *tgtmode, int *regno)
4474 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4475 * the target mode and register number, and identify the various
4476 * unpredictable cases.
4477 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4478 * + executed in user mode
4479 * + using R15 as the src/dest register
4480 * + accessing an unimplemented register
4481 * + accessing a register that's inaccessible at current PL/security state*
4482 * + accessing a register that you could access with a different insn
4483 * We choose to UNDEF in all these cases.
4484 * Since we don't know which of the various AArch32 modes we are in
4485 * we have to defer some checks to runtime.
4486 * Accesses to Monitor mode registers from Secure EL1 (which implies
4487 * that EL3 is AArch64) must trap to EL3.
4489 * If the access checks fail this function will emit code to take
4490 * an exception and return false. Otherwise it will return true,
4491 * and set *tgtmode and *regno appropriately. */
4493 int exc_target = default_exception_el(s);
4495 /* These instructions are present only in ARMv8, or in ARMv7 with the
4496 * Virtualization Extensions. */
4498 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4499 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
4500 goto undef;
4503 if (IS_USER(s) || rn == 15) {
4504 goto undef;
4507 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4508 * of registers into (r, sysm). */
4510 if (r) {
4511 /* SPSRs for other modes */
4512 switch (sysm) {
4513 case 0xe: /* SPSR_fiq */
4514 *tgtmode = ARM_CPU_MODE_FIQ;
4515 break;
4516 case 0x10: /* SPSR_irq */
4517 *tgtmode = ARM_CPU_MODE_IRQ;
4518 break;
4519 case 0x12: /* SPSR_svc */
4520 *tgtmode = ARM_CPU_MODE_SVC;
4521 break;
4522 case 0x14: /* SPSR_abt */
4523 *tgtmode = ARM_CPU_MODE_ABT;
4524 break;
4525 case 0x16: /* SPSR_und */
4526 *tgtmode = ARM_CPU_MODE_UND;
4527 break;
4528 case 0x1c: /* SPSR_mon */
4529 *tgtmode = ARM_CPU_MODE_MON;
4530 break;
4531 case 0x1e: /* SPSR_hyp */
4532 *tgtmode = ARM_CPU_MODE_HYP;
4533 break;
4534 default: /* unallocated */
4535 goto undef;
4537 /* We arbitrarily assign SPSR a register number of 16. */
4538 *regno = 16;
4539 } else {
4540 /* general purpose registers for other modes */
4541 switch (sysm) {
4542 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4543 *tgtmode = ARM_CPU_MODE_USR;
4544 *regno = sysm + 8;
4545 break;
4546 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4547 *tgtmode = ARM_CPU_MODE_FIQ;
4548 *regno = sysm;
4549 break;
4550 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4551 *tgtmode = ARM_CPU_MODE_IRQ;
4552 *regno = sysm & 1 ? 13 : 14;
4553 break;
4554 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4555 *tgtmode = ARM_CPU_MODE_SVC;
4556 *regno = sysm & 1 ? 13 : 14;
4557 break;
4558 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4559 *tgtmode = ARM_CPU_MODE_ABT;
4560 *regno = sysm & 1 ? 13 : 14;
4561 break;
4562 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4563 *tgtmode = ARM_CPU_MODE_UND;
4564 *regno = sysm & 1 ? 13 : 14;
4565 break;
4566 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4567 *tgtmode = ARM_CPU_MODE_MON;
4568 *regno = sysm & 1 ? 13 : 14;
4569 break;
4570 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4571 *tgtmode = ARM_CPU_MODE_HYP;
4572 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4573 *regno = sysm & 1 ? 13 : 17;
4574 break;
4575 default: /* unallocated */
4576 goto undef;
4580 /* Catch the 'accessing inaccessible register' cases we can detect
4581 * at translate time. */
4583 switch (*tgtmode) {
4584 case ARM_CPU_MODE_MON:
4585 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
4586 goto undef;
4588 if (s->current_el == 1) {
4589 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4590 * then accesses to Mon registers trap to EL3 */
4592 exc_target = 3;
4593 goto undef;
4595 break;
4596 case ARM_CPU_MODE_HYP:
4598 /* SPSR_hyp and r13_hyp can only be accessed from Monitor mode
4599 * (and so we can forbid accesses from EL2 or below). elr_hyp
4600 * can be accessed also from Hyp mode, so forbid accesses from
4601 * EL0 or EL1. */
4603 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
4604 (s->current_el < 3 && *regno != 17)) {
4605 goto undef;
4607 break;
4608 default:
4609 break;
4612 return true;
4614 undef:
4615 /* If we get here then some access check did not pass */
4616 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
4617 return false;
4620 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4622 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4623 int tgtmode = 0, regno = 0;
4625 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4626 return;
4629 /* Sync state because msr_banked() can raise exceptions */
4630 gen_set_condexec(s);
4631 gen_set_pc_im(s, s->pc - 4);
4632 tcg_reg = load_reg(s, rn);
4633 tcg_tgtmode = tcg_const_i32(tgtmode);
4634 tcg_regno = tcg_const_i32(regno);
4635 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4636 tcg_temp_free_i32(tcg_tgtmode);
4637 tcg_temp_free_i32(tcg_regno);
4638 tcg_temp_free_i32(tcg_reg);
4639 s->base.is_jmp = DISAS_UPDATE;
4642 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4644 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4645 int tgtmode = 0, regno = 0;
4647 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4648 return;
4651 /* Sync state because mrs_banked() can raise exceptions */
4652 gen_set_condexec(s);
4653 gen_set_pc_im(s, s->pc - 4);
4654 tcg_reg = tcg_temp_new_i32();
4655 tcg_tgtmode = tcg_const_i32(tgtmode);
4656 tcg_regno = tcg_const_i32(regno);
4657 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4658 tcg_temp_free_i32(tcg_tgtmode);
4659 tcg_temp_free_i32(tcg_regno);
4660 store_reg(s, rn, tcg_reg);
4661 s->base.is_jmp = DISAS_UPDATE;
4664 /* Store value to PC as for an exception return (ie don't
4665 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
4666 * will do the masking based on the new value of the Thumb bit. */
4668 static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
4670 tcg_gen_mov_i32(cpu_R[15], pc);
4671 tcg_temp_free_i32(pc);
4674 /* Generate a v6 exception return. Marks both values as dead. */
4675 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
4677 store_pc_exc_ret(s, pc);
4678 /* The cpsr_write_eret helper will mask the low bits of PC
4679 * appropriately depending on the new Thumb bit, so it must
4680 * be called after storing the new PC. */
4682 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4683 gen_io_start();
4685 gen_helper_cpsr_write_eret(cpu_env, cpsr);
4686 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4687 gen_io_end();
4689 tcg_temp_free_i32(cpsr);
4690 /* Must exit loop to check un-masked IRQs */
4691 s->base.is_jmp = DISAS_EXIT;
4694 /* Generate an old-style exception return. Marks pc as dead. */
4695 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4697 gen_rfe(s, pc, load_cpu_field(spsr));
4701 /* For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
4702 * only call the helper when running single threaded TCG code to ensure
4703 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4704 * just skip this instruction. Currently the SEV/SEVL instructions
4705 * which are *one* of many ways to wake the CPU from WFE are not
4706 * implemented so we can't sleep like WFI does. */
4708 static void gen_nop_hint(DisasContext *s, int val)
4710 switch (val) {
4711 /* When running in MTTCG we don't generate jumps to the yield and
4712 * WFE helpers as it won't affect the scheduling of other vCPUs.
4713 * If we wanted to more completely model WFE/SEV so we don't busy
4714 * spin unnecessarily we would need to do something more involved. */
4716 case 1: /* yield */
4717 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4718 gen_set_pc_im(s, s->pc);
4719 s->base.is_jmp = DISAS_YIELD;
4721 break;
4722 case 3: /* wfi */
4723 gen_set_pc_im(s, s->pc);
4724 s->base.is_jmp = DISAS_WFI;
4725 break;
4726 case 2: /* wfe */
4727 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4728 gen_set_pc_im(s, s->pc);
4729 s->base.is_jmp = DISAS_WFE;
4731 break;
4732 case 4: /* sev */
4733 case 5: /* sevl */
4734 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
4735 default: /* nop */
4736 break;
4740 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
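/* CPU_V001 expands to the common (dest, src1, src2) argument triple in
 * which the destination doubles as the first source, i.e. the call
 * computes cpu_V0 = op(cpu_V0, cpu_V1).
 */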
4742 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
4744 switch (size) {
4745 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4746 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4747 case 2: tcg_gen_add_i32(t0, t0, t1); break;
4748 default: abort();
4752 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
4754 switch (size) {
4755 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4756 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4757 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
4758 default: return;
4762 /* 32-bit pairwise ops end up the same as the elementwise versions. */
4763 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4764 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4765 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4766 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
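/* The two macros below dispatch on ((size << 1) | u), i.e.
 * 0 -> s8, 1 -> u8, 2 -> s16, 3 -> u16, 4 -> s32, 5 -> u32;
 * any other size/u combination makes the expansion return 1 (UNDEF).
 */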
4768 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
4769 switch ((size << 1) | u) { \
4770 case 0: \
4771 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
4772 break; \
4773 case 1: \
4774 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
4775 break; \
4776 case 2: \
4777 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
4778 break; \
4779 case 3: \
4780 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
4781 break; \
4782 case 4: \
4783 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
4784 break; \
4785 case 5: \
4786 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
4787 break; \
4788 default: return 1; \
4789 }} while (0)
4791 #define GEN_NEON_INTEGER_OP(name) do { \
4792 switch ((size << 1) | u) { \
4793 case 0: \
4794 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
4795 break; \
4796 case 1: \
4797 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
4798 break; \
4799 case 2: \
4800 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
4801 break; \
4802 case 3: \
4803 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
4804 break; \
4805 case 4: \
4806 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
4807 break; \
4808 case 5: \
4809 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
4810 break; \
4811 default: return 1; \
4812 }} while (0)
4814 static TCGv_i32 neon_load_scratch(int scratch)
4816 TCGv_i32 tmp = tcg_temp_new_i32();
4817 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4818 return tmp;
4821 static void neon_store_scratch(int scratch, TCGv_i32 var)
4823 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4824 tcg_temp_free_i32(var);
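/* neon_get_scalar: load the scalar operand of a "by scalar" operation
 * into a 32-bit temp.  For 16-bit scalars (size == 1) the selected
 * halfword is duplicated into both halves of the result so it can be
 * used exactly like a vector element; 32-bit scalars are returned
 * unchanged.
 */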
4827 static inline TCGv_i32 neon_get_scalar(int size, int reg)
4829 TCGv_i32 tmp;
4830 if (size == 1) {
4831 tmp = neon_load_reg(reg & 7, reg >> 4);
4832 if (reg & 8) {
4833 gen_neon_dup_high16(tmp);
4834 } else {
4835 gen_neon_dup_low16(tmp);
4837 } else {
4838 tmp = neon_load_reg(reg & 15, reg >> 4);
4840 return tmp;
4843 static int gen_neon_unzip(int rd, int rm, int size, int q)
4845 TCGv_ptr pd, pm;
4847 if (!q && size == 2) {
4848 return 1;
4850 pd = vfp_reg_ptr(true, rd);
4851 pm = vfp_reg_ptr(true, rm);
4852 if (q) {
4853 switch (size) {
4854 case 0:
4855 gen_helper_neon_qunzip8(pd, pm);
4856 break;
4857 case 1:
4858 gen_helper_neon_qunzip16(pd, pm);
4859 break;
4860 case 2:
4861 gen_helper_neon_qunzip32(pd, pm);
4862 break;
4863 default:
4864 abort();
4866 } else {
4867 switch (size) {
4868 case 0:
4869 gen_helper_neon_unzip8(pd, pm);
4870 break;
4871 case 1:
4872 gen_helper_neon_unzip16(pd, pm);
4873 break;
4874 default:
4875 abort();
4878 tcg_temp_free_ptr(pd);
4879 tcg_temp_free_ptr(pm);
4880 return 0;
4883 static int gen_neon_zip(int rd, int rm, int size, int q)
4885 TCGv_ptr pd, pm;
4887 if (!q && size == 2) {
4888 return 1;
4890 pd = vfp_reg_ptr(true, rd);
4891 pm = vfp_reg_ptr(true, rm);
4892 if (q) {
4893 switch (size) {
4894 case 0:
4895 gen_helper_neon_qzip8(pd, pm);
4896 break;
4897 case 1:
4898 gen_helper_neon_qzip16(pd, pm);
4899 break;
4900 case 2:
4901 gen_helper_neon_qzip32(pd, pm);
4902 break;
4903 default:
4904 abort();
4906 } else {
4907 switch (size) {
4908 case 0:
4909 gen_helper_neon_zip8(pd, pm);
4910 break;
4911 case 1:
4912 gen_helper_neon_zip16(pd, pm);
4913 break;
4914 default:
4915 abort();
4918 tcg_temp_free_ptr(pd);
4919 tcg_temp_free_ptr(pm);
4920 return 0;
4923 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
4925 TCGv_i32 rd, tmp;
4927 rd = tcg_temp_new_i32();
4928 tmp = tcg_temp_new_i32();
4930 tcg_gen_shli_i32(rd, t0, 8);
4931 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4932 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4933 tcg_gen_or_i32(rd, rd, tmp);
4935 tcg_gen_shri_i32(t1, t1, 8);
4936 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4937 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4938 tcg_gen_or_i32(t1, t1, tmp);
4939 tcg_gen_mov_i32(t0, rd);
4941 tcg_temp_free_i32(tmp);
4942 tcg_temp_free_i32(rd);
4945 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
4947 TCGv_i32 rd, tmp;
4949 rd = tcg_temp_new_i32();
4950 tmp = tcg_temp_new_i32();
4952 tcg_gen_shli_i32(rd, t0, 16);
4953 tcg_gen_andi_i32(tmp, t1, 0xffff);
4954 tcg_gen_or_i32(rd, rd, tmp);
4955 tcg_gen_shri_i32(t1, t1, 16);
4956 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4957 tcg_gen_or_i32(t1, t1, tmp);
4958 tcg_gen_mov_i32(t0, rd);
4960 tcg_temp_free_i32(tmp);
4961 tcg_temp_free_i32(rd);
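/* neon_ls_element_type: indexed by the 'op' field of a VLDn/VSTn
 * "multiple structures" insn.  'interleave' is the number of elements
 * per structure (the n in VLDn/VSTn), 'spacing' the register stride
 * between the elements of one structure, and the whole group repeats
 * 'nregs' times, so nregs * interleave D registers are transferred.
 */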
4965 static struct {
4966 int nregs;
4967 int interleave;
4968 int spacing;
4969 } const neon_ls_element_type[11] = {
4970 {1, 4, 1},
4971 {1, 4, 2},
4972 {4, 1, 1},
4973 {2, 2, 2},
4974 {1, 3, 1},
4975 {1, 3, 2},
4976 {3, 1, 1},
4977 {1, 1, 1},
4978 {1, 2, 1},
4979 {1, 2, 2},
4980 {2, 1, 1}
4983 /* Translate a NEON load/store element instruction. Return nonzero if the
4984 instruction is invalid. */
4985 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
4987 int rd, rn, rm;
4988 int op;
4989 int nregs;
4990 int interleave;
4991 int spacing;
4992 int stride;
4993 int size;
4994 int reg;
4995 int load;
4996 int n;
4997 int vec_size;
4998 int mmu_idx;
4999 TCGMemOp endian;
5000 TCGv_i32 addr;
5001 TCGv_i32 tmp;
5002 TCGv_i32 tmp2;
5003 TCGv_i64 tmp64;
5005 /* FIXME: this access check should not take precedence over UNDEF
5006 * for invalid encodings; we will generate incorrect syndrome information
5007 * for attempts to execute invalid vfp/neon encodings with FP disabled. */
5009 if (s->fp_excp_el) {
5010 gen_exception_insn(s, 4, EXCP_UDEF,
5011 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
5012 return 0;
5015 if (!s->vfp_enabled)
5016 return 1;
5017 VFP_DREG_D(rd, insn);
5018 rn = (insn >> 16) & 0xf;
5019 rm = insn & 0xf;
5020 load = (insn & (1 << 21)) != 0;
5021 endian = s->be_data;
5022 mmu_idx = get_mem_index(s);
5023 if ((insn & (1 << 23)) == 0) {
5024 /* Load store all elements. */
5025 op = (insn >> 8) & 0xf;
5026 size = (insn >> 6) & 3;
5027 if (op > 10)
5028 return 1;
5029 /* Catch UNDEF cases for bad values of align field */
5030 switch (op & 0xc) {
5031 case 4:
5032 if (((insn >> 5) & 1) == 1) {
5033 return 1;
5035 break;
5036 case 8:
5037 if (((insn >> 4) & 3) == 3) {
5038 return 1;
5040 break;
5041 default:
5042 break;
5044 nregs = neon_ls_element_type[op].nregs;
5045 interleave = neon_ls_element_type[op].interleave;
5046 spacing = neon_ls_element_type[op].spacing;
5047 if (size == 3 && (interleave | spacing) != 1) {
5048 return 1;
5050 /* For our purposes, bytes are always little-endian. */
5051 if (size == 0) {
5052 endian = MO_LE;
5054 /* Consecutive little-endian elements from a single register
5055 * can be promoted to a larger little-endian operation. */
5057 if (interleave == 1 && endian == MO_LE) {
5058 size = 3;
5060 tmp64 = tcg_temp_new_i64();
5061 addr = tcg_temp_new_i32();
5062 tmp2 = tcg_const_i32(1 << size);
5063 load_reg_var(s, addr, rn);
5064 for (reg = 0; reg < nregs; reg++) {
5065 for (n = 0; n < 8 >> size; n++) {
5066 int xs;
5067 for (xs = 0; xs < interleave; xs++) {
5068 int tt = rd + reg + spacing * xs;
5070 if (load) {
5071 gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
5072 neon_store_element64(tt, n, size, tmp64);
5073 } else {
5074 neon_load_element64(tmp64, tt, n, size);
5075 gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
5077 tcg_gen_add_i32(addr, addr, tmp2);
5081 tcg_temp_free_i32(addr);
5082 tcg_temp_free_i32(tmp2);
5083 tcg_temp_free_i64(tmp64);
5084 stride = nregs * interleave * 8;
5085 } else {
5086 size = (insn >> 10) & 3;
5087 if (size == 3) {
5088 /* Load single element to all lanes. */
5089 int a = (insn >> 4) & 1;
5090 if (!load) {
5091 return 1;
5093 size = (insn >> 6) & 3;
5094 nregs = ((insn >> 8) & 3) + 1;
5096 if (size == 3) {
5097 if (nregs != 4 || a == 0) {
5098 return 1;
5100 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
5101 size = 2;
5103 if (nregs == 1 && a == 1 && size == 0) {
5104 return 1;
5106 if (nregs == 3 && a == 1) {
5107 return 1;
5109 addr = tcg_temp_new_i32();
5110 load_reg_var(s, addr, rn);
5112 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
5113 * VLD2/3/4 to all lanes: bit 5 indicates register stride. */
5115 stride = (insn & (1 << 5)) ? 2 : 1;
5116 vec_size = nregs == 1 ? stride * 8 : 8;
5118 tmp = tcg_temp_new_i32();
5119 for (reg = 0; reg < nregs; reg++) {
5120 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
5121 s->be_data | size);
5122 if ((rd & 1) && vec_size == 16) {
5123 /* We cannot write 16 bytes at once because the
5124 * destination is unaligned. */
5126 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
5127 8, 8, tmp);
5128 tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
5129 neon_reg_offset(rd, 0), 8, 8);
5130 } else {
5131 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
5132 vec_size, vec_size, tmp);
5134 tcg_gen_addi_i32(addr, addr, 1 << size);
5135 rd += stride;
5137 tcg_temp_free_i32(tmp);
5138 tcg_temp_free_i32(addr);
5139 stride = (1 << size) * nregs;
5140 } else {
5141 /* Single element. */
5142 int idx = (insn >> 4) & 0xf;
5143 int reg_idx;
5144 switch (size) {
5145 case 0:
5146 reg_idx = (insn >> 5) & 7;
5147 stride = 1;
5148 break;
5149 case 1:
5150 reg_idx = (insn >> 6) & 3;
5151 stride = (insn & (1 << 5)) ? 2 : 1;
5152 break;
5153 case 2:
5154 reg_idx = (insn >> 7) & 1;
5155 stride = (insn & (1 << 6)) ? 2 : 1;
5156 break;
5157 default:
5158 abort();
5160 nregs = ((insn >> 8) & 3) + 1;
5161 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5162 switch (nregs) {
5163 case 1:
5164 if (((idx & (1 << size)) != 0) ||
5165 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5166 return 1;
5168 break;
5169 case 3:
5170 if ((idx & 1) != 0) {
5171 return 1;
5173 /* fall through */
5174 case 2:
5175 if (size == 2 && (idx & 2) != 0) {
5176 return 1;
5178 break;
5179 case 4:
5180 if ((size == 2) && ((idx & 3) == 3)) {
5181 return 1;
5183 break;
5184 default:
5185 abort();
5187 if ((rd + stride * (nregs - 1)) > 31) {
5188 /* Attempts to write off the end of the register file
5189 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5190 * the neon_load_reg() would write off the end of the array. */
5192 return 1;
5194 tmp = tcg_temp_new_i32();
5195 addr = tcg_temp_new_i32();
5196 load_reg_var(s, addr, rn);
5197 for (reg = 0; reg < nregs; reg++) {
5198 if (load) {
5199 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
5200 s->be_data | size);
5201 neon_store_element(rd, reg_idx, size, tmp);
5202 } else { /* Store */
5203 neon_load_element(tmp, rd, reg_idx, size);
5204 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
5205 s->be_data | size);
5207 rd += stride;
5208 tcg_gen_addi_i32(addr, addr, 1 << size);
5210 tcg_temp_free_i32(addr);
5211 tcg_temp_free_i32(tmp);
5212 stride = nregs * (1 << size);
5215 if (rm != 15) {
5216 TCGv_i32 base;
5218 base = load_reg(s, rn);
5219 if (rm == 13) {
5220 tcg_gen_addi_i32(base, base, stride);
5221 } else {
5222 TCGv_i32 index;
5223 index = load_reg(s, rm);
5224 tcg_gen_add_i32(base, base, index);
5225 tcg_temp_free_i32(index);
5227 store_reg(s, rn, base);
5229 return 0;
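/* Narrowing helpers: each takes a 64-bit source holding double-width
 * elements and writes the narrowed elements into a 32-bit destination;
 * the _sats/_satu/unarrow variants additionally saturate (signed,
 * unsigned, and signed-to-unsigned respectively) and may set the QC
 * saturation flag.
 */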
5232 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
5234 switch (size) {
5235 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5236 case 1: gen_helper_neon_narrow_u16(dest, src); break;
5237 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
5238 default: abort();
5242 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
5244 switch (size) {
5245 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5246 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5247 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
5248 default: abort();
5252 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
5254 switch (size) {
5255 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5256 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5257 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
5258 default: abort();
5262 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
5264 switch (size) {
5265 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5266 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5267 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
5268 default: abort();
5272 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
5273 int q, int u)
5275 if (q) {
5276 if (u) {
5277 switch (size) {
5278 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5279 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5280 default: abort();
5282 } else {
5283 switch (size) {
5284 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5285 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5286 default: abort();
5289 } else {
5290 if (u) {
5291 switch (size) {
5292 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5293 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
5294 default: abort();
5296 } else {
5297 switch (size) {
5298 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5299 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5300 default: abort();
5306 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
5308 if (u) {
5309 switch (size) {
5310 case 0: gen_helper_neon_widen_u8(dest, src); break;
5311 case 1: gen_helper_neon_widen_u16(dest, src); break;
5312 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5313 default: abort();
5315 } else {
5316 switch (size) {
5317 case 0: gen_helper_neon_widen_s8(dest, src); break;
5318 case 1: gen_helper_neon_widen_s16(dest, src); break;
5319 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5320 default: abort();
5323 tcg_temp_free_i32(src);
5326 static inline void gen_neon_addl(int size)
5328 switch (size) {
5329 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5330 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5331 case 2: tcg_gen_add_i64(CPU_V001); break;
5332 default: abort();
5336 static inline void gen_neon_subl(int size)
5338 switch (size) {
5339 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5340 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5341 case 2: tcg_gen_sub_i64(CPU_V001); break;
5342 default: abort();
5346 static inline void gen_neon_negl(TCGv_i64 var, int size)
5348 switch (size) {
5349 case 0: gen_helper_neon_negl_u16(var, var); break;
5350 case 1: gen_helper_neon_negl_u32(var, var); break;
5351 case 2:
5352 tcg_gen_neg_i64(var, var);
5353 break;
5354 default: abort();
5358 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
5360 switch (size) {
5361 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5362 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
5363 default: abort();
5367 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5368 int size, int u)
5370 TCGv_i64 tmp;
5372 switch ((size << 1) | u) {
5373 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5374 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5375 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5376 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5377 case 4:
5378 tmp = gen_muls_i64_i32(a, b);
5379 tcg_gen_mov_i64(dest, tmp);
5380 tcg_temp_free_i64(tmp);
5381 break;
5382 case 5:
5383 tmp = gen_mulu_i64_i32(a, b);
5384 tcg_gen_mov_i64(dest, tmp);
5385 tcg_temp_free_i64(tmp);
5386 break;
5387 default: abort();
5390 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5391 Don't forget to clean them now. */
5392 if (size < 2) {
5393 tcg_temp_free_i32(a);
5394 tcg_temp_free_i32(b);
5398 static void gen_neon_narrow_op(int op, int u, int size,
5399 TCGv_i32 dest, TCGv_i64 src)
5401 if (op) {
5402 if (u) {
5403 gen_neon_unarrow_sats(size, dest, src);
5404 } else {
5405 gen_neon_narrow(size, dest, src);
5407 } else {
5408 if (u) {
5409 gen_neon_narrow_satu(size, dest, src);
5410 } else {
5411 gen_neon_narrow_sats(size, dest, src);
5416 /* Symbolic constants for op fields for Neon 3-register same-length.
5417 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
5418 * table A7-9. */
5420 #define NEON_3R_VHADD 0
5421 #define NEON_3R_VQADD 1
5422 #define NEON_3R_VRHADD 2
5423 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
5424 #define NEON_3R_VHSUB 4
5425 #define NEON_3R_VQSUB 5
5426 #define NEON_3R_VCGT 6
5427 #define NEON_3R_VCGE 7
5428 #define NEON_3R_VSHL 8
5429 #define NEON_3R_VQSHL 9
5430 #define NEON_3R_VRSHL 10
5431 #define NEON_3R_VQRSHL 11
5432 #define NEON_3R_VMAX 12
5433 #define NEON_3R_VMIN 13
5434 #define NEON_3R_VABD 14
5435 #define NEON_3R_VABA 15
5436 #define NEON_3R_VADD_VSUB 16
5437 #define NEON_3R_VTST_VCEQ 17
5438 #define NEON_3R_VML 18 /* VMLA, VMLS */
5439 #define NEON_3R_VMUL 19
5440 #define NEON_3R_VPMAX 20
5441 #define NEON_3R_VPMIN 21
5442 #define NEON_3R_VQDMULH_VQRDMULH 22
5443 #define NEON_3R_VPADD_VQRDMLAH 23
5444 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
5445 #define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
5446 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
5447 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
5448 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
5449 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
5450 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
5451 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
5453 static const uint8_t neon_3r_sizes[] = {
5454 [NEON_3R_VHADD] = 0x7,
5455 [NEON_3R_VQADD] = 0xf,
5456 [NEON_3R_VRHADD] = 0x7,
5457 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
5458 [NEON_3R_VHSUB] = 0x7,
5459 [NEON_3R_VQSUB] = 0xf,
5460 [NEON_3R_VCGT] = 0x7,
5461 [NEON_3R_VCGE] = 0x7,
5462 [NEON_3R_VSHL] = 0xf,
5463 [NEON_3R_VQSHL] = 0xf,
5464 [NEON_3R_VRSHL] = 0xf,
5465 [NEON_3R_VQRSHL] = 0xf,
5466 [NEON_3R_VMAX] = 0x7,
5467 [NEON_3R_VMIN] = 0x7,
5468 [NEON_3R_VABD] = 0x7,
5469 [NEON_3R_VABA] = 0x7,
5470 [NEON_3R_VADD_VSUB] = 0xf,
5471 [NEON_3R_VTST_VCEQ] = 0x7,
5472 [NEON_3R_VML] = 0x7,
5473 [NEON_3R_VMUL] = 0x7,
5474 [NEON_3R_VPMAX] = 0x7,
5475 [NEON_3R_VPMIN] = 0x7,
5476 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
5477 [NEON_3R_VPADD_VQRDMLAH] = 0x7,
5478 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
5479 [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
5480 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
5481 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
5482 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
5483 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
5484 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
5485 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
5488 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
5489 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
5490 * table A7-13. */
5492 #define NEON_2RM_VREV64 0
5493 #define NEON_2RM_VREV32 1
5494 #define NEON_2RM_VREV16 2
5495 #define NEON_2RM_VPADDL 4
5496 #define NEON_2RM_VPADDL_U 5
5497 #define NEON_2RM_AESE 6 /* Includes AESD */
5498 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
5499 #define NEON_2RM_VCLS 8
5500 #define NEON_2RM_VCLZ 9
5501 #define NEON_2RM_VCNT 10
5502 #define NEON_2RM_VMVN 11
5503 #define NEON_2RM_VPADAL 12
5504 #define NEON_2RM_VPADAL_U 13
5505 #define NEON_2RM_VQABS 14
5506 #define NEON_2RM_VQNEG 15
5507 #define NEON_2RM_VCGT0 16
5508 #define NEON_2RM_VCGE0 17
5509 #define NEON_2RM_VCEQ0 18
5510 #define NEON_2RM_VCLE0 19
5511 #define NEON_2RM_VCLT0 20
5512 #define NEON_2RM_SHA1H 21
5513 #define NEON_2RM_VABS 22
5514 #define NEON_2RM_VNEG 23
5515 #define NEON_2RM_VCGT0_F 24
5516 #define NEON_2RM_VCGE0_F 25
5517 #define NEON_2RM_VCEQ0_F 26
5518 #define NEON_2RM_VCLE0_F 27
5519 #define NEON_2RM_VCLT0_F 28
5520 #define NEON_2RM_VABS_F 30
5521 #define NEON_2RM_VNEG_F 31
5522 #define NEON_2RM_VSWP 32
5523 #define NEON_2RM_VTRN 33
5524 #define NEON_2RM_VUZP 34
5525 #define NEON_2RM_VZIP 35
5526 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
5527 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
5528 #define NEON_2RM_VSHLL 38
5529 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
5530 #define NEON_2RM_VRINTN 40
5531 #define NEON_2RM_VRINTX 41
5532 #define NEON_2RM_VRINTA 42
5533 #define NEON_2RM_VRINTZ 43
5534 #define NEON_2RM_VCVT_F16_F32 44
5535 #define NEON_2RM_VRINTM 45
5536 #define NEON_2RM_VCVT_F32_F16 46
5537 #define NEON_2RM_VRINTP 47
5538 #define NEON_2RM_VCVTAU 48
5539 #define NEON_2RM_VCVTAS 49
5540 #define NEON_2RM_VCVTNU 50
5541 #define NEON_2RM_VCVTNS 51
5542 #define NEON_2RM_VCVTPU 52
5543 #define NEON_2RM_VCVTPS 53
5544 #define NEON_2RM_VCVTMU 54
5545 #define NEON_2RM_VCVTMS 55
5546 #define NEON_2RM_VRECPE 56
5547 #define NEON_2RM_VRSQRTE 57
5548 #define NEON_2RM_VRECPE_F 58
5549 #define NEON_2RM_VRSQRTE_F 59
5550 #define NEON_2RM_VCVT_FS 60
5551 #define NEON_2RM_VCVT_FU 61
5552 #define NEON_2RM_VCVT_SF 62
5553 #define NEON_2RM_VCVT_UF 63
5555 static int neon_2rm_is_float_op(int op)
5557 /* Return true if this neon 2reg-misc op is float-to-float */
5558 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
5559 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
5560 op == NEON_2RM_VRINTM ||
5561 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
5562 op >= NEON_2RM_VRECPE_F);
5565 static bool neon_2rm_is_v8_op(int op)
5567 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5568 switch (op) {
5569 case NEON_2RM_VRINTN:
5570 case NEON_2RM_VRINTA:
5571 case NEON_2RM_VRINTM:
5572 case NEON_2RM_VRINTP:
5573 case NEON_2RM_VRINTZ:
5574 case NEON_2RM_VRINTX:
5575 case NEON_2RM_VCVTAU:
5576 case NEON_2RM_VCVTAS:
5577 case NEON_2RM_VCVTNU:
5578 case NEON_2RM_VCVTNS:
5579 case NEON_2RM_VCVTPU:
5580 case NEON_2RM_VCVTPS:
5581 case NEON_2RM_VCVTMU:
5582 case NEON_2RM_VCVTMS:
5583 return true;
5584 default:
5585 return false;
5589 /* Each entry in this array has bit n set if the insn allows
5590 * size value n (otherwise it will UNDEF). Since unallocated
5591 * op values will have no bits set they always UNDEF. */
5593 static const uint8_t neon_2rm_sizes[] = {
5594 [NEON_2RM_VREV64] = 0x7,
5595 [NEON_2RM_VREV32] = 0x3,
5596 [NEON_2RM_VREV16] = 0x1,
5597 [NEON_2RM_VPADDL] = 0x7,
5598 [NEON_2RM_VPADDL_U] = 0x7,
5599 [NEON_2RM_AESE] = 0x1,
5600 [NEON_2RM_AESMC] = 0x1,
5601 [NEON_2RM_VCLS] = 0x7,
5602 [NEON_2RM_VCLZ] = 0x7,
5603 [NEON_2RM_VCNT] = 0x1,
5604 [NEON_2RM_VMVN] = 0x1,
5605 [NEON_2RM_VPADAL] = 0x7,
5606 [NEON_2RM_VPADAL_U] = 0x7,
5607 [NEON_2RM_VQABS] = 0x7,
5608 [NEON_2RM_VQNEG] = 0x7,
5609 [NEON_2RM_VCGT0] = 0x7,
5610 [NEON_2RM_VCGE0] = 0x7,
5611 [NEON_2RM_VCEQ0] = 0x7,
5612 [NEON_2RM_VCLE0] = 0x7,
5613 [NEON_2RM_VCLT0] = 0x7,
5614 [NEON_2RM_SHA1H] = 0x4,
5615 [NEON_2RM_VABS] = 0x7,
5616 [NEON_2RM_VNEG] = 0x7,
5617 [NEON_2RM_VCGT0_F] = 0x4,
5618 [NEON_2RM_VCGE0_F] = 0x4,
5619 [NEON_2RM_VCEQ0_F] = 0x4,
5620 [NEON_2RM_VCLE0_F] = 0x4,
5621 [NEON_2RM_VCLT0_F] = 0x4,
5622 [NEON_2RM_VABS_F] = 0x4,
5623 [NEON_2RM_VNEG_F] = 0x4,
5624 [NEON_2RM_VSWP] = 0x1,
5625 [NEON_2RM_VTRN] = 0x7,
5626 [NEON_2RM_VUZP] = 0x7,
5627 [NEON_2RM_VZIP] = 0x7,
5628 [NEON_2RM_VMOVN] = 0x7,
5629 [NEON_2RM_VQMOVN] = 0x7,
5630 [NEON_2RM_VSHLL] = 0x7,
5631 [NEON_2RM_SHA1SU1] = 0x4,
5632 [NEON_2RM_VRINTN] = 0x4,
5633 [NEON_2RM_VRINTX] = 0x4,
5634 [NEON_2RM_VRINTA] = 0x4,
5635 [NEON_2RM_VRINTZ] = 0x4,
5636 [NEON_2RM_VCVT_F16_F32] = 0x2,
5637 [NEON_2RM_VRINTM] = 0x4,
5638 [NEON_2RM_VCVT_F32_F16] = 0x2,
5639 [NEON_2RM_VRINTP] = 0x4,
5640 [NEON_2RM_VCVTAU] = 0x4,
5641 [NEON_2RM_VCVTAS] = 0x4,
5642 [NEON_2RM_VCVTNU] = 0x4,
5643 [NEON_2RM_VCVTNS] = 0x4,
5644 [NEON_2RM_VCVTPU] = 0x4,
5645 [NEON_2RM_VCVTPS] = 0x4,
5646 [NEON_2RM_VCVTMU] = 0x4,
5647 [NEON_2RM_VCVTMS] = 0x4,
5648 [NEON_2RM_VRECPE] = 0x4,
5649 [NEON_2RM_VRSQRTE] = 0x4,
5650 [NEON_2RM_VRECPE_F] = 0x4,
5651 [NEON_2RM_VRSQRTE_F] = 0x4,
5652 [NEON_2RM_VCVT_FS] = 0x4,
5653 [NEON_2RM_VCVT_FU] = 0x4,
5654 [NEON_2RM_VCVT_SF] = 0x4,
5655 [NEON_2RM_VCVT_UF] = 0x4,
5659 /* Expand v8.1 simd helper. */
5660 static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
5661 int q, int rd, int rn, int rm)
5663 if (dc_isar_feature(aa32_rdm, s)) {
5664 int opr_sz = (1 + q) * 8;
5665 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
5666 vfp_reg_offset(1, rn),
5667 vfp_reg_offset(1, rm), cpu_env,
5668 opr_sz, opr_sz, 0, fn);
5669 return 0;
5671 return 1;
5675 /* Expanders for VBitOps_VBIF, VBIT, VBSL. */
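/* All three come down to the bitwise-select identity
 *   (x & sel) | (y & ~sel) == y ^ ((x ^ y) & sel)
 * which needs only two XORs plus an AND (or ANDC), with no NOT and no
 * extra temporary: BSL selects under control of rd, BIT copies in the
 * rn bits where rm is set, and BIF copies in the rn bits where rm is
 * clear.
 */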
5677 static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
5679 tcg_gen_xor_i64(rn, rn, rm);
5680 tcg_gen_and_i64(rn, rn, rd);
5681 tcg_gen_xor_i64(rd, rm, rn);
5684 static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
5686 tcg_gen_xor_i64(rn, rn, rd);
5687 tcg_gen_and_i64(rn, rn, rm);
5688 tcg_gen_xor_i64(rd, rd, rn);
5691 static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
5693 tcg_gen_xor_i64(rn, rn, rd);
5694 tcg_gen_andc_i64(rn, rn, rm);
5695 tcg_gen_xor_i64(rd, rd, rn);
5698 static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
5700 tcg_gen_xor_vec(vece, rn, rn, rm);
5701 tcg_gen_and_vec(vece, rn, rn, rd);
5702 tcg_gen_xor_vec(vece, rd, rm, rn);
5705 static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
5707 tcg_gen_xor_vec(vece, rn, rn, rd);
5708 tcg_gen_and_vec(vece, rn, rn, rm);
5709 tcg_gen_xor_vec(vece, rd, rd, rn);
5712 static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
5714 tcg_gen_xor_vec(vece, rn, rn, rd);
5715 tcg_gen_andc_vec(vece, rn, rn, rm);
5716 tcg_gen_xor_vec(vece, rd, rd, rn);
5719 const GVecGen3 bsl_op = {
5720 .fni8 = gen_bsl_i64,
5721 .fniv = gen_bsl_vec,
5722 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
5723 .load_dest = true
5726 const GVecGen3 bit_op = {
5727 .fni8 = gen_bit_i64,
5728 .fniv = gen_bit_vec,
5729 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
5730 .load_dest = true
5733 const GVecGen3 bif_op = {
5734 .fni8 = gen_bif_i64,
5735 .fniv = gen_bif_vec,
5736 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
5737 .load_dest = true
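/* Shift-right-and-accumulate expanders used for VSRA:
 * d[i] += a[i] >> shift, with an arithmetic shift in the signed (ssra)
 * forms and a logical shift in the unsigned (usra) forms.
 */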
5740 static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5742 tcg_gen_vec_sar8i_i64(a, a, shift);
5743 tcg_gen_vec_add8_i64(d, d, a);
5746 static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5748 tcg_gen_vec_sar16i_i64(a, a, shift);
5749 tcg_gen_vec_add16_i64(d, d, a);
5752 static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
5754 tcg_gen_sari_i32(a, a, shift);
5755 tcg_gen_add_i32(d, d, a);
5758 static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5760 tcg_gen_sari_i64(a, a, shift);
5761 tcg_gen_add_i64(d, d, a);
5764 static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
5766 tcg_gen_sari_vec(vece, a, a, sh);
5767 tcg_gen_add_vec(vece, d, d, a);
5770 const GVecGen2i ssra_op[4] = {
5771 { .fni8 = gen_ssra8_i64,
5772 .fniv = gen_ssra_vec,
5773 .load_dest = true,
5774 .opc = INDEX_op_sari_vec,
5775 .vece = MO_8 },
5776 { .fni8 = gen_ssra16_i64,
5777 .fniv = gen_ssra_vec,
5778 .load_dest = true,
5779 .opc = INDEX_op_sari_vec,
5780 .vece = MO_16 },
5781 { .fni4 = gen_ssra32_i32,
5782 .fniv = gen_ssra_vec,
5783 .load_dest = true,
5784 .opc = INDEX_op_sari_vec,
5785 .vece = MO_32 },
5786 { .fni8 = gen_ssra64_i64,
5787 .fniv = gen_ssra_vec,
5788 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
5789 .load_dest = true,
5790 .opc = INDEX_op_sari_vec,
5791 .vece = MO_64 },
5794 static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5796 tcg_gen_vec_shr8i_i64(a, a, shift);
5797 tcg_gen_vec_add8_i64(d, d, a);
5800 static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5802 tcg_gen_vec_shr16i_i64(a, a, shift);
5803 tcg_gen_vec_add16_i64(d, d, a);
5806 static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
5808 tcg_gen_shri_i32(a, a, shift);
5809 tcg_gen_add_i32(d, d, a);
5812 static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5814 tcg_gen_shri_i64(a, a, shift);
5815 tcg_gen_add_i64(d, d, a);
5818 static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
5820 tcg_gen_shri_vec(vece, a, a, sh);
5821 tcg_gen_add_vec(vece, d, d, a);
5824 const GVecGen2i usra_op[4] = {
5825 { .fni8 = gen_usra8_i64,
5826 .fniv = gen_usra_vec,
5827 .load_dest = true,
5828 .opc = INDEX_op_shri_vec,
5829 .vece = MO_8, },
5830 { .fni8 = gen_usra16_i64,
5831 .fniv = gen_usra_vec,
5832 .load_dest = true,
5833 .opc = INDEX_op_shri_vec,
5834 .vece = MO_16, },
5835 { .fni4 = gen_usra32_i32,
5836 .fniv = gen_usra_vec,
5837 .load_dest = true,
5838 .opc = INDEX_op_shri_vec,
5839 .vece = MO_32, },
5840 { .fni8 = gen_usra64_i64,
5841 .fniv = gen_usra_vec,
5842 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
5843 .load_dest = true,
5844 .opc = INDEX_op_shri_vec,
5845 .vece = MO_64, },
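/* Shift-and-insert expanders used for VSRI and VSLI: the shifted source
 * is merged into the destination, leaving untouched the destination bits
 * that no shifted-in bit can reach (the top 'shift' bits of each element
 * for SRI, the bottom 'shift' bits for SLI).
 */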
5848 static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5850 uint64_t mask = dup_const(MO_8, 0xff >> shift);
5851 TCGv_i64 t = tcg_temp_new_i64();
5853 tcg_gen_shri_i64(t, a, shift);
5854 tcg_gen_andi_i64(t, t, mask);
5855 tcg_gen_andi_i64(d, d, ~mask);
5856 tcg_gen_or_i64(d, d, t);
5857 tcg_temp_free_i64(t);
5860 static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5862 uint64_t mask = dup_const(MO_16, 0xffff >> shift);
5863 TCGv_i64 t = tcg_temp_new_i64();
5865 tcg_gen_shri_i64(t, a, shift);
5866 tcg_gen_andi_i64(t, t, mask);
5867 tcg_gen_andi_i64(d, d, ~mask);
5868 tcg_gen_or_i64(d, d, t);
5869 tcg_temp_free_i64(t);
5872 static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
5874 tcg_gen_shri_i32(a, a, shift);
5875 tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
5878 static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5880 tcg_gen_shri_i64(a, a, shift);
5881 tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
5884 static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
5886 if (sh == 0) {
5887 tcg_gen_mov_vec(d, a);
5888 } else {
5889 TCGv_vec t = tcg_temp_new_vec_matching(d);
5890 TCGv_vec m = tcg_temp_new_vec_matching(d);
5892 tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
5893 tcg_gen_shri_vec(vece, t, a, sh);
5894 tcg_gen_and_vec(vece, d, d, m);
5895 tcg_gen_or_vec(vece, d, d, t);
5897 tcg_temp_free_vec(t);
5898 tcg_temp_free_vec(m);
5902 const GVecGen2i sri_op[4] = {
5903 { .fni8 = gen_shr8_ins_i64,
5904 .fniv = gen_shr_ins_vec,
5905 .load_dest = true,
5906 .opc = INDEX_op_shri_vec,
5907 .vece = MO_8 },
5908 { .fni8 = gen_shr16_ins_i64,
5909 .fniv = gen_shr_ins_vec,
5910 .load_dest = true,
5911 .opc = INDEX_op_shri_vec,
5912 .vece = MO_16 },
5913 { .fni4 = gen_shr32_ins_i32,
5914 .fniv = gen_shr_ins_vec,
5915 .load_dest = true,
5916 .opc = INDEX_op_shri_vec,
5917 .vece = MO_32 },
5918 { .fni8 = gen_shr64_ins_i64,
5919 .fniv = gen_shr_ins_vec,
5920 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
5921 .load_dest = true,
5922 .opc = INDEX_op_shri_vec,
5923 .vece = MO_64 },
5926 static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5928 uint64_t mask = dup_const(MO_8, 0xff << shift);
5929 TCGv_i64 t = tcg_temp_new_i64();
5931 tcg_gen_shli_i64(t, a, shift);
5932 tcg_gen_andi_i64(t, t, mask);
5933 tcg_gen_andi_i64(d, d, ~mask);
5934 tcg_gen_or_i64(d, d, t);
5935 tcg_temp_free_i64(t);
5938 static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5940 uint64_t mask = dup_const(MO_16, 0xffff << shift);
5941 TCGv_i64 t = tcg_temp_new_i64();
5943 tcg_gen_shli_i64(t, a, shift);
5944 tcg_gen_andi_i64(t, t, mask);
5945 tcg_gen_andi_i64(d, d, ~mask);
5946 tcg_gen_or_i64(d, d, t);
5947 tcg_temp_free_i64(t);
5950 static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
5952 tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
5955 static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
5957 tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
5960 static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
5962 if (sh == 0) {
5963 tcg_gen_mov_vec(d, a);
5964 } else {
5965 TCGv_vec t = tcg_temp_new_vec_matching(d);
5966 TCGv_vec m = tcg_temp_new_vec_matching(d);
5968 tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
5969 tcg_gen_shli_vec(vece, t, a, sh);
5970 tcg_gen_and_vec(vece, d, d, m);
5971 tcg_gen_or_vec(vece, d, d, t);
5973 tcg_temp_free_vec(t);
5974 tcg_temp_free_vec(m);
5978 const GVecGen2i sli_op[4] = {
5979 { .fni8 = gen_shl8_ins_i64,
5980 .fniv = gen_shl_ins_vec,
5981 .load_dest = true,
5982 .opc = INDEX_op_shli_vec,
5983 .vece = MO_8 },
5984 { .fni8 = gen_shl16_ins_i64,
5985 .fniv = gen_shl_ins_vec,
5986 .load_dest = true,
5987 .opc = INDEX_op_shli_vec,
5988 .vece = MO_16 },
5989 { .fni4 = gen_shl32_ins_i32,
5990 .fniv = gen_shl_ins_vec,
5991 .load_dest = true,
5992 .opc = INDEX_op_shli_vec,
5993 .vece = MO_32 },
5994 { .fni8 = gen_shl64_ins_i64,
5995 .fniv = gen_shl_ins_vec,
5996 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
5997 .load_dest = true,
5998 .opc = INDEX_op_shli_vec,
5999 .vece = MO_64 },
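/* Multiply-accumulate and multiply-subtract expanders for VMLA/VMLS:
 * d[i] += a[i] * b[i] and d[i] -= a[i] * b[i] respectively.
 */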
6002 static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
6004 gen_helper_neon_mul_u8(a, a, b);
6005 gen_helper_neon_add_u8(d, d, a);
6008 static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
6010 gen_helper_neon_mul_u8(a, a, b);
6011 gen_helper_neon_sub_u8(d, d, a);
6014 static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
6016 gen_helper_neon_mul_u16(a, a, b);
6017 gen_helper_neon_add_u16(d, d, a);
6020 static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
6022 gen_helper_neon_mul_u16(a, a, b);
6023 gen_helper_neon_sub_u16(d, d, a);
6026 static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
6028 tcg_gen_mul_i32(a, a, b);
6029 tcg_gen_add_i32(d, d, a);
6032 static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
6034 tcg_gen_mul_i32(a, a, b);
6035 tcg_gen_sub_i32(d, d, a);
6038 static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
6040 tcg_gen_mul_i64(a, a, b);
6041 tcg_gen_add_i64(d, d, a);
6044 static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
6046 tcg_gen_mul_i64(a, a, b);
6047 tcg_gen_sub_i64(d, d, a);
6050 static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
6052 tcg_gen_mul_vec(vece, a, a, b);
6053 tcg_gen_add_vec(vece, d, d, a);
6056 static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
6058 tcg_gen_mul_vec(vece, a, a, b);
6059 tcg_gen_sub_vec(vece, d, d, a);
6062 /* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
6063 * these tables are shared with AArch64 which does support them. */
6065 const GVecGen3 mla_op[4] = {
6066 { .fni4 = gen_mla8_i32,
6067 .fniv = gen_mla_vec,
6068 .opc = INDEX_op_mul_vec,
6069 .load_dest = true,
6070 .vece = MO_8 },
6071 { .fni4 = gen_mla16_i32,
6072 .fniv = gen_mla_vec,
6073 .opc = INDEX_op_mul_vec,
6074 .load_dest = true,
6075 .vece = MO_16 },
6076 { .fni4 = gen_mla32_i32,
6077 .fniv = gen_mla_vec,
6078 .opc = INDEX_op_mul_vec,
6079 .load_dest = true,
6080 .vece = MO_32 },
6081 { .fni8 = gen_mla64_i64,
6082 .fniv = gen_mla_vec,
6083 .opc = INDEX_op_mul_vec,
6084 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
6085 .load_dest = true,
6086 .vece = MO_64 },
6089 const GVecGen3 mls_op[4] = {
6090 { .fni4 = gen_mls8_i32,
6091 .fniv = gen_mls_vec,
6092 .opc = INDEX_op_mul_vec,
6093 .load_dest = true,
6094 .vece = MO_8 },
6095 { .fni4 = gen_mls16_i32,
6096 .fniv = gen_mls_vec,
6097 .opc = INDEX_op_mul_vec,
6098 .load_dest = true,
6099 .vece = MO_16 },
6100 { .fni4 = gen_mls32_i32,
6101 .fniv = gen_mls_vec,
6102 .opc = INDEX_op_mul_vec,
6103 .load_dest = true,
6104 .vece = MO_32 },
6105 { .fni8 = gen_mls64_i64,
6106 .fniv = gen_mls_vec,
6107 .opc = INDEX_op_mul_vec,
6108 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
6109 .load_dest = true,
6110 .vece = MO_64 },
6113 /* CMTST : test is "if ((X & Y) != 0)". */
6114 static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
6116 tcg_gen_and_i32(d, a, b);
6117 tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
6118 tcg_gen_neg_i32(d, d);
6121 void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
6123 tcg_gen_and_i64(d, a, b);
6124 tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
6125 tcg_gen_neg_i64(d, d);
6128 static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
6130 tcg_gen_and_vec(vece, d, a, b);
6131 tcg_gen_dupi_vec(vece, a, 0);
6132 tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
6135 const GVecGen3 cmtst_op[4] = {
6136 { .fni4 = gen_helper_neon_tst_u8,
6137 .fniv = gen_cmtst_vec,
6138 .vece = MO_8 },
6139 { .fni4 = gen_helper_neon_tst_u16,
6140 .fniv = gen_cmtst_vec,
6141 .vece = MO_16 },
6142 { .fni4 = gen_cmtst_i32,
6143 .fniv = gen_cmtst_vec,
6144 .vece = MO_32 },
6145 { .fni8 = gen_cmtst_i64,
6146 .fniv = gen_cmtst_vec,
6147 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
6148 .vece = MO_64 },
6151 /* Translate a NEON data processing instruction. Return nonzero if the
6152 instruction is invalid.
6153 We process data in a mixture of 32-bit and 64-bit chunks.
6154 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
6156 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
6158 int op;
6159 int q;
6160 int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
6161 int size;
6162 int shift;
6163 int pass;
6164 int count;
6165 int pairwise;
6166 int u;
6167 int vec_size;
6168 uint32_t imm;
6169 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
6170 TCGv_ptr ptr1, ptr2, ptr3;
6171 TCGv_i64 tmp64;
6173 /* FIXME: this access check should not take precedence over UNDEF
6174 * for invalid encodings; we will generate incorrect syndrome information
6175 * for attempts to execute invalid vfp/neon encodings with FP disabled. */
6177 if (s->fp_excp_el) {
6178 gen_exception_insn(s, 4, EXCP_UDEF,
6179 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
6180 return 0;
6183 if (!s->vfp_enabled)
6184 return 1;
6185 q = (insn & (1 << 6)) != 0;
6186 u = (insn >> 24) & 1;
6187 VFP_DREG_D(rd, insn);
6188 VFP_DREG_N(rn, insn);
6189 VFP_DREG_M(rm, insn);
6190 size = (insn >> 20) & 3;
6191 vec_size = q ? 16 : 8;
6192 rd_ofs = neon_reg_offset(rd, 0);
6193 rn_ofs = neon_reg_offset(rn, 0);
6194 rm_ofs = neon_reg_offset(rm, 0);
6196 if ((insn & (1 << 23)) == 0) {
6197 /* Three register same length. */
6198 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
6199 /* Catch invalid op and bad size combinations: UNDEF */
6200 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
6201 return 1;
6203 /* All insns of this form UNDEF for either this condition or the
6204 * superset of cases "Q==1"; we catch the latter later. */
6206 if (q && ((rd | rn | rm) & 1)) {
6207 return 1;
6209 switch (op) {
6210 case NEON_3R_SHA:
6211 /* The SHA-1/SHA-256 3-register instructions require special
6212 * treatment here, as their size field is overloaded as an
6213 * op type selector, and they all consume their input in a
6214 * single pass. */
6216 if (!q) {
6217 return 1;
6219 if (!u) { /* SHA-1 */
6220 if (!dc_isar_feature(aa32_sha1, s)) {
6221 return 1;
6223 ptr1 = vfp_reg_ptr(true, rd);
6224 ptr2 = vfp_reg_ptr(true, rn);
6225 ptr3 = vfp_reg_ptr(true, rm);
6226 tmp4 = tcg_const_i32(size);
6227 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
6228 tcg_temp_free_i32(tmp4);
6229 } else { /* SHA-256 */
6230 if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
6231 return 1;
6233 ptr1 = vfp_reg_ptr(true, rd);
6234 ptr2 = vfp_reg_ptr(true, rn);
6235 ptr3 = vfp_reg_ptr(true, rm);
6236 switch (size) {
6237 case 0:
6238 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
6239 break;
6240 case 1:
6241 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
6242 break;
6243 case 2:
6244 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
6245 break;
6248 tcg_temp_free_ptr(ptr1);
6249 tcg_temp_free_ptr(ptr2);
6250 tcg_temp_free_ptr(ptr3);
6251 return 0;
6253 case NEON_3R_VPADD_VQRDMLAH:
6254 if (!u) {
6255 break; /* VPADD */
6257 /* VQRDMLAH */
6258 switch (size) {
6259 case 1:
6260 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
6261 q, rd, rn, rm);
6262 case 2:
6263 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
6264 q, rd, rn, rm);
6266 return 1;
6268 case NEON_3R_VFM_VQRDMLSH:
6269 if (!u) {
6270 /* VFM, VFMS */
6271 if (size == 1) {
6272 return 1;
6274 break;
6276 /* VQRDMLSH */
6277 switch (size) {
6278 case 1:
6279 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
6280 q, rd, rn, rm);
6281 case 2:
6282 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
6283 q, rd, rn, rm);
6285 return 1;
6287 case NEON_3R_LOGIC: /* Logic ops. */
6288 switch ((u << 2) | size) {
6289 case 0: /* VAND */
6290 tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
6291 vec_size, vec_size);
6292 break;
6293 case 1: /* VBIC */
6294 tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
6295 vec_size, vec_size);
6296 break;
6297 case 2:
6298 if (rn == rm) {
6299 /* VMOV */
6300 tcg_gen_gvec_mov(0, rd_ofs, rn_ofs, vec_size, vec_size);
6301 } else {
6302 /* VORR */
6303 tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
6304 vec_size, vec_size);
6306 break;
6307 case 3: /* VORN */
6308 tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
6309 vec_size, vec_size);
6310 break;
6311 case 4: /* VEOR */
6312 tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
6313 vec_size, vec_size);
6314 break;
6315 case 5: /* VBSL */
6316 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
6317 vec_size, vec_size, &bsl_op);
6318 break;
6319 case 6: /* VBIT */
6320 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
6321 vec_size, vec_size, &bit_op);
6322 break;
6323 case 7: /* VBIF */
6324 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
6325 vec_size, vec_size, &bif_op);
6326 break;
6328 return 0;
6330 case NEON_3R_VADD_VSUB:
6331 if (u) {
6332 tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
6333 vec_size, vec_size);
6334 } else {
6335 tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
6336 vec_size, vec_size);
6338 return 0;
6340 case NEON_3R_VMUL: /* VMUL */
6341 if (u) {
6342 /* Polynomial case allows only P8 and is handled below. */
6343 if (size != 0) {
6344 return 1;
6346 } else {
6347 tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
6348 vec_size, vec_size);
6349 return 0;
6351 break;
6353 case NEON_3R_VML: /* VMLA, VMLS */
6354 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
6355 u ? &mls_op[size] : &mla_op[size]);
6356 return 0;
6358 case NEON_3R_VTST_VCEQ:
6359 if (u) { /* VCEQ */
6360 tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
6361 vec_size, vec_size);
6362 } else { /* VTST */
6363 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
6364 vec_size, vec_size, &cmtst_op[size]);
6366 return 0;
6368 case NEON_3R_VCGT:
6369 tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
6370 rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
6371 return 0;
6373 case NEON_3R_VCGE:
6374 tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
6375 rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
6376 return 0;
6379 if (size == 3) {
6380 /* 64-bit element instructions. */
6381 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6382 neon_load_reg64(cpu_V0, rn + pass);
6383 neon_load_reg64(cpu_V1, rm + pass);
6384 switch (op) {
6385 case NEON_3R_VQADD:
6386 if (u) {
6387 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
6388 cpu_V0, cpu_V1);
6389 } else {
6390 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
6391 cpu_V0, cpu_V1);
6393 break;
6394 case NEON_3R_VQSUB:
6395 if (u) {
6396 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
6397 cpu_V0, cpu_V1);
6398 } else {
6399 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
6400 cpu_V0, cpu_V1);
6402 break;
6403 case NEON_3R_VSHL:
6404 if (u) {
6405 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
6406 } else {
6407 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
6409 break;
6410 case NEON_3R_VQSHL:
6411 if (u) {
6412 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
6413 cpu_V1, cpu_V0);
6414 } else {
6415 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
6416 cpu_V1, cpu_V0);
6418 break;
6419 case NEON_3R_VRSHL:
6420 if (u) {
6421 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
6422 } else {
6423 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
6425 break;
6426 case NEON_3R_VQRSHL:
6427 if (u) {
6428 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
6429 cpu_V1, cpu_V0);
6430 } else {
6431 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
6432 cpu_V1, cpu_V0);
6434 break;
6435 default:
6436 abort();
6438 neon_store_reg64(cpu_V0, rd + pass);
6440 return 0;
6442 pairwise = 0;
6443 switch (op) {
6444 case NEON_3R_VSHL:
6445 case NEON_3R_VQSHL:
6446 case NEON_3R_VRSHL:
6447 case NEON_3R_VQRSHL:
6449 int rtmp;
6450 /* Shift instruction operands are reversed. */
6451 rtmp = rn;
6452 rn = rm;
6453 rm = rtmp;
6455 break;
6456 case NEON_3R_VPADD_VQRDMLAH:
6457 case NEON_3R_VPMAX:
6458 case NEON_3R_VPMIN:
6459 pairwise = 1;
6460 break;
6461 case NEON_3R_FLOAT_ARITH:
6462 pairwise = (u && size < 2); /* if VPADD (float) */
6463 break;
6464 case NEON_3R_FLOAT_MINMAX:
6465 pairwise = u; /* if VPMIN/VPMAX (float) */
6466 break;
6467 case NEON_3R_FLOAT_CMP:
6468 if (!u && size) {
6469 /* no encoding for U=0 C=1x */
6470 return 1;
6472 break;
6473 case NEON_3R_FLOAT_ACMP:
6474 if (!u) {
6475 return 1;
6477 break;
6478 case NEON_3R_FLOAT_MISC:
6479 /* VMAXNM/VMINNM in ARMv8 */
6480 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
6481 return 1;
6483 break;
6484 case NEON_3R_VFM_VQRDMLSH:
6485 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
6486 return 1;
6488 break;
6489 default:
6490 break;
6493 if (pairwise && q) {
6494 /* All the pairwise insns UNDEF if Q is set */
6495 return 1;
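/* For the pairwise forms each pass below consumes both 32-bit halves
 * of a single source register: pass 0 reads Dn and pass 1 reads Dm,
 * so the low half of Dd is produced from Dn and the high half from Dm.
 */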
6498 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6500 if (pairwise) {
6501 /* Pairwise. */
6502 if (pass < 1) {
6503 tmp = neon_load_reg(rn, 0);
6504 tmp2 = neon_load_reg(rn, 1);
6505 } else {
6506 tmp = neon_load_reg(rm, 0);
6507 tmp2 = neon_load_reg(rm, 1);
6509 } else {
6510 /* Elementwise. */
6511 tmp = neon_load_reg(rn, pass);
6512 tmp2 = neon_load_reg(rm, pass);
6514 switch (op) {
6515 case NEON_3R_VHADD:
6516 GEN_NEON_INTEGER_OP(hadd);
6517 break;
6518 case NEON_3R_VQADD:
6519 GEN_NEON_INTEGER_OP_ENV(qadd);
6520 break;
6521 case NEON_3R_VRHADD:
6522 GEN_NEON_INTEGER_OP(rhadd);
6523 break;
6524 case NEON_3R_VHSUB:
6525 GEN_NEON_INTEGER_OP(hsub);
6526 break;
6527 case NEON_3R_VQSUB:
6528 GEN_NEON_INTEGER_OP_ENV(qsub);
6529 break;
6530 case NEON_3R_VSHL:
6531 GEN_NEON_INTEGER_OP(shl);
6532 break;
6533 case NEON_3R_VQSHL:
6534 GEN_NEON_INTEGER_OP_ENV(qshl);
6535 break;
6536 case NEON_3R_VRSHL:
6537 GEN_NEON_INTEGER_OP(rshl);
6538 break;
6539 case NEON_3R_VQRSHL:
6540 GEN_NEON_INTEGER_OP_ENV(qrshl);
6541 break;
6542 case NEON_3R_VMAX:
6543 GEN_NEON_INTEGER_OP(max);
6544 break;
6545 case NEON_3R_VMIN:
6546 GEN_NEON_INTEGER_OP(min);
6547 break;
6548 case NEON_3R_VABD:
6549 GEN_NEON_INTEGER_OP(abd);
6550 break;
6551 case NEON_3R_VABA:
6552 GEN_NEON_INTEGER_OP(abd);
6553 tcg_temp_free_i32(tmp2);
6554 tmp2 = neon_load_reg(rd, pass);
6555 gen_neon_add(size, tmp, tmp2);
6556 break;
6557 case NEON_3R_VMUL:
6558 /* VMUL.P8; other cases already eliminated. */
6559 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
6560 break;
6561 case NEON_3R_VPMAX:
6562 GEN_NEON_INTEGER_OP(pmax);
6563 break;
6564 case NEON_3R_VPMIN:
6565 GEN_NEON_INTEGER_OP(pmin);
6566 break;
6567 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
6568 if (!u) { /* VQDMULH */
6569 switch (size) {
6570 case 1:
6571 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6572 break;
6573 case 2:
6574 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6575 break;
6576 default: abort();
6578 } else { /* VQRDMULH */
6579 switch (size) {
6580 case 1:
6581 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6582 break;
6583 case 2:
6584 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6585 break;
6586 default: abort();
6589 break;
6590 case NEON_3R_VPADD_VQRDMLAH:
6591 switch (size) {
6592 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
6593 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
6594 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
6595 default: abort();
6597 break;
6598 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
6600 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6601 switch ((u << 2) | size) {
6602 case 0: /* VADD */
6603 case 4: /* VPADD */
6604 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6605 break;
6606 case 2: /* VSUB */
6607 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
6608 break;
6609 case 6: /* VABD */
6610 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
6611 break;
6612 default:
6613 abort();
6615 tcg_temp_free_ptr(fpstatus);
6616 break;
6618 case NEON_3R_FLOAT_MULTIPLY:
6620 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6621 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6622 if (!u) {
6623 tcg_temp_free_i32(tmp2);
6624 tmp2 = neon_load_reg(rd, pass);
6625 if (size == 0) {
6626 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6627 } else {
6628 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6631 tcg_temp_free_ptr(fpstatus);
6632 break;
6634 case NEON_3R_FLOAT_CMP:
6636 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6637 if (!u) {
6638 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6639 } else {
6640 if (size == 0) {
6641 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6642 } else {
6643 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6646 tcg_temp_free_ptr(fpstatus);
6647 break;
6649 case NEON_3R_FLOAT_ACMP:
6651 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6652 if (size == 0) {
6653 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
6654 } else {
6655 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
6657 tcg_temp_free_ptr(fpstatus);
6658 break;
6660 case NEON_3R_FLOAT_MINMAX:
6662 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6663 if (size == 0) {
6664 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
6665 } else {
6666 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
6668 tcg_temp_free_ptr(fpstatus);
6669 break;
6671 case NEON_3R_FLOAT_MISC:
6672 if (u) {
6673 /* VMAXNM/VMINNM */
6674 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6675 if (size == 0) {
6676 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
6677 } else {
6678 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
6680 tcg_temp_free_ptr(fpstatus);
6681 } else {
6682 if (size == 0) {
6683 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6684 } else {
6685 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6688 break;
6689 case NEON_3R_VFM_VQRDMLSH:
6691 /* VFMA, VFMS: fused multiply-add */
6692 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6693 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6694 if (size) {
6695 /* VFMS */
6696 gen_helper_vfp_negs(tmp, tmp);
6698 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6699 tcg_temp_free_i32(tmp3);
6700 tcg_temp_free_ptr(fpstatus);
6701 break;
6703 default:
6704 abort();
6706 tcg_temp_free_i32(tmp2);
6708 /* Save the result. For elementwise operations we can put it
6709 straight into the destination register. For pairwise operations
6710 we have to be careful to avoid clobbering the source operands. */
6711 if (pairwise && rd == rm) {
6712 neon_store_scratch(pass, tmp);
6713 } else {
6714 neon_store_reg(rd, pass, tmp);
6717 } /* for pass */
6718 if (pairwise && rd == rm) {
6719 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6720 tmp = neon_load_scratch(pass);
6721 neon_store_reg(rd, pass, tmp);
6724 /* End of 3 register same size operations. */
6725 } else if (insn & (1 << 4)) {
6726 if ((insn & 0x00380080) != 0) {
6727 /* Two registers and shift. */
6728 op = (insn >> 8) & 0xf;
6729 if (insn & (1 << 7)) {
6730 /* 64-bit shift. */
6731 if (op > 7) {
6732 return 1;
6734 size = 3;
6735 } else {
6736 size = 2;
6737 while ((insn & (1 << (size + 19))) == 0)
6738 size--;
6740 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
6741 if (op < 8) {
6742 /* Shift by immediate:
6743 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
6744 if (q && ((rd | rm) & 1)) {
6745 return 1;
6747 if (!u && (op == 4 || op == 6)) {
6748 return 1;
6750 /* Right shifts are encoded as N - shift, where N is the
6751 element size in bits. */
6752 if (op <= 4) {
6753 shift = shift - (1 << (size + 3));
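/* Worked example: with size == 1 (16-bit elements) an encoded field
 * value of 13 leaves shift = 13 - 16 = -3 here; the negation in the
 * VSHR/VSRA/VSRI cases below turns that back into a right shift by 3.
 */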
6756 switch (op) {
6757 case 0: /* VSHR */
6758 /* Right shift comes here negative. */
6759 shift = -shift;
6760 /* Shifts larger than the element size are architecturally
6761 * valid. An unsigned shift results in all zeros; a signed shift
6762 * results in all sign bits.
6764 if (!u) {
6765 tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
6766 MIN(shift, (8 << size) - 1),
6767 vec_size, vec_size);
6768 } else if (shift >= 8 << size) {
6769 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
6770 } else {
6771 tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
6772 vec_size, vec_size);
6774 return 0;
6776 case 1: /* VSRA */
6777 /* Right shift comes here negative. */
6778 shift = -shift;
6779 /* Shifts larger than the element size are architecturally
6780 * valid. An unsigned shift results in all zeros; a signed shift
6781 * results in all sign bits.
6783 if (!u) {
6784 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
6785 MIN(shift, (8 << size) - 1),
6786 &ssra_op[size]);
6787 } else if (shift >= 8 << size) {
6788 /* rd += 0 */
6789 } else {
6790 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
6791 shift, &usra_op[size]);
6793 return 0;
6795 case 4: /* VSRI */
6796 if (!u) {
6797 return 1;
6799 /* Right shift comes here negative. */
6800 shift = -shift;
6801 /* Shift out of range leaves destination unchanged. */
6802 if (shift < 8 << size) {
6803 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
6804 shift, &sri_op[size]);
6806 return 0;
6808 case 5: /* VSHL, VSLI */
6809 if (u) { /* VSLI */
6810 /* Shift out of range leaves destination unchanged. */
6811 if (shift < 8 << size) {
6812 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
6813 vec_size, shift, &sli_op[size]);
6815 } else { /* VSHL */
6816 /* Shifts larger than the element size are
6817 * architecturally valid and result in zero.
6819 if (shift >= 8 << size) {
6820 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
6821 } else {
6822 tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
6823 vec_size, vec_size);
6826 return 0;
6829 if (size == 3) {
6830 count = q + 1;
6831 } else {
6832 count = q ? 4: 2;
6835 /* To avoid excessive duplication of ops we implement shift
6836 * by immediate using the variable shift operations.
6838 imm = dup_const(size, shift);
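/* For a rounding right shift (VRSHR/VRSRA) by 3 with size == 0, shift
 * is -3 at this point and dup_const() replicates it into every byte
 * lane, giving 0xfdfdfdfdfdfdfdfd; the variable-shift helpers below
 * treat the negative per-element count as a right shift.
 */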
6840 for (pass = 0; pass < count; pass++) {
6841 if (size == 3) {
6842 neon_load_reg64(cpu_V0, rm + pass);
6843 tcg_gen_movi_i64(cpu_V1, imm);
6844 switch (op) {
6845 case 2: /* VRSHR */
6846 case 3: /* VRSRA */
6847 if (u)
6848 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
6849 else
6850 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
6851 break;
6852 case 6: /* VQSHLU */
6853 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6854 cpu_V0, cpu_V1);
6855 break;
6856 case 7: /* VQSHL */
6857 if (u) {
6858 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
6859 cpu_V0, cpu_V1);
6860 } else {
6861 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
6862 cpu_V0, cpu_V1);
6864 break;
6865 default:
6866 g_assert_not_reached();
6868 if (op == 3) {
6869 /* Accumulate. */
6870 neon_load_reg64(cpu_V1, rd + pass);
6871 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6873 neon_store_reg64(cpu_V0, rd + pass);
6874 } else { /* size < 3 */
6875 /* Operands in T0 and T1. */
6876 tmp = neon_load_reg(rm, pass);
6877 tmp2 = tcg_temp_new_i32();
6878 tcg_gen_movi_i32(tmp2, imm);
6879 switch (op) {
6880 case 2: /* VRSHR */
6881 case 3: /* VRSRA */
6882 GEN_NEON_INTEGER_OP(rshl);
6883 break;
6884 case 6: /* VQSHLU */
6885 switch (size) {
6886 case 0:
6887 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6888 tmp, tmp2);
6889 break;
6890 case 1:
6891 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6892 tmp, tmp2);
6893 break;
6894 case 2:
6895 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6896 tmp, tmp2);
6897 break;
6898 default:
6899 abort();
6901 break;
6902 case 7: /* VQSHL */
6903 GEN_NEON_INTEGER_OP_ENV(qshl);
6904 break;
6905 default:
6906 g_assert_not_reached();
6908 tcg_temp_free_i32(tmp2);
6910 if (op == 3) {
6911 /* Accumulate. */
6912 tmp2 = neon_load_reg(rd, pass);
6913 gen_neon_add(size, tmp, tmp2);
6914 tcg_temp_free_i32(tmp2);
6916 neon_store_reg(rd, pass, tmp);
6918 } /* for pass */
6919 } else if (op < 10) {
6920 /* Shift by immediate and narrow:
6921 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
6922 int input_unsigned = (op == 8) ? !u : u;
6923 if (rm & 1) {
6924 return 1;
6926 shift = shift - (1 << (size + 3));
6927 size++;
6928 if (size == 3) {
6929 tmp64 = tcg_const_i64(shift);
6930 neon_load_reg64(cpu_V0, rm);
6931 neon_load_reg64(cpu_V1, rm + 1);
6932 for (pass = 0; pass < 2; pass++) {
6933 TCGv_i64 in;
6934 if (pass == 0) {
6935 in = cpu_V0;
6936 } else {
6937 in = cpu_V1;
6939 if (q) {
6940 if (input_unsigned) {
6941 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
6942 } else {
6943 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
6945 } else {
6946 if (input_unsigned) {
6947 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
6948 } else {
6949 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
6952 tmp = tcg_temp_new_i32();
6953 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6954 neon_store_reg(rd, pass, tmp);
6955 } /* for pass */
6956 tcg_temp_free_i64(tmp64);
6957 } else {
6958 if (size == 1) {
6959 imm = (uint16_t)shift;
6960 imm |= imm << 16;
6961 } else {
6962 /* size == 2 */
6963 imm = (uint32_t)shift;
6965 tmp2 = tcg_const_i32(imm);
6966 tmp4 = neon_load_reg(rm + 1, 0);
6967 tmp5 = neon_load_reg(rm + 1, 1);
6968 for (pass = 0; pass < 2; pass++) {
6969 if (pass == 0) {
6970 tmp = neon_load_reg(rm, 0);
6971 } else {
6972 tmp = tmp4;
6974 gen_neon_shift_narrow(size, tmp, tmp2, q,
6975 input_unsigned);
6976 if (pass == 0) {
6977 tmp3 = neon_load_reg(rm, 1);
6978 } else {
6979 tmp3 = tmp5;
6981 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6982 input_unsigned);
6983 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
6984 tcg_temp_free_i32(tmp);
6985 tcg_temp_free_i32(tmp3);
6986 tmp = tcg_temp_new_i32();
6987 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6988 neon_store_reg(rd, pass, tmp);
6989 } /* for pass */
6990 tcg_temp_free_i32(tmp2);
6992 } else if (op == 10) {
6993 /* VSHLL, VMOVL */
6994 if (q || (rd & 1)) {
6995 return 1;
6997 tmp = neon_load_reg(rm, 0);
6998 tmp2 = neon_load_reg(rm, 1);
6999 for (pass = 0; pass < 2; pass++) {
7000 if (pass == 1)
7001 tmp = tmp2;
7003 gen_neon_widen(cpu_V0, tmp, size, u);
7005 if (shift != 0) {
7006 /* The shift is less than the width of the source
7007 type, so we can just shift the whole register. */
7008 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
7009 /* Widen the result of shift: we need to clear
7010 * the potential overflow bits resulting from
7011 * left bits of the narrow input appearing as
7012 * right bits of the left neighbour narrow
7013 * input. */
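/* Worked example of the mask: for size == 0 and shift == 3,
 * imm = 0xff >> 5 = 0x07, then imm |= imm << 16 gives 0x00070007 and
 * imm64 = 0x0007000700070007, so the AND below clears the low three
 * bits of every 16-bit lane.
 */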
7014 if (size < 2 || !u) {
7015 uint64_t imm64;
7016 if (size == 0) {
7017 imm = (0xffu >> (8 - shift));
7018 imm |= imm << 16;
7019 } else if (size == 1) {
7020 imm = 0xffff >> (16 - shift);
7021 } else {
7022 /* size == 2 */
7023 imm = 0xffffffff >> (32 - shift);
7025 if (size < 2) {
7026 imm64 = imm | (((uint64_t)imm) << 32);
7027 } else {
7028 imm64 = imm;
7030 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
7033 neon_store_reg64(cpu_V0, rd + pass);
7035 } else if (op >= 14) {
7036 /* VCVT fixed-point. */
7037 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
7038 return 1;
7040 /* We have already masked out the must-be-1 top bit of imm6,
7041 * hence this 32-shift where the ARM ARM has 64-imm6.
7043 shift = 32 - shift;
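/* e.g. imm6 = 0b111100 (60) arrives here as shift = 28 (the top bit
 * was stripped by the earlier mask), so the number of fraction bits
 * is 32 - 28 = 4, matching the architectural 64 - imm6.
 */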
7044 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7045 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
7046 if (!(op & 1)) {
7047 if (u)
7048 gen_vfp_ulto(0, shift, 1);
7049 else
7050 gen_vfp_slto(0, shift, 1);
7051 } else {
7052 if (u)
7053 gen_vfp_toul(0, shift, 1);
7054 else
7055 gen_vfp_tosl(0, shift, 1);
7057 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
7059 } else {
7060 return 1;
7062 } else { /* (insn & 0x00380080) == 0 */
7063 int invert, reg_ofs, vec_size;
7065 if (q && (rd & 1)) {
7066 return 1;
7069 op = (insn >> 8) & 0xf;
7070 /* One register and immediate. */
7071 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
7072 invert = (insn & (1 << 5)) != 0;
7073 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
7074 * We choose to not special-case this and will behave as if a
7075 * valid constant encoding of 0 had been given.
7077 switch (op) {
7078 case 0: case 1:
7079 /* no-op */
7080 break;
7081 case 2: case 3:
7082 imm <<= 8;
7083 break;
7084 case 4: case 5:
7085 imm <<= 16;
7086 break;
7087 case 6: case 7:
7088 imm <<= 24;
7089 break;
7090 case 8: case 9:
7091 imm |= imm << 16;
7092 break;
7093 case 10: case 11:
7094 imm = (imm << 8) | (imm << 24);
7095 break;
7096 case 12:
7097 imm = (imm << 8) | 0xff;
7098 break;
7099 case 13:
7100 imm = (imm << 16) | 0xffff;
7101 break;
7102 case 14:
7103 imm |= (imm << 8) | (imm << 16) | (imm << 24);
7104 if (invert) {
7105 imm = ~imm;
7107 break;
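/* case 15 expands the 8-bit value abcdefgh into the 32-bit float
 * immediate a:NOT(b):bbbbb:cdefgh followed by 19 zero bits;
 * e.g. imm = 0x70 expands to 0x3f800000, i.e. 1.0f.
 */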
7108 case 15:
7109 if (invert) {
7110 return 1;
7112 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
7113 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
7114 break;
7116 if (invert) {
7117 imm = ~imm;
7120 reg_ofs = neon_reg_offset(rd, 0);
7121 vec_size = q ? 16 : 8;
7123 if (op & 1 && op < 12) {
7124 if (invert) {
7125 /* The immediate value has already been inverted,
7126 * so BIC becomes AND.
7128 tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
7129 vec_size, vec_size);
7130 } else {
7131 tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
7132 vec_size, vec_size);
7134 } else {
7135 /* VMOV, VMVN. */
7136 if (op == 14 && invert) {
7137 TCGv_i64 t64 = tcg_temp_new_i64();
7139 for (pass = 0; pass <= q; ++pass) {
7140 uint64_t val = 0;
7141 int n;
7143 for (n = 0; n < 8; n++) {
7144 if (imm & (1 << (n + pass * 8))) {
7145 val |= 0xffull << (n * 8);
7148 tcg_gen_movi_i64(t64, val);
7149 neon_store_reg64(t64, rd + pass);
7151 tcg_temp_free_i64(t64);
7152 } else {
7153 tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
7157 } else { /* (insn & 0x00800010 == 0x00800000) */
7158 if (size != 3) {
7159 op = (insn >> 8) & 0xf;
7160 if ((insn & (1 << 6)) == 0) {
7161 /* Three registers of different lengths. */
7162 int src1_wide;
7163 int src2_wide;
7164 int prewiden;
7165 /* undefreq: bit 0 : UNDEF if size == 0
7166 * bit 1 : UNDEF if size == 1
7167 * bit 2 : UNDEF if size == 2
7168 * bit 3 : UNDEF if U == 1
7169 * Note that [2:0] set implies 'always UNDEF'
7171 int undefreq;
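/* e.g. undefreq == 9 in the table below (VQDMLAL/VQDMLSL) sets bits 0
 * and 3, i.e. UNDEF for size == 0 or U == 1; 0xa (polynomial VMULL)
 * means UNDEF for size == 1 or U == 1.
 */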
7172 /* prewiden, src1_wide, src2_wide, undefreq */
7173 static const int neon_3reg_wide[16][4] = {
7174 {1, 0, 0, 0}, /* VADDL */
7175 {1, 1, 0, 0}, /* VADDW */
7176 {1, 0, 0, 0}, /* VSUBL */
7177 {1, 1, 0, 0}, /* VSUBW */
7178 {0, 1, 1, 0}, /* VADDHN */
7179 {0, 0, 0, 0}, /* VABAL */
7180 {0, 1, 1, 0}, /* VSUBHN */
7181 {0, 0, 0, 0}, /* VABDL */
7182 {0, 0, 0, 0}, /* VMLAL */
7183 {0, 0, 0, 9}, /* VQDMLAL */
7184 {0, 0, 0, 0}, /* VMLSL */
7185 {0, 0, 0, 9}, /* VQDMLSL */
7186 {0, 0, 0, 0}, /* Integer VMULL */
7187 {0, 0, 0, 1}, /* VQDMULL */
7188 {0, 0, 0, 0xa}, /* Polynomial VMULL */
7189 {0, 0, 0, 7}, /* Reserved: always UNDEF */
7192 prewiden = neon_3reg_wide[op][0];
7193 src1_wide = neon_3reg_wide[op][1];
7194 src2_wide = neon_3reg_wide[op][2];
7195 undefreq = neon_3reg_wide[op][3];
7197 if ((undefreq & (1 << size)) ||
7198 ((undefreq & 8) && u)) {
7199 return 1;
7201 if ((src1_wide && (rn & 1)) ||
7202 (src2_wide && (rm & 1)) ||
7203 (!src2_wide && (rd & 1))) {
7204 return 1;
7207 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
7208 * outside the loop below as it only performs a single pass.
7210 if (op == 14 && size == 2) {
7211 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
7213 if (!dc_isar_feature(aa32_pmull, s)) {
7214 return 1;
7216 tcg_rn = tcg_temp_new_i64();
7217 tcg_rm = tcg_temp_new_i64();
7218 tcg_rd = tcg_temp_new_i64();
7219 neon_load_reg64(tcg_rn, rn);
7220 neon_load_reg64(tcg_rm, rm);
7221 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
7222 neon_store_reg64(tcg_rd, rd);
7223 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
7224 neon_store_reg64(tcg_rd, rd + 1);
7225 tcg_temp_free_i64(tcg_rn);
7226 tcg_temp_free_i64(tcg_rm);
7227 tcg_temp_free_i64(tcg_rd);
7228 return 0;
7231 /* Avoid overlapping operands. Wide source operands are
7232 always aligned so will never overlap with wide
7233 destinations in problematic ways. */
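/* Concretely: when the narrow destination aliases a narrow source, the
 * second half of that source is parked in a scratch slot here and
 * re-read on pass 1, after pass 0 has already overwritten Dd.
 */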
7234 if (rd == rm && !src2_wide) {
7235 tmp = neon_load_reg(rm, 1);
7236 neon_store_scratch(2, tmp);
7237 } else if (rd == rn && !src1_wide) {
7238 tmp = neon_load_reg(rn, 1);
7239 neon_store_scratch(2, tmp);
7241 tmp3 = NULL;
7242 for (pass = 0; pass < 2; pass++) {
7243 if (src1_wide) {
7244 neon_load_reg64(cpu_V0, rn + pass);
7245 tmp = NULL;
7246 } else {
7247 if (pass == 1 && rd == rn) {
7248 tmp = neon_load_scratch(2);
7249 } else {
7250 tmp = neon_load_reg(rn, pass);
7252 if (prewiden) {
7253 gen_neon_widen(cpu_V0, tmp, size, u);
7256 if (src2_wide) {
7257 neon_load_reg64(cpu_V1, rm + pass);
7258 tmp2 = NULL;
7259 } else {
7260 if (pass == 1 && rd == rm) {
7261 tmp2 = neon_load_scratch(2);
7262 } else {
7263 tmp2 = neon_load_reg(rm, pass);
7265 if (prewiden) {
7266 gen_neon_widen(cpu_V1, tmp2, size, u);
7269 switch (op) {
7270 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
7271 gen_neon_addl(size);
7272 break;
7273 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
7274 gen_neon_subl(size);
7275 break;
7276 case 5: case 7: /* VABAL, VABDL */
7277 switch ((size << 1) | u) {
7278 case 0:
7279 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
7280 break;
7281 case 1:
7282 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
7283 break;
7284 case 2:
7285 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
7286 break;
7287 case 3:
7288 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
7289 break;
7290 case 4:
7291 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
7292 break;
7293 case 5:
7294 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
7295 break;
7296 default: abort();
7298 tcg_temp_free_i32(tmp2);
7299 tcg_temp_free_i32(tmp);
7300 break;
7301 case 8: case 9: case 10: case 11: case 12: case 13:
7302 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
7303 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
7304 break;
7305 case 14: /* Polynomial VMULL */
7306 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7307 tcg_temp_free_i32(tmp2);
7308 tcg_temp_free_i32(tmp);
7309 break;
7310 default: /* 15 is RESERVED: caught earlier */
7311 abort();
7313 if (op == 13) {
7314 /* VQDMULL */
7315 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7316 neon_store_reg64(cpu_V0, rd + pass);
7317 } else if (op == 5 || (op >= 8 && op <= 11)) {
7318 /* Accumulate. */
7319 neon_load_reg64(cpu_V1, rd + pass);
7320 switch (op) {
7321 case 10: /* VMLSL */
7322 gen_neon_negl(cpu_V0, size);
7323 /* Fall through */
7324 case 5: case 8: /* VABAL, VMLAL */
7325 gen_neon_addl(size);
7326 break;
7327 case 9: case 11: /* VQDMLAL, VQDMLSL */
7328 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7329 if (op == 11) {
7330 gen_neon_negl(cpu_V0, size);
7332 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
7333 break;
7334 default:
7335 abort();
7337 neon_store_reg64(cpu_V0, rd + pass);
7338 } else if (op == 4 || op == 6) {
7339 /* Narrowing operation. */
7340 tmp = tcg_temp_new_i32();
7341 if (!u) {
7342 switch (size) {
7343 case 0:
7344 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
7345 break;
7346 case 1:
7347 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
7348 break;
7349 case 2:
7350 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
7351 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
7352 break;
7353 default: abort();
7355 } else {
7356 switch (size) {
7357 case 0:
7358 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
7359 break;
7360 case 1:
7361 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
7362 break;
7363 case 2:
7364 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
7365 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
7366 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
7367 break;
7368 default: abort();
7371 if (pass == 0) {
7372 tmp3 = tmp;
7373 } else {
7374 neon_store_reg(rd, 0, tmp3);
7375 neon_store_reg(rd, 1, tmp);
7377 } else {
7378 /* Write back the result. */
7379 neon_store_reg64(cpu_V0, rd + pass);
7382 } else {
7383 /* Two registers and a scalar. NB that for ops of this form
7384 * the ARM ARM labels bit 24 as Q, but it is in our variable
7385 * 'u', not 'q'.
7387 if (size == 0) {
7388 return 1;
7390 switch (op) {
7391 case 1: /* Float VMLA scalar */
7392 case 5: /* Floating point VMLS scalar */
7393 case 9: /* Floating point VMUL scalar */
7394 if (size == 1) {
7395 return 1;
7397 /* fall through */
7398 case 0: /* Integer VMLA scalar */
7399 case 4: /* Integer VMLS scalar */
7400 case 8: /* Integer VMUL scalar */
7401 case 12: /* VQDMULH scalar */
7402 case 13: /* VQRDMULH scalar */
7403 if (u && ((rd | rn) & 1)) {
7404 return 1;
7406 tmp = neon_get_scalar(size, rm);
7407 neon_store_scratch(0, tmp);
7408 for (pass = 0; pass < (u ? 4 : 2); pass++) {
7409 tmp = neon_load_scratch(0);
7410 tmp2 = neon_load_reg(rn, pass);
7411 if (op == 12) {
7412 if (size == 1) {
7413 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
7414 } else {
7415 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
7417 } else if (op == 13) {
7418 if (size == 1) {
7419 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
7420 } else {
7421 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
7423 } else if (op & 1) {
7424 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7425 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
7426 tcg_temp_free_ptr(fpstatus);
7427 } else {
7428 switch (size) {
7429 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
7430 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
7431 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
7432 default: abort();
7435 tcg_temp_free_i32(tmp2);
7436 if (op < 8) {
7437 /* Accumulate. */
7438 tmp2 = neon_load_reg(rd, pass);
7439 switch (op) {
7440 case 0:
7441 gen_neon_add(size, tmp, tmp2);
7442 break;
7443 case 1:
7445 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7446 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
7447 tcg_temp_free_ptr(fpstatus);
7448 break;
7450 case 4:
7451 gen_neon_rsb(size, tmp, tmp2);
7452 break;
7453 case 5:
7455 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7456 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
7457 tcg_temp_free_ptr(fpstatus);
7458 break;
7460 default:
7461 abort();
7463 tcg_temp_free_i32(tmp2);
7465 neon_store_reg(rd, pass, tmp);
7467 break;
7468 case 3: /* VQDMLAL scalar */
7469 case 7: /* VQDMLSL scalar */
7470 case 11: /* VQDMULL scalar */
7471 if (u == 1) {
7472 return 1;
7474 /* fall through */
7475 case 2: /* VMLAL scalar */
7476 case 6: /* VMLSL scalar */
7477 case 10: /* VMULL scalar */
7478 if (rd & 1) {
7479 return 1;
7481 tmp2 = neon_get_scalar(size, rm);
7482 /* We need a copy of tmp2 because gen_neon_mull
7483 * deletes it during pass 0. */
7484 tmp4 = tcg_temp_new_i32();
7485 tcg_gen_mov_i32(tmp4, tmp2);
7486 tmp3 = neon_load_reg(rn, 1);
7488 for (pass = 0; pass < 2; pass++) {
7489 if (pass == 0) {
7490 tmp = neon_load_reg(rn, 0);
7491 } else {
7492 tmp = tmp3;
7493 tmp2 = tmp4;
7495 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
7496 if (op != 11) {
7497 neon_load_reg64(cpu_V1, rd + pass);
7499 switch (op) {
7500 case 6:
7501 gen_neon_negl(cpu_V0, size);
7502 /* Fall through */
7503 case 2:
7504 gen_neon_addl(size);
7505 break;
7506 case 3: case 7:
7507 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7508 if (op == 7) {
7509 gen_neon_negl(cpu_V0, size);
7511 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
7512 break;
7513 case 10:
7514 /* no-op */
7515 break;
7516 case 11:
7517 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7518 break;
7519 default:
7520 abort();
7522 neon_store_reg64(cpu_V0, rd + pass);
7524 break;
7525 case 14: /* VQRDMLAH scalar */
7526 case 15: /* VQRDMLSH scalar */
7528 NeonGenThreeOpEnvFn *fn;
7530 if (!dc_isar_feature(aa32_rdm, s)) {
7531 return 1;
7533 if (u && ((rd | rn) & 1)) {
7534 return 1;
7536 if (op == 14) {
7537 if (size == 1) {
7538 fn = gen_helper_neon_qrdmlah_s16;
7539 } else {
7540 fn = gen_helper_neon_qrdmlah_s32;
7542 } else {
7543 if (size == 1) {
7544 fn = gen_helper_neon_qrdmlsh_s16;
7545 } else {
7546 fn = gen_helper_neon_qrdmlsh_s32;
7550 tmp2 = neon_get_scalar(size, rm);
7551 for (pass = 0; pass < (u ? 4 : 2); pass++) {
7552 tmp = neon_load_reg(rn, pass);
7553 tmp3 = neon_load_reg(rd, pass);
7554 fn(tmp, cpu_env, tmp, tmp2, tmp3);
7555 tcg_temp_free_i32(tmp3);
7556 neon_store_reg(rd, pass, tmp);
7558 tcg_temp_free_i32(tmp2);
7560 break;
7561 default:
7562 g_assert_not_reached();
7565 } else { /* size == 3 */
7566 if (!u) {
7567 /* Extract. */
7568 imm = (insn >> 8) & 0xf;
7570 if (imm > 7 && !q)
7571 return 1;
7573 if (q && ((rd | rn | rm) & 1)) {
7574 return 1;
7577 if (imm == 0) {
7578 neon_load_reg64(cpu_V0, rn);
7579 if (q) {
7580 neon_load_reg64(cpu_V1, rn + 1);
7582 } else if (imm == 8) {
7583 neon_load_reg64(cpu_V0, rn + 1);
7584 if (q) {
7585 neon_load_reg64(cpu_V1, rm);
7587 } else if (q) {
7588 tmp64 = tcg_temp_new_i64();
7589 if (imm < 8) {
7590 neon_load_reg64(cpu_V0, rn);
7591 neon_load_reg64(tmp64, rn + 1);
7592 } else {
7593 neon_load_reg64(cpu_V0, rn + 1);
7594 neon_load_reg64(tmp64, rm);
7596 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
7597 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
7598 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7599 if (imm < 8) {
7600 neon_load_reg64(cpu_V1, rm);
7601 } else {
7602 neon_load_reg64(cpu_V1, rm + 1);
7603 imm -= 8;
7605 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
7606 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
7607 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
7608 tcg_temp_free_i64(tmp64);
7609 } else {
7610 /* BUGFIX */
7611 neon_load_reg64(cpu_V0, rn);
7612 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
7613 neon_load_reg64(cpu_V1, rm);
7614 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
7615 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7617 neon_store_reg64(cpu_V0, rd);
7618 if (q) {
7619 neon_store_reg64(cpu_V1, rd + 1);
7621 } else if ((insn & (1 << 11)) == 0) {
7622 /* Two register misc. */
7623 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
7624 size = (insn >> 18) & 3;
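/* op is the 6-bit two-reg-misc index {insn[17:16], insn[10:7]} used
 * for the neon_2rm_sizes[] lookup and the NEON_2RM_* cases below.
 */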
7625 /* UNDEF for unknown op values and bad op-size combinations */
7626 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
7627 return 1;
7629 if (neon_2rm_is_v8_op(op) &&
7630 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7631 return 1;
7633 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7634 q && ((rm | rd) & 1)) {
7635 return 1;
7637 switch (op) {
7638 case NEON_2RM_VREV64:
7639 for (pass = 0; pass < (q ? 2 : 1); pass++) {
7640 tmp = neon_load_reg(rm, pass * 2);
7641 tmp2 = neon_load_reg(rm, pass * 2 + 1);
7642 switch (size) {
7643 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7644 case 1: gen_swap_half(tmp); break;
7645 case 2: /* no-op */ break;
7646 default: abort();
7648 neon_store_reg(rd, pass * 2 + 1, tmp);
7649 if (size == 2) {
7650 neon_store_reg(rd, pass * 2, tmp2);
7651 } else {
7652 switch (size) {
7653 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7654 case 1: gen_swap_half(tmp2); break;
7655 default: abort();
7657 neon_store_reg(rd, pass * 2, tmp2);
7660 break;
7661 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7662 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
7663 for (pass = 0; pass < q + 1; pass++) {
7664 tmp = neon_load_reg(rm, pass * 2);
7665 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7666 tmp = neon_load_reg(rm, pass * 2 + 1);
7667 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7668 switch (size) {
7669 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7670 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7671 case 2: tcg_gen_add_i64(CPU_V001); break;
7672 default: abort();
7674 if (op >= NEON_2RM_VPADAL) {
7675 /* Accumulate. */
7676 neon_load_reg64(cpu_V1, rd + pass);
7677 gen_neon_addl(size);
7679 neon_store_reg64(cpu_V0, rd + pass);
7681 break;
7682 case NEON_2RM_VTRN:
7683 if (size == 2) {
7684 int n;
7685 for (n = 0; n < (q ? 4 : 2); n += 2) {
7686 tmp = neon_load_reg(rm, n);
7687 tmp2 = neon_load_reg(rd, n + 1);
7688 neon_store_reg(rm, n, tmp2);
7689 neon_store_reg(rd, n + 1, tmp);
7691 } else {
7692 goto elementwise;
7694 break;
7695 case NEON_2RM_VUZP:
7696 if (gen_neon_unzip(rd, rm, size, q)) {
7697 return 1;
7699 break;
7700 case NEON_2RM_VZIP:
7701 if (gen_neon_zip(rd, rm, size, q)) {
7702 return 1;
7704 break;
7705 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7706 /* also VQMOVUN; op field and mnemonics don't line up */
7707 if (rm & 1) {
7708 return 1;
7710 tmp2 = NULL;
7711 for (pass = 0; pass < 2; pass++) {
7712 neon_load_reg64(cpu_V0, rm + pass);
7713 tmp = tcg_temp_new_i32();
7714 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7715 tmp, cpu_V0);
7716 if (pass == 0) {
7717 tmp2 = tmp;
7718 } else {
7719 neon_store_reg(rd, 0, tmp2);
7720 neon_store_reg(rd, 1, tmp);
7723 break;
7724 case NEON_2RM_VSHLL:
7725 if (q || (rd & 1)) {
7726 return 1;
7728 tmp = neon_load_reg(rm, 0);
7729 tmp2 = neon_load_reg(rm, 1);
7730 for (pass = 0; pass < 2; pass++) {
7731 if (pass == 1)
7732 tmp = tmp2;
7733 gen_neon_widen(cpu_V0, tmp, size, 1);
7734 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
7735 neon_store_reg64(cpu_V0, rd + pass);
7737 break;
7738 case NEON_2RM_VCVT_F16_F32:
7740 TCGv_ptr fpst;
7741 TCGv_i32 ahp;
7743 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
7744 q || (rm & 1)) {
7745 return 1;
7747 tmp = tcg_temp_new_i32();
7748 tmp2 = tcg_temp_new_i32();
7749 fpst = get_fpstatus_ptr(true);
7750 ahp = get_ahp_flag();
7751 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
7752 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
7753 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
7754 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
7755 tcg_gen_shli_i32(tmp2, tmp2, 16);
7756 tcg_gen_or_i32(tmp2, tmp2, tmp);
7757 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
7758 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
7759 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7760 neon_store_reg(rd, 0, tmp2);
7761 tmp2 = tcg_temp_new_i32();
7762 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
7763 tcg_gen_shli_i32(tmp2, tmp2, 16);
7764 tcg_gen_or_i32(tmp2, tmp2, tmp);
7765 neon_store_reg(rd, 1, tmp2);
7766 tcg_temp_free_i32(tmp);
7767 tcg_temp_free_i32(ahp);
7768 tcg_temp_free_ptr(fpst);
7769 break;
7771 case NEON_2RM_VCVT_F32_F16:
7773 TCGv_ptr fpst;
7774 TCGv_i32 ahp;
7775 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
7776 q || (rd & 1)) {
7777 return 1;
7779 fpst = get_fpstatus_ptr(true);
7780 ahp = get_ahp_flag();
7781 tmp3 = tcg_temp_new_i32();
7782 tmp = neon_load_reg(rm, 0);
7783 tmp2 = neon_load_reg(rm, 1);
7784 tcg_gen_ext16u_i32(tmp3, tmp);
7785 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7786 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7787 tcg_gen_shri_i32(tmp3, tmp, 16);
7788 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7789 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7790 tcg_temp_free_i32(tmp);
7791 tcg_gen_ext16u_i32(tmp3, tmp2);
7792 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7793 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7794 tcg_gen_shri_i32(tmp3, tmp2, 16);
7795 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7796 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7797 tcg_temp_free_i32(tmp2);
7798 tcg_temp_free_i32(tmp3);
7799 tcg_temp_free_i32(ahp);
7800 tcg_temp_free_ptr(fpst);
7801 break;
7803 case NEON_2RM_AESE: case NEON_2RM_AESMC:
7804 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
7805 return 1;
7807 ptr1 = vfp_reg_ptr(true, rd);
7808 ptr2 = vfp_reg_ptr(true, rm);
7810 /* Bit 6 is the lowest opcode bit; it distinguishes between
7811 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7813 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7815 if (op == NEON_2RM_AESE) {
7816 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
7817 } else {
7818 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
7820 tcg_temp_free_ptr(ptr1);
7821 tcg_temp_free_ptr(ptr2);
7822 tcg_temp_free_i32(tmp3);
7823 break;
7824 case NEON_2RM_SHA1H:
7825 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
7826 return 1;
7828 ptr1 = vfp_reg_ptr(true, rd);
7829 ptr2 = vfp_reg_ptr(true, rm);
7831 gen_helper_crypto_sha1h(ptr1, ptr2);
7833 tcg_temp_free_ptr(ptr1);
7834 tcg_temp_free_ptr(ptr2);
7835 break;
7836 case NEON_2RM_SHA1SU1:
7837 if ((rm | rd) & 1) {
7838 return 1;
7840 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7841 if (q) {
7842 if (!dc_isar_feature(aa32_sha2, s)) {
7843 return 1;
7845 } else if (!dc_isar_feature(aa32_sha1, s)) {
7846 return 1;
7848 ptr1 = vfp_reg_ptr(true, rd);
7849 ptr2 = vfp_reg_ptr(true, rm);
7850 if (q) {
7851 gen_helper_crypto_sha256su0(ptr1, ptr2);
7852 } else {
7853 gen_helper_crypto_sha1su1(ptr1, ptr2);
7855 tcg_temp_free_ptr(ptr1);
7856 tcg_temp_free_ptr(ptr2);
7857 break;
7859 case NEON_2RM_VMVN:
7860 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
7861 break;
7862 case NEON_2RM_VNEG:
7863 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
7864 break;
7866 default:
7867 elementwise:
7868 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7869 if (neon_2rm_is_float_op(op)) {
7870 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7871 neon_reg_offset(rm, pass));
7872 tmp = NULL;
7873 } else {
7874 tmp = neon_load_reg(rm, pass);
7876 switch (op) {
7877 case NEON_2RM_VREV32:
7878 switch (size) {
7879 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7880 case 1: gen_swap_half(tmp); break;
7881 default: abort();
7883 break;
7884 case NEON_2RM_VREV16:
7885 gen_rev16(tmp);
7886 break;
7887 case NEON_2RM_VCLS:
7888 switch (size) {
7889 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7890 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7891 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
7892 default: abort();
7894 break;
7895 case NEON_2RM_VCLZ:
7896 switch (size) {
7897 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7898 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7899 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
7900 default: abort();
7902 break;
7903 case NEON_2RM_VCNT:
7904 gen_helper_neon_cnt_u8(tmp, tmp);
7905 break;
7906 case NEON_2RM_VQABS:
7907 switch (size) {
7908 case 0:
7909 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7910 break;
7911 case 1:
7912 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7913 break;
7914 case 2:
7915 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7916 break;
7917 default: abort();
7919 break;
7920 case NEON_2RM_VQNEG:
7921 switch (size) {
7922 case 0:
7923 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7924 break;
7925 case 1:
7926 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7927 break;
7928 case 2:
7929 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7930 break;
7931 default: abort();
7933 break;
7934 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
7935 tmp2 = tcg_const_i32(0);
7936 switch(size) {
7937 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7938 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7939 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
7940 default: abort();
7942 tcg_temp_free_i32(tmp2);
7943 if (op == NEON_2RM_VCLE0) {
7944 tcg_gen_not_i32(tmp, tmp);
7946 break;
7947 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
7948 tmp2 = tcg_const_i32(0);
7949 switch(size) {
7950 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7951 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7952 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
7953 default: abort();
7955 tcg_temp_free_i32(tmp2);
7956 if (op == NEON_2RM_VCLT0) {
7957 tcg_gen_not_i32(tmp, tmp);
7959 break;
7960 case NEON_2RM_VCEQ0:
7961 tmp2 = tcg_const_i32(0);
7962 switch(size) {
7963 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7964 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7965 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
7966 default: abort();
7968 tcg_temp_free_i32(tmp2);
7969 break;
7970 case NEON_2RM_VABS:
7971 switch(size) {
7972 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7973 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7974 case 2: tcg_gen_abs_i32(tmp, tmp); break;
7975 default: abort();
7977 break;
7978 case NEON_2RM_VCGT0_F:
7980 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7981 tmp2 = tcg_const_i32(0);
7982 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
7983 tcg_temp_free_i32(tmp2);
7984 tcg_temp_free_ptr(fpstatus);
7985 break;
7987 case NEON_2RM_VCGE0_F:
7989 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7990 tmp2 = tcg_const_i32(0);
7991 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
7992 tcg_temp_free_i32(tmp2);
7993 tcg_temp_free_ptr(fpstatus);
7994 break;
7996 case NEON_2RM_VCEQ0_F:
7998 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7999 tmp2 = tcg_const_i32(0);
8000 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
8001 tcg_temp_free_i32(tmp2);
8002 tcg_temp_free_ptr(fpstatus);
8003 break;
8005 case NEON_2RM_VCLE0_F:
8007 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8008 tmp2 = tcg_const_i32(0);
8009 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
8010 tcg_temp_free_i32(tmp2);
8011 tcg_temp_free_ptr(fpstatus);
8012 break;
8014 case NEON_2RM_VCLT0_F:
8016 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8017 tmp2 = tcg_const_i32(0);
8018 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
8019 tcg_temp_free_i32(tmp2);
8020 tcg_temp_free_ptr(fpstatus);
8021 break;
8023 case NEON_2RM_VABS_F:
8024 gen_vfp_abs(0);
8025 break;
8026 case NEON_2RM_VNEG_F:
8027 gen_vfp_neg(0);
8028 break;
8029 case NEON_2RM_VSWP:
8030 tmp2 = neon_load_reg(rd, pass);
8031 neon_store_reg(rm, pass, tmp2);
8032 break;
8033 case NEON_2RM_VTRN:
8034 tmp2 = neon_load_reg(rd, pass);
8035 switch (size) {
8036 case 0: gen_neon_trn_u8(tmp, tmp2); break;
8037 case 1: gen_neon_trn_u16(tmp, tmp2); break;
8038 default: abort();
8040 neon_store_reg(rm, pass, tmp2);
8041 break;
8042 case NEON_2RM_VRINTN:
8043 case NEON_2RM_VRINTA:
8044 case NEON_2RM_VRINTM:
8045 case NEON_2RM_VRINTP:
8046 case NEON_2RM_VRINTZ:
8048 TCGv_i32 tcg_rmode;
8049 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8050 int rmode;
8052 if (op == NEON_2RM_VRINTZ) {
8053 rmode = FPROUNDING_ZERO;
8054 } else {
8055 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
8058 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
8059 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8060 cpu_env);
8061 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
8062 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8063 cpu_env);
8064 tcg_temp_free_ptr(fpstatus);
8065 tcg_temp_free_i32(tcg_rmode);
8066 break;
8068 case NEON_2RM_VRINTX:
8070 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8071 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
8072 tcg_temp_free_ptr(fpstatus);
8073 break;
8075 case NEON_2RM_VCVTAU:
8076 case NEON_2RM_VCVTAS:
8077 case NEON_2RM_VCVTNU:
8078 case NEON_2RM_VCVTNS:
8079 case NEON_2RM_VCVTPU:
8080 case NEON_2RM_VCVTPS:
8081 case NEON_2RM_VCVTMU:
8082 case NEON_2RM_VCVTMS:
8084 bool is_signed = !extract32(insn, 7, 1);
8085 TCGv_ptr fpst = get_fpstatus_ptr(1);
8086 TCGv_i32 tcg_rmode, tcg_shift;
8087 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
8089 tcg_shift = tcg_const_i32(0);
8090 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
8091 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8092 cpu_env);
8094 if (is_signed) {
8095 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
8096 tcg_shift, fpst);
8097 } else {
8098 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
8099 tcg_shift, fpst);
8102 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8103 cpu_env);
8104 tcg_temp_free_i32(tcg_rmode);
8105 tcg_temp_free_i32(tcg_shift);
8106 tcg_temp_free_ptr(fpst);
8107 break;
8109 case NEON_2RM_VRECPE:
8111 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8112 gen_helper_recpe_u32(tmp, tmp, fpstatus);
8113 tcg_temp_free_ptr(fpstatus);
8114 break;
8116 case NEON_2RM_VRSQRTE:
8118 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8119 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
8120 tcg_temp_free_ptr(fpstatus);
8121 break;
8123 case NEON_2RM_VRECPE_F:
8125 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8126 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
8127 tcg_temp_free_ptr(fpstatus);
8128 break;
8130 case NEON_2RM_VRSQRTE_F:
8132 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8133 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
8134 tcg_temp_free_ptr(fpstatus);
8135 break;
8137 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
8138 gen_vfp_sito(0, 1);
8139 break;
8140 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
8141 gen_vfp_uito(0, 1);
8142 break;
8143 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
8144 gen_vfp_tosiz(0, 1);
8145 break;
8146 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
8147 gen_vfp_touiz(0, 1);
8148 break;
8149 default:
8150 /* Reserved op values were caught by the
8151 * neon_2rm_sizes[] check earlier.
8153 abort();
8155 if (neon_2rm_is_float_op(op)) {
8156 tcg_gen_st_f32(cpu_F0s, cpu_env,
8157 neon_reg_offset(rd, pass));
8158 } else {
8159 neon_store_reg(rd, pass, tmp);
8162 break;
8164 } else if ((insn & (1 << 10)) == 0) {
8165 /* VTBL, VTBX. */
8166 int n = ((insn >> 8) & 3) + 1;
8167 if ((rn + n) > 32) {
8168 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
8169 * helper function running off the end of the register file.
8171 return 1;
8173 n <<= 3;
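/* n is now the table size in bytes: the len field selected 1..4
 * consecutive D registers (8..32 bytes), and the earlier rn + n > 32
 * check (done while n still counted registers) rejected lists that
 * would run past D31.
 */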
8174 if (insn & (1 << 6)) {
8175 tmp = neon_load_reg(rd, 0);
8176 } else {
8177 tmp = tcg_temp_new_i32();
8178 tcg_gen_movi_i32(tmp, 0);
8180 tmp2 = neon_load_reg(rm, 0);
8181 ptr1 = vfp_reg_ptr(true, rn);
8182 tmp5 = tcg_const_i32(n);
8183 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
8184 tcg_temp_free_i32(tmp);
8185 if (insn & (1 << 6)) {
8186 tmp = neon_load_reg(rd, 1);
8187 } else {
8188 tmp = tcg_temp_new_i32();
8189 tcg_gen_movi_i32(tmp, 0);
8191 tmp3 = neon_load_reg(rm, 1);
8192 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
8193 tcg_temp_free_i32(tmp5);
8194 tcg_temp_free_ptr(ptr1);
8195 neon_store_reg(rd, 0, tmp2);
8196 neon_store_reg(rd, 1, tmp3);
8197 tcg_temp_free_i32(tmp);
8198 } else if ((insn & 0x380) == 0) {
8199 /* VDUP */
8200 int element;
8201 TCGMemOp size;
8203 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
8204 return 1;
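/* The scalar is encoded in insn[19:16]: the lowest set bit picks the
 * element size (bit 16 -> bytes, bit 17 -> halfwords, otherwise words)
 * and the bits above it give the element index; e.g. insn[19:16] =
 * 0b0011 selects byte element 1.
 */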
8206 if (insn & (1 << 16)) {
8207 size = MO_8;
8208 element = (insn >> 17) & 7;
8209 } else if (insn & (1 << 17)) {
8210 size = MO_16;
8211 element = (insn >> 18) & 3;
8212 } else {
8213 size = MO_32;
8214 element = (insn >> 19) & 1;
8216 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
8217 neon_element_offset(rm, element, size),
8218 q ? 16 : 8, q ? 16 : 8);
8219 } else {
8220 return 1;
8224 return 0;
8227 /* Advanced SIMD three registers of the same length extension.
8228 * 31 25 23 22 20 16 12 11 10 9 8 3 0
8229 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
8230 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
8231 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
8233 static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
8235 gen_helper_gvec_3 *fn_gvec = NULL;
8236 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
8237 int rd, rn, rm, opr_sz;
8238 int data = 0;
8239 bool q;
8241 q = extract32(insn, 6, 1);
8242 VFP_DREG_D(rd, insn);
8243 VFP_DREG_N(rn, insn);
8244 VFP_DREG_M(rm, insn);
8245 if ((rd | rn | rm) & q) {
8246 return 1;
8249 if ((insn & 0xfe200f10) == 0xfc200800) {
8250 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
8251 int size = extract32(insn, 20, 1);
8252 data = extract32(insn, 23, 2); /* rot */
8253 if (!dc_isar_feature(aa32_vcma, s)
8254 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8255 return 1;
8257 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
8258 } else if ((insn & 0xfea00f10) == 0xfc800800) {
8259 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
8260 int size = extract32(insn, 20, 1);
8261 data = extract32(insn, 24, 1); /* rot */
8262 if (!dc_isar_feature(aa32_vcma, s)
8263 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8264 return 1;
8266 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
8267 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
8268 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
8269 bool u = extract32(insn, 4, 1);
8270 if (!dc_isar_feature(aa32_dp, s)) {
8271 return 1;
8273 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
8274 } else {
8275 return 1;
8278 if (s->fp_excp_el) {
8279 gen_exception_insn(s, 4, EXCP_UDEF,
8280 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
8281 return 0;
8283 if (!s->vfp_enabled) {
8284 return 1;
8287 opr_sz = (1 + q) * 8;
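/* opr_sz is the operation size in bytes: 8 for a D-register (Q == 0)
 * operation, 16 for a Q-register one.
 */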
8288 if (fn_gvec_ptr) {
8289 TCGv_ptr fpst = get_fpstatus_ptr(1);
8290 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
8291 vfp_reg_offset(1, rn),
8292 vfp_reg_offset(1, rm), fpst,
8293 opr_sz, opr_sz, data, fn_gvec_ptr);
8294 tcg_temp_free_ptr(fpst);
8295 } else {
8296 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
8297 vfp_reg_offset(1, rn),
8298 vfp_reg_offset(1, rm),
8299 opr_sz, opr_sz, data, fn_gvec);
8301 return 0;
8304 /* Advanced SIMD two registers and a scalar extension.
8305 * 31 24 23 22 20 16 12 11 10 9 8 3 0
8306 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
8307 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
8308 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
8312 static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
8314 gen_helper_gvec_3 *fn_gvec = NULL;
8315 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
8316 int rd, rn, rm, opr_sz, data;
8317 bool q;
8319 q = extract32(insn, 6, 1);
8320 VFP_DREG_D(rd, insn);
8321 VFP_DREG_N(rn, insn);
8322 if ((rd | rn) & q) {
8323 return 1;
8326 if ((insn & 0xff000f10) == 0xfe000800) {
8327 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
8328 int rot = extract32(insn, 20, 2);
8329 int size = extract32(insn, 23, 1);
8330 int index;
8332 if (!dc_isar_feature(aa32_vcma, s)) {
8333 return 1;
8335 if (size == 0) {
8336 if (!dc_isar_feature(aa32_fp16_arith, s)) {
8337 return 1;
8339 /* For fp16, rm is just Vm, and index is M. */
8340 rm = extract32(insn, 0, 4);
8341 index = extract32(insn, 5, 1);
8342 } else {
8343 /* For fp32, rm is the usual M:Vm, and index is 0. */
8344 VFP_DREG_M(rm, insn);
8345 index = 0;
8347 data = (index << 2) | rot;
8348 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
8349 : gen_helper_gvec_fcmlah_idx);
8350 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
8351 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
8352 int u = extract32(insn, 4, 1);
8353 if (!dc_isar_feature(aa32_dp, s)) {
8354 return 1;
8356 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
8357 /* rm is just Vm, and index is M. */
8358 data = extract32(insn, 5, 1); /* index */
8359 rm = extract32(insn, 0, 4);
8360 } else {
8361 return 1;
8364 if (s->fp_excp_el) {
8365 gen_exception_insn(s, 4, EXCP_UDEF,
8366 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
8367 return 0;
8369 if (!s->vfp_enabled) {
8370 return 1;
8373 opr_sz = (1 + q) * 8;
8374 if (fn_gvec_ptr) {
8375 TCGv_ptr fpst = get_fpstatus_ptr(1);
8376 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
8377 vfp_reg_offset(1, rn),
8378 vfp_reg_offset(1, rm), fpst,
8379 opr_sz, opr_sz, data, fn_gvec_ptr);
8380 tcg_temp_free_ptr(fpst);
8381 } else {
8382 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
8383 vfp_reg_offset(1, rn),
8384 vfp_reg_offset(1, rm),
8385 opr_sz, opr_sz, data, fn_gvec);
8387 return 0;
8390 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
8392 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
8393 const ARMCPRegInfo *ri;
8395 cpnum = (insn >> 8) & 0xf;
8397 /* First check for coprocessor space used for XScale/iwMMXt insns */
8398 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
8399 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
8400 return 1;
8402 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
8403 return disas_iwmmxt_insn(s, insn);
8404 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
8405 return disas_dsp_insn(s, insn);
8407 return 1;
8410 /* Otherwise treat as a generic register access */
8411 is64 = (insn & (1 << 25)) == 0;
8412 if (!is64 && ((insn & (1 << 4)) == 0)) {
8413 /* cdp */
8414 return 1;
8417 crm = insn & 0xf;
8418 if (is64) {
8419 crn = 0;
8420 opc1 = (insn >> 4) & 0xf;
8421 opc2 = 0;
8422 rt2 = (insn >> 16) & 0xf;
8423 } else {
8424 crn = (insn >> 16) & 0xf;
8425 opc1 = (insn >> 21) & 7;
8426 opc2 = (insn >> 5) & 7;
8427 rt2 = 0;
8429 isread = (insn >> 20) & 1;
8430 rt = (insn >> 12) & 0xf;
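/* Example decode (assuming the usual cp15 layout): the 32-bit access
 * MRC p15, 0, rt, c1, c0, 0 (an SCTLR read) arrives here with
 * cpnum = 15, opc1 = 0, crn = 1, crm = 0, opc2 = 0, isread = 1.
 */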
8432 ri = get_arm_cp_reginfo(s->cp_regs,
8433 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
8434 if (ri) {
8435 /* Check access permissions */
8436 if (!cp_access_ok(s->current_el, ri, isread)) {
8437 return 1;
8440 if (ri->accessfn ||
8441 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
8442 /* Emit code to perform further access permissions checks at
8443 * runtime; this may result in an exception.
8444 * Note that on XScale all cp0..cp13 registers do an access check
8445 * call in order to handle c15_cpar.
8447 TCGv_ptr tmpptr;
8448 TCGv_i32 tcg_syn, tcg_isread;
8449 uint32_t syndrome;
8451 /* Note that since we are an implementation which takes an
8452 * exception on a trapped conditional instruction only if the
8453 * instruction passes its condition code check, we can take
8454 * advantage of the clause in the ARM ARM that allows us to set
8455 * the COND field in the instruction to 0xE in all cases.
8456 * We could fish the actual condition out of the insn (ARM)
8457 * or the condexec bits (Thumb) but it isn't necessary.
8459 switch (cpnum) {
8460 case 14:
8461 if (is64) {
8462 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
8463 isread, false);
8464 } else {
8465 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
8466 rt, isread, false);
8468 break;
8469 case 15:
8470 if (is64) {
8471 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
8472 isread, false);
8473 } else {
8474 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
8475 rt, isread, false);
8477 break;
8478 default:
8479 /* ARMv8 defines that only coprocessors 14 and 15 exist,
8480 * so this can only happen if this is an ARMv7 or earlier CPU,
8481 * in which case the syndrome information won't actually be
8482 * guest visible.
8484 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8485 syndrome = syn_uncategorized();
8486 break;
8489 gen_set_condexec(s);
8490 gen_set_pc_im(s, s->pc - 4);
8491 tmpptr = tcg_const_ptr(ri);
8492 tcg_syn = tcg_const_i32(syndrome);
8493 tcg_isread = tcg_const_i32(isread);
8494 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
8495 tcg_isread);
8496 tcg_temp_free_ptr(tmpptr);
8497 tcg_temp_free_i32(tcg_syn);
8498 tcg_temp_free_i32(tcg_isread);
8501 /* Handle special cases first */
8502 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
8503 case ARM_CP_NOP:
8504 return 0;
8505 case ARM_CP_WFI:
8506 if (isread) {
8507 return 1;
8509 gen_set_pc_im(s, s->pc);
8510 s->base.is_jmp = DISAS_WFI;
8511 return 0;
8512 default:
8513 break;
8516 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
8517 gen_io_start();
8520 if (isread) {
8521 /* Read */
8522 if (is64) {
8523 TCGv_i64 tmp64;
8524 TCGv_i32 tmp;
8525 if (ri->type & ARM_CP_CONST) {
8526 tmp64 = tcg_const_i64(ri->resetvalue);
8527 } else if (ri->readfn) {
8528 TCGv_ptr tmpptr;
8529 tmp64 = tcg_temp_new_i64();
8530 tmpptr = tcg_const_ptr(ri);
8531 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
8532 tcg_temp_free_ptr(tmpptr);
8533 } else {
8534 tmp64 = tcg_temp_new_i64();
8535 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
8537 tmp = tcg_temp_new_i32();
8538 tcg_gen_extrl_i64_i32(tmp, tmp64);
8539 store_reg(s, rt, tmp);
8540 tcg_gen_shri_i64(tmp64, tmp64, 32);
8541 tmp = tcg_temp_new_i32();
8542 tcg_gen_extrl_i64_i32(tmp, tmp64);
8543 tcg_temp_free_i64(tmp64);
8544 store_reg(s, rt2, tmp);
8545 } else {
8546 TCGv_i32 tmp;
8547 if (ri->type & ARM_CP_CONST) {
8548 tmp = tcg_const_i32(ri->resetvalue);
8549 } else if (ri->readfn) {
8550 TCGv_ptr tmpptr;
8551 tmp = tcg_temp_new_i32();
8552 tmpptr = tcg_const_ptr(ri);
8553 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
8554 tcg_temp_free_ptr(tmpptr);
8555 } else {
8556 tmp = load_cpu_offset(ri->fieldoffset);
8558 if (rt == 15) {
8559 /* A destination register of r15 for 32-bit loads sets
8560 * the condition codes from the high 4 bits of the loaded value.
8561 */
8562 gen_set_nzcv(tmp);
8563 tcg_temp_free_i32(tmp);
8564 } else {
8565 store_reg(s, rt, tmp);
8568 } else {
8569 /* Write */
8570 if (ri->type & ARM_CP_CONST) {
8571 /* If not forbidden by access permissions, treat as WI */
8572 return 0;
8575 if (is64) {
8576 TCGv_i32 tmplo, tmphi;
8577 TCGv_i64 tmp64 = tcg_temp_new_i64();
8578 tmplo = load_reg(s, rt);
8579 tmphi = load_reg(s, rt2);
8580 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
8581 tcg_temp_free_i32(tmplo);
8582 tcg_temp_free_i32(tmphi);
8583 if (ri->writefn) {
8584 TCGv_ptr tmpptr = tcg_const_ptr(ri);
8585 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
8586 tcg_temp_free_ptr(tmpptr);
8587 } else {
8588 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
8590 tcg_temp_free_i64(tmp64);
8591 } else {
8592 if (ri->writefn) {
8593 TCGv_i32 tmp;
8594 TCGv_ptr tmpptr;
8595 tmp = load_reg(s, rt);
8596 tmpptr = tcg_const_ptr(ri);
8597 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
8598 tcg_temp_free_ptr(tmpptr);
8599 tcg_temp_free_i32(tmp);
8600 } else {
8601 TCGv_i32 tmp = load_reg(s, rt);
8602 store_cpu_offset(tmp, ri->fieldoffset);
8607 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
8608 /* I/O operations must end the TB here (whether read or write) */
8609 gen_io_end();
8610 gen_lookup_tb(s);
8611 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
8612 /* We default to ending the TB on a coprocessor register write,
8613 * but allow this to be suppressed by the register definition
8614 * (usually only necessary to work around guest bugs).
8615 */
8616 gen_lookup_tb(s);
8619 return 0;
8622 /* Unknown register; this might be a guest error or a QEMU
8623 * unimplemented feature.
8624 */
8625 if (is64) {
8626 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
8627 "64 bit system register cp:%d opc1: %d crm:%d "
8628 "(%s)\n",
8629 isread ? "read" : "write", cpnum, opc1, crm,
8630 s->ns ? "non-secure" : "secure");
8631 } else {
8632 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
8633 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
8634 "(%s)\n",
8635 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
8636 s->ns ? "non-secure" : "secure");
8639 return 1;
8643 /* Store a 64-bit value to a register pair. Clobbers val. */
8644 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
8646 TCGv_i32 tmp;
8647 tmp = tcg_temp_new_i32();
8648 tcg_gen_extrl_i64_i32(tmp, val);
8649 store_reg(s, rlow, tmp);
8650 tmp = tcg_temp_new_i32();
8651 tcg_gen_shri_i64(val, val, 32);
8652 tcg_gen_extrl_i64_i32(tmp, val);
8653 store_reg(s, rhigh, tmp);
8656 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
8657 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
8659 TCGv_i64 tmp;
8660 TCGv_i32 tmp2;
8662 /* Load value and extend to 64 bits. */
8663 tmp = tcg_temp_new_i64();
8664 tmp2 = load_reg(s, rlow);
8665 tcg_gen_extu_i32_i64(tmp, tmp2);
8666 tcg_temp_free_i32(tmp2);
8667 tcg_gen_add_i64(val, val, tmp);
8668 tcg_temp_free_i64(tmp);
8671 /* load and add a 64-bit value from a register pair. */
8672 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
8674 TCGv_i64 tmp;
8675 TCGv_i32 tmpl;
8676 TCGv_i32 tmph;
8678 /* Load 64-bit value rd:rn. */
8679 tmpl = load_reg(s, rlow);
8680 tmph = load_reg(s, rhigh);
8681 tmp = tcg_temp_new_i64();
8682 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
8683 tcg_temp_free_i32(tmpl);
8684 tcg_temp_free_i32(tmph);
8685 tcg_gen_add_i64(val, val, tmp);
8686 tcg_temp_free_i64(tmp);
8689 /* Set N and Z flags from hi|lo. */
8690 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
8692 tcg_gen_mov_i32(cpu_NF, hi);
8693 tcg_gen_or_i32(cpu_ZF, lo, hi);
8696 /* Load/Store exclusive instructions are implemented by remembering
8697 the value/address loaded, and seeing if these are the same
8698 when the store is performed. This should be sufficient to implement
8699 the architecturally mandated semantics, and avoids having to monitor
8700 regular stores. The comparison against the remembered value is done as
8701 part of the cmpxchg operation, but we must compare the addresses manually. */
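/* Illustrative sketch (not the exact TCG that is emitted): LDREX Rt, [Rn]
 * roughly becomes
 *     exclusive_addr = Rn; exclusive_val = [Rn]; Rt = exclusive_val;
 * and the matching STREX Rd, Rt2, [Rn] becomes
 *     Rd = (Rn == exclusive_addr && cmpxchg([Rn], exclusive_val, Rt2)) ? 0 : 1;
 * with the monitor cleared (exclusive_addr = -1) afterwards.
 */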
8702 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
8703 TCGv_i32 addr, int size)
8705 TCGv_i32 tmp = tcg_temp_new_i32();
8706 TCGMemOp opc = size | MO_ALIGN | s->be_data;
8708 s->is_ldex = true;
8710 if (size == 3) {
8711 TCGv_i32 tmp2 = tcg_temp_new_i32();
8712 TCGv_i64 t64 = tcg_temp_new_i64();
8714 /* For AArch32, architecturally the 32-bit word at the lowest
8715 * address is always Rt and the one at addr+4 is Rt2, even if
8716 * the CPU is big-endian. That means we don't want to do a
8717 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
8718 * for an architecturally 64-bit access, but instead do a
8719 * 64-bit access using MO_BE if appropriate and then split
8720 * the two halves.
8721 * This only makes a difference for BE32 user-mode, where
8722 * frob64() must not flip the two halves of the 64-bit data
8723 * but this code must treat BE32 user-mode like BE32 system.
8724 */
8725 TCGv taddr = gen_aa32_addr(s, addr, opc);
8727 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
8728 tcg_temp_free(taddr);
8729 tcg_gen_mov_i64(cpu_exclusive_val, t64);
8730 if (s->be_data == MO_BE) {
8731 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
8732 } else {
8733 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
8735 tcg_temp_free_i64(t64);
8737 store_reg(s, rt2, tmp2);
8738 } else {
8739 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
8740 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
8743 store_reg(s, rt, tmp);
8744 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
8747 static void gen_clrex(DisasContext *s)
8749 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
8752 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
8753 TCGv_i32 addr, int size)
8755 TCGv_i32 t0, t1, t2;
8756 TCGv_i64 extaddr;
8757 TCGv taddr;
8758 TCGLabel *done_label;
8759 TCGLabel *fail_label;
8760 TCGMemOp opc = size | MO_ALIGN | s->be_data;
8762 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
8763 [addr] = {Rt};
8764 {Rd} = 0;
8765 } else {
8766 {Rd} = 1;
8767 } */
8768 fail_label = gen_new_label();
8769 done_label = gen_new_label();
8770 extaddr = tcg_temp_new_i64();
8771 tcg_gen_extu_i32_i64(extaddr, addr);
8772 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
8773 tcg_temp_free_i64(extaddr);
8775 taddr = gen_aa32_addr(s, addr, opc);
8776 t0 = tcg_temp_new_i32();
8777 t1 = load_reg(s, rt);
8778 if (size == 3) {
8779 TCGv_i64 o64 = tcg_temp_new_i64();
8780 TCGv_i64 n64 = tcg_temp_new_i64();
8782 t2 = load_reg(s, rt2);
8783 /* For AArch32, architecturally the 32-bit word at the lowest
8784 * address is always Rt and the one at addr+4 is Rt2, even if
8785 * the CPU is big-endian. Since we're going to treat this as a
8786 * single 64-bit BE store, we need to put the two halves in the
8787 * opposite order for BE to LE, so that they end up in the right
8788 * places.
8789 * We don't want gen_aa32_frob64() because that does the wrong
8790 * thing for BE32 usermode.
8791 */
8792 if (s->be_data == MO_BE) {
8793 tcg_gen_concat_i32_i64(n64, t2, t1);
8794 } else {
8795 tcg_gen_concat_i32_i64(n64, t1, t2);
8797 tcg_temp_free_i32(t2);
8799 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
8800 get_mem_index(s), opc);
8801 tcg_temp_free_i64(n64);
8803 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
8804 tcg_gen_extrl_i64_i32(t0, o64);
8806 tcg_temp_free_i64(o64);
8807 } else {
8808 t2 = tcg_temp_new_i32();
8809 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
8810 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
8811 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
8812 tcg_temp_free_i32(t2);
8814 tcg_temp_free_i32(t1);
8815 tcg_temp_free(taddr);
8816 tcg_gen_mov_i32(cpu_R[rd], t0);
8817 tcg_temp_free_i32(t0);
8818 tcg_gen_br(done_label);
8820 gen_set_label(fail_label);
8821 tcg_gen_movi_i32(cpu_R[rd], 1);
8822 gen_set_label(done_label);
8823 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
8826 /* gen_srs:
8827 * @env: CPUARMState
8828 * @s: DisasContext
8829 * @mode: mode field from insn (which stack to store to)
8830 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
8831 * @writeback: true if writeback bit set
8833 * Generate code for the SRS (Store Return State) insn.
8834 */
8835 static void gen_srs(DisasContext *s,
8836 uint32_t mode, uint32_t amode, bool writeback)
8838 int32_t offset;
8839 TCGv_i32 addr, tmp;
8840 bool undef = false;
8842 /* SRS is:
8843 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
8844 * and specified mode is monitor mode
8845 * - UNDEFINED in Hyp mode
8846 * - UNPREDICTABLE in User or System mode
8847 * - UNPREDICTABLE if the specified mode is:
8848 * -- not implemented
8849 * -- not a valid mode number
8850 * -- a mode that's at a higher exception level
8851 * -- Monitor, if we are Non-secure
8852 * For the UNPREDICTABLE cases we choose to UNDEF.
8853 */
8854 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
8855 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
8856 return;
8859 if (s->current_el == 0 || s->current_el == 2) {
8860 undef = true;
8863 switch (mode) {
8864 case ARM_CPU_MODE_USR:
8865 case ARM_CPU_MODE_FIQ:
8866 case ARM_CPU_MODE_IRQ:
8867 case ARM_CPU_MODE_SVC:
8868 case ARM_CPU_MODE_ABT:
8869 case ARM_CPU_MODE_UND:
8870 case ARM_CPU_MODE_SYS:
8871 break;
8872 case ARM_CPU_MODE_HYP:
8873 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
8874 undef = true;
8876 break;
8877 case ARM_CPU_MODE_MON:
8878 /* No need to check specifically for "are we non-secure" because
8879 * we've already made EL0 UNDEF and handled the trap for S-EL1;
8880 * so if this isn't EL3 then we must be non-secure.
8881 */
8882 if (s->current_el != 3) {
8883 undef = true;
8885 break;
8886 default:
8887 undef = true;
8890 if (undef) {
8891 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
8892 default_exception_el(s));
8893 return;
8896 addr = tcg_temp_new_i32();
8897 tmp = tcg_const_i32(mode);
8898 /* get_r13_banked() will raise an exception if called from System mode */
8899 gen_set_condexec(s);
8900 gen_set_pc_im(s, s->pc - 4);
8901 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8902 tcg_temp_free_i32(tmp);
8903 switch (amode) {
8904 case 0: /* DA */
8905 offset = -4;
8906 break;
8907 case 1: /* IA */
8908 offset = 0;
8909 break;
8910 case 2: /* DB */
8911 offset = -8;
8912 break;
8913 case 3: /* IB */
8914 offset = 4;
8915 break;
8916 default:
8917 abort();
8919 tcg_gen_addi_i32(addr, addr, offset);
8920 tmp = load_reg(s, 14);
8921 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8922 tcg_temp_free_i32(tmp);
8923 tmp = load_cpu_field(spsr);
8924 tcg_gen_addi_i32(addr, addr, 4);
8925 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8926 tcg_temp_free_i32(tmp);
8927 if (writeback) {
8928 switch (amode) {
8929 case 0:
8930 offset = -8;
8931 break;
8932 case 1:
8933 offset = 4;
8934 break;
8935 case 2:
8936 offset = -4;
8937 break;
8938 case 3:
8939 offset = 0;
8940 break;
8941 default:
8942 abort();
8944 tcg_gen_addi_i32(addr, addr, offset);
8945 tmp = tcg_const_i32(mode);
8946 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8947 tcg_temp_free_i32(tmp);
8949 tcg_temp_free_i32(addr);
8950 s->base.is_jmp = DISAS_UPDATE;
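/* Worked example (illustrative): SRSDB sp!, #19 (mode SVC, amode DB,
 * writeback) stores LR at SP_svc - 8 and the SPSR at SP_svc - 4, then
 * writes SP_svc - 8 back into the banked SP, matching the
 * decrement-before addressing computed above.
 */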
8953 /* Generate a label used for skipping this instruction */
8954 static void arm_gen_condlabel(DisasContext *s)
8956 if (!s->condjmp) {
8957 s->condlabel = gen_new_label();
8958 s->condjmp = 1;
8962 /* Skip this instruction if the ARM condition is false */
8963 static void arm_skip_unless(DisasContext *s, uint32_t cond)
8965 arm_gen_condlabel(s);
8966 arm_gen_test_cc(cond ^ 1, s->condlabel);
8969 static void disas_arm_insn(DisasContext *s, unsigned int insn)
8971 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
8972 TCGv_i32 tmp;
8973 TCGv_i32 tmp2;
8974 TCGv_i32 tmp3;
8975 TCGv_i32 addr;
8976 TCGv_i64 tmp64;
8978 /* M variants do not implement ARM mode; this must raise the INVSTATE
8979 * UsageFault exception.
8980 */
8981 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8982 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8983 default_exception_el(s));
8984 return;
8986 cond = insn >> 28;
8987 if (cond == 0xf) {
8988 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8989 * choose to UNDEF. In ARMv5 and above the space is used
8990 * for miscellaneous unconditional instructions.
8991 */
8992 ARCH(5);
8994 /* Unconditional instructions. */
8995 if (((insn >> 25) & 7) == 1) {
8996 /* NEON Data processing. */
8997 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
8998 goto illegal_op;
9001 if (disas_neon_data_insn(s, insn)) {
9002 goto illegal_op;
9004 return;
9006 if ((insn & 0x0f100000) == 0x04000000) {
9007 /* NEON load/store. */
9008 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9009 goto illegal_op;
9012 if (disas_neon_ls_insn(s, insn)) {
9013 goto illegal_op;
9015 return;
9017 if ((insn & 0x0f000e10) == 0x0e000a00) {
9018 /* VFP. */
9019 if (disas_vfp_insn(s, insn)) {
9020 goto illegal_op;
9022 return;
9024 if (((insn & 0x0f30f000) == 0x0510f000) ||
9025 ((insn & 0x0f30f010) == 0x0710f000)) {
9026 if ((insn & (1 << 22)) == 0) {
9027 /* PLDW; v7MP */
9028 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
9029 goto illegal_op;
9032 /* Otherwise PLD; v5TE+ */
9033 ARCH(5TE);
9034 return;
9036 if (((insn & 0x0f70f000) == 0x0450f000) ||
9037 ((insn & 0x0f70f010) == 0x0650f000)) {
9038 ARCH(7);
9039 return; /* PLI; V7 */
9041 if (((insn & 0x0f700000) == 0x04100000) ||
9042 ((insn & 0x0f700010) == 0x06100000)) {
9043 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
9044 goto illegal_op;
9046 return; /* v7MP: Unallocated memory hint: must NOP */
9049 if ((insn & 0x0ffffdff) == 0x01010000) {
9050 ARCH(6);
9051 /* setend */
9052 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
9053 gen_helper_setend(cpu_env);
9054 s->base.is_jmp = DISAS_UPDATE;
9056 return;
9057 } else if ((insn & 0x0fffff00) == 0x057ff000) {
9058 switch ((insn >> 4) & 0xf) {
9059 case 1: /* clrex */
9060 ARCH(6K);
9061 gen_clrex(s);
9062 return;
9063 case 4: /* dsb */
9064 case 5: /* dmb */
9065 ARCH(7);
9066 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9067 return;
9068 case 6: /* isb */
9069 /* We need to break the TB after this insn to execute
9070 * self-modifying code correctly and also to take
9071 * any pending interrupts immediately.
9072 */
9073 gen_goto_tb(s, 0, s->pc & ~1);
9074 return;
9075 default:
9076 goto illegal_op;
9078 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
9079 /* srs */
9080 ARCH(6);
9081 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
9082 return;
9083 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9084 /* rfe */
9085 int32_t offset;
9086 if (IS_USER(s))
9087 goto illegal_op;
9088 ARCH(6);
9089 rn = (insn >> 16) & 0xf;
9090 addr = load_reg(s, rn);
9091 i = (insn >> 23) & 3;
9092 switch (i) {
9093 case 0: offset = -4; break; /* DA */
9094 case 1: offset = 0; break; /* IA */
9095 case 2: offset = -8; break; /* DB */
9096 case 3: offset = 4; break; /* IB */
9097 default: abort();
9099 if (offset)
9100 tcg_gen_addi_i32(addr, addr, offset);
9101 /* Load PC into tmp and CPSR into tmp2. */
9102 tmp = tcg_temp_new_i32();
9103 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9104 tcg_gen_addi_i32(addr, addr, 4);
9105 tmp2 = tcg_temp_new_i32();
9106 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9107 if (insn & (1 << 21)) {
9108 /* Base writeback. */
9109 switch (i) {
9110 case 0: offset = -8; break;
9111 case 1: offset = 4; break;
9112 case 2: offset = -4; break;
9113 case 3: offset = 0; break;
9114 default: abort();
9116 if (offset)
9117 tcg_gen_addi_i32(addr, addr, offset);
9118 store_reg(s, rn, addr);
9119 } else {
9120 tcg_temp_free_i32(addr);
9122 gen_rfe(s, tmp, tmp2);
9123 return;
9124 } else if ((insn & 0x0e000000) == 0x0a000000) {
9125 /* branch link and change to thumb (blx <offset>) */
9126 int32_t offset;
9128 val = (uint32_t)s->pc;
9129 tmp = tcg_temp_new_i32();
9130 tcg_gen_movi_i32(tmp, val);
9131 store_reg(s, 14, tmp);
9132 /* Sign-extend the 24-bit offset */
9133 offset = (((int32_t)insn) << 8) >> 8;
9134 /* offset * 4 + bit24 * 2 + (thumb bit) */
9135 val += (offset << 2) | ((insn >> 23) & 2) | 1;
9136 /* pipeline offset */
9137 val += 4;
9138 /* protected by ARCH(5); above, near the start of uncond block */
9139 gen_bx_im(s, val);
9140 return;
9141 } else if ((insn & 0x0e000f00) == 0x0c000100) {
9142 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9143 /* iWMMXt register transfer. */
9144 if (extract32(s->c15_cpar, 1, 1)) {
9145 if (!disas_iwmmxt_insn(s, insn)) {
9146 return;
9150 } else if ((insn & 0x0e000a00) == 0x0c000800
9151 && arm_dc_feature(s, ARM_FEATURE_V8)) {
9152 if (disas_neon_insn_3same_ext(s, insn)) {
9153 goto illegal_op;
9155 return;
9156 } else if ((insn & 0x0f000a00) == 0x0e000800
9157 && arm_dc_feature(s, ARM_FEATURE_V8)) {
9158 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
9159 goto illegal_op;
9161 return;
9162 } else if ((insn & 0x0fe00000) == 0x0c400000) {
9163 /* Coprocessor double register transfer. */
9164 ARCH(5TE);
9165 } else if ((insn & 0x0f000010) == 0x0e000010) {
9166 /* Additional coprocessor register transfer. */
9167 } else if ((insn & 0x0ff10020) == 0x01000000) {
9168 uint32_t mask;
9169 uint32_t val;
9170 /* cps (privileged) */
9171 if (IS_USER(s))
9172 return;
9173 mask = val = 0;
9174 if (insn & (1 << 19)) {
9175 if (insn & (1 << 8))
9176 mask |= CPSR_A;
9177 if (insn & (1 << 7))
9178 mask |= CPSR_I;
9179 if (insn & (1 << 6))
9180 mask |= CPSR_F;
9181 if (insn & (1 << 18))
9182 val |= mask;
9184 if (insn & (1 << 17)) {
9185 mask |= CPSR_M;
9186 val |= (insn & 0x1f);
9188 if (mask) {
9189 gen_set_psr_im(s, mask, 0, val);
9191 return;
9193 goto illegal_op;
9195 if (cond != 0xe) {
9196 /* if not always execute, we generate a conditional jump to
9197 next instruction */
9198 arm_skip_unless(s, cond);
9200 if ((insn & 0x0f900000) == 0x03000000) {
9201 if ((insn & (1 << 21)) == 0) {
9202 ARCH(6T2);
9203 rd = (insn >> 12) & 0xf;
9204 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
9205 if ((insn & (1 << 22)) == 0) {
9206 /* MOVW */
9207 tmp = tcg_temp_new_i32();
9208 tcg_gen_movi_i32(tmp, val);
9209 } else {
9210 /* MOVT */
9211 tmp = load_reg(s, rd);
9212 tcg_gen_ext16u_i32(tmp, tmp);
9213 tcg_gen_ori_i32(tmp, tmp, val << 16);
9215 store_reg(s, rd, tmp);
9216 } else {
9217 if (((insn >> 12) & 0xf) != 0xf)
9218 goto illegal_op;
9219 if (((insn >> 16) & 0xf) == 0) {
9220 gen_nop_hint(s, insn & 0xff);
9221 } else {
9222 /* CPSR = immediate */
9223 val = insn & 0xff;
9224 shift = ((insn >> 8) & 0xf) * 2;
9225 if (shift)
9226 val = (val >> shift) | (val << (32 - shift));
9227 i = ((insn & (1 << 22)) != 0);
9228 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
9229 i, val)) {
9230 goto illegal_op;
9234 } else if ((insn & 0x0f900000) == 0x01000000
9235 && (insn & 0x00000090) != 0x00000090) {
9236 /* miscellaneous instructions */
9237 op1 = (insn >> 21) & 3;
9238 sh = (insn >> 4) & 0xf;
9239 rm = insn & 0xf;
9240 switch (sh) {
9241 case 0x0: /* MSR, MRS */
9242 if (insn & (1 << 9)) {
9243 /* MSR (banked) and MRS (banked) */
9244 int sysm = extract32(insn, 16, 4) |
9245 (extract32(insn, 8, 1) << 4);
9246 int r = extract32(insn, 22, 1);
9248 if (op1 & 1) {
9249 /* MSR (banked) */
9250 gen_msr_banked(s, r, sysm, rm);
9251 } else {
9252 /* MRS (banked) */
9253 int rd = extract32(insn, 12, 4);
9255 gen_mrs_banked(s, r, sysm, rd);
9257 break;
9260 /* MSR, MRS (for PSRs) */
9261 if (op1 & 1) {
9262 /* PSR = reg */
9263 tmp = load_reg(s, rm);
9264 i = ((op1 & 2) != 0);
9265 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9266 goto illegal_op;
9267 } else {
9268 /* reg = PSR */
9269 rd = (insn >> 12) & 0xf;
9270 if (op1 & 2) {
9271 if (IS_USER(s))
9272 goto illegal_op;
9273 tmp = load_cpu_field(spsr);
9274 } else {
9275 tmp = tcg_temp_new_i32();
9276 gen_helper_cpsr_read(tmp, cpu_env);
9278 store_reg(s, rd, tmp);
9280 break;
9281 case 0x1:
9282 if (op1 == 1) {
9283 /* branch/exchange thumb (bx). */
9284 ARCH(4T);
9285 tmp = load_reg(s, rm);
9286 gen_bx(s, tmp);
9287 } else if (op1 == 3) {
9288 /* clz */
9289 ARCH(5);
9290 rd = (insn >> 12) & 0xf;
9291 tmp = load_reg(s, rm);
9292 tcg_gen_clzi_i32(tmp, tmp, 32);
9293 store_reg(s, rd, tmp);
9294 } else {
9295 goto illegal_op;
9297 break;
9298 case 0x2:
9299 if (op1 == 1) {
9300 ARCH(5J); /* bxj */
9301 /* Trivial implementation equivalent to bx. */
9302 tmp = load_reg(s, rm);
9303 gen_bx(s, tmp);
9304 } else {
9305 goto illegal_op;
9307 break;
9308 case 0x3:
9309 if (op1 != 1)
9310 goto illegal_op;
9312 ARCH(5);
9313 /* branch link/exchange thumb (blx) */
9314 tmp = load_reg(s, rm);
9315 tmp2 = tcg_temp_new_i32();
9316 tcg_gen_movi_i32(tmp2, s->pc);
9317 store_reg(s, 14, tmp2);
9318 gen_bx(s, tmp);
9319 break;
9320 case 0x4:
9322 /* crc32/crc32c */
9323 uint32_t c = extract32(insn, 8, 4);
9325 /* Check that this CPU supports ARMv8 CRC instructions.
9326 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
9327 * Bits 8, 10 and 11 should be zero.
9328 */
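/* Here op1 encodes the operand size (0 = byte, 1 = halfword, 2 = word), so
 * 1 << op1 below is the size in bytes passed to the helper, and bit 1 of c
 * selects the CRC32C (Castagnoli) polynomial instead of CRC32.
 */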
9329 if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
9330 goto illegal_op;
9333 rn = extract32(insn, 16, 4);
9334 rd = extract32(insn, 12, 4);
9336 tmp = load_reg(s, rn);
9337 tmp2 = load_reg(s, rm);
9338 if (op1 == 0) {
9339 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9340 } else if (op1 == 1) {
9341 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9343 tmp3 = tcg_const_i32(1 << op1);
9344 if (c & 0x2) {
9345 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9346 } else {
9347 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9349 tcg_temp_free_i32(tmp2);
9350 tcg_temp_free_i32(tmp3);
9351 store_reg(s, rd, tmp);
9352 break;
9354 case 0x5: /* saturating add/subtract */
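/* QADD/QSUB/QDADD/QDSUB: bit 1 of op1 doubles (with saturation) the Rn
 * operand first, bit 0 selects a subtract rather than an add. */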
9355 ARCH(5TE);
9356 rd = (insn >> 12) & 0xf;
9357 rn = (insn >> 16) & 0xf;
9358 tmp = load_reg(s, rm);
9359 tmp2 = load_reg(s, rn);
9360 if (op1 & 2)
9361 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9362 if (op1 & 1)
9363 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9364 else
9365 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
9366 tcg_temp_free_i32(tmp2);
9367 store_reg(s, rd, tmp);
9368 break;
9369 case 0x6: /* ERET */
9370 if (op1 != 3) {
9371 goto illegal_op;
9373 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
9374 goto illegal_op;
9376 if ((insn & 0x000fff0f) != 0x0000000e) {
9377 /* UNPREDICTABLE; we choose to UNDEF */
9378 goto illegal_op;
9381 if (s->current_el == 2) {
9382 tmp = load_cpu_field(elr_el[2]);
9383 } else {
9384 tmp = load_reg(s, 14);
9386 gen_exception_return(s, tmp);
9387 break;
9388 case 7:
9390 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
9391 switch (op1) {
9392 case 0:
9393 /* HLT */
9394 gen_hlt(s, imm16);
9395 break;
9396 case 1:
9397 /* bkpt */
9398 ARCH(5);
9399 gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
9400 break;
9401 case 2:
9402 /* Hypervisor call (v7) */
9403 ARCH(7);
9404 if (IS_USER(s)) {
9405 goto illegal_op;
9407 gen_hvc(s, imm16);
9408 break;
9409 case 3:
9410 /* Secure monitor call (v6+) */
9411 ARCH(6K);
9412 if (IS_USER(s)) {
9413 goto illegal_op;
9415 gen_smc(s);
9416 break;
9417 default:
9418 g_assert_not_reached();
9420 break;
9422 case 0x8: /* signed multiply */
9423 case 0xa:
9424 case 0xc:
9425 case 0xe:
9426 ARCH(5TE);
9427 rs = (insn >> 8) & 0xf;
9428 rn = (insn >> 12) & 0xf;
9429 rd = (insn >> 16) & 0xf;
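/* op1 selects the flavour: 0 SMLAxy, 1 SMULWy/SMLAWy, 2 SMLALxy, 3 SMULxy;
 * the x/y bits in sh pick the top or bottom 16-bit halves of the operands. */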
9430 if (op1 == 1) {
9431 /* (32 * 16) >> 16 */
9432 tmp = load_reg(s, rm);
9433 tmp2 = load_reg(s, rs);
9434 if (sh & 4)
9435 tcg_gen_sari_i32(tmp2, tmp2, 16);
9436 else
9437 gen_sxth(tmp2);
9438 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9439 tcg_gen_shri_i64(tmp64, tmp64, 16);
9440 tmp = tcg_temp_new_i32();
9441 tcg_gen_extrl_i64_i32(tmp, tmp64);
9442 tcg_temp_free_i64(tmp64);
9443 if ((sh & 2) == 0) {
9444 tmp2 = load_reg(s, rn);
9445 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9446 tcg_temp_free_i32(tmp2);
9448 store_reg(s, rd, tmp);
9449 } else {
9450 /* 16 * 16 */
9451 tmp = load_reg(s, rm);
9452 tmp2 = load_reg(s, rs);
9453 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
9454 tcg_temp_free_i32(tmp2);
9455 if (op1 == 2) {
9456 tmp64 = tcg_temp_new_i64();
9457 tcg_gen_ext_i32_i64(tmp64, tmp);
9458 tcg_temp_free_i32(tmp);
9459 gen_addq(s, tmp64, rn, rd);
9460 gen_storeq_reg(s, rn, rd, tmp64);
9461 tcg_temp_free_i64(tmp64);
9462 } else {
9463 if (op1 == 0) {
9464 tmp2 = load_reg(s, rn);
9465 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9466 tcg_temp_free_i32(tmp2);
9468 store_reg(s, rd, tmp);
9471 break;
9472 default:
9473 goto illegal_op;
9475 } else if (((insn & 0x0e000000) == 0 &&
9476 (insn & 0x00000090) != 0x90) ||
9477 ((insn & 0x0e000000) == (1 << 25))) {
9478 int set_cc, logic_cc, shiftop;
9480 op1 = (insn >> 21) & 0xf;
9481 set_cc = (insn >> 20) & 1;
9482 logic_cc = table_logic_cc[op1] & set_cc;
9484 /* data processing instruction */
9485 if (insn & (1 << 25)) {
9486 /* immediate operand */
9487 val = insn & 0xff;
9488 shift = ((insn >> 8) & 0xf) * 2;
9489 if (shift) {
9490 val = (val >> shift) | (val << (32 - shift));
9492 tmp2 = tcg_temp_new_i32();
9493 tcg_gen_movi_i32(tmp2, val);
9494 if (logic_cc && shift) {
9495 gen_set_CF_bit31(tmp2);
9497 } else {
9498 /* register */
9499 rm = (insn) & 0xf;
9500 tmp2 = load_reg(s, rm);
9501 shiftop = (insn >> 5) & 3;
9502 if (!(insn & (1 << 4))) {
9503 shift = (insn >> 7) & 0x1f;
9504 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9505 } else {
9506 rs = (insn >> 8) & 0xf;
9507 tmp = load_reg(s, rs);
9508 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9511 if (op1 != 0x0f && op1 != 0x0d) {
9512 rn = (insn >> 16) & 0xf;
9513 tmp = load_reg(s, rn);
9514 } else {
9515 tmp = NULL;
9517 rd = (insn >> 12) & 0xf;
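/* op1 is the standard ARM data-processing opcode:
 * 0 AND, 1 EOR, 2 SUB, 3 RSB, 4 ADD, 5 ADC, 6 SBC, 7 RSC,
 * 8 TST, 9 TEQ, a CMP, b CMN, c ORR, d MOV, e BIC, f MVN. */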
9518 switch(op1) {
9519 case 0x00:
9520 tcg_gen_and_i32(tmp, tmp, tmp2);
9521 if (logic_cc) {
9522 gen_logic_CC(tmp);
9524 store_reg_bx(s, rd, tmp);
9525 break;
9526 case 0x01:
9527 tcg_gen_xor_i32(tmp, tmp, tmp2);
9528 if (logic_cc) {
9529 gen_logic_CC(tmp);
9531 store_reg_bx(s, rd, tmp);
9532 break;
9533 case 0x02:
9534 if (set_cc && rd == 15) {
9535 /* SUBS r15, ... is used for exception return. */
9536 if (IS_USER(s)) {
9537 goto illegal_op;
9539 gen_sub_CC(tmp, tmp, tmp2);
9540 gen_exception_return(s, tmp);
9541 } else {
9542 if (set_cc) {
9543 gen_sub_CC(tmp, tmp, tmp2);
9544 } else {
9545 tcg_gen_sub_i32(tmp, tmp, tmp2);
9547 store_reg_bx(s, rd, tmp);
9549 break;
9550 case 0x03:
9551 if (set_cc) {
9552 gen_sub_CC(tmp, tmp2, tmp);
9553 } else {
9554 tcg_gen_sub_i32(tmp, tmp2, tmp);
9556 store_reg_bx(s, rd, tmp);
9557 break;
9558 case 0x04:
9559 if (set_cc) {
9560 gen_add_CC(tmp, tmp, tmp2);
9561 } else {
9562 tcg_gen_add_i32(tmp, tmp, tmp2);
9564 store_reg_bx(s, rd, tmp);
9565 break;
9566 case 0x05:
9567 if (set_cc) {
9568 gen_adc_CC(tmp, tmp, tmp2);
9569 } else {
9570 gen_add_carry(tmp, tmp, tmp2);
9572 store_reg_bx(s, rd, tmp);
9573 break;
9574 case 0x06:
9575 if (set_cc) {
9576 gen_sbc_CC(tmp, tmp, tmp2);
9577 } else {
9578 gen_sub_carry(tmp, tmp, tmp2);
9580 store_reg_bx(s, rd, tmp);
9581 break;
9582 case 0x07:
9583 if (set_cc) {
9584 gen_sbc_CC(tmp, tmp2, tmp);
9585 } else {
9586 gen_sub_carry(tmp, tmp2, tmp);
9588 store_reg_bx(s, rd, tmp);
9589 break;
9590 case 0x08:
9591 if (set_cc) {
9592 tcg_gen_and_i32(tmp, tmp, tmp2);
9593 gen_logic_CC(tmp);
9595 tcg_temp_free_i32(tmp);
9596 break;
9597 case 0x09:
9598 if (set_cc) {
9599 tcg_gen_xor_i32(tmp, tmp, tmp2);
9600 gen_logic_CC(tmp);
9602 tcg_temp_free_i32(tmp);
9603 break;
9604 case 0x0a:
9605 if (set_cc) {
9606 gen_sub_CC(tmp, tmp, tmp2);
9608 tcg_temp_free_i32(tmp);
9609 break;
9610 case 0x0b:
9611 if (set_cc) {
9612 gen_add_CC(tmp, tmp, tmp2);
9614 tcg_temp_free_i32(tmp);
9615 break;
9616 case 0x0c:
9617 tcg_gen_or_i32(tmp, tmp, tmp2);
9618 if (logic_cc) {
9619 gen_logic_CC(tmp);
9621 store_reg_bx(s, rd, tmp);
9622 break;
9623 case 0x0d:
9624 if (logic_cc && rd == 15) {
9625 /* MOVS r15, ... is used for exception return. */
9626 if (IS_USER(s)) {
9627 goto illegal_op;
9629 gen_exception_return(s, tmp2);
9630 } else {
9631 if (logic_cc) {
9632 gen_logic_CC(tmp2);
9634 store_reg_bx(s, rd, tmp2);
9636 break;
9637 case 0x0e:
9638 tcg_gen_andc_i32(tmp, tmp, tmp2);
9639 if (logic_cc) {
9640 gen_logic_CC(tmp);
9642 store_reg_bx(s, rd, tmp);
9643 break;
9644 default:
9645 case 0x0f:
9646 tcg_gen_not_i32(tmp2, tmp2);
9647 if (logic_cc) {
9648 gen_logic_CC(tmp2);
9650 store_reg_bx(s, rd, tmp2);
9651 break;
9653 if (op1 != 0x0f && op1 != 0x0d) {
9654 tcg_temp_free_i32(tmp2);
9656 } else {
9657 /* other instructions */
9658 op1 = (insn >> 24) & 0xf;
9659 switch(op1) {
9660 case 0x0:
9661 case 0x1:
9662 /* multiplies, extra load/stores */
9663 sh = (insn >> 5) & 3;
9664 if (sh == 0) {
9665 if (op1 == 0x0) {
9666 rd = (insn >> 16) & 0xf;
9667 rn = (insn >> 12) & 0xf;
9668 rs = (insn >> 8) & 0xf;
9669 rm = (insn) & 0xf;
9670 op1 = (insn >> 20) & 0xf;
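/* Bits 23:20 distinguish the 32-bit MUL/MLA/MLS forms (0-3, 6), UMAAL (4)
 * and the 64-bit UMULL/UMLAL/SMULL/SMLAL forms (8-15). */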
9671 switch (op1) {
9672 case 0: case 1: case 2: case 3: case 6:
9673 /* 32 bit mul */
9674 tmp = load_reg(s, rs);
9675 tmp2 = load_reg(s, rm);
9676 tcg_gen_mul_i32(tmp, tmp, tmp2);
9677 tcg_temp_free_i32(tmp2);
9678 if (insn & (1 << 22)) {
9679 /* Subtract (mls) */
9680 ARCH(6T2);
9681 tmp2 = load_reg(s, rn);
9682 tcg_gen_sub_i32(tmp, tmp2, tmp);
9683 tcg_temp_free_i32(tmp2);
9684 } else if (insn & (1 << 21)) {
9685 /* Add */
9686 tmp2 = load_reg(s, rn);
9687 tcg_gen_add_i32(tmp, tmp, tmp2);
9688 tcg_temp_free_i32(tmp2);
9690 if (insn & (1 << 20))
9691 gen_logic_CC(tmp);
9692 store_reg(s, rd, tmp);
9693 break;
9694 case 4:
9695 /* 64 bit mul double accumulate (UMAAL) */
9696 ARCH(6);
9697 tmp = load_reg(s, rs);
9698 tmp2 = load_reg(s, rm);
9699 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9700 gen_addq_lo(s, tmp64, rn);
9701 gen_addq_lo(s, tmp64, rd);
9702 gen_storeq_reg(s, rn, rd, tmp64);
9703 tcg_temp_free_i64(tmp64);
9704 break;
9705 case 8: case 9: case 10: case 11:
9706 case 12: case 13: case 14: case 15:
9707 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
9708 tmp = load_reg(s, rs);
9709 tmp2 = load_reg(s, rm);
9710 if (insn & (1 << 22)) {
9711 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
9712 } else {
9713 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
9715 if (insn & (1 << 21)) { /* mult accumulate */
9716 TCGv_i32 al = load_reg(s, rn);
9717 TCGv_i32 ah = load_reg(s, rd);
9718 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
9719 tcg_temp_free_i32(al);
9720 tcg_temp_free_i32(ah);
9722 if (insn & (1 << 20)) {
9723 gen_logicq_cc(tmp, tmp2);
9725 store_reg(s, rn, tmp);
9726 store_reg(s, rd, tmp2);
9727 break;
9728 default:
9729 goto illegal_op;
9731 } else {
9732 rn = (insn >> 16) & 0xf;
9733 rd = (insn >> 12) & 0xf;
9734 if (insn & (1 << 23)) {
9735 /* load/store exclusive */
9736 bool is_ld = extract32(insn, 20, 1);
9737 bool is_lasr = !extract32(insn, 8, 1);
9738 int op2 = (insn >> 8) & 3;
9739 op1 = (insn >> 21) & 0x3;
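/* op2 (bits 9:8): 0 = LDA/STL, 2 = LDAEX/STLEX, 3 = LDREX/STREX;
 * op1 (bits 22:21) gives the size: 0 word, 1 doubleword, 2 byte, 3 halfword. */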
9741 switch (op2) {
9742 case 0: /* lda/stl */
9743 if (op1 == 1) {
9744 goto illegal_op;
9746 ARCH(8);
9747 break;
9748 case 1: /* reserved */
9749 goto illegal_op;
9750 case 2: /* ldaex/stlex */
9751 ARCH(8);
9752 break;
9753 case 3: /* ldrex/strex */
9754 if (op1) {
9755 ARCH(6K);
9756 } else {
9757 ARCH(6);
9759 break;
9762 addr = tcg_temp_local_new_i32();
9763 load_reg_var(s, addr, rn);
9765 if (is_lasr && !is_ld) {
9766 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
9769 if (op2 == 0) {
9770 if (is_ld) {
9771 tmp = tcg_temp_new_i32();
9772 switch (op1) {
9773 case 0: /* lda */
9774 gen_aa32_ld32u_iss(s, tmp, addr,
9775 get_mem_index(s),
9776 rd | ISSIsAcqRel);
9777 break;
9778 case 2: /* ldab */
9779 gen_aa32_ld8u_iss(s, tmp, addr,
9780 get_mem_index(s),
9781 rd | ISSIsAcqRel);
9782 break;
9783 case 3: /* ldah */
9784 gen_aa32_ld16u_iss(s, tmp, addr,
9785 get_mem_index(s),
9786 rd | ISSIsAcqRel);
9787 break;
9788 default:
9789 abort();
9791 store_reg(s, rd, tmp);
9792 } else {
9793 rm = insn & 0xf;
9794 tmp = load_reg(s, rm);
9795 switch (op1) {
9796 case 0: /* stl */
9797 gen_aa32_st32_iss(s, tmp, addr,
9798 get_mem_index(s),
9799 rm | ISSIsAcqRel);
9800 break;
9801 case 2: /* stlb */
9802 gen_aa32_st8_iss(s, tmp, addr,
9803 get_mem_index(s),
9804 rm | ISSIsAcqRel);
9805 break;
9806 case 3: /* stlh */
9807 gen_aa32_st16_iss(s, tmp, addr,
9808 get_mem_index(s),
9809 rm | ISSIsAcqRel);
9810 break;
9811 default:
9812 abort();
9814 tcg_temp_free_i32(tmp);
9816 } else if (is_ld) {
9817 switch (op1) {
9818 case 0: /* ldrex */
9819 gen_load_exclusive(s, rd, 15, addr, 2);
9820 break;
9821 case 1: /* ldrexd */
9822 gen_load_exclusive(s, rd, rd + 1, addr, 3);
9823 break;
9824 case 2: /* ldrexb */
9825 gen_load_exclusive(s, rd, 15, addr, 0);
9826 break;
9827 case 3: /* ldrexh */
9828 gen_load_exclusive(s, rd, 15, addr, 1);
9829 break;
9830 default:
9831 abort();
9833 } else {
9834 rm = insn & 0xf;
9835 switch (op1) {
9836 case 0: /* strex */
9837 gen_store_exclusive(s, rd, rm, 15, addr, 2);
9838 break;
9839 case 1: /* strexd */
9840 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
9841 break;
9842 case 2: /* strexb */
9843 gen_store_exclusive(s, rd, rm, 15, addr, 0);
9844 break;
9845 case 3: /* strexh */
9846 gen_store_exclusive(s, rd, rm, 15, addr, 1);
9847 break;
9848 default:
9849 abort();
9852 tcg_temp_free_i32(addr);
9854 if (is_lasr && is_ld) {
9855 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
9857 } else if ((insn & 0x00300f00) == 0) {
9858 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
9859 * - SWP, SWPB
9860 */
9862 TCGv taddr;
9863 TCGMemOp opc = s->be_data;
9865 rm = (insn) & 0xf;
9867 if (insn & (1 << 22)) {
9868 opc |= MO_UB;
9869 } else {
9870 opc |= MO_UL | MO_ALIGN;
9873 addr = load_reg(s, rn);
9874 taddr = gen_aa32_addr(s, addr, opc);
9875 tcg_temp_free_i32(addr);
9877 tmp = load_reg(s, rm);
9878 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
9879 get_mem_index(s), opc);
9880 tcg_temp_free(taddr);
9881 store_reg(s, rd, tmp);
9882 } else {
9883 goto illegal_op;
9886 } else {
9887 int address_offset;
9888 bool load = insn & (1 << 20);
9889 bool wbit = insn & (1 << 21);
9890 bool pbit = insn & (1 << 24);
9891 bool doubleword = false;
9892 ISSInfo issinfo;
9894 /* Misc load/store */
9895 rn = (insn >> 16) & 0xf;
9896 rd = (insn >> 12) & 0xf;
9898 /* ISS not valid if writeback */
9899 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
9901 if (!load && (sh & 2)) {
9902 /* doubleword */
9903 ARCH(5TE);
9904 if (rd & 1) {
9905 /* UNPREDICTABLE; we choose to UNDEF */
9906 goto illegal_op;
9908 load = (sh & 1) == 0;
9909 doubleword = true;
9912 addr = load_reg(s, rn);
9913 if (pbit) {
9914 gen_add_datah_offset(s, insn, 0, addr);
9916 address_offset = 0;
9918 if (doubleword) {
9919 if (!load) {
9920 /* store */
9921 tmp = load_reg(s, rd);
9922 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9923 tcg_temp_free_i32(tmp);
9924 tcg_gen_addi_i32(addr, addr, 4);
9925 tmp = load_reg(s, rd + 1);
9926 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9927 tcg_temp_free_i32(tmp);
9928 } else {
9929 /* load */
9930 tmp = tcg_temp_new_i32();
9931 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9932 store_reg(s, rd, tmp);
9933 tcg_gen_addi_i32(addr, addr, 4);
9934 tmp = tcg_temp_new_i32();
9935 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9936 rd++;
9938 address_offset = -4;
9939 } else if (load) {
9940 /* load */
9941 tmp = tcg_temp_new_i32();
9942 switch (sh) {
9943 case 1:
9944 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9945 issinfo);
9946 break;
9947 case 2:
9948 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9949 issinfo);
9950 break;
9951 default:
9952 case 3:
9953 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9954 issinfo);
9955 break;
9957 } else {
9958 /* store */
9959 tmp = load_reg(s, rd);
9960 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
9961 tcg_temp_free_i32(tmp);
9963 /* Perform base writeback before the loaded value to
9964 ensure correct behavior with overlapping index registers.
9965 ldrd with base writeback is undefined if the
9966 destination and index registers overlap. */
9967 if (!pbit) {
9968 gen_add_datah_offset(s, insn, address_offset, addr);
9969 store_reg(s, rn, addr);
9970 } else if (wbit) {
9971 if (address_offset)
9972 tcg_gen_addi_i32(addr, addr, address_offset);
9973 store_reg(s, rn, addr);
9974 } else {
9975 tcg_temp_free_i32(addr);
9977 if (load) {
9978 /* Complete the load. */
9979 store_reg(s, rd, tmp);
9982 break;
9983 case 0x4:
9984 case 0x5:
9985 goto do_ldst;
9986 case 0x6:
9987 case 0x7:
9988 if (insn & (1 << 4)) {
9989 ARCH(6);
9990 /* Armv6 Media instructions. */
9991 rm = insn & 0xf;
9992 rn = (insn >> 16) & 0xf;
9993 rd = (insn >> 12) & 0xf;
9994 rs = (insn >> 8) & 0xf;
9995 switch ((insn >> 23) & 3) {
9996 case 0: /* Parallel add/subtract. */
9997 op1 = (insn >> 20) & 7;
9998 tmp = load_reg(s, rn);
9999 tmp2 = load_reg(s, rm);
10000 sh = (insn >> 5) & 7;
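/* Per the usual A32 encoding, op1 selects the prefix (1 S, 2 Q, 3 SH,
 * 5 U, 6 UQ, 7 UH) and sh the operation (0 ADD16, 1 ASX, 2 SAX,
 * 3 SUB16, 4 ADD8, 7 SUB8). */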
10001 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
10002 goto illegal_op;
10003 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
10004 tcg_temp_free_i32(tmp2);
10005 store_reg(s, rd, tmp);
10006 break;
10007 case 1:
10008 if ((insn & 0x00700020) == 0) {
10009 /* Halfword pack. */
10010 tmp = load_reg(s, rn);
10011 tmp2 = load_reg(s, rm);
10012 shift = (insn >> 7) & 0x1f;
10013 if (insn & (1 << 6)) {
10014 /* pkhtb */
10015 if (shift == 0)
10016 shift = 31;
10017 tcg_gen_sari_i32(tmp2, tmp2, shift);
10018 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
10019 tcg_gen_ext16u_i32(tmp2, tmp2);
10020 } else {
10021 /* pkhbt */
10022 if (shift)
10023 tcg_gen_shli_i32(tmp2, tmp2, shift);
10024 tcg_gen_ext16u_i32(tmp, tmp);
10025 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10027 tcg_gen_or_i32(tmp, tmp, tmp2);
10028 tcg_temp_free_i32(tmp2);
10029 store_reg(s, rd, tmp);
10030 } else if ((insn & 0x00200020) == 0x00200000) {
10031 /* [us]sat */
10032 tmp = load_reg(s, rm);
10033 shift = (insn >> 7) & 0x1f;
10034 if (insn & (1 << 6)) {
10035 if (shift == 0)
10036 shift = 31;
10037 tcg_gen_sari_i32(tmp, tmp, shift);
10038 } else {
10039 tcg_gen_shli_i32(tmp, tmp, shift);
10041 sh = (insn >> 16) & 0x1f;
10042 tmp2 = tcg_const_i32(sh);
10043 if (insn & (1 << 22))
10044 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
10045 else
10046 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
10047 tcg_temp_free_i32(tmp2);
10048 store_reg(s, rd, tmp);
10049 } else if ((insn & 0x00300fe0) == 0x00200f20) {
10050 /* [us]sat16 */
10051 tmp = load_reg(s, rm);
10052 sh = (insn >> 16) & 0x1f;
10053 tmp2 = tcg_const_i32(sh);
10054 if (insn & (1 << 22))
10055 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
10056 else
10057 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
10058 tcg_temp_free_i32(tmp2);
10059 store_reg(s, rd, tmp);
10060 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
10061 /* Select bytes. */
10062 tmp = load_reg(s, rn);
10063 tmp2 = load_reg(s, rm);
10064 tmp3 = tcg_temp_new_i32();
10065 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
10066 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
10067 tcg_temp_free_i32(tmp3);
10068 tcg_temp_free_i32(tmp2);
10069 store_reg(s, rd, tmp);
10070 } else if ((insn & 0x000003e0) == 0x00000060) {
10071 tmp = load_reg(s, rm);
10072 shift = (insn >> 10) & 3;
10073 /* ??? In many cases it's not necessary to do a
10074 rotate, a shift is sufficient. */
10075 if (shift != 0)
10076 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
10077 op1 = (insn >> 20) & 7;
10078 switch (op1) {
10079 case 0: gen_sxtb16(tmp); break;
10080 case 2: gen_sxtb(tmp); break;
10081 case 3: gen_sxth(tmp); break;
10082 case 4: gen_uxtb16(tmp); break;
10083 case 6: gen_uxtb(tmp); break;
10084 case 7: gen_uxth(tmp); break;
10085 default: goto illegal_op;
10087 if (rn != 15) {
10088 tmp2 = load_reg(s, rn);
10089 if ((op1 & 3) == 0) {
10090 gen_add16(tmp, tmp2);
10091 } else {
10092 tcg_gen_add_i32(tmp, tmp, tmp2);
10093 tcg_temp_free_i32(tmp2);
10096 store_reg(s, rd, tmp);
10097 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
10098 /* rev */
10099 tmp = load_reg(s, rm);
10100 if (insn & (1 << 22)) {
10101 if (insn & (1 << 7)) {
10102 gen_revsh(tmp);
10103 } else {
10104 ARCH(6T2);
10105 gen_helper_rbit(tmp, tmp);
10107 } else {
10108 if (insn & (1 << 7))
10109 gen_rev16(tmp);
10110 else
10111 tcg_gen_bswap32_i32(tmp, tmp);
10113 store_reg(s, rd, tmp);
10114 } else {
10115 goto illegal_op;
10117 break;
10118 case 2: /* Multiplies (Type 3). */
10119 switch ((insn >> 20) & 0x7) {
10120 case 5:
10121 if (((insn >> 6) ^ (insn >> 7)) & 1) {
10122 /* op2 not 00x or 11x : UNDEF */
10123 goto illegal_op;
10125 /* Signed multiply most significant [accumulate].
10126 (SMMUL, SMMLA, SMMLS) */
10127 tmp = load_reg(s, rm);
10128 tmp2 = load_reg(s, rs);
10129 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10131 if (rd != 15) {
10132 tmp = load_reg(s, rd);
10133 if (insn & (1 << 6)) {
10134 tmp64 = gen_subq_msw(tmp64, tmp);
10135 } else {
10136 tmp64 = gen_addq_msw(tmp64, tmp);
10139 if (insn & (1 << 5)) {
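/* The 'R' (round) variant: adding 0x80000000 before taking the high
 * word rounds the 64-bit product to nearest instead of truncating. */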
10140 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10142 tcg_gen_shri_i64(tmp64, tmp64, 32);
10143 tmp = tcg_temp_new_i32();
10144 tcg_gen_extrl_i64_i32(tmp, tmp64);
10145 tcg_temp_free_i64(tmp64);
10146 store_reg(s, rn, tmp);
10147 break;
10148 case 0:
10149 case 4:
10150 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
10151 if (insn & (1 << 7)) {
10152 goto illegal_op;
10154 tmp = load_reg(s, rm);
10155 tmp2 = load_reg(s, rs);
10156 if (insn & (1 << 5))
10157 gen_swap_half(tmp2);
10158 gen_smul_dual(tmp, tmp2);
10159 if (insn & (1 << 22)) {
10160 /* smlald, smlsld */
10161 TCGv_i64 tmp64_2;
10163 tmp64 = tcg_temp_new_i64();
10164 tmp64_2 = tcg_temp_new_i64();
10165 tcg_gen_ext_i32_i64(tmp64, tmp);
10166 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
10167 tcg_temp_free_i32(tmp);
10168 tcg_temp_free_i32(tmp2);
10169 if (insn & (1 << 6)) {
10170 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
10171 } else {
10172 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
10174 tcg_temp_free_i64(tmp64_2);
10175 gen_addq(s, tmp64, rd, rn);
10176 gen_storeq_reg(s, rd, rn, tmp64);
10177 tcg_temp_free_i64(tmp64);
10178 } else {
10179 /* smuad, smusd, smlad, smlsd */
10180 if (insn & (1 << 6)) {
10181 /* This subtraction cannot overflow. */
10182 tcg_gen_sub_i32(tmp, tmp, tmp2);
10183 } else {
10184 /* This addition cannot overflow 32 bits;
10185 * however it may overflow when considered as a
10186 * signed operation, in which case we must set
10187 * the Q flag.
10188 */
10189 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10191 tcg_temp_free_i32(tmp2);
10192 if (rd != 15)
10193 {
10194 tmp2 = load_reg(s, rd);
10195 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10196 tcg_temp_free_i32(tmp2);
10198 store_reg(s, rn, tmp);
10200 break;
10201 case 1:
10202 case 3:
10203 /* SDIV, UDIV */
10204 if (!dc_isar_feature(arm_div, s)) {
10205 goto illegal_op;
10207 if (((insn >> 5) & 7) || (rd != 15)) {
10208 goto illegal_op;
10210 tmp = load_reg(s, rm);
10211 tmp2 = load_reg(s, rs);
10212 if (insn & (1 << 21)) {
10213 gen_helper_udiv(tmp, tmp, tmp2);
10214 } else {
10215 gen_helper_sdiv(tmp, tmp, tmp2);
10217 tcg_temp_free_i32(tmp2);
10218 store_reg(s, rn, tmp);
10219 break;
10220 default:
10221 goto illegal_op;
10223 break;
10224 case 3:
10225 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
10226 switch (op1) {
10227 case 0: /* Unsigned sum of absolute differences. */
10228 ARCH(6);
10229 tmp = load_reg(s, rm);
10230 tmp2 = load_reg(s, rs);
10231 gen_helper_usad8(tmp, tmp, tmp2);
10232 tcg_temp_free_i32(tmp2);
10233 if (rd != 15) {
10234 tmp2 = load_reg(s, rd);
10235 tcg_gen_add_i32(tmp, tmp, tmp2);
10236 tcg_temp_free_i32(tmp2);
10238 store_reg(s, rn, tmp);
10239 break;
10240 case 0x20: case 0x24: case 0x28: case 0x2c:
10241 /* Bitfield insert/clear. */
10242 ARCH(6T2);
10243 shift = (insn >> 7) & 0x1f;
10244 i = (insn >> 16) & 0x1f;
10245 if (i < shift) {
10246 /* UNPREDICTABLE; we choose to UNDEF */
10247 goto illegal_op;
10249 i = i + 1 - shift;
10250 if (rm == 15) {
10251 tmp = tcg_temp_new_i32();
10252 tcg_gen_movi_i32(tmp, 0);
10253 } else {
10254 tmp = load_reg(s, rm);
10256 if (i != 32) {
10257 tmp2 = load_reg(s, rd);
10258 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
10259 tcg_temp_free_i32(tmp2);
10261 store_reg(s, rd, tmp);
10262 break;
10263 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
10264 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
10265 ARCH(6T2);
10266 tmp = load_reg(s, rm);
10267 shift = (insn >> 7) & 0x1f;
10268 i = ((insn >> 16) & 0x1f) + 1;
10269 if (shift + i > 32)
10270 goto illegal_op;
10271 if (i < 32) {
10272 if (op1 & 0x20) {
10273 tcg_gen_extract_i32(tmp, tmp, shift, i);
10274 } else {
10275 tcg_gen_sextract_i32(tmp, tmp, shift, i);
10278 store_reg(s, rd, tmp);
10279 break;
10280 default:
10281 goto illegal_op;
10283 break;
10285 break;
10287 do_ldst:
10288 /* Check for undefined extension instructions
10289 * per the ARM Bible, i.e.:
10290 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
10291 */
10292 sh = (0xf << 20) | (0xf << 4);
10293 if (op1 == 0x7 && ((insn & sh) == sh))
10295 goto illegal_op;
10297 /* load/store byte/word */
10298 rn = (insn >> 16) & 0xf;
10299 rd = (insn >> 12) & 0xf;
10300 tmp2 = load_reg(s, rn);
10301 if ((insn & 0x01200000) == 0x00200000) {
10302 /* ldrt/strt */
10303 i = get_a32_user_mem_index(s);
10304 } else {
10305 i = get_mem_index(s);
10307 if (insn & (1 << 24))
10308 gen_add_data_offset(s, insn, tmp2);
10309 if (insn & (1 << 20)) {
10310 /* load */
10311 tmp = tcg_temp_new_i32();
10312 if (insn & (1 << 22)) {
10313 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
10314 } else {
10315 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
10317 } else {
10318 /* store */
10319 tmp = load_reg(s, rd);
10320 if (insn & (1 << 22)) {
10321 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
10322 } else {
10323 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
10325 tcg_temp_free_i32(tmp);
10327 if (!(insn & (1 << 24))) {
10328 gen_add_data_offset(s, insn, tmp2);
10329 store_reg(s, rn, tmp2);
10330 } else if (insn & (1 << 21)) {
10331 store_reg(s, rn, tmp2);
10332 } else {
10333 tcg_temp_free_i32(tmp2);
10335 if (insn & (1 << 20)) {
10336 /* Complete the load. */
10337 store_reg_from_load(s, rd, tmp);
10339 break;
10340 case 0x08:
10341 case 0x09:
10343 int j, n, loaded_base;
10344 bool exc_return = false;
10345 bool is_load = extract32(insn, 20, 1);
10346 bool user = false;
10347 TCGv_i32 loaded_var;
10348 /* load/store multiple words */
10349 /* XXX: store correct base if write back */
10350 if (insn & (1 << 22)) {
10351 /* LDM (user), LDM (exception return) and STM (user) */
10352 if (IS_USER(s))
10353 goto illegal_op; /* only usable in supervisor mode */
10355 if (is_load && extract32(insn, 15, 1)) {
10356 exc_return = true;
10357 } else {
10358 user = true;
10361 rn = (insn >> 16) & 0xf;
10362 addr = load_reg(s, rn);
10364 /* compute total size */
10365 loaded_base = 0;
10366 loaded_var = NULL;
10367 n = 0;
10368 for(i=0;i<16;i++) {
10369 if (insn & (1 << i))
10370 n++;
10372 /* XXX: test invalid n == 0 case ? */
10373 if (insn & (1 << 23)) {
10374 if (insn & (1 << 24)) {
10375 /* pre increment */
10376 tcg_gen_addi_i32(addr, addr, 4);
10377 } else {
10378 /* post increment */
10380 } else {
10381 if (insn & (1 << 24)) {
10382 /* pre decrement */
10383 tcg_gen_addi_i32(addr, addr, -(n * 4));
10384 } else {
10385 /* post decrement */
10386 if (n != 1)
10387 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
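/* After the adjustments above, addr points at the lowest word to be
 * transferred; the loop below then walks upwards in steps of 4. */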
10390 j = 0;
10391 for(i=0;i<16;i++) {
10392 if (insn & (1 << i)) {
10393 if (is_load) {
10394 /* load */
10395 tmp = tcg_temp_new_i32();
10396 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10397 if (user) {
10398 tmp2 = tcg_const_i32(i);
10399 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
10400 tcg_temp_free_i32(tmp2);
10401 tcg_temp_free_i32(tmp);
10402 } else if (i == rn) {
10403 loaded_var = tmp;
10404 loaded_base = 1;
10405 } else if (rn == 15 && exc_return) {
10406 store_pc_exc_ret(s, tmp);
10407 } else {
10408 store_reg_from_load(s, i, tmp);
10410 } else {
10411 /* store */
10412 if (i == 15) {
10413 /* special case: r15 = PC + 8 */
10414 val = (long)s->pc + 4;
10415 tmp = tcg_temp_new_i32();
10416 tcg_gen_movi_i32(tmp, val);
10417 } else if (user) {
10418 tmp = tcg_temp_new_i32();
10419 tmp2 = tcg_const_i32(i);
10420 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
10421 tcg_temp_free_i32(tmp2);
10422 } else {
10423 tmp = load_reg(s, i);
10425 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
10426 tcg_temp_free_i32(tmp);
10428 j++;
10429 /* no need to add after the last transfer */
10430 if (j != n)
10431 tcg_gen_addi_i32(addr, addr, 4);
10434 if (insn & (1 << 21)) {
10435 /* write back */
10436 if (insn & (1 << 23)) {
10437 if (insn & (1 << 24)) {
10438 /* pre increment */
10439 } else {
10440 /* post increment */
10441 tcg_gen_addi_i32(addr, addr, 4);
10443 } else {
10444 if (insn & (1 << 24)) {
10445 /* pre decrement */
10446 if (n != 1)
10447 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
10448 } else {
10449 /* post decrement */
10450 tcg_gen_addi_i32(addr, addr, -(n * 4));
10453 store_reg(s, rn, addr);
10454 } else {
10455 tcg_temp_free_i32(addr);
10457 if (loaded_base) {
10458 store_reg(s, rn, loaded_var);
10460 if (exc_return) {
10461 /* Restore CPSR from SPSR. */
10462 tmp = load_cpu_field(spsr);
10463 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10464 gen_io_start();
10466 gen_helper_cpsr_write_eret(cpu_env, tmp);
10467 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10468 gen_io_end();
10470 tcg_temp_free_i32(tmp);
10471 /* Must exit loop to check un-masked IRQs */
10472 s->base.is_jmp = DISAS_EXIT;
10475 break;
10476 case 0xa:
10477 case 0xb:
10479 int32_t offset;
10481 /* branch (and link) */
10482 val = (int32_t)s->pc;
10483 if (insn & (1 << 24)) {
10484 tmp = tcg_temp_new_i32();
10485 tcg_gen_movi_i32(tmp, val);
10486 store_reg(s, 14, tmp);
10488 offset = sextract32(insn << 2, 0, 26);
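/* s->pc already points at the next instruction (so LR above holds the
 * correct return address); the extra 4 added below accounts for the
 * remaining pipeline offset, since an ARM-state PC reads as insn + 8. */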
10489 val += offset + 4;
10490 gen_jmp(s, val);
10492 break;
10493 case 0xc:
10494 case 0xd:
10495 case 0xe:
10496 if (((insn >> 8) & 0xe) == 10) {
10497 /* VFP. */
10498 if (disas_vfp_insn(s, insn)) {
10499 goto illegal_op;
10501 } else if (disas_coproc_insn(s, insn)) {
10502 /* Coprocessor. */
10503 goto illegal_op;
10505 break;
10506 case 0xf:
10507 /* swi */
10508 gen_set_pc_im(s, s->pc);
10509 s->svc_imm = extract32(insn, 0, 24);
10510 s->base.is_jmp = DISAS_SWI;
10511 break;
10512 default:
10513 illegal_op:
10514 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
10515 default_exception_el(s));
10516 break;
10521 static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
10523 /* Return true if this is a 16 bit instruction. We must be precise
10524 * about this (matching the decode). We assume that s->pc still
10525 * points to the first 16 bits of the insn.
10526 */
10527 if ((insn >> 11) < 0x1d) {
10528 /* Definitely a 16-bit instruction */
10529 return true;
10532 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
10533 * first half of a 32-bit Thumb insn. Thumb-1 cores might
10534 * end up actually treating this as two 16-bit insns, though,
10535 * if it's half of a bl/blx pair that might span a page boundary.
10536 */
10537 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
10538 arm_dc_feature(s, ARM_FEATURE_M)) {
10539 /* Thumb2 cores (including all M profile ones) always treat
10540 * 32-bit insns as 32-bit.
10541 */
10542 return false;
10545 if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
10546 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
10547 * is not on the next page; we merge this into a 32-bit
10548 * insn.
10549 */
10550 return false;
10552 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
10553 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
10554 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
10555 * -- handle as single 16 bit insn
10556 */
10557 return true;
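/* Quick reference: the top five bits of the first halfword decide the
 * length. 0b00000..0b11100 is a 16-bit insn; 0b11101, 0b11110 and 0b11111
 * are the first half of a 32-bit insn on Thumb-2 cores, with the BL/BLX
 * page-crossing special case handled above for Thumb-1. */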
10560 /* Return true if this is a Thumb-2 logical op. */
10561 static int
10562 thumb2_logic_op(int op)
10564 return (op < 8);
10567 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
10568 then set condition code flags based on the result of the operation.
10569 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
10570 to the high bit of T1.
10571 Returns zero if the opcode is valid. */
10573 static int
10574 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
10575 TCGv_i32 t0, TCGv_i32 t1)
10577 int logic_cc;
10579 logic_cc = 0;
10580 switch (op) {
10581 case 0: /* and */
10582 tcg_gen_and_i32(t0, t0, t1);
10583 logic_cc = conds;
10584 break;
10585 case 1: /* bic */
10586 tcg_gen_andc_i32(t0, t0, t1);
10587 logic_cc = conds;
10588 break;
10589 case 2: /* orr */
10590 tcg_gen_or_i32(t0, t0, t1);
10591 logic_cc = conds;
10592 break;
10593 case 3: /* orn */
10594 tcg_gen_orc_i32(t0, t0, t1);
10595 logic_cc = conds;
10596 break;
10597 case 4: /* eor */
10598 tcg_gen_xor_i32(t0, t0, t1);
10599 logic_cc = conds;
10600 break;
10601 case 8: /* add */
10602 if (conds)
10603 gen_add_CC(t0, t0, t1);
10604 else
10605 tcg_gen_add_i32(t0, t0, t1);
10606 break;
10607 case 10: /* adc */
10608 if (conds)
10609 gen_adc_CC(t0, t0, t1);
10610 else
10611 gen_adc(t0, t1);
10612 break;
10613 case 11: /* sbc */
10614 if (conds) {
10615 gen_sbc_CC(t0, t0, t1);
10616 } else {
10617 gen_sub_carry(t0, t0, t1);
10619 break;
10620 case 13: /* sub */
10621 if (conds)
10622 gen_sub_CC(t0, t0, t1);
10623 else
10624 tcg_gen_sub_i32(t0, t0, t1);
10625 break;
10626 case 14: /* rsb */
10627 if (conds)
10628 gen_sub_CC(t0, t1, t0);
10629 else
10630 tcg_gen_sub_i32(t0, t1, t0);
10631 break;
10632 default: /* 5, 6, 7, 9, 12, 15. */
10633 return 1;
10635 if (logic_cc) {
10636 gen_logic_CC(t0);
10637 if (shifter_out)
10638 gen_set_CF_bit31(t1);
10640 return 0;
10643 /* Translate a 32-bit thumb instruction. */
10644 static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
10646 uint32_t imm, shift, offset;
10647 uint32_t rd, rn, rm, rs;
10648 TCGv_i32 tmp;
10649 TCGv_i32 tmp2;
10650 TCGv_i32 tmp3;
10651 TCGv_i32 addr;
10652 TCGv_i64 tmp64;
10653 int op;
10654 int shiftop;
10655 int conds;
10656 int logic_cc;
10658 /*
10659 * ARMv6-M supports a limited subset of Thumb2 instructions.
10660 * Other Thumb1 architectures allow only 32-bit
10661 * combined BL/BLX prefix and suffix.
10662 */
10663 if (arm_dc_feature(s, ARM_FEATURE_M) &&
10664 !arm_dc_feature(s, ARM_FEATURE_V7)) {
10665 int i;
10666 bool found = false;
10667 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
10668 0xf3b08040 /* dsb */,
10669 0xf3b08050 /* dmb */,
10670 0xf3b08060 /* isb */,
10671 0xf3e08000 /* mrs */,
10672 0xf000d000 /* bl */};
10673 static const uint32_t armv6m_mask[] = {0xffe0d000,
10674 0xfff0d0f0,
10675 0xfff0d0f0,
10676 0xfff0d0f0,
10677 0xffe0d000,
10678 0xf800d000};
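/* A candidate matches when (insn & armv6m_mask[i]) == armv6m_insn[i]; the
 * table lists the only 32-bit encodings ARMv6-M provides (MSR, DSB, DMB,
 * ISB, MRS and BL). */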
10680 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
10681 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
10682 found = true;
10683 break;
10686 if (!found) {
10687 goto illegal_op;
10689 } else if ((insn & 0xf800e800) != 0xf000e800) {
10690 ARCH(6T2);
10693 rn = (insn >> 16) & 0xf;
10694 rs = (insn >> 12) & 0xf;
10695 rd = (insn >> 8) & 0xf;
10696 rm = insn & 0xf;
10697 switch ((insn >> 25) & 0xf) {
10698 case 0: case 1: case 2: case 3:
10699 /* 16-bit instructions. Should never happen. */
10700 abort();
10701 case 4:
10702 if (insn & (1 << 22)) {
10703 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
10704 * - load/store doubleword, load/store exclusive, load-acquire/store-release,
10705 * table branch, TT.
10706 */
10707 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
10708 arm_dc_feature(s, ARM_FEATURE_V8)) {
10709 /* 0b1110_1001_0111_1111_1110_1001_0111_1111
10710 * - SG (v8M only)
10711 * The bulk of the behaviour for this instruction is implemented
10712 * in v7m_handle_execute_nsc(), which deals with the insn when
10713 * it is executed by a CPU in non-secure state from memory
10714 * which is Secure & NonSecure-Callable.
10715 * Here we only need to handle the remaining cases:
10716 * * in NS memory (including the "security extension not
10717 * implemented" case) : NOP
10718 * * in S memory but CPU already secure (clear IT bits)
10719 * We know that the attribute for the memory this insn is
10720 * in must match the current CPU state, because otherwise
10721 * get_phys_addr_pmsav8 would have generated an exception.
10723 if (s->v8m_secure) {
10724 /* Like the IT insn, we don't need to generate any code */
10725 s->condexec_cond = 0;
10726 s->condexec_mask = 0;
10728 } else if (insn & 0x01200000) {
10729 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10730 * - load/store dual (post-indexed)
10731 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
10732 * - load/store dual (literal and immediate)
10733 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10734 * - load/store dual (pre-indexed)
10736 bool wback = extract32(insn, 21, 1);
10738 if (rn == 15) {
10739 if (insn & (1 << 21)) {
10740 /* UNPREDICTABLE */
10741 goto illegal_op;
10743 addr = tcg_temp_new_i32();
10744 tcg_gen_movi_i32(addr, s->pc & ~3);
10745 } else {
10746 addr = load_reg(s, rn);
10748 offset = (insn & 0xff) * 4;
10749 if ((insn & (1 << 23)) == 0) {
10750 offset = -offset;
10753 if (s->v8m_stackcheck && rn == 13 && wback) {
10755 * Here 'addr' is the current SP; if offset is +ve we're
10756 * moving SP up, else down. It is UNKNOWN whether the limit
10757 * check triggers when SP starts below the limit and ends
10758 * up above it; check whichever of the current and final
10759 * SP is lower, so QEMU will trigger in that situation.
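 * For example, a store such as STRD rt, rt2, [SP, #-8]! has a negative
 * offset, so the final SP (addr + offset) is checked; with a positive
 * offset the current SP is already the lower of the two.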
10761 if ((int32_t)offset < 0) {
10762 TCGv_i32 newsp = tcg_temp_new_i32();
10764 tcg_gen_addi_i32(newsp, addr, offset);
10765 gen_helper_v8m_stackcheck(cpu_env, newsp);
10766 tcg_temp_free_i32(newsp);
10767 } else {
10768 gen_helper_v8m_stackcheck(cpu_env, addr);
10772 if (insn & (1 << 24)) {
10773 tcg_gen_addi_i32(addr, addr, offset);
10774 offset = 0;
10776 if (insn & (1 << 20)) {
10777 /* ldrd */
10778 tmp = tcg_temp_new_i32();
10779 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10780 store_reg(s, rs, tmp);
10781 tcg_gen_addi_i32(addr, addr, 4);
10782 tmp = tcg_temp_new_i32();
10783 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10784 store_reg(s, rd, tmp);
10785 } else {
10786 /* strd */
10787 tmp = load_reg(s, rs);
10788 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
10789 tcg_temp_free_i32(tmp);
10790 tcg_gen_addi_i32(addr, addr, 4);
10791 tmp = load_reg(s, rd);
10792 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
10793 tcg_temp_free_i32(tmp);
10795 if (wback) {
10796 /* Base writeback. */
10797 tcg_gen_addi_i32(addr, addr, offset - 4);
10798 store_reg(s, rn, addr);
10799 } else {
10800 tcg_temp_free_i32(addr);
10802 } else if ((insn & (1 << 23)) == 0) {
10803 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
10804 * - load/store exclusive word
10805 * - TT (v8M only)
10807 if (rs == 15) {
10808 if (!(insn & (1 << 20)) &&
10809 arm_dc_feature(s, ARM_FEATURE_M) &&
10810 arm_dc_feature(s, ARM_FEATURE_V8)) {
10811 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
10812 * - TT (v8M only)
10814 bool alt = insn & (1 << 7);
10815 TCGv_i32 addr, op, ttresp;
10817 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
10818 /* we UNDEF for these UNPREDICTABLE cases */
10819 goto illegal_op;
10822 if (alt && !s->v8m_secure) {
10823 goto illegal_op;
10826 addr = load_reg(s, rn);
10827 op = tcg_const_i32(extract32(insn, 6, 2));
10828 ttresp = tcg_temp_new_i32();
10829 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
10830 tcg_temp_free_i32(addr);
10831 tcg_temp_free_i32(op);
10832 store_reg(s, rd, ttresp);
10833 break;
10835 goto illegal_op;
10837 addr = tcg_temp_local_new_i32();
10838 load_reg_var(s, addr, rn);
10839 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
10840 if (insn & (1 << 20)) {
10841 gen_load_exclusive(s, rs, 15, addr, 2);
10842 } else {
10843 gen_store_exclusive(s, rd, rs, 15, addr, 2);
10845 tcg_temp_free_i32(addr);
10846 } else if ((insn & (7 << 5)) == 0) {
10847 /* Table Branch. */
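/*
 * TBB/TBH branch to PC + 2 * entry, where the entry is a byte loaded
 * from Rn + Rm (TBB) or a halfword loaded from Rn + 2 * Rm (TBH).
 * E.g. a TBB entry of 5 gives a branch to this insn's address + 4 + 10.
 */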
10848 if (rn == 15) {
10849 addr = tcg_temp_new_i32();
10850 tcg_gen_movi_i32(addr, s->pc);
10851 } else {
10852 addr = load_reg(s, rn);
10854 tmp = load_reg(s, rm);
10855 tcg_gen_add_i32(addr, addr, tmp);
10856 if (insn & (1 << 4)) {
10857 /* tbh */
10858 tcg_gen_add_i32(addr, addr, tmp);
10859 tcg_temp_free_i32(tmp);
10860 tmp = tcg_temp_new_i32();
10861 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
10862 } else { /* tbb */
10863 tcg_temp_free_i32(tmp);
10864 tmp = tcg_temp_new_i32();
10865 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
10867 tcg_temp_free_i32(addr);
10868 tcg_gen_shli_i32(tmp, tmp, 1);
10869 tcg_gen_addi_i32(tmp, tmp, s->pc);
10870 store_reg(s, 15, tmp);
10871 } else {
10872 bool is_lasr = false;
10873 bool is_ld = extract32(insn, 20, 1);
10874 int op2 = (insn >> 6) & 0x3;
10875 op = (insn >> 4) & 0x3;
10876 switch (op2) {
10877 case 0:
10878 goto illegal_op;
10879 case 1:
10880 /* Load/store exclusive byte/halfword/doubleword */
10881 if (op == 2) {
10882 goto illegal_op;
10884 ARCH(7);
10885 break;
10886 case 2:
10887 /* Load-acquire/store-release */
10888 if (op == 3) {
10889 goto illegal_op;
10891 /* Fall through */
10892 case 3:
10893 /* Load-acquire/store-release exclusive */
10894 ARCH(8);
10895 is_lasr = true;
10896 break;
10899 if (is_lasr && !is_ld) {
10900 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
10903 addr = tcg_temp_local_new_i32();
10904 load_reg_var(s, addr, rn);
10905 if (!(op2 & 1)) {
10906 if (is_ld) {
10907 tmp = tcg_temp_new_i32();
10908 switch (op) {
10909 case 0: /* ldab */
10910 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
10911 rs | ISSIsAcqRel);
10912 break;
10913 case 1: /* ldah */
10914 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
10915 rs | ISSIsAcqRel);
10916 break;
10917 case 2: /* lda */
10918 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
10919 rs | ISSIsAcqRel);
10920 break;
10921 default:
10922 abort();
10924 store_reg(s, rs, tmp);
10925 } else {
10926 tmp = load_reg(s, rs);
10927 switch (op) {
10928 case 0: /* stlb */
10929 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
10930 rs | ISSIsAcqRel);
10931 break;
10932 case 1: /* stlh */
10933 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
10934 rs | ISSIsAcqRel);
10935 break;
10936 case 2: /* stl */
10937 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
10938 rs | ISSIsAcqRel);
10939 break;
10940 default:
10941 abort();
10943 tcg_temp_free_i32(tmp);
10945 } else if (is_ld) {
10946 gen_load_exclusive(s, rs, rd, addr, op);
10947 } else {
10948 gen_store_exclusive(s, rm, rs, rd, addr, op);
10950 tcg_temp_free_i32(addr);
10952 if (is_lasr && is_ld) {
10953 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
10956 } else {
10957 /* Load/store multiple, RFE, SRS. */
10958 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
10959 /* RFE, SRS: not available in user mode or on M profile */
10960 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
10961 goto illegal_op;
10963 if (insn & (1 << 20)) {
10964 /* rfe */
10965 addr = load_reg(s, rn);
10966 if ((insn & (1 << 24)) == 0)
10967 tcg_gen_addi_i32(addr, addr, -8);
10968 /* Load PC into tmp and CPSR into tmp2. */
10969 tmp = tcg_temp_new_i32();
10970 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10971 tcg_gen_addi_i32(addr, addr, 4);
10972 tmp2 = tcg_temp_new_i32();
10973 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
10974 if (insn & (1 << 21)) {
10975 /* Base writeback. */
10976 if (insn & (1 << 24)) {
10977 tcg_gen_addi_i32(addr, addr, 4);
10978 } else {
10979 tcg_gen_addi_i32(addr, addr, -4);
10981 store_reg(s, rn, addr);
10982 } else {
10983 tcg_temp_free_i32(addr);
10985 gen_rfe(s, tmp, tmp2);
10986 } else {
10987 /* srs */
10988 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
10989 insn & (1 << 21));
10991 } else {
10992 int i, loaded_base = 0;
10993 TCGv_i32 loaded_var;
10994 bool wback = extract32(insn, 21, 1);
10995 /* Load/store multiple. */
10996 addr = load_reg(s, rn);
10997 offset = 0;
10998 for (i = 0; i < 16; i++) {
10999 if (insn & (1 << i))
11000 offset += 4;
11003 if (insn & (1 << 24)) {
11004 tcg_gen_addi_i32(addr, addr, -offset);
11007 if (s->v8m_stackcheck && rn == 13 && wback) {
11009 * If the writeback is incrementing SP rather than
11010 * decrementing it, and the initial SP is below the
11011 * stack limit but the final written-back SP would
11012 * be above, then we must not perform any memory
11013 * accesses, but it is IMPDEF whether we generate
11014 * an exception. We choose to do so in this case.
11015 * At this point 'addr' is the lowest address, so
11016 * either the original SP (if incrementing) or our
11017 * final SP (if decrementing), so that's what we check.
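 * For example, STMDB sp!, {r4-r7} decrements SP by 16, so addr here
 * is SP - 16, the final SP, and that is the value checked.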
11019 gen_helper_v8m_stackcheck(cpu_env, addr);
11022 loaded_var = NULL;
11023 for (i = 0; i < 16; i++) {
11024 if ((insn & (1 << i)) == 0)
11025 continue;
11026 if (insn & (1 << 20)) {
11027 /* Load. */
11028 tmp = tcg_temp_new_i32();
11029 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11030 if (i == 15) {
11031 gen_bx_excret(s, tmp);
11032 } else if (i == rn) {
11033 loaded_var = tmp;
11034 loaded_base = 1;
11035 } else {
11036 store_reg(s, i, tmp);
11038 } else {
11039 /* Store. */
11040 tmp = load_reg(s, i);
11041 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11042 tcg_temp_free_i32(tmp);
11044 tcg_gen_addi_i32(addr, addr, 4);
11046 if (loaded_base) {
11047 store_reg(s, rn, loaded_var);
11049 if (wback) {
11050 /* Base register writeback. */
11051 if (insn & (1 << 24)) {
11052 tcg_gen_addi_i32(addr, addr, -offset);
11054 /* Fault if writeback register is in register list. */
11055 if (insn & (1 << rn))
11056 goto illegal_op;
11057 store_reg(s, rn, addr);
11058 } else {
11059 tcg_temp_free_i32(addr);
11063 break;
11064 case 5:
11066 op = (insn >> 21) & 0xf;
11067 if (op == 6) {
11068 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11069 goto illegal_op;
11071 /* Halfword pack. */
11072 tmp = load_reg(s, rn);
11073 tmp2 = load_reg(s, rm);
11074 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
11075 if (insn & (1 << 5)) {
11076 /* pkhtb */
11077 if (shift == 0)
11078 shift = 31;
11079 tcg_gen_sari_i32(tmp2, tmp2, shift);
11080 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
11081 tcg_gen_ext16u_i32(tmp2, tmp2);
11082 } else {
11083 /* pkhbt */
11084 if (shift)
11085 tcg_gen_shli_i32(tmp2, tmp2, shift);
11086 tcg_gen_ext16u_i32(tmp, tmp);
11087 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
11089 tcg_gen_or_i32(tmp, tmp, tmp2);
11090 tcg_temp_free_i32(tmp2);
11091 store_reg(s, rd, tmp);
11092 } else {
11093 /* Data processing register constant shift. */
11094 if (rn == 15) {
11095 tmp = tcg_temp_new_i32();
11096 tcg_gen_movi_i32(tmp, 0);
11097 } else {
11098 tmp = load_reg(s, rn);
11100 tmp2 = load_reg(s, rm);
11102 shiftop = (insn >> 4) & 3;
11103 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
11104 conds = (insn & (1 << 20)) != 0;
11105 logic_cc = (conds && thumb2_logic_op(op));
11106 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
11107 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
11108 goto illegal_op;
11109 tcg_temp_free_i32(tmp2);
11110 if (rd == 13 &&
11111 ((op == 2 && rn == 15) ||
11112 (op == 8 && rn == 13) ||
11113 (op == 13 && rn == 13))) {
11114 /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
11115 store_sp_checked(s, tmp);
11116 } else if (rd != 15) {
11117 store_reg(s, rd, tmp);
11118 } else {
11119 tcg_temp_free_i32(tmp);
11122 break;
11123 case 13: /* Misc data processing. */
11124 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
11125 if (op < 4 && (insn & 0xf000) != 0xf000)
11126 goto illegal_op;
11127 switch (op) {
11128 case 0: /* Register controlled shift. */
11129 tmp = load_reg(s, rn);
11130 tmp2 = load_reg(s, rm);
11131 if ((insn & 0x70) != 0)
11132 goto illegal_op;
11134 * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
11135 * - MOV, MOVS (register-shifted register), flagsetting
11137 op = (insn >> 21) & 3;
11138 logic_cc = (insn & (1 << 20)) != 0;
11139 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
11140 if (logic_cc)
11141 gen_logic_CC(tmp);
11142 store_reg(s, rd, tmp);
11143 break;
11144 case 1: /* Sign/zero extend. */
11145 op = (insn >> 20) & 7;
11146 switch (op) {
11147 case 0: /* SXTAH, SXTH */
11148 case 1: /* UXTAH, UXTH */
11149 case 4: /* SXTAB, SXTB */
11150 case 5: /* UXTAB, UXTB */
11151 break;
11152 case 2: /* SXTAB16, SXTB16 */
11153 case 3: /* UXTAB16, UXTB16 */
11154 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11155 goto illegal_op;
11157 break;
11158 default:
11159 goto illegal_op;
11161 if (rn != 15) {
11162 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11163 goto illegal_op;
11166 tmp = load_reg(s, rm);
11167 shift = (insn >> 4) & 3;
11168 /* ??? In many cases it's not necessary to do a
11169 rotate, a shift is sufficient. */
11170 if (shift != 0)
11171 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
11172 op = (insn >> 20) & 7;
11173 switch (op) {
11174 case 0: gen_sxth(tmp); break;
11175 case 1: gen_uxth(tmp); break;
11176 case 2: gen_sxtb16(tmp); break;
11177 case 3: gen_uxtb16(tmp); break;
11178 case 4: gen_sxtb(tmp); break;
11179 case 5: gen_uxtb(tmp); break;
11180 default:
11181 g_assert_not_reached();
11183 if (rn != 15) {
11184 tmp2 = load_reg(s, rn);
11185 if ((op >> 1) == 1) {
11186 gen_add16(tmp, tmp2);
11187 } else {
11188 tcg_gen_add_i32(tmp, tmp, tmp2);
11189 tcg_temp_free_i32(tmp2);
11192 store_reg(s, rd, tmp);
11193 break;
11194 case 2: /* SIMD add/subtract. */
11195 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11196 goto illegal_op;
11198 op = (insn >> 20) & 7;
11199 shift = (insn >> 4) & 7;
11200 if ((op & 3) == 3 || (shift & 3) == 3)
11201 goto illegal_op;
11202 tmp = load_reg(s, rn);
11203 tmp2 = load_reg(s, rm);
11204 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
11205 tcg_temp_free_i32(tmp2);
11206 store_reg(s, rd, tmp);
11207 break;
11208 case 3: /* Other data processing. */
11209 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
11210 if (op < 4) {
11211 /* Saturating add/subtract. */
11212 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11213 goto illegal_op;
11215 tmp = load_reg(s, rn);
11216 tmp2 = load_reg(s, rm);
11217 if (op & 1)
11218 gen_helper_double_saturate(tmp, cpu_env, tmp);
11219 if (op & 2)
11220 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
11221 else
11222 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
11223 tcg_temp_free_i32(tmp2);
11224 } else {
11225 switch (op) {
11226 case 0x0a: /* rbit */
11227 case 0x08: /* rev */
11228 case 0x09: /* rev16 */
11229 case 0x0b: /* revsh */
11230 case 0x18: /* clz */
11231 break;
11232 case 0x10: /* sel */
11233 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11234 goto illegal_op;
11236 break;
11237 case 0x20: /* crc32/crc32c */
11238 case 0x21:
11239 case 0x22:
11240 case 0x28:
11241 case 0x29:
11242 case 0x2a:
11243 if (!dc_isar_feature(aa32_crc32, s)) {
11244 goto illegal_op;
11246 break;
11247 default:
11248 goto illegal_op;
11250 tmp = load_reg(s, rn);
11251 switch (op) {
11252 case 0x0a: /* rbit */
11253 gen_helper_rbit(tmp, tmp);
11254 break;
11255 case 0x08: /* rev */
11256 tcg_gen_bswap32_i32(tmp, tmp);
11257 break;
11258 case 0x09: /* rev16 */
11259 gen_rev16(tmp);
11260 break;
11261 case 0x0b: /* revsh */
11262 gen_revsh(tmp);
11263 break;
11264 case 0x10: /* sel */
11265 tmp2 = load_reg(s, rm);
11266 tmp3 = tcg_temp_new_i32();
11267 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
11268 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
11269 tcg_temp_free_i32(tmp3);
11270 tcg_temp_free_i32(tmp2);
11271 break;
11272 case 0x18: /* clz */
11273 tcg_gen_clzi_i32(tmp, tmp, 32);
11274 break;
11275 case 0x20:
11276 case 0x21:
11277 case 0x22:
11278 case 0x28:
11279 case 0x29:
11280 case 0x2a:
11282 /* crc32/crc32c */
11283 uint32_t sz = op & 0x3;
11284 uint32_t c = op & 0x8;
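/* op 0x20..0x22 are CRC32B/H/W and 0x28..0x2a CRC32CB/H/W, so sz is
 * the log2 access size and c selects the CRC32C polynomial; 1 << sz
 * below is the byte count passed to the helper.
 */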
11286 tmp2 = load_reg(s, rm);
11287 if (sz == 0) {
11288 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
11289 } else if (sz == 1) {
11290 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
11292 tmp3 = tcg_const_i32(1 << sz);
11293 if (c) {
11294 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
11295 } else {
11296 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
11298 tcg_temp_free_i32(tmp2);
11299 tcg_temp_free_i32(tmp3);
11300 break;
11302 default:
11303 g_assert_not_reached();
11306 store_reg(s, rd, tmp);
11307 break;
11308 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
11309 switch ((insn >> 20) & 7) {
11310 case 0: /* 32 x 32 -> 32 */
11311 case 7: /* Unsigned sum of absolute differences. */
11312 break;
11313 case 1: /* 16 x 16 -> 32 */
11314 case 2: /* Dual multiply add. */
11315 case 3: /* 32 * 16 -> 32msb */
11316 case 4: /* Dual multiply subtract. */
11317 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
11318 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11319 goto illegal_op;
11321 break;
11323 op = (insn >> 4) & 0xf;
11324 tmp = load_reg(s, rn);
11325 tmp2 = load_reg(s, rm);
11326 switch ((insn >> 20) & 7) {
11327 case 0: /* 32 x 32 -> 32 */
11328 tcg_gen_mul_i32(tmp, tmp, tmp2);
11329 tcg_temp_free_i32(tmp2);
11330 if (rs != 15) {
11331 tmp2 = load_reg(s, rs);
11332 if (op)
11333 tcg_gen_sub_i32(tmp, tmp2, tmp);
11334 else
11335 tcg_gen_add_i32(tmp, tmp, tmp2);
11336 tcg_temp_free_i32(tmp2);
11338 break;
11339 case 1: /* 16 x 16 -> 32 */
11340 gen_mulxy(tmp, tmp2, op & 2, op & 1);
11341 tcg_temp_free_i32(tmp2);
11342 if (rs != 15) {
11343 tmp2 = load_reg(s, rs);
11344 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
11345 tcg_temp_free_i32(tmp2);
11347 break;
11348 case 2: /* Dual multiply add. */
11349 case 4: /* Dual multiply subtract. */
11350 if (op)
11351 gen_swap_half(tmp2);
11352 gen_smul_dual(tmp, tmp2);
11353 if (insn & (1 << 22)) {
11354 /* This subtraction cannot overflow. */
11355 tcg_gen_sub_i32(tmp, tmp, tmp2);
11356 } else {
11357 /* This addition cannot overflow 32 bits;
11358 * however it may overflow considered as a signed
11359 * operation, in which case we must set the Q flag.
11361 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
11363 tcg_temp_free_i32(tmp2);
11364 if (rs != 15)
11366 tmp2 = load_reg(s, rs);
11367 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
11368 tcg_temp_free_i32(tmp2);
11370 break;
11371 case 3: /* 32 * 16 -> 32msb */
11372 if (op)
11373 tcg_gen_sari_i32(tmp2, tmp2, 16);
11374 else
11375 gen_sxth(tmp2);
11376 tmp64 = gen_muls_i64_i32(tmp, tmp2);
11377 tcg_gen_shri_i64(tmp64, tmp64, 16);
11378 tmp = tcg_temp_new_i32();
11379 tcg_gen_extrl_i64_i32(tmp, tmp64);
11380 tcg_temp_free_i64(tmp64);
11381 if (rs != 15)
11383 tmp2 = load_reg(s, rs);
11384 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
11385 tcg_temp_free_i32(tmp2);
11387 break;
11388 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
11389 tmp64 = gen_muls_i64_i32(tmp, tmp2);
11390 if (rs != 15) {
11391 tmp = load_reg(s, rs);
11392 if (insn & (1 << 20)) {
11393 tmp64 = gen_addq_msw(tmp64, tmp);
11394 } else {
11395 tmp64 = gen_subq_msw(tmp64, tmp);
11398 if (insn & (1 << 4)) {
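/* Rounding variant (SMMULR/SMMLAR/SMMLSR): bias the 64-bit result by
 * 0x80000000 so that taking the high word below rounds to nearest
 * instead of truncating.
 */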
11399 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
11401 tcg_gen_shri_i64(tmp64, tmp64, 32);
11402 tmp = tcg_temp_new_i32();
11403 tcg_gen_extrl_i64_i32(tmp, tmp64);
11404 tcg_temp_free_i64(tmp64);
11405 break;
11406 case 7: /* Unsigned sum of absolute differences. */
11407 gen_helper_usad8(tmp, tmp, tmp2);
11408 tcg_temp_free_i32(tmp2);
11409 if (rs != 15) {
11410 tmp2 = load_reg(s, rs);
11411 tcg_gen_add_i32(tmp, tmp, tmp2);
11412 tcg_temp_free_i32(tmp2);
11414 break;
11416 store_reg(s, rd, tmp);
11417 break;
11418 case 6: case 7: /* 64-bit multiply, Divide. */
11419 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
11420 tmp = load_reg(s, rn);
11421 tmp2 = load_reg(s, rm);
11422 if ((op & 0x50) == 0x10) {
11423 /* sdiv, udiv */
11424 if (!dc_isar_feature(thumb_div, s)) {
11425 goto illegal_op;
11427 if (op & 0x20)
11428 gen_helper_udiv(tmp, tmp, tmp2);
11429 else
11430 gen_helper_sdiv(tmp, tmp, tmp2);
11431 tcg_temp_free_i32(tmp2);
11432 store_reg(s, rd, tmp);
11433 } else if ((op & 0xe) == 0xc) {
11434 /* Dual multiply accumulate long. */
11435 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11436 tcg_temp_free_i32(tmp);
11437 tcg_temp_free_i32(tmp2);
11438 goto illegal_op;
11440 if (op & 1)
11441 gen_swap_half(tmp2);
11442 gen_smul_dual(tmp, tmp2);
11443 if (op & 0x10) {
11444 tcg_gen_sub_i32(tmp, tmp, tmp2);
11445 } else {
11446 tcg_gen_add_i32(tmp, tmp, tmp2);
11448 tcg_temp_free_i32(tmp2);
11449 /* BUGFIX */
11450 tmp64 = tcg_temp_new_i64();
11451 tcg_gen_ext_i32_i64(tmp64, tmp);
11452 tcg_temp_free_i32(tmp);
11453 gen_addq(s, tmp64, rs, rd);
11454 gen_storeq_reg(s, rs, rd, tmp64);
11455 tcg_temp_free_i64(tmp64);
11456 } else {
11457 if (op & 0x20) {
11458 /* Unsigned 64-bit multiply */
11459 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
11460 } else {
11461 if (op & 8) {
11462 /* smlalxy */
11463 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11464 tcg_temp_free_i32(tmp2);
11465 tcg_temp_free_i32(tmp);
11466 goto illegal_op;
11468 gen_mulxy(tmp, tmp2, op & 2, op & 1);
11469 tcg_temp_free_i32(tmp2);
11470 tmp64 = tcg_temp_new_i64();
11471 tcg_gen_ext_i32_i64(tmp64, tmp);
11472 tcg_temp_free_i32(tmp);
11473 } else {
11474 /* Signed 64-bit multiply */
11475 tmp64 = gen_muls_i64_i32(tmp, tmp2);
11478 if (op & 4) {
11479 /* umaal */
11480 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11481 tcg_temp_free_i64(tmp64);
11482 goto illegal_op;
11484 gen_addq_lo(s, tmp64, rs);
11485 gen_addq_lo(s, tmp64, rd);
11486 } else if (op & 0x40) {
11487 /* 64-bit accumulate. */
11488 gen_addq(s, tmp64, rs, rd);
11490 gen_storeq_reg(s, rs, rd, tmp64);
11491 tcg_temp_free_i64(tmp64);
11493 break;
11495 break;
11496 case 6: case 7: case 14: case 15:
11497 /* Coprocessor. */
11498 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11499 /* We don't currently implement M profile FP support,
11500 * so this entire space should give a NOCP fault, with
11501 * the exception of the v8M VLLDM and VLSTM insns, which
11502 * must be NOPs in Secure state and UNDEF in Nonsecure state.
11504 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
11505 (insn & 0xffa00f00) == 0xec200a00) {
11506 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
11507 * - VLLDM, VLSTM
11508 * We choose to UNDEF if the RAZ bits are non-zero.
11510 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
11511 goto illegal_op;
11513 /* Just NOP since FP support is not implemented */
11514 break;
11516 /* All other insns: NOCP */
11517 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
11518 default_exception_el(s));
11519 break;
11521 if ((insn & 0xfe000a00) == 0xfc000800
11522 && arm_dc_feature(s, ARM_FEATURE_V8)) {
11523 /* The Thumb2 and ARM encodings are identical. */
11524 if (disas_neon_insn_3same_ext(s, insn)) {
11525 goto illegal_op;
11527 } else if ((insn & 0xff000a00) == 0xfe000800
11528 && arm_dc_feature(s, ARM_FEATURE_V8)) {
11529 /* The Thumb2 and ARM encodings are identical. */
11530 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
11531 goto illegal_op;
11533 } else if (((insn >> 24) & 3) == 3) {
11534 /* Translate into the equivalent ARM encoding. */
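/* Thumb Neon data-processing uses leading bytes 0xef/0xff while the
 * ARM encoding uses 0xf2/0xf3, so move bit 28 (the U bit) down to
 * bit 24 and force the top nibble to 0xf.
 */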
11535 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
11536 if (disas_neon_data_insn(s, insn)) {
11537 goto illegal_op;
11539 } else if (((insn >> 8) & 0xe) == 10) {
11540 if (disas_vfp_insn(s, insn)) {
11541 goto illegal_op;
11543 } else {
11544 if (insn & (1 << 28))
11545 goto illegal_op;
11546 if (disas_coproc_insn(s, insn)) {
11547 goto illegal_op;
11550 break;
11551 case 8: case 9: case 10: case 11:
11552 if (insn & (1 << 15)) {
11553 /* Branches, misc control. */
11554 if (insn & 0x5000) {
11555 /* Unconditional branch. */
11556 /* signextend(hw1[10:0]) -> offset[:12]. */
11557 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
11558 /* hw1[10:0] -> offset[11:1]. */
11559 offset |= (insn & 0x7ff) << 1;
11560 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
11561 offset[24:22] already have the same value because of the
11562 sign extension above. */
11563 offset ^= ((~insn) & (1 << 13)) << 10;
11564 offset ^= ((~insn) & (1 << 11)) << 11;
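/* Worked example (illustration): 0xf000f804 (BL) has S=0, imm10=0,
 * J1=J2=1, imm11=4, giving I1=I2=0 and offset=8, i.e. a branch to
 * this insn's address + 4 + 8.
 */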
11566 if (insn & (1 << 14)) {
11567 /* Branch and link. */
11568 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
11571 offset += s->pc;
11572 if (insn & (1 << 12)) {
11573 /* b/bl */
11574 gen_jmp(s, offset);
11575 } else {
11576 /* blx */
11577 offset &= ~(uint32_t)2;
11578 /* thumb2 bx, no need to check */
11579 gen_bx_im(s, offset);
11581 } else if (((insn >> 23) & 7) == 7) {
11582 /* Misc control */
11583 if (insn & (1 << 13))
11584 goto illegal_op;
11586 if (insn & (1 << 26)) {
11587 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11588 goto illegal_op;
11590 if (!(insn & (1 << 20))) {
11591 /* Hypervisor call (v7) */
11592 int imm16 = extract32(insn, 16, 4) << 12
11593 | extract32(insn, 0, 12);
11594 ARCH(7);
11595 if (IS_USER(s)) {
11596 goto illegal_op;
11598 gen_hvc(s, imm16);
11599 } else {
11600 /* Secure monitor call (v6+) */
11601 ARCH(6K);
11602 if (IS_USER(s)) {
11603 goto illegal_op;
11605 gen_smc(s);
11607 } else {
11608 op = (insn >> 20) & 7;
11609 switch (op) {
11610 case 0: /* msr cpsr. */
11611 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11612 tmp = load_reg(s, rn);
11613 /* the constant is the mask and SYSm fields */
11614 addr = tcg_const_i32(insn & 0xfff);
11615 gen_helper_v7m_msr(cpu_env, addr, tmp);
11616 tcg_temp_free_i32(addr);
11617 tcg_temp_free_i32(tmp);
11618 gen_lookup_tb(s);
11619 break;
11621 /* fall through */
11622 case 1: /* msr spsr. */
11623 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11624 goto illegal_op;
11627 if (extract32(insn, 5, 1)) {
11628 /* MSR (banked) */
11629 int sysm = extract32(insn, 8, 4) |
11630 (extract32(insn, 4, 1) << 4);
11631 int r = op & 1;
11633 gen_msr_banked(s, r, sysm, rm);
11634 break;
11637 /* MSR (for PSRs) */
11638 tmp = load_reg(s, rn);
11639 if (gen_set_psr(s,
11640 msr_mask(s, (insn >> 8) & 0xf, op == 1),
11641 op == 1, tmp))
11642 goto illegal_op;
11643 break;
11644 case 2: /* cps, nop-hint. */
11645 if (((insn >> 8) & 7) == 0) {
11646 gen_nop_hint(s, insn & 0xff);
11648 /* Implemented as NOP in user mode. */
11649 if (IS_USER(s))
11650 break;
11651 offset = 0;
11652 imm = 0;
11653 if (insn & (1 << 10)) {
11654 if (insn & (1 << 7))
11655 offset |= CPSR_A;
11656 if (insn & (1 << 6))
11657 offset |= CPSR_I;
11658 if (insn & (1 << 5))
11659 offset |= CPSR_F;
11660 if (insn & (1 << 9))
11661 imm = CPSR_A | CPSR_I | CPSR_F;
11663 if (insn & (1 << 8)) {
11664 offset |= 0x1f;
11665 imm |= (insn & 0x1f);
11667 if (offset) {
11668 gen_set_psr_im(s, offset, 0, imm);
11670 break;
11671 case 3: /* Special control operations. */
11672 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
11673 !arm_dc_feature(s, ARM_FEATURE_M)) {
11674 goto illegal_op;
11676 op = (insn >> 4) & 0xf;
11677 switch (op) {
11678 case 2: /* clrex */
11679 gen_clrex(s);
11680 break;
11681 case 4: /* dsb */
11682 case 5: /* dmb */
11683 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
11684 break;
11685 case 6: /* isb */
11686 /* We need to break the TB after this insn
11687 * to execute self-modifying code correctly
11688 * and also to take any pending interrupts
11689 * immediately.
11691 gen_goto_tb(s, 0, s->pc & ~1);
11692 break;
11693 default:
11694 goto illegal_op;
11696 break;
11697 case 4: /* bxj */
11698 /* Trivial implementation equivalent to bx.
11699 * This instruction doesn't exist at all for M-profile.
11701 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11702 goto illegal_op;
11704 tmp = load_reg(s, rn);
11705 gen_bx(s, tmp);
11706 break;
11707 case 5: /* Exception return. */
11708 if (IS_USER(s)) {
11709 goto illegal_op;
11711 if (rn != 14 || rd != 15) {
11712 goto illegal_op;
11714 if (s->current_el == 2) {
11715 /* ERET from Hyp uses ELR_Hyp, not LR */
11716 if (insn & 0xff) {
11717 goto illegal_op;
11719 tmp = load_cpu_field(elr_el[2]);
11720 } else {
11721 tmp = load_reg(s, rn);
11722 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
11724 gen_exception_return(s, tmp);
11725 break;
11726 case 6: /* MRS */
11727 if (extract32(insn, 5, 1) &&
11728 !arm_dc_feature(s, ARM_FEATURE_M)) {
11729 /* MRS (banked) */
11730 int sysm = extract32(insn, 16, 4) |
11731 (extract32(insn, 4, 1) << 4);
11733 gen_mrs_banked(s, 0, sysm, rd);
11734 break;
11737 if (extract32(insn, 16, 4) != 0xf) {
11738 goto illegal_op;
11740 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
11741 extract32(insn, 0, 8) != 0) {
11742 goto illegal_op;
11745 /* mrs cpsr */
11746 tmp = tcg_temp_new_i32();
11747 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11748 addr = tcg_const_i32(insn & 0xff);
11749 gen_helper_v7m_mrs(tmp, cpu_env, addr);
11750 tcg_temp_free_i32(addr);
11751 } else {
11752 gen_helper_cpsr_read(tmp, cpu_env);
11754 store_reg(s, rd, tmp);
11755 break;
11756 case 7: /* MRS */
11757 if (extract32(insn, 5, 1) &&
11758 !arm_dc_feature(s, ARM_FEATURE_M)) {
11759 /* MRS (banked) */
11760 int sysm = extract32(insn, 16, 4) |
11761 (extract32(insn, 4, 1) << 4);
11763 gen_mrs_banked(s, 1, sysm, rd);
11764 break;
11767 /* mrs spsr. */
11768 /* Not accessible in user mode. */
11769 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
11770 goto illegal_op;
11773 if (extract32(insn, 16, 4) != 0xf ||
11774 extract32(insn, 0, 8) != 0) {
11775 goto illegal_op;
11778 tmp = load_cpu_field(spsr);
11779 store_reg(s, rd, tmp);
11780 break;
11783 } else {
11784 /* Conditional branch. */
11785 op = (insn >> 22) & 0xf;
11786 /* Generate a conditional jump to next instruction. */
11787 arm_skip_unless(s, op);
11789 /* offset[11:1] = insn[10:0] */
11790 offset = (insn & 0x7ff) << 1;
11791 /* offset[17:12] = insn[21:16]. */
11792 offset |= (insn & 0x003f0000) >> 4;
11793 /* offset[31:20] = insn[26]. */
11794 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
11795 /* offset[18] = insn[13]. */
11796 offset |= (insn & (1 << 13)) << 5;
11797 /* offset[19] = insn[11]. */
11798 offset |= (insn & (1 << 11)) << 8;
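/* The assembled value is the 21-bit signed byte offset
 * S:J2:J1:imm6:imm11:'0', so conditional branches reach roughly
 * +/-1MB from the insn address + 4.
 */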
11800 /* jump to the offset */
11801 gen_jmp(s, s->pc + offset);
11803 } else {
11805 * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
11806 * - Data-processing (modified immediate, plain binary immediate)
11808 if (insn & (1 << 25)) {
11810 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
11811 * - Data-processing (plain binary immediate)
11813 if (insn & (1 << 24)) {
11814 if (insn & (1 << 20))
11815 goto illegal_op;
11816 /* Bitfield/Saturate. */
11817 op = (insn >> 21) & 7;
11818 imm = insn & 0x1f;
11819 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
11820 if (rn == 15) {
11821 tmp = tcg_temp_new_i32();
11822 tcg_gen_movi_i32(tmp, 0);
11823 } else {
11824 tmp = load_reg(s, rn);
11826 switch (op) {
11827 case 2: /* Signed bitfield extract. */
11828 imm++;
11829 if (shift + imm > 32)
11830 goto illegal_op;
11831 if (imm < 32) {
11832 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
11834 break;
11835 case 6: /* Unsigned bitfield extract. */
11836 imm++;
11837 if (shift + imm > 32)
11838 goto illegal_op;
11839 if (imm < 32) {
11840 tcg_gen_extract_i32(tmp, tmp, shift, imm);
11842 break;
11843 case 3: /* Bitfield insert/clear. */
11844 if (imm < shift)
11845 goto illegal_op;
11846 imm = imm + 1 - shift;
11847 if (imm != 32) {
11848 tmp2 = load_reg(s, rd);
11849 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
11850 tcg_temp_free_i32(tmp2);
11852 break;
11853 case 7:
11854 goto illegal_op;
11855 default: /* Saturate. */
11856 if (shift) {
11857 if (op & 1)
11858 tcg_gen_sari_i32(tmp, tmp, shift);
11859 else
11860 tcg_gen_shli_i32(tmp, tmp, shift);
11862 tmp2 = tcg_const_i32(imm);
11863 if (op & 4) {
11864 /* Unsigned. */
11865 if ((op & 1) && shift == 0) {
11866 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11867 tcg_temp_free_i32(tmp);
11868 tcg_temp_free_i32(tmp2);
11869 goto illegal_op;
11871 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
11872 } else {
11873 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
11875 } else {
11876 /* Signed. */
11877 if ((op & 1) && shift == 0) {
11878 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11879 tcg_temp_free_i32(tmp);
11880 tcg_temp_free_i32(tmp2);
11881 goto illegal_op;
11883 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
11884 } else {
11885 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
11888 tcg_temp_free_i32(tmp2);
11889 break;
11891 store_reg(s, rd, tmp);
11892 } else {
11893 imm = ((insn & 0x04000000) >> 15)
11894 | ((insn & 0x7000) >> 4) | (insn & 0xff);
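/* imm is now i:imm3:imm8 (12 bits); for MOVW/MOVT the imm4 field in
 * insn[19:16] is merged in below to form a 16-bit immediate.
 */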
11895 if (insn & (1 << 22)) {
11896 /* 16-bit immediate. */
11897 imm |= (insn >> 4) & 0xf000;
11898 if (insn & (1 << 23)) {
11899 /* movt */
11900 tmp = load_reg(s, rd);
11901 tcg_gen_ext16u_i32(tmp, tmp);
11902 tcg_gen_ori_i32(tmp, tmp, imm << 16);
11903 } else {
11904 /* movw */
11905 tmp = tcg_temp_new_i32();
11906 tcg_gen_movi_i32(tmp, imm);
11908 store_reg(s, rd, tmp);
11909 } else {
11910 /* Add/sub 12-bit immediate. */
11911 if (rn == 15) {
11912 offset = s->pc & ~(uint32_t)3;
11913 if (insn & (1 << 23))
11914 offset -= imm;
11915 else
11916 offset += imm;
11917 tmp = tcg_temp_new_i32();
11918 tcg_gen_movi_i32(tmp, offset);
11919 store_reg(s, rd, tmp);
11920 } else {
11921 tmp = load_reg(s, rn);
11922 if (insn & (1 << 23))
11923 tcg_gen_subi_i32(tmp, tmp, imm);
11924 else
11925 tcg_gen_addi_i32(tmp, tmp, imm);
11926 if (rn == 13 && rd == 13) {
11927 /* ADD SP, SP, imm or SUB SP, SP, imm */
11928 store_sp_checked(s, tmp);
11929 } else {
11930 store_reg(s, rd, tmp);
11935 } else {
11937 * 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
11938 * - Data-processing (modified immediate)
11940 int shifter_out = 0;
11941 /* modified 12-bit immediate. */
11942 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
11943 imm = (insn & 0xff);
11944 switch (shift) {
11945 case 0: /* XY */
11946 /* Nothing to do. */
11947 break;
11948 case 1: /* 00XY00XY */
11949 imm |= imm << 16;
11950 break;
11951 case 2: /* XY00XY00 */
11952 imm |= imm << 16;
11953 imm <<= 8;
11954 break;
11955 case 3: /* XYXYXYXY */
11956 imm |= imm << 16;
11957 imm |= imm << 8;
11958 break;
11959 default: /* Rotated constant. */
11960 shift = (shift << 1) | (imm >> 7);
11961 imm |= 0x80;
11962 imm = imm << (32 - shift);
11963 shifter_out = 1;
11964 break;
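/* Illustration: with imm8 = 0x12 the types above give 0x00120012,
 * 0x12001200 and 0x12121212; for the rotated form, i:imm3 = 0b0100
 * with imm8 = 0x20 becomes 0xa0 rotated right by 8, i.e. 0xa0000000.
 */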
11966 tmp2 = tcg_temp_new_i32();
11967 tcg_gen_movi_i32(tmp2, imm);
11968 rn = (insn >> 16) & 0xf;
11969 if (rn == 15) {
11970 tmp = tcg_temp_new_i32();
11971 tcg_gen_movi_i32(tmp, 0);
11972 } else {
11973 tmp = load_reg(s, rn);
11975 op = (insn >> 21) & 0xf;
11976 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
11977 shifter_out, tmp, tmp2))
11978 goto illegal_op;
11979 tcg_temp_free_i32(tmp2);
11980 rd = (insn >> 8) & 0xf;
11981 if (rd == 13 && rn == 13
11982 && (op == 8 || op == 13)) {
11983 /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
11984 store_sp_checked(s, tmp);
11985 } else if (rd != 15) {
11986 store_reg(s, rd, tmp);
11987 } else {
11988 tcg_temp_free_i32(tmp);
11992 break;
11993 case 12: /* Load/store single data item. */
11995 int postinc = 0;
11996 int writeback = 0;
11997 int memidx;
11998 ISSInfo issinfo;
12000 if ((insn & 0x01100000) == 0x01000000) {
12001 if (disas_neon_ls_insn(s, insn)) {
12002 goto illegal_op;
12004 break;
12006 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
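/* op<1:0> is the access size (0: byte, 1: halfword, 2: word) and
 * op<2> (insn bit 24) marks a sign-extending load.
 */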
12007 if (rs == 15) {
12008 if (!(insn & (1 << 20))) {
12009 goto illegal_op;
12011 if (op != 2) {
12012 /* Byte or halfword load space with dest == r15 : memory hints.
12013 * Catch them early so we don't emit pointless addressing code.
12014 * This space is a mix of:
12015 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
12016 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
12017 * cores)
12018 * unallocated hints, which must be treated as NOPs
12019 * UNPREDICTABLE space, which we NOP or UNDEF depending on
12020 * which is easiest for the decoding logic
12021 * Some space which must UNDEF
12023 int op1 = (insn >> 23) & 3;
12024 int op2 = (insn >> 6) & 0x3f;
12025 if (op & 2) {
12026 goto illegal_op;
12028 if (rn == 15) {
12029 /* UNPREDICTABLE, unallocated hint or
12030 * PLD/PLDW/PLI (literal)
12032 return;
12034 if (op1 & 1) {
12035 return; /* PLD/PLDW/PLI or unallocated hint */
12037 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
12038 return; /* PLD/PLDW/PLI or unallocated hint */
12040 /* UNDEF space, or an UNPREDICTABLE */
12041 goto illegal_op;
12044 memidx = get_mem_index(s);
12045 if (rn == 15) {
12046 addr = tcg_temp_new_i32();
12047 /* PC relative. */
12048 /* s->pc has already been incremented by 4. */
12049 imm = s->pc & 0xfffffffc;
12050 if (insn & (1 << 23))
12051 imm += insn & 0xfff;
12052 else
12053 imm -= insn & 0xfff;
12054 tcg_gen_movi_i32(addr, imm);
12055 } else {
12056 addr = load_reg(s, rn);
12057 if (insn & (1 << 23)) {
12058 /* Positive offset. */
12059 imm = insn & 0xfff;
12060 tcg_gen_addi_i32(addr, addr, imm);
12061 } else {
12062 imm = insn & 0xff;
12063 switch ((insn >> 8) & 0xf) {
12064 case 0x0: /* Shifted Register. */
12065 shift = (insn >> 4) & 0xf;
12066 if (shift > 3) {
12067 tcg_temp_free_i32(addr);
12068 goto illegal_op;
12070 tmp = load_reg(s, rm);
12071 if (shift)
12072 tcg_gen_shli_i32(tmp, tmp, shift);
12073 tcg_gen_add_i32(addr, addr, tmp);
12074 tcg_temp_free_i32(tmp);
12075 break;
12076 case 0xc: /* Negative offset. */
12077 tcg_gen_addi_i32(addr, addr, -imm);
12078 break;
12079 case 0xe: /* User privilege. */
12080 tcg_gen_addi_i32(addr, addr, imm);
12081 memidx = get_a32_user_mem_index(s);
12082 break;
12083 case 0x9: /* Post-decrement. */
12084 imm = -imm;
12085 /* Fall through. */
12086 case 0xb: /* Post-increment. */
12087 postinc = 1;
12088 writeback = 1;
12089 break;
12090 case 0xd: /* Pre-decrement. */
12091 imm = -imm;
12092 /* Fall through. */
12093 case 0xf: /* Pre-increment. */
12094 writeback = 1;
12095 break;
12096 default:
12097 tcg_temp_free_i32(addr);
12098 goto illegal_op;
12103 issinfo = writeback ? ISSInvalid : rs;
12105 if (s->v8m_stackcheck && rn == 13 && writeback) {
12107 * Stackcheck. Here we know 'addr' is the current SP;
12108 * if imm is +ve we're moving SP up, else down. It is
12109 * UNKNOWN whether the limit check triggers when SP starts
12110 * below the limit and ends up above it; we choose to trigger.
12112 if ((int32_t)imm < 0) {
12113 TCGv_i32 newsp = tcg_temp_new_i32();
12115 tcg_gen_addi_i32(newsp, addr, imm);
12116 gen_helper_v8m_stackcheck(cpu_env, newsp);
12117 tcg_temp_free_i32(newsp);
12118 } else {
12119 gen_helper_v8m_stackcheck(cpu_env, addr);
12123 if (writeback && !postinc) {
12124 tcg_gen_addi_i32(addr, addr, imm);
12127 if (insn & (1 << 20)) {
12128 /* Load. */
12129 tmp = tcg_temp_new_i32();
12130 switch (op) {
12131 case 0:
12132 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
12133 break;
12134 case 4:
12135 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
12136 break;
12137 case 1:
12138 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
12139 break;
12140 case 5:
12141 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
12142 break;
12143 case 2:
12144 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
12145 break;
12146 default:
12147 tcg_temp_free_i32(tmp);
12148 tcg_temp_free_i32(addr);
12149 goto illegal_op;
12151 if (rs == 15) {
12152 gen_bx_excret(s, tmp);
12153 } else {
12154 store_reg(s, rs, tmp);
12156 } else {
12157 /* Store. */
12158 tmp = load_reg(s, rs);
12159 switch (op) {
12160 case 0:
12161 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
12162 break;
12163 case 1:
12164 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
12165 break;
12166 case 2:
12167 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
12168 break;
12169 default:
12170 tcg_temp_free_i32(tmp);
12171 tcg_temp_free_i32(addr);
12172 goto illegal_op;
12174 tcg_temp_free_i32(tmp);
12176 if (postinc)
12177 tcg_gen_addi_i32(addr, addr, imm);
12178 if (writeback) {
12179 store_reg(s, rn, addr);
12180 } else {
12181 tcg_temp_free_i32(addr);
12184 break;
12185 default:
12186 goto illegal_op;
12188 return;
12189 illegal_op:
12190 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
12191 default_exception_el(s));
12194 static void disas_thumb_insn(DisasContext *s, uint32_t insn)
12196 uint32_t val, op, rm, rn, rd, shift, cond;
12197 int32_t offset;
12198 int i;
12199 TCGv_i32 tmp;
12200 TCGv_i32 tmp2;
12201 TCGv_i32 addr;
12203 switch (insn >> 12) {
12204 case 0: case 1:
12206 rd = insn & 7;
12207 op = (insn >> 11) & 3;
12208 if (op == 3) {
12210 * 0b0001_1xxx_xxxx_xxxx
12211 * - Add, subtract (three low registers)
12212 * - Add, subtract (two low registers and immediate)
12214 rn = (insn >> 3) & 7;
12215 tmp = load_reg(s, rn);
12216 if (insn & (1 << 10)) {
12217 /* immediate */
12218 tmp2 = tcg_temp_new_i32();
12219 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
12220 } else {
12221 /* reg */
12222 rm = (insn >> 6) & 7;
12223 tmp2 = load_reg(s, rm);
12225 if (insn & (1 << 9)) {
12226 if (s->condexec_mask)
12227 tcg_gen_sub_i32(tmp, tmp, tmp2);
12228 else
12229 gen_sub_CC(tmp, tmp, tmp2);
12230 } else {
12231 if (s->condexec_mask)
12232 tcg_gen_add_i32(tmp, tmp, tmp2);
12233 else
12234 gen_add_CC(tmp, tmp, tmp2);
12236 tcg_temp_free_i32(tmp2);
12237 store_reg(s, rd, tmp);
12238 } else {
12239 /* shift immediate */
12240 rm = (insn >> 3) & 7;
12241 shift = (insn >> 6) & 0x1f;
12242 tmp = load_reg(s, rm);
12243 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
12244 if (!s->condexec_mask)
12245 gen_logic_CC(tmp);
12246 store_reg(s, rd, tmp);
12248 break;
12249 case 2: case 3:
12251 * 0b001x_xxxx_xxxx_xxxx
12252 * - Add, subtract, compare, move (one low register and immediate)
12254 op = (insn >> 11) & 3;
12255 rd = (insn >> 8) & 0x7;
12256 if (op == 0) { /* mov */
12257 tmp = tcg_temp_new_i32();
12258 tcg_gen_movi_i32(tmp, insn & 0xff);
12259 if (!s->condexec_mask)
12260 gen_logic_CC(tmp);
12261 store_reg(s, rd, tmp);
12262 } else {
12263 tmp = load_reg(s, rd);
12264 tmp2 = tcg_temp_new_i32();
12265 tcg_gen_movi_i32(tmp2, insn & 0xff);
12266 switch (op) {
12267 case 1: /* cmp */
12268 gen_sub_CC(tmp, tmp, tmp2);
12269 tcg_temp_free_i32(tmp);
12270 tcg_temp_free_i32(tmp2);
12271 break;
12272 case 2: /* add */
12273 if (s->condexec_mask)
12274 tcg_gen_add_i32(tmp, tmp, tmp2);
12275 else
12276 gen_add_CC(tmp, tmp, tmp2);
12277 tcg_temp_free_i32(tmp2);
12278 store_reg(s, rd, tmp);
12279 break;
12280 case 3: /* sub */
12281 if (s->condexec_mask)
12282 tcg_gen_sub_i32(tmp, tmp, tmp2);
12283 else
12284 gen_sub_CC(tmp, tmp, tmp2);
12285 tcg_temp_free_i32(tmp2);
12286 store_reg(s, rd, tmp);
12287 break;
12290 break;
12291 case 4:
12292 if (insn & (1 << 11)) {
12293 rd = (insn >> 8) & 7;
12294 /* load pc-relative. Bit 1 of PC is ignored. */
12295 val = s->pc + 2 + ((insn & 0xff) * 4);
12296 val &= ~(uint32_t)2;
12297 addr = tcg_temp_new_i32();
12298 tcg_gen_movi_i32(addr, val);
12299 tmp = tcg_temp_new_i32();
12300 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
12301 rd | ISSIs16Bit);
12302 tcg_temp_free_i32(addr);
12303 store_reg(s, rd, tmp);
12304 break;
12306 if (insn & (1 << 10)) {
12307 /* 0b0100_01xx_xxxx_xxxx
12308 * - data processing extended, branch and exchange
12310 rd = (insn & 7) | ((insn >> 4) & 8);
12311 rm = (insn >> 3) & 0xf;
12312 op = (insn >> 8) & 3;
12313 switch (op) {
12314 case 0: /* add */
12315 tmp = load_reg(s, rd);
12316 tmp2 = load_reg(s, rm);
12317 tcg_gen_add_i32(tmp, tmp, tmp2);
12318 tcg_temp_free_i32(tmp2);
12319 if (rd == 13) {
12320 /* ADD SP, SP, reg */
12321 store_sp_checked(s, tmp);
12322 } else {
12323 store_reg(s, rd, tmp);
12325 break;
12326 case 1: /* cmp */
12327 tmp = load_reg(s, rd);
12328 tmp2 = load_reg(s, rm);
12329 gen_sub_CC(tmp, tmp, tmp2);
12330 tcg_temp_free_i32(tmp2);
12331 tcg_temp_free_i32(tmp);
12332 break;
12333 case 2: /* mov/cpy */
12334 tmp = load_reg(s, rm);
12335 if (rd == 13) {
12336 /* MOV SP, reg */
12337 store_sp_checked(s, tmp);
12338 } else {
12339 store_reg(s, rd, tmp);
12341 break;
12342 case 3:
12344 /* 0b0100_0111_xxxx_xxxx
12345 * - branch [and link] exchange thumb register
12347 bool link = insn & (1 << 7);
12349 if (insn & 3) {
12350 goto undef;
12352 if (link) {
12353 ARCH(5);
12355 if ((insn & 4)) {
12356 /* BXNS/BLXNS: only exists for v8M with the
12357 * security extensions, and always UNDEF if NonSecure.
12358 * We don't implement these in the user-only mode
12359 * either (in theory you can use them from Secure User
12360 * mode but they are too tied in to system emulation.)
12362 if (!s->v8m_secure || IS_USER_ONLY) {
12363 goto undef;
12365 if (link) {
12366 gen_blxns(s, rm);
12367 } else {
12368 gen_bxns(s, rm);
12370 break;
12372 /* BLX/BX */
12373 tmp = load_reg(s, rm);
12374 if (link) {
12375 val = (uint32_t)s->pc | 1;
12376 tmp2 = tcg_temp_new_i32();
12377 tcg_gen_movi_i32(tmp2, val);
12378 store_reg(s, 14, tmp2);
12379 gen_bx(s, tmp);
12380 } else {
12381 /* Only BX works as exception-return, not BLX */
12382 gen_bx_excret(s, tmp);
12384 break;
12387 break;
12391 * 0b0100_00xx_xxxx_xxxx
12392 * - Data-processing (two low registers)
12394 rd = insn & 7;
12395 rm = (insn >> 3) & 7;
12396 op = (insn >> 6) & 0xf;
12397 if (op == 2 || op == 3 || op == 4 || op == 7) {
12398 /* the shift/rotate ops want the operands backwards */
12399 val = rm;
12400 rm = rd;
12401 rd = val;
12402 val = 1;
12403 } else {
12404 val = 0;
12407 if (op == 9) { /* neg */
12408 tmp = tcg_temp_new_i32();
12409 tcg_gen_movi_i32(tmp, 0);
12410 } else if (op != 0xf) { /* mvn doesn't read its first operand */
12411 tmp = load_reg(s, rd);
12412 } else {
12413 tmp = NULL;
12416 tmp2 = load_reg(s, rm);
12417 switch (op) {
12418 case 0x0: /* and */
12419 tcg_gen_and_i32(tmp, tmp, tmp2);
12420 if (!s->condexec_mask)
12421 gen_logic_CC(tmp);
12422 break;
12423 case 0x1: /* eor */
12424 tcg_gen_xor_i32(tmp, tmp, tmp2);
12425 if (!s->condexec_mask)
12426 gen_logic_CC(tmp);
12427 break;
12428 case 0x2: /* lsl */
12429 if (s->condexec_mask) {
12430 gen_shl(tmp2, tmp2, tmp);
12431 } else {
12432 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
12433 gen_logic_CC(tmp2);
12435 break;
12436 case 0x3: /* lsr */
12437 if (s->condexec_mask) {
12438 gen_shr(tmp2, tmp2, tmp);
12439 } else {
12440 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
12441 gen_logic_CC(tmp2);
12443 break;
12444 case 0x4: /* asr */
12445 if (s->condexec_mask) {
12446 gen_sar(tmp2, tmp2, tmp);
12447 } else {
12448 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
12449 gen_logic_CC(tmp2);
12451 break;
12452 case 0x5: /* adc */
12453 if (s->condexec_mask) {
12454 gen_adc(tmp, tmp2);
12455 } else {
12456 gen_adc_CC(tmp, tmp, tmp2);
12458 break;
12459 case 0x6: /* sbc */
12460 if (s->condexec_mask) {
12461 gen_sub_carry(tmp, tmp, tmp2);
12462 } else {
12463 gen_sbc_CC(tmp, tmp, tmp2);
12465 break;
12466 case 0x7: /* ror */
12467 if (s->condexec_mask) {
12468 tcg_gen_andi_i32(tmp, tmp, 0x1f);
12469 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
12470 } else {
12471 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
12472 gen_logic_CC(tmp2);
12474 break;
12475 case 0x8: /* tst */
12476 tcg_gen_and_i32(tmp, tmp, tmp2);
12477 gen_logic_CC(tmp);
12478 rd = 16;
12479 break;
12480 case 0x9: /* neg */
12481 if (s->condexec_mask)
12482 tcg_gen_neg_i32(tmp, tmp2);
12483 else
12484 gen_sub_CC(tmp, tmp, tmp2);
12485 break;
12486 case 0xa: /* cmp */
12487 gen_sub_CC(tmp, tmp, tmp2);
12488 rd = 16;
12489 break;
12490 case 0xb: /* cmn */
12491 gen_add_CC(tmp, tmp, tmp2);
12492 rd = 16;
12493 break;
12494 case 0xc: /* orr */
12495 tcg_gen_or_i32(tmp, tmp, tmp2);
12496 if (!s->condexec_mask)
12497 gen_logic_CC(tmp);
12498 break;
12499 case 0xd: /* mul */
12500 tcg_gen_mul_i32(tmp, tmp, tmp2);
12501 if (!s->condexec_mask)
12502 gen_logic_CC(tmp);
12503 break;
12504 case 0xe: /* bic */
12505 tcg_gen_andc_i32(tmp, tmp, tmp2);
12506 if (!s->condexec_mask)
12507 gen_logic_CC(tmp);
12508 break;
12509 case 0xf: /* mvn */
12510 tcg_gen_not_i32(tmp2, tmp2);
12511 if (!s->condexec_mask)
12512 gen_logic_CC(tmp2);
12513 val = 1;
12514 rm = rd;
12515 break;
12517 if (rd != 16) {
12518 if (val) {
12519 store_reg(s, rm, tmp2);
12520 if (op != 0xf)
12521 tcg_temp_free_i32(tmp);
12522 } else {
12523 store_reg(s, rd, tmp);
12524 tcg_temp_free_i32(tmp2);
12526 } else {
12527 tcg_temp_free_i32(tmp);
12528 tcg_temp_free_i32(tmp2);
12530 break;
12532 case 5:
12533 /* load/store register offset. */
12534 rd = insn & 7;
12535 rn = (insn >> 3) & 7;
12536 rm = (insn >> 6) & 7;
12537 op = (insn >> 9) & 7;
12538 addr = load_reg(s, rn);
12539 tmp = load_reg(s, rm);
12540 tcg_gen_add_i32(addr, addr, tmp);
12541 tcg_temp_free_i32(tmp);
12543 if (op < 3) { /* store */
12544 tmp = load_reg(s, rd);
12545 } else {
12546 tmp = tcg_temp_new_i32();
12549 switch (op) {
12550 case 0: /* str */
12551 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12552 break;
12553 case 1: /* strh */
12554 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12555 break;
12556 case 2: /* strb */
12557 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12558 break;
12559 case 3: /* ldrsb */
12560 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12561 break;
12562 case 4: /* ldr */
12563 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12564 break;
12565 case 5: /* ldrh */
12566 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12567 break;
12568 case 6: /* ldrb */
12569 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12570 break;
12571 case 7: /* ldrsh */
12572 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12573 break;
12575 if (op >= 3) { /* load */
12576 store_reg(s, rd, tmp);
12577 } else {
12578 tcg_temp_free_i32(tmp);
12580 tcg_temp_free_i32(addr);
12581 break;
12583 case 6:
12584 /* load/store word immediate offset */
12585 rd = insn & 7;
12586 rn = (insn >> 3) & 7;
12587 addr = load_reg(s, rn);
12588 val = (insn >> 4) & 0x7c;
12589 tcg_gen_addi_i32(addr, addr, val);
12591 if (insn & (1 << 11)) {
12592 /* load */
12593 tmp = tcg_temp_new_i32();
12594 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
12595 store_reg(s, rd, tmp);
12596 } else {
12597 /* store */
12598 tmp = load_reg(s, rd);
12599 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
12600 tcg_temp_free_i32(tmp);
12602 tcg_temp_free_i32(addr);
12603 break;
12605 case 7:
12606 /* load/store byte immediate offset */
12607 rd = insn & 7;
12608 rn = (insn >> 3) & 7;
12609 addr = load_reg(s, rn);
12610 val = (insn >> 6) & 0x1f;
12611 tcg_gen_addi_i32(addr, addr, val);
12613 if (insn & (1 << 11)) {
12614 /* load */
12615 tmp = tcg_temp_new_i32();
12616 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12617 store_reg(s, rd, tmp);
12618 } else {
12619 /* store */
12620 tmp = load_reg(s, rd);
12621 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12622 tcg_temp_free_i32(tmp);
12624 tcg_temp_free_i32(addr);
12625 break;
12627 case 8:
12628 /* load/store halfword immediate offset */
12629 rd = insn & 7;
12630 rn = (insn >> 3) & 7;
12631 addr = load_reg(s, rn);
12632 val = (insn >> 5) & 0x3e;
12633 tcg_gen_addi_i32(addr, addr, val);
12635 if (insn & (1 << 11)) {
12636 /* load */
12637 tmp = tcg_temp_new_i32();
12638 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12639 store_reg(s, rd, tmp);
12640 } else {
12641 /* store */
12642 tmp = load_reg(s, rd);
12643 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12644 tcg_temp_free_i32(tmp);
12646 tcg_temp_free_i32(addr);
12647 break;
12649 case 9:
12650 /* load/store from stack */
12651 rd = (insn >> 8) & 7;
12652 addr = load_reg(s, 13);
12653 val = (insn & 0xff) * 4;
12654 tcg_gen_addi_i32(addr, addr, val);
12656 if (insn & (1 << 11)) {
12657 /* load */
12658 tmp = tcg_temp_new_i32();
12659 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12660 store_reg(s, rd, tmp);
12661 } else {
12662 /* store */
12663 tmp = load_reg(s, rd);
12664 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
12665 tcg_temp_free_i32(tmp);
12667 tcg_temp_free_i32(addr);
12668 break;
12670 case 10:
12672 * 0b1010_xxxx_xxxx_xxxx
12673 * - Add PC/SP (immediate)
12675 rd = (insn >> 8) & 7;
12676 if (insn & (1 << 11)) {
12677 /* SP */
12678 tmp = load_reg(s, 13);
12679 } else {
12680 /* PC. bit 1 is ignored. */
12681 tmp = tcg_temp_new_i32();
12682 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
12684 val = (insn & 0xff) * 4;
12685 tcg_gen_addi_i32(tmp, tmp, val);
12686 store_reg(s, rd, tmp);
12687 break;
12689 case 11:
12690 /* misc */
12691 op = (insn >> 8) & 0xf;
12692 switch (op) {
12693 case 0:
12695 * 0b1011_0000_xxxx_xxxx
12696 * - ADD (SP plus immediate)
12697 * - SUB (SP minus immediate)
12699 tmp = load_reg(s, 13);
12700 val = (insn & 0x7f) * 4;
12701 if (insn & (1 << 7))
12702 val = -(int32_t)val;
12703 tcg_gen_addi_i32(tmp, tmp, val);
12704 store_sp_checked(s, tmp);
12705 break;
12707 case 2: /* sign/zero extend. */
12708 ARCH(6);
12709 rd = insn & 7;
12710 rm = (insn >> 3) & 7;
12711 tmp = load_reg(s, rm);
12712 switch ((insn >> 6) & 3) {
12713 case 0: gen_sxth(tmp); break;
12714 case 1: gen_sxtb(tmp); break;
12715 case 2: gen_uxth(tmp); break;
12716 case 3: gen_uxtb(tmp); break;
12718 store_reg(s, rd, tmp);
12719 break;
12720 case 4: case 5: case 0xc: case 0xd:
12722 * 0b1011_x10x_xxxx_xxxx
12723 * - push/pop
12725 addr = load_reg(s, 13);
12726 if (insn & (1 << 8))
12727 offset = 4;
12728 else
12729 offset = 0;
12730 for (i = 0; i < 8; i++) {
12731 if (insn & (1 << i))
12732 offset += 4;
12734 if ((insn & (1 << 11)) == 0) {
12735 tcg_gen_addi_i32(addr, addr, -offset);
12738 if (s->v8m_stackcheck) {
12740 * Here 'addr' is the lower of "old SP" and "new SP";
12741 * if this is a pop that starts below the limit and ends
12742 * above it, it is UNKNOWN whether the limit check triggers;
12743 * we choose to trigger.
12745 gen_helper_v8m_stackcheck(cpu_env, addr);
12748 for (i = 0; i < 8; i++) {
12749 if (insn & (1 << i)) {
12750 if (insn & (1 << 11)) {
12751 /* pop */
12752 tmp = tcg_temp_new_i32();
12753 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
12754 store_reg(s, i, tmp);
12755 } else {
12756 /* push */
12757 tmp = load_reg(s, i);
12758 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
12759 tcg_temp_free_i32(tmp);
12761 /* advance to the next address. */
12762 tcg_gen_addi_i32(addr, addr, 4);
12765 tmp = NULL;
12766 if (insn & (1 << 8)) {
12767 if (insn & (1 << 11)) {
12768 /* pop pc */
12769 tmp = tcg_temp_new_i32();
12770 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
12771 /* don't set the pc until the rest of the instruction
12772 has completed */
12773 } else {
12774 /* push lr */
12775 tmp = load_reg(s, 14);
12776 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
12777 tcg_temp_free_i32(tmp);
12779 tcg_gen_addi_i32(addr, addr, 4);
12781 if ((insn & (1 << 11)) == 0) {
12782 tcg_gen_addi_i32(addr, addr, -offset);
12784 /* write back the new stack pointer */
12785 store_reg(s, 13, addr);
12786 /* set the new PC value */
12787 if ((insn & 0x0900) == 0x0900) {
12788 store_reg_from_load(s, 15, tmp);
12790 break;
12792 case 1: case 3: case 9: case 11: /* czb */
12793 rm = insn & 7;
12794 tmp = load_reg(s, rm);
12795 arm_gen_condlabel(s);
12796 if (insn & (1 << 11))
12797 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
12798 else
12799 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
12800 tcg_temp_free_i32(tmp);
12801 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
12802 val = (uint32_t)s->pc + 2;
12803 val += offset;
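/* The offset is i:imm5:'0' (0..126 bytes) and, as for other Thumb
 * branches, it is taken relative to this insn's address + 4, which is
 * s->pc + 2 here because s->pc already points past the 16-bit insn.
 */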
12804 gen_jmp(s, val);
12805 break;
12807 case 15: /* IT, nop-hint. */
12808 if ((insn & 0xf) == 0) {
12809 gen_nop_hint(s, (insn >> 4) & 0xf);
12810 break;
12812 /* If Then. */
12813 s->condexec_cond = (insn >> 4) & 0xe;
12814 s->condexec_mask = insn & 0x1f;
12815 /* No actual code generated for this insn, just setup state. */
12816 break;
12818 case 0xe: /* bkpt */
12820 int imm8 = extract32(insn, 0, 8);
12821 ARCH(5);
12822 gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
12823 break;
12826 case 0xa: /* rev, and hlt */
12828 int op1 = extract32(insn, 6, 2);
12830 if (op1 == 2) {
12831 /* HLT */
12832 int imm6 = extract32(insn, 0, 6);
12834 gen_hlt(s, imm6);
12835 break;
12838 /* Otherwise this is rev */
12839 ARCH(6);
12840 rn = (insn >> 3) & 0x7;
12841 rd = insn & 0x7;
12842 tmp = load_reg(s, rn);
12843 switch (op1) {
12844 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
12845 case 1: gen_rev16(tmp); break;
12846 case 3: gen_revsh(tmp); break;
12847 default:
12848 g_assert_not_reached();
12850 store_reg(s, rd, tmp);
12851 break;
12854 case 6:
12855 switch ((insn >> 5) & 7) {
12856 case 2:
12857 /* setend */
12858 ARCH(6);
12859 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
12860 gen_helper_setend(cpu_env);
12861 s->base.is_jmp = DISAS_UPDATE;
12863 break;
12864 case 3:
12865 /* cps */
12866 ARCH(6);
12867 if (IS_USER(s)) {
12868 break;
12870 if (arm_dc_feature(s, ARM_FEATURE_M)) {
12871 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
12872 /* FAULTMASK */
12873 if (insn & 1) {
12874 addr = tcg_const_i32(19);
12875 gen_helper_v7m_msr(cpu_env, addr, tmp);
12876 tcg_temp_free_i32(addr);
12878 /* PRIMASK */
12879 if (insn & 2) {
12880 addr = tcg_const_i32(16);
12881 gen_helper_v7m_msr(cpu_env, addr, tmp);
12882 tcg_temp_free_i32(addr);
12884 tcg_temp_free_i32(tmp);
12885 gen_lookup_tb(s);
12886 } else {
12887 if (insn & (1 << 4)) {
12888 shift = CPSR_A | CPSR_I | CPSR_F;
12889 } else {
12890 shift = 0;
12891 }
12892 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
12893 }
12894 break;
12895 default:
12896 goto undef;
12897 }
12898 break;
12900 default:
12901 goto undef;
12902 }
12903 break;
12905 case 12:
12906 {
12907 /* load/store multiple */
12908 TCGv_i32 loaded_var = NULL;
12909 rn = (insn >> 8) & 0x7;
12910 addr = load_reg(s, rn);
12911 for (i = 0; i < 8; i++) {
12912 if (insn & (1 << i)) {
12913 if (insn & (1 << 11)) {
12914 /* load */
12915 tmp = tcg_temp_new_i32();
12916 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
12917 if (i == rn) {
12918 loaded_var = tmp;
12919 } else {
12920 store_reg(s, i, tmp);
12921 }
12922 } else {
12923 /* store */
12924 tmp = load_reg(s, i);
12925 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
12926 tcg_temp_free_i32(tmp);
12927 }
12928 /* advance to the next address */
12929 tcg_gen_addi_i32(addr, addr, 4);
12930 }
12931 }
12932 if ((insn & (1 << rn)) == 0) {
12933 /* base reg not in list: base register writeback */
12934 store_reg(s, rn, addr);
12935 } else {
12936 /* base reg in list: if load, complete it now */
12937 if (insn & (1 << 11)) {
12938 store_reg(s, rn, loaded_var);
12939 }
12940 tcg_temp_free_i32(addr);
12941 }
12942 break;
12943 }
12944 case 13:
12945 /* conditional branch or swi */
12946 cond = (insn >> 8) & 0xf;
12947 if (cond == 0xe)
12948 goto undef;
12950 if (cond == 0xf) {
12951 /* swi */
12952 gen_set_pc_im(s, s->pc);
12953 s->svc_imm = extract32(insn, 0, 8);
12954 s->base.is_jmp = DISAS_SWI;
12955 break;
12956 }
12957 /* generate a conditional jump to next instruction */
12958 arm_skip_unless(s, cond);
12960 /* jump to the offset */
12961 val = (uint32_t)s->pc + 2;
12962 offset = ((int32_t)insn << 24) >> 24;
12963 val += offset << 1;
12964 gen_jmp(s, val);
12965 break;
12967 case 14:
12968 if (insn & (1 << 11)) {
12969 /* thumb_insn_is_16bit() ensures we can't get here for
12970 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
12971 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
12972 */
12973 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12974 ARCH(5);
12975 offset = ((insn & 0x7ff) << 1);
12976 tmp = load_reg(s, 14);
12977 tcg_gen_addi_i32(tmp, tmp, offset);
12978 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
12980 tmp2 = tcg_temp_new_i32();
12981 tcg_gen_movi_i32(tmp2, s->pc | 1);
12982 store_reg(s, 14, tmp2);
12983 gen_bx(s, tmp);
12984 break;
12985 }
12986 /* unconditional branch */
12987 val = (uint32_t)s->pc;
12988 offset = ((int32_t)insn << 21) >> 21;
12989 val += (offset << 1) + 2;
12990 gen_jmp(s, val);
12991 break;
12993 case 15:
12994 /* thumb_insn_is_16bit() ensures we can't get here for
12995 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
12996 */
12997 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12999 if (insn & (1 << 11)) {
13000 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
13001 offset = ((insn & 0x7ff) << 1) | 1;
13002 tmp = load_reg(s, 14);
13003 tcg_gen_addi_i32(tmp, tmp, offset);
13005 tmp2 = tcg_temp_new_i32();
13006 tcg_gen_movi_i32(tmp2, s->pc | 1);
13007 store_reg(s, 14, tmp2);
13008 gen_bx(s, tmp);
13009 } else {
13010 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
13011 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
13013 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
13014 }
13015 break;
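/* Putting the two 16-bit halves together: the BL/BLX prefix leaves
 * LR = (address of prefix) + 4 + SignExtend(imm11) << 12, and the suffix
 * then branches to LR + (imm11 << 1), with BLX additionally clearing the
 * low two bits to enter ARM state; the return address written back to LR
 * is the address of the instruction after the suffix, with bit 0 set.
 */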
13016 }
13017 return;
13018 illegal_op:
13019 undef:
13020 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
13021 default_exception_el(s));
13022 }
13024 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
13025 {
13026 /* Return true if the insn at dc->pc might cross a page boundary.
13027 * (False positives are OK, false negatives are not.)
13028 * We know this is a Thumb insn, and our caller ensures we are
13029 * only called if dc->pc is less than 4 bytes from the page
13030 * boundary, so we cross the page if the first 16 bits indicate
13031 * that this is a 32 bit insn.
13032 */
13033 uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
13035 return !thumb_insn_is_16bit(s, insn);
13036 }
13038 static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
13039 {
13040 DisasContext *dc = container_of(dcbase, DisasContext, base);
13041 CPUARMState *env = cs->env_ptr;
13042 ARMCPU *cpu = arm_env_get_cpu(env);
13043 uint32_t tb_flags = dc->base.tb->flags;
13044 uint32_t condexec, core_mmu_idx;
13046 dc->isar = &cpu->isar;
13047 dc->pc = dc->base.pc_first;
13048 dc->condjmp = 0;
13050 dc->aarch64 = 0;
13051 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
13052 * there is no secure EL1, so we route exceptions to EL3.
13053 */
13054 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
13055 !arm_el_is_aa64(env, 3);
13056 dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
13057 dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
13058 dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
13059 condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
13060 dc->condexec_mask = (condexec & 0xf) << 1;
13061 dc->condexec_cond = condexec >> 4;
13062 core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
13063 dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
13064 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
13065 #if !defined(CONFIG_USER_ONLY)
13066 dc->user = (dc->current_el == 0);
13067 #endif
13068 dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
13069 dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
13070 dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
13071 dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
13072 dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
13073 dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
13074 dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
13075 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
13076 regime_is_secure(env, dc->mmu_idx);
13077 dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
13078 dc->cp_regs = cpu->cp_regs;
13079 dc->features = env->features;
13081 /* Single step state. The code-generation logic here is:
13082 * SS_ACTIVE == 0:
13083 * generate code with no special handling for single-stepping (except
13084 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
13085 * this happens anyway because those changes are all system register or
13086 * PSTATE writes).
13087 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
13088 * emit code for one insn
13089 * emit code to clear PSTATE.SS
13090 * emit code to generate software step exception for completed step
13091 * end TB (as usual for having generated an exception)
13092 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
13093 * emit code to generate a software step exception
13094 * end the TB
13095 */
13096 dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
13097 dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
13098 dc->is_ldex = false;
13099 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
13101 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
13103 /* If architectural single step active, limit to 1. */
13104 if (is_singlestepping(dc)) {
13105 dc->base.max_insns = 1;
13106 }
13108 /* ARM is a fixed-length ISA. Bound the number of insns to execute
13109 to those left on the page. */
13110 if (!dc->thumb) {
13111 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
13112 dc->base.max_insns = MIN(dc->base.max_insns, bound);
13113 }
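/* Illustration of the bound above (assuming 4 KiB target pages purely for
 * the example): with pc_first == 0x8010, -(0x8010 | 0xfffff000) == 0xff0
 * bytes remain on the page, i.e. at most 1020 four-byte ARM instructions
 * before the page boundary.
 */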
13115 cpu_F0s = tcg_temp_new_i32();
13116 cpu_F1s = tcg_temp_new_i32();
13117 cpu_F0d = tcg_temp_new_i64();
13118 cpu_F1d = tcg_temp_new_i64();
13119 cpu_V0 = cpu_F0d;
13120 cpu_V1 = cpu_F1d;
13121 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
13122 cpu_M0 = tcg_temp_new_i64();
13123 }
13125 static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
13126 {
13127 DisasContext *dc = container_of(dcbase, DisasContext, base);
13129 /* A note on handling of the condexec (IT) bits:
13131 * We want to avoid the overhead of having to write the updated condexec
13132 * bits back to the CPUARMState for every instruction in an IT block. So:
13133 * (1) if the condexec bits are not already zero then we write
13134 * zero back into the CPUARMState now. This avoids complications trying
13135 * to do it at the end of the block. (For example if we don't do this
13136 * it's hard to identify whether we can safely skip writing condexec
13137 * at the end of the TB, which we definitely want to do for the case
13138 * where a TB doesn't do anything with the IT state at all.)
13139 * (2) if we are going to leave the TB then we call gen_set_condexec()
13140 * which will write the correct value into CPUARMState if zero is wrong.
13141 * This is done both for leaving the TB at the end, and for leaving
13142 * it because of an exception we know will happen, which is done in
13143 * gen_exception_insn(). The latter is necessary because we need to
13144 * leave the TB with the PC/IT state just prior to execution of the
13145 * instruction which caused the exception.
13146 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
13147 * then the CPUARMState will be wrong and we need to reset it.
13148 * This is handled in the same way as restoration of the
13149 * PC in these situations; we save the value of the condexec bits
13150 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
13151 * then uses this to restore them after an exception.
13153 * Note that there are no instructions which can read the condexec
13154 * bits, and none which can write non-static values to them, so
13155 * we don't need to care about whether CPUARMState is correct in the
13156 * middle of a TB.
13157 */
13159 /* Reset the conditional execution bits immediately. This avoids
13160 complications trying to do it at the end of the block. */
13161 if (dc->condexec_mask || dc->condexec_cond) {
13162 TCGv_i32 tmp = tcg_temp_new_i32();
13163 tcg_gen_movi_i32(tmp, 0);
13164 store_cpu_field(tmp, condexec_bits);
13165 }
13166 }
13168 static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
13169 {
13170 DisasContext *dc = container_of(dcbase, DisasContext, base);
13172 tcg_gen_insn_start(dc->pc,
13173 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
13174 0);
13175 dc->insn_start = tcg_last_op();
13176 }
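/* Three words are recorded per insn: the PC, the packed condexec (IT)
 * state, and a zero placeholder for the syndrome, which can be filled in
 * later through the insn_start op saved just above. restore_state_to_opc()
 * at the end of this file consumes them in the same order.
 */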
13178 static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
13179 const CPUBreakpoint *bp)
13180 {
13181 DisasContext *dc = container_of(dcbase, DisasContext, base);
13183 if (bp->flags & BP_CPU) {
13184 gen_set_condexec(dc);
13185 gen_set_pc_im(dc, dc->pc);
13186 gen_helper_check_breakpoints(cpu_env);
13187 /* End the TB early; it's likely not going to be executed */
13188 dc->base.is_jmp = DISAS_TOO_MANY;
13189 } else {
13190 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
13191 /* The address covered by the breakpoint must be
13192 included in [tb->pc, tb->pc + tb->size) in order
13193 for it to be properly cleared -- thus we
13194 increment the PC here so that the logic setting
13195 tb->size below does the right thing. */
13196 /* TODO: Advance PC by correct instruction length to
13197 * avoid disassembler error messages */
13198 dc->pc += 2;
13199 dc->base.is_jmp = DISAS_NORETURN;
13200 }
13202 return true;
13203 }
13205 static bool arm_pre_translate_insn(DisasContext *dc)
13206 {
13207 #ifdef CONFIG_USER_ONLY
13208 /* Intercept jump to the magic kernel page. */
13209 if (dc->pc >= 0xffff0000) {
13210 /* We always get here via a jump, so know we are not in a
13211 conditional execution block. */
13212 gen_exception_internal(EXCP_KERNEL_TRAP);
13213 dc->base.is_jmp = DISAS_NORETURN;
13214 return true;
13215 }
13216 #endif
13218 if (dc->ss_active && !dc->pstate_ss) {
13219 /* Singlestep state is Active-pending.
13220 * If we're in this state at the start of a TB then either
13221 * a) we just took an exception to an EL which is being debugged
13222 * and this is the first insn in the exception handler
13223 * b) debug exceptions were masked and we just unmasked them
13224 * without changing EL (eg by clearing PSTATE.D)
13225 * In either case we're going to take a swstep exception in the
13226 * "did not step an insn" case, and so the syndrome ISV and EX
13227 * bits should be zero.
13228 */
13229 assert(dc->base.num_insns == 1);
13230 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
13231 default_exception_el(dc));
13232 dc->base.is_jmp = DISAS_NORETURN;
13233 return true;
13234 }
13236 return false;
13237 }
13239 static void arm_post_translate_insn(DisasContext *dc)
13240 {
13241 if (dc->condjmp && !dc->base.is_jmp) {
13242 gen_set_label(dc->condlabel);
13243 dc->condjmp = 0;
13244 }
13245 dc->base.pc_next = dc->pc;
13246 translator_loop_temp_check(&dc->base);
13247 }
13249 static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
13250 {
13251 DisasContext *dc = container_of(dcbase, DisasContext, base);
13252 CPUARMState *env = cpu->env_ptr;
13253 unsigned int insn;
13255 if (arm_pre_translate_insn(dc)) {
13256 return;
13257 }
13259 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
13260 dc->insn = insn;
13261 dc->pc += 4;
13262 disas_arm_insn(dc, insn);
13264 arm_post_translate_insn(dc);
13266 /* ARM is a fixed-length ISA. We performed the cross-page check
13267 in init_disas_context by adjusting max_insns. */
13268 }
13270 static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
13271 {
13272 /* Return true if this Thumb insn is always unconditional,
13273 * even inside an IT block. This is true of only a very few
13274 * instructions: BKPT, HLT, and SG.
13276 * A larger class of instructions are UNPREDICTABLE if used
13277 * inside an IT block; we do not need to detect those here, because
13278 * what we do by default (perform the cc check and update the IT
13279 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
13280 * choice for those situations.
13282 * insn is either a 16-bit or a 32-bit instruction; the two are
13283 * distinguishable because for the 16-bit case the top 16 bits
13284 * are zeroes, and that isn't a valid 32-bit encoding.
13285 */
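/* For example, BKPT #0 arrives here as 0x0000be00 (top half zero), while
 * the SG check below matches the full 32-bit word 0xe97fe97f.
 */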
13286 if ((insn & 0xffffff00) == 0xbe00) {
13287 /* BKPT */
13288 return true;
13289 }
13291 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
13292 !arm_dc_feature(s, ARM_FEATURE_M)) {
13293 /* HLT: v8A only. This is unconditional even when it is going to
13294 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
13295 * For v7 cores this was a plain old undefined encoding and so
13296 * honours its cc check. (We might be using the encoding as
13297 * a semihosting trap, but we don't change the cc check behaviour
13298 * on that account, because a debugger connected to a real v7A
13299 * core and emulating semihosting traps by catching the UNDEF
13300 * exception would also only see cases where the cc check passed.
13301 * No guest code should be trying to do a HLT semihosting trap
13302 * in an IT block anyway.
13303 */
13304 return true;
13305 }
13307 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
13308 arm_dc_feature(s, ARM_FEATURE_M)) {
13309 /* SG: v8M only */
13310 return true;
13311 }
13313 return false;
13314 }
13316 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
13317 {
13318 DisasContext *dc = container_of(dcbase, DisasContext, base);
13319 CPUARMState *env = cpu->env_ptr;
13320 uint32_t insn;
13321 bool is_16bit;
13323 if (arm_pre_translate_insn(dc)) {
13324 return;
13325 }
13327 insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
13328 is_16bit = thumb_insn_is_16bit(dc, insn);
13329 dc->pc += 2;
13330 if (!is_16bit) {
13331 uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
13333 insn = insn << 16 | insn2;
13334 dc->pc += 2;
13335 }
13336 dc->insn = insn;
13338 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
13339 uint32_t cond = dc->condexec_cond;
13341 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
13342 arm_skip_unless(dc, cond);
13343 }
13344 }
13346 if (is_16bit) {
13347 disas_thumb_insn(dc, insn);
13348 } else {
13349 disas_thumb2_insn(dc, insn);
13350 }
13352 /* Advance the Thumb condexec condition. */
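/* Each step shifts the next then/else bit out of the top of the 5-bit
 * mask into the condition's low bit; e.g. for ITTE EQ the per-insn
 * conditions come out as EQ, EQ, NE, after which the mask shifts down
 * to zero and the IT block ends.
 */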
13353 if (dc->condexec_mask) {
13354 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
13355 ((dc->condexec_mask >> 4) & 1));
13356 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
13357 if (dc->condexec_mask == 0) {
13358 dc->condexec_cond = 0;
13359 }
13360 }
13362 arm_post_translate_insn(dc);
13364 /* Thumb is a variable-length ISA. Stop translation when the next insn
13365 * will touch a new page. This ensures that prefetch aborts occur at
13366 * the right place.
13368 * We want to stop the TB if the next insn starts in a new page,
13369 * or if it spans between this page and the next. This means that
13370 * if we're looking at the last halfword in the page we need to
13371 * see if it's a 16-bit Thumb insn (which will fit in this TB)
13372 * or a 32-bit Thumb insn (which won't).
13373 * This is to avoid generating a silly TB with a single 16-bit insn
13374 * in it at the end of this page (which would execute correctly
13375 * but isn't very efficient).
13376 */
13377 if (dc->base.is_jmp == DISAS_NEXT
13378 && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
13379 || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
13380 && insn_crosses_page(env, dc)))) {
13381 dc->base.is_jmp = DISAS_TOO_MANY;
13382 }
13383 }
13385 static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
13386 {
13387 DisasContext *dc = container_of(dcbase, DisasContext, base);
13389 if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
13390 /* FIXME: This can theoretically happen with self-modifying code. */
13391 cpu_abort(cpu, "IO on conditional branch instruction");
13392 }
13394 /* At this stage dc->condjmp will only be set when the skipped
13395 instruction was a conditional branch or trap, and the PC has
13396 already been written. */
13397 gen_set_condexec(dc);
13398 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
13399 /* Exception return branches need some special case code at the
13400 * end of the TB, which is complex enough that it has to
13401 * handle the single-step vs not and the condition-failed
13402 * insn codepath itself.
13403 */
13404 gen_bx_excret_final_code(dc);
13405 } else if (unlikely(is_singlestepping(dc))) {
13406 /* Unconditional and "condition passed" instruction codepath. */
13407 switch (dc->base.is_jmp) {
13408 case DISAS_SWI:
13409 gen_ss_advance(dc);
13410 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
13411 default_exception_el(dc));
13412 break;
13413 case DISAS_HVC:
13414 gen_ss_advance(dc);
13415 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
13416 break;
13417 case DISAS_SMC:
13418 gen_ss_advance(dc);
13419 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
13420 break;
13421 case DISAS_NEXT:
13422 case DISAS_TOO_MANY:
13423 case DISAS_UPDATE:
13424 gen_set_pc_im(dc, dc->pc);
13425 /* fall through */
13426 default:
13427 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
13428 gen_singlestep_exception(dc);
13429 break;
13430 case DISAS_NORETURN:
13431 break;
13432 }
13433 } else {
13434 /* While branches must always occur at the end of an IT block,
13435 there are a few other things that can cause us to terminate
13436 the TB in the middle of an IT block:
13437 - Exception generating instructions (bkpt, swi, undefined).
13438 - Page boundaries.
13439 - Hardware watchpoints.
13440 Hardware breakpoints have already been handled and skip this code.
13441 */
13442 switch (dc->base.is_jmp) {
13443 case DISAS_NEXT:
13444 case DISAS_TOO_MANY:
13445 gen_goto_tb(dc, 1, dc->pc);
13446 break;
13447 case DISAS_JUMP:
13448 gen_goto_ptr();
13449 break;
13450 case DISAS_UPDATE:
13451 gen_set_pc_im(dc, dc->pc);
13452 /* fall through */
13453 default:
13454 /* indicate that the hash table must be used to find the next TB */
13455 tcg_gen_exit_tb(NULL, 0);
13456 break;
13457 case DISAS_NORETURN:
13458 /* nothing more to generate */
13459 break;
13460 case DISAS_WFI:
13461 {
13462 TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
13463 !(dc->insn & (1U << 31))) ? 2 : 4);
13465 gen_helper_wfi(cpu_env, tmp);
13466 tcg_temp_free_i32(tmp);
13467 /* The helper doesn't necessarily throw an exception, but we
13468 * must go back to the main loop to check for interrupts anyway.
13469 */
13470 tcg_gen_exit_tb(NULL, 0);
13471 break;
13472 }
13473 case DISAS_WFE:
13474 gen_helper_wfe(cpu_env);
13475 break;
13476 case DISAS_YIELD:
13477 gen_helper_yield(cpu_env);
13478 break;
13479 case DISAS_SWI:
13480 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
13481 default_exception_el(dc));
13482 break;
13483 case DISAS_HVC:
13484 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
13485 break;
13486 case DISAS_SMC:
13487 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
13488 break;
13489 }
13490 }
13492 if (dc->condjmp) {
13493 /* "Condition failed" instruction codepath for the branch/trap insn */
13494 gen_set_label(dc->condlabel);
13495 gen_set_condexec(dc);
13496 if (unlikely(is_singlestepping(dc))) {
13497 gen_set_pc_im(dc, dc->pc);
13498 gen_singlestep_exception(dc);
13499 } else {
13500 gen_goto_tb(dc, 1, dc->pc);
13501 }
13502 }
13504 /* Functions above can change dc->pc, so re-align db->pc_next */
13505 dc->base.pc_next = dc->pc;
13506 }
13508 static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
13509 {
13510 DisasContext *dc = container_of(dcbase, DisasContext, base);
13512 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
13513 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
13514 }
13516 static const TranslatorOps arm_translator_ops = {
13517 .init_disas_context = arm_tr_init_disas_context,
13518 .tb_start = arm_tr_tb_start,
13519 .insn_start = arm_tr_insn_start,
13520 .breakpoint_check = arm_tr_breakpoint_check,
13521 .translate_insn = arm_tr_translate_insn,
13522 .tb_stop = arm_tr_tb_stop,
13523 .disas_log = arm_tr_disas_log,
13524 };
13526 static const TranslatorOps thumb_translator_ops = {
13527 .init_disas_context = arm_tr_init_disas_context,
13528 .tb_start = arm_tr_tb_start,
13529 .insn_start = arm_tr_insn_start,
13530 .breakpoint_check = arm_tr_breakpoint_check,
13531 .translate_insn = thumb_tr_translate_insn,
13532 .tb_stop = arm_tr_tb_stop,
13533 .disas_log = arm_tr_disas_log,
13534 };
13536 /* generate intermediate code for basic block 'tb'. */
13537 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
13538 {
13539 DisasContext dc;
13540 const TranslatorOps *ops = &arm_translator_ops;
13542 if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
13543 ops = &thumb_translator_ops;
13544 }
13545 #ifdef TARGET_AARCH64
13546 if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
13547 ops = &aarch64_translator_ops;
13548 }
13549 #endif
13551 translator_loop(ops, &dc.base, cpu, tb);
13552 }
13554 void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
13555 int flags)
13556 {
13557 ARMCPU *cpu = ARM_CPU(cs);
13558 CPUARMState *env = &cpu->env;
13559 int i;
13561 if (is_a64(env)) {
13562 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
13563 return;
13564 }
13566 for (i = 0; i < 16; i++) {
13567 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
13568 if ((i % 4) == 3)
13569 cpu_fprintf(f, "\n");
13570 else
13571 cpu_fprintf(f, " ");
13572 }
13574 if (arm_feature(env, ARM_FEATURE_M)) {
13575 uint32_t xpsr = xpsr_read(env);
13576 const char *mode;
13577 const char *ns_status = "";
13579 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
13580 ns_status = env->v7m.secure ? "S " : "NS ";
13581 }
13583 if (xpsr & XPSR_EXCP) {
13584 mode = "handler";
13585 } else {
13586 if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
13587 mode = "unpriv-thread";
13588 } else {
13589 mode = "priv-thread";
13590 }
13591 }
13593 cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
13594 xpsr,
13595 xpsr & XPSR_N ? 'N' : '-',
13596 xpsr & XPSR_Z ? 'Z' : '-',
13597 xpsr & XPSR_C ? 'C' : '-',
13598 xpsr & XPSR_V ? 'V' : '-',
13599 xpsr & XPSR_T ? 'T' : 'A',
13600 ns_status,
13601 mode);
13602 } else {
13603 uint32_t psr = cpsr_read(env);
13604 const char *ns_status = "";
13606 if (arm_feature(env, ARM_FEATURE_EL3) &&
13607 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
13608 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
13609 }
13611 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
13612 psr,
13613 psr & CPSR_N ? 'N' : '-',
13614 psr & CPSR_Z ? 'Z' : '-',
13615 psr & CPSR_C ? 'C' : '-',
13616 psr & CPSR_V ? 'V' : '-',
13617 psr & CPSR_T ? 'T' : 'A',
13618 ns_status,
13619 aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
13620 }
13622 if (flags & CPU_DUMP_FPU) {
13623 int numvfpregs = 0;
13624 if (arm_feature(env, ARM_FEATURE_VFP)) {
13625 numvfpregs += 16;
13626 }
13627 if (arm_feature(env, ARM_FEATURE_VFP3)) {
13628 numvfpregs += 16;
13629 }
13630 for (i = 0; i < numvfpregs; i++) {
13631 uint64_t v = *aa32_vfp_dreg(env, i);
13632 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
13633 i * 2, (uint32_t)v,
13634 i * 2 + 1, (uint32_t)(v >> 32),
13635 i, v);
13636 }
13637 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
13638 }
13639 }
13641 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
13642 target_ulong *data)
13643 {
13644 if (is_a64(env)) {
13645 env->pc = data[0];
13646 env->condexec_bits = 0;
13647 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
13648 } else {
13649 env->regs[15] = data[0];
13650 env->condexec_bits = data[1];
13651 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
13652 }
13653 }