target/arm: Implement SVE Memory Contiguous Load Group
[qemu.git] / target / arm / translate.c
blob 2a3e4f5d4c9b3488798510901a9035d4e77aeeee
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
27 #include "tcg-op.h"
28 #include "tcg-op-gvec.h"
29 #include "qemu/log.h"
30 #include "qemu/bitops.h"
31 #include "arm_ldst.h"
32 #include "exec/semihost.h"
34 #include "exec/helper-proto.h"
35 #include "exec/helper-gen.h"
37 #include "trace-tcg.h"
38 #include "exec/log.h"
41 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
43 /* currently all emulated v5 cores are also v5TE, so don't bother */
44 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
45 #define ENABLE_ARCH_5J arm_dc_feature(s, ARM_FEATURE_JAZELLE)
46 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
52 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
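/* For example, writing "ARCH(6T2);" at the top of a decode path makes the
 * insn UNDEF (via the illegal_op label) on cores without Thumb-2 support.
 */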
54 #include "translate.h"
56 #if defined(CONFIG_USER_ONLY)
57 #define IS_USER(s) 1
58 #else
59 #define IS_USER(s) (s->user)
60 #endif
62 /* We reuse the same 64-bit temporaries for efficiency. */
63 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
64 static TCGv_i32 cpu_R[16];
65 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66 TCGv_i64 cpu_exclusive_addr;
67 TCGv_i64 cpu_exclusive_val;
69 /* FIXME: These should be removed. */
70 static TCGv_i32 cpu_F0s, cpu_F1s;
71 static TCGv_i64 cpu_F0d, cpu_F1d;
73 #include "exec/gen-icount.h"
75 static const char *regnames[] =
76 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
77 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
79 /* Function prototypes for gen_ functions calling Neon helpers. */
80 typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
81 TCGv_i32, TCGv_i32);
83 /* initialize TCG globals. */
84 void arm_translate_init(void)
86 int i;
88 for (i = 0; i < 16; i++) {
89 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
90 offsetof(CPUARMState, regs[i]),
91 regnames[i]);
93 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
94 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
95 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
96 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
98 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
99 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
100 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
101 offsetof(CPUARMState, exclusive_val), "exclusive_val");
103 a64_translate_init();
106 /* Flags for the disas_set_da_iss info argument:
107 * lower bits hold the Rt register number, higher bits are flags.
109 typedef enum ISSInfo {
110 ISSNone = 0,
111 ISSRegMask = 0x1f,
112 ISSInvalid = (1 << 5),
113 ISSIsAcqRel = (1 << 6),
114 ISSIsWrite = (1 << 7),
115 ISSIs16Bit = (1 << 8),
116 } ISSInfo;
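/* Callers build an ISSInfo value by ORing the Rt register number with the
 * flag bits they need, e.g. (rt | ISSIsWrite | ISSIs16Bit) for a 16-bit
 * Thumb store, or pass ISSInvalid to suppress ISS reporting entirely
 * (for instance on writeback forms).
 */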
118 /* Save the syndrome information for a Data Abort */
119 static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
121 uint32_t syn;
122 int sas = memop & MO_SIZE;
123 bool sse = memop & MO_SIGN;
124 bool is_acqrel = issinfo & ISSIsAcqRel;
125 bool is_write = issinfo & ISSIsWrite;
126 bool is_16bit = issinfo & ISSIs16Bit;
127 int srt = issinfo & ISSRegMask;
129 if (issinfo & ISSInvalid) {
130 /* Some callsites want to conditionally provide ISS info,
131 * eg "only if this was not a writeback"
133 return;
136 if (srt == 15) {
137 /* For AArch32, insns where the src/dest is R15 never generate
138 * ISS information. Catching that here saves checking at all
139 * the call sites.
141 return;
144 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
145 0, 0, 0, is_write, 0, is_16bit);
146 disas_set_insn_syndrome(s, syn);
149 static inline int get_a32_user_mem_index(DisasContext *s)
151 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
152 * insns:
153 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
154 * otherwise, access as if at PL0.
156 switch (s->mmu_idx) {
157 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
158 case ARMMMUIdx_S12NSE0:
159 case ARMMMUIdx_S12NSE1:
160 return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
161 case ARMMMUIdx_S1E3:
162 case ARMMMUIdx_S1SE0:
163 case ARMMMUIdx_S1SE1:
164 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
165 case ARMMMUIdx_MUser:
166 case ARMMMUIdx_MPriv:
167 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
168 case ARMMMUIdx_MUserNegPri:
169 case ARMMMUIdx_MPrivNegPri:
170 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
171 case ARMMMUIdx_MSUser:
172 case ARMMMUIdx_MSPriv:
173 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
174 case ARMMMUIdx_MSUserNegPri:
175 case ARMMMUIdx_MSPrivNegPri:
176 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
177 case ARMMMUIdx_S2NS:
178 default:
179 g_assert_not_reached();
183 static inline TCGv_i32 load_cpu_offset(int offset)
185 TCGv_i32 tmp = tcg_temp_new_i32();
186 tcg_gen_ld_i32(tmp, cpu_env, offset);
187 return tmp;
190 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
192 static inline void store_cpu_offset(TCGv_i32 var, int offset)
194 tcg_gen_st_i32(var, cpu_env, offset);
195 tcg_temp_free_i32(var);
198 #define store_cpu_field(var, name) \
199 store_cpu_offset(var, offsetof(CPUARMState, name))
201 /* Set a variable to the value of a CPU register. */
202 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
204 if (reg == 15) {
205 uint32_t addr;
206 /* normally, since we updated PC, we need only to add one insn */
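/* s->pc has already been advanced past this insn during decode, so adding
 * one more insn length yields the architectural PC read value: the insn
 * address plus 8 in ARM state, or plus 4 for a 16-bit Thumb insn.
 */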
207 if (s->thumb)
208 addr = (long)s->pc + 2;
209 else
210 addr = (long)s->pc + 4;
211 tcg_gen_movi_i32(var, addr);
212 } else {
213 tcg_gen_mov_i32(var, cpu_R[reg]);
217 /* Create a new temporary and set it to the value of a CPU register. */
218 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
220 TCGv_i32 tmp = tcg_temp_new_i32();
221 load_reg_var(s, tmp, reg);
222 return tmp;
225 /* Set a CPU register. The source must be a temporary and will be
226 marked as dead. */
227 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
229 if (reg == 15) {
230 /* In Thumb mode, we must ignore bit 0.
231 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
232 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
233 * We choose to ignore [1:0] in ARM mode for all architecture versions.
235 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
236 s->base.is_jmp = DISAS_JUMP;
238 tcg_gen_mov_i32(cpu_R[reg], var);
239 tcg_temp_free_i32(var);
242 /* Value extensions. */
243 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
244 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
245 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
246 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
248 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
249 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
252 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
254 TCGv_i32 tmp_mask = tcg_const_i32(mask);
255 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
256 tcg_temp_free_i32(tmp_mask);
258 /* Set NZCV flags from the high 4 bits of var. */
259 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
261 static void gen_exception_internal(int excp)
263 TCGv_i32 tcg_excp = tcg_const_i32(excp);
265 assert(excp_is_internal(excp));
266 gen_helper_exception_internal(cpu_env, tcg_excp);
267 tcg_temp_free_i32(tcg_excp);
270 static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
272 TCGv_i32 tcg_excp = tcg_const_i32(excp);
273 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
274 TCGv_i32 tcg_el = tcg_const_i32(target_el);
276 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
277 tcg_syn, tcg_el);
279 tcg_temp_free_i32(tcg_el);
280 tcg_temp_free_i32(tcg_syn);
281 tcg_temp_free_i32(tcg_excp);
284 static void gen_ss_advance(DisasContext *s)
286 /* If the singlestep state is Active-not-pending, advance to
287 * Active-pending.
289 if (s->ss_active) {
290 s->pstate_ss = 0;
291 gen_helper_clear_pstate_ss(cpu_env);
295 static void gen_step_complete_exception(DisasContext *s)
297 /* We have just completed a single step of an insn. Move from Active-not-pending
298 * to Active-pending, and then also take the swstep exception.
299 * This corresponds to making the (IMPDEF) choice to prioritize
300 * swstep exceptions over asynchronous exceptions taken to an exception
301 * level where debug is disabled. This choice has the advantage that
302 * we do not need to maintain internal state corresponding to the
303 * ISV/EX syndrome bits between completion of the step and generation
304 * of the exception, and our syndrome information is always correct.
306 gen_ss_advance(s);
307 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
308 default_exception_el(s));
309 s->base.is_jmp = DISAS_NORETURN;
312 static void gen_singlestep_exception(DisasContext *s)
314 /* Generate the right kind of exception for singlestep, which is
315 * either the architectural singlestep or EXCP_DEBUG for QEMU's
316 * gdb singlestepping.
318 if (s->ss_active) {
319 gen_step_complete_exception(s);
320 } else {
321 gen_exception_internal(EXCP_DEBUG);
325 static inline bool is_singlestepping(DisasContext *s)
327 /* Return true if we are singlestepping either because of
328 * architectural singlestep or QEMU gdbstub singlestep. This does
329 * not include the command line '-singlestep' mode which is rather
330 * misnamed as it only means "one instruction per TB" and doesn't
331 * affect the code we generate.
333 return s->base.singlestep_enabled || s->ss_active;
336 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
338 TCGv_i32 tmp1 = tcg_temp_new_i32();
339 TCGv_i32 tmp2 = tcg_temp_new_i32();
340 tcg_gen_ext16s_i32(tmp1, a);
341 tcg_gen_ext16s_i32(tmp2, b);
342 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
343 tcg_temp_free_i32(tmp2);
344 tcg_gen_sari_i32(a, a, 16);
345 tcg_gen_sari_i32(b, b, 16);
346 tcg_gen_mul_i32(b, b, a);
347 tcg_gen_mov_i32(a, tmp1);
348 tcg_temp_free_i32(tmp1);
351 /* Byteswap each halfword. */
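/* e.g. 0x11223344 becomes 0x22114433: the bytes within each halfword are
 * swapped, while the halfwords themselves stay in place. */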
352 static void gen_rev16(TCGv_i32 var)
354 TCGv_i32 tmp = tcg_temp_new_i32();
355 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
356 tcg_gen_shri_i32(tmp, var, 8);
357 tcg_gen_and_i32(tmp, tmp, mask);
358 tcg_gen_and_i32(var, var, mask);
359 tcg_gen_shli_i32(var, var, 8);
360 tcg_gen_or_i32(var, var, tmp);
361 tcg_temp_free_i32(mask);
362 tcg_temp_free_i32(tmp);
365 /* Byteswap low halfword and sign extend. */
366 static void gen_revsh(TCGv_i32 var)
368 tcg_gen_ext16u_i32(var, var);
369 tcg_gen_bswap16_i32(var, var);
370 tcg_gen_ext16s_i32(var, var);
373 /* Return (b << 32) + a. Mark inputs as dead */
374 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
376 TCGv_i64 tmp64 = tcg_temp_new_i64();
378 tcg_gen_extu_i32_i64(tmp64, b);
379 tcg_temp_free_i32(b);
380 tcg_gen_shli_i64(tmp64, tmp64, 32);
381 tcg_gen_add_i64(a, tmp64, a);
383 tcg_temp_free_i64(tmp64);
384 return a;
387 /* Return (b << 32) - a. Mark inputs as dead. */
388 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
390 TCGv_i64 tmp64 = tcg_temp_new_i64();
392 tcg_gen_extu_i32_i64(tmp64, b);
393 tcg_temp_free_i32(b);
394 tcg_gen_shli_i64(tmp64, tmp64, 32);
395 tcg_gen_sub_i64(a, tmp64, a);
397 tcg_temp_free_i64(tmp64);
398 return a;
401 /* 32x32->64 multiply. Marks inputs as dead. */
402 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
404 TCGv_i32 lo = tcg_temp_new_i32();
405 TCGv_i32 hi = tcg_temp_new_i32();
406 TCGv_i64 ret;
408 tcg_gen_mulu2_i32(lo, hi, a, b);
409 tcg_temp_free_i32(a);
410 tcg_temp_free_i32(b);
412 ret = tcg_temp_new_i64();
413 tcg_gen_concat_i32_i64(ret, lo, hi);
414 tcg_temp_free_i32(lo);
415 tcg_temp_free_i32(hi);
417 return ret;
420 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
422 TCGv_i32 lo = tcg_temp_new_i32();
423 TCGv_i32 hi = tcg_temp_new_i32();
424 TCGv_i64 ret;
426 tcg_gen_muls2_i32(lo, hi, a, b);
427 tcg_temp_free_i32(a);
428 tcg_temp_free_i32(b);
430 ret = tcg_temp_new_i64();
431 tcg_gen_concat_i32_i64(ret, lo, hi);
432 tcg_temp_free_i32(lo);
433 tcg_temp_free_i32(hi);
435 return ret;
438 /* Swap low and high halfwords. */
439 static void gen_swap_half(TCGv_i32 var)
441 TCGv_i32 tmp = tcg_temp_new_i32();
442 tcg_gen_shri_i32(tmp, var, 16);
443 tcg_gen_shli_i32(var, var, 16);
444 tcg_gen_or_i32(var, var, tmp);
445 tcg_temp_free_i32(tmp);
448 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
449 tmp = (t0 ^ t1) & 0x8000;
450 t0 &= ~0x8000;
451 t1 &= ~0x8000;
452 t0 = (t0 + t1) ^ tmp;
455 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
457 TCGv_i32 tmp = tcg_temp_new_i32();
458 tcg_gen_xor_i32(tmp, t0, t1);
459 tcg_gen_andi_i32(tmp, tmp, 0x8000);
460 tcg_gen_andi_i32(t0, t0, ~0x8000);
461 tcg_gen_andi_i32(t1, t1, ~0x8000);
462 tcg_gen_add_i32(t0, t0, t1);
463 tcg_gen_xor_i32(t0, t0, tmp);
464 tcg_temp_free_i32(tmp);
465 tcg_temp_free_i32(t1);
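/* Clearing bit 15 of both operands ensures no carry propagates from the low
 * halfword into the high one; XORing the saved bit-15 difference back in
 * restores the correct top bit of the low 16-bit sum.
 */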
468 /* Set CF to the top bit of var. */
469 static void gen_set_CF_bit31(TCGv_i32 var)
471 tcg_gen_shri_i32(cpu_CF, var, 31);
474 /* Set N and Z flags from var. */
475 static inline void gen_logic_CC(TCGv_i32 var)
477 tcg_gen_mov_i32(cpu_NF, var);
478 tcg_gen_mov_i32(cpu_ZF, var);
481 /* T0 += T1 + CF. */
482 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
484 tcg_gen_add_i32(t0, t0, t1);
485 tcg_gen_add_i32(t0, t0, cpu_CF);
488 /* dest = T0 + T1 + CF. */
489 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
491 tcg_gen_add_i32(dest, t0, t1);
492 tcg_gen_add_i32(dest, dest, cpu_CF);
495 /* dest = T0 - T1 + CF - 1. */
496 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
498 tcg_gen_sub_i32(dest, t0, t1);
499 tcg_gen_add_i32(dest, dest, cpu_CF);
500 tcg_gen_subi_i32(dest, dest, 1);
503 /* dest = T0 + T1. Compute C, N, V and Z flags */
504 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
506 TCGv_i32 tmp = tcg_temp_new_i32();
507 tcg_gen_movi_i32(tmp, 0);
508 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
509 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
510 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
511 tcg_gen_xor_i32(tmp, t0, t1);
512 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
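/* At this point VF = (result ^ t0) & ~(t0 ^ t1): bit 31 is set precisely
 * when the operands had the same sign but the result's sign differs,
 * i.e. signed overflow. Only bit 31 of VF is significant.
 */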
513 tcg_temp_free_i32(tmp);
514 tcg_gen_mov_i32(dest, cpu_NF);
517 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
518 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
520 TCGv_i32 tmp = tcg_temp_new_i32();
521 if (TCG_TARGET_HAS_add2_i32) {
522 tcg_gen_movi_i32(tmp, 0);
523 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
524 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
525 } else {
526 TCGv_i64 q0 = tcg_temp_new_i64();
527 TCGv_i64 q1 = tcg_temp_new_i64();
528 tcg_gen_extu_i32_i64(q0, t0);
529 tcg_gen_extu_i32_i64(q1, t1);
530 tcg_gen_add_i64(q0, q0, q1);
531 tcg_gen_extu_i32_i64(q1, cpu_CF);
532 tcg_gen_add_i64(q0, q0, q1);
533 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
534 tcg_temp_free_i64(q0);
535 tcg_temp_free_i64(q1);
537 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
538 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
539 tcg_gen_xor_i32(tmp, t0, t1);
540 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
541 tcg_temp_free_i32(tmp);
542 tcg_gen_mov_i32(dest, cpu_NF);
545 /* dest = T0 - T1. Compute C, N, V and Z flags */
546 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
548 TCGv_i32 tmp;
549 tcg_gen_sub_i32(cpu_NF, t0, t1);
550 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
551 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
552 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
553 tmp = tcg_temp_new_i32();
554 tcg_gen_xor_i32(tmp, t0, t1);
555 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
556 tcg_temp_free_i32(tmp);
557 tcg_gen_mov_i32(dest, cpu_NF);
560 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
561 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
563 TCGv_i32 tmp = tcg_temp_new_i32();
564 tcg_gen_not_i32(tmp, t1);
565 gen_adc_CC(dest, t0, tmp);
566 tcg_temp_free_i32(tmp);
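/* Register-controlled LSL/LSR: only the bottom byte of the shift amount is
 * significant, and a count of 32 or more must yield zero. The movcond below
 * provides that, since the TCG shift ops themselves only define results for
 * counts 0..31. ASR (gen_sar below) instead clamps the count to 31.
 */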
569 #define GEN_SHIFT(name) \
570 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
572 TCGv_i32 tmp1, tmp2, tmp3; \
573 tmp1 = tcg_temp_new_i32(); \
574 tcg_gen_andi_i32(tmp1, t1, 0xff); \
575 tmp2 = tcg_const_i32(0); \
576 tmp3 = tcg_const_i32(0x1f); \
577 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
578 tcg_temp_free_i32(tmp3); \
579 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
580 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
581 tcg_temp_free_i32(tmp2); \
582 tcg_temp_free_i32(tmp1); \
584 GEN_SHIFT(shl)
585 GEN_SHIFT(shr)
586 #undef GEN_SHIFT
588 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
590 TCGv_i32 tmp1, tmp2;
591 tmp1 = tcg_temp_new_i32();
592 tcg_gen_andi_i32(tmp1, t1, 0xff);
593 tmp2 = tcg_const_i32(0x1f);
594 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
595 tcg_temp_free_i32(tmp2);
596 tcg_gen_sar_i32(dest, t0, tmp1);
597 tcg_temp_free_i32(tmp1);
600 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
602 TCGv_i32 c0 = tcg_const_i32(0);
603 TCGv_i32 tmp = tcg_temp_new_i32();
604 tcg_gen_neg_i32(tmp, src);
605 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
606 tcg_temp_free_i32(c0);
607 tcg_temp_free_i32(tmp);
610 static void shifter_out_im(TCGv_i32 var, int shift)
612 if (shift == 0) {
613 tcg_gen_andi_i32(cpu_CF, var, 1);
614 } else {
615 tcg_gen_shri_i32(cpu_CF, var, shift);
616 if (shift != 31) {
617 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
622 /* Shift by immediate. Includes special handling for shift == 0. */
623 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
624 int shift, int flags)
626 switch (shiftop) {
627 case 0: /* LSL */
628 if (shift != 0) {
629 if (flags)
630 shifter_out_im(var, 32 - shift);
631 tcg_gen_shli_i32(var, var, shift);
633 break;
634 case 1: /* LSR */
635 if (shift == 0) {
636 if (flags) {
637 tcg_gen_shri_i32(cpu_CF, var, 31);
639 tcg_gen_movi_i32(var, 0);
640 } else {
641 if (flags)
642 shifter_out_im(var, shift - 1);
643 tcg_gen_shri_i32(var, var, shift);
645 break;
646 case 2: /* ASR */
647 if (shift == 0)
648 shift = 32;
649 if (flags)
650 shifter_out_im(var, shift - 1);
651 if (shift == 32)
652 shift = 31;
653 tcg_gen_sari_i32(var, var, shift);
654 break;
655 case 3: /* ROR/RRX */
656 if (shift != 0) {
657 if (flags)
658 shifter_out_im(var, shift - 1);
659 tcg_gen_rotri_i32(var, var, shift); break;
660 } else {
661 TCGv_i32 tmp = tcg_temp_new_i32();
662 tcg_gen_shli_i32(tmp, cpu_CF, 31);
663 if (flags)
664 shifter_out_im(var, 0);
665 tcg_gen_shri_i32(var, var, 1);
666 tcg_gen_or_i32(var, var, tmp);
667 tcg_temp_free_i32(tmp);
672 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
673 TCGv_i32 shift, int flags)
675 if (flags) {
676 switch (shiftop) {
677 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
678 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
679 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
680 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
682 } else {
683 switch (shiftop) {
684 case 0:
685 gen_shl(var, var, shift);
686 break;
687 case 1:
688 gen_shr(var, var, shift);
689 break;
690 case 2:
691 gen_sar(var, var, shift);
692 break;
693 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
694 tcg_gen_rotr_i32(var, var, shift); break;
697 tcg_temp_free_i32(shift);
700 #define PAS_OP(pfx) \
701 switch (op2) { \
702 case 0: gen_pas_helper(glue(pfx,add16)); break; \
703 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
704 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
705 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
706 case 4: gen_pas_helper(glue(pfx,add8)); break; \
707 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
709 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
711 TCGv_ptr tmp;
713 switch (op1) {
714 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
715 case 1:
716 tmp = tcg_temp_new_ptr();
717 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
718 PAS_OP(s)
719 tcg_temp_free_ptr(tmp);
720 break;
721 case 5:
722 tmp = tcg_temp_new_ptr();
723 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
724 PAS_OP(u)
725 tcg_temp_free_ptr(tmp);
726 break;
727 #undef gen_pas_helper
728 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
729 case 2:
730 PAS_OP(q);
731 break;
732 case 3:
733 PAS_OP(sh);
734 break;
735 case 6:
736 PAS_OP(uq);
737 break;
738 case 7:
739 PAS_OP(uh);
740 break;
741 #undef gen_pas_helper
744 #undef PAS_OP
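/* In the expansion above, e.g. op1 == 1 and op2 == 0 selects PAS_OP(s) with
 * add16, i.e. a call to gen_helper_sadd16(a, a, b, tmp) where tmp points at
 * the GE flags; only the plain signed and unsigned variants (op1 1 and 5)
 * update GE, so only they receive the pointer.
 */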
746 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
747 #define PAS_OP(pfx) \
748 switch (op1) { \
749 case 0: gen_pas_helper(glue(pfx,add8)); break; \
750 case 1: gen_pas_helper(glue(pfx,add16)); break; \
751 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
752 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
753 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
754 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
756 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
758 TCGv_ptr tmp;
760 switch (op2) {
761 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
762 case 0:
763 tmp = tcg_temp_new_ptr();
764 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
765 PAS_OP(s)
766 tcg_temp_free_ptr(tmp);
767 break;
768 case 4:
769 tmp = tcg_temp_new_ptr();
770 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
771 PAS_OP(u)
772 tcg_temp_free_ptr(tmp);
773 break;
774 #undef gen_pas_helper
775 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
776 case 1:
777 PAS_OP(q);
778 break;
779 case 2:
780 PAS_OP(sh);
781 break;
782 case 5:
783 PAS_OP(uq);
784 break;
785 case 6:
786 PAS_OP(uh);
787 break;
788 #undef gen_pas_helper
791 #undef PAS_OP
794 * Generate a conditional based on ARM condition code cc.
795 * This is common between ARM and AArch64 targets.
797 void arm_test_cc(DisasCompare *cmp, int cc)
799 TCGv_i32 value;
800 TCGCond cond;
801 bool global = true;
803 switch (cc) {
804 case 0: /* eq: Z */
805 case 1: /* ne: !Z */
806 cond = TCG_COND_EQ;
807 value = cpu_ZF;
808 break;
810 case 2: /* cs: C */
811 case 3: /* cc: !C */
812 cond = TCG_COND_NE;
813 value = cpu_CF;
814 break;
816 case 4: /* mi: N */
817 case 5: /* pl: !N */
818 cond = TCG_COND_LT;
819 value = cpu_NF;
820 break;
822 case 6: /* vs: V */
823 case 7: /* vc: !V */
824 cond = TCG_COND_LT;
825 value = cpu_VF;
826 break;
828 case 8: /* hi: C && !Z */
829 case 9: /* ls: !C || Z -> !(C && !Z) */
830 cond = TCG_COND_NE;
831 value = tcg_temp_new_i32();
832 global = false;
833 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
834 ZF is non-zero for !Z; so AND the two subexpressions. */
835 tcg_gen_neg_i32(value, cpu_CF);
836 tcg_gen_and_i32(value, value, cpu_ZF);
837 break;
839 case 10: /* ge: N == V -> N ^ V == 0 */
840 case 11: /* lt: N != V -> N ^ V != 0 */
841 /* Since we're only interested in the sign bit, == 0 is >= 0. */
842 cond = TCG_COND_GE;
843 value = tcg_temp_new_i32();
844 global = false;
845 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
846 break;
848 case 12: /* gt: !Z && N == V */
849 case 13: /* le: Z || N != V */
850 cond = TCG_COND_NE;
851 value = tcg_temp_new_i32();
852 global = false;
853 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
854 * the sign bit then AND with ZF to yield the result. */
855 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
856 tcg_gen_sari_i32(value, value, 31);
857 tcg_gen_andc_i32(value, cpu_ZF, value);
858 break;
860 case 14: /* always */
861 case 15: /* always */
862 /* Use the ALWAYS condition, which will fold early.
863 * It doesn't matter what we use for the value. */
864 cond = TCG_COND_ALWAYS;
865 value = cpu_ZF;
866 goto no_invert;
868 default:
869 fprintf(stderr, "Bad condition code 0x%x\n", cc);
870 abort();
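/* Odd condition codes are the inverses of the even ones handled above, so
 * bit 0 of cc simply selects the inverted TCG condition (except for the
 * two "always" encodings, which skip the inversion).
 */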
873 if (cc & 1) {
874 cond = tcg_invert_cond(cond);
877 no_invert:
878 cmp->cond = cond;
879 cmp->value = value;
880 cmp->value_global = global;
883 void arm_free_cc(DisasCompare *cmp)
885 if (!cmp->value_global) {
886 tcg_temp_free_i32(cmp->value);
890 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
892 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
895 void arm_gen_test_cc(int cc, TCGLabel *label)
897 DisasCompare cmp;
898 arm_test_cc(&cmp, cc);
899 arm_jump_cc(&cmp, label);
900 arm_free_cc(&cmp);
903 static const uint8_t table_logic_cc[16] = {
904 1, /* and */
905 1, /* xor */
906 0, /* sub */
907 0, /* rsb */
908 0, /* add */
909 0, /* adc */
910 0, /* sbc */
911 0, /* rsc */
912 1, /* andl */
913 1, /* xorl */
914 0, /* cmp */
915 0, /* cmn */
916 1, /* orr */
917 1, /* mov */
918 1, /* bic */
919 1, /* mvn */
922 static inline void gen_set_condexec(DisasContext *s)
924 if (s->condexec_mask) {
925 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
926 TCGv_i32 tmp = tcg_temp_new_i32();
927 tcg_gen_movi_i32(tmp, val);
928 store_cpu_field(tmp, condexec_bits);
932 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
934 tcg_gen_movi_i32(cpu_R[15], val);
937 /* Set PC and Thumb state from an immediate address. */
938 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
940 TCGv_i32 tmp;
942 s->base.is_jmp = DISAS_JUMP;
943 if (s->thumb != (addr & 1)) {
944 tmp = tcg_temp_new_i32();
945 tcg_gen_movi_i32(tmp, addr & 1);
946 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
947 tcg_temp_free_i32(tmp);
949 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
952 /* Set PC and Thumb state from var. var is marked as dead. */
953 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
955 s->base.is_jmp = DISAS_JUMP;
956 tcg_gen_andi_i32(cpu_R[15], var, ~1);
957 tcg_gen_andi_i32(var, var, 1);
958 store_cpu_field(var, thumb);
961 /* Set PC and Thumb state from var. var is marked as dead.
962 * For M-profile CPUs, include logic to detect exception-return
963 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
964 * and BX reg, and no others, and happens only for code in Handler mode.
966 static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
968 /* Generate the same code here as for a simple bx, but flag via
969 * s->base.is_jmp that we need to do the rest of the work later.
971 gen_bx(s, var);
972 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
973 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
974 s->base.is_jmp = DISAS_BX_EXCRET;
978 static inline void gen_bx_excret_final_code(DisasContext *s)
980 /* Generate the code to finish possible exception return and end the TB */
981 TCGLabel *excret_label = gen_new_label();
982 uint32_t min_magic;
984 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
985 /* Covers FNC_RETURN and EXC_RETURN magic */
986 min_magic = FNC_RETURN_MIN_MAGIC;
987 } else {
988 /* EXC_RETURN magic only */
989 min_magic = EXC_RETURN_MIN_MAGIC;
992 /* Is the new PC value in the magic range indicating exception return? */
993 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
994 /* No: end the TB as we would for a DISAS_JMP */
995 if (is_singlestepping(s)) {
996 gen_singlestep_exception(s);
997 } else {
998 tcg_gen_exit_tb(NULL, 0);
1000 gen_set_label(excret_label);
1001 /* Yes: this is an exception return.
1002 * At this point in runtime env->regs[15] and env->thumb will hold
1003 * the exception-return magic number, which do_v7m_exception_exit()
1004 * will read. Nothing else will be able to see those values because
1005 * the cpu-exec main loop guarantees that we will always go straight
1006 * from raising the exception to the exception-handling code.
1008 * gen_ss_advance(s) does nothing on M profile currently but
1009 * calling it is conceptually the right thing as we have executed
1010 * this instruction (compare SWI, HVC, SMC handling).
1012 gen_ss_advance(s);
1013 gen_exception_internal(EXCP_EXCEPTION_EXIT);
1016 static inline void gen_bxns(DisasContext *s, int rm)
1018 TCGv_i32 var = load_reg(s, rm);
1020 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1021 * we need to sync state before calling it, but:
1022 * - we don't need to do gen_set_pc_im() because the bxns helper will
1023 * always set the PC itself
1024 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1025 * unless it's outside an IT block or the last insn in an IT block,
1026 * so we know that condexec == 0 (already set at the top of the TB)
1027 * is correct in the non-UNPREDICTABLE cases, and we can choose
1028 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1030 gen_helper_v7m_bxns(cpu_env, var);
1031 tcg_temp_free_i32(var);
1032 s->base.is_jmp = DISAS_EXIT;
1035 static inline void gen_blxns(DisasContext *s, int rm)
1037 TCGv_i32 var = load_reg(s, rm);
1039 /* We don't need to sync condexec state, for the same reason as bxns.
1040 * We do however need to set the PC, because the blxns helper reads it.
1041 * The blxns helper may throw an exception.
1043 gen_set_pc_im(s, s->pc);
1044 gen_helper_v7m_blxns(cpu_env, var);
1045 tcg_temp_free_i32(var);
1046 s->base.is_jmp = DISAS_EXIT;
1049 /* Variant of store_reg which uses branch&exchange logic when storing
1050 to r15 in ARM architecture v7 and above. The source must be a temporary
1051 and will be marked as dead. */
1052 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
1054 if (reg == 15 && ENABLE_ARCH_7) {
1055 gen_bx(s, var);
1056 } else {
1057 store_reg(s, reg, var);
1061 /* Variant of store_reg which uses branch&exchange logic when storing
1062 * to r15 in ARM architecture v5T and above. This is used for storing
1063 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1064 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
1065 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
1067 if (reg == 15 && ENABLE_ARCH_5) {
1068 gen_bx_excret(s, var);
1069 } else {
1070 store_reg(s, reg, var);
1074 #ifdef CONFIG_USER_ONLY
1075 #define IS_USER_ONLY 1
1076 #else
1077 #define IS_USER_ONLY 0
1078 #endif
1080 /* Abstractions of "generate code to do a guest load/store for
1081 * AArch32", where a vaddr is always 32 bits (and is zero
1082 * extended if we're a 64 bit core) and data is also
1083 * 32 bits unless specifically doing a 64 bit access.
1084 * These functions work like tcg_gen_qemu_{ld,st}* except
1085 * that the address argument is TCGv_i32 rather than TCGv.
1088 static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
1090 TCGv addr = tcg_temp_new();
1091 tcg_gen_extu_i32_tl(addr, a32);
1093 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1094 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1095 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
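/* For BE32 (SCTLR.B) system emulation the bus is word-invariant big-endian,
 * so a sub-word access is redirected within its 32-bit word by XORing the
 * address: byte accesses XOR with 3, halfword accesses with 2.
 */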
1097 return addr;
1100 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1101 int index, TCGMemOp opc)
1103 TCGv addr;
1105 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1106 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1107 opc |= MO_ALIGN;
1110 addr = gen_aa32_addr(s, a32, opc);
1111 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1112 tcg_temp_free(addr);
1115 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1116 int index, TCGMemOp opc)
1118 TCGv addr;
1120 if (arm_dc_feature(s, ARM_FEATURE_M) &&
1121 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
1122 opc |= MO_ALIGN;
1125 addr = gen_aa32_addr(s, a32, opc);
1126 tcg_gen_qemu_st_i32(val, addr, index, opc);
1127 tcg_temp_free(addr);
1130 #define DO_GEN_LD(SUFF, OPC) \
1131 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
1132 TCGv_i32 a32, int index) \
1134 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
1136 static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1137 TCGv_i32 val, \
1138 TCGv_i32 a32, int index, \
1139 ISSInfo issinfo) \
1141 gen_aa32_ld##SUFF(s, val, a32, index); \
1142 disas_set_da_iss(s, OPC, issinfo); \
1145 #define DO_GEN_ST(SUFF, OPC) \
1146 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1147 TCGv_i32 a32, int index) \
1149 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
1151 static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1152 TCGv_i32 val, \
1153 TCGv_i32 a32, int index, \
1154 ISSInfo issinfo) \
1156 gen_aa32_st##SUFF(s, val, a32, index); \
1157 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
1160 static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
1162 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1163 if (!IS_USER_ONLY && s->sctlr_b) {
1164 tcg_gen_rotri_i64(val, val, 32);
1168 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1169 int index, TCGMemOp opc)
1171 TCGv addr = gen_aa32_addr(s, a32, opc);
1172 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1173 gen_aa32_frob64(s, val);
1174 tcg_temp_free(addr);
1177 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1178 TCGv_i32 a32, int index)
1180 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1183 static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1184 int index, TCGMemOp opc)
1186 TCGv addr = gen_aa32_addr(s, a32, opc);
1188 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1189 if (!IS_USER_ONLY && s->sctlr_b) {
1190 TCGv_i64 tmp = tcg_temp_new_i64();
1191 tcg_gen_rotri_i64(tmp, val, 32);
1192 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1193 tcg_temp_free_i64(tmp);
1194 } else {
1195 tcg_gen_qemu_st_i64(val, addr, index, opc);
1197 tcg_temp_free(addr);
1200 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1201 TCGv_i32 a32, int index)
1203 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1206 DO_GEN_LD(8s, MO_SB)
1207 DO_GEN_LD(8u, MO_UB)
1208 DO_GEN_LD(16s, MO_SW)
1209 DO_GEN_LD(16u, MO_UW)
1210 DO_GEN_LD(32u, MO_UL)
1211 DO_GEN_ST(8, MO_UB)
1212 DO_GEN_ST(16, MO_UW)
1213 DO_GEN_ST(32, MO_UL)
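/* Each DO_GEN_LD/DO_GEN_ST line above defines a plain accessor plus an
 * "_iss" variant that also records syndrome information, e.g.
 * DO_GEN_LD(16u, MO_UW) provides gen_aa32_ld16u() and gen_aa32_ld16u_iss().
 */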
1215 static inline void gen_hvc(DisasContext *s, int imm16)
1217 /* The pre HVC helper handles cases when HVC gets trapped
1218 * as an undefined insn by runtime configuration (ie before
1219 * the insn really executes).
1221 gen_set_pc_im(s, s->pc - 4);
1222 gen_helper_pre_hvc(cpu_env);
1223 /* Otherwise we will treat this as a real exception which
1224 * happens after execution of the insn. (The distinction matters
1225 * for the PC value reported to the exception handler and also
1226 * for single stepping.)
1228 s->svc_imm = imm16;
1229 gen_set_pc_im(s, s->pc);
1230 s->base.is_jmp = DISAS_HVC;
1233 static inline void gen_smc(DisasContext *s)
1235 /* As with HVC, we may take an exception either before or after
1236 * the insn executes.
1238 TCGv_i32 tmp;
1240 gen_set_pc_im(s, s->pc - 4);
1241 tmp = tcg_const_i32(syn_aa32_smc());
1242 gen_helper_pre_smc(cpu_env, tmp);
1243 tcg_temp_free_i32(tmp);
1244 gen_set_pc_im(s, s->pc);
1245 s->base.is_jmp = DISAS_SMC;
1248 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1250 gen_set_condexec(s);
1251 gen_set_pc_im(s, s->pc - offset);
1252 gen_exception_internal(excp);
1253 s->base.is_jmp = DISAS_NORETURN;
1256 static void gen_exception_insn(DisasContext *s, int offset, int excp,
1257 int syn, uint32_t target_el)
1259 gen_set_condexec(s);
1260 gen_set_pc_im(s, s->pc - offset);
1261 gen_exception(excp, syn, target_el);
1262 s->base.is_jmp = DISAS_NORETURN;
1265 static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
1267 TCGv_i32 tcg_syn;
1269 gen_set_condexec(s);
1270 gen_set_pc_im(s, s->pc - offset);
1271 tcg_syn = tcg_const_i32(syn);
1272 gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
1273 tcg_temp_free_i32(tcg_syn);
1274 s->base.is_jmp = DISAS_NORETURN;
1277 /* Force a TB lookup after an instruction that changes the CPU state. */
1278 static inline void gen_lookup_tb(DisasContext *s)
1280 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
1281 s->base.is_jmp = DISAS_EXIT;
1284 static inline void gen_hlt(DisasContext *s, int imm)
1286 /* HLT. This has two purposes.
1287 * Architecturally, it is an external halting debug instruction.
1288 * Since QEMU doesn't implement external debug, we treat this as
1289 * the architecture requires when halting debug is disabled: it will UNDEF.
1290 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1291 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1292 * must trigger semihosting even for ARMv7 and earlier, where
1293 * HLT was an undefined encoding.
1294 * In system mode, we don't allow userspace access to
1295 * semihosting, to provide some semblance of security
1296 * (and for consistency with our 64-bit semihosting).
1298 if (semihosting_enabled() &&
1299 #ifndef CONFIG_USER_ONLY
1300 s->current_el != 0 &&
1301 #endif
1302 (imm == (s->thumb ? 0x3c : 0xf000))) {
1303 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1304 return;
1307 gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
1308 default_exception_el(s));
1311 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1312 TCGv_i32 var)
1314 int val, rm, shift, shiftop;
1315 TCGv_i32 offset;
1317 if (!(insn & (1 << 25))) {
1318 /* immediate */
1319 val = insn & 0xfff;
1320 if (!(insn & (1 << 23)))
1321 val = -val;
1322 if (val != 0)
1323 tcg_gen_addi_i32(var, var, val);
1324 } else {
1325 /* shift/register */
1326 rm = (insn) & 0xf;
1327 shift = (insn >> 7) & 0x1f;
1328 shiftop = (insn >> 5) & 3;
1329 offset = load_reg(s, rm);
1330 gen_arm_shift_im(offset, shiftop, shift, 0);
1331 if (!(insn & (1 << 23)))
1332 tcg_gen_sub_i32(var, var, offset);
1333 else
1334 tcg_gen_add_i32(var, var, offset);
1335 tcg_temp_free_i32(offset);
1339 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1340 int extra, TCGv_i32 var)
1342 int val, rm;
1343 TCGv_i32 offset;
1345 if (insn & (1 << 22)) {
1346 /* immediate */
1347 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1348 if (!(insn & (1 << 23)))
1349 val = -val;
1350 val += extra;
1351 if (val != 0)
1352 tcg_gen_addi_i32(var, var, val);
1353 } else {
1354 /* register */
1355 if (extra)
1356 tcg_gen_addi_i32(var, var, extra);
1357 rm = (insn) & 0xf;
1358 offset = load_reg(s, rm);
1359 if (!(insn & (1 << 23)))
1360 tcg_gen_sub_i32(var, var, offset);
1361 else
1362 tcg_gen_add_i32(var, var, offset);
1363 tcg_temp_free_i32(offset);
1367 static TCGv_ptr get_fpstatus_ptr(int neon)
1369 TCGv_ptr statusptr = tcg_temp_new_ptr();
1370 int offset;
1371 if (neon) {
1372 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1373 } else {
1374 offset = offsetof(CPUARMState, vfp.fp_status);
1376 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1377 return statusptr;
1380 #define VFP_OP2(name) \
1381 static inline void gen_vfp_##name(int dp) \
1383 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1384 if (dp) { \
1385 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1386 } else { \
1387 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1389 tcg_temp_free_ptr(fpst); \
1392 VFP_OP2(add)
1393 VFP_OP2(sub)
1394 VFP_OP2(mul)
1395 VFP_OP2(div)
1397 #undef VFP_OP2
1399 static inline void gen_vfp_F1_mul(int dp)
1401 /* Like gen_vfp_mul() but put result in F1 */
1402 TCGv_ptr fpst = get_fpstatus_ptr(0);
1403 if (dp) {
1404 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1405 } else {
1406 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1408 tcg_temp_free_ptr(fpst);
1411 static inline void gen_vfp_F1_neg(int dp)
1413 /* Like gen_vfp_neg() but put result in F1 */
1414 if (dp) {
1415 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1416 } else {
1417 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1421 static inline void gen_vfp_abs(int dp)
1423 if (dp)
1424 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1425 else
1426 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1429 static inline void gen_vfp_neg(int dp)
1431 if (dp)
1432 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1433 else
1434 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1437 static inline void gen_vfp_sqrt(int dp)
1439 if (dp)
1440 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1441 else
1442 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1445 static inline void gen_vfp_cmp(int dp)
1447 if (dp)
1448 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1449 else
1450 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1453 static inline void gen_vfp_cmpe(int dp)
1455 if (dp)
1456 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1457 else
1458 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1461 static inline void gen_vfp_F1_ld0(int dp)
1463 if (dp)
1464 tcg_gen_movi_i64(cpu_F1d, 0);
1465 else
1466 tcg_gen_movi_i32(cpu_F1s, 0);
1469 #define VFP_GEN_ITOF(name) \
1470 static inline void gen_vfp_##name(int dp, int neon) \
1472 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1473 if (dp) { \
1474 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1475 } else { \
1476 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1478 tcg_temp_free_ptr(statusptr); \
1481 VFP_GEN_ITOF(uito)
1482 VFP_GEN_ITOF(sito)
1483 #undef VFP_GEN_ITOF
1485 #define VFP_GEN_FTOI(name) \
1486 static inline void gen_vfp_##name(int dp, int neon) \
1488 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1489 if (dp) { \
1490 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1491 } else { \
1492 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1494 tcg_temp_free_ptr(statusptr); \
1497 VFP_GEN_FTOI(toui)
1498 VFP_GEN_FTOI(touiz)
1499 VFP_GEN_FTOI(tosi)
1500 VFP_GEN_FTOI(tosiz)
1501 #undef VFP_GEN_FTOI
1503 #define VFP_GEN_FIX(name, round) \
1504 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1506 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1507 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1508 if (dp) { \
1509 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1510 statusptr); \
1511 } else { \
1512 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1513 statusptr); \
1515 tcg_temp_free_i32(tmp_shift); \
1516 tcg_temp_free_ptr(statusptr); \
1518 VFP_GEN_FIX(tosh, _round_to_zero)
1519 VFP_GEN_FIX(tosl, _round_to_zero)
1520 VFP_GEN_FIX(touh, _round_to_zero)
1521 VFP_GEN_FIX(toul, _round_to_zero)
1522 VFP_GEN_FIX(shto, )
1523 VFP_GEN_FIX(slto, )
1524 VFP_GEN_FIX(uhto, )
1525 VFP_GEN_FIX(ulto, )
1526 #undef VFP_GEN_FIX
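/* e.g. VFP_GEN_FIX(tosh, _round_to_zero) defines gen_vfp_tosh(dp, shift, neon),
 * which calls gen_helper_vfp_toshd_round_to_zero() or the "s" variant for the
 * float-to-fixed direction; the fixed-to-float conversions (shto, slto, uhto,
 * ulto) use an empty rounding suffix.
 */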
1528 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1530 if (dp) {
1531 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
1532 } else {
1533 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
1537 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1539 if (dp) {
1540 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
1541 } else {
1542 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
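/* Map a VFP register number onto the SVE-style vfp.zregs[] storage: each
 * zregs[n].d[0..1] pair holds D(2n) and D(2n+1), and the single-precision
 * registers overlay them, so e.g. S5 resolves to the upper 32 bits of
 * zregs[1].d[0], matching the architectural overlap of S5 with D2.
 */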
1546 static inline long vfp_reg_offset(bool dp, unsigned reg)
1548 if (dp) {
1549 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
1550 } else {
1551 long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
1552 if (reg & 1) {
1553 ofs += offsetof(CPU_DoubleU, l.upper);
1554 } else {
1555 ofs += offsetof(CPU_DoubleU, l.lower);
1557 return ofs;
1561 /* Return the offset of a 32-bit piece of a NEON register.
1562 zero is the least significant end of the register. */
1563 static inline long
1564 neon_reg_offset (int reg, int n)
1566 int sreg;
1567 sreg = reg * 2 + n;
1568 return vfp_reg_offset(0, sreg);
1571 static TCGv_i32 neon_load_reg(int reg, int pass)
1573 TCGv_i32 tmp = tcg_temp_new_i32();
1574 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1575 return tmp;
1578 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1580 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1581 tcg_temp_free_i32(var);
1584 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1586 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1589 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1591 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1594 static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1596 TCGv_ptr ret = tcg_temp_new_ptr();
1597 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1598 return ret;
1601 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1602 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1603 #define tcg_gen_st_f32 tcg_gen_st_i32
1604 #define tcg_gen_st_f64 tcg_gen_st_i64
1606 static inline void gen_mov_F0_vreg(int dp, int reg)
1608 if (dp)
1609 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1610 else
1611 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1614 static inline void gen_mov_F1_vreg(int dp, int reg)
1616 if (dp)
1617 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1618 else
1619 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1622 static inline void gen_mov_vreg_F0(int dp, int reg)
1624 if (dp)
1625 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1626 else
1627 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1630 #define ARM_CP_RW_BIT (1 << 20)
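/* Bit 20 of a coprocessor instruction is the L (load/read) bit, so testing
 * insn & ARM_CP_RW_BIT distinguishes the register-read forms (TMRRC,
 * WLDR...) from the write forms (TMCRR, WSTR...) in the decoder below.
 */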
1632 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1634 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1637 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1639 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1642 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1644 TCGv_i32 var = tcg_temp_new_i32();
1645 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1646 return var;
1649 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1651 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1652 tcg_temp_free_i32(var);
1655 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1657 iwmmxt_store_reg(cpu_M0, rn);
1660 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1662 iwmmxt_load_reg(cpu_M0, rn);
1665 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1667 iwmmxt_load_reg(cpu_V1, rn);
1668 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1671 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1673 iwmmxt_load_reg(cpu_V1, rn);
1674 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1677 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1679 iwmmxt_load_reg(cpu_V1, rn);
1680 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1683 #define IWMMXT_OP(name) \
1684 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1686 iwmmxt_load_reg(cpu_V1, rn); \
1687 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1690 #define IWMMXT_OP_ENV(name) \
1691 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1693 iwmmxt_load_reg(cpu_V1, rn); \
1694 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1697 #define IWMMXT_OP_ENV_SIZE(name) \
1698 IWMMXT_OP_ENV(name##b) \
1699 IWMMXT_OP_ENV(name##w) \
1700 IWMMXT_OP_ENV(name##l)
1702 #define IWMMXT_OP_ENV1(name) \
1703 static inline void gen_op_iwmmxt_##name##_M0(void) \
1705 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
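/* The instantiation lines below generate the gen_op_iwmmxt_* helpers;
 * e.g. IWMMXT_OP_ENV_SIZE(cmpeq) expands to the byte/word/long variants
 * gen_op_iwmmxt_cmpeqb_M0_wRn(), ..._cmpeqw_... and ..._cmpeql_...
 */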
1708 IWMMXT_OP(maddsq)
1709 IWMMXT_OP(madduq)
1710 IWMMXT_OP(sadb)
1711 IWMMXT_OP(sadw)
1712 IWMMXT_OP(mulslw)
1713 IWMMXT_OP(mulshw)
1714 IWMMXT_OP(mululw)
1715 IWMMXT_OP(muluhw)
1716 IWMMXT_OP(macsw)
1717 IWMMXT_OP(macuw)
1719 IWMMXT_OP_ENV_SIZE(unpackl)
1720 IWMMXT_OP_ENV_SIZE(unpackh)
1722 IWMMXT_OP_ENV1(unpacklub)
1723 IWMMXT_OP_ENV1(unpackluw)
1724 IWMMXT_OP_ENV1(unpacklul)
1725 IWMMXT_OP_ENV1(unpackhub)
1726 IWMMXT_OP_ENV1(unpackhuw)
1727 IWMMXT_OP_ENV1(unpackhul)
1728 IWMMXT_OP_ENV1(unpacklsb)
1729 IWMMXT_OP_ENV1(unpacklsw)
1730 IWMMXT_OP_ENV1(unpacklsl)
1731 IWMMXT_OP_ENV1(unpackhsb)
1732 IWMMXT_OP_ENV1(unpackhsw)
1733 IWMMXT_OP_ENV1(unpackhsl)
1735 IWMMXT_OP_ENV_SIZE(cmpeq)
1736 IWMMXT_OP_ENV_SIZE(cmpgtu)
1737 IWMMXT_OP_ENV_SIZE(cmpgts)
1739 IWMMXT_OP_ENV_SIZE(mins)
1740 IWMMXT_OP_ENV_SIZE(minu)
1741 IWMMXT_OP_ENV_SIZE(maxs)
1742 IWMMXT_OP_ENV_SIZE(maxu)
1744 IWMMXT_OP_ENV_SIZE(subn)
1745 IWMMXT_OP_ENV_SIZE(addn)
1746 IWMMXT_OP_ENV_SIZE(subu)
1747 IWMMXT_OP_ENV_SIZE(addu)
1748 IWMMXT_OP_ENV_SIZE(subs)
1749 IWMMXT_OP_ENV_SIZE(adds)
1751 IWMMXT_OP_ENV(avgb0)
1752 IWMMXT_OP_ENV(avgb1)
1753 IWMMXT_OP_ENV(avgw0)
1754 IWMMXT_OP_ENV(avgw1)
1756 IWMMXT_OP_ENV(packuw)
1757 IWMMXT_OP_ENV(packul)
1758 IWMMXT_OP_ENV(packuq)
1759 IWMMXT_OP_ENV(packsw)
1760 IWMMXT_OP_ENV(packsl)
1761 IWMMXT_OP_ENV(packsq)
1763 static void gen_op_iwmmxt_set_mup(void)
1765 TCGv_i32 tmp;
1766 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1767 tcg_gen_ori_i32(tmp, tmp, 2);
1768 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1771 static void gen_op_iwmmxt_set_cup(void)
1773 TCGv_i32 tmp;
1774 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1775 tcg_gen_ori_i32(tmp, tmp, 1);
1776 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1779 static void gen_op_iwmmxt_setpsr_nz(void)
1781 TCGv_i32 tmp = tcg_temp_new_i32();
1782 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1783 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1786 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1788 iwmmxt_load_reg(cpu_V1, rn);
1789 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1790 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
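/* Decode the iwMMXt load/store addressing mode into dest: bit 24 selects
 * pre-indexing (with optional base writeback via bit 21), bit 21 alone
 * selects post-indexing with writeback, and bit 23 gives the offset
 * direction. Returns nonzero for encodings that are not valid.
 */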
1793 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1794 TCGv_i32 dest)
1796 int rd;
1797 uint32_t offset;
1798 TCGv_i32 tmp;
1800 rd = (insn >> 16) & 0xf;
1801 tmp = load_reg(s, rd);
1803 offset = (insn & 0xff) << ((insn >> 7) & 2);
1804 if (insn & (1 << 24)) {
1805 /* Pre indexed */
1806 if (insn & (1 << 23))
1807 tcg_gen_addi_i32(tmp, tmp, offset);
1808 else
1809 tcg_gen_addi_i32(tmp, tmp, -offset);
1810 tcg_gen_mov_i32(dest, tmp);
1811 if (insn & (1 << 21))
1812 store_reg(s, rd, tmp);
1813 else
1814 tcg_temp_free_i32(tmp);
1815 } else if (insn & (1 << 21)) {
1816 /* Post indexed */
1817 tcg_gen_mov_i32(dest, tmp);
1818 if (insn & (1 << 23))
1819 tcg_gen_addi_i32(tmp, tmp, offset);
1820 else
1821 tcg_gen_addi_i32(tmp, tmp, -offset);
1822 store_reg(s, rd, tmp);
1823 } else if (!(insn & (1 << 23)))
1824 return 1;
1825 return 0;
1828 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1830 int rd = (insn >> 0) & 0xf;
1831 TCGv_i32 tmp;
1833 if (insn & (1 << 8)) {
1834 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1835 return 1;
1836 } else {
1837 tmp = iwmmxt_load_creg(rd);
1839 } else {
1840 tmp = tcg_temp_new_i32();
1841 iwmmxt_load_reg(cpu_V0, rd);
1842 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1844 tcg_gen_andi_i32(tmp, tmp, mask);
1845 tcg_gen_mov_i32(dest, tmp);
1846 tcg_temp_free_i32(tmp);
1847 return 0;
1850 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1851 (ie. an undefined instruction). */
1852 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1854 int rd, wrd;
1855 int rdhi, rdlo, rd0, rd1, i;
1856 TCGv_i32 addr;
1857 TCGv_i32 tmp, tmp2, tmp3;
1859 if ((insn & 0x0e000e00) == 0x0c000000) {
1860 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1861 wrd = insn & 0xf;
1862 rdlo = (insn >> 12) & 0xf;
1863 rdhi = (insn >> 16) & 0xf;
1864 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1865 iwmmxt_load_reg(cpu_V0, wrd);
1866 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1867 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1868 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1869 } else { /* TMCRR */
1870 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1871 iwmmxt_store_reg(cpu_V0, wrd);
1872 gen_op_iwmmxt_set_mup();
1874 return 0;
1877 wrd = (insn >> 12) & 0xf;
1878 addr = tcg_temp_new_i32();
1879 if (gen_iwmmxt_address(s, insn, addr)) {
1880 tcg_temp_free_i32(addr);
1881 return 1;
1883 if (insn & ARM_CP_RW_BIT) {
1884 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1885 tmp = tcg_temp_new_i32();
1886 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1887 iwmmxt_store_creg(wrd, tmp);
1888 } else {
1889 i = 1;
1890 if (insn & (1 << 8)) {
1891 if (insn & (1 << 22)) { /* WLDRD */
1892 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1893 i = 0;
1894 } else { /* WLDRW wRd */
1895 tmp = tcg_temp_new_i32();
1896 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1898 } else {
1899 tmp = tcg_temp_new_i32();
1900 if (insn & (1 << 22)) { /* WLDRH */
1901 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1902 } else { /* WLDRB */
1903 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1906 if (i) {
1907 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1908 tcg_temp_free_i32(tmp);
1910 gen_op_iwmmxt_movq_wRn_M0(wrd);
1912 } else {
1913 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1914 tmp = iwmmxt_load_creg(wrd);
1915 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1916 } else {
1917 gen_op_iwmmxt_movq_M0_wRn(wrd);
1918 tmp = tcg_temp_new_i32();
1919 if (insn & (1 << 8)) {
1920 if (insn & (1 << 22)) { /* WSTRD */
1921 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1922 } else { /* WSTRW wRd */
1923 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1924 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1926 } else {
1927 if (insn & (1 << 22)) { /* WSTRH */
1928 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1929 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1930 } else { /* WSTRB */
1931 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1932 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1936 tcg_temp_free_i32(tmp);
1938 tcg_temp_free_i32(addr);
1939 return 0;
1942 if ((insn & 0x0f000000) != 0x0e000000)
1943 return 1;
1945 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1946 case 0x000: /* WOR */
1947 wrd = (insn >> 12) & 0xf;
1948 rd0 = (insn >> 0) & 0xf;
1949 rd1 = (insn >> 16) & 0xf;
1950 gen_op_iwmmxt_movq_M0_wRn(rd0);
1951 gen_op_iwmmxt_orq_M0_wRn(rd1);
1952 gen_op_iwmmxt_setpsr_nz();
1953 gen_op_iwmmxt_movq_wRn_M0(wrd);
1954 gen_op_iwmmxt_set_mup();
1955 gen_op_iwmmxt_set_cup();
1956 break;
1957 case 0x011: /* TMCR */
1958 if (insn & 0xf)
1959 return 1;
1960 rd = (insn >> 12) & 0xf;
1961 wrd = (insn >> 16) & 0xf;
1962 switch (wrd) {
1963 case ARM_IWMMXT_wCID:
1964 case ARM_IWMMXT_wCASF:
1965 break;
1966 case ARM_IWMMXT_wCon:
1967 gen_op_iwmmxt_set_cup();
1968 /* Fall through. */
1969 case ARM_IWMMXT_wCSSF:
1970 tmp = iwmmxt_load_creg(wrd);
1971 tmp2 = load_reg(s, rd);
1972 tcg_gen_andc_i32(tmp, tmp, tmp2);
1973 tcg_temp_free_i32(tmp2);
1974 iwmmxt_store_creg(wrd, tmp);
1975 break;
1976 case ARM_IWMMXT_wCGR0:
1977 case ARM_IWMMXT_wCGR1:
1978 case ARM_IWMMXT_wCGR2:
1979 case ARM_IWMMXT_wCGR3:
1980 gen_op_iwmmxt_set_cup();
1981 tmp = load_reg(s, rd);
1982 iwmmxt_store_creg(wrd, tmp);
1983 break;
1984 default:
1985 return 1;
1987 break;
1988 case 0x100: /* WXOR */
1989 wrd = (insn >> 12) & 0xf;
1990 rd0 = (insn >> 0) & 0xf;
1991 rd1 = (insn >> 16) & 0xf;
1992 gen_op_iwmmxt_movq_M0_wRn(rd0);
1993 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1994 gen_op_iwmmxt_setpsr_nz();
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
1999 case 0x111: /* TMRC */
2000 if (insn & 0xf)
2001 return 1;
2002 rd = (insn >> 12) & 0xf;
2003 wrd = (insn >> 16) & 0xf;
2004 tmp = iwmmxt_load_creg(wrd);
2005 store_reg(s, rd, tmp);
2006 break;
2007 case 0x300: /* WANDN */
2008 wrd = (insn >> 12) & 0xf;
2009 rd0 = (insn >> 0) & 0xf;
2010 rd1 = (insn >> 16) & 0xf;
2011 gen_op_iwmmxt_movq_M0_wRn(rd0);
2012 tcg_gen_neg_i64(cpu_M0, cpu_M0);
2013 gen_op_iwmmxt_andq_M0_wRn(rd1);
2014 gen_op_iwmmxt_setpsr_nz();
2015 gen_op_iwmmxt_movq_wRn_M0(wrd);
2016 gen_op_iwmmxt_set_mup();
2017 gen_op_iwmmxt_set_cup();
2018 break;
2019 case 0x200: /* WAND */
2020 wrd = (insn >> 12) & 0xf;
2021 rd0 = (insn >> 0) & 0xf;
2022 rd1 = (insn >> 16) & 0xf;
2023 gen_op_iwmmxt_movq_M0_wRn(rd0);
2024 gen_op_iwmmxt_andq_M0_wRn(rd1);
2025 gen_op_iwmmxt_setpsr_nz();
2026 gen_op_iwmmxt_movq_wRn_M0(wrd);
2027 gen_op_iwmmxt_set_mup();
2028 gen_op_iwmmxt_set_cup();
2029 break;
2030 case 0x810: case 0xa10: /* WMADD */
2031 wrd = (insn >> 12) & 0xf;
2032 rd0 = (insn >> 0) & 0xf;
2033 rd1 = (insn >> 16) & 0xf;
2034 gen_op_iwmmxt_movq_M0_wRn(rd0);
2035 if (insn & (1 << 21))
2036 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
2037 else
2038 gen_op_iwmmxt_madduq_M0_wRn(rd1);
2039 gen_op_iwmmxt_movq_wRn_M0(wrd);
2040 gen_op_iwmmxt_set_mup();
2041 break;
2042 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
2043 wrd = (insn >> 12) & 0xf;
2044 rd0 = (insn >> 16) & 0xf;
2045 rd1 = (insn >> 0) & 0xf;
2046 gen_op_iwmmxt_movq_M0_wRn(rd0);
2047 switch ((insn >> 22) & 3) {
2048 case 0:
2049 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2050 break;
2051 case 1:
2052 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2053 break;
2054 case 2:
2055 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2056 break;
2057 case 3:
2058 return 1;
2060 gen_op_iwmmxt_movq_wRn_M0(wrd);
2061 gen_op_iwmmxt_set_mup();
2062 gen_op_iwmmxt_set_cup();
2063 break;
2064 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
2065 wrd = (insn >> 12) & 0xf;
2066 rd0 = (insn >> 16) & 0xf;
2067 rd1 = (insn >> 0) & 0xf;
2068 gen_op_iwmmxt_movq_M0_wRn(rd0);
2069 switch ((insn >> 22) & 3) {
2070 case 0:
2071 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2072 break;
2073 case 1:
2074 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2075 break;
2076 case 2:
2077 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2078 break;
2079 case 3:
2080 return 1;
2082 gen_op_iwmmxt_movq_wRn_M0(wrd);
2083 gen_op_iwmmxt_set_mup();
2084 gen_op_iwmmxt_set_cup();
2085 break;
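    /* WSAD: sum of absolute differences of the byte or halfword elements
     * (bit 22 selects the element size); unless bit 20 is set, the previous
     * low word of the destination is added into the result.
     */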
2086 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2087 wrd = (insn >> 12) & 0xf;
2088 rd0 = (insn >> 16) & 0xf;
2089 rd1 = (insn >> 0) & 0xf;
2090 gen_op_iwmmxt_movq_M0_wRn(rd0);
2091 if (insn & (1 << 22))
2092 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2093 else
2094 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2095 if (!(insn & (1 << 20)))
2096 gen_op_iwmmxt_addl_M0_wRn(wrd);
2097 gen_op_iwmmxt_movq_wRn_M0(wrd);
2098 gen_op_iwmmxt_set_mup();
2099 break;
2100 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2101 wrd = (insn >> 12) & 0xf;
2102 rd0 = (insn >> 16) & 0xf;
2103 rd1 = (insn >> 0) & 0xf;
2104 gen_op_iwmmxt_movq_M0_wRn(rd0);
2105 if (insn & (1 << 21)) {
2106 if (insn & (1 << 20))
2107 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2108 else
2109 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2110 } else {
2111 if (insn & (1 << 20))
2112 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2113 else
2114 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2116 gen_op_iwmmxt_movq_wRn_M0(wrd);
2117 gen_op_iwmmxt_set_mup();
2118 break;
2119 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2120 wrd = (insn >> 12) & 0xf;
2121 rd0 = (insn >> 16) & 0xf;
2122 rd1 = (insn >> 0) & 0xf;
2123 gen_op_iwmmxt_movq_M0_wRn(rd0);
2124 if (insn & (1 << 21))
2125 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2126 else
2127 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2128 if (!(insn & (1 << 20))) {
2129 iwmmxt_load_reg(cpu_V1, wrd);
2130 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
2132 gen_op_iwmmxt_movq_wRn_M0(wrd);
2133 gen_op_iwmmxt_set_mup();
2134 break;
2135 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2136 wrd = (insn >> 12) & 0xf;
2137 rd0 = (insn >> 16) & 0xf;
2138 rd1 = (insn >> 0) & 0xf;
2139 gen_op_iwmmxt_movq_M0_wRn(rd0);
2140 switch ((insn >> 22) & 3) {
2141 case 0:
2142 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2143 break;
2144 case 1:
2145 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2146 break;
2147 case 2:
2148 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2149 break;
2150 case 3:
2151 return 1;
2153 gen_op_iwmmxt_movq_wRn_M0(wrd);
2154 gen_op_iwmmxt_set_mup();
2155 gen_op_iwmmxt_set_cup();
2156 break;
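    /* WAVG2: average the byte or halfword elements of the two operands
     * (bit 22 selects the size); bit 20 distinguishes the rounding and
     * non-rounding forms.
     */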
2157 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2158 wrd = (insn >> 12) & 0xf;
2159 rd0 = (insn >> 16) & 0xf;
2160 rd1 = (insn >> 0) & 0xf;
2161 gen_op_iwmmxt_movq_M0_wRn(rd0);
2162 if (insn & (1 << 22)) {
2163 if (insn & (1 << 20))
2164 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2165 else
2166 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2167 } else {
2168 if (insn & (1 << 20))
2169 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2170 else
2171 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2173 gen_op_iwmmxt_movq_wRn_M0(wrd);
2174 gen_op_iwmmxt_set_mup();
2175 gen_op_iwmmxt_set_cup();
2176 break;
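    /* WALIGNR: form a 128-bit value from the two source registers and
     * extract eight bytes from it, starting at the byte offset held in
     * the low three bits of the wCGR register selected by bits [21:20].
     */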
2177 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2178 wrd = (insn >> 12) & 0xf;
2179 rd0 = (insn >> 16) & 0xf;
2180 rd1 = (insn >> 0) & 0xf;
2181 gen_op_iwmmxt_movq_M0_wRn(rd0);
2182 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2183 tcg_gen_andi_i32(tmp, tmp, 7);
2184 iwmmxt_load_reg(cpu_V1, rd1);
2185 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2186 tcg_temp_free_i32(tmp);
2187 gen_op_iwmmxt_movq_wRn_M0(wrd);
2188 gen_op_iwmmxt_set_mup();
2189 break;
2190 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2191 if (((insn >> 6) & 3) == 3)
2192 return 1;
2193 rd = (insn >> 12) & 0xf;
2194 wrd = (insn >> 16) & 0xf;
2195 tmp = load_reg(s, rd);
2196 gen_op_iwmmxt_movq_M0_wRn(wrd);
2197 switch ((insn >> 6) & 3) {
2198 case 0:
2199 tmp2 = tcg_const_i32(0xff);
2200 tmp3 = tcg_const_i32((insn & 7) << 3);
2201 break;
2202 case 1:
2203 tmp2 = tcg_const_i32(0xffff);
2204 tmp3 = tcg_const_i32((insn & 3) << 4);
2205 break;
2206 case 2:
2207 tmp2 = tcg_const_i32(0xffffffff);
2208 tmp3 = tcg_const_i32((insn & 1) << 5);
2209 break;
2210 default:
2211 tmp2 = NULL;
2212 tmp3 = NULL;
2214 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2215 tcg_temp_free_i32(tmp3);
2216 tcg_temp_free_i32(tmp2);
2217 tcg_temp_free_i32(tmp);
2218 gen_op_iwmmxt_movq_wRn_M0(wrd);
2219 gen_op_iwmmxt_set_mup();
2220 break;
2221 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2222 rd = (insn >> 12) & 0xf;
2223 wrd = (insn >> 16) & 0xf;
2224 if (rd == 15 || ((insn >> 22) & 3) == 3)
2225 return 1;
2226 gen_op_iwmmxt_movq_M0_wRn(wrd);
2227 tmp = tcg_temp_new_i32();
2228 switch ((insn >> 22) & 3) {
2229 case 0:
2230 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2231 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2232 if (insn & 8) {
2233 tcg_gen_ext8s_i32(tmp, tmp);
2234 } else {
2235 tcg_gen_andi_i32(tmp, tmp, 0xff);
2237 break;
2238 case 1:
2239 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2240 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2241 if (insn & 8) {
2242 tcg_gen_ext16s_i32(tmp, tmp);
2243 } else {
2244 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2246 break;
2247 case 2:
2248 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2249 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2250 break;
2252 store_reg(s, rd, tmp);
2253 break;
2254 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2255 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2256 return 1;
2257 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2258 switch ((insn >> 22) & 3) {
2259 case 0:
2260 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2261 break;
2262 case 1:
2263 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2264 break;
2265 case 2:
2266 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2267 break;
2269 tcg_gen_shli_i32(tmp, tmp, 28);
2270 gen_set_nzcv(tmp);
2271 tcg_temp_free_i32(tmp);
2272 break;
2273 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2274 if (((insn >> 6) & 3) == 3)
2275 return 1;
2276 rd = (insn >> 12) & 0xf;
2277 wrd = (insn >> 16) & 0xf;
2278 tmp = load_reg(s, rd);
2279 switch ((insn >> 6) & 3) {
2280 case 0:
2281 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2282 break;
2283 case 1:
2284 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2285 break;
2286 case 2:
2287 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2288 break;
2290 tcg_temp_free_i32(tmp);
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 break;
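    /* TANDC: fold together the per-element SIMD flag nibbles held in wCASF
     * by repeatedly shifting and ANDing, so that bits [31:28] hold the
     * conjunction, then copy them into the CPSR NZCV flags.
     */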
2294 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2295 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2296 return 1;
2297 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2298 tmp2 = tcg_temp_new_i32();
2299 tcg_gen_mov_i32(tmp2, tmp);
2300 switch ((insn >> 22) & 3) {
2301 case 0:
2302 for (i = 0; i < 7; i ++) {
2303 tcg_gen_shli_i32(tmp2, tmp2, 4);
2304 tcg_gen_and_i32(tmp, tmp, tmp2);
2306 break;
2307 case 1:
2308 for (i = 0; i < 3; i ++) {
2309 tcg_gen_shli_i32(tmp2, tmp2, 8);
2310 tcg_gen_and_i32(tmp, tmp, tmp2);
2312 break;
2313 case 2:
2314 tcg_gen_shli_i32(tmp2, tmp2, 16);
2315 tcg_gen_and_i32(tmp, tmp, tmp2);
2316 break;
2318 gen_set_nzcv(tmp);
2319 tcg_temp_free_i32(tmp2);
2320 tcg_temp_free_i32(tmp);
2321 break;
2322 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2323 wrd = (insn >> 12) & 0xf;
2324 rd0 = (insn >> 16) & 0xf;
2325 gen_op_iwmmxt_movq_M0_wRn(rd0);
2326 switch ((insn >> 22) & 3) {
2327 case 0:
2328 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2329 break;
2330 case 1:
2331 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2332 break;
2333 case 2:
2334 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2335 break;
2336 case 3:
2337 return 1;
2339 gen_op_iwmmxt_movq_wRn_M0(wrd);
2340 gen_op_iwmmxt_set_mup();
2341 break;
2342 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2343 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2344 return 1;
2345 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2346 tmp2 = tcg_temp_new_i32();
2347 tcg_gen_mov_i32(tmp2, tmp);
2348 switch ((insn >> 22) & 3) {
2349 case 0:
2350 for (i = 0; i < 7; i ++) {
2351 tcg_gen_shli_i32(tmp2, tmp2, 4);
2352 tcg_gen_or_i32(tmp, tmp, tmp2);
2354 break;
2355 case 1:
2356 for (i = 0; i < 3; i ++) {
2357 tcg_gen_shli_i32(tmp2, tmp2, 8);
2358 tcg_gen_or_i32(tmp, tmp, tmp2);
2360 break;
2361 case 2:
2362 tcg_gen_shli_i32(tmp2, tmp2, 16);
2363 tcg_gen_or_i32(tmp, tmp, tmp2);
2364 break;
2366 gen_set_nzcv(tmp);
2367 tcg_temp_free_i32(tmp2);
2368 tcg_temp_free_i32(tmp);
2369 break;
2370 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2371 rd = (insn >> 12) & 0xf;
2372 rd0 = (insn >> 16) & 0xf;
2373 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2374 return 1;
2375 gen_op_iwmmxt_movq_M0_wRn(rd0);
2376 tmp = tcg_temp_new_i32();
2377 switch ((insn >> 22) & 3) {
2378 case 0:
2379 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2380 break;
2381 case 1:
2382 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2383 break;
2384 case 2:
2385 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2386 break;
2388 store_reg(s, rd, tmp);
2389 break;
2390 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2391 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2392 wrd = (insn >> 12) & 0xf;
2393 rd0 = (insn >> 16) & 0xf;
2394 rd1 = (insn >> 0) & 0xf;
2395 gen_op_iwmmxt_movq_M0_wRn(rd0);
2396 switch ((insn >> 22) & 3) {
2397 case 0:
2398 if (insn & (1 << 21))
2399 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2400 else
2401 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2402 break;
2403 case 1:
2404 if (insn & (1 << 21))
2405 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2406 else
2407 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2408 break;
2409 case 2:
2410 if (insn & (1 << 21))
2411 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2412 else
2413 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2414 break;
2415 case 3:
2416 return 1;
2418 gen_op_iwmmxt_movq_wRn_M0(wrd);
2419 gen_op_iwmmxt_set_mup();
2420 gen_op_iwmmxt_set_cup();
2421 break;
2422 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2423 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2424 wrd = (insn >> 12) & 0xf;
2425 rd0 = (insn >> 16) & 0xf;
2426 gen_op_iwmmxt_movq_M0_wRn(rd0);
2427 switch ((insn >> 22) & 3) {
2428 case 0:
2429 if (insn & (1 << 21))
2430 gen_op_iwmmxt_unpacklsb_M0();
2431 else
2432 gen_op_iwmmxt_unpacklub_M0();
2433 break;
2434 case 1:
2435 if (insn & (1 << 21))
2436 gen_op_iwmmxt_unpacklsw_M0();
2437 else
2438 gen_op_iwmmxt_unpackluw_M0();
2439 break;
2440 case 2:
2441 if (insn & (1 << 21))
2442 gen_op_iwmmxt_unpacklsl_M0();
2443 else
2444 gen_op_iwmmxt_unpacklul_M0();
2445 break;
2446 case 3:
2447 return 1;
2449 gen_op_iwmmxt_movq_wRn_M0(wrd);
2450 gen_op_iwmmxt_set_mup();
2451 gen_op_iwmmxt_set_cup();
2452 break;
2453 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2454 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2455 wrd = (insn >> 12) & 0xf;
2456 rd0 = (insn >> 16) & 0xf;
2457 gen_op_iwmmxt_movq_M0_wRn(rd0);
2458 switch ((insn >> 22) & 3) {
2459 case 0:
2460 if (insn & (1 << 21))
2461 gen_op_iwmmxt_unpackhsb_M0();
2462 else
2463 gen_op_iwmmxt_unpackhub_M0();
2464 break;
2465 case 1:
2466 if (insn & (1 << 21))
2467 gen_op_iwmmxt_unpackhsw_M0();
2468 else
2469 gen_op_iwmmxt_unpackhuw_M0();
2470 break;
2471 case 2:
2472 if (insn & (1 << 21))
2473 gen_op_iwmmxt_unpackhsl_M0();
2474 else
2475 gen_op_iwmmxt_unpackhul_M0();
2476 break;
2477 case 3:
2478 return 1;
2480 gen_op_iwmmxt_movq_wRn_M0(wrd);
2481 gen_op_iwmmxt_set_mup();
2482 gen_op_iwmmxt_set_cup();
2483 break;
2484 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2485 case 0x214: case 0x614: case 0xa14: case 0xe14:
2486 if (((insn >> 22) & 3) == 0)
2487 return 1;
2488 wrd = (insn >> 12) & 0xf;
2489 rd0 = (insn >> 16) & 0xf;
2490 gen_op_iwmmxt_movq_M0_wRn(rd0);
2491 tmp = tcg_temp_new_i32();
2492 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2493 tcg_temp_free_i32(tmp);
2494 return 1;
2496 switch ((insn >> 22) & 3) {
2497 case 1:
2498 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2499 break;
2500 case 2:
2501 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2502 break;
2503 case 3:
2504 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2505 break;
2507 tcg_temp_free_i32(tmp);
2508 gen_op_iwmmxt_movq_wRn_M0(wrd);
2509 gen_op_iwmmxt_set_mup();
2510 gen_op_iwmmxt_set_cup();
2511 break;
2512 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2513 case 0x014: case 0x414: case 0x814: case 0xc14:
2514 if (((insn >> 22) & 3) == 0)
2515 return 1;
2516 wrd = (insn >> 12) & 0xf;
2517 rd0 = (insn >> 16) & 0xf;
2518 gen_op_iwmmxt_movq_M0_wRn(rd0);
2519 tmp = tcg_temp_new_i32();
2520 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2521 tcg_temp_free_i32(tmp);
2522 return 1;
2524 switch ((insn >> 22) & 3) {
2525 case 1:
2526 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2527 break;
2528 case 2:
2529 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2530 break;
2531 case 3:
2532 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2533 break;
2535 tcg_temp_free_i32(tmp);
2536 gen_op_iwmmxt_movq_wRn_M0(wrd);
2537 gen_op_iwmmxt_set_mup();
2538 gen_op_iwmmxt_set_cup();
2539 break;
2540 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2541 case 0x114: case 0x514: case 0x914: case 0xd14:
2542 if (((insn >> 22) & 3) == 0)
2543 return 1;
2544 wrd = (insn >> 12) & 0xf;
2545 rd0 = (insn >> 16) & 0xf;
2546 gen_op_iwmmxt_movq_M0_wRn(rd0);
2547 tmp = tcg_temp_new_i32();
2548 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2549 tcg_temp_free_i32(tmp);
2550 return 1;
2552 switch ((insn >> 22) & 3) {
2553 case 1:
2554 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2555 break;
2556 case 2:
2557 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2558 break;
2559 case 3:
2560 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2561 break;
2563 tcg_temp_free_i32(tmp);
2564 gen_op_iwmmxt_movq_wRn_M0(wrd);
2565 gen_op_iwmmxt_set_mup();
2566 gen_op_iwmmxt_set_cup();
2567 break;
2568 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2569 case 0x314: case 0x714: case 0xb14: case 0xf14:
2570 if (((insn >> 22) & 3) == 0)
2571 return 1;
2572 wrd = (insn >> 12) & 0xf;
2573 rd0 = (insn >> 16) & 0xf;
2574 gen_op_iwmmxt_movq_M0_wRn(rd0);
2575 tmp = tcg_temp_new_i32();
2576 switch ((insn >> 22) & 3) {
2577 case 1:
2578 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2579 tcg_temp_free_i32(tmp);
2580 return 1;
2582 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2583 break;
2584 case 2:
2585 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2586 tcg_temp_free_i32(tmp);
2587 return 1;
2589 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2590 break;
2591 case 3:
2592 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2593 tcg_temp_free_i32(tmp);
2594 return 1;
2596 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2597 break;
2599 tcg_temp_free_i32(tmp);
2600 gen_op_iwmmxt_movq_wRn_M0(wrd);
2601 gen_op_iwmmxt_set_mup();
2602 gen_op_iwmmxt_set_cup();
2603 break;
2604 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2605 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2606 wrd = (insn >> 12) & 0xf;
2607 rd0 = (insn >> 16) & 0xf;
2608 rd1 = (insn >> 0) & 0xf;
2609 gen_op_iwmmxt_movq_M0_wRn(rd0);
2610 switch ((insn >> 22) & 3) {
2611 case 0:
2612 if (insn & (1 << 21))
2613 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2614 else
2615 gen_op_iwmmxt_minub_M0_wRn(rd1);
2616 break;
2617 case 1:
2618 if (insn & (1 << 21))
2619 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2620 else
2621 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2622 break;
2623 case 2:
2624 if (insn & (1 << 21))
2625 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2626 else
2627 gen_op_iwmmxt_minul_M0_wRn(rd1);
2628 break;
2629 case 3:
2630 return 1;
2632 gen_op_iwmmxt_movq_wRn_M0(wrd);
2633 gen_op_iwmmxt_set_mup();
2634 break;
2635 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2636 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2637 wrd = (insn >> 12) & 0xf;
2638 rd0 = (insn >> 16) & 0xf;
2639 rd1 = (insn >> 0) & 0xf;
2640 gen_op_iwmmxt_movq_M0_wRn(rd0);
2641 switch ((insn >> 22) & 3) {
2642 case 0:
2643 if (insn & (1 << 21))
2644 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2645 else
2646 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2647 break;
2648 case 1:
2649 if (insn & (1 << 21))
2650 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2651 else
2652 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2653 break;
2654 case 2:
2655 if (insn & (1 << 21))
2656 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2657 else
2658 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2659 break;
2660 case 3:
2661 return 1;
2663 gen_op_iwmmxt_movq_wRn_M0(wrd);
2664 gen_op_iwmmxt_set_mup();
2665 break;
2666 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2667 case 0x402: case 0x502: case 0x602: case 0x702:
2668 wrd = (insn >> 12) & 0xf;
2669 rd0 = (insn >> 16) & 0xf;
2670 rd1 = (insn >> 0) & 0xf;
2671 gen_op_iwmmxt_movq_M0_wRn(rd0);
2672 tmp = tcg_const_i32((insn >> 20) & 3);
2673 iwmmxt_load_reg(cpu_V1, rd1);
2674 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2675 tcg_temp_free_i32(tmp);
2676 gen_op_iwmmxt_movq_wRn_M0(wrd);
2677 gen_op_iwmmxt_set_mup();
2678 break;
2679 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2680 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2681 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2682 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2683 wrd = (insn >> 12) & 0xf;
2684 rd0 = (insn >> 16) & 0xf;
2685 rd1 = (insn >> 0) & 0xf;
2686 gen_op_iwmmxt_movq_M0_wRn(rd0);
2687 switch ((insn >> 20) & 0xf) {
2688 case 0x0:
2689 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2690 break;
2691 case 0x1:
2692 gen_op_iwmmxt_subub_M0_wRn(rd1);
2693 break;
2694 case 0x3:
2695 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2696 break;
2697 case 0x4:
2698 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2699 break;
2700 case 0x5:
2701 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2702 break;
2703 case 0x7:
2704 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2705 break;
2706 case 0x8:
2707 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2708 break;
2709 case 0x9:
2710 gen_op_iwmmxt_subul_M0_wRn(rd1);
2711 break;
2712 case 0xb:
2713 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2714 break;
2715 default:
2716 return 1;
2718 gen_op_iwmmxt_movq_wRn_M0(wrd);
2719 gen_op_iwmmxt_set_mup();
2720 gen_op_iwmmxt_set_cup();
2721 break;
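    /* WSHUFH: rearrange the four halfwords of the source register under
     * control of an 8-bit immediate split between instruction bits [23:20]
     * and [3:0]; each 2-bit field selects the source halfword for the
     * corresponding destination halfword.
     */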
2722 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2723 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2724 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2725 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2726 wrd = (insn >> 12) & 0xf;
2727 rd0 = (insn >> 16) & 0xf;
2728 gen_op_iwmmxt_movq_M0_wRn(rd0);
2729 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2730 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2731 tcg_temp_free_i32(tmp);
2732 gen_op_iwmmxt_movq_wRn_M0(wrd);
2733 gen_op_iwmmxt_set_mup();
2734 gen_op_iwmmxt_set_cup();
2735 break;
2736 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2737 case 0x418: case 0x518: case 0x618: case 0x718:
2738 case 0x818: case 0x918: case 0xa18: case 0xb18:
2739 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2740 wrd = (insn >> 12) & 0xf;
2741 rd0 = (insn >> 16) & 0xf;
2742 rd1 = (insn >> 0) & 0xf;
2743 gen_op_iwmmxt_movq_M0_wRn(rd0);
2744 switch ((insn >> 20) & 0xf) {
2745 case 0x0:
2746 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2747 break;
2748 case 0x1:
2749 gen_op_iwmmxt_addub_M0_wRn(rd1);
2750 break;
2751 case 0x3:
2752 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2753 break;
2754 case 0x4:
2755 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2756 break;
2757 case 0x5:
2758 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2759 break;
2760 case 0x7:
2761 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2762 break;
2763 case 0x8:
2764 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2765 break;
2766 case 0x9:
2767 gen_op_iwmmxt_addul_M0_wRn(rd1);
2768 break;
2769 case 0xb:
2770 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2771 break;
2772 default:
2773 return 1;
2775 gen_op_iwmmxt_movq_wRn_M0(wrd);
2776 gen_op_iwmmxt_set_mup();
2777 gen_op_iwmmxt_set_cup();
2778 break;
2779 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2780 case 0x408: case 0x508: case 0x608: case 0x708:
2781 case 0x808: case 0x908: case 0xa08: case 0xb08:
2782 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2783 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2784 return 1;
2785 wrd = (insn >> 12) & 0xf;
2786 rd0 = (insn >> 16) & 0xf;
2787 rd1 = (insn >> 0) & 0xf;
2788 gen_op_iwmmxt_movq_M0_wRn(rd0);
2789 switch ((insn >> 22) & 3) {
2790 case 1:
2791 if (insn & (1 << 21))
2792 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2793 else
2794 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2795 break;
2796 case 2:
2797 if (insn & (1 << 21))
2798 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2799 else
2800 gen_op_iwmmxt_packul_M0_wRn(rd1);
2801 break;
2802 case 3:
2803 if (insn & (1 << 21))
2804 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2805 else
2806 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2807 break;
2809 gen_op_iwmmxt_movq_wRn_M0(wrd);
2810 gen_op_iwmmxt_set_mup();
2811 gen_op_iwmmxt_set_cup();
2812 break;
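    /* TMIA, TMIAPH, TMIAxy: multiply two ARM core registers and accumulate
     * into a wMMX data register; the variants use the full 32-bit values,
     * the packed 16-bit halves, or one selected half of each operand
     * (bits 16 and 17 pick top or bottom).
     */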
2813 case 0x201: case 0x203: case 0x205: case 0x207:
2814 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2815 case 0x211: case 0x213: case 0x215: case 0x217:
2816 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2817 wrd = (insn >> 5) & 0xf;
2818 rd0 = (insn >> 12) & 0xf;
2819 rd1 = (insn >> 0) & 0xf;
2820 if (rd0 == 0xf || rd1 == 0xf)
2821 return 1;
2822 gen_op_iwmmxt_movq_M0_wRn(wrd);
2823 tmp = load_reg(s, rd0);
2824 tmp2 = load_reg(s, rd1);
2825 switch ((insn >> 16) & 0xf) {
2826 case 0x0: /* TMIA */
2827 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2828 break;
2829 case 0x8: /* TMIAPH */
2830 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2831 break;
2832 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2833 if (insn & (1 << 16))
2834 tcg_gen_shri_i32(tmp, tmp, 16);
2835 if (insn & (1 << 17))
2836 tcg_gen_shri_i32(tmp2, tmp2, 16);
2837 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2838 break;
2839 default:
2840 tcg_temp_free_i32(tmp2);
2841 tcg_temp_free_i32(tmp);
2842 return 1;
2844 tcg_temp_free_i32(tmp2);
2845 tcg_temp_free_i32(tmp);
2846 gen_op_iwmmxt_movq_wRn_M0(wrd);
2847 gen_op_iwmmxt_set_mup();
2848 break;
2849 default:
2850 return 1;
2853 return 0;
2856 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2857    (i.e. an undefined instruction). */
2858 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2860 int acc, rd0, rd1, rdhi, rdlo;
2861 TCGv_i32 tmp, tmp2;
2863 if ((insn & 0x0ff00f10) == 0x0e200010) {
2864 /* Multiply with Internal Accumulate Format */
2865 rd0 = (insn >> 12) & 0xf;
2866 rd1 = insn & 0xf;
2867 acc = (insn >> 5) & 7;
2869 if (acc != 0)
2870 return 1;
2872 tmp = load_reg(s, rd0);
2873 tmp2 = load_reg(s, rd1);
2874 switch ((insn >> 16) & 0xf) {
2875 case 0x0: /* MIA */
2876 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2877 break;
2878 case 0x8: /* MIAPH */
2879 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2880 break;
2881 case 0xc: /* MIABB */
2882 case 0xd: /* MIABT */
2883 case 0xe: /* MIATB */
2884 case 0xf: /* MIATT */
2885 if (insn & (1 << 16))
2886 tcg_gen_shri_i32(tmp, tmp, 16);
2887 if (insn & (1 << 17))
2888 tcg_gen_shri_i32(tmp2, tmp2, 16);
2889 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2890 break;
2891 default:
2892 return 1;
2894 tcg_temp_free_i32(tmp2);
2895 tcg_temp_free_i32(tmp);
2897 gen_op_iwmmxt_movq_wRn_M0(acc);
2898 return 0;
2901 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2902 /* Internal Accumulator Access Format */
2903 rdhi = (insn >> 16) & 0xf;
2904 rdlo = (insn >> 12) & 0xf;
2905 acc = insn & 7;
2907 if (acc != 0)
2908 return 1;
2910 if (insn & ARM_CP_RW_BIT) { /* MRA */
2911 iwmmxt_load_reg(cpu_V0, acc);
2912 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2913 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2914 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
2915 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2916 } else { /* MAR */
2917 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2918 iwmmxt_store_reg(cpu_V0, acc);
2920 return 0;
2923 return 1;
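/* VFP registers are encoded as a 4-bit field plus one extra bit.  For
 * single-precision registers the extra bit is the low bit of the 5-bit
 * register number; for double-precision registers it is the high bit,
 * which is only accepted when the VFP3 feature is present.  For example,
 * a D-register field of 0xa with the extra bit set selects register 26
 * (0x0a | 0x10).
 */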
2926 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2927 #define VFP_SREG(insn, bigbit, smallbit) \
2928 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2929 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2930 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2931 reg = (((insn) >> (bigbit)) & 0x0f) \
2932 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2933 } else { \
2934 if (insn & (1 << (smallbit))) \
2935 return 1; \
2936 reg = ((insn) >> (bigbit)) & 0x0f; \
2937 }} while (0)
2939 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2940 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2941 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2942 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2943 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2944 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2946 /* Move between integer and VFP cores. */
2947 static TCGv_i32 gen_vfp_mrs(void)
2949 TCGv_i32 tmp = tcg_temp_new_i32();
2950 tcg_gen_mov_i32(tmp, cpu_F0s);
2951 return tmp;
2954 static void gen_vfp_msr(TCGv_i32 tmp)
2956 tcg_gen_mov_i32(cpu_F0s, tmp);
2957 tcg_temp_free_i32(tmp);
2960 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2962 TCGv_i32 tmp = tcg_temp_new_i32();
2963 if (shift)
2964 tcg_gen_shri_i32(var, var, shift);
2965 tcg_gen_ext8u_i32(var, var);
2966 tcg_gen_shli_i32(tmp, var, 8);
2967 tcg_gen_or_i32(var, var, tmp);
2968 tcg_gen_shli_i32(tmp, var, 16);
2969 tcg_gen_or_i32(var, var, tmp);
2970 tcg_temp_free_i32(tmp);
2973 static void gen_neon_dup_low16(TCGv_i32 var)
2975 TCGv_i32 tmp = tcg_temp_new_i32();
2976 tcg_gen_ext16u_i32(var, var);
2977 tcg_gen_shli_i32(tmp, var, 16);
2978 tcg_gen_or_i32(var, var, tmp);
2979 tcg_temp_free_i32(tmp);
2982 static void gen_neon_dup_high16(TCGv_i32 var)
2984 TCGv_i32 tmp = tcg_temp_new_i32();
2985 tcg_gen_andi_i32(var, var, 0xffff0000);
2986 tcg_gen_shri_i32(tmp, var, 16);
2987 tcg_gen_or_i32(var, var, tmp);
2988 tcg_temp_free_i32(tmp);
2991 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2993 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2994 TCGv_i32 tmp = tcg_temp_new_i32();
2995 switch (size) {
2996 case 0:
2997 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2998 gen_neon_dup_u8(tmp, 0);
2999 break;
3000 case 1:
3001 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
3002 gen_neon_dup_low16(tmp);
3003 break;
3004 case 2:
3005 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
3006 break;
3007 default: /* Avoid compiler warnings. */
3008 abort();
3010 return tmp;
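/* VSEL: select between Vn and Vm according to the condition encoded in
 * bits [21:20] of the instruction (EQ, VS, GE or GT), evaluated against
 * the current NZCV flags using movcond rather than conditional branches.
 */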
3013 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
3014 uint32_t dp)
3016 uint32_t cc = extract32(insn, 20, 2);
3018 if (dp) {
3019 TCGv_i64 frn, frm, dest;
3020 TCGv_i64 tmp, zero, zf, nf, vf;
3022 zero = tcg_const_i64(0);
3024 frn = tcg_temp_new_i64();
3025 frm = tcg_temp_new_i64();
3026 dest = tcg_temp_new_i64();
3028 zf = tcg_temp_new_i64();
3029 nf = tcg_temp_new_i64();
3030 vf = tcg_temp_new_i64();
3032 tcg_gen_extu_i32_i64(zf, cpu_ZF);
3033 tcg_gen_ext_i32_i64(nf, cpu_NF);
3034 tcg_gen_ext_i32_i64(vf, cpu_VF);
3036 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3037 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3038 switch (cc) {
3039 case 0: /* eq: Z */
3040 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
3041 frn, frm);
3042 break;
3043 case 1: /* vs: V */
3044 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
3045 frn, frm);
3046 break;
3047 case 2: /* ge: N == V -> N ^ V == 0 */
3048 tmp = tcg_temp_new_i64();
3049 tcg_gen_xor_i64(tmp, vf, nf);
3050 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3051 frn, frm);
3052 tcg_temp_free_i64(tmp);
3053 break;
3054 case 3: /* gt: !Z && N == V */
3055 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
3056 frn, frm);
3057 tmp = tcg_temp_new_i64();
3058 tcg_gen_xor_i64(tmp, vf, nf);
3059 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3060 dest, frm);
3061 tcg_temp_free_i64(tmp);
3062 break;
3064 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3065 tcg_temp_free_i64(frn);
3066 tcg_temp_free_i64(frm);
3067 tcg_temp_free_i64(dest);
3069 tcg_temp_free_i64(zf);
3070 tcg_temp_free_i64(nf);
3071 tcg_temp_free_i64(vf);
3073 tcg_temp_free_i64(zero);
3074 } else {
3075 TCGv_i32 frn, frm, dest;
3076 TCGv_i32 tmp, zero;
3078 zero = tcg_const_i32(0);
3080 frn = tcg_temp_new_i32();
3081 frm = tcg_temp_new_i32();
3082 dest = tcg_temp_new_i32();
3083 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3084 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3085 switch (cc) {
3086 case 0: /* eq: Z */
3087 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
3088 frn, frm);
3089 break;
3090 case 1: /* vs: V */
3091 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
3092 frn, frm);
3093 break;
3094 case 2: /* ge: N == V -> N ^ V == 0 */
3095 tmp = tcg_temp_new_i32();
3096 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3097 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3098 frn, frm);
3099 tcg_temp_free_i32(tmp);
3100 break;
3101 case 3: /* gt: !Z && N == V */
3102 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
3103 frn, frm);
3104 tmp = tcg_temp_new_i32();
3105 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3106 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3107 dest, frm);
3108 tcg_temp_free_i32(tmp);
3109 break;
3111 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3112 tcg_temp_free_i32(frn);
3113 tcg_temp_free_i32(frm);
3114 tcg_temp_free_i32(dest);
3116 tcg_temp_free_i32(zero);
3119 return 0;
3122 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3123 uint32_t rm, uint32_t dp)
3125 uint32_t vmin = extract32(insn, 6, 1);
3126 TCGv_ptr fpst = get_fpstatus_ptr(0);
3128 if (dp) {
3129 TCGv_i64 frn, frm, dest;
3131 frn = tcg_temp_new_i64();
3132 frm = tcg_temp_new_i64();
3133 dest = tcg_temp_new_i64();
3135 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3136 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3137 if (vmin) {
3138 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
3139 } else {
3140 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
3142 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3143 tcg_temp_free_i64(frn);
3144 tcg_temp_free_i64(frm);
3145 tcg_temp_free_i64(dest);
3146 } else {
3147 TCGv_i32 frn, frm, dest;
3149 frn = tcg_temp_new_i32();
3150 frm = tcg_temp_new_i32();
3151 dest = tcg_temp_new_i32();
3153 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3154 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3155 if (vmin) {
3156 gen_helper_vfp_minnums(dest, frn, frm, fpst);
3157 } else {
3158 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
3160 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3161 tcg_temp_free_i32(frn);
3162 tcg_temp_free_i32(frm);
3163 tcg_temp_free_i32(dest);
3166 tcg_temp_free_ptr(fpst);
3167 return 0;
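/* VRINTA, VRINTN, VRINTP, VRINTM: round to an integral value in
 * floating-point format using the rounding mode from the instruction
 * rather than the FPSCR.  gen_helper_set_rmode returns the previous
 * rounding mode, so the second call below restores it.
 */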
3170 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3171 int rounding)
3173 TCGv_ptr fpst = get_fpstatus_ptr(0);
3174 TCGv_i32 tcg_rmode;
3176 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3177 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3179 if (dp) {
3180 TCGv_i64 tcg_op;
3181 TCGv_i64 tcg_res;
3182 tcg_op = tcg_temp_new_i64();
3183 tcg_res = tcg_temp_new_i64();
3184 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3185 gen_helper_rintd(tcg_res, tcg_op, fpst);
3186 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3187 tcg_temp_free_i64(tcg_op);
3188 tcg_temp_free_i64(tcg_res);
3189 } else {
3190 TCGv_i32 tcg_op;
3191 TCGv_i32 tcg_res;
3192 tcg_op = tcg_temp_new_i32();
3193 tcg_res = tcg_temp_new_i32();
3194 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3195 gen_helper_rints(tcg_res, tcg_op, fpst);
3196 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3197 tcg_temp_free_i32(tcg_op);
3198 tcg_temp_free_i32(tcg_res);
3201 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3202 tcg_temp_free_i32(tcg_rmode);
3204 tcg_temp_free_ptr(fpst);
3205 return 0;
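/* VCVTA, VCVTN, VCVTP, VCVTM: float to integer conversion using the
 * rounding mode encoded in the instruction rather than the FPSCR;
 * bit 7 selects a signed or unsigned result.
 */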
3208 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3209 int rounding)
3211 bool is_signed = extract32(insn, 7, 1);
3212 TCGv_ptr fpst = get_fpstatus_ptr(0);
3213 TCGv_i32 tcg_rmode, tcg_shift;
3215 tcg_shift = tcg_const_i32(0);
3217 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3218 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3220 if (dp) {
3221 TCGv_i64 tcg_double, tcg_res;
3222 TCGv_i32 tcg_tmp;
3223 /* Rd is encoded as a single precision register even when the source
3224          * is double precision.
3225          */
3226 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3227 tcg_double = tcg_temp_new_i64();
3228 tcg_res = tcg_temp_new_i64();
3229 tcg_tmp = tcg_temp_new_i32();
3230 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3231 if (is_signed) {
3232 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3233 } else {
3234 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3236 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
3237 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3238 tcg_temp_free_i32(tcg_tmp);
3239 tcg_temp_free_i64(tcg_res);
3240 tcg_temp_free_i64(tcg_double);
3241 } else {
3242 TCGv_i32 tcg_single, tcg_res;
3243 tcg_single = tcg_temp_new_i32();
3244 tcg_res = tcg_temp_new_i32();
3245 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3246 if (is_signed) {
3247 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3248 } else {
3249 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3251 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3252 tcg_temp_free_i32(tcg_res);
3253 tcg_temp_free_i32(tcg_single);
3256 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3257 tcg_temp_free_i32(tcg_rmode);
3259 tcg_temp_free_i32(tcg_shift);
3261 tcg_temp_free_ptr(fpst);
3263 return 0;
3266 /* Table for converting the most common AArch32 encoding of
3267 * rounding mode to arm_fprounding order (which matches the
3268  * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3269  */
3270 static const uint8_t fp_decode_rm[] = {
3271 FPROUNDING_TIEAWAY,
3272 FPROUNDING_TIEEVEN,
3273 FPROUNDING_POSINF,
3274 FPROUNDING_NEGINF,
3277 static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
3279 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3281 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3282 return 1;
3285 if (dp) {
3286 VFP_DREG_D(rd, insn);
3287 VFP_DREG_N(rn, insn);
3288 VFP_DREG_M(rm, insn);
3289 } else {
3290 rd = VFP_SREG_D(insn);
3291 rn = VFP_SREG_N(insn);
3292 rm = VFP_SREG_M(insn);
3295 if ((insn & 0x0f800e50) == 0x0e000a00) {
3296 return handle_vsel(insn, rd, rn, rm, dp);
3297 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3298 return handle_vminmaxnm(insn, rd, rn, rm, dp);
3299 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3300 /* VRINTA, VRINTN, VRINTP, VRINTM */
3301 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3302 return handle_vrint(insn, rd, rm, dp, rounding);
3303 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3304 /* VCVTA, VCVTN, VCVTP, VCVTM */
3305 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3306 return handle_vcvt(insn, rd, rm, dp, rounding);
3308 return 1;
3311 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3312    (i.e. an undefined instruction). */
3313 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3315 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3316 int dp, veclen;
3317 TCGv_i32 addr;
3318 TCGv_i32 tmp;
3319 TCGv_i32 tmp2;
3321 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
3322 return 1;
3325 /* FIXME: this access check should not take precedence over UNDEF
3326 * for invalid encodings; we will generate incorrect syndrome information
3327      * for attempts to execute invalid vfp/neon encodings with FP disabled.
3328      */
3329 if (s->fp_excp_el) {
3330 gen_exception_insn(s, 4, EXCP_UDEF,
3331 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
3332 return 0;
3335 if (!s->vfp_enabled) {
3336 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3337 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3338 return 1;
3339 rn = (insn >> 16) & 0xf;
3340 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3341 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
3342 return 1;
3346 if (extract32(insn, 28, 4) == 0xf) {
3347 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3348      * only used in v8 and above.
3349      */
3350 return disas_vfp_v8_insn(s, insn);
3353 dp = ((insn & 0xf00) == 0xb00);
3354 switch ((insn >> 24) & 0xf) {
3355 case 0xe:
3356 if (insn & (1 << 4)) {
3357 /* single register transfer */
3358 rd = (insn >> 12) & 0xf;
3359 if (dp) {
3360 int size;
3361 int pass;
3363 VFP_DREG_N(rn, insn);
3364 if (insn & 0xf)
3365 return 1;
3366 if (insn & 0x00c00060
3367 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
3368 return 1;
3371 pass = (insn >> 21) & 1;
3372 if (insn & (1 << 22)) {
3373 size = 0;
3374 offset = ((insn >> 5) & 3) * 8;
3375 } else if (insn & (1 << 5)) {
3376 size = 1;
3377 offset = (insn & (1 << 6)) ? 16 : 0;
3378 } else {
3379 size = 2;
3380 offset = 0;
3382 if (insn & ARM_CP_RW_BIT) {
3383 /* vfp->arm */
3384 tmp = neon_load_reg(rn, pass);
3385 switch (size) {
3386 case 0:
3387 if (offset)
3388 tcg_gen_shri_i32(tmp, tmp, offset);
3389 if (insn & (1 << 23))
3390 gen_uxtb(tmp);
3391 else
3392 gen_sxtb(tmp);
3393 break;
3394 case 1:
3395 if (insn & (1 << 23)) {
3396 if (offset) {
3397 tcg_gen_shri_i32(tmp, tmp, 16);
3398 } else {
3399 gen_uxth(tmp);
3401 } else {
3402 if (offset) {
3403 tcg_gen_sari_i32(tmp, tmp, 16);
3404 } else {
3405 gen_sxth(tmp);
3408 break;
3409 case 2:
3410 break;
3412 store_reg(s, rd, tmp);
3413 } else {
3414 /* arm->vfp */
3415 tmp = load_reg(s, rd);
3416 if (insn & (1 << 23)) {
3417 /* VDUP */
3418 if (size == 0) {
3419 gen_neon_dup_u8(tmp, 0);
3420 } else if (size == 1) {
3421 gen_neon_dup_low16(tmp);
3423 for (n = 0; n <= pass * 2; n++) {
3424 tmp2 = tcg_temp_new_i32();
3425 tcg_gen_mov_i32(tmp2, tmp);
3426 neon_store_reg(rn, n, tmp2);
3428 neon_store_reg(rn, n, tmp);
3429 } else {
3430 /* VMOV */
3431 switch (size) {
3432 case 0:
3433 tmp2 = neon_load_reg(rn, pass);
3434 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
3435 tcg_temp_free_i32(tmp2);
3436 break;
3437 case 1:
3438 tmp2 = neon_load_reg(rn, pass);
3439 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
3440 tcg_temp_free_i32(tmp2);
3441 break;
3442 case 2:
3443 break;
3445 neon_store_reg(rn, pass, tmp);
3448 } else { /* !dp */
3449 if ((insn & 0x6f) != 0x00)
3450 return 1;
3451 rn = VFP_SREG_N(insn);
3452 if (insn & ARM_CP_RW_BIT) {
3453 /* vfp->arm */
3454 if (insn & (1 << 21)) {
3455 /* system register */
3456 rn >>= 1;
3458 switch (rn) {
3459 case ARM_VFP_FPSID:
3460                         /* VFP2 allows access to FPSID from userspace.
3461 VFP3 restricts all id registers to privileged
3462 accesses. */
3463 if (IS_USER(s)
3464 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3465 return 1;
3467 tmp = load_cpu_field(vfp.xregs[rn]);
3468 break;
3469 case ARM_VFP_FPEXC:
3470 if (IS_USER(s))
3471 return 1;
3472 tmp = load_cpu_field(vfp.xregs[rn]);
3473 break;
3474 case ARM_VFP_FPINST:
3475 case ARM_VFP_FPINST2:
3476 /* Not present in VFP3. */
3477 if (IS_USER(s)
3478 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3479 return 1;
3481 tmp = load_cpu_field(vfp.xregs[rn]);
3482 break;
3483 case ARM_VFP_FPSCR:
3484 if (rd == 15) {
3485 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3486 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3487 } else {
3488 tmp = tcg_temp_new_i32();
3489 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3491 break;
3492 case ARM_VFP_MVFR2:
3493 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3494 return 1;
3496 /* fall through */
3497 case ARM_VFP_MVFR0:
3498 case ARM_VFP_MVFR1:
3499 if (IS_USER(s)
3500 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
3501 return 1;
3503 tmp = load_cpu_field(vfp.xregs[rn]);
3504 break;
3505 default:
3506 return 1;
3508 } else {
3509 gen_mov_F0_vreg(0, rn);
3510 tmp = gen_vfp_mrs();
3512 if (rd == 15) {
3513 /* Set the 4 flag bits in the CPSR. */
3514 gen_set_nzcv(tmp);
3515 tcg_temp_free_i32(tmp);
3516 } else {
3517 store_reg(s, rd, tmp);
3519 } else {
3520 /* arm->vfp */
3521 if (insn & (1 << 21)) {
3522 rn >>= 1;
3523 /* system register */
3524 switch (rn) {
3525 case ARM_VFP_FPSID:
3526 case ARM_VFP_MVFR0:
3527 case ARM_VFP_MVFR1:
3528 /* Writes are ignored. */
3529 break;
3530 case ARM_VFP_FPSCR:
3531 tmp = load_reg(s, rd);
3532 gen_helper_vfp_set_fpscr(cpu_env, tmp);
3533 tcg_temp_free_i32(tmp);
3534 gen_lookup_tb(s);
3535 break;
3536 case ARM_VFP_FPEXC:
3537 if (IS_USER(s))
3538 return 1;
3539 /* TODO: VFP subarchitecture support.
3540 * For now, keep the EN bit only */
3541 tmp = load_reg(s, rd);
3542 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
3543 store_cpu_field(tmp, vfp.xregs[rn]);
3544 gen_lookup_tb(s);
3545 break;
3546 case ARM_VFP_FPINST:
3547 case ARM_VFP_FPINST2:
3548 if (IS_USER(s)) {
3549 return 1;
3551 tmp = load_reg(s, rd);
3552 store_cpu_field(tmp, vfp.xregs[rn]);
3553 break;
3554 default:
3555 return 1;
3557 } else {
3558 tmp = load_reg(s, rd);
3559 gen_vfp_msr(tmp);
3560 gen_mov_vreg_F0(0, rn);
3564 } else {
3565 /* data processing */
3566 /* The opcode is in bits 23, 21, 20 and 6. */
3567 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3568 if (dp) {
3569 if (op == 15) {
3570 /* rn is opcode */
3571 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3572 } else {
3573 /* rn is register number */
3574 VFP_DREG_N(rn, insn);
3577 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3578 ((rn & 0x1e) == 0x6))) {
3579 /* Integer or single/half precision destination. */
3580 rd = VFP_SREG_D(insn);
3581 } else {
3582 VFP_DREG_D(rd, insn);
3584 if (op == 15 &&
3585 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3586 ((rn & 0x1e) == 0x4))) {
3587 /* VCVT from int or half precision is always from S reg
3588 * regardless of dp bit. VCVT with immediate frac_bits
3589          * has same format as SREG_M.
3590          */
3591 rm = VFP_SREG_M(insn);
3592 } else {
3593 VFP_DREG_M(rm, insn);
3595 } else {
3596 rn = VFP_SREG_N(insn);
3597 if (op == 15 && rn == 15) {
3598 /* Double precision destination. */
3599 VFP_DREG_D(rd, insn);
3600 } else {
3601 rd = VFP_SREG_D(insn);
3603 /* NB that we implicitly rely on the encoding for the frac_bits
3604              * in VCVT of fixed to float being the same as that of an SREG_M
3605              */
3606 rm = VFP_SREG_M(insn);
3609 veclen = s->vec_len;
3610 if (op == 15 && rn > 3)
3611 veclen = 0;
3613 /* Shut up compiler warnings. */
3614 delta_m = 0;
3615 delta_d = 0;
3616 bank_mask = 0;
3618 if (veclen > 0) {
3619 if (dp)
3620 bank_mask = 0xc;
3621 else
3622 bank_mask = 0x18;
3624 /* Figure out what type of vector operation this is. */
3625 if ((rd & bank_mask) == 0) {
3626 /* scalar */
3627 veclen = 0;
3628 } else {
3629 if (dp)
3630 delta_d = (s->vec_stride >> 1) + 1;
3631 else
3632 delta_d = s->vec_stride + 1;
3634 if ((rm & bank_mask) == 0) {
3635 /* mixed scalar/vector */
3636 delta_m = 0;
3637 } else {
3638 /* vector */
3639 delta_m = delta_d;
3644 /* Load the initial operands. */
3645 if (op == 15) {
3646 switch (rn) {
3647 case 16:
3648 case 17:
3649 /* Integer source */
3650 gen_mov_F0_vreg(0, rm);
3651 break;
3652 case 8:
3653 case 9:
3654 /* Compare */
3655 gen_mov_F0_vreg(dp, rd);
3656 gen_mov_F1_vreg(dp, rm);
3657 break;
3658 case 10:
3659 case 11:
3660 /* Compare with zero */
3661 gen_mov_F0_vreg(dp, rd);
3662 gen_vfp_F1_ld0(dp);
3663 break;
3664 case 20:
3665 case 21:
3666 case 22:
3667 case 23:
3668 case 28:
3669 case 29:
3670 case 30:
3671 case 31:
3672 /* Source and destination the same. */
3673 gen_mov_F0_vreg(dp, rd);
3674 break;
3675 case 4:
3676 case 5:
3677 case 6:
3678 case 7:
3679 /* VCVTB, VCVTT: only present with the halfprec extension
3680 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3681                 * (we choose to UNDEF)
3682                 */
3683 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3684 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
3685 return 1;
3687 if (!extract32(rn, 1, 1)) {
3688 /* Half precision source. */
3689 gen_mov_F0_vreg(0, rm);
3690 break;
3692 /* Otherwise fall through */
3693 default:
3694 /* One source operand. */
3695 gen_mov_F0_vreg(dp, rm);
3696 break;
3698 } else {
3699 /* Two source operands. */
3700 gen_mov_F0_vreg(dp, rn);
3701 gen_mov_F1_vreg(dp, rm);
3704 for (;;) {
3705 /* Perform the calculation. */
3706 switch (op) {
3707 case 0: /* VMLA: fd + (fn * fm) */
3708 /* Note that order of inputs to the add matters for NaNs */
3709 gen_vfp_F1_mul(dp);
3710 gen_mov_F0_vreg(dp, rd);
3711 gen_vfp_add(dp);
3712 break;
3713 case 1: /* VMLS: fd + -(fn * fm) */
3714 gen_vfp_mul(dp);
3715 gen_vfp_F1_neg(dp);
3716 gen_mov_F0_vreg(dp, rd);
3717 gen_vfp_add(dp);
3718 break;
3719 case 2: /* VNMLS: -fd + (fn * fm) */
3720 /* Note that it isn't valid to replace (-A + B) with (B - A)
3721 * or similar plausible looking simplifications
3722              * because this will give wrong results for NaNs.
3723              */
3724 gen_vfp_F1_mul(dp);
3725 gen_mov_F0_vreg(dp, rd);
3726 gen_vfp_neg(dp);
3727 gen_vfp_add(dp);
3728 break;
3729 case 3: /* VNMLA: -fd + -(fn * fm) */
3730 gen_vfp_mul(dp);
3731 gen_vfp_F1_neg(dp);
3732 gen_mov_F0_vreg(dp, rd);
3733 gen_vfp_neg(dp);
3734 gen_vfp_add(dp);
3735 break;
3736 case 4: /* mul: fn * fm */
3737 gen_vfp_mul(dp);
3738 break;
3739 case 5: /* nmul: -(fn * fm) */
3740 gen_vfp_mul(dp);
3741 gen_vfp_neg(dp);
3742 break;
3743 case 6: /* add: fn + fm */
3744 gen_vfp_add(dp);
3745 break;
3746 case 7: /* sub: fn - fm */
3747 gen_vfp_sub(dp);
3748 break;
3749 case 8: /* div: fn / fm */
3750 gen_vfp_div(dp);
3751 break;
3752 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3753 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3754 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3755 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3756 /* These are fused multiply-add, and must be done as one
3757 * floating point operation with no rounding between the
3758 * multiplication and addition steps.
3759 * NB that doing the negations here as separate steps is
3760                          * correct: an input NaN should come out with its sign bit
3761                          * flipped if it is a negated input.
3762                          */
3763 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
3764 return 1;
3766 if (dp) {
3767 TCGv_ptr fpst;
3768 TCGv_i64 frd;
3769 if (op & 1) {
3770 /* VFNMS, VFMS */
3771 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3773 frd = tcg_temp_new_i64();
3774 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3775 if (op & 2) {
3776 /* VFNMA, VFNMS */
3777 gen_helper_vfp_negd(frd, frd);
3779 fpst = get_fpstatus_ptr(0);
3780 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3781 cpu_F1d, frd, fpst);
3782 tcg_temp_free_ptr(fpst);
3783 tcg_temp_free_i64(frd);
3784 } else {
3785 TCGv_ptr fpst;
3786 TCGv_i32 frd;
3787 if (op & 1) {
3788 /* VFNMS, VFMS */
3789 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3791 frd = tcg_temp_new_i32();
3792 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3793 if (op & 2) {
3794 gen_helper_vfp_negs(frd, frd);
3796 fpst = get_fpstatus_ptr(0);
3797 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3798 cpu_F1s, frd, fpst);
3799 tcg_temp_free_ptr(fpst);
3800 tcg_temp_free_i32(frd);
3802 break;
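            /* fconst (VMOV immediate): the 8-bit immediate, split between
             * instruction bits [19:16] and [3:0], is expanded as described
             * by the ARM ARM VFPExpandImm() pseudocode: a sign bit, an
             * exponent built from the inverted-and-replicated 'b' bit, and
             * a 4-bit fraction, with all remaining fraction bits zero.
             */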
3803 case 14: /* fconst */
3804 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3805 return 1;
3808 n = (insn << 12) & 0x80000000;
3809 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3810 if (dp) {
3811 if (i & 0x40)
3812 i |= 0x3f80;
3813 else
3814 i |= 0x4000;
3815 n |= i << 16;
3816 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3817 } else {
3818 if (i & 0x40)
3819 i |= 0x780;
3820 else
3821 i |= 0x800;
3822 n |= i << 19;
3823 tcg_gen_movi_i32(cpu_F0s, n);
3825 break;
3826 case 15: /* extension space */
3827 switch (rn) {
3828 case 0: /* cpy */
3829 /* no-op */
3830 break;
3831 case 1: /* abs */
3832 gen_vfp_abs(dp);
3833 break;
3834 case 2: /* neg */
3835 gen_vfp_neg(dp);
3836 break;
3837 case 3: /* sqrt */
3838 gen_vfp_sqrt(dp);
3839 break;
3840 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3842 TCGv_ptr fpst = get_fpstatus_ptr(false);
3843 TCGv_i32 ahp_mode = get_ahp_flag();
3844 tmp = gen_vfp_mrs();
3845 tcg_gen_ext16u_i32(tmp, tmp);
3846 if (dp) {
3847 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3848 fpst, ahp_mode);
3849 } else {
3850 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3851 fpst, ahp_mode);
3853 tcg_temp_free_i32(ahp_mode);
3854 tcg_temp_free_ptr(fpst);
3855 tcg_temp_free_i32(tmp);
3856 break;
3858 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3860 TCGv_ptr fpst = get_fpstatus_ptr(false);
3861 TCGv_i32 ahp = get_ahp_flag();
3862 tmp = gen_vfp_mrs();
3863 tcg_gen_shri_i32(tmp, tmp, 16);
3864 if (dp) {
3865 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3866 fpst, ahp);
3867 } else {
3868 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3869 fpst, ahp);
3871 tcg_temp_free_i32(tmp);
3872 tcg_temp_free_i32(ahp);
3873 tcg_temp_free_ptr(fpst);
3874 break;
3876 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3878 TCGv_ptr fpst = get_fpstatus_ptr(false);
3879 TCGv_i32 ahp = get_ahp_flag();
3880 tmp = tcg_temp_new_i32();
3882 if (dp) {
3883 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3884 fpst, ahp);
3885 } else {
3886 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3887 fpst, ahp);
3889 tcg_temp_free_i32(ahp);
3890 tcg_temp_free_ptr(fpst);
3891 gen_mov_F0_vreg(0, rd);
3892 tmp2 = gen_vfp_mrs();
3893 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3894 tcg_gen_or_i32(tmp, tmp, tmp2);
3895 tcg_temp_free_i32(tmp2);
3896 gen_vfp_msr(tmp);
3897 break;
3899 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3901 TCGv_ptr fpst = get_fpstatus_ptr(false);
3902 TCGv_i32 ahp = get_ahp_flag();
3903 tmp = tcg_temp_new_i32();
3904 if (dp) {
3905 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3906 fpst, ahp);
3907 } else {
3908 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3909 fpst, ahp);
3911 tcg_temp_free_i32(ahp);
3912 tcg_temp_free_ptr(fpst);
3913 tcg_gen_shli_i32(tmp, tmp, 16);
3914 gen_mov_F0_vreg(0, rd);
3915 tmp2 = gen_vfp_mrs();
3916 tcg_gen_ext16u_i32(tmp2, tmp2);
3917 tcg_gen_or_i32(tmp, tmp, tmp2);
3918 tcg_temp_free_i32(tmp2);
3919 gen_vfp_msr(tmp);
3920 break;
3922 case 8: /* cmp */
3923 gen_vfp_cmp(dp);
3924 break;
3925 case 9: /* cmpe */
3926 gen_vfp_cmpe(dp);
3927 break;
3928 case 10: /* cmpz */
3929 gen_vfp_cmp(dp);
3930 break;
3931 case 11: /* cmpez */
3932 gen_vfp_F1_ld0(dp);
3933 gen_vfp_cmpe(dp);
3934 break;
3935 case 12: /* vrintr */
3937 TCGv_ptr fpst = get_fpstatus_ptr(0);
3938 if (dp) {
3939 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3940 } else {
3941 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3943 tcg_temp_free_ptr(fpst);
3944 break;
3946 case 13: /* vrintz */
3948 TCGv_ptr fpst = get_fpstatus_ptr(0);
3949 TCGv_i32 tcg_rmode;
3950 tcg_rmode = tcg_const_i32(float_round_to_zero);
3951 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3952 if (dp) {
3953 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3954 } else {
3955 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3957 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3958 tcg_temp_free_i32(tcg_rmode);
3959 tcg_temp_free_ptr(fpst);
3960 break;
3962 case 14: /* vrintx */
3964 TCGv_ptr fpst = get_fpstatus_ptr(0);
3965 if (dp) {
3966 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3967 } else {
3968 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3970 tcg_temp_free_ptr(fpst);
3971 break;
3973 case 15: /* single<->double conversion */
3974 if (dp)
3975 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3976 else
3977 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3978 break;
3979 case 16: /* fuito */
3980 gen_vfp_uito(dp, 0);
3981 break;
3982 case 17: /* fsito */
3983 gen_vfp_sito(dp, 0);
3984 break;
3985 case 20: /* fshto */
3986 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3987 return 1;
3989 gen_vfp_shto(dp, 16 - rm, 0);
3990 break;
3991 case 21: /* fslto */
3992 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3993 return 1;
3995 gen_vfp_slto(dp, 32 - rm, 0);
3996 break;
3997 case 22: /* fuhto */
3998 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3999 return 1;
4001 gen_vfp_uhto(dp, 16 - rm, 0);
4002 break;
4003 case 23: /* fulto */
4004 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4005 return 1;
4007 gen_vfp_ulto(dp, 32 - rm, 0);
4008 break;
4009 case 24: /* ftoui */
4010 gen_vfp_toui(dp, 0);
4011 break;
4012 case 25: /* ftouiz */
4013 gen_vfp_touiz(dp, 0);
4014 break;
4015 case 26: /* ftosi */
4016 gen_vfp_tosi(dp, 0);
4017 break;
4018 case 27: /* ftosiz */
4019 gen_vfp_tosiz(dp, 0);
4020 break;
4021 case 28: /* ftosh */
4022 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4023 return 1;
4025 gen_vfp_tosh(dp, 16 - rm, 0);
4026 break;
4027 case 29: /* ftosl */
4028 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4029 return 1;
4031 gen_vfp_tosl(dp, 32 - rm, 0);
4032 break;
4033 case 30: /* ftouh */
4034 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4035 return 1;
4037 gen_vfp_touh(dp, 16 - rm, 0);
4038 break;
4039 case 31: /* ftoul */
4040 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
4041 return 1;
4043 gen_vfp_toul(dp, 32 - rm, 0);
4044 break;
4045 default: /* undefined */
4046 return 1;
4048 break;
4049 default: /* undefined */
4050 return 1;
4053 /* Write back the result. */
4054 if (op == 15 && (rn >= 8 && rn <= 11)) {
4055 /* Comparison, do nothing. */
4056 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
4057 (rn & 0x1e) == 0x6)) {
4058 /* VCVT double to int: always integer result.
4059 * VCVT double to half precision is always a single
4060              * precision result.
4061              */
4062 gen_mov_vreg_F0(0, rd);
4063 } else if (op == 15 && rn == 15) {
4064 /* conversion */
4065 gen_mov_vreg_F0(!dp, rd);
4066 } else {
4067 gen_mov_vreg_F0(dp, rd);
4070 /* break out of the loop if we have finished */
4071 if (veclen == 0)
4072 break;
4074 if (op == 15 && delta_m == 0) {
4075 /* single source one-many */
4076 while (veclen--) {
4077 rd = ((rd + delta_d) & (bank_mask - 1))
4078 | (rd & bank_mask);
4079 gen_mov_vreg_F0(dp, rd);
4081 break;
4083 /* Setup the next operands. */
4084 veclen--;
4085 rd = ((rd + delta_d) & (bank_mask - 1))
4086 | (rd & bank_mask);
4088 if (op == 15) {
4089 /* One source operand. */
4090 rm = ((rm + delta_m) & (bank_mask - 1))
4091 | (rm & bank_mask);
4092 gen_mov_F0_vreg(dp, rm);
4093 } else {
4094 /* Two source operands. */
4095 rn = ((rn + delta_d) & (bank_mask - 1))
4096 | (rn & bank_mask);
4097 gen_mov_F0_vreg(dp, rn);
4098 if (delta_m) {
4099 rm = ((rm + delta_m) & (bank_mask - 1))
4100 | (rm & bank_mask);
4101 gen_mov_F1_vreg(dp, rm);
4106 break;
4107 case 0xc:
4108 case 0xd:
4109 if ((insn & 0x03e00000) == 0x00400000) {
4110 /* two-register transfer */
4111 rn = (insn >> 16) & 0xf;
4112 rd = (insn >> 12) & 0xf;
4113 if (dp) {
4114 VFP_DREG_M(rm, insn);
4115 } else {
4116 rm = VFP_SREG_M(insn);
4119 if (insn & ARM_CP_RW_BIT) {
4120 /* vfp->arm */
4121 if (dp) {
4122 gen_mov_F0_vreg(0, rm * 2);
4123 tmp = gen_vfp_mrs();
4124 store_reg(s, rd, tmp);
4125 gen_mov_F0_vreg(0, rm * 2 + 1);
4126 tmp = gen_vfp_mrs();
4127 store_reg(s, rn, tmp);
4128 } else {
4129 gen_mov_F0_vreg(0, rm);
4130 tmp = gen_vfp_mrs();
4131 store_reg(s, rd, tmp);
4132 gen_mov_F0_vreg(0, rm + 1);
4133 tmp = gen_vfp_mrs();
4134 store_reg(s, rn, tmp);
4136 } else {
4137 /* arm->vfp */
4138 if (dp) {
4139 tmp = load_reg(s, rd);
4140 gen_vfp_msr(tmp);
4141 gen_mov_vreg_F0(0, rm * 2);
4142 tmp = load_reg(s, rn);
4143 gen_vfp_msr(tmp);
4144 gen_mov_vreg_F0(0, rm * 2 + 1);
4145 } else {
4146 tmp = load_reg(s, rd);
4147 gen_vfp_msr(tmp);
4148 gen_mov_vreg_F0(0, rm);
4149 tmp = load_reg(s, rn);
4150 gen_vfp_msr(tmp);
4151 gen_mov_vreg_F0(0, rm + 1);
4154 } else {
4155 /* Load/store */
4156 rn = (insn >> 16) & 0xf;
4157 if (dp)
4158 VFP_DREG_D(rd, insn);
4159 else
4160 rd = VFP_SREG_D(insn);
4161 if ((insn & 0x01200000) == 0x01000000) {
4162 /* Single load/store */
4163 offset = (insn & 0xff) << 2;
4164 if ((insn & (1 << 23)) == 0)
4165 offset = -offset;
4166 if (s->thumb && rn == 15) {
4167 /* This is actually UNPREDICTABLE */
4168 addr = tcg_temp_new_i32();
4169 tcg_gen_movi_i32(addr, s->pc & ~2);
4170 } else {
4171 addr = load_reg(s, rn);
4173 tcg_gen_addi_i32(addr, addr, offset);
4174 if (insn & (1 << 20)) {
4175 gen_vfp_ld(s, dp, addr);
4176 gen_mov_vreg_F0(dp, rd);
4177 } else {
4178 gen_mov_F0_vreg(dp, rd);
4179 gen_vfp_st(s, dp, addr);
4181 tcg_temp_free_i32(addr);
4182 } else {
4183 /* load/store multiple */
4184 int w = insn & (1 << 21);
4185 if (dp)
4186 n = (insn >> 1) & 0x7f;
4187 else
4188 n = insn & 0xff;
4190 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4191 /* P == U , W == 1 => UNDEF */
4192 return 1;
4194 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4195 /* UNPREDICTABLE cases for bad immediates: we choose to
4196                  * UNDEF to avoid generating huge numbers of TCG ops
4197                  */
4198 return 1;
4200 if (rn == 15 && w) {
4201 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4202 return 1;
4205 if (s->thumb && rn == 15) {
4206 /* This is actually UNPREDICTABLE */
4207 addr = tcg_temp_new_i32();
4208 tcg_gen_movi_i32(addr, s->pc & ~2);
4209 } else {
4210 addr = load_reg(s, rn);
4212 if (insn & (1 << 24)) /* pre-decrement */
4213 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
4215 if (dp)
4216 offset = 8;
4217 else
4218 offset = 4;
4219 for (i = 0; i < n; i++) {
4220 if (insn & ARM_CP_RW_BIT) {
4221 /* load */
4222 gen_vfp_ld(s, dp, addr);
4223 gen_mov_vreg_F0(dp, rd + i);
4224 } else {
4225 /* store */
4226 gen_mov_F0_vreg(dp, rd + i);
4227 gen_vfp_st(s, dp, addr);
4229 tcg_gen_addi_i32(addr, addr, offset);
4231 if (w) {
4232 /* writeback */
4233 if (insn & (1 << 24))
4234 offset = -offset * n;
4235 else if (dp && (insn & 1))
4236 offset = 4;
4237 else
4238 offset = 0;
4240 if (offset != 0)
4241 tcg_gen_addi_i32(addr, addr, offset);
4242 store_reg(s, rn, addr);
4243 } else {
4244 tcg_temp_free_i32(addr);
4248 break;
4249 default:
4250 /* Should never happen. */
4251 return 1;
4253 return 0;
4256 static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
4258 #ifndef CONFIG_USER_ONLY
4259 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
4260 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4261 #else
4262 return true;
4263 #endif
4266 static void gen_goto_ptr(void)
4268 tcg_gen_lookup_and_goto_ptr();
4271 /* This will end the TB but doesn't guarantee we'll return to
4272 * cpu_loop_exec. Any live exit_requests will be processed as we
4273 * enter the next TB.
4275 static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
4277 if (use_goto_tb(s, dest)) {
4278 tcg_gen_goto_tb(n);
4279 gen_set_pc_im(s, dest);
4280 tcg_gen_exit_tb(s->base.tb, n);
4281 } else {
4282 gen_set_pc_im(s, dest);
4283 gen_goto_ptr();
4285 s->base.is_jmp = DISAS_NORETURN;
4288 static inline void gen_jmp (DisasContext *s, uint32_t dest)
4290 if (unlikely(is_singlestepping(s))) {
4291 /* An indirect jump so that we still trigger the debug exception. */
4292 if (s->thumb)
4293 dest |= 1;
4294 gen_bx_im(s, dest);
4295 } else {
4296 gen_goto_tb(s, 0, dest);
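/* Signed halfword multiply, as used by SMULBB/SMULBT/SMULTB/SMULTT and the
 * SMLA<x><y> family: x and y select the top (1) or bottom (0) 16 bits of
 * t0 and t1 respectively, sign-extended before the 32x32->32 multiply.
 */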
4300 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
4302 if (x)
4303 tcg_gen_sari_i32(t0, t0, 16);
4304 else
4305 gen_sxth(t0);
4306 if (y)
4307 tcg_gen_sari_i32(t1, t1, 16);
4308 else
4309 gen_sxth(t1);
4310 tcg_gen_mul_i32(t0, t0, t1);
4313 /* Return the mask of PSR bits set by a MSR instruction. */
4314 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4316 uint32_t mask;
4318 mask = 0;
4319 if (flags & (1 << 0))
4320 mask |= 0xff;
4321 if (flags & (1 << 1))
4322 mask |= 0xff00;
4323 if (flags & (1 << 2))
4324 mask |= 0xff0000;
4325 if (flags & (1 << 3))
4326 mask |= 0xff000000;
4328 /* Mask out undefined bits. */
4329 mask &= ~CPSR_RESERVED;
4330 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
4331 mask &= ~CPSR_T;
4333 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
4334         mask &= ~CPSR_Q; /* V5TE in reality */
4336 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
4337 mask &= ~(CPSR_E | CPSR_GE);
4339 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
4340 mask &= ~CPSR_IT;
4342 /* Mask out execution state and reserved bits. */
4343 if (!spsr) {
4344 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4346 /* Mask out privileged bits. */
4347 if (IS_USER(s))
4348 mask &= CPSR_USER;
4349 return mask;
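/* Illustrative example: an MSR CPSR_fc has flags == 0b1001, so the mask
 * computed above starts as 0xff000000 | 0x000000ff == 0xff0000ff before
 * the feature and privilege tests clear any bits this core does not
 * implement or the current mode may not write.
 */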
4352 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
4353 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
4355 TCGv_i32 tmp;
4356 if (spsr) {
4357 /* ??? This is also undefined in system mode. */
4358 if (IS_USER(s))
4359 return 1;
4361 tmp = load_cpu_field(spsr);
4362 tcg_gen_andi_i32(tmp, tmp, ~mask);
4363 tcg_gen_andi_i32(t0, t0, mask);
4364 tcg_gen_or_i32(tmp, tmp, t0);
4365 store_cpu_field(tmp, spsr);
4366 } else {
4367 gen_set_cpsr(t0, mask);
4369 tcg_temp_free_i32(t0);
4370 gen_lookup_tb(s);
4371 return 0;
4374 /* Returns nonzero if access to the PSR is not permitted. */
4375 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4377 TCGv_i32 tmp;
4378 tmp = tcg_temp_new_i32();
4379 tcg_gen_movi_i32(tmp, val);
4380 return gen_set_psr(s, mask, spsr, tmp);
4383 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
4384 int *tgtmode, int *regno)
4386 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4387 * the target mode and register number, and identify the various
4388 * unpredictable cases.
4389 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4390 * + executed in user mode
4391 * + using R15 as the src/dest register
4392 * + accessing an unimplemented register
4393 * + accessing a register that's inaccessible at current PL/security state*
4394 * + accessing a register that you could access with a different insn
4395 * We choose to UNDEF in all these cases.
4396 * Since we don't know which of the various AArch32 modes we are in
4397 * we have to defer some checks to runtime.
4398 * Accesses to Monitor mode registers from Secure EL1 (which implies
4399 * that EL3 is AArch64) must trap to EL3.
4401 * If the access checks fail this function will emit code to take
4402 * an exception and return false. Otherwise it will return true,
4403 * and set *tgtmode and *regno appropriately.
4405 int exc_target = default_exception_el(s);
4407 /* These instructions are present only in ARMv8, or in ARMv7 with the
4408 * Virtualization Extensions.
4410 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4411 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
4412 goto undef;
4415 if (IS_USER(s) || rn == 15) {
4416 goto undef;
4419 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4420 * of registers into (r, sysm).
4422 if (r) {
4423 /* SPSRs for other modes */
4424 switch (sysm) {
4425 case 0xe: /* SPSR_fiq */
4426 *tgtmode = ARM_CPU_MODE_FIQ;
4427 break;
4428 case 0x10: /* SPSR_irq */
4429 *tgtmode = ARM_CPU_MODE_IRQ;
4430 break;
4431 case 0x12: /* SPSR_svc */
4432 *tgtmode = ARM_CPU_MODE_SVC;
4433 break;
4434 case 0x14: /* SPSR_abt */
4435 *tgtmode = ARM_CPU_MODE_ABT;
4436 break;
4437 case 0x16: /* SPSR_und */
4438 *tgtmode = ARM_CPU_MODE_UND;
4439 break;
4440 case 0x1c: /* SPSR_mon */
4441 *tgtmode = ARM_CPU_MODE_MON;
4442 break;
4443 case 0x1e: /* SPSR_hyp */
4444 *tgtmode = ARM_CPU_MODE_HYP;
4445 break;
4446 default: /* unallocated */
4447 goto undef;
4449 /* We arbitrarily assign SPSR a register number of 16. */
4450 *regno = 16;
4451 } else {
4452 /* general purpose registers for other modes */
4453 switch (sysm) {
4454 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4455 *tgtmode = ARM_CPU_MODE_USR;
4456 *regno = sysm + 8;
4457 break;
4458 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4459 *tgtmode = ARM_CPU_MODE_FIQ;
4460 *regno = sysm;
4461 break;
4462 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4463 *tgtmode = ARM_CPU_MODE_IRQ;
4464 *regno = sysm & 1 ? 13 : 14;
4465 break;
4466 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4467 *tgtmode = ARM_CPU_MODE_SVC;
4468 *regno = sysm & 1 ? 13 : 14;
4469 break;
4470 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4471 *tgtmode = ARM_CPU_MODE_ABT;
4472 *regno = sysm & 1 ? 13 : 14;
4473 break;
4474 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4475 *tgtmode = ARM_CPU_MODE_UND;
4476 *regno = sysm & 1 ? 13 : 14;
4477 break;
4478 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4479 *tgtmode = ARM_CPU_MODE_MON;
4480 *regno = sysm & 1 ? 13 : 14;
4481 break;
4482 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4483 *tgtmode = ARM_CPU_MODE_HYP;
4484 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4485 *regno = sysm & 1 ? 13 : 17;
4486 break;
4487 default: /* unallocated */
4488 goto undef;
4492 /* Catch the 'accessing inaccessible register' cases we can detect
4493 * at translate time.
4495 switch (*tgtmode) {
4496 case ARM_CPU_MODE_MON:
4497 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
4498 goto undef;
4500 if (s->current_el == 1) {
4501 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4502 * then accesses to Mon registers trap to EL3
4504 exc_target = 3;
4505 goto undef;
4507 break;
4508 case ARM_CPU_MODE_HYP:
4509 /* Note that we can forbid accesses from EL2 here because they
4510 * must be from Hyp mode itself
4512 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
4513 goto undef;
4515 break;
4516 default:
4517 break;
4520 return true;
4522 undef:
4523 /* If we get here then some access check did not pass */
4524 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
4525 return false;
4528 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4530 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4531 int tgtmode = 0, regno = 0;
4533 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4534 return;
4537 /* Sync state because msr_banked() can raise exceptions */
4538 gen_set_condexec(s);
4539 gen_set_pc_im(s, s->pc - 4);
4540 tcg_reg = load_reg(s, rn);
4541 tcg_tgtmode = tcg_const_i32(tgtmode);
4542 tcg_regno = tcg_const_i32(regno);
4543 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4544 tcg_temp_free_i32(tcg_tgtmode);
4545 tcg_temp_free_i32(tcg_regno);
4546 tcg_temp_free_i32(tcg_reg);
4547 s->base.is_jmp = DISAS_UPDATE;
4550 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4552 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4553 int tgtmode = 0, regno = 0;
4555 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4556 return;
4559 /* Sync state because mrs_banked() can raise exceptions */
4560 gen_set_condexec(s);
4561 gen_set_pc_im(s, s->pc - 4);
4562 tcg_reg = tcg_temp_new_i32();
4563 tcg_tgtmode = tcg_const_i32(tgtmode);
4564 tcg_regno = tcg_const_i32(regno);
4565 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4566 tcg_temp_free_i32(tcg_tgtmode);
4567 tcg_temp_free_i32(tcg_regno);
4568 store_reg(s, rn, tcg_reg);
4569 s->base.is_jmp = DISAS_UPDATE;
4572 /* Store value to PC as for an exception return (ie don't
4573 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
4574 * will do the masking based on the new value of the Thumb bit.
4576 static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
4578 tcg_gen_mov_i32(cpu_R[15], pc);
4579 tcg_temp_free_i32(pc);
4582 /* Generate a v6 exception return. Marks both values as dead. */
4583 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
4585 store_pc_exc_ret(s, pc);
4586 /* The cpsr_write_eret helper will mask the low bits of PC
4587 * appropriately depending on the new Thumb bit, so it must
4588 * be called after storing the new PC.
4590 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4591 gen_io_start();
4593 gen_helper_cpsr_write_eret(cpu_env, cpsr);
4594 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4595 gen_io_end();
4597 tcg_temp_free_i32(cpsr);
4598 /* Must exit loop to check un-masked IRQs */
4599 s->base.is_jmp = DISAS_EXIT;
4602 /* Generate an old-style exception return. Marks pc as dead. */
4603 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4605 gen_rfe(s, pc, load_cpu_field(spsr));
4609 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
4610 * only call the helper when running single threaded TCG code to ensure
4611 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4612  * just skip this instruction. Currently the SEV/SEVL instructions,
4613  * which are *one* of many ways to wake the CPU from WFE, are not
4614  * implemented, so we can't sleep like WFI does.
4616 static void gen_nop_hint(DisasContext *s, int val)
4618 switch (val) {
4619 /* When running in MTTCG we don't generate jumps to the yield and
4620 * WFE helpers as it won't affect the scheduling of other vCPUs.
4621 * If we wanted to more completely model WFE/SEV so we don't busy
4622 * spin unnecessarily we would need to do something more involved.
4624 case 1: /* yield */
4625 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4626 gen_set_pc_im(s, s->pc);
4627 s->base.is_jmp = DISAS_YIELD;
4629 break;
4630 case 3: /* wfi */
4631 gen_set_pc_im(s, s->pc);
4632 s->base.is_jmp = DISAS_WFI;
4633 break;
4634 case 2: /* wfe */
4635 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4636 gen_set_pc_im(s, s->pc);
4637 s->base.is_jmp = DISAS_WFE;
4639 break;
4640 case 4: /* sev */
4641 case 5: /* sevl */
4642 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
4643 default: /* nop */
4644 break;
4648 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
4650 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
4652 switch (size) {
4653 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4654 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4655 case 2: tcg_gen_add_i32(t0, t0, t1); break;
4656 default: abort();
4660 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
4662 switch (size) {
4663 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4664 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4665 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
4666 default: return;
4670 /* 32-bit pairwise ops end up the same as the elementwise versions. */
4671 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4672 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4673 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4674 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
4676 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
4677 switch ((size << 1) | u) { \
4678 case 0: \
4679 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
4680 break; \
4681 case 1: \
4682 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
4683 break; \
4684 case 2: \
4685 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
4686 break; \
4687 case 3: \
4688 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
4689 break; \
4690 case 4: \
4691 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
4692 break; \
4693 case 5: \
4694 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
4695 break; \
4696 default: return 1; \
4697 }} while (0)
4699 #define GEN_NEON_INTEGER_OP(name) do { \
4700 switch ((size << 1) | u) { \
4701 case 0: \
4702 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
4703 break; \
4704 case 1: \
4705 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
4706 break; \
4707 case 2: \
4708 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
4709 break; \
4710 case 3: \
4711 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
4712 break; \
4713 case 4: \
4714 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
4715 break; \
4716 case 5: \
4717 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
4718 break; \
4719 default: return 1; \
4720 }} while (0)
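/* For example, with size == 1 and u == 1 the switch above selects case 3
 * and the macro expands to gen_helper_neon_<name>_u16(tmp, tmp, tmp2); the
 * _ENV variant differs only in passing cpu_env, for helpers that need CPU
 * state (e.g. the saturating ops, which may set QC).
 */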
4722 static TCGv_i32 neon_load_scratch(int scratch)
4724 TCGv_i32 tmp = tcg_temp_new_i32();
4725 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4726 return tmp;
4729 static void neon_store_scratch(int scratch, TCGv_i32 var)
4731 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4732 tcg_temp_free_i32(var);
4735 static inline TCGv_i32 neon_get_scalar(int size, int reg)
4737 TCGv_i32 tmp;
4738 if (size == 1) {
4739 tmp = neon_load_reg(reg & 7, reg >> 4);
4740 if (reg & 8) {
4741 gen_neon_dup_high16(tmp);
4742 } else {
4743 gen_neon_dup_low16(tmp);
4745 } else {
4746 tmp = neon_load_reg(reg & 15, reg >> 4);
4748 return tmp;
4751 static int gen_neon_unzip(int rd, int rm, int size, int q)
4753 TCGv_ptr pd, pm;
4755 if (!q && size == 2) {
4756 return 1;
4758 pd = vfp_reg_ptr(true, rd);
4759 pm = vfp_reg_ptr(true, rm);
4760 if (q) {
4761 switch (size) {
4762 case 0:
4763 gen_helper_neon_qunzip8(pd, pm);
4764 break;
4765 case 1:
4766 gen_helper_neon_qunzip16(pd, pm);
4767 break;
4768 case 2:
4769 gen_helper_neon_qunzip32(pd, pm);
4770 break;
4771 default:
4772 abort();
4774 } else {
4775 switch (size) {
4776 case 0:
4777 gen_helper_neon_unzip8(pd, pm);
4778 break;
4779 case 1:
4780 gen_helper_neon_unzip16(pd, pm);
4781 break;
4782 default:
4783 abort();
4786 tcg_temp_free_ptr(pd);
4787 tcg_temp_free_ptr(pm);
4788 return 0;
4791 static int gen_neon_zip(int rd, int rm, int size, int q)
4793 TCGv_ptr pd, pm;
4795 if (!q && size == 2) {
4796 return 1;
4798 pd = vfp_reg_ptr(true, rd);
4799 pm = vfp_reg_ptr(true, rm);
4800 if (q) {
4801 switch (size) {
4802 case 0:
4803 gen_helper_neon_qzip8(pd, pm);
4804 break;
4805 case 1:
4806 gen_helper_neon_qzip16(pd, pm);
4807 break;
4808 case 2:
4809 gen_helper_neon_qzip32(pd, pm);
4810 break;
4811 default:
4812 abort();
4814 } else {
4815 switch (size) {
4816 case 0:
4817 gen_helper_neon_zip8(pd, pm);
4818 break;
4819 case 1:
4820 gen_helper_neon_zip16(pd, pm);
4821 break;
4822 default:
4823 abort();
4826 tcg_temp_free_ptr(pd);
4827 tcg_temp_free_ptr(pm);
4828 return 0;
4831 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
4833 TCGv_i32 rd, tmp;
4835 rd = tcg_temp_new_i32();
4836 tmp = tcg_temp_new_i32();
4838 tcg_gen_shli_i32(rd, t0, 8);
4839 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4840 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4841 tcg_gen_or_i32(rd, rd, tmp);
4843 tcg_gen_shri_i32(t1, t1, 8);
4844 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4845 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4846 tcg_gen_or_i32(t1, t1, tmp);
4847 tcg_gen_mov_i32(t0, rd);
4849 tcg_temp_free_i32(tmp);
4850 tcg_temp_free_i32(rd);
4853 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
4855 TCGv_i32 rd, tmp;
4857 rd = tcg_temp_new_i32();
4858 tmp = tcg_temp_new_i32();
4860 tcg_gen_shli_i32(rd, t0, 16);
4861 tcg_gen_andi_i32(tmp, t1, 0xffff);
4862 tcg_gen_or_i32(rd, rd, tmp);
4863 tcg_gen_shri_i32(t1, t1, 16);
4864 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4865 tcg_gen_or_i32(t1, t1, tmp);
4866 tcg_gen_mov_i32(t0, rd);
4868 tcg_temp_free_i32(tmp);
4869 tcg_temp_free_i32(rd);
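/* Layout descriptors for VLDn/VSTn (multiple structures), indexed by the
 * type field of the instruction: nregs is the number of D registers
 * transferred, interleave how many registers have their elements
 * interleaved in memory, and spacing the stride between the D registers
 * in the list (1 = adjacent, 2 = every other one).
 */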
4873 static struct {
4874 int nregs;
4875 int interleave;
4876 int spacing;
4877 } neon_ls_element_type[11] = {
4878 {4, 4, 1},
4879 {4, 4, 2},
4880 {4, 1, 1},
4881 {4, 2, 1},
4882 {3, 3, 1},
4883 {3, 3, 2},
4884 {3, 1, 1},
4885 {1, 1, 1},
4886 {2, 2, 1},
4887 {2, 2, 2},
4888 {2, 1, 1}
4891 /* Translate a NEON load/store element instruction. Return nonzero if the
4892 instruction is invalid. */
4893 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
4895 int rd, rn, rm;
4896 int op;
4897 int nregs;
4898 int interleave;
4899 int spacing;
4900 int stride;
4901 int size;
4902 int reg;
4903 int pass;
4904 int load;
4905 int shift;
4906 int n;
4907 TCGv_i32 addr;
4908 TCGv_i32 tmp;
4909 TCGv_i32 tmp2;
4910 TCGv_i64 tmp64;
4912 /* FIXME: this access check should not take precedence over UNDEF
4913 * for invalid encodings; we will generate incorrect syndrome information
4914 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4916 if (s->fp_excp_el) {
4917 gen_exception_insn(s, 4, EXCP_UDEF,
4918 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
4919 return 0;
4922 if (!s->vfp_enabled)
4923 return 1;
4924 VFP_DREG_D(rd, insn);
4925 rn = (insn >> 16) & 0xf;
4926 rm = insn & 0xf;
4927 load = (insn & (1 << 21)) != 0;
4928 if ((insn & (1 << 23)) == 0) {
4929 /* Load store all elements. */
4930 op = (insn >> 8) & 0xf;
4931 size = (insn >> 6) & 3;
4932 if (op > 10)
4933 return 1;
4934 /* Catch UNDEF cases for bad values of align field */
4935 switch (op & 0xc) {
4936 case 4:
4937 if (((insn >> 5) & 1) == 1) {
4938 return 1;
4940 break;
4941 case 8:
4942 if (((insn >> 4) & 3) == 3) {
4943 return 1;
4945 break;
4946 default:
4947 break;
4949 nregs = neon_ls_element_type[op].nregs;
4950 interleave = neon_ls_element_type[op].interleave;
4951 spacing = neon_ls_element_type[op].spacing;
4952 if (size == 3 && (interleave | spacing) != 1)
4953 return 1;
4954 addr = tcg_temp_new_i32();
4955 load_reg_var(s, addr, rn);
4956 stride = (1 << size) * interleave;
4957 for (reg = 0; reg < nregs; reg++) {
4958 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
4959 load_reg_var(s, addr, rn);
4960 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
4961 } else if (interleave == 2 && nregs == 4 && reg == 2) {
4962 load_reg_var(s, addr, rn);
4963 tcg_gen_addi_i32(addr, addr, 1 << size);
4965 if (size == 3) {
4966 tmp64 = tcg_temp_new_i64();
4967 if (load) {
4968 gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
4969 neon_store_reg64(tmp64, rd);
4970 } else {
4971 neon_load_reg64(tmp64, rd);
4972 gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
4974 tcg_temp_free_i64(tmp64);
4975 tcg_gen_addi_i32(addr, addr, stride);
4976 } else {
4977 for (pass = 0; pass < 2; pass++) {
4978 if (size == 2) {
4979 if (load) {
4980 tmp = tcg_temp_new_i32();
4981 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
4982 neon_store_reg(rd, pass, tmp);
4983 } else {
4984 tmp = neon_load_reg(rd, pass);
4985 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
4986 tcg_temp_free_i32(tmp);
4988 tcg_gen_addi_i32(addr, addr, stride);
4989 } else if (size == 1) {
4990 if (load) {
4991 tmp = tcg_temp_new_i32();
4992 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
4993 tcg_gen_addi_i32(addr, addr, stride);
4994 tmp2 = tcg_temp_new_i32();
4995 gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
4996 tcg_gen_addi_i32(addr, addr, stride);
4997 tcg_gen_shli_i32(tmp2, tmp2, 16);
4998 tcg_gen_or_i32(tmp, tmp, tmp2);
4999 tcg_temp_free_i32(tmp2);
5000 neon_store_reg(rd, pass, tmp);
5001 } else {
5002 tmp = neon_load_reg(rd, pass);
5003 tmp2 = tcg_temp_new_i32();
5004 tcg_gen_shri_i32(tmp2, tmp, 16);
5005 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
5006 tcg_temp_free_i32(tmp);
5007 tcg_gen_addi_i32(addr, addr, stride);
5008 gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
5009 tcg_temp_free_i32(tmp2);
5010 tcg_gen_addi_i32(addr, addr, stride);
5012 } else /* size == 0 */ {
5013 if (load) {
5014 tmp2 = NULL;
5015 for (n = 0; n < 4; n++) {
5016 tmp = tcg_temp_new_i32();
5017 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
5018 tcg_gen_addi_i32(addr, addr, stride);
5019 if (n == 0) {
5020 tmp2 = tmp;
5021 } else {
5022 tcg_gen_shli_i32(tmp, tmp, n * 8);
5023 tcg_gen_or_i32(tmp2, tmp2, tmp);
5024 tcg_temp_free_i32(tmp);
5027 neon_store_reg(rd, pass, tmp2);
5028 } else {
5029 tmp2 = neon_load_reg(rd, pass);
5030 for (n = 0; n < 4; n++) {
5031 tmp = tcg_temp_new_i32();
5032 if (n == 0) {
5033 tcg_gen_mov_i32(tmp, tmp2);
5034 } else {
5035 tcg_gen_shri_i32(tmp, tmp2, n * 8);
5037 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
5038 tcg_temp_free_i32(tmp);
5039 tcg_gen_addi_i32(addr, addr, stride);
5041 tcg_temp_free_i32(tmp2);
5046 rd += spacing;
5048 tcg_temp_free_i32(addr);
5049 stride = nregs * 8;
5050 } else {
5051 size = (insn >> 10) & 3;
5052 if (size == 3) {
5053 /* Load single element to all lanes. */
5054 int a = (insn >> 4) & 1;
5055 if (!load) {
5056 return 1;
5058 size = (insn >> 6) & 3;
5059 nregs = ((insn >> 8) & 3) + 1;
5061 if (size == 3) {
5062 if (nregs != 4 || a == 0) {
5063 return 1;
5065                 /* For VLD4, size == 3 and a == 1 mean 32 bits at 16-byte alignment */
5066 size = 2;
5068 if (nregs == 1 && a == 1 && size == 0) {
5069 return 1;
5071 if (nregs == 3 && a == 1) {
5072 return 1;
5074 addr = tcg_temp_new_i32();
5075 load_reg_var(s, addr, rn);
5076 if (nregs == 1) {
5077 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
5078 tmp = gen_load_and_replicate(s, addr, size);
5079 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5080 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5081 if (insn & (1 << 5)) {
5082 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
5083 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
5085 tcg_temp_free_i32(tmp);
5086 } else {
5087 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
5088 stride = (insn & (1 << 5)) ? 2 : 1;
5089 for (reg = 0; reg < nregs; reg++) {
5090 tmp = gen_load_and_replicate(s, addr, size);
5091 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5092 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5093 tcg_temp_free_i32(tmp);
5094 tcg_gen_addi_i32(addr, addr, 1 << size);
5095 rd += stride;
5098 tcg_temp_free_i32(addr);
5099 stride = (1 << size) * nregs;
5100 } else {
5101 /* Single element. */
5102 int idx = (insn >> 4) & 0xf;
5103 pass = (insn >> 7) & 1;
5104 switch (size) {
5105 case 0:
5106 shift = ((insn >> 5) & 3) * 8;
5107 stride = 1;
5108 break;
5109 case 1:
5110 shift = ((insn >> 6) & 1) * 16;
5111 stride = (insn & (1 << 5)) ? 2 : 1;
5112 break;
5113 case 2:
5114 shift = 0;
5115 stride = (insn & (1 << 6)) ? 2 : 1;
5116 break;
5117 default:
5118 abort();
5120 nregs = ((insn >> 8) & 3) + 1;
5121 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5122 switch (nregs) {
5123 case 1:
5124 if (((idx & (1 << size)) != 0) ||
5125 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5126 return 1;
5128 break;
5129 case 3:
5130 if ((idx & 1) != 0) {
5131 return 1;
5133 /* fall through */
5134 case 2:
5135 if (size == 2 && (idx & 2) != 0) {
5136 return 1;
5138 break;
5139 case 4:
5140 if ((size == 2) && ((idx & 3) == 3)) {
5141 return 1;
5143 break;
5144 default:
5145 abort();
5147 if ((rd + stride * (nregs - 1)) > 31) {
5148 /* Attempts to write off the end of the register file
5149 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5150 * the neon_load_reg() would write off the end of the array.
5152 return 1;
5154 addr = tcg_temp_new_i32();
5155 load_reg_var(s, addr, rn);
5156 for (reg = 0; reg < nregs; reg++) {
5157 if (load) {
5158 tmp = tcg_temp_new_i32();
5159 switch (size) {
5160 case 0:
5161 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
5162 break;
5163 case 1:
5164 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
5165 break;
5166 case 2:
5167 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
5168 break;
5169 default: /* Avoid compiler warnings. */
5170 abort();
5172 if (size != 2) {
5173 tmp2 = neon_load_reg(rd, pass);
5174 tcg_gen_deposit_i32(tmp, tmp2, tmp,
5175 shift, size ? 16 : 8);
5176 tcg_temp_free_i32(tmp2);
5178 neon_store_reg(rd, pass, tmp);
5179 } else { /* Store */
5180 tmp = neon_load_reg(rd, pass);
5181 if (shift)
5182 tcg_gen_shri_i32(tmp, tmp, shift);
5183 switch (size) {
5184 case 0:
5185 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
5186 break;
5187 case 1:
5188 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
5189 break;
5190 case 2:
5191 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5192 break;
5194 tcg_temp_free_i32(tmp);
5196 rd += stride;
5197 tcg_gen_addi_i32(addr, addr, 1 << size);
5199 tcg_temp_free_i32(addr);
5200 stride = nregs * (1 << size);
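/* Post-indexed writeback: rm == 15 means no writeback, rm == 13 advances
 * rn by the number of bytes transferred, and any other rm adds that
 * register to rn.
 */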
5203 if (rm != 15) {
5204 TCGv_i32 base;
5206 base = load_reg(s, rn);
5207 if (rm == 13) {
5208 tcg_gen_addi_i32(base, base, stride);
5209 } else {
5210 TCGv_i32 index;
5211 index = load_reg(s, rm);
5212 tcg_gen_add_i32(base, base, index);
5213 tcg_temp_free_i32(index);
5215 store_reg(s, rn, base);
5217 return 0;
5220 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
5221 static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
5223 tcg_gen_and_i32(t, t, c);
5224 tcg_gen_andc_i32(f, f, c);
5225 tcg_gen_or_i32(dest, t, f);
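/* Equivalent bitwise form: dest = (t & c) | (f & ~c). VBSL, VBIT and VBIF
 * all reduce to this with the operands passed in different orders; see the
 * NEON_3R_LOGIC cases below.
 */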
5228 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
5230 switch (size) {
5231 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5232 case 1: gen_helper_neon_narrow_u16(dest, src); break;
5233 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
5234 default: abort();
5238 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
5240 switch (size) {
5241 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5242 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5243 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
5244 default: abort();
5248 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
5250 switch (size) {
5251 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5252 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5253 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
5254 default: abort();
5258 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
5260 switch (size) {
5261 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5262 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5263 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
5264 default: abort();
5268 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
5269 int q, int u)
5271 if (q) {
5272 if (u) {
5273 switch (size) {
5274 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5275 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5276 default: abort();
5278 } else {
5279 switch (size) {
5280 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5281 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5282 default: abort();
5285 } else {
5286 if (u) {
5287 switch (size) {
5288 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5289 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
5290 default: abort();
5292 } else {
5293 switch (size) {
5294 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5295 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5296 default: abort();
5302 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
5304 if (u) {
5305 switch (size) {
5306 case 0: gen_helper_neon_widen_u8(dest, src); break;
5307 case 1: gen_helper_neon_widen_u16(dest, src); break;
5308 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5309 default: abort();
5311 } else {
5312 switch (size) {
5313 case 0: gen_helper_neon_widen_s8(dest, src); break;
5314 case 1: gen_helper_neon_widen_s16(dest, src); break;
5315 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5316 default: abort();
5319 tcg_temp_free_i32(src);
5322 static inline void gen_neon_addl(int size)
5324 switch (size) {
5325 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5326 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5327 case 2: tcg_gen_add_i64(CPU_V001); break;
5328 default: abort();
5332 static inline void gen_neon_subl(int size)
5334 switch (size) {
5335 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5336 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5337 case 2: tcg_gen_sub_i64(CPU_V001); break;
5338 default: abort();
5342 static inline void gen_neon_negl(TCGv_i64 var, int size)
5344 switch (size) {
5345 case 0: gen_helper_neon_negl_u16(var, var); break;
5346 case 1: gen_helper_neon_negl_u32(var, var); break;
5347 case 2:
5348 tcg_gen_neg_i64(var, var);
5349 break;
5350 default: abort();
5354 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
5356 switch (size) {
5357 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5358 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
5359 default: abort();
5363 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5364 int size, int u)
5366 TCGv_i64 tmp;
5368 switch ((size << 1) | u) {
5369 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5370 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5371 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5372 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5373 case 4:
5374 tmp = gen_muls_i64_i32(a, b);
5375 tcg_gen_mov_i64(dest, tmp);
5376 tcg_temp_free_i64(tmp);
5377 break;
5378 case 5:
5379 tmp = gen_mulu_i64_i32(a, b);
5380 tcg_gen_mov_i64(dest, tmp);
5381 tcg_temp_free_i64(tmp);
5382 break;
5383 default: abort();
5386 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5387 Don't forget to clean them now. */
5388 if (size < 2) {
5389 tcg_temp_free_i32(a);
5390 tcg_temp_free_i32(b);
5394 static void gen_neon_narrow_op(int op, int u, int size,
5395 TCGv_i32 dest, TCGv_i64 src)
5397 if (op) {
5398 if (u) {
5399 gen_neon_unarrow_sats(size, dest, src);
5400 } else {
5401 gen_neon_narrow(size, dest, src);
5403 } else {
5404 if (u) {
5405 gen_neon_narrow_satu(size, dest, src);
5406 } else {
5407 gen_neon_narrow_sats(size, dest, src);
5412 /* Symbolic constants for op fields for Neon 3-register same-length.
5413 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
5414 * table A7-9.
5416 #define NEON_3R_VHADD 0
5417 #define NEON_3R_VQADD 1
5418 #define NEON_3R_VRHADD 2
5419 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
5420 #define NEON_3R_VHSUB 4
5421 #define NEON_3R_VQSUB 5
5422 #define NEON_3R_VCGT 6
5423 #define NEON_3R_VCGE 7
5424 #define NEON_3R_VSHL 8
5425 #define NEON_3R_VQSHL 9
5426 #define NEON_3R_VRSHL 10
5427 #define NEON_3R_VQRSHL 11
5428 #define NEON_3R_VMAX 12
5429 #define NEON_3R_VMIN 13
5430 #define NEON_3R_VABD 14
5431 #define NEON_3R_VABA 15
5432 #define NEON_3R_VADD_VSUB 16
5433 #define NEON_3R_VTST_VCEQ 17
5434 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
5435 #define NEON_3R_VMUL 19
5436 #define NEON_3R_VPMAX 20
5437 #define NEON_3R_VPMIN 21
5438 #define NEON_3R_VQDMULH_VQRDMULH 22
5439 #define NEON_3R_VPADD_VQRDMLAH 23
5440 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
5441 #define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
5442 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
5443 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
5444 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
5445 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
5446 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
5447 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
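/* neon_3r_sizes[] below is a bitmap of the size values each op accepts:
 * e.g. [NEON_3R_VQDMULH_VQRDMULH] = 0x6 permits only 16-bit and 32-bit
 * elements, and disas_neon_data_insn() UNDEFs any encoding whose size bit
 * is clear.
 */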
5449 static const uint8_t neon_3r_sizes[] = {
5450 [NEON_3R_VHADD] = 0x7,
5451 [NEON_3R_VQADD] = 0xf,
5452 [NEON_3R_VRHADD] = 0x7,
5453 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
5454 [NEON_3R_VHSUB] = 0x7,
5455 [NEON_3R_VQSUB] = 0xf,
5456 [NEON_3R_VCGT] = 0x7,
5457 [NEON_3R_VCGE] = 0x7,
5458 [NEON_3R_VSHL] = 0xf,
5459 [NEON_3R_VQSHL] = 0xf,
5460 [NEON_3R_VRSHL] = 0xf,
5461 [NEON_3R_VQRSHL] = 0xf,
5462 [NEON_3R_VMAX] = 0x7,
5463 [NEON_3R_VMIN] = 0x7,
5464 [NEON_3R_VABD] = 0x7,
5465 [NEON_3R_VABA] = 0x7,
5466 [NEON_3R_VADD_VSUB] = 0xf,
5467 [NEON_3R_VTST_VCEQ] = 0x7,
5468 [NEON_3R_VML] = 0x7,
5469 [NEON_3R_VMUL] = 0x7,
5470 [NEON_3R_VPMAX] = 0x7,
5471 [NEON_3R_VPMIN] = 0x7,
5472 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
5473 [NEON_3R_VPADD_VQRDMLAH] = 0x7,
5474 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
5475 [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
5476 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
5477 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
5478 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
5479 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
5480 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
5481 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
5484 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
5485 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
5486 * table A7-13.
5488 #define NEON_2RM_VREV64 0
5489 #define NEON_2RM_VREV32 1
5490 #define NEON_2RM_VREV16 2
5491 #define NEON_2RM_VPADDL 4
5492 #define NEON_2RM_VPADDL_U 5
5493 #define NEON_2RM_AESE 6 /* Includes AESD */
5494 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
5495 #define NEON_2RM_VCLS 8
5496 #define NEON_2RM_VCLZ 9
5497 #define NEON_2RM_VCNT 10
5498 #define NEON_2RM_VMVN 11
5499 #define NEON_2RM_VPADAL 12
5500 #define NEON_2RM_VPADAL_U 13
5501 #define NEON_2RM_VQABS 14
5502 #define NEON_2RM_VQNEG 15
5503 #define NEON_2RM_VCGT0 16
5504 #define NEON_2RM_VCGE0 17
5505 #define NEON_2RM_VCEQ0 18
5506 #define NEON_2RM_VCLE0 19
5507 #define NEON_2RM_VCLT0 20
5508 #define NEON_2RM_SHA1H 21
5509 #define NEON_2RM_VABS 22
5510 #define NEON_2RM_VNEG 23
5511 #define NEON_2RM_VCGT0_F 24
5512 #define NEON_2RM_VCGE0_F 25
5513 #define NEON_2RM_VCEQ0_F 26
5514 #define NEON_2RM_VCLE0_F 27
5515 #define NEON_2RM_VCLT0_F 28
5516 #define NEON_2RM_VABS_F 30
5517 #define NEON_2RM_VNEG_F 31
5518 #define NEON_2RM_VSWP 32
5519 #define NEON_2RM_VTRN 33
5520 #define NEON_2RM_VUZP 34
5521 #define NEON_2RM_VZIP 35
5522 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
5523 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
5524 #define NEON_2RM_VSHLL 38
5525 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
5526 #define NEON_2RM_VRINTN 40
5527 #define NEON_2RM_VRINTX 41
5528 #define NEON_2RM_VRINTA 42
5529 #define NEON_2RM_VRINTZ 43
5530 #define NEON_2RM_VCVT_F16_F32 44
5531 #define NEON_2RM_VRINTM 45
5532 #define NEON_2RM_VCVT_F32_F16 46
5533 #define NEON_2RM_VRINTP 47
5534 #define NEON_2RM_VCVTAU 48
5535 #define NEON_2RM_VCVTAS 49
5536 #define NEON_2RM_VCVTNU 50
5537 #define NEON_2RM_VCVTNS 51
5538 #define NEON_2RM_VCVTPU 52
5539 #define NEON_2RM_VCVTPS 53
5540 #define NEON_2RM_VCVTMU 54
5541 #define NEON_2RM_VCVTMS 55
5542 #define NEON_2RM_VRECPE 56
5543 #define NEON_2RM_VRSQRTE 57
5544 #define NEON_2RM_VRECPE_F 58
5545 #define NEON_2RM_VRSQRTE_F 59
5546 #define NEON_2RM_VCVT_FS 60
5547 #define NEON_2RM_VCVT_FU 61
5548 #define NEON_2RM_VCVT_SF 62
5549 #define NEON_2RM_VCVT_UF 63
5551 static int neon_2rm_is_float_op(int op)
5553 /* Return true if this neon 2reg-misc op is float-to-float */
5554 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
5555 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
5556 op == NEON_2RM_VRINTM ||
5557 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
5558 op >= NEON_2RM_VRECPE_F);
5561 static bool neon_2rm_is_v8_op(int op)
5563 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5564 switch (op) {
5565 case NEON_2RM_VRINTN:
5566 case NEON_2RM_VRINTA:
5567 case NEON_2RM_VRINTM:
5568 case NEON_2RM_VRINTP:
5569 case NEON_2RM_VRINTZ:
5570 case NEON_2RM_VRINTX:
5571 case NEON_2RM_VCVTAU:
5572 case NEON_2RM_VCVTAS:
5573 case NEON_2RM_VCVTNU:
5574 case NEON_2RM_VCVTNS:
5575 case NEON_2RM_VCVTPU:
5576 case NEON_2RM_VCVTPS:
5577 case NEON_2RM_VCVTMU:
5578 case NEON_2RM_VCVTMS:
5579 return true;
5580 default:
5581 return false;
5585 /* Each entry in this array has bit n set if the insn allows
5586 * size value n (otherwise it will UNDEF). Since unallocated
5587 * op values will have no bits set they always UNDEF.
5589 static const uint8_t neon_2rm_sizes[] = {
5590 [NEON_2RM_VREV64] = 0x7,
5591 [NEON_2RM_VREV32] = 0x3,
5592 [NEON_2RM_VREV16] = 0x1,
5593 [NEON_2RM_VPADDL] = 0x7,
5594 [NEON_2RM_VPADDL_U] = 0x7,
5595 [NEON_2RM_AESE] = 0x1,
5596 [NEON_2RM_AESMC] = 0x1,
5597 [NEON_2RM_VCLS] = 0x7,
5598 [NEON_2RM_VCLZ] = 0x7,
5599 [NEON_2RM_VCNT] = 0x1,
5600 [NEON_2RM_VMVN] = 0x1,
5601 [NEON_2RM_VPADAL] = 0x7,
5602 [NEON_2RM_VPADAL_U] = 0x7,
5603 [NEON_2RM_VQABS] = 0x7,
5604 [NEON_2RM_VQNEG] = 0x7,
5605 [NEON_2RM_VCGT0] = 0x7,
5606 [NEON_2RM_VCGE0] = 0x7,
5607 [NEON_2RM_VCEQ0] = 0x7,
5608 [NEON_2RM_VCLE0] = 0x7,
5609 [NEON_2RM_VCLT0] = 0x7,
5610 [NEON_2RM_SHA1H] = 0x4,
5611 [NEON_2RM_VABS] = 0x7,
5612 [NEON_2RM_VNEG] = 0x7,
5613 [NEON_2RM_VCGT0_F] = 0x4,
5614 [NEON_2RM_VCGE0_F] = 0x4,
5615 [NEON_2RM_VCEQ0_F] = 0x4,
5616 [NEON_2RM_VCLE0_F] = 0x4,
5617 [NEON_2RM_VCLT0_F] = 0x4,
5618 [NEON_2RM_VABS_F] = 0x4,
5619 [NEON_2RM_VNEG_F] = 0x4,
5620 [NEON_2RM_VSWP] = 0x1,
5621 [NEON_2RM_VTRN] = 0x7,
5622 [NEON_2RM_VUZP] = 0x7,
5623 [NEON_2RM_VZIP] = 0x7,
5624 [NEON_2RM_VMOVN] = 0x7,
5625 [NEON_2RM_VQMOVN] = 0x7,
5626 [NEON_2RM_VSHLL] = 0x7,
5627 [NEON_2RM_SHA1SU1] = 0x4,
5628 [NEON_2RM_VRINTN] = 0x4,
5629 [NEON_2RM_VRINTX] = 0x4,
5630 [NEON_2RM_VRINTA] = 0x4,
5631 [NEON_2RM_VRINTZ] = 0x4,
5632 [NEON_2RM_VCVT_F16_F32] = 0x2,
5633 [NEON_2RM_VRINTM] = 0x4,
5634 [NEON_2RM_VCVT_F32_F16] = 0x2,
5635 [NEON_2RM_VRINTP] = 0x4,
5636 [NEON_2RM_VCVTAU] = 0x4,
5637 [NEON_2RM_VCVTAS] = 0x4,
5638 [NEON_2RM_VCVTNU] = 0x4,
5639 [NEON_2RM_VCVTNS] = 0x4,
5640 [NEON_2RM_VCVTPU] = 0x4,
5641 [NEON_2RM_VCVTPS] = 0x4,
5642 [NEON_2RM_VCVTMU] = 0x4,
5643 [NEON_2RM_VCVTMS] = 0x4,
5644 [NEON_2RM_VRECPE] = 0x4,
5645 [NEON_2RM_VRSQRTE] = 0x4,
5646 [NEON_2RM_VRECPE_F] = 0x4,
5647 [NEON_2RM_VRSQRTE_F] = 0x4,
5648 [NEON_2RM_VCVT_FS] = 0x4,
5649 [NEON_2RM_VCVT_FU] = 0x4,
5650 [NEON_2RM_VCVT_SF] = 0x4,
5651 [NEON_2RM_VCVT_UF] = 0x4,
5655 /* Expand v8.1 simd helper. */
5656 static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
5657 int q, int rd, int rn, int rm)
5659 if (arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
5660 int opr_sz = (1 + q) * 8;
5661 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
5662 vfp_reg_offset(1, rn),
5663 vfp_reg_offset(1, rm), cpu_env,
5664 opr_sz, opr_sz, 0, fn);
5665 return 0;
5667 return 1;
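/* Callers pass the gvec expanders for the v8.1 VQRDMLAH/VQRDMLSH group
 * (e.g. gen_helper_gvec_qrdmlah_s16 below); opr_sz is 8 bytes for a
 * D-register operation and 16 for a Q-register one, and the nonzero return
 * makes the insn UNDEF when ARM_FEATURE_V8_RDM is absent.
 */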
5670 /* Translate a NEON data processing instruction. Return nonzero if the
5671 instruction is invalid.
5672 We process data in a mixture of 32-bit and 64-bit chunks.
5673 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
5675 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
5677 int op;
5678 int q;
5679 int rd, rn, rm;
5680 int size;
5681 int shift;
5682 int pass;
5683 int count;
5684 int pairwise;
5685 int u;
5686 uint32_t imm, mask;
5687 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
5688 TCGv_ptr ptr1, ptr2, ptr3;
5689 TCGv_i64 tmp64;
5691 /* FIXME: this access check should not take precedence over UNDEF
5692 * for invalid encodings; we will generate incorrect syndrome information
5693 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5695 if (s->fp_excp_el) {
5696 gen_exception_insn(s, 4, EXCP_UDEF,
5697 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
5698 return 0;
5701 if (!s->vfp_enabled)
5702 return 1;
5703 q = (insn & (1 << 6)) != 0;
5704 u = (insn >> 24) & 1;
5705 VFP_DREG_D(rd, insn);
5706 VFP_DREG_N(rn, insn);
5707 VFP_DREG_M(rm, insn);
5708 size = (insn >> 20) & 3;
5709 if ((insn & (1 << 23)) == 0) {
5710 /* Three register same length. */
5711 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
5712 /* Catch invalid op and bad size combinations: UNDEF */
5713 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5714 return 1;
5716 /* All insns of this form UNDEF for either this condition or the
5717 * superset of cases "Q==1"; we catch the latter later.
5719 if (q && ((rd | rn | rm) & 1)) {
5720 return 1;
5722 switch (op) {
5723 case NEON_3R_SHA:
5724 /* The SHA-1/SHA-256 3-register instructions require special
5725 * treatment here, as their size field is overloaded as an
5726 * op type selector, and they all consume their input in a
5727 * single pass.
5729 if (!q) {
5730 return 1;
5732 if (!u) { /* SHA-1 */
5733 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
5734 return 1;
5736 ptr1 = vfp_reg_ptr(true, rd);
5737 ptr2 = vfp_reg_ptr(true, rn);
5738 ptr3 = vfp_reg_ptr(true, rm);
5739 tmp4 = tcg_const_i32(size);
5740 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
5741 tcg_temp_free_i32(tmp4);
5742 } else { /* SHA-256 */
5743 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
5744 return 1;
5746 ptr1 = vfp_reg_ptr(true, rd);
5747 ptr2 = vfp_reg_ptr(true, rn);
5748 ptr3 = vfp_reg_ptr(true, rm);
5749 switch (size) {
5750 case 0:
5751 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
5752 break;
5753 case 1:
5754 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
5755 break;
5756 case 2:
5757 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
5758 break;
5761 tcg_temp_free_ptr(ptr1);
5762 tcg_temp_free_ptr(ptr2);
5763 tcg_temp_free_ptr(ptr3);
5764 return 0;
5766 case NEON_3R_VPADD_VQRDMLAH:
5767 if (!u) {
5768 break; /* VPADD */
5770 /* VQRDMLAH */
5771 switch (size) {
5772 case 1:
5773 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
5774 q, rd, rn, rm);
5775 case 2:
5776 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
5777 q, rd, rn, rm);
5779 return 1;
5781 case NEON_3R_VFM_VQRDMLSH:
5782 if (!u) {
5783             /* VFMA, VFMS */
5784 if (size == 1) {
5785 return 1;
5787 break;
5789 /* VQRDMLSH */
5790 switch (size) {
5791 case 1:
5792 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
5793 q, rd, rn, rm);
5794 case 2:
5795 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
5796 q, rd, rn, rm);
5798 return 1;
5800 if (size == 3 && op != NEON_3R_LOGIC) {
5801 /* 64-bit element instructions. */
5802 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5803 neon_load_reg64(cpu_V0, rn + pass);
5804 neon_load_reg64(cpu_V1, rm + pass);
5805 switch (op) {
5806 case NEON_3R_VQADD:
5807 if (u) {
5808 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5809 cpu_V0, cpu_V1);
5810 } else {
5811 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5812 cpu_V0, cpu_V1);
5814 break;
5815 case NEON_3R_VQSUB:
5816 if (u) {
5817 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5818 cpu_V0, cpu_V1);
5819 } else {
5820 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5821 cpu_V0, cpu_V1);
5823 break;
5824 case NEON_3R_VSHL:
5825 if (u) {
5826 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5827 } else {
5828 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5830 break;
5831 case NEON_3R_VQSHL:
5832 if (u) {
5833 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5834 cpu_V1, cpu_V0);
5835 } else {
5836 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5837 cpu_V1, cpu_V0);
5839 break;
5840 case NEON_3R_VRSHL:
5841 if (u) {
5842 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
5843 } else {
5844 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5846 break;
5847 case NEON_3R_VQRSHL:
5848 if (u) {
5849 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5850 cpu_V1, cpu_V0);
5851 } else {
5852 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5853 cpu_V1, cpu_V0);
5855 break;
5856 case NEON_3R_VADD_VSUB:
5857 if (u) {
5858 tcg_gen_sub_i64(CPU_V001);
5859 } else {
5860 tcg_gen_add_i64(CPU_V001);
5862 break;
5863 default:
5864 abort();
5866 neon_store_reg64(cpu_V0, rd + pass);
5868 return 0;
5870 pairwise = 0;
5871 switch (op) {
5872 case NEON_3R_VSHL:
5873 case NEON_3R_VQSHL:
5874 case NEON_3R_VRSHL:
5875 case NEON_3R_VQRSHL:
5877 int rtmp;
5878 /* Shift instruction operands are reversed. */
5879 rtmp = rn;
5880 rn = rm;
5881 rm = rtmp;
5883 break;
5884 case NEON_3R_VPADD_VQRDMLAH:
5885 case NEON_3R_VPMAX:
5886 case NEON_3R_VPMIN:
5887 pairwise = 1;
5888 break;
5889 case NEON_3R_FLOAT_ARITH:
5890 pairwise = (u && size < 2); /* if VPADD (float) */
5891 break;
5892 case NEON_3R_FLOAT_MINMAX:
5893 pairwise = u; /* if VPMIN/VPMAX (float) */
5894 break;
5895 case NEON_3R_FLOAT_CMP:
5896 if (!u && size) {
5897 /* no encoding for U=0 C=1x */
5898 return 1;
5900 break;
5901 case NEON_3R_FLOAT_ACMP:
5902 if (!u) {
5903 return 1;
5905 break;
5906 case NEON_3R_FLOAT_MISC:
5907 /* VMAXNM/VMINNM in ARMv8 */
5908 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
5909 return 1;
5911 break;
5912 case NEON_3R_VMUL:
5913 if (u && (size != 0)) {
5914 /* UNDEF on invalid size for polynomial subcase */
5915 return 1;
5917 break;
5918 case NEON_3R_VFM_VQRDMLSH:
5919 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
5920 return 1;
5922 break;
5923 default:
5924 break;
5927 if (pairwise && q) {
5928 /* All the pairwise insns UNDEF if Q is set */
5929 return 1;
5932 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5934 if (pairwise) {
5935 /* Pairwise. */
5936 if (pass < 1) {
5937 tmp = neon_load_reg(rn, 0);
5938 tmp2 = neon_load_reg(rn, 1);
5939 } else {
5940 tmp = neon_load_reg(rm, 0);
5941 tmp2 = neon_load_reg(rm, 1);
5943 } else {
5944 /* Elementwise. */
5945 tmp = neon_load_reg(rn, pass);
5946 tmp2 = neon_load_reg(rm, pass);
5948 switch (op) {
5949 case NEON_3R_VHADD:
5950 GEN_NEON_INTEGER_OP(hadd);
5951 break;
5952 case NEON_3R_VQADD:
5953 GEN_NEON_INTEGER_OP_ENV(qadd);
5954 break;
5955 case NEON_3R_VRHADD:
5956 GEN_NEON_INTEGER_OP(rhadd);
5957 break;
5958 case NEON_3R_LOGIC: /* Logic ops. */
5959 switch ((u << 2) | size) {
5960 case 0: /* VAND */
5961 tcg_gen_and_i32(tmp, tmp, tmp2);
5962 break;
5963 case 1: /* BIC */
5964 tcg_gen_andc_i32(tmp, tmp, tmp2);
5965 break;
5966 case 2: /* VORR */
5967 tcg_gen_or_i32(tmp, tmp, tmp2);
5968 break;
5969 case 3: /* VORN */
5970 tcg_gen_orc_i32(tmp, tmp, tmp2);
5971 break;
5972 case 4: /* VEOR */
5973 tcg_gen_xor_i32(tmp, tmp, tmp2);
5974 break;
5975 case 5: /* VBSL */
5976 tmp3 = neon_load_reg(rd, pass);
5977 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
5978 tcg_temp_free_i32(tmp3);
5979 break;
5980 case 6: /* VBIT */
5981 tmp3 = neon_load_reg(rd, pass);
5982 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
5983 tcg_temp_free_i32(tmp3);
5984 break;
5985 case 7: /* VBIF */
5986 tmp3 = neon_load_reg(rd, pass);
5987 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
5988 tcg_temp_free_i32(tmp3);
5989 break;
5991 break;
5992 case NEON_3R_VHSUB:
5993 GEN_NEON_INTEGER_OP(hsub);
5994 break;
5995 case NEON_3R_VQSUB:
5996 GEN_NEON_INTEGER_OP_ENV(qsub);
5997 break;
5998 case NEON_3R_VCGT:
5999 GEN_NEON_INTEGER_OP(cgt);
6000 break;
6001 case NEON_3R_VCGE:
6002 GEN_NEON_INTEGER_OP(cge);
6003 break;
6004 case NEON_3R_VSHL:
6005 GEN_NEON_INTEGER_OP(shl);
6006 break;
6007 case NEON_3R_VQSHL:
6008 GEN_NEON_INTEGER_OP_ENV(qshl);
6009 break;
6010 case NEON_3R_VRSHL:
6011 GEN_NEON_INTEGER_OP(rshl);
6012 break;
6013 case NEON_3R_VQRSHL:
6014 GEN_NEON_INTEGER_OP_ENV(qrshl);
6015 break;
6016 case NEON_3R_VMAX:
6017 GEN_NEON_INTEGER_OP(max);
6018 break;
6019 case NEON_3R_VMIN:
6020 GEN_NEON_INTEGER_OP(min);
6021 break;
6022 case NEON_3R_VABD:
6023 GEN_NEON_INTEGER_OP(abd);
6024 break;
6025 case NEON_3R_VABA:
6026 GEN_NEON_INTEGER_OP(abd);
6027 tcg_temp_free_i32(tmp2);
6028 tmp2 = neon_load_reg(rd, pass);
6029 gen_neon_add(size, tmp, tmp2);
6030 break;
6031 case NEON_3R_VADD_VSUB:
6032 if (!u) { /* VADD */
6033 gen_neon_add(size, tmp, tmp2);
6034 } else { /* VSUB */
6035 switch (size) {
6036 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
6037 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
6038 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
6039 default: abort();
6042 break;
6043 case NEON_3R_VTST_VCEQ:
6044 if (!u) { /* VTST */
6045 switch (size) {
6046 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
6047 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
6048 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
6049 default: abort();
6051 } else { /* VCEQ */
6052 switch (size) {
6053 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6054 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6055 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6056 default: abort();
6059 break;
6060         case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
6061 switch (size) {
6062 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6063 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6064 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6065 default: abort();
6067 tcg_temp_free_i32(tmp2);
6068 tmp2 = neon_load_reg(rd, pass);
6069 if (u) { /* VMLS */
6070 gen_neon_rsb(size, tmp, tmp2);
6071 } else { /* VMLA */
6072 gen_neon_add(size, tmp, tmp2);
6074 break;
6075 case NEON_3R_VMUL:
6076 if (u) { /* polynomial */
6077 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
6078 } else { /* Integer */
6079 switch (size) {
6080 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6081 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6082 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6083 default: abort();
6086 break;
6087 case NEON_3R_VPMAX:
6088 GEN_NEON_INTEGER_OP(pmax);
6089 break;
6090 case NEON_3R_VPMIN:
6091 GEN_NEON_INTEGER_OP(pmin);
6092 break;
6093 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
6094 if (!u) { /* VQDMULH */
6095 switch (size) {
6096 case 1:
6097 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6098 break;
6099 case 2:
6100 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6101 break;
6102 default: abort();
6104 } else { /* VQRDMULH */
6105 switch (size) {
6106 case 1:
6107 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6108 break;
6109 case 2:
6110 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6111 break;
6112 default: abort();
6115 break;
6116 case NEON_3R_VPADD_VQRDMLAH:
6117 switch (size) {
6118 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
6119 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
6120 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
6121 default: abort();
6123 break;
6124 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
6126 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6127 switch ((u << 2) | size) {
6128 case 0: /* VADD */
6129 case 4: /* VPADD */
6130 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6131 break;
6132 case 2: /* VSUB */
6133 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
6134 break;
6135 case 6: /* VABD */
6136 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
6137 break;
6138 default:
6139 abort();
6141 tcg_temp_free_ptr(fpstatus);
6142 break;
6144 case NEON_3R_FLOAT_MULTIPLY:
6146 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6147 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6148 if (!u) {
6149 tcg_temp_free_i32(tmp2);
6150 tmp2 = neon_load_reg(rd, pass);
6151 if (size == 0) {
6152 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6153 } else {
6154 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6157 tcg_temp_free_ptr(fpstatus);
6158 break;
6160 case NEON_3R_FLOAT_CMP:
6162 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6163 if (!u) {
6164 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6165 } else {
6166 if (size == 0) {
6167 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6168 } else {
6169 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6172 tcg_temp_free_ptr(fpstatus);
6173 break;
6175 case NEON_3R_FLOAT_ACMP:
6177 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6178 if (size == 0) {
6179 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
6180 } else {
6181 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
6183 tcg_temp_free_ptr(fpstatus);
6184 break;
6186 case NEON_3R_FLOAT_MINMAX:
6188 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6189 if (size == 0) {
6190 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
6191 } else {
6192 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
6194 tcg_temp_free_ptr(fpstatus);
6195 break;
6197 case NEON_3R_FLOAT_MISC:
6198 if (u) {
6199 /* VMAXNM/VMINNM */
6200 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6201 if (size == 0) {
6202 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
6203 } else {
6204 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
6206 tcg_temp_free_ptr(fpstatus);
6207 } else {
6208 if (size == 0) {
6209 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6210 } else {
6211 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6214 break;
6215 case NEON_3R_VFM_VQRDMLSH:
6217 /* VFMA, VFMS: fused multiply-add */
6218 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6219 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6220 if (size) {
6221 /* VFMS */
6222 gen_helper_vfp_negs(tmp, tmp);
6224 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6225 tcg_temp_free_i32(tmp3);
6226 tcg_temp_free_ptr(fpstatus);
6227 break;
6229 default:
6230 abort();
6232 tcg_temp_free_i32(tmp2);
6234 /* Save the result. For elementwise operations we can put it
6235 straight into the destination register. For pairwise operations
6236 we have to be careful to avoid clobbering the source operands. */
6237 if (pairwise && rd == rm) {
6238 neon_store_scratch(pass, tmp);
6239 } else {
6240 neon_store_reg(rd, pass, tmp);
6243 } /* for pass */
6244 if (pairwise && rd == rm) {
6245 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6246 tmp = neon_load_scratch(pass);
6247 neon_store_reg(rd, pass, tmp);
6250 /* End of 3 register same size operations. */
6251 } else if (insn & (1 << 4)) {
6252 if ((insn & 0x00380080) != 0) {
6253 /* Two registers and shift. */
6254 op = (insn >> 8) & 0xf;
6255 if (insn & (1 << 7)) {
6256 /* 64-bit shift. */
6257 if (op > 7) {
6258 return 1;
6260 size = 3;
6261 } else {
6262 size = 2;
6263 while ((insn & (1 << (size + 19))) == 0)
6264 size--;
6266 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
6267 /* To avoid excessive duplication of ops we implement shift
6268 by immediate using the variable shift operations. */
6269 if (op < 8) {
6270 /* Shift by immediate:
6271 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
6272 if (q && ((rd | rm) & 1)) {
6273 return 1;
6275 if (!u && (op == 4 || op == 6)) {
6276 return 1;
6278 /* Right shifts are encoded as N - shift, where N is the
6279 element size in bits. */
6280 if (op <= 4)
6281 shift = shift - (1 << (size + 3));
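/* Worked example: a byte-element VSHR #3 arrives with the low three bits
 * of the immediate equal to 5, so shift becomes 5 - 8 == -3 and the
 * variable-shift helpers used below treat the negative count as a right
 * shift by 3.
 */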
6282 if (size == 3) {
6283 count = q + 1;
6284 } else {
6285 count = q ? 4: 2;
6287 switch (size) {
6288 case 0:
6289 imm = (uint8_t) shift;
6290 imm |= imm << 8;
6291 imm |= imm << 16;
6292 break;
6293 case 1:
6294 imm = (uint16_t) shift;
6295 imm |= imm << 16;
6296 break;
6297 case 2:
6298 case 3:
6299 imm = shift;
6300 break;
6301 default:
6302 abort();
6305 for (pass = 0; pass < count; pass++) {
6306 if (size == 3) {
6307 neon_load_reg64(cpu_V0, rm + pass);
6308 tcg_gen_movi_i64(cpu_V1, imm);
6309 switch (op) {
6310 case 0: /* VSHR */
6311 case 1: /* VSRA */
6312 if (u)
6313 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6314 else
6315 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
6316 break;
6317 case 2: /* VRSHR */
6318 case 3: /* VRSRA */
6319 if (u)
6320 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
6321 else
6322 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
6323 break;
6324 case 4: /* VSRI */
6325 case 5: /* VSHL, VSLI */
6326 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6327 break;
6328 case 6: /* VQSHLU */
6329 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6330 cpu_V0, cpu_V1);
6331 break;
6332 case 7: /* VQSHL */
6333 if (u) {
6334 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
6335 cpu_V0, cpu_V1);
6336 } else {
6337 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
6338 cpu_V0, cpu_V1);
6340 break;
6342 if (op == 1 || op == 3) {
6343 /* Accumulate. */
6344 neon_load_reg64(cpu_V1, rd + pass);
6345 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6346 } else if (op == 4 || (op == 5 && u)) {
6347 /* Insert */
6348 neon_load_reg64(cpu_V1, rd + pass);
6349 uint64_t mask;
6350 if (shift < -63 || shift > 63) {
6351 mask = 0;
6352 } else {
6353 if (op == 4) {
6354 mask = 0xffffffffffffffffull >> -shift;
6355 } else {
6356 mask = 0xffffffffffffffffull << shift;
6359 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6360 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6362 neon_store_reg64(cpu_V0, rd + pass);
6363 } else { /* size < 3 */
6364 /* Operands in T0 and T1. */
6365 tmp = neon_load_reg(rm, pass);
6366 tmp2 = tcg_temp_new_i32();
6367 tcg_gen_movi_i32(tmp2, imm);
6368 switch (op) {
6369 case 0: /* VSHR */
6370 case 1: /* VSRA */
6371 GEN_NEON_INTEGER_OP(shl);
6372 break;
6373 case 2: /* VRSHR */
6374 case 3: /* VRSRA */
6375 GEN_NEON_INTEGER_OP(rshl);
6376 break;
6377 case 4: /* VSRI */
6378 case 5: /* VSHL, VSLI */
6379 switch (size) {
6380 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6381 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6382 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
6383 default: abort();
6385 break;
6386 case 6: /* VQSHLU */
6387 switch (size) {
6388 case 0:
6389 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6390 tmp, tmp2);
6391 break;
6392 case 1:
6393 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6394 tmp, tmp2);
6395 break;
6396 case 2:
6397 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6398 tmp, tmp2);
6399 break;
6400 default:
6401 abort();
6403 break;
6404 case 7: /* VQSHL */
6405 GEN_NEON_INTEGER_OP_ENV(qshl);
6406 break;
6408 tcg_temp_free_i32(tmp2);
6410 if (op == 1 || op == 3) {
6411 /* Accumulate. */
6412 tmp2 = neon_load_reg(rd, pass);
6413 gen_neon_add(size, tmp, tmp2);
6414 tcg_temp_free_i32(tmp2);
6415 } else if (op == 4 || (op == 5 && u)) {
6416 /* Insert */
6417 switch (size) {
6418 case 0:
6419 if (op == 4)
6420 mask = 0xff >> -shift;
6421 else
6422 mask = (uint8_t)(0xff << shift);
6423 mask |= mask << 8;
6424 mask |= mask << 16;
6425 break;
6426 case 1:
6427 if (op == 4)
6428 mask = 0xffff >> -shift;
6429 else
6430 mask = (uint16_t)(0xffff << shift);
6431 mask |= mask << 16;
6432 break;
6433 case 2:
6434 if (shift < -31 || shift > 31) {
6435 mask = 0;
6436 } else {
6437 if (op == 4)
6438 mask = 0xffffffffu >> -shift;
6439 else
6440 mask = 0xffffffffu << shift;
6442 break;
6443 default:
6444 abort();
6446 tmp2 = neon_load_reg(rd, pass);
6447 tcg_gen_andi_i32(tmp, tmp, mask);
6448 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
6449 tcg_gen_or_i32(tmp, tmp, tmp2);
6450 tcg_temp_free_i32(tmp2);
6452 neon_store_reg(rd, pass, tmp);
6454 } /* for pass */
6455 } else if (op < 10) {
6456 /* Shift by immediate and narrow:
6457 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
6458 int input_unsigned = (op == 8) ? !u : u;
6459 if (rm & 1) {
6460 return 1;
6462 shift = shift - (1 << (size + 3));
6463 size++;
6464 if (size == 3) {
6465 tmp64 = tcg_const_i64(shift);
6466 neon_load_reg64(cpu_V0, rm);
6467 neon_load_reg64(cpu_V1, rm + 1);
6468 for (pass = 0; pass < 2; pass++) {
6469 TCGv_i64 in;
6470 if (pass == 0) {
6471 in = cpu_V0;
6472 } else {
6473 in = cpu_V1;
6475 if (q) {
6476 if (input_unsigned) {
6477 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
6478 } else {
6479 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
6481 } else {
6482 if (input_unsigned) {
6483 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
6484 } else {
6485 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
6488 tmp = tcg_temp_new_i32();
6489 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6490 neon_store_reg(rd, pass, tmp);
6491 } /* for pass */
6492 tcg_temp_free_i64(tmp64);
6493 } else {
6494 if (size == 1) {
6495 imm = (uint16_t)shift;
6496 imm |= imm << 16;
6497 } else {
6498 /* size == 2 */
6499 imm = (uint32_t)shift;
6501 tmp2 = tcg_const_i32(imm);
6502 tmp4 = neon_load_reg(rm + 1, 0);
6503 tmp5 = neon_load_reg(rm + 1, 1);
6504 for (pass = 0; pass < 2; pass++) {
6505 if (pass == 0) {
6506 tmp = neon_load_reg(rm, 0);
6507 } else {
6508 tmp = tmp4;
6510 gen_neon_shift_narrow(size, tmp, tmp2, q,
6511 input_unsigned);
6512 if (pass == 0) {
6513 tmp3 = neon_load_reg(rm, 1);
6514 } else {
6515 tmp3 = tmp5;
6517 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6518 input_unsigned);
6519 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
6520 tcg_temp_free_i32(tmp);
6521 tcg_temp_free_i32(tmp3);
6522 tmp = tcg_temp_new_i32();
6523 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6524 neon_store_reg(rd, pass, tmp);
6525 } /* for pass */
6526 tcg_temp_free_i32(tmp2);
6528 } else if (op == 10) {
6529 /* VSHLL, VMOVL */
6530 if (q || (rd & 1)) {
6531 return 1;
6533 tmp = neon_load_reg(rm, 0);
6534 tmp2 = neon_load_reg(rm, 1);
6535 for (pass = 0; pass < 2; pass++) {
6536 if (pass == 1)
6537 tmp = tmp2;
6539 gen_neon_widen(cpu_V0, tmp, size, u);
6541 if (shift != 0) {
6542 /* The shift is less than the width of the source
6543 type, so we can just shift the whole register. */
6544 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
6545 /* Widen the result of shift: we need to clear
6546 * the potential overflow bits resulting from
6547 * left bits of the narrow input appearing as
 6548                              * right bits of the left neighbour narrow
6549 * input. */
6550 if (size < 2 || !u) {
6551 uint64_t imm64;
6552 if (size == 0) {
6553 imm = (0xffu >> (8 - shift));
6554 imm |= imm << 16;
6555 } else if (size == 1) {
6556 imm = 0xffff >> (16 - shift);
6557 } else {
6558 /* size == 2 */
6559 imm = 0xffffffff >> (32 - shift);
6561 if (size < 2) {
6562 imm64 = imm | (((uint64_t)imm) << 32);
6563 } else {
6564 imm64 = imm;
6566 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
6569 neon_store_reg64(cpu_V0, rd + pass);
6571 } else if (op >= 14) {
6572 /* VCVT fixed-point. */
6573 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6574 return 1;
6576 /* We have already masked out the must-be-1 top bit of imm6,
6577 * hence this 32-shift where the ARM ARM has 64-imm6.
6579 shift = 32 - shift;
6580 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6581 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
6582 if (!(op & 1)) {
6583 if (u)
6584 gen_vfp_ulto(0, shift, 1);
6585 else
6586 gen_vfp_slto(0, shift, 1);
6587 } else {
6588 if (u)
6589 gen_vfp_toul(0, shift, 1);
6590 else
6591 gen_vfp_tosl(0, shift, 1);
6593 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
6595 } else {
6596 return 1;
6598 } else { /* (insn & 0x00380080) == 0 */
6599 int invert;
6600 if (q && (rd & 1)) {
6601 return 1;
6604 op = (insn >> 8) & 0xf;
6605 /* One register and immediate. */
6606 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6607 invert = (insn & (1 << 5)) != 0;
6608 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6609 * We choose to not special-case this and will behave as if a
6610 * valid constant encoding of 0 had been given.
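            /* Expand the 8-bit immediate into a 32-bit per-lane constant
             * according to the cmode field (held in 'op'), following the
             * AdvSIMD modified-immediate encoding.
             */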
6612 switch (op) {
6613 case 0: case 1:
6614 /* no-op */
6615 break;
6616 case 2: case 3:
6617 imm <<= 8;
6618 break;
6619 case 4: case 5:
6620 imm <<= 16;
6621 break;
6622 case 6: case 7:
6623 imm <<= 24;
6624 break;
6625 case 8: case 9:
6626 imm |= imm << 16;
6627 break;
6628 case 10: case 11:
6629 imm = (imm << 8) | (imm << 24);
6630 break;
6631 case 12:
6632 imm = (imm << 8) | 0xff;
6633 break;
6634 case 13:
6635 imm = (imm << 16) | 0xffff;
6636 break;
6637 case 14:
6638 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6639 if (invert)
6640 imm = ~imm;
6641 break;
6642 case 15:
6643 if (invert) {
6644 return 1;
6646 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6647 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6648 break;
6650 if (invert)
6651 imm = ~imm;
6653 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6654 if (op & 1 && op < 12) {
6655 tmp = neon_load_reg(rd, pass);
6656 if (invert) {
6657 /* The immediate value has already been inverted, so
6658 BIC becomes AND. */
6659 tcg_gen_andi_i32(tmp, tmp, imm);
6660 } else {
6661 tcg_gen_ori_i32(tmp, tmp, imm);
6663 } else {
6664 /* VMOV, VMVN. */
6665 tmp = tcg_temp_new_i32();
6666 if (op == 14 && invert) {
6667 int n;
6668 uint32_t val;
6669 val = 0;
6670 for (n = 0; n < 4; n++) {
6671 if (imm & (1 << (n + (pass & 1) * 4)))
6672 val |= 0xff << (n * 8);
6674 tcg_gen_movi_i32(tmp, val);
6675 } else {
6676 tcg_gen_movi_i32(tmp, imm);
6679 neon_store_reg(rd, pass, tmp);
6682 } else { /* (insn & 0x00800010 == 0x00800000) */
6683 if (size != 3) {
6684 op = (insn >> 8) & 0xf;
6685 if ((insn & (1 << 6)) == 0) {
6686 /* Three registers of different lengths. */
6687 int src1_wide;
6688 int src2_wide;
6689 int prewiden;
6690 /* undefreq: bit 0 : UNDEF if size == 0
6691 * bit 1 : UNDEF if size == 1
6692 * bit 2 : UNDEF if size == 2
6693 * bit 3 : UNDEF if U == 1
6694 * Note that [2:0] set implies 'always UNDEF'
6696 int undefreq;
6697 /* prewiden, src1_wide, src2_wide, undefreq */
6698 static const int neon_3reg_wide[16][4] = {
6699 {1, 0, 0, 0}, /* VADDL */
6700 {1, 1, 0, 0}, /* VADDW */
6701 {1, 0, 0, 0}, /* VSUBL */
6702 {1, 1, 0, 0}, /* VSUBW */
6703 {0, 1, 1, 0}, /* VADDHN */
6704 {0, 0, 0, 0}, /* VABAL */
6705 {0, 1, 1, 0}, /* VSUBHN */
6706 {0, 0, 0, 0}, /* VABDL */
6707 {0, 0, 0, 0}, /* VMLAL */
6708 {0, 0, 0, 9}, /* VQDMLAL */
6709 {0, 0, 0, 0}, /* VMLSL */
6710 {0, 0, 0, 9}, /* VQDMLSL */
6711 {0, 0, 0, 0}, /* Integer VMULL */
6712 {0, 0, 0, 1}, /* VQDMULL */
6713 {0, 0, 0, 0xa}, /* Polynomial VMULL */
6714 {0, 0, 0, 7}, /* Reserved: always UNDEF */
6717 prewiden = neon_3reg_wide[op][0];
6718 src1_wide = neon_3reg_wide[op][1];
6719 src2_wide = neon_3reg_wide[op][2];
6720 undefreq = neon_3reg_wide[op][3];
6722 if ((undefreq & (1 << size)) ||
6723 ((undefreq & 8) && u)) {
6724 return 1;
6726 if ((src1_wide && (rn & 1)) ||
6727 (src2_wide && (rm & 1)) ||
6728 (!src2_wide && (rd & 1))) {
6729 return 1;
6732 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6733 * outside the loop below as it only performs a single pass.
6735 if (op == 14 && size == 2) {
6736 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6738 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
6739 return 1;
6741 tcg_rn = tcg_temp_new_i64();
6742 tcg_rm = tcg_temp_new_i64();
6743 tcg_rd = tcg_temp_new_i64();
6744 neon_load_reg64(tcg_rn, rn);
6745 neon_load_reg64(tcg_rm, rm);
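                    /* The 128-bit product is produced in two halves: the _lo
                     * helper yields bits [63:0] and the _hi helper bits
                     * [127:64] of the result.
                     */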
6746 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6747 neon_store_reg64(tcg_rd, rd);
6748 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6749 neon_store_reg64(tcg_rd, rd + 1);
6750 tcg_temp_free_i64(tcg_rn);
6751 tcg_temp_free_i64(tcg_rm);
6752 tcg_temp_free_i64(tcg_rd);
6753 return 0;
6756 /* Avoid overlapping operands. Wide source operands are
6757 always aligned so will never overlap with wide
6758 destinations in problematic ways. */
6759 if (rd == rm && !src2_wide) {
6760 tmp = neon_load_reg(rm, 1);
6761 neon_store_scratch(2, tmp);
6762 } else if (rd == rn && !src1_wide) {
6763 tmp = neon_load_reg(rn, 1);
6764 neon_store_scratch(2, tmp);
6766 tmp3 = NULL;
6767 for (pass = 0; pass < 2; pass++) {
6768 if (src1_wide) {
6769 neon_load_reg64(cpu_V0, rn + pass);
6770 tmp = NULL;
6771 } else {
6772 if (pass == 1 && rd == rn) {
6773 tmp = neon_load_scratch(2);
6774 } else {
6775 tmp = neon_load_reg(rn, pass);
6777 if (prewiden) {
6778 gen_neon_widen(cpu_V0, tmp, size, u);
6781 if (src2_wide) {
6782 neon_load_reg64(cpu_V1, rm + pass);
6783 tmp2 = NULL;
6784 } else {
6785 if (pass == 1 && rd == rm) {
6786 tmp2 = neon_load_scratch(2);
6787 } else {
6788 tmp2 = neon_load_reg(rm, pass);
6790 if (prewiden) {
6791 gen_neon_widen(cpu_V1, tmp2, size, u);
6794 switch (op) {
6795 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6796 gen_neon_addl(size);
6797 break;
6798 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6799 gen_neon_subl(size);
6800 break;
6801 case 5: case 7: /* VABAL, VABDL */
6802 switch ((size << 1) | u) {
6803 case 0:
6804 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6805 break;
6806 case 1:
6807 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6808 break;
6809 case 2:
6810 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6811 break;
6812 case 3:
6813 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6814 break;
6815 case 4:
6816 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6817 break;
6818 case 5:
6819 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6820 break;
6821 default: abort();
6823 tcg_temp_free_i32(tmp2);
6824 tcg_temp_free_i32(tmp);
6825 break;
6826 case 8: case 9: case 10: case 11: case 12: case 13:
6827 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6828 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6829 break;
6830 case 14: /* Polynomial VMULL */
6831 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
6832 tcg_temp_free_i32(tmp2);
6833 tcg_temp_free_i32(tmp);
6834 break;
6835 default: /* 15 is RESERVED: caught earlier */
6836 abort();
6838 if (op == 13) {
6839 /* VQDMULL */
6840 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6841 neon_store_reg64(cpu_V0, rd + pass);
6842 } else if (op == 5 || (op >= 8 && op <= 11)) {
6843 /* Accumulate. */
6844 neon_load_reg64(cpu_V1, rd + pass);
6845 switch (op) {
6846 case 10: /* VMLSL */
6847 gen_neon_negl(cpu_V0, size);
6848 /* Fall through */
6849 case 5: case 8: /* VABAL, VMLAL */
6850 gen_neon_addl(size);
6851 break;
6852 case 9: case 11: /* VQDMLAL, VQDMLSL */
6853 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6854 if (op == 11) {
6855 gen_neon_negl(cpu_V0, size);
6857 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6858 break;
6859 default:
6860 abort();
6862 neon_store_reg64(cpu_V0, rd + pass);
6863 } else if (op == 4 || op == 6) {
6864 /* Narrowing operation. */
6865 tmp = tcg_temp_new_i32();
6866 if (!u) {
6867 switch (size) {
6868 case 0:
6869 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6870 break;
6871 case 1:
6872 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6873 break;
6874 case 2:
6875 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6876 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6877 break;
6878 default: abort();
6880 } else {
6881 switch (size) {
6882 case 0:
6883 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6884 break;
6885 case 1:
6886 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6887 break;
6888 case 2:
6889 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6890 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6891 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6892 break;
6893 default: abort();
6896 if (pass == 0) {
6897 tmp3 = tmp;
6898 } else {
6899 neon_store_reg(rd, 0, tmp3);
6900 neon_store_reg(rd, 1, tmp);
6902 } else {
6903 /* Write back the result. */
6904 neon_store_reg64(cpu_V0, rd + pass);
6907 } else {
6908 /* Two registers and a scalar. NB that for ops of this form
6909 * the ARM ARM labels bit 24 as Q, but it is in our variable
6910 * 'u', not 'q'.
6912 if (size == 0) {
6913 return 1;
6915 switch (op) {
6916 case 1: /* Float VMLA scalar */
6917 case 5: /* Floating point VMLS scalar */
6918 case 9: /* Floating point VMUL scalar */
6919 if (size == 1) {
6920 return 1;
6922 /* fall through */
6923 case 0: /* Integer VMLA scalar */
6924 case 4: /* Integer VMLS scalar */
6925 case 8: /* Integer VMUL scalar */
6926 case 12: /* VQDMULH scalar */
6927 case 13: /* VQRDMULH scalar */
6928 if (u && ((rd | rn) & 1)) {
6929 return 1;
6931 tmp = neon_get_scalar(size, rm);
6932 neon_store_scratch(0, tmp);
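                /* The scalar is parked in a scratch slot and reloaded on
                 * every pass, because tmp doubles as the per-pass result
                 * register and is clobbered by the helpers below.
                 */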
6933 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6934 tmp = neon_load_scratch(0);
6935 tmp2 = neon_load_reg(rn, pass);
6936 if (op == 12) {
6937 if (size == 1) {
6938 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6939 } else {
6940 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6942 } else if (op == 13) {
6943 if (size == 1) {
6944 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6945 } else {
6946 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6948 } else if (op & 1) {
6949 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6950 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6951 tcg_temp_free_ptr(fpstatus);
6952 } else {
6953 switch (size) {
6954 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6955 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6956 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6957 default: abort();
6960 tcg_temp_free_i32(tmp2);
6961 if (op < 8) {
6962 /* Accumulate. */
6963 tmp2 = neon_load_reg(rd, pass);
6964 switch (op) {
6965 case 0:
6966 gen_neon_add(size, tmp, tmp2);
6967 break;
6968 case 1:
6970 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6971 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6972 tcg_temp_free_ptr(fpstatus);
6973 break;
6975 case 4:
6976 gen_neon_rsb(size, tmp, tmp2);
6977 break;
6978 case 5:
6980 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6981 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6982 tcg_temp_free_ptr(fpstatus);
6983 break;
6985 default:
6986 abort();
6988 tcg_temp_free_i32(tmp2);
6990 neon_store_reg(rd, pass, tmp);
6992 break;
6993 case 3: /* VQDMLAL scalar */
6994 case 7: /* VQDMLSL scalar */
6995 case 11: /* VQDMULL scalar */
6996 if (u == 1) {
6997 return 1;
6999 /* fall through */
 7000             case 2: /* VMLAL scalar */
7001 case 6: /* VMLSL scalar */
7002 case 10: /* VMULL scalar */
7003 if (rd & 1) {
7004 return 1;
7006 tmp2 = neon_get_scalar(size, rm);
7007 /* We need a copy of tmp2 because gen_neon_mull
7008 * deletes it during pass 0. */
7009 tmp4 = tcg_temp_new_i32();
7010 tcg_gen_mov_i32(tmp4, tmp2);
7011 tmp3 = neon_load_reg(rn, 1);
7013 for (pass = 0; pass < 2; pass++) {
7014 if (pass == 0) {
7015 tmp = neon_load_reg(rn, 0);
7016 } else {
7017 tmp = tmp3;
7018 tmp2 = tmp4;
7020 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
7021 if (op != 11) {
7022 neon_load_reg64(cpu_V1, rd + pass);
7024 switch (op) {
7025 case 6:
7026 gen_neon_negl(cpu_V0, size);
7027 /* Fall through */
7028 case 2:
7029 gen_neon_addl(size);
7030 break;
7031 case 3: case 7:
7032 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7033 if (op == 7) {
7034 gen_neon_negl(cpu_V0, size);
7036 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
7037 break;
7038 case 10:
7039 /* no-op */
7040 break;
7041 case 11:
7042 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7043 break;
7044 default:
7045 abort();
7047 neon_store_reg64(cpu_V0, rd + pass);
7049 break;
7050 case 14: /* VQRDMLAH scalar */
7051 case 15: /* VQRDMLSH scalar */
7053 NeonGenThreeOpEnvFn *fn;
7055 if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
7056 return 1;
7058 if (u && ((rd | rn) & 1)) {
7059 return 1;
7061 if (op == 14) {
7062 if (size == 1) {
7063 fn = gen_helper_neon_qrdmlah_s16;
7064 } else {
7065 fn = gen_helper_neon_qrdmlah_s32;
7067 } else {
7068 if (size == 1) {
7069 fn = gen_helper_neon_qrdmlsh_s16;
7070 } else {
7071 fn = gen_helper_neon_qrdmlsh_s32;
7075 tmp2 = neon_get_scalar(size, rm);
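                /* Each pass computes rd[pass] = fn(rn[pass], scalar, rd[pass]);
                 * the current destination element is passed in as the
                 * accumulator operand.
                 */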
7076 for (pass = 0; pass < (u ? 4 : 2); pass++) {
7077 tmp = neon_load_reg(rn, pass);
7078 tmp3 = neon_load_reg(rd, pass);
7079 fn(tmp, cpu_env, tmp, tmp2, tmp3);
7080 tcg_temp_free_i32(tmp3);
7081 neon_store_reg(rd, pass, tmp);
7083 tcg_temp_free_i32(tmp2);
7085 break;
7086 default:
7087 g_assert_not_reached();
7090 } else { /* size == 3 */
7091 if (!u) {
7092 /* Extract. */
7093 imm = (insn >> 8) & 0xf;
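            /* VEXT: extract a contiguous run of bytes from the concatenation
             * of rn (low half) and rm (high half), starting imm bytes into rn.
             * imm == 0 and imm == 8 reduce to plain register copies; other
             * offsets are assembled below with 64-bit shifts and ORs.
             */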
7095 if (imm > 7 && !q)
7096 return 1;
7098 if (q && ((rd | rn | rm) & 1)) {
7099 return 1;
7102 if (imm == 0) {
7103 neon_load_reg64(cpu_V0, rn);
7104 if (q) {
7105 neon_load_reg64(cpu_V1, rn + 1);
7107 } else if (imm == 8) {
7108 neon_load_reg64(cpu_V0, rn + 1);
7109 if (q) {
7110 neon_load_reg64(cpu_V1, rm);
7112 } else if (q) {
7113 tmp64 = tcg_temp_new_i64();
7114 if (imm < 8) {
7115 neon_load_reg64(cpu_V0, rn);
7116 neon_load_reg64(tmp64, rn + 1);
7117 } else {
7118 neon_load_reg64(cpu_V0, rn + 1);
7119 neon_load_reg64(tmp64, rm);
7121 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
7122 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
7123 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7124 if (imm < 8) {
7125 neon_load_reg64(cpu_V1, rm);
7126 } else {
7127 neon_load_reg64(cpu_V1, rm + 1);
7128 imm -= 8;
7130 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
7131 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
7132 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
7133 tcg_temp_free_i64(tmp64);
7134 } else {
7135 /* BUGFIX */
7136 neon_load_reg64(cpu_V0, rn);
7137 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
7138 neon_load_reg64(cpu_V1, rm);
7139 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
7140 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7142 neon_store_reg64(cpu_V0, rd);
7143 if (q) {
7144 neon_store_reg64(cpu_V1, rd + 1);
7146 } else if ((insn & (1 << 11)) == 0) {
7147 /* Two register misc. */
7148 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
7149 size = (insn >> 18) & 3;
7150 /* UNDEF for unknown op values and bad op-size combinations */
7151 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
7152 return 1;
7154 if (neon_2rm_is_v8_op(op) &&
7155 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7156 return 1;
7158 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7159 q && ((rm | rd) & 1)) {
7160 return 1;
7162 switch (op) {
7163 case NEON_2RM_VREV64:
7164 for (pass = 0; pass < (q ? 2 : 1); pass++) {
7165 tmp = neon_load_reg(rm, pass * 2);
7166 tmp2 = neon_load_reg(rm, pass * 2 + 1);
7167 switch (size) {
7168 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7169 case 1: gen_swap_half(tmp); break;
7170 case 2: /* no-op */ break;
7171 default: abort();
7173 neon_store_reg(rd, pass * 2 + 1, tmp);
7174 if (size == 2) {
7175 neon_store_reg(rd, pass * 2, tmp2);
7176 } else {
7177 switch (size) {
7178 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7179 case 1: gen_swap_half(tmp2); break;
7180 default: abort();
7182 neon_store_reg(rd, pass * 2, tmp2);
7185 break;
7186 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7187 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
7188 for (pass = 0; pass < q + 1; pass++) {
7189 tmp = neon_load_reg(rm, pass * 2);
7190 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7191 tmp = neon_load_reg(rm, pass * 2 + 1);
7192 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7193 switch (size) {
7194 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7195 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7196 case 2: tcg_gen_add_i64(CPU_V001); break;
7197 default: abort();
7199 if (op >= NEON_2RM_VPADAL) {
7200 /* Accumulate. */
7201 neon_load_reg64(cpu_V1, rd + pass);
7202 gen_neon_addl(size);
7204 neon_store_reg64(cpu_V0, rd + pass);
7206 break;
7207 case NEON_2RM_VTRN:
7208 if (size == 2) {
7209 int n;
7210 for (n = 0; n < (q ? 4 : 2); n += 2) {
7211 tmp = neon_load_reg(rm, n);
7212 tmp2 = neon_load_reg(rd, n + 1);
7213 neon_store_reg(rm, n, tmp2);
7214 neon_store_reg(rd, n + 1, tmp);
7216 } else {
7217 goto elementwise;
7219 break;
7220 case NEON_2RM_VUZP:
7221 if (gen_neon_unzip(rd, rm, size, q)) {
7222 return 1;
7224 break;
7225 case NEON_2RM_VZIP:
7226 if (gen_neon_zip(rd, rm, size, q)) {
7227 return 1;
7229 break;
7230 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7231 /* also VQMOVUN; op field and mnemonics don't line up */
7232 if (rm & 1) {
7233 return 1;
7235 tmp2 = NULL;
7236 for (pass = 0; pass < 2; pass++) {
7237 neon_load_reg64(cpu_V0, rm + pass);
7238 tmp = tcg_temp_new_i32();
7239 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7240 tmp, cpu_V0);
7241 if (pass == 0) {
7242 tmp2 = tmp;
7243 } else {
7244 neon_store_reg(rd, 0, tmp2);
7245 neon_store_reg(rd, 1, tmp);
7248 break;
7249 case NEON_2RM_VSHLL:
7250 if (q || (rd & 1)) {
7251 return 1;
7253 tmp = neon_load_reg(rm, 0);
7254 tmp2 = neon_load_reg(rm, 1);
7255 for (pass = 0; pass < 2; pass++) {
7256 if (pass == 1)
7257 tmp = tmp2;
7258 gen_neon_widen(cpu_V0, tmp, size, 1);
7259 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
7260 neon_store_reg64(cpu_V0, rd + pass);
7262 break;
7263 case NEON_2RM_VCVT_F16_F32:
7265 TCGv_ptr fpst;
7266 TCGv_i32 ahp;
7268 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
7269 q || (rm & 1)) {
7270 return 1;
7272 tmp = tcg_temp_new_i32();
7273 tmp2 = tcg_temp_new_i32();
7274 fpst = get_fpstatus_ptr(true);
7275 ahp = get_ahp_flag();
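                /* Convert the four single-precision elements of rm to half
                 * precision and pack them, two per 32-bit word, into rd.
                 */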
7276 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
7277 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
7278 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
7279 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
7280 tcg_gen_shli_i32(tmp2, tmp2, 16);
7281 tcg_gen_or_i32(tmp2, tmp2, tmp);
7282 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
7283 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
7284 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7285 neon_store_reg(rd, 0, tmp2);
7286 tmp2 = tcg_temp_new_i32();
7287 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
7288 tcg_gen_shli_i32(tmp2, tmp2, 16);
7289 tcg_gen_or_i32(tmp2, tmp2, tmp);
7290 neon_store_reg(rd, 1, tmp2);
7291 tcg_temp_free_i32(tmp);
7292 tcg_temp_free_i32(ahp);
7293 tcg_temp_free_ptr(fpst);
7294 break;
7296 case NEON_2RM_VCVT_F32_F16:
7298 TCGv_ptr fpst;
7299 TCGv_i32 ahp;
7300 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
7301 q || (rd & 1)) {
7302 return 1;
7304 fpst = get_fpstatus_ptr(true);
7305 ahp = get_ahp_flag();
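                /* Unpack the four half-precision elements of rm (two per
                 * 32-bit word) and widen each to single precision in rd.
                 */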
7306 tmp3 = tcg_temp_new_i32();
7307 tmp = neon_load_reg(rm, 0);
7308 tmp2 = neon_load_reg(rm, 1);
7309 tcg_gen_ext16u_i32(tmp3, tmp);
7310 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7311 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7312 tcg_gen_shri_i32(tmp3, tmp, 16);
7313 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7314 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7315 tcg_temp_free_i32(tmp);
7316 tcg_gen_ext16u_i32(tmp3, tmp2);
7317 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7318 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7319 tcg_gen_shri_i32(tmp3, tmp2, 16);
7320 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
7321 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7322 tcg_temp_free_i32(tmp2);
7323 tcg_temp_free_i32(tmp3);
7324 tcg_temp_free_i32(ahp);
7325 tcg_temp_free_ptr(fpst);
7326 break;
7328 case NEON_2RM_AESE: case NEON_2RM_AESMC:
7329 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
7330 || ((rm | rd) & 1)) {
7331 return 1;
7333 ptr1 = vfp_reg_ptr(true, rd);
7334 ptr2 = vfp_reg_ptr(true, rm);
7336 /* Bit 6 is the lowest opcode bit; it distinguishes between
7337 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7339 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7341 if (op == NEON_2RM_AESE) {
7342 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
7343 } else {
7344 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
7346 tcg_temp_free_ptr(ptr1);
7347 tcg_temp_free_ptr(ptr2);
7348 tcg_temp_free_i32(tmp3);
7349 break;
7350 case NEON_2RM_SHA1H:
7351 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
7352 || ((rm | rd) & 1)) {
7353 return 1;
7355 ptr1 = vfp_reg_ptr(true, rd);
7356 ptr2 = vfp_reg_ptr(true, rm);
7358 gen_helper_crypto_sha1h(ptr1, ptr2);
7360 tcg_temp_free_ptr(ptr1);
7361 tcg_temp_free_ptr(ptr2);
7362 break;
7363 case NEON_2RM_SHA1SU1:
7364 if ((rm | rd) & 1) {
7365 return 1;
7367 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7368 if (q) {
7369 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
7370 return 1;
7372 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
7373 return 1;
7375 ptr1 = vfp_reg_ptr(true, rd);
7376 ptr2 = vfp_reg_ptr(true, rm);
7377 if (q) {
7378 gen_helper_crypto_sha256su0(ptr1, ptr2);
7379 } else {
7380 gen_helper_crypto_sha1su1(ptr1, ptr2);
7382 tcg_temp_free_ptr(ptr1);
7383 tcg_temp_free_ptr(ptr2);
7384 break;
7385 default:
7386 elementwise:
7387 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7388 if (neon_2rm_is_float_op(op)) {
7389 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7390 neon_reg_offset(rm, pass));
7391 tmp = NULL;
7392 } else {
7393 tmp = neon_load_reg(rm, pass);
7395 switch (op) {
7396 case NEON_2RM_VREV32:
7397 switch (size) {
7398 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7399 case 1: gen_swap_half(tmp); break;
7400 default: abort();
7402 break;
7403 case NEON_2RM_VREV16:
7404 gen_rev16(tmp);
7405 break;
7406 case NEON_2RM_VCLS:
7407 switch (size) {
7408 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7409 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7410 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
7411 default: abort();
7413 break;
7414 case NEON_2RM_VCLZ:
7415 switch (size) {
7416 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7417 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7418 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
7419 default: abort();
7421 break;
7422 case NEON_2RM_VCNT:
7423 gen_helper_neon_cnt_u8(tmp, tmp);
7424 break;
7425 case NEON_2RM_VMVN:
7426 tcg_gen_not_i32(tmp, tmp);
7427 break;
7428 case NEON_2RM_VQABS:
7429 switch (size) {
7430 case 0:
7431 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7432 break;
7433 case 1:
7434 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7435 break;
7436 case 2:
7437 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7438 break;
7439 default: abort();
7441 break;
7442 case NEON_2RM_VQNEG:
7443 switch (size) {
7444 case 0:
7445 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7446 break;
7447 case 1:
7448 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7449 break;
7450 case 2:
7451 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7452 break;
7453 default: abort();
7455 break;
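                    /* Compare-against-zero: VCLE0 and VCLT0 are implemented
                     * as the bitwise inverse of VCGT0 and VCGE0 respectively,
                     * since the compare helpers return all-ones or all-zeros.
                     */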
7456 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
7457 tmp2 = tcg_const_i32(0);
7458 switch(size) {
7459 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7460 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7461 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
7462 default: abort();
7464 tcg_temp_free_i32(tmp2);
7465 if (op == NEON_2RM_VCLE0) {
7466 tcg_gen_not_i32(tmp, tmp);
7468 break;
7469 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
7470 tmp2 = tcg_const_i32(0);
7471 switch(size) {
7472 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7473 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7474 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
7475 default: abort();
7477 tcg_temp_free_i32(tmp2);
7478 if (op == NEON_2RM_VCLT0) {
7479 tcg_gen_not_i32(tmp, tmp);
7481 break;
7482 case NEON_2RM_VCEQ0:
7483 tmp2 = tcg_const_i32(0);
7484 switch(size) {
7485 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7486 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7487 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
7488 default: abort();
7490 tcg_temp_free_i32(tmp2);
7491 break;
7492 case NEON_2RM_VABS:
7493 switch(size) {
7494 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7495 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7496 case 2: tcg_gen_abs_i32(tmp, tmp); break;
7497 default: abort();
7499 break;
7500 case NEON_2RM_VNEG:
7501 tmp2 = tcg_const_i32(0);
7502 gen_neon_rsb(size, tmp, tmp2);
7503 tcg_temp_free_i32(tmp2);
7504 break;
7505 case NEON_2RM_VCGT0_F:
7507 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7508 tmp2 = tcg_const_i32(0);
7509 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
7510 tcg_temp_free_i32(tmp2);
7511 tcg_temp_free_ptr(fpstatus);
7512 break;
7514 case NEON_2RM_VCGE0_F:
7516 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7517 tmp2 = tcg_const_i32(0);
7518 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
7519 tcg_temp_free_i32(tmp2);
7520 tcg_temp_free_ptr(fpstatus);
7521 break;
7523 case NEON_2RM_VCEQ0_F:
7525 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7526 tmp2 = tcg_const_i32(0);
7527 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
7528 tcg_temp_free_i32(tmp2);
7529 tcg_temp_free_ptr(fpstatus);
7530 break;
7532 case NEON_2RM_VCLE0_F:
7534 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7535 tmp2 = tcg_const_i32(0);
7536 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
7537 tcg_temp_free_i32(tmp2);
7538 tcg_temp_free_ptr(fpstatus);
7539 break;
7541 case NEON_2RM_VCLT0_F:
7543 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7544 tmp2 = tcg_const_i32(0);
7545 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
7546 tcg_temp_free_i32(tmp2);
7547 tcg_temp_free_ptr(fpstatus);
7548 break;
7550 case NEON_2RM_VABS_F:
7551 gen_vfp_abs(0);
7552 break;
7553 case NEON_2RM_VNEG_F:
7554 gen_vfp_neg(0);
7555 break;
7556 case NEON_2RM_VSWP:
7557 tmp2 = neon_load_reg(rd, pass);
7558 neon_store_reg(rm, pass, tmp2);
7559 break;
7560 case NEON_2RM_VTRN:
7561 tmp2 = neon_load_reg(rd, pass);
7562 switch (size) {
7563 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7564 case 1: gen_neon_trn_u16(tmp, tmp2); break;
7565 default: abort();
7567 neon_store_reg(rm, pass, tmp2);
7568 break;
7569 case NEON_2RM_VRINTN:
7570 case NEON_2RM_VRINTA:
7571 case NEON_2RM_VRINTM:
7572 case NEON_2RM_VRINTP:
7573 case NEON_2RM_VRINTZ:
7575 TCGv_i32 tcg_rmode;
7576 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7577 int rmode;
7579 if (op == NEON_2RM_VRINTZ) {
7580 rmode = FPROUNDING_ZERO;
7581 } else {
7582 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7585 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7586 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7587 cpu_env);
7588 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7589 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7590 cpu_env);
7591 tcg_temp_free_ptr(fpstatus);
7592 tcg_temp_free_i32(tcg_rmode);
7593 break;
7595 case NEON_2RM_VRINTX:
7597 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7598 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7599 tcg_temp_free_ptr(fpstatus);
7600 break;
7602 case NEON_2RM_VCVTAU:
7603 case NEON_2RM_VCVTAS:
7604 case NEON_2RM_VCVTNU:
7605 case NEON_2RM_VCVTNS:
7606 case NEON_2RM_VCVTPU:
7607 case NEON_2RM_VCVTPS:
7608 case NEON_2RM_VCVTMU:
7609 case NEON_2RM_VCVTMS:
7611 bool is_signed = !extract32(insn, 7, 1);
7612 TCGv_ptr fpst = get_fpstatus_ptr(1);
7613 TCGv_i32 tcg_rmode, tcg_shift;
7614 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7616 tcg_shift = tcg_const_i32(0);
7617 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7618 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7619 cpu_env);
7621 if (is_signed) {
7622 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7623 tcg_shift, fpst);
7624 } else {
7625 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7626 tcg_shift, fpst);
7629 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7630 cpu_env);
7631 tcg_temp_free_i32(tcg_rmode);
7632 tcg_temp_free_i32(tcg_shift);
7633 tcg_temp_free_ptr(fpst);
7634 break;
7636 case NEON_2RM_VRECPE:
7638 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7639 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7640 tcg_temp_free_ptr(fpstatus);
7641 break;
7643 case NEON_2RM_VRSQRTE:
7645 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7646 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7647 tcg_temp_free_ptr(fpstatus);
7648 break;
7650 case NEON_2RM_VRECPE_F:
7652 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7653 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7654 tcg_temp_free_ptr(fpstatus);
7655 break;
7657 case NEON_2RM_VRSQRTE_F:
7659 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7660 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7661 tcg_temp_free_ptr(fpstatus);
7662 break;
7664 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
7665 gen_vfp_sito(0, 1);
7666 break;
7667 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
7668 gen_vfp_uito(0, 1);
7669 break;
7670 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
7671 gen_vfp_tosiz(0, 1);
7672 break;
7673 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
7674 gen_vfp_touiz(0, 1);
7675 break;
7676 default:
7677 /* Reserved op values were caught by the
7678 * neon_2rm_sizes[] check earlier.
7680 abort();
7682 if (neon_2rm_is_float_op(op)) {
7683 tcg_gen_st_f32(cpu_F0s, cpu_env,
7684 neon_reg_offset(rd, pass));
7685 } else {
7686 neon_store_reg(rd, pass, tmp);
7689 break;
7691 } else if ((insn & (1 << 10)) == 0) {
7692 /* VTBL, VTBX. */
7693 int n = ((insn >> 8) & 3) + 1;
7694 if ((rn + n) > 32) {
7695 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7696 * helper function running off the end of the register file.
7698 return 1;
7700 n <<= 3;
7701 if (insn & (1 << 6)) {
7702 tmp = neon_load_reg(rd, 0);
7703 } else {
7704 tmp = tcg_temp_new_i32();
7705 tcg_gen_movi_i32(tmp, 0);
7707 tmp2 = neon_load_reg(rm, 0);
7708 ptr1 = vfp_reg_ptr(true, rn);
7709 tmp5 = tcg_const_i32(n);
7710 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7711 tcg_temp_free_i32(tmp);
7712 if (insn & (1 << 6)) {
7713 tmp = neon_load_reg(rd, 1);
7714 } else {
7715 tmp = tcg_temp_new_i32();
7716 tcg_gen_movi_i32(tmp, 0);
7718 tmp3 = neon_load_reg(rm, 1);
7719 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
7720 tcg_temp_free_i32(tmp5);
7721 tcg_temp_free_ptr(ptr1);
7722 neon_store_reg(rd, 0, tmp2);
7723 neon_store_reg(rd, 1, tmp3);
7724 tcg_temp_free_i32(tmp);
7725 } else if ((insn & 0x380) == 0) {
7726 /* VDUP */
7727 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7728 return 1;
7730 if (insn & (1 << 19)) {
7731 tmp = neon_load_reg(rm, 1);
7732 } else {
7733 tmp = neon_load_reg(rm, 0);
7735 if (insn & (1 << 16)) {
7736 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
7737 } else if (insn & (1 << 17)) {
7738 if ((insn >> 18) & 1)
7739 gen_neon_dup_high16(tmp);
7740 else
7741 gen_neon_dup_low16(tmp);
7743 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7744 tmp2 = tcg_temp_new_i32();
7745 tcg_gen_mov_i32(tmp2, tmp);
7746 neon_store_reg(rd, pass, tmp2);
7748 tcg_temp_free_i32(tmp);
7749 } else {
7750 return 1;
7754 return 0;
7757 /* Advanced SIMD three registers of the same length extension.
7758 * 31 25 23 22 20 16 12 11 10 9 8 3 0
7759 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7760 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7761 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7763 static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
7765 gen_helper_gvec_3_ptr *fn_gvec_ptr;
7766 int rd, rn, rm, rot, size, opr_sz;
7767 TCGv_ptr fpst;
7768 bool q;
7770 q = extract32(insn, 6, 1);
7771 VFP_DREG_D(rd, insn);
7772 VFP_DREG_N(rn, insn);
7773 VFP_DREG_M(rm, insn);
7774 if ((rd | rn | rm) & q) {
7775 return 1;
7778 if ((insn & 0xfe200f10) == 0xfc200800) {
7779 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
7780 size = extract32(insn, 20, 1);
7781 rot = extract32(insn, 23, 2);
7782 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
7783 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7784 return 1;
7786 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
7787 } else if ((insn & 0xfea00f10) == 0xfc800800) {
7788 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
7789 size = extract32(insn, 20, 1);
7790 rot = extract32(insn, 24, 1);
7791 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
7792 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7793 return 1;
7795 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
7796 } else {
7797 return 1;
7800 if (s->fp_excp_el) {
7801 gen_exception_insn(s, 4, EXCP_UDEF,
7802 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
7803 return 0;
7805 if (!s->vfp_enabled) {
7806 return 1;
7809 opr_sz = (1 + q) * 8;
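    /* opr_sz set above is the vector length in bytes: 8 for a D-register
     * operation, 16 when Q is set (a register pair).
     */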
7810 fpst = get_fpstatus_ptr(1);
7811 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
7812 vfp_reg_offset(1, rn),
7813 vfp_reg_offset(1, rm), fpst,
7814 opr_sz, opr_sz, rot, fn_gvec_ptr);
7815 tcg_temp_free_ptr(fpst);
7816 return 0;
7819 /* Advanced SIMD two registers and a scalar extension.
7820 * 31 24 23 22 20 16 12 11 10 9 8 3 0
7821 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7822 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7823 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7827 static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
7829 int rd, rn, rm, rot, size, opr_sz;
7830 TCGv_ptr fpst;
7831 bool q;
7833 q = extract32(insn, 6, 1);
7834 VFP_DREG_D(rd, insn);
7835 VFP_DREG_N(rn, insn);
7836 VFP_DREG_M(rm, insn);
7837 if ((rd | rn) & q) {
7838 return 1;
7841 if ((insn & 0xff000f10) == 0xfe000800) {
7842 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
7843 rot = extract32(insn, 20, 2);
7844 size = extract32(insn, 23, 1);
7845 if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
7846 || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
7847 return 1;
7849 } else {
7850 return 1;
7853 if (s->fp_excp_el) {
7854 gen_exception_insn(s, 4, EXCP_UDEF,
7855 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
7856 return 0;
7858 if (!s->vfp_enabled) {
7859 return 1;
7862 opr_sz = (1 + q) * 8;
7863 fpst = get_fpstatus_ptr(1);
7864 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
7865 vfp_reg_offset(1, rn),
7866 vfp_reg_offset(1, rm), fpst,
7867 opr_sz, opr_sz, rot,
7868 size ? gen_helper_gvec_fcmlas_idx
7869 : gen_helper_gvec_fcmlah_idx);
7870 tcg_temp_free_ptr(fpst);
7871 return 0;
7874 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
7876 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7877 const ARMCPRegInfo *ri;
7879 cpnum = (insn >> 8) & 0xf;
7881 /* First check for coprocessor space used for XScale/iwMMXt insns */
7882 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
7883 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7884 return 1;
7886 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7887 return disas_iwmmxt_insn(s, insn);
7888 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7889 return disas_dsp_insn(s, insn);
7891 return 1;
7894 /* Otherwise treat as a generic register access */
7895 is64 = (insn & (1 << 25)) == 0;
7896 if (!is64 && ((insn & (1 << 4)) == 0)) {
7897 /* cdp */
7898 return 1;
7901 crm = insn & 0xf;
7902 if (is64) {
7903 crn = 0;
7904 opc1 = (insn >> 4) & 0xf;
7905 opc2 = 0;
7906 rt2 = (insn >> 16) & 0xf;
7907 } else {
7908 crn = (insn >> 16) & 0xf;
7909 opc1 = (insn >> 21) & 7;
7910 opc2 = (insn >> 5) & 7;
7911 rt2 = 0;
7913 isread = (insn >> 20) & 1;
7914 rt = (insn >> 12) & 0xf;
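    /* Look the register up in the cp_regs hashtable, keyed on the full
     * (coprocessor, is64, ns, crn, crm, opc1, opc2) encoding.
     */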
7916 ri = get_arm_cp_reginfo(s->cp_regs,
7917 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
7918 if (ri) {
7919 /* Check access permissions */
7920 if (!cp_access_ok(s->current_el, ri, isread)) {
7921 return 1;
7924 if (ri->accessfn ||
7925 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
7926 /* Emit code to perform further access permissions checks at
7927 * runtime; this may result in an exception.
7928 * Note that on XScale all cp0..c13 registers do an access check
7929 * call in order to handle c15_cpar.
7931 TCGv_ptr tmpptr;
7932 TCGv_i32 tcg_syn, tcg_isread;
7933 uint32_t syndrome;
7935 /* Note that since we are an implementation which takes an
7936 * exception on a trapped conditional instruction only if the
7937 * instruction passes its condition code check, we can take
7938 * advantage of the clause in the ARM ARM that allows us to set
7939 * the COND field in the instruction to 0xE in all cases.
7940 * We could fish the actual condition out of the insn (ARM)
7941 * or the condexec bits (Thumb) but it isn't necessary.
7943 switch (cpnum) {
7944 case 14:
7945 if (is64) {
7946 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7947 isread, false);
7948 } else {
7949 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7950 rt, isread, false);
7952 break;
7953 case 15:
7954 if (is64) {
7955 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7956 isread, false);
7957 } else {
7958 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7959 rt, isread, false);
7961 break;
7962 default:
7963 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7964 * so this can only happen if this is an ARMv7 or earlier CPU,
7965 * in which case the syndrome information won't actually be
7966 * guest visible.
7968 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
7969 syndrome = syn_uncategorized();
7970 break;
7973 gen_set_condexec(s);
7974 gen_set_pc_im(s, s->pc - 4);
7975 tmpptr = tcg_const_ptr(ri);
7976 tcg_syn = tcg_const_i32(syndrome);
7977 tcg_isread = tcg_const_i32(isread);
7978 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7979 tcg_isread);
7980 tcg_temp_free_ptr(tmpptr);
7981 tcg_temp_free_i32(tcg_syn);
7982 tcg_temp_free_i32(tcg_isread);
7985 /* Handle special cases first */
7986 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7987 case ARM_CP_NOP:
7988 return 0;
7989 case ARM_CP_WFI:
7990 if (isread) {
7991 return 1;
7993 gen_set_pc_im(s, s->pc);
7994 s->base.is_jmp = DISAS_WFI;
7995 return 0;
7996 default:
7997 break;
8000 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
8001 gen_io_start();
8004 if (isread) {
8005 /* Read */
8006 if (is64) {
8007 TCGv_i64 tmp64;
8008 TCGv_i32 tmp;
8009 if (ri->type & ARM_CP_CONST) {
8010 tmp64 = tcg_const_i64(ri->resetvalue);
8011 } else if (ri->readfn) {
8012 TCGv_ptr tmpptr;
8013 tmp64 = tcg_temp_new_i64();
8014 tmpptr = tcg_const_ptr(ri);
8015 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
8016 tcg_temp_free_ptr(tmpptr);
8017 } else {
8018 tmp64 = tcg_temp_new_i64();
8019 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
8021 tmp = tcg_temp_new_i32();
8022 tcg_gen_extrl_i64_i32(tmp, tmp64);
8023 store_reg(s, rt, tmp);
8024 tcg_gen_shri_i64(tmp64, tmp64, 32);
8025 tmp = tcg_temp_new_i32();
8026 tcg_gen_extrl_i64_i32(tmp, tmp64);
8027 tcg_temp_free_i64(tmp64);
8028 store_reg(s, rt2, tmp);
8029 } else {
8030 TCGv_i32 tmp;
8031 if (ri->type & ARM_CP_CONST) {
8032 tmp = tcg_const_i32(ri->resetvalue);
8033 } else if (ri->readfn) {
8034 TCGv_ptr tmpptr;
8035 tmp = tcg_temp_new_i32();
8036 tmpptr = tcg_const_ptr(ri);
8037 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
8038 tcg_temp_free_ptr(tmpptr);
8039 } else {
8040 tmp = load_cpu_offset(ri->fieldoffset);
8042 if (rt == 15) {
 8043                 /* A destination register of r15 for a 32-bit load sets
 8044                  * the condition codes from the high 4 bits of the value.
8046 gen_set_nzcv(tmp);
8047 tcg_temp_free_i32(tmp);
8048 } else {
8049 store_reg(s, rt, tmp);
8052 } else {
8053 /* Write */
8054 if (ri->type & ARM_CP_CONST) {
8055 /* If not forbidden by access permissions, treat as WI */
8056 return 0;
8059 if (is64) {
8060 TCGv_i32 tmplo, tmphi;
8061 TCGv_i64 tmp64 = tcg_temp_new_i64();
8062 tmplo = load_reg(s, rt);
8063 tmphi = load_reg(s, rt2);
8064 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
8065 tcg_temp_free_i32(tmplo);
8066 tcg_temp_free_i32(tmphi);
8067 if (ri->writefn) {
8068 TCGv_ptr tmpptr = tcg_const_ptr(ri);
8069 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
8070 tcg_temp_free_ptr(tmpptr);
8071 } else {
8072 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
8074 tcg_temp_free_i64(tmp64);
8075 } else {
8076 if (ri->writefn) {
8077 TCGv_i32 tmp;
8078 TCGv_ptr tmpptr;
8079 tmp = load_reg(s, rt);
8080 tmpptr = tcg_const_ptr(ri);
8081 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
8082 tcg_temp_free_ptr(tmpptr);
8083 tcg_temp_free_i32(tmp);
8084 } else {
8085 TCGv_i32 tmp = load_reg(s, rt);
8086 store_cpu_offset(tmp, ri->fieldoffset);
8091 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
8092 /* I/O operations must end the TB here (whether read or write) */
8093 gen_io_end();
8094 gen_lookup_tb(s);
8095 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
8096 /* We default to ending the TB on a coprocessor register write,
8097 * but allow this to be suppressed by the register definition
8098 * (usually only necessary to work around guest bugs).
8100 gen_lookup_tb(s);
8103 return 0;
8106 /* Unknown register; this might be a guest error or a QEMU
8107 * unimplemented feature.
8109 if (is64) {
8110 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
8111 "64 bit system register cp:%d opc1: %d crm:%d "
8112 "(%s)\n",
8113 isread ? "read" : "write", cpnum, opc1, crm,
8114 s->ns ? "non-secure" : "secure");
8115 } else {
8116 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
8117 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
8118 "(%s)\n",
8119 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
8120 s->ns ? "non-secure" : "secure");
8123 return 1;
8127 /* Store a 64-bit value to a register pair. Clobbers val. */
8128 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
8130 TCGv_i32 tmp;
8131 tmp = tcg_temp_new_i32();
8132 tcg_gen_extrl_i64_i32(tmp, val);
8133 store_reg(s, rlow, tmp);
8134 tmp = tcg_temp_new_i32();
8135 tcg_gen_shri_i64(val, val, 32);
8136 tcg_gen_extrl_i64_i32(tmp, val);
8137 store_reg(s, rhigh, tmp);
8140 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
8141 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
8143 TCGv_i64 tmp;
8144 TCGv_i32 tmp2;
8146 /* Load value and extend to 64 bits. */
8147 tmp = tcg_temp_new_i64();
8148 tmp2 = load_reg(s, rlow);
8149 tcg_gen_extu_i32_i64(tmp, tmp2);
8150 tcg_temp_free_i32(tmp2);
8151 tcg_gen_add_i64(val, val, tmp);
8152 tcg_temp_free_i64(tmp);
8155 /* load and add a 64-bit value from a register pair. */
8156 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
8158 TCGv_i64 tmp;
8159 TCGv_i32 tmpl;
8160 TCGv_i32 tmph;
8162 /* Load 64-bit value rd:rn. */
8163 tmpl = load_reg(s, rlow);
8164 tmph = load_reg(s, rhigh);
8165 tmp = tcg_temp_new_i64();
8166 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
8167 tcg_temp_free_i32(tmpl);
8168 tcg_temp_free_i32(tmph);
8169 tcg_gen_add_i64(val, val, tmp);
8170 tcg_temp_free_i64(tmp);
8173 /* Set N and Z flags from hi|lo. */
8174 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
8176 tcg_gen_mov_i32(cpu_NF, hi);
8177 tcg_gen_or_i32(cpu_ZF, lo, hi);
8180 /* Load/Store exclusive instructions are implemented by remembering
8181 the value/address loaded, and seeing if these are the same
8182 when the store is performed. This should be sufficient to implement
8183 the architecturally mandated semantics, and avoids having to monitor
8184 regular stores. The compare vs the remembered value is done during
8185 the cmpxchg operation, but we must compare the addresses manually. */
8186 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
8187 TCGv_i32 addr, int size)
8189 TCGv_i32 tmp = tcg_temp_new_i32();
8190 TCGMemOp opc = size | MO_ALIGN | s->be_data;
8192 s->is_ldex = true;
8194 if (size == 3) {
8195 TCGv_i32 tmp2 = tcg_temp_new_i32();
8196 TCGv_i64 t64 = tcg_temp_new_i64();
8198 /* For AArch32, architecturally the 32-bit word at the lowest
8199 * address is always Rt and the one at addr+4 is Rt2, even if
8200 * the CPU is big-endian. That means we don't want to do a
8201 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
8202 * for an architecturally 64-bit access, but instead do a
8203 * 64-bit access using MO_BE if appropriate and then split
8204 * the two halves.
8205 * This only makes a difference for BE32 user-mode, where
8206 * frob64() must not flip the two halves of the 64-bit data
8207 * but this code must treat BE32 user-mode like BE32 system.
8209 TCGv taddr = gen_aa32_addr(s, addr, opc);
8211 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
8212 tcg_temp_free(taddr);
8213 tcg_gen_mov_i64(cpu_exclusive_val, t64);
8214 if (s->be_data == MO_BE) {
8215 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
8216 } else {
8217 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
8219 tcg_temp_free_i64(t64);
8221 store_reg(s, rt2, tmp2);
8222 } else {
8223 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
8224 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
8227 store_reg(s, rt, tmp);
8228 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
8231 static void gen_clrex(DisasContext *s)
8233 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
8236 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
8237 TCGv_i32 addr, int size)
8239 TCGv_i32 t0, t1, t2;
8240 TCGv_i64 extaddr;
8241 TCGv taddr;
8242 TCGLabel *done_label;
8243 TCGLabel *fail_label;
8244 TCGMemOp opc = size | MO_ALIGN | s->be_data;
8246 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
8247 [addr] = {Rt};
8248 {Rd} = 0;
8249 } else {
8250 {Rd} = 1;
8251 } */
8252 fail_label = gen_new_label();
8253 done_label = gen_new_label();
8254 extaddr = tcg_temp_new_i64();
8255 tcg_gen_extu_i32_i64(extaddr, addr);
8256 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
8257 tcg_temp_free_i64(extaddr);
8259 taddr = gen_aa32_addr(s, addr, opc);
8260 t0 = tcg_temp_new_i32();
8261 t1 = load_reg(s, rt);
8262 if (size == 3) {
8263 TCGv_i64 o64 = tcg_temp_new_i64();
8264 TCGv_i64 n64 = tcg_temp_new_i64();
8266 t2 = load_reg(s, rt2);
8267 /* For AArch32, architecturally the 32-bit word at the lowest
8268 * address is always Rt and the one at addr+4 is Rt2, even if
8269 * the CPU is big-endian. Since we're going to treat this as a
8270 * single 64-bit BE store, we need to put the two halves in the
8271 * opposite order for BE to LE, so that they end up in the right
8272 * places.
8273 * We don't want gen_aa32_frob64() because that does the wrong
8274 * thing for BE32 usermode.
8276 if (s->be_data == MO_BE) {
8277 tcg_gen_concat_i32_i64(n64, t2, t1);
8278 } else {
8279 tcg_gen_concat_i32_i64(n64, t1, t2);
8281 tcg_temp_free_i32(t2);
8283 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
8284 get_mem_index(s), opc);
8285 tcg_temp_free_i64(n64);
8287 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
8288 tcg_gen_extrl_i64_i32(t0, o64);
8290 tcg_temp_free_i64(o64);
8291 } else {
8292 t2 = tcg_temp_new_i32();
8293 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
8294 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
8295 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
8296 tcg_temp_free_i32(t2);
8298 tcg_temp_free_i32(t1);
8299 tcg_temp_free(taddr);
8300 tcg_gen_mov_i32(cpu_R[rd], t0);
8301 tcg_temp_free_i32(t0);
8302 tcg_gen_br(done_label);
8304 gen_set_label(fail_label);
8305 tcg_gen_movi_i32(cpu_R[rd], 1);
8306 gen_set_label(done_label);
8307 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
8310 /* gen_srs:
8311 * @env: CPUARMState
8312 * @s: DisasContext
8313 * @mode: mode field from insn (which stack to store to)
8314 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
8315 * @writeback: true if writeback bit set
8317 * Generate code for the SRS (Store Return State) insn.
8319 static void gen_srs(DisasContext *s,
8320 uint32_t mode, uint32_t amode, bool writeback)
8322 int32_t offset;
8323 TCGv_i32 addr, tmp;
8324 bool undef = false;
8326 /* SRS is:
8327 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
8328 * and specified mode is monitor mode
8329 * - UNDEFINED in Hyp mode
8330 * - UNPREDICTABLE in User or System mode
8331 * - UNPREDICTABLE if the specified mode is:
8332 * -- not implemented
8333 * -- not a valid mode number
8334 * -- a mode that's at a higher exception level
8335 * -- Monitor, if we are Non-secure
8336 * For the UNPREDICTABLE cases we choose to UNDEF.
8338 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
8339 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
8340 return;
8343 if (s->current_el == 0 || s->current_el == 2) {
8344 undef = true;
8347 switch (mode) {
8348 case ARM_CPU_MODE_USR:
8349 case ARM_CPU_MODE_FIQ:
8350 case ARM_CPU_MODE_IRQ:
8351 case ARM_CPU_MODE_SVC:
8352 case ARM_CPU_MODE_ABT:
8353 case ARM_CPU_MODE_UND:
8354 case ARM_CPU_MODE_SYS:
8355 break;
8356 case ARM_CPU_MODE_HYP:
8357 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
8358 undef = true;
8360 break;
8361 case ARM_CPU_MODE_MON:
8362 /* No need to check specifically for "are we non-secure" because
8363 * we've already made EL0 UNDEF and handled the trap for S-EL1;
8364 * so if this isn't EL3 then we must be non-secure.
8366 if (s->current_el != 3) {
8367 undef = true;
8369 break;
8370 default:
8371 undef = true;
8374 if (undef) {
8375 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
8376 default_exception_el(s));
8377 return;
8380 addr = tcg_temp_new_i32();
8381 tmp = tcg_const_i32(mode);
8382 /* get_r13_banked() will raise an exception if called from System mode */
8383 gen_set_condexec(s);
8384 gen_set_pc_im(s, s->pc - 4);
8385 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8386 tcg_temp_free_i32(tmp);
8387 switch (amode) {
8388 case 0: /* DA */
8389 offset = -4;
8390 break;
8391 case 1: /* IA */
8392 offset = 0;
8393 break;
8394 case 2: /* DB */
8395 offset = -8;
8396 break;
8397 case 3: /* IB */
8398 offset = 4;
8399 break;
8400 default:
8401 abort();
8403 tcg_gen_addi_i32(addr, addr, offset);
8404 tmp = load_reg(s, 14);
8405 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8406 tcg_temp_free_i32(tmp);
8407 tmp = load_cpu_field(spsr);
8408 tcg_gen_addi_i32(addr, addr, 4);
8409 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8410 tcg_temp_free_i32(tmp);
8411 if (writeback) {
8412 switch (amode) {
8413 case 0:
8414 offset = -8;
8415 break;
8416 case 1:
8417 offset = 4;
8418 break;
8419 case 2:
8420 offset = -4;
8421 break;
8422 case 3:
8423 offset = 0;
8424 break;
8425 default:
8426 abort();
8428 tcg_gen_addi_i32(addr, addr, offset);
8429 tmp = tcg_const_i32(mode);
8430 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8431 tcg_temp_free_i32(tmp);
8433 tcg_temp_free_i32(addr);
8434 s->base.is_jmp = DISAS_UPDATE;
8437 static void disas_arm_insn(DisasContext *s, unsigned int insn)
8439 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
8440 TCGv_i32 tmp;
8441 TCGv_i32 tmp2;
8442 TCGv_i32 tmp3;
8443 TCGv_i32 addr;
8444 TCGv_i64 tmp64;
8446 /* M variants do not implement ARM mode; this must raise the INVSTATE
8447 * UsageFault exception.
8449 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8450 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8451 default_exception_el(s));
8452 return;
8454 cond = insn >> 28;
8455 if (cond == 0xf){
8456 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8457 * choose to UNDEF. In ARMv5 and above the space is used
8458 * for miscellaneous unconditional instructions.
8460 ARCH(5);
8462 /* Unconditional instructions. */
8463 if (((insn >> 25) & 7) == 1) {
8464 /* NEON Data processing. */
8465 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
8466 goto illegal_op;
8469 if (disas_neon_data_insn(s, insn)) {
8470 goto illegal_op;
8472 return;
8474 if ((insn & 0x0f100000) == 0x04000000) {
8475 /* NEON load/store. */
8476 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
8477 goto illegal_op;
8480 if (disas_neon_ls_insn(s, insn)) {
8481 goto illegal_op;
8483 return;
8485 if ((insn & 0x0f000e10) == 0x0e000a00) {
8486 /* VFP. */
8487 if (disas_vfp_insn(s, insn)) {
8488 goto illegal_op;
8490 return;
8492 if (((insn & 0x0f30f000) == 0x0510f000) ||
8493 ((insn & 0x0f30f010) == 0x0710f000)) {
8494 if ((insn & (1 << 22)) == 0) {
8495 /* PLDW; v7MP */
8496 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
8497 goto illegal_op;
8500 /* Otherwise PLD; v5TE+ */
8501 ARCH(5TE);
8502 return;
8504 if (((insn & 0x0f70f000) == 0x0450f000) ||
8505 ((insn & 0x0f70f010) == 0x0650f000)) {
8506 ARCH(7);
8507 return; /* PLI; V7 */
8509 if (((insn & 0x0f700000) == 0x04100000) ||
8510 ((insn & 0x0f700010) == 0x06100000)) {
8511 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
8512 goto illegal_op;
8514 return; /* v7MP: Unallocated memory hint: must NOP */
8517 if ((insn & 0x0ffffdff) == 0x01010000) {
8518 ARCH(6);
8519 /* setend */
8520 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8521 gen_helper_setend(cpu_env);
8522 s->base.is_jmp = DISAS_UPDATE;
8524 return;
8525 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8526 switch ((insn >> 4) & 0xf) {
8527 case 1: /* clrex */
8528 ARCH(6K);
8529 gen_clrex(s);
8530 return;
8531 case 4: /* dsb */
8532 case 5: /* dmb */
8533 ARCH(7);
8534 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
8535 return;
8536 case 6: /* isb */
8537 /* We need to break the TB after this insn to execute
8538 * self-modifying code correctly and also to take
8539 * any pending interrupts immediately.
8541 gen_goto_tb(s, 0, s->pc & ~1);
8542 return;
8543 default:
8544 goto illegal_op;
8546 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8547 /* srs */
8548 ARCH(6);
8549 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
8550 return;
8551 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
8552 /* rfe */
8553 int32_t offset;
8554 if (IS_USER(s))
8555 goto illegal_op;
8556 ARCH(6);
8557 rn = (insn >> 16) & 0xf;
8558 addr = load_reg(s, rn);
8559 i = (insn >> 23) & 3;
8560 switch (i) {
8561 case 0: offset = -4; break; /* DA */
8562 case 1: offset = 0; break; /* IA */
8563 case 2: offset = -8; break; /* DB */
8564 case 3: offset = 4; break; /* IB */
8565 default: abort();
8567 if (offset)
8568 tcg_gen_addi_i32(addr, addr, offset);
8569 /* Load PC into tmp and CPSR into tmp2. */
8570 tmp = tcg_temp_new_i32();
8571 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8572 tcg_gen_addi_i32(addr, addr, 4);
8573 tmp2 = tcg_temp_new_i32();
8574 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
8575 if (insn & (1 << 21)) {
8576 /* Base writeback. */
8577 switch (i) {
8578 case 0: offset = -8; break;
8579 case 1: offset = 4; break;
8580 case 2: offset = -4; break;
8581 case 3: offset = 0; break;
8582 default: abort();
8584 if (offset)
8585 tcg_gen_addi_i32(addr, addr, offset);
8586 store_reg(s, rn, addr);
8587 } else {
8588 tcg_temp_free_i32(addr);
8590 gen_rfe(s, tmp, tmp2);
8591 return;
8592 } else if ((insn & 0x0e000000) == 0x0a000000) {
8593 /* branch link and change to thumb (blx <offset>) */
8594 int32_t offset;
8596 val = (uint32_t)s->pc;
8597 tmp = tcg_temp_new_i32();
8598 tcg_gen_movi_i32(tmp, val);
8599 store_reg(s, 14, tmp);
8600 /* Sign-extend the 24-bit offset */
8601 offset = (((int32_t)insn) << 8) >> 8;
8602 /* offset * 4 + bit24 * 2 + (thumb bit) */
8603 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8604 /* pipeline offset */
8605 val += 4;
8606 /* protected by ARCH(5); above, near the start of uncond block */
8607 gen_bx_im(s, val);
8608 return;
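/* Informal sketch of the computation above (illustrative names, not
 * generated code):
 *
 *   lr     = addr_of_insn + 4;                          // next ARM insn
 *   target = addr_of_insn + 8 + SignExtend(imm24) * 4 + (H << 1);
 *   pc     = target | 1;                                // switch to Thumb
 *
 * where H is insn bit 24.  s->pc already equals addr_of_insn + 4, so only
 * a further + 4 is needed to model the architectural PC + 8 read.
 */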
8609 } else if ((insn & 0x0e000f00) == 0x0c000100) {
8610 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
8611 /* iWMMXt register transfer. */
8612 if (extract32(s->c15_cpar, 1, 1)) {
8613 if (!disas_iwmmxt_insn(s, insn)) {
8614 return;
8618 } else if ((insn & 0x0e000a00) == 0x0c000800
8619 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8620 if (disas_neon_insn_3same_ext(s, insn)) {
8621 goto illegal_op;
8623 return;
8624 } else if ((insn & 0x0f000a00) == 0x0e000800
8625 && arm_dc_feature(s, ARM_FEATURE_V8)) {
8626 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
8627 goto illegal_op;
8629 return;
8630 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8631 /* Coprocessor double register transfer. */
8632 ARCH(5TE);
8633 } else if ((insn & 0x0f000010) == 0x0e000010) {
8634 /* Additional coprocessor register transfer. */
8635 } else if ((insn & 0x0ff10020) == 0x01000000) {
8636 uint32_t mask;
8637 uint32_t val;
8638 /* cps (privileged) */
8639 if (IS_USER(s))
8640 return;
8641 mask = val = 0;
8642 if (insn & (1 << 19)) {
8643 if (insn & (1 << 8))
8644 mask |= CPSR_A;
8645 if (insn & (1 << 7))
8646 mask |= CPSR_I;
8647 if (insn & (1 << 6))
8648 mask |= CPSR_F;
8649 if (insn & (1 << 18))
8650 val |= mask;
8652 if (insn & (1 << 17)) {
8653 mask |= CPSR_M;
8654 val |= (insn & 0x1f);
8656 if (mask) {
8657 gen_set_psr_im(s, mask, 0, val);
8659 return;
8661 goto illegal_op;
8663 if (cond != 0xe) {
8664 /* If the condition is not AL (always), generate a conditional jump to
8665 the next instruction. */
8666 s->condlabel = gen_new_label();
8667 arm_gen_test_cc(cond ^ 1, s->condlabel);
8668 s->condjmp = 1;
8670 if ((insn & 0x0f900000) == 0x03000000) {
8671 if ((insn & (1 << 21)) == 0) {
8672 ARCH(6T2);
8673 rd = (insn >> 12) & 0xf;
8674 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8675 if ((insn & (1 << 22)) == 0) {
8676 /* MOVW */
8677 tmp = tcg_temp_new_i32();
8678 tcg_gen_movi_i32(tmp, val);
8679 } else {
8680 /* MOVT */
8681 tmp = load_reg(s, rd);
8682 tcg_gen_ext16u_i32(tmp, tmp);
8683 tcg_gen_ori_i32(tmp, tmp, val << 16);
8685 store_reg(s, rd, tmp);
8686 } else {
8687 if (((insn >> 12) & 0xf) != 0xf)
8688 goto illegal_op;
8689 if (((insn >> 16) & 0xf) == 0) {
8690 gen_nop_hint(s, insn & 0xff);
8691 } else {
8692 /* CPSR = immediate */
8693 val = insn & 0xff;
8694 shift = ((insn >> 8) & 0xf) * 2;
8695 if (shift)
8696 val = (val >> shift) | (val << (32 - shift));
8697 i = ((insn & (1 << 22)) != 0);
8698 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8699 i, val)) {
8700 goto illegal_op;
8704 } else if ((insn & 0x0f900000) == 0x01000000
8705 && (insn & 0x00000090) != 0x00000090) {
8706 /* miscellaneous instructions */
8707 op1 = (insn >> 21) & 3;
8708 sh = (insn >> 4) & 0xf;
8709 rm = insn & 0xf;
8710 switch (sh) {
8711 case 0x0: /* MSR, MRS */
8712 if (insn & (1 << 9)) {
8713 /* MSR (banked) and MRS (banked) */
8714 int sysm = extract32(insn, 16, 4) |
8715 (extract32(insn, 8, 1) << 4);
8716 int r = extract32(insn, 22, 1);
8718 if (op1 & 1) {
8719 /* MSR (banked) */
8720 gen_msr_banked(s, r, sysm, rm);
8721 } else {
8722 /* MRS (banked) */
8723 int rd = extract32(insn, 12, 4);
8725 gen_mrs_banked(s, r, sysm, rd);
8727 break;
8730 /* MSR, MRS (for PSRs) */
8731 if (op1 & 1) {
8732 /* PSR = reg */
8733 tmp = load_reg(s, rm);
8734 i = ((op1 & 2) != 0);
8735 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
8736 goto illegal_op;
8737 } else {
8738 /* reg = PSR */
8739 rd = (insn >> 12) & 0xf;
8740 if (op1 & 2) {
8741 if (IS_USER(s))
8742 goto illegal_op;
8743 tmp = load_cpu_field(spsr);
8744 } else {
8745 tmp = tcg_temp_new_i32();
8746 gen_helper_cpsr_read(tmp, cpu_env);
8748 store_reg(s, rd, tmp);
8750 break;
8751 case 0x1:
8752 if (op1 == 1) {
8753 /* branch/exchange thumb (bx). */
8754 ARCH(4T);
8755 tmp = load_reg(s, rm);
8756 gen_bx(s, tmp);
8757 } else if (op1 == 3) {
8758 /* clz */
8759 ARCH(5);
8760 rd = (insn >> 12) & 0xf;
8761 tmp = load_reg(s, rm);
8762 tcg_gen_clzi_i32(tmp, tmp, 32);
8763 store_reg(s, rd, tmp);
8764 } else {
8765 goto illegal_op;
8767 break;
8768 case 0x2:
8769 if (op1 == 1) {
8770 ARCH(5J); /* bxj */
8771 /* Trivial implementation equivalent to bx. */
8772 tmp = load_reg(s, rm);
8773 gen_bx(s, tmp);
8774 } else {
8775 goto illegal_op;
8777 break;
8778 case 0x3:
8779 if (op1 != 1)
8780 goto illegal_op;
8782 ARCH(5);
8783 /* branch link/exchange thumb (blx) */
8784 tmp = load_reg(s, rm);
8785 tmp2 = tcg_temp_new_i32();
8786 tcg_gen_movi_i32(tmp2, s->pc);
8787 store_reg(s, 14, tmp2);
8788 gen_bx(s, tmp);
8789 break;
8790 case 0x4:
8792 /* crc32/crc32c */
8793 uint32_t c = extract32(insn, 8, 4);
8795 /* Check this CPU supports ARMv8 CRC instructions.
8796 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8797 * Bits 8, 10 and 11 should be zero.
8799 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
8800 (c & 0xd) != 0) {
8801 goto illegal_op;
8804 rn = extract32(insn, 16, 4);
8805 rd = extract32(insn, 12, 4);
8807 tmp = load_reg(s, rn);
8808 tmp2 = load_reg(s, rm);
8809 if (op1 == 0) {
8810 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8811 } else if (op1 == 1) {
8812 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8814 tmp3 = tcg_const_i32(1 << op1);
8815 if (c & 0x2) {
8816 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8817 } else {
8818 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8820 tcg_temp_free_i32(tmp2);
8821 tcg_temp_free_i32(tmp3);
8822 store_reg(s, rd, tmp);
8823 break;
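/* The third argument passed to the crc32/crc32c helpers is the operand
 * size in bytes (1 << op1, i.e. 1/2/4 for the B/H/W forms), and bit 9 of
 * the insn (c & 2) selects the Castagnoli polynomial variant (CRC32C)
 * instead of the standard CRC-32 used by CRC32B/H/W.
 */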
8825 case 0x5: /* saturating add/subtract */
8826 ARCH(5TE);
8827 rd = (insn >> 12) & 0xf;
8828 rn = (insn >> 16) & 0xf;
8829 tmp = load_reg(s, rm);
8830 tmp2 = load_reg(s, rn);
8831 if (op1 & 2)
8832 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
8833 if (op1 & 1)
8834 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
8835 else
8836 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8837 tcg_temp_free_i32(tmp2);
8838 store_reg(s, rd, tmp);
8839 break;
8840 case 7:
8842 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
8843 switch (op1) {
8844 case 0:
8845 /* HLT */
8846 gen_hlt(s, imm16);
8847 break;
8848 case 1:
8849 /* bkpt */
8850 ARCH(5);
8851 gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
8852 break;
8853 case 2:
8854 /* Hypervisor call (v7) */
8855 ARCH(7);
8856 if (IS_USER(s)) {
8857 goto illegal_op;
8859 gen_hvc(s, imm16);
8860 break;
8861 case 3:
8862 /* Secure monitor call (v6+) */
8863 ARCH(6K);
8864 if (IS_USER(s)) {
8865 goto illegal_op;
8867 gen_smc(s);
8868 break;
8869 default:
8870 g_assert_not_reached();
8872 break;
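/* The 16-bit immediate used above is split across the encoding: insn[3:0]
 * supplies bits [3:0] and insn[19:8] supplies bits [15:4].  For example,
 * BKPT #0xab12 encodes 0xab1 in insn[19:8] and 0x2 in insn[3:0].
 */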
8874 case 0x8: /* signed multiply */
8875 case 0xa:
8876 case 0xc:
8877 case 0xe:
8878 ARCH(5TE);
8879 rs = (insn >> 8) & 0xf;
8880 rn = (insn >> 12) & 0xf;
8881 rd = (insn >> 16) & 0xf;
8882 if (op1 == 1) {
8883 /* (32 * 16) >> 16 */
8884 tmp = load_reg(s, rm);
8885 tmp2 = load_reg(s, rs);
8886 if (sh & 4)
8887 tcg_gen_sari_i32(tmp2, tmp2, 16);
8888 else
8889 gen_sxth(tmp2);
8890 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8891 tcg_gen_shri_i64(tmp64, tmp64, 16);
8892 tmp = tcg_temp_new_i32();
8893 tcg_gen_extrl_i64_i32(tmp, tmp64);
8894 tcg_temp_free_i64(tmp64);
8895 if ((sh & 2) == 0) {
8896 tmp2 = load_reg(s, rn);
8897 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8898 tcg_temp_free_i32(tmp2);
8900 store_reg(s, rd, tmp);
8901 } else {
8902 /* 16 * 16 */
8903 tmp = load_reg(s, rm);
8904 tmp2 = load_reg(s, rs);
8905 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
8906 tcg_temp_free_i32(tmp2);
8907 if (op1 == 2) {
8908 tmp64 = tcg_temp_new_i64();
8909 tcg_gen_ext_i32_i64(tmp64, tmp);
8910 tcg_temp_free_i32(tmp);
8911 gen_addq(s, tmp64, rn, rd);
8912 gen_storeq_reg(s, rn, rd, tmp64);
8913 tcg_temp_free_i64(tmp64);
8914 } else {
8915 if (op1 == 0) {
8916 tmp2 = load_reg(s, rn);
8917 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8918 tcg_temp_free_i32(tmp2);
8920 store_reg(s, rd, tmp);
8923 break;
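/* op1 (insn bits [22:21]) selects the halfword-multiply flavour decoded
 * above: 0 = SMLA<x><y> (16x16 plus Rn, setting Q on overflow),
 * 1 = SMULW<y>/SMLAW<y> ((32x16) >> 16, accumulating when bit 5 is clear),
 * 2 = SMLAL<x><y> (16x16 added into the 64-bit RdHi:RdLo pair),
 * 3 = SMUL<x><y> (plain 16x16).
 */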
8924 default:
8925 goto illegal_op;
8927 } else if (((insn & 0x0e000000) == 0 &&
8928 (insn & 0x00000090) != 0x90) ||
8929 ((insn & 0x0e000000) == (1 << 25))) {
8930 int set_cc, logic_cc, shiftop;
8932 op1 = (insn >> 21) & 0xf;
8933 set_cc = (insn >> 20) & 1;
8934 logic_cc = table_logic_cc[op1] & set_cc;
8936 /* data processing instruction */
8937 if (insn & (1 << 25)) {
8938 /* immediate operand */
8939 val = insn & 0xff;
8940 shift = ((insn >> 8) & 0xf) * 2;
8941 if (shift) {
8942 val = (val >> shift) | (val << (32 - shift));
8944 tmp2 = tcg_temp_new_i32();
8945 tcg_gen_movi_i32(tmp2, val);
8946 if (logic_cc && shift) {
8947 gen_set_CF_bit31(tmp2);
8949 } else {
8950 /* register */
8951 rm = (insn) & 0xf;
8952 tmp2 = load_reg(s, rm);
8953 shiftop = (insn >> 5) & 3;
8954 if (!(insn & (1 << 4))) {
8955 shift = (insn >> 7) & 0x1f;
8956 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8957 } else {
8958 rs = (insn >> 8) & 0xf;
8959 tmp = load_reg(s, rs);
8960 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
8963 if (op1 != 0x0f && op1 != 0x0d) {
8964 rn = (insn >> 16) & 0xf;
8965 tmp = load_reg(s, rn);
8966 } else {
8967 tmp = NULL;
8969 rd = (insn >> 12) & 0xf;
8970 switch(op1) {
8971 case 0x00:
8972 tcg_gen_and_i32(tmp, tmp, tmp2);
8973 if (logic_cc) {
8974 gen_logic_CC(tmp);
8976 store_reg_bx(s, rd, tmp);
8977 break;
8978 case 0x01:
8979 tcg_gen_xor_i32(tmp, tmp, tmp2);
8980 if (logic_cc) {
8981 gen_logic_CC(tmp);
8983 store_reg_bx(s, rd, tmp);
8984 break;
8985 case 0x02:
8986 if (set_cc && rd == 15) {
8987 /* SUBS r15, ... is used for exception return. */
8988 if (IS_USER(s)) {
8989 goto illegal_op;
8991 gen_sub_CC(tmp, tmp, tmp2);
8992 gen_exception_return(s, tmp);
8993 } else {
8994 if (set_cc) {
8995 gen_sub_CC(tmp, tmp, tmp2);
8996 } else {
8997 tcg_gen_sub_i32(tmp, tmp, tmp2);
8999 store_reg_bx(s, rd, tmp);
9001 break;
9002 case 0x03:
9003 if (set_cc) {
9004 gen_sub_CC(tmp, tmp2, tmp);
9005 } else {
9006 tcg_gen_sub_i32(tmp, tmp2, tmp);
9008 store_reg_bx(s, rd, tmp);
9009 break;
9010 case 0x04:
9011 if (set_cc) {
9012 gen_add_CC(tmp, tmp, tmp2);
9013 } else {
9014 tcg_gen_add_i32(tmp, tmp, tmp2);
9016 store_reg_bx(s, rd, tmp);
9017 break;
9018 case 0x05:
9019 if (set_cc) {
9020 gen_adc_CC(tmp, tmp, tmp2);
9021 } else {
9022 gen_add_carry(tmp, tmp, tmp2);
9024 store_reg_bx(s, rd, tmp);
9025 break;
9026 case 0x06:
9027 if (set_cc) {
9028 gen_sbc_CC(tmp, tmp, tmp2);
9029 } else {
9030 gen_sub_carry(tmp, tmp, tmp2);
9032 store_reg_bx(s, rd, tmp);
9033 break;
9034 case 0x07:
9035 if (set_cc) {
9036 gen_sbc_CC(tmp, tmp2, tmp);
9037 } else {
9038 gen_sub_carry(tmp, tmp2, tmp);
9040 store_reg_bx(s, rd, tmp);
9041 break;
9042 case 0x08:
9043 if (set_cc) {
9044 tcg_gen_and_i32(tmp, tmp, tmp2);
9045 gen_logic_CC(tmp);
9047 tcg_temp_free_i32(tmp);
9048 break;
9049 case 0x09:
9050 if (set_cc) {
9051 tcg_gen_xor_i32(tmp, tmp, tmp2);
9052 gen_logic_CC(tmp);
9054 tcg_temp_free_i32(tmp);
9055 break;
9056 case 0x0a:
9057 if (set_cc) {
9058 gen_sub_CC(tmp, tmp, tmp2);
9060 tcg_temp_free_i32(tmp);
9061 break;
9062 case 0x0b:
9063 if (set_cc) {
9064 gen_add_CC(tmp, tmp, tmp2);
9066 tcg_temp_free_i32(tmp);
9067 break;
9068 case 0x0c:
9069 tcg_gen_or_i32(tmp, tmp, tmp2);
9070 if (logic_cc) {
9071 gen_logic_CC(tmp);
9073 store_reg_bx(s, rd, tmp);
9074 break;
9075 case 0x0d:
9076 if (logic_cc && rd == 15) {
9077 /* MOVS r15, ... is used for exception return. */
9078 if (IS_USER(s)) {
9079 goto illegal_op;
9081 gen_exception_return(s, tmp2);
9082 } else {
9083 if (logic_cc) {
9084 gen_logic_CC(tmp2);
9086 store_reg_bx(s, rd, tmp2);
9088 break;
9089 case 0x0e:
9090 tcg_gen_andc_i32(tmp, tmp, tmp2);
9091 if (logic_cc) {
9092 gen_logic_CC(tmp);
9094 store_reg_bx(s, rd, tmp);
9095 break;
9096 default:
9097 case 0x0f:
9098 tcg_gen_not_i32(tmp2, tmp2);
9099 if (logic_cc) {
9100 gen_logic_CC(tmp2);
9102 store_reg_bx(s, rd, tmp2);
9103 break;
9105 if (op1 != 0x0f && op1 != 0x0d) {
9106 tcg_temp_free_i32(tmp2);
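/* tmp2 is freed only for the two-operand cases: for MOV (op1 == 0x0d) and
 * MVN (op1 == 0x0f) no first operand was loaded and tmp2 itself became the
 * result, so ownership has already passed to store_reg_bx() or
 * gen_exception_return() above.
 */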
9108 } else {
9109 /* other instructions */
9110 op1 = (insn >> 24) & 0xf;
9111 switch(op1) {
9112 case 0x0:
9113 case 0x1:
9114 /* multiplies, extra load/stores */
9115 sh = (insn >> 5) & 3;
9116 if (sh == 0) {
9117 if (op1 == 0x0) {
9118 rd = (insn >> 16) & 0xf;
9119 rn = (insn >> 12) & 0xf;
9120 rs = (insn >> 8) & 0xf;
9121 rm = (insn) & 0xf;
9122 op1 = (insn >> 20) & 0xf;
9123 switch (op1) {
9124 case 0: case 1: case 2: case 3: case 6:
9125 /* 32 bit mul */
9126 tmp = load_reg(s, rs);
9127 tmp2 = load_reg(s, rm);
9128 tcg_gen_mul_i32(tmp, tmp, tmp2);
9129 tcg_temp_free_i32(tmp2);
9130 if (insn & (1 << 22)) {
9131 /* Subtract (mls) */
9132 ARCH(6T2);
9133 tmp2 = load_reg(s, rn);
9134 tcg_gen_sub_i32(tmp, tmp2, tmp);
9135 tcg_temp_free_i32(tmp2);
9136 } else if (insn & (1 << 21)) {
9137 /* Add */
9138 tmp2 = load_reg(s, rn);
9139 tcg_gen_add_i32(tmp, tmp, tmp2);
9140 tcg_temp_free_i32(tmp2);
9142 if (insn & (1 << 20))
9143 gen_logic_CC(tmp);
9144 store_reg(s, rd, tmp);
9145 break;
9146 case 4:
9147 /* 64 bit mul double accumulate (UMAAL) */
9148 ARCH(6);
9149 tmp = load_reg(s, rs);
9150 tmp2 = load_reg(s, rm);
9151 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9152 gen_addq_lo(s, tmp64, rn);
9153 gen_addq_lo(s, tmp64, rd);
9154 gen_storeq_reg(s, rn, rd, tmp64);
9155 tcg_temp_free_i64(tmp64);
9156 break;
9157 case 8: case 9: case 10: case 11:
9158 case 12: case 13: case 14: case 15:
9159 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
9160 tmp = load_reg(s, rs);
9161 tmp2 = load_reg(s, rm);
9162 if (insn & (1 << 22)) {
9163 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
9164 } else {
9165 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
9167 if (insn & (1 << 21)) { /* mult accumulate */
9168 TCGv_i32 al = load_reg(s, rn);
9169 TCGv_i32 ah = load_reg(s, rd);
9170 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
9171 tcg_temp_free_i32(al);
9172 tcg_temp_free_i32(ah);
9174 if (insn & (1 << 20)) {
9175 gen_logicq_cc(tmp, tmp2);
9177 store_reg(s, rn, tmp);
9178 store_reg(s, rd, tmp2);
9179 break;
9180 default:
9181 goto illegal_op;
9183 } else {
9184 rn = (insn >> 16) & 0xf;
9185 rd = (insn >> 12) & 0xf;
9186 if (insn & (1 << 23)) {
9187 /* load/store exclusive */
9188 int op2 = (insn >> 8) & 3;
9189 op1 = (insn >> 21) & 0x3;
9191 switch (op2) {
9192 case 0: /* lda/stl */
9193 if (op1 == 1) {
9194 goto illegal_op;
9196 ARCH(8);
9197 break;
9198 case 1: /* reserved */
9199 goto illegal_op;
9200 case 2: /* ldaex/stlex */
9201 ARCH(8);
9202 break;
9203 case 3: /* ldrex/strex */
9204 if (op1) {
9205 ARCH(6K);
9206 } else {
9207 ARCH(6);
9209 break;
9212 addr = tcg_temp_local_new_i32();
9213 load_reg_var(s, addr, rn);
9215 /* Since the emulation does not have barriers,
9216 the acquire/release semantics need no special
9217 handling */
9218 if (op2 == 0) {
9219 if (insn & (1 << 20)) {
9220 tmp = tcg_temp_new_i32();
9221 switch (op1) {
9222 case 0: /* lda */
9223 gen_aa32_ld32u_iss(s, tmp, addr,
9224 get_mem_index(s),
9225 rd | ISSIsAcqRel);
9226 break;
9227 case 2: /* ldab */
9228 gen_aa32_ld8u_iss(s, tmp, addr,
9229 get_mem_index(s),
9230 rd | ISSIsAcqRel);
9231 break;
9232 case 3: /* ldah */
9233 gen_aa32_ld16u_iss(s, tmp, addr,
9234 get_mem_index(s),
9235 rd | ISSIsAcqRel);
9236 break;
9237 default:
9238 abort();
9240 store_reg(s, rd, tmp);
9241 } else {
9242 rm = insn & 0xf;
9243 tmp = load_reg(s, rm);
9244 switch (op1) {
9245 case 0: /* stl */
9246 gen_aa32_st32_iss(s, tmp, addr,
9247 get_mem_index(s),
9248 rm | ISSIsAcqRel);
9249 break;
9250 case 2: /* stlb */
9251 gen_aa32_st8_iss(s, tmp, addr,
9252 get_mem_index(s),
9253 rm | ISSIsAcqRel);
9254 break;
9255 case 3: /* stlh */
9256 gen_aa32_st16_iss(s, tmp, addr,
9257 get_mem_index(s),
9258 rm | ISSIsAcqRel);
9259 break;
9260 default:
9261 abort();
9263 tcg_temp_free_i32(tmp);
9265 } else if (insn & (1 << 20)) {
9266 switch (op1) {
9267 case 0: /* ldrex */
9268 gen_load_exclusive(s, rd, 15, addr, 2);
9269 break;
9270 case 1: /* ldrexd */
9271 gen_load_exclusive(s, rd, rd + 1, addr, 3);
9272 break;
9273 case 2: /* ldrexb */
9274 gen_load_exclusive(s, rd, 15, addr, 0);
9275 break;
9276 case 3: /* ldrexh */
9277 gen_load_exclusive(s, rd, 15, addr, 1);
9278 break;
9279 default:
9280 abort();
9282 } else {
9283 rm = insn & 0xf;
9284 switch (op1) {
9285 case 0: /* strex */
9286 gen_store_exclusive(s, rd, rm, 15, addr, 2);
9287 break;
9288 case 1: /* strexd */
9289 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
9290 break;
9291 case 2: /* strexb */
9292 gen_store_exclusive(s, rd, rm, 15, addr, 0);
9293 break;
9294 case 3: /* strexh */
9295 gen_store_exclusive(s, rd, rm, 15, addr, 1);
9296 break;
9297 default:
9298 abort();
9301 tcg_temp_free_i32(addr);
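/* The final argument to gen_load_exclusive()/gen_store_exclusive() is the
 * log2 of the access size (0/1/2/3 = byte/halfword/word/doubleword).  The
 * rt2 argument only matters for the doubleword forms, where the second
 * data register is implicitly rd + 1 (or rm + 1); 15 is passed when there
 * is no second register.
 */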
9302 } else if ((insn & 0x00300f00) == 0) {
9303 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
9304 * - SWP, SWPB
9307 TCGv taddr;
9308 TCGMemOp opc = s->be_data;
9310 rm = (insn) & 0xf;
9312 if (insn & (1 << 22)) {
9313 opc |= MO_UB;
9314 } else {
9315 opc |= MO_UL | MO_ALIGN;
9318 addr = load_reg(s, rn);
9319 taddr = gen_aa32_addr(s, addr, opc);
9320 tcg_temp_free_i32(addr);
9322 tmp = load_reg(s, rm);
9323 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
9324 get_mem_index(s), opc);
9325 tcg_temp_free(taddr);
9326 store_reg(s, rd, tmp);
9327 } else {
9328 goto illegal_op;
9331 } else {
9332 int address_offset;
9333 bool load = insn & (1 << 20);
9334 bool wbit = insn & (1 << 21);
9335 bool pbit = insn & (1 << 24);
9336 bool doubleword = false;
9337 ISSInfo issinfo;
9339 /* Misc load/store */
9340 rn = (insn >> 16) & 0xf;
9341 rd = (insn >> 12) & 0xf;
9343 /* ISS not valid if writeback */
9344 issinfo = (pbit && !wbit) ? rd : ISSInvalid;
9346 if (!load && (sh & 2)) {
9347 /* doubleword */
9348 ARCH(5TE);
9349 if (rd & 1) {
9350 /* UNPREDICTABLE; we choose to UNDEF */
9351 goto illegal_op;
9353 load = (sh & 1) == 0;
9354 doubleword = true;
9357 addr = load_reg(s, rn);
9358 if (pbit) {
9359 gen_add_datah_offset(s, insn, 0, addr);
9361 address_offset = 0;
9363 if (doubleword) {
9364 if (!load) {
9365 /* store */
9366 tmp = load_reg(s, rd);
9367 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9368 tcg_temp_free_i32(tmp);
9369 tcg_gen_addi_i32(addr, addr, 4);
9370 tmp = load_reg(s, rd + 1);
9371 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9372 tcg_temp_free_i32(tmp);
9373 } else {
9374 /* load */
9375 tmp = tcg_temp_new_i32();
9376 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9377 store_reg(s, rd, tmp);
9378 tcg_gen_addi_i32(addr, addr, 4);
9379 tmp = tcg_temp_new_i32();
9380 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9381 rd++;
9383 address_offset = -4;
9384 } else if (load) {
9385 /* load */
9386 tmp = tcg_temp_new_i32();
9387 switch (sh) {
9388 case 1:
9389 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9390 issinfo);
9391 break;
9392 case 2:
9393 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9394 issinfo);
9395 break;
9396 default:
9397 case 3:
9398 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9399 issinfo);
9400 break;
9402 } else {
9403 /* store */
9404 tmp = load_reg(s, rd);
9405 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
9406 tcg_temp_free_i32(tmp);
9408 /* Perform base writeback before storing the loaded value, to
9409 ensure correct behavior with overlapping index registers.
9410 ldrd with base writeback is undefined if the
9411 destination and index registers overlap. */
9412 if (!pbit) {
9413 gen_add_datah_offset(s, insn, address_offset, addr);
9414 store_reg(s, rn, addr);
9415 } else if (wbit) {
9416 if (address_offset)
9417 tcg_gen_addi_i32(addr, addr, address_offset);
9418 store_reg(s, rn, addr);
9419 } else {
9420 tcg_temp_free_i32(addr);
9422 if (load) {
9423 /* Complete the load. */
9424 store_reg(s, rd, tmp);
9427 break;
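/* For the doubleword forms addr has already been advanced by 4 to reach
 * the second word, which is why address_offset is set to -4 above: the
 * post-indexed writeback path (gen_add_datah_offset() with address_offset)
 * then produces the architected base value of Rn + offset rather than
 * Rn + offset + 4.
 */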
9428 case 0x4:
9429 case 0x5:
9430 goto do_ldst;
9431 case 0x6:
9432 case 0x7:
9433 if (insn & (1 << 4)) {
9434 ARCH(6);
9435 /* Armv6 Media instructions. */
9436 rm = insn & 0xf;
9437 rn = (insn >> 16) & 0xf;
9438 rd = (insn >> 12) & 0xf;
9439 rs = (insn >> 8) & 0xf;
9440 switch ((insn >> 23) & 3) {
9441 case 0: /* Parallel add/subtract. */
9442 op1 = (insn >> 20) & 7;
9443 tmp = load_reg(s, rn);
9444 tmp2 = load_reg(s, rm);
9445 sh = (insn >> 5) & 7;
9446 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
9447 goto illegal_op;
9448 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
9449 tcg_temp_free_i32(tmp2);
9450 store_reg(s, rd, tmp);
9451 break;
9452 case 1:
9453 if ((insn & 0x00700020) == 0) {
9454 /* Halfword pack. */
9455 tmp = load_reg(s, rn);
9456 tmp2 = load_reg(s, rm);
9457 shift = (insn >> 7) & 0x1f;
9458 if (insn & (1 << 6)) {
9459 /* pkhtb */
9460 if (shift == 0)
9461 shift = 31;
9462 tcg_gen_sari_i32(tmp2, tmp2, shift);
9463 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9464 tcg_gen_ext16u_i32(tmp2, tmp2);
9465 } else {
9466 /* pkhbt */
9467 if (shift)
9468 tcg_gen_shli_i32(tmp2, tmp2, shift);
9469 tcg_gen_ext16u_i32(tmp, tmp);
9470 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9472 tcg_gen_or_i32(tmp, tmp, tmp2);
9473 tcg_temp_free_i32(tmp2);
9474 store_reg(s, rd, tmp);
9475 } else if ((insn & 0x00200020) == 0x00200000) {
9476 /* [us]sat */
9477 tmp = load_reg(s, rm);
9478 shift = (insn >> 7) & 0x1f;
9479 if (insn & (1 << 6)) {
9480 if (shift == 0)
9481 shift = 31;
9482 tcg_gen_sari_i32(tmp, tmp, shift);
9483 } else {
9484 tcg_gen_shli_i32(tmp, tmp, shift);
9486 sh = (insn >> 16) & 0x1f;
9487 tmp2 = tcg_const_i32(sh);
9488 if (insn & (1 << 22))
9489 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
9490 else
9491 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
9492 tcg_temp_free_i32(tmp2);
9493 store_reg(s, rd, tmp);
9494 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9495 /* [us]sat16 */
9496 tmp = load_reg(s, rm);
9497 sh = (insn >> 16) & 0x1f;
9498 tmp2 = tcg_const_i32(sh);
9499 if (insn & (1 << 22))
9500 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9501 else
9502 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9503 tcg_temp_free_i32(tmp2);
9504 store_reg(s, rd, tmp);
9505 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9506 /* Select bytes. */
9507 tmp = load_reg(s, rn);
9508 tmp2 = load_reg(s, rm);
9509 tmp3 = tcg_temp_new_i32();
9510 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
9511 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
9512 tcg_temp_free_i32(tmp3);
9513 tcg_temp_free_i32(tmp2);
9514 store_reg(s, rd, tmp);
9515 } else if ((insn & 0x000003e0) == 0x00000060) {
9516 tmp = load_reg(s, rm);
9517 shift = (insn >> 10) & 3;
9518 /* ??? In many cases it's not necessary to do a
9519 rotate, a shift is sufficient. */
9520 if (shift != 0)
9521 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9522 op1 = (insn >> 20) & 7;
9523 switch (op1) {
9524 case 0: gen_sxtb16(tmp); break;
9525 case 2: gen_sxtb(tmp); break;
9526 case 3: gen_sxth(tmp); break;
9527 case 4: gen_uxtb16(tmp); break;
9528 case 6: gen_uxtb(tmp); break;
9529 case 7: gen_uxth(tmp); break;
9530 default: goto illegal_op;
9532 if (rn != 15) {
9533 tmp2 = load_reg(s, rn);
9534 if ((op1 & 3) == 0) {
9535 gen_add16(tmp, tmp2);
9536 } else {
9537 tcg_gen_add_i32(tmp, tmp, tmp2);
9538 tcg_temp_free_i32(tmp2);
9541 store_reg(s, rd, tmp);
9542 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9543 /* rev */
9544 tmp = load_reg(s, rm);
9545 if (insn & (1 << 22)) {
9546 if (insn & (1 << 7)) {
9547 gen_revsh(tmp);
9548 } else {
9549 ARCH(6T2);
9550 gen_helper_rbit(tmp, tmp);
9552 } else {
9553 if (insn & (1 << 7))
9554 gen_rev16(tmp);
9555 else
9556 tcg_gen_bswap32_i32(tmp, tmp);
9558 store_reg(s, rd, tmp);
9559 } else {
9560 goto illegal_op;
9562 break;
9563 case 2: /* Multiplies (Type 3). */
9564 switch ((insn >> 20) & 0x7) {
9565 case 5:
9566 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9567 /* op2 not 00x or 11x : UNDEF */
9568 goto illegal_op;
9570 /* Signed multiply most significant [accumulate].
9571 (SMMUL, SMMLA, SMMLS) */
9572 tmp = load_reg(s, rm);
9573 tmp2 = load_reg(s, rs);
9574 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9576 if (rd != 15) {
9577 tmp = load_reg(s, rd);
9578 if (insn & (1 << 6)) {
9579 tmp64 = gen_subq_msw(tmp64, tmp);
9580 } else {
9581 tmp64 = gen_addq_msw(tmp64, tmp);
9584 if (insn & (1 << 5)) {
9585 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9587 tcg_gen_shri_i64(tmp64, tmp64, 32);
9588 tmp = tcg_temp_new_i32();
9589 tcg_gen_extrl_i64_i32(tmp, tmp64);
9590 tcg_temp_free_i64(tmp64);
9591 store_reg(s, rn, tmp);
9592 break;
9593 case 0:
9594 case 4:
9595 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9596 if (insn & (1 << 7)) {
9597 goto illegal_op;
9599 tmp = load_reg(s, rm);
9600 tmp2 = load_reg(s, rs);
9601 if (insn & (1 << 5))
9602 gen_swap_half(tmp2);
9603 gen_smul_dual(tmp, tmp2);
9604 if (insn & (1 << 22)) {
9605 /* smlald, smlsld */
9606 TCGv_i64 tmp64_2;
9608 tmp64 = tcg_temp_new_i64();
9609 tmp64_2 = tcg_temp_new_i64();
9610 tcg_gen_ext_i32_i64(tmp64, tmp);
9611 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
9612 tcg_temp_free_i32(tmp);
9613 tcg_temp_free_i32(tmp2);
9614 if (insn & (1 << 6)) {
9615 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9616 } else {
9617 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9619 tcg_temp_free_i64(tmp64_2);
9620 gen_addq(s, tmp64, rd, rn);
9621 gen_storeq_reg(s, rd, rn, tmp64);
9622 tcg_temp_free_i64(tmp64);
9623 } else {
9624 /* smuad, smusd, smlad, smlsd */
9625 if (insn & (1 << 6)) {
9626 /* This subtraction cannot overflow. */
9627 tcg_gen_sub_i32(tmp, tmp, tmp2);
9628 } else {
9629 /* This addition cannot overflow 32 bits;
9630 * however it may overflow considered as a
9631 * signed operation, in which case we must set
9632 * the Q flag.
9634 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9636 tcg_temp_free_i32(tmp2);
9637 if (rd != 15)
9639 tmp2 = load_reg(s, rd);
9640 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9641 tcg_temp_free_i32(tmp2);
9643 store_reg(s, rn, tmp);
9645 break;
9646 case 1:
9647 case 3:
9648 /* SDIV, UDIV */
9649 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
9650 goto illegal_op;
9652 if (((insn >> 5) & 7) || (rd != 15)) {
9653 goto illegal_op;
9655 tmp = load_reg(s, rm);
9656 tmp2 = load_reg(s, rs);
9657 if (insn & (1 << 21)) {
9658 gen_helper_udiv(tmp, tmp, tmp2);
9659 } else {
9660 gen_helper_sdiv(tmp, tmp, tmp2);
9662 tcg_temp_free_i32(tmp2);
9663 store_reg(s, rn, tmp);
9664 break;
9665 default:
9666 goto illegal_op;
9668 break;
9669 case 3:
9670 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9671 switch (op1) {
9672 case 0: /* Unsigned sum of absolute differences. */
9673 ARCH(6);
9674 tmp = load_reg(s, rm);
9675 tmp2 = load_reg(s, rs);
9676 gen_helper_usad8(tmp, tmp, tmp2);
9677 tcg_temp_free_i32(tmp2);
9678 if (rd != 15) {
9679 tmp2 = load_reg(s, rd);
9680 tcg_gen_add_i32(tmp, tmp, tmp2);
9681 tcg_temp_free_i32(tmp2);
9683 store_reg(s, rn, tmp);
9684 break;
9685 case 0x20: case 0x24: case 0x28: case 0x2c:
9686 /* Bitfield insert/clear. */
9687 ARCH(6T2);
9688 shift = (insn >> 7) & 0x1f;
9689 i = (insn >> 16) & 0x1f;
9690 if (i < shift) {
9691 /* UNPREDICTABLE; we choose to UNDEF */
9692 goto illegal_op;
9694 i = i + 1 - shift;
9695 if (rm == 15) {
9696 tmp = tcg_temp_new_i32();
9697 tcg_gen_movi_i32(tmp, 0);
9698 } else {
9699 tmp = load_reg(s, rm);
9701 if (i != 32) {
9702 tmp2 = load_reg(s, rd);
9703 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
9704 tcg_temp_free_i32(tmp2);
9706 store_reg(s, rd, tmp);
9707 break;
9708 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9709 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
9710 ARCH(6T2);
9711 tmp = load_reg(s, rm);
9712 shift = (insn >> 7) & 0x1f;
9713 i = ((insn >> 16) & 0x1f) + 1;
9714 if (shift + i > 32)
9715 goto illegal_op;
9716 if (i < 32) {
9717 if (op1 & 0x20) {
9718 tcg_gen_extract_i32(tmp, tmp, shift, i);
9719 } else {
9720 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9723 store_reg(s, rd, tmp);
9724 break;
9725 default:
9726 goto illegal_op;
9728 break;
9730 break;
9732 do_ldst:
9733 /* Check for undefined extension instructions
9734 * per the ARM ARM, i.e.:
9735 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9737 sh = (0xf << 20) | (0xf << 4);
9738 if (op1 == 0x7 && ((insn & sh) == sh))
9740 goto illegal_op;
9742 /* load/store byte/word */
9743 rn = (insn >> 16) & 0xf;
9744 rd = (insn >> 12) & 0xf;
9745 tmp2 = load_reg(s, rn);
9746 if ((insn & 0x01200000) == 0x00200000) {
9747 /* ldrt/strt */
9748 i = get_a32_user_mem_index(s);
9749 } else {
9750 i = get_mem_index(s);
9752 if (insn & (1 << 24))
9753 gen_add_data_offset(s, insn, tmp2);
9754 if (insn & (1 << 20)) {
9755 /* load */
9756 tmp = tcg_temp_new_i32();
9757 if (insn & (1 << 22)) {
9758 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9759 } else {
9760 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9762 } else {
9763 /* store */
9764 tmp = load_reg(s, rd);
9765 if (insn & (1 << 22)) {
9766 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
9767 } else {
9768 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
9770 tcg_temp_free_i32(tmp);
9772 if (!(insn & (1 << 24))) {
9773 gen_add_data_offset(s, insn, tmp2);
9774 store_reg(s, rn, tmp2);
9775 } else if (insn & (1 << 21)) {
9776 store_reg(s, rn, tmp2);
9777 } else {
9778 tcg_temp_free_i32(tmp2);
9780 if (insn & (1 << 20)) {
9781 /* Complete the load. */
9782 store_reg_from_load(s, rd, tmp);
9784 break;
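/* Addressing-mode summary for the word/byte load/store above: bit 24 (P)
 * selects pre-indexing and bit 21 (W) requests writeback.  Post-indexed
 * forms (P == 0) always write the updated base back, and P == 0 with
 * W == 1 is the unprivileged LDRT/STRT family, which is why that case
 * uses get_a32_user_mem_index() rather than get_mem_index().
 */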
9785 case 0x08:
9786 case 0x09:
9788 int j, n, loaded_base;
9789 bool exc_return = false;
9790 bool is_load = extract32(insn, 20, 1);
9791 bool user = false;
9792 TCGv_i32 loaded_var;
9793 /* load/store multiple words */
9794 /* XXX: store correct base if write back */
9795 if (insn & (1 << 22)) {
9796 /* LDM (user), LDM (exception return) and STM (user) */
9797 if (IS_USER(s))
9798 goto illegal_op; /* only usable in supervisor mode */
9800 if (is_load && extract32(insn, 15, 1)) {
9801 exc_return = true;
9802 } else {
9803 user = true;
9806 rn = (insn >> 16) & 0xf;
9807 addr = load_reg(s, rn);
9809 /* compute total size */
9810 loaded_base = 0;
9811 loaded_var = NULL;
9812 n = 0;
9813 for(i=0;i<16;i++) {
9814 if (insn & (1 << i))
9815 n++;
9817 /* XXX: test invalid n == 0 case ? */
9818 if (insn & (1 << 23)) {
9819 if (insn & (1 << 24)) {
9820 /* pre increment */
9821 tcg_gen_addi_i32(addr, addr, 4);
9822 } else {
9823 /* post increment */
9825 } else {
9826 if (insn & (1 << 24)) {
9827 /* pre decrement */
9828 tcg_gen_addi_i32(addr, addr, -(n * 4));
9829 } else {
9830 /* post decrement */
9831 if (n != 1)
9832 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9835 j = 0;
9836 for(i=0;i<16;i++) {
9837 if (insn & (1 << i)) {
9838 if (is_load) {
9839 /* load */
9840 tmp = tcg_temp_new_i32();
9841 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9842 if (user) {
9843 tmp2 = tcg_const_i32(i);
9844 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
9845 tcg_temp_free_i32(tmp2);
9846 tcg_temp_free_i32(tmp);
9847 } else if (i == rn) {
9848 loaded_var = tmp;
9849 loaded_base = 1;
9850 } else if (rn == 15 && exc_return) {
9851 store_pc_exc_ret(s, tmp);
9852 } else {
9853 store_reg_from_load(s, i, tmp);
9855 } else {
9856 /* store */
9857 if (i == 15) {
9858 /* special case: r15 = PC + 8 */
9859 val = (long)s->pc + 4;
9860 tmp = tcg_temp_new_i32();
9861 tcg_gen_movi_i32(tmp, val);
9862 } else if (user) {
9863 tmp = tcg_temp_new_i32();
9864 tmp2 = tcg_const_i32(i);
9865 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
9866 tcg_temp_free_i32(tmp2);
9867 } else {
9868 tmp = load_reg(s, i);
9870 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9871 tcg_temp_free_i32(tmp);
9873 j++;
9874 /* no need to add after the last transfer */
9875 if (j != n)
9876 tcg_gen_addi_i32(addr, addr, 4);
9879 if (insn & (1 << 21)) {
9880 /* write back */
9881 if (insn & (1 << 23)) {
9882 if (insn & (1 << 24)) {
9883 /* pre increment */
9884 } else {
9885 /* post increment */
9886 tcg_gen_addi_i32(addr, addr, 4);
9888 } else {
9889 if (insn & (1 << 24)) {
9890 /* pre decrement */
9891 if (n != 1)
9892 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9893 } else {
9894 /* post decrement */
9895 tcg_gen_addi_i32(addr, addr, -(n * 4));
9898 store_reg(s, rn, addr);
9899 } else {
9900 tcg_temp_free_i32(addr);
9902 if (loaded_base) {
9903 store_reg(s, rn, loaded_var);
9905 if (exc_return) {
9906 /* Restore CPSR from SPSR. */
9907 tmp = load_cpu_field(spsr);
9908 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9909 gen_io_start();
9911 gen_helper_cpsr_write_eret(cpu_env, tmp);
9912 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9913 gen_io_end();
9915 tcg_temp_free_i32(tmp);
9916 /* Must exit loop to check un-masked IRQs */
9917 s->base.is_jmp = DISAS_EXIT;
9920 break;
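/* When bit 22 is set and the register list includes the PC, an LDM
 * executed in a privileged mode is an exception return: the PC is written
 * via store_pc_exc_ret() and CPSR is then restored from SPSR with
 * cpsr_write_eret (bracketed by gen_io_start()/gen_io_end() when icount
 * is in use).  The TB ends with DISAS_EXIT so that newly unmasked
 * interrupts are taken immediately.
 */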
9921 case 0xa:
9922 case 0xb:
9924 int32_t offset;
9926 /* branch (and link) */
9927 val = (int32_t)s->pc;
9928 if (insn & (1 << 24)) {
9929 tmp = tcg_temp_new_i32();
9930 tcg_gen_movi_i32(tmp, val);
9931 store_reg(s, 14, tmp);
9933 offset = sextract32(insn << 2, 0, 26);
9934 val += offset + 4;
9935 gen_jmp(s, val);
9937 break;
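/* Branch target arithmetic, informally (illustrative names only):
 *
 *   lr     = addr_of_insn + 4;                    // only if bit 24 (BL)
 *   target = addr_of_insn + 8 + SignExtend(imm24) * 4;
 *
 * sextract32(insn << 2, 0, 26) sign-extends the 24-bit field and scales it
 * by 4 in one step; the extra "+ 4" accounts for s->pc already pointing at
 * the next insn rather than the architectural PC + 8.
 */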
9938 case 0xc:
9939 case 0xd:
9940 case 0xe:
9941 if (((insn >> 8) & 0xe) == 10) {
9942 /* VFP. */
9943 if (disas_vfp_insn(s, insn)) {
9944 goto illegal_op;
9946 } else if (disas_coproc_insn(s, insn)) {
9947 /* Coprocessor. */
9948 goto illegal_op;
9950 break;
9951 case 0xf:
9952 /* swi */
9953 gen_set_pc_im(s, s->pc);
9954 s->svc_imm = extract32(insn, 0, 24);
9955 s->base.is_jmp = DISAS_SWI;
9956 break;
9957 default:
9958 illegal_op:
9959 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9960 default_exception_el(s));
9961 break;
9966 static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
9968 /* Return true if this is a 16 bit instruction. We must be precise
9969 * about this (matching the decode). We assume that s->pc still
9970 * points to the first 16 bits of the insn.
9972 if ((insn >> 11) < 0x1d) {
9973 /* Definitely a 16-bit instruction */
9974 return true;
9977 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
9978 * first half of a 32-bit Thumb insn. Thumb-1 cores might
9979 * end up actually treating this as two 16-bit insns, though,
9980 * if it's half of a bl/blx pair that might span a page boundary.
9982 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
9983 arm_dc_feature(s, ARM_FEATURE_M)) {
9984 /* Thumb2 cores (including all M profile ones) always treat
9985 * 32-bit insns as 32-bit.
9987 return false;
9990 if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
9991 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
9992 * is not on the next page; we merge this into a 32-bit
9993 * insn.
9995 return false;
9997 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
9998 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
9999 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
10000 * -- handle as single 16 bit insn
10002 return true;
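/* Example, assuming a Thumb-2 capable core: a first halfword of 0xb580
 * (push {r7, lr}) has (insn >> 11) == 0x16 < 0x1d and decodes as a 16-bit
 * insn, whereas 0xe92d (first half of stmdb/push.w) has
 * (insn >> 11) == 0x1d and is treated as the first half of a 32-bit insn.
 */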
10005 /* Return true if this is a Thumb-2 logical op. */
10006 static int
10007 thumb2_logic_op(int op)
10009 return (op < 8);
10012 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
10013 then set condition code flags based on the result of the operation.
10014 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
10015 to the high bit of T1.
10016 Returns zero if the opcode is valid. */
10018 static int
10019 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
10020 TCGv_i32 t0, TCGv_i32 t1)
10022 int logic_cc;
10024 logic_cc = 0;
10025 switch (op) {
10026 case 0: /* and */
10027 tcg_gen_and_i32(t0, t0, t1);
10028 logic_cc = conds;
10029 break;
10030 case 1: /* bic */
10031 tcg_gen_andc_i32(t0, t0, t1);
10032 logic_cc = conds;
10033 break;
10034 case 2: /* orr */
10035 tcg_gen_or_i32(t0, t0, t1);
10036 logic_cc = conds;
10037 break;
10038 case 3: /* orn */
10039 tcg_gen_orc_i32(t0, t0, t1);
10040 logic_cc = conds;
10041 break;
10042 case 4: /* eor */
10043 tcg_gen_xor_i32(t0, t0, t1);
10044 logic_cc = conds;
10045 break;
10046 case 8: /* add */
10047 if (conds)
10048 gen_add_CC(t0, t0, t1);
10049 else
10050 tcg_gen_add_i32(t0, t0, t1);
10051 break;
10052 case 10: /* adc */
10053 if (conds)
10054 gen_adc_CC(t0, t0, t1);
10055 else
10056 gen_adc(t0, t1);
10057 break;
10058 case 11: /* sbc */
10059 if (conds) {
10060 gen_sbc_CC(t0, t0, t1);
10061 } else {
10062 gen_sub_carry(t0, t0, t1);
10064 break;
10065 case 13: /* sub */
10066 if (conds)
10067 gen_sub_CC(t0, t0, t1);
10068 else
10069 tcg_gen_sub_i32(t0, t0, t1);
10070 break;
10071 case 14: /* rsb */
10072 if (conds)
10073 gen_sub_CC(t0, t1, t0);
10074 else
10075 tcg_gen_sub_i32(t0, t1, t0);
10076 break;
10077 default: /* 5, 6, 7, 9, 12, 15. */
10078 return 1;
10080 if (logic_cc) {
10081 gen_logic_CC(t0);
10082 if (shifter_out)
10083 gen_set_CF_bit31(t1);
10085 return 0;
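/* The op values handled above follow the Thumb-2 data-processing opcode
 * field: 0 AND, 1 BIC, 2 ORR, 3 ORN, 4 EOR, 8 ADD, 10 ADC, 11 SBC, 13 SUB,
 * 14 RSB.  The remaining encodings (5, 6, 7, 9, 12, 15) return 1, which
 * the caller turns into an UNDEF.
 */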
10088 /* Translate a 32-bit thumb instruction. */
10089 static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
10091 uint32_t imm, shift, offset;
10092 uint32_t rd, rn, rm, rs;
10093 TCGv_i32 tmp;
10094 TCGv_i32 tmp2;
10095 TCGv_i32 tmp3;
10096 TCGv_i32 addr;
10097 TCGv_i64 tmp64;
10098 int op;
10099 int shiftop;
10100 int conds;
10101 int logic_cc;
10104 * ARMv6-M supports a limited subset of Thumb2 instructions.
10105 * Other Thumb1 architectures allow only 32-bit
10106 * combined BL/BLX prefix and suffix.
10108 if (arm_dc_feature(s, ARM_FEATURE_M) &&
10109 !arm_dc_feature(s, ARM_FEATURE_V7)) {
10110 int i;
10111 bool found = false;
10112 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
10113 0xf3b08040 /* dsb */,
10114 0xf3b08050 /* dmb */,
10115 0xf3b08060 /* isb */,
10116 0xf3e08000 /* mrs */,
10117 0xf000d000 /* bl */};
10118 static const uint32_t armv6m_mask[] = {0xffe0d000,
10119 0xfff0d0f0,
10120 0xfff0d0f0,
10121 0xfff0d0f0,
10122 0xffe0d000,
10123 0xf800d000};
10125 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
10126 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
10127 found = true;
10128 break;
10131 if (!found) {
10132 goto illegal_op;
10134 } else if ((insn & 0xf800e800) != 0xf000e800) {
10135 ARCH(6T2);
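/* For v6-M cores (ARM_FEATURE_M without ARM_FEATURE_V7) the whitelist
 * above is exhaustive: MSR, MRS, DSB, DMB, ISB and BL are the only legal
 * 32-bit encodings, and anything else has already been rejected.  All
 * other cores simply require 6T2 for any 32-bit insn that is not a
 * combined BL/BLX prefix and suffix.
 */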
10138 rn = (insn >> 16) & 0xf;
10139 rs = (insn >> 12) & 0xf;
10140 rd = (insn >> 8) & 0xf;
10141 rm = insn & 0xf;
10142 switch ((insn >> 25) & 0xf) {
10143 case 0: case 1: case 2: case 3:
10144 /* 16-bit instructions. Should never happen. */
10145 abort();
10146 case 4:
10147 if (insn & (1 << 22)) {
10148 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
10149 * - load/store doubleword, load/store exclusive, load-acquire/store-release,
10150 * table branch, TT.
10152 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
10153 arm_dc_feature(s, ARM_FEATURE_V8)) {
10154 /* 0b1110_1001_0111_1111_1110_1001_0111_1111
10155 * - SG (v8M only)
10156 * The bulk of the behaviour for this instruction is implemented
10157 * in v7m_handle_execute_nsc(), which deals with the insn when
10158 * it is executed by a CPU in non-secure state from memory
10159 * which is Secure & NonSecure-Callable.
10160 * Here we only need to handle the remaining cases:
10161 * * in NS memory (including the "security extension not
10162 * implemented" case) : NOP
10163 * * in S memory but CPU already secure (clear IT bits)
10164 * We know that the attribute for the memory this insn is
10165 * in must match the current CPU state, because otherwise
10166 * get_phys_addr_pmsav8 would have generated an exception.
10168 if (s->v8m_secure) {
10169 /* Like the IT insn, we don't need to generate any code */
10170 s->condexec_cond = 0;
10171 s->condexec_mask = 0;
10173 } else if (insn & 0x01200000) {
10174 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10175 * - load/store dual (post-indexed)
10176 * 0b1110_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
10177 * - load/store dual (literal and immediate)
10178 * 0b1110_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10179 * - load/store dual (pre-indexed)
10181 if (rn == 15) {
10182 if (insn & (1 << 21)) {
10183 /* UNPREDICTABLE */
10184 goto illegal_op;
10186 addr = tcg_temp_new_i32();
10187 tcg_gen_movi_i32(addr, s->pc & ~3);
10188 } else {
10189 addr = load_reg(s, rn);
10191 offset = (insn & 0xff) * 4;
10192 if ((insn & (1 << 23)) == 0)
10193 offset = -offset;
10194 if (insn & (1 << 24)) {
10195 tcg_gen_addi_i32(addr, addr, offset);
10196 offset = 0;
10198 if (insn & (1 << 20)) {
10199 /* ldrd */
10200 tmp = tcg_temp_new_i32();
10201 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10202 store_reg(s, rs, tmp);
10203 tcg_gen_addi_i32(addr, addr, 4);
10204 tmp = tcg_temp_new_i32();
10205 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10206 store_reg(s, rd, tmp);
10207 } else {
10208 /* strd */
10209 tmp = load_reg(s, rs);
10210 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
10211 tcg_temp_free_i32(tmp);
10212 tcg_gen_addi_i32(addr, addr, 4);
10213 tmp = load_reg(s, rd);
10214 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
10215 tcg_temp_free_i32(tmp);
10217 if (insn & (1 << 21)) {
10218 /* Base writeback. */
10219 tcg_gen_addi_i32(addr, addr, offset - 4);
10220 store_reg(s, rn, addr);
10221 } else {
10222 tcg_temp_free_i32(addr);
10224 } else if ((insn & (1 << 23)) == 0) {
10225 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
10226 * - load/store exclusive word
10227 * - TT (v8M only)
10229 if (rs == 15) {
10230 if (!(insn & (1 << 20)) &&
10231 arm_dc_feature(s, ARM_FEATURE_M) &&
10232 arm_dc_feature(s, ARM_FEATURE_V8)) {
10233 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
10234 * - TT (v8M only)
10236 bool alt = insn & (1 << 7);
10237 TCGv_i32 addr, op, ttresp;
10239 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
10240 /* we UNDEF for these UNPREDICTABLE cases */
10241 goto illegal_op;
10244 if (alt && !s->v8m_secure) {
10245 goto illegal_op;
10248 addr = load_reg(s, rn);
10249 op = tcg_const_i32(extract32(insn, 6, 2));
10250 ttresp = tcg_temp_new_i32();
10251 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
10252 tcg_temp_free_i32(addr);
10253 tcg_temp_free_i32(op);
10254 store_reg(s, rd, ttresp);
10255 break;
10257 goto illegal_op;
10259 addr = tcg_temp_local_new_i32();
10260 load_reg_var(s, addr, rn);
10261 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
10262 if (insn & (1 << 20)) {
10263 gen_load_exclusive(s, rs, 15, addr, 2);
10264 } else {
10265 gen_store_exclusive(s, rd, rs, 15, addr, 2);
10267 tcg_temp_free_i32(addr);
10268 } else if ((insn & (7 << 5)) == 0) {
10269 /* Table Branch. */
10270 if (rn == 15) {
10271 addr = tcg_temp_new_i32();
10272 tcg_gen_movi_i32(addr, s->pc);
10273 } else {
10274 addr = load_reg(s, rn);
10276 tmp = load_reg(s, rm);
10277 tcg_gen_add_i32(addr, addr, tmp);
10278 if (insn & (1 << 4)) {
10279 /* tbh */
10280 tcg_gen_add_i32(addr, addr, tmp);
10281 tcg_temp_free_i32(tmp);
10282 tmp = tcg_temp_new_i32();
10283 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
10284 } else { /* tbb */
10285 tcg_temp_free_i32(tmp);
10286 tmp = tcg_temp_new_i32();
10287 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
10289 tcg_temp_free_i32(addr);
10290 tcg_gen_shli_i32(tmp, tmp, 1);
10291 tcg_gen_addi_i32(tmp, tmp, s->pc);
10292 store_reg(s, 15, tmp);
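/* Table Branch reads an unsigned byte (TBB) or halfword (TBH) entry from
 * Rn + Rm, or Rn + 2*Rm for TBH (hence the second add of tmp above),
 * doubles it and adds it to the address of the next insn, which is what
 * s->pc holds at this point.
 */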
10293 } else {
10294 int op2 = (insn >> 6) & 0x3;
10295 op = (insn >> 4) & 0x3;
10296 switch (op2) {
10297 case 0:
10298 goto illegal_op;
10299 case 1:
10300 /* Load/store exclusive byte/halfword/doubleword */
10301 if (op == 2) {
10302 goto illegal_op;
10304 ARCH(7);
10305 break;
10306 case 2:
10307 /* Load-acquire/store-release */
10308 if (op == 3) {
10309 goto illegal_op;
10311 /* Fall through */
10312 case 3:
10313 /* Load-acquire/store-release exclusive */
10314 ARCH(8);
10315 break;
10317 addr = tcg_temp_local_new_i32();
10318 load_reg_var(s, addr, rn);
10319 if (!(op2 & 1)) {
10320 if (insn & (1 << 20)) {
10321 tmp = tcg_temp_new_i32();
10322 switch (op) {
10323 case 0: /* ldab */
10324 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
10325 rs | ISSIsAcqRel);
10326 break;
10327 case 1: /* ldah */
10328 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
10329 rs | ISSIsAcqRel);
10330 break;
10331 case 2: /* lda */
10332 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
10333 rs | ISSIsAcqRel);
10334 break;
10335 default:
10336 abort();
10338 store_reg(s, rs, tmp);
10339 } else {
10340 tmp = load_reg(s, rs);
10341 switch (op) {
10342 case 0: /* stlb */
10343 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
10344 rs | ISSIsAcqRel);
10345 break;
10346 case 1: /* stlh */
10347 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
10348 rs | ISSIsAcqRel);
10349 break;
10350 case 2: /* stl */
10351 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
10352 rs | ISSIsAcqRel);
10353 break;
10354 default:
10355 abort();
10357 tcg_temp_free_i32(tmp);
10359 } else if (insn & (1 << 20)) {
10360 gen_load_exclusive(s, rs, rd, addr, op);
10361 } else {
10362 gen_store_exclusive(s, rm, rs, rd, addr, op);
10364 tcg_temp_free_i32(addr);
10366 } else {
10367 /* Load/store multiple, RFE, SRS. */
10368 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
10369 /* RFE, SRS: not available in user mode or on M profile */
10370 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
10371 goto illegal_op;
10373 if (insn & (1 << 20)) {
10374 /* rfe */
10375 addr = load_reg(s, rn);
10376 if ((insn & (1 << 24)) == 0)
10377 tcg_gen_addi_i32(addr, addr, -8);
10378 /* Load PC into tmp and CPSR into tmp2. */
10379 tmp = tcg_temp_new_i32();
10380 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10381 tcg_gen_addi_i32(addr, addr, 4);
10382 tmp2 = tcg_temp_new_i32();
10383 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
10384 if (insn & (1 << 21)) {
10385 /* Base writeback. */
10386 if (insn & (1 << 24)) {
10387 tcg_gen_addi_i32(addr, addr, 4);
10388 } else {
10389 tcg_gen_addi_i32(addr, addr, -4);
10391 store_reg(s, rn, addr);
10392 } else {
10393 tcg_temp_free_i32(addr);
10395 gen_rfe(s, tmp, tmp2);
10396 } else {
10397 /* srs */
10398 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
10399 insn & (1 << 21));
10401 } else {
10402 int i, loaded_base = 0;
10403 TCGv_i32 loaded_var;
10404 /* Load/store multiple. */
10405 addr = load_reg(s, rn);
10406 offset = 0;
10407 for (i = 0; i < 16; i++) {
10408 if (insn & (1 << i))
10409 offset += 4;
10411 if (insn & (1 << 24)) {
10412 tcg_gen_addi_i32(addr, addr, -offset);
10415 loaded_var = NULL;
10416 for (i = 0; i < 16; i++) {
10417 if ((insn & (1 << i)) == 0)
10418 continue;
10419 if (insn & (1 << 20)) {
10420 /* Load. */
10421 tmp = tcg_temp_new_i32();
10422 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10423 if (i == 15) {
10424 gen_bx_excret(s, tmp);
10425 } else if (i == rn) {
10426 loaded_var = tmp;
10427 loaded_base = 1;
10428 } else {
10429 store_reg(s, i, tmp);
10431 } else {
10432 /* Store. */
10433 tmp = load_reg(s, i);
10434 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
10435 tcg_temp_free_i32(tmp);
10437 tcg_gen_addi_i32(addr, addr, 4);
10439 if (loaded_base) {
10440 store_reg(s, rn, loaded_var);
10442 if (insn & (1 << 21)) {
10443 /* Base register writeback. */
10444 if (insn & (1 << 24)) {
10445 tcg_gen_addi_i32(addr, addr, -offset);
10447 /* Fault if writeback register is in register list. */
10448 if (insn & (1 << rn))
10449 goto illegal_op;
10450 store_reg(s, rn, addr);
10451 } else {
10452 tcg_temp_free_i32(addr);
10456 break;
10457 case 5:
10459 op = (insn >> 21) & 0xf;
10460 if (op == 6) {
10461 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10462 goto illegal_op;
10464 /* Halfword pack. */
10465 tmp = load_reg(s, rn);
10466 tmp2 = load_reg(s, rm);
10467 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
10468 if (insn & (1 << 5)) {
10469 /* pkhtb */
10470 if (shift == 0)
10471 shift = 31;
10472 tcg_gen_sari_i32(tmp2, tmp2, shift);
10473 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
10474 tcg_gen_ext16u_i32(tmp2, tmp2);
10475 } else {
10476 /* pkhbt */
10477 if (shift)
10478 tcg_gen_shli_i32(tmp2, tmp2, shift);
10479 tcg_gen_ext16u_i32(tmp, tmp);
10480 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10482 tcg_gen_or_i32(tmp, tmp, tmp2);
10483 tcg_temp_free_i32(tmp2);
10484 store_reg(s, rd, tmp);
10485 } else {
10486 /* Data processing register constant shift. */
10487 if (rn == 15) {
10488 tmp = tcg_temp_new_i32();
10489 tcg_gen_movi_i32(tmp, 0);
10490 } else {
10491 tmp = load_reg(s, rn);
10493 tmp2 = load_reg(s, rm);
10495 shiftop = (insn >> 4) & 3;
10496 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10497 conds = (insn & (1 << 20)) != 0;
10498 logic_cc = (conds && thumb2_logic_op(op));
10499 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
10500 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
10501 goto illegal_op;
10502 tcg_temp_free_i32(tmp2);
10503 if (rd != 15) {
10504 store_reg(s, rd, tmp);
10505 } else {
10506 tcg_temp_free_i32(tmp);
10509 break;
10510 case 13: /* Misc data processing. */
10511 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
10512 if (op < 4 && (insn & 0xf000) != 0xf000)
10513 goto illegal_op;
10514 switch (op) {
10515 case 0: /* Register controlled shift. */
10516 tmp = load_reg(s, rn);
10517 tmp2 = load_reg(s, rm);
10518 if ((insn & 0x70) != 0)
10519 goto illegal_op;
10520 op = (insn >> 21) & 3;
10521 logic_cc = (insn & (1 << 20)) != 0;
10522 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
10523 if (logic_cc)
10524 gen_logic_CC(tmp);
10525 store_reg(s, rd, tmp);
10526 break;
10527 case 1: /* Sign/zero extend. */
10528 op = (insn >> 20) & 7;
10529 switch (op) {
10530 case 0: /* SXTAH, SXTH */
10531 case 1: /* UXTAH, UXTH */
10532 case 4: /* SXTAB, SXTB */
10533 case 5: /* UXTAB, UXTB */
10534 break;
10535 case 2: /* SXTAB16, SXTB16 */
10536 case 3: /* UXTAB16, UXTB16 */
10537 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10538 goto illegal_op;
10540 break;
10541 default:
10542 goto illegal_op;
10544 if (rn != 15) {
10545 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10546 goto illegal_op;
10549 tmp = load_reg(s, rm);
10550 shift = (insn >> 4) & 3;
10551 /* ??? In many cases it's not necessary to do a
10552 rotate, a shift is sufficient. */
10553 if (shift != 0)
10554 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
10555 op = (insn >> 20) & 7;
10556 switch (op) {
10557 case 0: gen_sxth(tmp); break;
10558 case 1: gen_uxth(tmp); break;
10559 case 2: gen_sxtb16(tmp); break;
10560 case 3: gen_uxtb16(tmp); break;
10561 case 4: gen_sxtb(tmp); break;
10562 case 5: gen_uxtb(tmp); break;
10563 default:
10564 g_assert_not_reached();
10566 if (rn != 15) {
10567 tmp2 = load_reg(s, rn);
10568 if ((op >> 1) == 1) {
10569 gen_add16(tmp, tmp2);
10570 } else {
10571 tcg_gen_add_i32(tmp, tmp, tmp2);
10572 tcg_temp_free_i32(tmp2);
10575 store_reg(s, rd, tmp);
10576 break;
10577 case 2: /* SIMD add/subtract. */
10578 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10579 goto illegal_op;
10581 op = (insn >> 20) & 7;
10582 shift = (insn >> 4) & 7;
10583 if ((op & 3) == 3 || (shift & 3) == 3)
10584 goto illegal_op;
10585 tmp = load_reg(s, rn);
10586 tmp2 = load_reg(s, rm);
10587 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
10588 tcg_temp_free_i32(tmp2);
10589 store_reg(s, rd, tmp);
10590 break;
10591 case 3: /* Other data processing. */
10592 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10593 if (op < 4) {
10594 /* Saturating add/subtract. */
10595 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10596 goto illegal_op;
10598 tmp = load_reg(s, rn);
10599 tmp2 = load_reg(s, rm);
10600 if (op & 1)
10601 gen_helper_double_saturate(tmp, cpu_env, tmp);
10602 if (op & 2)
10603 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
10604 else
10605 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
10606 tcg_temp_free_i32(tmp2);
10607 } else {
10608 switch (op) {
10609 case 0x0a: /* rbit */
10610 case 0x08: /* rev */
10611 case 0x09: /* rev16 */
10612 case 0x0b: /* revsh */
10613 case 0x18: /* clz */
10614 break;
10615 case 0x10: /* sel */
10616 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10617 goto illegal_op;
10619 break;
10620 case 0x20: /* crc32/crc32c */
10621 case 0x21:
10622 case 0x22:
10623 case 0x28:
10624 case 0x29:
10625 case 0x2a:
10626 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10627 goto illegal_op;
10629 break;
10630 default:
10631 goto illegal_op;
10633 tmp = load_reg(s, rn);
10634 switch (op) {
10635 case 0x0a: /* rbit */
10636 gen_helper_rbit(tmp, tmp);
10637 break;
10638 case 0x08: /* rev */
10639 tcg_gen_bswap32_i32(tmp, tmp);
10640 break;
10641 case 0x09: /* rev16 */
10642 gen_rev16(tmp);
10643 break;
10644 case 0x0b: /* revsh */
10645 gen_revsh(tmp);
10646 break;
10647 case 0x10: /* sel */
10648 tmp2 = load_reg(s, rm);
10649 tmp3 = tcg_temp_new_i32();
10650 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
10651 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
10652 tcg_temp_free_i32(tmp3);
10653 tcg_temp_free_i32(tmp2);
10654 break;
10655 case 0x18: /* clz */
10656 tcg_gen_clzi_i32(tmp, tmp, 32);
10657 break;
10658 case 0x20:
10659 case 0x21:
10660 case 0x22:
10661 case 0x28:
10662 case 0x29:
10663 case 0x2a:
10665 /* crc32/crc32c */
10666 uint32_t sz = op & 0x3;
10667 uint32_t c = op & 0x8;
10669 tmp2 = load_reg(s, rm);
10670 if (sz == 0) {
10671 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10672 } else if (sz == 1) {
10673 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10675 tmp3 = tcg_const_i32(1 << sz);
10676 if (c) {
10677 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10678 } else {
10679 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10681 tcg_temp_free_i32(tmp2);
10682 tcg_temp_free_i32(tmp3);
10683 break;
10685 default:
10686 g_assert_not_reached();
10689 store_reg(s, rd, tmp);
10690 break;
10691 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
10692 switch ((insn >> 20) & 7) {
10693 case 0: /* 32 x 32 -> 32 */
10694 case 7: /* Unsigned sum of absolute differences. */
10695 break;
10696 case 1: /* 16 x 16 -> 32 */
10697 case 2: /* Dual multiply add. */
10698 case 3: /* 32 * 16 -> 32msb */
10699 case 4: /* Dual multiply subtract. */
10700 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10701 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10702 goto illegal_op;
10704 break;
10706 op = (insn >> 4) & 0xf;
10707 tmp = load_reg(s, rn);
10708 tmp2 = load_reg(s, rm);
10709 switch ((insn >> 20) & 7) {
10710 case 0: /* 32 x 32 -> 32 */
10711 tcg_gen_mul_i32(tmp, tmp, tmp2);
10712 tcg_temp_free_i32(tmp2);
10713 if (rs != 15) {
10714 tmp2 = load_reg(s, rs);
10715 if (op)
10716 tcg_gen_sub_i32(tmp, tmp2, tmp);
10717 else
10718 tcg_gen_add_i32(tmp, tmp, tmp2);
10719 tcg_temp_free_i32(tmp2);
10721 break;
10722 case 1: /* 16 x 16 -> 32 */
10723 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10724 tcg_temp_free_i32(tmp2);
10725 if (rs != 15) {
10726 tmp2 = load_reg(s, rs);
10727 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10728 tcg_temp_free_i32(tmp2);
10730 break;
10731 case 2: /* Dual multiply add. */
10732 case 4: /* Dual multiply subtract. */
10733 if (op)
10734 gen_swap_half(tmp2);
10735 gen_smul_dual(tmp, tmp2);
10736 if (insn & (1 << 22)) {
10737 /* This subtraction cannot overflow. */
10738 tcg_gen_sub_i32(tmp, tmp, tmp2);
10739 } else {
10740 /* This addition cannot overflow 32 bits;
10741 * however it may overflow considered as a signed
10742 * operation, in which case we must set the Q flag.
10744 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10746 tcg_temp_free_i32(tmp2);
10747 if (rs != 15)
10749 tmp2 = load_reg(s, rs);
10750 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10751 tcg_temp_free_i32(tmp2);
10753 break;
10754 case 3: /* 32 * 16 -> 32msb */
10755 if (op)
10756 tcg_gen_sari_i32(tmp2, tmp2, 16);
10757 else
10758 gen_sxth(tmp2);
10759 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10760 tcg_gen_shri_i64(tmp64, tmp64, 16);
10761 tmp = tcg_temp_new_i32();
10762 tcg_gen_extrl_i64_i32(tmp, tmp64);
10763 tcg_temp_free_i64(tmp64);
10764 if (rs != 15)
10766 tmp2 = load_reg(s, rs);
10767 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10768 tcg_temp_free_i32(tmp2);
10770 break;
10771 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10772 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10773 if (rs != 15) {
10774 tmp = load_reg(s, rs);
10775 if (insn & (1 << 20)) {
10776 tmp64 = gen_addq_msw(tmp64, tmp);
10777 } else {
10778 tmp64 = gen_subq_msw(tmp64, tmp);
10781 if (insn & (1 << 4)) {
10782 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
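/* Bit 4 set means round: adding 0x80000000 before taking the high
 * 32 bits rounds the result instead of truncating it.
 */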
10784 tcg_gen_shri_i64(tmp64, tmp64, 32);
10785 tmp = tcg_temp_new_i32();
10786 tcg_gen_extrl_i64_i32(tmp, tmp64);
10787 tcg_temp_free_i64(tmp64);
10788 break;
10789 case 7: /* Unsigned sum of absolute differences. */
10790 gen_helper_usad8(tmp, tmp, tmp2);
10791 tcg_temp_free_i32(tmp2);
10792 if (rs != 15) {
10793 tmp2 = load_reg(s, rs);
10794 tcg_gen_add_i32(tmp, tmp, tmp2);
10795 tcg_temp_free_i32(tmp2);
10797 break;
10799 store_reg(s, rd, tmp);
10800 break;
10801 case 6: case 7: /* 64-bit multiply, Divide. */
10802 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
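/* op packs insn[7:4] with insn[22:20] (as bits [6:4]) so a single
 * value distinguishes the multiply, divide and accumulate variants.
 */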
10803 tmp = load_reg(s, rn);
10804 tmp2 = load_reg(s, rm);
10805 if ((op & 0x50) == 0x10) {
10806 /* sdiv, udiv */
10807 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
10808 goto illegal_op;
10810 if (op & 0x20)
10811 gen_helper_udiv(tmp, tmp, tmp2);
10812 else
10813 gen_helper_sdiv(tmp, tmp, tmp2);
10814 tcg_temp_free_i32(tmp2);
10815 store_reg(s, rd, tmp);
10816 } else if ((op & 0xe) == 0xc) {
10817 /* Dual multiply accumulate long. */
10818 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10819 tcg_temp_free_i32(tmp);
10820 tcg_temp_free_i32(tmp2);
10821 goto illegal_op;
10823 if (op & 1)
10824 gen_swap_half(tmp2);
10825 gen_smul_dual(tmp, tmp2);
10826 if (op & 0x10) {
10827 tcg_gen_sub_i32(tmp, tmp, tmp2);
10828 } else {
10829 tcg_gen_add_i32(tmp, tmp, tmp2);
10831 tcg_temp_free_i32(tmp2);
10832 /* BUGFIX */
10833 tmp64 = tcg_temp_new_i64();
10834 tcg_gen_ext_i32_i64(tmp64, tmp);
10835 tcg_temp_free_i32(tmp);
10836 gen_addq(s, tmp64, rs, rd);
10837 gen_storeq_reg(s, rs, rd, tmp64);
10838 tcg_temp_free_i64(tmp64);
10839 } else {
10840 if (op & 0x20) {
10841 /* Unsigned 64-bit multiply */
10842 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
10843 } else {
10844 if (op & 8) {
10845 /* smlalxy */
10846 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10847 tcg_temp_free_i32(tmp2);
10848 tcg_temp_free_i32(tmp);
10849 goto illegal_op;
10851 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10852 tcg_temp_free_i32(tmp2);
10853 tmp64 = tcg_temp_new_i64();
10854 tcg_gen_ext_i32_i64(tmp64, tmp);
10855 tcg_temp_free_i32(tmp);
10856 } else {
10857 /* Signed 64-bit multiply */
10858 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10861 if (op & 4) {
10862 /* umaal */
10863 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10864 tcg_temp_free_i64(tmp64);
10865 goto illegal_op;
10867 gen_addq_lo(s, tmp64, rs);
10868 gen_addq_lo(s, tmp64, rd);
10869 } else if (op & 0x40) {
10870 /* 64-bit accumulate. */
10871 gen_addq(s, tmp64, rs, rd);
10873 gen_storeq_reg(s, rs, rd, tmp64);
10874 tcg_temp_free_i64(tmp64);
10876 break;
10878 break;
10879 case 6: case 7: case 14: case 15:
10880 /* Coprocessor. */
10881 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10882 /* We don't currently implement M profile FP support,
10883 * so this entire space should give a NOCP fault, with
10884 * the exception of the v8M VLLDM and VLSTM insns, which
10885 * must be NOPs in Secure state and UNDEF in Nonsecure state.
10887 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
10888 (insn & 0xffa00f00) == 0xec200a00) {
10889 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
10890 * - VLLDM, VLSTM
10891 * We choose to UNDEF if the RAZ bits are non-zero.
10893 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
10894 goto illegal_op;
10896 /* Just NOP since FP support is not implemented */
10897 break;
10899 /* All other insns: NOCP */
10900 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
10901 default_exception_el(s));
10902 break;
10904 if ((insn & 0xfe000a00) == 0xfc000800
10905 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10906 /* The Thumb2 and ARM encodings are identical. */
10907 if (disas_neon_insn_3same_ext(s, insn)) {
10908 goto illegal_op;
10910 } else if ((insn & 0xff000a00) == 0xfe000800
10911 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10912 /* The Thumb2 and ARM encodings are identical. */
10913 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
10914 goto illegal_op;
10916 } else if (((insn >> 24) & 3) == 3) {
10917 /* Translate into the equivalent ARM encoding. */
10918 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
10919 if (disas_neon_data_insn(s, insn)) {
10920 goto illegal_op;
10922 } else if (((insn >> 8) & 0xe) == 10) {
10923 if (disas_vfp_insn(s, insn)) {
10924 goto illegal_op;
10926 } else {
10927 if (insn & (1 << 28))
10928 goto illegal_op;
10929 if (disas_coproc_insn(s, insn)) {
10930 goto illegal_op;
10933 break;
10934 case 8: case 9: case 10: case 11:
10935 if (insn & (1 << 15)) {
10936 /* Branches, misc control. */
10937 if (insn & 0x5000) {
10938 /* Unconditional branch. */
10939 /* signextend(hw1[10:0]) -> offset[:12]. */
10940 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10941 /* hw1[10:0] -> offset[11:1]. */
10942 offset |= (insn & 0x7ff) << 1;
10943 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10944 offset[24:22] already have the same value because of the
10945 sign extension above. */
10946 offset ^= ((~insn) & (1 << 13)) << 10;
10947 offset ^= ((~insn) & (1 << 11)) << 11;
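/* i.e. offset[23] = NOT(J1 EOR S) and offset[22] = NOT(J2 EOR S),
 * matching the architectural description of the 24-bit branch offset.
 */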
10949 if (insn & (1 << 14)) {
10950 /* Branch and link. */
10951 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
10954 offset += s->pc;
10955 if (insn & (1 << 12)) {
10956 /* b/bl */
10957 gen_jmp(s, offset);
10958 } else {
10959 /* blx */
10960 offset &= ~(uint32_t)2;
10961 /* thumb2 bx, no need to check */
10962 gen_bx_im(s, offset);
10964 } else if (((insn >> 23) & 7) == 7) {
10965 /* Misc control */
10966 if (insn & (1 << 13))
10967 goto illegal_op;
10969 if (insn & (1 << 26)) {
10970 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10971 goto illegal_op;
10973 if (!(insn & (1 << 20))) {
10974 /* Hypervisor call (v7) */
10975 int imm16 = extract32(insn, 16, 4) << 12
10976 | extract32(insn, 0, 12);
10977 ARCH(7);
10978 if (IS_USER(s)) {
10979 goto illegal_op;
10981 gen_hvc(s, imm16);
10982 } else {
10983 /* Secure monitor call (v6+) */
10984 ARCH(6K);
10985 if (IS_USER(s)) {
10986 goto illegal_op;
10988 gen_smc(s);
10990 } else {
10991 op = (insn >> 20) & 7;
10992 switch (op) {
10993 case 0: /* msr cpsr. */
10994 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10995 tmp = load_reg(s, rn);
10996 /* the constant is the mask and SYSm fields */
10997 addr = tcg_const_i32(insn & 0xfff);
10998 gen_helper_v7m_msr(cpu_env, addr, tmp);
10999 tcg_temp_free_i32(addr);
11000 tcg_temp_free_i32(tmp);
11001 gen_lookup_tb(s);
11002 break;
11004 /* fall through */
11005 case 1: /* msr spsr. */
11006 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11007 goto illegal_op;
11010 if (extract32(insn, 5, 1)) {
11011 /* MSR (banked) */
11012 int sysm = extract32(insn, 8, 4) |
11013 (extract32(insn, 4, 1) << 4);
11014 int r = op & 1;
11016 gen_msr_banked(s, r, sysm, rm);
11017 break;
11020 /* MSR (for PSRs) */
11021 tmp = load_reg(s, rn);
11022 if (gen_set_psr(s,
11023 msr_mask(s, (insn >> 8) & 0xf, op == 1),
11024 op == 1, tmp))
11025 goto illegal_op;
11026 break;
11027 case 2: /* cps, nop-hint. */
11028 if (((insn >> 8) & 7) == 0) {
11029 gen_nop_hint(s, insn & 0xff);
11031 /* Implemented as NOP in user mode. */
11032 if (IS_USER(s))
11033 break;
11034 offset = 0;
11035 imm = 0;
11036 if (insn & (1 << 10)) {
11037 if (insn & (1 << 7))
11038 offset |= CPSR_A;
11039 if (insn & (1 << 6))
11040 offset |= CPSR_I;
11041 if (insn & (1 << 5))
11042 offset |= CPSR_F;
11043 if (insn & (1 << 9))
11044 imm = CPSR_A | CPSR_I | CPSR_F;
11046 if (insn & (1 << 8)) {
11047 offset |= 0x1f;
11048 imm |= (insn & 0x1f);
11050 if (offset) {
11051 gen_set_psr_im(s, offset, 0, imm);
11053 break;
11054 case 3: /* Special control operations. */
11055 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
11056 !arm_dc_feature(s, ARM_FEATURE_M)) {
11057 goto illegal_op;
11059 op = (insn >> 4) & 0xf;
11060 switch (op) {
11061 case 2: /* clrex */
11062 gen_clrex(s);
11063 break;
11064 case 4: /* dsb */
11065 case 5: /* dmb */
11066 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
11067 break;
11068 case 6: /* isb */
11069 /* We need to break the TB after this insn
11070 * to execute self-modifying code correctly
11071 * and also to take any pending interrupts
11072 * immediately.
11074 gen_goto_tb(s, 0, s->pc & ~1);
11075 break;
11076 default:
11077 goto illegal_op;
11079 break;
11080 case 4: /* bxj */
11081 /* Trivial implementation equivalent to bx.
11082 * This instruction doesn't exist at all for M-profile.
11084 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11085 goto illegal_op;
11087 tmp = load_reg(s, rn);
11088 gen_bx(s, tmp);
11089 break;
11090 case 5: /* Exception return. */
11091 if (IS_USER(s)) {
11092 goto illegal_op;
11094 if (rn != 14 || rd != 15) {
11095 goto illegal_op;
11097 tmp = load_reg(s, rn);
11098 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
11099 gen_exception_return(s, tmp);
11100 break;
11101 case 6: /* MRS */
11102 if (extract32(insn, 5, 1) &&
11103 !arm_dc_feature(s, ARM_FEATURE_M)) {
11104 /* MRS (banked) */
11105 int sysm = extract32(insn, 16, 4) |
11106 (extract32(insn, 4, 1) << 4);
11108 gen_mrs_banked(s, 0, sysm, rd);
11109 break;
11112 if (extract32(insn, 16, 4) != 0xf) {
11113 goto illegal_op;
11115 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
11116 extract32(insn, 0, 8) != 0) {
11117 goto illegal_op;
11120 /* mrs cpsr */
11121 tmp = tcg_temp_new_i32();
11122 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11123 addr = tcg_const_i32(insn & 0xff);
11124 gen_helper_v7m_mrs(tmp, cpu_env, addr);
11125 tcg_temp_free_i32(addr);
11126 } else {
11127 gen_helper_cpsr_read(tmp, cpu_env);
11129 store_reg(s, rd, tmp);
11130 break;
11131 case 7: /* MRS */
11132 if (extract32(insn, 5, 1) &&
11133 !arm_dc_feature(s, ARM_FEATURE_M)) {
11134 /* MRS (banked) */
11135 int sysm = extract32(insn, 16, 4) |
11136 (extract32(insn, 4, 1) << 4);
11138 gen_mrs_banked(s, 1, sysm, rd);
11139 break;
11142 /* mrs spsr. */
11143 /* Not accessible in user mode. */
11144 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
11145 goto illegal_op;
11148 if (extract32(insn, 16, 4) != 0xf ||
11149 extract32(insn, 0, 8) != 0) {
11150 goto illegal_op;
11153 tmp = load_cpu_field(spsr);
11154 store_reg(s, rd, tmp);
11155 break;
11158 } else {
11159 /* Conditional branch. */
11160 op = (insn >> 22) & 0xf;
11161 /* Generate a conditional jump to next instruction. */
11162 s->condlabel = gen_new_label();
11163 arm_gen_test_cc(op ^ 1, s->condlabel);
11164 s->condjmp = 1;
11166 /* offset[11:1] = insn[10:0] */
11167 offset = (insn & 0x7ff) << 1;
11168 /* offset[17:12] = insn[21:16]. */
11169 offset |= (insn & 0x003f0000) >> 4;
11170 /* offset[31:20] = insn[26]. */
11171 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
11172 /* offset[18] = insn[13]. */
11173 offset |= (insn & (1 << 13)) << 5;
11174 /* offset[19] = insn[11]. */
11175 offset |= (insn & (1 << 11)) << 8;
11177 /* jump to the offset */
11178 gen_jmp(s, s->pc + offset);
11180 } else {
11181 /* Data processing immediate. */
11182 if (insn & (1 << 25)) {
11183 if (insn & (1 << 24)) {
11184 if (insn & (1 << 20))
11185 goto illegal_op;
11186 /* Bitfield/Saturate. */
11187 op = (insn >> 21) & 7;
11188 imm = insn & 0x1f;
11189 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
11190 if (rn == 15) {
11191 tmp = tcg_temp_new_i32();
11192 tcg_gen_movi_i32(tmp, 0);
11193 } else {
11194 tmp = load_reg(s, rn);
11196 switch (op) {
11197 case 2: /* Signed bitfield extract. */
11198 imm++;
11199 if (shift + imm > 32)
11200 goto illegal_op;
11201 if (imm < 32) {
11202 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
11204 break;
11205 case 6: /* Unsigned bitfield extract. */
11206 imm++;
11207 if (shift + imm > 32)
11208 goto illegal_op;
11209 if (imm < 32) {
11210 tcg_gen_extract_i32(tmp, tmp, shift, imm);
11212 break;
11213 case 3: /* Bitfield insert/clear. */
11214 if (imm < shift)
11215 goto illegal_op;
11216 imm = imm + 1 - shift;
11217 if (imm != 32) {
11218 tmp2 = load_reg(s, rd);
11219 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
11220 tcg_temp_free_i32(tmp2);
11222 break;
11223 case 7:
11224 goto illegal_op;
11225 default: /* Saturate. */
11226 if (shift) {
11227 if (op & 1)
11228 tcg_gen_sari_i32(tmp, tmp, shift);
11229 else
11230 tcg_gen_shli_i32(tmp, tmp, shift);
11232 tmp2 = tcg_const_i32(imm);
11233 if (op & 4) {
11234 /* Unsigned. */
11235 if ((op & 1) && shift == 0) {
11236 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11237 tcg_temp_free_i32(tmp);
11238 tcg_temp_free_i32(tmp2);
11239 goto illegal_op;
11241 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
11242 } else {
11243 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
11245 } else {
11246 /* Signed. */
11247 if ((op & 1) && shift == 0) {
11248 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11249 tcg_temp_free_i32(tmp);
11250 tcg_temp_free_i32(tmp2);
11251 goto illegal_op;
11253 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
11254 } else {
11255 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
11258 tcg_temp_free_i32(tmp2);
11259 break;
11261 store_reg(s, rd, tmp);
11262 } else {
11263 imm = ((insn & 0x04000000) >> 15)
11264 | ((insn & 0x7000) >> 4) | (insn & 0xff);
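/* Assemble i:imm3:imm8 into imm[11:0] for the plain binary
 * immediate forms.
 */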
11265 if (insn & (1 << 22)) {
11266 /* 16-bit immediate. */
11267 imm |= (insn >> 4) & 0xf000;
11268 if (insn & (1 << 23)) {
11269 /* movt */
11270 tmp = load_reg(s, rd);
11271 tcg_gen_ext16u_i32(tmp, tmp);
11272 tcg_gen_ori_i32(tmp, tmp, imm << 16);
11273 } else {
11274 /* movw */
11275 tmp = tcg_temp_new_i32();
11276 tcg_gen_movi_i32(tmp, imm);
11278 } else {
11279 /* Add/sub 12-bit immediate. */
11280 if (rn == 15) {
11281 offset = s->pc & ~(uint32_t)3;
11282 if (insn & (1 << 23))
11283 offset -= imm;
11284 else
11285 offset += imm;
11286 tmp = tcg_temp_new_i32();
11287 tcg_gen_movi_i32(tmp, offset);
11288 } else {
11289 tmp = load_reg(s, rn);
11290 if (insn & (1 << 23))
11291 tcg_gen_subi_i32(tmp, tmp, imm);
11292 else
11293 tcg_gen_addi_i32(tmp, tmp, imm);
11296 store_reg(s, rd, tmp);
11298 } else {
11299 int shifter_out = 0;
11300 /* modified 12-bit immediate. */
11301 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
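/* shift is i:imm3, which selects how the 8-bit value is expanded. */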
11302 imm = (insn & 0xff);
11303 switch (shift) {
11304 case 0: /* XY */
11305 /* Nothing to do. */
11306 break;
11307 case 1: /* 00XY00XY */
11308 imm |= imm << 16;
11309 break;
11310 case 2: /* XY00XY00 */
11311 imm |= imm << 16;
11312 imm <<= 8;
11313 break;
11314 case 3: /* XYXYXYXY */
11315 imm |= imm << 16;
11316 imm |= imm << 8;
11317 break;
11318 default: /* Rotated constant. */
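/* An 8-bit value with its top bit forced set, rotated right by
 * 'shift'; because imm fits in 8 bits and shift is at least 8 here,
 * the left shift by (32 - shift) below is equivalent to the rotate.
 */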
11319 shift = (shift << 1) | (imm >> 7);
11320 imm |= 0x80;
11321 imm = imm << (32 - shift);
11322 shifter_out = 1;
11323 break;
11325 tmp2 = tcg_temp_new_i32();
11326 tcg_gen_movi_i32(tmp2, imm);
11327 rn = (insn >> 16) & 0xf;
11328 if (rn == 15) {
11329 tmp = tcg_temp_new_i32();
11330 tcg_gen_movi_i32(tmp, 0);
11331 } else {
11332 tmp = load_reg(s, rn);
11334 op = (insn >> 21) & 0xf;
11335 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
11336 shifter_out, tmp, tmp2))
11337 goto illegal_op;
11338 tcg_temp_free_i32(tmp2);
11339 rd = (insn >> 8) & 0xf;
11340 if (rd != 15) {
11341 store_reg(s, rd, tmp);
11342 } else {
11343 tcg_temp_free_i32(tmp);
11347 break;
11348 case 12: /* Load/store single data item. */
11350 int postinc = 0;
11351 int writeback = 0;
11352 int memidx;
11353 ISSInfo issinfo;
11355 if ((insn & 0x01100000) == 0x01000000) {
11356 if (disas_neon_ls_insn(s, insn)) {
11357 goto illegal_op;
11359 break;
11361 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
11362 if (rs == 15) {
11363 if (!(insn & (1 << 20))) {
11364 goto illegal_op;
11366 if (op != 2) {
11367 * Byte or halfword load space with dest == r15: memory hints.
11368 * Catch them early so we don't emit pointless addressing code.
11369 * This space is a mix of:
11370 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
11371 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
11372 * cores)
11373 * unallocated hints, which must be treated as NOPs
11374 * UNPREDICTABLE space, which we NOP or UNDEF depending on
11375 * which is easiest for the decoding logic
11376 * Some space which must UNDEF
11378 int op1 = (insn >> 23) & 3;
11379 int op2 = (insn >> 6) & 0x3f;
11380 if (op & 2) {
11381 goto illegal_op;
11383 if (rn == 15) {
11384 /* UNPREDICTABLE, unallocated hint or
11385 * PLD/PLDW/PLI (literal)
11387 return;
11389 if (op1 & 1) {
11390 return; /* PLD/PLDW/PLI or unallocated hint */
11392 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
11393 return; /* PLD/PLDW/PLI or unallocated hint */
11395 /* UNDEF space, or an UNPREDICTABLE */
11396 goto illegal_op;
11399 memidx = get_mem_index(s);
11400 if (rn == 15) {
11401 addr = tcg_temp_new_i32();
11402 /* PC relative. */
11403 /* s->pc has already been incremented by 4. */
11404 imm = s->pc & 0xfffffffc;
11405 if (insn & (1 << 23))
11406 imm += insn & 0xfff;
11407 else
11408 imm -= insn & 0xfff;
11409 tcg_gen_movi_i32(addr, imm);
11410 } else {
11411 addr = load_reg(s, rn);
11412 if (insn & (1 << 23)) {
11413 /* Positive offset. */
11414 imm = insn & 0xfff;
11415 tcg_gen_addi_i32(addr, addr, imm);
11416 } else {
11417 imm = insn & 0xff;
11418 switch ((insn >> 8) & 0xf) {
11419 case 0x0: /* Shifted Register. */
11420 shift = (insn >> 4) & 0xf;
11421 if (shift > 3) {
11422 tcg_temp_free_i32(addr);
11423 goto illegal_op;
11425 tmp = load_reg(s, rm);
11426 if (shift)
11427 tcg_gen_shli_i32(tmp, tmp, shift);
11428 tcg_gen_add_i32(addr, addr, tmp);
11429 tcg_temp_free_i32(tmp);
11430 break;
11431 case 0xc: /* Negative offset. */
11432 tcg_gen_addi_i32(addr, addr, -imm);
11433 break;
11434 case 0xe: /* User privilege. */
11435 tcg_gen_addi_i32(addr, addr, imm);
11436 memidx = get_a32_user_mem_index(s);
11437 break;
11438 case 0x9: /* Post-decrement. */
11439 imm = -imm;
11440 /* Fall through. */
11441 case 0xb: /* Post-increment. */
11442 postinc = 1;
11443 writeback = 1;
11444 break;
11445 case 0xd: /* Pre-decrement. */
11446 imm = -imm;
11447 /* Fall through. */
11448 case 0xf: /* Pre-increment. */
11449 tcg_gen_addi_i32(addr, addr, imm);
11450 writeback = 1;
11451 break;
11452 default:
11453 tcg_temp_free_i32(addr);
11454 goto illegal_op;
11459 issinfo = writeback ? ISSInvalid : rs;
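/* Accesses with base register writeback are not reported with a
 * valid syndrome, so mark the ISS information invalid for them.
 */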
11461 if (insn & (1 << 20)) {
11462 /* Load. */
11463 tmp = tcg_temp_new_i32();
11464 switch (op) {
11465 case 0:
11466 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
11467 break;
11468 case 4:
11469 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
11470 break;
11471 case 1:
11472 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
11473 break;
11474 case 5:
11475 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
11476 break;
11477 case 2:
11478 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
11479 break;
11480 default:
11481 tcg_temp_free_i32(tmp);
11482 tcg_temp_free_i32(addr);
11483 goto illegal_op;
11485 if (rs == 15) {
11486 gen_bx_excret(s, tmp);
11487 } else {
11488 store_reg(s, rs, tmp);
11490 } else {
11491 /* Store. */
11492 tmp = load_reg(s, rs);
11493 switch (op) {
11494 case 0:
11495 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
11496 break;
11497 case 1:
11498 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
11499 break;
11500 case 2:
11501 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
11502 break;
11503 default:
11504 tcg_temp_free_i32(tmp);
11505 tcg_temp_free_i32(addr);
11506 goto illegal_op;
11508 tcg_temp_free_i32(tmp);
11510 if (postinc)
11511 tcg_gen_addi_i32(addr, addr, imm);
11512 if (writeback) {
11513 store_reg(s, rn, addr);
11514 } else {
11515 tcg_temp_free_i32(addr);
11518 break;
11519 default:
11520 goto illegal_op;
11522 return;
11523 illegal_op:
11524 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11525 default_exception_el(s));
11528 static void disas_thumb_insn(DisasContext *s, uint32_t insn)
11530 uint32_t val, op, rm, rn, rd, shift, cond;
11531 int32_t offset;
11532 int i;
11533 TCGv_i32 tmp;
11534 TCGv_i32 tmp2;
11535 TCGv_i32 addr;
11537 switch (insn >> 12) {
11538 case 0: case 1:
11540 rd = insn & 7;
11541 op = (insn >> 11) & 3;
11542 if (op == 3) {
11543 /* add/subtract */
11544 rn = (insn >> 3) & 7;
11545 tmp = load_reg(s, rn);
11546 if (insn & (1 << 10)) {
11547 /* immediate */
11548 tmp2 = tcg_temp_new_i32();
11549 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
11550 } else {
11551 /* reg */
11552 rm = (insn >> 6) & 7;
11553 tmp2 = load_reg(s, rm);
11555 if (insn & (1 << 9)) {
11556 if (s->condexec_mask)
11557 tcg_gen_sub_i32(tmp, tmp, tmp2);
11558 else
11559 gen_sub_CC(tmp, tmp, tmp2);
11560 } else {
11561 if (s->condexec_mask)
11562 tcg_gen_add_i32(tmp, tmp, tmp2);
11563 else
11564 gen_add_CC(tmp, tmp, tmp2);
11566 tcg_temp_free_i32(tmp2);
11567 store_reg(s, rd, tmp);
11568 } else {
11569 /* shift immediate */
11570 rm = (insn >> 3) & 7;
11571 shift = (insn >> 6) & 0x1f;
11572 tmp = load_reg(s, rm);
11573 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11574 if (!s->condexec_mask)
11575 gen_logic_CC(tmp);
11576 store_reg(s, rd, tmp);
11578 break;
11579 case 2: case 3:
11580 /* arithmetic large immediate */
11581 op = (insn >> 11) & 3;
11582 rd = (insn >> 8) & 0x7;
11583 if (op == 0) { /* mov */
11584 tmp = tcg_temp_new_i32();
11585 tcg_gen_movi_i32(tmp, insn & 0xff);
11586 if (!s->condexec_mask)
11587 gen_logic_CC(tmp);
11588 store_reg(s, rd, tmp);
11589 } else {
11590 tmp = load_reg(s, rd);
11591 tmp2 = tcg_temp_new_i32();
11592 tcg_gen_movi_i32(tmp2, insn & 0xff);
11593 switch (op) {
11594 case 1: /* cmp */
11595 gen_sub_CC(tmp, tmp, tmp2);
11596 tcg_temp_free_i32(tmp);
11597 tcg_temp_free_i32(tmp2);
11598 break;
11599 case 2: /* add */
11600 if (s->condexec_mask)
11601 tcg_gen_add_i32(tmp, tmp, tmp2);
11602 else
11603 gen_add_CC(tmp, tmp, tmp2);
11604 tcg_temp_free_i32(tmp2);
11605 store_reg(s, rd, tmp);
11606 break;
11607 case 3: /* sub */
11608 if (s->condexec_mask)
11609 tcg_gen_sub_i32(tmp, tmp, tmp2);
11610 else
11611 gen_sub_CC(tmp, tmp, tmp2);
11612 tcg_temp_free_i32(tmp2);
11613 store_reg(s, rd, tmp);
11614 break;
11617 break;
11618 case 4:
11619 if (insn & (1 << 11)) {
11620 rd = (insn >> 8) & 7;
11621 /* load pc-relative. Bit 1 of PC is ignored. */
11622 val = s->pc + 2 + ((insn & 0xff) * 4);
11623 val &= ~(uint32_t)2;
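/* s->pc already points past this 2-byte insn, so pc + 2 is the
 * architectural PC (insn address + 4), aligned down to a word.
 */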
11624 addr = tcg_temp_new_i32();
11625 tcg_gen_movi_i32(addr, val);
11626 tmp = tcg_temp_new_i32();
11627 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11628 rd | ISSIs16Bit);
11629 tcg_temp_free_i32(addr);
11630 store_reg(s, rd, tmp);
11631 break;
11633 if (insn & (1 << 10)) {
11634 /* 0b0100_01xx_xxxx_xxxx
11635 * - data processing extended, branch and exchange
11637 rd = (insn & 7) | ((insn >> 4) & 8);
11638 rm = (insn >> 3) & 0xf;
11639 op = (insn >> 8) & 3;
11640 switch (op) {
11641 case 0: /* add */
11642 tmp = load_reg(s, rd);
11643 tmp2 = load_reg(s, rm);
11644 tcg_gen_add_i32(tmp, tmp, tmp2);
11645 tcg_temp_free_i32(tmp2);
11646 store_reg(s, rd, tmp);
11647 break;
11648 case 1: /* cmp */
11649 tmp = load_reg(s, rd);
11650 tmp2 = load_reg(s, rm);
11651 gen_sub_CC(tmp, tmp, tmp2);
11652 tcg_temp_free_i32(tmp2);
11653 tcg_temp_free_i32(tmp);
11654 break;
11655 case 2: /* mov/cpy */
11656 tmp = load_reg(s, rm);
11657 store_reg(s, rd, tmp);
11658 break;
11659 case 3:
11661 /* 0b0100_0111_xxxx_xxxx
11662 * - branch [and link] exchange thumb register
11664 bool link = insn & (1 << 7);
11666 if (insn & 3) {
11667 goto undef;
11669 if (link) {
11670 ARCH(5);
11672 if ((insn & 4)) {
11673 /* BXNS/BLXNS: only exists for v8M with the
11674 * security extensions, and always UNDEF if NonSecure.
11675 * We don't implement these in the user-only mode
11676 * either (in theory you can use them from Secure User
11677 * mode but they are too tied in to system emulation.)
11679 if (!s->v8m_secure || IS_USER_ONLY) {
11680 goto undef;
11682 if (link) {
11683 gen_blxns(s, rm);
11684 } else {
11685 gen_bxns(s, rm);
11687 break;
11689 /* BLX/BX */
11690 tmp = load_reg(s, rm);
11691 if (link) {
11692 val = (uint32_t)s->pc | 1;
11693 tmp2 = tcg_temp_new_i32();
11694 tcg_gen_movi_i32(tmp2, val);
11695 store_reg(s, 14, tmp2);
11696 gen_bx(s, tmp);
11697 } else {
11698 /* Only BX works as exception-return, not BLX */
11699 gen_bx_excret(s, tmp);
11701 break;
11704 break;
11707 /* data processing register */
11708 rd = insn & 7;
11709 rm = (insn >> 3) & 7;
11710 op = (insn >> 6) & 0xf;
11711 if (op == 2 || op == 3 || op == 4 || op == 7) {
11712 /* the shift/rotate ops want the operands backwards */
11713 val = rm;
11714 rm = rd;
11715 rd = val;
11716 val = 1;
11717 } else {
11718 val = 0;
11721 if (op == 9) { /* neg */
11722 tmp = tcg_temp_new_i32();
11723 tcg_gen_movi_i32(tmp, 0);
11724 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11725 tmp = load_reg(s, rd);
11726 } else {
11727 tmp = NULL;
11730 tmp2 = load_reg(s, rm);
11731 switch (op) {
11732 case 0x0: /* and */
11733 tcg_gen_and_i32(tmp, tmp, tmp2);
11734 if (!s->condexec_mask)
11735 gen_logic_CC(tmp);
11736 break;
11737 case 0x1: /* eor */
11738 tcg_gen_xor_i32(tmp, tmp, tmp2);
11739 if (!s->condexec_mask)
11740 gen_logic_CC(tmp);
11741 break;
11742 case 0x2: /* lsl */
11743 if (s->condexec_mask) {
11744 gen_shl(tmp2, tmp2, tmp);
11745 } else {
11746 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
11747 gen_logic_CC(tmp2);
11749 break;
11750 case 0x3: /* lsr */
11751 if (s->condexec_mask) {
11752 gen_shr(tmp2, tmp2, tmp);
11753 } else {
11754 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
11755 gen_logic_CC(tmp2);
11757 break;
11758 case 0x4: /* asr */
11759 if (s->condexec_mask) {
11760 gen_sar(tmp2, tmp2, tmp);
11761 } else {
11762 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
11763 gen_logic_CC(tmp2);
11765 break;
11766 case 0x5: /* adc */
11767 if (s->condexec_mask) {
11768 gen_adc(tmp, tmp2);
11769 } else {
11770 gen_adc_CC(tmp, tmp, tmp2);
11772 break;
11773 case 0x6: /* sbc */
11774 if (s->condexec_mask) {
11775 gen_sub_carry(tmp, tmp, tmp2);
11776 } else {
11777 gen_sbc_CC(tmp, tmp, tmp2);
11779 break;
11780 case 0x7: /* ror */
11781 if (s->condexec_mask) {
11782 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11783 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
11784 } else {
11785 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
11786 gen_logic_CC(tmp2);
11788 break;
11789 case 0x8: /* tst */
11790 tcg_gen_and_i32(tmp, tmp, tmp2);
11791 gen_logic_CC(tmp);
11792 rd = 16;
11793 break;
11794 case 0x9: /* neg */
11795 if (s->condexec_mask)
11796 tcg_gen_neg_i32(tmp, tmp2);
11797 else
11798 gen_sub_CC(tmp, tmp, tmp2);
11799 break;
11800 case 0xa: /* cmp */
11801 gen_sub_CC(tmp, tmp, tmp2);
11802 rd = 16;
11803 break;
11804 case 0xb: /* cmn */
11805 gen_add_CC(tmp, tmp, tmp2);
11806 rd = 16;
11807 break;
11808 case 0xc: /* orr */
11809 tcg_gen_or_i32(tmp, tmp, tmp2);
11810 if (!s->condexec_mask)
11811 gen_logic_CC(tmp);
11812 break;
11813 case 0xd: /* mul */
11814 tcg_gen_mul_i32(tmp, tmp, tmp2);
11815 if (!s->condexec_mask)
11816 gen_logic_CC(tmp);
11817 break;
11818 case 0xe: /* bic */
11819 tcg_gen_andc_i32(tmp, tmp, tmp2);
11820 if (!s->condexec_mask)
11821 gen_logic_CC(tmp);
11822 break;
11823 case 0xf: /* mvn */
11824 tcg_gen_not_i32(tmp2, tmp2);
11825 if (!s->condexec_mask)
11826 gen_logic_CC(tmp2);
11827 val = 1;
11828 rm = rd;
11829 break;
11831 if (rd != 16) {
11832 if (val) {
11833 store_reg(s, rm, tmp2);
11834 if (op != 0xf)
11835 tcg_temp_free_i32(tmp);
11836 } else {
11837 store_reg(s, rd, tmp);
11838 tcg_temp_free_i32(tmp2);
11840 } else {
11841 tcg_temp_free_i32(tmp);
11842 tcg_temp_free_i32(tmp2);
11844 break;
11846 case 5:
11847 /* load/store register offset. */
11848 rd = insn & 7;
11849 rn = (insn >> 3) & 7;
11850 rm = (insn >> 6) & 7;
11851 op = (insn >> 9) & 7;
11852 addr = load_reg(s, rn);
11853 tmp = load_reg(s, rm);
11854 tcg_gen_add_i32(addr, addr, tmp);
11855 tcg_temp_free_i32(tmp);
11857 if (op < 3) { /* store */
11858 tmp = load_reg(s, rd);
11859 } else {
11860 tmp = tcg_temp_new_i32();
11863 switch (op) {
11864 case 0: /* str */
11865 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11866 break;
11867 case 1: /* strh */
11868 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11869 break;
11870 case 2: /* strb */
11871 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11872 break;
11873 case 3: /* ldrsb */
11874 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11875 break;
11876 case 4: /* ldr */
11877 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11878 break;
11879 case 5: /* ldrh */
11880 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11881 break;
11882 case 6: /* ldrb */
11883 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11884 break;
11885 case 7: /* ldrsh */
11886 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11887 break;
11889 if (op >= 3) { /* load */
11890 store_reg(s, rd, tmp);
11891 } else {
11892 tcg_temp_free_i32(tmp);
11894 tcg_temp_free_i32(addr);
11895 break;
11897 case 6:
11898 /* load/store word immediate offset */
11899 rd = insn & 7;
11900 rn = (insn >> 3) & 7;
11901 addr = load_reg(s, rn);
11902 val = (insn >> 4) & 0x7c;
11903 tcg_gen_addi_i32(addr, addr, val);
11905 if (insn & (1 << 11)) {
11906 /* load */
11907 tmp = tcg_temp_new_i32();
11908 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11909 store_reg(s, rd, tmp);
11910 } else {
11911 /* store */
11912 tmp = load_reg(s, rd);
11913 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11914 tcg_temp_free_i32(tmp);
11916 tcg_temp_free_i32(addr);
11917 break;
11919 case 7:
11920 /* load/store byte immediate offset */
11921 rd = insn & 7;
11922 rn = (insn >> 3) & 7;
11923 addr = load_reg(s, rn);
11924 val = (insn >> 6) & 0x1f;
11925 tcg_gen_addi_i32(addr, addr, val);
11927 if (insn & (1 << 11)) {
11928 /* load */
11929 tmp = tcg_temp_new_i32();
11930 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11931 store_reg(s, rd, tmp);
11932 } else {
11933 /* store */
11934 tmp = load_reg(s, rd);
11935 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11936 tcg_temp_free_i32(tmp);
11938 tcg_temp_free_i32(addr);
11939 break;
11941 case 8:
11942 /* load/store halfword immediate offset */
11943 rd = insn & 7;
11944 rn = (insn >> 3) & 7;
11945 addr = load_reg(s, rn);
11946 val = (insn >> 5) & 0x3e;
11947 tcg_gen_addi_i32(addr, addr, val);
11949 if (insn & (1 << 11)) {
11950 /* load */
11951 tmp = tcg_temp_new_i32();
11952 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11953 store_reg(s, rd, tmp);
11954 } else {
11955 /* store */
11956 tmp = load_reg(s, rd);
11957 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11958 tcg_temp_free_i32(tmp);
11960 tcg_temp_free_i32(addr);
11961 break;
11963 case 9:
11964 /* load/store from stack */
11965 rd = (insn >> 8) & 7;
11966 addr = load_reg(s, 13);
11967 val = (insn & 0xff) * 4;
11968 tcg_gen_addi_i32(addr, addr, val);
11970 if (insn & (1 << 11)) {
11971 /* load */
11972 tmp = tcg_temp_new_i32();
11973 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11974 store_reg(s, rd, tmp);
11975 } else {
11976 /* store */
11977 tmp = load_reg(s, rd);
11978 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11979 tcg_temp_free_i32(tmp);
11981 tcg_temp_free_i32(addr);
11982 break;
11984 case 10:
11985 /* add to high reg */
11986 rd = (insn >> 8) & 7;
11987 if (insn & (1 << 11)) {
11988 /* SP */
11989 tmp = load_reg(s, 13);
11990 } else {
11991 /* PC. bit 1 is ignored. */
11992 tmp = tcg_temp_new_i32();
11993 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
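/* As with the PC-relative load earlier: pc + 2 is the architectural
 * PC for this insn, aligned down to a word boundary.
 */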
11995 val = (insn & 0xff) * 4;
11996 tcg_gen_addi_i32(tmp, tmp, val);
11997 store_reg(s, rd, tmp);
11998 break;
12000 case 11:
12001 /* misc */
12002 op = (insn >> 8) & 0xf;
12003 switch (op) {
12004 case 0:
12005 /* adjust stack pointer */
12006 tmp = load_reg(s, 13);
12007 val = (insn & 0x7f) * 4;
12008 if (insn & (1 << 7))
12009 val = -(int32_t)val;
12010 tcg_gen_addi_i32(tmp, tmp, val);
12011 store_reg(s, 13, tmp);
12012 break;
12014 case 2: /* sign/zero extend. */
12015 ARCH(6);
12016 rd = insn & 7;
12017 rm = (insn >> 3) & 7;
12018 tmp = load_reg(s, rm);
12019 switch ((insn >> 6) & 3) {
12020 case 0: gen_sxth(tmp); break;
12021 case 1: gen_sxtb(tmp); break;
12022 case 2: gen_uxth(tmp); break;
12023 case 3: gen_uxtb(tmp); break;
12025 store_reg(s, rd, tmp);
12026 break;
12027 case 4: case 5: case 0xc: case 0xd:
12028 /* push/pop */
12029 addr = load_reg(s, 13);
12030 if (insn & (1 << 8))
12031 offset = 4;
12032 else
12033 offset = 0;
12034 for (i = 0; i < 8; i++) {
12035 if (insn & (1 << i))
12036 offset += 4;
12038 if ((insn & (1 << 11)) == 0) {
12039 tcg_gen_addi_i32(addr, addr, -offset);
12041 for (i = 0; i < 8; i++) {
12042 if (insn & (1 << i)) {
12043 if (insn & (1 << 11)) {
12044 /* pop */
12045 tmp = tcg_temp_new_i32();
12046 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
12047 store_reg(s, i, tmp);
12048 } else {
12049 /* push */
12050 tmp = load_reg(s, i);
12051 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
12052 tcg_temp_free_i32(tmp);
12054 /* advance to the next address. */
12055 tcg_gen_addi_i32(addr, addr, 4);
12058 tmp = NULL;
12059 if (insn & (1 << 8)) {
12060 if (insn & (1 << 11)) {
12061 /* pop pc */
12062 tmp = tcg_temp_new_i32();
12063 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
12064 /* don't set the pc until the rest of the instruction
12065 has completed */
12066 } else {
12067 /* push lr */
12068 tmp = load_reg(s, 14);
12069 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
12070 tcg_temp_free_i32(tmp);
12072 tcg_gen_addi_i32(addr, addr, 4);
12074 if ((insn & (1 << 11)) == 0) {
12075 tcg_gen_addi_i32(addr, addr, -offset);
12077 /* write back the new stack pointer */
12078 store_reg(s, 13, addr);
12079 /* set the new PC value */
12080 if ((insn & 0x0900) == 0x0900) {
12081 store_reg_from_load(s, 15, tmp);
12083 break;
12085 case 1: case 3: case 9: case 11: /* czb */
12086 rm = insn & 7;
12087 tmp = load_reg(s, rm);
12088 s->condlabel = gen_new_label();
12089 s->condjmp = 1;
12090 if (insn & (1 << 11))
12091 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
12092 else
12093 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
12094 tcg_temp_free_i32(tmp);
12095 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
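/* CBZ/CBNZ immediate: offset[5:1] = insn[7:3], offset[6] = insn[9]. */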
12096 val = (uint32_t)s->pc + 2;
12097 val += offset;
12098 gen_jmp(s, val);
12099 break;
12101 case 15: /* IT, nop-hint. */
12102 if ((insn & 0xf) == 0) {
12103 gen_nop_hint(s, (insn >> 4) & 0xf);
12104 break;
12106 /* If Then. */
12107 s->condexec_cond = (insn >> 4) & 0xe;
12108 s->condexec_mask = insn & 0x1f;
12109 /* No actual code generated for this insn, just setup state. */
12110 break;
12112 case 0xe: /* bkpt */
12114 int imm8 = extract32(insn, 0, 8);
12115 ARCH(5);
12116 gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
12117 break;
12120 case 0xa: /* rev, and hlt */
12122 int op1 = extract32(insn, 6, 2);
12124 if (op1 == 2) {
12125 /* HLT */
12126 int imm6 = extract32(insn, 0, 6);
12128 gen_hlt(s, imm6);
12129 break;
12132 /* Otherwise this is rev */
12133 ARCH(6);
12134 rn = (insn >> 3) & 0x7;
12135 rd = insn & 0x7;
12136 tmp = load_reg(s, rn);
12137 switch (op1) {
12138 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
12139 case 1: gen_rev16(tmp); break;
12140 case 3: gen_revsh(tmp); break;
12141 default:
12142 g_assert_not_reached();
12144 store_reg(s, rd, tmp);
12145 break;
12148 case 6:
12149 switch ((insn >> 5) & 7) {
12150 case 2:
12151 /* setend */
12152 ARCH(6);
12153 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
12154 gen_helper_setend(cpu_env);
12155 s->base.is_jmp = DISAS_UPDATE;
12157 break;
12158 case 3:
12159 /* cps */
12160 ARCH(6);
12161 if (IS_USER(s)) {
12162 break;
12164 if (arm_dc_feature(s, ARM_FEATURE_M)) {
12165 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
12166 /* FAULTMASK */
12167 if (insn & 1) {
12168 addr = tcg_const_i32(19);
12169 gen_helper_v7m_msr(cpu_env, addr, tmp);
12170 tcg_temp_free_i32(addr);
12172 /* PRIMASK */
12173 if (insn & 2) {
12174 addr = tcg_const_i32(16);
12175 gen_helper_v7m_msr(cpu_env, addr, tmp);
12176 tcg_temp_free_i32(addr);
12178 tcg_temp_free_i32(tmp);
12179 gen_lookup_tb(s);
12180 } else {
12181 if (insn & (1 << 4)) {
12182 shift = CPSR_A | CPSR_I | CPSR_F;
12183 } else {
12184 shift = 0;
12186 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
12188 break;
12189 default:
12190 goto undef;
12192 break;
12194 default:
12195 goto undef;
12197 break;
12199 case 12:
12201 /* load/store multiple */
12202 TCGv_i32 loaded_var = NULL;
12203 rn = (insn >> 8) & 0x7;
12204 addr = load_reg(s, rn);
12205 for (i = 0; i < 8; i++) {
12206 if (insn & (1 << i)) {
12207 if (insn & (1 << 11)) {
12208 /* load */
12209 tmp = tcg_temp_new_i32();
12210 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
12211 if (i == rn) {
12212 loaded_var = tmp;
12213 } else {
12214 store_reg(s, i, tmp);
12216 } else {
12217 /* store */
12218 tmp = load_reg(s, i);
12219 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
12220 tcg_temp_free_i32(tmp);
12222 /* advance to the next address */
12223 tcg_gen_addi_i32(addr, addr, 4);
12226 if ((insn & (1 << rn)) == 0) {
12227 /* base reg not in list: base register writeback */
12228 store_reg(s, rn, addr);
12229 } else {
12230 /* base reg in list: if load, complete it now */
12231 if (insn & (1 << 11)) {
12232 store_reg(s, rn, loaded_var);
12234 tcg_temp_free_i32(addr);
12236 break;
12238 case 13:
12239 /* conditional branch or swi */
12240 cond = (insn >> 8) & 0xf;
12241 if (cond == 0xe)
12242 goto undef;
12244 if (cond == 0xf) {
12245 /* swi */
12246 gen_set_pc_im(s, s->pc);
12247 s->svc_imm = extract32(insn, 0, 8);
12248 s->base.is_jmp = DISAS_SWI;
12249 break;
12251 /* generate a conditional jump to next instruction */
12252 s->condlabel = gen_new_label();
12253 arm_gen_test_cc(cond ^ 1, s->condlabel);
12254 s->condjmp = 1;
12256 /* jump to the offset */
12257 val = (uint32_t)s->pc + 2;
12258 offset = ((int32_t)insn << 24) >> 24;
12259 val += offset << 1;
12260 gen_jmp(s, val);
12261 break;
12263 case 14:
12264 if (insn & (1 << 11)) {
12265 /* thumb_insn_is_16bit() ensures we can't get here for
12266 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
12267 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
12269 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12270 ARCH(5);
12271 offset = ((insn & 0x7ff) << 1);
12272 tmp = load_reg(s, 14);
12273 tcg_gen_addi_i32(tmp, tmp, offset);
12274 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
12276 tmp2 = tcg_temp_new_i32();
12277 tcg_gen_movi_i32(tmp2, s->pc | 1);
12278 store_reg(s, 14, tmp2);
12279 gen_bx(s, tmp);
12280 break;
12282 /* unconditional branch */
12283 val = (uint32_t)s->pc;
12284 offset = ((int32_t)insn << 21) >> 21;
12285 val += (offset << 1) + 2;
12286 gen_jmp(s, val);
12287 break;
12289 case 15:
12290 /* thumb_insn_is_16bit() ensures we can't get here for
12291 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
12293 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
12295 if (insn & (1 << 11)) {
12296 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
12297 offset = ((insn & 0x7ff) << 1) | 1;
12298 tmp = load_reg(s, 14);
12299 tcg_gen_addi_i32(tmp, tmp, offset);
12301 tmp2 = tcg_temp_new_i32();
12302 tcg_gen_movi_i32(tmp2, s->pc | 1);
12303 store_reg(s, 14, tmp2);
12304 gen_bx(s, tmp);
12305 } else {
12306 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
12307 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
12309 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
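/* The prefix leaves PC + sign_extend(imm11:zeros(12)) in LR; the
 * following suffix halfword adds the low part of the offset, performs
 * the branch and sets the final LR value.
 */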
12311 break;
12313 return;
12314 illegal_op:
12315 undef:
12316 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
12317 default_exception_el(s));
12320 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
12322 /* Return true if the insn at dc->pc might cross a page boundary.
12323 * (False positives are OK, false negatives are not.)
12324 * We know this is a Thumb insn, and our caller ensures we are
12325 * only called if dc->pc is less than 4 bytes from the page
12326 * boundary, so we cross the page if the first 16 bits indicate
12327 * that this is a 32 bit insn.
12329 uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
12331 return !thumb_insn_is_16bit(s, insn);
12334 static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
12336 DisasContext *dc = container_of(dcbase, DisasContext, base);
12337 CPUARMState *env = cs->env_ptr;
12338 ARMCPU *cpu = arm_env_get_cpu(env);
12340 dc->pc = dc->base.pc_first;
12341 dc->condjmp = 0;
12343 dc->aarch64 = 0;
12344 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
12345 * there is no secure EL1, so we route exceptions to EL3.
12347 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
12348 !arm_el_is_aa64(env, 3);
12349 dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
12350 dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
12351 dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
12352 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
12353 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
12354 dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
12355 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
12356 #if !defined(CONFIG_USER_ONLY)
12357 dc->user = (dc->current_el == 0);
12358 #endif
12359 dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
12360 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
12361 dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
12362 dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
12363 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
12364 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
12365 dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
12366 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
12367 regime_is_secure(env, dc->mmu_idx);
12368 dc->cp_regs = cpu->cp_regs;
12369 dc->features = env->features;
12371 /* Single step state. The code-generation logic here is:
12372 * SS_ACTIVE == 0:
12373 * generate code with no special handling for single-stepping (except
12374 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
12375 * this happens anyway because those changes are all system register or
12376 * PSTATE writes).
12377 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
12378 * emit code for one insn
12379 * emit code to clear PSTATE.SS
12380 * emit code to generate software step exception for completed step
12381 * end TB (as usual for having generated an exception)
12382 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
12383 * emit code to generate a software step exception
12384 * end the TB
12386 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
12387 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
12388 dc->is_ldex = false;
12389 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
12391 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
12393 /* If architectural single step active, limit to 1. */
12394 if (is_singlestepping(dc)) {
12395 dc->base.max_insns = 1;
12398 /* ARM is a fixed-length ISA. Bound the number of insns to execute
12399 to those left on the page. */
12400 if (!dc->thumb) {
12401 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
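/* -(pc_first | TARGET_PAGE_MASK) is the number of bytes remaining on
 * this page; dividing by 4 bounds the count of fixed-size ARM insns.
 */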
12402 dc->base.max_insns = MIN(dc->base.max_insns, bound);
12405 cpu_F0s = tcg_temp_new_i32();
12406 cpu_F1s = tcg_temp_new_i32();
12407 cpu_F0d = tcg_temp_new_i64();
12408 cpu_F1d = tcg_temp_new_i64();
12409 cpu_V0 = cpu_F0d;
12410 cpu_V1 = cpu_F1d;
12411 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
12412 cpu_M0 = tcg_temp_new_i64();
12415 static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
12417 DisasContext *dc = container_of(dcbase, DisasContext, base);
12419 /* A note on handling of the condexec (IT) bits:
12421 * We want to avoid the overhead of having to write the updated condexec
12422 * bits back to the CPUARMState for every instruction in an IT block. So:
12423 * (1) if the condexec bits are not already zero then we write
12424 * zero back into the CPUARMState now. This avoids complications trying
12425 * to do it at the end of the block. (For example if we don't do this
12426 * it's hard to identify whether we can safely skip writing condexec
12427 * at the end of the TB, which we definitely want to do for the case
12428 * where a TB doesn't do anything with the IT state at all.)
12429 * (2) if we are going to leave the TB then we call gen_set_condexec()
12430 * which will write the correct value into CPUARMState if zero is wrong.
12431 * This is done both for leaving the TB at the end, and for leaving
12432 * it because of an exception we know will happen, which is done in
12433 * gen_exception_insn(). The latter is necessary because we need to
12434 * leave the TB with the PC/IT state just prior to execution of the
12435 * instruction which caused the exception.
12436 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
12437 * then the CPUARMState will be wrong and we need to reset it.
12438 * This is handled in the same way as restoration of the
12439 * PC in these situations; we save the value of the condexec bits
12440 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
12441 * then uses this to restore them after an exception.
12443 * Note that there are no instructions which can read the condexec
12444 * bits, and none which can write non-static values to them, so
12445 * we don't need to care about whether CPUARMState is correct in the
12446 * middle of a TB.
12449 /* Reset the conditional execution bits immediately. This avoids
12450 complications trying to do it at the end of the block. */
12451 if (dc->condexec_mask || dc->condexec_cond) {
12452 TCGv_i32 tmp = tcg_temp_new_i32();
12453 tcg_gen_movi_i32(tmp, 0);
12454 store_cpu_field(tmp, condexec_bits);
12456 tcg_clear_temp_count();
12459 static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
12461 DisasContext *dc = container_of(dcbase, DisasContext, base);
12463 tcg_gen_insn_start(dc->pc,
12464 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
12466 dc->insn_start = tcg_last_op();
12469 static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
12470 const CPUBreakpoint *bp)
12472 DisasContext *dc = container_of(dcbase, DisasContext, base);
12474 if (bp->flags & BP_CPU) {
12475 gen_set_condexec(dc);
12476 gen_set_pc_im(dc, dc->pc);
12477 gen_helper_check_breakpoints(cpu_env);
12478 /* End the TB early; it's likely not going to be executed */
12479 dc->base.is_jmp = DISAS_TOO_MANY;
12480 } else {
12481 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
12482 /* The address covered by the breakpoint must be
12483 included in [tb->pc, tb->pc + tb->size) in order
12484 for it to be properly cleared -- thus we
12485 increment the PC here so that the logic setting
12486 tb->size below does the right thing. */
12487 /* TODO: Advance PC by correct instruction length to
12488 * avoid disassembler error messages */
12489 dc->pc += 2;
12490 dc->base.is_jmp = DISAS_NORETURN;
12493 return true;
12496 static bool arm_pre_translate_insn(DisasContext *dc)
12498 #ifdef CONFIG_USER_ONLY
12499 /* Intercept jump to the magic kernel page. */
12500 if (dc->pc >= 0xffff0000) {
12501 /* We always get here via a jump, so know we are not in a
12502 conditional execution block. */
12503 gen_exception_internal(EXCP_KERNEL_TRAP);
12504 dc->base.is_jmp = DISAS_NORETURN;
12505 return true;
12507 #endif
12509 if (dc->ss_active && !dc->pstate_ss) {
12510 /* Singlestep state is Active-pending.
12511 * If we're in this state at the start of a TB then either
12512 * a) we just took an exception to an EL which is being debugged
12513 * and this is the first insn in the exception handler
12514 * b) debug exceptions were masked and we just unmasked them
12515 * without changing EL (eg by clearing PSTATE.D)
12516 * In either case we're going to take a swstep exception in the
12517 * "did not step an insn" case, and so the syndrome ISV and EX
12518 * bits should be zero.
12520 assert(dc->base.num_insns == 1);
12521 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
12522 default_exception_el(dc));
12523 dc->base.is_jmp = DISAS_NORETURN;
12524 return true;
12527 return false;
12530 static void arm_post_translate_insn(DisasContext *dc)
12532 if (dc->condjmp && !dc->base.is_jmp) {
12533 gen_set_label(dc->condlabel);
12534 dc->condjmp = 0;
12536 dc->base.pc_next = dc->pc;
12537 translator_loop_temp_check(&dc->base);
12540 static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12542 DisasContext *dc = container_of(dcbase, DisasContext, base);
12543 CPUARMState *env = cpu->env_ptr;
12544 unsigned int insn;
12546 if (arm_pre_translate_insn(dc)) {
12547 return;
12550 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
12551 dc->insn = insn;
12552 dc->pc += 4;
12553 disas_arm_insn(dc, insn);
12555 arm_post_translate_insn(dc);
12557 /* ARM is a fixed-length ISA. We performed the cross-page check
12558 in init_disas_context by adjusting max_insns. */
12561 static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
12563 /* Return true if this Thumb insn is always unconditional,
12564 * even inside an IT block. This is true of only a very few
12565 * instructions: BKPT, HLT, and SG.
12567 * A larger class of instructions are UNPREDICTABLE if used
12568 * inside an IT block; we do not need to detect those here, because
12569 * what we do by default (perform the cc check and update the IT
12570 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12571 * choice for those situations.
12573 * insn is either a 16-bit or a 32-bit instruction; the two are
12574 * distinguishable because for the 16-bit case the top 16 bits
12575 * are zeroes, and that isn't a valid 32-bit encoding.
12577 if ((insn & 0xffffff00) == 0xbe00) {
12578 /* BKPT */
12579 return true;
12582 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
12583 !arm_dc_feature(s, ARM_FEATURE_M)) {
12584 /* HLT: v8A only. This is unconditional even when it is going to
12585 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12586 * For v7 cores this was a plain old undefined encoding and so
12587 * honours its cc check. (We might be using the encoding as
12588 * a semihosting trap, but we don't change the cc check behaviour
12589 * on that account, because a debugger connected to a real v7A
12590 * core and emulating semihosting traps by catching the UNDEF
12591 * exception would also only see cases where the cc check passed.
12592 * No guest code should be trying to do a HLT semihosting trap
12593 * in an IT block anyway.
12595 return true;
12598 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12599 arm_dc_feature(s, ARM_FEATURE_M)) {
12600 /* SG: v8M only */
12601 return true;
12604 return false;
12607 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12609 DisasContext *dc = container_of(dcbase, DisasContext, base);
12610 CPUARMState *env = cpu->env_ptr;
12611 uint32_t insn;
12612 bool is_16bit;
12614 if (arm_pre_translate_insn(dc)) {
12615 return;
12618 insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
12619 is_16bit = thumb_insn_is_16bit(dc, insn);
12620 dc->pc += 2;
12621 if (!is_16bit) {
12622 uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
12624 insn = insn << 16 | insn2;
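/* 32-bit Thumb insns are held with the first halfword in the high
 * 16 bits, which is the layout disas_thumb2_insn() expects.
 */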
12625 dc->pc += 2;
12627 dc->insn = insn;
12629 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
12630 uint32_t cond = dc->condexec_cond;
12632 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
12633 dc->condlabel = gen_new_label();
12634 arm_gen_test_cc(cond ^ 1, dc->condlabel);
12635 dc->condjmp = 1;
12639 if (is_16bit) {
12640 disas_thumb_insn(dc, insn);
12641 } else {
12642 disas_thumb2_insn(dc, insn);
12645 /* Advance the Thumb condexec condition. */
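/* The lowest condition bit is replaced by the next mask bit and the
 * mask shifts up one place (the ITAdvance() pseudocode); once the
 * mask is empty the IT block has finished.
 */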
12646 if (dc->condexec_mask) {
12647 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
12648 ((dc->condexec_mask >> 4) & 1));
12649 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
12650 if (dc->condexec_mask == 0) {
12651 dc->condexec_cond = 0;
12655 arm_post_translate_insn(dc);
12657 /* Thumb is a variable-length ISA. Stop translation when the next insn
12658 * will touch a new page. This ensures that prefetch aborts occur at
12659 * the right place.
12661 * We want to stop the TB if the next insn starts in a new page,
12662 * or if it spans between this page and the next. This means that
12663 * if we're looking at the last halfword in the page we need to
12664 * see if it's a 16-bit Thumb insn (which will fit in this TB)
12665 * or a 32-bit Thumb insn (which won't).
12666 * This is to avoid generating a silly TB with a single 16-bit insn
12667 * in it at the end of this page (which would execute correctly
12668 * but isn't very efficient).
12670 if (dc->base.is_jmp == DISAS_NEXT
12671 && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
12672 || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
12673 && insn_crosses_page(env, dc)))) {
12674 dc->base.is_jmp = DISAS_TOO_MANY;
static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
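        /*
         * For WFI we pass the helper the instruction length: bit 31 of
         * dc->insn is only set for a 32-bit Thumb encoding, so the value
         * is 2 for a 16-bit Thumb WFI and 4 otherwise.  The helper needs
         * the length when the WFI is trapped to a higher exception level,
         * so that the trap is reported against the WFI insn itself.
         */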
        case DISAS_WFI:
        {
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->pc);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->pc);
        }
    }

    /* Functions above can change dc->pc, so re-align dc->base.pc_next */
    dc->base.pc_next = dc->pc;
}

static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}

static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

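/*
 * The Thumb ops below share every hook with the ARM ops except
 * translate_insn, which has to cope with Thumb's variable-length
 * encoding.
 */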
static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
{
    DisasContext dc;
    const TranslatorOps *ops = &arm_translator_ops;

    if (ARM_TBFLAG_THUMB(tb->flags)) {
        ops = &thumb_translator_ops;
    }
#ifdef TARGET_AARCH64
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        ops = &aarch64_translator_ops;
    }
#endif

    translator_loop(ops, &dc.base, cpu, tb);
}

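/*
 * CPU mode names indexed by the low four bits of CPSR.M (used by the
 * register dump below); "???" marks reserved mode encodings.
 */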
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
  "???", "???", "hyp", "und", "???", "???", "???", "sys"
};

void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
        return;
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                    xpsr,
                    xpsr & XPSR_N ? 'N' : '-',
                    xpsr & XPSR_Z ? 'Z' : '-',
                    xpsr & XPSR_C ? 'C' : '-',
                    xpsr & XPSR_V ? 'V' : '-',
                    xpsr & XPSR_T ? 'T' : 'A',
                    ns_status,
                    mode);
    } else {
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                    psr,
                    psr & CPSR_N ? 'N' : '-',
                    psr & CPSR_Z ? 'Z' : '-',
                    psr & CPSR_C ? 'C' : '-',
                    psr & CPSR_V ? 'V' : '-',
                    psr & CPSR_T ? 'T' : 'A',
                    ns_status,
                    cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
    }
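    /*
     * For example, CPSR 0x60000010 on a core without EL3 (so no NS/S
     * annotation) prints as "PSR=60000010 -ZC- A usr32": Z and C set,
     * ARM (not Thumb) state, User mode, 32-bit PC.
     */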
    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
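        /*
         * Only d0-d15 alias the single-precision registers (d<n> maps to
         * s<2n+1>:s<2n>); the additional 16 double registers provided by
         * VFPv3 have no single-precision names, so the s-register columns
         * are only meaningful for the first 16 rows.
         */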
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = *aa32_vfp_dreg(env, i);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}

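/*
 * restore_state_to_opc() consumes the per-insn values recorded by the
 * insn_start hook: data[0] is the PC, data[1] the Thumb IT-block state
 * (unused and zero for AArch64), and data[2] any syndrome information
 * for the insn, stored shifted down by ARM_INSN_START_WORD2_SHIFT.
 */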
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;