1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
27 #include "tcg-op.h"
28 #include "qemu/log.h"
29 #include "qemu/bitops.h"
30 #include "arm_ldst.h"
31 #include "exec/semihost.h"
33 #include "exec/helper-proto.h"
34 #include "exec/helper-gen.h"
36 #include "trace-tcg.h"
37 #include "exec/log.h"
40 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
41 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
42 /* currently all emulated v5 cores are also v5TE, so don't bother */
43 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
44 #define ENABLE_ARCH_5J arm_dc_feature(s, ARM_FEATURE_JAZELLE)
45 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
46 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
47 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
48 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
49 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
51 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
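/* Usage sketch (illustrative): a decoder that needs Thumb-2 simply writes
 *     ARCH(6T2);
 * which jumps to the local "illegal_op" label when the corresponding
 * feature bit is absent; the real call sites are spread through the
 * decode functions later in this file.
 */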
53 #include "translate.h"
55 #if defined(CONFIG_USER_ONLY)
56 #define IS_USER(s) 1
57 #else
58 #define IS_USER(s) (s->user)
59 #endif
61 TCGv_env cpu_env;
62 /* We reuse the same 64-bit temporaries for efficiency. */
63 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
64 static TCGv_i32 cpu_R[16];
65 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66 TCGv_i64 cpu_exclusive_addr;
67 TCGv_i64 cpu_exclusive_val;
69 /* FIXME: These should be removed. */
70 static TCGv_i32 cpu_F0s, cpu_F1s;
71 static TCGv_i64 cpu_F0d, cpu_F1d;
73 #include "exec/gen-icount.h"
75 static const char *regnames[] =
76 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
77 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
79 /* initialize TCG globals. */
80 void arm_translate_init(void)
82 int i;
84 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
85 tcg_ctx.tcg_env = cpu_env;
87 for (i = 0; i < 16; i++) {
88 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
89 offsetof(CPUARMState, regs[i]),
90 regnames[i]);
92 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
93 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
94 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
95 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
97 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
98 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
99 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
100 offsetof(CPUARMState, exclusive_val), "exclusive_val");
102 a64_translate_init();
105 /* Flags for the disas_set_da_iss info argument:
106 * lower bits hold the Rt register number, higher bits are flags.
108 typedef enum ISSInfo {
109 ISSNone = 0,
110 ISSRegMask = 0x1f,
111 ISSInvalid = (1 << 5),
112 ISSIsAcqRel = (1 << 6),
113 ISSIsWrite = (1 << 7),
114 ISSIs16Bit = (1 << 8),
115 } ISSInfo;
117 /* Save the syndrome information for a Data Abort */
118 static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
120 uint32_t syn;
121 int sas = memop & MO_SIZE;
122 bool sse = memop & MO_SIGN;
123 bool is_acqrel = issinfo & ISSIsAcqRel;
124 bool is_write = issinfo & ISSIsWrite;
125 bool is_16bit = issinfo & ISSIs16Bit;
126 int srt = issinfo & ISSRegMask;
128 if (issinfo & ISSInvalid) {
129 /* Some callsites want to conditionally provide ISS info,
130 * eg "only if this was not a writeback"
132 return;
135 if (srt == 15) {
136 /* For AArch32, insns where the src/dest is R15 never generate
137 * ISS information. Catching that here saves checking at all
138 * the call sites.
140 return;
143 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
144 0, 0, 0, is_write, 0, is_16bit);
145 disas_set_insn_syndrome(s, syn);
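/* Minimal sketch of a caller (hypothetical values; real callers go through
 * the gen_aa32_*_iss wrappers defined further down):
 *
 *     ISSInfo issinfo = rt | ISSIs16Bit;    // dest register plus 16-bit encoding
 *     disas_set_da_iss(s, MO_UL | s->be_data, issinfo);
 */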
148 static inline int get_a32_user_mem_index(DisasContext *s)
150 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
151 * insns:
152 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
153 * otherwise, access as if at PL0.
155 switch (s->mmu_idx) {
156 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
157 case ARMMMUIdx_S12NSE0:
158 case ARMMMUIdx_S12NSE1:
159 return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
160 case ARMMMUIdx_S1E3:
161 case ARMMMUIdx_S1SE0:
162 case ARMMMUIdx_S1SE1:
163 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
164 case ARMMMUIdx_MUser:
165 case ARMMMUIdx_MPriv:
166 case ARMMMUIdx_MNegPri:
167 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
168 case ARMMMUIdx_S2NS:
169 default:
170 g_assert_not_reached();
174 static inline TCGv_i32 load_cpu_offset(int offset)
176 TCGv_i32 tmp = tcg_temp_new_i32();
177 tcg_gen_ld_i32(tmp, cpu_env, offset);
178 return tmp;
181 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
183 static inline void store_cpu_offset(TCGv_i32 var, int offset)
185 tcg_gen_st_i32(var, cpu_env, offset);
186 tcg_temp_free_i32(var);
189 #define store_cpu_field(var, name) \
190 store_cpu_offset(var, offsetof(CPUARMState, name))
192 /* Set a variable to the value of a CPU register. */
193 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
195 if (reg == 15) {
196 uint32_t addr;
197 /* normally, since we updated PC, we need only to add one insn */
198 if (s->thumb)
199 addr = (long)s->pc + 2;
200 else
201 addr = (long)s->pc + 4;
202 tcg_gen_movi_i32(var, addr);
203 } else {
204 tcg_gen_mov_i32(var, cpu_R[reg]);
208 /* Create a new temporary and set it to the value of a CPU register. */
209 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
211 TCGv_i32 tmp = tcg_temp_new_i32();
212 load_reg_var(s, tmp, reg);
213 return tmp;
216 /* Set a CPU register. The source must be a temporary and will be
217 marked as dead. */
218 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
220 if (reg == 15) {
221 /* In Thumb mode, we must ignore bit 0.
222 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
223 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
224 * We choose to ignore [1:0] in ARM mode for all architecture versions.
226 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
227 s->base.is_jmp = DISAS_JUMP;
229 tcg_gen_mov_i32(cpu_R[reg], var);
230 tcg_temp_free_i32(var);
233 /* Value extensions. */
234 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
235 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
236 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
237 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
239 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
240 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
243 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
245 TCGv_i32 tmp_mask = tcg_const_i32(mask);
246 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
247 tcg_temp_free_i32(tmp_mask);
249 /* Set NZCV flags from the high 4 bits of var. */
250 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
252 static void gen_exception_internal(int excp)
254 TCGv_i32 tcg_excp = tcg_const_i32(excp);
256 assert(excp_is_internal(excp));
257 gen_helper_exception_internal(cpu_env, tcg_excp);
258 tcg_temp_free_i32(tcg_excp);
261 static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
263 TCGv_i32 tcg_excp = tcg_const_i32(excp);
264 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
265 TCGv_i32 tcg_el = tcg_const_i32(target_el);
267 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
268 tcg_syn, tcg_el);
270 tcg_temp_free_i32(tcg_el);
271 tcg_temp_free_i32(tcg_syn);
272 tcg_temp_free_i32(tcg_excp);
275 static void gen_ss_advance(DisasContext *s)
277 /* If the singlestep state is Active-not-pending, advance to
278 * Active-pending.
280 if (s->ss_active) {
281 s->pstate_ss = 0;
282 gen_helper_clear_pstate_ss(cpu_env);
286 static void gen_step_complete_exception(DisasContext *s)
288 /* We just completed step of an insn. Move from Active-not-pending
289 * to Active-pending, and then also take the swstep exception.
290 * This corresponds to making the (IMPDEF) choice to prioritize
291 * swstep exceptions over asynchronous exceptions taken to an exception
292 * level where debug is disabled. This choice has the advantage that
293 * we do not need to maintain internal state corresponding to the
294 * ISV/EX syndrome bits between completion of the step and generation
295 * of the exception, and our syndrome information is always correct.
297 gen_ss_advance(s);
298 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
299 default_exception_el(s));
300 s->base.is_jmp = DISAS_NORETURN;
303 static void gen_singlestep_exception(DisasContext *s)
305 /* Generate the right kind of exception for singlestep, which is
306 * either the architectural singlestep or EXCP_DEBUG for QEMU's
307 * gdb singlestepping.
309 if (s->ss_active) {
310 gen_step_complete_exception(s);
311 } else {
312 gen_exception_internal(EXCP_DEBUG);
316 static inline bool is_singlestepping(DisasContext *s)
318 /* Return true if we are singlestepping either because of
319 * architectural singlestep or QEMU gdbstub singlestep. This does
320 * not include the command line '-singlestep' mode which is rather
321 * misnamed as it only means "one instruction per TB" and doesn't
322 * affect the code we generate.
324 return s->base.singlestep_enabled || s->ss_active;
327 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
329 TCGv_i32 tmp1 = tcg_temp_new_i32();
330 TCGv_i32 tmp2 = tcg_temp_new_i32();
331 tcg_gen_ext16s_i32(tmp1, a);
332 tcg_gen_ext16s_i32(tmp2, b);
333 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
334 tcg_temp_free_i32(tmp2);
335 tcg_gen_sari_i32(a, a, 16);
336 tcg_gen_sari_i32(b, b, 16);
337 tcg_gen_mul_i32(b, b, a);
338 tcg_gen_mov_i32(a, tmp1);
339 tcg_temp_free_i32(tmp1);
342 /* Byteswap each halfword. */
343 static void gen_rev16(TCGv_i32 var)
345 TCGv_i32 tmp = tcg_temp_new_i32();
346 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
347 tcg_gen_shri_i32(tmp, var, 8);
348 tcg_gen_and_i32(tmp, tmp, mask);
349 tcg_gen_and_i32(var, var, mask);
350 tcg_gen_shli_i32(var, var, 8);
351 tcg_gen_or_i32(var, var, tmp);
352 tcg_temp_free_i32(mask);
353 tcg_temp_free_i32(tmp);
356 /* Byteswap low halfword and sign extend. */
357 static void gen_revsh(TCGv_i32 var)
359 tcg_gen_ext16u_i32(var, var);
360 tcg_gen_bswap16_i32(var, var);
361 tcg_gen_ext16s_i32(var, var);
364 /* Return (b << 32) + a. Mark inputs as dead */
365 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
367 TCGv_i64 tmp64 = tcg_temp_new_i64();
369 tcg_gen_extu_i32_i64(tmp64, b);
370 tcg_temp_free_i32(b);
371 tcg_gen_shli_i64(tmp64, tmp64, 32);
372 tcg_gen_add_i64(a, tmp64, a);
374 tcg_temp_free_i64(tmp64);
375 return a;
378 /* Return (b << 32) - a. Mark inputs as dead. */
379 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
381 TCGv_i64 tmp64 = tcg_temp_new_i64();
383 tcg_gen_extu_i32_i64(tmp64, b);
384 tcg_temp_free_i32(b);
385 tcg_gen_shli_i64(tmp64, tmp64, 32);
386 tcg_gen_sub_i64(a, tmp64, a);
388 tcg_temp_free_i64(tmp64);
389 return a;
392 /* 32x32->64 multiply. Marks inputs as dead. */
393 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
395 TCGv_i32 lo = tcg_temp_new_i32();
396 TCGv_i32 hi = tcg_temp_new_i32();
397 TCGv_i64 ret;
399 tcg_gen_mulu2_i32(lo, hi, a, b);
400 tcg_temp_free_i32(a);
401 tcg_temp_free_i32(b);
403 ret = tcg_temp_new_i64();
404 tcg_gen_concat_i32_i64(ret, lo, hi);
405 tcg_temp_free_i32(lo);
406 tcg_temp_free_i32(hi);
408 return ret;
411 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
413 TCGv_i32 lo = tcg_temp_new_i32();
414 TCGv_i32 hi = tcg_temp_new_i32();
415 TCGv_i64 ret;
417 tcg_gen_muls2_i32(lo, hi, a, b);
418 tcg_temp_free_i32(a);
419 tcg_temp_free_i32(b);
421 ret = tcg_temp_new_i64();
422 tcg_gen_concat_i32_i64(ret, lo, hi);
423 tcg_temp_free_i32(lo);
424 tcg_temp_free_i32(hi);
426 return ret;
429 /* Swap low and high halfwords. */
430 static void gen_swap_half(TCGv_i32 var)
432 TCGv_i32 tmp = tcg_temp_new_i32();
433 tcg_gen_shri_i32(tmp, var, 16);
434 tcg_gen_shli_i32(var, var, 16);
435 tcg_gen_or_i32(var, var, tmp);
436 tcg_temp_free_i32(tmp);
439 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
440 tmp = (t0 ^ t1) & 0x8000;
441 t0 &= ~0x8000;
442 t1 &= ~0x8000;
443 t0 = (t0 + t1) ^ tmp;
446 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
448 TCGv_i32 tmp = tcg_temp_new_i32();
449 tcg_gen_xor_i32(tmp, t0, t1);
450 tcg_gen_andi_i32(tmp, tmp, 0x8000);
451 tcg_gen_andi_i32(t0, t0, ~0x8000);
452 tcg_gen_andi_i32(t1, t1, ~0x8000);
453 tcg_gen_add_i32(t0, t0, t1);
454 tcg_gen_xor_i32(t0, t0, tmp);
455 tcg_temp_free_i32(tmp);
456 tcg_temp_free_i32(t1);
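/* Worked example of the carry-suppression trick above (illustrative):
 * t0 = 0x00038001, t1 = 0x00029001.  tmp = (t0 ^ t1) & 0x8000 = 0; with
 * bit 15 cleared the low halves add as 0x0001 + 0x1001 = 0x1002, and the
 * final XOR restores nothing here, giving (0x8001 + 0x9001) & 0xffff.
 * The high halves add independently: 0x0003 + 0x0002 = 0x0005.  Clearing
 * bit 15 of both operands caps the low-half sum below 0x10000, so no
 * carry can ever leak into the upper halfword.
 */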
459 /* Set CF to the top bit of var. */
460 static void gen_set_CF_bit31(TCGv_i32 var)
462 tcg_gen_shri_i32(cpu_CF, var, 31);
465 /* Set N and Z flags from var. */
466 static inline void gen_logic_CC(TCGv_i32 var)
468 tcg_gen_mov_i32(cpu_NF, var);
469 tcg_gen_mov_i32(cpu_ZF, var);
472 /* T0 += T1 + CF. */
473 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
475 tcg_gen_add_i32(t0, t0, t1);
476 tcg_gen_add_i32(t0, t0, cpu_CF);
479 /* dest = T0 + T1 + CF. */
480 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
482 tcg_gen_add_i32(dest, t0, t1);
483 tcg_gen_add_i32(dest, dest, cpu_CF);
486 /* dest = T0 - T1 + CF - 1. */
487 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
489 tcg_gen_sub_i32(dest, t0, t1);
490 tcg_gen_add_i32(dest, dest, cpu_CF);
491 tcg_gen_subi_i32(dest, dest, 1);
494 /* dest = T0 + T1. Compute C, N, V and Z flags */
495 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
497 TCGv_i32 tmp = tcg_temp_new_i32();
498 tcg_gen_movi_i32(tmp, 0);
499 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
500 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
501 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
502 tcg_gen_xor_i32(tmp, t0, t1);
503 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
504 tcg_temp_free_i32(tmp);
505 tcg_gen_mov_i32(dest, cpu_NF);
508 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
509 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
511 TCGv_i32 tmp = tcg_temp_new_i32();
512 if (TCG_TARGET_HAS_add2_i32) {
513 tcg_gen_movi_i32(tmp, 0);
514 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
515 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
516 } else {
517 TCGv_i64 q0 = tcg_temp_new_i64();
518 TCGv_i64 q1 = tcg_temp_new_i64();
519 tcg_gen_extu_i32_i64(q0, t0);
520 tcg_gen_extu_i32_i64(q1, t1);
521 tcg_gen_add_i64(q0, q0, q1);
522 tcg_gen_extu_i32_i64(q1, cpu_CF);
523 tcg_gen_add_i64(q0, q0, q1);
524 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
525 tcg_temp_free_i64(q0);
526 tcg_temp_free_i64(q1);
528 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
529 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
530 tcg_gen_xor_i32(tmp, t0, t1);
531 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
532 tcg_temp_free_i32(tmp);
533 tcg_gen_mov_i32(dest, cpu_NF);
536 /* dest = T0 - T1. Compute C, N, V and Z flags */
537 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
539 TCGv_i32 tmp;
540 tcg_gen_sub_i32(cpu_NF, t0, t1);
541 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
542 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
543 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
544 tmp = tcg_temp_new_i32();
545 tcg_gen_xor_i32(tmp, t0, t1);
546 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
547 tcg_temp_free_i32(tmp);
548 tcg_gen_mov_i32(dest, cpu_NF);
551 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
552 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
554 TCGv_i32 tmp = tcg_temp_new_i32();
555 tcg_gen_not_i32(tmp, t1);
556 gen_adc_CC(dest, t0, tmp);
557 tcg_temp_free_i32(tmp);
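/* Arithmetic check of the identity used above (illustration only): SBC is
 * T0 - T1 - !CF, and T0 + ~T1 + CF is the same value modulo 2^32 because
 * ~T1 == -T1 - 1.  E.g. T0 = 5, T1 = 3, CF = 1: 5 + 0xfffffffc + 1 == 2,
 * which matches 5 - 3 - 0.
 */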
560 #define GEN_SHIFT(name) \
561 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
563 TCGv_i32 tmp1, tmp2, tmp3; \
564 tmp1 = tcg_temp_new_i32(); \
565 tcg_gen_andi_i32(tmp1, t1, 0xff); \
566 tmp2 = tcg_const_i32(0); \
567 tmp3 = tcg_const_i32(0x1f); \
568 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
569 tcg_temp_free_i32(tmp3); \
570 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
571 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
572 tcg_temp_free_i32(tmp2); \
573 tcg_temp_free_i32(tmp1); \
575 GEN_SHIFT(shl)
576 GEN_SHIFT(shr)
577 #undef GEN_SHIFT
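/* The movcond in GEN_SHIFT implements the ARM rule that register-specified
 * shifts use the bottom byte of the shift register and yield 0 once the
 * amount exceeds 31 (illustration: LSL by 40 produces 0).  It also keeps
 * the TCG shift op's operand in the defined 0..31 range.
 */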
579 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
581 TCGv_i32 tmp1, tmp2;
582 tmp1 = tcg_temp_new_i32();
583 tcg_gen_andi_i32(tmp1, t1, 0xff);
584 tmp2 = tcg_const_i32(0x1f);
585 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
586 tcg_temp_free_i32(tmp2);
587 tcg_gen_sar_i32(dest, t0, tmp1);
588 tcg_temp_free_i32(tmp1);
591 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
593 TCGv_i32 c0 = tcg_const_i32(0);
594 TCGv_i32 tmp = tcg_temp_new_i32();
595 tcg_gen_neg_i32(tmp, src);
596 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
597 tcg_temp_free_i32(c0);
598 tcg_temp_free_i32(tmp);
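/* Illustration: for src = -5 the movcond selects the negated copy, giving
 * dest = 5; for src = 5 it keeps src; INT32_MIN maps to itself, matching
 * ordinary two's-complement abs() behaviour.
 */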
601 static void shifter_out_im(TCGv_i32 var, int shift)
603 if (shift == 0) {
604 tcg_gen_andi_i32(cpu_CF, var, 1);
605 } else {
606 tcg_gen_shri_i32(cpu_CF, var, shift);
607 if (shift != 31) {
608 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
613 /* Shift by immediate. Includes special handling for shift == 0. */
614 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
615 int shift, int flags)
617 switch (shiftop) {
618 case 0: /* LSL */
619 if (shift != 0) {
620 if (flags)
621 shifter_out_im(var, 32 - shift);
622 tcg_gen_shli_i32(var, var, shift);
624 break;
625 case 1: /* LSR */
626 if (shift == 0) {
627 if (flags) {
628 tcg_gen_shri_i32(cpu_CF, var, 31);
630 tcg_gen_movi_i32(var, 0);
631 } else {
632 if (flags)
633 shifter_out_im(var, shift - 1);
634 tcg_gen_shri_i32(var, var, shift);
636 break;
637 case 2: /* ASR */
638 if (shift == 0)
639 shift = 32;
640 if (flags)
641 shifter_out_im(var, shift - 1);
642 if (shift == 32)
643 shift = 31;
644 tcg_gen_sari_i32(var, var, shift);
645 break;
646 case 3: /* ROR/RRX */
647 if (shift != 0) {
648 if (flags)
649 shifter_out_im(var, shift - 1);
650 tcg_gen_rotri_i32(var, var, shift); break;
651 } else {
652 TCGv_i32 tmp = tcg_temp_new_i32();
653 tcg_gen_shli_i32(tmp, cpu_CF, 31);
654 if (flags)
655 shifter_out_im(var, 0);
656 tcg_gen_shri_i32(var, var, 1);
657 tcg_gen_or_i32(var, var, tmp);
658 tcg_temp_free_i32(tmp);
663 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
664 TCGv_i32 shift, int flags)
666 if (flags) {
667 switch (shiftop) {
668 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
669 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
670 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
671 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
673 } else {
674 switch (shiftop) {
675 case 0:
676 gen_shl(var, var, shift);
677 break;
678 case 1:
679 gen_shr(var, var, shift);
680 break;
681 case 2:
682 gen_sar(var, var, shift);
683 break;
684 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
685 tcg_gen_rotr_i32(var, var, shift); break;
688 tcg_temp_free_i32(shift);
691 #define PAS_OP(pfx) \
692 switch (op2) { \
693 case 0: gen_pas_helper(glue(pfx,add16)); break; \
694 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
695 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
696 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
697 case 4: gen_pas_helper(glue(pfx,add8)); break; \
698 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
700 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
702 TCGv_ptr tmp;
704 switch (op1) {
705 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
706 case 1:
707 tmp = tcg_temp_new_ptr();
708 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
709 PAS_OP(s)
710 tcg_temp_free_ptr(tmp);
711 break;
712 case 5:
713 tmp = tcg_temp_new_ptr();
714 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
715 PAS_OP(u)
716 tcg_temp_free_ptr(tmp);
717 break;
718 #undef gen_pas_helper
719 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
720 case 2:
721 PAS_OP(q);
722 break;
723 case 3:
724 PAS_OP(sh);
725 break;
726 case 6:
727 PAS_OP(uq);
728 break;
729 case 7:
730 PAS_OP(uh);
731 break;
732 #undef gen_pas_helper
735 #undef PAS_OP
737 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
738 #define PAS_OP(pfx) \
739 switch (op1) { \
740 case 0: gen_pas_helper(glue(pfx,add8)); break; \
741 case 1: gen_pas_helper(glue(pfx,add16)); break; \
742 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
743 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
744 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
745 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
747 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
749 TCGv_ptr tmp;
751 switch (op2) {
752 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
753 case 0:
754 tmp = tcg_temp_new_ptr();
755 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
756 PAS_OP(s)
757 tcg_temp_free_ptr(tmp);
758 break;
759 case 4:
760 tmp = tcg_temp_new_ptr();
761 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
762 PAS_OP(u)
763 tcg_temp_free_ptr(tmp);
764 break;
765 #undef gen_pas_helper
766 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
767 case 1:
768 PAS_OP(q);
769 break;
770 case 2:
771 PAS_OP(sh);
772 break;
773 case 5:
774 PAS_OP(uq);
775 break;
776 case 6:
777 PAS_OP(uh);
778 break;
779 #undef gen_pas_helper
782 #undef PAS_OP
785 * Generate a conditional based on ARM condition code cc.
786 * This is common between ARM and Aarch64 targets.
788 void arm_test_cc(DisasCompare *cmp, int cc)
790 TCGv_i32 value;
791 TCGCond cond;
792 bool global = true;
794 switch (cc) {
795 case 0: /* eq: Z */
796 case 1: /* ne: !Z */
797 cond = TCG_COND_EQ;
798 value = cpu_ZF;
799 break;
801 case 2: /* cs: C */
802 case 3: /* cc: !C */
803 cond = TCG_COND_NE;
804 value = cpu_CF;
805 break;
807 case 4: /* mi: N */
808 case 5: /* pl: !N */
809 cond = TCG_COND_LT;
810 value = cpu_NF;
811 break;
813 case 6: /* vs: V */
814 case 7: /* vc: !V */
815 cond = TCG_COND_LT;
816 value = cpu_VF;
817 break;
819 case 8: /* hi: C && !Z */
820 case 9: /* ls: !C || Z -> !(C && !Z) */
821 cond = TCG_COND_NE;
822 value = tcg_temp_new_i32();
823 global = false;
824 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
825 ZF is non-zero for !Z; so AND the two subexpressions. */
826 tcg_gen_neg_i32(value, cpu_CF);
827 tcg_gen_and_i32(value, value, cpu_ZF);
828 break;
830 case 10: /* ge: N == V -> N ^ V == 0 */
831 case 11: /* lt: N != V -> N ^ V != 0 */
832 /* Since we're only interested in the sign bit, == 0 is >= 0. */
833 cond = TCG_COND_GE;
834 value = tcg_temp_new_i32();
835 global = false;
836 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
837 break;
839 case 12: /* gt: !Z && N == V */
840 case 13: /* le: Z || N != V */
841 cond = TCG_COND_NE;
842 value = tcg_temp_new_i32();
843 global = false;
844 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
845 * the sign bit then AND with ZF to yield the result. */
846 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
847 tcg_gen_sari_i32(value, value, 31);
848 tcg_gen_andc_i32(value, cpu_ZF, value);
849 break;
851 case 14: /* always */
852 case 15: /* always */
853 /* Use the ALWAYS condition, which will fold early.
854 * It doesn't matter what we use for the value. */
855 cond = TCG_COND_ALWAYS;
856 value = cpu_ZF;
857 goto no_invert;
859 default:
860 fprintf(stderr, "Bad condition code 0x%x\n", cc);
861 abort();
864 if (cc & 1) {
865 cond = tcg_invert_cond(cond);
868 no_invert:
869 cmp->cond = cond;
870 cmp->value = value;
871 cmp->value_global = global;
874 void arm_free_cc(DisasCompare *cmp)
876 if (!cmp->value_global) {
877 tcg_temp_free_i32(cmp->value);
881 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
883 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
886 void arm_gen_test_cc(int cc, TCGLabel *label)
888 DisasCompare cmp;
889 arm_test_cc(&cmp, cc);
890 arm_jump_cc(&cmp, label);
891 arm_free_cc(&cmp);
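/* Minimal usage sketch (the pattern used by the decoders later in this file):
 *
 *     s->condlabel = gen_new_label();
 *     arm_gen_test_cc(cond ^ 1, s->condlabel);   // skip body if cond fails
 *     ...emit the conditionally executed instruction...
 *     gen_set_label(s->condlabel);
 */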
894 static const uint8_t table_logic_cc[16] = {
895 1, /* and */
896 1, /* xor */
897 0, /* sub */
898 0, /* rsb */
899 0, /* add */
900 0, /* adc */
901 0, /* sbc */
902 0, /* rsc */
903 1, /* andl */
904 1, /* xorl */
905 0, /* cmp */
906 0, /* cmn */
907 1, /* orr */
908 1, /* mov */
909 1, /* bic */
910 1, /* mvn */
913 static inline void gen_set_condexec(DisasContext *s)
915 if (s->condexec_mask) {
916 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
917 TCGv_i32 tmp = tcg_temp_new_i32();
918 tcg_gen_movi_i32(tmp, val);
919 store_cpu_field(tmp, condexec_bits);
923 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
925 tcg_gen_movi_i32(cpu_R[15], val);
928 /* Set PC and Thumb state from an immediate address. */
929 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
931 TCGv_i32 tmp;
933 s->base.is_jmp = DISAS_JUMP;
934 if (s->thumb != (addr & 1)) {
935 tmp = tcg_temp_new_i32();
936 tcg_gen_movi_i32(tmp, addr & 1);
937 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
938 tcg_temp_free_i32(tmp);
940 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
943 /* Set PC and Thumb state from var. var is marked as dead. */
944 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
946 s->base.is_jmp = DISAS_JUMP;
947 tcg_gen_andi_i32(cpu_R[15], var, ~1);
948 tcg_gen_andi_i32(var, var, 1);
949 store_cpu_field(var, thumb);
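/* Illustration: a BX to 0x8001 writes 0x8000 to the PC and stores the
 * stripped bit 0 into env->thumb, i.e. the interworking branch switches
 * the CPU into Thumb state.
 */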
952 /* Set PC and Thumb state from var. var is marked as dead.
953 * For M-profile CPUs, include logic to detect exception-return
954 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
955 * and BX reg, and no others, and happens only for code in Handler mode.
957 static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
959 /* Generate the same code here as for a simple bx, but flag via
960 * s->base.is_jmp that we need to do the rest of the work later.
962 gen_bx(s, var);
963 if (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M)) {
964 s->base.is_jmp = DISAS_BX_EXCRET;
968 static inline void gen_bx_excret_final_code(DisasContext *s)
970 /* Generate the code to finish possible exception return and end the TB */
971 TCGLabel *excret_label = gen_new_label();
973 /* Is the new PC value in the magic range indicating exception return? */
974 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], 0xff000000, excret_label);
975 /* No: end the TB as we would for a DISAS_JMP */
976 if (is_singlestepping(s)) {
977 gen_singlestep_exception(s);
978 } else {
979 tcg_gen_exit_tb(0);
981 gen_set_label(excret_label);
982 /* Yes: this is an exception return.
983 * At this point in runtime env->regs[15] and env->thumb will hold
984 * the exception-return magic number, which do_v7m_exception_exit()
985 * will read. Nothing else will be able to see those values because
986 * the cpu-exec main loop guarantees that we will always go straight
987 * from raising the exception to the exception-handling code.
989 * gen_ss_advance(s) does nothing on M profile currently but
990 * calling it is conceptually the right thing as we have executed
991 * this instruction (compare SWI, HVC, SMC handling).
993 gen_ss_advance(s);
994 gen_exception_internal(EXCP_EXCEPTION_EXIT);
997 static inline void gen_bxns(DisasContext *s, int rm)
999 TCGv_i32 var = load_reg(s, rm);
1001 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1002 * we need to sync state before calling it, but:
1003 * - we don't need to do gen_set_pc_im() because the bxns helper will
1004 * always set the PC itself
1005 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1006 * unless it's outside an IT block or the last insn in an IT block,
1007 * so we know that condexec == 0 (already set at the top of the TB)
1008 * is correct in the non-UNPREDICTABLE cases, and we can choose
1009 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1011 gen_helper_v7m_bxns(cpu_env, var);
1012 tcg_temp_free_i32(var);
1013 s->base.is_jmp = DISAS_EXIT;
1016 /* Variant of store_reg which uses branch&exchange logic when storing
1017 to r15 in ARM architecture v7 and above. The source must be a temporary
1018 and will be marked as dead. */
1019 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
1021 if (reg == 15 && ENABLE_ARCH_7) {
1022 gen_bx(s, var);
1023 } else {
1024 store_reg(s, reg, var);
1028 /* Variant of store_reg which uses branch&exchange logic when storing
1029 * to r15 in ARM architecture v5T and above. This is used for storing
1030 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1031 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
1032 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
1034 if (reg == 15 && ENABLE_ARCH_5) {
1035 gen_bx_excret(s, var);
1036 } else {
1037 store_reg(s, reg, var);
1041 #ifdef CONFIG_USER_ONLY
1042 #define IS_USER_ONLY 1
1043 #else
1044 #define IS_USER_ONLY 0
1045 #endif
1047 /* Abstractions of "generate code to do a guest load/store for
1048 * AArch32", where a vaddr is always 32 bits (and is zero
1049 * extended if we're a 64 bit core) and data is also
1050 * 32 bits unless specifically doing a 64 bit access.
1051 * These functions work like tcg_gen_qemu_{ld,st}* except
1052 * that the address argument is TCGv_i32 rather than TCGv.
1055 static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
1057 TCGv addr = tcg_temp_new();
1058 tcg_gen_extu_i32_tl(addr, a32);
1060 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1061 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1062 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
1064 return addr;
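/* Worked example of the BE32 adjustment above (illustrative): with
 * SCTLR.B set, a byte access XORs the address with 4 - 1 = 3, so loading
 * the byte at 0x1000 really touches 0x1003; a halfword access XORs with
 * 4 - 2 = 2.  Word-sized and larger accesses are left unchanged.
 */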
1067 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1068 int index, TCGMemOp opc)
1070 TCGv addr = gen_aa32_addr(s, a32, opc);
1071 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1072 tcg_temp_free(addr);
1075 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1076 int index, TCGMemOp opc)
1078 TCGv addr = gen_aa32_addr(s, a32, opc);
1079 tcg_gen_qemu_st_i32(val, addr, index, opc);
1080 tcg_temp_free(addr);
1083 #define DO_GEN_LD(SUFF, OPC) \
1084 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
1085 TCGv_i32 a32, int index) \
1087 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
1089 static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1090 TCGv_i32 val, \
1091 TCGv_i32 a32, int index, \
1092 ISSInfo issinfo) \
1094 gen_aa32_ld##SUFF(s, val, a32, index); \
1095 disas_set_da_iss(s, OPC, issinfo); \
1098 #define DO_GEN_ST(SUFF, OPC) \
1099 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1100 TCGv_i32 a32, int index) \
1102 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
1104 static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1105 TCGv_i32 val, \
1106 TCGv_i32 a32, int index, \
1107 ISSInfo issinfo) \
1109 gen_aa32_st##SUFF(s, val, a32, index); \
1110 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
1113 static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
1115 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1116 if (!IS_USER_ONLY && s->sctlr_b) {
1117 tcg_gen_rotri_i64(val, val, 32);
1121 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1122 int index, TCGMemOp opc)
1124 TCGv addr = gen_aa32_addr(s, a32, opc);
1125 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1126 gen_aa32_frob64(s, val);
1127 tcg_temp_free(addr);
1130 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1131 TCGv_i32 a32, int index)
1133 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1136 static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1137 int index, TCGMemOp opc)
1139 TCGv addr = gen_aa32_addr(s, a32, opc);
1141 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1142 if (!IS_USER_ONLY && s->sctlr_b) {
1143 TCGv_i64 tmp = tcg_temp_new_i64();
1144 tcg_gen_rotri_i64(tmp, val, 32);
1145 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1146 tcg_temp_free_i64(tmp);
1147 } else {
1148 tcg_gen_qemu_st_i64(val, addr, index, opc);
1150 tcg_temp_free(addr);
1153 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1154 TCGv_i32 a32, int index)
1156 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1159 DO_GEN_LD(8s, MO_SB)
1160 DO_GEN_LD(8u, MO_UB)
1161 DO_GEN_LD(16s, MO_SW)
1162 DO_GEN_LD(16u, MO_UW)
1163 DO_GEN_LD(32u, MO_UL)
1164 DO_GEN_ST(8, MO_UB)
1165 DO_GEN_ST(16, MO_UW)
1166 DO_GEN_ST(32, MO_UL)
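/* For illustration, DO_GEN_LD(16u, MO_UW) above expands to roughly:
 *
 *     static inline void gen_aa32_ld16u(DisasContext *s, TCGv_i32 val,
 *                                       TCGv_i32 a32, int index)
 *     {
 *         gen_aa32_ld_i32(s, val, a32, index, MO_UW | s->be_data);
 *     }
 *
 * plus a gen_aa32_ld16u_iss() variant that also records the data-abort
 * syndrome via disas_set_da_iss().
 */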
1168 static inline void gen_hvc(DisasContext *s, int imm16)
1170 /* The pre HVC helper handles cases when HVC gets trapped
1171 * as an undefined insn by runtime configuration (ie before
1172 * the insn really executes).
1174 gen_set_pc_im(s, s->pc - 4);
1175 gen_helper_pre_hvc(cpu_env);
1176 /* Otherwise we will treat this as a real exception which
1177 * happens after execution of the insn. (The distinction matters
1178 * for the PC value reported to the exception handler and also
1179 * for single stepping.)
1181 s->svc_imm = imm16;
1182 gen_set_pc_im(s, s->pc);
1183 s->base.is_jmp = DISAS_HVC;
1186 static inline void gen_smc(DisasContext *s)
1188 /* As with HVC, we may take an exception either before or after
1189 * the insn executes.
1191 TCGv_i32 tmp;
1193 gen_set_pc_im(s, s->pc - 4);
1194 tmp = tcg_const_i32(syn_aa32_smc());
1195 gen_helper_pre_smc(cpu_env, tmp);
1196 tcg_temp_free_i32(tmp);
1197 gen_set_pc_im(s, s->pc);
1198 s->base.is_jmp = DISAS_SMC;
1201 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1203 gen_set_condexec(s);
1204 gen_set_pc_im(s, s->pc - offset);
1205 gen_exception_internal(excp);
1206 s->base.is_jmp = DISAS_NORETURN;
1209 static void gen_exception_insn(DisasContext *s, int offset, int excp,
1210 int syn, uint32_t target_el)
1212 gen_set_condexec(s);
1213 gen_set_pc_im(s, s->pc - offset);
1214 gen_exception(excp, syn, target_el);
1215 s->base.is_jmp = DISAS_NORETURN;
1218 /* Force a TB lookup after an instruction that changes the CPU state. */
1219 static inline void gen_lookup_tb(DisasContext *s)
1221 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
1222 s->base.is_jmp = DISAS_EXIT;
1225 static inline void gen_hlt(DisasContext *s, int imm)
1227 /* HLT. This has two purposes.
1228 * Architecturally, it is an external halting debug instruction.
1229 * Since QEMU doesn't implement external debug, we treat this as
1230 * it is required for halting debug disabled: it will UNDEF.
1231 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1232 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1233 * must trigger semihosting even for ARMv7 and earlier, where
1234 * HLT was an undefined encoding.
1235 * In system mode, we don't allow userspace access to
1236 * semihosting, to provide some semblance of security
1237 * (and for consistency with our 32-bit semihosting).
1239 if (semihosting_enabled() &&
1240 #ifndef CONFIG_USER_ONLY
1241 s->current_el != 0 &&
1242 #endif
1243 (imm == (s->thumb ? 0x3c : 0xf000))) {
1244 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1245 return;
1248 gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
1249 default_exception_el(s));
1252 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1253 TCGv_i32 var)
1255 int val, rm, shift, shiftop;
1256 TCGv_i32 offset;
1258 if (!(insn & (1 << 25))) {
1259 /* immediate */
1260 val = insn & 0xfff;
1261 if (!(insn & (1 << 23)))
1262 val = -val;
1263 if (val != 0)
1264 tcg_gen_addi_i32(var, var, val);
1265 } else {
1266 /* shift/register */
1267 rm = (insn) & 0xf;
1268 shift = (insn >> 7) & 0x1f;
1269 shiftop = (insn >> 5) & 3;
1270 offset = load_reg(s, rm);
1271 gen_arm_shift_im(offset, shiftop, shift, 0);
1272 if (!(insn & (1 << 23)))
1273 tcg_gen_sub_i32(var, var, offset);
1274 else
1275 tcg_gen_add_i32(var, var, offset);
1276 tcg_temp_free_i32(offset);
1280 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1281 int extra, TCGv_i32 var)
1283 int val, rm;
1284 TCGv_i32 offset;
1286 if (insn & (1 << 22)) {
1287 /* immediate */
1288 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1289 if (!(insn & (1 << 23)))
1290 val = -val;
1291 val += extra;
1292 if (val != 0)
1293 tcg_gen_addi_i32(var, var, val);
1294 } else {
1295 /* register */
1296 if (extra)
1297 tcg_gen_addi_i32(var, var, extra);
1298 rm = (insn) & 0xf;
1299 offset = load_reg(s, rm);
1300 if (!(insn & (1 << 23)))
1301 tcg_gen_sub_i32(var, var, offset);
1302 else
1303 tcg_gen_add_i32(var, var, offset);
1304 tcg_temp_free_i32(offset);
1308 static TCGv_ptr get_fpstatus_ptr(int neon)
1310 TCGv_ptr statusptr = tcg_temp_new_ptr();
1311 int offset;
1312 if (neon) {
1313 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1314 } else {
1315 offset = offsetof(CPUARMState, vfp.fp_status);
1317 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1318 return statusptr;
1321 #define VFP_OP2(name) \
1322 static inline void gen_vfp_##name(int dp) \
1324 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1325 if (dp) { \
1326 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1327 } else { \
1328 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1330 tcg_temp_free_ptr(fpst); \
1333 VFP_OP2(add)
1334 VFP_OP2(sub)
1335 VFP_OP2(mul)
1336 VFP_OP2(div)
1338 #undef VFP_OP2
1340 static inline void gen_vfp_F1_mul(int dp)
1342 /* Like gen_vfp_mul() but put result in F1 */
1343 TCGv_ptr fpst = get_fpstatus_ptr(0);
1344 if (dp) {
1345 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1346 } else {
1347 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1349 tcg_temp_free_ptr(fpst);
1352 static inline void gen_vfp_F1_neg(int dp)
1354 /* Like gen_vfp_neg() but put result in F1 */
1355 if (dp) {
1356 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1357 } else {
1358 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1362 static inline void gen_vfp_abs(int dp)
1364 if (dp)
1365 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1366 else
1367 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1370 static inline void gen_vfp_neg(int dp)
1372 if (dp)
1373 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1374 else
1375 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1378 static inline void gen_vfp_sqrt(int dp)
1380 if (dp)
1381 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1382 else
1383 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1386 static inline void gen_vfp_cmp(int dp)
1388 if (dp)
1389 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1390 else
1391 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1394 static inline void gen_vfp_cmpe(int dp)
1396 if (dp)
1397 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1398 else
1399 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1402 static inline void gen_vfp_F1_ld0(int dp)
1404 if (dp)
1405 tcg_gen_movi_i64(cpu_F1d, 0);
1406 else
1407 tcg_gen_movi_i32(cpu_F1s, 0);
1410 #define VFP_GEN_ITOF(name) \
1411 static inline void gen_vfp_##name(int dp, int neon) \
1413 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1414 if (dp) { \
1415 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1416 } else { \
1417 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1419 tcg_temp_free_ptr(statusptr); \
1422 VFP_GEN_ITOF(uito)
1423 VFP_GEN_ITOF(sito)
1424 #undef VFP_GEN_ITOF
1426 #define VFP_GEN_FTOI(name) \
1427 static inline void gen_vfp_##name(int dp, int neon) \
1429 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1430 if (dp) { \
1431 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1432 } else { \
1433 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1435 tcg_temp_free_ptr(statusptr); \
1438 VFP_GEN_FTOI(toui)
1439 VFP_GEN_FTOI(touiz)
1440 VFP_GEN_FTOI(tosi)
1441 VFP_GEN_FTOI(tosiz)
1442 #undef VFP_GEN_FTOI
1444 #define VFP_GEN_FIX(name, round) \
1445 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1447 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1448 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1449 if (dp) { \
1450 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1451 statusptr); \
1452 } else { \
1453 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1454 statusptr); \
1456 tcg_temp_free_i32(tmp_shift); \
1457 tcg_temp_free_ptr(statusptr); \
1459 VFP_GEN_FIX(tosh, _round_to_zero)
1460 VFP_GEN_FIX(tosl, _round_to_zero)
1461 VFP_GEN_FIX(touh, _round_to_zero)
1462 VFP_GEN_FIX(toul, _round_to_zero)
1463 VFP_GEN_FIX(shto, )
1464 VFP_GEN_FIX(slto, )
1465 VFP_GEN_FIX(uhto, )
1466 VFP_GEN_FIX(ulto, )
1467 #undef VFP_GEN_FIX
1469 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1471 if (dp) {
1472 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
1473 } else {
1474 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
1478 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1480 if (dp) {
1481 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
1482 } else {
1483 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
1487 static inline long
1488 vfp_reg_offset (int dp, int reg)
1490 if (dp)
1491 return offsetof(CPUARMState, vfp.regs[reg]);
1492 else if (reg & 1) {
1493 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1494 + offsetof(CPU_DoubleU, l.upper);
1495 } else {
1496 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1497 + offsetof(CPU_DoubleU, l.lower);
1501 /* Return the offset of a 32-bit piece of a NEON register.
1502 zero is the least significant end of the register. */
1503 static inline long
1504 neon_reg_offset (int reg, int n)
1506 int sreg;
1507 sreg = reg * 2 + n;
1508 return vfp_reg_offset(0, sreg);
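/* Example (illustrative): neon_reg_offset(3, 1) gives sreg = 7, i.e. the
 * upper 32 bits of vfp.regs[3] -- the high half of d3, equivalently the
 * odd single-precision register s7 in the shared register file.
 */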
1511 static TCGv_i32 neon_load_reg(int reg, int pass)
1513 TCGv_i32 tmp = tcg_temp_new_i32();
1514 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1515 return tmp;
1518 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1520 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1521 tcg_temp_free_i32(var);
1524 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1526 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1529 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1531 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1534 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1535 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1536 #define tcg_gen_st_f32 tcg_gen_st_i32
1537 #define tcg_gen_st_f64 tcg_gen_st_i64
1539 static inline void gen_mov_F0_vreg(int dp, int reg)
1541 if (dp)
1542 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1543 else
1544 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1547 static inline void gen_mov_F1_vreg(int dp, int reg)
1549 if (dp)
1550 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1551 else
1552 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1555 static inline void gen_mov_vreg_F0(int dp, int reg)
1557 if (dp)
1558 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1559 else
1560 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1563 #define ARM_CP_RW_BIT (1 << 20)
1565 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1567 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1570 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1572 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1575 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1577 TCGv_i32 var = tcg_temp_new_i32();
1578 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1579 return var;
1582 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1584 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1585 tcg_temp_free_i32(var);
1588 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1590 iwmmxt_store_reg(cpu_M0, rn);
1593 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1595 iwmmxt_load_reg(cpu_M0, rn);
1598 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1600 iwmmxt_load_reg(cpu_V1, rn);
1601 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1604 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1606 iwmmxt_load_reg(cpu_V1, rn);
1607 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1610 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1612 iwmmxt_load_reg(cpu_V1, rn);
1613 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1616 #define IWMMXT_OP(name) \
1617 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1619 iwmmxt_load_reg(cpu_V1, rn); \
1620 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1623 #define IWMMXT_OP_ENV(name) \
1624 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1626 iwmmxt_load_reg(cpu_V1, rn); \
1627 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1630 #define IWMMXT_OP_ENV_SIZE(name) \
1631 IWMMXT_OP_ENV(name##b) \
1632 IWMMXT_OP_ENV(name##w) \
1633 IWMMXT_OP_ENV(name##l)
1635 #define IWMMXT_OP_ENV1(name) \
1636 static inline void gen_op_iwmmxt_##name##_M0(void) \
1638 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1641 IWMMXT_OP(maddsq)
1642 IWMMXT_OP(madduq)
1643 IWMMXT_OP(sadb)
1644 IWMMXT_OP(sadw)
1645 IWMMXT_OP(mulslw)
1646 IWMMXT_OP(mulshw)
1647 IWMMXT_OP(mululw)
1648 IWMMXT_OP(muluhw)
1649 IWMMXT_OP(macsw)
1650 IWMMXT_OP(macuw)
1652 IWMMXT_OP_ENV_SIZE(unpackl)
1653 IWMMXT_OP_ENV_SIZE(unpackh)
1655 IWMMXT_OP_ENV1(unpacklub)
1656 IWMMXT_OP_ENV1(unpackluw)
1657 IWMMXT_OP_ENV1(unpacklul)
1658 IWMMXT_OP_ENV1(unpackhub)
1659 IWMMXT_OP_ENV1(unpackhuw)
1660 IWMMXT_OP_ENV1(unpackhul)
1661 IWMMXT_OP_ENV1(unpacklsb)
1662 IWMMXT_OP_ENV1(unpacklsw)
1663 IWMMXT_OP_ENV1(unpacklsl)
1664 IWMMXT_OP_ENV1(unpackhsb)
1665 IWMMXT_OP_ENV1(unpackhsw)
1666 IWMMXT_OP_ENV1(unpackhsl)
1668 IWMMXT_OP_ENV_SIZE(cmpeq)
1669 IWMMXT_OP_ENV_SIZE(cmpgtu)
1670 IWMMXT_OP_ENV_SIZE(cmpgts)
1672 IWMMXT_OP_ENV_SIZE(mins)
1673 IWMMXT_OP_ENV_SIZE(minu)
1674 IWMMXT_OP_ENV_SIZE(maxs)
1675 IWMMXT_OP_ENV_SIZE(maxu)
1677 IWMMXT_OP_ENV_SIZE(subn)
1678 IWMMXT_OP_ENV_SIZE(addn)
1679 IWMMXT_OP_ENV_SIZE(subu)
1680 IWMMXT_OP_ENV_SIZE(addu)
1681 IWMMXT_OP_ENV_SIZE(subs)
1682 IWMMXT_OP_ENV_SIZE(adds)
1684 IWMMXT_OP_ENV(avgb0)
1685 IWMMXT_OP_ENV(avgb1)
1686 IWMMXT_OP_ENV(avgw0)
1687 IWMMXT_OP_ENV(avgw1)
1689 IWMMXT_OP_ENV(packuw)
1690 IWMMXT_OP_ENV(packul)
1691 IWMMXT_OP_ENV(packuq)
1692 IWMMXT_OP_ENV(packsw)
1693 IWMMXT_OP_ENV(packsl)
1694 IWMMXT_OP_ENV(packsq)
1696 static void gen_op_iwmmxt_set_mup(void)
1698 TCGv_i32 tmp;
1699 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1700 tcg_gen_ori_i32(tmp, tmp, 2);
1701 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1704 static void gen_op_iwmmxt_set_cup(void)
1706 TCGv_i32 tmp;
1707 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1708 tcg_gen_ori_i32(tmp, tmp, 1);
1709 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1712 static void gen_op_iwmmxt_setpsr_nz(void)
1714 TCGv_i32 tmp = tcg_temp_new_i32();
1715 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1716 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1719 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1721 iwmmxt_load_reg(cpu_V1, rn);
1722 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1723 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1726 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1727 TCGv_i32 dest)
1729 int rd;
1730 uint32_t offset;
1731 TCGv_i32 tmp;
1733 rd = (insn >> 16) & 0xf;
1734 tmp = load_reg(s, rd);
1736 offset = (insn & 0xff) << ((insn >> 7) & 2);
1737 if (insn & (1 << 24)) {
1738 /* Pre indexed */
1739 if (insn & (1 << 23))
1740 tcg_gen_addi_i32(tmp, tmp, offset);
1741 else
1742 tcg_gen_addi_i32(tmp, tmp, -offset);
1743 tcg_gen_mov_i32(dest, tmp);
1744 if (insn & (1 << 21))
1745 store_reg(s, rd, tmp);
1746 else
1747 tcg_temp_free_i32(tmp);
1748 } else if (insn & (1 << 21)) {
1749 /* Post indexed */
1750 tcg_gen_mov_i32(dest, tmp);
1751 if (insn & (1 << 23))
1752 tcg_gen_addi_i32(tmp, tmp, offset);
1753 else
1754 tcg_gen_addi_i32(tmp, tmp, -offset);
1755 store_reg(s, rd, tmp);
1756 } else if (!(insn & (1 << 23)))
1757 return 1;
1758 return 0;
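/* Sketch of the offset encoding handled above (illustrative): the 8-bit
 * immediate is scaled by bit 8 of the insn, so 0x10 with bit 8 set means
 * an offset of 0x10 << 2 == 0x40, while with bit 8 clear it stays 0x10.
 */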
1761 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1763 int rd = (insn >> 0) & 0xf;
1764 TCGv_i32 tmp;
1766 if (insn & (1 << 8)) {
1767 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1768 return 1;
1769 } else {
1770 tmp = iwmmxt_load_creg(rd);
1772 } else {
1773 tmp = tcg_temp_new_i32();
1774 iwmmxt_load_reg(cpu_V0, rd);
1775 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1777 tcg_gen_andi_i32(tmp, tmp, mask);
1778 tcg_gen_mov_i32(dest, tmp);
1779 tcg_temp_free_i32(tmp);
1780 return 0;
1783 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1784 (ie. an undefined instruction). */
1785 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1787 int rd, wrd;
1788 int rdhi, rdlo, rd0, rd1, i;
1789 TCGv_i32 addr;
1790 TCGv_i32 tmp, tmp2, tmp3;
1792 if ((insn & 0x0e000e00) == 0x0c000000) {
1793 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1794 wrd = insn & 0xf;
1795 rdlo = (insn >> 12) & 0xf;
1796 rdhi = (insn >> 16) & 0xf;
1797 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1798 iwmmxt_load_reg(cpu_V0, wrd);
1799 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1800 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1801 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1802 } else { /* TMCRR */
1803 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1804 iwmmxt_store_reg(cpu_V0, wrd);
1805 gen_op_iwmmxt_set_mup();
1807 return 0;
1810 wrd = (insn >> 12) & 0xf;
1811 addr = tcg_temp_new_i32();
1812 if (gen_iwmmxt_address(s, insn, addr)) {
1813 tcg_temp_free_i32(addr);
1814 return 1;
1816 if (insn & ARM_CP_RW_BIT) {
1817 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1818 tmp = tcg_temp_new_i32();
1819 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1820 iwmmxt_store_creg(wrd, tmp);
1821 } else {
1822 i = 1;
1823 if (insn & (1 << 8)) {
1824 if (insn & (1 << 22)) { /* WLDRD */
1825 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1826 i = 0;
1827 } else { /* WLDRW wRd */
1828 tmp = tcg_temp_new_i32();
1829 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1831 } else {
1832 tmp = tcg_temp_new_i32();
1833 if (insn & (1 << 22)) { /* WLDRH */
1834 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1835 } else { /* WLDRB */
1836 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1839 if (i) {
1840 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1841 tcg_temp_free_i32(tmp);
1843 gen_op_iwmmxt_movq_wRn_M0(wrd);
1845 } else {
1846 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1847 tmp = iwmmxt_load_creg(wrd);
1848 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1849 } else {
1850 gen_op_iwmmxt_movq_M0_wRn(wrd);
1851 tmp = tcg_temp_new_i32();
1852 if (insn & (1 << 8)) {
1853 if (insn & (1 << 22)) { /* WSTRD */
1854 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1855 } else { /* WSTRW wRd */
1856 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1857 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1859 } else {
1860 if (insn & (1 << 22)) { /* WSTRH */
1861 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1862 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1863 } else { /* WSTRB */
1864 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1865 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1869 tcg_temp_free_i32(tmp);
1871 tcg_temp_free_i32(addr);
1872 return 0;
1875 if ((insn & 0x0f000000) != 0x0e000000)
1876 return 1;
1878 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1879 case 0x000: /* WOR */
1880 wrd = (insn >> 12) & 0xf;
1881 rd0 = (insn >> 0) & 0xf;
1882 rd1 = (insn >> 16) & 0xf;
1883 gen_op_iwmmxt_movq_M0_wRn(rd0);
1884 gen_op_iwmmxt_orq_M0_wRn(rd1);
1885 gen_op_iwmmxt_setpsr_nz();
1886 gen_op_iwmmxt_movq_wRn_M0(wrd);
1887 gen_op_iwmmxt_set_mup();
1888 gen_op_iwmmxt_set_cup();
1889 break;
1890 case 0x011: /* TMCR */
1891 if (insn & 0xf)
1892 return 1;
1893 rd = (insn >> 12) & 0xf;
1894 wrd = (insn >> 16) & 0xf;
1895 switch (wrd) {
1896 case ARM_IWMMXT_wCID:
1897 case ARM_IWMMXT_wCASF:
1898 break;
1899 case ARM_IWMMXT_wCon:
1900 gen_op_iwmmxt_set_cup();
1901 /* Fall through. */
1902 case ARM_IWMMXT_wCSSF:
1903 tmp = iwmmxt_load_creg(wrd);
1904 tmp2 = load_reg(s, rd);
1905 tcg_gen_andc_i32(tmp, tmp, tmp2);
1906 tcg_temp_free_i32(tmp2);
1907 iwmmxt_store_creg(wrd, tmp);
1908 break;
1909 case ARM_IWMMXT_wCGR0:
1910 case ARM_IWMMXT_wCGR1:
1911 case ARM_IWMMXT_wCGR2:
1912 case ARM_IWMMXT_wCGR3:
1913 gen_op_iwmmxt_set_cup();
1914 tmp = load_reg(s, rd);
1915 iwmmxt_store_creg(wrd, tmp);
1916 break;
1917 default:
1918 return 1;
1920 break;
1921 case 0x100: /* WXOR */
1922 wrd = (insn >> 12) & 0xf;
1923 rd0 = (insn >> 0) & 0xf;
1924 rd1 = (insn >> 16) & 0xf;
1925 gen_op_iwmmxt_movq_M0_wRn(rd0);
1926 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1927 gen_op_iwmmxt_setpsr_nz();
1928 gen_op_iwmmxt_movq_wRn_M0(wrd);
1929 gen_op_iwmmxt_set_mup();
1930 gen_op_iwmmxt_set_cup();
1931 break;
1932 case 0x111: /* TMRC */
1933 if (insn & 0xf)
1934 return 1;
1935 rd = (insn >> 12) & 0xf;
1936 wrd = (insn >> 16) & 0xf;
1937 tmp = iwmmxt_load_creg(wrd);
1938 store_reg(s, rd, tmp);
1939 break;
1940 case 0x300: /* WANDN */
1941 wrd = (insn >> 12) & 0xf;
1942 rd0 = (insn >> 0) & 0xf;
1943 rd1 = (insn >> 16) & 0xf;
1944 gen_op_iwmmxt_movq_M0_wRn(rd0);
1945 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1946 gen_op_iwmmxt_andq_M0_wRn(rd1);
1947 gen_op_iwmmxt_setpsr_nz();
1948 gen_op_iwmmxt_movq_wRn_M0(wrd);
1949 gen_op_iwmmxt_set_mup();
1950 gen_op_iwmmxt_set_cup();
1951 break;
1952 case 0x200: /* WAND */
1953 wrd = (insn >> 12) & 0xf;
1954 rd0 = (insn >> 0) & 0xf;
1955 rd1 = (insn >> 16) & 0xf;
1956 gen_op_iwmmxt_movq_M0_wRn(rd0);
1957 gen_op_iwmmxt_andq_M0_wRn(rd1);
1958 gen_op_iwmmxt_setpsr_nz();
1959 gen_op_iwmmxt_movq_wRn_M0(wrd);
1960 gen_op_iwmmxt_set_mup();
1961 gen_op_iwmmxt_set_cup();
1962 break;
1963 case 0x810: case 0xa10: /* WMADD */
1964 wrd = (insn >> 12) & 0xf;
1965 rd0 = (insn >> 0) & 0xf;
1966 rd1 = (insn >> 16) & 0xf;
1967 gen_op_iwmmxt_movq_M0_wRn(rd0);
1968 if (insn & (1 << 21))
1969 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1970 else
1971 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1972 gen_op_iwmmxt_movq_wRn_M0(wrd);
1973 gen_op_iwmmxt_set_mup();
1974 break;
1975 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1976 wrd = (insn >> 12) & 0xf;
1977 rd0 = (insn >> 16) & 0xf;
1978 rd1 = (insn >> 0) & 0xf;
1979 gen_op_iwmmxt_movq_M0_wRn(rd0);
1980 switch ((insn >> 22) & 3) {
1981 case 0:
1982 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1983 break;
1984 case 1:
1985 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1986 break;
1987 case 2:
1988 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1989 break;
1990 case 3:
1991 return 1;
1993 gen_op_iwmmxt_movq_wRn_M0(wrd);
1994 gen_op_iwmmxt_set_mup();
1995 gen_op_iwmmxt_set_cup();
1996 break;
1997 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1998 wrd = (insn >> 12) & 0xf;
1999 rd0 = (insn >> 16) & 0xf;
2000 rd1 = (insn >> 0) & 0xf;
2001 gen_op_iwmmxt_movq_M0_wRn(rd0);
2002 switch ((insn >> 22) & 3) {
2003 case 0:
2004 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2005 break;
2006 case 1:
2007 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2008 break;
2009 case 2:
2010 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2011 break;
2012 case 3:
2013 return 1;
2015 gen_op_iwmmxt_movq_wRn_M0(wrd);
2016 gen_op_iwmmxt_set_mup();
2017 gen_op_iwmmxt_set_cup();
2018 break;
2019 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2020 wrd = (insn >> 12) & 0xf;
2021 rd0 = (insn >> 16) & 0xf;
2022 rd1 = (insn >> 0) & 0xf;
2023 gen_op_iwmmxt_movq_M0_wRn(rd0);
2024 if (insn & (1 << 22))
2025 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2026 else
2027 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2028 if (!(insn & (1 << 20)))
2029 gen_op_iwmmxt_addl_M0_wRn(wrd);
2030 gen_op_iwmmxt_movq_wRn_M0(wrd);
2031 gen_op_iwmmxt_set_mup();
2032 break;
2033 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2034 wrd = (insn >> 12) & 0xf;
2035 rd0 = (insn >> 16) & 0xf;
2036 rd1 = (insn >> 0) & 0xf;
2037 gen_op_iwmmxt_movq_M0_wRn(rd0);
2038 if (insn & (1 << 21)) {
2039 if (insn & (1 << 20))
2040 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2041 else
2042 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2043 } else {
2044 if (insn & (1 << 20))
2045 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2046 else
2047 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2049 gen_op_iwmmxt_movq_wRn_M0(wrd);
2050 gen_op_iwmmxt_set_mup();
2051 break;
2052 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2053 wrd = (insn >> 12) & 0xf;
2054 rd0 = (insn >> 16) & 0xf;
2055 rd1 = (insn >> 0) & 0xf;
2056 gen_op_iwmmxt_movq_M0_wRn(rd0);
2057 if (insn & (1 << 21))
2058 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2059 else
2060 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2061 if (!(insn & (1 << 20))) {
2062 iwmmxt_load_reg(cpu_V1, wrd);
2063 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
2065 gen_op_iwmmxt_movq_wRn_M0(wrd);
2066 gen_op_iwmmxt_set_mup();
2067 break;
2068 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2069 wrd = (insn >> 12) & 0xf;
2070 rd0 = (insn >> 16) & 0xf;
2071 rd1 = (insn >> 0) & 0xf;
2072 gen_op_iwmmxt_movq_M0_wRn(rd0);
2073 switch ((insn >> 22) & 3) {
2074 case 0:
2075 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2076 break;
2077 case 1:
2078 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2079 break;
2080 case 2:
2081 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2082 break;
2083 case 3:
2084 return 1;
2086 gen_op_iwmmxt_movq_wRn_M0(wrd);
2087 gen_op_iwmmxt_set_mup();
2088 gen_op_iwmmxt_set_cup();
2089 break;
2090 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2091 wrd = (insn >> 12) & 0xf;
2092 rd0 = (insn >> 16) & 0xf;
2093 rd1 = (insn >> 0) & 0xf;
2094 gen_op_iwmmxt_movq_M0_wRn(rd0);
2095 if (insn & (1 << 22)) {
2096 if (insn & (1 << 20))
2097 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2098 else
2099 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2100 } else {
2101 if (insn & (1 << 20))
2102 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2103 else
2104 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2106 gen_op_iwmmxt_movq_wRn_M0(wrd);
2107 gen_op_iwmmxt_set_mup();
2108 gen_op_iwmmxt_set_cup();
2109 break;
2110 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2111 wrd = (insn >> 12) & 0xf;
2112 rd0 = (insn >> 16) & 0xf;
2113 rd1 = (insn >> 0) & 0xf;
2114 gen_op_iwmmxt_movq_M0_wRn(rd0);
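/* The rotate amount for WALIGNR comes from one of the wCGR0-wCGR3 control
 * registers, selected by insn bits [21:20]; only its low three bits (a byte
 * offset of 0..7) are passed to the align helper. */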
2115 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2116 tcg_gen_andi_i32(tmp, tmp, 7);
2117 iwmmxt_load_reg(cpu_V1, rd1);
2118 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2119 tcg_temp_free_i32(tmp);
2120 gen_op_iwmmxt_movq_wRn_M0(wrd);
2121 gen_op_iwmmxt_set_mup();
2122 break;
2123 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2124 if (((insn >> 6) & 3) == 3)
2125 return 1;
2126 rd = (insn >> 12) & 0xf;
2127 wrd = (insn >> 16) & 0xf;
2128 tmp = load_reg(s, rd);
2129 gen_op_iwmmxt_movq_M0_wRn(wrd);
2130 switch ((insn >> 6) & 3) {
2131 case 0:
2132 tmp2 = tcg_const_i32(0xff);
2133 tmp3 = tcg_const_i32((insn & 7) << 3);
2134 break;
2135 case 1:
2136 tmp2 = tcg_const_i32(0xffff);
2137 tmp3 = tcg_const_i32((insn & 3) << 4);
2138 break;
2139 case 2:
2140 tmp2 = tcg_const_i32(0xffffffff);
2141 tmp3 = tcg_const_i32((insn & 1) << 5);
2142 break;
2143 default:
2144 TCGV_UNUSED_I32(tmp2);
2145 TCGV_UNUSED_I32(tmp3);
2147 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2148 tcg_temp_free_i32(tmp3);
2149 tcg_temp_free_i32(tmp2);
2150 tcg_temp_free_i32(tmp);
2151 gen_op_iwmmxt_movq_wRn_M0(wrd);
2152 gen_op_iwmmxt_set_mup();
2153 break;
2154 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2155 rd = (insn >> 12) & 0xf;
2156 wrd = (insn >> 16) & 0xf;
2157 if (rd == 15 || ((insn >> 22) & 3) == 3)
2158 return 1;
2159 gen_op_iwmmxt_movq_M0_wRn(wrd);
2160 tmp = tcg_temp_new_i32();
2161 switch ((insn >> 22) & 3) {
2162 case 0:
2163 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2164 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2165 if (insn & 8) {
2166 tcg_gen_ext8s_i32(tmp, tmp);
2167 } else {
2168 tcg_gen_andi_i32(tmp, tmp, 0xff);
2170 break;
2171 case 1:
2172 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2173 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2174 if (insn & 8) {
2175 tcg_gen_ext16s_i32(tmp, tmp);
2176 } else {
2177 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2179 break;
2180 case 2:
2181 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2182 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2183 break;
2185 store_reg(s, rd, tmp);
2186 break;
2187 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2188 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2189 return 1;
2190 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2191 switch ((insn >> 22) & 3) {
2192 case 0:
2193 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2194 break;
2195 case 1:
2196 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2197 break;
2198 case 2:
2199 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2200 break;
2202 tcg_gen_shli_i32(tmp, tmp, 28);
2203 gen_set_nzcv(tmp);
2204 tcg_temp_free_i32(tmp);
2205 break;
2206 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2207 if (((insn >> 6) & 3) == 3)
2208 return 1;
2209 rd = (insn >> 12) & 0xf;
2210 wrd = (insn >> 16) & 0xf;
2211 tmp = load_reg(s, rd);
2212 switch ((insn >> 6) & 3) {
2213 case 0:
2214 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2215 break;
2216 case 1:
2217 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2218 break;
2219 case 2:
2220 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2221 break;
2223 tcg_temp_free_i32(tmp);
2224 gen_op_iwmmxt_movq_wRn_M0(wrd);
2225 gen_op_iwmmxt_set_mup();
2226 break;
2227 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2228 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2229 return 1;
2230 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2231 tmp2 = tcg_temp_new_i32();
2232 tcg_gen_mov_i32(tmp2, tmp);
2233 switch ((insn >> 22) & 3) {
2234 case 0:
2235 for (i = 0; i < 7; i ++) {
2236 tcg_gen_shli_i32(tmp2, tmp2, 4);
2237 tcg_gen_and_i32(tmp, tmp, tmp2);
2239 break;
2240 case 1:
2241 for (i = 0; i < 3; i ++) {
2242 tcg_gen_shli_i32(tmp2, tmp2, 8);
2243 tcg_gen_and_i32(tmp, tmp, tmp2);
2245 break;
2246 case 2:
2247 tcg_gen_shli_i32(tmp2, tmp2, 16);
2248 tcg_gen_and_i32(tmp, tmp, tmp2);
2249 break;
2251 gen_set_nzcv(tmp);
2252 tcg_temp_free_i32(tmp2);
2253 tcg_temp_free_i32(tmp);
2254 break;
2255 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2256 wrd = (insn >> 12) & 0xf;
2257 rd0 = (insn >> 16) & 0xf;
2258 gen_op_iwmmxt_movq_M0_wRn(rd0);
2259 switch ((insn >> 22) & 3) {
2260 case 0:
2261 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2262 break;
2263 case 1:
2264 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2265 break;
2266 case 2:
2267 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2268 break;
2269 case 3:
2270 return 1;
2272 gen_op_iwmmxt_movq_wRn_M0(wrd);
2273 gen_op_iwmmxt_set_mup();
2274 break;
2275 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2276 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2277 return 1;
2278 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2279 tmp2 = tcg_temp_new_i32();
2280 tcg_gen_mov_i32(tmp2, tmp);
2281 switch ((insn >> 22) & 3) {
2282 case 0:
2283 for (i = 0; i < 7; i ++) {
2284 tcg_gen_shli_i32(tmp2, tmp2, 4);
2285 tcg_gen_or_i32(tmp, tmp, tmp2);
2287 break;
2288 case 1:
2289 for (i = 0; i < 3; i ++) {
2290 tcg_gen_shli_i32(tmp2, tmp2, 8);
2291 tcg_gen_or_i32(tmp, tmp, tmp2);
2293 break;
2294 case 2:
2295 tcg_gen_shli_i32(tmp2, tmp2, 16);
2296 tcg_gen_or_i32(tmp, tmp, tmp2);
2297 break;
2299 gen_set_nzcv(tmp);
2300 tcg_temp_free_i32(tmp2);
2301 tcg_temp_free_i32(tmp);
2302 break;
2303 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2304 rd = (insn >> 12) & 0xf;
2305 rd0 = (insn >> 16) & 0xf;
2306 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2307 return 1;
2308 gen_op_iwmmxt_movq_M0_wRn(rd0);
2309 tmp = tcg_temp_new_i32();
2310 switch ((insn >> 22) & 3) {
2311 case 0:
2312 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2313 break;
2314 case 1:
2315 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2316 break;
2317 case 2:
2318 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2319 break;
2321 store_reg(s, rd, tmp);
2322 break;
2323 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2324 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2325 wrd = (insn >> 12) & 0xf;
2326 rd0 = (insn >> 16) & 0xf;
2327 rd1 = (insn >> 0) & 0xf;
2328 gen_op_iwmmxt_movq_M0_wRn(rd0);
2329 switch ((insn >> 22) & 3) {
2330 case 0:
2331 if (insn & (1 << 21))
2332 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2333 else
2334 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2335 break;
2336 case 1:
2337 if (insn & (1 << 21))
2338 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2339 else
2340 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2341 break;
2342 case 2:
2343 if (insn & (1 << 21))
2344 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2345 else
2346 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2347 break;
2348 case 3:
2349 return 1;
2351 gen_op_iwmmxt_movq_wRn_M0(wrd);
2352 gen_op_iwmmxt_set_mup();
2353 gen_op_iwmmxt_set_cup();
2354 break;
2355 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2356 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2357 wrd = (insn >> 12) & 0xf;
2358 rd0 = (insn >> 16) & 0xf;
2359 gen_op_iwmmxt_movq_M0_wRn(rd0);
2360 switch ((insn >> 22) & 3) {
2361 case 0:
2362 if (insn & (1 << 21))
2363 gen_op_iwmmxt_unpacklsb_M0();
2364 else
2365 gen_op_iwmmxt_unpacklub_M0();
2366 break;
2367 case 1:
2368 if (insn & (1 << 21))
2369 gen_op_iwmmxt_unpacklsw_M0();
2370 else
2371 gen_op_iwmmxt_unpackluw_M0();
2372 break;
2373 case 2:
2374 if (insn & (1 << 21))
2375 gen_op_iwmmxt_unpacklsl_M0();
2376 else
2377 gen_op_iwmmxt_unpacklul_M0();
2378 break;
2379 case 3:
2380 return 1;
2382 gen_op_iwmmxt_movq_wRn_M0(wrd);
2383 gen_op_iwmmxt_set_mup();
2384 gen_op_iwmmxt_set_cup();
2385 break;
2386 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2387 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2388 wrd = (insn >> 12) & 0xf;
2389 rd0 = (insn >> 16) & 0xf;
2390 gen_op_iwmmxt_movq_M0_wRn(rd0);
2391 switch ((insn >> 22) & 3) {
2392 case 0:
2393 if (insn & (1 << 21))
2394 gen_op_iwmmxt_unpackhsb_M0();
2395 else
2396 gen_op_iwmmxt_unpackhub_M0();
2397 break;
2398 case 1:
2399 if (insn & (1 << 21))
2400 gen_op_iwmmxt_unpackhsw_M0();
2401 else
2402 gen_op_iwmmxt_unpackhuw_M0();
2403 break;
2404 case 2:
2405 if (insn & (1 << 21))
2406 gen_op_iwmmxt_unpackhsl_M0();
2407 else
2408 gen_op_iwmmxt_unpackhul_M0();
2409 break;
2410 case 3:
2411 return 1;
2413 gen_op_iwmmxt_movq_wRn_M0(wrd);
2414 gen_op_iwmmxt_set_mup();
2415 gen_op_iwmmxt_set_cup();
2416 break;
2417 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2418 case 0x214: case 0x614: case 0xa14: case 0xe14:
2419 if (((insn >> 22) & 3) == 0)
2420 return 1;
2421 wrd = (insn >> 12) & 0xf;
2422 rd0 = (insn >> 16) & 0xf;
2423 gen_op_iwmmxt_movq_M0_wRn(rd0);
2424 tmp = tcg_temp_new_i32();
2425 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2426 tcg_temp_free_i32(tmp);
2427 return 1;
2429 switch ((insn >> 22) & 3) {
2430 case 1:
2431 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2432 break;
2433 case 2:
2434 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2435 break;
2436 case 3:
2437 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2438 break;
2440 tcg_temp_free_i32(tmp);
2441 gen_op_iwmmxt_movq_wRn_M0(wrd);
2442 gen_op_iwmmxt_set_mup();
2443 gen_op_iwmmxt_set_cup();
2444 break;
2445 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2446 case 0x014: case 0x414: case 0x814: case 0xc14:
2447 if (((insn >> 22) & 3) == 0)
2448 return 1;
2449 wrd = (insn >> 12) & 0xf;
2450 rd0 = (insn >> 16) & 0xf;
2451 gen_op_iwmmxt_movq_M0_wRn(rd0);
2452 tmp = tcg_temp_new_i32();
2453 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2454 tcg_temp_free_i32(tmp);
2455 return 1;
2457 switch ((insn >> 22) & 3) {
2458 case 1:
2459 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2460 break;
2461 case 2:
2462 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2463 break;
2464 case 3:
2465 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2466 break;
2468 tcg_temp_free_i32(tmp);
2469 gen_op_iwmmxt_movq_wRn_M0(wrd);
2470 gen_op_iwmmxt_set_mup();
2471 gen_op_iwmmxt_set_cup();
2472 break;
2473 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2474 case 0x114: case 0x514: case 0x914: case 0xd14:
2475 if (((insn >> 22) & 3) == 0)
2476 return 1;
2477 wrd = (insn >> 12) & 0xf;
2478 rd0 = (insn >> 16) & 0xf;
2479 gen_op_iwmmxt_movq_M0_wRn(rd0);
2480 tmp = tcg_temp_new_i32();
2481 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2482 tcg_temp_free_i32(tmp);
2483 return 1;
2485 switch ((insn >> 22) & 3) {
2486 case 1:
2487 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2488 break;
2489 case 2:
2490 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2491 break;
2492 case 3:
2493 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2494 break;
2496 tcg_temp_free_i32(tmp);
2497 gen_op_iwmmxt_movq_wRn_M0(wrd);
2498 gen_op_iwmmxt_set_mup();
2499 gen_op_iwmmxt_set_cup();
2500 break;
2501 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2502 case 0x314: case 0x714: case 0xb14: case 0xf14:
2503 if (((insn >> 22) & 3) == 0)
2504 return 1;
2505 wrd = (insn >> 12) & 0xf;
2506 rd0 = (insn >> 16) & 0xf;
2507 gen_op_iwmmxt_movq_M0_wRn(rd0);
2508 tmp = tcg_temp_new_i32();
2509 switch ((insn >> 22) & 3) {
2510 case 1:
2511 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2512 tcg_temp_free_i32(tmp);
2513 return 1;
2515 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2516 break;
2517 case 2:
2518 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2519 tcg_temp_free_i32(tmp);
2520 return 1;
2522 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2523 break;
2524 case 3:
2525 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2526 tcg_temp_free_i32(tmp);
2527 return 1;
2529 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2530 break;
2532 tcg_temp_free_i32(tmp);
2533 gen_op_iwmmxt_movq_wRn_M0(wrd);
2534 gen_op_iwmmxt_set_mup();
2535 gen_op_iwmmxt_set_cup();
2536 break;
2537 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2538 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2539 wrd = (insn >> 12) & 0xf;
2540 rd0 = (insn >> 16) & 0xf;
2541 rd1 = (insn >> 0) & 0xf;
2542 gen_op_iwmmxt_movq_M0_wRn(rd0);
2543 switch ((insn >> 22) & 3) {
2544 case 0:
2545 if (insn & (1 << 21))
2546 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2547 else
2548 gen_op_iwmmxt_minub_M0_wRn(rd1);
2549 break;
2550 case 1:
2551 if (insn & (1 << 21))
2552 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2553 else
2554 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2555 break;
2556 case 2:
2557 if (insn & (1 << 21))
2558 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2559 else
2560 gen_op_iwmmxt_minul_M0_wRn(rd1);
2561 break;
2562 case 3:
2563 return 1;
2565 gen_op_iwmmxt_movq_wRn_M0(wrd);
2566 gen_op_iwmmxt_set_mup();
2567 break;
2568 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2569 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2570 wrd = (insn >> 12) & 0xf;
2571 rd0 = (insn >> 16) & 0xf;
2572 rd1 = (insn >> 0) & 0xf;
2573 gen_op_iwmmxt_movq_M0_wRn(rd0);
2574 switch ((insn >> 22) & 3) {
2575 case 0:
2576 if (insn & (1 << 21))
2577 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2578 else
2579 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2580 break;
2581 case 1:
2582 if (insn & (1 << 21))
2583 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2584 else
2585 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2586 break;
2587 case 2:
2588 if (insn & (1 << 21))
2589 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2590 else
2591 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2592 break;
2593 case 3:
2594 return 1;
2596 gen_op_iwmmxt_movq_wRn_M0(wrd);
2597 gen_op_iwmmxt_set_mup();
2598 break;
2599 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2600 case 0x402: case 0x502: case 0x602: case 0x702:
2601 wrd = (insn >> 12) & 0xf;
2602 rd0 = (insn >> 16) & 0xf;
2603 rd1 = (insn >> 0) & 0xf;
2604 gen_op_iwmmxt_movq_M0_wRn(rd0);
2605 tmp = tcg_const_i32((insn >> 20) & 3);
2606 iwmmxt_load_reg(cpu_V1, rd1);
2607 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2608 tcg_temp_free_i32(tmp);
2609 gen_op_iwmmxt_movq_wRn_M0(wrd);
2610 gen_op_iwmmxt_set_mup();
2611 break;
2612 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2613 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2614 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2615 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2616 wrd = (insn >> 12) & 0xf;
2617 rd0 = (insn >> 16) & 0xf;
2618 rd1 = (insn >> 0) & 0xf;
2619 gen_op_iwmmxt_movq_M0_wRn(rd0);
2620 switch ((insn >> 20) & 0xf) {
2621 case 0x0:
2622 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2623 break;
2624 case 0x1:
2625 gen_op_iwmmxt_subub_M0_wRn(rd1);
2626 break;
2627 case 0x3:
2628 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2629 break;
2630 case 0x4:
2631 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2632 break;
2633 case 0x5:
2634 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2635 break;
2636 case 0x7:
2637 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2638 break;
2639 case 0x8:
2640 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2641 break;
2642 case 0x9:
2643 gen_op_iwmmxt_subul_M0_wRn(rd1);
2644 break;
2645 case 0xb:
2646 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2647 break;
2648 default:
2649 return 1;
2651 gen_op_iwmmxt_movq_wRn_M0(wrd);
2652 gen_op_iwmmxt_set_mup();
2653 gen_op_iwmmxt_set_cup();
2654 break;
2655 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2656 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2657 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2658 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2659 wrd = (insn >> 12) & 0xf;
2660 rd0 = (insn >> 16) & 0xf;
2661 gen_op_iwmmxt_movq_M0_wRn(rd0);
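/* WSHUFH's 8-bit shuffle control is split across the encoding: insn[23:20]
 * supplies the high nibble and insn[3:0] the low nibble of the value handed
 * to the shufh helper. */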
2662 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2663 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2664 tcg_temp_free_i32(tmp);
2665 gen_op_iwmmxt_movq_wRn_M0(wrd);
2666 gen_op_iwmmxt_set_mup();
2667 gen_op_iwmmxt_set_cup();
2668 break;
2669 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2670 case 0x418: case 0x518: case 0x618: case 0x718:
2671 case 0x818: case 0x918: case 0xa18: case 0xb18:
2672 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2673 wrd = (insn >> 12) & 0xf;
2674 rd0 = (insn >> 16) & 0xf;
2675 rd1 = (insn >> 0) & 0xf;
2676 gen_op_iwmmxt_movq_M0_wRn(rd0);
2677 switch ((insn >> 20) & 0xf) {
2678 case 0x0:
2679 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2680 break;
2681 case 0x1:
2682 gen_op_iwmmxt_addub_M0_wRn(rd1);
2683 break;
2684 case 0x3:
2685 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2686 break;
2687 case 0x4:
2688 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2689 break;
2690 case 0x5:
2691 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2692 break;
2693 case 0x7:
2694 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2695 break;
2696 case 0x8:
2697 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2698 break;
2699 case 0x9:
2700 gen_op_iwmmxt_addul_M0_wRn(rd1);
2701 break;
2702 case 0xb:
2703 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2704 break;
2705 default:
2706 return 1;
2708 gen_op_iwmmxt_movq_wRn_M0(wrd);
2709 gen_op_iwmmxt_set_mup();
2710 gen_op_iwmmxt_set_cup();
2711 break;
2712 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2713 case 0x408: case 0x508: case 0x608: case 0x708:
2714 case 0x808: case 0x908: case 0xa08: case 0xb08:
2715 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2716 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2717 return 1;
2718 wrd = (insn >> 12) & 0xf;
2719 rd0 = (insn >> 16) & 0xf;
2720 rd1 = (insn >> 0) & 0xf;
2721 gen_op_iwmmxt_movq_M0_wRn(rd0);
2722 switch ((insn >> 22) & 3) {
2723 case 1:
2724 if (insn & (1 << 21))
2725 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2726 else
2727 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2728 break;
2729 case 2:
2730 if (insn & (1 << 21))
2731 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2732 else
2733 gen_op_iwmmxt_packul_M0_wRn(rd1);
2734 break;
2735 case 3:
2736 if (insn & (1 << 21))
2737 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2738 else
2739 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2740 break;
2742 gen_op_iwmmxt_movq_wRn_M0(wrd);
2743 gen_op_iwmmxt_set_mup();
2744 gen_op_iwmmxt_set_cup();
2745 break;
2746 case 0x201: case 0x203: case 0x205: case 0x207:
2747 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2748 case 0x211: case 0x213: case 0x215: case 0x217:
2749 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2750 wrd = (insn >> 5) & 0xf;
2751 rd0 = (insn >> 12) & 0xf;
2752 rd1 = (insn >> 0) & 0xf;
2753 if (rd0 == 0xf || rd1 == 0xf)
2754 return 1;
2755 gen_op_iwmmxt_movq_M0_wRn(wrd);
2756 tmp = load_reg(s, rd0);
2757 tmp2 = load_reg(s, rd1);
2758 switch ((insn >> 16) & 0xf) {
2759 case 0x0: /* TMIA */
2760 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2761 break;
2762 case 0x8: /* TMIAPH */
2763 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2764 break;
2765 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2766 if (insn & (1 << 16))
2767 tcg_gen_shri_i32(tmp, tmp, 16);
2768 if (insn & (1 << 17))
2769 tcg_gen_shri_i32(tmp2, tmp2, 16);
2770 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2771 break;
2772 default:
2773 tcg_temp_free_i32(tmp2);
2774 tcg_temp_free_i32(tmp);
2775 return 1;
2777 tcg_temp_free_i32(tmp2);
2778 tcg_temp_free_i32(tmp);
2779 gen_op_iwmmxt_movq_wRn_M0(wrd);
2780 gen_op_iwmmxt_set_mup();
2781 break;
2782 default:
2783 return 1;
2786 return 0;
2789 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2790 (i.e. an undefined instruction). */
2791 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2793 int acc, rd0, rd1, rdhi, rdlo;
2794 TCGv_i32 tmp, tmp2;
2796 if ((insn & 0x0ff00f10) == 0x0e200010) {
2797 /* Multiply with Internal Accumulate Format */
2798 rd0 = (insn >> 12) & 0xf;
2799 rd1 = insn & 0xf;
2800 acc = (insn >> 5) & 7;
2802 if (acc != 0)
2803 return 1;
2805 tmp = load_reg(s, rd0);
2806 tmp2 = load_reg(s, rd1);
2807 switch ((insn >> 16) & 0xf) {
2808 case 0x0: /* MIA */
2809 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2810 break;
2811 case 0x8: /* MIAPH */
2812 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2813 break;
2814 case 0xc: /* MIABB */
2815 case 0xd: /* MIABT */
2816 case 0xe: /* MIATB */
2817 case 0xf: /* MIATT */
2818 if (insn & (1 << 16))
2819 tcg_gen_shri_i32(tmp, tmp, 16);
2820 if (insn & (1 << 17))
2821 tcg_gen_shri_i32(tmp2, tmp2, 16);
2822 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2823 break;
2824 default:
2825 return 1;
2827 tcg_temp_free_i32(tmp2);
2828 tcg_temp_free_i32(tmp);
2830 gen_op_iwmmxt_movq_wRn_M0(acc);
2831 return 0;
2834 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2835 /* Internal Accumulator Access Format */
2836 rdhi = (insn >> 16) & 0xf;
2837 rdlo = (insn >> 12) & 0xf;
2838 acc = insn & 7;
2840 if (acc != 0)
2841 return 1;
2843 if (insn & ARM_CP_RW_BIT) { /* MRA */
2844 iwmmxt_load_reg(cpu_V0, acc);
2845 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2846 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2847 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
2848 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
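/* MRA reads the 40-bit accumulator: the low word goes to RdLo and only
 * bits [39:32] are kept in RdHi, hence the (1 << (40 - 32)) - 1 mask. */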
2849 } else { /* MAR */
2850 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2851 iwmmxt_store_reg(cpu_V0, acc);
2853 return 0;
2856 return 1;
2859 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2860 #define VFP_SREG(insn, bigbit, smallbit) \
2861 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2862 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2863 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2864 reg = (((insn) >> (bigbit)) & 0x0f) \
2865 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2866 } else { \
2867 if (insn & (1 << (smallbit))) \
2868 return 1; \
2869 reg = ((insn) >> (bigbit)) & 0x0f; \
2870 }} while (0)
2872 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2873 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2874 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2875 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2876 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2877 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
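/* Worked example: for the D variant (bigbit 12, smallbit 22) an encoding with
 * Vd = 0b0101 in insn[15:12] and D = 1 in insn[22] decodes to d21 (0b10101) on
 * a VFP3 core, while the S variant gives s11 ((0b0101 << 1) | 1).  Pre-VFP3
 * cores only have d0-d15, so a set D bit makes VFP_DREG return 1, which the
 * enclosing decoder treats as UNDEF. */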
2879 /* Move between integer and VFP cores. */
2880 static TCGv_i32 gen_vfp_mrs(void)
2882 TCGv_i32 tmp = tcg_temp_new_i32();
2883 tcg_gen_mov_i32(tmp, cpu_F0s);
2884 return tmp;
2887 static void gen_vfp_msr(TCGv_i32 tmp)
2889 tcg_gen_mov_i32(cpu_F0s, tmp);
2890 tcg_temp_free_i32(tmp);
2893 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2895 TCGv_i32 tmp = tcg_temp_new_i32();
2896 if (shift)
2897 tcg_gen_shri_i32(var, var, shift);
2898 tcg_gen_ext8u_i32(var, var);
2899 tcg_gen_shli_i32(tmp, var, 8);
2900 tcg_gen_or_i32(var, var, tmp);
2901 tcg_gen_shli_i32(tmp, var, 16);
2902 tcg_gen_or_i32(var, var, tmp);
2903 tcg_temp_free_i32(tmp);
2906 static void gen_neon_dup_low16(TCGv_i32 var)
2908 TCGv_i32 tmp = tcg_temp_new_i32();
2909 tcg_gen_ext16u_i32(var, var);
2910 tcg_gen_shli_i32(tmp, var, 16);
2911 tcg_gen_or_i32(var, var, tmp);
2912 tcg_temp_free_i32(tmp);
2915 static void gen_neon_dup_high16(TCGv_i32 var)
2917 TCGv_i32 tmp = tcg_temp_new_i32();
2918 tcg_gen_andi_i32(var, var, 0xffff0000);
2919 tcg_gen_shri_i32(tmp, var, 16);
2920 tcg_gen_or_i32(var, var, tmp);
2921 tcg_temp_free_i32(tmp);
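/* The three dup helpers above replicate one element across a 32-bit value:
 * gen_neon_dup_u8 turns 0x000000ab into 0xabababab, gen_neon_dup_low16 turns
 * 0x0000abcd into 0xabcdabcd, and gen_neon_dup_high16 turns 0xabcd0000 into
 * 0xabcdabcd. */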
2924 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2926 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2927 TCGv_i32 tmp = tcg_temp_new_i32();
2928 switch (size) {
2929 case 0:
2930 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2931 gen_neon_dup_u8(tmp, 0);
2932 break;
2933 case 1:
2934 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
2935 gen_neon_dup_low16(tmp);
2936 break;
2937 case 2:
2938 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2939 break;
2940 default: /* Avoid compiler warnings. */
2941 abort();
2943 return tmp;
2946 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2947 uint32_t dp)
2949 uint32_t cc = extract32(insn, 20, 2);
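/* VSEL encodes only four conditions in insn[21:20]: 0 selects on EQ, 1 on VS,
 * 2 on GE and 3 on GT; the remaining ARM conditions are not encodable. */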
2951 if (dp) {
2952 TCGv_i64 frn, frm, dest;
2953 TCGv_i64 tmp, zero, zf, nf, vf;
2955 zero = tcg_const_i64(0);
2957 frn = tcg_temp_new_i64();
2958 frm = tcg_temp_new_i64();
2959 dest = tcg_temp_new_i64();
2961 zf = tcg_temp_new_i64();
2962 nf = tcg_temp_new_i64();
2963 vf = tcg_temp_new_i64();
2965 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2966 tcg_gen_ext_i32_i64(nf, cpu_NF);
2967 tcg_gen_ext_i32_i64(vf, cpu_VF);
2969 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2970 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2971 switch (cc) {
2972 case 0: /* eq: Z */
2973 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2974 frn, frm);
2975 break;
2976 case 1: /* vs: V */
2977 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
2978 frn, frm);
2979 break;
2980 case 2: /* ge: N == V -> N ^ V == 0 */
2981 tmp = tcg_temp_new_i64();
2982 tcg_gen_xor_i64(tmp, vf, nf);
2983 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2984 frn, frm);
2985 tcg_temp_free_i64(tmp);
2986 break;
2987 case 3: /* gt: !Z && N == V */
2988 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
2989 frn, frm);
2990 tmp = tcg_temp_new_i64();
2991 tcg_gen_xor_i64(tmp, vf, nf);
2992 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2993 dest, frm);
2994 tcg_temp_free_i64(tmp);
2995 break;
2997 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2998 tcg_temp_free_i64(frn);
2999 tcg_temp_free_i64(frm);
3000 tcg_temp_free_i64(dest);
3002 tcg_temp_free_i64(zf);
3003 tcg_temp_free_i64(nf);
3004 tcg_temp_free_i64(vf);
3006 tcg_temp_free_i64(zero);
3007 } else {
3008 TCGv_i32 frn, frm, dest;
3009 TCGv_i32 tmp, zero;
3011 zero = tcg_const_i32(0);
3013 frn = tcg_temp_new_i32();
3014 frm = tcg_temp_new_i32();
3015 dest = tcg_temp_new_i32();
3016 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3017 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3018 switch (cc) {
3019 case 0: /* eq: Z */
3020 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
3021 frn, frm);
3022 break;
3023 case 1: /* vs: V */
3024 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
3025 frn, frm);
3026 break;
3027 case 2: /* ge: N == V -> N ^ V == 0 */
3028 tmp = tcg_temp_new_i32();
3029 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3030 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3031 frn, frm);
3032 tcg_temp_free_i32(tmp);
3033 break;
3034 case 3: /* gt: !Z && N == V */
3035 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
3036 frn, frm);
3037 tmp = tcg_temp_new_i32();
3038 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3039 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3040 dest, frm);
3041 tcg_temp_free_i32(tmp);
3042 break;
3044 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3045 tcg_temp_free_i32(frn);
3046 tcg_temp_free_i32(frm);
3047 tcg_temp_free_i32(dest);
3049 tcg_temp_free_i32(zero);
3052 return 0;
3055 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3056 uint32_t rm, uint32_t dp)
3058 uint32_t vmin = extract32(insn, 6, 1);
3059 TCGv_ptr fpst = get_fpstatus_ptr(0);
3061 if (dp) {
3062 TCGv_i64 frn, frm, dest;
3064 frn = tcg_temp_new_i64();
3065 frm = tcg_temp_new_i64();
3066 dest = tcg_temp_new_i64();
3068 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3069 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3070 if (vmin) {
3071 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
3072 } else {
3073 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
3075 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3076 tcg_temp_free_i64(frn);
3077 tcg_temp_free_i64(frm);
3078 tcg_temp_free_i64(dest);
3079 } else {
3080 TCGv_i32 frn, frm, dest;
3082 frn = tcg_temp_new_i32();
3083 frm = tcg_temp_new_i32();
3084 dest = tcg_temp_new_i32();
3086 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3087 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3088 if (vmin) {
3089 gen_helper_vfp_minnums(dest, frn, frm, fpst);
3090 } else {
3091 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
3093 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3094 tcg_temp_free_i32(frn);
3095 tcg_temp_free_i32(frm);
3096 tcg_temp_free_i32(dest);
3099 tcg_temp_free_ptr(fpst);
3100 return 0;
3103 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3104 int rounding)
3106 TCGv_ptr fpst = get_fpstatus_ptr(0);
3107 TCGv_i32 tcg_rmode;
3109 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3110 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3112 if (dp) {
3113 TCGv_i64 tcg_op;
3114 TCGv_i64 tcg_res;
3115 tcg_op = tcg_temp_new_i64();
3116 tcg_res = tcg_temp_new_i64();
3117 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3118 gen_helper_rintd(tcg_res, tcg_op, fpst);
3119 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3120 tcg_temp_free_i64(tcg_op);
3121 tcg_temp_free_i64(tcg_res);
3122 } else {
3123 TCGv_i32 tcg_op;
3124 TCGv_i32 tcg_res;
3125 tcg_op = tcg_temp_new_i32();
3126 tcg_res = tcg_temp_new_i32();
3127 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3128 gen_helper_rints(tcg_res, tcg_op, fpst);
3129 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3130 tcg_temp_free_i32(tcg_op);
3131 tcg_temp_free_i32(tcg_res);
3134 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3135 tcg_temp_free_i32(tcg_rmode);
3137 tcg_temp_free_ptr(fpst);
3138 return 0;
3141 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3142 int rounding)
3144 bool is_signed = extract32(insn, 7, 1);
3145 TCGv_ptr fpst = get_fpstatus_ptr(0);
3146 TCGv_i32 tcg_rmode, tcg_shift;
3148 tcg_shift = tcg_const_i32(0);
3150 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3151 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3153 if (dp) {
3154 TCGv_i64 tcg_double, tcg_res;
3155 TCGv_i32 tcg_tmp;
3156 /* Rd is encoded as a single precision register even when the source
3157 * is double precision. */
3159 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
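/* i.e. undo the D-register decoding done by the caller and re-read the same
 * Vd/D fields as an S register: rd = 22 (D = 1, Vd = 6), for example, becomes
 * ((22 << 1) & 0x1e) | ((22 >> 4) & 1) = 12 | 1 = s13. */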
3160 tcg_double = tcg_temp_new_i64();
3161 tcg_res = tcg_temp_new_i64();
3162 tcg_tmp = tcg_temp_new_i32();
3163 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3164 if (is_signed) {
3165 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3166 } else {
3167 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3169 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
3170 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3171 tcg_temp_free_i32(tcg_tmp);
3172 tcg_temp_free_i64(tcg_res);
3173 tcg_temp_free_i64(tcg_double);
3174 } else {
3175 TCGv_i32 tcg_single, tcg_res;
3176 tcg_single = tcg_temp_new_i32();
3177 tcg_res = tcg_temp_new_i32();
3178 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3179 if (is_signed) {
3180 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3181 } else {
3182 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3184 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3185 tcg_temp_free_i32(tcg_res);
3186 tcg_temp_free_i32(tcg_single);
3189 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3190 tcg_temp_free_i32(tcg_rmode);
3192 tcg_temp_free_i32(tcg_shift);
3194 tcg_temp_free_ptr(fpst);
3196 return 0;
3199 /* Table for converting the most common AArch32 encoding of
3200 * rounding mode to arm_fprounding order (which matches the
3201 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM(). */
3203 static const uint8_t fp_decode_rm[] = {
3204 FPROUNDING_TIEAWAY,
3205 FPROUNDING_TIEEVEN,
3206 FPROUNDING_POSINF,
3207 FPROUNDING_NEGINF,
3208 };
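/* So RM = 0b00 in insn[17:16] rounds ties away from zero, 0b01 ties to even,
 * 0b10 towards +Inf and 0b11 towards -Inf; the callers below index this table
 * with extract32(insn, 16, 2). */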
3210 static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
3212 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3214 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3215 return 1;
3218 if (dp) {
3219 VFP_DREG_D(rd, insn);
3220 VFP_DREG_N(rn, insn);
3221 VFP_DREG_M(rm, insn);
3222 } else {
3223 rd = VFP_SREG_D(insn);
3224 rn = VFP_SREG_N(insn);
3225 rm = VFP_SREG_M(insn);
3228 if ((insn & 0x0f800e50) == 0x0e000a00) {
3229 return handle_vsel(insn, rd, rn, rm, dp);
3230 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3231 return handle_vminmaxnm(insn, rd, rn, rm, dp);
3232 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3233 /* VRINTA, VRINTN, VRINTP, VRINTM */
3234 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3235 return handle_vrint(insn, rd, rm, dp, rounding);
3236 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3237 /* VCVTA, VCVTN, VCVTP, VCVTM */
3238 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3239 return handle_vcvt(insn, rd, rm, dp, rounding);
3241 return 1;
3244 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3245 (i.e. an undefined instruction). */
3246 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3248 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3249 int dp, veclen;
3250 TCGv_i32 addr;
3251 TCGv_i32 tmp;
3252 TCGv_i32 tmp2;
3254 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
3255 return 1;
3258 /* FIXME: this access check should not take precedence over UNDEF
3259 * for invalid encodings; we will generate incorrect syndrome information
3260 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3262 if (s->fp_excp_el) {
3263 gen_exception_insn(s, 4, EXCP_UDEF,
3264 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
3265 return 0;
3268 if (!s->vfp_enabled) {
3269 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3270 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3271 return 1;
3272 rn = (insn >> 16) & 0xf;
3273 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3274 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
3275 return 1;
3279 if (extract32(insn, 28, 4) == 0xf) {
3280 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3281 * only used in v8 and above.
3283 return disas_vfp_v8_insn(s, insn);
3286 dp = ((insn & 0xf00) == 0xb00);
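/* Coprocessor number 11 (0xb in insn[11:8]) is the double-precision bank;
 * coprocessor 10 is single precision. */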
3287 switch ((insn >> 24) & 0xf) {
3288 case 0xe:
3289 if (insn & (1 << 4)) {
3290 /* single register transfer */
3291 rd = (insn >> 12) & 0xf;
3292 if (dp) {
3293 int size;
3294 int pass;
3296 VFP_DREG_N(rn, insn);
3297 if (insn & 0xf)
3298 return 1;
3299 if (insn & 0x00c00060
3300 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
3301 return 1;
3304 pass = (insn >> 21) & 1;
3305 if (insn & (1 << 22)) {
3306 size = 0;
3307 offset = ((insn >> 5) & 3) * 8;
3308 } else if (insn & (1 << 5)) {
3309 size = 1;
3310 offset = (insn & (1 << 6)) ? 16 : 0;
3311 } else {
3312 size = 2;
3313 offset = 0;
3315 if (insn & ARM_CP_RW_BIT) {
3316 /* vfp->arm */
3317 tmp = neon_load_reg(rn, pass);
3318 switch (size) {
3319 case 0:
3320 if (offset)
3321 tcg_gen_shri_i32(tmp, tmp, offset);
3322 if (insn & (1 << 23))
3323 gen_uxtb(tmp);
3324 else
3325 gen_sxtb(tmp);
3326 break;
3327 case 1:
3328 if (insn & (1 << 23)) {
3329 if (offset) {
3330 tcg_gen_shri_i32(tmp, tmp, 16);
3331 } else {
3332 gen_uxth(tmp);
3334 } else {
3335 if (offset) {
3336 tcg_gen_sari_i32(tmp, tmp, 16);
3337 } else {
3338 gen_sxth(tmp);
3341 break;
3342 case 2:
3343 break;
3345 store_reg(s, rd, tmp);
3346 } else {
3347 /* arm->vfp */
3348 tmp = load_reg(s, rd);
3349 if (insn & (1 << 23)) {
3350 /* VDUP */
3351 if (size == 0) {
3352 gen_neon_dup_u8(tmp, 0);
3353 } else if (size == 1) {
3354 gen_neon_dup_low16(tmp);
3356 for (n = 0; n <= pass * 2; n++) {
3357 tmp2 = tcg_temp_new_i32();
3358 tcg_gen_mov_i32(tmp2, tmp);
3359 neon_store_reg(rn, n, tmp2);
3361 neon_store_reg(rn, n, tmp);
3362 } else {
3363 /* VMOV */
3364 switch (size) {
3365 case 0:
3366 tmp2 = neon_load_reg(rn, pass);
3367 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
3368 tcg_temp_free_i32(tmp2);
3369 break;
3370 case 1:
3371 tmp2 = neon_load_reg(rn, pass);
3372 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
3373 tcg_temp_free_i32(tmp2);
3374 break;
3375 case 2:
3376 break;
3378 neon_store_reg(rn, pass, tmp);
3381 } else { /* !dp */
3382 if ((insn & 0x6f) != 0x00)
3383 return 1;
3384 rn = VFP_SREG_N(insn);
3385 if (insn & ARM_CP_RW_BIT) {
3386 /* vfp->arm */
3387 if (insn & (1 << 21)) {
3388 /* system register */
3389 rn >>= 1;
3391 switch (rn) {
3392 case ARM_VFP_FPSID:
3393 /* VFP2 allows access to FPSID from userspace.
3394 VFP3 restricts all id registers to privileged
3395 accesses. */
3396 if (IS_USER(s)
3397 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3398 return 1;
3400 tmp = load_cpu_field(vfp.xregs[rn]);
3401 break;
3402 case ARM_VFP_FPEXC:
3403 if (IS_USER(s))
3404 return 1;
3405 tmp = load_cpu_field(vfp.xregs[rn]);
3406 break;
3407 case ARM_VFP_FPINST:
3408 case ARM_VFP_FPINST2:
3409 /* Not present in VFP3. */
3410 if (IS_USER(s)
3411 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3412 return 1;
3414 tmp = load_cpu_field(vfp.xregs[rn]);
3415 break;
3416 case ARM_VFP_FPSCR:
3417 if (rd == 15) {
3418 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3419 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3420 } else {
3421 tmp = tcg_temp_new_i32();
3422 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3424 break;
3425 case ARM_VFP_MVFR2:
3426 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3427 return 1;
3429 /* fall through */
3430 case ARM_VFP_MVFR0:
3431 case ARM_VFP_MVFR1:
3432 if (IS_USER(s)
3433 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
3434 return 1;
3436 tmp = load_cpu_field(vfp.xregs[rn]);
3437 break;
3438 default:
3439 return 1;
3441 } else {
3442 gen_mov_F0_vreg(0, rn);
3443 tmp = gen_vfp_mrs();
3445 if (rd == 15) {
3446 /* Set the 4 flag bits in the CPSR. */
3447 gen_set_nzcv(tmp);
3448 tcg_temp_free_i32(tmp);
3449 } else {
3450 store_reg(s, rd, tmp);
3452 } else {
3453 /* arm->vfp */
3454 if (insn & (1 << 21)) {
3455 rn >>= 1;
3456 /* system register */
3457 switch (rn) {
3458 case ARM_VFP_FPSID:
3459 case ARM_VFP_MVFR0:
3460 case ARM_VFP_MVFR1:
3461 /* Writes are ignored. */
3462 break;
3463 case ARM_VFP_FPSCR:
3464 tmp = load_reg(s, rd);
3465 gen_helper_vfp_set_fpscr(cpu_env, tmp);
3466 tcg_temp_free_i32(tmp);
3467 gen_lookup_tb(s);
3468 break;
3469 case ARM_VFP_FPEXC:
3470 if (IS_USER(s))
3471 return 1;
3472 /* TODO: VFP subarchitecture support.
3473 * For now, keep the EN bit only */
3474 tmp = load_reg(s, rd);
3475 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
3476 store_cpu_field(tmp, vfp.xregs[rn]);
3477 gen_lookup_tb(s);
3478 break;
3479 case ARM_VFP_FPINST:
3480 case ARM_VFP_FPINST2:
3481 if (IS_USER(s)) {
3482 return 1;
3484 tmp = load_reg(s, rd);
3485 store_cpu_field(tmp, vfp.xregs[rn]);
3486 break;
3487 default:
3488 return 1;
3490 } else {
3491 tmp = load_reg(s, rd);
3492 gen_vfp_msr(tmp);
3493 gen_mov_vreg_F0(0, rn);
3497 } else {
3498 /* data processing */
3499 /* The opcode is in bits 23, 21, 20 and 6. */
3500 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
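/* i.e. op[3] = insn bit 23, op[2:1] = insn bits [21:20] and op[0] = insn bit
 * 6, giving the 0..15 values used by the big switch below (0 is VMLA, 4 is
 * VMUL, 8 is VDIV, 15 is the extension space). */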
3501 if (dp) {
3502 if (op == 15) {
3503 /* rn is opcode */
3504 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3505 } else {
3506 /* rn is register number */
3507 VFP_DREG_N(rn, insn);
3510 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3511 ((rn & 0x1e) == 0x6))) {
3512 /* Integer or single/half precision destination. */
3513 rd = VFP_SREG_D(insn);
3514 } else {
3515 VFP_DREG_D(rd, insn);
3517 if (op == 15 &&
3518 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3519 ((rn & 0x1e) == 0x4))) {
3520 /* VCVT from int or half precision is always from S reg
3521 * regardless of dp bit. VCVT with immediate frac_bits
3522 * has same format as SREG_M.
3524 rm = VFP_SREG_M(insn);
3525 } else {
3526 VFP_DREG_M(rm, insn);
3528 } else {
3529 rn = VFP_SREG_N(insn);
3530 if (op == 15 && rn == 15) {
3531 /* Double precision destination. */
3532 VFP_DREG_D(rd, insn);
3533 } else {
3534 rd = VFP_SREG_D(insn);
3536 /* NB that we implicitly rely on the encoding for the frac_bits
3537 * in VCVT of fixed to float being the same as that of an SREG_M
3539 rm = VFP_SREG_M(insn);
3542 veclen = s->vec_len;
3543 if (op == 15 && rn > 3)
3544 veclen = 0;
3546 /* Shut up compiler warnings. */
3547 delta_m = 0;
3548 delta_d = 0;
3549 bank_mask = 0;
3551 if (veclen > 0) {
3552 if (dp)
3553 bank_mask = 0xc;
3554 else
3555 bank_mask = 0x18;
3557 /* Figure out what type of vector operation this is. */
3558 if ((rd & bank_mask) == 0) {
3559 /* scalar */
3560 veclen = 0;
3561 } else {
3562 if (dp)
3563 delta_d = (s->vec_stride >> 1) + 1;
3564 else
3565 delta_d = s->vec_stride + 1;
3567 if ((rm & bank_mask) == 0) {
3568 /* mixed scalar/vector */
3569 delta_m = 0;
3570 } else {
3571 /* vector */
3572 delta_m = delta_d;
3577 /* Load the initial operands. */
3578 if (op == 15) {
3579 switch (rn) {
3580 case 16:
3581 case 17:
3582 /* Integer source */
3583 gen_mov_F0_vreg(0, rm);
3584 break;
3585 case 8:
3586 case 9:
3587 /* Compare */
3588 gen_mov_F0_vreg(dp, rd);
3589 gen_mov_F1_vreg(dp, rm);
3590 break;
3591 case 10:
3592 case 11:
3593 /* Compare with zero */
3594 gen_mov_F0_vreg(dp, rd);
3595 gen_vfp_F1_ld0(dp);
3596 break;
3597 case 20:
3598 case 21:
3599 case 22:
3600 case 23:
3601 case 28:
3602 case 29:
3603 case 30:
3604 case 31:
3605 /* Source and destination the same. */
3606 gen_mov_F0_vreg(dp, rd);
3607 break;
3608 case 4:
3609 case 5:
3610 case 6:
3611 case 7:
3612 /* VCVTB, VCVTT: only present with the half-precision extension.
3613 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3614 * (we choose to UNDEF) */
3616 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3617 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
3618 return 1;
3620 if (!extract32(rn, 1, 1)) {
3621 /* Half precision source. */
3622 gen_mov_F0_vreg(0, rm);
3623 break;
3625 /* Otherwise fall through */
3626 default:
3627 /* One source operand. */
3628 gen_mov_F0_vreg(dp, rm);
3629 break;
3631 } else {
3632 /* Two source operands. */
3633 gen_mov_F0_vreg(dp, rn);
3634 gen_mov_F1_vreg(dp, rm);
3637 for (;;) {
3638 /* Perform the calculation. */
3639 switch (op) {
3640 case 0: /* VMLA: fd + (fn * fm) */
3641 /* Note that order of inputs to the add matters for NaNs */
3642 gen_vfp_F1_mul(dp);
3643 gen_mov_F0_vreg(dp, rd);
3644 gen_vfp_add(dp);
3645 break;
3646 case 1: /* VMLS: fd + -(fn * fm) */
3647 gen_vfp_mul(dp);
3648 gen_vfp_F1_neg(dp);
3649 gen_mov_F0_vreg(dp, rd);
3650 gen_vfp_add(dp);
3651 break;
3652 case 2: /* VNMLS: -fd + (fn * fm) */
3653 /* Note that it isn't valid to replace (-A + B) with (B - A)
3654 * or similar plausible looking simplifications
3655 * because this will give wrong results for NaNs.
3657 gen_vfp_F1_mul(dp);
3658 gen_mov_F0_vreg(dp, rd);
3659 gen_vfp_neg(dp);
3660 gen_vfp_add(dp);
3661 break;
3662 case 3: /* VNMLA: -fd + -(fn * fm) */
3663 gen_vfp_mul(dp);
3664 gen_vfp_F1_neg(dp);
3665 gen_mov_F0_vreg(dp, rd);
3666 gen_vfp_neg(dp);
3667 gen_vfp_add(dp);
3668 break;
3669 case 4: /* mul: fn * fm */
3670 gen_vfp_mul(dp);
3671 break;
3672 case 5: /* nmul: -(fn * fm) */
3673 gen_vfp_mul(dp);
3674 gen_vfp_neg(dp);
3675 break;
3676 case 6: /* add: fn + fm */
3677 gen_vfp_add(dp);
3678 break;
3679 case 7: /* sub: fn - fm */
3680 gen_vfp_sub(dp);
3681 break;
3682 case 8: /* div: fn / fm */
3683 gen_vfp_div(dp);
3684 break;
3685 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3686 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3687 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3688 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3689 /* These are fused multiply-add, and must be done as one
3690 * floating point operation with no rounding between the
3691 * multiplication and addition steps.
3692 * NB that doing the negations here as separate steps is
3693 * correct: an input NaN should come out with its sign bit
3694 * flipped if it is a negated input.
3696 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
3697 return 1;
3699 if (dp) {
3700 TCGv_ptr fpst;
3701 TCGv_i64 frd;
3702 if (op & 1) {
3703 /* VFNMS, VFMS */
3704 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3706 frd = tcg_temp_new_i64();
3707 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3708 if (op & 2) {
3709 /* VFNMA, VFNMS */
3710 gen_helper_vfp_negd(frd, frd);
3712 fpst = get_fpstatus_ptr(0);
3713 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3714 cpu_F1d, frd, fpst);
3715 tcg_temp_free_ptr(fpst);
3716 tcg_temp_free_i64(frd);
3717 } else {
3718 TCGv_ptr fpst;
3719 TCGv_i32 frd;
3720 if (op & 1) {
3721 /* VFNMS, VFMS */
3722 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3724 frd = tcg_temp_new_i32();
3725 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3726 if (op & 2) {
3727 gen_helper_vfp_negs(frd, frd);
3729 fpst = get_fpstatus_ptr(0);
3730 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3731 cpu_F1s, frd, fpst);
3732 tcg_temp_free_ptr(fpst);
3733 tcg_temp_free_i32(frd);
3735 break;
3736 case 14: /* fconst */
3737 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3738 return 1;
3741 n = (insn << 12) & 0x80000000;
3742 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3743 if (dp) {
3744 if (i & 0x40)
3745 i |= 0x3f80;
3746 else
3747 i |= 0x4000;
3748 n |= i << 16;
3749 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3750 } else {
3751 if (i & 0x40)
3752 i |= 0x780;
3753 else
3754 i |= 0x800;
3755 n |= i << 19;
3756 tcg_gen_movi_i32(cpu_F0s, n);
3758 break;
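/* The fconst case above expands the VFPv3 8-bit immediate: insn bit 19 is the
 * sign and insn[18:16]:insn[3:0] form the 7-bit fraction/exponent fragment.
 * For example that fragment being 0x70 with a clear sign bit produces
 * 0x3f800000 (1.0f), or 0x3ff0000000000000 when dp is set. */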
3759 case 15: /* extension space */
3760 switch (rn) {
3761 case 0: /* cpy */
3762 /* no-op */
3763 break;
3764 case 1: /* abs */
3765 gen_vfp_abs(dp);
3766 break;
3767 case 2: /* neg */
3768 gen_vfp_neg(dp);
3769 break;
3770 case 3: /* sqrt */
3771 gen_vfp_sqrt(dp);
3772 break;
3773 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3774 tmp = gen_vfp_mrs();
3775 tcg_gen_ext16u_i32(tmp, tmp);
3776 if (dp) {
3777 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3778 cpu_env);
3779 } else {
3780 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3781 cpu_env);
3783 tcg_temp_free_i32(tmp);
3784 break;
3785 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3786 tmp = gen_vfp_mrs();
3787 tcg_gen_shri_i32(tmp, tmp, 16);
3788 if (dp) {
3789 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3790 cpu_env);
3791 } else {
3792 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3793 cpu_env);
3795 tcg_temp_free_i32(tmp);
3796 break;
3797 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3798 tmp = tcg_temp_new_i32();
3799 if (dp) {
3800 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3801 cpu_env);
3802 } else {
3803 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3804 cpu_env);
3806 gen_mov_F0_vreg(0, rd);
3807 tmp2 = gen_vfp_mrs();
3808 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3809 tcg_gen_or_i32(tmp, tmp, tmp2);
3810 tcg_temp_free_i32(tmp2);
3811 gen_vfp_msr(tmp);
3812 break;
3813 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3814 tmp = tcg_temp_new_i32();
3815 if (dp) {
3816 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3817 cpu_env);
3818 } else {
3819 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3820 cpu_env);
3822 tcg_gen_shli_i32(tmp, tmp, 16);
3823 gen_mov_F0_vreg(0, rd);
3824 tmp2 = gen_vfp_mrs();
3825 tcg_gen_ext16u_i32(tmp2, tmp2);
3826 tcg_gen_or_i32(tmp, tmp, tmp2);
3827 tcg_temp_free_i32(tmp2);
3828 gen_vfp_msr(tmp);
3829 break;
3830 case 8: /* cmp */
3831 gen_vfp_cmp(dp);
3832 break;
3833 case 9: /* cmpe */
3834 gen_vfp_cmpe(dp);
3835 break;
3836 case 10: /* cmpz */
3837 gen_vfp_cmp(dp);
3838 break;
3839 case 11: /* cmpez */
3840 gen_vfp_F1_ld0(dp);
3841 gen_vfp_cmpe(dp);
3842 break;
3843 case 12: /* vrintr */
3845 TCGv_ptr fpst = get_fpstatus_ptr(0);
3846 if (dp) {
3847 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3848 } else {
3849 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3851 tcg_temp_free_ptr(fpst);
3852 break;
3854 case 13: /* vrintz */
3856 TCGv_ptr fpst = get_fpstatus_ptr(0);
3857 TCGv_i32 tcg_rmode;
3858 tcg_rmode = tcg_const_i32(float_round_to_zero);
3859 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3860 if (dp) {
3861 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3862 } else {
3863 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3865 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3866 tcg_temp_free_i32(tcg_rmode);
3867 tcg_temp_free_ptr(fpst);
3868 break;
3870 case 14: /* vrintx */
3872 TCGv_ptr fpst = get_fpstatus_ptr(0);
3873 if (dp) {
3874 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3875 } else {
3876 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3878 tcg_temp_free_ptr(fpst);
3879 break;
3881 case 15: /* single<->double conversion */
3882 if (dp)
3883 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3884 else
3885 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3886 break;
3887 case 16: /* fuito */
3888 gen_vfp_uito(dp, 0);
3889 break;
3890 case 17: /* fsito */
3891 gen_vfp_sito(dp, 0);
3892 break;
3893 case 20: /* fshto */
3894 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3895 return 1;
3897 gen_vfp_shto(dp, 16 - rm, 0);
3898 break;
3899 case 21: /* fslto */
3900 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3901 return 1;
3903 gen_vfp_slto(dp, 32 - rm, 0);
3904 break;
3905 case 22: /* fuhto */
3906 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3907 return 1;
3909 gen_vfp_uhto(dp, 16 - rm, 0);
3910 break;
3911 case 23: /* fulto */
3912 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3913 return 1;
3915 gen_vfp_ulto(dp, 32 - rm, 0);
3916 break;
3917 case 24: /* ftoui */
3918 gen_vfp_toui(dp, 0);
3919 break;
3920 case 25: /* ftouiz */
3921 gen_vfp_touiz(dp, 0);
3922 break;
3923 case 26: /* ftosi */
3924 gen_vfp_tosi(dp, 0);
3925 break;
3926 case 27: /* ftosiz */
3927 gen_vfp_tosiz(dp, 0);
3928 break;
3929 case 28: /* ftosh */
3930 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3931 return 1;
3933 gen_vfp_tosh(dp, 16 - rm, 0);
3934 break;
3935 case 29: /* ftosl */
3936 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3937 return 1;
3939 gen_vfp_tosl(dp, 32 - rm, 0);
3940 break;
3941 case 30: /* ftouh */
3942 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3943 return 1;
3945 gen_vfp_touh(dp, 16 - rm, 0);
3946 break;
3947 case 31: /* ftoul */
3948 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3949 return 1;
3951 gen_vfp_toul(dp, 32 - rm, 0);
3952 break;
3953 default: /* undefined */
3954 return 1;
3956 break;
3957 default: /* undefined */
3958 return 1;
3961 /* Write back the result. */
3962 if (op == 15 && (rn >= 8 && rn <= 11)) {
3963 /* Comparison, do nothing. */
3964 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3965 (rn & 0x1e) == 0x6)) {
3966 /* VCVT double to int: always integer result.
3967 * VCVT double to half precision is always a single
3968 * precision result.
3970 gen_mov_vreg_F0(0, rd);
3971 } else if (op == 15 && rn == 15) {
3972 /* conversion */
3973 gen_mov_vreg_F0(!dp, rd);
3974 } else {
3975 gen_mov_vreg_F0(dp, rd);
3978 /* break out of the loop if we have finished */
3979 if (veclen == 0)
3980 break;
3982 if (op == 15 && delta_m == 0) {
3983 /* single source one-many */
3984 while (veclen--) {
3985 rd = ((rd + delta_d) & (bank_mask - 1))
3986 | (rd & bank_mask);
3987 gen_mov_vreg_F0(dp, rd);
3989 break;
3991 /* Set up the next operands. */
3992 veclen--;
3993 rd = ((rd + delta_d) & (bank_mask - 1))
3994 | (rd & bank_mask);
3996 if (op == 15) {
3997 /* One source operand. */
3998 rm = ((rm + delta_m) & (bank_mask - 1))
3999 | (rm & bank_mask);
4000 gen_mov_F0_vreg(dp, rm);
4001 } else {
4002 /* Two source operands. */
4003 rn = ((rn + delta_d) & (bank_mask - 1))
4004 | (rn & bank_mask);
4005 gen_mov_F0_vreg(dp, rn);
4006 if (delta_m) {
4007 rm = ((rm + delta_m) & (bank_mask - 1))
4008 | (rm & bank_mask);
4009 gen_mov_F1_vreg(dp, rm);
4014 break;
4015 case 0xc:
4016 case 0xd:
4017 if ((insn & 0x03e00000) == 0x00400000) {
4018 /* two-register transfer */
4019 rn = (insn >> 16) & 0xf;
4020 rd = (insn >> 12) & 0xf;
4021 if (dp) {
4022 VFP_DREG_M(rm, insn);
4023 } else {
4024 rm = VFP_SREG_M(insn);
4027 if (insn & ARM_CP_RW_BIT) {
4028 /* vfp->arm */
4029 if (dp) {
4030 gen_mov_F0_vreg(0, rm * 2);
4031 tmp = gen_vfp_mrs();
4032 store_reg(s, rd, tmp);
4033 gen_mov_F0_vreg(0, rm * 2 + 1);
4034 tmp = gen_vfp_mrs();
4035 store_reg(s, rn, tmp);
4036 } else {
4037 gen_mov_F0_vreg(0, rm);
4038 tmp = gen_vfp_mrs();
4039 store_reg(s, rd, tmp);
4040 gen_mov_F0_vreg(0, rm + 1);
4041 tmp = gen_vfp_mrs();
4042 store_reg(s, rn, tmp);
4044 } else {
4045 /* arm->vfp */
4046 if (dp) {
4047 tmp = load_reg(s, rd);
4048 gen_vfp_msr(tmp);
4049 gen_mov_vreg_F0(0, rm * 2);
4050 tmp = load_reg(s, rn);
4051 gen_vfp_msr(tmp);
4052 gen_mov_vreg_F0(0, rm * 2 + 1);
4053 } else {
4054 tmp = load_reg(s, rd);
4055 gen_vfp_msr(tmp);
4056 gen_mov_vreg_F0(0, rm);
4057 tmp = load_reg(s, rn);
4058 gen_vfp_msr(tmp);
4059 gen_mov_vreg_F0(0, rm + 1);
4062 } else {
4063 /* Load/store */
4064 rn = (insn >> 16) & 0xf;
4065 if (dp)
4066 VFP_DREG_D(rd, insn);
4067 else
4068 rd = VFP_SREG_D(insn);
4069 if ((insn & 0x01200000) == 0x01000000) {
4070 /* Single load/store */
4071 offset = (insn & 0xff) << 2;
4072 if ((insn & (1 << 23)) == 0)
4073 offset = -offset;
4074 if (s->thumb && rn == 15) {
4075 /* This is actually UNPREDICTABLE */
4076 addr = tcg_temp_new_i32();
4077 tcg_gen_movi_i32(addr, s->pc & ~2);
4078 } else {
4079 addr = load_reg(s, rn);
4081 tcg_gen_addi_i32(addr, addr, offset);
4082 if (insn & (1 << 20)) {
4083 gen_vfp_ld(s, dp, addr);
4084 gen_mov_vreg_F0(dp, rd);
4085 } else {
4086 gen_mov_F0_vreg(dp, rd);
4087 gen_vfp_st(s, dp, addr);
4089 tcg_temp_free_i32(addr);
4090 } else {
4091 /* load/store multiple */
4092 int w = insn & (1 << 21);
4093 if (dp)
4094 n = (insn >> 1) & 0x7f;
4095 else
4096 n = insn & 0xff;
4098 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4099 /* P == U , W == 1 => UNDEF */
4100 return 1;
4102 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4103 /* UNPREDICTABLE cases for bad immediates: we choose to
4104 * UNDEF to avoid generating huge numbers of TCG ops
4106 return 1;
4108 if (rn == 15 && w) {
4109 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4110 return 1;
4113 if (s->thumb && rn == 15) {
4114 /* This is actually UNPREDICTABLE */
4115 addr = tcg_temp_new_i32();
4116 tcg_gen_movi_i32(addr, s->pc & ~2);
4117 } else {
4118 addr = load_reg(s, rn);
4120 if (insn & (1 << 24)) /* pre-decrement */
4121 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
4123 if (dp)
4124 offset = 8;
4125 else
4126 offset = 4;
4127 for (i = 0; i < n; i++) {
4128 if (insn & ARM_CP_RW_BIT) {
4129 /* load */
4130 gen_vfp_ld(s, dp, addr);
4131 gen_mov_vreg_F0(dp, rd + i);
4132 } else {
4133 /* store */
4134 gen_mov_F0_vreg(dp, rd + i);
4135 gen_vfp_st(s, dp, addr);
4137 tcg_gen_addi_i32(addr, addr, offset);
4139 if (w) {
4140 /* writeback */
4141 if (insn & (1 << 24))
4142 offset = -offset * n;
4143 else if (dp && (insn & 1))
4144 offset = 4;
4145 else
4146 offset = 0;
4148 if (offset != 0)
4149 tcg_gen_addi_i32(addr, addr, offset);
4150 store_reg(s, rn, addr);
4151 } else {
4152 tcg_temp_free_i32(addr);
4156 break;
4157 default:
4158 /* Should never happen. */
4159 return 1;
4161 return 0;
4164 static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
4166 #ifndef CONFIG_USER_ONLY
4167 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
4168 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4169 #else
4170 return true;
4171 #endif
4174 static void gen_goto_ptr(void)
4176 TCGv addr = tcg_temp_new();
4177 tcg_gen_extu_i32_tl(addr, cpu_R[15]);
4178 tcg_gen_lookup_and_goto_ptr(addr);
4179 tcg_temp_free(addr);
4182 /* This will end the TB but doesn't guarantee we'll return to
4183 * cpu_loop_exec. Any live exit_requests will be processed as we
4184 * enter the next TB. */
4186 static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
4188 if (use_goto_tb(s, dest)) {
4189 tcg_gen_goto_tb(n);
4190 gen_set_pc_im(s, dest);
4191 tcg_gen_exit_tb((uintptr_t)s->base.tb + n);
4192 } else {
4193 gen_set_pc_im(s, dest);
4194 gen_goto_ptr();
4196 s->base.is_jmp = DISAS_NORETURN;
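/* Direct chaining with tcg_gen_goto_tb/tcg_gen_exit_tb is only used when
 * use_goto_tb says the destination is on the same guest page as the start of
 * the TB or of the current insn (user-mode emulation always allows it);
 * otherwise the PC is set and the slower lookup-and-goto-ptr path re-resolves
 * the target at run time. */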
4199 static inline void gen_jmp (DisasContext *s, uint32_t dest)
4201 if (unlikely(is_singlestepping(s))) {
4202 /* An indirect jump so that we still trigger the debug exception. */
4203 if (s->thumb)
4204 dest |= 1;
4205 gen_bx_im(s, dest);
4206 } else {
4207 gen_goto_tb(s, 0, dest);
4211 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
4213 if (x)
4214 tcg_gen_sari_i32(t0, t0, 16);
4215 else
4216 gen_sxth(t0);
4217 if (y)
4218 tcg_gen_sari_i32(t1, t1, 16);
4219 else
4220 gen_sxth(t1);
4221 tcg_gen_mul_i32(t0, t0, t1);
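/* Used by the signed 16x16 multiplies (SMULxy and friends): x selects the top
 * (1) or bottom (0) signed halfword of t0, y does the same for t1, and the two
 * sign-extended halfwords are then multiplied. */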
4224 /* Return the mask of PSR bits set by a MSR instruction. */
4225 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4227 uint32_t mask;
4229 mask = 0;
4230 if (flags & (1 << 0))
4231 mask |= 0xff;
4232 if (flags & (1 << 1))
4233 mask |= 0xff00;
4234 if (flags & (1 << 2))
4235 mask |= 0xff0000;
4236 if (flags & (1 << 3))
4237 mask |= 0xff000000;
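/* Worked example: an MSR that writes only the c and f fields sets flags
 * bits 0 and 3, giving mask == 0xff0000ff at this point, before the
 * reserved-bit, feature and privilege filtering below narrows it. */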
4239 /* Mask out undefined bits. */
4240 mask &= ~CPSR_RESERVED;
4241 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
4242 mask &= ~CPSR_T;
4244 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
4245 mask &= ~CPSR_Q; /* V5TE in reality */
4247 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
4248 mask &= ~(CPSR_E | CPSR_GE);
4250 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
4251 mask &= ~CPSR_IT;
4253 /* Mask out execution state and reserved bits. */
4254 if (!spsr) {
4255 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4257 /* Mask out privileged bits. */
4258 if (IS_USER(s))
4259 mask &= CPSR_USER;
4260 return mask;
4263 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
4264 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
4266 TCGv_i32 tmp;
4267 if (spsr) {
4268 /* ??? This is also undefined in system mode. */
4269 if (IS_USER(s))
4270 return 1;
4272 tmp = load_cpu_field(spsr);
4273 tcg_gen_andi_i32(tmp, tmp, ~mask);
4274 tcg_gen_andi_i32(t0, t0, mask);
4275 tcg_gen_or_i32(tmp, tmp, t0);
4276 store_cpu_field(tmp, spsr);
4277 } else {
4278 gen_set_cpsr(t0, mask);
4280 tcg_temp_free_i32(t0);
4281 gen_lookup_tb(s);
4282 return 0;
4285 /* Returns nonzero if access to the PSR is not permitted. */
4286 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4288 TCGv_i32 tmp;
4289 tmp = tcg_temp_new_i32();
4290 tcg_gen_movi_i32(tmp, val);
4291 return gen_set_psr(s, mask, spsr, tmp);
4294 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
4295 int *tgtmode, int *regno)
4297 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4298 * the target mode and register number, and identify the various
4299 * unpredictable cases.
4300 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4301 * + executed in user mode
4302 * + using R15 as the src/dest register
4303 * + accessing an unimplemented register
4304 * + accessing a register that's inaccessible at current PL/security state*
4305 * + accessing a register that you could access with a different insn
4306 * We choose to UNDEF in all these cases.
4307 * Since we don't know which of the various AArch32 modes we are in
4308 * we have to defer some checks to runtime.
4309 * Accesses to Monitor mode registers from Secure EL1 (which implies
4310 * that EL3 is AArch64) must trap to EL3.
4312 * If the access checks fail this function will emit code to take
4313 * an exception and return false. Otherwise it will return true,
4314 * and set *tgtmode and *regno appropriately. */
4316 int exc_target = default_exception_el(s);
4318 /* These instructions are present only in ARMv8, or in ARMv7 with the
4319 * Virtualization Extensions.
4321 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4322 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
4323 goto undef;
4326 if (IS_USER(s) || rn == 15) {
4327 goto undef;
4330 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4331 * of registers into (r, sysm). */
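/* For example, r == 1 with sysm == 0x10 selects SPSR_irq, while r == 0
 * with sysm == 0x13 selects r13_svc (see the switches below). */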
4333 if (r) {
4334 /* SPSRs for other modes */
4335 switch (sysm) {
4336 case 0xe: /* SPSR_fiq */
4337 *tgtmode = ARM_CPU_MODE_FIQ;
4338 break;
4339 case 0x10: /* SPSR_irq */
4340 *tgtmode = ARM_CPU_MODE_IRQ;
4341 break;
4342 case 0x12: /* SPSR_svc */
4343 *tgtmode = ARM_CPU_MODE_SVC;
4344 break;
4345 case 0x14: /* SPSR_abt */
4346 *tgtmode = ARM_CPU_MODE_ABT;
4347 break;
4348 case 0x16: /* SPSR_und */
4349 *tgtmode = ARM_CPU_MODE_UND;
4350 break;
4351 case 0x1c: /* SPSR_mon */
4352 *tgtmode = ARM_CPU_MODE_MON;
4353 break;
4354 case 0x1e: /* SPSR_hyp */
4355 *tgtmode = ARM_CPU_MODE_HYP;
4356 break;
4357 default: /* unallocated */
4358 goto undef;
4360 /* We arbitrarily assign SPSR a register number of 16. */
4361 *regno = 16;
4362 } else {
4363 /* general purpose registers for other modes */
4364 switch (sysm) {
4365 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4366 *tgtmode = ARM_CPU_MODE_USR;
4367 *regno = sysm + 8;
4368 break;
4369 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4370 *tgtmode = ARM_CPU_MODE_FIQ;
4371 *regno = sysm;
4372 break;
4373 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4374 *tgtmode = ARM_CPU_MODE_IRQ;
4375 *regno = sysm & 1 ? 13 : 14;
4376 break;
4377 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4378 *tgtmode = ARM_CPU_MODE_SVC;
4379 *regno = sysm & 1 ? 13 : 14;
4380 break;
4381 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4382 *tgtmode = ARM_CPU_MODE_ABT;
4383 *regno = sysm & 1 ? 13 : 14;
4384 break;
4385 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4386 *tgtmode = ARM_CPU_MODE_UND;
4387 *regno = sysm & 1 ? 13 : 14;
4388 break;
4389 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4390 *tgtmode = ARM_CPU_MODE_MON;
4391 *regno = sysm & 1 ? 13 : 14;
4392 break;
4393 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4394 *tgtmode = ARM_CPU_MODE_HYP;
4395 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4396 *regno = sysm & 1 ? 13 : 17;
4397 break;
4398 default: /* unallocated */
4399 goto undef;
4403 /* Catch the 'accessing inaccessible register' cases we can detect
4404 * at translate time. */
4406 switch (*tgtmode) {
4407 case ARM_CPU_MODE_MON:
4408 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
4409 goto undef;
4411 if (s->current_el == 1) {
4412 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4413 * then accesses to Mon registers trap to EL3. */
4415 exc_target = 3;
4416 goto undef;
4418 break;
4419 case ARM_CPU_MODE_HYP:
4420 /* Note that we can forbid accesses from EL2 here because they
4421 * must be from Hyp mode itself. */
4423 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
4424 goto undef;
4426 break;
4427 default:
4428 break;
4431 return true;
4433 undef:
4434 /* If we get here then some access check did not pass */
4435 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
4436 return false;
4439 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4441 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4442 int tgtmode = 0, regno = 0;
4444 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4445 return;
4448 /* Sync state because msr_banked() can raise exceptions */
4449 gen_set_condexec(s);
4450 gen_set_pc_im(s, s->pc - 4);
4451 tcg_reg = load_reg(s, rn);
4452 tcg_tgtmode = tcg_const_i32(tgtmode);
4453 tcg_regno = tcg_const_i32(regno);
4454 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4455 tcg_temp_free_i32(tcg_tgtmode);
4456 tcg_temp_free_i32(tcg_regno);
4457 tcg_temp_free_i32(tcg_reg);
4458 s->base.is_jmp = DISAS_UPDATE;
4461 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4463 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4464 int tgtmode = 0, regno = 0;
4466 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4467 return;
4470 /* Sync state because mrs_banked() can raise exceptions */
4471 gen_set_condexec(s);
4472 gen_set_pc_im(s, s->pc - 4);
4473 tcg_reg = tcg_temp_new_i32();
4474 tcg_tgtmode = tcg_const_i32(tgtmode);
4475 tcg_regno = tcg_const_i32(regno);
4476 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4477 tcg_temp_free_i32(tcg_tgtmode);
4478 tcg_temp_free_i32(tcg_regno);
4479 store_reg(s, rn, tcg_reg);
4480 s->base.is_jmp = DISAS_UPDATE;
4483 /* Store value to PC as for an exception return (i.e. don't
4484 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
4485 * will do the masking based on the new value of the Thumb bit. */
4487 static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
4489 tcg_gen_mov_i32(cpu_R[15], pc);
4490 tcg_temp_free_i32(pc);
4493 /* Generate a v6 exception return. Marks both values as dead. */
4494 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
4496 store_pc_exc_ret(s, pc);
4497 /* The cpsr_write_eret helper will mask the low bits of PC
4498 * appropriately depending on the new Thumb bit, so it must
4499 * be called after storing the new PC. */
4501 gen_helper_cpsr_write_eret(cpu_env, cpsr);
4502 tcg_temp_free_i32(cpsr);
4503 /* Must exit loop to check un-masked IRQs */
4504 s->base.is_jmp = DISAS_EXIT;
4507 /* Generate an old-style exception return. Marks pc as dead. */
4508 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4510 gen_rfe(s, pc, load_cpu_field(spsr));
4514 /* For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
4515 * only call the helper when running single-threaded TCG code, to ensure
4516 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4517 * just skip this instruction. Currently the SEV/SEVL instructions,
4518 * which are *one* of many ways to wake the CPU from WFE, are not
4519 * implemented, so we can't sleep like WFI does. */
4521 static void gen_nop_hint(DisasContext *s, int val)
4523 switch (val) {
4524 case 1: /* yield */
4525 if (!parallel_cpus) {
4526 gen_set_pc_im(s, s->pc);
4527 s->base.is_jmp = DISAS_YIELD;
4529 break;
4530 case 3: /* wfi */
4531 gen_set_pc_im(s, s->pc);
4532 s->base.is_jmp = DISAS_WFI;
4533 break;
4534 case 2: /* wfe */
4535 if (!parallel_cpus) {
4536 gen_set_pc_im(s, s->pc);
4537 s->base.is_jmp = DISAS_WFE;
4539 break;
4540 case 4: /* sev */
4541 case 5: /* sevl */
4542 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
4543 default: /* nop */
4544 break;
4548 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
4550 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
4552 switch (size) {
4553 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4554 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4555 case 2: tcg_gen_add_i32(t0, t0, t1); break;
4556 default: abort();
4560 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
4562 switch (size) {
4563 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4564 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4565 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
4566 default: return;
4570 /* 32-bit pairwise ops end up the same as the elementwise versions. */
4571 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4572 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4573 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4574 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
4576 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
4577 switch ((size << 1) | u) { \
4578 case 0: \
4579 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
4580 break; \
4581 case 1: \
4582 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
4583 break; \
4584 case 2: \
4585 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
4586 break; \
4587 case 3: \
4588 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
4589 break; \
4590 case 4: \
4591 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
4592 break; \
4593 case 5: \
4594 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
4595 break; \
4596 default: return 1; \
4597 }} while (0)
4599 #define GEN_NEON_INTEGER_OP(name) do { \
4600 switch ((size << 1) | u) { \
4601 case 0: \
4602 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
4603 break; \
4604 case 1: \
4605 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
4606 break; \
4607 case 2: \
4608 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
4609 break; \
4610 case 3: \
4611 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
4612 break; \
4613 case 4: \
4614 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
4615 break; \
4616 case 5: \
4617 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
4618 break; \
4619 default: return 1; \
4620 }} while (0)
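/* In both macros the index (size << 1) | u walks the helper suffixes in
 * the order s8, u8, s16, u16, s32, u32: e.g. size == 1, u == 0 picks the
 * _s16 variant, while size == 3 hits the default case and returns 1. */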
4622 static TCGv_i32 neon_load_scratch(int scratch)
4624 TCGv_i32 tmp = tcg_temp_new_i32();
4625 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4626 return tmp;
4629 static void neon_store_scratch(int scratch, TCGv_i32 var)
4631 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4632 tcg_temp_free_i32(var);
4635 static inline TCGv_i32 neon_get_scalar(int size, int reg)
4637 TCGv_i32 tmp;
4638 if (size == 1) {
4639 tmp = neon_load_reg(reg & 7, reg >> 4);
4640 if (reg & 8) {
4641 gen_neon_dup_high16(tmp);
4642 } else {
4643 gen_neon_dup_low16(tmp);
4645 } else {
4646 tmp = neon_load_reg(reg & 15, reg >> 4);
4648 return tmp;
4651 static int gen_neon_unzip(int rd, int rm, int size, int q)
4653 TCGv_i32 tmp, tmp2;
4654 if (!q && size == 2) {
4655 return 1;
4657 tmp = tcg_const_i32(rd);
4658 tmp2 = tcg_const_i32(rm);
4659 if (q) {
4660 switch (size) {
4661 case 0:
4662 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
4663 break;
4664 case 1:
4665 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
4666 break;
4667 case 2:
4668 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
4669 break;
4670 default:
4671 abort();
4673 } else {
4674 switch (size) {
4675 case 0:
4676 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
4677 break;
4678 case 1:
4679 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
4680 break;
4681 default:
4682 abort();
4685 tcg_temp_free_i32(tmp);
4686 tcg_temp_free_i32(tmp2);
4687 return 0;
4690 static int gen_neon_zip(int rd, int rm, int size, int q)
4692 TCGv_i32 tmp, tmp2;
4693 if (!q && size == 2) {
4694 return 1;
4696 tmp = tcg_const_i32(rd);
4697 tmp2 = tcg_const_i32(rm);
4698 if (q) {
4699 switch (size) {
4700 case 0:
4701 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
4702 break;
4703 case 1:
4704 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
4705 break;
4706 case 2:
4707 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
4708 break;
4709 default:
4710 abort();
4712 } else {
4713 switch (size) {
4714 case 0:
4715 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
4716 break;
4717 case 1:
4718 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
4719 break;
4720 default:
4721 abort();
4724 tcg_temp_free_i32(tmp);
4725 tcg_temp_free_i32(tmp2);
4726 return 0;
4729 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
4731 TCGv_i32 rd, tmp;
4733 rd = tcg_temp_new_i32();
4734 tmp = tcg_temp_new_i32();
4736 tcg_gen_shli_i32(rd, t0, 8);
4737 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4738 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4739 tcg_gen_or_i32(rd, rd, tmp);
4741 tcg_gen_shri_i32(t1, t1, 8);
4742 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4743 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4744 tcg_gen_or_i32(t1, t1, tmp);
4745 tcg_gen_mov_i32(t0, rd);
4747 tcg_temp_free_i32(tmp);
4748 tcg_temp_free_i32(rd);
4751 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
4753 TCGv_i32 rd, tmp;
4755 rd = tcg_temp_new_i32();
4756 tmp = tcg_temp_new_i32();
4758 tcg_gen_shli_i32(rd, t0, 16);
4759 tcg_gen_andi_i32(tmp, t1, 0xffff);
4760 tcg_gen_or_i32(rd, rd, tmp);
4761 tcg_gen_shri_i32(t1, t1, 16);
4762 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4763 tcg_gen_or_i32(t1, t1, tmp);
4764 tcg_gen_mov_i32(t0, rd);
4766 tcg_temp_free_i32(tmp);
4767 tcg_temp_free_i32(rd);
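/* Indexed by the 'type' field in insn bits [11:8]: 'nregs' D registers
 * are transferred per structure, elements are interleaved across
 * 'interleave' registers, and rd advances by 'spacing' after each
 * register (see the use in disas_neon_ls_insn below). */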
4771 static struct {
4772 int nregs;
4773 int interleave;
4774 int spacing;
4775 } neon_ls_element_type[11] = {
4776 {4, 4, 1},
4777 {4, 4, 2},
4778 {4, 1, 1},
4779 {4, 2, 1},
4780 {3, 3, 1},
4781 {3, 3, 2},
4782 {3, 1, 1},
4783 {1, 1, 1},
4784 {2, 2, 1},
4785 {2, 2, 2},
4786 {2, 1, 1}
4789 /* Translate a NEON load/store element instruction. Return nonzero if the
4790 instruction is invalid. */
4791 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
4793 int rd, rn, rm;
4794 int op;
4795 int nregs;
4796 int interleave;
4797 int spacing;
4798 int stride;
4799 int size;
4800 int reg;
4801 int pass;
4802 int load;
4803 int shift;
4804 int n;
4805 TCGv_i32 addr;
4806 TCGv_i32 tmp;
4807 TCGv_i32 tmp2;
4808 TCGv_i64 tmp64;
4810 /* FIXME: this access check should not take precedence over UNDEF
4811 * for invalid encodings; we will generate incorrect syndrome information
4812 * for attempts to execute invalid vfp/neon encodings with FP disabled. */
4814 if (s->fp_excp_el) {
4815 gen_exception_insn(s, 4, EXCP_UDEF,
4816 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
4817 return 0;
4820 if (!s->vfp_enabled)
4821 return 1;
4822 VFP_DREG_D(rd, insn);
4823 rn = (insn >> 16) & 0xf;
4824 rm = insn & 0xf;
4825 load = (insn & (1 << 21)) != 0;
4826 if ((insn & (1 << 23)) == 0) {
4827 /* Load store all elements. */
4828 op = (insn >> 8) & 0xf;
4829 size = (insn >> 6) & 3;
4830 if (op > 10)
4831 return 1;
4832 /* Catch UNDEF cases for bad values of align field */
4833 switch (op & 0xc) {
4834 case 4:
4835 if (((insn >> 5) & 1) == 1) {
4836 return 1;
4838 break;
4839 case 8:
4840 if (((insn >> 4) & 3) == 3) {
4841 return 1;
4843 break;
4844 default:
4845 break;
4847 nregs = neon_ls_element_type[op].nregs;
4848 interleave = neon_ls_element_type[op].interleave;
4849 spacing = neon_ls_element_type[op].spacing;
4850 if (size == 3 && (interleave | spacing) != 1)
4851 return 1;
4852 addr = tcg_temp_new_i32();
4853 load_reg_var(s, addr, rn);
4854 stride = (1 << size) * interleave;
4855 for (reg = 0; reg < nregs; reg++) {
4856 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
4857 load_reg_var(s, addr, rn);
4858 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
4859 } else if (interleave == 2 && nregs == 4 && reg == 2) {
4860 load_reg_var(s, addr, rn);
4861 tcg_gen_addi_i32(addr, addr, 1 << size);
4863 if (size == 3) {
4864 tmp64 = tcg_temp_new_i64();
4865 if (load) {
4866 gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
4867 neon_store_reg64(tmp64, rd);
4868 } else {
4869 neon_load_reg64(tmp64, rd);
4870 gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
4872 tcg_temp_free_i64(tmp64);
4873 tcg_gen_addi_i32(addr, addr, stride);
4874 } else {
4875 for (pass = 0; pass < 2; pass++) {
4876 if (size == 2) {
4877 if (load) {
4878 tmp = tcg_temp_new_i32();
4879 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
4880 neon_store_reg(rd, pass, tmp);
4881 } else {
4882 tmp = neon_load_reg(rd, pass);
4883 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
4884 tcg_temp_free_i32(tmp);
4886 tcg_gen_addi_i32(addr, addr, stride);
4887 } else if (size == 1) {
4888 if (load) {
4889 tmp = tcg_temp_new_i32();
4890 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
4891 tcg_gen_addi_i32(addr, addr, stride);
4892 tmp2 = tcg_temp_new_i32();
4893 gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
4894 tcg_gen_addi_i32(addr, addr, stride);
4895 tcg_gen_shli_i32(tmp2, tmp2, 16);
4896 tcg_gen_or_i32(tmp, tmp, tmp2);
4897 tcg_temp_free_i32(tmp2);
4898 neon_store_reg(rd, pass, tmp);
4899 } else {
4900 tmp = neon_load_reg(rd, pass);
4901 tmp2 = tcg_temp_new_i32();
4902 tcg_gen_shri_i32(tmp2, tmp, 16);
4903 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
4904 tcg_temp_free_i32(tmp);
4905 tcg_gen_addi_i32(addr, addr, stride);
4906 gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
4907 tcg_temp_free_i32(tmp2);
4908 tcg_gen_addi_i32(addr, addr, stride);
4910 } else /* size == 0 */ {
4911 if (load) {
4912 TCGV_UNUSED_I32(tmp2);
4913 for (n = 0; n < 4; n++) {
4914 tmp = tcg_temp_new_i32();
4915 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
4916 tcg_gen_addi_i32(addr, addr, stride);
4917 if (n == 0) {
4918 tmp2 = tmp;
4919 } else {
4920 tcg_gen_shli_i32(tmp, tmp, n * 8);
4921 tcg_gen_or_i32(tmp2, tmp2, tmp);
4922 tcg_temp_free_i32(tmp);
4925 neon_store_reg(rd, pass, tmp2);
4926 } else {
4927 tmp2 = neon_load_reg(rd, pass);
4928 for (n = 0; n < 4; n++) {
4929 tmp = tcg_temp_new_i32();
4930 if (n == 0) {
4931 tcg_gen_mov_i32(tmp, tmp2);
4932 } else {
4933 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4935 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
4936 tcg_temp_free_i32(tmp);
4937 tcg_gen_addi_i32(addr, addr, stride);
4939 tcg_temp_free_i32(tmp2);
4944 rd += spacing;
4946 tcg_temp_free_i32(addr);
4947 stride = nregs * 8;
4948 } else {
4949 size = (insn >> 10) & 3;
4950 if (size == 3) {
4951 /* Load single element to all lanes. */
4952 int a = (insn >> 4) & 1;
4953 if (!load) {
4954 return 1;
4956 size = (insn >> 6) & 3;
4957 nregs = ((insn >> 8) & 3) + 1;
4959 if (size == 3) {
4960 if (nregs != 4 || a == 0) {
4961 return 1;
4963 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4964 size = 2;
4966 if (nregs == 1 && a == 1 && size == 0) {
4967 return 1;
4969 if (nregs == 3 && a == 1) {
4970 return 1;
4972 addr = tcg_temp_new_i32();
4973 load_reg_var(s, addr, rn);
4974 if (nregs == 1) {
4975 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4976 tmp = gen_load_and_replicate(s, addr, size);
4977 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4978 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4979 if (insn & (1 << 5)) {
4980 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4981 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4983 tcg_temp_free_i32(tmp);
4984 } else {
4985 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4986 stride = (insn & (1 << 5)) ? 2 : 1;
4987 for (reg = 0; reg < nregs; reg++) {
4988 tmp = gen_load_and_replicate(s, addr, size);
4989 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4990 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4991 tcg_temp_free_i32(tmp);
4992 tcg_gen_addi_i32(addr, addr, 1 << size);
4993 rd += stride;
4996 tcg_temp_free_i32(addr);
4997 stride = (1 << size) * nregs;
4998 } else {
4999 /* Single element. */
5000 int idx = (insn >> 4) & 0xf;
5001 pass = (insn >> 7) & 1;
5002 switch (size) {
5003 case 0:
5004 shift = ((insn >> 5) & 3) * 8;
5005 stride = 1;
5006 break;
5007 case 1:
5008 shift = ((insn >> 6) & 1) * 16;
5009 stride = (insn & (1 << 5)) ? 2 : 1;
5010 break;
5011 case 2:
5012 shift = 0;
5013 stride = (insn & (1 << 6)) ? 2 : 1;
5014 break;
5015 default:
5016 abort();
5018 nregs = ((insn >> 8) & 3) + 1;
5019 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5020 switch (nregs) {
5021 case 1:
5022 if (((idx & (1 << size)) != 0) ||
5023 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5024 return 1;
5026 break;
5027 case 3:
5028 if ((idx & 1) != 0) {
5029 return 1;
5031 /* fall through */
5032 case 2:
5033 if (size == 2 && (idx & 2) != 0) {
5034 return 1;
5036 break;
5037 case 4:
5038 if ((size == 2) && ((idx & 3) == 3)) {
5039 return 1;
5041 break;
5042 default:
5043 abort();
5045 if ((rd + stride * (nregs - 1)) > 31) {
5046 /* Attempts to write off the end of the register file
5047 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5048 * the neon_load_reg() would write off the end of the array. */
5050 return 1;
5052 addr = tcg_temp_new_i32();
5053 load_reg_var(s, addr, rn);
5054 for (reg = 0; reg < nregs; reg++) {
5055 if (load) {
5056 tmp = tcg_temp_new_i32();
5057 switch (size) {
5058 case 0:
5059 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
5060 break;
5061 case 1:
5062 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
5063 break;
5064 case 2:
5065 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
5066 break;
5067 default: /* Avoid compiler warnings. */
5068 abort();
5070 if (size != 2) {
5071 tmp2 = neon_load_reg(rd, pass);
5072 tcg_gen_deposit_i32(tmp, tmp2, tmp,
5073 shift, size ? 16 : 8);
5074 tcg_temp_free_i32(tmp2);
5076 neon_store_reg(rd, pass, tmp);
5077 } else { /* Store */
5078 tmp = neon_load_reg(rd, pass);
5079 if (shift)
5080 tcg_gen_shri_i32(tmp, tmp, shift);
5081 switch (size) {
5082 case 0:
5083 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
5084 break;
5085 case 1:
5086 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
5087 break;
5088 case 2:
5089 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5090 break;
5092 tcg_temp_free_i32(tmp);
5094 rd += stride;
5095 tcg_gen_addi_i32(addr, addr, 1 << size);
5097 tcg_temp_free_i32(addr);
5098 stride = nregs * (1 << size);
5101 if (rm != 15) {
5102 TCGv_i32 base;
5104 base = load_reg(s, rn);
5105 if (rm == 13) {
5106 tcg_gen_addi_i32(base, base, stride);
5107 } else {
5108 TCGv_i32 index;
5109 index = load_reg(s, rm);
5110 tcg_gen_add_i32(base, base, index);
5111 tcg_temp_free_i32(index);
5113 store_reg(s, rn, base);
5115 return 0;
5118 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
5119 static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
5121 tcg_gen_and_i32(t, t, c);
5122 tcg_gen_andc_i32(f, f, c);
5123 tcg_gen_or_i32(dest, t, f);
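/* VBSL, VBIT and VBIF (cases 5..7 of the NEON logic group below) all map
 * onto this helper; only the order in which the destination and the two
 * source registers are passed as t, f and c differs. */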
5126 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
5128 switch (size) {
5129 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5130 case 1: gen_helper_neon_narrow_u16(dest, src); break;
5131 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
5132 default: abort();
5136 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
5138 switch (size) {
5139 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5140 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5141 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
5142 default: abort();
5146 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
5148 switch (size) {
5149 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5150 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5151 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
5152 default: abort();
5156 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
5158 switch (size) {
5159 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5160 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5161 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
5162 default: abort();
5166 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
5167 int q, int u)
5169 if (q) {
5170 if (u) {
5171 switch (size) {
5172 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5173 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5174 default: abort();
5176 } else {
5177 switch (size) {
5178 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5179 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5180 default: abort();
5183 } else {
5184 if (u) {
5185 switch (size) {
5186 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5187 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
5188 default: abort();
5190 } else {
5191 switch (size) {
5192 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5193 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5194 default: abort();
5200 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
5202 if (u) {
5203 switch (size) {
5204 case 0: gen_helper_neon_widen_u8(dest, src); break;
5205 case 1: gen_helper_neon_widen_u16(dest, src); break;
5206 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5207 default: abort();
5209 } else {
5210 switch (size) {
5211 case 0: gen_helper_neon_widen_s8(dest, src); break;
5212 case 1: gen_helper_neon_widen_s16(dest, src); break;
5213 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5214 default: abort();
5217 tcg_temp_free_i32(src);
5220 static inline void gen_neon_addl(int size)
5222 switch (size) {
5223 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5224 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5225 case 2: tcg_gen_add_i64(CPU_V001); break;
5226 default: abort();
5230 static inline void gen_neon_subl(int size)
5232 switch (size) {
5233 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5234 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5235 case 2: tcg_gen_sub_i64(CPU_V001); break;
5236 default: abort();
5240 static inline void gen_neon_negl(TCGv_i64 var, int size)
5242 switch (size) {
5243 case 0: gen_helper_neon_negl_u16(var, var); break;
5244 case 1: gen_helper_neon_negl_u32(var, var); break;
5245 case 2:
5246 tcg_gen_neg_i64(var, var);
5247 break;
5248 default: abort();
5252 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
5254 switch (size) {
5255 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5256 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
5257 default: abort();
5261 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5262 int size, int u)
5264 TCGv_i64 tmp;
5266 switch ((size << 1) | u) {
5267 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5268 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5269 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5270 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5271 case 4:
5272 tmp = gen_muls_i64_i32(a, b);
5273 tcg_gen_mov_i64(dest, tmp);
5274 tcg_temp_free_i64(tmp);
5275 break;
5276 case 5:
5277 tmp = gen_mulu_i64_i32(a, b);
5278 tcg_gen_mov_i64(dest, tmp);
5279 tcg_temp_free_i64(tmp);
5280 break;
5281 default: abort();
5284 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5285 Don't forget to clean them now. */
5286 if (size < 2) {
5287 tcg_temp_free_i32(a);
5288 tcg_temp_free_i32(b);
5292 static void gen_neon_narrow_op(int op, int u, int size,
5293 TCGv_i32 dest, TCGv_i64 src)
5295 if (op) {
5296 if (u) {
5297 gen_neon_unarrow_sats(size, dest, src);
5298 } else {
5299 gen_neon_narrow(size, dest, src);
5301 } else {
5302 if (u) {
5303 gen_neon_narrow_satu(size, dest, src);
5304 } else {
5305 gen_neon_narrow_sats(size, dest, src);
5310 /* Symbolic constants for op fields for Neon 3-register same-length.
5311 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
5312 * table A7-9. */
5314 #define NEON_3R_VHADD 0
5315 #define NEON_3R_VQADD 1
5316 #define NEON_3R_VRHADD 2
5317 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
5318 #define NEON_3R_VHSUB 4
5319 #define NEON_3R_VQSUB 5
5320 #define NEON_3R_VCGT 6
5321 #define NEON_3R_VCGE 7
5322 #define NEON_3R_VSHL 8
5323 #define NEON_3R_VQSHL 9
5324 #define NEON_3R_VRSHL 10
5325 #define NEON_3R_VQRSHL 11
5326 #define NEON_3R_VMAX 12
5327 #define NEON_3R_VMIN 13
5328 #define NEON_3R_VABD 14
5329 #define NEON_3R_VABA 15
5330 #define NEON_3R_VADD_VSUB 16
5331 #define NEON_3R_VTST_VCEQ 17
5332 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
5333 #define NEON_3R_VMUL 19
5334 #define NEON_3R_VPMAX 20
5335 #define NEON_3R_VPMIN 21
5336 #define NEON_3R_VQDMULH_VQRDMULH 22
5337 #define NEON_3R_VPADD 23
5338 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
5339 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
5340 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
5341 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
5342 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
5343 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
5344 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
5345 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
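/* Each entry below has bit n set if the op accepts size value n, mirroring
 * the neon_2rm_sizes table further down: 0x7 allows the 8/16/32-bit element
 * sizes, 0xf additionally allows size == 3 (64-bit), and for entries such
 * as 0x5 the size field partly encodes the operation instead. */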
5347 static const uint8_t neon_3r_sizes[] = {
5348 [NEON_3R_VHADD] = 0x7,
5349 [NEON_3R_VQADD] = 0xf,
5350 [NEON_3R_VRHADD] = 0x7,
5351 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
5352 [NEON_3R_VHSUB] = 0x7,
5353 [NEON_3R_VQSUB] = 0xf,
5354 [NEON_3R_VCGT] = 0x7,
5355 [NEON_3R_VCGE] = 0x7,
5356 [NEON_3R_VSHL] = 0xf,
5357 [NEON_3R_VQSHL] = 0xf,
5358 [NEON_3R_VRSHL] = 0xf,
5359 [NEON_3R_VQRSHL] = 0xf,
5360 [NEON_3R_VMAX] = 0x7,
5361 [NEON_3R_VMIN] = 0x7,
5362 [NEON_3R_VABD] = 0x7,
5363 [NEON_3R_VABA] = 0x7,
5364 [NEON_3R_VADD_VSUB] = 0xf,
5365 [NEON_3R_VTST_VCEQ] = 0x7,
5366 [NEON_3R_VML] = 0x7,
5367 [NEON_3R_VMUL] = 0x7,
5368 [NEON_3R_VPMAX] = 0x7,
5369 [NEON_3R_VPMIN] = 0x7,
5370 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
5371 [NEON_3R_VPADD] = 0x7,
5372 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
5373 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
5374 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
5375 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
5376 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
5377 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
5378 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
5379 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
5382 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
5383 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
5384 * table A7-13. */
5386 #define NEON_2RM_VREV64 0
5387 #define NEON_2RM_VREV32 1
5388 #define NEON_2RM_VREV16 2
5389 #define NEON_2RM_VPADDL 4
5390 #define NEON_2RM_VPADDL_U 5
5391 #define NEON_2RM_AESE 6 /* Includes AESD */
5392 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
5393 #define NEON_2RM_VCLS 8
5394 #define NEON_2RM_VCLZ 9
5395 #define NEON_2RM_VCNT 10
5396 #define NEON_2RM_VMVN 11
5397 #define NEON_2RM_VPADAL 12
5398 #define NEON_2RM_VPADAL_U 13
5399 #define NEON_2RM_VQABS 14
5400 #define NEON_2RM_VQNEG 15
5401 #define NEON_2RM_VCGT0 16
5402 #define NEON_2RM_VCGE0 17
5403 #define NEON_2RM_VCEQ0 18
5404 #define NEON_2RM_VCLE0 19
5405 #define NEON_2RM_VCLT0 20
5406 #define NEON_2RM_SHA1H 21
5407 #define NEON_2RM_VABS 22
5408 #define NEON_2RM_VNEG 23
5409 #define NEON_2RM_VCGT0_F 24
5410 #define NEON_2RM_VCGE0_F 25
5411 #define NEON_2RM_VCEQ0_F 26
5412 #define NEON_2RM_VCLE0_F 27
5413 #define NEON_2RM_VCLT0_F 28
5414 #define NEON_2RM_VABS_F 30
5415 #define NEON_2RM_VNEG_F 31
5416 #define NEON_2RM_VSWP 32
5417 #define NEON_2RM_VTRN 33
5418 #define NEON_2RM_VUZP 34
5419 #define NEON_2RM_VZIP 35
5420 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
5421 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
5422 #define NEON_2RM_VSHLL 38
5423 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
5424 #define NEON_2RM_VRINTN 40
5425 #define NEON_2RM_VRINTX 41
5426 #define NEON_2RM_VRINTA 42
5427 #define NEON_2RM_VRINTZ 43
5428 #define NEON_2RM_VCVT_F16_F32 44
5429 #define NEON_2RM_VRINTM 45
5430 #define NEON_2RM_VCVT_F32_F16 46
5431 #define NEON_2RM_VRINTP 47
5432 #define NEON_2RM_VCVTAU 48
5433 #define NEON_2RM_VCVTAS 49
5434 #define NEON_2RM_VCVTNU 50
5435 #define NEON_2RM_VCVTNS 51
5436 #define NEON_2RM_VCVTPU 52
5437 #define NEON_2RM_VCVTPS 53
5438 #define NEON_2RM_VCVTMU 54
5439 #define NEON_2RM_VCVTMS 55
5440 #define NEON_2RM_VRECPE 56
5441 #define NEON_2RM_VRSQRTE 57
5442 #define NEON_2RM_VRECPE_F 58
5443 #define NEON_2RM_VRSQRTE_F 59
5444 #define NEON_2RM_VCVT_FS 60
5445 #define NEON_2RM_VCVT_FU 61
5446 #define NEON_2RM_VCVT_SF 62
5447 #define NEON_2RM_VCVT_UF 63
5449 static int neon_2rm_is_float_op(int op)
5451 /* Return true if this neon 2reg-misc op is float-to-float */
5452 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
5453 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
5454 op == NEON_2RM_VRINTM ||
5455 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
5456 op >= NEON_2RM_VRECPE_F);
5459 static bool neon_2rm_is_v8_op(int op)
5461 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5462 switch (op) {
5463 case NEON_2RM_VRINTN:
5464 case NEON_2RM_VRINTA:
5465 case NEON_2RM_VRINTM:
5466 case NEON_2RM_VRINTP:
5467 case NEON_2RM_VRINTZ:
5468 case NEON_2RM_VRINTX:
5469 case NEON_2RM_VCVTAU:
5470 case NEON_2RM_VCVTAS:
5471 case NEON_2RM_VCVTNU:
5472 case NEON_2RM_VCVTNS:
5473 case NEON_2RM_VCVTPU:
5474 case NEON_2RM_VCVTPS:
5475 case NEON_2RM_VCVTMU:
5476 case NEON_2RM_VCVTMS:
5477 return true;
5478 default:
5479 return false;
5483 /* Each entry in this array has bit n set if the insn allows
5484 * size value n (otherwise it will UNDEF). Since unallocated
5485 * op values will have no bits set, they always UNDEF. */
5487 static const uint8_t neon_2rm_sizes[] = {
5488 [NEON_2RM_VREV64] = 0x7,
5489 [NEON_2RM_VREV32] = 0x3,
5490 [NEON_2RM_VREV16] = 0x1,
5491 [NEON_2RM_VPADDL] = 0x7,
5492 [NEON_2RM_VPADDL_U] = 0x7,
5493 [NEON_2RM_AESE] = 0x1,
5494 [NEON_2RM_AESMC] = 0x1,
5495 [NEON_2RM_VCLS] = 0x7,
5496 [NEON_2RM_VCLZ] = 0x7,
5497 [NEON_2RM_VCNT] = 0x1,
5498 [NEON_2RM_VMVN] = 0x1,
5499 [NEON_2RM_VPADAL] = 0x7,
5500 [NEON_2RM_VPADAL_U] = 0x7,
5501 [NEON_2RM_VQABS] = 0x7,
5502 [NEON_2RM_VQNEG] = 0x7,
5503 [NEON_2RM_VCGT0] = 0x7,
5504 [NEON_2RM_VCGE0] = 0x7,
5505 [NEON_2RM_VCEQ0] = 0x7,
5506 [NEON_2RM_VCLE0] = 0x7,
5507 [NEON_2RM_VCLT0] = 0x7,
5508 [NEON_2RM_SHA1H] = 0x4,
5509 [NEON_2RM_VABS] = 0x7,
5510 [NEON_2RM_VNEG] = 0x7,
5511 [NEON_2RM_VCGT0_F] = 0x4,
5512 [NEON_2RM_VCGE0_F] = 0x4,
5513 [NEON_2RM_VCEQ0_F] = 0x4,
5514 [NEON_2RM_VCLE0_F] = 0x4,
5515 [NEON_2RM_VCLT0_F] = 0x4,
5516 [NEON_2RM_VABS_F] = 0x4,
5517 [NEON_2RM_VNEG_F] = 0x4,
5518 [NEON_2RM_VSWP] = 0x1,
5519 [NEON_2RM_VTRN] = 0x7,
5520 [NEON_2RM_VUZP] = 0x7,
5521 [NEON_2RM_VZIP] = 0x7,
5522 [NEON_2RM_VMOVN] = 0x7,
5523 [NEON_2RM_VQMOVN] = 0x7,
5524 [NEON_2RM_VSHLL] = 0x7,
5525 [NEON_2RM_SHA1SU1] = 0x4,
5526 [NEON_2RM_VRINTN] = 0x4,
5527 [NEON_2RM_VRINTX] = 0x4,
5528 [NEON_2RM_VRINTA] = 0x4,
5529 [NEON_2RM_VRINTZ] = 0x4,
5530 [NEON_2RM_VCVT_F16_F32] = 0x2,
5531 [NEON_2RM_VRINTM] = 0x4,
5532 [NEON_2RM_VCVT_F32_F16] = 0x2,
5533 [NEON_2RM_VRINTP] = 0x4,
5534 [NEON_2RM_VCVTAU] = 0x4,
5535 [NEON_2RM_VCVTAS] = 0x4,
5536 [NEON_2RM_VCVTNU] = 0x4,
5537 [NEON_2RM_VCVTNS] = 0x4,
5538 [NEON_2RM_VCVTPU] = 0x4,
5539 [NEON_2RM_VCVTPS] = 0x4,
5540 [NEON_2RM_VCVTMU] = 0x4,
5541 [NEON_2RM_VCVTMS] = 0x4,
5542 [NEON_2RM_VRECPE] = 0x4,
5543 [NEON_2RM_VRSQRTE] = 0x4,
5544 [NEON_2RM_VRECPE_F] = 0x4,
5545 [NEON_2RM_VRSQRTE_F] = 0x4,
5546 [NEON_2RM_VCVT_FS] = 0x4,
5547 [NEON_2RM_VCVT_FU] = 0x4,
5548 [NEON_2RM_VCVT_SF] = 0x4,
5549 [NEON_2RM_VCVT_UF] = 0x4,
5552 /* Translate a NEON data processing instruction. Return nonzero if the
5553 instruction is invalid.
5554 We process data in a mixture of 32-bit and 64-bit chunks.
5555 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
5557 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
5559 int op;
5560 int q;
5561 int rd, rn, rm;
5562 int size;
5563 int shift;
5564 int pass;
5565 int count;
5566 int pairwise;
5567 int u;
5568 uint32_t imm, mask;
5569 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
5570 TCGv_i64 tmp64;
5572 /* FIXME: this access check should not take precedence over UNDEF
5573 * for invalid encodings; we will generate incorrect syndrome information
5574 * for attempts to execute invalid vfp/neon encodings with FP disabled. */
5576 if (s->fp_excp_el) {
5577 gen_exception_insn(s, 4, EXCP_UDEF,
5578 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
5579 return 0;
5582 if (!s->vfp_enabled)
5583 return 1;
5584 q = (insn & (1 << 6)) != 0;
5585 u = (insn >> 24) & 1;
5586 VFP_DREG_D(rd, insn);
5587 VFP_DREG_N(rn, insn);
5588 VFP_DREG_M(rm, insn);
5589 size = (insn >> 20) & 3;
5590 if ((insn & (1 << 23)) == 0) {
5591 /* Three register same length. */
5592 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
5593 /* Catch invalid op and bad size combinations: UNDEF */
5594 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5595 return 1;
5597 /* All insns of this form UNDEF for either this condition or the
5598 * superset of cases "Q==1"; we catch the latter later. */
5600 if (q && ((rd | rn | rm) & 1)) {
5601 return 1;
5604 /* The SHA-1/SHA-256 3-register instructions require special treatment
5605 * here, as their size field is overloaded as an op type selector, and
5606 * they all consume their input in a single pass. */
5608 if (op == NEON_3R_SHA) {
5609 if (!q) {
5610 return 1;
5612 if (!u) { /* SHA-1 */
5613 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
5614 return 1;
5616 tmp = tcg_const_i32(rd);
5617 tmp2 = tcg_const_i32(rn);
5618 tmp3 = tcg_const_i32(rm);
5619 tmp4 = tcg_const_i32(size);
5620 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5621 tcg_temp_free_i32(tmp4);
5622 } else { /* SHA-256 */
5623 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
5624 return 1;
5626 tmp = tcg_const_i32(rd);
5627 tmp2 = tcg_const_i32(rn);
5628 tmp3 = tcg_const_i32(rm);
5629 switch (size) {
5630 case 0:
5631 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5632 break;
5633 case 1:
5634 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5635 break;
5636 case 2:
5637 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5638 break;
5641 tcg_temp_free_i32(tmp);
5642 tcg_temp_free_i32(tmp2);
5643 tcg_temp_free_i32(tmp3);
5644 return 0;
5646 if (size == 3 && op != NEON_3R_LOGIC) {
5647 /* 64-bit element instructions. */
5648 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5649 neon_load_reg64(cpu_V0, rn + pass);
5650 neon_load_reg64(cpu_V1, rm + pass);
5651 switch (op) {
5652 case NEON_3R_VQADD:
5653 if (u) {
5654 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5655 cpu_V0, cpu_V1);
5656 } else {
5657 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5658 cpu_V0, cpu_V1);
5660 break;
5661 case NEON_3R_VQSUB:
5662 if (u) {
5663 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5664 cpu_V0, cpu_V1);
5665 } else {
5666 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5667 cpu_V0, cpu_V1);
5669 break;
5670 case NEON_3R_VSHL:
5671 if (u) {
5672 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5673 } else {
5674 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5676 break;
5677 case NEON_3R_VQSHL:
5678 if (u) {
5679 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5680 cpu_V1, cpu_V0);
5681 } else {
5682 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5683 cpu_V1, cpu_V0);
5685 break;
5686 case NEON_3R_VRSHL:
5687 if (u) {
5688 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
5689 } else {
5690 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5692 break;
5693 case NEON_3R_VQRSHL:
5694 if (u) {
5695 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5696 cpu_V1, cpu_V0);
5697 } else {
5698 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5699 cpu_V1, cpu_V0);
5701 break;
5702 case NEON_3R_VADD_VSUB:
5703 if (u) {
5704 tcg_gen_sub_i64(CPU_V001);
5705 } else {
5706 tcg_gen_add_i64(CPU_V001);
5708 break;
5709 default:
5710 abort();
5712 neon_store_reg64(cpu_V0, rd + pass);
5714 return 0;
5716 pairwise = 0;
5717 switch (op) {
5718 case NEON_3R_VSHL:
5719 case NEON_3R_VQSHL:
5720 case NEON_3R_VRSHL:
5721 case NEON_3R_VQRSHL:
5723 int rtmp;
5724 /* Shift instruction operands are reversed. */
5725 rtmp = rn;
5726 rn = rm;
5727 rm = rtmp;
5729 break;
5730 case NEON_3R_VPADD:
5731 if (u) {
5732 return 1;
5734 /* Fall through */
5735 case NEON_3R_VPMAX:
5736 case NEON_3R_VPMIN:
5737 pairwise = 1;
5738 break;
5739 case NEON_3R_FLOAT_ARITH:
5740 pairwise = (u && size < 2); /* if VPADD (float) */
5741 break;
5742 case NEON_3R_FLOAT_MINMAX:
5743 pairwise = u; /* if VPMIN/VPMAX (float) */
5744 break;
5745 case NEON_3R_FLOAT_CMP:
5746 if (!u && size) {
5747 /* no encoding for U=0 C=1x */
5748 return 1;
5750 break;
5751 case NEON_3R_FLOAT_ACMP:
5752 if (!u) {
5753 return 1;
5755 break;
5756 case NEON_3R_FLOAT_MISC:
5757 /* VMAXNM/VMINNM in ARMv8 */
5758 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
5759 return 1;
5761 break;
5762 case NEON_3R_VMUL:
5763 if (u && (size != 0)) {
5764 /* UNDEF on invalid size for polynomial subcase */
5765 return 1;
5767 break;
5768 case NEON_3R_VFM:
5769 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
5770 return 1;
5772 break;
5773 default:
5774 break;
5777 if (pairwise && q) {
5778 /* All the pairwise insns UNDEF if Q is set */
5779 return 1;
5782 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5784 if (pairwise) {
5785 /* Pairwise. */
5786 if (pass < 1) {
5787 tmp = neon_load_reg(rn, 0);
5788 tmp2 = neon_load_reg(rn, 1);
5789 } else {
5790 tmp = neon_load_reg(rm, 0);
5791 tmp2 = neon_load_reg(rm, 1);
5793 } else {
5794 /* Elementwise. */
5795 tmp = neon_load_reg(rn, pass);
5796 tmp2 = neon_load_reg(rm, pass);
5798 switch (op) {
5799 case NEON_3R_VHADD:
5800 GEN_NEON_INTEGER_OP(hadd);
5801 break;
5802 case NEON_3R_VQADD:
5803 GEN_NEON_INTEGER_OP_ENV(qadd);
5804 break;
5805 case NEON_3R_VRHADD:
5806 GEN_NEON_INTEGER_OP(rhadd);
5807 break;
5808 case NEON_3R_LOGIC: /* Logic ops. */
5809 switch ((u << 2) | size) {
5810 case 0: /* VAND */
5811 tcg_gen_and_i32(tmp, tmp, tmp2);
5812 break;
5813 case 1: /* BIC */
5814 tcg_gen_andc_i32(tmp, tmp, tmp2);
5815 break;
5816 case 2: /* VORR */
5817 tcg_gen_or_i32(tmp, tmp, tmp2);
5818 break;
5819 case 3: /* VORN */
5820 tcg_gen_orc_i32(tmp, tmp, tmp2);
5821 break;
5822 case 4: /* VEOR */
5823 tcg_gen_xor_i32(tmp, tmp, tmp2);
5824 break;
5825 case 5: /* VBSL */
5826 tmp3 = neon_load_reg(rd, pass);
5827 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
5828 tcg_temp_free_i32(tmp3);
5829 break;
5830 case 6: /* VBIT */
5831 tmp3 = neon_load_reg(rd, pass);
5832 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
5833 tcg_temp_free_i32(tmp3);
5834 break;
5835 case 7: /* VBIF */
5836 tmp3 = neon_load_reg(rd, pass);
5837 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
5838 tcg_temp_free_i32(tmp3);
5839 break;
5841 break;
5842 case NEON_3R_VHSUB:
5843 GEN_NEON_INTEGER_OP(hsub);
5844 break;
5845 case NEON_3R_VQSUB:
5846 GEN_NEON_INTEGER_OP_ENV(qsub);
5847 break;
5848 case NEON_3R_VCGT:
5849 GEN_NEON_INTEGER_OP(cgt);
5850 break;
5851 case NEON_3R_VCGE:
5852 GEN_NEON_INTEGER_OP(cge);
5853 break;
5854 case NEON_3R_VSHL:
5855 GEN_NEON_INTEGER_OP(shl);
5856 break;
5857 case NEON_3R_VQSHL:
5858 GEN_NEON_INTEGER_OP_ENV(qshl);
5859 break;
5860 case NEON_3R_VRSHL:
5861 GEN_NEON_INTEGER_OP(rshl);
5862 break;
5863 case NEON_3R_VQRSHL:
5864 GEN_NEON_INTEGER_OP_ENV(qrshl);
5865 break;
5866 case NEON_3R_VMAX:
5867 GEN_NEON_INTEGER_OP(max);
5868 break;
5869 case NEON_3R_VMIN:
5870 GEN_NEON_INTEGER_OP(min);
5871 break;
5872 case NEON_3R_VABD:
5873 GEN_NEON_INTEGER_OP(abd);
5874 break;
5875 case NEON_3R_VABA:
5876 GEN_NEON_INTEGER_OP(abd);
5877 tcg_temp_free_i32(tmp2);
5878 tmp2 = neon_load_reg(rd, pass);
5879 gen_neon_add(size, tmp, tmp2);
5880 break;
5881 case NEON_3R_VADD_VSUB:
5882 if (!u) { /* VADD */
5883 gen_neon_add(size, tmp, tmp2);
5884 } else { /* VSUB */
5885 switch (size) {
5886 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5887 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5888 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
5889 default: abort();
5892 break;
5893 case NEON_3R_VTST_VCEQ:
5894 if (!u) { /* VTST */
5895 switch (size) {
5896 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5897 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5898 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
5899 default: abort();
5901 } else { /* VCEQ */
5902 switch (size) {
5903 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5904 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5905 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5906 default: abort();
5909 break;
5910 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
5911 switch (size) {
5912 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5913 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5914 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5915 default: abort();
5917 tcg_temp_free_i32(tmp2);
5918 tmp2 = neon_load_reg(rd, pass);
5919 if (u) { /* VMLS */
5920 gen_neon_rsb(size, tmp, tmp2);
5921 } else { /* VMLA */
5922 gen_neon_add(size, tmp, tmp2);
5924 break;
5925 case NEON_3R_VMUL:
5926 if (u) { /* polynomial */
5927 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
5928 } else { /* Integer */
5929 switch (size) {
5930 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5931 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5932 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5933 default: abort();
5936 break;
5937 case NEON_3R_VPMAX:
5938 GEN_NEON_INTEGER_OP(pmax);
5939 break;
5940 case NEON_3R_VPMIN:
5941 GEN_NEON_INTEGER_OP(pmin);
5942 break;
5943 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
5944 if (!u) { /* VQDMULH */
5945 switch (size) {
5946 case 1:
5947 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5948 break;
5949 case 2:
5950 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5951 break;
5952 default: abort();
5954 } else { /* VQRDMULH */
5955 switch (size) {
5956 case 1:
5957 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5958 break;
5959 case 2:
5960 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5961 break;
5962 default: abort();
5965 break;
5966 case NEON_3R_VPADD:
5967 switch (size) {
5968 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5969 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5970 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
5971 default: abort();
5973 break;
5974 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
5976 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5977 switch ((u << 2) | size) {
5978 case 0: /* VADD */
5979 case 4: /* VPADD */
5980 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5981 break;
5982 case 2: /* VSUB */
5983 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
5984 break;
5985 case 6: /* VABD */
5986 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5987 break;
5988 default:
5989 abort();
5991 tcg_temp_free_ptr(fpstatus);
5992 break;
5994 case NEON_3R_FLOAT_MULTIPLY:
5996 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5997 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5998 if (!u) {
5999 tcg_temp_free_i32(tmp2);
6000 tmp2 = neon_load_reg(rd, pass);
6001 if (size == 0) {
6002 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6003 } else {
6004 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6007 tcg_temp_free_ptr(fpstatus);
6008 break;
6010 case NEON_3R_FLOAT_CMP:
6012 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6013 if (!u) {
6014 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6015 } else {
6016 if (size == 0) {
6017 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6018 } else {
6019 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6022 tcg_temp_free_ptr(fpstatus);
6023 break;
6025 case NEON_3R_FLOAT_ACMP:
6027 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6028 if (size == 0) {
6029 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
6030 } else {
6031 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
6033 tcg_temp_free_ptr(fpstatus);
6034 break;
6036 case NEON_3R_FLOAT_MINMAX:
6038 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6039 if (size == 0) {
6040 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
6041 } else {
6042 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
6044 tcg_temp_free_ptr(fpstatus);
6045 break;
6047 case NEON_3R_FLOAT_MISC:
6048 if (u) {
6049 /* VMAXNM/VMINNM */
6050 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6051 if (size == 0) {
6052 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
6053 } else {
6054 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
6056 tcg_temp_free_ptr(fpstatus);
6057 } else {
6058 if (size == 0) {
6059 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6060 } else {
6061 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6064 break;
6065 case NEON_3R_VFM:
6067 /* VFMA, VFMS: fused multiply-add */
6068 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6069 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6070 if (size) {
6071 /* VFMS */
6072 gen_helper_vfp_negs(tmp, tmp);
6074 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6075 tcg_temp_free_i32(tmp3);
6076 tcg_temp_free_ptr(fpstatus);
6077 break;
6079 default:
6080 abort();
6082 tcg_temp_free_i32(tmp2);
6084 /* Save the result. For elementwise operations we can put it
6085 straight into the destination register. For pairwise operations
6086 we have to be careful to avoid clobbering the source operands. */
6087 if (pairwise && rd == rm) {
6088 neon_store_scratch(pass, tmp);
6089 } else {
6090 neon_store_reg(rd, pass, tmp);
6093 } /* for pass */
6094 if (pairwise && rd == rm) {
6095 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6096 tmp = neon_load_scratch(pass);
6097 neon_store_reg(rd, pass, tmp);
6100 /* End of 3 register same size operations. */
6101 } else if (insn & (1 << 4)) {
6102 if ((insn & 0x00380080) != 0) {
6103 /* Two registers and shift. */
6104 op = (insn >> 8) & 0xf;
6105 if (insn & (1 << 7)) {
6106 /* 64-bit shift. */
6107 if (op > 7) {
6108 return 1;
6110 size = 3;
6111 } else {
6112 size = 2;
6113 while ((insn & (1 << (size + 19))) == 0)
6114 size--;
6116 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
6117 /* To avoid excessive duplication of ops we implement shift
6118 by immediate using the variable shift operations. */
6119 if (op < 8) {
6120 /* Shift by immediate:
6121 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
6122 if (q && ((rd | rm) & 1)) {
6123 return 1;
6125 if (!u && (op == 4 || op == 6)) {
6126 return 1;
6128 /* Right shifts are encoded as N - shift, where N is the
6129 element size in bits. */
6130 if (op <= 4)
6131 shift = shift - (1 << (size + 3));
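/* Example: a VSHR.S8 by 3 is encoded with imm6 == 13, which is masked to
 * shift == 5 above; 5 - 8 == -3, and the variable-shift helpers treat the
 * negative count as a right shift by 3. */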
6132 if (size == 3) {
6133 count = q + 1;
6134 } else {
6135 count = q ? 4: 2;
6137 switch (size) {
6138 case 0:
6139 imm = (uint8_t) shift;
6140 imm |= imm << 8;
6141 imm |= imm << 16;
6142 break;
6143 case 1:
6144 imm = (uint16_t) shift;
6145 imm |= imm << 16;
6146 break;
6147 case 2:
6148 case 3:
6149 imm = shift;
6150 break;
6151 default:
6152 abort();
6155 for (pass = 0; pass < count; pass++) {
6156 if (size == 3) {
6157 neon_load_reg64(cpu_V0, rm + pass);
6158 tcg_gen_movi_i64(cpu_V1, imm);
6159 switch (op) {
6160 case 0: /* VSHR */
6161 case 1: /* VSRA */
6162 if (u)
6163 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6164 else
6165 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
6166 break;
6167 case 2: /* VRSHR */
6168 case 3: /* VRSRA */
6169 if (u)
6170 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
6171 else
6172 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
6173 break;
6174 case 4: /* VSRI */
6175 case 5: /* VSHL, VSLI */
6176 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6177 break;
6178 case 6: /* VQSHLU */
6179 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6180 cpu_V0, cpu_V1);
6181 break;
6182 case 7: /* VQSHL */
6183 if (u) {
6184 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
6185 cpu_V0, cpu_V1);
6186 } else {
6187 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
6188 cpu_V0, cpu_V1);
6190 break;
6192 if (op == 1 || op == 3) {
6193 /* Accumulate. */
6194 neon_load_reg64(cpu_V1, rd + pass);
6195 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6196 } else if (op == 4 || (op == 5 && u)) {
6197 /* Insert */
6198 neon_load_reg64(cpu_V1, rd + pass);
6199 uint64_t mask;
6200 if (shift < -63 || shift > 63) {
6201 mask = 0;
6202 } else {
6203 if (op == 4) {
6204 mask = 0xffffffffffffffffull >> -shift;
6205 } else {
6206 mask = 0xffffffffffffffffull << shift;
6209 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6210 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6212 neon_store_reg64(cpu_V0, rd + pass);
6213 } else { /* size < 3 */
6214 /* Operands in T0 and T1. */
6215 tmp = neon_load_reg(rm, pass);
6216 tmp2 = tcg_temp_new_i32();
6217 tcg_gen_movi_i32(tmp2, imm);
6218 switch (op) {
6219 case 0: /* VSHR */
6220 case 1: /* VSRA */
6221 GEN_NEON_INTEGER_OP(shl);
6222 break;
6223 case 2: /* VRSHR */
6224 case 3: /* VRSRA */
6225 GEN_NEON_INTEGER_OP(rshl);
6226 break;
6227 case 4: /* VSRI */
6228 case 5: /* VSHL, VSLI */
6229 switch (size) {
6230 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6231 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6232 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
6233 default: abort();
6235 break;
6236 case 6: /* VQSHLU */
6237 switch (size) {
6238 case 0:
6239 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6240 tmp, tmp2);
6241 break;
6242 case 1:
6243 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6244 tmp, tmp2);
6245 break;
6246 case 2:
6247 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6248 tmp, tmp2);
6249 break;
6250 default:
6251 abort();
6253 break;
6254 case 7: /* VQSHL */
6255 GEN_NEON_INTEGER_OP_ENV(qshl);
6256 break;
6258 tcg_temp_free_i32(tmp2);
6260 if (op == 1 || op == 3) {
6261 /* Accumulate. */
6262 tmp2 = neon_load_reg(rd, pass);
6263 gen_neon_add(size, tmp, tmp2);
6264 tcg_temp_free_i32(tmp2);
6265 } else if (op == 4 || (op == 5 && u)) {
6266 /* Insert */
6267 switch (size) {
6268 case 0:
6269 if (op == 4)
6270 mask = 0xff >> -shift;
6271 else
6272 mask = (uint8_t)(0xff << shift);
6273 mask |= mask << 8;
6274 mask |= mask << 16;
6275 break;
6276 case 1:
6277 if (op == 4)
6278 mask = 0xffff >> -shift;
6279 else
6280 mask = (uint16_t)(0xffff << shift);
6281 mask |= mask << 16;
6282 break;
6283 case 2:
6284 if (shift < -31 || shift > 31) {
6285 mask = 0;
6286 } else {
6287 if (op == 4)
6288 mask = 0xffffffffu >> -shift;
6289 else
6290 mask = 0xffffffffu << shift;
6292 break;
6293 default:
6294 abort();
6296 tmp2 = neon_load_reg(rd, pass);
6297 tcg_gen_andi_i32(tmp, tmp, mask);
6298 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
6299 tcg_gen_or_i32(tmp, tmp, tmp2);
6300 tcg_temp_free_i32(tmp2);
6302 neon_store_reg(rd, pass, tmp);
6304 } /* for pass */
6305 } else if (op < 10) {
6306 /* Shift by immediate and narrow:
6307 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
6308 int input_unsigned = (op == 8) ? !u : u;
6309 if (rm & 1) {
6310 return 1;
6312 shift = shift - (1 << (size + 3));
6313 size++;
6314 if (size == 3) {
6315 tmp64 = tcg_const_i64(shift);
6316 neon_load_reg64(cpu_V0, rm);
6317 neon_load_reg64(cpu_V1, rm + 1);
6318 for (pass = 0; pass < 2; pass++) {
6319 TCGv_i64 in;
6320 if (pass == 0) {
6321 in = cpu_V0;
6322 } else {
6323 in = cpu_V1;
6325 if (q) {
6326 if (input_unsigned) {
6327 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
6328 } else {
6329 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
6331 } else {
6332 if (input_unsigned) {
6333 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
6334 } else {
6335 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
6338 tmp = tcg_temp_new_i32();
6339 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6340 neon_store_reg(rd, pass, tmp);
6341 } /* for pass */
6342 tcg_temp_free_i64(tmp64);
6343 } else {
6344 if (size == 1) {
6345 imm = (uint16_t)shift;
6346 imm |= imm << 16;
6347 } else {
6348 /* size == 2 */
6349 imm = (uint32_t)shift;
6351 tmp2 = tcg_const_i32(imm);
6352 tmp4 = neon_load_reg(rm + 1, 0);
6353 tmp5 = neon_load_reg(rm + 1, 1);
6354 for (pass = 0; pass < 2; pass++) {
6355 if (pass == 0) {
6356 tmp = neon_load_reg(rm, 0);
6357 } else {
6358 tmp = tmp4;
6360 gen_neon_shift_narrow(size, tmp, tmp2, q,
6361 input_unsigned);
6362 if (pass == 0) {
6363 tmp3 = neon_load_reg(rm, 1);
6364 } else {
6365 tmp3 = tmp5;
6367 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6368 input_unsigned);
6369 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
6370 tcg_temp_free_i32(tmp);
6371 tcg_temp_free_i32(tmp3);
6372 tmp = tcg_temp_new_i32();
6373 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6374 neon_store_reg(rd, pass, tmp);
6375 } /* for pass */
6376 tcg_temp_free_i32(tmp2);
6378 } else if (op == 10) {
6379 /* VSHLL, VMOVL */
6380 if (q || (rd & 1)) {
6381 return 1;
6383 tmp = neon_load_reg(rm, 0);
6384 tmp2 = neon_load_reg(rm, 1);
6385 for (pass = 0; pass < 2; pass++) {
6386 if (pass == 1)
6387 tmp = tmp2;
6389 gen_neon_widen(cpu_V0, tmp, size, u);
6391 if (shift != 0) {
6392 /* The shift is less than the width of the source
6393 type, so we can just shift the whole register. */
6394 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
6395 /* Widen the result of shift: we need to clear
6396 * the potential overflow bits resulting from
6397 * left bits of the narrow input appearing as
6398 * right bits of the left neighbour narrow
6399 * input. */
6400 if (size < 2 || !u) {
6401 uint64_t imm64;
6402 if (size == 0) {
6403 imm = (0xffu >> (8 - shift));
6404 imm |= imm << 16;
6405 } else if (size == 1) {
6406 imm = 0xffff >> (16 - shift);
6407 } else {
6408 /* size == 2 */
6409 imm = 0xffffffff >> (32 - shift);
6411 if (size < 2) {
6412 imm64 = imm | (((uint64_t)imm) << 32);
6413 } else {
6414 imm64 = imm;
6416 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
6419 neon_store_reg64(cpu_V0, rd + pass);
6421 } else if (op >= 14) {
6422 /* VCVT fixed-point. */
6423 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6424 return 1;
6426 /* We have already masked out the must-be-1 top bit of imm6,
6427 * hence this 32-shift where the ARM ARM has 64-imm6. */
6429 shift = 32 - shift;
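/* A worked example of the remapping described above: imm6 = 0b110000
 * (48) reaches this point as shift = 16 once the must-be-1 top bit is
 * masked off, so 32 - shift = 16, matching the 64 - imm6 = 16
 * fraction bits of the ARM ARM description.
 */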
6430 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6431 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
6432 if (!(op & 1)) {
6433 if (u)
6434 gen_vfp_ulto(0, shift, 1);
6435 else
6436 gen_vfp_slto(0, shift, 1);
6437 } else {
6438 if (u)
6439 gen_vfp_toul(0, shift, 1);
6440 else
6441 gen_vfp_tosl(0, shift, 1);
6443 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
6445 } else {
6446 return 1;
6448 } else { /* (insn & 0x00380080) == 0 */
6449 int invert;
6450 if (q && (rd & 1)) {
6451 return 1;
6454 op = (insn >> 8) & 0xf;
6455 /* One register and immediate. */
6456 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6457 invert = (insn & (1 << 5)) != 0;
6458 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6459 * We choose to not special-case this and will behave as if a
6460 * valid constant encoding of 0 had been given. */
6462 switch (op) {
6463 case 0: case 1:
6464 /* no-op */
6465 break;
6466 case 2: case 3:
6467 imm <<= 8;
6468 break;
6469 case 4: case 5:
6470 imm <<= 16;
6471 break;
6472 case 6: case 7:
6473 imm <<= 24;
6474 break;
6475 case 8: case 9:
6476 imm |= imm << 16;
6477 break;
6478 case 10: case 11:
6479 imm = (imm << 8) | (imm << 24);
6480 break;
6481 case 12:
6482 imm = (imm << 8) | 0xff;
6483 break;
6484 case 13:
6485 imm = (imm << 16) | 0xffff;
6486 break;
6487 case 14:
6488 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6489 if (invert)
6490 imm = ~imm;
6491 break;
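/* Case 15 below assembles the single-precision FP modified immediate:
 * the 8-bit pattern abcdefgh expands to a : NOT(b) : bbbbb : cdefgh
 * followed by 19 zero bits.  For example imm = 0x70 gives
 * (0x70 & 0x3f) << 19 = 0x01800000 plus 0x1f << 25 = 0x3e000000,
 * i.e. 0x3f800000, which is 1.0f.
 */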
6492 case 15:
6493 if (invert) {
6494 return 1;
6496 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6497 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6498 break;
6500 if (invert)
6501 imm = ~imm;
6503 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6504 if (op & 1 && op < 12) {
6505 tmp = neon_load_reg(rd, pass);
6506 if (invert) {
6507 /* The immediate value has already been inverted, so
6508 BIC becomes AND. */
6509 tcg_gen_andi_i32(tmp, tmp, imm);
6510 } else {
6511 tcg_gen_ori_i32(tmp, tmp, imm);
6513 } else {
6514 /* VMOV, VMVN. */
6515 tmp = tcg_temp_new_i32();
6516 if (op == 14 && invert) {
6517 int n;
6518 uint32_t val;
6519 val = 0;
6520 for (n = 0; n < 4; n++) {
6521 if (imm & (1 << (n + (pass & 1) * 4)))
6522 val |= 0xff << (n * 8);
6524 tcg_gen_movi_i32(tmp, val);
6525 } else {
6526 tcg_gen_movi_i32(tmp, imm);
6529 neon_store_reg(rd, pass, tmp);
6532 } else { /* (insn & 0x00800010 == 0x00800000) */
6533 if (size != 3) {
6534 op = (insn >> 8) & 0xf;
6535 if ((insn & (1 << 6)) == 0) {
6536 /* Three registers of different lengths. */
6537 int src1_wide;
6538 int src2_wide;
6539 int prewiden;
6540 /* undefreq: bit 0 : UNDEF if size == 0
6541 * bit 1 : UNDEF if size == 1
6542 * bit 2 : UNDEF if size == 2
6543 * bit 3 : UNDEF if U == 1
6544 * Note that [2:0] set implies 'always UNDEF' */
6546 int undefreq;
6547 /* prewiden, src1_wide, src2_wide, undefreq */
6548 static const int neon_3reg_wide[16][4] = {
6549 {1, 0, 0, 0}, /* VADDL */
6550 {1, 1, 0, 0}, /* VADDW */
6551 {1, 0, 0, 0}, /* VSUBL */
6552 {1, 1, 0, 0}, /* VSUBW */
6553 {0, 1, 1, 0}, /* VADDHN */
6554 {0, 0, 0, 0}, /* VABAL */
6555 {0, 1, 1, 0}, /* VSUBHN */
6556 {0, 0, 0, 0}, /* VABDL */
6557 {0, 0, 0, 0}, /* VMLAL */
6558 {0, 0, 0, 9}, /* VQDMLAL */
6559 {0, 0, 0, 0}, /* VMLSL */
6560 {0, 0, 0, 9}, /* VQDMLSL */
6561 {0, 0, 0, 0}, /* Integer VMULL */
6562 {0, 0, 0, 1}, /* VQDMULL */
6563 {0, 0, 0, 0xa}, /* Polynomial VMULL */
6564 {0, 0, 0, 7}, /* Reserved: always UNDEF */
6567 prewiden = neon_3reg_wide[op][0];
6568 src1_wide = neon_3reg_wide[op][1];
6569 src2_wide = neon_3reg_wide[op][2];
6570 undefreq = neon_3reg_wide[op][3];
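/* Reading the table: VQDMLAL/VQDMLSL use undefreq = 9 (binary 1001),
 * i.e. UNDEF when size == 0 or U == 1; polynomial VMULL's 0xa swaps
 * that for size == 1, and the reserved row's 7 sets all three size
 * bits, making it UNDEF unconditionally as noted above.
 */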
6572 if ((undefreq & (1 << size)) ||
6573 ((undefreq & 8) && u)) {
6574 return 1;
6576 if ((src1_wide && (rn & 1)) ||
6577 (src2_wide && (rm & 1)) ||
6578 (!src2_wide && (rd & 1))) {
6579 return 1;
6582 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6583 * outside the loop below as it only performs a single pass. */
6585 if (op == 14 && size == 2) {
6586 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6588 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
6589 return 1;
6591 tcg_rn = tcg_temp_new_i64();
6592 tcg_rm = tcg_temp_new_i64();
6593 tcg_rd = tcg_temp_new_i64();
6594 neon_load_reg64(tcg_rn, rn);
6595 neon_load_reg64(tcg_rm, rm);
6596 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6597 neon_store_reg64(tcg_rd, rd);
6598 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6599 neon_store_reg64(tcg_rd, rd + 1);
6600 tcg_temp_free_i64(tcg_rn);
6601 tcg_temp_free_i64(tcg_rm);
6602 tcg_temp_free_i64(tcg_rd);
6603 return 0;
6606 /* Avoid overlapping operands. Wide source operands are
6607 always aligned so will never overlap with wide
6608 destinations in problematic ways. */
6609 if (rd == rm && !src2_wide) {
6610 tmp = neon_load_reg(rm, 1);
6611 neon_store_scratch(2, tmp);
6612 } else if (rd == rn && !src1_wide) {
6613 tmp = neon_load_reg(rn, 1);
6614 neon_store_scratch(2, tmp);
6616 TCGV_UNUSED_I32(tmp3);
6617 for (pass = 0; pass < 2; pass++) {
6618 if (src1_wide) {
6619 neon_load_reg64(cpu_V0, rn + pass);
6620 TCGV_UNUSED_I32(tmp);
6621 } else {
6622 if (pass == 1 && rd == rn) {
6623 tmp = neon_load_scratch(2);
6624 } else {
6625 tmp = neon_load_reg(rn, pass);
6627 if (prewiden) {
6628 gen_neon_widen(cpu_V0, tmp, size, u);
6631 if (src2_wide) {
6632 neon_load_reg64(cpu_V1, rm + pass);
6633 TCGV_UNUSED_I32(tmp2);
6634 } else {
6635 if (pass == 1 && rd == rm) {
6636 tmp2 = neon_load_scratch(2);
6637 } else {
6638 tmp2 = neon_load_reg(rm, pass);
6640 if (prewiden) {
6641 gen_neon_widen(cpu_V1, tmp2, size, u);
6644 switch (op) {
6645 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6646 gen_neon_addl(size);
6647 break;
6648 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6649 gen_neon_subl(size);
6650 break;
6651 case 5: case 7: /* VABAL, VABDL */
6652 switch ((size << 1) | u) {
6653 case 0:
6654 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6655 break;
6656 case 1:
6657 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6658 break;
6659 case 2:
6660 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6661 break;
6662 case 3:
6663 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6664 break;
6665 case 4:
6666 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6667 break;
6668 case 5:
6669 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6670 break;
6671 default: abort();
6673 tcg_temp_free_i32(tmp2);
6674 tcg_temp_free_i32(tmp);
6675 break;
6676 case 8: case 9: case 10: case 11: case 12: case 13:
6677 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6678 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6679 break;
6680 case 14: /* Polynomial VMULL */
6681 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
6682 tcg_temp_free_i32(tmp2);
6683 tcg_temp_free_i32(tmp);
6684 break;
6685 default: /* 15 is RESERVED: caught earlier */
6686 abort();
6688 if (op == 13) {
6689 /* VQDMULL */
6690 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6691 neon_store_reg64(cpu_V0, rd + pass);
6692 } else if (op == 5 || (op >= 8 && op <= 11)) {
6693 /* Accumulate. */
6694 neon_load_reg64(cpu_V1, rd + pass);
6695 switch (op) {
6696 case 10: /* VMLSL */
6697 gen_neon_negl(cpu_V0, size);
6698 /* Fall through */
6699 case 5: case 8: /* VABAL, VMLAL */
6700 gen_neon_addl(size);
6701 break;
6702 case 9: case 11: /* VQDMLAL, VQDMLSL */
6703 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6704 if (op == 11) {
6705 gen_neon_negl(cpu_V0, size);
6707 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6708 break;
6709 default:
6710 abort();
6712 neon_store_reg64(cpu_V0, rd + pass);
6713 } else if (op == 4 || op == 6) {
6714 /* Narrowing operation. */
6715 tmp = tcg_temp_new_i32();
6716 if (!u) {
6717 switch (size) {
6718 case 0:
6719 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6720 break;
6721 case 1:
6722 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6723 break;
6724 case 2:
6725 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6726 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6727 break;
6728 default: abort();
6730 } else {
6731 switch (size) {
6732 case 0:
6733 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6734 break;
6735 case 1:
6736 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6737 break;
6738 case 2:
6739 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6740 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6741 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6742 break;
6743 default: abort();
6746 if (pass == 0) {
6747 tmp3 = tmp;
6748 } else {
6749 neon_store_reg(rd, 0, tmp3);
6750 neon_store_reg(rd, 1, tmp);
6752 } else {
6753 /* Write back the result. */
6754 neon_store_reg64(cpu_V0, rd + pass);
6757 } else {
6758 /* Two registers and a scalar. NB that for ops of this form
6759 * the ARM ARM labels bit 24 as Q, but it is in our variable
6760 * 'u', not 'q'. */
6762 if (size == 0) {
6763 return 1;
6765 switch (op) {
6766 case 1: /* Float VMLA scalar */
6767 case 5: /* Floating point VMLS scalar */
6768 case 9: /* Floating point VMUL scalar */
6769 if (size == 1) {
6770 return 1;
6772 /* fall through */
6773 case 0: /* Integer VMLA scalar */
6774 case 4: /* Integer VMLS scalar */
6775 case 8: /* Integer VMUL scalar */
6776 case 12: /* VQDMULH scalar */
6777 case 13: /* VQRDMULH scalar */
6778 if (u && ((rd | rn) & 1)) {
6779 return 1;
6781 tmp = neon_get_scalar(size, rm);
6782 neon_store_scratch(0, tmp);
6783 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6784 tmp = neon_load_scratch(0);
6785 tmp2 = neon_load_reg(rn, pass);
6786 if (op == 12) {
6787 if (size == 1) {
6788 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6789 } else {
6790 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6792 } else if (op == 13) {
6793 if (size == 1) {
6794 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6795 } else {
6796 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6798 } else if (op & 1) {
6799 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6800 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6801 tcg_temp_free_ptr(fpstatus);
6802 } else {
6803 switch (size) {
6804 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6805 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6806 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6807 default: abort();
6810 tcg_temp_free_i32(tmp2);
6811 if (op < 8) {
6812 /* Accumulate. */
6813 tmp2 = neon_load_reg(rd, pass);
6814 switch (op) {
6815 case 0:
6816 gen_neon_add(size, tmp, tmp2);
6817 break;
6818 case 1:
6820 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6821 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6822 tcg_temp_free_ptr(fpstatus);
6823 break;
6825 case 4:
6826 gen_neon_rsb(size, tmp, tmp2);
6827 break;
6828 case 5:
6830 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6831 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6832 tcg_temp_free_ptr(fpstatus);
6833 break;
6835 default:
6836 abort();
6838 tcg_temp_free_i32(tmp2);
6840 neon_store_reg(rd, pass, tmp);
6842 break;
6843 case 3: /* VQDMLAL scalar */
6844 case 7: /* VQDMLSL scalar */
6845 case 11: /* VQDMULL scalar */
6846 if (u == 1) {
6847 return 1;
6849 /* fall through */
6850 case 2: /* VMLAL scalar */
6851 case 6: /* VMLSL scalar */
6852 case 10: /* VMULL scalar */
6853 if (rd & 1) {
6854 return 1;
6856 tmp2 = neon_get_scalar(size, rm);
6857 /* We need a copy of tmp2 because gen_neon_mull
6858 * deletes it during pass 0. */
6859 tmp4 = tcg_temp_new_i32();
6860 tcg_gen_mov_i32(tmp4, tmp2);
6861 tmp3 = neon_load_reg(rn, 1);
6863 for (pass = 0; pass < 2; pass++) {
6864 if (pass == 0) {
6865 tmp = neon_load_reg(rn, 0);
6866 } else {
6867 tmp = tmp3;
6868 tmp2 = tmp4;
6870 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6871 if (op != 11) {
6872 neon_load_reg64(cpu_V1, rd + pass);
6874 switch (op) {
6875 case 6:
6876 gen_neon_negl(cpu_V0, size);
6877 /* Fall through */
6878 case 2:
6879 gen_neon_addl(size);
6880 break;
6881 case 3: case 7:
6882 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6883 if (op == 7) {
6884 gen_neon_negl(cpu_V0, size);
6886 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6887 break;
6888 case 10:
6889 /* no-op */
6890 break;
6891 case 11:
6892 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6893 break;
6894 default:
6895 abort();
6897 neon_store_reg64(cpu_V0, rd + pass);
6901 break;
6902 default: /* 14 and 15 are RESERVED */
6903 return 1;
6906 } else { /* size == 3 */
6907 if (!u) {
6908 /* Extract. */
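/* The result is the (q ? 16 : 8)-byte window starting at byte <imm>
 * of the concatenation Vm:Vn, with Vn supplying the low bytes; the
 * shift/or pairs below assemble that window 64 bits at a time.
 */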
6909 imm = (insn >> 8) & 0xf;
6911 if (imm > 7 && !q)
6912 return 1;
6914 if (q && ((rd | rn | rm) & 1)) {
6915 return 1;
6918 if (imm == 0) {
6919 neon_load_reg64(cpu_V0, rn);
6920 if (q) {
6921 neon_load_reg64(cpu_V1, rn + 1);
6923 } else if (imm == 8) {
6924 neon_load_reg64(cpu_V0, rn + 1);
6925 if (q) {
6926 neon_load_reg64(cpu_V1, rm);
6928 } else if (q) {
6929 tmp64 = tcg_temp_new_i64();
6930 if (imm < 8) {
6931 neon_load_reg64(cpu_V0, rn);
6932 neon_load_reg64(tmp64, rn + 1);
6933 } else {
6934 neon_load_reg64(cpu_V0, rn + 1);
6935 neon_load_reg64(tmp64, rm);
6937 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
6938 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
6939 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6940 if (imm < 8) {
6941 neon_load_reg64(cpu_V1, rm);
6942 } else {
6943 neon_load_reg64(cpu_V1, rm + 1);
6944 imm -= 8;
6946 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6947 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6948 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
6949 tcg_temp_free_i64(tmp64);
6950 } else {
6951 /* BUGFIX */
6952 neon_load_reg64(cpu_V0, rn);
6953 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
6954 neon_load_reg64(cpu_V1, rm);
6955 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6956 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6958 neon_store_reg64(cpu_V0, rd);
6959 if (q) {
6960 neon_store_reg64(cpu_V1, rd + 1);
6962 } else if ((insn & (1 << 11)) == 0) {
6963 /* Two register misc. */
6964 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6965 size = (insn >> 18) & 3;
6966 /* UNDEF for unknown op values and bad op-size combinations */
6967 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6968 return 1;
6970 if (neon_2rm_is_v8_op(op) &&
6971 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6972 return 1;
6974 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6975 q && ((rm | rd) & 1)) {
6976 return 1;
6978 switch (op) {
6979 case NEON_2RM_VREV64:
6980 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6981 tmp = neon_load_reg(rm, pass * 2);
6982 tmp2 = neon_load_reg(rm, pass * 2 + 1);
6983 switch (size) {
6984 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6985 case 1: gen_swap_half(tmp); break;
6986 case 2: /* no-op */ break;
6987 default: abort();
6989 neon_store_reg(rd, pass * 2 + 1, tmp);
6990 if (size == 2) {
6991 neon_store_reg(rd, pass * 2, tmp2);
6992 } else {
6993 switch (size) {
6994 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6995 case 1: gen_swap_half(tmp2); break;
6996 default: abort();
6998 neon_store_reg(rd, pass * 2, tmp2);
7001 break;
7002 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7003 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
7004 for (pass = 0; pass < q + 1; pass++) {
7005 tmp = neon_load_reg(rm, pass * 2);
7006 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7007 tmp = neon_load_reg(rm, pass * 2 + 1);
7008 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7009 switch (size) {
7010 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7011 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7012 case 2: tcg_gen_add_i64(CPU_V001); break;
7013 default: abort();
7015 if (op >= NEON_2RM_VPADAL) {
7016 /* Accumulate. */
7017 neon_load_reg64(cpu_V1, rd + pass);
7018 gen_neon_addl(size);
7020 neon_store_reg64(cpu_V0, rd + pass);
7022 break;
7023 case NEON_2RM_VTRN:
7024 if (size == 2) {
7025 int n;
7026 for (n = 0; n < (q ? 4 : 2); n += 2) {
7027 tmp = neon_load_reg(rm, n);
7028 tmp2 = neon_load_reg(rd, n + 1);
7029 neon_store_reg(rm, n, tmp2);
7030 neon_store_reg(rd, n + 1, tmp);
7032 } else {
7033 goto elementwise;
7035 break;
7036 case NEON_2RM_VUZP:
7037 if (gen_neon_unzip(rd, rm, size, q)) {
7038 return 1;
7040 break;
7041 case NEON_2RM_VZIP:
7042 if (gen_neon_zip(rd, rm, size, q)) {
7043 return 1;
7045 break;
7046 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7047 /* also VQMOVUN; op field and mnemonics don't line up */
7048 if (rm & 1) {
7049 return 1;
7051 TCGV_UNUSED_I32(tmp2);
7052 for (pass = 0; pass < 2; pass++) {
7053 neon_load_reg64(cpu_V0, rm + pass);
7054 tmp = tcg_temp_new_i32();
7055 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7056 tmp, cpu_V0);
7057 if (pass == 0) {
7058 tmp2 = tmp;
7059 } else {
7060 neon_store_reg(rd, 0, tmp2);
7061 neon_store_reg(rd, 1, tmp);
7064 break;
7065 case NEON_2RM_VSHLL:
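/* This is the VSHLL form whose shift amount equals the source element
 * width: each element is widened and then shifted left by 8 << size
 * bits (8, 16 or 32).
 */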
7066 if (q || (rd & 1)) {
7067 return 1;
7069 tmp = neon_load_reg(rm, 0);
7070 tmp2 = neon_load_reg(rm, 1);
7071 for (pass = 0; pass < 2; pass++) {
7072 if (pass == 1)
7073 tmp = tmp2;
7074 gen_neon_widen(cpu_V0, tmp, size, 1);
7075 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
7076 neon_store_reg64(cpu_V0, rd + pass);
7078 break;
7079 case NEON_2RM_VCVT_F16_F32:
7080 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
7081 q || (rm & 1)) {
7082 return 1;
7084 tmp = tcg_temp_new_i32();
7085 tmp2 = tcg_temp_new_i32();
7086 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
7087 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
7088 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
7089 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
7090 tcg_gen_shli_i32(tmp2, tmp2, 16);
7091 tcg_gen_or_i32(tmp2, tmp2, tmp);
7092 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
7093 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
7094 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7095 neon_store_reg(rd, 0, tmp2);
7096 tmp2 = tcg_temp_new_i32();
7097 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
7098 tcg_gen_shli_i32(tmp2, tmp2, 16);
7099 tcg_gen_or_i32(tmp2, tmp2, tmp);
7100 neon_store_reg(rd, 1, tmp2);
7101 tcg_temp_free_i32(tmp);
7102 break;
7103 case NEON_2RM_VCVT_F32_F16:
7104 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
7105 q || (rd & 1)) {
7106 return 1;
7108 tmp3 = tcg_temp_new_i32();
7109 tmp = neon_load_reg(rm, 0);
7110 tmp2 = neon_load_reg(rm, 1);
7111 tcg_gen_ext16u_i32(tmp3, tmp);
7112 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
7113 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7114 tcg_gen_shri_i32(tmp3, tmp, 16);
7115 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
7116 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7117 tcg_temp_free_i32(tmp);
7118 tcg_gen_ext16u_i32(tmp3, tmp2);
7119 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
7120 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7121 tcg_gen_shri_i32(tmp3, tmp2, 16);
7122 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
7123 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7124 tcg_temp_free_i32(tmp2);
7125 tcg_temp_free_i32(tmp3);
7126 break;
7127 case NEON_2RM_AESE: case NEON_2RM_AESMC:
7128 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
7129 || ((rm | rd) & 1)) {
7130 return 1;
7132 tmp = tcg_const_i32(rd);
7133 tmp2 = tcg_const_i32(rm);
7135 /* Bit 6 is the lowest opcode bit; it distinguishes between
7136 * encryption (AESE/AESMC) and decryption (AESD/AESIMC) */
7138 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7140 if (op == NEON_2RM_AESE) {
7141 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
7142 } else {
7143 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
7145 tcg_temp_free_i32(tmp);
7146 tcg_temp_free_i32(tmp2);
7147 tcg_temp_free_i32(tmp3);
7148 break;
7149 case NEON_2RM_SHA1H:
7150 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
7151 || ((rm | rd) & 1)) {
7152 return 1;
7154 tmp = tcg_const_i32(rd);
7155 tmp2 = tcg_const_i32(rm);
7157 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
7159 tcg_temp_free_i32(tmp);
7160 tcg_temp_free_i32(tmp2);
7161 break;
7162 case NEON_2RM_SHA1SU1:
7163 if ((rm | rd) & 1) {
7164 return 1;
7166 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7167 if (q) {
7168 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
7169 return 1;
7171 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
7172 return 1;
7174 tmp = tcg_const_i32(rd);
7175 tmp2 = tcg_const_i32(rm);
7176 if (q) {
7177 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
7178 } else {
7179 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
7181 tcg_temp_free_i32(tmp);
7182 tcg_temp_free_i32(tmp2);
7183 break;
7184 default:
7185 elementwise:
7186 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7187 if (neon_2rm_is_float_op(op)) {
7188 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7189 neon_reg_offset(rm, pass));
7190 TCGV_UNUSED_I32(tmp);
7191 } else {
7192 tmp = neon_load_reg(rm, pass);
7194 switch (op) {
7195 case NEON_2RM_VREV32:
7196 switch (size) {
7197 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7198 case 1: gen_swap_half(tmp); break;
7199 default: abort();
7201 break;
7202 case NEON_2RM_VREV16:
7203 gen_rev16(tmp);
7204 break;
7205 case NEON_2RM_VCLS:
7206 switch (size) {
7207 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7208 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7209 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
7210 default: abort();
7212 break;
7213 case NEON_2RM_VCLZ:
7214 switch (size) {
7215 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7216 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7217 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
7218 default: abort();
7220 break;
7221 case NEON_2RM_VCNT:
7222 gen_helper_neon_cnt_u8(tmp, tmp);
7223 break;
7224 case NEON_2RM_VMVN:
7225 tcg_gen_not_i32(tmp, tmp);
7226 break;
7227 case NEON_2RM_VQABS:
7228 switch (size) {
7229 case 0:
7230 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7231 break;
7232 case 1:
7233 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7234 break;
7235 case 2:
7236 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7237 break;
7238 default: abort();
7240 break;
7241 case NEON_2RM_VQNEG:
7242 switch (size) {
7243 case 0:
7244 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7245 break;
7246 case 1:
7247 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7248 break;
7249 case 2:
7250 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7251 break;
7252 default: abort();
7254 break;
7255 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
7256 tmp2 = tcg_const_i32(0);
7257 switch(size) {
7258 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7259 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7260 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
7261 default: abort();
7263 tcg_temp_free_i32(tmp2);
7264 if (op == NEON_2RM_VCLE0) {
7265 tcg_gen_not_i32(tmp, tmp);
7267 break;
7268 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
7269 tmp2 = tcg_const_i32(0);
7270 switch(size) {
7271 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7272 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7273 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
7274 default: abort();
7276 tcg_temp_free_i32(tmp2);
7277 if (op == NEON_2RM_VCLT0) {
7278 tcg_gen_not_i32(tmp, tmp);
7280 break;
7281 case NEON_2RM_VCEQ0:
7282 tmp2 = tcg_const_i32(0);
7283 switch(size) {
7284 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7285 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7286 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
7287 default: abort();
7289 tcg_temp_free_i32(tmp2);
7290 break;
7291 case NEON_2RM_VABS:
7292 switch(size) {
7293 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7294 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7295 case 2: tcg_gen_abs_i32(tmp, tmp); break;
7296 default: abort();
7298 break;
7299 case NEON_2RM_VNEG:
7300 tmp2 = tcg_const_i32(0);
7301 gen_neon_rsb(size, tmp, tmp2);
7302 tcg_temp_free_i32(tmp2);
7303 break;
7304 case NEON_2RM_VCGT0_F:
7306 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7307 tmp2 = tcg_const_i32(0);
7308 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
7309 tcg_temp_free_i32(tmp2);
7310 tcg_temp_free_ptr(fpstatus);
7311 break;
7313 case NEON_2RM_VCGE0_F:
7315 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7316 tmp2 = tcg_const_i32(0);
7317 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
7318 tcg_temp_free_i32(tmp2);
7319 tcg_temp_free_ptr(fpstatus);
7320 break;
7322 case NEON_2RM_VCEQ0_F:
7324 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7325 tmp2 = tcg_const_i32(0);
7326 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
7327 tcg_temp_free_i32(tmp2);
7328 tcg_temp_free_ptr(fpstatus);
7329 break;
7331 case NEON_2RM_VCLE0_F:
7333 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7334 tmp2 = tcg_const_i32(0);
7335 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
7336 tcg_temp_free_i32(tmp2);
7337 tcg_temp_free_ptr(fpstatus);
7338 break;
7340 case NEON_2RM_VCLT0_F:
7342 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7343 tmp2 = tcg_const_i32(0);
7344 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
7345 tcg_temp_free_i32(tmp2);
7346 tcg_temp_free_ptr(fpstatus);
7347 break;
7349 case NEON_2RM_VABS_F:
7350 gen_vfp_abs(0);
7351 break;
7352 case NEON_2RM_VNEG_F:
7353 gen_vfp_neg(0);
7354 break;
7355 case NEON_2RM_VSWP:
7356 tmp2 = neon_load_reg(rd, pass);
7357 neon_store_reg(rm, pass, tmp2);
7358 break;
7359 case NEON_2RM_VTRN:
7360 tmp2 = neon_load_reg(rd, pass);
7361 switch (size) {
7362 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7363 case 1: gen_neon_trn_u16(tmp, tmp2); break;
7364 default: abort();
7366 neon_store_reg(rm, pass, tmp2);
7367 break;
7368 case NEON_2RM_VRINTN:
7369 case NEON_2RM_VRINTA:
7370 case NEON_2RM_VRINTM:
7371 case NEON_2RM_VRINTP:
7372 case NEON_2RM_VRINTZ:
7374 TCGv_i32 tcg_rmode;
7375 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7376 int rmode;
7378 if (op == NEON_2RM_VRINTZ) {
7379 rmode = FPROUNDING_ZERO;
7380 } else {
7381 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7384 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7385 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7386 cpu_env);
7387 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7388 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7389 cpu_env);
7390 tcg_temp_free_ptr(fpstatus);
7391 tcg_temp_free_i32(tcg_rmode);
7392 break;
7394 case NEON_2RM_VRINTX:
7396 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7397 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7398 tcg_temp_free_ptr(fpstatus);
7399 break;
7401 case NEON_2RM_VCVTAU:
7402 case NEON_2RM_VCVTAS:
7403 case NEON_2RM_VCVTNU:
7404 case NEON_2RM_VCVTNS:
7405 case NEON_2RM_VCVTPU:
7406 case NEON_2RM_VCVTPS:
7407 case NEON_2RM_VCVTMU:
7408 case NEON_2RM_VCVTMS:
7410 bool is_signed = !extract32(insn, 7, 1);
7411 TCGv_ptr fpst = get_fpstatus_ptr(1);
7412 TCGv_i32 tcg_rmode, tcg_shift;
7413 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7415 tcg_shift = tcg_const_i32(0);
7416 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7417 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7418 cpu_env);
7420 if (is_signed) {
7421 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7422 tcg_shift, fpst);
7423 } else {
7424 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7425 tcg_shift, fpst);
7428 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7429 cpu_env);
7430 tcg_temp_free_i32(tcg_rmode);
7431 tcg_temp_free_i32(tcg_shift);
7432 tcg_temp_free_ptr(fpst);
7433 break;
7435 case NEON_2RM_VRECPE:
7437 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7438 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7439 tcg_temp_free_ptr(fpstatus);
7440 break;
7442 case NEON_2RM_VRSQRTE:
7444 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7445 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7446 tcg_temp_free_ptr(fpstatus);
7447 break;
7449 case NEON_2RM_VRECPE_F:
7451 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7452 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7453 tcg_temp_free_ptr(fpstatus);
7454 break;
7456 case NEON_2RM_VRSQRTE_F:
7458 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7459 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7460 tcg_temp_free_ptr(fpstatus);
7461 break;
7463 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
7464 gen_vfp_sito(0, 1);
7465 break;
7466 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
7467 gen_vfp_uito(0, 1);
7468 break;
7469 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
7470 gen_vfp_tosiz(0, 1);
7471 break;
7472 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
7473 gen_vfp_touiz(0, 1);
7474 break;
7475 default:
7476 /* Reserved op values were caught by the
7477 * neon_2rm_sizes[] check earlier. */
7479 abort();
7481 if (neon_2rm_is_float_op(op)) {
7482 tcg_gen_st_f32(cpu_F0s, cpu_env,
7483 neon_reg_offset(rd, pass));
7484 } else {
7485 neon_store_reg(rd, pass, tmp);
7488 break;
7490 } else if ((insn & (1 << 10)) == 0) {
7491 /* VTBL, VTBX. */
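/* Bits [9:8] give the number of table registers minus one, so n is
 * the table size in D registers (1..4).  Bit 6 selects VTBX, which
 * keeps the existing destination bytes for out-of-range indexes;
 * VTBL writes zeroes instead, hence the zero tmp on that path.
 */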
7492 int n = ((insn >> 8) & 3) + 1;
7493 if ((rn + n) > 32) {
7494 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7495 * helper function running off the end of the register file. */
7497 return 1;
7499 n <<= 3;
7500 if (insn & (1 << 6)) {
7501 tmp = neon_load_reg(rd, 0);
7502 } else {
7503 tmp = tcg_temp_new_i32();
7504 tcg_gen_movi_i32(tmp, 0);
7506 tmp2 = neon_load_reg(rm, 0);
7507 tmp4 = tcg_const_i32(rn);
7508 tmp5 = tcg_const_i32(n);
7509 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7510 tcg_temp_free_i32(tmp);
7511 if (insn & (1 << 6)) {
7512 tmp = neon_load_reg(rd, 1);
7513 } else {
7514 tmp = tcg_temp_new_i32();
7515 tcg_gen_movi_i32(tmp, 0);
7517 tmp3 = neon_load_reg(rm, 1);
7518 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
7519 tcg_temp_free_i32(tmp5);
7520 tcg_temp_free_i32(tmp4);
7521 neon_store_reg(rd, 0, tmp2);
7522 neon_store_reg(rd, 1, tmp3);
7523 tcg_temp_free_i32(tmp);
7524 } else if ((insn & 0x380) == 0) {
7525 /* VDUP */
7526 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7527 return 1;
7529 if (insn & (1 << 19)) {
7530 tmp = neon_load_reg(rm, 1);
7531 } else {
7532 tmp = neon_load_reg(rm, 0);
7534 if (insn & (1 << 16)) {
7535 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
7536 } else if (insn & (1 << 17)) {
7537 if ((insn >> 18) & 1)
7538 gen_neon_dup_high16(tmp);
7539 else
7540 gen_neon_dup_low16(tmp);
7542 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7543 tmp2 = tcg_temp_new_i32();
7544 tcg_gen_mov_i32(tmp2, tmp);
7545 neon_store_reg(rd, pass, tmp2);
7547 tcg_temp_free_i32(tmp);
7548 } else {
7549 return 1;
7553 return 0;
7556 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
7558 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7559 const ARMCPRegInfo *ri;
7561 cpnum = (insn >> 8) & 0xf;
7563 /* First check for coprocessor space used for XScale/iwMMXt insns */
7564 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
7565 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7566 return 1;
7568 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7569 return disas_iwmmxt_insn(s, insn);
7570 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7571 return disas_dsp_insn(s, insn);
7573 return 1;
7576 /* Otherwise treat as a generic register access */
7577 is64 = (insn & (1 << 25)) == 0;
7578 if (!is64 && ((insn & (1 << 4)) == 0)) {
7579 /* cdp */
7580 return 1;
7583 crm = insn & 0xf;
7584 if (is64) {
7585 crn = 0;
7586 opc1 = (insn >> 4) & 0xf;
7587 opc2 = 0;
7588 rt2 = (insn >> 16) & 0xf;
7589 } else {
7590 crn = (insn >> 16) & 0xf;
7591 opc1 = (insn >> 21) & 7;
7592 opc2 = (insn >> 5) & 7;
7593 rt2 = 0;
7595 isread = (insn >> 20) & 1;
7596 rt = (insn >> 12) & 0xf;
7598 ri = get_arm_cp_reginfo(s->cp_regs,
7599 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
7600 if (ri) {
7601 /* Check access permissions */
7602 if (!cp_access_ok(s->current_el, ri, isread)) {
7603 return 1;
7606 if (ri->accessfn ||
7607 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
7608 /* Emit code to perform further access permissions checks at
7609 * runtime; this may result in an exception.
7610 * Note that on XScale all cp0..c13 registers do an access check
7611 * call in order to handle c15_cpar. */
7613 TCGv_ptr tmpptr;
7614 TCGv_i32 tcg_syn, tcg_isread;
7615 uint32_t syndrome;
7617 /* Note that since we are an implementation which takes an
7618 * exception on a trapped conditional instruction only if the
7619 * instruction passes its condition code check, we can take
7620 * advantage of the clause in the ARM ARM that allows us to set
7621 * the COND field in the instruction to 0xE in all cases.
7622 * We could fish the actual condition out of the insn (ARM)
7623 * or the condexec bits (Thumb) but it isn't necessary. */
7625 switch (cpnum) {
7626 case 14:
7627 if (is64) {
7628 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7629 isread, false);
7630 } else {
7631 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7632 rt, isread, false);
7634 break;
7635 case 15:
7636 if (is64) {
7637 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7638 isread, false);
7639 } else {
7640 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7641 rt, isread, false);
7643 break;
7644 default:
7645 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7646 * so this can only happen if this is an ARMv7 or earlier CPU,
7647 * in which case the syndrome information won't actually be
7648 * guest visible. */
7650 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
7651 syndrome = syn_uncategorized();
7652 break;
7655 gen_set_condexec(s);
7656 gen_set_pc_im(s, s->pc - 4);
7657 tmpptr = tcg_const_ptr(ri);
7658 tcg_syn = tcg_const_i32(syndrome);
7659 tcg_isread = tcg_const_i32(isread);
7660 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7661 tcg_isread);
7662 tcg_temp_free_ptr(tmpptr);
7663 tcg_temp_free_i32(tcg_syn);
7664 tcg_temp_free_i32(tcg_isread);
7667 /* Handle special cases first */
7668 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7669 case ARM_CP_NOP:
7670 return 0;
7671 case ARM_CP_WFI:
7672 if (isread) {
7673 return 1;
7675 gen_set_pc_im(s, s->pc);
7676 s->base.is_jmp = DISAS_WFI;
7677 return 0;
7678 default:
7679 break;
7682 if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7683 gen_io_start();
7686 if (isread) {
7687 /* Read */
7688 if (is64) {
7689 TCGv_i64 tmp64;
7690 TCGv_i32 tmp;
7691 if (ri->type & ARM_CP_CONST) {
7692 tmp64 = tcg_const_i64(ri->resetvalue);
7693 } else if (ri->readfn) {
7694 TCGv_ptr tmpptr;
7695 tmp64 = tcg_temp_new_i64();
7696 tmpptr = tcg_const_ptr(ri);
7697 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7698 tcg_temp_free_ptr(tmpptr);
7699 } else {
7700 tmp64 = tcg_temp_new_i64();
7701 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7703 tmp = tcg_temp_new_i32();
7704 tcg_gen_extrl_i64_i32(tmp, tmp64);
7705 store_reg(s, rt, tmp);
7706 tcg_gen_shri_i64(tmp64, tmp64, 32);
7707 tmp = tcg_temp_new_i32();
7708 tcg_gen_extrl_i64_i32(tmp, tmp64);
7709 tcg_temp_free_i64(tmp64);
7710 store_reg(s, rt2, tmp);
7711 } else {
7712 TCGv_i32 tmp;
7713 if (ri->type & ARM_CP_CONST) {
7714 tmp = tcg_const_i32(ri->resetvalue);
7715 } else if (ri->readfn) {
7716 TCGv_ptr tmpptr;
7717 tmp = tcg_temp_new_i32();
7718 tmpptr = tcg_const_ptr(ri);
7719 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7720 tcg_temp_free_ptr(tmpptr);
7721 } else {
7722 tmp = load_cpu_offset(ri->fieldoffset);
7724 if (rt == 15) {
7725 /* Destination register of r15 for 32 bit loads sets
7726 * the condition codes from the high 4 bits of the value. */
7728 gen_set_nzcv(tmp);
7729 tcg_temp_free_i32(tmp);
7730 } else {
7731 store_reg(s, rt, tmp);
7734 } else {
7735 /* Write */
7736 if (ri->type & ARM_CP_CONST) {
7737 /* If not forbidden by access permissions, treat as WI */
7738 return 0;
7741 if (is64) {
7742 TCGv_i32 tmplo, tmphi;
7743 TCGv_i64 tmp64 = tcg_temp_new_i64();
7744 tmplo = load_reg(s, rt);
7745 tmphi = load_reg(s, rt2);
7746 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7747 tcg_temp_free_i32(tmplo);
7748 tcg_temp_free_i32(tmphi);
7749 if (ri->writefn) {
7750 TCGv_ptr tmpptr = tcg_const_ptr(ri);
7751 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7752 tcg_temp_free_ptr(tmpptr);
7753 } else {
7754 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7756 tcg_temp_free_i64(tmp64);
7757 } else {
7758 if (ri->writefn) {
7759 TCGv_i32 tmp;
7760 TCGv_ptr tmpptr;
7761 tmp = load_reg(s, rt);
7762 tmpptr = tcg_const_ptr(ri);
7763 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7764 tcg_temp_free_ptr(tmpptr);
7765 tcg_temp_free_i32(tmp);
7766 } else {
7767 TCGv_i32 tmp = load_reg(s, rt);
7768 store_cpu_offset(tmp, ri->fieldoffset);
7773 if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7774 /* I/O operations must end the TB here (whether read or write) */
7775 gen_io_end();
7776 gen_lookup_tb(s);
7777 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
7778 /* We default to ending the TB on a coprocessor register write,
7779 * but allow this to be suppressed by the register definition
7780 * (usually only necessary to work around guest bugs). */
7782 gen_lookup_tb(s);
7785 return 0;
7788 /* Unknown register; this might be a guest error or a QEMU
7789 * unimplemented feature. */
7791 if (is64) {
7792 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7793 "64 bit system register cp:%d opc1: %d crm:%d "
7794 "(%s)\n",
7795 isread ? "read" : "write", cpnum, opc1, crm,
7796 s->ns ? "non-secure" : "secure");
7797 } else {
7798 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7799 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7800 "(%s)\n",
7801 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7802 s->ns ? "non-secure" : "secure");
7805 return 1;
7809 /* Store a 64-bit value to a register pair. Clobbers val. */
7810 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
7812 TCGv_i32 tmp;
7813 tmp = tcg_temp_new_i32();
7814 tcg_gen_extrl_i64_i32(tmp, val);
7815 store_reg(s, rlow, tmp);
7816 tmp = tcg_temp_new_i32();
7817 tcg_gen_shri_i64(val, val, 32);
7818 tcg_gen_extrl_i64_i32(tmp, val);
7819 store_reg(s, rhigh, tmp);
7822 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
7823 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
7825 TCGv_i64 tmp;
7826 TCGv_i32 tmp2;
7828 /* Load value and extend to 64 bits. */
7829 tmp = tcg_temp_new_i64();
7830 tmp2 = load_reg(s, rlow);
7831 tcg_gen_extu_i32_i64(tmp, tmp2);
7832 tcg_temp_free_i32(tmp2);
7833 tcg_gen_add_i64(val, val, tmp);
7834 tcg_temp_free_i64(tmp);
7837 /* load and add a 64-bit value from a register pair. */
7838 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
7840 TCGv_i64 tmp;
7841 TCGv_i32 tmpl;
7842 TCGv_i32 tmph;
7844 /* Load 64-bit value rd:rn. */
7845 tmpl = load_reg(s, rlow);
7846 tmph = load_reg(s, rhigh);
7847 tmp = tcg_temp_new_i64();
7848 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7849 tcg_temp_free_i32(tmpl);
7850 tcg_temp_free_i32(tmph);
7851 tcg_gen_add_i64(val, val, tmp);
7852 tcg_temp_free_i64(tmp);
7855 /* Set N and Z flags from hi|lo. */
7856 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
7858 tcg_gen_mov_i32(cpu_NF, hi);
7859 tcg_gen_or_i32(cpu_ZF, lo, hi);
7862 /* Load/Store exclusive instructions are implemented by remembering
7863 the value/address loaded, and seeing if these are the same
7864 when the store is performed. This should be sufficient to implement
7865 the architecturally mandated semantics, and avoids having to monitor
7866 regular stores. The compare vs the remembered value is done during
7867 the cmpxchg operation, but we must compare the addresses manually. */
7868 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
7869 TCGv_i32 addr, int size)
7871 TCGv_i32 tmp = tcg_temp_new_i32();
7872 TCGMemOp opc = size | MO_ALIGN | s->be_data;
7874 s->is_ldex = true;
7876 if (size == 3) {
7877 TCGv_i32 tmp2 = tcg_temp_new_i32();
7878 TCGv_i64 t64 = tcg_temp_new_i64();
7880 gen_aa32_ld_i64(s, t64, addr, get_mem_index(s), opc);
7881 tcg_gen_mov_i64(cpu_exclusive_val, t64);
7882 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
7883 tcg_temp_free_i64(t64);
7885 store_reg(s, rt2, tmp2);
7886 } else {
7887 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
7888 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
7891 store_reg(s, rt, tmp);
7892 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
7895 static void gen_clrex(DisasContext *s)
7897 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7900 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7901 TCGv_i32 addr, int size)
7903 TCGv_i32 t0, t1, t2;
7904 TCGv_i64 extaddr;
7905 TCGv taddr;
7906 TCGLabel *done_label;
7907 TCGLabel *fail_label;
7908 TCGMemOp opc = size | MO_ALIGN | s->be_data;
7910 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7911 [addr] = {Rt};
7912 {Rd} = 0;
7913 } else {
7914 {Rd} = 1;
7915 } */
7916 fail_label = gen_new_label();
7917 done_label = gen_new_label();
7918 extaddr = tcg_temp_new_i64();
7919 tcg_gen_extu_i32_i64(extaddr, addr);
7920 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7921 tcg_temp_free_i64(extaddr);
7923 taddr = gen_aa32_addr(s, addr, opc);
7924 t0 = tcg_temp_new_i32();
7925 t1 = load_reg(s, rt);
7926 if (size == 3) {
7927 TCGv_i64 o64 = tcg_temp_new_i64();
7928 TCGv_i64 n64 = tcg_temp_new_i64();
7930 t2 = load_reg(s, rt2);
7931 tcg_gen_concat_i32_i64(n64, t1, t2);
7932 tcg_temp_free_i32(t2);
7933 gen_aa32_frob64(s, n64);
7935 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
7936 get_mem_index(s), opc);
7937 tcg_temp_free_i64(n64);
7939 gen_aa32_frob64(s, o64);
7940 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
7941 tcg_gen_extrl_i64_i32(t0, o64);
7943 tcg_temp_free_i64(o64);
7944 } else {
7945 t2 = tcg_temp_new_i32();
7946 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
7947 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
7948 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
7949 tcg_temp_free_i32(t2);
7951 tcg_temp_free_i32(t1);
7952 tcg_temp_free(taddr);
7953 tcg_gen_mov_i32(cpu_R[rd], t0);
7954 tcg_temp_free_i32(t0);
7955 tcg_gen_br(done_label);
7957 gen_set_label(fail_label);
7958 tcg_gen_movi_i32(cpu_R[rd], 1);
7959 gen_set_label(done_label);
7960 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7963 /* gen_srs:
7964 * @env: CPUARMState
7965 * @s: DisasContext
7966 * @mode: mode field from insn (which stack to store to)
7967 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7968 * @writeback: true if writeback bit set
7970 * Generate code for the SRS (Store Return State) insn. */
7972 static void gen_srs(DisasContext *s,
7973 uint32_t mode, uint32_t amode, bool writeback)
7975 int32_t offset;
7976 TCGv_i32 addr, tmp;
7977 bool undef = false;
7979 /* SRS is:
7980 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
7981 * and specified mode is monitor mode
7982 * - UNDEFINED in Hyp mode
7983 * - UNPREDICTABLE in User or System mode
7984 * - UNPREDICTABLE if the specified mode is:
7985 * -- not implemented
7986 * -- not a valid mode number
7987 * -- a mode that's at a higher exception level
7988 * -- Monitor, if we are Non-secure
7989 * For the UNPREDICTABLE cases we choose to UNDEF. */
7991 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
7992 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
7993 return;
7996 if (s->current_el == 0 || s->current_el == 2) {
7997 undef = true;
8000 switch (mode) {
8001 case ARM_CPU_MODE_USR:
8002 case ARM_CPU_MODE_FIQ:
8003 case ARM_CPU_MODE_IRQ:
8004 case ARM_CPU_MODE_SVC:
8005 case ARM_CPU_MODE_ABT:
8006 case ARM_CPU_MODE_UND:
8007 case ARM_CPU_MODE_SYS:
8008 break;
8009 case ARM_CPU_MODE_HYP:
8010 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
8011 undef = true;
8013 break;
8014 case ARM_CPU_MODE_MON:
8015 /* No need to check specifically for "are we non-secure" because
8016 * we've already made EL0 UNDEF and handled the trap for S-EL1;
8017 * so if this isn't EL3 then we must be non-secure. */
8019 if (s->current_el != 3) {
8020 undef = true;
8022 break;
8023 default:
8024 undef = true;
8027 if (undef) {
8028 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
8029 default_exception_el(s));
8030 return;
8033 addr = tcg_temp_new_i32();
8034 tmp = tcg_const_i32(mode);
8035 /* get_r13_banked() will raise an exception if called from System mode */
8036 gen_set_condexec(s);
8037 gen_set_pc_im(s, s->pc - 4);
8038 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8039 tcg_temp_free_i32(tmp);
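/* The switch below positions addr at the lower of the two store slots
 * relative to the banked SP: DA -> SP - 4, IA -> SP, DB -> SP - 8,
 * IB -> SP + 4.  LR is stored there and the SPSR at addr + 4.  The
 * writeback switch further down compensates for addr having already
 * advanced to the SPSR slot, leaving SP - 8 for DA/DB and SP + 8 for
 * IA/IB.
 */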
8040 switch (amode) {
8041 case 0: /* DA */
8042 offset = -4;
8043 break;
8044 case 1: /* IA */
8045 offset = 0;
8046 break;
8047 case 2: /* DB */
8048 offset = -8;
8049 break;
8050 case 3: /* IB */
8051 offset = 4;
8052 break;
8053 default:
8054 abort();
8056 tcg_gen_addi_i32(addr, addr, offset);
8057 tmp = load_reg(s, 14);
8058 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8059 tcg_temp_free_i32(tmp);
8060 tmp = load_cpu_field(spsr);
8061 tcg_gen_addi_i32(addr, addr, 4);
8062 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8063 tcg_temp_free_i32(tmp);
8064 if (writeback) {
8065 switch (amode) {
8066 case 0:
8067 offset = -8;
8068 break;
8069 case 1:
8070 offset = 4;
8071 break;
8072 case 2:
8073 offset = -4;
8074 break;
8075 case 3:
8076 offset = 0;
8077 break;
8078 default:
8079 abort();
8081 tcg_gen_addi_i32(addr, addr, offset);
8082 tmp = tcg_const_i32(mode);
8083 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8084 tcg_temp_free_i32(tmp);
8086 tcg_temp_free_i32(addr);
8087 s->base.is_jmp = DISAS_UPDATE;
8090 static void disas_arm_insn(DisasContext *s, unsigned int insn)
8092 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
8093 TCGv_i32 tmp;
8094 TCGv_i32 tmp2;
8095 TCGv_i32 tmp3;
8096 TCGv_i32 addr;
8097 TCGv_i64 tmp64;
8099 /* M variants do not implement ARM mode; this must raise the INVSTATE
8100 * UsageFault exception. */
8102 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8103 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8104 default_exception_el(s));
8105 return;
8107 cond = insn >> 28;
8108 if (cond == 0xf){
8109 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8110 * choose to UNDEF. In ARMv5 and above the space is used
8111 * for miscellaneous unconditional instructions. */
8113 ARCH(5);
8115 /* Unconditional instructions. */
8116 if (((insn >> 25) & 7) == 1) {
8117 /* NEON Data processing. */
8118 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
8119 goto illegal_op;
8122 if (disas_neon_data_insn(s, insn)) {
8123 goto illegal_op;
8125 return;
8127 if ((insn & 0x0f100000) == 0x04000000) {
8128 /* NEON load/store. */
8129 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
8130 goto illegal_op;
8133 if (disas_neon_ls_insn(s, insn)) {
8134 goto illegal_op;
8136 return;
8138 if ((insn & 0x0f000e10) == 0x0e000a00) {
8139 /* VFP. */
8140 if (disas_vfp_insn(s, insn)) {
8141 goto illegal_op;
8143 return;
8145 if (((insn & 0x0f30f000) == 0x0510f000) ||
8146 ((insn & 0x0f30f010) == 0x0710f000)) {
8147 if ((insn & (1 << 22)) == 0) {
8148 /* PLDW; v7MP */
8149 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
8150 goto illegal_op;
8153 /* Otherwise PLD; v5TE+ */
8154 ARCH(5TE);
8155 return;
8157 if (((insn & 0x0f70f000) == 0x0450f000) ||
8158 ((insn & 0x0f70f010) == 0x0650f000)) {
8159 ARCH(7);
8160 return; /* PLI; V7 */
8162 if (((insn & 0x0f700000) == 0x04100000) ||
8163 ((insn & 0x0f700010) == 0x06100000)) {
8164 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
8165 goto illegal_op;
8167 return; /* v7MP: Unallocated memory hint: must NOP */
8170 if ((insn & 0x0ffffdff) == 0x01010000) {
8171 ARCH(6);
8172 /* setend */
8173 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8174 gen_helper_setend(cpu_env);
8175 s->base.is_jmp = DISAS_UPDATE;
8177 return;
8178 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8179 switch ((insn >> 4) & 0xf) {
8180 case 1: /* clrex */
8181 ARCH(6K);
8182 gen_clrex(s);
8183 return;
8184 case 4: /* dsb */
8185 case 5: /* dmb */
8186 ARCH(7);
8187 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
8188 return;
8189 case 6: /* isb */
8190 /* We need to break the TB after this insn to execute
8191 * self-modifying code correctly and also to take
8192 * any pending interrupts immediately. */
8194 gen_goto_tb(s, 0, s->pc & ~1);
8195 return;
8196 default:
8197 goto illegal_op;
8199 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8200 /* srs */
8201 ARCH(6);
8202 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
8203 return;
8204 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
8205 /* rfe */
8206 int32_t offset;
8207 if (IS_USER(s))
8208 goto illegal_op;
8209 ARCH(6);
8210 rn = (insn >> 16) & 0xf;
8211 addr = load_reg(s, rn);
8212 i = (insn >> 23) & 3;
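/* As with SRS above, the P/U bits pick where the two loaded words sit
 * relative to Rn: DA -> Rn - 4, IA -> Rn, DB -> Rn - 8, IB -> Rn + 4,
 * with the return PC at the lower address and the saved CPSR at +4.
 */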
8213 switch (i) {
8214 case 0: offset = -4; break; /* DA */
8215 case 1: offset = 0; break; /* IA */
8216 case 2: offset = -8; break; /* DB */
8217 case 3: offset = 4; break; /* IB */
8218 default: abort();
8220 if (offset)
8221 tcg_gen_addi_i32(addr, addr, offset);
8222 /* Load PC into tmp and CPSR into tmp2. */
8223 tmp = tcg_temp_new_i32();
8224 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8225 tcg_gen_addi_i32(addr, addr, 4);
8226 tmp2 = tcg_temp_new_i32();
8227 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
8228 if (insn & (1 << 21)) {
8229 /* Base writeback. */
8230 switch (i) {
8231 case 0: offset = -8; break;
8232 case 1: offset = 4; break;
8233 case 2: offset = -4; break;
8234 case 3: offset = 0; break;
8235 default: abort();
8237 if (offset)
8238 tcg_gen_addi_i32(addr, addr, offset);
8239 store_reg(s, rn, addr);
8240 } else {
8241 tcg_temp_free_i32(addr);
8243 gen_rfe(s, tmp, tmp2);
8244 return;
8245 } else if ((insn & 0x0e000000) == 0x0a000000) {
8246 /* branch link and change to thumb (blx <offset>) */
8247 int32_t offset;
8249 val = (uint32_t)s->pc;
8250 tmp = tcg_temp_new_i32();
8251 tcg_gen_movi_i32(tmp, val);
8252 store_reg(s, 14, tmp);
8253 /* Sign-extend the 24-bit offset */
8254 offset = (((int32_t)insn) << 8) >> 8;
8255 /* offset * 4 + bit24 * 2 + (thumb bit) */
8256 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8257 /* pipeline offset */
8258 val += 4;
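/* Net effect: the branch target is the address of this insn + 8
 * + SignExtend(imm24) * 4 + (bit 24 << 1), with bit 0 forced to 1 so
 * that gen_bx_im() below switches the CPU into Thumb state.
 */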
8259 /* protected by ARCH(5); above, near the start of uncond block */
8260 gen_bx_im(s, val);
8261 return;
8262 } else if ((insn & 0x0e000f00) == 0x0c000100) {
8263 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
8264 /* iWMMXt register transfer. */
8265 if (extract32(s->c15_cpar, 1, 1)) {
8266 if (!disas_iwmmxt_insn(s, insn)) {
8267 return;
8271 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8272 /* Coprocessor double register transfer. */
8273 ARCH(5TE);
8274 } else if ((insn & 0x0f000010) == 0x0e000010) {
8275 /* Additional coprocessor register transfer. */
8276 } else if ((insn & 0x0ff10020) == 0x01000000) {
8277 uint32_t mask;
8278 uint32_t val;
8279 /* cps (privileged) */
8280 if (IS_USER(s))
8281 return;
8282 mask = val = 0;
8283 if (insn & (1 << 19)) {
8284 if (insn & (1 << 8))
8285 mask |= CPSR_A;
8286 if (insn & (1 << 7))
8287 mask |= CPSR_I;
8288 if (insn & (1 << 6))
8289 mask |= CPSR_F;
8290 if (insn & (1 << 18))
8291 val |= mask;
8293 if (insn & (1 << 17)) {
8294 mask |= CPSR_M;
8295 val |= (insn & 0x1f);
8297 if (mask) {
8298 gen_set_psr_im(s, mask, 0, val);
8300 return;
8302 goto illegal_op;
8304 if (cond != 0xe) {
8305 /* if not always execute, we generate a conditional jump to
8306 next instruction */
8307 s->condlabel = gen_new_label();
8308 arm_gen_test_cc(cond ^ 1, s->condlabel);
8309 s->condjmp = 1;
8311 if ((insn & 0x0f900000) == 0x03000000) {
8312 if ((insn & (1 << 21)) == 0) {
8313 ARCH(6T2);
8314 rd = (insn >> 12) & 0xf;
8315 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
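/* Reassemble the 16-bit immediate from its split encoding: bits
 * [19:16] supply the top nibble and bits [11:0] the low twelve bits.
 * MOVW moves it in directly; MOVT below keeps the low halfword and
 * replaces the high one.
 */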
8316 if ((insn & (1 << 22)) == 0) {
8317 /* MOVW */
8318 tmp = tcg_temp_new_i32();
8319 tcg_gen_movi_i32(tmp, val);
8320 } else {
8321 /* MOVT */
8322 tmp = load_reg(s, rd);
8323 tcg_gen_ext16u_i32(tmp, tmp);
8324 tcg_gen_ori_i32(tmp, tmp, val << 16);
8326 store_reg(s, rd, tmp);
8327 } else {
8328 if (((insn >> 12) & 0xf) != 0xf)
8329 goto illegal_op;
8330 if (((insn >> 16) & 0xf) == 0) {
8331 gen_nop_hint(s, insn & 0xff);
8332 } else {
8333 /* CPSR = immediate */
8334 val = insn & 0xff;
8335 shift = ((insn >> 8) & 0xf) * 2;
8336 if (shift)
8337 val = (val >> shift) | (val << (32 - shift));
8338 i = ((insn & (1 << 22)) != 0);
8339 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8340 i, val)) {
8341 goto illegal_op;
8345 } else if ((insn & 0x0f900000) == 0x01000000
8346 && (insn & 0x00000090) != 0x00000090) {
8347 /* miscellaneous instructions */
8348 op1 = (insn >> 21) & 3;
8349 sh = (insn >> 4) & 0xf;
8350 rm = insn & 0xf;
8351 switch (sh) {
8352 case 0x0: /* MSR, MRS */
8353 if (insn & (1 << 9)) {
8354 /* MSR (banked) and MRS (banked) */
8355 int sysm = extract32(insn, 16, 4) |
8356 (extract32(insn, 8, 1) << 4);
8357 int r = extract32(insn, 22, 1);
8359 if (op1 & 1) {
8360 /* MSR (banked) */
8361 gen_msr_banked(s, r, sysm, rm);
8362 } else {
8363 /* MRS (banked) */
8364 int rd = extract32(insn, 12, 4);
8366 gen_mrs_banked(s, r, sysm, rd);
8368 break;
8371 /* MSR, MRS (for PSRs) */
8372 if (op1 & 1) {
8373 /* PSR = reg */
8374 tmp = load_reg(s, rm);
8375 i = ((op1 & 2) != 0);
8376 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
8377 goto illegal_op;
8378 } else {
8379 /* reg = PSR */
8380 rd = (insn >> 12) & 0xf;
8381 if (op1 & 2) {
8382 if (IS_USER(s))
8383 goto illegal_op;
8384 tmp = load_cpu_field(spsr);
8385 } else {
8386 tmp = tcg_temp_new_i32();
8387 gen_helper_cpsr_read(tmp, cpu_env);
8389 store_reg(s, rd, tmp);
8391 break;
8392 case 0x1:
8393 if (op1 == 1) {
8394 /* branch/exchange thumb (bx). */
8395 ARCH(4T);
8396 tmp = load_reg(s, rm);
8397 gen_bx(s, tmp);
8398 } else if (op1 == 3) {
8399 /* clz */
8400 ARCH(5);
8401 rd = (insn >> 12) & 0xf;
8402 tmp = load_reg(s, rm);
8403 tcg_gen_clzi_i32(tmp, tmp, 32);
8404 store_reg(s, rd, tmp);
8405 } else {
8406 goto illegal_op;
8408 break;
8409 case 0x2:
8410 if (op1 == 1) {
8411 ARCH(5J); /* bxj */
8412 /* Trivial implementation equivalent to bx. */
8413 tmp = load_reg(s, rm);
8414 gen_bx(s, tmp);
8415 } else {
8416 goto illegal_op;
8418 break;
8419 case 0x3:
8420 if (op1 != 1)
8421 goto illegal_op;
8423 ARCH(5);
8424 /* branch link/exchange thumb (blx) */
8425 tmp = load_reg(s, rm);
8426 tmp2 = tcg_temp_new_i32();
8427 tcg_gen_movi_i32(tmp2, s->pc);
8428 store_reg(s, 14, tmp2);
8429 gen_bx(s, tmp);
8430 break;
8431 case 0x4:
8433 /* crc32/crc32c */
8434 uint32_t c = extract32(insn, 8, 4);
8436 /* Check this CPU supports ARMv8 CRC instructions.
8437 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8438 * Bits 8, 10 and 11 should be zero. */
8440 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
8441 (c & 0xd) != 0) {
8442 goto illegal_op;
8445 rn = extract32(insn, 16, 4);
8446 rd = extract32(insn, 12, 4);
8448 tmp = load_reg(s, rn);
8449 tmp2 = load_reg(s, rm);
8450 if (op1 == 0) {
8451 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8452 } else if (op1 == 1) {
8453 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8455 tmp3 = tcg_const_i32(1 << op1);
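/* tmp3 carries the operand size in bytes (1 << op1), so op1 = 0/1/2
 * select the byte, halfword and word forms; bit 1 of the "c" field
 * picks the CRC-32C (Castagnoli) helper rather than plain CRC-32.
 */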
8456 if (c & 0x2) {
8457 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8458 } else {
8459 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8461 tcg_temp_free_i32(tmp2);
8462 tcg_temp_free_i32(tmp3);
8463 store_reg(s, rd, tmp);
8464 break;
8466 case 0x5: /* saturating add/subtract */
8467 ARCH(5TE);
8468 rd = (insn >> 12) & 0xf;
8469 rn = (insn >> 16) & 0xf;
8470 tmp = load_reg(s, rm);
8471 tmp2 = load_reg(s, rn);
8472 if (op1 & 2)
8473 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
8474 if (op1 & 1)
8475 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
8476 else
8477 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8478 tcg_temp_free_i32(tmp2);
8479 store_reg(s, rd, tmp);
8480 break;
8481 case 7:
8483 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
8484 switch (op1) {
8485 case 0:
8486 /* HLT */
8487 gen_hlt(s, imm16);
8488 break;
8489 case 1:
8490 /* bkpt */
8491 ARCH(5);
8492 gen_exception_insn(s, 4, EXCP_BKPT,
8493 syn_aa32_bkpt(imm16, false),
8494 default_exception_el(s));
8495 break;
8496 case 2:
8497 /* Hypervisor call (v7) */
8498 ARCH(7);
8499 if (IS_USER(s)) {
8500 goto illegal_op;
8502 gen_hvc(s, imm16);
8503 break;
8504 case 3:
8505 /* Secure monitor call (v6+) */
8506 ARCH(6K);
8507 if (IS_USER(s)) {
8508 goto illegal_op;
8510 gen_smc(s);
8511 break;
8512 default:
8513 g_assert_not_reached();
8515 break;
8517 case 0x8: /* signed multiply */
8518 case 0xa:
8519 case 0xc:
8520 case 0xe:
8521 ARCH(5TE);
8522 rs = (insn >> 8) & 0xf;
8523 rn = (insn >> 12) & 0xf;
8524 rd = (insn >> 16) & 0xf;
8525 if (op1 == 1) {
8526 /* (32 * 16) >> 16 */
8527 tmp = load_reg(s, rm);
8528 tmp2 = load_reg(s, rs);
8529 if (sh & 4)
8530 tcg_gen_sari_i32(tmp2, tmp2, 16);
8531 else
8532 gen_sxth(tmp2);
8533 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8534 tcg_gen_shri_i64(tmp64, tmp64, 16);
8535 tmp = tcg_temp_new_i32();
8536 tcg_gen_extrl_i64_i32(tmp, tmp64);
8537 tcg_temp_free_i64(tmp64);
8538 if ((sh & 2) == 0) {
8539 tmp2 = load_reg(s, rn);
8540 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8541 tcg_temp_free_i32(tmp2);
8543 store_reg(s, rd, tmp);
8544 } else {
8545 /* 16 * 16 */
8546 tmp = load_reg(s, rm);
8547 tmp2 = load_reg(s, rs);
8548 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
8549 tcg_temp_free_i32(tmp2);
8550 if (op1 == 2) {
8551 tmp64 = tcg_temp_new_i64();
8552 tcg_gen_ext_i32_i64(tmp64, tmp);
8553 tcg_temp_free_i32(tmp);
8554 gen_addq(s, tmp64, rn, rd);
8555 gen_storeq_reg(s, rn, rd, tmp64);
8556 tcg_temp_free_i64(tmp64);
8557 } else {
8558 if (op1 == 0) {
8559 tmp2 = load_reg(s, rn);
8560 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8561 tcg_temp_free_i32(tmp2);
8563 store_reg(s, rd, tmp);
8566 break;
8567 default:
8568 goto illegal_op;
8570 } else if (((insn & 0x0e000000) == 0 &&
8571 (insn & 0x00000090) != 0x90) ||
8572 ((insn & 0x0e000000) == (1 << 25))) {
8573 int set_cc, logic_cc, shiftop;
8575 op1 = (insn >> 21) & 0xf;
8576 set_cc = (insn >> 20) & 1;
8577 logic_cc = table_logic_cc[op1] & set_cc;
8579 /* data processing instruction */
8580 if (insn & (1 << 25)) {
8581 /* immediate operand */
8582 val = insn & 0xff;
8583 shift = ((insn >> 8) & 0xf) * 2;
8584 if (shift) {
8585 val = (val >> shift) | (val << (32 - shift));
8587 tmp2 = tcg_temp_new_i32();
8588 tcg_gen_movi_i32(tmp2, val);
8589 if (logic_cc && shift) {
8590 gen_set_CF_bit31(tmp2);
8592 } else {
8593 /* register */
8594 rm = (insn) & 0xf;
8595 tmp2 = load_reg(s, rm);
8596 shiftop = (insn >> 5) & 3;
8597 if (!(insn & (1 << 4))) {
8598 shift = (insn >> 7) & 0x1f;
8599 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8600 } else {
8601 rs = (insn >> 8) & 0xf;
8602 tmp = load_reg(s, rs);
8603 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
8606 if (op1 != 0x0f && op1 != 0x0d) {
8607 rn = (insn >> 16) & 0xf;
8608 tmp = load_reg(s, rn);
8609 } else {
8610 TCGV_UNUSED_I32(tmp);
8612 rd = (insn >> 12) & 0xf;
8613 switch(op1) {
8614 case 0x00:
8615 tcg_gen_and_i32(tmp, tmp, tmp2);
8616 if (logic_cc) {
8617 gen_logic_CC(tmp);
8619 store_reg_bx(s, rd, tmp);
8620 break;
8621 case 0x01:
8622 tcg_gen_xor_i32(tmp, tmp, tmp2);
8623 if (logic_cc) {
8624 gen_logic_CC(tmp);
8626 store_reg_bx(s, rd, tmp);
8627 break;
8628 case 0x02:
8629 if (set_cc && rd == 15) {
8630 /* SUBS r15, ... is used for exception return. */
8631 if (IS_USER(s)) {
8632 goto illegal_op;
8634 gen_sub_CC(tmp, tmp, tmp2);
8635 gen_exception_return(s, tmp);
8636 } else {
8637 if (set_cc) {
8638 gen_sub_CC(tmp, tmp, tmp2);
8639 } else {
8640 tcg_gen_sub_i32(tmp, tmp, tmp2);
8642 store_reg_bx(s, rd, tmp);
8644 break;
8645 case 0x03:
8646 if (set_cc) {
8647 gen_sub_CC(tmp, tmp2, tmp);
8648 } else {
8649 tcg_gen_sub_i32(tmp, tmp2, tmp);
8651 store_reg_bx(s, rd, tmp);
8652 break;
8653 case 0x04:
8654 if (set_cc) {
8655 gen_add_CC(tmp, tmp, tmp2);
8656 } else {
8657 tcg_gen_add_i32(tmp, tmp, tmp2);
8659 store_reg_bx(s, rd, tmp);
8660 break;
8661 case 0x05:
8662 if (set_cc) {
8663 gen_adc_CC(tmp, tmp, tmp2);
8664 } else {
8665 gen_add_carry(tmp, tmp, tmp2);
8667 store_reg_bx(s, rd, tmp);
8668 break;
8669 case 0x06:
8670 if (set_cc) {
8671 gen_sbc_CC(tmp, tmp, tmp2);
8672 } else {
8673 gen_sub_carry(tmp, tmp, tmp2);
8675 store_reg_bx(s, rd, tmp);
8676 break;
8677 case 0x07:
8678 if (set_cc) {
8679 gen_sbc_CC(tmp, tmp2, tmp);
8680 } else {
8681 gen_sub_carry(tmp, tmp2, tmp);
8683 store_reg_bx(s, rd, tmp);
8684 break;
8685 case 0x08:
8686 if (set_cc) {
8687 tcg_gen_and_i32(tmp, tmp, tmp2);
8688 gen_logic_CC(tmp);
8690 tcg_temp_free_i32(tmp);
8691 break;
8692 case 0x09:
8693 if (set_cc) {
8694 tcg_gen_xor_i32(tmp, tmp, tmp2);
8695 gen_logic_CC(tmp);
8697 tcg_temp_free_i32(tmp);
8698 break;
8699 case 0x0a:
8700 if (set_cc) {
8701 gen_sub_CC(tmp, tmp, tmp2);
8703 tcg_temp_free_i32(tmp);
8704 break;
8705 case 0x0b:
8706 if (set_cc) {
8707 gen_add_CC(tmp, tmp, tmp2);
8709 tcg_temp_free_i32(tmp);
8710 break;
8711 case 0x0c:
8712 tcg_gen_or_i32(tmp, tmp, tmp2);
8713 if (logic_cc) {
8714 gen_logic_CC(tmp);
8716 store_reg_bx(s, rd, tmp);
8717 break;
8718 case 0x0d:
8719 if (logic_cc && rd == 15) {
8720 /* MOVS r15, ... is used for exception return. */
8721 if (IS_USER(s)) {
8722 goto illegal_op;
8724 gen_exception_return(s, tmp2);
8725 } else {
8726 if (logic_cc) {
8727 gen_logic_CC(tmp2);
8729 store_reg_bx(s, rd, tmp2);
8731 break;
8732 case 0x0e:
8733 tcg_gen_andc_i32(tmp, tmp, tmp2);
8734 if (logic_cc) {
8735 gen_logic_CC(tmp);
8737 store_reg_bx(s, rd, tmp);
8738 break;
8739 default:
8740 case 0x0f:
8741 tcg_gen_not_i32(tmp2, tmp2);
8742 if (logic_cc) {
8743 gen_logic_CC(tmp2);
8745 store_reg_bx(s, rd, tmp2);
8746 break;
8748 if (op1 != 0x0f && op1 != 0x0d) {
8749 tcg_temp_free_i32(tmp2);
8751 } else {
8752 /* other instructions */
8753 op1 = (insn >> 24) & 0xf;
8754 switch(op1) {
8755 case 0x0:
8756 case 0x1:
8757 /* multiplies, extra load/stores */
8758 sh = (insn >> 5) & 3;
8759 if (sh == 0) {
8760 if (op1 == 0x0) {
8761 rd = (insn >> 16) & 0xf;
8762 rn = (insn >> 12) & 0xf;
8763 rs = (insn >> 8) & 0xf;
8764 rm = (insn) & 0xf;
8765 op1 = (insn >> 20) & 0xf;
8766 switch (op1) {
8767 case 0: case 1: case 2: case 3: case 6:
8768 /* 32 bit mul */
8769 tmp = load_reg(s, rs);
8770 tmp2 = load_reg(s, rm);
8771 tcg_gen_mul_i32(tmp, tmp, tmp2);
8772 tcg_temp_free_i32(tmp2);
8773 if (insn & (1 << 22)) {
8774 /* Subtract (mls) */
8775 ARCH(6T2);
8776 tmp2 = load_reg(s, rn);
8777 tcg_gen_sub_i32(tmp, tmp2, tmp);
8778 tcg_temp_free_i32(tmp2);
8779 } else if (insn & (1 << 21)) {
8780 /* Add */
8781 tmp2 = load_reg(s, rn);
8782 tcg_gen_add_i32(tmp, tmp, tmp2);
8783 tcg_temp_free_i32(tmp2);
8785 if (insn & (1 << 20))
8786 gen_logic_CC(tmp);
8787 store_reg(s, rd, tmp);
8788 break;
8789 case 4:
8790 /* 64 bit mul double accumulate (UMAAL) */
8791 ARCH(6);
8792 tmp = load_reg(s, rs);
8793 tmp2 = load_reg(s, rm);
8794 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8795 gen_addq_lo(s, tmp64, rn);
8796 gen_addq_lo(s, tmp64, rd);
8797 gen_storeq_reg(s, rn, rd, tmp64);
8798 tcg_temp_free_i64(tmp64);
8799 break;
8800 case 8: case 9: case 10: case 11:
8801 case 12: case 13: case 14: case 15:
8802 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8803 tmp = load_reg(s, rs);
8804 tmp2 = load_reg(s, rm);
8805 if (insn & (1 << 22)) {
8806 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8807 } else {
8808 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8810 if (insn & (1 << 21)) { /* mult accumulate */
8811 TCGv_i32 al = load_reg(s, rn);
8812 TCGv_i32 ah = load_reg(s, rd);
8813 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
8814 tcg_temp_free_i32(al);
8815 tcg_temp_free_i32(ah);
8817 if (insn & (1 << 20)) {
8818 gen_logicq_cc(tmp, tmp2);
8820 store_reg(s, rn, tmp);
8821 store_reg(s, rd, tmp2);
8822 break;
8823 default:
8824 goto illegal_op;
8826 } else {
8827 rn = (insn >> 16) & 0xf;
8828 rd = (insn >> 12) & 0xf;
8829 if (insn & (1 << 23)) {
8830 /* load/store exclusive */
8831 int op2 = (insn >> 8) & 3;
8832 op1 = (insn >> 21) & 0x3;
8834 switch (op2) {
8835 case 0: /* lda/stl */
8836 if (op1 == 1) {
8837 goto illegal_op;
8839 ARCH(8);
8840 break;
8841 case 1: /* reserved */
8842 goto illegal_op;
8843 case 2: /* ldaex/stlex */
8844 ARCH(8);
8845 break;
8846 case 3: /* ldrex/strex */
8847 if (op1) {
8848 ARCH(6K);
8849 } else {
8850 ARCH(6);
8852 break;
8855 addr = tcg_temp_local_new_i32();
8856 load_reg_var(s, addr, rn);
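/* addr must be a local temp: gen_store_exclusive() emits branches, and ordinary TCG temps do not keep their value across a branch. */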
8858 /* Since the emulation does not have barriers,
8859 the acquire/release semantics need no special
8860 handling */
8861 if (op2 == 0) {
8862 if (insn & (1 << 20)) {
8863 tmp = tcg_temp_new_i32();
8864 switch (op1) {
8865 case 0: /* lda */
8866 gen_aa32_ld32u_iss(s, tmp, addr,
8867 get_mem_index(s),
8868 rd | ISSIsAcqRel);
8869 break;
8870 case 2: /* ldab */
8871 gen_aa32_ld8u_iss(s, tmp, addr,
8872 get_mem_index(s),
8873 rd | ISSIsAcqRel);
8874 break;
8875 case 3: /* ldah */
8876 gen_aa32_ld16u_iss(s, tmp, addr,
8877 get_mem_index(s),
8878 rd | ISSIsAcqRel);
8879 break;
8880 default:
8881 abort();
8883 store_reg(s, rd, tmp);
8884 } else {
8885 rm = insn & 0xf;
8886 tmp = load_reg(s, rm);
8887 switch (op1) {
8888 case 0: /* stl */
8889 gen_aa32_st32_iss(s, tmp, addr,
8890 get_mem_index(s),
8891 rm | ISSIsAcqRel);
8892 break;
8893 case 2: /* stlb */
8894 gen_aa32_st8_iss(s, tmp, addr,
8895 get_mem_index(s),
8896 rm | ISSIsAcqRel);
8897 break;
8898 case 3: /* stlh */
8899 gen_aa32_st16_iss(s, tmp, addr,
8900 get_mem_index(s),
8901 rm | ISSIsAcqRel);
8902 break;
8903 default:
8904 abort();
8906 tcg_temp_free_i32(tmp);
8908 } else if (insn & (1 << 20)) {
8909 switch (op1) {
8910 case 0: /* ldrex */
8911 gen_load_exclusive(s, rd, 15, addr, 2);
8912 break;
8913 case 1: /* ldrexd */
8914 gen_load_exclusive(s, rd, rd + 1, addr, 3);
8915 break;
8916 case 2: /* ldrexb */
8917 gen_load_exclusive(s, rd, 15, addr, 0);
8918 break;
8919 case 3: /* ldrexh */
8920 gen_load_exclusive(s, rd, 15, addr, 1);
8921 break;
8922 default:
8923 abort();
8925 } else {
8926 rm = insn & 0xf;
8927 switch (op1) {
8928 case 0: /* strex */
8929 gen_store_exclusive(s, rd, rm, 15, addr, 2);
8930 break;
8931 case 1: /* strexd */
8932 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
8933 break;
8934 case 2: /* strexb */
8935 gen_store_exclusive(s, rd, rm, 15, addr, 0);
8936 break;
8937 case 3: /* strexh */
8938 gen_store_exclusive(s, rd, rm, 15, addr, 1);
8939 break;
8940 default:
8941 abort();
8944 tcg_temp_free_i32(addr);
8945 } else {
8946 TCGv taddr;
8947 TCGMemOp opc = s->be_data;
8949 /* SWP instruction */
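/* Implemented below with tcg_gen_atomic_xchg_i32, so the read and write of the swapped location happen as a single atomic memory operation. */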
8950 rm = (insn) & 0xf;
8952 if (insn & (1 << 22)) {
8953 opc |= MO_UB;
8954 } else {
8955 opc |= MO_UL | MO_ALIGN;
8958 addr = load_reg(s, rn);
8959 taddr = gen_aa32_addr(s, addr, opc);
8960 tcg_temp_free_i32(addr);
8962 tmp = load_reg(s, rm);
8963 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
8964 get_mem_index(s), opc);
8965 tcg_temp_free(taddr);
8966 store_reg(s, rd, tmp);
8969 } else {
8970 int address_offset;
8971 bool load = insn & (1 << 20);
8972 bool wbit = insn & (1 << 21);
8973 bool pbit = insn & (1 << 24);
8974 bool doubleword = false;
8975 ISSInfo issinfo;
8977 /* Misc load/store */
8978 rn = (insn >> 16) & 0xf;
8979 rd = (insn >> 12) & 0xf;
8981 /* ISS not valid if writeback */
8982 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
8984 if (!load && (sh & 2)) {
8985 /* doubleword */
8986 ARCH(5TE);
8987 if (rd & 1) {
8988 /* UNPREDICTABLE; we choose to UNDEF */
8989 goto illegal_op;
8991 load = (sh & 1) == 0;
8992 doubleword = true;
8995 addr = load_reg(s, rn);
8996 if (pbit) {
8997 gen_add_datah_offset(s, insn, 0, addr);
8999 address_offset = 0;
9001 if (doubleword) {
9002 if (!load) {
9003 /* store */
9004 tmp = load_reg(s, rd);
9005 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9006 tcg_temp_free_i32(tmp);
9007 tcg_gen_addi_i32(addr, addr, 4);
9008 tmp = load_reg(s, rd + 1);
9009 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9010 tcg_temp_free_i32(tmp);
9011 } else {
9012 /* load */
9013 tmp = tcg_temp_new_i32();
9014 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9015 store_reg(s, rd, tmp);
9016 tcg_gen_addi_i32(addr, addr, 4);
9017 tmp = tcg_temp_new_i32();
9018 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9019 rd++;
9021 address_offset = -4;
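/* addr was advanced past the first word above; the -4 compensates before the writeback offset is applied below. */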
9022 } else if (load) {
9023 /* load */
9024 tmp = tcg_temp_new_i32();
9025 switch (sh) {
9026 case 1:
9027 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9028 issinfo);
9029 break;
9030 case 2:
9031 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9032 issinfo);
9033 break;
9034 default:
9035 case 3:
9036 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9037 issinfo);
9038 break;
9040 } else {
9041 /* store */
9042 tmp = load_reg(s, rd);
9043 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
9044 tcg_temp_free_i32(tmp);
9046 /* Perform base writeback before the loaded value to
9047 ensure correct behavior with overlapping index registers.
9048 ldrd with base writeback is undefined if the
9049 destination and index registers overlap. */
9050 if (!pbit) {
9051 gen_add_datah_offset(s, insn, address_offset, addr);
9052 store_reg(s, rn, addr);
9053 } else if (wbit) {
9054 if (address_offset)
9055 tcg_gen_addi_i32(addr, addr, address_offset);
9056 store_reg(s, rn, addr);
9057 } else {
9058 tcg_temp_free_i32(addr);
9060 if (load) {
9061 /* Complete the load. */
9062 store_reg(s, rd, tmp);
9065 break;
9066 case 0x4:
9067 case 0x5:
9068 goto do_ldst;
9069 case 0x6:
9070 case 0x7:
9071 if (insn & (1 << 4)) {
9072 ARCH(6);
9073 /* Armv6 Media instructions. */
9074 rm = insn & 0xf;
9075 rn = (insn >> 16) & 0xf;
9076 rd = (insn >> 12) & 0xf;
9077 rs = (insn >> 8) & 0xf;
9078 switch ((insn >> 23) & 3) {
9079 case 0: /* Parallel add/subtract. */
9080 op1 = (insn >> 20) & 7;
9081 tmp = load_reg(s, rn);
9082 tmp2 = load_reg(s, rm);
9083 sh = (insn >> 5) & 7;
9084 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
9085 goto illegal_op;
9086 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
9087 tcg_temp_free_i32(tmp2);
9088 store_reg(s, rd, tmp);
9089 break;
9090 case 1:
9091 if ((insn & 0x00700020) == 0) {
9092 /* Halfword pack. */
9093 tmp = load_reg(s, rn);
9094 tmp2 = load_reg(s, rm);
9095 shift = (insn >> 7) & 0x1f;
9096 if (insn & (1 << 6)) {
9097 /* pkhtb */
9098 if (shift == 0)
9099 shift = 31;
9100 tcg_gen_sari_i32(tmp2, tmp2, shift);
9101 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9102 tcg_gen_ext16u_i32(tmp2, tmp2);
9103 } else {
9104 /* pkhbt */
9105 if (shift)
9106 tcg_gen_shli_i32(tmp2, tmp2, shift);
9107 tcg_gen_ext16u_i32(tmp, tmp);
9108 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9110 tcg_gen_or_i32(tmp, tmp, tmp2);
9111 tcg_temp_free_i32(tmp2);
9112 store_reg(s, rd, tmp);
9113 } else if ((insn & 0x00200020) == 0x00200000) {
9114 /* [us]sat */
9115 tmp = load_reg(s, rm);
9116 shift = (insn >> 7) & 0x1f;
9117 if (insn & (1 << 6)) {
9118 if (shift == 0)
9119 shift = 31;
9120 tcg_gen_sari_i32(tmp, tmp, shift);
9121 } else {
9122 tcg_gen_shli_i32(tmp, tmp, shift);
9124 sh = (insn >> 16) & 0x1f;
9125 tmp2 = tcg_const_i32(sh);
9126 if (insn & (1 << 22))
9127 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
9128 else
9129 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
9130 tcg_temp_free_i32(tmp2);
9131 store_reg(s, rd, tmp);
9132 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9133 /* [us]sat16 */
9134 tmp = load_reg(s, rm);
9135 sh = (insn >> 16) & 0x1f;
9136 tmp2 = tcg_const_i32(sh);
9137 if (insn & (1 << 22))
9138 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9139 else
9140 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9141 tcg_temp_free_i32(tmp2);
9142 store_reg(s, rd, tmp);
9143 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9144 /* Select bytes. */
9145 tmp = load_reg(s, rn);
9146 tmp2 = load_reg(s, rm);
9147 tmp3 = tcg_temp_new_i32();
9148 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
9149 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
9150 tcg_temp_free_i32(tmp3);
9151 tcg_temp_free_i32(tmp2);
9152 store_reg(s, rd, tmp);
9153 } else if ((insn & 0x000003e0) == 0x00000060) {
9154 tmp = load_reg(s, rm);
9155 shift = (insn >> 10) & 3;
9156 /* ??? In many cases it's not necessary to do a
9157 rotate, a shift is sufficient. */
9158 if (shift != 0)
9159 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9160 op1 = (insn >> 20) & 7;
9161 switch (op1) {
9162 case 0: gen_sxtb16(tmp); break;
9163 case 2: gen_sxtb(tmp); break;
9164 case 3: gen_sxth(tmp); break;
9165 case 4: gen_uxtb16(tmp); break;
9166 case 6: gen_uxtb(tmp); break;
9167 case 7: gen_uxth(tmp); break;
9168 default: goto illegal_op;
9170 if (rn != 15) {
9171 tmp2 = load_reg(s, rn);
9172 if ((op1 & 3) == 0) {
9173 gen_add16(tmp, tmp2);
9174 } else {
9175 tcg_gen_add_i32(tmp, tmp, tmp2);
9176 tcg_temp_free_i32(tmp2);
9179 store_reg(s, rd, tmp);
9180 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9181 /* rev */
9182 tmp = load_reg(s, rm);
9183 if (insn & (1 << 22)) {
9184 if (insn & (1 << 7)) {
9185 gen_revsh(tmp);
9186 } else {
9187 ARCH(6T2);
9188 gen_helper_rbit(tmp, tmp);
9190 } else {
9191 if (insn & (1 << 7))
9192 gen_rev16(tmp);
9193 else
9194 tcg_gen_bswap32_i32(tmp, tmp);
9196 store_reg(s, rd, tmp);
9197 } else {
9198 goto illegal_op;
9200 break;
9201 case 2: /* Multiplies (Type 3). */
9202 switch ((insn >> 20) & 0x7) {
9203 case 5:
9204 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9205 /* op2 not 00x or 11x : UNDEF */
9206 goto illegal_op;
9208 /* Signed multiply most significant [accumulate].
9209 (SMMUL, SMMLA, SMMLS) */
9210 tmp = load_reg(s, rm);
9211 tmp2 = load_reg(s, rs);
9212 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9214 if (rd != 15) {
9215 tmp = load_reg(s, rd);
9216 if (insn & (1 << 6)) {
9217 tmp64 = gen_subq_msw(tmp64, tmp);
9218 } else {
9219 tmp64 = gen_addq_msw(tmp64, tmp);
9222 if (insn & (1 << 5)) {
9223 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9225 tcg_gen_shri_i64(tmp64, tmp64, 32);
9226 tmp = tcg_temp_new_i32();
9227 tcg_gen_extrl_i64_i32(tmp, tmp64);
9228 tcg_temp_free_i64(tmp64);
9229 store_reg(s, rn, tmp);
9230 break;
9231 case 0:
9232 case 4:
9233 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9234 if (insn & (1 << 7)) {
9235 goto illegal_op;
9237 tmp = load_reg(s, rm);
9238 tmp2 = load_reg(s, rs);
9239 if (insn & (1 << 5))
9240 gen_swap_half(tmp2);
9241 gen_smul_dual(tmp, tmp2);
9242 if (insn & (1 << 22)) {
9243 /* smlald, smlsld */
9244 TCGv_i64 tmp64_2;
9246 tmp64 = tcg_temp_new_i64();
9247 tmp64_2 = tcg_temp_new_i64();
9248 tcg_gen_ext_i32_i64(tmp64, tmp);
9249 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
9250 tcg_temp_free_i32(tmp);
9251 tcg_temp_free_i32(tmp2);
9252 if (insn & (1 << 6)) {
9253 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9254 } else {
9255 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9257 tcg_temp_free_i64(tmp64_2);
9258 gen_addq(s, tmp64, rd, rn);
9259 gen_storeq_reg(s, rd, rn, tmp64);
9260 tcg_temp_free_i64(tmp64);
9261 } else {
9262 /* smuad, smusd, smlad, smlsd */
9263 if (insn & (1 << 6)) {
9264 /* This subtraction cannot overflow. */
9265 tcg_gen_sub_i32(tmp, tmp, tmp2);
9266 } else {
9267 /* This addition cannot overflow 32 bits;
9268 * however it may overflow considered as a
9269 * signed operation, in which case we must set
9270 * the Q flag.
9271 */
9272 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9274 tcg_temp_free_i32(tmp2);
9275 if (rd != 15)
9277 tmp2 = load_reg(s, rd);
9278 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9279 tcg_temp_free_i32(tmp2);
9281 store_reg(s, rn, tmp);
9283 break;
9284 case 1:
9285 case 3:
9286 /* SDIV, UDIV */
9287 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
9288 goto illegal_op;
9290 if (((insn >> 5) & 7) || (rd != 15)) {
9291 goto illegal_op;
9293 tmp = load_reg(s, rm);
9294 tmp2 = load_reg(s, rs);
9295 if (insn & (1 << 21)) {
9296 gen_helper_udiv(tmp, tmp, tmp2);
9297 } else {
9298 gen_helper_sdiv(tmp, tmp, tmp2);
9300 tcg_temp_free_i32(tmp2);
9301 store_reg(s, rn, tmp);
9302 break;
9303 default:
9304 goto illegal_op;
9306 break;
9307 case 3:
9308 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9309 switch (op1) {
9310 case 0: /* Unsigned sum of absolute differences. */
9311 ARCH(6);
9312 tmp = load_reg(s, rm);
9313 tmp2 = load_reg(s, rs);
9314 gen_helper_usad8(tmp, tmp, tmp2);
9315 tcg_temp_free_i32(tmp2);
9316 if (rd != 15) {
9317 tmp2 = load_reg(s, rd);
9318 tcg_gen_add_i32(tmp, tmp, tmp2);
9319 tcg_temp_free_i32(tmp2);
9321 store_reg(s, rn, tmp);
9322 break;
9323 case 0x20: case 0x24: case 0x28: case 0x2c:
9324 /* Bitfield insert/clear. */
9325 ARCH(6T2);
9326 shift = (insn >> 7) & 0x1f;
9327 i = (insn >> 16) & 0x1f;
9328 if (i < shift) {
9329 /* UNPREDICTABLE; we choose to UNDEF */
9330 goto illegal_op;
9332 i = i + 1 - shift;
9333 if (rm == 15) {
9334 tmp = tcg_temp_new_i32();
9335 tcg_gen_movi_i32(tmp, 0);
9336 } else {
9337 tmp = load_reg(s, rm);
9339 if (i != 32) {
9340 tmp2 = load_reg(s, rd);
9341 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
9342 tcg_temp_free_i32(tmp2);
9344 store_reg(s, rd, tmp);
9345 break;
9346 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9347 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
9348 ARCH(6T2);
9349 tmp = load_reg(s, rm);
9350 shift = (insn >> 7) & 0x1f;
9351 i = ((insn >> 16) & 0x1f) + 1;
9352 if (shift + i > 32)
9353 goto illegal_op;
9354 if (i < 32) {
9355 if (op1 & 0x20) {
9356 tcg_gen_extract_i32(tmp, tmp, shift, i);
9357 } else {
9358 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9361 store_reg(s, rd, tmp);
9362 break;
9363 default:
9364 goto illegal_op;
9366 break;
9368 break;
9370 do_ldst:
9371 /* Check for undefined extension instructions
9372 * per the ARM Bible IE:
9373 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9374 */
9375 sh = (0xf << 20) | (0xf << 4);
9376 if (op1 == 0x7 && ((insn & sh) == sh))
9378 goto illegal_op;
9380 /* load/store byte/word */
9381 rn = (insn >> 16) & 0xf;
9382 rd = (insn >> 12) & 0xf;
9383 tmp2 = load_reg(s, rn);
9384 if ((insn & 0x01200000) == 0x00200000) {
9385 /* ldrt/strt */
9386 i = get_a32_user_mem_index(s);
9387 } else {
9388 i = get_mem_index(s);
9390 if (insn & (1 << 24))
9391 gen_add_data_offset(s, insn, tmp2);
9392 if (insn & (1 << 20)) {
9393 /* load */
9394 tmp = tcg_temp_new_i32();
9395 if (insn & (1 << 22)) {
9396 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9397 } else {
9398 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9400 } else {
9401 /* store */
9402 tmp = load_reg(s, rd);
9403 if (insn & (1 << 22)) {
9404 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
9405 } else {
9406 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
9408 tcg_temp_free_i32(tmp);
9410 if (!(insn & (1 << 24))) {
9411 gen_add_data_offset(s, insn, tmp2);
9412 store_reg(s, rn, tmp2);
9413 } else if (insn & (1 << 21)) {
9414 store_reg(s, rn, tmp2);
9415 } else {
9416 tcg_temp_free_i32(tmp2);
9418 if (insn & (1 << 20)) {
9419 /* Complete the load. */
9420 store_reg_from_load(s, rd, tmp);
9422 break;
9423 case 0x08:
9424 case 0x09:
9426 int j, n, loaded_base;
9427 bool exc_return = false;
9428 bool is_load = extract32(insn, 20, 1);
9429 bool user = false;
9430 TCGv_i32 loaded_var;
9431 /* load/store multiple words */
9432 /* XXX: store correct base if write back */
9433 if (insn & (1 << 22)) {
9434 /* LDM (user), LDM (exception return) and STM (user) */
9435 if (IS_USER(s))
9436 goto illegal_op; /* only usable in supervisor mode */
9438 if (is_load && extract32(insn, 15, 1)) {
9439 exc_return = true;
9440 } else {
9441 user = true;
9444 rn = (insn >> 16) & 0xf;
9445 addr = load_reg(s, rn);
9447 /* compute total size */
9448 loaded_base = 0;
9449 TCGV_UNUSED_I32(loaded_var);
9450 n = 0;
9451 for(i=0;i<16;i++) {
9452 if (insn & (1 << i))
9453 n++;
9455 /* XXX: test invalid n == 0 case ? */
9456 if (insn & (1 << 23)) {
9457 if (insn & (1 << 24)) {
9458 /* pre increment */
9459 tcg_gen_addi_i32(addr, addr, 4);
9460 } else {
9461 /* post increment */
9463 } else {
9464 if (insn & (1 << 24)) {
9465 /* pre decrement */
9466 tcg_gen_addi_i32(addr, addr, -(n * 4));
9467 } else {
9468 /* post decrement */
9469 if (n != 1)
9470 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9473 j = 0;
9474 for(i=0;i<16;i++) {
9475 if (insn & (1 << i)) {
9476 if (is_load) {
9477 /* load */
9478 tmp = tcg_temp_new_i32();
9479 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9480 if (user) {
9481 tmp2 = tcg_const_i32(i);
9482 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
9483 tcg_temp_free_i32(tmp2);
9484 tcg_temp_free_i32(tmp);
9485 } else if (i == rn) {
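/* Base register appears in the register list: defer writing it until after base writeback, via loaded_var/loaded_base below. */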
9486 loaded_var = tmp;
9487 loaded_base = 1;
9488 } else if (rn == 15 && exc_return) {
9489 store_pc_exc_ret(s, tmp);
9490 } else {
9491 store_reg_from_load(s, i, tmp);
9493 } else {
9494 /* store */
9495 if (i == 15) {
9496 /* special case: r15 = PC + 8 */
9497 val = (long)s->pc + 4;
9498 tmp = tcg_temp_new_i32();
9499 tcg_gen_movi_i32(tmp, val);
9500 } else if (user) {
9501 tmp = tcg_temp_new_i32();
9502 tmp2 = tcg_const_i32(i);
9503 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
9504 tcg_temp_free_i32(tmp2);
9505 } else {
9506 tmp = load_reg(s, i);
9508 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9509 tcg_temp_free_i32(tmp);
9511 j++;
9512 /* no need to add after the last transfer */
9513 if (j != n)
9514 tcg_gen_addi_i32(addr, addr, 4);
9517 if (insn & (1 << 21)) {
9518 /* write back */
9519 if (insn & (1 << 23)) {
9520 if (insn & (1 << 24)) {
9521 /* pre increment */
9522 } else {
9523 /* post increment */
9524 tcg_gen_addi_i32(addr, addr, 4);
9526 } else {
9527 if (insn & (1 << 24)) {
9528 /* pre decrement */
9529 if (n != 1)
9530 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9531 } else {
9532 /* post decrement */
9533 tcg_gen_addi_i32(addr, addr, -(n * 4));
9536 store_reg(s, rn, addr);
9537 } else {
9538 tcg_temp_free_i32(addr);
9540 if (loaded_base) {
9541 store_reg(s, rn, loaded_var);
9543 if (exc_return) {
9544 /* Restore CPSR from SPSR. */
9545 tmp = load_cpu_field(spsr);
9546 gen_helper_cpsr_write_eret(cpu_env, tmp);
9547 tcg_temp_free_i32(tmp);
9548 /* Must exit loop to check un-masked IRQs */
9549 s->base.is_jmp = DISAS_EXIT;
9552 break;
9553 case 0xa:
9554 case 0xb:
9556 int32_t offset;
9558 /* branch (and link) */
9559 val = (int32_t)s->pc;
9560 if (insn & (1 << 24)) {
9561 tmp = tcg_temp_new_i32();
9562 tcg_gen_movi_i32(tmp, val);
9563 store_reg(s, 14, tmp);
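/* The 24-bit immediate is a word offset: shift it left two bits and sign-extend. s->pc is already 4 past the insn, so the extra +4 gives the architectural PC+8 base. */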
9565 offset = sextract32(insn << 2, 0, 26);
9566 val += offset + 4;
9567 gen_jmp(s, val);
9569 break;
9570 case 0xc:
9571 case 0xd:
9572 case 0xe:
9573 if (((insn >> 8) & 0xe) == 10) {
9574 /* VFP. */
9575 if (disas_vfp_insn(s, insn)) {
9576 goto illegal_op;
9578 } else if (disas_coproc_insn(s, insn)) {
9579 /* Coprocessor. */
9580 goto illegal_op;
9582 break;
9583 case 0xf:
9584 /* swi */
9585 gen_set_pc_im(s, s->pc);
9586 s->svc_imm = extract32(insn, 0, 24);
9587 s->base.is_jmp = DISAS_SWI;
9588 break;
9589 default:
9590 illegal_op:
9591 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9592 default_exception_el(s));
9593 break;
9598 /* Return true if this is a Thumb-2 logical op. */
9599 static int
9600 thumb2_logic_op(int op)
9602 return (op < 8);
9605 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9606 then set condition code flags based on the result of the operation.
9607 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9608 to the high bit of T1.
9609 Returns zero if the opcode is valid. */
9611 static int
9612 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9613 TCGv_i32 t0, TCGv_i32 t1)
9615 int logic_cc;
9617 logic_cc = 0;
9618 switch (op) {
9619 case 0: /* and */
9620 tcg_gen_and_i32(t0, t0, t1);
9621 logic_cc = conds;
9622 break;
9623 case 1: /* bic */
9624 tcg_gen_andc_i32(t0, t0, t1);
9625 logic_cc = conds;
9626 break;
9627 case 2: /* orr */
9628 tcg_gen_or_i32(t0, t0, t1);
9629 logic_cc = conds;
9630 break;
9631 case 3: /* orn */
9632 tcg_gen_orc_i32(t0, t0, t1);
9633 logic_cc = conds;
9634 break;
9635 case 4: /* eor */
9636 tcg_gen_xor_i32(t0, t0, t1);
9637 logic_cc = conds;
9638 break;
9639 case 8: /* add */
9640 if (conds)
9641 gen_add_CC(t0, t0, t1);
9642 else
9643 tcg_gen_add_i32(t0, t0, t1);
9644 break;
9645 case 10: /* adc */
9646 if (conds)
9647 gen_adc_CC(t0, t0, t1);
9648 else
9649 gen_adc(t0, t1);
9650 break;
9651 case 11: /* sbc */
9652 if (conds) {
9653 gen_sbc_CC(t0, t0, t1);
9654 } else {
9655 gen_sub_carry(t0, t0, t1);
9657 break;
9658 case 13: /* sub */
9659 if (conds)
9660 gen_sub_CC(t0, t0, t1);
9661 else
9662 tcg_gen_sub_i32(t0, t0, t1);
9663 break;
9664 case 14: /* rsb */
9665 if (conds)
9666 gen_sub_CC(t0, t1, t0);
9667 else
9668 tcg_gen_sub_i32(t0, t1, t0);
9669 break;
9670 default: /* 5, 6, 7, 9, 12, 15. */
9671 return 1;
9673 if (logic_cc) {
9674 gen_logic_CC(t0);
9675 if (shifter_out)
9676 gen_set_CF_bit31(t1);
9678 return 0;
9681 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9682 is not legal. */
9683 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9685 uint32_t insn, imm, shift, offset;
9686 uint32_t rd, rn, rm, rs;
9687 TCGv_i32 tmp;
9688 TCGv_i32 tmp2;
9689 TCGv_i32 tmp3;
9690 TCGv_i32 addr;
9691 TCGv_i64 tmp64;
9692 int op;
9693 int shiftop;
9694 int conds;
9695 int logic_cc;
9697 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9698 || arm_dc_feature(s, ARM_FEATURE_M))) {
9699 /* Thumb-1 cores may need to treat bl and blx as a pair of
9700 16-bit instructions to get correct prefetch abort behavior. */
9701 insn = insn_hw1;
9702 if ((insn & (1 << 12)) == 0) {
9703 ARCH(5);
9704 /* Second half of blx. */
9705 offset = ((insn & 0x7ff) << 1);
9706 tmp = load_reg(s, 14);
9707 tcg_gen_addi_i32(tmp, tmp, offset);
9708 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9710 tmp2 = tcg_temp_new_i32();
9711 tcg_gen_movi_i32(tmp2, s->pc | 1);
9712 store_reg(s, 14, tmp2);
9713 gen_bx(s, tmp);
9714 return 0;
9716 if (insn & (1 << 11)) {
9717 /* Second half of bl. */
9718 offset = ((insn & 0x7ff) << 1) | 1;
9719 tmp = load_reg(s, 14);
9720 tcg_gen_addi_i32(tmp, tmp, offset);
9722 tmp2 = tcg_temp_new_i32();
9723 tcg_gen_movi_i32(tmp2, s->pc | 1);
9724 store_reg(s, 14, tmp2);
9725 gen_bx(s, tmp);
9726 return 0;
9728 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9729 /* Instruction spans a page boundary. Implement it as two
9730 16-bit instructions in case the second half causes a
9731 prefetch abort. */
9732 offset = ((int32_t)insn << 21) >> 9;
9733 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9734 return 0;
9736 /* Fall through to 32-bit decode. */
9739 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
9740 s->pc += 2;
9741 insn |= (uint32_t)insn_hw1 << 16;
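/* The first halfword goes in bits [31:16], so the combined value matches the 32-bit Thumb encoding layout used by the checks below. */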
9743 if ((insn & 0xf800e800) != 0xf000e800) {
9744 ARCH(6T2);
9747 rn = (insn >> 16) & 0xf;
9748 rs = (insn >> 12) & 0xf;
9749 rd = (insn >> 8) & 0xf;
9750 rm = insn & 0xf;
9751 switch ((insn >> 25) & 0xf) {
9752 case 0: case 1: case 2: case 3:
9753 /* 16-bit instructions. Should never happen. */
9754 abort();
9755 case 4:
9756 if (insn & (1 << 22)) {
9757 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
9758 * - load/store doubleword, load/store exclusive, ldacq/strel,
9759 * table branch.
9760 */
9761 if (insn & 0x01200000) {
9762 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9763 * - load/store dual (post-indexed)
9764 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
9765 * - load/store dual (literal and immediate)
9766 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9767 * - load/store dual (pre-indexed)
9768 */
9769 if (rn == 15) {
9770 if (insn & (1 << 21)) {
9771 /* UNPREDICTABLE */
9772 goto illegal_op;
9774 addr = tcg_temp_new_i32();
9775 tcg_gen_movi_i32(addr, s->pc & ~3);
9776 } else {
9777 addr = load_reg(s, rn);
9779 offset = (insn & 0xff) * 4;
9780 if ((insn & (1 << 23)) == 0)
9781 offset = -offset;
9782 if (insn & (1 << 24)) {
9783 tcg_gen_addi_i32(addr, addr, offset);
9784 offset = 0;
9786 if (insn & (1 << 20)) {
9787 /* ldrd */
9788 tmp = tcg_temp_new_i32();
9789 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9790 store_reg(s, rs, tmp);
9791 tcg_gen_addi_i32(addr, addr, 4);
9792 tmp = tcg_temp_new_i32();
9793 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9794 store_reg(s, rd, tmp);
9795 } else {
9796 /* strd */
9797 tmp = load_reg(s, rs);
9798 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9799 tcg_temp_free_i32(tmp);
9800 tcg_gen_addi_i32(addr, addr, 4);
9801 tmp = load_reg(s, rd);
9802 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9803 tcg_temp_free_i32(tmp);
9805 if (insn & (1 << 21)) {
9806 /* Base writeback. */
9807 tcg_gen_addi_i32(addr, addr, offset - 4);
9808 store_reg(s, rn, addr);
9809 } else {
9810 tcg_temp_free_i32(addr);
9812 } else if ((insn & (1 << 23)) == 0) {
9813 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
9814 * - load/store exclusive word
9815 */
9816 if (rs == 15) {
9817 goto illegal_op;
9819 addr = tcg_temp_local_new_i32();
9820 load_reg_var(s, addr, rn);
9821 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
9822 if (insn & (1 << 20)) {
9823 gen_load_exclusive(s, rs, 15, addr, 2);
9824 } else {
9825 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9827 tcg_temp_free_i32(addr);
9828 } else if ((insn & (7 << 5)) == 0) {
9829 /* Table Branch. */
9830 if (rn == 15) {
9831 addr = tcg_temp_new_i32();
9832 tcg_gen_movi_i32(addr, s->pc);
9833 } else {
9834 addr = load_reg(s, rn);
9836 tmp = load_reg(s, rm);
9837 tcg_gen_add_i32(addr, addr, tmp);
9838 if (insn & (1 << 4)) {
9839 /* tbh */
9840 tcg_gen_add_i32(addr, addr, tmp);
9841 tcg_temp_free_i32(tmp);
9842 tmp = tcg_temp_new_i32();
9843 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9844 } else { /* tbb */
9845 tcg_temp_free_i32(tmp);
9846 tmp = tcg_temp_new_i32();
9847 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9849 tcg_temp_free_i32(addr);
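/* The loaded table entry is a halfword count: double it and add s->pc (the address of the TBB/TBH insn plus 4) to form the branch target. */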
9850 tcg_gen_shli_i32(tmp, tmp, 1);
9851 tcg_gen_addi_i32(tmp, tmp, s->pc);
9852 store_reg(s, 15, tmp);
9853 } else {
9854 int op2 = (insn >> 6) & 0x3;
9855 op = (insn >> 4) & 0x3;
9856 switch (op2) {
9857 case 0:
9858 goto illegal_op;
9859 case 1:
9860 /* Load/store exclusive byte/halfword/doubleword */
9861 if (op == 2) {
9862 goto illegal_op;
9864 ARCH(7);
9865 break;
9866 case 2:
9867 /* Load-acquire/store-release */
9868 if (op == 3) {
9869 goto illegal_op;
9871 /* Fall through */
9872 case 3:
9873 /* Load-acquire/store-release exclusive */
9874 ARCH(8);
9875 break;
9877 addr = tcg_temp_local_new_i32();
9878 load_reg_var(s, addr, rn);
9879 if (!(op2 & 1)) {
9880 if (insn & (1 << 20)) {
9881 tmp = tcg_temp_new_i32();
9882 switch (op) {
9883 case 0: /* ldab */
9884 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
9885 rs | ISSIsAcqRel);
9886 break;
9887 case 1: /* ldah */
9888 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9889 rs | ISSIsAcqRel);
9890 break;
9891 case 2: /* lda */
9892 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
9893 rs | ISSIsAcqRel);
9894 break;
9895 default:
9896 abort();
9898 store_reg(s, rs, tmp);
9899 } else {
9900 tmp = load_reg(s, rs);
9901 switch (op) {
9902 case 0: /* stlb */
9903 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
9904 rs | ISSIsAcqRel);
9905 break;
9906 case 1: /* stlh */
9907 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
9908 rs | ISSIsAcqRel);
9909 break;
9910 case 2: /* stl */
9911 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
9912 rs | ISSIsAcqRel);
9913 break;
9914 default:
9915 abort();
9917 tcg_temp_free_i32(tmp);
9919 } else if (insn & (1 << 20)) {
9920 gen_load_exclusive(s, rs, rd, addr, op);
9921 } else {
9922 gen_store_exclusive(s, rm, rs, rd, addr, op);
9924 tcg_temp_free_i32(addr);
9926 } else {
9927 /* Load/store multiple, RFE, SRS. */
9928 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
9929 /* RFE, SRS: not available in user mode or on M profile */
9930 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9931 goto illegal_op;
9933 if (insn & (1 << 20)) {
9934 /* rfe */
9935 addr = load_reg(s, rn);
9936 if ((insn & (1 << 24)) == 0)
9937 tcg_gen_addi_i32(addr, addr, -8);
9938 /* Load PC into tmp and CPSR into tmp2. */
9939 tmp = tcg_temp_new_i32();
9940 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9941 tcg_gen_addi_i32(addr, addr, 4);
9942 tmp2 = tcg_temp_new_i32();
9943 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9944 if (insn & (1 << 21)) {
9945 /* Base writeback. */
9946 if (insn & (1 << 24)) {
9947 tcg_gen_addi_i32(addr, addr, 4);
9948 } else {
9949 tcg_gen_addi_i32(addr, addr, -4);
9951 store_reg(s, rn, addr);
9952 } else {
9953 tcg_temp_free_i32(addr);
9955 gen_rfe(s, tmp, tmp2);
9956 } else {
9957 /* srs */
9958 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9959 insn & (1 << 21));
9961 } else {
9962 int i, loaded_base = 0;
9963 TCGv_i32 loaded_var;
9964 /* Load/store multiple. */
9965 addr = load_reg(s, rn);
9966 offset = 0;
9967 for (i = 0; i < 16; i++) {
9968 if (insn & (1 << i))
9969 offset += 4;
9971 if (insn & (1 << 24)) {
9972 tcg_gen_addi_i32(addr, addr, -offset);
9975 TCGV_UNUSED_I32(loaded_var);
9976 for (i = 0; i < 16; i++) {
9977 if ((insn & (1 << i)) == 0)
9978 continue;
9979 if (insn & (1 << 20)) {
9980 /* Load. */
9981 tmp = tcg_temp_new_i32();
9982 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9983 if (i == 15) {
9984 gen_bx_excret(s, tmp);
9985 } else if (i == rn) {
9986 loaded_var = tmp;
9987 loaded_base = 1;
9988 } else {
9989 store_reg(s, i, tmp);
9991 } else {
9992 /* Store. */
9993 tmp = load_reg(s, i);
9994 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9995 tcg_temp_free_i32(tmp);
9997 tcg_gen_addi_i32(addr, addr, 4);
9999 if (loaded_base) {
10000 store_reg(s, rn, loaded_var);
10002 if (insn & (1 << 21)) {
10003 /* Base register writeback. */
10004 if (insn & (1 << 24)) {
10005 tcg_gen_addi_i32(addr, addr, -offset);
10007 /* Fault if writeback register is in register list. */
10008 if (insn & (1 << rn))
10009 goto illegal_op;
10010 store_reg(s, rn, addr);
10011 } else {
10012 tcg_temp_free_i32(addr);
10016 break;
10017 case 5:
10019 op = (insn >> 21) & 0xf;
10020 if (op == 6) {
10021 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10022 goto illegal_op;
10024 /* Halfword pack. */
10025 tmp = load_reg(s, rn);
10026 tmp2 = load_reg(s, rm);
10027 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
10028 if (insn & (1 << 5)) {
10029 /* pkhtb */
10030 if (shift == 0)
10031 shift = 31;
10032 tcg_gen_sari_i32(tmp2, tmp2, shift);
10033 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
10034 tcg_gen_ext16u_i32(tmp2, tmp2);
10035 } else {
10036 /* pkhbt */
10037 if (shift)
10038 tcg_gen_shli_i32(tmp2, tmp2, shift);
10039 tcg_gen_ext16u_i32(tmp, tmp);
10040 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10042 tcg_gen_or_i32(tmp, tmp, tmp2);
10043 tcg_temp_free_i32(tmp2);
10044 store_reg(s, rd, tmp);
10045 } else {
10046 /* Data processing register constant shift. */
10047 if (rn == 15) {
10048 tmp = tcg_temp_new_i32();
10049 tcg_gen_movi_i32(tmp, 0);
10050 } else {
10051 tmp = load_reg(s, rn);
10053 tmp2 = load_reg(s, rm);
10055 shiftop = (insn >> 4) & 3;
10056 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10057 conds = (insn & (1 << 20)) != 0;
10058 logic_cc = (conds && thumb2_logic_op(op));
10059 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
10060 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
10061 goto illegal_op;
10062 tcg_temp_free_i32(tmp2);
10063 if (rd != 15) {
10064 store_reg(s, rd, tmp);
10065 } else {
10066 tcg_temp_free_i32(tmp);
10069 break;
10070 case 13: /* Misc data processing. */
10071 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
10072 if (op < 4 && (insn & 0xf000) != 0xf000)
10073 goto illegal_op;
10074 switch (op) {
10075 case 0: /* Register controlled shift. */
10076 tmp = load_reg(s, rn);
10077 tmp2 = load_reg(s, rm);
10078 if ((insn & 0x70) != 0)
10079 goto illegal_op;
10080 op = (insn >> 21) & 3;
10081 logic_cc = (insn & (1 << 20)) != 0;
10082 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
10083 if (logic_cc)
10084 gen_logic_CC(tmp);
10085 store_reg(s, rd, tmp);
10086 break;
10087 case 1: /* Sign/zero extend. */
10088 op = (insn >> 20) & 7;
10089 switch (op) {
10090 case 0: /* SXTAH, SXTH */
10091 case 1: /* UXTAH, UXTH */
10092 case 4: /* SXTAB, SXTB */
10093 case 5: /* UXTAB, UXTB */
10094 break;
10095 case 2: /* SXTAB16, SXTB16 */
10096 case 3: /* UXTAB16, UXTB16 */
10097 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10098 goto illegal_op;
10100 break;
10101 default:
10102 goto illegal_op;
10104 if (rn != 15) {
10105 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10106 goto illegal_op;
10109 tmp = load_reg(s, rm);
10110 shift = (insn >> 4) & 3;
10111 /* ??? In many cases it's not necessary to do a
10112 rotate, a shift is sufficient. */
10113 if (shift != 0)
10114 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
10115 op = (insn >> 20) & 7;
10116 switch (op) {
10117 case 0: gen_sxth(tmp); break;
10118 case 1: gen_uxth(tmp); break;
10119 case 2: gen_sxtb16(tmp); break;
10120 case 3: gen_uxtb16(tmp); break;
10121 case 4: gen_sxtb(tmp); break;
10122 case 5: gen_uxtb(tmp); break;
10123 default:
10124 g_assert_not_reached();
10126 if (rn != 15) {
10127 tmp2 = load_reg(s, rn);
10128 if ((op >> 1) == 1) {
10129 gen_add16(tmp, tmp2);
10130 } else {
10131 tcg_gen_add_i32(tmp, tmp, tmp2);
10132 tcg_temp_free_i32(tmp2);
10135 store_reg(s, rd, tmp);
10136 break;
10137 case 2: /* SIMD add/subtract. */
10138 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10139 goto illegal_op;
10141 op = (insn >> 20) & 7;
10142 shift = (insn >> 4) & 7;
10143 if ((op & 3) == 3 || (shift & 3) == 3)
10144 goto illegal_op;
10145 tmp = load_reg(s, rn);
10146 tmp2 = load_reg(s, rm);
10147 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
10148 tcg_temp_free_i32(tmp2);
10149 store_reg(s, rd, tmp);
10150 break;
10151 case 3: /* Other data processing. */
10152 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10153 if (op < 4) {
10154 /* Saturating add/subtract. */
10155 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10156 goto illegal_op;
10158 tmp = load_reg(s, rn);
10159 tmp2 = load_reg(s, rm);
10160 if (op & 1)
10161 gen_helper_double_saturate(tmp, cpu_env, tmp);
10162 if (op & 2)
10163 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
10164 else
10165 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
10166 tcg_temp_free_i32(tmp2);
10167 } else {
10168 switch (op) {
10169 case 0x0a: /* rbit */
10170 case 0x08: /* rev */
10171 case 0x09: /* rev16 */
10172 case 0x0b: /* revsh */
10173 case 0x18: /* clz */
10174 break;
10175 case 0x10: /* sel */
10176 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10177 goto illegal_op;
10179 break;
10180 case 0x20: /* crc32/crc32c */
10181 case 0x21:
10182 case 0x22:
10183 case 0x28:
10184 case 0x29:
10185 case 0x2a:
10186 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10187 goto illegal_op;
10189 break;
10190 default:
10191 goto illegal_op;
10193 tmp = load_reg(s, rn);
10194 switch (op) {
10195 case 0x0a: /* rbit */
10196 gen_helper_rbit(tmp, tmp);
10197 break;
10198 case 0x08: /* rev */
10199 tcg_gen_bswap32_i32(tmp, tmp);
10200 break;
10201 case 0x09: /* rev16 */
10202 gen_rev16(tmp);
10203 break;
10204 case 0x0b: /* revsh */
10205 gen_revsh(tmp);
10206 break;
10207 case 0x10: /* sel */
10208 tmp2 = load_reg(s, rm);
10209 tmp3 = tcg_temp_new_i32();
10210 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
10211 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
10212 tcg_temp_free_i32(tmp3);
10213 tcg_temp_free_i32(tmp2);
10214 break;
10215 case 0x18: /* clz */
10216 tcg_gen_clzi_i32(tmp, tmp, 32);
10217 break;
10218 case 0x20:
10219 case 0x21:
10220 case 0x22:
10221 case 0x28:
10222 case 0x29:
10223 case 0x2a:
10225 /* crc32/crc32c */
10226 uint32_t sz = op & 0x3;
10227 uint32_t c = op & 0x8;
10229 tmp2 = load_reg(s, rm);
10230 if (sz == 0) {
10231 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10232 } else if (sz == 1) {
10233 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10235 tmp3 = tcg_const_i32(1 << sz);
10236 if (c) {
10237 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10238 } else {
10239 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10241 tcg_temp_free_i32(tmp2);
10242 tcg_temp_free_i32(tmp3);
10243 break;
10245 default:
10246 g_assert_not_reached();
10249 store_reg(s, rd, tmp);
10250 break;
10251 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
10252 switch ((insn >> 20) & 7) {
10253 case 0: /* 32 x 32 -> 32 */
10254 case 7: /* Unsigned sum of absolute differences. */
10255 break;
10256 case 1: /* 16 x 16 -> 32 */
10257 case 2: /* Dual multiply add. */
10258 case 3: /* 32 * 16 -> 32msb */
10259 case 4: /* Dual multiply subtract. */
10260 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10261 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10262 goto illegal_op;
10264 break;
10266 op = (insn >> 4) & 0xf;
10267 tmp = load_reg(s, rn);
10268 tmp2 = load_reg(s, rm);
10269 switch ((insn >> 20) & 7) {
10270 case 0: /* 32 x 32 -> 32 */
10271 tcg_gen_mul_i32(tmp, tmp, tmp2);
10272 tcg_temp_free_i32(tmp2);
10273 if (rs != 15) {
10274 tmp2 = load_reg(s, rs);
10275 if (op)
10276 tcg_gen_sub_i32(tmp, tmp2, tmp);
10277 else
10278 tcg_gen_add_i32(tmp, tmp, tmp2);
10279 tcg_temp_free_i32(tmp2);
10281 break;
10282 case 1: /* 16 x 16 -> 32 */
10283 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10284 tcg_temp_free_i32(tmp2);
10285 if (rs != 15) {
10286 tmp2 = load_reg(s, rs);
10287 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10288 tcg_temp_free_i32(tmp2);
10290 break;
10291 case 2: /* Dual multiply add. */
10292 case 4: /* Dual multiply subtract. */
10293 if (op)
10294 gen_swap_half(tmp2);
10295 gen_smul_dual(tmp, tmp2);
10296 if (insn & (1 << 22)) {
10297 /* This subtraction cannot overflow. */
10298 tcg_gen_sub_i32(tmp, tmp, tmp2);
10299 } else {
10300 /* This addition cannot overflow 32 bits;
10301 * however it may overflow considered as a signed
10302 * operation, in which case we must set the Q flag.
10303 */
10304 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10306 tcg_temp_free_i32(tmp2);
10307 if (rs != 15)
10309 tmp2 = load_reg(s, rs);
10310 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10311 tcg_temp_free_i32(tmp2);
10313 break;
10314 case 3: /* 32 * 16 -> 32msb */
10315 if (op)
10316 tcg_gen_sari_i32(tmp2, tmp2, 16);
10317 else
10318 gen_sxth(tmp2);
10319 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10320 tcg_gen_shri_i64(tmp64, tmp64, 16);
10321 tmp = tcg_temp_new_i32();
10322 tcg_gen_extrl_i64_i32(tmp, tmp64);
10323 tcg_temp_free_i64(tmp64);
10324 if (rs != 15)
10326 tmp2 = load_reg(s, rs);
10327 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10328 tcg_temp_free_i32(tmp2);
10330 break;
10331 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10332 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10333 if (rs != 15) {
10334 tmp = load_reg(s, rs);
10335 if (insn & (1 << 20)) {
10336 tmp64 = gen_addq_msw(tmp64, tmp);
10337 } else {
10338 tmp64 = gen_subq_msw(tmp64, tmp);
10341 if (insn & (1 << 4)) {
10342 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10344 tcg_gen_shri_i64(tmp64, tmp64, 32);
10345 tmp = tcg_temp_new_i32();
10346 tcg_gen_extrl_i64_i32(tmp, tmp64);
10347 tcg_temp_free_i64(tmp64);
10348 break;
10349 case 7: /* Unsigned sum of absolute differences. */
10350 gen_helper_usad8(tmp, tmp, tmp2);
10351 tcg_temp_free_i32(tmp2);
10352 if (rs != 15) {
10353 tmp2 = load_reg(s, rs);
10354 tcg_gen_add_i32(tmp, tmp, tmp2);
10355 tcg_temp_free_i32(tmp2);
10357 break;
10359 store_reg(s, rd, tmp);
10360 break;
10361 case 6: case 7: /* 64-bit multiply, Divide. */
10362 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
10363 tmp = load_reg(s, rn);
10364 tmp2 = load_reg(s, rm);
10365 if ((op & 0x50) == 0x10) {
10366 /* sdiv, udiv */
10367 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
10368 goto illegal_op;
10370 if (op & 0x20)
10371 gen_helper_udiv(tmp, tmp, tmp2);
10372 else
10373 gen_helper_sdiv(tmp, tmp, tmp2);
10374 tcg_temp_free_i32(tmp2);
10375 store_reg(s, rd, tmp);
10376 } else if ((op & 0xe) == 0xc) {
10377 /* Dual multiply accumulate long. */
10378 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10379 tcg_temp_free_i32(tmp);
10380 tcg_temp_free_i32(tmp2);
10381 goto illegal_op;
10383 if (op & 1)
10384 gen_swap_half(tmp2);
10385 gen_smul_dual(tmp, tmp2);
10386 if (op & 0x10) {
10387 tcg_gen_sub_i32(tmp, tmp, tmp2);
10388 } else {
10389 tcg_gen_add_i32(tmp, tmp, tmp2);
10391 tcg_temp_free_i32(tmp2);
10392 /* BUGFIX */
10393 tmp64 = tcg_temp_new_i64();
10394 tcg_gen_ext_i32_i64(tmp64, tmp);
10395 tcg_temp_free_i32(tmp);
10396 gen_addq(s, tmp64, rs, rd);
10397 gen_storeq_reg(s, rs, rd, tmp64);
10398 tcg_temp_free_i64(tmp64);
10399 } else {
10400 if (op & 0x20) {
10401 /* Unsigned 64-bit multiply */
10402 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
10403 } else {
10404 if (op & 8) {
10405 /* smlalxy */
10406 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10407 tcg_temp_free_i32(tmp2);
10408 tcg_temp_free_i32(tmp);
10409 goto illegal_op;
10411 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10412 tcg_temp_free_i32(tmp2);
10413 tmp64 = tcg_temp_new_i64();
10414 tcg_gen_ext_i32_i64(tmp64, tmp);
10415 tcg_temp_free_i32(tmp);
10416 } else {
10417 /* Signed 64-bit multiply */
10418 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10421 if (op & 4) {
10422 /* umaal */
10423 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10424 tcg_temp_free_i64(tmp64);
10425 goto illegal_op;
10427 gen_addq_lo(s, tmp64, rs);
10428 gen_addq_lo(s, tmp64, rd);
10429 } else if (op & 0x40) {
10430 /* 64-bit accumulate. */
10431 gen_addq(s, tmp64, rs, rd);
10433 gen_storeq_reg(s, rs, rd, tmp64);
10434 tcg_temp_free_i64(tmp64);
10436 break;
10438 break;
10439 case 6: case 7: case 14: case 15:
10440 /* Coprocessor. */
10441 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10442 /* We don't currently implement M profile FP support,
10443 * so this entire space should give a NOCP fault.
10444 */
10445 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
10446 default_exception_el(s));
10447 break;
10449 if (((insn >> 24) & 3) == 3) {
10450 /* Translate into the equivalent ARM encoding. */
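/* Only the top byte differs between the T32 and A32 Advanced SIMD data-processing encodings; rewrite it so disas_neon_data_insn() can treat the word as A32. */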
10451 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
10452 if (disas_neon_data_insn(s, insn)) {
10453 goto illegal_op;
10455 } else if (((insn >> 8) & 0xe) == 10) {
10456 if (disas_vfp_insn(s, insn)) {
10457 goto illegal_op;
10459 } else {
10460 if (insn & (1 << 28))
10461 goto illegal_op;
10462 if (disas_coproc_insn(s, insn)) {
10463 goto illegal_op;
10466 break;
10467 case 8: case 9: case 10: case 11:
10468 if (insn & (1 << 15)) {
10469 /* Branches, misc control. */
10470 if (insn & 0x5000) {
10471 /* Unconditional branch. */
10472 /* signextend(hw1[10:0]) -> offset[:12]. */
10473 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10474 /* hw1[10:0] -> offset[11:1]. */
10475 offset |= (insn & 0x7ff) << 1;
10476 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10477 offset[24:22] already have the same value because of the
10478 sign extension above. */
10479 offset ^= ((~insn) & (1 << 13)) << 10;
10480 offset ^= ((~insn) & (1 << 11)) << 11;
10482 if (insn & (1 << 14)) {
10483 /* Branch and link. */
10484 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
10487 offset += s->pc;
10488 if (insn & (1 << 12)) {
10489 /* b/bl */
10490 gen_jmp(s, offset);
10491 } else {
10492 /* blx */
10493 offset &= ~(uint32_t)2;
10494 /* thumb2 bx, no need to check */
10495 gen_bx_im(s, offset);
10497 } else if (((insn >> 23) & 7) == 7) {
10498 /* Misc control */
10499 if (insn & (1 << 13))
10500 goto illegal_op;
10502 if (insn & (1 << 26)) {
10503 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10504 goto illegal_op;
10506 if (!(insn & (1 << 20))) {
10507 /* Hypervisor call (v7) */
10508 int imm16 = extract32(insn, 16, 4) << 12
10509 | extract32(insn, 0, 12);
10510 ARCH(7);
10511 if (IS_USER(s)) {
10512 goto illegal_op;
10514 gen_hvc(s, imm16);
10515 } else {
10516 /* Secure monitor call (v6+) */
10517 ARCH(6K);
10518 if (IS_USER(s)) {
10519 goto illegal_op;
10521 gen_smc(s);
10523 } else {
10524 op = (insn >> 20) & 7;
10525 switch (op) {
10526 case 0: /* msr cpsr. */
10527 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10528 tmp = load_reg(s, rn);
10529 /* the constant is the mask and SYSm fields */
10530 addr = tcg_const_i32(insn & 0xfff);
10531 gen_helper_v7m_msr(cpu_env, addr, tmp);
10532 tcg_temp_free_i32(addr);
10533 tcg_temp_free_i32(tmp);
10534 gen_lookup_tb(s);
10535 break;
10537 /* fall through */
10538 case 1: /* msr spsr. */
10539 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10540 goto illegal_op;
10543 if (extract32(insn, 5, 1)) {
10544 /* MSR (banked) */
10545 int sysm = extract32(insn, 8, 4) |
10546 (extract32(insn, 4, 1) << 4);
10547 int r = op & 1;
10549 gen_msr_banked(s, r, sysm, rm);
10550 break;
10553 /* MSR (for PSRs) */
10554 tmp = load_reg(s, rn);
10555 if (gen_set_psr(s,
10556 msr_mask(s, (insn >> 8) & 0xf, op == 1),
10557 op == 1, tmp))
10558 goto illegal_op;
10559 break;
10560 case 2: /* cps, nop-hint. */
10561 if (((insn >> 8) & 7) == 0) {
10562 gen_nop_hint(s, insn & 0xff);
10564 /* Implemented as NOP in user mode. */
10565 if (IS_USER(s))
10566 break;
10567 offset = 0;
10568 imm = 0;
10569 if (insn & (1 << 10)) {
10570 if (insn & (1 << 7))
10571 offset |= CPSR_A;
10572 if (insn & (1 << 6))
10573 offset |= CPSR_I;
10574 if (insn & (1 << 5))
10575 offset |= CPSR_F;
10576 if (insn & (1 << 9))
10577 imm = CPSR_A | CPSR_I | CPSR_F;
10579 if (insn & (1 << 8)) {
10580 offset |= 0x1f;
10581 imm |= (insn & 0x1f);
10583 if (offset) {
10584 gen_set_psr_im(s, offset, 0, imm);
10586 break;
10587 case 3: /* Special control operations. */
10588 ARCH(7);
10589 op = (insn >> 4) & 0xf;
10590 switch (op) {
10591 case 2: /* clrex */
10592 gen_clrex(s);
10593 break;
10594 case 4: /* dsb */
10595 case 5: /* dmb */
10596 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
10597 break;
10598 case 6: /* isb */
10599 /* We need to break the TB after this insn
10600 * to execute self-modifying code correctly
10601 * and also to take any pending interrupts
10602 * immediately.
10603 */
10604 gen_goto_tb(s, 0, s->pc & ~1);
10605 break;
10606 default:
10607 goto illegal_op;
10609 break;
10610 case 4: /* bxj */
10611 /* Trivial implementation equivalent to bx.
10612 * This instruction doesn't exist at all for M-profile.
10613 */
10614 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10615 goto illegal_op;
10617 tmp = load_reg(s, rn);
10618 gen_bx(s, tmp);
10619 break;
10620 case 5: /* Exception return. */
10621 if (IS_USER(s)) {
10622 goto illegal_op;
10624 if (rn != 14 || rd != 15) {
10625 goto illegal_op;
10627 tmp = load_reg(s, rn);
10628 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10629 gen_exception_return(s, tmp);
10630 break;
10631 case 6: /* MRS */
10632 if (extract32(insn, 5, 1) &&
10633 !arm_dc_feature(s, ARM_FEATURE_M)) {
10634 /* MRS (banked) */
10635 int sysm = extract32(insn, 16, 4) |
10636 (extract32(insn, 4, 1) << 4);
10638 gen_mrs_banked(s, 0, sysm, rd);
10639 break;
10642 if (extract32(insn, 16, 4) != 0xf) {
10643 goto illegal_op;
10645 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
10646 extract32(insn, 0, 8) != 0) {
10647 goto illegal_op;
10650 /* mrs cpsr */
10651 tmp = tcg_temp_new_i32();
10652 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10653 addr = tcg_const_i32(insn & 0xff);
10654 gen_helper_v7m_mrs(tmp, cpu_env, addr);
10655 tcg_temp_free_i32(addr);
10656 } else {
10657 gen_helper_cpsr_read(tmp, cpu_env);
10659 store_reg(s, rd, tmp);
10660 break;
10661 case 7: /* MRS */
10662 if (extract32(insn, 5, 1) &&
10663 !arm_dc_feature(s, ARM_FEATURE_M)) {
10664 /* MRS (banked) */
10665 int sysm = extract32(insn, 16, 4) |
10666 (extract32(insn, 4, 1) << 4);
10668 gen_mrs_banked(s, 1, sysm, rd);
10669 break;
10672 /* mrs spsr. */
10673 /* Not accessible in user mode. */
10674 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
10675 goto illegal_op;
10678 if (extract32(insn, 16, 4) != 0xf ||
10679 extract32(insn, 0, 8) != 0) {
10680 goto illegal_op;
10683 tmp = load_cpu_field(spsr);
10684 store_reg(s, rd, tmp);
10685 break;
10688 } else {
10689 /* Conditional branch. */
10690 op = (insn >> 22) & 0xf;
10691 /* Generate a conditional jump to next instruction. */
10692 s->condlabel = gen_new_label();
10693 arm_gen_test_cc(op ^ 1, s->condlabel);
10694 s->condjmp = 1;
10696 /* offset[11:1] = insn[10:0] */
10697 offset = (insn & 0x7ff) << 1;
10698 /* offset[17:12] = insn[21:16]. */
10699 offset |= (insn & 0x003f0000) >> 4;
10700 /* offset[31:20] = insn[26]. */
10701 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10702 /* offset[18] = insn[13]. */
10703 offset |= (insn & (1 << 13)) << 5;
10704 /* offset[19] = insn[11]. */
10705 offset |= (insn & (1 << 11)) << 8;
10707 /* jump to the offset */
10708 gen_jmp(s, s->pc + offset);
10710 } else {
10711 /* Data processing immediate. */
10712 if (insn & (1 << 25)) {
10713 if (insn & (1 << 24)) {
10714 if (insn & (1 << 20))
10715 goto illegal_op;
10716 /* Bitfield/Saturate. */
10717 op = (insn >> 21) & 7;
10718 imm = insn & 0x1f;
10719 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10720 if (rn == 15) {
10721 tmp = tcg_temp_new_i32();
10722 tcg_gen_movi_i32(tmp, 0);
10723 } else {
10724 tmp = load_reg(s, rn);
10726 switch (op) {
10727 case 2: /* Signed bitfield extract. */
10728 imm++;
10729 if (shift + imm > 32)
10730 goto illegal_op;
10731 if (imm < 32) {
10732 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
10734 break;
10735 case 6: /* Unsigned bitfield extract. */
10736 imm++;
10737 if (shift + imm > 32)
10738 goto illegal_op;
10739 if (imm < 32) {
10740 tcg_gen_extract_i32(tmp, tmp, shift, imm);
10742 break;
10743 case 3: /* Bitfield insert/clear. */
10744 if (imm < shift)
10745 goto illegal_op;
10746 imm = imm + 1 - shift;
10747 if (imm != 32) {
10748 tmp2 = load_reg(s, rd);
10749 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
10750 tcg_temp_free_i32(tmp2);
10752 break;
10753 case 7:
10754 goto illegal_op;
10755 default: /* Saturate. */
10756 if (shift) {
10757 if (op & 1)
10758 tcg_gen_sari_i32(tmp, tmp, shift);
10759 else
10760 tcg_gen_shli_i32(tmp, tmp, shift);
10762 tmp2 = tcg_const_i32(imm);
10763 if (op & 4) {
10764 /* Unsigned. */
10765 if ((op & 1) && shift == 0) {
10766 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10767 tcg_temp_free_i32(tmp);
10768 tcg_temp_free_i32(tmp2);
10769 goto illegal_op;
10771 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
10772 } else {
10773 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
10775 } else {
10776 /* Signed. */
10777 if ((op & 1) && shift == 0) {
10778 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10779 tcg_temp_free_i32(tmp);
10780 tcg_temp_free_i32(tmp2);
10781 goto illegal_op;
10783 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
10784 } else {
10785 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
10788 tcg_temp_free_i32(tmp2);
10789 break;
10791 store_reg(s, rd, tmp);
10792 } else {
10793 imm = ((insn & 0x04000000) >> 15)
10794 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10795 if (insn & (1 << 22)) {
10796 /* 16-bit immediate. */
10797 imm |= (insn >> 4) & 0xf000;
10798 if (insn & (1 << 23)) {
10799 /* movt */
10800 tmp = load_reg(s, rd);
10801 tcg_gen_ext16u_i32(tmp, tmp);
10802 tcg_gen_ori_i32(tmp, tmp, imm << 16);
10803 } else {
10804 /* movw */
10805 tmp = tcg_temp_new_i32();
10806 tcg_gen_movi_i32(tmp, imm);
10808 } else {
10809 /* Add/sub 12-bit immediate. */
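/* With rn == 15 this is the ADR form: the 12-bit immediate is added to or
 * subtracted from the word-aligned PC (s->pc is this insn's address + 4).
 */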
10810 if (rn == 15) {
10811 offset = s->pc & ~(uint32_t)3;
10812 if (insn & (1 << 23))
10813 offset -= imm;
10814 else
10815 offset += imm;
10816 tmp = tcg_temp_new_i32();
10817 tcg_gen_movi_i32(tmp, offset);
10818 } else {
10819 tmp = load_reg(s, rn);
10820 if (insn & (1 << 23))
10821 tcg_gen_subi_i32(tmp, tmp, imm);
10822 else
10823 tcg_gen_addi_i32(tmp, tmp, imm);
10826 store_reg(s, rd, tmp);
10828 } else {
10829 int shifter_out = 0;
10830 /* modified 12-bit immediate. */
10831 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10832 imm = (insn & 0xff);
10833 switch (shift) {
10834 case 0: /* XY */
10835 /* Nothing to do. */
10836 break;
10837 case 1: /* 00XY00XY */
10838 imm |= imm << 16;
10839 break;
10840 case 2: /* XY00XY00 */
10841 imm |= imm << 16;
10842 imm <<= 8;
10843 break;
10844 case 3: /* XYXYXYXY */
10845 imm |= imm << 16;
10846 imm |= imm << 8;
10847 break;
10848 default: /* Rotated constant. */
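/* The 8-bit immediate, with bit 7 forced to 1, is rotated right by the
 * 5-bit amount i:imm3:imm8<7> (range 8..31).  Shifting left by
 * (32 - shift) gives the same result because no set bit can wrap,
 * e.g. imm8 = 0x55 with i:imm3 = 0b0100 yields 0xd5000000.
 */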
10849 shift = (shift << 1) | (imm >> 7);
10850 imm |= 0x80;
10851 imm = imm << (32 - shift);
10852 shifter_out = 1;
10853 break;
10855 tmp2 = tcg_temp_new_i32();
10856 tcg_gen_movi_i32(tmp2, imm);
10857 rn = (insn >> 16) & 0xf;
10858 if (rn == 15) {
10859 tmp = tcg_temp_new_i32();
10860 tcg_gen_movi_i32(tmp, 0);
10861 } else {
10862 tmp = load_reg(s, rn);
10864 op = (insn >> 21) & 0xf;
10865 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
10866 shifter_out, tmp, tmp2))
10867 goto illegal_op;
10868 tcg_temp_free_i32(tmp2);
10869 rd = (insn >> 8) & 0xf;
10870 if (rd != 15) {
10871 store_reg(s, rd, tmp);
10872 } else {
10873 tcg_temp_free_i32(tmp);
10877 break;
10878 case 12: /* Load/store single data item. */
10880 int postinc = 0;
10881 int writeback = 0;
10882 int memidx;
10883 ISSInfo issinfo;
10885 if ((insn & 0x01100000) == 0x01000000) {
10886 if (disas_neon_ls_insn(s, insn)) {
10887 goto illegal_op;
10889 break;
10891 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10892 if (rs == 15) {
10893 if (!(insn & (1 << 20))) {
10894 goto illegal_op;
10896 if (op != 2) {
10897 /* Byte or halfword load space with dest == r15 : memory hints.
10898 * Catch them early so we don't emit pointless addressing code.
10899 * This space is a mix of:
10900 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10901 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10902 * cores)
10903 * unallocated hints, which must be treated as NOPs
10904 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10905 * which is easiest for the decoding logic
10906 * Some space which must UNDEF
10907 */
10908 int op1 = (insn >> 23) & 3;
10909 int op2 = (insn >> 6) & 0x3f;
10910 if (op & 2) {
10911 goto illegal_op;
10913 if (rn == 15) {
10914 /* UNPREDICTABLE, unallocated hint or
10915 * PLD/PLDW/PLI (literal)
10916 */
10917 return 0;
10919 if (op1 & 1) {
10920 return 0; /* PLD/PLDW/PLI or unallocated hint */
10922 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
10923 return 0; /* PLD/PLDW/PLI or unallocated hint */
10925 /* UNDEF space, or an UNPREDICTABLE */
10926 return 1;
10929 memidx = get_mem_index(s);
10930 if (rn == 15) {
10931 addr = tcg_temp_new_i32();
10932 /* PC relative. */
10933 /* s->pc has already been incremented by 4. */
10934 imm = s->pc & 0xfffffffc;
10935 if (insn & (1 << 23))
10936 imm += insn & 0xfff;
10937 else
10938 imm -= insn & 0xfff;
10939 tcg_gen_movi_i32(addr, imm);
10940 } else {
10941 addr = load_reg(s, rn);
10942 if (insn & (1 << 23)) {
10943 /* Positive offset. */
10944 imm = insn & 0xfff;
10945 tcg_gen_addi_i32(addr, addr, imm);
10946 } else {
10947 imm = insn & 0xff;
10948 switch ((insn >> 8) & 0xf) {
10949 case 0x0: /* Shifted Register. */
10950 shift = (insn >> 4) & 0xf;
10951 if (shift > 3) {
10952 tcg_temp_free_i32(addr);
10953 goto illegal_op;
10955 tmp = load_reg(s, rm);
10956 if (shift)
10957 tcg_gen_shli_i32(tmp, tmp, shift);
10958 tcg_gen_add_i32(addr, addr, tmp);
10959 tcg_temp_free_i32(tmp);
10960 break;
10961 case 0xc: /* Negative offset. */
10962 tcg_gen_addi_i32(addr, addr, -imm);
10963 break;
10964 case 0xe: /* User privilege. */
10965 tcg_gen_addi_i32(addr, addr, imm);
10966 memidx = get_a32_user_mem_index(s);
10967 break;
10968 case 0x9: /* Post-decrement. */
10969 imm = -imm;
10970 /* Fall through. */
10971 case 0xb: /* Post-increment. */
10972 postinc = 1;
10973 writeback = 1;
10974 break;
10975 case 0xd: /* Pre-decrement. */
10976 imm = -imm;
10977 /* Fall through. */
10978 case 0xf: /* Pre-increment. */
10979 tcg_gen_addi_i32(addr, addr, imm);
10980 writeback = 1;
10981 break;
10982 default:
10983 tcg_temp_free_i32(addr);
10984 goto illegal_op;
10989 issinfo = writeback ? ISSInvalid : rs;
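/* Writeback and post-indexed forms don't provide valid ISS syndrome
 * register information, hence ISSInvalid rather than the register number.
 */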
10991 if (insn & (1 << 20)) {
10992 /* Load. */
10993 tmp = tcg_temp_new_i32();
10994 switch (op) {
10995 case 0:
10996 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
10997 break;
10998 case 4:
10999 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
11000 break;
11001 case 1:
11002 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
11003 break;
11004 case 5:
11005 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
11006 break;
11007 case 2:
11008 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
11009 break;
11010 default:
11011 tcg_temp_free_i32(tmp);
11012 tcg_temp_free_i32(addr);
11013 goto illegal_op;
11015 if (rs == 15) {
11016 gen_bx_excret(s, tmp);
11017 } else {
11018 store_reg(s, rs, tmp);
11020 } else {
11021 /* Store. */
11022 tmp = load_reg(s, rs);
11023 switch (op) {
11024 case 0:
11025 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
11026 break;
11027 case 1:
11028 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
11029 break;
11030 case 2:
11031 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
11032 break;
11033 default:
11034 tcg_temp_free_i32(tmp);
11035 tcg_temp_free_i32(addr);
11036 goto illegal_op;
11038 tcg_temp_free_i32(tmp);
11040 if (postinc)
11041 tcg_gen_addi_i32(addr, addr, imm);
11042 if (writeback) {
11043 store_reg(s, rn, addr);
11044 } else {
11045 tcg_temp_free_i32(addr);
11048 break;
11049 default:
11050 goto illegal_op;
11052 return 0;
11053 illegal_op:
11054 return 1;
11057 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
11059 uint32_t val, insn, op, rm, rn, rd, shift, cond;
11060 int32_t offset;
11061 int i;
11062 TCGv_i32 tmp;
11063 TCGv_i32 tmp2;
11064 TCGv_i32 addr;
11066 if (s->condexec_mask) {
11067 cond = s->condexec_cond;
11068 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
11069 s->condlabel = gen_new_label();
11070 arm_gen_test_cc(cond ^ 1, s->condlabel);
11071 s->condjmp = 1;
11075 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
11076 s->pc += 2;
11078 switch (insn >> 12) {
11079 case 0: case 1:
11081 rd = insn & 7;
11082 op = (insn >> 11) & 3;
11083 if (op == 3) {
11084 /* add/subtract */
11085 rn = (insn >> 3) & 7;
11086 tmp = load_reg(s, rn);
11087 if (insn & (1 << 10)) {
11088 /* immediate */
11089 tmp2 = tcg_temp_new_i32();
11090 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
11091 } else {
11092 /* reg */
11093 rm = (insn >> 6) & 7;
11094 tmp2 = load_reg(s, rm);
11096 if (insn & (1 << 9)) {
11097 if (s->condexec_mask)
11098 tcg_gen_sub_i32(tmp, tmp, tmp2);
11099 else
11100 gen_sub_CC(tmp, tmp, tmp2);
11101 } else {
11102 if (s->condexec_mask)
11103 tcg_gen_add_i32(tmp, tmp, tmp2);
11104 else
11105 gen_add_CC(tmp, tmp, tmp2);
11107 tcg_temp_free_i32(tmp2);
11108 store_reg(s, rd, tmp);
11109 } else {
11110 /* shift immediate */
11111 rm = (insn >> 3) & 7;
11112 shift = (insn >> 6) & 0x1f;
11113 tmp = load_reg(s, rm);
11114 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11115 if (!s->condexec_mask)
11116 gen_logic_CC(tmp);
11117 store_reg(s, rd, tmp);
11119 break;
11120 case 2: case 3:
11121 /* arithmetic large immediate */
11122 op = (insn >> 11) & 3;
11123 rd = (insn >> 8) & 0x7;
11124 if (op == 0) { /* mov */
11125 tmp = tcg_temp_new_i32();
11126 tcg_gen_movi_i32(tmp, insn & 0xff);
11127 if (!s->condexec_mask)
11128 gen_logic_CC(tmp);
11129 store_reg(s, rd, tmp);
11130 } else {
11131 tmp = load_reg(s, rd);
11132 tmp2 = tcg_temp_new_i32();
11133 tcg_gen_movi_i32(tmp2, insn & 0xff);
11134 switch (op) {
11135 case 1: /* cmp */
11136 gen_sub_CC(tmp, tmp, tmp2);
11137 tcg_temp_free_i32(tmp);
11138 tcg_temp_free_i32(tmp2);
11139 break;
11140 case 2: /* add */
11141 if (s->condexec_mask)
11142 tcg_gen_add_i32(tmp, tmp, tmp2);
11143 else
11144 gen_add_CC(tmp, tmp, tmp2);
11145 tcg_temp_free_i32(tmp2);
11146 store_reg(s, rd, tmp);
11147 break;
11148 case 3: /* sub */
11149 if (s->condexec_mask)
11150 tcg_gen_sub_i32(tmp, tmp, tmp2);
11151 else
11152 gen_sub_CC(tmp, tmp, tmp2);
11153 tcg_temp_free_i32(tmp2);
11154 store_reg(s, rd, tmp);
11155 break;
11158 break;
11159 case 4:
11160 if (insn & (1 << 11)) {
11161 rd = (insn >> 8) & 7;
11162 /* load pc-relative. Bit 1 of PC is ignored. */
11163 val = s->pc + 2 + ((insn & 0xff) * 4);
11164 val &= ~(uint32_t)2;
11165 addr = tcg_temp_new_i32();
11166 tcg_gen_movi_i32(addr, val);
11167 tmp = tcg_temp_new_i32();
11168 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11169 rd | ISSIs16Bit);
11170 tcg_temp_free_i32(addr);
11171 store_reg(s, rd, tmp);
11172 break;
11174 if (insn & (1 << 10)) {
11175 /* 0b0100_01xx_xxxx_xxxx
11176 * - data processing extended, branch and exchange
11177 */
11178 rd = (insn & 7) | ((insn >> 4) & 8);
11179 rm = (insn >> 3) & 0xf;
11180 op = (insn >> 8) & 3;
11181 switch (op) {
11182 case 0: /* add */
11183 tmp = load_reg(s, rd);
11184 tmp2 = load_reg(s, rm);
11185 tcg_gen_add_i32(tmp, tmp, tmp2);
11186 tcg_temp_free_i32(tmp2);
11187 store_reg(s, rd, tmp);
11188 break;
11189 case 1: /* cmp */
11190 tmp = load_reg(s, rd);
11191 tmp2 = load_reg(s, rm);
11192 gen_sub_CC(tmp, tmp, tmp2);
11193 tcg_temp_free_i32(tmp2);
11194 tcg_temp_free_i32(tmp);
11195 break;
11196 case 2: /* mov/cpy */
11197 tmp = load_reg(s, rm);
11198 store_reg(s, rd, tmp);
11199 break;
11200 case 3:
11202 /* 0b0100_0111_xxxx_xxxx
11203 * - branch [and link] exchange thumb register
11204 */
11205 bool link = insn & (1 << 7);
11207 if (insn & 3) {
11208 goto undef;
11210 if (link) {
11211 ARCH(5);
11213 if ((insn & 4)) {
11214 /* BXNS/BLXNS: only exists for v8M with the
11215 * security extensions, and always UNDEF if NonSecure.
11216 * We don't implement these in the user-only mode
11217 * either (in theory you can use them from Secure User
11218 * mode but they are too tied in to system emulation.)
11219 */
11220 if (!s->v8m_secure || IS_USER_ONLY) {
11221 goto undef;
11223 if (link) {
11224 /* BLXNS: not yet implemented */
11225 goto undef;
11226 } else {
11227 gen_bxns(s, rm);
11229 break;
11231 /* BLX/BX */
11232 tmp = load_reg(s, rm);
11233 if (link) {
11234 val = (uint32_t)s->pc | 1;
11235 tmp2 = tcg_temp_new_i32();
11236 tcg_gen_movi_i32(tmp2, val);
11237 store_reg(s, 14, tmp2);
11238 gen_bx(s, tmp);
11239 } else {
11240 /* Only BX works as exception-return, not BLX */
11241 gen_bx_excret(s, tmp);
11243 break;
11246 break;
11249 /* data processing register */
11250 rd = insn & 7;
11251 rm = (insn >> 3) & 7;
11252 op = (insn >> 6) & 0xf;
11253 if (op == 2 || op == 3 || op == 4 || op == 7) {
11254 /* the shift/rotate ops want the operands backwards */
11255 val = rm;
11256 rm = rd;
11257 rd = val;
11258 val = 1;
11259 } else {
11260 val = 0;
11263 if (op == 9) { /* neg */
11264 tmp = tcg_temp_new_i32();
11265 tcg_gen_movi_i32(tmp, 0);
11266 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11267 tmp = load_reg(s, rd);
11268 } else {
11269 TCGV_UNUSED_I32(tmp);
11272 tmp2 = load_reg(s, rm);
11273 switch (op) {
11274 case 0x0: /* and */
11275 tcg_gen_and_i32(tmp, tmp, tmp2);
11276 if (!s->condexec_mask)
11277 gen_logic_CC(tmp);
11278 break;
11279 case 0x1: /* eor */
11280 tcg_gen_xor_i32(tmp, tmp, tmp2);
11281 if (!s->condexec_mask)
11282 gen_logic_CC(tmp);
11283 break;
11284 case 0x2: /* lsl */
11285 if (s->condexec_mask) {
11286 gen_shl(tmp2, tmp2, tmp);
11287 } else {
11288 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
11289 gen_logic_CC(tmp2);
11291 break;
11292 case 0x3: /* lsr */
11293 if (s->condexec_mask) {
11294 gen_shr(tmp2, tmp2, tmp);
11295 } else {
11296 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
11297 gen_logic_CC(tmp2);
11299 break;
11300 case 0x4: /* asr */
11301 if (s->condexec_mask) {
11302 gen_sar(tmp2, tmp2, tmp);
11303 } else {
11304 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
11305 gen_logic_CC(tmp2);
11307 break;
11308 case 0x5: /* adc */
11309 if (s->condexec_mask) {
11310 gen_adc(tmp, tmp2);
11311 } else {
11312 gen_adc_CC(tmp, tmp, tmp2);
11314 break;
11315 case 0x6: /* sbc */
11316 if (s->condexec_mask) {
11317 gen_sub_carry(tmp, tmp, tmp2);
11318 } else {
11319 gen_sbc_CC(tmp, tmp, tmp2);
11321 break;
11322 case 0x7: /* ror */
11323 if (s->condexec_mask) {
11324 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11325 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
11326 } else {
11327 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
11328 gen_logic_CC(tmp2);
11330 break;
11331 case 0x8: /* tst */
11332 tcg_gen_and_i32(tmp, tmp, tmp2);
11333 gen_logic_CC(tmp);
11334 rd = 16;
11335 break;
11336 case 0x9: /* neg */
11337 if (s->condexec_mask)
11338 tcg_gen_neg_i32(tmp, tmp2);
11339 else
11340 gen_sub_CC(tmp, tmp, tmp2);
11341 break;
11342 case 0xa: /* cmp */
11343 gen_sub_CC(tmp, tmp, tmp2);
11344 rd = 16;
11345 break;
11346 case 0xb: /* cmn */
11347 gen_add_CC(tmp, tmp, tmp2);
11348 rd = 16;
11349 break;
11350 case 0xc: /* orr */
11351 tcg_gen_or_i32(tmp, tmp, tmp2);
11352 if (!s->condexec_mask)
11353 gen_logic_CC(tmp);
11354 break;
11355 case 0xd: /* mul */
11356 tcg_gen_mul_i32(tmp, tmp, tmp2);
11357 if (!s->condexec_mask)
11358 gen_logic_CC(tmp);
11359 break;
11360 case 0xe: /* bic */
11361 tcg_gen_andc_i32(tmp, tmp, tmp2);
11362 if (!s->condexec_mask)
11363 gen_logic_CC(tmp);
11364 break;
11365 case 0xf: /* mvn */
11366 tcg_gen_not_i32(tmp2, tmp2);
11367 if (!s->condexec_mask)
11368 gen_logic_CC(tmp2);
11369 val = 1;
11370 rm = rd;
11371 break;
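/* rd was forced to 16 for the flag-only ops (tst/cmp/cmn); val is set when
 * the result ended up in tmp2 (the shifts/rotates and mvn), in which case
 * it is written back through rm, which was swapped to the destination above.
 */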
11373 if (rd != 16) {
11374 if (val) {
11375 store_reg(s, rm, tmp2);
11376 if (op != 0xf)
11377 tcg_temp_free_i32(tmp);
11378 } else {
11379 store_reg(s, rd, tmp);
11380 tcg_temp_free_i32(tmp2);
11382 } else {
11383 tcg_temp_free_i32(tmp);
11384 tcg_temp_free_i32(tmp2);
11386 break;
11388 case 5:
11389 /* load/store register offset. */
11390 rd = insn & 7;
11391 rn = (insn >> 3) & 7;
11392 rm = (insn >> 6) & 7;
11393 op = (insn >> 9) & 7;
11394 addr = load_reg(s, rn);
11395 tmp = load_reg(s, rm);
11396 tcg_gen_add_i32(addr, addr, tmp);
11397 tcg_temp_free_i32(tmp);
11399 if (op < 3) { /* store */
11400 tmp = load_reg(s, rd);
11401 } else {
11402 tmp = tcg_temp_new_i32();
11405 switch (op) {
11406 case 0: /* str */
11407 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11408 break;
11409 case 1: /* strh */
11410 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11411 break;
11412 case 2: /* strb */
11413 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11414 break;
11415 case 3: /* ldrsb */
11416 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11417 break;
11418 case 4: /* ldr */
11419 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11420 break;
11421 case 5: /* ldrh */
11422 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11423 break;
11424 case 6: /* ldrb */
11425 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11426 break;
11427 case 7: /* ldrsh */
11428 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11429 break;
11431 if (op >= 3) { /* load */
11432 store_reg(s, rd, tmp);
11433 } else {
11434 tcg_temp_free_i32(tmp);
11436 tcg_temp_free_i32(addr);
11437 break;
11439 case 6:
11440 /* load/store word immediate offset */
11441 rd = insn & 7;
11442 rn = (insn >> 3) & 7;
11443 addr = load_reg(s, rn);
11444 val = (insn >> 4) & 0x7c;
11445 tcg_gen_addi_i32(addr, addr, val);
11447 if (insn & (1 << 11)) {
11448 /* load */
11449 tmp = tcg_temp_new_i32();
11450 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11451 store_reg(s, rd, tmp);
11452 } else {
11453 /* store */
11454 tmp = load_reg(s, rd);
11455 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11456 tcg_temp_free_i32(tmp);
11458 tcg_temp_free_i32(addr);
11459 break;
11461 case 7:
11462 /* load/store byte immediate offset */
11463 rd = insn & 7;
11464 rn = (insn >> 3) & 7;
11465 addr = load_reg(s, rn);
11466 val = (insn >> 6) & 0x1f;
11467 tcg_gen_addi_i32(addr, addr, val);
11469 if (insn & (1 << 11)) {
11470 /* load */
11471 tmp = tcg_temp_new_i32();
11472 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11473 store_reg(s, rd, tmp);
11474 } else {
11475 /* store */
11476 tmp = load_reg(s, rd);
11477 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11478 tcg_temp_free_i32(tmp);
11480 tcg_temp_free_i32(addr);
11481 break;
11483 case 8:
11484 /* load/store halfword immediate offset */
11485 rd = insn & 7;
11486 rn = (insn >> 3) & 7;
11487 addr = load_reg(s, rn);
11488 val = (insn >> 5) & 0x3e;
11489 tcg_gen_addi_i32(addr, addr, val);
11491 if (insn & (1 << 11)) {
11492 /* load */
11493 tmp = tcg_temp_new_i32();
11494 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11495 store_reg(s, rd, tmp);
11496 } else {
11497 /* store */
11498 tmp = load_reg(s, rd);
11499 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11500 tcg_temp_free_i32(tmp);
11502 tcg_temp_free_i32(addr);
11503 break;
11505 case 9:
11506 /* load/store from stack */
11507 rd = (insn >> 8) & 7;
11508 addr = load_reg(s, 13);
11509 val = (insn & 0xff) * 4;
11510 tcg_gen_addi_i32(addr, addr, val);
11512 if (insn & (1 << 11)) {
11513 /* load */
11514 tmp = tcg_temp_new_i32();
11515 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11516 store_reg(s, rd, tmp);
11517 } else {
11518 /* store */
11519 tmp = load_reg(s, rd);
11520 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
11521 tcg_temp_free_i32(tmp);
11523 tcg_temp_free_i32(addr);
11524 break;
11526 case 10:
11527 /* add to high reg */
11528 rd = (insn >> 8) & 7;
11529 if (insn & (1 << 11)) {
11530 /* SP */
11531 tmp = load_reg(s, 13);
11532 } else {
11533 /* PC. bit 1 is ignored. */
11534 tmp = tcg_temp_new_i32();
11535 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
11537 val = (insn & 0xff) * 4;
11538 tcg_gen_addi_i32(tmp, tmp, val);
11539 store_reg(s, rd, tmp);
11540 break;
11542 case 11:
11543 /* misc */
11544 op = (insn >> 8) & 0xf;
11545 switch (op) {
11546 case 0:
11547 /* adjust stack pointer */
11548 tmp = load_reg(s, 13);
11549 val = (insn & 0x7f) * 4;
11550 if (insn & (1 << 7))
11551 val = -(int32_t)val;
11552 tcg_gen_addi_i32(tmp, tmp, val);
11553 store_reg(s, 13, tmp);
11554 break;
11556 case 2: /* sign/zero extend. */
11557 ARCH(6);
11558 rd = insn & 7;
11559 rm = (insn >> 3) & 7;
11560 tmp = load_reg(s, rm);
11561 switch ((insn >> 6) & 3) {
11562 case 0: gen_sxth(tmp); break;
11563 case 1: gen_sxtb(tmp); break;
11564 case 2: gen_uxth(tmp); break;
11565 case 3: gen_uxtb(tmp); break;
11567 store_reg(s, rd, tmp);
11568 break;
11569 case 4: case 5: case 0xc: case 0xd:
11570 /* push/pop */
11571 addr = load_reg(s, 13);
11572 if (insn & (1 << 8))
11573 offset = 4;
11574 else
11575 offset = 0;
11576 for (i = 0; i < 8; i++) {
11577 if (insn & (1 << i))
11578 offset += 4;
11580 if ((insn & (1 << 11)) == 0) {
11581 tcg_gen_addi_i32(addr, addr, -offset);
11583 for (i = 0; i < 8; i++) {
11584 if (insn & (1 << i)) {
11585 if (insn & (1 << 11)) {
11586 /* pop */
11587 tmp = tcg_temp_new_i32();
11588 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11589 store_reg(s, i, tmp);
11590 } else {
11591 /* push */
11592 tmp = load_reg(s, i);
11593 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11594 tcg_temp_free_i32(tmp);
11596 /* advance to the next address. */
11597 tcg_gen_addi_i32(addr, addr, 4);
11600 TCGV_UNUSED_I32(tmp);
11601 if (insn & (1 << 8)) {
11602 if (insn & (1 << 11)) {
11603 /* pop pc */
11604 tmp = tcg_temp_new_i32();
11605 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11606 /* don't set the pc until the rest of the instruction
11607 has completed */
11608 } else {
11609 /* push lr */
11610 tmp = load_reg(s, 14);
11611 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11612 tcg_temp_free_i32(tmp);
11614 tcg_gen_addi_i32(addr, addr, 4);
11616 if ((insn & (1 << 11)) == 0) {
11617 tcg_gen_addi_i32(addr, addr, -offset);
11619 /* write back the new stack pointer */
11620 store_reg(s, 13, addr);
11621 /* set the new PC value */
11622 if ((insn & 0x0900) == 0x0900) {
11623 store_reg_from_load(s, 15, tmp);
11625 break;
11627 case 1: case 3: case 9: case 11: /* czb */
11628 rm = insn & 7;
11629 tmp = load_reg(s, rm);
11630 s->condlabel = gen_new_label();
11631 s->condjmp = 1;
11632 if (insn & (1 << 11))
11633 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
11634 else
11635 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
11636 tcg_temp_free_i32(tmp);
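/* The CB{N}Z offset is insn[9]:insn[7:3]:'0', zero-extended; the target is
 * relative to this insn's address + 4 (s->pc already points past the insn).
 */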
11637 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11638 val = (uint32_t)s->pc + 2;
11639 val += offset;
11640 gen_jmp(s, val);
11641 break;
11643 case 15: /* IT, nop-hint. */
11644 if ((insn & 0xf) == 0) {
11645 gen_nop_hint(s, (insn >> 4) & 0xf);
11646 break;
11648 /* If Then. */
11649 s->condexec_cond = (insn >> 4) & 0xe;
11650 s->condexec_mask = insn & 0x1f;
11651 /* No actual code generated for this insn, just setup state. */
11652 break;
11654 case 0xe: /* bkpt */
11656 int imm8 = extract32(insn, 0, 8);
11657 ARCH(5);
11658 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11659 default_exception_el(s));
11660 break;
11663 case 0xa: /* rev, and hlt */
11665 int op1 = extract32(insn, 6, 2);
11667 if (op1 == 2) {
11668 /* HLT */
11669 int imm6 = extract32(insn, 0, 6);
11671 gen_hlt(s, imm6);
11672 break;
11675 /* Otherwise this is rev */
11676 ARCH(6);
11677 rn = (insn >> 3) & 0x7;
11678 rd = insn & 0x7;
11679 tmp = load_reg(s, rn);
11680 switch (op1) {
11681 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
11682 case 1: gen_rev16(tmp); break;
11683 case 3: gen_revsh(tmp); break;
11684 default:
11685 g_assert_not_reached();
11687 store_reg(s, rd, tmp);
11688 break;
11691 case 6:
11692 switch ((insn >> 5) & 7) {
11693 case 2:
11694 /* setend */
11695 ARCH(6);
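/* Only emit the helper (and end the TB) when the requested data endianness
 * differs from the one this code was translated for; otherwise SETEND is a
 * no-op here.
 */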
11696 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11697 gen_helper_setend(cpu_env);
11698 s->base.is_jmp = DISAS_UPDATE;
11700 break;
11701 case 3:
11702 /* cps */
11703 ARCH(6);
11704 if (IS_USER(s)) {
11705 break;
11707 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11708 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11709 /* FAULTMASK */
11710 if (insn & 1) {
11711 addr = tcg_const_i32(19);
11712 gen_helper_v7m_msr(cpu_env, addr, tmp);
11713 tcg_temp_free_i32(addr);
11715 /* PRIMASK */
11716 if (insn & 2) {
11717 addr = tcg_const_i32(16);
11718 gen_helper_v7m_msr(cpu_env, addr, tmp);
11719 tcg_temp_free_i32(addr);
11721 tcg_temp_free_i32(tmp);
11722 gen_lookup_tb(s);
11723 } else {
11724 if (insn & (1 << 4)) {
11725 shift = CPSR_A | CPSR_I | CPSR_F;
11726 } else {
11727 shift = 0;
11729 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
11731 break;
11732 default:
11733 goto undef;
11735 break;
11737 default:
11738 goto undef;
11740 break;
11742 case 12:
11744 /* load/store multiple */
11745 TCGv_i32 loaded_var;
11746 TCGV_UNUSED_I32(loaded_var);
11747 rn = (insn >> 8) & 0x7;
11748 addr = load_reg(s, rn);
11749 for (i = 0; i < 8; i++) {
11750 if (insn & (1 << i)) {
11751 if (insn & (1 << 11)) {
11752 /* load */
11753 tmp = tcg_temp_new_i32();
11754 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11755 if (i == rn) {
11756 loaded_var = tmp;
11757 } else {
11758 store_reg(s, i, tmp);
11760 } else {
11761 /* store */
11762 tmp = load_reg(s, i);
11763 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11764 tcg_temp_free_i32(tmp);
11766 /* advance to the next address */
11767 tcg_gen_addi_i32(addr, addr, 4);
11770 if ((insn & (1 << rn)) == 0) {
11771 /* base reg not in list: base register writeback */
11772 store_reg(s, rn, addr);
11773 } else {
11774 /* base reg in list: if load, complete it now */
11775 if (insn & (1 << 11)) {
11776 store_reg(s, rn, loaded_var);
11778 tcg_temp_free_i32(addr);
11780 break;
11782 case 13:
11783 /* conditional branch or swi */
11784 cond = (insn >> 8) & 0xf;
11785 if (cond == 0xe)
11786 goto undef;
11788 if (cond == 0xf) {
11789 /* swi */
11790 gen_set_pc_im(s, s->pc);
11791 s->svc_imm = extract32(insn, 0, 8);
11792 s->base.is_jmp = DISAS_SWI;
11793 break;
11795 /* generate a conditional jump to next instruction */
11796 s->condlabel = gen_new_label();
11797 arm_gen_test_cc(cond ^ 1, s->condlabel);
11798 s->condjmp = 1;
11800 /* jump to the offset */
11801 val = (uint32_t)s->pc + 2;
11802 offset = ((int32_t)insn << 24) >> 24;
11803 val += offset << 1;
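/* ((int32_t)insn << 24) >> 24 sign-extended the 8-bit immediate; the target
 * works out to this insn's address + 4 + SignExtend(imm8) * 2.
 */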
11804 gen_jmp(s, val);
11805 break;
11807 case 14:
11808 if (insn & (1 << 11)) {
11809 if (disas_thumb2_insn(env, s, insn))
11810 goto undef32;
11811 break;
11813 /* unconditional branch */
11814 val = (uint32_t)s->pc;
11815 offset = ((int32_t)insn << 21) >> 21;
11816 val += (offset << 1) + 2;
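/* ((int32_t)insn << 21) >> 21 sign-extends imm11; since s->pc is this
 * insn's address + 2, the target is the insn address + 4 + SignExtend(imm11) * 2.
 */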
11817 gen_jmp(s, val);
11818 break;
11820 case 15:
11821 if (disas_thumb2_insn(env, s, insn))
11822 goto undef32;
11823 break;
11825 return;
11826 undef32:
11827 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11828 default_exception_el(s));
11829 return;
11830 illegal_op:
11831 undef:
11832 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11833 default_exception_el(s));
11836 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11838 /* Return true if the insn at dc->pc might cross a page boundary.
11839 * (False positives are OK, false negatives are not.)
11840 */
11841 uint16_t insn;
11843 if ((s->pc & 3) == 0) {
11844 /* At a 4-aligned address we can't be crossing a page */
11845 return false;
11848 /* This must be a Thumb insn */
11849 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
11851 if ((insn >> 11) >= 0x1d) {
11852 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
11853 * First half of a 32-bit Thumb insn. Thumb-1 cores might
11854 * end up actually treating this as two 16-bit insns (see the
11855 * code at the start of disas_thumb2_insn()) but we don't bother
11856 * to check for that as it is unlikely, and false positives here
11857 * are harmless.
11858 */
11859 return true;
11861 /* Definitely a 16-bit insn, can't be crossing a page. */
11862 return false;
11865 static int arm_tr_init_disas_context(DisasContextBase *dcbase,
11866 CPUState *cs, int max_insns)
11868 DisasContext *dc = container_of(dcbase, DisasContext, base);
11869 CPUARMState *env = cs->env_ptr;
11870 ARMCPU *cpu = arm_env_get_cpu(env);
11872 dc->pc = dc->base.pc_first;
11873 dc->condjmp = 0;
11875 dc->aarch64 = 0;
11876 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11877 * there is no secure EL1, so we route exceptions to EL3.
11878 */
11879 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11880 !arm_el_is_aa64(env, 3);
11881 dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
11882 dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
11883 dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
11884 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
11885 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
11886 dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
11887 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
11888 #if !defined(CONFIG_USER_ONLY)
11889 dc->user = (dc->current_el == 0);
11890 #endif
11891 dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
11892 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
11893 dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
11894 dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
11895 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
11896 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
11897 dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
11898 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
11899 regime_is_secure(env, dc->mmu_idx);
11900 dc->cp_regs = cpu->cp_regs;
11901 dc->features = env->features;
11903 /* Single step state. The code-generation logic here is:
11904 * SS_ACTIVE == 0:
11905 * generate code with no special handling for single-stepping (except
11906 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11907 * this happens anyway because those changes are all system register or
11908 * PSTATE writes).
11909 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11910 * emit code for one insn
11911 * emit code to clear PSTATE.SS
11912 * emit code to generate software step exception for completed step
11913 * end TB (as usual for having generated an exception)
11914 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11915 * emit code to generate a software step exception
11916 * end the TB
11917 */
11918 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
11919 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
11920 dc->is_ldex = false;
11921 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
11923 dc->next_page_start =
11924 (dc->base.pc_first & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
11926 /* If architectural single step active, limit to 1. */
11927 if (is_singlestepping(dc)) {
11928 max_insns = 1;
11931 /* ARM is a fixed-length ISA. Bound the number of insns to execute
11932 to those left on the page. */
11933 if (!dc->thumb) {
11934 int bound = (dc->next_page_start - dc->base.pc_first) / 4;
11935 max_insns = MIN(max_insns, bound);
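/* Thumb insns are variable length, so the equivalent page-boundary check
 * is done per instruction in thumb_tr_translate_insn() instead.
 */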
11938 cpu_F0s = tcg_temp_new_i32();
11939 cpu_F1s = tcg_temp_new_i32();
11940 cpu_F0d = tcg_temp_new_i64();
11941 cpu_F1d = tcg_temp_new_i64();
11942 cpu_V0 = cpu_F0d;
11943 cpu_V1 = cpu_F1d;
11944 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
11945 cpu_M0 = tcg_temp_new_i64();
11947 return max_insns;
11950 static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
11952 DisasContext *dc = container_of(dcbase, DisasContext, base);
11954 /* A note on handling of the condexec (IT) bits:
11956 * We want to avoid the overhead of having to write the updated condexec
11957 * bits back to the CPUARMState for every instruction in an IT block. So:
11958 * (1) if the condexec bits are not already zero then we write
11959 * zero back into the CPUARMState now. This avoids complications trying
11960 * to do it at the end of the block. (For example if we don't do this
11961 * it's hard to identify whether we can safely skip writing condexec
11962 * at the end of the TB, which we definitely want to do for the case
11963 * where a TB doesn't do anything with the IT state at all.)
11964 * (2) if we are going to leave the TB then we call gen_set_condexec()
11965 * which will write the correct value into CPUARMState if zero is wrong.
11966 * This is done both for leaving the TB at the end, and for leaving
11967 * it because of an exception we know will happen, which is done in
11968 * gen_exception_insn(). The latter is necessary because we need to
11969 * leave the TB with the PC/IT state just prior to execution of the
11970 * instruction which caused the exception.
11971 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
11972 * then the CPUARMState will be wrong and we need to reset it.
11973 * This is handled in the same way as restoration of the
11974 * PC in these situations; we save the value of the condexec bits
11975 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
11976 * then uses this to restore them after an exception.
11978 * Note that there are no instructions which can read the condexec
11979 * bits, and none which can write non-static values to them, so
11980 * we don't need to care about whether CPUARMState is correct in the
11981 * middle of a TB.
11982 */
11984 /* Reset the conditional execution bits immediately. This avoids
11985 complications trying to do it at the end of the block. */
11986 if (dc->condexec_mask || dc->condexec_cond) {
11987 TCGv_i32 tmp = tcg_temp_new_i32();
11988 tcg_gen_movi_i32(tmp, 0);
11989 store_cpu_field(tmp, condexec_bits);
11991 tcg_clear_temp_count();
11994 static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
11996 DisasContext *dc = container_of(dcbase, DisasContext, base);
11998 dc->insn_start_idx = tcg_op_buf_count();
11999 tcg_gen_insn_start(dc->pc,
12000 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
12001 0);
12004 static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
12005 const CPUBreakpoint *bp)
12007 DisasContext *dc = container_of(dcbase, DisasContext, base);
12009 if (bp->flags & BP_CPU) {
12010 gen_set_condexec(dc);
12011 gen_set_pc_im(dc, dc->pc);
12012 gen_helper_check_breakpoints(cpu_env);
12013 /* End the TB early; it's likely not going to be executed */
12014 dc->base.is_jmp = DISAS_TOO_MANY;
12015 } else {
12016 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
12017 /* The address covered by the breakpoint must be
12018 included in [tb->pc, tb->pc + tb->size) in order
12019 to for it to be properly cleared -- thus we
12020 increment the PC here so that the logic setting
12021 tb->size below does the right thing. */
12022 /* TODO: Advance PC by correct instruction length to
12023 * avoid disassembler error messages */
12024 dc->pc += 2;
12025 dc->base.is_jmp = DISAS_NORETURN;
12028 return true;
12031 static bool arm_pre_translate_insn(DisasContext *dc)
12033 #ifdef CONFIG_USER_ONLY
12034 /* Intercept jump to the magic kernel page. */
12035 if (dc->pc >= 0xffff0000) {
12036 /* We always get here via a jump, so know we are not in a
12037 conditional execution block. */
12038 gen_exception_internal(EXCP_KERNEL_TRAP);
12039 dc->base.is_jmp = DISAS_NORETURN;
12040 return true;
12042 #endif
12044 if (dc->ss_active && !dc->pstate_ss) {
12045 /* Singlestep state is Active-pending.
12046 * If we're in this state at the start of a TB then either
12047 * a) we just took an exception to an EL which is being debugged
12048 * and this is the first insn in the exception handler
12049 * b) debug exceptions were masked and we just unmasked them
12050 * without changing EL (eg by clearing PSTATE.D)
12051 * In either case we're going to take a swstep exception in the
12052 * "did not step an insn" case, and so the syndrome ISV and EX
12053 * bits should be zero.
12054 */
12055 assert(dc->base.num_insns == 1);
12056 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
12057 default_exception_el(dc));
12058 dc->base.is_jmp = DISAS_NORETURN;
12059 return true;
12062 return false;
12065 static void arm_post_translate_insn(DisasContext *dc)
12067 if (dc->condjmp && !dc->base.is_jmp) {
12068 gen_set_label(dc->condlabel);
12069 dc->condjmp = 0;
12071 dc->base.pc_next = dc->pc;
12072 translator_loop_temp_check(&dc->base);
12075 static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12077 DisasContext *dc = container_of(dcbase, DisasContext, base);
12078 CPUARMState *env = cpu->env_ptr;
12079 unsigned int insn;
12081 if (arm_pre_translate_insn(dc)) {
12082 return;
12085 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
12086 dc->pc += 4;
12087 disas_arm_insn(dc, insn);
12089 arm_post_translate_insn(dc);
12091 /* ARM is a fixed-length ISA. We performed the cross-page check
12092 in init_disas_context by adjusting max_insns. */
12095 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12097 DisasContext *dc = container_of(dcbase, DisasContext, base);
12098 CPUARMState *env = cpu->env_ptr;
12100 if (arm_pre_translate_insn(dc)) {
12101 return;
12104 disas_thumb_insn(env, dc);
12106 /* Advance the Thumb condexec condition. */
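/* This mirrors the architectural ITAdvance(): the stored mask holds the
 * remaining condition-LSB bits, its top bit supplies the condition for the
 * next insn, the mask shifts left, and once it is empty the IT block is
 * over and the condition is cleared.
 */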
12107 if (dc->condexec_mask) {
12108 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
12109 ((dc->condexec_mask >> 4) & 1));
12110 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
12111 if (dc->condexec_mask == 0) {
12112 dc->condexec_cond = 0;
12116 arm_post_translate_insn(dc);
12118 /* Thumb is a variable-length ISA. Stop translation when the next insn
12119 * will touch a new page. This ensures that prefetch aborts occur at
12120 * the right place.
12122 * We want to stop the TB if the next insn starts in a new page,
12123 * or if it spans between this page and the next. This means that
12124 * if we're looking at the last halfword in the page we need to
12125 * see if it's a 16-bit Thumb insn (which will fit in this TB)
12126 * or a 32-bit Thumb insn (which won't).
12127 * This is to avoid generating a silly TB with a single 16-bit insn
12128 * in it at the end of this page (which would execute correctly
12129 * but isn't very efficient).
12130 */
12131 if (dc->base.is_jmp == DISAS_NEXT
12132 && (dc->pc >= dc->next_page_start
12133 || (dc->pc >= dc->next_page_start - 3
12134 && insn_crosses_page(env, dc)))) {
12135 dc->base.is_jmp = DISAS_TOO_MANY;
12139 static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
12141 DisasContext *dc = container_of(dcbase, DisasContext, base);
12143 if (dc->base.tb->cflags & CF_LAST_IO && dc->condjmp) {
12144 /* FIXME: This can theoretically happen with self-modifying code. */
12145 cpu_abort(cpu, "IO on conditional branch instruction");
12148 /* At this stage dc->condjmp will only be set when the skipped
12149 instruction was a conditional branch or trap, and the PC has
12150 already been written. */
12151 gen_set_condexec(dc);
12152 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
12153 /* Exception return branches need some special case code at the
12154 * end of the TB, which is complex enough that it has to
12155 * handle the single-step vs not and the condition-failed
12156 * insn codepath itself.
12157 */
12158 gen_bx_excret_final_code(dc);
12159 } else if (unlikely(is_singlestepping(dc))) {
12160 /* Unconditional and "condition passed" instruction codepath. */
12161 switch (dc->base.is_jmp) {
12162 case DISAS_SWI:
12163 gen_ss_advance(dc);
12164 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12165 default_exception_el(dc));
12166 break;
12167 case DISAS_HVC:
12168 gen_ss_advance(dc);
12169 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
12170 break;
12171 case DISAS_SMC:
12172 gen_ss_advance(dc);
12173 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
12174 break;
12175 case DISAS_NEXT:
12176 case DISAS_TOO_MANY:
12177 case DISAS_UPDATE:
12178 gen_set_pc_im(dc, dc->pc);
12179 /* fall through */
12180 default:
12181 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
12182 gen_singlestep_exception(dc);
12183 break;
12184 case DISAS_NORETURN:
12185 break;
12187 } else {
12188 /* While branches must always occur at the end of an IT block,
12189 there are a few other things that can cause us to terminate
12190 the TB in the middle of an IT block:
12191 - Exception generating instructions (bkpt, swi, undefined).
12192 - Page boundaries.
12193 - Hardware watchpoints.
12194 Hardware breakpoints have already been handled and skip this code.
12195 */
12196 switch(dc->base.is_jmp) {
12197 case DISAS_NEXT:
12198 case DISAS_TOO_MANY:
12199 gen_goto_tb(dc, 1, dc->pc);
12200 break;
12201 case DISAS_JUMP:
12202 gen_goto_ptr();
12203 break;
12204 case DISAS_UPDATE:
12205 gen_set_pc_im(dc, dc->pc);
12206 /* fall through */
12207 default:
12208 /* indicate that the hash table must be used to find the next TB */
12209 tcg_gen_exit_tb(0);
12210 break;
12211 case DISAS_NORETURN:
12212 /* nothing more to generate */
12213 break;
12214 case DISAS_WFI:
12215 gen_helper_wfi(cpu_env);
12216 /* The helper doesn't necessarily throw an exception, but we
12217 * must go back to the main loop to check for interrupts anyway.
12218 */
12219 tcg_gen_exit_tb(0);
12220 break;
12221 case DISAS_WFE:
12222 gen_helper_wfe(cpu_env);
12223 break;
12224 case DISAS_YIELD:
12225 gen_helper_yield(cpu_env);
12226 break;
12227 case DISAS_SWI:
12228 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12229 default_exception_el(dc));
12230 break;
12231 case DISAS_HVC:
12232 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
12233 break;
12234 case DISAS_SMC:
12235 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
12236 break;
12240 if (dc->condjmp) {
12241 /* "Condition failed" instruction codepath for the branch/trap insn */
12242 gen_set_label(dc->condlabel);
12243 gen_set_condexec(dc);
12244 if (unlikely(is_singlestepping(dc))) {
12245 gen_set_pc_im(dc, dc->pc);
12246 gen_singlestep_exception(dc);
12247 } else {
12248 gen_goto_tb(dc, 1, dc->pc);
12252 /* Functions above can change dc->pc, so re-align db->pc_next */
12253 dc->base.pc_next = dc->pc;
12256 static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
12258 DisasContext *dc = container_of(dcbase, DisasContext, base);
12260 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
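/* Disassembly flags: bit 0 is the Thumb state, bit 1 is SCTLR.B (BE32
 * byte order), matching how the translator fetched the code bytes.
 */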
12261 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size,
12262 dc->thumb | (dc->sctlr_b << 1));
12265 static const TranslatorOps arm_translator_ops = {
12266 .init_disas_context = arm_tr_init_disas_context,
12267 .tb_start = arm_tr_tb_start,
12268 .insn_start = arm_tr_insn_start,
12269 .breakpoint_check = arm_tr_breakpoint_check,
12270 .translate_insn = arm_tr_translate_insn,
12271 .tb_stop = arm_tr_tb_stop,
12272 .disas_log = arm_tr_disas_log,
12275 static const TranslatorOps thumb_translator_ops = {
12276 .init_disas_context = arm_tr_init_disas_context,
12277 .tb_start = arm_tr_tb_start,
12278 .insn_start = arm_tr_insn_start,
12279 .breakpoint_check = arm_tr_breakpoint_check,
12280 .translate_insn = thumb_tr_translate_insn,
12281 .tb_stop = arm_tr_tb_stop,
12282 .disas_log = arm_tr_disas_log,
12285 /* generate intermediate code for basic block 'tb'. */
12286 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
12288 DisasContext dc;
12289 const TranslatorOps *ops = &arm_translator_ops;
12291 if (ARM_TBFLAG_THUMB(tb->flags)) {
12292 ops = &thumb_translator_ops;
12294 #ifdef TARGET_AARCH64
12295 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
12296 ops = &aarch64_translator_ops;
12298 #endif
12300 translator_loop(ops, &dc.base, cpu, tb);
12303 static const char *cpu_mode_names[16] = {
12304 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
12305 "???", "???", "hyp", "und", "???", "???", "???", "sys"
12308 void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
12309 int flags)
12311 ARMCPU *cpu = ARM_CPU(cs);
12312 CPUARMState *env = &cpu->env;
12313 int i;
12315 if (is_a64(env)) {
12316 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
12317 return;
12320 for(i=0;i<16;i++) {
12321 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
12322 if ((i % 4) == 3)
12323 cpu_fprintf(f, "\n");
12324 else
12325 cpu_fprintf(f, " ");
12328 if (arm_feature(env, ARM_FEATURE_M)) {
12329 uint32_t xpsr = xpsr_read(env);
12330 const char *mode;
12331 const char *ns_status = "";
12333 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
12334 ns_status = env->v7m.secure ? "S " : "NS ";
12337 if (xpsr & XPSR_EXCP) {
12338 mode = "handler";
12339 } else {
12340 if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
12341 mode = "unpriv-thread";
12342 } else {
12343 mode = "priv-thread";
12347 cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
12348 xpsr,
12349 xpsr & XPSR_N ? 'N' : '-',
12350 xpsr & XPSR_Z ? 'Z' : '-',
12351 xpsr & XPSR_C ? 'C' : '-',
12352 xpsr & XPSR_V ? 'V' : '-',
12353 xpsr & XPSR_T ? 'T' : 'A',
12354 ns_status,
12355 mode);
12356 } else {
12357 uint32_t psr = cpsr_read(env);
12358 const char *ns_status = "";
12360 if (arm_feature(env, ARM_FEATURE_EL3) &&
12361 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
12362 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
12365 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
12366 psr,
12367 psr & CPSR_N ? 'N' : '-',
12368 psr & CPSR_Z ? 'Z' : '-',
12369 psr & CPSR_C ? 'C' : '-',
12370 psr & CPSR_V ? 'V' : '-',
12371 psr & CPSR_T ? 'T' : 'A',
12372 ns_status,
12373 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
12376 if (flags & CPU_DUMP_FPU) {
12377 int numvfpregs = 0;
12378 if (arm_feature(env, ARM_FEATURE_VFP)) {
12379 numvfpregs += 16;
12381 if (arm_feature(env, ARM_FEATURE_VFP3)) {
12382 numvfpregs += 16;
12384 for (i = 0; i < numvfpregs; i++) {
12385 uint64_t v = float64_val(env->vfp.regs[i]);
12386 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
12387 i * 2, (uint32_t)v,
12388 i * 2 + 1, (uint32_t)(v >> 32),
12389 i, v);
12391 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
12395 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12396 target_ulong *data)
12398 if (is_a64(env)) {
12399 env->pc = data[0];
12400 env->condexec_bits = 0;
12401 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
12402 } else {
12403 env->regs[15] = data[0];
12404 env->condexec_bits = data[1];
12405 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;