[qemu/kevin.git] / target-arm / translate.c
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
27 #include "tcg-op.h"
28 #include "qemu/log.h"
29 #include "qemu/bitops.h"
30 #include "arm_ldst.h"
31 #include "exec/semihost.h"
33 #include "exec/helper-proto.h"
34 #include "exec/helper-gen.h"
36 #include "trace-tcg.h"
37 #include "exec/log.h"
40 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
41 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
42 /* currently all emulated v5 cores are also v5TE, so don't bother */
43 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
44 #define ENABLE_ARCH_5J 0
45 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
46 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
47 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
48 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
49 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
51 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
53 #include "translate.h"
55 #if defined(CONFIG_USER_ONLY)
56 #define IS_USER(s) 1
57 #else
58 #define IS_USER(s) (s->user)
59 #endif
61 TCGv_env cpu_env;
62 /* We reuse the same 64-bit temporaries for efficiency. */
63 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
64 static TCGv_i32 cpu_R[16];
65 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
66 TCGv_i64 cpu_exclusive_addr;
67 TCGv_i64 cpu_exclusive_val;
68 #ifdef CONFIG_USER_ONLY
69 TCGv_i64 cpu_exclusive_test;
70 TCGv_i32 cpu_exclusive_info;
71 #endif
73 /* FIXME: These should be removed. */
74 static TCGv_i32 cpu_F0s, cpu_F1s;
75 static TCGv_i64 cpu_F0d, cpu_F1d;
77 #include "exec/gen-icount.h"
79 static const char *regnames[] =
80 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
81 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
83 /* initialize TCG globals. */
84 void arm_translate_init(void)
86 int i;
88 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
89 tcg_ctx.tcg_env = cpu_env;
91 for (i = 0; i < 16; i++) {
92 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
93 offsetof(CPUARMState, regs[i]),
94 regnames[i]);
96 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
97 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
98 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
99 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
101 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
102 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
103 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
104 offsetof(CPUARMState, exclusive_val), "exclusive_val");
105 #ifdef CONFIG_USER_ONLY
106 cpu_exclusive_test = tcg_global_mem_new_i64(cpu_env,
107 offsetof(CPUARMState, exclusive_test), "exclusive_test");
108 cpu_exclusive_info = tcg_global_mem_new_i32(cpu_env,
109 offsetof(CPUARMState, exclusive_info), "exclusive_info");
110 #endif
112 a64_translate_init();
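/* Illustrative note: the TCG globals registered above let generated code
 * name guest state directly.  For instance, a hypothetical "ADD r0, r1, r2"
 * could be emitted simply as
 *     tcg_gen_add_i32(cpu_R[0], cpu_R[1], cpu_R[2]);
 * with the values backed by CPUARMState.regs[] between translation blocks.
 */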
115 static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
117 /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
118 * insns:
119 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
120 * otherwise, access as if at PL0.
122 switch (s->mmu_idx) {
123 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
124 case ARMMMUIdx_S12NSE0:
125 case ARMMMUIdx_S12NSE1:
126 return ARMMMUIdx_S12NSE0;
127 case ARMMMUIdx_S1E3:
128 case ARMMMUIdx_S1SE0:
129 case ARMMMUIdx_S1SE1:
130 return ARMMMUIdx_S1SE0;
131 case ARMMMUIdx_S2NS:
132 default:
133 g_assert_not_reached();
137 static inline TCGv_i32 load_cpu_offset(int offset)
139 TCGv_i32 tmp = tcg_temp_new_i32();
140 tcg_gen_ld_i32(tmp, cpu_env, offset);
141 return tmp;
144 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
146 static inline void store_cpu_offset(TCGv_i32 var, int offset)
148 tcg_gen_st_i32(var, cpu_env, offset);
149 tcg_temp_free_i32(var);
152 #define store_cpu_field(var, name) \
153 store_cpu_offset(var, offsetof(CPUARMState, name))
155 /* Set a variable to the value of a CPU register. */
156 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
158 if (reg == 15) {
159 uint32_t addr;
160 /* normally, since we updated PC, we need only to add one insn */
161 if (s->thumb)
162 addr = (long)s->pc + 2;
163 else
164 addr = (long)s->pc + 4;
165 tcg_gen_movi_i32(var, addr);
166 } else {
167 tcg_gen_mov_i32(var, cpu_R[reg]);
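/* Example: for an ARM-state instruction at 0x1000, s->pc has already been
 * advanced to 0x1004, so reading r15 here yields 0x1008, matching the
 * architectural "PC reads as current instruction + 8" rule (+4 in Thumb). */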
171 /* Create a new temporary and set it to the value of a CPU register. */
172 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
174 TCGv_i32 tmp = tcg_temp_new_i32();
175 load_reg_var(s, tmp, reg);
176 return tmp;
179 /* Set a CPU register. The source must be a temporary and will be
180 marked as dead. */
181 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
183 if (reg == 15) {
184 /* In Thumb mode, we must ignore bit 0.
185 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
186 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
187 * We choose to ignore [1:0] in ARM mode for all architecture versions.
189 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
190 s->is_jmp = DISAS_JUMP;
192 tcg_gen_mov_i32(cpu_R[reg], var);
193 tcg_temp_free_i32(var);
196 /* Value extensions. */
197 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
198 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
199 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
200 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
202 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
203 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
206 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
208 TCGv_i32 tmp_mask = tcg_const_i32(mask);
209 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
210 tcg_temp_free_i32(tmp_mask);
212 /* Set NZCV flags from the high 4 bits of var. */
213 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
215 static void gen_exception_internal(int excp)
217 TCGv_i32 tcg_excp = tcg_const_i32(excp);
219 assert(excp_is_internal(excp));
220 gen_helper_exception_internal(cpu_env, tcg_excp);
221 tcg_temp_free_i32(tcg_excp);
224 static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
226 TCGv_i32 tcg_excp = tcg_const_i32(excp);
227 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
228 TCGv_i32 tcg_el = tcg_const_i32(target_el);
230 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
231 tcg_syn, tcg_el);
233 tcg_temp_free_i32(tcg_el);
234 tcg_temp_free_i32(tcg_syn);
235 tcg_temp_free_i32(tcg_excp);
238 static void gen_ss_advance(DisasContext *s)
240 /* If the singlestep state is Active-not-pending, advance to
241 * Active-pending.
243 if (s->ss_active) {
244 s->pstate_ss = 0;
245 gen_helper_clear_pstate_ss(cpu_env);
249 static void gen_step_complete_exception(DisasContext *s)
251 /* We just completed step of an insn. Move from Active-not-pending
252 * to Active-pending, and then also take the swstep exception.
253 * This corresponds to making the (IMPDEF) choice to prioritize
254 * swstep exceptions over asynchronous exceptions taken to an exception
255 * level where debug is disabled. This choice has the advantage that
256 * we do not need to maintain internal state corresponding to the
257 * ISV/EX syndrome bits between completion of the step and generation
258 * of the exception, and our syndrome information is always correct.
260 gen_ss_advance(s);
261 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
262 default_exception_el(s));
263 s->is_jmp = DISAS_EXC;
266 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
268 TCGv_i32 tmp1 = tcg_temp_new_i32();
269 TCGv_i32 tmp2 = tcg_temp_new_i32();
270 tcg_gen_ext16s_i32(tmp1, a);
271 tcg_gen_ext16s_i32(tmp2, b);
272 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
273 tcg_temp_free_i32(tmp2);
274 tcg_gen_sari_i32(a, a, 16);
275 tcg_gen_sari_i32(b, b, 16);
276 tcg_gen_mul_i32(b, b, a);
277 tcg_gen_mov_i32(a, tmp1);
278 tcg_temp_free_i32(tmp1);
281 /* Byteswap each halfword. */
282 static void gen_rev16(TCGv_i32 var)
284 TCGv_i32 tmp = tcg_temp_new_i32();
285 tcg_gen_shri_i32(tmp, var, 8);
286 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
287 tcg_gen_shli_i32(var, var, 8);
288 tcg_gen_andi_i32(var, var, 0xff00ff00);
289 tcg_gen_or_i32(var, var, tmp);
290 tcg_temp_free_i32(tmp);
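/* Example: 0xAABBCCDD becomes 0xBBAADDCC; the bytes within each halfword
 * are swapped while the halfwords themselves stay in place. */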
293 /* Byteswap low halfword and sign extend. */
294 static void gen_revsh(TCGv_i32 var)
296 tcg_gen_ext16u_i32(var, var);
297 tcg_gen_bswap16_i32(var, var);
298 tcg_gen_ext16s_i32(var, var);
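/* Example: 0x000034AB -> take the low halfword, byteswap it to 0xAB34,
 * then sign-extend to 0xFFFFAB34. */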
301 /* Unsigned bitfield extract. */
302 static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
304 if (shift)
305 tcg_gen_shri_i32(var, var, shift);
306 tcg_gen_andi_i32(var, var, mask);
309 /* Signed bitfield extract. */
310 static void gen_sbfx(TCGv_i32 var, int shift, int width)
312 uint32_t signbit;
314 if (shift)
315 tcg_gen_sari_i32(var, var, shift);
316 if (shift + width < 32) {
317 signbit = 1u << (width - 1);
318 tcg_gen_andi_i32(var, var, (1u << width) - 1);
319 tcg_gen_xori_i32(var, var, signbit);
320 tcg_gen_subi_i32(var, var, signbit);
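/* The xor/sub pair sign-extends the extracted field.  Example with an
 * 8-bit field whose value is 0x80: signbit = 0x80, and
 * (0x80 ^ 0x80) - 0x80 = 0xffffff80, i.e. -128 as expected. */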
324 /* Return (b << 32) + a. Mark inputs as dead */
325 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
327 TCGv_i64 tmp64 = tcg_temp_new_i64();
329 tcg_gen_extu_i32_i64(tmp64, b);
330 tcg_temp_free_i32(b);
331 tcg_gen_shli_i64(tmp64, tmp64, 32);
332 tcg_gen_add_i64(a, tmp64, a);
334 tcg_temp_free_i64(tmp64);
335 return a;
338 /* Return (b << 32) - a. Mark inputs as dead. */
339 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
341 TCGv_i64 tmp64 = tcg_temp_new_i64();
343 tcg_gen_extu_i32_i64(tmp64, b);
344 tcg_temp_free_i32(b);
345 tcg_gen_shli_i64(tmp64, tmp64, 32);
346 tcg_gen_sub_i64(a, tmp64, a);
348 tcg_temp_free_i64(tmp64);
349 return a;
352 /* 32x32->64 multiply. Marks inputs as dead. */
353 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
355 TCGv_i32 lo = tcg_temp_new_i32();
356 TCGv_i32 hi = tcg_temp_new_i32();
357 TCGv_i64 ret;
359 tcg_gen_mulu2_i32(lo, hi, a, b);
360 tcg_temp_free_i32(a);
361 tcg_temp_free_i32(b);
363 ret = tcg_temp_new_i64();
364 tcg_gen_concat_i32_i64(ret, lo, hi);
365 tcg_temp_free_i32(lo);
366 tcg_temp_free_i32(hi);
368 return ret;
371 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
373 TCGv_i32 lo = tcg_temp_new_i32();
374 TCGv_i32 hi = tcg_temp_new_i32();
375 TCGv_i64 ret;
377 tcg_gen_muls2_i32(lo, hi, a, b);
378 tcg_temp_free_i32(a);
379 tcg_temp_free_i32(b);
381 ret = tcg_temp_new_i64();
382 tcg_gen_concat_i32_i64(ret, lo, hi);
383 tcg_temp_free_i32(lo);
384 tcg_temp_free_i32(hi);
386 return ret;
389 /* Swap low and high halfwords. */
390 static void gen_swap_half(TCGv_i32 var)
392 TCGv_i32 tmp = tcg_temp_new_i32();
393 tcg_gen_shri_i32(tmp, var, 16);
394 tcg_gen_shli_i32(var, var, 16);
395 tcg_gen_or_i32(var, var, tmp);
396 tcg_temp_free_i32(tmp);
399 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
400 tmp = (t0 ^ t1) & 0x8000;
401 t0 &= ~0x8000;
402 t1 &= ~0x8000;
403 t0 = (t0 + t1) ^ tmp;
406 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
408 TCGv_i32 tmp = tcg_temp_new_i32();
409 tcg_gen_xor_i32(tmp, t0, t1);
410 tcg_gen_andi_i32(tmp, tmp, 0x8000);
411 tcg_gen_andi_i32(t0, t0, ~0x8000);
412 tcg_gen_andi_i32(t1, t1, ~0x8000);
413 tcg_gen_add_i32(t0, t0, t1);
414 tcg_gen_xor_i32(t0, t0, tmp);
415 tcg_temp_free_i32(tmp);
416 tcg_temp_free_i32(t1);
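/* Clearing bit 15 of both operands stops any carry crossing from the low
 * halfword into the high one, so a single 32-bit add performs two
 * independent 16-bit adds; the saved xor then restores the correct bit 15
 * of the low result. */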
419 /* Set CF to the top bit of var. */
420 static void gen_set_CF_bit31(TCGv_i32 var)
422 tcg_gen_shri_i32(cpu_CF, var, 31);
425 /* Set N and Z flags from var. */
426 static inline void gen_logic_CC(TCGv_i32 var)
428 tcg_gen_mov_i32(cpu_NF, var);
429 tcg_gen_mov_i32(cpu_ZF, var);
432 /* T0 += T1 + CF. */
433 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
435 tcg_gen_add_i32(t0, t0, t1);
436 tcg_gen_add_i32(t0, t0, cpu_CF);
439 /* dest = T0 + T1 + CF. */
440 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
442 tcg_gen_add_i32(dest, t0, t1);
443 tcg_gen_add_i32(dest, dest, cpu_CF);
446 /* dest = T0 - T1 + CF - 1. */
447 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
449 tcg_gen_sub_i32(dest, t0, t1);
450 tcg_gen_add_i32(dest, dest, cpu_CF);
451 tcg_gen_subi_i32(dest, dest, 1);
454 /* dest = T0 + T1. Compute C, N, V and Z flags */
455 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
457 TCGv_i32 tmp = tcg_temp_new_i32();
458 tcg_gen_movi_i32(tmp, 0);
459 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
460 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
461 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
462 tcg_gen_xor_i32(tmp, t0, t1);
463 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
464 tcg_temp_free_i32(tmp);
465 tcg_gen_mov_i32(dest, cpu_NF);
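/* Overflow detection: V = (result ^ t0) & ~(t0 ^ t1), i.e. signed overflow
 * occurred iff the operands had the same sign and the result's sign differs
 * from it; only bit 31 of cpu_VF is meaningful. */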
468 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
469 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
471 TCGv_i32 tmp = tcg_temp_new_i32();
472 if (TCG_TARGET_HAS_add2_i32) {
473 tcg_gen_movi_i32(tmp, 0);
474 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
475 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
476 } else {
477 TCGv_i64 q0 = tcg_temp_new_i64();
478 TCGv_i64 q1 = tcg_temp_new_i64();
479 tcg_gen_extu_i32_i64(q0, t0);
480 tcg_gen_extu_i32_i64(q1, t1);
481 tcg_gen_add_i64(q0, q0, q1);
482 tcg_gen_extu_i32_i64(q1, cpu_CF);
483 tcg_gen_add_i64(q0, q0, q1);
484 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
485 tcg_temp_free_i64(q0);
486 tcg_temp_free_i64(q1);
488 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
489 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
490 tcg_gen_xor_i32(tmp, t0, t1);
491 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
492 tcg_temp_free_i32(tmp);
493 tcg_gen_mov_i32(dest, cpu_NF);
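/* With a native add2 the 33-bit sum is accumulated in the NF:CF pair in two
 * steps (t0 + CF, then + t1); otherwise the sum is formed in a 64-bit
 * temporary and split so that CF receives bit 32. */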
496 /* dest = T0 - T1. Compute C, N, V and Z flags */
497 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
499 TCGv_i32 tmp;
500 tcg_gen_sub_i32(cpu_NF, t0, t1);
501 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
502 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
503 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
504 tmp = tcg_temp_new_i32();
505 tcg_gen_xor_i32(tmp, t0, t1);
506 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
507 tcg_temp_free_i32(tmp);
508 tcg_gen_mov_i32(dest, cpu_NF);
511 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
512 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
514 TCGv_i32 tmp = tcg_temp_new_i32();
515 tcg_gen_not_i32(tmp, t1);
516 gen_adc_CC(dest, t0, tmp);
517 tcg_temp_free_i32(tmp);
520 #define GEN_SHIFT(name) \
521 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
523 TCGv_i32 tmp1, tmp2, tmp3; \
524 tmp1 = tcg_temp_new_i32(); \
525 tcg_gen_andi_i32(tmp1, t1, 0xff); \
526 tmp2 = tcg_const_i32(0); \
527 tmp3 = tcg_const_i32(0x1f); \
528 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
529 tcg_temp_free_i32(tmp3); \
530 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
531 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
532 tcg_temp_free_i32(tmp2); \
533 tcg_temp_free_i32(tmp1); \
535 GEN_SHIFT(shl)
536 GEN_SHIFT(shr)
537 #undef GEN_SHIFT
539 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
541 TCGv_i32 tmp1, tmp2;
542 tmp1 = tcg_temp_new_i32();
543 tcg_gen_andi_i32(tmp1, t1, 0xff);
544 tmp2 = tcg_const_i32(0x1f);
545 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
546 tcg_temp_free_i32(tmp2);
547 tcg_gen_sar_i32(dest, t0, tmp1);
548 tcg_temp_free_i32(tmp1);
551 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
553 TCGv_i32 c0 = tcg_const_i32(0);
554 TCGv_i32 tmp = tcg_temp_new_i32();
555 tcg_gen_neg_i32(tmp, src);
556 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
557 tcg_temp_free_i32(c0);
558 tcg_temp_free_i32(tmp);
561 static void shifter_out_im(TCGv_i32 var, int shift)
563 if (shift == 0) {
564 tcg_gen_andi_i32(cpu_CF, var, 1);
565 } else {
566 tcg_gen_shri_i32(cpu_CF, var, shift);
567 if (shift != 31) {
568 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
573 /* Shift by immediate. Includes special handling for shift == 0. */
574 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
575 int shift, int flags)
577 switch (shiftop) {
578 case 0: /* LSL */
579 if (shift != 0) {
580 if (flags)
581 shifter_out_im(var, 32 - shift);
582 tcg_gen_shli_i32(var, var, shift);
584 break;
585 case 1: /* LSR */
586 if (shift == 0) {
587 if (flags) {
588 tcg_gen_shri_i32(cpu_CF, var, 31);
590 tcg_gen_movi_i32(var, 0);
591 } else {
592 if (flags)
593 shifter_out_im(var, shift - 1);
594 tcg_gen_shri_i32(var, var, shift);
596 break;
597 case 2: /* ASR */
598 if (shift == 0)
599 shift = 32;
600 if (flags)
601 shifter_out_im(var, shift - 1);
602 if (shift == 32)
603 shift = 31;
604 tcg_gen_sari_i32(var, var, shift);
605 break;
606 case 3: /* ROR/RRX */
607 if (shift != 0) {
608 if (flags)
609 shifter_out_im(var, shift - 1);
610 tcg_gen_rotri_i32(var, var, shift); break;
611 } else {
612 TCGv_i32 tmp = tcg_temp_new_i32();
613 tcg_gen_shli_i32(tmp, cpu_CF, 31);
614 if (flags)
615 shifter_out_im(var, 0);
616 tcg_gen_shri_i32(var, var, 1);
617 tcg_gen_or_i32(var, var, tmp);
618 tcg_temp_free_i32(tmp);
623 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
624 TCGv_i32 shift, int flags)
626 if (flags) {
627 switch (shiftop) {
628 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
629 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
630 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
631 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
633 } else {
634 switch (shiftop) {
635 case 0:
636 gen_shl(var, var, shift);
637 break;
638 case 1:
639 gen_shr(var, var, shift);
640 break;
641 case 2:
642 gen_sar(var, var, shift);
643 break;
644 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
645 tcg_gen_rotr_i32(var, var, shift); break;
648 tcg_temp_free_i32(shift);
651 #define PAS_OP(pfx) \
652 switch (op2) { \
653 case 0: gen_pas_helper(glue(pfx,add16)); break; \
654 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
655 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
656 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
657 case 4: gen_pas_helper(glue(pfx,add8)); break; \
658 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
660 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
662 TCGv_ptr tmp;
664 switch (op1) {
665 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
666 case 1:
667 tmp = tcg_temp_new_ptr();
668 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
669 PAS_OP(s)
670 tcg_temp_free_ptr(tmp);
671 break;
672 case 5:
673 tmp = tcg_temp_new_ptr();
674 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
675 PAS_OP(u)
676 tcg_temp_free_ptr(tmp);
677 break;
678 #undef gen_pas_helper
679 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
680 case 2:
681 PAS_OP(q);
682 break;
683 case 3:
684 PAS_OP(sh);
685 break;
686 case 6:
687 PAS_OP(uq);
688 break;
689 case 7:
690 PAS_OP(uh);
691 break;
692 #undef gen_pas_helper
695 #undef PAS_OP
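/* In the ARM encoding op1 selects the variant prefix (1 = signed and
 * 5 = unsigned, both of which also update the GE flags; 2 = q, 3 = sh,
 * 6 = uq, 7 = uh) and op2 selects the operation per the PAS_OP switch
 * above (add16, addsubx, subaddx, sub16, add8, sub8). */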
697 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
698 #define PAS_OP(pfx) \
699 switch (op1) { \
700 case 0: gen_pas_helper(glue(pfx,add8)); break; \
701 case 1: gen_pas_helper(glue(pfx,add16)); break; \
702 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
703 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
704 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
705 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
707 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
709 TCGv_ptr tmp;
711 switch (op2) {
712 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
713 case 0:
714 tmp = tcg_temp_new_ptr();
715 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
716 PAS_OP(s)
717 tcg_temp_free_ptr(tmp);
718 break;
719 case 4:
720 tmp = tcg_temp_new_ptr();
721 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
722 PAS_OP(u)
723 tcg_temp_free_ptr(tmp);
724 break;
725 #undef gen_pas_helper
726 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
727 case 1:
728 PAS_OP(q);
729 break;
730 case 2:
731 PAS_OP(sh);
732 break;
733 case 5:
734 PAS_OP(uq);
735 break;
736 case 6:
737 PAS_OP(uh);
738 break;
739 #undef gen_pas_helper
742 #undef PAS_OP
745 * Generate a conditional based on ARM condition code cc.
746  * This is common between ARM and AArch64 targets.
748 void arm_test_cc(DisasCompare *cmp, int cc)
750 TCGv_i32 value;
751 TCGCond cond;
752 bool global = true;
754 switch (cc) {
755 case 0: /* eq: Z */
756 case 1: /* ne: !Z */
757 cond = TCG_COND_EQ;
758 value = cpu_ZF;
759 break;
761 case 2: /* cs: C */
762 case 3: /* cc: !C */
763 cond = TCG_COND_NE;
764 value = cpu_CF;
765 break;
767 case 4: /* mi: N */
768 case 5: /* pl: !N */
769 cond = TCG_COND_LT;
770 value = cpu_NF;
771 break;
773 case 6: /* vs: V */
774 case 7: /* vc: !V */
775 cond = TCG_COND_LT;
776 value = cpu_VF;
777 break;
779 case 8: /* hi: C && !Z */
780 case 9: /* ls: !C || Z -> !(C && !Z) */
781 cond = TCG_COND_NE;
782 value = tcg_temp_new_i32();
783 global = false;
784 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
785 ZF is non-zero for !Z; so AND the two subexpressions. */
786 tcg_gen_neg_i32(value, cpu_CF);
787 tcg_gen_and_i32(value, value, cpu_ZF);
788 break;
790 case 10: /* ge: N == V -> N ^ V == 0 */
791 case 11: /* lt: N != V -> N ^ V != 0 */
792 /* Since we're only interested in the sign bit, == 0 is >= 0. */
793 cond = TCG_COND_GE;
794 value = tcg_temp_new_i32();
795 global = false;
796 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
797 break;
799 case 12: /* gt: !Z && N == V */
800 case 13: /* le: Z || N != V */
801 cond = TCG_COND_NE;
802 value = tcg_temp_new_i32();
803 global = false;
804 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
805 * the sign bit then AND with ZF to yield the result. */
806 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
807 tcg_gen_sari_i32(value, value, 31);
808 tcg_gen_andc_i32(value, cpu_ZF, value);
809 break;
811 case 14: /* always */
812 case 15: /* always */
813 /* Use the ALWAYS condition, which will fold early.
814 * It doesn't matter what we use for the value. */
815 cond = TCG_COND_ALWAYS;
816 value = cpu_ZF;
817 goto no_invert;
819 default:
820 fprintf(stderr, "Bad condition code 0x%x\n", cc);
821 abort();
824 if (cc & 1) {
825 cond = tcg_invert_cond(cond);
828 no_invert:
829 cmp->cond = cond;
830 cmp->value = value;
831 cmp->value_global = global;
834 void arm_free_cc(DisasCompare *cmp)
836 if (!cmp->value_global) {
837 tcg_temp_free_i32(cmp->value);
841 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
843 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
846 void arm_gen_test_cc(int cc, TCGLabel *label)
848 DisasCompare cmp;
849 arm_test_cc(&cmp, cc);
850 arm_jump_cc(&cmp, label);
851 arm_free_cc(&cmp);
854 static const uint8_t table_logic_cc[16] = {
855 1, /* and */
856 1, /* xor */
857 0, /* sub */
858 0, /* rsb */
859 0, /* add */
860 0, /* adc */
861 0, /* sbc */
862 0, /* rsc */
863 1, /* andl */
864 1, /* xorl */
865 0, /* cmp */
866 0, /* cmn */
867 1, /* orr */
868 1, /* mov */
869 1, /* bic */
870 1, /* mvn */
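/* A 1 entry marks a logical data-processing op: with the S bit set it
 * updates N and Z from the result (gen_logic_CC) and takes C from the
 * shifter, rather than computing flags arithmetically as ADD/SUB do. */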
873 /* Set PC and Thumb state from an immediate address. */
874 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
876 TCGv_i32 tmp;
878 s->is_jmp = DISAS_JUMP;
879 if (s->thumb != (addr & 1)) {
880 tmp = tcg_temp_new_i32();
881 tcg_gen_movi_i32(tmp, addr & 1);
882 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
883 tcg_temp_free_i32(tmp);
885 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
888 /* Set PC and Thumb state from var. var is marked as dead. */
889 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
891 s->is_jmp = DISAS_JUMP;
892 tcg_gen_andi_i32(cpu_R[15], var, ~1);
893 tcg_gen_andi_i32(var, var, 1);
894 store_cpu_field(var, thumb);
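/* Interworking branch: bit 0 of the target selects the new instruction set
 * (1 = Thumb) and is written to CPUARMState.thumb, while the PC receives
 * the address with bit 0 cleared. */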
897 /* Variant of store_reg which uses branch&exchange logic when storing
898 to r15 in ARM architecture v7 and above. The source must be a temporary
899 and will be marked as dead. */
900 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
902 if (reg == 15 && ENABLE_ARCH_7) {
903 gen_bx(s, var);
904 } else {
905 store_reg(s, reg, var);
909 /* Variant of store_reg which uses branch&exchange logic when storing
910 * to r15 in ARM architecture v5T and above. This is used for storing
911 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
912 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
913 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
915 if (reg == 15 && ENABLE_ARCH_5) {
916 gen_bx(s, var);
917 } else {
918 store_reg(s, reg, var);
922 #ifdef CONFIG_USER_ONLY
923 #define IS_USER_ONLY 1
924 #else
925 #define IS_USER_ONLY 0
926 #endif
928 /* Abstractions of "generate code to do a guest load/store for
929 * AArch32", where a vaddr is always 32 bits (and is zero
930 * extended if we're a 64 bit core) and data is also
931 * 32 bits unless specifically doing a 64 bit access.
932 * These functions work like tcg_gen_qemu_{ld,st}* except
933 * that the address argument is TCGv_i32 rather than TCGv.
935 #if TARGET_LONG_BITS == 32
937 #define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
938 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
939 TCGv_i32 addr, int index) \
941 TCGMemOp opc = (OPC) | s->be_data; \
942 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
943 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
944 TCGv addr_be = tcg_temp_new(); \
945 tcg_gen_xori_i32(addr_be, addr, BE32_XOR); \
946 tcg_gen_qemu_ld_i32(val, addr_be, index, opc); \
947 tcg_temp_free(addr_be); \
948 return; \
950 tcg_gen_qemu_ld_i32(val, addr, index, opc); \
953 #define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
954 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
955 TCGv_i32 addr, int index) \
957 TCGMemOp opc = (OPC) | s->be_data; \
958 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
959 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
960 TCGv addr_be = tcg_temp_new(); \
961 tcg_gen_xori_i32(addr_be, addr, BE32_XOR); \
962 tcg_gen_qemu_st_i32(val, addr_be, index, opc); \
963 tcg_temp_free(addr_be); \
964 return; \
966 tcg_gen_qemu_st_i32(val, addr, index, opc); \
969 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
970 TCGv_i32 addr, int index)
972 TCGMemOp opc = MO_Q | s->be_data;
973 tcg_gen_qemu_ld_i64(val, addr, index, opc);
974 /* Not needed for user-mode BE32, where we use MO_BE instead. */
975 if (!IS_USER_ONLY && s->sctlr_b) {
976 tcg_gen_rotri_i64(val, val, 32);
980 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
981 TCGv_i32 addr, int index)
983 TCGMemOp opc = MO_Q | s->be_data;
984 /* Not needed for user-mode BE32, where we use MO_BE instead. */
985 if (!IS_USER_ONLY && s->sctlr_b) {
986 TCGv_i64 tmp = tcg_temp_new_i64();
987 tcg_gen_rotri_i64(tmp, val, 32);
988 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
989 tcg_temp_free_i64(tmp);
990 return;
992 tcg_gen_qemu_st_i64(val, addr, index, opc);
995 #else
997 #define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
998 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
999 TCGv_i32 addr, int index) \
1001 TCGMemOp opc = (OPC) | s->be_data; \
1002 TCGv addr64 = tcg_temp_new(); \
1003 tcg_gen_extu_i32_i64(addr64, addr); \
1004 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
1005 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
1006 tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
1008 tcg_gen_qemu_ld_i32(val, addr64, index, opc); \
1009 tcg_temp_free(addr64); \
1012 #define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
1013 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1014 TCGv_i32 addr, int index) \
1016 TCGMemOp opc = (OPC) | s->be_data; \
1017 TCGv addr64 = tcg_temp_new(); \
1018 tcg_gen_extu_i32_i64(addr64, addr); \
1019 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
1020 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
1021 tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
1023 tcg_gen_qemu_st_i32(val, addr64, index, opc); \
1024 tcg_temp_free(addr64); \
1027 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1028 TCGv_i32 addr, int index)
1030 TCGMemOp opc = MO_Q | s->be_data;
1031 TCGv addr64 = tcg_temp_new();
1032 tcg_gen_extu_i32_i64(addr64, addr);
1033 tcg_gen_qemu_ld_i64(val, addr64, index, opc);
1035 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1036 if (!IS_USER_ONLY && s->sctlr_b) {
1037 tcg_gen_rotri_i64(val, val, 32);
1039 tcg_temp_free(addr64);
1042 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1043 TCGv_i32 addr, int index)
1045 TCGMemOp opc = MO_Q | s->be_data;
1046 TCGv addr64 = tcg_temp_new();
1047 tcg_gen_extu_i32_i64(addr64, addr);
1049 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1050 if (!IS_USER_ONLY && s->sctlr_b) {
1051 TCGv tmp = tcg_temp_new();
1052 tcg_gen_rotri_i64(tmp, val, 32);
1053 tcg_gen_qemu_st_i64(tmp, addr64, index, opc);
1054 tcg_temp_free(tmp);
1055 } else {
1056 tcg_gen_qemu_st_i64(val, addr64, index, opc);
1058 tcg_temp_free(addr64);
1061 #endif
1063 DO_GEN_LD(8s, MO_SB, 3)
1064 DO_GEN_LD(8u, MO_UB, 3)
1065 DO_GEN_LD(16s, MO_SW, 2)
1066 DO_GEN_LD(16u, MO_UW, 2)
1067 DO_GEN_LD(32u, MO_UL, 0)
1068 /* 'a' variants include an alignment check */
1069 DO_GEN_LD(16ua, MO_UW | MO_ALIGN, 2)
1070 DO_GEN_LD(32ua, MO_UL | MO_ALIGN, 0)
1071 DO_GEN_ST(8, MO_UB, 3)
1072 DO_GEN_ST(16, MO_UW, 2)
1073 DO_GEN_ST(32, MO_UL, 0)
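/* The BE32_XOR argument handles legacy SCTLR.B big-endian system mode:
 * byte accesses XOR the address with 3 and halfword accesses with 2 so that
 * sub-word data lands in the right place; word accesses need no fixup. */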
1075 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
1077 tcg_gen_movi_i32(cpu_R[15], val);
1080 static inline void gen_hvc(DisasContext *s, int imm16)
1082 /* The pre HVC helper handles cases when HVC gets trapped
1083 * as an undefined insn by runtime configuration (ie before
1084 * the insn really executes).
1086 gen_set_pc_im(s, s->pc - 4);
1087 gen_helper_pre_hvc(cpu_env);
1088 /* Otherwise we will treat this as a real exception which
1089 * happens after execution of the insn. (The distinction matters
1090 * for the PC value reported to the exception handler and also
1091 * for single stepping.)
1093 s->svc_imm = imm16;
1094 gen_set_pc_im(s, s->pc);
1095 s->is_jmp = DISAS_HVC;
1098 static inline void gen_smc(DisasContext *s)
1100 /* As with HVC, we may take an exception either before or after
1101 * the insn executes.
1103 TCGv_i32 tmp;
1105 gen_set_pc_im(s, s->pc - 4);
1106 tmp = tcg_const_i32(syn_aa32_smc());
1107 gen_helper_pre_smc(cpu_env, tmp);
1108 tcg_temp_free_i32(tmp);
1109 gen_set_pc_im(s, s->pc);
1110 s->is_jmp = DISAS_SMC;
1113 static inline void
1114 gen_set_condexec (DisasContext *s)
1116 if (s->condexec_mask) {
1117 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
1118 TCGv_i32 tmp = tcg_temp_new_i32();
1119 tcg_gen_movi_i32(tmp, val);
1120 store_cpu_field(tmp, condexec_bits);
1124 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1126 gen_set_condexec(s);
1127 gen_set_pc_im(s, s->pc - offset);
1128 gen_exception_internal(excp);
1129 s->is_jmp = DISAS_JUMP;
1132 static void gen_exception_insn(DisasContext *s, int offset, int excp,
1133 int syn, uint32_t target_el)
1135 gen_set_condexec(s);
1136 gen_set_pc_im(s, s->pc - offset);
1137 gen_exception(excp, syn, target_el);
1138 s->is_jmp = DISAS_JUMP;
1141 /* Force a TB lookup after an instruction that changes the CPU state. */
1142 static inline void gen_lookup_tb(DisasContext *s)
1144 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
1145 s->is_jmp = DISAS_JUMP;
1148 static inline void gen_hlt(DisasContext *s, int imm)
1150 /* HLT. This has two purposes.
1151 * Architecturally, it is an external halting debug instruction.
1152  * Since QEMU doesn't implement external debug, we treat this as
1153  * the architecture requires when halting debug is disabled: it will UNDEF.
1154 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1155 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1156 * must trigger semihosting even for ARMv7 and earlier, where
1157 * HLT was an undefined encoding.
1158 * In system mode, we don't allow userspace access to
1159 * semihosting, to provide some semblance of security
1160 * (and for consistency with our 32-bit semihosting).
1162 if (semihosting_enabled() &&
1163 #ifndef CONFIG_USER_ONLY
1164 s->current_el != 0 &&
1165 #endif
1166 (imm == (s->thumb ? 0x3c : 0xf000))) {
1167 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1168 return;
1171 gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
1172 default_exception_el(s));
1175 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1176 TCGv_i32 var)
1178 int val, rm, shift, shiftop;
1179 TCGv_i32 offset;
1181 if (!(insn & (1 << 25))) {
1182 /* immediate */
1183 val = insn & 0xfff;
1184 if (!(insn & (1 << 23)))
1185 val = -val;
1186 if (val != 0)
1187 tcg_gen_addi_i32(var, var, val);
1188 } else {
1189 /* shift/register */
1190 rm = (insn) & 0xf;
1191 shift = (insn >> 7) & 0x1f;
1192 shiftop = (insn >> 5) & 3;
1193 offset = load_reg(s, rm);
1194 gen_arm_shift_im(offset, shiftop, shift, 0);
1195 if (!(insn & (1 << 23)))
1196 tcg_gen_sub_i32(var, var, offset);
1197 else
1198 tcg_gen_add_i32(var, var, offset);
1199 tcg_temp_free_i32(offset);
1203 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1204 int extra, TCGv_i32 var)
1206 int val, rm;
1207 TCGv_i32 offset;
1209 if (insn & (1 << 22)) {
1210 /* immediate */
1211 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1212 if (!(insn & (1 << 23)))
1213 val = -val;
1214 val += extra;
1215 if (val != 0)
1216 tcg_gen_addi_i32(var, var, val);
1217 } else {
1218 /* register */
1219 if (extra)
1220 tcg_gen_addi_i32(var, var, extra);
1221 rm = (insn) & 0xf;
1222 offset = load_reg(s, rm);
1223 if (!(insn & (1 << 23)))
1224 tcg_gen_sub_i32(var, var, offset);
1225 else
1226 tcg_gen_add_i32(var, var, offset);
1227 tcg_temp_free_i32(offset);
1231 static TCGv_ptr get_fpstatus_ptr(int neon)
1233 TCGv_ptr statusptr = tcg_temp_new_ptr();
1234 int offset;
1235 if (neon) {
1236 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1237 } else {
1238 offset = offsetof(CPUARMState, vfp.fp_status);
1240 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1241 return statusptr;
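/* Neon arithmetic uses the "standard FPSCR value" status (flush-to-zero,
 * default NaN), kept in vfp.standard_fp_status, while VFP instructions use
 * the live vfp.fp_status controlled by the guest's FPSCR. */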
1244 #define VFP_OP2(name) \
1245 static inline void gen_vfp_##name(int dp) \
1247 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1248 if (dp) { \
1249 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1250 } else { \
1251 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1253 tcg_temp_free_ptr(fpst); \
1256 VFP_OP2(add)
1257 VFP_OP2(sub)
1258 VFP_OP2(mul)
1259 VFP_OP2(div)
1261 #undef VFP_OP2
1263 static inline void gen_vfp_F1_mul(int dp)
1265 /* Like gen_vfp_mul() but put result in F1 */
1266 TCGv_ptr fpst = get_fpstatus_ptr(0);
1267 if (dp) {
1268 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1269 } else {
1270 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1272 tcg_temp_free_ptr(fpst);
1275 static inline void gen_vfp_F1_neg(int dp)
1277 /* Like gen_vfp_neg() but put result in F1 */
1278 if (dp) {
1279 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1280 } else {
1281 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1285 static inline void gen_vfp_abs(int dp)
1287 if (dp)
1288 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1289 else
1290 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1293 static inline void gen_vfp_neg(int dp)
1295 if (dp)
1296 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1297 else
1298 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1301 static inline void gen_vfp_sqrt(int dp)
1303 if (dp)
1304 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1305 else
1306 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1309 static inline void gen_vfp_cmp(int dp)
1311 if (dp)
1312 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1313 else
1314 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1317 static inline void gen_vfp_cmpe(int dp)
1319 if (dp)
1320 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1321 else
1322 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1325 static inline void gen_vfp_F1_ld0(int dp)
1327 if (dp)
1328 tcg_gen_movi_i64(cpu_F1d, 0);
1329 else
1330 tcg_gen_movi_i32(cpu_F1s, 0);
1333 #define VFP_GEN_ITOF(name) \
1334 static inline void gen_vfp_##name(int dp, int neon) \
1336 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1337 if (dp) { \
1338 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1339 } else { \
1340 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1342 tcg_temp_free_ptr(statusptr); \
1345 VFP_GEN_ITOF(uito)
1346 VFP_GEN_ITOF(sito)
1347 #undef VFP_GEN_ITOF
1349 #define VFP_GEN_FTOI(name) \
1350 static inline void gen_vfp_##name(int dp, int neon) \
1352 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1353 if (dp) { \
1354 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1355 } else { \
1356 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1358 tcg_temp_free_ptr(statusptr); \
1361 VFP_GEN_FTOI(toui)
1362 VFP_GEN_FTOI(touiz)
1363 VFP_GEN_FTOI(tosi)
1364 VFP_GEN_FTOI(tosiz)
1365 #undef VFP_GEN_FTOI
1367 #define VFP_GEN_FIX(name, round) \
1368 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1370 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1371 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1372 if (dp) { \
1373 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1374 statusptr); \
1375 } else { \
1376 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1377 statusptr); \
1379 tcg_temp_free_i32(tmp_shift); \
1380 tcg_temp_free_ptr(statusptr); \
1382 VFP_GEN_FIX(tosh, _round_to_zero)
1383 VFP_GEN_FIX(tosl, _round_to_zero)
1384 VFP_GEN_FIX(touh, _round_to_zero)
1385 VFP_GEN_FIX(toul, _round_to_zero)
1386 VFP_GEN_FIX(shto, )
1387 VFP_GEN_FIX(slto, )
1388 VFP_GEN_FIX(uhto, )
1389 VFP_GEN_FIX(ulto, )
1390 #undef VFP_GEN_FIX
1392 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1394 if (dp) {
1395 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
1396 } else {
1397 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
1401 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1403 if (dp) {
1404 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
1405 } else {
1406 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
1410 static inline long
1411 vfp_reg_offset (int dp, int reg)
1413 if (dp)
1414 return offsetof(CPUARMState, vfp.regs[reg]);
1415 else if (reg & 1) {
1416 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1417 + offsetof(CPU_DoubleU, l.upper);
1418 } else {
1419 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1420 + offsetof(CPU_DoubleU, l.lower);
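/* Single-precision register 2n maps to the low word of double register n
 * and 2n+1 to the high word; CPU_DoubleU.l.upper/lower hide the host byte
 * order.  Example: s5 is the upper half of d2. */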
1424 /* Return the offset of a 32-bit piece of a NEON register.
1425 zero is the least significant end of the register. */
1426 static inline long
1427 neon_reg_offset (int reg, int n)
1429 int sreg;
1430 sreg = reg * 2 + n;
1431 return vfp_reg_offset(0, sreg);
1434 static TCGv_i32 neon_load_reg(int reg, int pass)
1436 TCGv_i32 tmp = tcg_temp_new_i32();
1437 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1438 return tmp;
1441 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1443 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1444 tcg_temp_free_i32(var);
1447 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1449 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1452 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1454 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1457 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1458 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1459 #define tcg_gen_st_f32 tcg_gen_st_i32
1460 #define tcg_gen_st_f64 tcg_gen_st_i64
1462 static inline void gen_mov_F0_vreg(int dp, int reg)
1464 if (dp)
1465 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1466 else
1467 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1470 static inline void gen_mov_F1_vreg(int dp, int reg)
1472 if (dp)
1473 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1474 else
1475 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1478 static inline void gen_mov_vreg_F0(int dp, int reg)
1480 if (dp)
1481 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1482 else
1483 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1486 #define ARM_CP_RW_BIT (1 << 20)
1488 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1490 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1493 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1495 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1498 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1500 TCGv_i32 var = tcg_temp_new_i32();
1501 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1502 return var;
1505 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1507 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1508 tcg_temp_free_i32(var);
1511 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1513 iwmmxt_store_reg(cpu_M0, rn);
1516 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1518 iwmmxt_load_reg(cpu_M0, rn);
1521 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1523 iwmmxt_load_reg(cpu_V1, rn);
1524 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1527 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1529 iwmmxt_load_reg(cpu_V1, rn);
1530 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1533 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1535 iwmmxt_load_reg(cpu_V1, rn);
1536 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1539 #define IWMMXT_OP(name) \
1540 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1542 iwmmxt_load_reg(cpu_V1, rn); \
1543 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1546 #define IWMMXT_OP_ENV(name) \
1547 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1549 iwmmxt_load_reg(cpu_V1, rn); \
1550 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1553 #define IWMMXT_OP_ENV_SIZE(name) \
1554 IWMMXT_OP_ENV(name##b) \
1555 IWMMXT_OP_ENV(name##w) \
1556 IWMMXT_OP_ENV(name##l)
1558 #define IWMMXT_OP_ENV1(name) \
1559 static inline void gen_op_iwmmxt_##name##_M0(void) \
1561 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1564 IWMMXT_OP(maddsq)
1565 IWMMXT_OP(madduq)
1566 IWMMXT_OP(sadb)
1567 IWMMXT_OP(sadw)
1568 IWMMXT_OP(mulslw)
1569 IWMMXT_OP(mulshw)
1570 IWMMXT_OP(mululw)
1571 IWMMXT_OP(muluhw)
1572 IWMMXT_OP(macsw)
1573 IWMMXT_OP(macuw)
1575 IWMMXT_OP_ENV_SIZE(unpackl)
1576 IWMMXT_OP_ENV_SIZE(unpackh)
1578 IWMMXT_OP_ENV1(unpacklub)
1579 IWMMXT_OP_ENV1(unpackluw)
1580 IWMMXT_OP_ENV1(unpacklul)
1581 IWMMXT_OP_ENV1(unpackhub)
1582 IWMMXT_OP_ENV1(unpackhuw)
1583 IWMMXT_OP_ENV1(unpackhul)
1584 IWMMXT_OP_ENV1(unpacklsb)
1585 IWMMXT_OP_ENV1(unpacklsw)
1586 IWMMXT_OP_ENV1(unpacklsl)
1587 IWMMXT_OP_ENV1(unpackhsb)
1588 IWMMXT_OP_ENV1(unpackhsw)
1589 IWMMXT_OP_ENV1(unpackhsl)
1591 IWMMXT_OP_ENV_SIZE(cmpeq)
1592 IWMMXT_OP_ENV_SIZE(cmpgtu)
1593 IWMMXT_OP_ENV_SIZE(cmpgts)
1595 IWMMXT_OP_ENV_SIZE(mins)
1596 IWMMXT_OP_ENV_SIZE(minu)
1597 IWMMXT_OP_ENV_SIZE(maxs)
1598 IWMMXT_OP_ENV_SIZE(maxu)
1600 IWMMXT_OP_ENV_SIZE(subn)
1601 IWMMXT_OP_ENV_SIZE(addn)
1602 IWMMXT_OP_ENV_SIZE(subu)
1603 IWMMXT_OP_ENV_SIZE(addu)
1604 IWMMXT_OP_ENV_SIZE(subs)
1605 IWMMXT_OP_ENV_SIZE(adds)
1607 IWMMXT_OP_ENV(avgb0)
1608 IWMMXT_OP_ENV(avgb1)
1609 IWMMXT_OP_ENV(avgw0)
1610 IWMMXT_OP_ENV(avgw1)
1612 IWMMXT_OP_ENV(packuw)
1613 IWMMXT_OP_ENV(packul)
1614 IWMMXT_OP_ENV(packuq)
1615 IWMMXT_OP_ENV(packsw)
1616 IWMMXT_OP_ENV(packsl)
1617 IWMMXT_OP_ENV(packsq)
1619 static void gen_op_iwmmxt_set_mup(void)
1621 TCGv_i32 tmp;
1622 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1623 tcg_gen_ori_i32(tmp, tmp, 2);
1624 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1627 static void gen_op_iwmmxt_set_cup(void)
1629 TCGv_i32 tmp;
1630 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1631 tcg_gen_ori_i32(tmp, tmp, 1);
1632 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1635 static void gen_op_iwmmxt_setpsr_nz(void)
1637 TCGv_i32 tmp = tcg_temp_new_i32();
1638 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1639 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1642 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1644 iwmmxt_load_reg(cpu_V1, rn);
1645 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1646 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1649 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1650 TCGv_i32 dest)
1652 int rd;
1653 uint32_t offset;
1654 TCGv_i32 tmp;
1656 rd = (insn >> 16) & 0xf;
1657 tmp = load_reg(s, rd);
1659 offset = (insn & 0xff) << ((insn >> 7) & 2);
1660 if (insn & (1 << 24)) {
1661 /* Pre indexed */
1662 if (insn & (1 << 23))
1663 tcg_gen_addi_i32(tmp, tmp, offset);
1664 else
1665 tcg_gen_addi_i32(tmp, tmp, -offset);
1666 tcg_gen_mov_i32(dest, tmp);
1667 if (insn & (1 << 21))
1668 store_reg(s, rd, tmp);
1669 else
1670 tcg_temp_free_i32(tmp);
1671 } else if (insn & (1 << 21)) {
1672 /* Post indexed */
1673 tcg_gen_mov_i32(dest, tmp);
1674 if (insn & (1 << 23))
1675 tcg_gen_addi_i32(tmp, tmp, offset);
1676 else
1677 tcg_gen_addi_i32(tmp, tmp, -offset);
1678 store_reg(s, rd, tmp);
1679 } else if (!(insn & (1 << 23)))
1680 return 1;
1681 return 0;
1684 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1686 int rd = (insn >> 0) & 0xf;
1687 TCGv_i32 tmp;
1689 if (insn & (1 << 8)) {
1690 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1691 return 1;
1692 } else {
1693 tmp = iwmmxt_load_creg(rd);
1695 } else {
1696 tmp = tcg_temp_new_i32();
1697 iwmmxt_load_reg(cpu_V0, rd);
1698 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1700 tcg_gen_andi_i32(tmp, tmp, mask);
1701 tcg_gen_mov_i32(dest, tmp);
1702 tcg_temp_free_i32(tmp);
1703 return 0;
1706 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1707 (ie. an undefined instruction). */
1708 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1710 int rd, wrd;
1711 int rdhi, rdlo, rd0, rd1, i;
1712 TCGv_i32 addr;
1713 TCGv_i32 tmp, tmp2, tmp3;
1715 if ((insn & 0x0e000e00) == 0x0c000000) {
1716 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1717 wrd = insn & 0xf;
1718 rdlo = (insn >> 12) & 0xf;
1719 rdhi = (insn >> 16) & 0xf;
1720 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1721 iwmmxt_load_reg(cpu_V0, wrd);
1722 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1723 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1724 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1725 } else { /* TMCRR */
1726 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1727 iwmmxt_store_reg(cpu_V0, wrd);
1728 gen_op_iwmmxt_set_mup();
1730 return 0;
1733 wrd = (insn >> 12) & 0xf;
1734 addr = tcg_temp_new_i32();
1735 if (gen_iwmmxt_address(s, insn, addr)) {
1736 tcg_temp_free_i32(addr);
1737 return 1;
1739 if (insn & ARM_CP_RW_BIT) {
1740 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1741 tmp = tcg_temp_new_i32();
1742 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1743 iwmmxt_store_creg(wrd, tmp);
1744 } else {
1745 i = 1;
1746 if (insn & (1 << 8)) {
1747 if (insn & (1 << 22)) { /* WLDRD */
1748 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1749 i = 0;
1750 } else { /* WLDRW wRd */
1751 tmp = tcg_temp_new_i32();
1752 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1754 } else {
1755 tmp = tcg_temp_new_i32();
1756 if (insn & (1 << 22)) { /* WLDRH */
1757 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1758 } else { /* WLDRB */
1759 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1762 if (i) {
1763 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1764 tcg_temp_free_i32(tmp);
1766 gen_op_iwmmxt_movq_wRn_M0(wrd);
1768 } else {
1769 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1770 tmp = iwmmxt_load_creg(wrd);
1771 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1772 } else {
1773 gen_op_iwmmxt_movq_M0_wRn(wrd);
1774 tmp = tcg_temp_new_i32();
1775 if (insn & (1 << 8)) {
1776 if (insn & (1 << 22)) { /* WSTRD */
1777 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1778 } else { /* WSTRW wRd */
1779 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1780 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1782 } else {
1783 if (insn & (1 << 22)) { /* WSTRH */
1784 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1785 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1786 } else { /* WSTRB */
1787 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1788 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1792 tcg_temp_free_i32(tmp);
1794 tcg_temp_free_i32(addr);
1795 return 0;
1798 if ((insn & 0x0f000000) != 0x0e000000)
1799 return 1;
1801 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1802 case 0x000: /* WOR */
1803 wrd = (insn >> 12) & 0xf;
1804 rd0 = (insn >> 0) & 0xf;
1805 rd1 = (insn >> 16) & 0xf;
1806 gen_op_iwmmxt_movq_M0_wRn(rd0);
1807 gen_op_iwmmxt_orq_M0_wRn(rd1);
1808 gen_op_iwmmxt_setpsr_nz();
1809 gen_op_iwmmxt_movq_wRn_M0(wrd);
1810 gen_op_iwmmxt_set_mup();
1811 gen_op_iwmmxt_set_cup();
1812 break;
1813 case 0x011: /* TMCR */
1814 if (insn & 0xf)
1815 return 1;
1816 rd = (insn >> 12) & 0xf;
1817 wrd = (insn >> 16) & 0xf;
1818 switch (wrd) {
1819 case ARM_IWMMXT_wCID:
1820 case ARM_IWMMXT_wCASF:
1821 break;
1822 case ARM_IWMMXT_wCon:
1823 gen_op_iwmmxt_set_cup();
1824 /* Fall through. */
1825 case ARM_IWMMXT_wCSSF:
1826 tmp = iwmmxt_load_creg(wrd);
1827 tmp2 = load_reg(s, rd);
1828 tcg_gen_andc_i32(tmp, tmp, tmp2);
1829 tcg_temp_free_i32(tmp2);
1830 iwmmxt_store_creg(wrd, tmp);
1831 break;
1832 case ARM_IWMMXT_wCGR0:
1833 case ARM_IWMMXT_wCGR1:
1834 case ARM_IWMMXT_wCGR2:
1835 case ARM_IWMMXT_wCGR3:
1836 gen_op_iwmmxt_set_cup();
1837 tmp = load_reg(s, rd);
1838 iwmmxt_store_creg(wrd, tmp);
1839 break;
1840 default:
1841 return 1;
1843 break;
1844 case 0x100: /* WXOR */
1845 wrd = (insn >> 12) & 0xf;
1846 rd0 = (insn >> 0) & 0xf;
1847 rd1 = (insn >> 16) & 0xf;
1848 gen_op_iwmmxt_movq_M0_wRn(rd0);
1849 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1850 gen_op_iwmmxt_setpsr_nz();
1851 gen_op_iwmmxt_movq_wRn_M0(wrd);
1852 gen_op_iwmmxt_set_mup();
1853 gen_op_iwmmxt_set_cup();
1854 break;
1855 case 0x111: /* TMRC */
1856 if (insn & 0xf)
1857 return 1;
1858 rd = (insn >> 12) & 0xf;
1859 wrd = (insn >> 16) & 0xf;
1860 tmp = iwmmxt_load_creg(wrd);
1861 store_reg(s, rd, tmp);
1862 break;
1863 case 0x300: /* WANDN */
1864 wrd = (insn >> 12) & 0xf;
1865 rd0 = (insn >> 0) & 0xf;
1866 rd1 = (insn >> 16) & 0xf;
1867 gen_op_iwmmxt_movq_M0_wRn(rd0);
1868 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1869 gen_op_iwmmxt_andq_M0_wRn(rd1);
1870 gen_op_iwmmxt_setpsr_nz();
1871 gen_op_iwmmxt_movq_wRn_M0(wrd);
1872 gen_op_iwmmxt_set_mup();
1873 gen_op_iwmmxt_set_cup();
1874 break;
1875 case 0x200: /* WAND */
1876 wrd = (insn >> 12) & 0xf;
1877 rd0 = (insn >> 0) & 0xf;
1878 rd1 = (insn >> 16) & 0xf;
1879 gen_op_iwmmxt_movq_M0_wRn(rd0);
1880 gen_op_iwmmxt_andq_M0_wRn(rd1);
1881 gen_op_iwmmxt_setpsr_nz();
1882 gen_op_iwmmxt_movq_wRn_M0(wrd);
1883 gen_op_iwmmxt_set_mup();
1884 gen_op_iwmmxt_set_cup();
1885 break;
1886 case 0x810: case 0xa10: /* WMADD */
1887 wrd = (insn >> 12) & 0xf;
1888 rd0 = (insn >> 0) & 0xf;
1889 rd1 = (insn >> 16) & 0xf;
1890 gen_op_iwmmxt_movq_M0_wRn(rd0);
1891 if (insn & (1 << 21))
1892 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1893 else
1894 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1895 gen_op_iwmmxt_movq_wRn_M0(wrd);
1896 gen_op_iwmmxt_set_mup();
1897 break;
1898 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1899 wrd = (insn >> 12) & 0xf;
1900 rd0 = (insn >> 16) & 0xf;
1901 rd1 = (insn >> 0) & 0xf;
1902 gen_op_iwmmxt_movq_M0_wRn(rd0);
1903 switch ((insn >> 22) & 3) {
1904 case 0:
1905 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1906 break;
1907 case 1:
1908 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1909 break;
1910 case 2:
1911 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1912 break;
1913 case 3:
1914 return 1;
1916 gen_op_iwmmxt_movq_wRn_M0(wrd);
1917 gen_op_iwmmxt_set_mup();
1918 gen_op_iwmmxt_set_cup();
1919 break;
1920 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1921 wrd = (insn >> 12) & 0xf;
1922 rd0 = (insn >> 16) & 0xf;
1923 rd1 = (insn >> 0) & 0xf;
1924 gen_op_iwmmxt_movq_M0_wRn(rd0);
1925 switch ((insn >> 22) & 3) {
1926 case 0:
1927 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1928 break;
1929 case 1:
1930 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1931 break;
1932 case 2:
1933 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1934 break;
1935 case 3:
1936 return 1;
1938 gen_op_iwmmxt_movq_wRn_M0(wrd);
1939 gen_op_iwmmxt_set_mup();
1940 gen_op_iwmmxt_set_cup();
1941 break;
1942 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1943 wrd = (insn >> 12) & 0xf;
1944 rd0 = (insn >> 16) & 0xf;
1945 rd1 = (insn >> 0) & 0xf;
1946 gen_op_iwmmxt_movq_M0_wRn(rd0);
1947 if (insn & (1 << 22))
1948 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1949 else
1950 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1951 if (!(insn & (1 << 20)))
1952 gen_op_iwmmxt_addl_M0_wRn(wrd);
1953 gen_op_iwmmxt_movq_wRn_M0(wrd);
1954 gen_op_iwmmxt_set_mup();
1955 break;
1956 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1957 wrd = (insn >> 12) & 0xf;
1958 rd0 = (insn >> 16) & 0xf;
1959 rd1 = (insn >> 0) & 0xf;
1960 gen_op_iwmmxt_movq_M0_wRn(rd0);
1961 if (insn & (1 << 21)) {
1962 if (insn & (1 << 20))
1963 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1964 else
1965 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1966 } else {
1967 if (insn & (1 << 20))
1968 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1969 else
1970 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1972 gen_op_iwmmxt_movq_wRn_M0(wrd);
1973 gen_op_iwmmxt_set_mup();
1974 break;
1975 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1976 wrd = (insn >> 12) & 0xf;
1977 rd0 = (insn >> 16) & 0xf;
1978 rd1 = (insn >> 0) & 0xf;
1979 gen_op_iwmmxt_movq_M0_wRn(rd0);
1980 if (insn & (1 << 21))
1981 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1982 else
1983 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1984 if (!(insn & (1 << 20))) {
1985 iwmmxt_load_reg(cpu_V1, wrd);
1986 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1988 gen_op_iwmmxt_movq_wRn_M0(wrd);
1989 gen_op_iwmmxt_set_mup();
1990 break;
1991 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1992 wrd = (insn >> 12) & 0xf;
1993 rd0 = (insn >> 16) & 0xf;
1994 rd1 = (insn >> 0) & 0xf;
1995 gen_op_iwmmxt_movq_M0_wRn(rd0);
1996 switch ((insn >> 22) & 3) {
1997 case 0:
1998 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1999 break;
2000 case 1:
2001 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2002 break;
2003 case 2:
2004 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2005 break;
2006 case 3:
2007 return 1;
2009 gen_op_iwmmxt_movq_wRn_M0(wrd);
2010 gen_op_iwmmxt_set_mup();
2011 gen_op_iwmmxt_set_cup();
2012 break;
2013 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2014 wrd = (insn >> 12) & 0xf;
2015 rd0 = (insn >> 16) & 0xf;
2016 rd1 = (insn >> 0) & 0xf;
2017 gen_op_iwmmxt_movq_M0_wRn(rd0);
2018 if (insn & (1 << 22)) {
2019 if (insn & (1 << 20))
2020 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2021 else
2022 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2023 } else {
2024 if (insn & (1 << 20))
2025 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2026 else
2027 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2029 gen_op_iwmmxt_movq_wRn_M0(wrd);
2030 gen_op_iwmmxt_set_mup();
2031 gen_op_iwmmxt_set_cup();
2032 break;
2033 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2034 wrd = (insn >> 12) & 0xf;
2035 rd0 = (insn >> 16) & 0xf;
2036 rd1 = (insn >> 0) & 0xf;
2037 gen_op_iwmmxt_movq_M0_wRn(rd0);
2038 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2039 tcg_gen_andi_i32(tmp, tmp, 7);
2040 iwmmxt_load_reg(cpu_V1, rd1);
2041 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2042 tcg_temp_free_i32(tmp);
2043 gen_op_iwmmxt_movq_wRn_M0(wrd);
2044 gen_op_iwmmxt_set_mup();
2045 break;
2046 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2047 if (((insn >> 6) & 3) == 3)
2048 return 1;
2049 rd = (insn >> 12) & 0xf;
2050 wrd = (insn >> 16) & 0xf;
2051 tmp = load_reg(s, rd);
2052 gen_op_iwmmxt_movq_M0_wRn(wrd);
2053 switch ((insn >> 6) & 3) {
2054 case 0:
2055 tmp2 = tcg_const_i32(0xff);
2056 tmp3 = tcg_const_i32((insn & 7) << 3);
2057 break;
2058 case 1:
2059 tmp2 = tcg_const_i32(0xffff);
2060 tmp3 = tcg_const_i32((insn & 3) << 4);
2061 break;
2062 case 2:
2063 tmp2 = tcg_const_i32(0xffffffff);
2064 tmp3 = tcg_const_i32((insn & 1) << 5);
2065 break;
2066 default:
2067 TCGV_UNUSED_I32(tmp2);
2068 TCGV_UNUSED_I32(tmp3);
2070 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2071 tcg_temp_free_i32(tmp3);
2072 tcg_temp_free_i32(tmp2);
2073 tcg_temp_free_i32(tmp);
2074 gen_op_iwmmxt_movq_wRn_M0(wrd);
2075 gen_op_iwmmxt_set_mup();
2076 break;
2077 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2078 rd = (insn >> 12) & 0xf;
2079 wrd = (insn >> 16) & 0xf;
2080 if (rd == 15 || ((insn >> 22) & 3) == 3)
2081 return 1;
2082 gen_op_iwmmxt_movq_M0_wRn(wrd);
2083 tmp = tcg_temp_new_i32();
2084 switch ((insn >> 22) & 3) {
2085 case 0:
2086 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2087 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2088 if (insn & 8) {
2089 tcg_gen_ext8s_i32(tmp, tmp);
2090 } else {
2091 tcg_gen_andi_i32(tmp, tmp, 0xff);
2093 break;
2094 case 1:
2095 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2096 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2097 if (insn & 8) {
2098 tcg_gen_ext16s_i32(tmp, tmp);
2099 } else {
2100 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2102 break;
2103 case 2:
2104 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2105 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2106 break;
2108 store_reg(s, rd, tmp);
2109 break;
2110 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2111 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2112 return 1;
2113 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2114 switch ((insn >> 22) & 3) {
2115 case 0:
2116 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2117 break;
2118 case 1:
2119 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2120 break;
2121 case 2:
2122 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2123 break;
2125 tcg_gen_shli_i32(tmp, tmp, 28);
2126 gen_set_nzcv(tmp);
2127 tcg_temp_free_i32(tmp);
2128 break;
2129 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2130 if (((insn >> 6) & 3) == 3)
2131 return 1;
2132 rd = (insn >> 12) & 0xf;
2133 wrd = (insn >> 16) & 0xf;
2134 tmp = load_reg(s, rd);
2135 switch ((insn >> 6) & 3) {
2136 case 0:
2137 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2138 break;
2139 case 1:
2140 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2141 break;
2142 case 2:
2143 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2144 break;
2146 tcg_temp_free_i32(tmp);
2147 gen_op_iwmmxt_movq_wRn_M0(wrd);
2148 gen_op_iwmmxt_set_mup();
2149 break;
2150 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2151 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2152 return 1;
2153 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2154 tmp2 = tcg_temp_new_i32();
2155 tcg_gen_mov_i32(tmp2, tmp);
2156 switch ((insn >> 22) & 3) {
2157 case 0:
2158 for (i = 0; i < 7; i ++) {
2159 tcg_gen_shli_i32(tmp2, tmp2, 4);
2160 tcg_gen_and_i32(tmp, tmp, tmp2);
2162 break;
2163 case 1:
2164 for (i = 0; i < 3; i ++) {
2165 tcg_gen_shli_i32(tmp2, tmp2, 8);
2166 tcg_gen_and_i32(tmp, tmp, tmp2);
2168 break;
2169 case 2:
2170 tcg_gen_shli_i32(tmp2, tmp2, 16);
2171 tcg_gen_and_i32(tmp, tmp, tmp2);
2172 break;
2174 gen_set_nzcv(tmp);
2175 tcg_temp_free_i32(tmp2);
2176 tcg_temp_free_i32(tmp);
2177 break;
2178 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2179 wrd = (insn >> 12) & 0xf;
2180 rd0 = (insn >> 16) & 0xf;
2181 gen_op_iwmmxt_movq_M0_wRn(rd0);
2182 switch ((insn >> 22) & 3) {
2183 case 0:
2184 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2185 break;
2186 case 1:
2187 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2188 break;
2189 case 2:
2190 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2191 break;
2192 case 3:
2193 return 1;
2195 gen_op_iwmmxt_movq_wRn_M0(wrd);
2196 gen_op_iwmmxt_set_mup();
2197 break;
2198 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2199 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2200 return 1;
2201 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2202 tmp2 = tcg_temp_new_i32();
2203 tcg_gen_mov_i32(tmp2, tmp);
2204 switch ((insn >> 22) & 3) {
2205 case 0:
2206 for (i = 0; i < 7; i ++) {
2207 tcg_gen_shli_i32(tmp2, tmp2, 4);
2208 tcg_gen_or_i32(tmp, tmp, tmp2);
2210 break;
2211 case 1:
2212 for (i = 0; i < 3; i ++) {
2213 tcg_gen_shli_i32(tmp2, tmp2, 8);
2214 tcg_gen_or_i32(tmp, tmp, tmp2);
2216 break;
2217 case 2:
2218 tcg_gen_shli_i32(tmp2, tmp2, 16);
2219 tcg_gen_or_i32(tmp, tmp, tmp2);
2220 break;
2222 gen_set_nzcv(tmp);
2223 tcg_temp_free_i32(tmp2);
2224 tcg_temp_free_i32(tmp);
2225 break;
2226 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2227 rd = (insn >> 12) & 0xf;
2228 rd0 = (insn >> 16) & 0xf;
2229 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2230 return 1;
2231 gen_op_iwmmxt_movq_M0_wRn(rd0);
2232 tmp = tcg_temp_new_i32();
2233 switch ((insn >> 22) & 3) {
2234 case 0:
2235 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2236 break;
2237 case 1:
2238 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2239 break;
2240 case 2:
2241 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2242 break;
2244 store_reg(s, rd, tmp);
2245 break;
2246 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2247 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2248 wrd = (insn >> 12) & 0xf;
2249 rd0 = (insn >> 16) & 0xf;
2250 rd1 = (insn >> 0) & 0xf;
2251 gen_op_iwmmxt_movq_M0_wRn(rd0);
2252 switch ((insn >> 22) & 3) {
2253 case 0:
2254 if (insn & (1 << 21))
2255 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2256 else
2257 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2258 break;
2259 case 1:
2260 if (insn & (1 << 21))
2261 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2262 else
2263 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2264 break;
2265 case 2:
2266 if (insn & (1 << 21))
2267 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2268 else
2269 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2270 break;
2271 case 3:
2272 return 1;
2274 gen_op_iwmmxt_movq_wRn_M0(wrd);
2275 gen_op_iwmmxt_set_mup();
2276 gen_op_iwmmxt_set_cup();
2277 break;
2278 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2279 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2280 wrd = (insn >> 12) & 0xf;
2281 rd0 = (insn >> 16) & 0xf;
2282 gen_op_iwmmxt_movq_M0_wRn(rd0);
2283 switch ((insn >> 22) & 3) {
2284 case 0:
2285 if (insn & (1 << 21))
2286 gen_op_iwmmxt_unpacklsb_M0();
2287 else
2288 gen_op_iwmmxt_unpacklub_M0();
2289 break;
2290 case 1:
2291 if (insn & (1 << 21))
2292 gen_op_iwmmxt_unpacklsw_M0();
2293 else
2294 gen_op_iwmmxt_unpackluw_M0();
2295 break;
2296 case 2:
2297 if (insn & (1 << 21))
2298 gen_op_iwmmxt_unpacklsl_M0();
2299 else
2300 gen_op_iwmmxt_unpacklul_M0();
2301 break;
2302 case 3:
2303 return 1;
2305 gen_op_iwmmxt_movq_wRn_M0(wrd);
2306 gen_op_iwmmxt_set_mup();
2307 gen_op_iwmmxt_set_cup();
2308 break;
2309 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2310 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2311 wrd = (insn >> 12) & 0xf;
2312 rd0 = (insn >> 16) & 0xf;
2313 gen_op_iwmmxt_movq_M0_wRn(rd0);
2314 switch ((insn >> 22) & 3) {
2315 case 0:
2316 if (insn & (1 << 21))
2317 gen_op_iwmmxt_unpackhsb_M0();
2318 else
2319 gen_op_iwmmxt_unpackhub_M0();
2320 break;
2321 case 1:
2322 if (insn & (1 << 21))
2323 gen_op_iwmmxt_unpackhsw_M0();
2324 else
2325 gen_op_iwmmxt_unpackhuw_M0();
2326 break;
2327 case 2:
2328 if (insn & (1 << 21))
2329 gen_op_iwmmxt_unpackhsl_M0();
2330 else
2331 gen_op_iwmmxt_unpackhul_M0();
2332 break;
2333 case 3:
2334 return 1;
2336 gen_op_iwmmxt_movq_wRn_M0(wrd);
2337 gen_op_iwmmxt_set_mup();
2338 gen_op_iwmmxt_set_cup();
2339 break;
2340 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2341 case 0x214: case 0x614: case 0xa14: case 0xe14:
2342 if (((insn >> 22) & 3) == 0)
2343 return 1;
2344 wrd = (insn >> 12) & 0xf;
2345 rd0 = (insn >> 16) & 0xf;
2346 gen_op_iwmmxt_movq_M0_wRn(rd0);
2347 tmp = tcg_temp_new_i32();
2348 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2349 tcg_temp_free_i32(tmp);
2350 return 1;
2352 switch ((insn >> 22) & 3) {
2353 case 1:
2354 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2355 break;
2356 case 2:
2357 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2358 break;
2359 case 3:
2360 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2361 break;
2363 tcg_temp_free_i32(tmp);
2364 gen_op_iwmmxt_movq_wRn_M0(wrd);
2365 gen_op_iwmmxt_set_mup();
2366 gen_op_iwmmxt_set_cup();
2367 break;
2368 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2369 case 0x014: case 0x414: case 0x814: case 0xc14:
2370 if (((insn >> 22) & 3) == 0)
2371 return 1;
2372 wrd = (insn >> 12) & 0xf;
2373 rd0 = (insn >> 16) & 0xf;
2374 gen_op_iwmmxt_movq_M0_wRn(rd0);
2375 tmp = tcg_temp_new_i32();
2376 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2377 tcg_temp_free_i32(tmp);
2378 return 1;
2380 switch ((insn >> 22) & 3) {
2381 case 1:
2382 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2383 break;
2384 case 2:
2385 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2386 break;
2387 case 3:
2388 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2389 break;
2391 tcg_temp_free_i32(tmp);
2392 gen_op_iwmmxt_movq_wRn_M0(wrd);
2393 gen_op_iwmmxt_set_mup();
2394 gen_op_iwmmxt_set_cup();
2395 break;
2396 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2397 case 0x114: case 0x514: case 0x914: case 0xd14:
2398 if (((insn >> 22) & 3) == 0)
2399 return 1;
2400 wrd = (insn >> 12) & 0xf;
2401 rd0 = (insn >> 16) & 0xf;
2402 gen_op_iwmmxt_movq_M0_wRn(rd0);
2403 tmp = tcg_temp_new_i32();
2404 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2405 tcg_temp_free_i32(tmp);
2406 return 1;
2408 switch ((insn >> 22) & 3) {
2409 case 1:
2410 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2411 break;
2412 case 2:
2413 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2414 break;
2415 case 3:
2416 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2417 break;
2419 tcg_temp_free_i32(tmp);
2420 gen_op_iwmmxt_movq_wRn_M0(wrd);
2421 gen_op_iwmmxt_set_mup();
2422 gen_op_iwmmxt_set_cup();
2423 break;
2424 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2425 case 0x314: case 0x714: case 0xb14: case 0xf14:
2426 if (((insn >> 22) & 3) == 0)
2427 return 1;
2428 wrd = (insn >> 12) & 0xf;
2429 rd0 = (insn >> 16) & 0xf;
2430 gen_op_iwmmxt_movq_M0_wRn(rd0);
2431 tmp = tcg_temp_new_i32();
2432 switch ((insn >> 22) & 3) {
2433 case 1:
2434 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2435 tcg_temp_free_i32(tmp);
2436 return 1;
2438 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2439 break;
2440 case 2:
2441 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2442 tcg_temp_free_i32(tmp);
2443 return 1;
2445 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2446 break;
2447 case 3:
2448 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2449 tcg_temp_free_i32(tmp);
2450 return 1;
2452 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2453 break;
2455 tcg_temp_free_i32(tmp);
2456 gen_op_iwmmxt_movq_wRn_M0(wrd);
2457 gen_op_iwmmxt_set_mup();
2458 gen_op_iwmmxt_set_cup();
2459 break;
2460 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2461 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2462 wrd = (insn >> 12) & 0xf;
2463 rd0 = (insn >> 16) & 0xf;
2464 rd1 = (insn >> 0) & 0xf;
2465 gen_op_iwmmxt_movq_M0_wRn(rd0);
2466 switch ((insn >> 22) & 3) {
2467 case 0:
2468 if (insn & (1 << 21))
2469 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2470 else
2471 gen_op_iwmmxt_minub_M0_wRn(rd1);
2472 break;
2473 case 1:
2474 if (insn & (1 << 21))
2475 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2476 else
2477 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2478 break;
2479 case 2:
2480 if (insn & (1 << 21))
2481 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2482 else
2483 gen_op_iwmmxt_minul_M0_wRn(rd1);
2484 break;
2485 case 3:
2486 return 1;
2488 gen_op_iwmmxt_movq_wRn_M0(wrd);
2489 gen_op_iwmmxt_set_mup();
2490 break;
2491 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2492 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2493 wrd = (insn >> 12) & 0xf;
2494 rd0 = (insn >> 16) & 0xf;
2495 rd1 = (insn >> 0) & 0xf;
2496 gen_op_iwmmxt_movq_M0_wRn(rd0);
2497 switch ((insn >> 22) & 3) {
2498 case 0:
2499 if (insn & (1 << 21))
2500 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2501 else
2502 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2503 break;
2504 case 1:
2505 if (insn & (1 << 21))
2506 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2507 else
2508 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2509 break;
2510 case 2:
2511 if (insn & (1 << 21))
2512 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2513 else
2514 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2515 break;
2516 case 3:
2517 return 1;
2519 gen_op_iwmmxt_movq_wRn_M0(wrd);
2520 gen_op_iwmmxt_set_mup();
2521 break;
2522 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2523 case 0x402: case 0x502: case 0x602: case 0x702:
2524 wrd = (insn >> 12) & 0xf;
2525 rd0 = (insn >> 16) & 0xf;
2526 rd1 = (insn >> 0) & 0xf;
2527 gen_op_iwmmxt_movq_M0_wRn(rd0);
2528 tmp = tcg_const_i32((insn >> 20) & 3);
2529 iwmmxt_load_reg(cpu_V1, rd1);
2530 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2531 tcg_temp_free_i32(tmp);
2532 gen_op_iwmmxt_movq_wRn_M0(wrd);
2533 gen_op_iwmmxt_set_mup();
2534 break;
2535 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2536 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2537 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2538 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2539 wrd = (insn >> 12) & 0xf;
2540 rd0 = (insn >> 16) & 0xf;
2541 rd1 = (insn >> 0) & 0xf;
2542 gen_op_iwmmxt_movq_M0_wRn(rd0);
2543 switch ((insn >> 20) & 0xf) {
2544 case 0x0:
2545 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2546 break;
2547 case 0x1:
2548 gen_op_iwmmxt_subub_M0_wRn(rd1);
2549 break;
2550 case 0x3:
2551 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2552 break;
2553 case 0x4:
2554 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2555 break;
2556 case 0x5:
2557 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2558 break;
2559 case 0x7:
2560 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2561 break;
2562 case 0x8:
2563 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2564 break;
2565 case 0x9:
2566 gen_op_iwmmxt_subul_M0_wRn(rd1);
2567 break;
2568 case 0xb:
2569 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2570 break;
2571 default:
2572 return 1;
2574 gen_op_iwmmxt_movq_wRn_M0(wrd);
2575 gen_op_iwmmxt_set_mup();
2576 gen_op_iwmmxt_set_cup();
2577 break;
2578 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2579 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2580 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2581 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2582 wrd = (insn >> 12) & 0xf;
2583 rd0 = (insn >> 16) & 0xf;
2584 gen_op_iwmmxt_movq_M0_wRn(rd0);
2585 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2586 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2587 tcg_temp_free_i32(tmp);
2588 gen_op_iwmmxt_movq_wRn_M0(wrd);
2589 gen_op_iwmmxt_set_mup();
2590 gen_op_iwmmxt_set_cup();
2591 break;
2592 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2593 case 0x418: case 0x518: case 0x618: case 0x718:
2594 case 0x818: case 0x918: case 0xa18: case 0xb18:
2595 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2596 wrd = (insn >> 12) & 0xf;
2597 rd0 = (insn >> 16) & 0xf;
2598 rd1 = (insn >> 0) & 0xf;
2599 gen_op_iwmmxt_movq_M0_wRn(rd0);
2600 switch ((insn >> 20) & 0xf) {
2601 case 0x0:
2602 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2603 break;
2604 case 0x1:
2605 gen_op_iwmmxt_addub_M0_wRn(rd1);
2606 break;
2607 case 0x3:
2608 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2609 break;
2610 case 0x4:
2611 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2612 break;
2613 case 0x5:
2614 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2615 break;
2616 case 0x7:
2617 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2618 break;
2619 case 0x8:
2620 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2621 break;
2622 case 0x9:
2623 gen_op_iwmmxt_addul_M0_wRn(rd1);
2624 break;
2625 case 0xb:
2626 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2627 break;
2628 default:
2629 return 1;
2631 gen_op_iwmmxt_movq_wRn_M0(wrd);
2632 gen_op_iwmmxt_set_mup();
2633 gen_op_iwmmxt_set_cup();
2634 break;
2635 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2636 case 0x408: case 0x508: case 0x608: case 0x708:
2637 case 0x808: case 0x908: case 0xa08: case 0xb08:
2638 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2639 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2640 return 1;
2641 wrd = (insn >> 12) & 0xf;
2642 rd0 = (insn >> 16) & 0xf;
2643 rd1 = (insn >> 0) & 0xf;
2644 gen_op_iwmmxt_movq_M0_wRn(rd0);
2645 switch ((insn >> 22) & 3) {
2646 case 1:
2647 if (insn & (1 << 21))
2648 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2649 else
2650 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2651 break;
2652 case 2:
2653 if (insn & (1 << 21))
2654 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2655 else
2656 gen_op_iwmmxt_packul_M0_wRn(rd1);
2657 break;
2658 case 3:
2659 if (insn & (1 << 21))
2660 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2661 else
2662 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2663 break;
2665 gen_op_iwmmxt_movq_wRn_M0(wrd);
2666 gen_op_iwmmxt_set_mup();
2667 gen_op_iwmmxt_set_cup();
2668 break;
2669 case 0x201: case 0x203: case 0x205: case 0x207:
2670 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2671 case 0x211: case 0x213: case 0x215: case 0x217:
2672 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2673 wrd = (insn >> 5) & 0xf;
2674 rd0 = (insn >> 12) & 0xf;
2675 rd1 = (insn >> 0) & 0xf;
2676 if (rd0 == 0xf || rd1 == 0xf)
2677 return 1;
2678 gen_op_iwmmxt_movq_M0_wRn(wrd);
2679 tmp = load_reg(s, rd0);
2680 tmp2 = load_reg(s, rd1);
2681 switch ((insn >> 16) & 0xf) {
2682 case 0x0: /* TMIA */
2683 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2684 break;
2685 case 0x8: /* TMIAPH */
2686 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2687 break;
2688 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2689 if (insn & (1 << 16))
2690 tcg_gen_shri_i32(tmp, tmp, 16);
2691 if (insn & (1 << 17))
2692 tcg_gen_shri_i32(tmp2, tmp2, 16);
2693 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2694 break;
2695 default:
2696 tcg_temp_free_i32(tmp2);
2697 tcg_temp_free_i32(tmp);
2698 return 1;
2700 tcg_temp_free_i32(tmp2);
2701 tcg_temp_free_i32(tmp);
2702 gen_op_iwmmxt_movq_wRn_M0(wrd);
2703 gen_op_iwmmxt_set_mup();
2704 break;
2705 default:
2706 return 1;
2709 return 0;
2710 }
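/*
 * The arithmetic cases above all share one shape: load the first source
 * wRn into the M0 working register, combine it with the second operand,
 * write M0 back to the destination, and record the update by setting
 * MUP (plus CUP for the forms that also update the SIMD flags).
 * A minimal sketch of that skeleton, using the unsigned byte add helper
 * that the WADD case above relies on:
 */
#if 0   /* illustrative sketch only */
static void gen_iwmmxt_binop_example(uint32_t insn)
{
    int wrd = (insn >> 12) & 0xf;       /* destination register */
    int rd0 = (insn >> 16) & 0xf;       /* first source */
    int rd1 = insn & 0xf;               /* second source */

    gen_op_iwmmxt_movq_M0_wRn(rd0);     /* M0 = wR[rd0] */
    gen_op_iwmmxt_addub_M0_wRn(rd1);    /* M0 = op(M0, wR[rd1]) */
    gen_op_iwmmxt_movq_wRn_M0(wrd);     /* wR[wrd] = M0 */
    gen_op_iwmmxt_set_mup();            /* note the register file update */
    gen_op_iwmmxt_set_cup();            /* ...and the flag update */
}
#endif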
2712 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2713 (i.e. an undefined instruction). */
2714 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2716 int acc, rd0, rd1, rdhi, rdlo;
2717 TCGv_i32 tmp, tmp2;
2719 if ((insn & 0x0ff00f10) == 0x0e200010) {
2720 /* Multiply with Internal Accumulate Format */
2721 rd0 = (insn >> 12) & 0xf;
2722 rd1 = insn & 0xf;
2723 acc = (insn >> 5) & 7;
2725 if (acc != 0)
2726 return 1;
2728 tmp = load_reg(s, rd0);
2729 tmp2 = load_reg(s, rd1);
2730 switch ((insn >> 16) & 0xf) {
2731 case 0x0: /* MIA */
2732 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2733 break;
2734 case 0x8: /* MIAPH */
2735 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2736 break;
2737 case 0xc: /* MIABB */
2738 case 0xd: /* MIABT */
2739 case 0xe: /* MIATB */
2740 case 0xf: /* MIATT */
2741 if (insn & (1 << 16))
2742 tcg_gen_shri_i32(tmp, tmp, 16);
2743 if (insn & (1 << 17))
2744 tcg_gen_shri_i32(tmp2, tmp2, 16);
2745 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2746 break;
2747 default:
2748 return 1;
2750 tcg_temp_free_i32(tmp2);
2751 tcg_temp_free_i32(tmp);
2753 gen_op_iwmmxt_movq_wRn_M0(acc);
2754 return 0;
2757 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2758 /* Internal Accumulator Access Format */
2759 rdhi = (insn >> 16) & 0xf;
2760 rdlo = (insn >> 12) & 0xf;
2761 acc = insn & 7;
2763 if (acc != 0)
2764 return 1;
2766 if (insn & ARM_CP_RW_BIT) { /* MRA */
2767 iwmmxt_load_reg(cpu_V0, acc);
2768 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2769 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2770 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
2771 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2772 } else { /* MAR */
2773 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2774 iwmmxt_store_reg(cpu_V0, acc);
2776 return 0;
2779 return 1;
2780 }
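/*
 * The XScale accumulator acc0 is 40 bits wide, which is why MRA masks
 * the high word with (1 << (40 - 32)) - 1 above.  The MIA* multiply
 * variants reuse the iwmmxt multiply-accumulate helpers and write the
 * result back with the same movq_wRn_M0 path used for wMMX destination
 * registers.
 */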
2782 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2783 #define VFP_SREG(insn, bigbit, smallbit) \
2784 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2785 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2786 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2787 reg = (((insn) >> (bigbit)) & 0x0f) \
2788 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2789 } else { \
2790 if (insn & (1 << (smallbit))) \
2791 return 1; \
2792 reg = ((insn) >> (bigbit)) & 0x0f; \
2793 }} while (0)
2795 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2796 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2797 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2798 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2799 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2800 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
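/*
 * These macros reassemble the split VFP register number fields.  For a
 * single-precision register the 4-bit field supplies the upper bits and
 * the extra bit is the LSB (Sd = Vd:D): e.g. insn[15:12] = 0x5 with
 * insn[22] = 1 decodes to S11.  For double-precision registers on VFP3
 * and later the extra bit becomes the MSB instead (Dd = D:Vd); without
 * VFP3 only D0-D15 exist, so a set extra bit makes the encoding UNDEF
 * (the "return 1" in VFP_DREG above).  A minimal plain-function sketch
 * of the single-precision case:
 */
#if 0   /* illustrative sketch only */
static inline int vfp_sreg_d_example(uint32_t insn)
{
    /* Sd = Vd:D, i.e. insn[15:12] as the high bits, insn[22] as bit 0 */
    return (extract32(insn, 12, 4) << 1) | extract32(insn, 22, 1);
}
#endif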
2802 /* Move between integer and VFP cores. */
2803 static TCGv_i32 gen_vfp_mrs(void)
2805 TCGv_i32 tmp = tcg_temp_new_i32();
2806 tcg_gen_mov_i32(tmp, cpu_F0s);
2807 return tmp;
2810 static void gen_vfp_msr(TCGv_i32 tmp)
2812 tcg_gen_mov_i32(cpu_F0s, tmp);
2813 tcg_temp_free_i32(tmp);
2816 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2818 TCGv_i32 tmp = tcg_temp_new_i32();
2819 if (shift)
2820 tcg_gen_shri_i32(var, var, shift);
2821 tcg_gen_ext8u_i32(var, var);
2822 tcg_gen_shli_i32(tmp, var, 8);
2823 tcg_gen_or_i32(var, var, tmp);
2824 tcg_gen_shli_i32(tmp, var, 16);
2825 tcg_gen_or_i32(var, var, tmp);
2826 tcg_temp_free_i32(tmp);
2829 static void gen_neon_dup_low16(TCGv_i32 var)
2831 TCGv_i32 tmp = tcg_temp_new_i32();
2832 tcg_gen_ext16u_i32(var, var);
2833 tcg_gen_shli_i32(tmp, var, 16);
2834 tcg_gen_or_i32(var, var, tmp);
2835 tcg_temp_free_i32(tmp);
2838 static void gen_neon_dup_high16(TCGv_i32 var)
2840 TCGv_i32 tmp = tcg_temp_new_i32();
2841 tcg_gen_andi_i32(var, var, 0xffff0000);
2842 tcg_gen_shri_i32(tmp, var, 16);
2843 tcg_gen_or_i32(var, var, tmp);
2844 tcg_temp_free_i32(tmp);
2847 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2849 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2850 TCGv_i32 tmp = tcg_temp_new_i32();
2851 switch (size) {
2852 case 0:
2853 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2854 gen_neon_dup_u8(tmp, 0);
2855 break;
2856 case 1:
2857 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
2858 gen_neon_dup_low16(tmp);
2859 break;
2860 case 2:
2861 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2862 break;
2863 default: /* Avoid compiler warnings. */
2864 abort();
2866 return tmp;
2867 }
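/*
 * The dup helpers above splat a narrow value across a 32-bit TCG value
 * with shift/OR steps: gen_neon_dup_u8 turns 0x000000ab into 0xabababab,
 * gen_neon_dup_low16 turns 0x0000abcd into 0xabcdabcd, and
 * gen_neon_dup_high16 keeps the top halfword (0xabcd0000 -> 0xabcdabcd).
 * gen_load_and_replicate uses them so a "load one element to all lanes"
 * access can be expanded from a single narrow load.
 */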
2869 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2870 uint32_t dp)
2872 uint32_t cc = extract32(insn, 20, 2);
2874 if (dp) {
2875 TCGv_i64 frn, frm, dest;
2876 TCGv_i64 tmp, zero, zf, nf, vf;
2878 zero = tcg_const_i64(0);
2880 frn = tcg_temp_new_i64();
2881 frm = tcg_temp_new_i64();
2882 dest = tcg_temp_new_i64();
2884 zf = tcg_temp_new_i64();
2885 nf = tcg_temp_new_i64();
2886 vf = tcg_temp_new_i64();
2888 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2889 tcg_gen_ext_i32_i64(nf, cpu_NF);
2890 tcg_gen_ext_i32_i64(vf, cpu_VF);
2892 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2893 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2894 switch (cc) {
2895 case 0: /* eq: Z */
2896 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2897 frn, frm);
2898 break;
2899 case 1: /* vs: V */
2900 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
2901 frn, frm);
2902 break;
2903 case 2: /* ge: N == V -> N ^ V == 0 */
2904 tmp = tcg_temp_new_i64();
2905 tcg_gen_xor_i64(tmp, vf, nf);
2906 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2907 frn, frm);
2908 tcg_temp_free_i64(tmp);
2909 break;
2910 case 3: /* gt: !Z && N == V */
2911 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
2912 frn, frm);
2913 tmp = tcg_temp_new_i64();
2914 tcg_gen_xor_i64(tmp, vf, nf);
2915 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2916 dest, frm);
2917 tcg_temp_free_i64(tmp);
2918 break;
2920 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2921 tcg_temp_free_i64(frn);
2922 tcg_temp_free_i64(frm);
2923 tcg_temp_free_i64(dest);
2925 tcg_temp_free_i64(zf);
2926 tcg_temp_free_i64(nf);
2927 tcg_temp_free_i64(vf);
2929 tcg_temp_free_i64(zero);
2930 } else {
2931 TCGv_i32 frn, frm, dest;
2932 TCGv_i32 tmp, zero;
2934 zero = tcg_const_i32(0);
2936 frn = tcg_temp_new_i32();
2937 frm = tcg_temp_new_i32();
2938 dest = tcg_temp_new_i32();
2939 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2940 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2941 switch (cc) {
2942 case 0: /* eq: Z */
2943 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
2944 frn, frm);
2945 break;
2946 case 1: /* vs: V */
2947 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
2948 frn, frm);
2949 break;
2950 case 2: /* ge: N == V -> N ^ V == 0 */
2951 tmp = tcg_temp_new_i32();
2952 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2953 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2954 frn, frm);
2955 tcg_temp_free_i32(tmp);
2956 break;
2957 case 3: /* gt: !Z && N == V */
2958 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
2959 frn, frm);
2960 tmp = tcg_temp_new_i32();
2961 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2962 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2963 dest, frm);
2964 tcg_temp_free_i32(tmp);
2965 break;
2967 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2968 tcg_temp_free_i32(frn);
2969 tcg_temp_free_i32(frm);
2970 tcg_temp_free_i32(dest);
2972 tcg_temp_free_i32(zero);
2975 return 0;
2976 }
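/*
 * handle_vsel leans on how the flags are stored: cpu_ZF holds a value
 * that is zero exactly when Z is set, while cpu_NF and cpu_VF keep N
 * and V in bit 31.  Each VSEL condition then becomes a single movcond:
 * EQ compares ZF against zero, VS tests the sign of VF, GE tests the
 * sign of NF ^ VF, and GT chains the EQ and GE selections.  The
 * double-precision path sign-extends NF/VF (but zero-extends ZF) so the
 * same bit-31 tests work on i64 operands.
 */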
2978 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2979 uint32_t rm, uint32_t dp)
2981 uint32_t vmin = extract32(insn, 6, 1);
2982 TCGv_ptr fpst = get_fpstatus_ptr(0);
2984 if (dp) {
2985 TCGv_i64 frn, frm, dest;
2987 frn = tcg_temp_new_i64();
2988 frm = tcg_temp_new_i64();
2989 dest = tcg_temp_new_i64();
2991 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2992 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2993 if (vmin) {
2994 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
2995 } else {
2996 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
2998 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2999 tcg_temp_free_i64(frn);
3000 tcg_temp_free_i64(frm);
3001 tcg_temp_free_i64(dest);
3002 } else {
3003 TCGv_i32 frn, frm, dest;
3005 frn = tcg_temp_new_i32();
3006 frm = tcg_temp_new_i32();
3007 dest = tcg_temp_new_i32();
3009 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3010 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3011 if (vmin) {
3012 gen_helper_vfp_minnums(dest, frn, frm, fpst);
3013 } else {
3014 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
3016 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3017 tcg_temp_free_i32(frn);
3018 tcg_temp_free_i32(frm);
3019 tcg_temp_free_i32(dest);
3022 tcg_temp_free_ptr(fpst);
3023 return 0;
3024 }
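/*
 * For VMAXNM/VMINNM the only decode decision is bit 6 of the encoding
 * (vmin), which picks the minnum or maxnum helper; those helpers follow
 * the IEEE 754-2008 minNum/maxNum rule of returning the numeric operand
 * when exactly one input is a quiet NaN, which is what distinguishes
 * these insns from the ordinary VMIN/VMAX.
 */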
3026 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3027 int rounding)
3029 TCGv_ptr fpst = get_fpstatus_ptr(0);
3030 TCGv_i32 tcg_rmode;
3032 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3033 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3035 if (dp) {
3036 TCGv_i64 tcg_op;
3037 TCGv_i64 tcg_res;
3038 tcg_op = tcg_temp_new_i64();
3039 tcg_res = tcg_temp_new_i64();
3040 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3041 gen_helper_rintd(tcg_res, tcg_op, fpst);
3042 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3043 tcg_temp_free_i64(tcg_op);
3044 tcg_temp_free_i64(tcg_res);
3045 } else {
3046 TCGv_i32 tcg_op;
3047 TCGv_i32 tcg_res;
3048 tcg_op = tcg_temp_new_i32();
3049 tcg_res = tcg_temp_new_i32();
3050 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3051 gen_helper_rints(tcg_res, tcg_op, fpst);
3052 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3053 tcg_temp_free_i32(tcg_op);
3054 tcg_temp_free_i32(tcg_res);
3057 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3058 tcg_temp_free_i32(tcg_rmode);
3060 tcg_temp_free_ptr(fpst);
3061 return 0;
3062 }
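/*
 * Note the save/restore idiom used here and in handle_vcvt below: the
 * set_rmode helper installs the requested rounding mode and hands the
 * previous one back in the same TCG temp, so calling it a second time
 * with that temp restores the original mode once the rounding op has
 * been emitted.
 */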
3064 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3065 int rounding)
3067 bool is_signed = extract32(insn, 7, 1);
3068 TCGv_ptr fpst = get_fpstatus_ptr(0);
3069 TCGv_i32 tcg_rmode, tcg_shift;
3071 tcg_shift = tcg_const_i32(0);
3073 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3074 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3076 if (dp) {
3077 TCGv_i64 tcg_double, tcg_res;
3078 TCGv_i32 tcg_tmp;
3079 /* Rd is encoded as a single precision register even when the source
3080 * is double precision.
3082 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3083 tcg_double = tcg_temp_new_i64();
3084 tcg_res = tcg_temp_new_i64();
3085 tcg_tmp = tcg_temp_new_i32();
3086 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3087 if (is_signed) {
3088 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3089 } else {
3090 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3092 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
3093 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3094 tcg_temp_free_i32(tcg_tmp);
3095 tcg_temp_free_i64(tcg_res);
3096 tcg_temp_free_i64(tcg_double);
3097 } else {
3098 TCGv_i32 tcg_single, tcg_res;
3099 tcg_single = tcg_temp_new_i32();
3100 tcg_res = tcg_temp_new_i32();
3101 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3102 if (is_signed) {
3103 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3104 } else {
3105 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3107 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3108 tcg_temp_free_i32(tcg_res);
3109 tcg_temp_free_i32(tcg_single);
3112 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3113 tcg_temp_free_i32(tcg_rmode);
3115 tcg_temp_free_i32(tcg_shift);
3117 tcg_temp_free_ptr(fpst);
3119 return 0;
3120 }
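/*
 * For these directed-rounding VCVTs the destination is always an
 * integer held in a single-precision register, which is why the dp
 * path re-derives rd as an S-register index and narrows the 64-bit
 * helper result with extrl before the 32-bit store.
 */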
3122 /* Table for converting the most common AArch32 encoding of
3123 * rounding mode to arm_fprounding order (which matches the
3124 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3126 static const uint8_t fp_decode_rm[] = {
3127 FPROUNDING_TIEAWAY,
3128 FPROUNDING_TIEEVEN,
3129 FPROUNDING_POSINF,
3130 FPROUNDING_NEGINF,
3131 };
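/*
 * fp_decode_rm is indexed by the 2-bit RM field (insn bits [17:16]) of
 * the VRINT{A,N,P,M} and VCVT{A,N,P,M} encodings: RM == 0 selects
 * FPROUNDING_TIEAWAY (the 'A' forms) and RM == 3 selects
 * FPROUNDING_NEGINF (the 'M' forms).
 */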
3133 static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
3135 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3137 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3138 return 1;
3141 if (dp) {
3142 VFP_DREG_D(rd, insn);
3143 VFP_DREG_N(rn, insn);
3144 VFP_DREG_M(rm, insn);
3145 } else {
3146 rd = VFP_SREG_D(insn);
3147 rn = VFP_SREG_N(insn);
3148 rm = VFP_SREG_M(insn);
3151 if ((insn & 0x0f800e50) == 0x0e000a00) {
3152 return handle_vsel(insn, rd, rn, rm, dp);
3153 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3154 return handle_vminmaxnm(insn, rd, rn, rm, dp);
3155 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3156 /* VRINTA, VRINTN, VRINTP, VRINTM */
3157 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3158 return handle_vrint(insn, rd, rm, dp, rounding);
3159 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3160 /* VCVTA, VCVTN, VCVTP, VCVTM */
3161 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3162 return handle_vcvt(insn, rd, rm, dp, rounding);
3164 return 1;
3165 }
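/*
 * disas_vfp_v8_insn only sees the unconditional/T=1 encodings routed
 * here from disas_vfp_insn; each "(insn & mask) == pattern" test picks
 * out one v8-only group (VSEL, VMAXNM/VMINNM, VRINT* and VCVT* with
 * directed rounding) and anything left over falls through to the final
 * "return 1" and UNDEFs.
 */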
3167 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3168 (i.e. an undefined instruction). */
3169 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3171 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3172 int dp, veclen;
3173 TCGv_i32 addr;
3174 TCGv_i32 tmp;
3175 TCGv_i32 tmp2;
3177 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
3178 return 1;
3181 /* FIXME: this access check should not take precedence over UNDEF
3182 * for invalid encodings; we will generate incorrect syndrome information
3183 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3185 if (s->fp_excp_el) {
3186 gen_exception_insn(s, 4, EXCP_UDEF,
3187 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
3188 return 0;
3191 if (!s->vfp_enabled) {
3192 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3193 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3194 return 1;
3195 rn = (insn >> 16) & 0xf;
3196 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3197 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
3198 return 1;
3202 if (extract32(insn, 28, 4) == 0xf) {
3203 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3204 * only used in v8 and above.
3206 return disas_vfp_v8_insn(s, insn);
3209 dp = ((insn & 0xf00) == 0xb00);
3210 switch ((insn >> 24) & 0xf) {
3211 case 0xe:
3212 if (insn & (1 << 4)) {
3213 /* single register transfer */
3214 rd = (insn >> 12) & 0xf;
3215 if (dp) {
3216 int size;
3217 int pass;
3219 VFP_DREG_N(rn, insn);
3220 if (insn & 0xf)
3221 return 1;
3222 if (insn & 0x00c00060
3223 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
3224 return 1;
3227 pass = (insn >> 21) & 1;
3228 if (insn & (1 << 22)) {
3229 size = 0;
3230 offset = ((insn >> 5) & 3) * 8;
3231 } else if (insn & (1 << 5)) {
3232 size = 1;
3233 offset = (insn & (1 << 6)) ? 16 : 0;
3234 } else {
3235 size = 2;
3236 offset = 0;
3238 if (insn & ARM_CP_RW_BIT) {
3239 /* vfp->arm */
3240 tmp = neon_load_reg(rn, pass);
3241 switch (size) {
3242 case 0:
3243 if (offset)
3244 tcg_gen_shri_i32(tmp, tmp, offset);
3245 if (insn & (1 << 23))
3246 gen_uxtb(tmp);
3247 else
3248 gen_sxtb(tmp);
3249 break;
3250 case 1:
3251 if (insn & (1 << 23)) {
3252 if (offset) {
3253 tcg_gen_shri_i32(tmp, tmp, 16);
3254 } else {
3255 gen_uxth(tmp);
3257 } else {
3258 if (offset) {
3259 tcg_gen_sari_i32(tmp, tmp, 16);
3260 } else {
3261 gen_sxth(tmp);
3264 break;
3265 case 2:
3266 break;
3268 store_reg(s, rd, tmp);
3269 } else {
3270 /* arm->vfp */
3271 tmp = load_reg(s, rd);
3272 if (insn & (1 << 23)) {
3273 /* VDUP */
3274 if (size == 0) {
3275 gen_neon_dup_u8(tmp, 0);
3276 } else if (size == 1) {
3277 gen_neon_dup_low16(tmp);
3279 for (n = 0; n <= pass * 2; n++) {
3280 tmp2 = tcg_temp_new_i32();
3281 tcg_gen_mov_i32(tmp2, tmp);
3282 neon_store_reg(rn, n, tmp2);
3284 neon_store_reg(rn, n, tmp);
3285 } else {
3286 /* VMOV */
3287 switch (size) {
3288 case 0:
3289 tmp2 = neon_load_reg(rn, pass);
3290 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
3291 tcg_temp_free_i32(tmp2);
3292 break;
3293 case 1:
3294 tmp2 = neon_load_reg(rn, pass);
3295 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
3296 tcg_temp_free_i32(tmp2);
3297 break;
3298 case 2:
3299 break;
3301 neon_store_reg(rn, pass, tmp);
3304 } else { /* !dp */
3305 if ((insn & 0x6f) != 0x00)
3306 return 1;
3307 rn = VFP_SREG_N(insn);
3308 if (insn & ARM_CP_RW_BIT) {
3309 /* vfp->arm */
3310 if (insn & (1 << 21)) {
3311 /* system register */
3312 rn >>= 1;
3314 switch (rn) {
3315 case ARM_VFP_FPSID:
3316 /* VFP2 allows access to FPSID from userspace.
3317 VFP3 restricts all id registers to privileged
3318 accesses. */
3319 if (IS_USER(s)
3320 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3321 return 1;
3323 tmp = load_cpu_field(vfp.xregs[rn]);
3324 break;
3325 case ARM_VFP_FPEXC:
3326 if (IS_USER(s))
3327 return 1;
3328 tmp = load_cpu_field(vfp.xregs[rn]);
3329 break;
3330 case ARM_VFP_FPINST:
3331 case ARM_VFP_FPINST2:
3332 /* Not present in VFP3. */
3333 if (IS_USER(s)
3334 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3335 return 1;
3337 tmp = load_cpu_field(vfp.xregs[rn]);
3338 break;
3339 case ARM_VFP_FPSCR:
3340 if (rd == 15) {
3341 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3342 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3343 } else {
3344 tmp = tcg_temp_new_i32();
3345 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3347 break;
3348 case ARM_VFP_MVFR2:
3349 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3350 return 1;
3352 /* fall through */
3353 case ARM_VFP_MVFR0:
3354 case ARM_VFP_MVFR1:
3355 if (IS_USER(s)
3356 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
3357 return 1;
3359 tmp = load_cpu_field(vfp.xregs[rn]);
3360 break;
3361 default:
3362 return 1;
3364 } else {
3365 gen_mov_F0_vreg(0, rn);
3366 tmp = gen_vfp_mrs();
3368 if (rd == 15) {
3369 /* Set the 4 flag bits in the CPSR. */
3370 gen_set_nzcv(tmp);
3371 tcg_temp_free_i32(tmp);
3372 } else {
3373 store_reg(s, rd, tmp);
3375 } else {
3376 /* arm->vfp */
3377 if (insn & (1 << 21)) {
3378 rn >>= 1;
3379 /* system register */
3380 switch (rn) {
3381 case ARM_VFP_FPSID:
3382 case ARM_VFP_MVFR0:
3383 case ARM_VFP_MVFR1:
3384 /* Writes are ignored. */
3385 break;
3386 case ARM_VFP_FPSCR:
3387 tmp = load_reg(s, rd);
3388 gen_helper_vfp_set_fpscr(cpu_env, tmp);
3389 tcg_temp_free_i32(tmp);
3390 gen_lookup_tb(s);
3391 break;
3392 case ARM_VFP_FPEXC:
3393 if (IS_USER(s))
3394 return 1;
3395 /* TODO: VFP subarchitecture support.
3396 * For now, keep the EN bit only */
3397 tmp = load_reg(s, rd);
3398 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
3399 store_cpu_field(tmp, vfp.xregs[rn]);
3400 gen_lookup_tb(s);
3401 break;
3402 case ARM_VFP_FPINST:
3403 case ARM_VFP_FPINST2:
3404 if (IS_USER(s)) {
3405 return 1;
3407 tmp = load_reg(s, rd);
3408 store_cpu_field(tmp, vfp.xregs[rn]);
3409 break;
3410 default:
3411 return 1;
3413 } else {
3414 tmp = load_reg(s, rd);
3415 gen_vfp_msr(tmp);
3416 gen_mov_vreg_F0(0, rn);
3420 } else {
3421 /* data processing */
3422 /* The opcode is in bits 23, 21, 20 and 6. */
3423 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3424 if (dp) {
3425 if (op == 15) {
3426 /* rn is opcode */
3427 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3428 } else {
3429 /* rn is register number */
3430 VFP_DREG_N(rn, insn);
3433 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3434 ((rn & 0x1e) == 0x6))) {
3435 /* Integer or single/half precision destination. */
3436 rd = VFP_SREG_D(insn);
3437 } else {
3438 VFP_DREG_D(rd, insn);
3440 if (op == 15 &&
3441 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3442 ((rn & 0x1e) == 0x4))) {
3443 /* VCVT from int or half precision is always from S reg
3444 * regardless of dp bit. VCVT with immediate frac_bits
3445 * has same format as SREG_M.
3447 rm = VFP_SREG_M(insn);
3448 } else {
3449 VFP_DREG_M(rm, insn);
3451 } else {
3452 rn = VFP_SREG_N(insn);
3453 if (op == 15 && rn == 15) {
3454 /* Double precision destination. */
3455 VFP_DREG_D(rd, insn);
3456 } else {
3457 rd = VFP_SREG_D(insn);
3459 /* NB that we implicitly rely on the encoding for the frac_bits
3460 * in VCVT of fixed to float being the same as that of an SREG_M
3462 rm = VFP_SREG_M(insn);
3465 veclen = s->vec_len;
3466 if (op == 15 && rn > 3)
3467 veclen = 0;
3469 /* Shut up compiler warnings. */
3470 delta_m = 0;
3471 delta_d = 0;
3472 bank_mask = 0;
3474 if (veclen > 0) {
3475 if (dp)
3476 bank_mask = 0xc;
3477 else
3478 bank_mask = 0x18;
3480 /* Figure out what type of vector operation this is. */
3481 if ((rd & bank_mask) == 0) {
3482 /* scalar */
3483 veclen = 0;
3484 } else {
3485 if (dp)
3486 delta_d = (s->vec_stride >> 1) + 1;
3487 else
3488 delta_d = s->vec_stride + 1;
3490 if ((rm & bank_mask) == 0) {
3491 /* mixed scalar/vector */
3492 delta_m = 0;
3493 } else {
3494 /* vector */
3495 delta_m = delta_d;
3500 /* Load the initial operands. */
3501 if (op == 15) {
3502 switch (rn) {
3503 case 16:
3504 case 17:
3505 /* Integer source */
3506 gen_mov_F0_vreg(0, rm);
3507 break;
3508 case 8:
3509 case 9:
3510 /* Compare */
3511 gen_mov_F0_vreg(dp, rd);
3512 gen_mov_F1_vreg(dp, rm);
3513 break;
3514 case 10:
3515 case 11:
3516 /* Compare with zero */
3517 gen_mov_F0_vreg(dp, rd);
3518 gen_vfp_F1_ld0(dp);
3519 break;
3520 case 20:
3521 case 21:
3522 case 22:
3523 case 23:
3524 case 28:
3525 case 29:
3526 case 30:
3527 case 31:
3528 /* Source and destination the same. */
3529 gen_mov_F0_vreg(dp, rd);
3530 break;
3531 case 4:
3532 case 5:
3533 case 6:
3534 case 7:
3535 /* VCVTB, VCVTT: only present with the halfprec extension
3536 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3537 * (we choose to UNDEF)
3539 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3540 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
3541 return 1;
3543 if (!extract32(rn, 1, 1)) {
3544 /* Half precision source. */
3545 gen_mov_F0_vreg(0, rm);
3546 break;
3548 /* Otherwise fall through */
3549 default:
3550 /* One source operand. */
3551 gen_mov_F0_vreg(dp, rm);
3552 break;
3554 } else {
3555 /* Two source operands. */
3556 gen_mov_F0_vreg(dp, rn);
3557 gen_mov_F1_vreg(dp, rm);
3560 for (;;) {
3561 /* Perform the calculation. */
3562 switch (op) {
3563 case 0: /* VMLA: fd + (fn * fm) */
3564 /* Note that order of inputs to the add matters for NaNs */
3565 gen_vfp_F1_mul(dp);
3566 gen_mov_F0_vreg(dp, rd);
3567 gen_vfp_add(dp);
3568 break;
3569 case 1: /* VMLS: fd + -(fn * fm) */
3570 gen_vfp_mul(dp);
3571 gen_vfp_F1_neg(dp);
3572 gen_mov_F0_vreg(dp, rd);
3573 gen_vfp_add(dp);
3574 break;
3575 case 2: /* VNMLS: -fd + (fn * fm) */
3576 /* Note that it isn't valid to replace (-A + B) with (B - A)
3577 * or similar plausible looking simplifications
3578 * because this will give wrong results for NaNs.
3580 gen_vfp_F1_mul(dp);
3581 gen_mov_F0_vreg(dp, rd);
3582 gen_vfp_neg(dp);
3583 gen_vfp_add(dp);
3584 break;
3585 case 3: /* VNMLA: -fd + -(fn * fm) */
3586 gen_vfp_mul(dp);
3587 gen_vfp_F1_neg(dp);
3588 gen_mov_F0_vreg(dp, rd);
3589 gen_vfp_neg(dp);
3590 gen_vfp_add(dp);
3591 break;
3592 case 4: /* mul: fn * fm */
3593 gen_vfp_mul(dp);
3594 break;
3595 case 5: /* nmul: -(fn * fm) */
3596 gen_vfp_mul(dp);
3597 gen_vfp_neg(dp);
3598 break;
3599 case 6: /* add: fn + fm */
3600 gen_vfp_add(dp);
3601 break;
3602 case 7: /* sub: fn - fm */
3603 gen_vfp_sub(dp);
3604 break;
3605 case 8: /* div: fn / fm */
3606 gen_vfp_div(dp);
3607 break;
3608 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3609 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3610 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3611 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3612 /* These are fused multiply-add, and must be done as one
3613 * floating point operation with no rounding between the
3614 * multiplication and addition steps.
3615 * NB that doing the negations here as separate steps is
3616 * correct: an input NaN should come out with its sign bit
3617 * flipped if it is a negated input.
3619 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
3620 return 1;
3622 if (dp) {
3623 TCGv_ptr fpst;
3624 TCGv_i64 frd;
3625 if (op & 1) {
3626 /* VFNMS, VFMS */
3627 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3629 frd = tcg_temp_new_i64();
3630 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3631 if (op & 2) {
3632 /* VFNMA, VFNMS */
3633 gen_helper_vfp_negd(frd, frd);
3635 fpst = get_fpstatus_ptr(0);
3636 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3637 cpu_F1d, frd, fpst);
3638 tcg_temp_free_ptr(fpst);
3639 tcg_temp_free_i64(frd);
3640 } else {
3641 TCGv_ptr fpst;
3642 TCGv_i32 frd;
3643 if (op & 1) {
3644 /* VFNMS, VFMS */
3645 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3647 frd = tcg_temp_new_i32();
3648 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3649 if (op & 2) {
3650 gen_helper_vfp_negs(frd, frd);
3652 fpst = get_fpstatus_ptr(0);
3653 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3654 cpu_F1s, frd, fpst);
3655 tcg_temp_free_ptr(fpst);
3656 tcg_temp_free_i32(frd);
3658 break;
3659 case 14: /* fconst */
3660 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3661 return 1;
3664 n = (insn << 12) & 0x80000000;
3665 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3666 if (dp) {
3667 if (i & 0x40)
3668 i |= 0x3f80;
3669 else
3670 i |= 0x4000;
3671 n |= i << 16;
3672 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3673 } else {
3674 if (i & 0x40)
3675 i |= 0x780;
3676 else
3677 i |= 0x800;
3678 n |= i << 19;
3679 tcg_gen_movi_i32(cpu_F0s, n);
3681 break;
3682 case 15: /* extension space */
3683 switch (rn) {
3684 case 0: /* cpy */
3685 /* no-op */
3686 break;
3687 case 1: /* abs */
3688 gen_vfp_abs(dp);
3689 break;
3690 case 2: /* neg */
3691 gen_vfp_neg(dp);
3692 break;
3693 case 3: /* sqrt */
3694 gen_vfp_sqrt(dp);
3695 break;
3696 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3697 tmp = gen_vfp_mrs();
3698 tcg_gen_ext16u_i32(tmp, tmp);
3699 if (dp) {
3700 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3701 cpu_env);
3702 } else {
3703 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3704 cpu_env);
3706 tcg_temp_free_i32(tmp);
3707 break;
3708 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3709 tmp = gen_vfp_mrs();
3710 tcg_gen_shri_i32(tmp, tmp, 16);
3711 if (dp) {
3712 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3713 cpu_env);
3714 } else {
3715 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3716 cpu_env);
3718 tcg_temp_free_i32(tmp);
3719 break;
3720 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3721 tmp = tcg_temp_new_i32();
3722 if (dp) {
3723 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3724 cpu_env);
3725 } else {
3726 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3727 cpu_env);
3729 gen_mov_F0_vreg(0, rd);
3730 tmp2 = gen_vfp_mrs();
3731 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3732 tcg_gen_or_i32(tmp, tmp, tmp2);
3733 tcg_temp_free_i32(tmp2);
3734 gen_vfp_msr(tmp);
3735 break;
3736 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3737 tmp = tcg_temp_new_i32();
3738 if (dp) {
3739 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3740 cpu_env);
3741 } else {
3742 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3743 cpu_env);
3745 tcg_gen_shli_i32(tmp, tmp, 16);
3746 gen_mov_F0_vreg(0, rd);
3747 tmp2 = gen_vfp_mrs();
3748 tcg_gen_ext16u_i32(tmp2, tmp2);
3749 tcg_gen_or_i32(tmp, tmp, tmp2);
3750 tcg_temp_free_i32(tmp2);
3751 gen_vfp_msr(tmp);
3752 break;
3753 case 8: /* cmp */
3754 gen_vfp_cmp(dp);
3755 break;
3756 case 9: /* cmpe */
3757 gen_vfp_cmpe(dp);
3758 break;
3759 case 10: /* cmpz */
3760 gen_vfp_cmp(dp);
3761 break;
3762 case 11: /* cmpez */
3763 gen_vfp_F1_ld0(dp);
3764 gen_vfp_cmpe(dp);
3765 break;
3766 case 12: /* vrintr */
3768 TCGv_ptr fpst = get_fpstatus_ptr(0);
3769 if (dp) {
3770 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3771 } else {
3772 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3774 tcg_temp_free_ptr(fpst);
3775 break;
3777 case 13: /* vrintz */
3779 TCGv_ptr fpst = get_fpstatus_ptr(0);
3780 TCGv_i32 tcg_rmode;
3781 tcg_rmode = tcg_const_i32(float_round_to_zero);
3782 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3783 if (dp) {
3784 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3785 } else {
3786 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3788 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3789 tcg_temp_free_i32(tcg_rmode);
3790 tcg_temp_free_ptr(fpst);
3791 break;
3793 case 14: /* vrintx */
3795 TCGv_ptr fpst = get_fpstatus_ptr(0);
3796 if (dp) {
3797 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3798 } else {
3799 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3801 tcg_temp_free_ptr(fpst);
3802 break;
3804 case 15: /* single<->double conversion */
3805 if (dp)
3806 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3807 else
3808 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3809 break;
3810 case 16: /* fuito */
3811 gen_vfp_uito(dp, 0);
3812 break;
3813 case 17: /* fsito */
3814 gen_vfp_sito(dp, 0);
3815 break;
3816 case 20: /* fshto */
3817 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3818 return 1;
3820 gen_vfp_shto(dp, 16 - rm, 0);
3821 break;
3822 case 21: /* fslto */
3823 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3824 return 1;
3826 gen_vfp_slto(dp, 32 - rm, 0);
3827 break;
3828 case 22: /* fuhto */
3829 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3830 return 1;
3832 gen_vfp_uhto(dp, 16 - rm, 0);
3833 break;
3834 case 23: /* fulto */
3835 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3836 return 1;
3838 gen_vfp_ulto(dp, 32 - rm, 0);
3839 break;
3840 case 24: /* ftoui */
3841 gen_vfp_toui(dp, 0);
3842 break;
3843 case 25: /* ftouiz */
3844 gen_vfp_touiz(dp, 0);
3845 break;
3846 case 26: /* ftosi */
3847 gen_vfp_tosi(dp, 0);
3848 break;
3849 case 27: /* ftosiz */
3850 gen_vfp_tosiz(dp, 0);
3851 break;
3852 case 28: /* ftosh */
3853 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3854 return 1;
3856 gen_vfp_tosh(dp, 16 - rm, 0);
3857 break;
3858 case 29: /* ftosl */
3859 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3860 return 1;
3862 gen_vfp_tosl(dp, 32 - rm, 0);
3863 break;
3864 case 30: /* ftouh */
3865 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3866 return 1;
3868 gen_vfp_touh(dp, 16 - rm, 0);
3869 break;
3870 case 31: /* ftoul */
3871 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3872 return 1;
3874 gen_vfp_toul(dp, 32 - rm, 0);
3875 break;
3876 default: /* undefined */
3877 return 1;
3879 break;
3880 default: /* undefined */
3881 return 1;
3884 /* Write back the result. */
3885 if (op == 15 && (rn >= 8 && rn <= 11)) {
3886 /* Comparison, do nothing. */
3887 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3888 (rn & 0x1e) == 0x6)) {
3889 /* VCVT double to int: always integer result.
3890 * VCVT double to half precision is always a single
3891 * precision result.
3893 gen_mov_vreg_F0(0, rd);
3894 } else if (op == 15 && rn == 15) {
3895 /* conversion */
3896 gen_mov_vreg_F0(!dp, rd);
3897 } else {
3898 gen_mov_vreg_F0(dp, rd);
3901 /* break out of the loop if we have finished */
3902 if (veclen == 0)
3903 break;
3905 if (op == 15 && delta_m == 0) {
3906 /* single source one-many */
3907 while (veclen--) {
3908 rd = ((rd + delta_d) & (bank_mask - 1))
3909 | (rd & bank_mask);
3910 gen_mov_vreg_F0(dp, rd);
3912 break;
3914 /* Set up the next operands. */
3915 veclen--;
3916 rd = ((rd + delta_d) & (bank_mask - 1))
3917 | (rd & bank_mask);
3919 if (op == 15) {
3920 /* One source operand. */
3921 rm = ((rm + delta_m) & (bank_mask - 1))
3922 | (rm & bank_mask);
3923 gen_mov_F0_vreg(dp, rm);
3924 } else {
3925 /* Two source operands. */
3926 rn = ((rn + delta_d) & (bank_mask - 1))
3927 | (rn & bank_mask);
3928 gen_mov_F0_vreg(dp, rn);
3929 if (delta_m) {
3930 rm = ((rm + delta_m) & (bank_mask - 1))
3931 | (rm & bank_mask);
3932 gen_mov_F1_vreg(dp, rm);
3937 break;
3938 case 0xc:
3939 case 0xd:
3940 if ((insn & 0x03e00000) == 0x00400000) {
3941 /* two-register transfer */
3942 rn = (insn >> 16) & 0xf;
3943 rd = (insn >> 12) & 0xf;
3944 if (dp) {
3945 VFP_DREG_M(rm, insn);
3946 } else {
3947 rm = VFP_SREG_M(insn);
3950 if (insn & ARM_CP_RW_BIT) {
3951 /* vfp->arm */
3952 if (dp) {
3953 gen_mov_F0_vreg(0, rm * 2);
3954 tmp = gen_vfp_mrs();
3955 store_reg(s, rd, tmp);
3956 gen_mov_F0_vreg(0, rm * 2 + 1);
3957 tmp = gen_vfp_mrs();
3958 store_reg(s, rn, tmp);
3959 } else {
3960 gen_mov_F0_vreg(0, rm);
3961 tmp = gen_vfp_mrs();
3962 store_reg(s, rd, tmp);
3963 gen_mov_F0_vreg(0, rm + 1);
3964 tmp = gen_vfp_mrs();
3965 store_reg(s, rn, tmp);
3967 } else {
3968 /* arm->vfp */
3969 if (dp) {
3970 tmp = load_reg(s, rd);
3971 gen_vfp_msr(tmp);
3972 gen_mov_vreg_F0(0, rm * 2);
3973 tmp = load_reg(s, rn);
3974 gen_vfp_msr(tmp);
3975 gen_mov_vreg_F0(0, rm * 2 + 1);
3976 } else {
3977 tmp = load_reg(s, rd);
3978 gen_vfp_msr(tmp);
3979 gen_mov_vreg_F0(0, rm);
3980 tmp = load_reg(s, rn);
3981 gen_vfp_msr(tmp);
3982 gen_mov_vreg_F0(0, rm + 1);
3985 } else {
3986 /* Load/store */
3987 rn = (insn >> 16) & 0xf;
3988 if (dp)
3989 VFP_DREG_D(rd, insn);
3990 else
3991 rd = VFP_SREG_D(insn);
3992 if ((insn & 0x01200000) == 0x01000000) {
3993 /* Single load/store */
3994 offset = (insn & 0xff) << 2;
3995 if ((insn & (1 << 23)) == 0)
3996 offset = -offset;
3997 if (s->thumb && rn == 15) {
3998 /* This is actually UNPREDICTABLE */
3999 addr = tcg_temp_new_i32();
4000 tcg_gen_movi_i32(addr, s->pc & ~2);
4001 } else {
4002 addr = load_reg(s, rn);
4004 tcg_gen_addi_i32(addr, addr, offset);
4005 if (insn & (1 << 20)) {
4006 gen_vfp_ld(s, dp, addr);
4007 gen_mov_vreg_F0(dp, rd);
4008 } else {
4009 gen_mov_F0_vreg(dp, rd);
4010 gen_vfp_st(s, dp, addr);
4012 tcg_temp_free_i32(addr);
4013 } else {
4014 /* load/store multiple */
4015 int w = insn & (1 << 21);
4016 if (dp)
4017 n = (insn >> 1) & 0x7f;
4018 else
4019 n = insn & 0xff;
4021 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4022 /* P == U , W == 1 => UNDEF */
4023 return 1;
4025 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4026 /* UNPREDICTABLE cases for bad immediates: we choose to
4027 * UNDEF to avoid generating huge numbers of TCG ops
4029 return 1;
4031 if (rn == 15 && w) {
4032 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4033 return 1;
4036 if (s->thumb && rn == 15) {
4037 /* This is actually UNPREDICTABLE */
4038 addr = tcg_temp_new_i32();
4039 tcg_gen_movi_i32(addr, s->pc & ~2);
4040 } else {
4041 addr = load_reg(s, rn);
4043 if (insn & (1 << 24)) /* pre-decrement */
4044 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
4046 if (dp)
4047 offset = 8;
4048 else
4049 offset = 4;
4050 for (i = 0; i < n; i++) {
4051 if (insn & ARM_CP_RW_BIT) {
4052 /* load */
4053 gen_vfp_ld(s, dp, addr);
4054 gen_mov_vreg_F0(dp, rd + i);
4055 } else {
4056 /* store */
4057 gen_mov_F0_vreg(dp, rd + i);
4058 gen_vfp_st(s, dp, addr);
4060 tcg_gen_addi_i32(addr, addr, offset);
4062 if (w) {
4063 /* writeback */
4064 if (insn & (1 << 24))
4065 offset = -offset * n;
4066 else if (dp && (insn & 1))
4067 offset = 4;
4068 else
4069 offset = 0;
4071 if (offset != 0)
4072 tcg_gen_addi_i32(addr, addr, offset);
4073 store_reg(s, rn, addr);
4074 } else {
4075 tcg_temp_free_i32(addr);
4079 break;
4080 default:
4081 /* Should never happen. */
4082 return 1;
4084 return 0;
4085 }
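/*
 * To summarise the structure of disas_vfp_insn: bits [27:24] == 0xe
 * with bit 4 set is the single register transfer group, 0xe otherwise
 * is data processing (with the short-vector looping over rd/rn/rm
 * driven by s->vec_len and s->vec_stride), and 0xc/0xd cover the
 * two-register transfers plus the load/store and load/store-multiple
 * forms.
 */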
4087 static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
4089 #ifndef CONFIG_USER_ONLY
4090 return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
4091 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4092 #else
4093 return true;
4094 #endif
4095 }
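/*
 * Direct block chaining is only used when the branch target can be
 * reached without leaving the pages the TB already spans: use_goto_tb
 * accepts a destination on the same guest page as either the start of
 * the TB or the current instruction, and gen_goto_tb falls back to
 * setting the PC and exiting to the main loop otherwise.
 */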
4097 static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
4099 if (use_goto_tb(s, dest)) {
4100 tcg_gen_goto_tb(n);
4101 gen_set_pc_im(s, dest);
4102 tcg_gen_exit_tb((uintptr_t)s->tb + n);
4103 } else {
4104 gen_set_pc_im(s, dest);
4105 tcg_gen_exit_tb(0);
4109 static inline void gen_jmp (DisasContext *s, uint32_t dest)
4111 if (unlikely(s->singlestep_enabled || s->ss_active)) {
4112 /* An indirect jump so that we still trigger the debug exception. */
4113 if (s->thumb)
4114 dest |= 1;
4115 gen_bx_im(s, dest);
4116 } else {
4117 gen_goto_tb(s, 0, dest);
4118 s->is_jmp = DISAS_TB_JUMP;
4122 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
4124 if (x)
4125 tcg_gen_sari_i32(t0, t0, 16);
4126 else
4127 gen_sxth(t0);
4128 if (y)
4129 tcg_gen_sari_i32(t1, t1, 16);
4130 else
4131 gen_sxth(t1);
4132 tcg_gen_mul_i32(t0, t0, t1);
4133 }
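/*
 * gen_mulxy implements the SMULxy-style halfword selection: x and y
 * pick the top (arithmetic shift right by 16) or bottom (sign-extend)
 * halfword of each operand, and the two selected 16-bit values are then
 * multiplied as signed 32-bit quantities.
 */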
4135 /* Return the mask of PSR bits set by a MSR instruction. */
4136 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4138 uint32_t mask;
4140 mask = 0;
4141 if (flags & (1 << 0))
4142 mask |= 0xff;
4143 if (flags & (1 << 1))
4144 mask |= 0xff00;
4145 if (flags & (1 << 2))
4146 mask |= 0xff0000;
4147 if (flags & (1 << 3))
4148 mask |= 0xff000000;
4150 /* Mask out undefined bits. */
4151 mask &= ~CPSR_RESERVED;
4152 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
4153 mask &= ~CPSR_T;
4155 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
4156 mask &= ~CPSR_Q; /* V5TE in reality */
4158 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
4159 mask &= ~(CPSR_E | CPSR_GE);
4161 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
4162 mask &= ~CPSR_IT;
4164 /* Mask out execution state and reserved bits. */
4165 if (!spsr) {
4166 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4168 /* Mask out privileged bits. */
4169 if (IS_USER(s))
4170 mask &= CPSR_USER;
4171 return mask;
4172 }
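/*
 * msr_mask turns the MSR field-mask bits (c/x/s/f, one byte of the PSR
 * each) into a writable-bit mask and then strips anything this core
 * does not implement or the current mode may not touch; for example a
 * user-mode write ends up limited to the bits allowed by CPSR_USER.
 */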
4174 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
4175 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
4177 TCGv_i32 tmp;
4178 if (spsr) {
4179 /* ??? This is also undefined in system mode. */
4180 if (IS_USER(s))
4181 return 1;
4183 tmp = load_cpu_field(spsr);
4184 tcg_gen_andi_i32(tmp, tmp, ~mask);
4185 tcg_gen_andi_i32(t0, t0, mask);
4186 tcg_gen_or_i32(tmp, tmp, t0);
4187 store_cpu_field(tmp, spsr);
4188 } else {
4189 gen_set_cpsr(t0, mask);
4191 tcg_temp_free_i32(t0);
4192 gen_lookup_tb(s);
4193 return 0;
4196 /* Returns nonzero if access to the PSR is not permitted. */
4197 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4199 TCGv_i32 tmp;
4200 tmp = tcg_temp_new_i32();
4201 tcg_gen_movi_i32(tmp, val);
4202 return gen_set_psr(s, mask, spsr, tmp);
4203 }
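/*
 * gen_set_psr_im is the immediate-operand wrapper: it materialises the
 * constant in a fresh temp and defers to gen_set_psr, which applies the
 * mask to either the SPSR field or (via gen_set_cpsr) the live CPSR,
 * frees the temp, and ends with gen_lookup_tb because a PSR write can
 * change how following instructions must be translated.
 */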
4205 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
4206 int *tgtmode, int *regno)
4208 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4209 * the target mode and register number, and identify the various
4210 * unpredictable cases.
4211 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4212 * + executed in user mode
4213 * + using R15 as the src/dest register
4214 * + accessing an unimplemented register
4215 * + accessing a register that's inaccessible at current PL/security state*
4216 * + accessing a register that you could access with a different insn
4217 * We choose to UNDEF in all these cases.
4218 * Since we don't know which of the various AArch32 modes we are in
4219 * we have to defer some checks to runtime.
4220 * Accesses to Monitor mode registers from Secure EL1 (which implies
4221 * that EL3 is AArch64) must trap to EL3.
4223 * If the access checks fail this function will emit code to take
4224 * an exception and return false. Otherwise it will return true,
4225 * and set *tgtmode and *regno appropriately.
4227 int exc_target = default_exception_el(s);
4229 /* These instructions are present only in ARMv8, or in ARMv7 with the
4230 * Virtualization Extensions.
4232 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4233 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
4234 goto undef;
4237 if (IS_USER(s) || rn == 15) {
4238 goto undef;
4241 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4242 * of registers into (r, sysm).
4244 if (r) {
4245 /* SPSRs for other modes */
4246 switch (sysm) {
4247 case 0xe: /* SPSR_fiq */
4248 *tgtmode = ARM_CPU_MODE_FIQ;
4249 break;
4250 case 0x10: /* SPSR_irq */
4251 *tgtmode = ARM_CPU_MODE_IRQ;
4252 break;
4253 case 0x12: /* SPSR_svc */
4254 *tgtmode = ARM_CPU_MODE_SVC;
4255 break;
4256 case 0x14: /* SPSR_abt */
4257 *tgtmode = ARM_CPU_MODE_ABT;
4258 break;
4259 case 0x16: /* SPSR_und */
4260 *tgtmode = ARM_CPU_MODE_UND;
4261 break;
4262 case 0x1c: /* SPSR_mon */
4263 *tgtmode = ARM_CPU_MODE_MON;
4264 break;
4265 case 0x1e: /* SPSR_hyp */
4266 *tgtmode = ARM_CPU_MODE_HYP;
4267 break;
4268 default: /* unallocated */
4269 goto undef;
4271 /* We arbitrarily assign SPSR a register number of 16. */
4272 *regno = 16;
4273 } else {
4274 /* general purpose registers for other modes */
4275 switch (sysm) {
4276 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4277 *tgtmode = ARM_CPU_MODE_USR;
4278 *regno = sysm + 8;
4279 break;
4280 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4281 *tgtmode = ARM_CPU_MODE_FIQ;
4282 *regno = sysm;
4283 break;
4284 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4285 *tgtmode = ARM_CPU_MODE_IRQ;
4286 *regno = sysm & 1 ? 13 : 14;
4287 break;
4288 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4289 *tgtmode = ARM_CPU_MODE_SVC;
4290 *regno = sysm & 1 ? 13 : 14;
4291 break;
4292 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4293 *tgtmode = ARM_CPU_MODE_ABT;
4294 *regno = sysm & 1 ? 13 : 14;
4295 break;
4296 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4297 *tgtmode = ARM_CPU_MODE_UND;
4298 *regno = sysm & 1 ? 13 : 14;
4299 break;
4300 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4301 *tgtmode = ARM_CPU_MODE_MON;
4302 *regno = sysm & 1 ? 13 : 14;
4303 break;
4304 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4305 *tgtmode = ARM_CPU_MODE_HYP;
4306 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4307 *regno = sysm & 1 ? 13 : 17;
4308 break;
4309 default: /* unallocated */
4310 goto undef;
4314 /* Catch the 'accessing inaccessible register' cases we can detect
4315 * at translate time.
4317 switch (*tgtmode) {
4318 case ARM_CPU_MODE_MON:
4319 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
4320 goto undef;
4322 if (s->current_el == 1) {
4323 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4324 * then accesses to Mon registers trap to EL3
4326 exc_target = 3;
4327 goto undef;
4329 break;
4330 case ARM_CPU_MODE_HYP:
4331 /* Note that we can forbid accesses from EL2 here because they
4332 * must be from Hyp mode itself
4334 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
4335 goto undef;
4337 break;
4338 default:
4339 break;
4342 return true;
4344 undef:
4345 /* If we get here then some access check did not pass */
4346 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
4347 return false;
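/* MSR (banked): write a core register to a banked register (or SPSR)
 * of another mode. Checks that depend on the mode we are currently in
 * are deferred to the msr_banked helper at run time.
 */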
4350 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4352 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4353 int tgtmode = 0, regno = 0;
4355 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4356 return;
4359 /* Sync state because msr_banked() can raise exceptions */
4360 gen_set_condexec(s);
4361 gen_set_pc_im(s, s->pc - 4);
4362 tcg_reg = load_reg(s, rn);
4363 tcg_tgtmode = tcg_const_i32(tgtmode);
4364 tcg_regno = tcg_const_i32(regno);
4365 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4366 tcg_temp_free_i32(tcg_tgtmode);
4367 tcg_temp_free_i32(tcg_regno);
4368 tcg_temp_free_i32(tcg_reg);
4369 s->is_jmp = DISAS_UPDATE;
4372 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4374 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4375 int tgtmode = 0, regno = 0;
4377 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4378 return;
4381 /* Sync state because mrs_banked() can raise exceptions */
4382 gen_set_condexec(s);
4383 gen_set_pc_im(s, s->pc - 4);
4384 tcg_reg = tcg_temp_new_i32();
4385 tcg_tgtmode = tcg_const_i32(tgtmode);
4386 tcg_regno = tcg_const_i32(regno);
4387 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4388 tcg_temp_free_i32(tcg_tgtmode);
4389 tcg_temp_free_i32(tcg_regno);
4390 store_reg(s, rn, tcg_reg);
4391 s->is_jmp = DISAS_UPDATE;
4394 /* Store value to PC as for an exception return (i.e. don't
4395 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
4396 * will do the masking based on the new value of the Thumb bit.
4398 static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
4400 tcg_gen_mov_i32(cpu_R[15], pc);
4401 tcg_temp_free_i32(pc);
4404 /* Generate a v6 exception return. Marks both values as dead. */
4405 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
4407 store_pc_exc_ret(s, pc);
4408 /* The cpsr_write_eret helper will mask the low bits of PC
4409 * appropriately depending on the new Thumb bit, so it must
4410 * be called after storing the new PC.
4412 gen_helper_cpsr_write_eret(cpu_env, cpsr);
4413 tcg_temp_free_i32(cpsr);
4414 s->is_jmp = DISAS_JUMP;
4417 /* Generate an old-style exception return. Marks pc as dead. */
4418 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4420 gen_rfe(s, pc, load_cpu_field(spsr));
4423 static void gen_nop_hint(DisasContext *s, int val)
4425 switch (val) {
4426 case 1: /* yield */
4427 gen_set_pc_im(s, s->pc);
4428 s->is_jmp = DISAS_YIELD;
4429 break;
4430 case 3: /* wfi */
4431 gen_set_pc_im(s, s->pc);
4432 s->is_jmp = DISAS_WFI;
4433 break;
4434 case 2: /* wfe */
4435 gen_set_pc_im(s, s->pc);
4436 s->is_jmp = DISAS_WFE;
4437 break;
4438 case 4: /* sev */
4439 case 5: /* sevl */
4440 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
4441 default: /* nop */
4442 break;
4446 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
4448 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
4450 switch (size) {
4451 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4452 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4453 case 2: tcg_gen_add_i32(t0, t0, t1); break;
4454 default: abort();
4458 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
4460 switch (size) {
4461 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4462 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4463 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
4464 default: return;
4468 /* 32-bit pairwise ops end up the same as the elementwise versions. */
4469 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4470 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4471 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4472 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
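/* Emit the per-element Neon helper selected by the current 'size'
 * (log2 of the element width) and 'u' (unsigned) values, combining tmp
 * and tmp2 into tmp. The _ENV variant is for the saturating ops, which
 * need cpu_env to update the QC flag. An unhandled size/u combination
 * makes the enclosing function return 1 (invalid instruction).
 */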
4474 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
4475 switch ((size << 1) | u) { \
4476 case 0: \
4477 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
4478 break; \
4479 case 1: \
4480 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
4481 break; \
4482 case 2: \
4483 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
4484 break; \
4485 case 3: \
4486 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
4487 break; \
4488 case 4: \
4489 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
4490 break; \
4491 case 5: \
4492 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
4493 break; \
4494 default: return 1; \
4495 }} while (0)
4497 #define GEN_NEON_INTEGER_OP(name) do { \
4498 switch ((size << 1) | u) { \
4499 case 0: \
4500 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
4501 break; \
4502 case 1: \
4503 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
4504 break; \
4505 case 2: \
4506 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
4507 break; \
4508 case 3: \
4509 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
4510 break; \
4511 case 4: \
4512 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
4513 break; \
4514 case 5: \
4515 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
4516 break; \
4517 default: return 1; \
4518 }} while (0)
4520 static TCGv_i32 neon_load_scratch(int scratch)
4522 TCGv_i32 tmp = tcg_temp_new_i32();
4523 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4524 return tmp;
4527 static void neon_store_scratch(int scratch, TCGv_i32 var)
4529 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4530 tcg_temp_free_i32(var);
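/* Fetch a scalar operand: 'reg' encodes both the source D register and
 * the element index. 16-bit scalars are duplicated into both halves of
 * the returned 32-bit value; 32-bit scalars are returned as-is.
 */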
4533 static inline TCGv_i32 neon_get_scalar(int size, int reg)
4535 TCGv_i32 tmp;
4536 if (size == 1) {
4537 tmp = neon_load_reg(reg & 7, reg >> 4);
4538 if (reg & 8) {
4539 gen_neon_dup_high16(tmp);
4540 } else {
4541 gen_neon_dup_low16(tmp);
4543 } else {
4544 tmp = neon_load_reg(reg & 15, reg >> 4);
4546 return tmp;
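/* VUZP (and VZIP below) are performed by helpers that operate directly
 * on the register file; the q* variants handle the 128-bit Q-register
 * forms. Returns nonzero for size/Q combinations not handled here.
 */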
4549 static int gen_neon_unzip(int rd, int rm, int size, int q)
4551 TCGv_i32 tmp, tmp2;
4552 if (!q && size == 2) {
4553 return 1;
4555 tmp = tcg_const_i32(rd);
4556 tmp2 = tcg_const_i32(rm);
4557 if (q) {
4558 switch (size) {
4559 case 0:
4560 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
4561 break;
4562 case 1:
4563 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
4564 break;
4565 case 2:
4566 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
4567 break;
4568 default:
4569 abort();
4571 } else {
4572 switch (size) {
4573 case 0:
4574 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
4575 break;
4576 case 1:
4577 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
4578 break;
4579 default:
4580 abort();
4583 tcg_temp_free_i32(tmp);
4584 tcg_temp_free_i32(tmp2);
4585 return 0;
4588 static int gen_neon_zip(int rd, int rm, int size, int q)
4590 TCGv_i32 tmp, tmp2;
4591 if (!q && size == 2) {
4592 return 1;
4594 tmp = tcg_const_i32(rd);
4595 tmp2 = tcg_const_i32(rm);
4596 if (q) {
4597 switch (size) {
4598 case 0:
4599 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
4600 break;
4601 case 1:
4602 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
4603 break;
4604 case 2:
4605 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
4606 break;
4607 default:
4608 abort();
4610 } else {
4611 switch (size) {
4612 case 0:
4613 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
4614 break;
4615 case 1:
4616 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
4617 break;
4618 default:
4619 abort();
4622 tcg_temp_free_i32(tmp);
4623 tcg_temp_free_i32(tmp2);
4624 return 0;
4627 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
4629 TCGv_i32 rd, tmp;
4631 rd = tcg_temp_new_i32();
4632 tmp = tcg_temp_new_i32();
4634 tcg_gen_shli_i32(rd, t0, 8);
4635 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4636 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4637 tcg_gen_or_i32(rd, rd, tmp);
4639 tcg_gen_shri_i32(t1, t1, 8);
4640 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4641 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4642 tcg_gen_or_i32(t1, t1, tmp);
4643 tcg_gen_mov_i32(t0, rd);
4645 tcg_temp_free_i32(tmp);
4646 tcg_temp_free_i32(rd);
4649 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
4651 TCGv_i32 rd, tmp;
4653 rd = tcg_temp_new_i32();
4654 tmp = tcg_temp_new_i32();
4656 tcg_gen_shli_i32(rd, t0, 16);
4657 tcg_gen_andi_i32(tmp, t1, 0xffff);
4658 tcg_gen_or_i32(rd, rd, tmp);
4659 tcg_gen_shri_i32(t1, t1, 16);
4660 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4661 tcg_gen_or_i32(t1, t1, tmp);
4662 tcg_gen_mov_i32(t0, rd);
4664 tcg_temp_free_i32(tmp);
4665 tcg_temp_free_i32(rd);
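/* Per-op description of the VLDn/VSTn "multiple structures" forms,
 * indexed by the op field (insn bits [11:8]): number of registers
 * transferred, element interleave factor, and spacing between the
 * D registers written.
 */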
4669 static struct {
4670 int nregs;
4671 int interleave;
4672 int spacing;
4673 } neon_ls_element_type[11] = {
4674 {4, 4, 1},
4675 {4, 4, 2},
4676 {4, 1, 1},
4677 {4, 2, 1},
4678 {3, 3, 1},
4679 {3, 3, 2},
4680 {3, 1, 1},
4681 {1, 1, 1},
4682 {2, 2, 1},
4683 {2, 2, 2},
4684 {2, 1, 1}
4687 /* Translate a NEON load/store element instruction. Return nonzero if the
4688 instruction is invalid. */
4689 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
4691 int rd, rn, rm;
4692 int op;
4693 int nregs;
4694 int interleave;
4695 int spacing;
4696 int stride;
4697 int size;
4698 int reg;
4699 int pass;
4700 int load;
4701 int shift;
4702 int n;
4703 TCGv_i32 addr;
4704 TCGv_i32 tmp;
4705 TCGv_i32 tmp2;
4706 TCGv_i64 tmp64;
4708 /* FIXME: this access check should not take precedence over UNDEF
4709 * for invalid encodings; we will generate incorrect syndrome information
4710 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4712 if (s->fp_excp_el) {
4713 gen_exception_insn(s, 4, EXCP_UDEF,
4714 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
4715 return 0;
4718 if (!s->vfp_enabled)
4719 return 1;
4720 VFP_DREG_D(rd, insn);
4721 rn = (insn >> 16) & 0xf;
4722 rm = insn & 0xf;
4723 load = (insn & (1 << 21)) != 0;
4724 if ((insn & (1 << 23)) == 0) {
4725 /* Load store all elements. */
4726 op = (insn >> 8) & 0xf;
4727 size = (insn >> 6) & 3;
4728 if (op > 10)
4729 return 1;
4730 /* Catch UNDEF cases for bad values of align field */
4731 switch (op & 0xc) {
4732 case 4:
4733 if (((insn >> 5) & 1) == 1) {
4734 return 1;
4736 break;
4737 case 8:
4738 if (((insn >> 4) & 3) == 3) {
4739 return 1;
4741 break;
4742 default:
4743 break;
4745 nregs = neon_ls_element_type[op].nregs;
4746 interleave = neon_ls_element_type[op].interleave;
4747 spacing = neon_ls_element_type[op].spacing;
4748 if (size == 3 && (interleave | spacing) != 1)
4749 return 1;
4750 addr = tcg_temp_new_i32();
4751 load_reg_var(s, addr, rn);
4752 stride = (1 << size) * interleave;
4753 for (reg = 0; reg < nregs; reg++) {
4754 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
4755 load_reg_var(s, addr, rn);
4756 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
4757 } else if (interleave == 2 && nregs == 4 && reg == 2) {
4758 load_reg_var(s, addr, rn);
4759 tcg_gen_addi_i32(addr, addr, 1 << size);
4761 if (size == 3) {
4762 tmp64 = tcg_temp_new_i64();
4763 if (load) {
4764 gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
4765 neon_store_reg64(tmp64, rd);
4766 } else {
4767 neon_load_reg64(tmp64, rd);
4768 gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
4770 tcg_temp_free_i64(tmp64);
4771 tcg_gen_addi_i32(addr, addr, stride);
4772 } else {
4773 for (pass = 0; pass < 2; pass++) {
4774 if (size == 2) {
4775 if (load) {
4776 tmp = tcg_temp_new_i32();
4777 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
4778 neon_store_reg(rd, pass, tmp);
4779 } else {
4780 tmp = neon_load_reg(rd, pass);
4781 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
4782 tcg_temp_free_i32(tmp);
4784 tcg_gen_addi_i32(addr, addr, stride);
4785 } else if (size == 1) {
4786 if (load) {
4787 tmp = tcg_temp_new_i32();
4788 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
4789 tcg_gen_addi_i32(addr, addr, stride);
4790 tmp2 = tcg_temp_new_i32();
4791 gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
4792 tcg_gen_addi_i32(addr, addr, stride);
4793 tcg_gen_shli_i32(tmp2, tmp2, 16);
4794 tcg_gen_or_i32(tmp, tmp, tmp2);
4795 tcg_temp_free_i32(tmp2);
4796 neon_store_reg(rd, pass, tmp);
4797 } else {
4798 tmp = neon_load_reg(rd, pass);
4799 tmp2 = tcg_temp_new_i32();
4800 tcg_gen_shri_i32(tmp2, tmp, 16);
4801 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
4802 tcg_temp_free_i32(tmp);
4803 tcg_gen_addi_i32(addr, addr, stride);
4804 gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
4805 tcg_temp_free_i32(tmp2);
4806 tcg_gen_addi_i32(addr, addr, stride);
4808 } else /* size == 0 */ {
4809 if (load) {
4810 TCGV_UNUSED_I32(tmp2);
4811 for (n = 0; n < 4; n++) {
4812 tmp = tcg_temp_new_i32();
4813 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
4814 tcg_gen_addi_i32(addr, addr, stride);
4815 if (n == 0) {
4816 tmp2 = tmp;
4817 } else {
4818 tcg_gen_shli_i32(tmp, tmp, n * 8);
4819 tcg_gen_or_i32(tmp2, tmp2, tmp);
4820 tcg_temp_free_i32(tmp);
4823 neon_store_reg(rd, pass, tmp2);
4824 } else {
4825 tmp2 = neon_load_reg(rd, pass);
4826 for (n = 0; n < 4; n++) {
4827 tmp = tcg_temp_new_i32();
4828 if (n == 0) {
4829 tcg_gen_mov_i32(tmp, tmp2);
4830 } else {
4831 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4833 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
4834 tcg_temp_free_i32(tmp);
4835 tcg_gen_addi_i32(addr, addr, stride);
4837 tcg_temp_free_i32(tmp2);
4842 rd += spacing;
4844 tcg_temp_free_i32(addr);
4845 stride = nregs * 8;
4846 } else {
4847 size = (insn >> 10) & 3;
4848 if (size == 3) {
4849 /* Load single element to all lanes. */
4850 int a = (insn >> 4) & 1;
4851 if (!load) {
4852 return 1;
4854 size = (insn >> 6) & 3;
4855 nregs = ((insn >> 8) & 3) + 1;
4857 if (size == 3) {
4858 if (nregs != 4 || a == 0) {
4859 return 1;
4861 /* For VLD4, size == 3 and a == 1 means 32 bits at 16-byte alignment */
4862 size = 2;
4864 if (nregs == 1 && a == 1 && size == 0) {
4865 return 1;
4867 if (nregs == 3 && a == 1) {
4868 return 1;
4870 addr = tcg_temp_new_i32();
4871 load_reg_var(s, addr, rn);
4872 if (nregs == 1) {
4873 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4874 tmp = gen_load_and_replicate(s, addr, size);
4875 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4876 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4877 if (insn & (1 << 5)) {
4878 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4879 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4881 tcg_temp_free_i32(tmp);
4882 } else {
4883 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4884 stride = (insn & (1 << 5)) ? 2 : 1;
4885 for (reg = 0; reg < nregs; reg++) {
4886 tmp = gen_load_and_replicate(s, addr, size);
4887 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4888 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4889 tcg_temp_free_i32(tmp);
4890 tcg_gen_addi_i32(addr, addr, 1 << size);
4891 rd += stride;
4894 tcg_temp_free_i32(addr);
4895 stride = (1 << size) * nregs;
4896 } else {
4897 /* Single element. */
4898 int idx = (insn >> 4) & 0xf;
4899 pass = (insn >> 7) & 1;
4900 switch (size) {
4901 case 0:
4902 shift = ((insn >> 5) & 3) * 8;
4903 stride = 1;
4904 break;
4905 case 1:
4906 shift = ((insn >> 6) & 1) * 16;
4907 stride = (insn & (1 << 5)) ? 2 : 1;
4908 break;
4909 case 2:
4910 shift = 0;
4911 stride = (insn & (1 << 6)) ? 2 : 1;
4912 break;
4913 default:
4914 abort();
4916 nregs = ((insn >> 8) & 3) + 1;
4917 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4918 switch (nregs) {
4919 case 1:
4920 if (((idx & (1 << size)) != 0) ||
4921 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4922 return 1;
4924 break;
4925 case 3:
4926 if ((idx & 1) != 0) {
4927 return 1;
4929 /* fall through */
4930 case 2:
4931 if (size == 2 && (idx & 2) != 0) {
4932 return 1;
4934 break;
4935 case 4:
4936 if ((size == 2) && ((idx & 3) == 3)) {
4937 return 1;
4939 break;
4940 default:
4941 abort();
4943 if ((rd + stride * (nregs - 1)) > 31) {
4944 /* Attempts to write off the end of the register file
4945 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4946 * the neon_load_reg() would write off the end of the array.
4948 return 1;
4950 addr = tcg_temp_new_i32();
4951 load_reg_var(s, addr, rn);
4952 for (reg = 0; reg < nregs; reg++) {
4953 if (load) {
4954 tmp = tcg_temp_new_i32();
4955 switch (size) {
4956 case 0:
4957 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
4958 break;
4959 case 1:
4960 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
4961 break;
4962 case 2:
4963 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
4964 break;
4965 default: /* Avoid compiler warnings. */
4966 abort();
4968 if (size != 2) {
4969 tmp2 = neon_load_reg(rd, pass);
4970 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4971 shift, size ? 16 : 8);
4972 tcg_temp_free_i32(tmp2);
4974 neon_store_reg(rd, pass, tmp);
4975 } else { /* Store */
4976 tmp = neon_load_reg(rd, pass);
4977 if (shift)
4978 tcg_gen_shri_i32(tmp, tmp, shift);
4979 switch (size) {
4980 case 0:
4981 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
4982 break;
4983 case 1:
4984 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
4985 break;
4986 case 2:
4987 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
4988 break;
4990 tcg_temp_free_i32(tmp);
4992 rd += stride;
4993 tcg_gen_addi_i32(addr, addr, 1 << size);
4995 tcg_temp_free_i32(addr);
4996 stride = nregs * (1 << size);
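/* Base register writeback: rm == 15 means no writeback, rm == 13 adds
 * the transfer size computed above in 'stride', and any other rm adds
 * that index register to the base.
 */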
4999 if (rm != 15) {
5000 TCGv_i32 base;
5002 base = load_reg(s, rn);
5003 if (rm == 13) {
5004 tcg_gen_addi_i32(base, base, stride);
5005 } else {
5006 TCGv_i32 index;
5007 index = load_reg(s, rm);
5008 tcg_gen_add_i32(base, base, index);
5009 tcg_temp_free_i32(index);
5011 store_reg(s, rn, base);
5013 return 0;
5016 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
5017 static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
5019 tcg_gen_and_i32(t, t, c);
5020 tcg_gen_andc_i32(f, f, c);
5021 tcg_gen_or_i32(dest, t, f);
5024 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
5026 switch (size) {
5027 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5028 case 1: gen_helper_neon_narrow_u16(dest, src); break;
5029 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
5030 default: abort();
5034 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
5036 switch (size) {
5037 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5038 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5039 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
5040 default: abort();
5044 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
5046 switch (size) {
5047 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5048 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5049 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
5050 default: abort();
5054 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
5056 switch (size) {
5057 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5058 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5059 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
5060 default: abort();
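/* Shift step used on the way to a narrowing operation: 'q' selects the
 * rounding forms and 'u' the unsigned forms. Only 16- and 32-bit
 * element sizes reach here because the caller has already stepped the
 * size up by one.
 */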
5064 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
5065 int q, int u)
5067 if (q) {
5068 if (u) {
5069 switch (size) {
5070 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5071 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5072 default: abort();
5074 } else {
5075 switch (size) {
5076 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5077 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5078 default: abort();
5081 } else {
5082 if (u) {
5083 switch (size) {
5084 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5085 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
5086 default: abort();
5088 } else {
5089 switch (size) {
5090 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5091 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5092 default: abort();
5098 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
5100 if (u) {
5101 switch (size) {
5102 case 0: gen_helper_neon_widen_u8(dest, src); break;
5103 case 1: gen_helper_neon_widen_u16(dest, src); break;
5104 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5105 default: abort();
5107 } else {
5108 switch (size) {
5109 case 0: gen_helper_neon_widen_s8(dest, src); break;
5110 case 1: gen_helper_neon_widen_s16(dest, src); break;
5111 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5112 default: abort();
5115 tcg_temp_free_i32(src);
5118 static inline void gen_neon_addl(int size)
5120 switch (size) {
5121 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5122 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5123 case 2: tcg_gen_add_i64(CPU_V001); break;
5124 default: abort();
5128 static inline void gen_neon_subl(int size)
5130 switch (size) {
5131 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5132 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5133 case 2: tcg_gen_sub_i64(CPU_V001); break;
5134 default: abort();
5138 static inline void gen_neon_negl(TCGv_i64 var, int size)
5140 switch (size) {
5141 case 0: gen_helper_neon_negl_u16(var, var); break;
5142 case 1: gen_helper_neon_negl_u32(var, var); break;
5143 case 2:
5144 tcg_gen_neg_i64(var, var);
5145 break;
5146 default: abort();
5150 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
5152 switch (size) {
5153 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5154 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
5155 default: abort();
5159 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5160 int size, int u)
5162 TCGv_i64 tmp;
5164 switch ((size << 1) | u) {
5165 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5166 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5167 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5168 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5169 case 4:
5170 tmp = gen_muls_i64_i32(a, b);
5171 tcg_gen_mov_i64(dest, tmp);
5172 tcg_temp_free_i64(tmp);
5173 break;
5174 case 5:
5175 tmp = gen_mulu_i64_i32(a, b);
5176 tcg_gen_mov_i64(dest, tmp);
5177 tcg_temp_free_i64(tmp);
5178 break;
5179 default: abort();
5182 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5183 Don't forget to clean them now. */
5184 if (size < 2) {
5185 tcg_temp_free_i32(a);
5186 tcg_temp_free_i32(b);
5190 static void gen_neon_narrow_op(int op, int u, int size,
5191 TCGv_i32 dest, TCGv_i64 src)
5193 if (op) {
5194 if (u) {
5195 gen_neon_unarrow_sats(size, dest, src);
5196 } else {
5197 gen_neon_narrow(size, dest, src);
5199 } else {
5200 if (u) {
5201 gen_neon_narrow_satu(size, dest, src);
5202 } else {
5203 gen_neon_narrow_sats(size, dest, src);
5208 /* Symbolic constants for op fields for Neon 3-register same-length.
5209 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
5210 * table A7-9.
5212 #define NEON_3R_VHADD 0
5213 #define NEON_3R_VQADD 1
5214 #define NEON_3R_VRHADD 2
5215 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
5216 #define NEON_3R_VHSUB 4
5217 #define NEON_3R_VQSUB 5
5218 #define NEON_3R_VCGT 6
5219 #define NEON_3R_VCGE 7
5220 #define NEON_3R_VSHL 8
5221 #define NEON_3R_VQSHL 9
5222 #define NEON_3R_VRSHL 10
5223 #define NEON_3R_VQRSHL 11
5224 #define NEON_3R_VMAX 12
5225 #define NEON_3R_VMIN 13
5226 #define NEON_3R_VABD 14
5227 #define NEON_3R_VABA 15
5228 #define NEON_3R_VADD_VSUB 16
5229 #define NEON_3R_VTST_VCEQ 17
5230 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
5231 #define NEON_3R_VMUL 19
5232 #define NEON_3R_VPMAX 20
5233 #define NEON_3R_VPMIN 21
5234 #define NEON_3R_VQDMULH_VQRDMULH 22
5235 #define NEON_3R_VPADD 23
5236 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
5237 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
5238 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
5239 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
5240 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
5241 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
5242 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
5243 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
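/* As for neon_2rm_sizes below: each entry has bit n set if the insn
 * accepts size value n, so unallocated size combinations UNDEF.
 */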
5245 static const uint8_t neon_3r_sizes[] = {
5246 [NEON_3R_VHADD] = 0x7,
5247 [NEON_3R_VQADD] = 0xf,
5248 [NEON_3R_VRHADD] = 0x7,
5249 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
5250 [NEON_3R_VHSUB] = 0x7,
5251 [NEON_3R_VQSUB] = 0xf,
5252 [NEON_3R_VCGT] = 0x7,
5253 [NEON_3R_VCGE] = 0x7,
5254 [NEON_3R_VSHL] = 0xf,
5255 [NEON_3R_VQSHL] = 0xf,
5256 [NEON_3R_VRSHL] = 0xf,
5257 [NEON_3R_VQRSHL] = 0xf,
5258 [NEON_3R_VMAX] = 0x7,
5259 [NEON_3R_VMIN] = 0x7,
5260 [NEON_3R_VABD] = 0x7,
5261 [NEON_3R_VABA] = 0x7,
5262 [NEON_3R_VADD_VSUB] = 0xf,
5263 [NEON_3R_VTST_VCEQ] = 0x7,
5264 [NEON_3R_VML] = 0x7,
5265 [NEON_3R_VMUL] = 0x7,
5266 [NEON_3R_VPMAX] = 0x7,
5267 [NEON_3R_VPMIN] = 0x7,
5268 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
5269 [NEON_3R_VPADD] = 0x7,
5270 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
5271 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
5272 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
5273 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
5274 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
5275 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
5276 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
5277 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
5280 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
5281 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
5282 * table A7-13.
5284 #define NEON_2RM_VREV64 0
5285 #define NEON_2RM_VREV32 1
5286 #define NEON_2RM_VREV16 2
5287 #define NEON_2RM_VPADDL 4
5288 #define NEON_2RM_VPADDL_U 5
5289 #define NEON_2RM_AESE 6 /* Includes AESD */
5290 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
5291 #define NEON_2RM_VCLS 8
5292 #define NEON_2RM_VCLZ 9
5293 #define NEON_2RM_VCNT 10
5294 #define NEON_2RM_VMVN 11
5295 #define NEON_2RM_VPADAL 12
5296 #define NEON_2RM_VPADAL_U 13
5297 #define NEON_2RM_VQABS 14
5298 #define NEON_2RM_VQNEG 15
5299 #define NEON_2RM_VCGT0 16
5300 #define NEON_2RM_VCGE0 17
5301 #define NEON_2RM_VCEQ0 18
5302 #define NEON_2RM_VCLE0 19
5303 #define NEON_2RM_VCLT0 20
5304 #define NEON_2RM_SHA1H 21
5305 #define NEON_2RM_VABS 22
5306 #define NEON_2RM_VNEG 23
5307 #define NEON_2RM_VCGT0_F 24
5308 #define NEON_2RM_VCGE0_F 25
5309 #define NEON_2RM_VCEQ0_F 26
5310 #define NEON_2RM_VCLE0_F 27
5311 #define NEON_2RM_VCLT0_F 28
5312 #define NEON_2RM_VABS_F 30
5313 #define NEON_2RM_VNEG_F 31
5314 #define NEON_2RM_VSWP 32
5315 #define NEON_2RM_VTRN 33
5316 #define NEON_2RM_VUZP 34
5317 #define NEON_2RM_VZIP 35
5318 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
5319 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
5320 #define NEON_2RM_VSHLL 38
5321 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
5322 #define NEON_2RM_VRINTN 40
5323 #define NEON_2RM_VRINTX 41
5324 #define NEON_2RM_VRINTA 42
5325 #define NEON_2RM_VRINTZ 43
5326 #define NEON_2RM_VCVT_F16_F32 44
5327 #define NEON_2RM_VRINTM 45
5328 #define NEON_2RM_VCVT_F32_F16 46
5329 #define NEON_2RM_VRINTP 47
5330 #define NEON_2RM_VCVTAU 48
5331 #define NEON_2RM_VCVTAS 49
5332 #define NEON_2RM_VCVTNU 50
5333 #define NEON_2RM_VCVTNS 51
5334 #define NEON_2RM_VCVTPU 52
5335 #define NEON_2RM_VCVTPS 53
5336 #define NEON_2RM_VCVTMU 54
5337 #define NEON_2RM_VCVTMS 55
5338 #define NEON_2RM_VRECPE 56
5339 #define NEON_2RM_VRSQRTE 57
5340 #define NEON_2RM_VRECPE_F 58
5341 #define NEON_2RM_VRSQRTE_F 59
5342 #define NEON_2RM_VCVT_FS 60
5343 #define NEON_2RM_VCVT_FU 61
5344 #define NEON_2RM_VCVT_SF 62
5345 #define NEON_2RM_VCVT_UF 63
5347 static int neon_2rm_is_float_op(int op)
5349 /* Return true if this neon 2reg-misc op is float-to-float */
5350 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
5351 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
5352 op == NEON_2RM_VRINTM ||
5353 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
5354 op >= NEON_2RM_VRECPE_F);
5357 static bool neon_2rm_is_v8_op(int op)
5359 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5360 switch (op) {
5361 case NEON_2RM_VRINTN:
5362 case NEON_2RM_VRINTA:
5363 case NEON_2RM_VRINTM:
5364 case NEON_2RM_VRINTP:
5365 case NEON_2RM_VRINTZ:
5366 case NEON_2RM_VRINTX:
5367 case NEON_2RM_VCVTAU:
5368 case NEON_2RM_VCVTAS:
5369 case NEON_2RM_VCVTNU:
5370 case NEON_2RM_VCVTNS:
5371 case NEON_2RM_VCVTPU:
5372 case NEON_2RM_VCVTPS:
5373 case NEON_2RM_VCVTMU:
5374 case NEON_2RM_VCVTMS:
5375 return true;
5376 default:
5377 return false;
5381 /* Each entry in this array has bit n set if the insn allows
5382 * size value n (otherwise it will UNDEF). Since unallocated
5383 * op values will have no bits set they always UNDEF.
5385 static const uint8_t neon_2rm_sizes[] = {
5386 [NEON_2RM_VREV64] = 0x7,
5387 [NEON_2RM_VREV32] = 0x3,
5388 [NEON_2RM_VREV16] = 0x1,
5389 [NEON_2RM_VPADDL] = 0x7,
5390 [NEON_2RM_VPADDL_U] = 0x7,
5391 [NEON_2RM_AESE] = 0x1,
5392 [NEON_2RM_AESMC] = 0x1,
5393 [NEON_2RM_VCLS] = 0x7,
5394 [NEON_2RM_VCLZ] = 0x7,
5395 [NEON_2RM_VCNT] = 0x1,
5396 [NEON_2RM_VMVN] = 0x1,
5397 [NEON_2RM_VPADAL] = 0x7,
5398 [NEON_2RM_VPADAL_U] = 0x7,
5399 [NEON_2RM_VQABS] = 0x7,
5400 [NEON_2RM_VQNEG] = 0x7,
5401 [NEON_2RM_VCGT0] = 0x7,
5402 [NEON_2RM_VCGE0] = 0x7,
5403 [NEON_2RM_VCEQ0] = 0x7,
5404 [NEON_2RM_VCLE0] = 0x7,
5405 [NEON_2RM_VCLT0] = 0x7,
5406 [NEON_2RM_SHA1H] = 0x4,
5407 [NEON_2RM_VABS] = 0x7,
5408 [NEON_2RM_VNEG] = 0x7,
5409 [NEON_2RM_VCGT0_F] = 0x4,
5410 [NEON_2RM_VCGE0_F] = 0x4,
5411 [NEON_2RM_VCEQ0_F] = 0x4,
5412 [NEON_2RM_VCLE0_F] = 0x4,
5413 [NEON_2RM_VCLT0_F] = 0x4,
5414 [NEON_2RM_VABS_F] = 0x4,
5415 [NEON_2RM_VNEG_F] = 0x4,
5416 [NEON_2RM_VSWP] = 0x1,
5417 [NEON_2RM_VTRN] = 0x7,
5418 [NEON_2RM_VUZP] = 0x7,
5419 [NEON_2RM_VZIP] = 0x7,
5420 [NEON_2RM_VMOVN] = 0x7,
5421 [NEON_2RM_VQMOVN] = 0x7,
5422 [NEON_2RM_VSHLL] = 0x7,
5423 [NEON_2RM_SHA1SU1] = 0x4,
5424 [NEON_2RM_VRINTN] = 0x4,
5425 [NEON_2RM_VRINTX] = 0x4,
5426 [NEON_2RM_VRINTA] = 0x4,
5427 [NEON_2RM_VRINTZ] = 0x4,
5428 [NEON_2RM_VCVT_F16_F32] = 0x2,
5429 [NEON_2RM_VRINTM] = 0x4,
5430 [NEON_2RM_VCVT_F32_F16] = 0x2,
5431 [NEON_2RM_VRINTP] = 0x4,
5432 [NEON_2RM_VCVTAU] = 0x4,
5433 [NEON_2RM_VCVTAS] = 0x4,
5434 [NEON_2RM_VCVTNU] = 0x4,
5435 [NEON_2RM_VCVTNS] = 0x4,
5436 [NEON_2RM_VCVTPU] = 0x4,
5437 [NEON_2RM_VCVTPS] = 0x4,
5438 [NEON_2RM_VCVTMU] = 0x4,
5439 [NEON_2RM_VCVTMS] = 0x4,
5440 [NEON_2RM_VRECPE] = 0x4,
5441 [NEON_2RM_VRSQRTE] = 0x4,
5442 [NEON_2RM_VRECPE_F] = 0x4,
5443 [NEON_2RM_VRSQRTE_F] = 0x4,
5444 [NEON_2RM_VCVT_FS] = 0x4,
5445 [NEON_2RM_VCVT_FU] = 0x4,
5446 [NEON_2RM_VCVT_SF] = 0x4,
5447 [NEON_2RM_VCVT_UF] = 0x4,
5450 /* Translate a NEON data processing instruction. Return nonzero if the
5451 instruction is invalid.
5452 We process data in a mixture of 32-bit and 64-bit chunks.
5453 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
5455 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
5457 int op;
5458 int q;
5459 int rd, rn, rm;
5460 int size;
5461 int shift;
5462 int pass;
5463 int count;
5464 int pairwise;
5465 int u;
5466 uint32_t imm, mask;
5467 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
5468 TCGv_i64 tmp64;
5470 /* FIXME: this access check should not take precedence over UNDEF
5471 * for invalid encodings; we will generate incorrect syndrome information
5472 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5474 if (s->fp_excp_el) {
5475 gen_exception_insn(s, 4, EXCP_UDEF,
5476 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
5477 return 0;
5480 if (!s->vfp_enabled)
5481 return 1;
5482 q = (insn & (1 << 6)) != 0;
5483 u = (insn >> 24) & 1;
5484 VFP_DREG_D(rd, insn);
5485 VFP_DREG_N(rn, insn);
5486 VFP_DREG_M(rm, insn);
5487 size = (insn >> 20) & 3;
5488 if ((insn & (1 << 23)) == 0) {
5489 /* Three register same length. */
5490 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
5491 /* Catch invalid op and bad size combinations: UNDEF */
5492 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5493 return 1;
5495 /* All insns of this form UNDEF for either this condition or the
5496 * superset of cases "Q==1"; we catch the latter later.
5498 if (q && ((rd | rn | rm) & 1)) {
5499 return 1;
5502 * The SHA-1/SHA-256 3-register instructions require special treatment
5503 * here, as their size field is overloaded as an op type selector, and
5504 * they all consume their input in a single pass.
5506 if (op == NEON_3R_SHA) {
5507 if (!q) {
5508 return 1;
5510 if (!u) { /* SHA-1 */
5511 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
5512 return 1;
5514 tmp = tcg_const_i32(rd);
5515 tmp2 = tcg_const_i32(rn);
5516 tmp3 = tcg_const_i32(rm);
5517 tmp4 = tcg_const_i32(size);
5518 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5519 tcg_temp_free_i32(tmp4);
5520 } else { /* SHA-256 */
5521 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
5522 return 1;
5524 tmp = tcg_const_i32(rd);
5525 tmp2 = tcg_const_i32(rn);
5526 tmp3 = tcg_const_i32(rm);
5527 switch (size) {
5528 case 0:
5529 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5530 break;
5531 case 1:
5532 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5533 break;
5534 case 2:
5535 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5536 break;
5539 tcg_temp_free_i32(tmp);
5540 tcg_temp_free_i32(tmp2);
5541 tcg_temp_free_i32(tmp3);
5542 return 0;
5544 if (size == 3 && op != NEON_3R_LOGIC) {
5545 /* 64-bit element instructions. */
5546 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5547 neon_load_reg64(cpu_V0, rn + pass);
5548 neon_load_reg64(cpu_V1, rm + pass);
5549 switch (op) {
5550 case NEON_3R_VQADD:
5551 if (u) {
5552 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5553 cpu_V0, cpu_V1);
5554 } else {
5555 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5556 cpu_V0, cpu_V1);
5558 break;
5559 case NEON_3R_VQSUB:
5560 if (u) {
5561 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5562 cpu_V0, cpu_V1);
5563 } else {
5564 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5565 cpu_V0, cpu_V1);
5567 break;
5568 case NEON_3R_VSHL:
5569 if (u) {
5570 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5571 } else {
5572 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5574 break;
5575 case NEON_3R_VQSHL:
5576 if (u) {
5577 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5578 cpu_V1, cpu_V0);
5579 } else {
5580 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5581 cpu_V1, cpu_V0);
5583 break;
5584 case NEON_3R_VRSHL:
5585 if (u) {
5586 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
5587 } else {
5588 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5590 break;
5591 case NEON_3R_VQRSHL:
5592 if (u) {
5593 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5594 cpu_V1, cpu_V0);
5595 } else {
5596 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5597 cpu_V1, cpu_V0);
5599 break;
5600 case NEON_3R_VADD_VSUB:
5601 if (u) {
5602 tcg_gen_sub_i64(CPU_V001);
5603 } else {
5604 tcg_gen_add_i64(CPU_V001);
5606 break;
5607 default:
5608 abort();
5610 neon_store_reg64(cpu_V0, rd + pass);
5612 return 0;
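/* The remaining 3-reg-same ops work on 32-bit chunks. Before the
 * per-pass loop, apply per-op fixups: shift insns have their operands
 * reversed, the pairwise flag is set for the VPADD/VPMAX/VPMIN style
 * ops, and some encoding-specific UNDEF cases are caught early.
 */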
5614 pairwise = 0;
5615 switch (op) {
5616 case NEON_3R_VSHL:
5617 case NEON_3R_VQSHL:
5618 case NEON_3R_VRSHL:
5619 case NEON_3R_VQRSHL:
5621 int rtmp;
5622 /* Shift instruction operands are reversed. */
5623 rtmp = rn;
5624 rn = rm;
5625 rm = rtmp;
5627 break;
5628 case NEON_3R_VPADD:
5629 if (u) {
5630 return 1;
5632 /* Fall through */
5633 case NEON_3R_VPMAX:
5634 case NEON_3R_VPMIN:
5635 pairwise = 1;
5636 break;
5637 case NEON_3R_FLOAT_ARITH:
5638 pairwise = (u && size < 2); /* if VPADD (float) */
5639 break;
5640 case NEON_3R_FLOAT_MINMAX:
5641 pairwise = u; /* if VPMIN/VPMAX (float) */
5642 break;
5643 case NEON_3R_FLOAT_CMP:
5644 if (!u && size) {
5645 /* no encoding for U=0 C=1x */
5646 return 1;
5648 break;
5649 case NEON_3R_FLOAT_ACMP:
5650 if (!u) {
5651 return 1;
5653 break;
5654 case NEON_3R_FLOAT_MISC:
5655 /* VMAXNM/VMINNM in ARMv8 */
5656 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
5657 return 1;
5659 break;
5660 case NEON_3R_VMUL:
5661 if (u && (size != 0)) {
5662 /* UNDEF on invalid size for polynomial subcase */
5663 return 1;
5665 break;
5666 case NEON_3R_VFM:
5667 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
5668 return 1;
5670 break;
5671 default:
5672 break;
5675 if (pairwise && q) {
5676 /* All the pairwise insns UNDEF if Q is set */
5677 return 1;
5680 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5682 if (pairwise) {
5683 /* Pairwise. */
5684 if (pass < 1) {
5685 tmp = neon_load_reg(rn, 0);
5686 tmp2 = neon_load_reg(rn, 1);
5687 } else {
5688 tmp = neon_load_reg(rm, 0);
5689 tmp2 = neon_load_reg(rm, 1);
5691 } else {
5692 /* Elementwise. */
5693 tmp = neon_load_reg(rn, pass);
5694 tmp2 = neon_load_reg(rm, pass);
5696 switch (op) {
5697 case NEON_3R_VHADD:
5698 GEN_NEON_INTEGER_OP(hadd);
5699 break;
5700 case NEON_3R_VQADD:
5701 GEN_NEON_INTEGER_OP_ENV(qadd);
5702 break;
5703 case NEON_3R_VRHADD:
5704 GEN_NEON_INTEGER_OP(rhadd);
5705 break;
5706 case NEON_3R_LOGIC: /* Logic ops. */
5707 switch ((u << 2) | size) {
5708 case 0: /* VAND */
5709 tcg_gen_and_i32(tmp, tmp, tmp2);
5710 break;
5711 case 1: /* BIC */
5712 tcg_gen_andc_i32(tmp, tmp, tmp2);
5713 break;
5714 case 2: /* VORR */
5715 tcg_gen_or_i32(tmp, tmp, tmp2);
5716 break;
5717 case 3: /* VORN */
5718 tcg_gen_orc_i32(tmp, tmp, tmp2);
5719 break;
5720 case 4: /* VEOR */
5721 tcg_gen_xor_i32(tmp, tmp, tmp2);
5722 break;
5723 case 5: /* VBSL */
5724 tmp3 = neon_load_reg(rd, pass);
5725 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
5726 tcg_temp_free_i32(tmp3);
5727 break;
5728 case 6: /* VBIT */
5729 tmp3 = neon_load_reg(rd, pass);
5730 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
5731 tcg_temp_free_i32(tmp3);
5732 break;
5733 case 7: /* VBIF */
5734 tmp3 = neon_load_reg(rd, pass);
5735 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
5736 tcg_temp_free_i32(tmp3);
5737 break;
5739 break;
5740 case NEON_3R_VHSUB:
5741 GEN_NEON_INTEGER_OP(hsub);
5742 break;
5743 case NEON_3R_VQSUB:
5744 GEN_NEON_INTEGER_OP_ENV(qsub);
5745 break;
5746 case NEON_3R_VCGT:
5747 GEN_NEON_INTEGER_OP(cgt);
5748 break;
5749 case NEON_3R_VCGE:
5750 GEN_NEON_INTEGER_OP(cge);
5751 break;
5752 case NEON_3R_VSHL:
5753 GEN_NEON_INTEGER_OP(shl);
5754 break;
5755 case NEON_3R_VQSHL:
5756 GEN_NEON_INTEGER_OP_ENV(qshl);
5757 break;
5758 case NEON_3R_VRSHL:
5759 GEN_NEON_INTEGER_OP(rshl);
5760 break;
5761 case NEON_3R_VQRSHL:
5762 GEN_NEON_INTEGER_OP_ENV(qrshl);
5763 break;
5764 case NEON_3R_VMAX:
5765 GEN_NEON_INTEGER_OP(max);
5766 break;
5767 case NEON_3R_VMIN:
5768 GEN_NEON_INTEGER_OP(min);
5769 break;
5770 case NEON_3R_VABD:
5771 GEN_NEON_INTEGER_OP(abd);
5772 break;
5773 case NEON_3R_VABA:
5774 GEN_NEON_INTEGER_OP(abd);
5775 tcg_temp_free_i32(tmp2);
5776 tmp2 = neon_load_reg(rd, pass);
5777 gen_neon_add(size, tmp, tmp2);
5778 break;
5779 case NEON_3R_VADD_VSUB:
5780 if (!u) { /* VADD */
5781 gen_neon_add(size, tmp, tmp2);
5782 } else { /* VSUB */
5783 switch (size) {
5784 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5785 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5786 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
5787 default: abort();
5790 break;
5791 case NEON_3R_VTST_VCEQ:
5792 if (!u) { /* VTST */
5793 switch (size) {
5794 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5795 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5796 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
5797 default: abort();
5799 } else { /* VCEQ */
5800 switch (size) {
5801 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5802 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5803 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5804 default: abort();
5807 break;
5808 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
5809 switch (size) {
5810 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5811 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5812 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5813 default: abort();
5815 tcg_temp_free_i32(tmp2);
5816 tmp2 = neon_load_reg(rd, pass);
5817 if (u) { /* VMLS */
5818 gen_neon_rsb(size, tmp, tmp2);
5819 } else { /* VMLA */
5820 gen_neon_add(size, tmp, tmp2);
5822 break;
5823 case NEON_3R_VMUL:
5824 if (u) { /* polynomial */
5825 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
5826 } else { /* Integer */
5827 switch (size) {
5828 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5829 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5830 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5831 default: abort();
5834 break;
5835 case NEON_3R_VPMAX:
5836 GEN_NEON_INTEGER_OP(pmax);
5837 break;
5838 case NEON_3R_VPMIN:
5839 GEN_NEON_INTEGER_OP(pmin);
5840 break;
5841 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
5842 if (!u) { /* VQDMULH */
5843 switch (size) {
5844 case 1:
5845 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5846 break;
5847 case 2:
5848 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5849 break;
5850 default: abort();
5852 } else { /* VQRDMULH */
5853 switch (size) {
5854 case 1:
5855 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5856 break;
5857 case 2:
5858 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5859 break;
5860 default: abort();
5863 break;
5864 case NEON_3R_VPADD:
5865 switch (size) {
5866 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5867 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5868 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
5869 default: abort();
5871 break;
5872 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
5874 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5875 switch ((u << 2) | size) {
5876 case 0: /* VADD */
5877 case 4: /* VPADD */
5878 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5879 break;
5880 case 2: /* VSUB */
5881 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
5882 break;
5883 case 6: /* VABD */
5884 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5885 break;
5886 default:
5887 abort();
5889 tcg_temp_free_ptr(fpstatus);
5890 break;
5892 case NEON_3R_FLOAT_MULTIPLY:
5894 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5895 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5896 if (!u) {
5897 tcg_temp_free_i32(tmp2);
5898 tmp2 = neon_load_reg(rd, pass);
5899 if (size == 0) {
5900 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5901 } else {
5902 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5905 tcg_temp_free_ptr(fpstatus);
5906 break;
5908 case NEON_3R_FLOAT_CMP:
5910 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5911 if (!u) {
5912 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
5913 } else {
5914 if (size == 0) {
5915 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5916 } else {
5917 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5920 tcg_temp_free_ptr(fpstatus);
5921 break;
5923 case NEON_3R_FLOAT_ACMP:
5925 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5926 if (size == 0) {
5927 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5928 } else {
5929 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5931 tcg_temp_free_ptr(fpstatus);
5932 break;
5934 case NEON_3R_FLOAT_MINMAX:
5936 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5937 if (size == 0) {
5938 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
5939 } else {
5940 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
5942 tcg_temp_free_ptr(fpstatus);
5943 break;
5945 case NEON_3R_FLOAT_MISC:
5946 if (u) {
5947 /* VMAXNM/VMINNM */
5948 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5949 if (size == 0) {
5950 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
5951 } else {
5952 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
5954 tcg_temp_free_ptr(fpstatus);
5955 } else {
5956 if (size == 0) {
5957 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5958 } else {
5959 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5962 break;
5963 case NEON_3R_VFM:
5965 /* VFMA, VFMS: fused multiply-add */
5966 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5967 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5968 if (size) {
5969 /* VFMS */
5970 gen_helper_vfp_negs(tmp, tmp);
5972 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5973 tcg_temp_free_i32(tmp3);
5974 tcg_temp_free_ptr(fpstatus);
5975 break;
5977 default:
5978 abort();
5980 tcg_temp_free_i32(tmp2);
5982 /* Save the result. For elementwise operations we can put it
5983 straight into the destination register. For pairwise operations
5984 we have to be careful to avoid clobbering the source operands. */
5985 if (pairwise && rd == rm) {
5986 neon_store_scratch(pass, tmp);
5987 } else {
5988 neon_store_reg(rd, pass, tmp);
5991 } /* for pass */
5992 if (pairwise && rd == rm) {
5993 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5994 tmp = neon_load_scratch(pass);
5995 neon_store_reg(rd, pass, tmp);
5998 /* End of 3 register same size operations. */
5999 } else if (insn & (1 << 4)) {
6000 if ((insn & 0x00380080) != 0) {
6001 /* Two registers and shift. */
6002 op = (insn >> 8) & 0xf;
6003 if (insn & (1 << 7)) {
6004 /* 64-bit shift. */
6005 if (op > 7) {
6006 return 1;
6008 size = 3;
6009 } else {
6010 size = 2;
6011 while ((insn & (1 << (size + 19))) == 0)
6012 size--;
6014 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
6015 /* To avoid excessive duplication of ops we implement shift
6016 by immediate using the variable shift operations. */
6017 if (op < 8) {
6018 /* Shift by immediate:
6019 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
6020 if (q && ((rd | rm) & 1)) {
6021 return 1;
6023 if (!u && (op == 4 || op == 6)) {
6024 return 1;
6026 /* Right shifts are encoded as N - shift, where N is the
6027 element size in bits. */
6028 if (op <= 4)
6029 shift = shift - (1 << (size + 3));
6030 if (size == 3) {
6031 count = q + 1;
6032 } else {
6033 count = q ? 4 : 2;
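/* Replicate the (possibly negative) shift count into each element lane
 * of a 32-bit immediate so the variable-shift helpers can be reused
 * for these shift-by-immediate forms.
 */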
6035 switch (size) {
6036 case 0:
6037 imm = (uint8_t) shift;
6038 imm |= imm << 8;
6039 imm |= imm << 16;
6040 break;
6041 case 1:
6042 imm = (uint16_t) shift;
6043 imm |= imm << 16;
6044 break;
6045 case 2:
6046 case 3:
6047 imm = shift;
6048 break;
6049 default:
6050 abort();
6053 for (pass = 0; pass < count; pass++) {
6054 if (size == 3) {
6055 neon_load_reg64(cpu_V0, rm + pass);
6056 tcg_gen_movi_i64(cpu_V1, imm);
6057 switch (op) {
6058 case 0: /* VSHR */
6059 case 1: /* VSRA */
6060 if (u)
6061 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6062 else
6063 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
6064 break;
6065 case 2: /* VRSHR */
6066 case 3: /* VRSRA */
6067 if (u)
6068 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
6069 else
6070 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
6071 break;
6072 case 4: /* VSRI */
6073 case 5: /* VSHL, VSLI */
6074 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6075 break;
6076 case 6: /* VQSHLU */
6077 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6078 cpu_V0, cpu_V1);
6079 break;
6080 case 7: /* VQSHL */
6081 if (u) {
6082 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
6083 cpu_V0, cpu_V1);
6084 } else {
6085 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
6086 cpu_V0, cpu_V1);
6088 break;
6090 if (op == 1 || op == 3) {
6091 /* Accumulate. */
6092 neon_load_reg64(cpu_V1, rd + pass);
6093 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6094 } else if (op == 4 || (op == 5 && u)) {
6095 /* Insert */
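/* VSRI/VSLI: build a mask of the destination bits replaced by the
 * shifted value (the shifted source already has zeros elsewhere), then
 * merge in the untouched destination bits.
 */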
6096 neon_load_reg64(cpu_V1, rd + pass);
6097 uint64_t mask;
6098 if (shift < -63 || shift > 63) {
6099 mask = 0;
6100 } else {
6101 if (op == 4) {
6102 mask = 0xffffffffffffffffull >> -shift;
6103 } else {
6104 mask = 0xffffffffffffffffull << shift;
6107 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6108 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6110 neon_store_reg64(cpu_V0, rd + pass);
6111 } else { /* size < 3 */
6112 /* Operands in T0 and T1. */
6113 tmp = neon_load_reg(rm, pass);
6114 tmp2 = tcg_temp_new_i32();
6115 tcg_gen_movi_i32(tmp2, imm);
6116 switch (op) {
6117 case 0: /* VSHR */
6118 case 1: /* VSRA */
6119 GEN_NEON_INTEGER_OP(shl);
6120 break;
6121 case 2: /* VRSHR */
6122 case 3: /* VRSRA */
6123 GEN_NEON_INTEGER_OP(rshl);
6124 break;
6125 case 4: /* VSRI */
6126 case 5: /* VSHL, VSLI */
6127 switch (size) {
6128 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6129 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6130 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
6131 default: abort();
6133 break;
6134 case 6: /* VQSHLU */
6135 switch (size) {
6136 case 0:
6137 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6138 tmp, tmp2);
6139 break;
6140 case 1:
6141 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6142 tmp, tmp2);
6143 break;
6144 case 2:
6145 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6146 tmp, tmp2);
6147 break;
6148 default:
6149 abort();
6151 break;
6152 case 7: /* VQSHL */
6153 GEN_NEON_INTEGER_OP_ENV(qshl);
6154 break;
6156 tcg_temp_free_i32(tmp2);
6158 if (op == 1 || op == 3) {
6159 /* Accumulate. */
6160 tmp2 = neon_load_reg(rd, pass);
6161 gen_neon_add(size, tmp, tmp2);
6162 tcg_temp_free_i32(tmp2);
6163 } else if (op == 4 || (op == 5 && u)) {
6164 /* Insert */
6165 switch (size) {
6166 case 0:
6167 if (op == 4)
6168 mask = 0xff >> -shift;
6169 else
6170 mask = (uint8_t)(0xff << shift);
6171 mask |= mask << 8;
6172 mask |= mask << 16;
6173 break;
6174 case 1:
6175 if (op == 4)
6176 mask = 0xffff >> -shift;
6177 else
6178 mask = (uint16_t)(0xffff << shift);
6179 mask |= mask << 16;
6180 break;
6181 case 2:
6182 if (shift < -31 || shift > 31) {
6183 mask = 0;
6184 } else {
6185 if (op == 4)
6186 mask = 0xffffffffu >> -shift;
6187 else
6188 mask = 0xffffffffu << shift;
6190 break;
6191 default:
6192 abort();
6194 tmp2 = neon_load_reg(rd, pass);
6195 tcg_gen_andi_i32(tmp, tmp, mask);
6196 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
6197 tcg_gen_or_i32(tmp, tmp, tmp2);
6198 tcg_temp_free_i32(tmp2);
6200 neon_store_reg(rd, pass, tmp);
6202 } /* for pass */
6203 } else if (op < 10) {
6204 /* Shift by immediate and narrow:
6205 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
6206 int input_unsigned = (op == 8) ? !u : u;
6207 if (rm & 1) {
6208 return 1;
6210 shift = shift - (1 << (size + 3));
6211 size++;
6212 if (size == 3) {
6213 tmp64 = tcg_const_i64(shift);
6214 neon_load_reg64(cpu_V0, rm);
6215 neon_load_reg64(cpu_V1, rm + 1);
6216 for (pass = 0; pass < 2; pass++) {
6217 TCGv_i64 in;
6218 if (pass == 0) {
6219 in = cpu_V0;
6220 } else {
6221 in = cpu_V1;
6223 if (q) {
6224 if (input_unsigned) {
6225 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
6226 } else {
6227 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
6229 } else {
6230 if (input_unsigned) {
6231 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
6232 } else {
6233 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
6236 tmp = tcg_temp_new_i32();
6237 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6238 neon_store_reg(rd, pass, tmp);
6239 } /* for pass */
6240 tcg_temp_free_i64(tmp64);
6241 } else {
6242 if (size == 1) {
6243 imm = (uint16_t)shift;
6244 imm |= imm << 16;
6245 } else {
6246 /* size == 2 */
6247 imm = (uint32_t)shift;
6249 tmp2 = tcg_const_i32(imm);
6250 tmp4 = neon_load_reg(rm + 1, 0);
6251 tmp5 = neon_load_reg(rm + 1, 1);
6252 for (pass = 0; pass < 2; pass++) {
6253 if (pass == 0) {
6254 tmp = neon_load_reg(rm, 0);
6255 } else {
6256 tmp = tmp4;
6258 gen_neon_shift_narrow(size, tmp, tmp2, q,
6259 input_unsigned);
6260 if (pass == 0) {
6261 tmp3 = neon_load_reg(rm, 1);
6262 } else {
6263 tmp3 = tmp5;
6265 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6266 input_unsigned);
6267 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
6268 tcg_temp_free_i32(tmp);
6269 tcg_temp_free_i32(tmp3);
6270 tmp = tcg_temp_new_i32();
6271 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6272 neon_store_reg(rd, pass, tmp);
6273 } /* for pass */
6274 tcg_temp_free_i32(tmp2);
6276 } else if (op == 10) {
6277 /* VSHLL, VMOVL */
6278 if (q || (rd & 1)) {
6279 return 1;
6281 tmp = neon_load_reg(rm, 0);
6282 tmp2 = neon_load_reg(rm, 1);
6283 for (pass = 0; pass < 2; pass++) {
6284 if (pass == 1)
6285 tmp = tmp2;
6287 gen_neon_widen(cpu_V0, tmp, size, u);
6289 if (shift != 0) {
6290 /* The shift is less than the width of the source
6291 type, so we can just shift the whole register. */
6292 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
6293 /* Widen the result of shift: we need to clear
6294 * the potential overflow bits resulting from
6295 * left bits of the narrow input appearing as
6296 * right bits of the left neighbour narrow
6297 * input. */
6298 if (size < 2 || !u) {
6299 uint64_t imm64;
6300 if (size == 0) {
6301 imm = (0xffu >> (8 - shift));
6302 imm |= imm << 16;
6303 } else if (size == 1) {
6304 imm = 0xffff >> (16 - shift);
6305 } else {
6306 /* size == 2 */
6307 imm = 0xffffffff >> (32 - shift);
6309 if (size < 2) {
6310 imm64 = imm | (((uint64_t)imm) << 32);
6311 } else {
6312 imm64 = imm;
6314 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
6317 neon_store_reg64(cpu_V0, rd + pass);
6319 } else if (op >= 14) {
6320 /* VCVT fixed-point. */
6321 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6322 return 1;
6324 /* We have already masked out the must-be-1 top bit of imm6,
6325 * hence this 32-shift where the ARM ARM has 64-imm6.
6327 shift = 32 - shift;
6328 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6329 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
6330 if (!(op & 1)) {
6331 if (u)
6332 gen_vfp_ulto(0, shift, 1);
6333 else
6334 gen_vfp_slto(0, shift, 1);
6335 } else {
6336 if (u)
6337 gen_vfp_toul(0, shift, 1);
6338 else
6339 gen_vfp_tosl(0, shift, 1);
6341 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
6343 } else {
6344 return 1;
6346 } else { /* (insn & 0x00380080) == 0 */
6347 int invert;
6348 if (q && (rd & 1)) {
6349 return 1;
6352 op = (insn >> 8) & 0xf;
6353 /* One register and immediate. */
6354 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6355 invert = (insn & (1 << 5)) != 0;
6356 /* Note that op = 2,3,4,5,6,7,10,11,12,13 with imm == 0 is UNPREDICTABLE.
6357 * We choose to not special-case this and will behave as if a
6358 * valid constant encoding of 0 had been given.
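/* Expand the 8-bit immediate into a per-element constant according to
 * the cmode encoding ('op' here is insn bits [11:8]).
 */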
6360 switch (op) {
6361 case 0: case 1:
6362 /* no-op */
6363 break;
6364 case 2: case 3:
6365 imm <<= 8;
6366 break;
6367 case 4: case 5:
6368 imm <<= 16;
6369 break;
6370 case 6: case 7:
6371 imm <<= 24;
6372 break;
6373 case 8: case 9:
6374 imm |= imm << 16;
6375 break;
6376 case 10: case 11:
6377 imm = (imm << 8) | (imm << 24);
6378 break;
6379 case 12:
6380 imm = (imm << 8) | 0xff;
6381 break;
6382 case 13:
6383 imm = (imm << 16) | 0xffff;
6384 break;
6385 case 14:
6386 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6387 if (invert)
6388 imm = ~imm;
6389 break;
6390 case 15:
6391 if (invert) {
6392 return 1;
6394 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6395 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
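/* i.e. abcdefgh expands to the single-precision pattern
 * aBbbbbbc defgh000 00000000 00000000, where B = NOT(b). */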
6396 break;
6398 if (invert)
6399 imm = ~imm;
6401 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6402 if (op & 1 && op < 12) {
6403 tmp = neon_load_reg(rd, pass);
6404 if (invert) {
6405 /* The immediate value has already been inverted, so
6406 BIC becomes AND. */
6407 tcg_gen_andi_i32(tmp, tmp, imm);
6408 } else {
6409 tcg_gen_ori_i32(tmp, tmp, imm);
6411 } else {
6412 /* VMOV, VMVN. */
6413 tmp = tcg_temp_new_i32();
6414 if (op == 14 && invert) {
6415 int n;
6416 uint32_t val;
6417 val = 0;
6418 for (n = 0; n < 4; n++) {
6419 if (imm & (1 << (n + (pass & 1) * 4)))
6420 val |= 0xff << (n * 8);
6422 tcg_gen_movi_i32(tmp, val);
6423 } else {
6424 tcg_gen_movi_i32(tmp, imm);
6427 neon_store_reg(rd, pass, tmp);
6430 } else { /* (insn & 0x00800010 == 0x00800000) */
6431 if (size != 3) {
6432 op = (insn >> 8) & 0xf;
6433 if ((insn & (1 << 6)) == 0) {
6434 /* Three registers of different lengths. */
6435 int src1_wide;
6436 int src2_wide;
6437 int prewiden;
6438 /* undefreq: bit 0 : UNDEF if size == 0
6439 * bit 1 : UNDEF if size == 1
6440 * bit 2 : UNDEF if size == 2
6441 * bit 3 : UNDEF if U == 1
6442 * Note that [2:0] set implies 'always UNDEF'
6444 int undefreq;
6445 /* prewiden, src1_wide, src2_wide, undefreq */
6446 static const int neon_3reg_wide[16][4] = {
6447 {1, 0, 0, 0}, /* VADDL */
6448 {1, 1, 0, 0}, /* VADDW */
6449 {1, 0, 0, 0}, /* VSUBL */
6450 {1, 1, 0, 0}, /* VSUBW */
6451 {0, 1, 1, 0}, /* VADDHN */
6452 {0, 0, 0, 0}, /* VABAL */
6453 {0, 1, 1, 0}, /* VSUBHN */
6454 {0, 0, 0, 0}, /* VABDL */
6455 {0, 0, 0, 0}, /* VMLAL */
6456 {0, 0, 0, 9}, /* VQDMLAL */
6457 {0, 0, 0, 0}, /* VMLSL */
6458 {0, 0, 0, 9}, /* VQDMLSL */
6459 {0, 0, 0, 0}, /* Integer VMULL */
6460 {0, 0, 0, 1}, /* VQDMULL */
6461 {0, 0, 0, 0xa}, /* Polynomial VMULL */
6462 {0, 0, 0, 7}, /* Reserved: always UNDEF */
6465 prewiden = neon_3reg_wide[op][0];
6466 src1_wide = neon_3reg_wide[op][1];
6467 src2_wide = neon_3reg_wide[op][2];
6468 undefreq = neon_3reg_wide[op][3];
6470 if ((undefreq & (1 << size)) ||
6471 ((undefreq & 8) && u)) {
6472 return 1;
6474 if ((src1_wide && (rn & 1)) ||
6475 (src2_wide && (rm & 1)) ||
6476 (!src2_wide && (rd & 1))) {
6477 return 1;
6480 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6481 * outside the loop below as it only performs a single pass.
6483 if (op == 14 && size == 2) {
6484 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6486 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
6487 return 1;
6489 tcg_rn = tcg_temp_new_i64();
6490 tcg_rm = tcg_temp_new_i64();
6491 tcg_rd = tcg_temp_new_i64();
6492 neon_load_reg64(tcg_rn, rn);
6493 neon_load_reg64(tcg_rm, rm);
6494 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6495 neon_store_reg64(tcg_rd, rd);
6496 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6497 neon_store_reg64(tcg_rd, rd + 1);
6498 tcg_temp_free_i64(tcg_rn);
6499 tcg_temp_free_i64(tcg_rm);
6500 tcg_temp_free_i64(tcg_rd);
6501 return 0;
6504 /* Avoid overlapping operands. Wide source operands are
6505 always aligned so will never overlap with wide
6506 destinations in problematic ways. */
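/* e.g. with rd == rm and a narrow rm, pass 0's 64-bit store to d<rd>
 * would clobber the half of d<rm> that pass 1 still needs, so that
 * half is stashed in a scratch slot first. */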
6507 if (rd == rm && !src2_wide) {
6508 tmp = neon_load_reg(rm, 1);
6509 neon_store_scratch(2, tmp);
6510 } else if (rd == rn && !src1_wide) {
6511 tmp = neon_load_reg(rn, 1);
6512 neon_store_scratch(2, tmp);
6514 TCGV_UNUSED_I32(tmp3);
6515 for (pass = 0; pass < 2; pass++) {
6516 if (src1_wide) {
6517 neon_load_reg64(cpu_V0, rn + pass);
6518 TCGV_UNUSED_I32(tmp);
6519 } else {
6520 if (pass == 1 && rd == rn) {
6521 tmp = neon_load_scratch(2);
6522 } else {
6523 tmp = neon_load_reg(rn, pass);
6525 if (prewiden) {
6526 gen_neon_widen(cpu_V0, tmp, size, u);
6529 if (src2_wide) {
6530 neon_load_reg64(cpu_V1, rm + pass);
6531 TCGV_UNUSED_I32(tmp2);
6532 } else {
6533 if (pass == 1 && rd == rm) {
6534 tmp2 = neon_load_scratch(2);
6535 } else {
6536 tmp2 = neon_load_reg(rm, pass);
6538 if (prewiden) {
6539 gen_neon_widen(cpu_V1, tmp2, size, u);
6542 switch (op) {
6543 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6544 gen_neon_addl(size);
6545 break;
6546 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6547 gen_neon_subl(size);
6548 break;
6549 case 5: case 7: /* VABAL, VABDL */
6550 switch ((size << 1) | u) {
6551 case 0:
6552 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6553 break;
6554 case 1:
6555 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6556 break;
6557 case 2:
6558 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6559 break;
6560 case 3:
6561 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6562 break;
6563 case 4:
6564 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6565 break;
6566 case 5:
6567 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6568 break;
6569 default: abort();
6571 tcg_temp_free_i32(tmp2);
6572 tcg_temp_free_i32(tmp);
6573 break;
6574 case 8: case 9: case 10: case 11: case 12: case 13:
6575 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6576 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6577 break;
6578 case 14: /* Polynomial VMULL */
6579 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
6580 tcg_temp_free_i32(tmp2);
6581 tcg_temp_free_i32(tmp);
6582 break;
6583 default: /* 15 is RESERVED: caught earlier */
6584 abort();
6586 if (op == 13) {
6587 /* VQDMULL */
6588 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6589 neon_store_reg64(cpu_V0, rd + pass);
6590 } else if (op == 5 || (op >= 8 && op <= 11)) {
6591 /* Accumulate. */
6592 neon_load_reg64(cpu_V1, rd + pass);
6593 switch (op) {
6594 case 10: /* VMLSL */
6595 gen_neon_negl(cpu_V0, size);
6596 /* Fall through */
6597 case 5: case 8: /* VABAL, VMLAL */
6598 gen_neon_addl(size);
6599 break;
6600 case 9: case 11: /* VQDMLAL, VQDMLSL */
6601 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6602 if (op == 11) {
6603 gen_neon_negl(cpu_V0, size);
6605 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6606 break;
6607 default:
6608 abort();
6610 neon_store_reg64(cpu_V0, rd + pass);
6611 } else if (op == 4 || op == 6) {
6612 /* Narrowing operation. */
6613 tmp = tcg_temp_new_i32();
6614 if (!u) {
6615 switch (size) {
6616 case 0:
6617 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6618 break;
6619 case 1:
6620 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6621 break;
6622 case 2:
6623 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6624 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6625 break;
6626 default: abort();
6628 } else {
6629 switch (size) {
6630 case 0:
6631 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6632 break;
6633 case 1:
6634 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6635 break;
6636 case 2:
6637 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6638 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6639 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6640 break;
6641 default: abort();
6644 if (pass == 0) {
6645 tmp3 = tmp;
6646 } else {
6647 neon_store_reg(rd, 0, tmp3);
6648 neon_store_reg(rd, 1, tmp);
6650 } else {
6651 /* Write back the result. */
6652 neon_store_reg64(cpu_V0, rd + pass);
6655 } else {
6656 /* Two registers and a scalar. NB that for ops of this form
6657 * the ARM ARM labels bit 24 as Q, but it is in our variable
6658 * 'u', not 'q'.
6660 if (size == 0) {
6661 return 1;
6663 switch (op) {
6664 case 1: /* Floating point VMLA scalar */
6665 case 5: /* Floating point VMLS scalar */
6666 case 9: /* Floating point VMUL scalar */
6667 if (size == 1) {
6668 return 1;
6670 /* fall through */
6671 case 0: /* Integer VMLA scalar */
6672 case 4: /* Integer VMLS scalar */
6673 case 8: /* Integer VMUL scalar */
6674 case 12: /* VQDMULH scalar */
6675 case 13: /* VQRDMULH scalar */
6676 if (u && ((rd | rn) & 1)) {
6677 return 1;
6679 tmp = neon_get_scalar(size, rm);
6680 neon_store_scratch(0, tmp);
6681 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6682 tmp = neon_load_scratch(0);
6683 tmp2 = neon_load_reg(rn, pass);
6684 if (op == 12) {
6685 if (size == 1) {
6686 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6687 } else {
6688 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6690 } else if (op == 13) {
6691 if (size == 1) {
6692 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6693 } else {
6694 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6696 } else if (op & 1) {
6697 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6698 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6699 tcg_temp_free_ptr(fpstatus);
6700 } else {
6701 switch (size) {
6702 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6703 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6704 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6705 default: abort();
6708 tcg_temp_free_i32(tmp2);
6709 if (op < 8) {
6710 /* Accumulate. */
6711 tmp2 = neon_load_reg(rd, pass);
6712 switch (op) {
6713 case 0:
6714 gen_neon_add(size, tmp, tmp2);
6715 break;
6716 case 1:
6718 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6719 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6720 tcg_temp_free_ptr(fpstatus);
6721 break;
6723 case 4:
6724 gen_neon_rsb(size, tmp, tmp2);
6725 break;
6726 case 5:
6728 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6729 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6730 tcg_temp_free_ptr(fpstatus);
6731 break;
6733 default:
6734 abort();
6736 tcg_temp_free_i32(tmp2);
6738 neon_store_reg(rd, pass, tmp);
6740 break;
6741 case 3: /* VQDMLAL scalar */
6742 case 7: /* VQDMLSL scalar */
6743 case 11: /* VQDMULL scalar */
6744 if (u == 1) {
6745 return 1;
6747 /* fall through */
6748 case 2: /* VMLAL scalar */
6749 case 6: /* VMLSL scalar */
6750 case 10: /* VMULL scalar */
6751 if (rd & 1) {
6752 return 1;
6754 tmp2 = neon_get_scalar(size, rm);
6755 /* We need a copy of tmp2 because gen_neon_mull
6756 * deletes it during pass 0. */
6757 tmp4 = tcg_temp_new_i32();
6758 tcg_gen_mov_i32(tmp4, tmp2);
6759 tmp3 = neon_load_reg(rn, 1);
6761 for (pass = 0; pass < 2; pass++) {
6762 if (pass == 0) {
6763 tmp = neon_load_reg(rn, 0);
6764 } else {
6765 tmp = tmp3;
6766 tmp2 = tmp4;
6768 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6769 if (op != 11) {
6770 neon_load_reg64(cpu_V1, rd + pass);
6772 switch (op) {
6773 case 6:
6774 gen_neon_negl(cpu_V0, size);
6775 /* Fall through */
6776 case 2:
6777 gen_neon_addl(size);
6778 break;
6779 case 3: case 7:
6780 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6781 if (op == 7) {
6782 gen_neon_negl(cpu_V0, size);
6784 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6785 break;
6786 case 10:
6787 /* no-op */
6788 break;
6789 case 11:
6790 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6791 break;
6792 default:
6793 abort();
6795 neon_store_reg64(cpu_V0, rd + pass);
6799 break;
6800 default: /* 14 and 15 are RESERVED */
6801 return 1;
6804 } else { /* size == 3 */
6805 if (!u) {
6806 /* Extract. */
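/* VEXT: the result is the 8 (or 16, for Q) bytes starting at byte
 * index imm of the concatenation Vm:Vn, with Vn in the low half. */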
6807 imm = (insn >> 8) & 0xf;
6809 if (imm > 7 && !q)
6810 return 1;
6812 if (q && ((rd | rn | rm) & 1)) {
6813 return 1;
6816 if (imm == 0) {
6817 neon_load_reg64(cpu_V0, rn);
6818 if (q) {
6819 neon_load_reg64(cpu_V1, rn + 1);
6821 } else if (imm == 8) {
6822 neon_load_reg64(cpu_V0, rn + 1);
6823 if (q) {
6824 neon_load_reg64(cpu_V1, rm);
6826 } else if (q) {
6827 tmp64 = tcg_temp_new_i64();
6828 if (imm < 8) {
6829 neon_load_reg64(cpu_V0, rn);
6830 neon_load_reg64(tmp64, rn + 1);
6831 } else {
6832 neon_load_reg64(cpu_V0, rn + 1);
6833 neon_load_reg64(tmp64, rm);
6835 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
6836 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
6837 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6838 if (imm < 8) {
6839 neon_load_reg64(cpu_V1, rm);
6840 } else {
6841 neon_load_reg64(cpu_V1, rm + 1);
6842 imm -= 8;
6844 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6845 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6846 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
6847 tcg_temp_free_i64(tmp64);
6848 } else {
6849 /* BUGFIX */
6850 neon_load_reg64(cpu_V0, rn);
6851 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
6852 neon_load_reg64(cpu_V1, rm);
6853 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6854 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6856 neon_store_reg64(cpu_V0, rd);
6857 if (q) {
6858 neon_store_reg64(cpu_V1, rd + 1);
6860 } else if ((insn & (1 << 11)) == 0) {
6861 /* Two register misc. */
6862 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6863 size = (insn >> 18) & 3;
6864 /* UNDEF for unknown op values and bad op-size combinations */
6865 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6866 return 1;
6868 if (neon_2rm_is_v8_op(op) &&
6869 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6870 return 1;
6872 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6873 q && ((rm | rd) & 1)) {
6874 return 1;
6876 switch (op) {
6877 case NEON_2RM_VREV64:
6878 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6879 tmp = neon_load_reg(rm, pass * 2);
6880 tmp2 = neon_load_reg(rm, pass * 2 + 1);
6881 switch (size) {
6882 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6883 case 1: gen_swap_half(tmp); break;
6884 case 2: /* no-op */ break;
6885 default: abort();
6887 neon_store_reg(rd, pass * 2 + 1, tmp);
6888 if (size == 2) {
6889 neon_store_reg(rd, pass * 2, tmp2);
6890 } else {
6891 switch (size) {
6892 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6893 case 1: gen_swap_half(tmp2); break;
6894 default: abort();
6896 neon_store_reg(rd, pass * 2, tmp2);
6899 break;
6900 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6901 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
6902 for (pass = 0; pass < q + 1; pass++) {
6903 tmp = neon_load_reg(rm, pass * 2);
6904 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6905 tmp = neon_load_reg(rm, pass * 2 + 1);
6906 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6907 switch (size) {
6908 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6909 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6910 case 2: tcg_gen_add_i64(CPU_V001); break;
6911 default: abort();
6913 if (op >= NEON_2RM_VPADAL) {
6914 /* Accumulate. */
6915 neon_load_reg64(cpu_V1, rd + pass);
6916 gen_neon_addl(size);
6918 neon_store_reg64(cpu_V0, rd + pass);
6920 break;
6921 case NEON_2RM_VTRN:
6922 if (size == 2) {
6923 int n;
6924 for (n = 0; n < (q ? 4 : 2); n += 2) {
6925 tmp = neon_load_reg(rm, n);
6926 tmp2 = neon_load_reg(rd, n + 1);
6927 neon_store_reg(rm, n, tmp2);
6928 neon_store_reg(rd, n + 1, tmp);
6930 } else {
6931 goto elementwise;
6933 break;
6934 case NEON_2RM_VUZP:
6935 if (gen_neon_unzip(rd, rm, size, q)) {
6936 return 1;
6938 break;
6939 case NEON_2RM_VZIP:
6940 if (gen_neon_zip(rd, rm, size, q)) {
6941 return 1;
6943 break;
6944 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6945 /* also VQMOVUN; op field and mnemonics don't line up */
6946 if (rm & 1) {
6947 return 1;
6949 TCGV_UNUSED_I32(tmp2);
6950 for (pass = 0; pass < 2; pass++) {
6951 neon_load_reg64(cpu_V0, rm + pass);
6952 tmp = tcg_temp_new_i32();
6953 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6954 tmp, cpu_V0);
6955 if (pass == 0) {
6956 tmp2 = tmp;
6957 } else {
6958 neon_store_reg(rd, 0, tmp2);
6959 neon_store_reg(rd, 1, tmp);
6962 break;
6963 case NEON_2RM_VSHLL:
6964 if (q || (rd & 1)) {
6965 return 1;
6967 tmp = neon_load_reg(rm, 0);
6968 tmp2 = neon_load_reg(rm, 1);
6969 for (pass = 0; pass < 2; pass++) {
6970 if (pass == 1)
6971 tmp = tmp2;
6972 gen_neon_widen(cpu_V0, tmp, size, 1);
6973 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
6974 neon_store_reg64(cpu_V0, rd + pass);
6976 break;
6977 case NEON_2RM_VCVT_F16_F32:
6978 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
6979 q || (rm & 1)) {
6980 return 1;
6982 tmp = tcg_temp_new_i32();
6983 tmp2 = tcg_temp_new_i32();
6984 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
6985 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6986 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
6987 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6988 tcg_gen_shli_i32(tmp2, tmp2, 16);
6989 tcg_gen_or_i32(tmp2, tmp2, tmp);
6990 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
6991 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6992 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6993 neon_store_reg(rd, 0, tmp2);
6994 tmp2 = tcg_temp_new_i32();
6995 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6996 tcg_gen_shli_i32(tmp2, tmp2, 16);
6997 tcg_gen_or_i32(tmp2, tmp2, tmp);
6998 neon_store_reg(rd, 1, tmp2);
6999 tcg_temp_free_i32(tmp);
7000 break;
7001 case NEON_2RM_VCVT_F32_F16:
7002 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
7003 q || (rd & 1)) {
7004 return 1;
7006 tmp3 = tcg_temp_new_i32();
7007 tmp = neon_load_reg(rm, 0);
7008 tmp2 = neon_load_reg(rm, 1);
7009 tcg_gen_ext16u_i32(tmp3, tmp);
7010 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
7011 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7012 tcg_gen_shri_i32(tmp3, tmp, 16);
7013 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
7014 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7015 tcg_temp_free_i32(tmp);
7016 tcg_gen_ext16u_i32(tmp3, tmp2);
7017 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
7018 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7019 tcg_gen_shri_i32(tmp3, tmp2, 16);
7020 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
7021 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7022 tcg_temp_free_i32(tmp2);
7023 tcg_temp_free_i32(tmp3);
7024 break;
7025 case NEON_2RM_AESE: case NEON_2RM_AESMC:
7026 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
7027 || ((rm | rd) & 1)) {
7028 return 1;
7030 tmp = tcg_const_i32(rd);
7031 tmp2 = tcg_const_i32(rm);
7033 /* Bit 6 is the lowest opcode bit; it distinguishes between
7034 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7036 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7038 if (op == NEON_2RM_AESE) {
7039 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
7040 } else {
7041 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
7043 tcg_temp_free_i32(tmp);
7044 tcg_temp_free_i32(tmp2);
7045 tcg_temp_free_i32(tmp3);
7046 break;
7047 case NEON_2RM_SHA1H:
7048 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
7049 || ((rm | rd) & 1)) {
7050 return 1;
7052 tmp = tcg_const_i32(rd);
7053 tmp2 = tcg_const_i32(rm);
7055 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
7057 tcg_temp_free_i32(tmp);
7058 tcg_temp_free_i32(tmp2);
7059 break;
7060 case NEON_2RM_SHA1SU1:
7061 if ((rm | rd) & 1) {
7062 return 1;
7064 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7065 if (q) {
7066 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
7067 return 1;
7069 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
7070 return 1;
7072 tmp = tcg_const_i32(rd);
7073 tmp2 = tcg_const_i32(rm);
7074 if (q) {
7075 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
7076 } else {
7077 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
7079 tcg_temp_free_i32(tmp);
7080 tcg_temp_free_i32(tmp2);
7081 break;
7082 default:
7083 elementwise:
7084 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7085 if (neon_2rm_is_float_op(op)) {
7086 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7087 neon_reg_offset(rm, pass));
7088 TCGV_UNUSED_I32(tmp);
7089 } else {
7090 tmp = neon_load_reg(rm, pass);
7092 switch (op) {
7093 case NEON_2RM_VREV32:
7094 switch (size) {
7095 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7096 case 1: gen_swap_half(tmp); break;
7097 default: abort();
7099 break;
7100 case NEON_2RM_VREV16:
7101 gen_rev16(tmp);
7102 break;
7103 case NEON_2RM_VCLS:
7104 switch (size) {
7105 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7106 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7107 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
7108 default: abort();
7110 break;
7111 case NEON_2RM_VCLZ:
7112 switch (size) {
7113 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7114 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7115 case 2: gen_helper_clz(tmp, tmp); break;
7116 default: abort();
7118 break;
7119 case NEON_2RM_VCNT:
7120 gen_helper_neon_cnt_u8(tmp, tmp);
7121 break;
7122 case NEON_2RM_VMVN:
7123 tcg_gen_not_i32(tmp, tmp);
7124 break;
7125 case NEON_2RM_VQABS:
7126 switch (size) {
7127 case 0:
7128 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7129 break;
7130 case 1:
7131 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7132 break;
7133 case 2:
7134 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7135 break;
7136 default: abort();
7138 break;
7139 case NEON_2RM_VQNEG:
7140 switch (size) {
7141 case 0:
7142 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7143 break;
7144 case 1:
7145 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7146 break;
7147 case 2:
7148 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7149 break;
7150 default: abort();
7152 break;
7153 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
7154 tmp2 = tcg_const_i32(0);
7155 switch(size) {
7156 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7157 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7158 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
7159 default: abort();
7161 tcg_temp_free_i32(tmp2);
7162 if (op == NEON_2RM_VCLE0) {
7163 tcg_gen_not_i32(tmp, tmp);
7165 break;
7166 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
7167 tmp2 = tcg_const_i32(0);
7168 switch(size) {
7169 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7170 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7171 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
7172 default: abort();
7174 tcg_temp_free_i32(tmp2);
7175 if (op == NEON_2RM_VCLT0) {
7176 tcg_gen_not_i32(tmp, tmp);
7178 break;
7179 case NEON_2RM_VCEQ0:
7180 tmp2 = tcg_const_i32(0);
7181 switch(size) {
7182 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7183 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7184 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
7185 default: abort();
7187 tcg_temp_free_i32(tmp2);
7188 break;
7189 case NEON_2RM_VABS:
7190 switch(size) {
7191 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7192 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7193 case 2: tcg_gen_abs_i32(tmp, tmp); break;
7194 default: abort();
7196 break;
7197 case NEON_2RM_VNEG:
7198 tmp2 = tcg_const_i32(0);
7199 gen_neon_rsb(size, tmp, tmp2);
7200 tcg_temp_free_i32(tmp2);
7201 break;
7202 case NEON_2RM_VCGT0_F:
7204 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7205 tmp2 = tcg_const_i32(0);
7206 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
7207 tcg_temp_free_i32(tmp2);
7208 tcg_temp_free_ptr(fpstatus);
7209 break;
7211 case NEON_2RM_VCGE0_F:
7213 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7214 tmp2 = tcg_const_i32(0);
7215 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
7216 tcg_temp_free_i32(tmp2);
7217 tcg_temp_free_ptr(fpstatus);
7218 break;
7220 case NEON_2RM_VCEQ0_F:
7222 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7223 tmp2 = tcg_const_i32(0);
7224 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
7225 tcg_temp_free_i32(tmp2);
7226 tcg_temp_free_ptr(fpstatus);
7227 break;
7229 case NEON_2RM_VCLE0_F:
7231 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7232 tmp2 = tcg_const_i32(0);
7233 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
7234 tcg_temp_free_i32(tmp2);
7235 tcg_temp_free_ptr(fpstatus);
7236 break;
7238 case NEON_2RM_VCLT0_F:
7240 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7241 tmp2 = tcg_const_i32(0);
7242 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
7243 tcg_temp_free_i32(tmp2);
7244 tcg_temp_free_ptr(fpstatus);
7245 break;
7247 case NEON_2RM_VABS_F:
7248 gen_vfp_abs(0);
7249 break;
7250 case NEON_2RM_VNEG_F:
7251 gen_vfp_neg(0);
7252 break;
7253 case NEON_2RM_VSWP:
7254 tmp2 = neon_load_reg(rd, pass);
7255 neon_store_reg(rm, pass, tmp2);
7256 break;
7257 case NEON_2RM_VTRN:
7258 tmp2 = neon_load_reg(rd, pass);
7259 switch (size) {
7260 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7261 case 1: gen_neon_trn_u16(tmp, tmp2); break;
7262 default: abort();
7264 neon_store_reg(rm, pass, tmp2);
7265 break;
7266 case NEON_2RM_VRINTN:
7267 case NEON_2RM_VRINTA:
7268 case NEON_2RM_VRINTM:
7269 case NEON_2RM_VRINTP:
7270 case NEON_2RM_VRINTZ:
7272 TCGv_i32 tcg_rmode;
7273 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7274 int rmode;
7276 if (op == NEON_2RM_VRINTZ) {
7277 rmode = FPROUNDING_ZERO;
7278 } else {
7279 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7282 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7283 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7284 cpu_env);
7285 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7286 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7287 cpu_env);
7288 tcg_temp_free_ptr(fpstatus);
7289 tcg_temp_free_i32(tcg_rmode);
7290 break;
7292 case NEON_2RM_VRINTX:
7294 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7295 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7296 tcg_temp_free_ptr(fpstatus);
7297 break;
7299 case NEON_2RM_VCVTAU:
7300 case NEON_2RM_VCVTAS:
7301 case NEON_2RM_VCVTNU:
7302 case NEON_2RM_VCVTNS:
7303 case NEON_2RM_VCVTPU:
7304 case NEON_2RM_VCVTPS:
7305 case NEON_2RM_VCVTMU:
7306 case NEON_2RM_VCVTMS:
7308 bool is_signed = !extract32(insn, 7, 1);
7309 TCGv_ptr fpst = get_fpstatus_ptr(1);
7310 TCGv_i32 tcg_rmode, tcg_shift;
7311 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7313 tcg_shift = tcg_const_i32(0);
7314 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7315 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7316 cpu_env);
7318 if (is_signed) {
7319 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7320 tcg_shift, fpst);
7321 } else {
7322 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7323 tcg_shift, fpst);
7326 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7327 cpu_env);
7328 tcg_temp_free_i32(tcg_rmode);
7329 tcg_temp_free_i32(tcg_shift);
7330 tcg_temp_free_ptr(fpst);
7331 break;
7333 case NEON_2RM_VRECPE:
7335 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7336 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7337 tcg_temp_free_ptr(fpstatus);
7338 break;
7340 case NEON_2RM_VRSQRTE:
7342 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7343 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7344 tcg_temp_free_ptr(fpstatus);
7345 break;
7347 case NEON_2RM_VRECPE_F:
7349 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7350 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7351 tcg_temp_free_ptr(fpstatus);
7352 break;
7354 case NEON_2RM_VRSQRTE_F:
7356 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7357 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7358 tcg_temp_free_ptr(fpstatus);
7359 break;
7361 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
7362 gen_vfp_sito(0, 1);
7363 break;
7364 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
7365 gen_vfp_uito(0, 1);
7366 break;
7367 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
7368 gen_vfp_tosiz(0, 1);
7369 break;
7370 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
7371 gen_vfp_touiz(0, 1);
7372 break;
7373 default:
7374 /* Reserved op values were caught by the
7375 * neon_2rm_sizes[] check earlier.
7377 abort();
7379 if (neon_2rm_is_float_op(op)) {
7380 tcg_gen_st_f32(cpu_F0s, cpu_env,
7381 neon_reg_offset(rd, pass));
7382 } else {
7383 neon_store_reg(rd, pass, tmp);
7386 break;
7388 } else if ((insn & (1 << 10)) == 0) {
7389 /* VTBL, VTBX. */
7390 int n = ((insn >> 8) & 3) + 1;
7391 if ((rn + n) > 32) {
7392 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7393 * helper function running off the end of the register file.
7395 return 1;
7397 n <<= 3;
7398 if (insn & (1 << 6)) {
7399 tmp = neon_load_reg(rd, 0);
7400 } else {
7401 tmp = tcg_temp_new_i32();
7402 tcg_gen_movi_i32(tmp, 0);
7404 tmp2 = neon_load_reg(rm, 0);
7405 tmp4 = tcg_const_i32(rn);
7406 tmp5 = tcg_const_i32(n);
7407 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7408 tcg_temp_free_i32(tmp);
7409 if (insn & (1 << 6)) {
7410 tmp = neon_load_reg(rd, 1);
7411 } else {
7412 tmp = tcg_temp_new_i32();
7413 tcg_gen_movi_i32(tmp, 0);
7415 tmp3 = neon_load_reg(rm, 1);
7416 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
7417 tcg_temp_free_i32(tmp5);
7418 tcg_temp_free_i32(tmp4);
7419 neon_store_reg(rd, 0, tmp2);
7420 neon_store_reg(rd, 1, tmp3);
7421 tcg_temp_free_i32(tmp);
7422 } else if ((insn & 0x380) == 0) {
7423 /* VDUP */
7424 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7425 return 1;
7427 if (insn & (1 << 19)) {
7428 tmp = neon_load_reg(rm, 1);
7429 } else {
7430 tmp = neon_load_reg(rm, 0);
7432 if (insn & (1 << 16)) {
7433 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
7434 } else if (insn & (1 << 17)) {
7435 if ((insn >> 18) & 1)
7436 gen_neon_dup_high16(tmp);
7437 else
7438 gen_neon_dup_low16(tmp);
7440 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7441 tmp2 = tcg_temp_new_i32();
7442 tcg_gen_mov_i32(tmp2, tmp);
7443 neon_store_reg(rd, pass, tmp2);
7445 tcg_temp_free_i32(tmp);
7446 } else {
7447 return 1;
7451 return 0;
7454 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
7456 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7457 const ARMCPRegInfo *ri;
7459 cpnum = (insn >> 8) & 0xf;
7461 /* First check for coprocessor space used for XScale/iwMMXt insns */
7462 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
7463 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7464 return 1;
7466 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7467 return disas_iwmmxt_insn(s, insn);
7468 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7469 return disas_dsp_insn(s, insn);
7471 return 1;
7474 /* Otherwise treat as a generic register access */
7475 is64 = (insn & (1 << 25)) == 0;
7476 if (!is64 && ((insn & (1 << 4)) == 0)) {
7477 /* cdp */
7478 return 1;
7481 crm = insn & 0xf;
7482 if (is64) {
7483 crn = 0;
7484 opc1 = (insn >> 4) & 0xf;
7485 opc2 = 0;
7486 rt2 = (insn >> 16) & 0xf;
7487 } else {
7488 crn = (insn >> 16) & 0xf;
7489 opc1 = (insn >> 21) & 7;
7490 opc2 = (insn >> 5) & 7;
7491 rt2 = 0;
7493 isread = (insn >> 20) & 1;
7494 rt = (insn >> 12) & 0xf;
7496 ri = get_arm_cp_reginfo(s->cp_regs,
7497 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
7498 if (ri) {
7499 /* Check access permissions */
7500 if (!cp_access_ok(s->current_el, ri, isread)) {
7501 return 1;
7504 if (ri->accessfn ||
7505 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
7506 /* Emit code to perform further access permissions checks at
7507 * runtime; this may result in an exception.
7508 * Note that on XScale all cp0..cp13 registers do an access check
7509 * call in order to handle c15_cpar.
7511 TCGv_ptr tmpptr;
7512 TCGv_i32 tcg_syn, tcg_isread;
7513 uint32_t syndrome;
7515 /* Note that since we are an implementation which takes an
7516 * exception on a trapped conditional instruction only if the
7517 * instruction passes its condition code check, we can take
7518 * advantage of the clause in the ARM ARM that allows us to set
7519 * the COND field in the instruction to 0xE in all cases.
7520 * We could fish the actual condition out of the insn (ARM)
7521 * or the condexec bits (Thumb) but it isn't necessary.
7523 switch (cpnum) {
7524 case 14:
7525 if (is64) {
7526 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7527 isread, false);
7528 } else {
7529 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7530 rt, isread, false);
7532 break;
7533 case 15:
7534 if (is64) {
7535 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7536 isread, false);
7537 } else {
7538 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7539 rt, isread, false);
7541 break;
7542 default:
7543 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7544 * so this can only happen if this is an ARMv7 or earlier CPU,
7545 * in which case the syndrome information won't actually be
7546 * guest visible.
7548 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
7549 syndrome = syn_uncategorized();
7550 break;
7553 gen_set_condexec(s);
7554 gen_set_pc_im(s, s->pc - 4);
7555 tmpptr = tcg_const_ptr(ri);
7556 tcg_syn = tcg_const_i32(syndrome);
7557 tcg_isread = tcg_const_i32(isread);
7558 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7559 tcg_isread);
7560 tcg_temp_free_ptr(tmpptr);
7561 tcg_temp_free_i32(tcg_syn);
7562 tcg_temp_free_i32(tcg_isread);
7565 /* Handle special cases first */
7566 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7567 case ARM_CP_NOP:
7568 return 0;
7569 case ARM_CP_WFI:
7570 if (isread) {
7571 return 1;
7573 gen_set_pc_im(s, s->pc);
7574 s->is_jmp = DISAS_WFI;
7575 return 0;
7576 default:
7577 break;
7580 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7581 gen_io_start();
7584 if (isread) {
7585 /* Read */
7586 if (is64) {
7587 TCGv_i64 tmp64;
7588 TCGv_i32 tmp;
7589 if (ri->type & ARM_CP_CONST) {
7590 tmp64 = tcg_const_i64(ri->resetvalue);
7591 } else if (ri->readfn) {
7592 TCGv_ptr tmpptr;
7593 tmp64 = tcg_temp_new_i64();
7594 tmpptr = tcg_const_ptr(ri);
7595 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7596 tcg_temp_free_ptr(tmpptr);
7597 } else {
7598 tmp64 = tcg_temp_new_i64();
7599 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7601 tmp = tcg_temp_new_i32();
7602 tcg_gen_extrl_i64_i32(tmp, tmp64);
7603 store_reg(s, rt, tmp);
7604 tcg_gen_shri_i64(tmp64, tmp64, 32);
7605 tmp = tcg_temp_new_i32();
7606 tcg_gen_extrl_i64_i32(tmp, tmp64);
7607 tcg_temp_free_i64(tmp64);
7608 store_reg(s, rt2, tmp);
7609 } else {
7610 TCGv_i32 tmp;
7611 if (ri->type & ARM_CP_CONST) {
7612 tmp = tcg_const_i32(ri->resetvalue);
7613 } else if (ri->readfn) {
7614 TCGv_ptr tmpptr;
7615 tmp = tcg_temp_new_i32();
7616 tmpptr = tcg_const_ptr(ri);
7617 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7618 tcg_temp_free_ptr(tmpptr);
7619 } else {
7620 tmp = load_cpu_offset(ri->fieldoffset);
7622 if (rt == 15) {
7623 /* Destination register of r15 for 32 bit loads sets
7624 * the condition codes from the high 4 bits of the value
7626 gen_set_nzcv(tmp);
7627 tcg_temp_free_i32(tmp);
7628 } else {
7629 store_reg(s, rt, tmp);
7632 } else {
7633 /* Write */
7634 if (ri->type & ARM_CP_CONST) {
7635 /* If not forbidden by access permissions, treat as WI */
7636 return 0;
7639 if (is64) {
7640 TCGv_i32 tmplo, tmphi;
7641 TCGv_i64 tmp64 = tcg_temp_new_i64();
7642 tmplo = load_reg(s, rt);
7643 tmphi = load_reg(s, rt2);
7644 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7645 tcg_temp_free_i32(tmplo);
7646 tcg_temp_free_i32(tmphi);
7647 if (ri->writefn) {
7648 TCGv_ptr tmpptr = tcg_const_ptr(ri);
7649 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7650 tcg_temp_free_ptr(tmpptr);
7651 } else {
7652 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7654 tcg_temp_free_i64(tmp64);
7655 } else {
7656 if (ri->writefn) {
7657 TCGv_i32 tmp;
7658 TCGv_ptr tmpptr;
7659 tmp = load_reg(s, rt);
7660 tmpptr = tcg_const_ptr(ri);
7661 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7662 tcg_temp_free_ptr(tmpptr);
7663 tcg_temp_free_i32(tmp);
7664 } else {
7665 TCGv_i32 tmp = load_reg(s, rt);
7666 store_cpu_offset(tmp, ri->fieldoffset);
7671 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7672 /* I/O operations must end the TB here (whether read or write) */
7673 gen_io_end();
7674 gen_lookup_tb(s);
7675 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
7676 /* We default to ending the TB on a coprocessor register write,
7677 * but allow this to be suppressed by the register definition
7678 * (usually only necessary to work around guest bugs).
7680 gen_lookup_tb(s);
7683 return 0;
7686 /* Unknown register; this might be a guest error or a QEMU
7687 * unimplemented feature.
7689 if (is64) {
7690 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7691 "64 bit system register cp:%d opc1: %d crm:%d "
7692 "(%s)\n",
7693 isread ? "read" : "write", cpnum, opc1, crm,
7694 s->ns ? "non-secure" : "secure");
7695 } else {
7696 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7697 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7698 "(%s)\n",
7699 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7700 s->ns ? "non-secure" : "secure");
7703 return 1;
7707 /* Store a 64-bit value to a register pair. Clobbers val. */
7708 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
7710 TCGv_i32 tmp;
7711 tmp = tcg_temp_new_i32();
7712 tcg_gen_extrl_i64_i32(tmp, val);
7713 store_reg(s, rlow, tmp);
7714 tmp = tcg_temp_new_i32();
7715 tcg_gen_shri_i64(val, val, 32);
7716 tcg_gen_extrl_i64_i32(tmp, val);
7717 store_reg(s, rhigh, tmp);
7720 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
7721 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
7723 TCGv_i64 tmp;
7724 TCGv_i32 tmp2;
7726 /* Load value and extend to 64 bits. */
7727 tmp = tcg_temp_new_i64();
7728 tmp2 = load_reg(s, rlow);
7729 tcg_gen_extu_i32_i64(tmp, tmp2);
7730 tcg_temp_free_i32(tmp2);
7731 tcg_gen_add_i64(val, val, tmp);
7732 tcg_temp_free_i64(tmp);
7735 /* load and add a 64-bit value from a register pair. */
7736 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
7738 TCGv_i64 tmp;
7739 TCGv_i32 tmpl;
7740 TCGv_i32 tmph;
7742 /* Load 64-bit value rd:rn. */
7743 tmpl = load_reg(s, rlow);
7744 tmph = load_reg(s, rhigh);
7745 tmp = tcg_temp_new_i64();
7746 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7747 tcg_temp_free_i32(tmpl);
7748 tcg_temp_free_i32(tmph);
7749 tcg_gen_add_i64(val, val, tmp);
7750 tcg_temp_free_i64(tmp);
7753 /* Set N and Z flags from hi|lo. */
7754 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
7756 tcg_gen_mov_i32(cpu_NF, hi);
7757 tcg_gen_or_i32(cpu_ZF, lo, hi);
7760 /* Load/Store exclusive instructions are implemented by remembering
7761 the value/address loaded, and seeing if these are the same
7762 when the store is performed. This should be sufficient to implement
7763 the architecturally mandated semantics, and avoids having to monitor
7764 regular stores.
7766 In system emulation mode only one CPU will be running at once, so
7767 this sequence is effectively atomic. In user emulation mode we
7768 throw an exception and handle the atomic operation elsewhere. */
7769 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
7770 TCGv_i32 addr, int size)
7772 TCGv_i32 tmp = tcg_temp_new_i32();
7774 s->is_ldex = true;
7776 switch (size) {
7777 case 0:
7778 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
7779 break;
7780 case 1:
7781 gen_aa32_ld16ua(s, tmp, addr, get_mem_index(s));
7782 break;
7783 case 2:
7784 case 3:
7785 gen_aa32_ld32ua(s, tmp, addr, get_mem_index(s));
7786 break;
7787 default:
7788 abort();
7791 if (size == 3) {
7792 TCGv_i32 tmp2 = tcg_temp_new_i32();
7793 TCGv_i32 tmp3 = tcg_temp_new_i32();
7795 tcg_gen_addi_i32(tmp2, addr, 4);
7796 gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
7797 tcg_temp_free_i32(tmp2);
7798 tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
7799 store_reg(s, rt2, tmp3);
7800 } else {
7801 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
7804 store_reg(s, rt, tmp);
7805 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
7808 static void gen_clrex(DisasContext *s)
7810 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7813 #ifdef CONFIG_USER_ONLY
7814 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7815 TCGv_i32 addr, int size)
7817 tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
7818 tcg_gen_movi_i32(cpu_exclusive_info,
7819 size | (rd << 4) | (rt << 8) | (rt2 << 12));
7820 gen_exception_internal_insn(s, 4, EXCP_STREX);
7822 #else
7823 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7824 TCGv_i32 addr, int size)
7826 TCGv_i32 tmp;
7827 TCGv_i64 val64, extaddr;
7828 TCGLabel *done_label;
7829 TCGLabel *fail_label;
7831 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7832 [addr] = {Rt};
7833 {Rd} = 0;
7834 } else {
7835 {Rd} = 1;
7836 } */
7837 fail_label = gen_new_label();
7838 done_label = gen_new_label();
7839 extaddr = tcg_temp_new_i64();
7840 tcg_gen_extu_i32_i64(extaddr, addr);
7841 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7842 tcg_temp_free_i64(extaddr);
7844 tmp = tcg_temp_new_i32();
7845 switch (size) {
7846 case 0:
7847 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
7848 break;
7849 case 1:
7850 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
7851 break;
7852 case 2:
7853 case 3:
7854 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
7855 break;
7856 default:
7857 abort();
7860 val64 = tcg_temp_new_i64();
7861 if (size == 3) {
7862 TCGv_i32 tmp2 = tcg_temp_new_i32();
7863 TCGv_i32 tmp3 = tcg_temp_new_i32();
7864 tcg_gen_addi_i32(tmp2, addr, 4);
7865 gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
7866 tcg_temp_free_i32(tmp2);
7867 tcg_gen_concat_i32_i64(val64, tmp, tmp3);
7868 tcg_temp_free_i32(tmp3);
7869 } else {
7870 tcg_gen_extu_i32_i64(val64, tmp);
7872 tcg_temp_free_i32(tmp);
7874 tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
7875 tcg_temp_free_i64(val64);
7877 tmp = load_reg(s, rt);
7878 switch (size) {
7879 case 0:
7880 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
7881 break;
7882 case 1:
7883 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
7884 break;
7885 case 2:
7886 case 3:
7887 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7888 break;
7889 default:
7890 abort();
7892 tcg_temp_free_i32(tmp);
7893 if (size == 3) {
7894 tcg_gen_addi_i32(addr, addr, 4);
7895 tmp = load_reg(s, rt2);
7896 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7897 tcg_temp_free_i32(tmp);
7899 tcg_gen_movi_i32(cpu_R[rd], 0);
7900 tcg_gen_br(done_label);
7901 gen_set_label(fail_label);
7902 tcg_gen_movi_i32(cpu_R[rd], 1);
7903 gen_set_label(done_label);
7904 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7906 #endif
7908 /* gen_srs:
7909 * @env: CPUARMState
7910 * @s: DisasContext
7911 * @mode: mode field from insn (which stack to store to)
7912 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7913 * @writeback: true if writeback bit set
7915 * Generate code for the SRS (Store Return State) insn.
7917 static void gen_srs(DisasContext *s,
7918 uint32_t mode, uint32_t amode, bool writeback)
7920 int32_t offset;
7921 TCGv_i32 addr, tmp;
7922 bool undef = false;
7924 /* SRS is:
7925 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
7926 * and specified mode is monitor mode
7927 * - UNDEFINED in Hyp mode
7928 * - UNPREDICTABLE in User or System mode
7929 * - UNPREDICTABLE if the specified mode is:
7930 * -- not implemented
7931 * -- not a valid mode number
7932 * -- a mode that's at a higher exception level
7933 * -- Monitor, if we are Non-secure
7934 * For the UNPREDICTABLE cases we choose to UNDEF.
7936 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
7937 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
7938 return;
7941 if (s->current_el == 0 || s->current_el == 2) {
7942 undef = true;
7945 switch (mode) {
7946 case ARM_CPU_MODE_USR:
7947 case ARM_CPU_MODE_FIQ:
7948 case ARM_CPU_MODE_IRQ:
7949 case ARM_CPU_MODE_SVC:
7950 case ARM_CPU_MODE_ABT:
7951 case ARM_CPU_MODE_UND:
7952 case ARM_CPU_MODE_SYS:
7953 break;
7954 case ARM_CPU_MODE_HYP:
7955 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
7956 undef = true;
7958 break;
7959 case ARM_CPU_MODE_MON:
7960 /* No need to check specifically for "are we non-secure" because
7961 * we've already made EL0 UNDEF and handled the trap for S-EL1;
7962 * so if this isn't EL3 then we must be non-secure.
7964 if (s->current_el != 3) {
7965 undef = true;
7967 break;
7968 default:
7969 undef = true;
7972 if (undef) {
7973 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
7974 default_exception_el(s));
7975 return;
7978 addr = tcg_temp_new_i32();
7979 tmp = tcg_const_i32(mode);
7980 /* get_r13_banked() will raise an exception if called from System mode */
7981 gen_set_condexec(s);
7982 gen_set_pc_im(s, s->pc - 4);
7983 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7984 tcg_temp_free_i32(tmp);
7985 switch (amode) {
7986 case 0: /* DA */
7987 offset = -4;
7988 break;
7989 case 1: /* IA */
7990 offset = 0;
7991 break;
7992 case 2: /* DB */
7993 offset = -8;
7994 break;
7995 case 3: /* IB */
7996 offset = 4;
7997 break;
7998 default:
7999 abort();
8001 tcg_gen_addi_i32(addr, addr, offset);
8002 tmp = load_reg(s, 14);
8003 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8004 tcg_temp_free_i32(tmp);
8005 tmp = load_cpu_field(spsr);
8006 tcg_gen_addi_i32(addr, addr, 4);
8007 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8008 tcg_temp_free_i32(tmp);
8009 if (writeback) {
8010 switch (amode) {
8011 case 0:
8012 offset = -8;
8013 break;
8014 case 1:
8015 offset = 4;
8016 break;
8017 case 2:
8018 offset = -4;
8019 break;
8020 case 3:
8021 offset = 0;
8022 break;
8023 default:
8024 abort();
8026 tcg_gen_addi_i32(addr, addr, offset);
8027 tmp = tcg_const_i32(mode);
8028 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8029 tcg_temp_free_i32(tmp);
8031 tcg_temp_free_i32(addr);
8032 s->is_jmp = DISAS_UPDATE;
8035 static void disas_arm_insn(DisasContext *s, unsigned int insn)
8037 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
8038 TCGv_i32 tmp;
8039 TCGv_i32 tmp2;
8040 TCGv_i32 tmp3;
8041 TCGv_i32 addr;
8042 TCGv_i64 tmp64;
8044 /* M variants do not implement ARM mode. */
8045 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8046 goto illegal_op;
8048 cond = insn >> 28;
8049 if (cond == 0xf){
8050 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8051 * choose to UNDEF. In ARMv5 and above the space is used
8052 * for miscellaneous unconditional instructions.
8054 ARCH(5);
8056 /* Unconditional instructions. */
8057 if (((insn >> 25) & 7) == 1) {
8058 /* NEON Data processing. */
8059 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
8060 goto illegal_op;
8063 if (disas_neon_data_insn(s, insn)) {
8064 goto illegal_op;
8066 return;
8068 if ((insn & 0x0f100000) == 0x04000000) {
8069 /* NEON load/store. */
8070 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
8071 goto illegal_op;
8074 if (disas_neon_ls_insn(s, insn)) {
8075 goto illegal_op;
8077 return;
8079 if ((insn & 0x0f000e10) == 0x0e000a00) {
8080 /* VFP. */
8081 if (disas_vfp_insn(s, insn)) {
8082 goto illegal_op;
8084 return;
8086 if (((insn & 0x0f30f000) == 0x0510f000) ||
8087 ((insn & 0x0f30f010) == 0x0710f000)) {
8088 if ((insn & (1 << 22)) == 0) {
8089 /* PLDW; v7MP */
8090 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
8091 goto illegal_op;
8094 /* Otherwise PLD; v5TE+ */
8095 ARCH(5TE);
8096 return;
8098 if (((insn & 0x0f70f000) == 0x0450f000) ||
8099 ((insn & 0x0f70f010) == 0x0650f000)) {
8100 ARCH(7);
8101 return; /* PLI; V7 */
8103 if (((insn & 0x0f700000) == 0x04100000) ||
8104 ((insn & 0x0f700010) == 0x06100000)) {
8105 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
8106 goto illegal_op;
8108 return; /* v7MP: Unallocated memory hint: must NOP */
8111 if ((insn & 0x0ffffdff) == 0x01010000) {
8112 ARCH(6);
8113 /* setend */
8114 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8115 gen_helper_setend(cpu_env);
8116 s->is_jmp = DISAS_UPDATE;
8118 return;
8119 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8120 switch ((insn >> 4) & 0xf) {
8121 case 1: /* clrex */
8122 ARCH(6K);
8123 gen_clrex(s);
8124 return;
8125 case 4: /* dsb */
8126 case 5: /* dmb */
8127 ARCH(7);
8128 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
8129 return;
8130 case 6: /* isb */
8131 /* We need to break the TB after this insn to execute
8132 * self-modifying code correctly and also to take
8133 * any pending interrupts immediately.
8135 gen_lookup_tb(s);
8136 return;
8137 default:
8138 goto illegal_op;
8140 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8141 /* srs */
8142 ARCH(6);
8143 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
8144 return;
8145 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
8146 /* rfe */
8147 int32_t offset;
8148 if (IS_USER(s))
8149 goto illegal_op;
8150 ARCH(6);
8151 rn = (insn >> 16) & 0xf;
8152 addr = load_reg(s, rn);
8153 i = (insn >> 23) & 3;
8154 switch (i) {
8155 case 0: offset = -4; break; /* DA */
8156 case 1: offset = 0; break; /* IA */
8157 case 2: offset = -8; break; /* DB */
8158 case 3: offset = 4; break; /* IB */
8159 default: abort();
8161 if (offset)
8162 tcg_gen_addi_i32(addr, addr, offset);
8163 /* Load PC into tmp and CPSR into tmp2. */
8164 tmp = tcg_temp_new_i32();
8165 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8166 tcg_gen_addi_i32(addr, addr, 4);
8167 tmp2 = tcg_temp_new_i32();
8168 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
8169 if (insn & (1 << 21)) {
8170 /* Base writeback. */
8171 switch (i) {
8172 case 0: offset = -8; break;
8173 case 1: offset = 4; break;
8174 case 2: offset = -4; break;
8175 case 3: offset = 0; break;
8176 default: abort();
8178 if (offset)
8179 tcg_gen_addi_i32(addr, addr, offset);
8180 store_reg(s, rn, addr);
8181 } else {
8182 tcg_temp_free_i32(addr);
8184 gen_rfe(s, tmp, tmp2);
8185 return;
8186 } else if ((insn & 0x0e000000) == 0x0a000000) {
8187 /* branch link and change to thumb (blx <offset>) */
8188 int32_t offset;
8190 val = (uint32_t)s->pc;
8191 tmp = tcg_temp_new_i32();
8192 tcg_gen_movi_i32(tmp, val);
8193 store_reg(s, 14, tmp);
8194 /* Sign-extend the 24-bit offset */
8195 offset = (((int32_t)insn) << 8) >> 8;
8196 /* offset * 4 + bit24 * 2 + (thumb bit) */
8197 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8198 /* pipeline offset */
8199 val += 4;
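/* i.e. target = (address of this insn) + 8 + SignExtend(imm24) * 4 + H * 2,
 * with bit 0 set so that gen_bx_im switches to Thumb state. */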
8200 /* protected by the ARCH(5) check above, near the start of the uncond block */
8201 gen_bx_im(s, val);
8202 return;
8203 } else if ((insn & 0x0e000f00) == 0x0c000100) {
8204 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
8205 /* iWMMXt register transfer. */
8206 if (extract32(s->c15_cpar, 1, 1)) {
8207 if (!disas_iwmmxt_insn(s, insn)) {
8208 return;
8212 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8213 /* Coprocessor double register transfer. */
8214 ARCH(5TE);
8215 } else if ((insn & 0x0f000010) == 0x0e000010) {
8216 /* Additional coprocessor register transfer. */
8217 } else if ((insn & 0x0ff10020) == 0x01000000) {
8218 uint32_t mask;
8219 uint32_t val;
8220 /* cps (privileged) */
8221 if (IS_USER(s))
8222 return;
8223 mask = val = 0;
8224 if (insn & (1 << 19)) {
8225 if (insn & (1 << 8))
8226 mask |= CPSR_A;
8227 if (insn & (1 << 7))
8228 mask |= CPSR_I;
8229 if (insn & (1 << 6))
8230 mask |= CPSR_F;
8231 if (insn & (1 << 18))
8232 val |= mask;
8234 if (insn & (1 << 17)) {
8235 mask |= CPSR_M;
8236 val |= (insn & 0x1f);
8238 if (mask) {
8239 gen_set_psr_im(s, mask, 0, val);
8241 return;
8243 goto illegal_op;
8245 if (cond != 0xe) {
8246 /* If the condition is not 'always', generate a conditional jump
8247 to the next instruction. */
8248 s->condlabel = gen_new_label();
8249 arm_gen_test_cc(cond ^ 1, s->condlabel);
8250 s->condjmp = 1;
8252 if ((insn & 0x0f900000) == 0x03000000) {
8253 if ((insn & (1 << 21)) == 0) {
8254 ARCH(6T2);
8255 rd = (insn >> 12) & 0xf;
8256 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8257 if ((insn & (1 << 22)) == 0) {
8258 /* MOVW */
8259 tmp = tcg_temp_new_i32();
8260 tcg_gen_movi_i32(tmp, val);
8261 } else {
8262 /* MOVT */
8263 tmp = load_reg(s, rd);
8264 tcg_gen_ext16u_i32(tmp, tmp);
8265 tcg_gen_ori_i32(tmp, tmp, val << 16);
8267 store_reg(s, rd, tmp);
8268 } else {
8269 if (((insn >> 12) & 0xf) != 0xf)
8270 goto illegal_op;
8271 if (((insn >> 16) & 0xf) == 0) {
8272 gen_nop_hint(s, insn & 0xff);
8273 } else {
8274 /* CPSR = immediate */
8275 val = insn & 0xff;
8276 shift = ((insn >> 8) & 0xf) * 2;
8277 if (shift)
8278 val = (val >> shift) | (val << (32 - shift));
8279 i = ((insn & (1 << 22)) != 0);
8280 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8281 i, val)) {
8282 goto illegal_op;
8286 } else if ((insn & 0x0f900000) == 0x01000000
8287 && (insn & 0x00000090) != 0x00000090) {
8288 /* miscellaneous instructions */
8289 op1 = (insn >> 21) & 3;
8290 sh = (insn >> 4) & 0xf;
8291 rm = insn & 0xf;
8292 switch (sh) {
8293 case 0x0: /* MSR, MRS */
8294 if (insn & (1 << 9)) {
8295 /* MSR (banked) and MRS (banked) */
8296 int sysm = extract32(insn, 16, 4) |
8297 (extract32(insn, 8, 1) << 4);
8298 int r = extract32(insn, 22, 1);
8300 if (op1 & 1) {
8301 /* MSR (banked) */
8302 gen_msr_banked(s, r, sysm, rm);
8303 } else {
8304 /* MRS (banked) */
8305 int rd = extract32(insn, 12, 4);
8307 gen_mrs_banked(s, r, sysm, rd);
8309 break;
8312 /* MSR, MRS (for PSRs) */
8313 if (op1 & 1) {
8314 /* PSR = reg */
8315 tmp = load_reg(s, rm);
8316 i = ((op1 & 2) != 0);
8317 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
8318 goto illegal_op;
8319 } else {
8320 /* reg = PSR */
8321 rd = (insn >> 12) & 0xf;
8322 if (op1 & 2) {
8323 if (IS_USER(s))
8324 goto illegal_op;
8325 tmp = load_cpu_field(spsr);
8326 } else {
8327 tmp = tcg_temp_new_i32();
8328 gen_helper_cpsr_read(tmp, cpu_env);
8330 store_reg(s, rd, tmp);
8332 break;
8333 case 0x1:
8334 if (op1 == 1) {
8335 /* branch/exchange thumb (bx). */
8336 ARCH(4T);
8337 tmp = load_reg(s, rm);
8338 gen_bx(s, tmp);
8339 } else if (op1 == 3) {
8340 /* clz */
8341 ARCH(5);
8342 rd = (insn >> 12) & 0xf;
8343 tmp = load_reg(s, rm);
8344 gen_helper_clz(tmp, tmp);
8345 store_reg(s, rd, tmp);
8346 } else {
8347 goto illegal_op;
8349 break;
8350 case 0x2:
8351 if (op1 == 1) {
8352 ARCH(5J); /* bxj */
8353 /* Trivial implementation equivalent to bx. */
8354 tmp = load_reg(s, rm);
8355 gen_bx(s, tmp);
8356 } else {
8357 goto illegal_op;
8359 break;
8360 case 0x3:
8361 if (op1 != 1)
8362 goto illegal_op;
8364 ARCH(5);
8365 /* branch link/exchange thumb (blx) */
8366 tmp = load_reg(s, rm);
8367 tmp2 = tcg_temp_new_i32();
8368 tcg_gen_movi_i32(tmp2, s->pc);
8369 store_reg(s, 14, tmp2);
8370 gen_bx(s, tmp);
8371 break;
8372 case 0x4:
8374 /* crc32/crc32c */
8375 uint32_t c = extract32(insn, 8, 4);
8377 /* Check that this CPU supports ARMv8 CRC instructions.
8378 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8379 * Bits 8, 10 and 11 should be zero.
8381 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
8382 (c & 0xd) != 0) {
8383 goto illegal_op;
8386 rn = extract32(insn, 16, 4);
8387 rd = extract32(insn, 12, 4);
8389 tmp = load_reg(s, rn);
8390 tmp2 = load_reg(s, rm);
8391 if (op1 == 0) {
8392 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8393 } else if (op1 == 1) {
8394 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8396 tmp3 = tcg_const_i32(1 << op1);
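/* The final argument is the number of bytes per CRC step: 1, 2 or 4
 * for the byte, halfword and word forms respectively. */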
8397 if (c & 0x2) {
8398 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8399 } else {
8400 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8402 tcg_temp_free_i32(tmp2);
8403 tcg_temp_free_i32(tmp3);
8404 store_reg(s, rd, tmp);
8405 break;
8407 case 0x5: /* saturating add/subtract */
8408 ARCH(5TE);
8409 rd = (insn >> 12) & 0xf;
8410 rn = (insn >> 16) & 0xf;
8411 tmp = load_reg(s, rm);
8412 tmp2 = load_reg(s, rn);
8413 if (op1 & 2)
8414 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
8415 if (op1 & 1)
8416 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
8417 else
8418 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8419 tcg_temp_free_i32(tmp2);
8420 store_reg(s, rd, tmp);
8421 break;
8422 case 7:
8424 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
8425 switch (op1) {
8426 case 0:
8427 /* HLT */
8428 gen_hlt(s, imm16);
8429 break;
8430 case 1:
8431 /* bkpt */
8432 ARCH(5);
8433 gen_exception_insn(s, 4, EXCP_BKPT,
8434 syn_aa32_bkpt(imm16, false),
8435 default_exception_el(s));
8436 break;
8437 case 2:
8438 /* Hypervisor call (v7) */
8439 ARCH(7);
8440 if (IS_USER(s)) {
8441 goto illegal_op;
8443 gen_hvc(s, imm16);
8444 break;
8445 case 3:
8446 /* Secure monitor call (v6+) */
8447 ARCH(6K);
8448 if (IS_USER(s)) {
8449 goto illegal_op;
8451 gen_smc(s);
8452 break;
8453 default:
8454 g_assert_not_reached();
8456 break;
8458 case 0x8: /* signed multiply */
8459 case 0xa:
8460 case 0xc:
8461 case 0xe:
8462 ARCH(5TE);
8463 rs = (insn >> 8) & 0xf;
8464 rn = (insn >> 12) & 0xf;
8465 rd = (insn >> 16) & 0xf;
8466 if (op1 == 1) {
8467 /* (32 * 16) >> 16 */
8468 tmp = load_reg(s, rm);
8469 tmp2 = load_reg(s, rs);
8470 if (sh & 4)
8471 tcg_gen_sari_i32(tmp2, tmp2, 16);
8472 else
8473 gen_sxth(tmp2);
8474 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8475 tcg_gen_shri_i64(tmp64, tmp64, 16);
8476 tmp = tcg_temp_new_i32();
8477 tcg_gen_extrl_i64_i32(tmp, tmp64);
8478 tcg_temp_free_i64(tmp64);
8479 if ((sh & 2) == 0) {
8480 tmp2 = load_reg(s, rn);
8481 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8482 tcg_temp_free_i32(tmp2);
8484 store_reg(s, rd, tmp);
8485 } else {
8486 /* 16 * 16 */
8487 tmp = load_reg(s, rm);
8488 tmp2 = load_reg(s, rs);
8489 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
8490 tcg_temp_free_i32(tmp2);
8491 if (op1 == 2) {
8492 tmp64 = tcg_temp_new_i64();
8493 tcg_gen_ext_i32_i64(tmp64, tmp);
8494 tcg_temp_free_i32(tmp);
8495 gen_addq(s, tmp64, rn, rd);
8496 gen_storeq_reg(s, rn, rd, tmp64);
8497 tcg_temp_free_i64(tmp64);
8498 } else {
8499 if (op1 == 0) {
8500 tmp2 = load_reg(s, rn);
8501 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8502 tcg_temp_free_i32(tmp2);
8504 store_reg(s, rd, tmp);
8507 break;
8508 default:
8509 goto illegal_op;
8511 } else if (((insn & 0x0e000000) == 0 &&
8512 (insn & 0x00000090) != 0x90) ||
8513 ((insn & 0x0e000000) == (1 << 25))) {
8514 int set_cc, logic_cc, shiftop;
8516 op1 = (insn >> 21) & 0xf;
8517 set_cc = (insn >> 20) & 1;
8518 logic_cc = table_logic_cc[op1] & set_cc;
8520 /* data processing instruction */
8521 if (insn & (1 << 25)) {
8522 /* immediate operand */
8523 val = insn & 0xff;
8524 shift = ((insn >> 8) & 0xf) * 2;
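/* Standard A32 modified immediate: rotate the 8-bit value right by
 * twice the 4-bit rotate field; e.g. imm12 == 0x4ff gives 0xff000000. */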
8525 if (shift) {
8526 val = (val >> shift) | (val << (32 - shift));
8528 tmp2 = tcg_temp_new_i32();
8529 tcg_gen_movi_i32(tmp2, val);
8530 if (logic_cc && shift) {
8531 gen_set_CF_bit31(tmp2);
8533 } else {
8534 /* register */
8535 rm = (insn) & 0xf;
8536 tmp2 = load_reg(s, rm);
8537 shiftop = (insn >> 5) & 3;
8538 if (!(insn & (1 << 4))) {
8539 shift = (insn >> 7) & 0x1f;
8540 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8541 } else {
8542 rs = (insn >> 8) & 0xf;
8543 tmp = load_reg(s, rs);
8544 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
8547 if (op1 != 0x0f && op1 != 0x0d) {
8548 rn = (insn >> 16) & 0xf;
8549 tmp = load_reg(s, rn);
8550 } else {
8551 TCGV_UNUSED_I32(tmp);
8553 rd = (insn >> 12) & 0xf;
8554 switch(op1) {
8555 case 0x00:
8556 tcg_gen_and_i32(tmp, tmp, tmp2);
8557 if (logic_cc) {
8558 gen_logic_CC(tmp);
8560 store_reg_bx(s, rd, tmp);
8561 break;
8562 case 0x01:
8563 tcg_gen_xor_i32(tmp, tmp, tmp2);
8564 if (logic_cc) {
8565 gen_logic_CC(tmp);
8567 store_reg_bx(s, rd, tmp);
8568 break;
8569 case 0x02:
8570 if (set_cc && rd == 15) {
8571 /* SUBS r15, ... is used for exception return. */
8572 if (IS_USER(s)) {
8573 goto illegal_op;
8575 gen_sub_CC(tmp, tmp, tmp2);
8576 gen_exception_return(s, tmp);
8577 } else {
8578 if (set_cc) {
8579 gen_sub_CC(tmp, tmp, tmp2);
8580 } else {
8581 tcg_gen_sub_i32(tmp, tmp, tmp2);
8583 store_reg_bx(s, rd, tmp);
8585 break;
8586 case 0x03:
8587 if (set_cc) {
8588 gen_sub_CC(tmp, tmp2, tmp);
8589 } else {
8590 tcg_gen_sub_i32(tmp, tmp2, tmp);
8592 store_reg_bx(s, rd, tmp);
8593 break;
8594 case 0x04:
8595 if (set_cc) {
8596 gen_add_CC(tmp, tmp, tmp2);
8597 } else {
8598 tcg_gen_add_i32(tmp, tmp, tmp2);
8600 store_reg_bx(s, rd, tmp);
8601 break;
8602 case 0x05:
8603 if (set_cc) {
8604 gen_adc_CC(tmp, tmp, tmp2);
8605 } else {
8606 gen_add_carry(tmp, tmp, tmp2);
8608 store_reg_bx(s, rd, tmp);
8609 break;
8610 case 0x06:
8611 if (set_cc) {
8612 gen_sbc_CC(tmp, tmp, tmp2);
8613 } else {
8614 gen_sub_carry(tmp, tmp, tmp2);
8616 store_reg_bx(s, rd, tmp);
8617 break;
8618 case 0x07:
8619 if (set_cc) {
8620 gen_sbc_CC(tmp, tmp2, tmp);
8621 } else {
8622 gen_sub_carry(tmp, tmp2, tmp);
8624 store_reg_bx(s, rd, tmp);
8625 break;
8626 case 0x08:
8627 if (set_cc) {
8628 tcg_gen_and_i32(tmp, tmp, tmp2);
8629 gen_logic_CC(tmp);
8631 tcg_temp_free_i32(tmp);
8632 break;
8633 case 0x09:
8634 if (set_cc) {
8635 tcg_gen_xor_i32(tmp, tmp, tmp2);
8636 gen_logic_CC(tmp);
8638 tcg_temp_free_i32(tmp);
8639 break;
8640 case 0x0a:
8641 if (set_cc) {
8642 gen_sub_CC(tmp, tmp, tmp2);
8644 tcg_temp_free_i32(tmp);
8645 break;
8646 case 0x0b:
8647 if (set_cc) {
8648 gen_add_CC(tmp, tmp, tmp2);
8650 tcg_temp_free_i32(tmp);
8651 break;
8652 case 0x0c:
8653 tcg_gen_or_i32(tmp, tmp, tmp2);
8654 if (logic_cc) {
8655 gen_logic_CC(tmp);
8657 store_reg_bx(s, rd, tmp);
8658 break;
8659 case 0x0d:
8660 if (logic_cc && rd == 15) {
8661 /* MOVS r15, ... is used for exception return. */
8662 if (IS_USER(s)) {
8663 goto illegal_op;
8665 gen_exception_return(s, tmp2);
8666 } else {
8667 if (logic_cc) {
8668 gen_logic_CC(tmp2);
8670 store_reg_bx(s, rd, tmp2);
8672 break;
8673 case 0x0e:
8674 tcg_gen_andc_i32(tmp, tmp, tmp2);
8675 if (logic_cc) {
8676 gen_logic_CC(tmp);
8678 store_reg_bx(s, rd, tmp);
8679 break;
8680 default:
8681 case 0x0f:
8682 tcg_gen_not_i32(tmp2, tmp2);
8683 if (logic_cc) {
8684 gen_logic_CC(tmp2);
8686 store_reg_bx(s, rd, tmp2);
8687 break;
8689 if (op1 != 0x0f && op1 != 0x0d) {
8690 tcg_temp_free_i32(tmp2);
8692 } else {
8693 /* other instructions */
8694 op1 = (insn >> 24) & 0xf;
8695 switch(op1) {
8696 case 0x0:
8697 case 0x1:
8698 /* multiplies, extra load/stores */
8699 sh = (insn >> 5) & 3;
8700 if (sh == 0) {
8701 if (op1 == 0x0) {
8702 rd = (insn >> 16) & 0xf;
8703 rn = (insn >> 12) & 0xf;
8704 rs = (insn >> 8) & 0xf;
8705 rm = (insn) & 0xf;
8706 op1 = (insn >> 20) & 0xf;
8707 switch (op1) {
8708 case 0: case 1: case 2: case 3: case 6:
8709 /* 32 bit mul */
8710 tmp = load_reg(s, rs);
8711 tmp2 = load_reg(s, rm);
8712 tcg_gen_mul_i32(tmp, tmp, tmp2);
8713 tcg_temp_free_i32(tmp2);
8714 if (insn & (1 << 22)) {
8715 /* Subtract (mls) */
8716 ARCH(6T2);
8717 tmp2 = load_reg(s, rn);
8718 tcg_gen_sub_i32(tmp, tmp2, tmp);
8719 tcg_temp_free_i32(tmp2);
8720 } else if (insn & (1 << 21)) {
8721 /* Add */
8722 tmp2 = load_reg(s, rn);
8723 tcg_gen_add_i32(tmp, tmp, tmp2);
8724 tcg_temp_free_i32(tmp2);
8726 if (insn & (1 << 20))
8727 gen_logic_CC(tmp);
8728 store_reg(s, rd, tmp);
8729 break;
8730 case 4:
8731 /* 64 bit mul double accumulate (UMAAL) */
8732 ARCH(6);
8733 tmp = load_reg(s, rs);
8734 tmp2 = load_reg(s, rm);
8735 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8736 gen_addq_lo(s, tmp64, rn);
8737 gen_addq_lo(s, tmp64, rd);
8738 gen_storeq_reg(s, rn, rd, tmp64);
8739 tcg_temp_free_i64(tmp64);
8740 break;
8741 case 8: case 9: case 10: case 11:
8742 case 12: case 13: case 14: case 15:
8743 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8744 tmp = load_reg(s, rs);
8745 tmp2 = load_reg(s, rm);
8746 if (insn & (1 << 22)) {
8747 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8748 } else {
8749 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
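/* tmp now holds the low half of the 64-bit product and tmp2 the high half;
 * the accumulating forms add the existing RdLo:RdHi pair in as a 64-bit
 * value via tcg_gen_add2_i32 below. */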
8751 if (insn & (1 << 21)) { /* mult accumulate */
8752 TCGv_i32 al = load_reg(s, rn);
8753 TCGv_i32 ah = load_reg(s, rd);
8754 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
8755 tcg_temp_free_i32(al);
8756 tcg_temp_free_i32(ah);
8758 if (insn & (1 << 20)) {
8759 gen_logicq_cc(tmp, tmp2);
8761 store_reg(s, rn, tmp);
8762 store_reg(s, rd, tmp2);
8763 break;
8764 default:
8765 goto illegal_op;
8767 } else {
8768 rn = (insn >> 16) & 0xf;
8769 rd = (insn >> 12) & 0xf;
8770 if (insn & (1 << 23)) {
8771 /* load/store exclusive */
8772 int op2 = (insn >> 8) & 3;
8773 op1 = (insn >> 21) & 0x3;
8775 switch (op2) {
8776 case 0: /* lda/stl */
8777 if (op1 == 1) {
8778 goto illegal_op;
8780 ARCH(8);
8781 break;
8782 case 1: /* reserved */
8783 goto illegal_op;
8784 case 2: /* ldaex/stlex */
8785 ARCH(8);
8786 break;
8787 case 3: /* ldrex/strex */
8788 if (op1) {
8789 ARCH(6K);
8790 } else {
8791 ARCH(6);
8793 break;
8796 addr = tcg_temp_local_new_i32();
8797 load_reg_var(s, addr, rn);
8799 /* Since the emulation does not have barriers,
8800 the acquire/release semantics need no special
8801 handling */
8802 if (op2 == 0) {
8803 if (insn & (1 << 20)) {
8804 tmp = tcg_temp_new_i32();
8805 switch (op1) {
8806 case 0: /* lda */
8807 gen_aa32_ld32u(s, tmp, addr,
8808 get_mem_index(s));
8809 break;
8810 case 2: /* ldab */
8811 gen_aa32_ld8u(s, tmp, addr,
8812 get_mem_index(s));
8813 break;
8814 case 3: /* ldah */
8815 gen_aa32_ld16u(s, tmp, addr,
8816 get_mem_index(s));
8817 break;
8818 default:
8819 abort();
8821 store_reg(s, rd, tmp);
8822 } else {
8823 rm = insn & 0xf;
8824 tmp = load_reg(s, rm);
8825 switch (op1) {
8826 case 0: /* stl */
8827 gen_aa32_st32(s, tmp, addr,
8828 get_mem_index(s));
8829 break;
8830 case 2: /* stlb */
8831 gen_aa32_st8(s, tmp, addr,
8832 get_mem_index(s));
8833 break;
8834 case 3: /* stlh */
8835 gen_aa32_st16(s, tmp, addr,
8836 get_mem_index(s));
8837 break;
8838 default:
8839 abort();
8841 tcg_temp_free_i32(tmp);
8843 } else if (insn & (1 << 20)) {
8844 switch (op1) {
8845 case 0: /* ldrex */
8846 gen_load_exclusive(s, rd, 15, addr, 2);
8847 break;
8848 case 1: /* ldrexd */
8849 gen_load_exclusive(s, rd, rd + 1, addr, 3);
8850 break;
8851 case 2: /* ldrexb */
8852 gen_load_exclusive(s, rd, 15, addr, 0);
8853 break;
8854 case 3: /* ldrexh */
8855 gen_load_exclusive(s, rd, 15, addr, 1);
8856 break;
8857 default:
8858 abort();
8860 } else {
8861 rm = insn & 0xf;
8862 switch (op1) {
8863 case 0: /* strex */
8864 gen_store_exclusive(s, rd, rm, 15, addr, 2);
8865 break;
8866 case 1: /* strexd */
8867 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
8868 break;
8869 case 2: /* strexb */
8870 gen_store_exclusive(s, rd, rm, 15, addr, 0);
8871 break;
8872 case 3: /* strexh */
8873 gen_store_exclusive(s, rd, rm, 15, addr, 1);
8874 break;
8875 default:
8876 abort();
8879 tcg_temp_free_i32(addr);
8880 } else {
8881 /* SWP instruction */
8882 rm = (insn) & 0xf;
8884 /* ??? This is not really atomic. However we know
8885 we never have multiple CPUs running in parallel,
8886 so it is good enough. */
8887 addr = load_reg(s, rn);
8888 tmp = load_reg(s, rm);
8889 tmp2 = tcg_temp_new_i32();
8890 if (insn & (1 << 22)) {
8891 gen_aa32_ld8u(s, tmp2, addr, get_mem_index(s));
8892 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
8893 } else {
8894 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
8895 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8897 tcg_temp_free_i32(tmp);
8898 tcg_temp_free_i32(addr);
8899 store_reg(s, rd, tmp2);
8902 } else {
8903 int address_offset;
8904 bool load = insn & (1 << 20);
8905 bool doubleword = false;
8906 /* Misc load/store */
8907 rn = (insn >> 16) & 0xf;
8908 rd = (insn >> 12) & 0xf;
8910 if (!load && (sh & 2)) {
8911 /* doubleword */
8912 ARCH(5TE);
8913 if (rd & 1) {
8914 /* UNPREDICTABLE; we choose to UNDEF */
8915 goto illegal_op;
8917 load = (sh & 1) == 0;
8918 doubleword = true;
8921 addr = load_reg(s, rn);
8922 if (insn & (1 << 24))
8923 gen_add_datah_offset(s, insn, 0, addr);
8924 address_offset = 0;
8926 if (doubleword) {
8927 if (!load) {
8928 /* store */
8929 tmp = load_reg(s, rd);
8930 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8931 tcg_temp_free_i32(tmp);
8932 tcg_gen_addi_i32(addr, addr, 4);
8933 tmp = load_reg(s, rd + 1);
8934 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8935 tcg_temp_free_i32(tmp);
8936 } else {
8937 /* load */
8938 tmp = tcg_temp_new_i32();
8939 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8940 store_reg(s, rd, tmp);
8941 tcg_gen_addi_i32(addr, addr, 4);
8942 tmp = tcg_temp_new_i32();
8943 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8944 rd++;
8946 address_offset = -4;
8947 } else if (load) {
8948 /* load */
8949 tmp = tcg_temp_new_i32();
8950 switch (sh) {
8951 case 1:
8952 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8953 break;
8954 case 2:
8955 gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
8956 break;
8957 default:
8958 case 3:
8959 gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
8960 break;
8962 } else {
8963 /* store */
8964 tmp = load_reg(s, rd);
8965 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
8966 tcg_temp_free_i32(tmp);
8968 /* Perform base writeback before the loaded value to
8969 ensure correct behavior with overlapping index registers.
8970 ldrd with base writeback is undefined if the
8971 destination and index registers overlap. */
8972 if (!(insn & (1 << 24))) {
8973 gen_add_datah_offset(s, insn, address_offset, addr);
8974 store_reg(s, rn, addr);
8975 } else if (insn & (1 << 21)) {
8976 if (address_offset)
8977 tcg_gen_addi_i32(addr, addr, address_offset);
8978 store_reg(s, rn, addr);
8979 } else {
8980 tcg_temp_free_i32(addr);
8982 if (load) {
8983 /* Complete the load. */
8984 store_reg(s, rd, tmp);
8987 break;
8988 case 0x4:
8989 case 0x5:
8990 goto do_ldst;
8991 case 0x6:
8992 case 0x7:
8993 if (insn & (1 << 4)) {
8994 ARCH(6);
8995 /* ARMv6 Media instructions. */
8996 rm = insn & 0xf;
8997 rn = (insn >> 16) & 0xf;
8998 rd = (insn >> 12) & 0xf;
8999 rs = (insn >> 8) & 0xf;
9000 switch ((insn >> 23) & 3) {
9001 case 0: /* Parallel add/subtract. */
9002 op1 = (insn >> 20) & 7;
9003 tmp = load_reg(s, rn);
9004 tmp2 = load_reg(s, rm);
9005 sh = (insn >> 5) & 7;
9006 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
9007 goto illegal_op;
9008 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
9009 tcg_temp_free_i32(tmp2);
9010 store_reg(s, rd, tmp);
9011 break;
9012 case 1:
9013 if ((insn & 0x00700020) == 0) {
9014 /* Halfword pack. */
9015 tmp = load_reg(s, rn);
9016 tmp2 = load_reg(s, rm);
9017 shift = (insn >> 7) & 0x1f;
9018 if (insn & (1 << 6)) {
9019 /* pkhtb */
9020 if (shift == 0)
9021 shift = 31;
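/* An encoded shift of 0 means ASR #32 for PKHTB; ASR #31 yields the same
 * sign-fill in the low halfword that is extracted below, so it is used
 * instead. */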
9022 tcg_gen_sari_i32(tmp2, tmp2, shift);
9023 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9024 tcg_gen_ext16u_i32(tmp2, tmp2);
9025 } else {
9026 /* pkhbt */
9027 if (shift)
9028 tcg_gen_shli_i32(tmp2, tmp2, shift);
9029 tcg_gen_ext16u_i32(tmp, tmp);
9030 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9032 tcg_gen_or_i32(tmp, tmp, tmp2);
9033 tcg_temp_free_i32(tmp2);
9034 store_reg(s, rd, tmp);
9035 } else if ((insn & 0x00200020) == 0x00200000) {
9036 /* [us]sat */
9037 tmp = load_reg(s, rm);
9038 shift = (insn >> 7) & 0x1f;
9039 if (insn & (1 << 6)) {
9040 if (shift == 0)
9041 shift = 31;
9042 tcg_gen_sari_i32(tmp, tmp, shift);
9043 } else {
9044 tcg_gen_shli_i32(tmp, tmp, shift);
9046 sh = (insn >> 16) & 0x1f;
9047 tmp2 = tcg_const_i32(sh);
9048 if (insn & (1 << 22))
9049 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
9050 else
9051 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
9052 tcg_temp_free_i32(tmp2);
9053 store_reg(s, rd, tmp);
9054 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9055 /* [us]sat16 */
9056 tmp = load_reg(s, rm);
9057 sh = (insn >> 16) & 0x1f;
9058 tmp2 = tcg_const_i32(sh);
9059 if (insn & (1 << 22))
9060 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9061 else
9062 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9063 tcg_temp_free_i32(tmp2);
9064 store_reg(s, rd, tmp);
9065 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9066 /* Select bytes. */
9067 tmp = load_reg(s, rn);
9068 tmp2 = load_reg(s, rm);
9069 tmp3 = tcg_temp_new_i32();
9070 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
9071 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
9072 tcg_temp_free_i32(tmp3);
9073 tcg_temp_free_i32(tmp2);
9074 store_reg(s, rd, tmp);
9075 } else if ((insn & 0x000003e0) == 0x00000060) {
9076 tmp = load_reg(s, rm);
9077 shift = (insn >> 10) & 3;
9078 /* ??? In many cases it's not necessary to do a
9079 rotate; a shift is sufficient. */
9080 if (shift != 0)
9081 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9082 op1 = (insn >> 20) & 7;
9083 switch (op1) {
9084 case 0: gen_sxtb16(tmp); break;
9085 case 2: gen_sxtb(tmp); break;
9086 case 3: gen_sxth(tmp); break;
9087 case 4: gen_uxtb16(tmp); break;
9088 case 6: gen_uxtb(tmp); break;
9089 case 7: gen_uxth(tmp); break;
9090 default: goto illegal_op;
9092 if (rn != 15) {
9093 tmp2 = load_reg(s, rn);
9094 if ((op1 & 3) == 0) {
9095 gen_add16(tmp, tmp2);
9096 } else {
9097 tcg_gen_add_i32(tmp, tmp, tmp2);
9098 tcg_temp_free_i32(tmp2);
9101 store_reg(s, rd, tmp);
9102 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9103 /* rev */
9104 tmp = load_reg(s, rm);
9105 if (insn & (1 << 22)) {
9106 if (insn & (1 << 7)) {
9107 gen_revsh(tmp);
9108 } else {
9109 ARCH(6T2);
9110 gen_helper_rbit(tmp, tmp);
9112 } else {
9113 if (insn & (1 << 7))
9114 gen_rev16(tmp);
9115 else
9116 tcg_gen_bswap32_i32(tmp, tmp);
9118 store_reg(s, rd, tmp);
9119 } else {
9120 goto illegal_op;
9122 break;
9123 case 2: /* Multiplies (Type 3). */
9124 switch ((insn >> 20) & 0x7) {
9125 case 5:
9126 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9127 /* op2 not 00x or 11x : UNDEF */
9128 goto illegal_op;
9130 /* Signed multiply most significant [accumulate].
9131 (SMMUL, SMMLA, SMMLS) */
9132 tmp = load_reg(s, rm);
9133 tmp2 = load_reg(s, rs);
9134 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9136 if (rd != 15) {
9137 tmp = load_reg(s, rd);
9138 if (insn & (1 << 6)) {
9139 tmp64 = gen_subq_msw(tmp64, tmp);
9140 } else {
9141 tmp64 = gen_addq_msw(tmp64, tmp);
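/* Bit 5 is the R (round) bit (SMMULR/SMMLAR/SMMLSR): adding 0x80000000
 * before the high word is extracted rounds the result to nearest instead
 * of truncating toward minus infinity. */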
9144 if (insn & (1 << 5)) {
9145 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9147 tcg_gen_shri_i64(tmp64, tmp64, 32);
9148 tmp = tcg_temp_new_i32();
9149 tcg_gen_extrl_i64_i32(tmp, tmp64);
9150 tcg_temp_free_i64(tmp64);
9151 store_reg(s, rn, tmp);
9152 break;
9153 case 0:
9154 case 4:
9155 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9156 if (insn & (1 << 7)) {
9157 goto illegal_op;
9159 tmp = load_reg(s, rm);
9160 tmp2 = load_reg(s, rs);
9161 if (insn & (1 << 5))
9162 gen_swap_half(tmp2);
9163 gen_smul_dual(tmp, tmp2);
9164 if (insn & (1 << 22)) {
9165 /* smlald, smlsld */
9166 TCGv_i64 tmp64_2;
9168 tmp64 = tcg_temp_new_i64();
9169 tmp64_2 = tcg_temp_new_i64();
9170 tcg_gen_ext_i32_i64(tmp64, tmp);
9171 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
9172 tcg_temp_free_i32(tmp);
9173 tcg_temp_free_i32(tmp2);
9174 if (insn & (1 << 6)) {
9175 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9176 } else {
9177 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9179 tcg_temp_free_i64(tmp64_2);
9180 gen_addq(s, tmp64, rd, rn);
9181 gen_storeq_reg(s, rd, rn, tmp64);
9182 tcg_temp_free_i64(tmp64);
9183 } else {
9184 /* smuad, smusd, smlad, smlsd */
9185 if (insn & (1 << 6)) {
9186 /* This subtraction cannot overflow. */
9187 tcg_gen_sub_i32(tmp, tmp, tmp2);
9188 } else {
9189 /* This addition cannot overflow 32 bits;
9190 * however it may overflow considered as a
9191 * signed operation, in which case we must set
9192 * the Q flag.
9193 */
9194 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9196 tcg_temp_free_i32(tmp2);
9197 if (rd != 15)
9198 {
9199 tmp2 = load_reg(s, rd);
9200 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9201 tcg_temp_free_i32(tmp2);
9202 }
9203 store_reg(s, rn, tmp);
9205 break;
9206 case 1:
9207 case 3:
9208 /* SDIV, UDIV */
9209 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
9210 goto illegal_op;
9212 if (((insn >> 5) & 7) || (rd != 15)) {
9213 goto illegal_op;
9215 tmp = load_reg(s, rm);
9216 tmp2 = load_reg(s, rs);
9217 if (insn & (1 << 21)) {
9218 gen_helper_udiv(tmp, tmp, tmp2);
9219 } else {
9220 gen_helper_sdiv(tmp, tmp, tmp2);
9222 tcg_temp_free_i32(tmp2);
9223 store_reg(s, rn, tmp);
9224 break;
9225 default:
9226 goto illegal_op;
9228 break;
9229 case 3:
9230 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9231 switch (op1) {
9232 case 0: /* Unsigned sum of absolute differences. */
9233 ARCH(6);
9234 tmp = load_reg(s, rm);
9235 tmp2 = load_reg(s, rs);
9236 gen_helper_usad8(tmp, tmp, tmp2);
9237 tcg_temp_free_i32(tmp2);
9238 if (rd != 15) {
9239 tmp2 = load_reg(s, rd);
9240 tcg_gen_add_i32(tmp, tmp, tmp2);
9241 tcg_temp_free_i32(tmp2);
9243 store_reg(s, rn, tmp);
9244 break;
9245 case 0x20: case 0x24: case 0x28: case 0x2c:
9246 /* Bitfield insert/clear. */
9247 ARCH(6T2);
9248 shift = (insn >> 7) & 0x1f;
9249 i = (insn >> 16) & 0x1f;
9250 if (i < shift) {
9251 /* UNPREDICTABLE; we choose to UNDEF */
9252 goto illegal_op;
9254 i = i + 1 - shift;
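/* i is now the field width: e.g. msb = 15, lsb = 8 gives a width of 8, and
 * the deposit below copies bits [7:0] of the source into bits [15:8] of
 * rd. */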
9255 if (rm == 15) {
9256 tmp = tcg_temp_new_i32();
9257 tcg_gen_movi_i32(tmp, 0);
9258 } else {
9259 tmp = load_reg(s, rm);
9261 if (i != 32) {
9262 tmp2 = load_reg(s, rd);
9263 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
9264 tcg_temp_free_i32(tmp2);
9266 store_reg(s, rd, tmp);
9267 break;
9268 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9269 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
9270 ARCH(6T2);
9271 tmp = load_reg(s, rm);
9272 shift = (insn >> 7) & 0x1f;
9273 i = ((insn >> 16) & 0x1f) + 1;
9274 if (shift + i > 32)
9275 goto illegal_op;
9276 if (i < 32) {
9277 if (op1 & 0x20) {
9278 gen_ubfx(tmp, shift, (1u << i) - 1);
9279 } else {
9280 gen_sbfx(tmp, shift, i);
9283 store_reg(s, rd, tmp);
9284 break;
9285 default:
9286 goto illegal_op;
9288 break;
9290 break;
9292 do_ldst:
9293 /* Check for undefined extension instructions
9294 * per the ARM Bible, i.e.:
9295 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9296 */
9297 sh = (0xf << 20) | (0xf << 4);
9298 if (op1 == 0x7 && ((insn & sh) == sh))
9300 goto illegal_op;
9302 /* load/store byte/word */
9303 rn = (insn >> 16) & 0xf;
9304 rd = (insn >> 12) & 0xf;
9305 tmp2 = load_reg(s, rn);
9306 if ((insn & 0x01200000) == 0x00200000) {
9307 /* ldrt/strt */
9308 i = get_a32_user_mem_index(s);
9309 } else {
9310 i = get_mem_index(s);
9312 if (insn & (1 << 24))
9313 gen_add_data_offset(s, insn, tmp2);
9314 if (insn & (1 << 20)) {
9315 /* load */
9316 tmp = tcg_temp_new_i32();
9317 if (insn & (1 << 22)) {
9318 gen_aa32_ld8u(s, tmp, tmp2, i);
9319 } else {
9320 gen_aa32_ld32u(s, tmp, tmp2, i);
9322 } else {
9323 /* store */
9324 tmp = load_reg(s, rd);
9325 if (insn & (1 << 22)) {
9326 gen_aa32_st8(s, tmp, tmp2, i);
9327 } else {
9328 gen_aa32_st32(s, tmp, tmp2, i);
9330 tcg_temp_free_i32(tmp);
9332 if (!(insn & (1 << 24))) {
9333 gen_add_data_offset(s, insn, tmp2);
9334 store_reg(s, rn, tmp2);
9335 } else if (insn & (1 << 21)) {
9336 store_reg(s, rn, tmp2);
9337 } else {
9338 tcg_temp_free_i32(tmp2);
9340 if (insn & (1 << 20)) {
9341 /* Complete the load. */
9342 store_reg_from_load(s, rd, tmp);
9344 break;
9345 case 0x08:
9346 case 0x09:
9348 int j, n, loaded_base;
9349 bool exc_return = false;
9350 bool is_load = extract32(insn, 20, 1);
9351 bool user = false;
9352 TCGv_i32 loaded_var;
9353 /* load/store multiple words */
9354 /* XXX: store correct base if write back */
9355 if (insn & (1 << 22)) {
9356 /* LDM (user), LDM (exception return) and STM (user) */
9357 if (IS_USER(s))
9358 goto illegal_op; /* only usable in supervisor mode */
9360 if (is_load && extract32(insn, 15, 1)) {
9361 exc_return = true;
9362 } else {
9363 user = true;
9366 rn = (insn >> 16) & 0xf;
9367 addr = load_reg(s, rn);
9369 /* compute total size */
9370 loaded_base = 0;
9371 TCGV_UNUSED_I32(loaded_var);
9372 n = 0;
9373 for(i=0;i<16;i++) {
9374 if (insn & (1 << i))
9375 n++;
9377 /* XXX: test invalid n == 0 case ? */
9378 if (insn & (1 << 23)) {
9379 if (insn & (1 << 24)) {
9380 /* pre increment */
9381 tcg_gen_addi_i32(addr, addr, 4);
9382 } else {
9383 /* post increment */
9385 } else {
9386 if (insn & (1 << 24)) {
9387 /* pre decrement */
9388 tcg_gen_addi_i32(addr, addr, -(n * 4));
9389 } else {
9390 /* post decrement */
9391 if (n != 1)
9392 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
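/* Registers are always transferred in ascending address order, so for the
 * decrementing modes addr has just been biased down to the lowest address
 * used; the loop below then steps upward by 4 after each transfer. */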
9395 j = 0;
9396 for(i=0;i<16;i++) {
9397 if (insn & (1 << i)) {
9398 if (is_load) {
9399 /* load */
9400 tmp = tcg_temp_new_i32();
9401 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9402 if (user) {
9403 tmp2 = tcg_const_i32(i);
9404 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
9405 tcg_temp_free_i32(tmp2);
9406 tcg_temp_free_i32(tmp);
9407 } else if (i == rn) {
9408 loaded_var = tmp;
9409 loaded_base = 1;
9410 } else if (rn == 15 && exc_return) {
9411 store_pc_exc_ret(s, tmp);
9412 } else {
9413 store_reg_from_load(s, i, tmp);
9415 } else {
9416 /* store */
9417 if (i == 15) {
9418 /* special case: r15 = PC + 8 */
9419 val = (long)s->pc + 4;
9420 tmp = tcg_temp_new_i32();
9421 tcg_gen_movi_i32(tmp, val);
9422 } else if (user) {
9423 tmp = tcg_temp_new_i32();
9424 tmp2 = tcg_const_i32(i);
9425 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
9426 tcg_temp_free_i32(tmp2);
9427 } else {
9428 tmp = load_reg(s, i);
9430 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9431 tcg_temp_free_i32(tmp);
9433 j++;
9434 /* no need to add after the last transfer */
9435 if (j != n)
9436 tcg_gen_addi_i32(addr, addr, 4);
9439 if (insn & (1 << 21)) {
9440 /* write back */
9441 if (insn & (1 << 23)) {
9442 if (insn & (1 << 24)) {
9443 /* pre increment */
9444 } else {
9445 /* post increment */
9446 tcg_gen_addi_i32(addr, addr, 4);
9448 } else {
9449 if (insn & (1 << 24)) {
9450 /* pre decrement */
9451 if (n != 1)
9452 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9453 } else {
9454 /* post decrement */
9455 tcg_gen_addi_i32(addr, addr, -(n * 4));
9458 store_reg(s, rn, addr);
9459 } else {
9460 tcg_temp_free_i32(addr);
9462 if (loaded_base) {
9463 store_reg(s, rn, loaded_var);
9465 if (exc_return) {
9466 /* Restore CPSR from SPSR. */
9467 tmp = load_cpu_field(spsr);
9468 gen_helper_cpsr_write_eret(cpu_env, tmp);
9469 tcg_temp_free_i32(tmp);
9470 s->is_jmp = DISAS_JUMP;
9473 break;
9474 case 0xa:
9475 case 0xb:
9477 int32_t offset;
9479 /* branch (and link) */
9480 val = (int32_t)s->pc;
9481 if (insn & (1 << 24)) {
9482 tmp = tcg_temp_new_i32();
9483 tcg_gen_movi_i32(tmp, val);
9484 store_reg(s, 14, tmp);
9486 offset = sextract32(insn << 2, 0, 26);
9487 val += offset + 4;
9488 gen_jmp(s, val);
9490 break;
9491 case 0xc:
9492 case 0xd:
9493 case 0xe:
9494 if (((insn >> 8) & 0xe) == 10) {
9495 /* VFP. */
9496 if (disas_vfp_insn(s, insn)) {
9497 goto illegal_op;
9499 } else if (disas_coproc_insn(s, insn)) {
9500 /* Coprocessor. */
9501 goto illegal_op;
9503 break;
9504 case 0xf:
9505 /* swi */
9506 gen_set_pc_im(s, s->pc);
9507 s->svc_imm = extract32(insn, 0, 24);
9508 s->is_jmp = DISAS_SWI;
9509 break;
9510 default:
9511 illegal_op:
9512 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9513 default_exception_el(s));
9514 break;
9519 /* Return true if this is a Thumb-2 logical op. */
9520 static int
9521 thumb2_logic_op(int op)
9523 return (op < 8);
9526 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9527 then set condition code flags based on the result of the operation.
9528 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9529 to the high bit of T1.
9530 Returns zero if the opcode is valid. */
9532 static int
9533 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9534 TCGv_i32 t0, TCGv_i32 t1)
9536 int logic_cc;
9538 logic_cc = 0;
9539 switch (op) {
9540 case 0: /* and */
9541 tcg_gen_and_i32(t0, t0, t1);
9542 logic_cc = conds;
9543 break;
9544 case 1: /* bic */
9545 tcg_gen_andc_i32(t0, t0, t1);
9546 logic_cc = conds;
9547 break;
9548 case 2: /* orr */
9549 tcg_gen_or_i32(t0, t0, t1);
9550 logic_cc = conds;
9551 break;
9552 case 3: /* orn */
9553 tcg_gen_orc_i32(t0, t0, t1);
9554 logic_cc = conds;
9555 break;
9556 case 4: /* eor */
9557 tcg_gen_xor_i32(t0, t0, t1);
9558 logic_cc = conds;
9559 break;
9560 case 8: /* add */
9561 if (conds)
9562 gen_add_CC(t0, t0, t1);
9563 else
9564 tcg_gen_add_i32(t0, t0, t1);
9565 break;
9566 case 10: /* adc */
9567 if (conds)
9568 gen_adc_CC(t0, t0, t1);
9569 else
9570 gen_adc(t0, t1);
9571 break;
9572 case 11: /* sbc */
9573 if (conds) {
9574 gen_sbc_CC(t0, t0, t1);
9575 } else {
9576 gen_sub_carry(t0, t0, t1);
9578 break;
9579 case 13: /* sub */
9580 if (conds)
9581 gen_sub_CC(t0, t0, t1);
9582 else
9583 tcg_gen_sub_i32(t0, t0, t1);
9584 break;
9585 case 14: /* rsb */
9586 if (conds)
9587 gen_sub_CC(t0, t1, t0);
9588 else
9589 tcg_gen_sub_i32(t0, t1, t0);
9590 break;
9591 default: /* 5, 6, 7, 9, 12, 15. */
9592 return 1;
9594 if (logic_cc) {
9595 gen_logic_CC(t0);
9596 if (shifter_out)
9597 gen_set_CF_bit31(t1);
9599 return 0;
9602 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9603 is not legal. */
9604 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9606 uint32_t insn, imm, shift, offset;
9607 uint32_t rd, rn, rm, rs;
9608 TCGv_i32 tmp;
9609 TCGv_i32 tmp2;
9610 TCGv_i32 tmp3;
9611 TCGv_i32 addr;
9612 TCGv_i64 tmp64;
9613 int op;
9614 int shiftop;
9615 int conds;
9616 int logic_cc;
9618 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9619 || arm_dc_feature(s, ARM_FEATURE_M))) {
9620 /* Thumb-1 cores may need to treat bl and blx as a pair of
9621 16-bit instructions to get correct prefetch abort behavior. */
9622 insn = insn_hw1;
9623 if ((insn & (1 << 12)) == 0) {
9624 ARCH(5);
9625 /* Second half of blx. */
9626 offset = ((insn & 0x7ff) << 1);
9627 tmp = load_reg(s, 14);
9628 tcg_gen_addi_i32(tmp, tmp, offset);
9629 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9631 tmp2 = tcg_temp_new_i32();
9632 tcg_gen_movi_i32(tmp2, s->pc | 1);
9633 store_reg(s, 14, tmp2);
9634 gen_bx(s, tmp);
9635 return 0;
9637 if (insn & (1 << 11)) {
9638 /* Second half of bl. */
9639 offset = ((insn & 0x7ff) << 1) | 1;
9640 tmp = load_reg(s, 14);
9641 tcg_gen_addi_i32(tmp, tmp, offset);
9643 tmp2 = tcg_temp_new_i32();
9644 tcg_gen_movi_i32(tmp2, s->pc | 1);
9645 store_reg(s, 14, tmp2);
9646 gen_bx(s, tmp);
9647 return 0;
9649 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9650 /* Instruction spans a page boundary. Implement it as two
9651 16-bit instructions in case the second half causes a
9652 prefetch abort. */
9653 offset = ((int32_t)insn << 21) >> 9;
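/* First half of a Thumb BL/BLX pair: sign-extend hw1[10:0] into offset
 * bits [22:12] and park the partial target in r14; the second half,
 * decoded separately, supplies bits [11:1] and performs the branch. */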
9654 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9655 return 0;
9657 /* Fall through to 32-bit decode. */
9660 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
9661 s->pc += 2;
9662 insn |= (uint32_t)insn_hw1 << 16;
9664 if ((insn & 0xf800e800) != 0xf000e800) {
9665 ARCH(6T2);
9668 rn = (insn >> 16) & 0xf;
9669 rs = (insn >> 12) & 0xf;
9670 rd = (insn >> 8) & 0xf;
9671 rm = insn & 0xf;
9672 switch ((insn >> 25) & 0xf) {
9673 case 0: case 1: case 2: case 3:
9674 /* 16-bit instructions. Should never happen. */
9675 abort();
9676 case 4:
9677 if (insn & (1 << 22)) {
9678 /* Other load/store, table branch. */
9679 if (insn & 0x01200000) {
9680 /* Load/store doubleword. */
9681 if (rn == 15) {
9682 addr = tcg_temp_new_i32();
9683 tcg_gen_movi_i32(addr, s->pc & ~3);
9684 } else {
9685 addr = load_reg(s, rn);
9687 offset = (insn & 0xff) * 4;
9688 if ((insn & (1 << 23)) == 0)
9689 offset = -offset;
9690 if (insn & (1 << 24)) {
9691 tcg_gen_addi_i32(addr, addr, offset);
9692 offset = 0;
9694 if (insn & (1 << 20)) {
9695 /* ldrd */
9696 tmp = tcg_temp_new_i32();
9697 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9698 store_reg(s, rs, tmp);
9699 tcg_gen_addi_i32(addr, addr, 4);
9700 tmp = tcg_temp_new_i32();
9701 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9702 store_reg(s, rd, tmp);
9703 } else {
9704 /* strd */
9705 tmp = load_reg(s, rs);
9706 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9707 tcg_temp_free_i32(tmp);
9708 tcg_gen_addi_i32(addr, addr, 4);
9709 tmp = load_reg(s, rd);
9710 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9711 tcg_temp_free_i32(tmp);
9713 if (insn & (1 << 21)) {
9714 /* Base writeback. */
9715 if (rn == 15)
9716 goto illegal_op;
9717 tcg_gen_addi_i32(addr, addr, offset - 4);
9718 store_reg(s, rn, addr);
9719 } else {
9720 tcg_temp_free_i32(addr);
9722 } else if ((insn & (1 << 23)) == 0) {
9723 /* Load/store exclusive word. */
9724 addr = tcg_temp_local_new_i32();
9725 load_reg_var(s, addr, rn);
9726 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
9727 if (insn & (1 << 20)) {
9728 gen_load_exclusive(s, rs, 15, addr, 2);
9729 } else {
9730 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9732 tcg_temp_free_i32(addr);
9733 } else if ((insn & (7 << 5)) == 0) {
9734 /* Table Branch. */
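/* TBB/TBH: load an unsigned byte (TBB) or halfword (TBH) from
 * Rn + Rm (TBB) or Rn + 2*Rm (TBH), double it and add it to the PC to
 * form the branch target. */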
9735 if (rn == 15) {
9736 addr = tcg_temp_new_i32();
9737 tcg_gen_movi_i32(addr, s->pc);
9738 } else {
9739 addr = load_reg(s, rn);
9741 tmp = load_reg(s, rm);
9742 tcg_gen_add_i32(addr, addr, tmp);
9743 if (insn & (1 << 4)) {
9744 /* tbh */
9745 tcg_gen_add_i32(addr, addr, tmp);
9746 tcg_temp_free_i32(tmp);
9747 tmp = tcg_temp_new_i32();
9748 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9749 } else { /* tbb */
9750 tcg_temp_free_i32(tmp);
9751 tmp = tcg_temp_new_i32();
9752 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9754 tcg_temp_free_i32(addr);
9755 tcg_gen_shli_i32(tmp, tmp, 1);
9756 tcg_gen_addi_i32(tmp, tmp, s->pc);
9757 store_reg(s, 15, tmp);
9758 } else {
9759 int op2 = (insn >> 6) & 0x3;
9760 op = (insn >> 4) & 0x3;
9761 switch (op2) {
9762 case 0:
9763 goto illegal_op;
9764 case 1:
9765 /* Load/store exclusive byte/halfword/doubleword */
9766 if (op == 2) {
9767 goto illegal_op;
9769 ARCH(7);
9770 break;
9771 case 2:
9772 /* Load-acquire/store-release */
9773 if (op == 3) {
9774 goto illegal_op;
9776 /* Fall through */
9777 case 3:
9778 /* Load-acquire/store-release exclusive */
9779 ARCH(8);
9780 break;
9782 addr = tcg_temp_local_new_i32();
9783 load_reg_var(s, addr, rn);
9784 if (!(op2 & 1)) {
9785 if (insn & (1 << 20)) {
9786 tmp = tcg_temp_new_i32();
9787 switch (op) {
9788 case 0: /* ldab */
9789 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9790 break;
9791 case 1: /* ldah */
9792 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9793 break;
9794 case 2: /* lda */
9795 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9796 break;
9797 default:
9798 abort();
9800 store_reg(s, rs, tmp);
9801 } else {
9802 tmp = load_reg(s, rs);
9803 switch (op) {
9804 case 0: /* stlb */
9805 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
9806 break;
9807 case 1: /* stlh */
9808 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
9809 break;
9810 case 2: /* stl */
9811 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9812 break;
9813 default:
9814 abort();
9816 tcg_temp_free_i32(tmp);
9818 } else if (insn & (1 << 20)) {
9819 gen_load_exclusive(s, rs, rd, addr, op);
9820 } else {
9821 gen_store_exclusive(s, rm, rs, rd, addr, op);
9823 tcg_temp_free_i32(addr);
9825 } else {
9826 /* Load/store multiple, RFE, SRS. */
9827 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
9828 /* RFE, SRS: not available in user mode or on M profile */
9829 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9830 goto illegal_op;
9832 if (insn & (1 << 20)) {
9833 /* rfe */
9834 addr = load_reg(s, rn);
9835 if ((insn & (1 << 24)) == 0)
9836 tcg_gen_addi_i32(addr, addr, -8);
9837 /* Load PC into tmp and CPSR into tmp2. */
9838 tmp = tcg_temp_new_i32();
9839 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9840 tcg_gen_addi_i32(addr, addr, 4);
9841 tmp2 = tcg_temp_new_i32();
9842 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9843 if (insn & (1 << 21)) {
9844 /* Base writeback. */
9845 if (insn & (1 << 24)) {
9846 tcg_gen_addi_i32(addr, addr, 4);
9847 } else {
9848 tcg_gen_addi_i32(addr, addr, -4);
9850 store_reg(s, rn, addr);
9851 } else {
9852 tcg_temp_free_i32(addr);
9854 gen_rfe(s, tmp, tmp2);
9855 } else {
9856 /* srs */
9857 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9858 insn & (1 << 21));
9860 } else {
9861 int i, loaded_base = 0;
9862 TCGv_i32 loaded_var;
9863 /* Load/store multiple. */
9864 addr = load_reg(s, rn);
9865 offset = 0;
9866 for (i = 0; i < 16; i++) {
9867 if (insn & (1 << i))
9868 offset += 4;
9870 if (insn & (1 << 24)) {
9871 tcg_gen_addi_i32(addr, addr, -offset);
9874 TCGV_UNUSED_I32(loaded_var);
9875 for (i = 0; i < 16; i++) {
9876 if ((insn & (1 << i)) == 0)
9877 continue;
9878 if (insn & (1 << 20)) {
9879 /* Load. */
9880 tmp = tcg_temp_new_i32();
9881 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9882 if (i == 15) {
9883 gen_bx(s, tmp);
9884 } else if (i == rn) {
9885 loaded_var = tmp;
9886 loaded_base = 1;
9887 } else {
9888 store_reg(s, i, tmp);
9890 } else {
9891 /* Store. */
9892 tmp = load_reg(s, i);
9893 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9894 tcg_temp_free_i32(tmp);
9896 tcg_gen_addi_i32(addr, addr, 4);
9898 if (loaded_base) {
9899 store_reg(s, rn, loaded_var);
9901 if (insn & (1 << 21)) {
9902 /* Base register writeback. */
9903 if (insn & (1 << 24)) {
9904 tcg_gen_addi_i32(addr, addr, -offset);
9906 /* Fault if writeback register is in register list. */
9907 if (insn & (1 << rn))
9908 goto illegal_op;
9909 store_reg(s, rn, addr);
9910 } else {
9911 tcg_temp_free_i32(addr);
9915 break;
9916 case 5:
9918 op = (insn >> 21) & 0xf;
9919 if (op == 6) {
9920 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9921 goto illegal_op;
9923 /* Halfword pack. */
9924 tmp = load_reg(s, rn);
9925 tmp2 = load_reg(s, rm);
9926 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9927 if (insn & (1 << 5)) {
9928 /* pkhtb */
9929 if (shift == 0)
9930 shift = 31;
9931 tcg_gen_sari_i32(tmp2, tmp2, shift);
9932 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9933 tcg_gen_ext16u_i32(tmp2, tmp2);
9934 } else {
9935 /* pkhbt */
9936 if (shift)
9937 tcg_gen_shli_i32(tmp2, tmp2, shift);
9938 tcg_gen_ext16u_i32(tmp, tmp);
9939 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9941 tcg_gen_or_i32(tmp, tmp, tmp2);
9942 tcg_temp_free_i32(tmp2);
9943 store_reg(s, rd, tmp);
9944 } else {
9945 /* Data processing register constant shift. */
9946 if (rn == 15) {
9947 tmp = tcg_temp_new_i32();
9948 tcg_gen_movi_i32(tmp, 0);
9949 } else {
9950 tmp = load_reg(s, rn);
9952 tmp2 = load_reg(s, rm);
9954 shiftop = (insn >> 4) & 3;
9955 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9956 conds = (insn & (1 << 20)) != 0;
9957 logic_cc = (conds && thumb2_logic_op(op));
9958 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9959 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9960 goto illegal_op;
9961 tcg_temp_free_i32(tmp2);
9962 if (rd != 15) {
9963 store_reg(s, rd, tmp);
9964 } else {
9965 tcg_temp_free_i32(tmp);
9968 break;
9969 case 13: /* Misc data processing. */
9970 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9971 if (op < 4 && (insn & 0xf000) != 0xf000)
9972 goto illegal_op;
9973 switch (op) {
9974 case 0: /* Register controlled shift. */
9975 tmp = load_reg(s, rn);
9976 tmp2 = load_reg(s, rm);
9977 if ((insn & 0x70) != 0)
9978 goto illegal_op;
9979 op = (insn >> 21) & 3;
9980 logic_cc = (insn & (1 << 20)) != 0;
9981 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9982 if (logic_cc)
9983 gen_logic_CC(tmp);
9984 store_reg_bx(s, rd, tmp);
9985 break;
9986 case 1: /* Sign/zero extend. */
9987 op = (insn >> 20) & 7;
9988 switch (op) {
9989 case 0: /* SXTAH, SXTH */
9990 case 1: /* UXTAH, UXTH */
9991 case 4: /* SXTAB, SXTB */
9992 case 5: /* UXTAB, UXTB */
9993 break;
9994 case 2: /* SXTAB16, SXTB16 */
9995 case 3: /* UXTAB16, UXTB16 */
9996 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9997 goto illegal_op;
9999 break;
10000 default:
10001 goto illegal_op;
10003 if (rn != 15) {
10004 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10005 goto illegal_op;
10008 tmp = load_reg(s, rm);
10009 shift = (insn >> 4) & 3;
10010 /* ??? In many cases it's not necessary to do a
10011 rotate; a shift is sufficient. */
10012 if (shift != 0)
10013 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
10014 op = (insn >> 20) & 7;
10015 switch (op) {
10016 case 0: gen_sxth(tmp); break;
10017 case 1: gen_uxth(tmp); break;
10018 case 2: gen_sxtb16(tmp); break;
10019 case 3: gen_uxtb16(tmp); break;
10020 case 4: gen_sxtb(tmp); break;
10021 case 5: gen_uxtb(tmp); break;
10022 default:
10023 g_assert_not_reached();
10025 if (rn != 15) {
10026 tmp2 = load_reg(s, rn);
10027 if ((op >> 1) == 1) {
10028 gen_add16(tmp, tmp2);
10029 } else {
10030 tcg_gen_add_i32(tmp, tmp, tmp2);
10031 tcg_temp_free_i32(tmp2);
10034 store_reg(s, rd, tmp);
10035 break;
10036 case 2: /* SIMD add/subtract. */
10037 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10038 goto illegal_op;
10040 op = (insn >> 20) & 7;
10041 shift = (insn >> 4) & 7;
10042 if ((op & 3) == 3 || (shift & 3) == 3)
10043 goto illegal_op;
10044 tmp = load_reg(s, rn);
10045 tmp2 = load_reg(s, rm);
10046 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
10047 tcg_temp_free_i32(tmp2);
10048 store_reg(s, rd, tmp);
10049 break;
10050 case 3: /* Other data processing. */
10051 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10052 if (op < 4) {
10053 /* Saturating add/subtract. */
10054 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10055 goto illegal_op;
10057 tmp = load_reg(s, rn);
10058 tmp2 = load_reg(s, rm);
10059 if (op & 1)
10060 gen_helper_double_saturate(tmp, cpu_env, tmp);
10061 if (op & 2)
10062 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
10063 else
10064 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
10065 tcg_temp_free_i32(tmp2);
10066 } else {
10067 switch (op) {
10068 case 0x0a: /* rbit */
10069 case 0x08: /* rev */
10070 case 0x09: /* rev16 */
10071 case 0x0b: /* revsh */
10072 case 0x18: /* clz */
10073 break;
10074 case 0x10: /* sel */
10075 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10076 goto illegal_op;
10078 break;
10079 case 0x20: /* crc32/crc32c */
10080 case 0x21:
10081 case 0x22:
10082 case 0x28:
10083 case 0x29:
10084 case 0x2a:
10085 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10086 goto illegal_op;
10088 break;
10089 default:
10090 goto illegal_op;
10092 tmp = load_reg(s, rn);
10093 switch (op) {
10094 case 0x0a: /* rbit */
10095 gen_helper_rbit(tmp, tmp);
10096 break;
10097 case 0x08: /* rev */
10098 tcg_gen_bswap32_i32(tmp, tmp);
10099 break;
10100 case 0x09: /* rev16 */
10101 gen_rev16(tmp);
10102 break;
10103 case 0x0b: /* revsh */
10104 gen_revsh(tmp);
10105 break;
10106 case 0x10: /* sel */
10107 tmp2 = load_reg(s, rm);
10108 tmp3 = tcg_temp_new_i32();
10109 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
10110 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
10111 tcg_temp_free_i32(tmp3);
10112 tcg_temp_free_i32(tmp2);
10113 break;
10114 case 0x18: /* clz */
10115 gen_helper_clz(tmp, tmp);
10116 break;
10117 case 0x20:
10118 case 0x21:
10119 case 0x22:
10120 case 0x28:
10121 case 0x29:
10122 case 0x2a:
10124 /* crc32/crc32c */
10125 uint32_t sz = op & 0x3;
10126 uint32_t c = op & 0x8;
10128 tmp2 = load_reg(s, rm);
10129 if (sz == 0) {
10130 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10131 } else if (sz == 1) {
10132 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10134 tmp3 = tcg_const_i32(1 << sz);
10135 if (c) {
10136 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10137 } else {
10138 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10140 tcg_temp_free_i32(tmp2);
10141 tcg_temp_free_i32(tmp3);
10142 break;
10144 default:
10145 g_assert_not_reached();
10148 store_reg(s, rd, tmp);
10149 break;
10150 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
10151 switch ((insn >> 20) & 7) {
10152 case 0: /* 32 x 32 -> 32 */
10153 case 7: /* Unsigned sum of absolute differences. */
10154 break;
10155 case 1: /* 16 x 16 -> 32 */
10156 case 2: /* Dual multiply add. */
10157 case 3: /* 32 * 16 -> 32msb */
10158 case 4: /* Dual multiply subtract. */
10159 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10160 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10161 goto illegal_op;
10163 break;
10165 op = (insn >> 4) & 0xf;
10166 tmp = load_reg(s, rn);
10167 tmp2 = load_reg(s, rm);
10168 switch ((insn >> 20) & 7) {
10169 case 0: /* 32 x 32 -> 32 */
10170 tcg_gen_mul_i32(tmp, tmp, tmp2);
10171 tcg_temp_free_i32(tmp2);
10172 if (rs != 15) {
10173 tmp2 = load_reg(s, rs);
10174 if (op)
10175 tcg_gen_sub_i32(tmp, tmp2, tmp);
10176 else
10177 tcg_gen_add_i32(tmp, tmp, tmp2);
10178 tcg_temp_free_i32(tmp2);
10180 break;
10181 case 1: /* 16 x 16 -> 32 */
10182 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10183 tcg_temp_free_i32(tmp2);
10184 if (rs != 15) {
10185 tmp2 = load_reg(s, rs);
10186 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10187 tcg_temp_free_i32(tmp2);
10189 break;
10190 case 2: /* Dual multiply add. */
10191 case 4: /* Dual multiply subtract. */
10192 if (op)
10193 gen_swap_half(tmp2);
10194 gen_smul_dual(tmp, tmp2);
10195 if (insn & (1 << 22)) {
10196 /* This subtraction cannot overflow. */
10197 tcg_gen_sub_i32(tmp, tmp, tmp2);
10198 } else {
10199 /* This addition cannot overflow 32 bits;
10200 * however it may overflow considered as a signed
10201 * operation, in which case we must set the Q flag.
10202 */
10203 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10205 tcg_temp_free_i32(tmp2);
10206 if (rs != 15)
10207 {
10208 tmp2 = load_reg(s, rs);
10209 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10210 tcg_temp_free_i32(tmp2);
10211 }
10212 break;
10213 case 3: /* 32 * 16 -> 32msb */
10214 if (op)
10215 tcg_gen_sari_i32(tmp2, tmp2, 16);
10216 else
10217 gen_sxth(tmp2);
10218 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10219 tcg_gen_shri_i64(tmp64, tmp64, 16);
10220 tmp = tcg_temp_new_i32();
10221 tcg_gen_extrl_i64_i32(tmp, tmp64);
10222 tcg_temp_free_i64(tmp64);
10223 if (rs != 15)
10224 {
10225 tmp2 = load_reg(s, rs);
10226 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10227 tcg_temp_free_i32(tmp2);
10228 }
10229 break;
10230 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10231 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10232 if (rs != 15) {
10233 tmp = load_reg(s, rs);
10234 if (insn & (1 << 20)) {
10235 tmp64 = gen_addq_msw(tmp64, tmp);
10236 } else {
10237 tmp64 = gen_subq_msw(tmp64, tmp);
10240 if (insn & (1 << 4)) {
10241 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10243 tcg_gen_shri_i64(tmp64, tmp64, 32);
10244 tmp = tcg_temp_new_i32();
10245 tcg_gen_extrl_i64_i32(tmp, tmp64);
10246 tcg_temp_free_i64(tmp64);
10247 break;
10248 case 7: /* Unsigned sum of absolute differences. */
10249 gen_helper_usad8(tmp, tmp, tmp2);
10250 tcg_temp_free_i32(tmp2);
10251 if (rs != 15) {
10252 tmp2 = load_reg(s, rs);
10253 tcg_gen_add_i32(tmp, tmp, tmp2);
10254 tcg_temp_free_i32(tmp2);
10256 break;
10258 store_reg(s, rd, tmp);
10259 break;
10260 case 6: case 7: /* 64-bit multiply, Divide. */
10261 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
10262 tmp = load_reg(s, rn);
10263 tmp2 = load_reg(s, rm);
10264 if ((op & 0x50) == 0x10) {
10265 /* sdiv, udiv */
10266 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
10267 goto illegal_op;
10269 if (op & 0x20)
10270 gen_helper_udiv(tmp, tmp, tmp2);
10271 else
10272 gen_helper_sdiv(tmp, tmp, tmp2);
10273 tcg_temp_free_i32(tmp2);
10274 store_reg(s, rd, tmp);
10275 } else if ((op & 0xe) == 0xc) {
10276 /* Dual multiply accumulate long. */
10277 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10278 tcg_temp_free_i32(tmp);
10279 tcg_temp_free_i32(tmp2);
10280 goto illegal_op;
10282 if (op & 1)
10283 gen_swap_half(tmp2);
10284 gen_smul_dual(tmp, tmp2);
10285 if (op & 0x10) {
10286 tcg_gen_sub_i32(tmp, tmp, tmp2);
10287 } else {
10288 tcg_gen_add_i32(tmp, tmp, tmp2);
10290 tcg_temp_free_i32(tmp2);
10291 /* BUGFIX */
10292 tmp64 = tcg_temp_new_i64();
10293 tcg_gen_ext_i32_i64(tmp64, tmp);
10294 tcg_temp_free_i32(tmp);
10295 gen_addq(s, tmp64, rs, rd);
10296 gen_storeq_reg(s, rs, rd, tmp64);
10297 tcg_temp_free_i64(tmp64);
10298 } else {
10299 if (op & 0x20) {
10300 /* Unsigned 64-bit multiply */
10301 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
10302 } else {
10303 if (op & 8) {
10304 /* smlalxy */
10305 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10306 tcg_temp_free_i32(tmp2);
10307 tcg_temp_free_i32(tmp);
10308 goto illegal_op;
10310 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10311 tcg_temp_free_i32(tmp2);
10312 tmp64 = tcg_temp_new_i64();
10313 tcg_gen_ext_i32_i64(tmp64, tmp);
10314 tcg_temp_free_i32(tmp);
10315 } else {
10316 /* Signed 64-bit multiply */
10317 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10320 if (op & 4) {
10321 /* umaal */
10322 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10323 tcg_temp_free_i64(tmp64);
10324 goto illegal_op;
10326 gen_addq_lo(s, tmp64, rs);
10327 gen_addq_lo(s, tmp64, rd);
10328 } else if (op & 0x40) {
10329 /* 64-bit accumulate. */
10330 gen_addq(s, tmp64, rs, rd);
10332 gen_storeq_reg(s, rs, rd, tmp64);
10333 tcg_temp_free_i64(tmp64);
10335 break;
10337 break;
10338 case 6: case 7: case 14: case 15:
10339 /* Coprocessor. */
10340 if (((insn >> 24) & 3) == 3) {
10341 /* Translate into the equivalent ARM encoding. */
10342 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
10343 if (disas_neon_data_insn(s, insn)) {
10344 goto illegal_op;
10346 } else if (((insn >> 8) & 0xe) == 10) {
10347 if (disas_vfp_insn(s, insn)) {
10348 goto illegal_op;
10350 } else {
10351 if (insn & (1 << 28))
10352 goto illegal_op;
10353 if (disas_coproc_insn(s, insn)) {
10354 goto illegal_op;
10357 break;
10358 case 8: case 9: case 10: case 11:
10359 if (insn & (1 << 15)) {
10360 /* Branches, misc control. */
10361 if (insn & 0x5000) {
10362 /* Unconditional branch. */
10363 /* signextend(hw1[10:0]) -> offset[:12]. */
10364 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10365 /* hw1[10:0] -> offset[11:1]. */
10366 offset |= (insn & 0x7ff) << 1;
10367 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10368 offset[24:22] already have the same value because of the
10369 sign extension above. */
10370 offset ^= ((~insn) & (1 << 13)) << 10;
10371 offset ^= ((~insn) & (1 << 11)) << 11;
10373 if (insn & (1 << 14)) {
10374 /* Branch and link. */
10375 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
10378 offset += s->pc;
10379 if (insn & (1 << 12)) {
10380 /* b/bl */
10381 gen_jmp(s, offset);
10382 } else {
10383 /* blx */
10384 offset &= ~(uint32_t)2;
10385 /* thumb2 bx, no need to check */
10386 gen_bx_im(s, offset);
10388 } else if (((insn >> 23) & 7) == 7) {
10389 /* Misc control */
10390 if (insn & (1 << 13))
10391 goto illegal_op;
10393 if (insn & (1 << 26)) {
10394 if (!(insn & (1 << 20))) {
10395 /* Hypervisor call (v7) */
10396 int imm16 = extract32(insn, 16, 4) << 12
10397 | extract32(insn, 0, 12);
10398 ARCH(7);
10399 if (IS_USER(s)) {
10400 goto illegal_op;
10402 gen_hvc(s, imm16);
10403 } else {
10404 /* Secure monitor call (v6+) */
10405 ARCH(6K);
10406 if (IS_USER(s)) {
10407 goto illegal_op;
10409 gen_smc(s);
10411 } else {
10412 op = (insn >> 20) & 7;
10413 switch (op) {
10414 case 0: /* msr cpsr. */
10415 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10416 tmp = load_reg(s, rn);
10417 addr = tcg_const_i32(insn & 0xff);
10418 gen_helper_v7m_msr(cpu_env, addr, tmp);
10419 tcg_temp_free_i32(addr);
10420 tcg_temp_free_i32(tmp);
10421 gen_lookup_tb(s);
10422 break;
10424 /* fall through */
10425 case 1: /* msr spsr. */
10426 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10427 goto illegal_op;
10430 if (extract32(insn, 5, 1)) {
10431 /* MSR (banked) */
10432 int sysm = extract32(insn, 8, 4) |
10433 (extract32(insn, 4, 1) << 4);
10434 int r = op & 1;
10436 gen_msr_banked(s, r, sysm, rm);
10437 break;
10440 /* MSR (for PSRs) */
10441 tmp = load_reg(s, rn);
10442 if (gen_set_psr(s,
10443 msr_mask(s, (insn >> 8) & 0xf, op == 1),
10444 op == 1, tmp))
10445 goto illegal_op;
10446 break;
10447 case 2: /* cps, nop-hint. */
10448 if (((insn >> 8) & 7) == 0) {
10449 gen_nop_hint(s, insn & 0xff);
10451 /* Implemented as NOP in user mode. */
10452 if (IS_USER(s))
10453 break;
10454 offset = 0;
10455 imm = 0;
10456 if (insn & (1 << 10)) {
10457 if (insn & (1 << 7))
10458 offset |= CPSR_A;
10459 if (insn & (1 << 6))
10460 offset |= CPSR_I;
10461 if (insn & (1 << 5))
10462 offset |= CPSR_F;
10463 if (insn & (1 << 9))
10464 imm = CPSR_A | CPSR_I | CPSR_F;
10466 if (insn & (1 << 8)) {
10467 offset |= 0x1f;
10468 imm |= (insn & 0x1f);
10470 if (offset) {
10471 gen_set_psr_im(s, offset, 0, imm);
10473 break;
10474 case 3: /* Special control operations. */
10475 ARCH(7);
10476 op = (insn >> 4) & 0xf;
10477 switch (op) {
10478 case 2: /* clrex */
10479 gen_clrex(s);
10480 break;
10481 case 4: /* dsb */
10482 case 5: /* dmb */
10483 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
10484 break;
10485 case 6: /* isb */
10486 /* We need to break the TB after this insn
10487 * to execute self-modifying code correctly
10488 * and also to take any pending interrupts
10489 * immediately.
10490 */
10491 gen_lookup_tb(s);
10492 break;
10493 default:
10494 goto illegal_op;
10496 break;
10497 case 4: /* bxj */
10498 /* Trivial implementation equivalent to bx. */
10499 tmp = load_reg(s, rn);
10500 gen_bx(s, tmp);
10501 break;
10502 case 5: /* Exception return. */
10503 if (IS_USER(s)) {
10504 goto illegal_op;
10506 if (rn != 14 || rd != 15) {
10507 goto illegal_op;
10509 tmp = load_reg(s, rn);
10510 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10511 gen_exception_return(s, tmp);
10512 break;
10513 case 6: /* MRS */
10514 if (extract32(insn, 5, 1)) {
10515 /* MRS (banked) */
10516 int sysm = extract32(insn, 16, 4) |
10517 (extract32(insn, 4, 1) << 4);
10519 gen_mrs_banked(s, 0, sysm, rd);
10520 break;
10523 /* mrs cpsr */
10524 tmp = tcg_temp_new_i32();
10525 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10526 addr = tcg_const_i32(insn & 0xff);
10527 gen_helper_v7m_mrs(tmp, cpu_env, addr);
10528 tcg_temp_free_i32(addr);
10529 } else {
10530 gen_helper_cpsr_read(tmp, cpu_env);
10532 store_reg(s, rd, tmp);
10533 break;
10534 case 7: /* MRS */
10535 if (extract32(insn, 5, 1)) {
10536 /* MRS (banked) */
10537 int sysm = extract32(insn, 16, 4) |
10538 (extract32(insn, 4, 1) << 4);
10540 gen_mrs_banked(s, 1, sysm, rd);
10541 break;
10544 /* mrs spsr. */
10545 /* Not accessible in user mode. */
10546 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
10547 goto illegal_op;
10549 tmp = load_cpu_field(spsr);
10550 store_reg(s, rd, tmp);
10551 break;
10554 } else {
10555 /* Conditional branch. */
10556 op = (insn >> 22) & 0xf;
10557 /* Generate a conditional jump to next instruction. */
10558 s->condlabel = gen_new_label();
10559 arm_gen_test_cc(op ^ 1, s->condlabel);
10560 s->condjmp = 1;
10562 /* offset[11:1] = insn[10:0] */
10563 offset = (insn & 0x7ff) << 1;
10564 /* offset[17:12] = insn[21:16]. */
10565 offset |= (insn & 0x003f0000) >> 4;
10566 /* offset[31:20] = insn[26]. */
10567 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10568 /* offset[18] = insn[13]. */
10569 offset |= (insn & (1 << 13)) << 5;
10570 /* offset[19] = insn[11]. */
10571 offset |= (insn & (1 << 11)) << 8;
10573 /* jump to the offset */
10574 gen_jmp(s, s->pc + offset);
10576 } else {
10577 /* Data processing immediate. */
10578 if (insn & (1 << 25)) {
10579 if (insn & (1 << 24)) {
10580 if (insn & (1 << 20))
10581 goto illegal_op;
10582 /* Bitfield/Saturate. */
10583 op = (insn >> 21) & 7;
10584 imm = insn & 0x1f;
10585 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10586 if (rn == 15) {
10587 tmp = tcg_temp_new_i32();
10588 tcg_gen_movi_i32(tmp, 0);
10589 } else {
10590 tmp = load_reg(s, rn);
10592 switch (op) {
10593 case 2: /* Signed bitfield extract. */
10594 imm++;
10595 if (shift + imm > 32)
10596 goto illegal_op;
10597 if (imm < 32)
10598 gen_sbfx(tmp, shift, imm);
10599 break;
10600 case 6: /* Unsigned bitfield extract. */
10601 imm++;
10602 if (shift + imm > 32)
10603 goto illegal_op;
10604 if (imm < 32)
10605 gen_ubfx(tmp, shift, (1u << imm) - 1);
10606 break;
10607 case 3: /* Bitfield insert/clear. */
10608 if (imm < shift)
10609 goto illegal_op;
10610 imm = imm + 1 - shift;
10611 if (imm != 32) {
10612 tmp2 = load_reg(s, rd);
10613 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
10614 tcg_temp_free_i32(tmp2);
10616 break;
10617 case 7:
10618 goto illegal_op;
10619 default: /* Saturate. */
10620 if (shift) {
10621 if (op & 1)
10622 tcg_gen_sari_i32(tmp, tmp, shift);
10623 else
10624 tcg_gen_shli_i32(tmp, tmp, shift);
10626 tmp2 = tcg_const_i32(imm);
10627 if (op & 4) {
10628 /* Unsigned. */
10629 if ((op & 1) && shift == 0) {
10630 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10631 tcg_temp_free_i32(tmp);
10632 tcg_temp_free_i32(tmp2);
10633 goto illegal_op;
10635 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
10636 } else {
10637 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
10639 } else {
10640 /* Signed. */
10641 if ((op & 1) && shift == 0) {
10642 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10643 tcg_temp_free_i32(tmp);
10644 tcg_temp_free_i32(tmp2);
10645 goto illegal_op;
10647 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
10648 } else {
10649 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
10652 tcg_temp_free_i32(tmp2);
10653 break;
10655 store_reg(s, rd, tmp);
10656 } else {
10657 imm = ((insn & 0x04000000) >> 15)
10658 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10659 if (insn & (1 << 22)) {
10660 /* 16-bit immediate. */
10661 imm |= (insn >> 4) & 0xf000;
10662 if (insn & (1 << 23)) {
10663 /* movt */
10664 tmp = load_reg(s, rd);
10665 tcg_gen_ext16u_i32(tmp, tmp);
10666 tcg_gen_ori_i32(tmp, tmp, imm << 16);
10667 } else {
10668 /* movw */
10669 tmp = tcg_temp_new_i32();
10670 tcg_gen_movi_i32(tmp, imm);
10672 } else {
10673 /* Add/sub 12-bit immediate. */
10674 if (rn == 15) {
10675 offset = s->pc & ~(uint32_t)3;
10676 if (insn & (1 << 23))
10677 offset -= imm;
10678 else
10679 offset += imm;
10680 tmp = tcg_temp_new_i32();
10681 tcg_gen_movi_i32(tmp, offset);
10682 } else {
10683 tmp = load_reg(s, rn);
10684 if (insn & (1 << 23))
10685 tcg_gen_subi_i32(tmp, tmp, imm);
10686 else
10687 tcg_gen_addi_i32(tmp, tmp, imm);
10690 store_reg(s, rd, tmp);
10692 } else {
10693 int shifter_out = 0;
10694 /* modified 12-bit immediate. */
10695 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10696 imm = (insn & 0xff);
10697 switch (shift) {
10698 case 0: /* XY */
10699 /* Nothing to do. */
10700 break;
10701 case 1: /* 00XY00XY */
10702 imm |= imm << 16;
10703 break;
10704 case 2: /* XY00XY00 */
10705 imm |= imm << 16;
10706 imm <<= 8;
10707 break;
10708 case 3: /* XYXYXYXY */
10709 imm |= imm << 16;
10710 imm |= imm << 8;
10711 break;
10712 default: /* Rotated constant. */
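/* The constant is 1:imm[6:0] rotated right by the 5-bit amount
 * i:imm3:imm[7].  Only the low 8 bits are set and the rotation is at
 * least 8, so the rotate reduces to the left shift below; e.g. imm8 =
 * 0x55 with a rotation of 8 gives 0xd5000000. */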
10713 shift = (shift << 1) | (imm >> 7);
10714 imm |= 0x80;
10715 imm = imm << (32 - shift);
10716 shifter_out = 1;
10717 break;
10719 tmp2 = tcg_temp_new_i32();
10720 tcg_gen_movi_i32(tmp2, imm);
10721 rn = (insn >> 16) & 0xf;
10722 if (rn == 15) {
10723 tmp = tcg_temp_new_i32();
10724 tcg_gen_movi_i32(tmp, 0);
10725 } else {
10726 tmp = load_reg(s, rn);
10728 op = (insn >> 21) & 0xf;
10729 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
10730 shifter_out, tmp, tmp2))
10731 goto illegal_op;
10732 tcg_temp_free_i32(tmp2);
10733 rd = (insn >> 8) & 0xf;
10734 if (rd != 15) {
10735 store_reg(s, rd, tmp);
10736 } else {
10737 tcg_temp_free_i32(tmp);
10741 break;
10742 case 12: /* Load/store single data item. */
10744 int postinc = 0;
10745 int writeback = 0;
10746 int memidx;
10747 if ((insn & 0x01100000) == 0x01000000) {
10748 if (disas_neon_ls_insn(s, insn)) {
10749 goto illegal_op;
10751 break;
10753 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10754 if (rs == 15) {
10755 if (!(insn & (1 << 20))) {
10756 goto illegal_op;
10758 if (op != 2) {
10759 /* Byte or halfword load space with dest == r15 : memory hints.
10760 * Catch them early so we don't emit pointless addressing code.
10761 * This space is a mix of:
10762 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10763 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10764 * cores)
10765 * unallocated hints, which must be treated as NOPs
10766 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10767 * which is easiest for the decoding logic
10768 * Some space which must UNDEF
10769 */
10770 int op1 = (insn >> 23) & 3;
10771 int op2 = (insn >> 6) & 0x3f;
10772 if (op & 2) {
10773 goto illegal_op;
10775 if (rn == 15) {
10776 /* UNPREDICTABLE, unallocated hint or
10777 * PLD/PLDW/PLI (literal)
10778 */
10779 return 0;
10780 }
10781 if (op1 & 1) {
10782 return 0; /* PLD/PLDW/PLI or unallocated hint */
10783 }
10784 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
10785 return 0; /* PLD/PLDW/PLI or unallocated hint */
10786 }
10787 /* UNDEF space, or an UNPREDICTABLE */
10788 return 1;
10789 }
10790 }
10791 memidx = get_mem_index(s);
10792 if (rn == 15) {
10793 addr = tcg_temp_new_i32();
10794 /* PC relative. */
10795 /* s->pc has already been incremented by 4. */
10796 imm = s->pc & 0xfffffffc;
10797 if (insn & (1 << 23))
10798 imm += insn & 0xfff;
10799 else
10800 imm -= insn & 0xfff;
10801 tcg_gen_movi_i32(addr, imm);
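/* For example (illustrative): a 32-bit LDR (literal) with offset 8 at
 * address 0x1002 sees s->pc == 0x1006 here, so imm = 0x1004 + 8 = 0x100c,
 * matching the architectural Align(PC, 4) + imm12.
 */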
10802 } else {
10803 addr = load_reg(s, rn);
10804 if (insn & (1 << 23)) {
10805 /* Positive offset. */
10806 imm = insn & 0xfff;
10807 tcg_gen_addi_i32(addr, addr, imm);
10808 } else {
10809 imm = insn & 0xff;
10810 switch ((insn >> 8) & 0xf) {
10811 case 0x0: /* Shifted Register. */
10812 shift = (insn >> 4) & 0xf;
10813 if (shift > 3) {
10814 tcg_temp_free_i32(addr);
10815 goto illegal_op;
10817 tmp = load_reg(s, rm);
10818 if (shift)
10819 tcg_gen_shli_i32(tmp, tmp, shift);
10820 tcg_gen_add_i32(addr, addr, tmp);
10821 tcg_temp_free_i32(tmp);
10822 break;
10823 case 0xc: /* Negative offset. */
10824 tcg_gen_addi_i32(addr, addr, -imm);
10825 break;
10826 case 0xe: /* User privilege. */
10827 tcg_gen_addi_i32(addr, addr, imm);
10828 memidx = get_a32_user_mem_index(s);
10829 break;
10830 case 0x9: /* Post-decrement. */
10831 imm = -imm;
10832 /* Fall through. */
10833 case 0xb: /* Post-increment. */
10834 postinc = 1;
10835 writeback = 1;
10836 break;
10837 case 0xd: /* Pre-decrement. */
10838 imm = -imm;
10839 /* Fall through. */
10840 case 0xf: /* Pre-increment. */
10841 tcg_gen_addi_i32(addr, addr, imm);
10842 writeback = 1;
10843 break;
10844 default:
10845 tcg_temp_free_i32(addr);
10846 goto illegal_op;
10847 }
10848 }
10849 }
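/* Illustrative summary of the sub-encodings handled above (for the
 * immediate forms, bits [11:8] are 1:P:U:W): 0x0 register offset
 * [Rn, Rm, LSL #shift], 0x9/0xb post-indexed [Rn], #-/+imm8, 0xc negative
 * offset [Rn, #-imm8], 0xd/0xf pre-indexed [Rn, #-/+imm8]!, 0xe the
 * unprivileged (LDRT/STRT style) forms.
 */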
10850 if (insn & (1 << 20)) {
10851 /* Load. */
10852 tmp = tcg_temp_new_i32();
10853 switch (op) {
10854 case 0:
10855 gen_aa32_ld8u(s, tmp, addr, memidx);
10856 break;
10857 case 4:
10858 gen_aa32_ld8s(s, tmp, addr, memidx);
10859 break;
10860 case 1:
10861 gen_aa32_ld16u(s, tmp, addr, memidx);
10862 break;
10863 case 5:
10864 gen_aa32_ld16s(s, tmp, addr, memidx);
10865 break;
10866 case 2:
10867 gen_aa32_ld32u(s, tmp, addr, memidx);
10868 break;
10869 default:
10870 tcg_temp_free_i32(tmp);
10871 tcg_temp_free_i32(addr);
10872 goto illegal_op;
10874 if (rs == 15) {
10875 gen_bx(s, tmp);
10876 } else {
10877 store_reg(s, rs, tmp);
10879 } else {
10880 /* Store. */
10881 tmp = load_reg(s, rs);
10882 switch (op) {
10883 case 0:
10884 gen_aa32_st8(s, tmp, addr, memidx);
10885 break;
10886 case 1:
10887 gen_aa32_st16(s, tmp, addr, memidx);
10888 break;
10889 case 2:
10890 gen_aa32_st32(s, tmp, addr, memidx);
10891 break;
10892 default:
10893 tcg_temp_free_i32(tmp);
10894 tcg_temp_free_i32(addr);
10895 goto illegal_op;
10897 tcg_temp_free_i32(tmp);
10899 if (postinc)
10900 tcg_gen_addi_i32(addr, addr, imm);
10901 if (writeback) {
10902 store_reg(s, rn, addr);
10903 } else {
10904 tcg_temp_free_i32(addr);
10907 break;
10908 default:
10909 goto illegal_op;
10911 return 0;
10912 illegal_op:
10913 return 1;
10914 }
10916 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
10917 {
10918 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10919 int32_t offset;
10920 int i;
10921 TCGv_i32 tmp;
10922 TCGv_i32 tmp2;
10923 TCGv_i32 addr;
10925 if (s->condexec_mask) {
10926 cond = s->condexec_cond;
10927 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10928 s->condlabel = gen_new_label();
10929 arm_gen_test_cc(cond ^ 1, s->condlabel);
10930 s->condjmp = 1;
10931 }
10932 }
10934 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
10935 s->pc += 2;
10937 switch (insn >> 12) {
10938 case 0: case 1:
10940 rd = insn & 7;
10941 op = (insn >> 11) & 3;
10942 if (op == 3) {
10943 /* add/subtract */
10944 rn = (insn >> 3) & 7;
10945 tmp = load_reg(s, rn);
10946 if (insn & (1 << 10)) {
10947 /* immediate */
10948 tmp2 = tcg_temp_new_i32();
10949 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
10950 } else {
10951 /* reg */
10952 rm = (insn >> 6) & 7;
10953 tmp2 = load_reg(s, rm);
10955 if (insn & (1 << 9)) {
10956 if (s->condexec_mask)
10957 tcg_gen_sub_i32(tmp, tmp, tmp2);
10958 else
10959 gen_sub_CC(tmp, tmp, tmp2);
10960 } else {
10961 if (s->condexec_mask)
10962 tcg_gen_add_i32(tmp, tmp, tmp2);
10963 else
10964 gen_add_CC(tmp, tmp, tmp2);
10966 tcg_temp_free_i32(tmp2);
10967 store_reg(s, rd, tmp);
10968 } else {
10969 /* shift immediate */
10970 rm = (insn >> 3) & 7;
10971 shift = (insn >> 6) & 0x1f;
10972 tmp = load_reg(s, rm);
10973 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10974 if (!s->condexec_mask)
10975 gen_logic_CC(tmp);
10976 store_reg(s, rd, tmp);
10978 break;
10979 case 2: case 3:
10980 /* arithmetic large immediate */
10981 op = (insn >> 11) & 3;
10982 rd = (insn >> 8) & 0x7;
10983 if (op == 0) { /* mov */
10984 tmp = tcg_temp_new_i32();
10985 tcg_gen_movi_i32(tmp, insn & 0xff);
10986 if (!s->condexec_mask)
10987 gen_logic_CC(tmp);
10988 store_reg(s, rd, tmp);
10989 } else {
10990 tmp = load_reg(s, rd);
10991 tmp2 = tcg_temp_new_i32();
10992 tcg_gen_movi_i32(tmp2, insn & 0xff);
10993 switch (op) {
10994 case 1: /* cmp */
10995 gen_sub_CC(tmp, tmp, tmp2);
10996 tcg_temp_free_i32(tmp);
10997 tcg_temp_free_i32(tmp2);
10998 break;
10999 case 2: /* add */
11000 if (s->condexec_mask)
11001 tcg_gen_add_i32(tmp, tmp, tmp2);
11002 else
11003 gen_add_CC(tmp, tmp, tmp2);
11004 tcg_temp_free_i32(tmp2);
11005 store_reg(s, rd, tmp);
11006 break;
11007 case 3: /* sub */
11008 if (s->condexec_mask)
11009 tcg_gen_sub_i32(tmp, tmp, tmp2);
11010 else
11011 gen_sub_CC(tmp, tmp, tmp2);
11012 tcg_temp_free_i32(tmp2);
11013 store_reg(s, rd, tmp);
11014 break;
11017 break;
11018 case 4:
11019 if (insn & (1 << 11)) {
11020 rd = (insn >> 8) & 7;
11021 /* load pc-relative. Bit 1 of PC is ignored. */
11022 val = s->pc + 2 + ((insn & 0xff) * 4);
11023 val &= ~(uint32_t)2;
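/* For instance: LDR r0, [pc, #4] at address 0x2002 gives
 * val = (0x2004 + 2 + 4) & ~2 = 0x2008, i.e. Align(PC, 4) + imm8 * 4.
 */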
11024 addr = tcg_temp_new_i32();
11025 tcg_gen_movi_i32(addr, val);
11026 tmp = tcg_temp_new_i32();
11027 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11028 tcg_temp_free_i32(addr);
11029 store_reg(s, rd, tmp);
11030 break;
11032 if (insn & (1 << 10)) {
11033 /* data processing extended or blx */
11034 rd = (insn & 7) | ((insn >> 4) & 8);
11035 rm = (insn >> 3) & 0xf;
11036 op = (insn >> 8) & 3;
11037 switch (op) {
11038 case 0: /* add */
11039 tmp = load_reg(s, rd);
11040 tmp2 = load_reg(s, rm);
11041 tcg_gen_add_i32(tmp, tmp, tmp2);
11042 tcg_temp_free_i32(tmp2);
11043 store_reg(s, rd, tmp);
11044 break;
11045 case 1: /* cmp */
11046 tmp = load_reg(s, rd);
11047 tmp2 = load_reg(s, rm);
11048 gen_sub_CC(tmp, tmp, tmp2);
11049 tcg_temp_free_i32(tmp2);
11050 tcg_temp_free_i32(tmp);
11051 break;
11052 case 2: /* mov/cpy */
11053 tmp = load_reg(s, rm);
11054 store_reg(s, rd, tmp);
11055 break;
11056 case 3:/* branch [and link] exchange thumb register */
11057 tmp = load_reg(s, rm);
11058 if (insn & (1 << 7)) {
11059 ARCH(5);
11060 val = (uint32_t)s->pc | 1;
11061 tmp2 = tcg_temp_new_i32();
11062 tcg_gen_movi_i32(tmp2, val);
11063 store_reg(s, 14, tmp2);
11065 /* already thumb, no need to check */
11066 gen_bx(s, tmp);
11067 break;
11069 break;
11072 /* data processing register */
11073 rd = insn & 7;
11074 rm = (insn >> 3) & 7;
11075 op = (insn >> 6) & 0xf;
11076 if (op == 2 || op == 3 || op == 4 || op == 7) {
11077 /* the shift/rotate ops want the operands backwards */
11078 val = rm;
11079 rm = rd;
11080 rd = val;
11081 val = 1;
11082 } else {
11083 val = 0;
11086 if (op == 9) { /* neg */
11087 tmp = tcg_temp_new_i32();
11088 tcg_gen_movi_i32(tmp, 0);
11089 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11090 tmp = load_reg(s, rd);
11091 } else {
11092 TCGV_UNUSED_I32(tmp);
11095 tmp2 = load_reg(s, rm);
11096 switch (op) {
11097 case 0x0: /* and */
11098 tcg_gen_and_i32(tmp, tmp, tmp2);
11099 if (!s->condexec_mask)
11100 gen_logic_CC(tmp);
11101 break;
11102 case 0x1: /* eor */
11103 tcg_gen_xor_i32(tmp, tmp, tmp2);
11104 if (!s->condexec_mask)
11105 gen_logic_CC(tmp);
11106 break;
11107 case 0x2: /* lsl */
11108 if (s->condexec_mask) {
11109 gen_shl(tmp2, tmp2, tmp);
11110 } else {
11111 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
11112 gen_logic_CC(tmp2);
11114 break;
11115 case 0x3: /* lsr */
11116 if (s->condexec_mask) {
11117 gen_shr(tmp2, tmp2, tmp);
11118 } else {
11119 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
11120 gen_logic_CC(tmp2);
11122 break;
11123 case 0x4: /* asr */
11124 if (s->condexec_mask) {
11125 gen_sar(tmp2, tmp2, tmp);
11126 } else {
11127 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
11128 gen_logic_CC(tmp2);
11130 break;
11131 case 0x5: /* adc */
11132 if (s->condexec_mask) {
11133 gen_adc(tmp, tmp2);
11134 } else {
11135 gen_adc_CC(tmp, tmp, tmp2);
11137 break;
11138 case 0x6: /* sbc */
11139 if (s->condexec_mask) {
11140 gen_sub_carry(tmp, tmp, tmp2);
11141 } else {
11142 gen_sbc_CC(tmp, tmp, tmp2);
11144 break;
11145 case 0x7: /* ror */
11146 if (s->condexec_mask) {
11147 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11148 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
11149 } else {
11150 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
11151 gen_logic_CC(tmp2);
11153 break;
11154 case 0x8: /* tst */
11155 tcg_gen_and_i32(tmp, tmp, tmp2);
11156 gen_logic_CC(tmp);
11157 rd = 16;
11158 break;
11159 case 0x9: /* neg */
11160 if (s->condexec_mask)
11161 tcg_gen_neg_i32(tmp, tmp2);
11162 else
11163 gen_sub_CC(tmp, tmp, tmp2);
11164 break;
11165 case 0xa: /* cmp */
11166 gen_sub_CC(tmp, tmp, tmp2);
11167 rd = 16;
11168 break;
11169 case 0xb: /* cmn */
11170 gen_add_CC(tmp, tmp, tmp2);
11171 rd = 16;
11172 break;
11173 case 0xc: /* orr */
11174 tcg_gen_or_i32(tmp, tmp, tmp2);
11175 if (!s->condexec_mask)
11176 gen_logic_CC(tmp);
11177 break;
11178 case 0xd: /* mul */
11179 tcg_gen_mul_i32(tmp, tmp, tmp2);
11180 if (!s->condexec_mask)
11181 gen_logic_CC(tmp);
11182 break;
11183 case 0xe: /* bic */
11184 tcg_gen_andc_i32(tmp, tmp, tmp2);
11185 if (!s->condexec_mask)
11186 gen_logic_CC(tmp);
11187 break;
11188 case 0xf: /* mvn */
11189 tcg_gen_not_i32(tmp2, tmp2);
11190 if (!s->condexec_mask)
11191 gen_logic_CC(tmp2);
11192 val = 1;
11193 rm = rd;
11194 break;
11196 if (rd != 16) {
11197 if (val) {
11198 store_reg(s, rm, tmp2);
11199 if (op != 0xf)
11200 tcg_temp_free_i32(tmp);
11201 } else {
11202 store_reg(s, rd, tmp);
11203 tcg_temp_free_i32(tmp2);
11205 } else {
11206 tcg_temp_free_i32(tmp);
11207 tcg_temp_free_i32(tmp2);
11209 break;
11211 case 5:
11212 /* load/store register offset. */
11213 rd = insn & 7;
11214 rn = (insn >> 3) & 7;
11215 rm = (insn >> 6) & 7;
11216 op = (insn >> 9) & 7;
11217 addr = load_reg(s, rn);
11218 tmp = load_reg(s, rm);
11219 tcg_gen_add_i32(addr, addr, tmp);
11220 tcg_temp_free_i32(tmp);
11222 if (op < 3) { /* store */
11223 tmp = load_reg(s, rd);
11224 } else {
11225 tmp = tcg_temp_new_i32();
11228 switch (op) {
11229 case 0: /* str */
11230 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11231 break;
11232 case 1: /* strh */
11233 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
11234 break;
11235 case 2: /* strb */
11236 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
11237 break;
11238 case 3: /* ldrsb */
11239 gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
11240 break;
11241 case 4: /* ldr */
11242 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11243 break;
11244 case 5: /* ldrh */
11245 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
11246 break;
11247 case 6: /* ldrb */
11248 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
11249 break;
11250 case 7: /* ldrsh */
11251 gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
11252 break;
11254 if (op >= 3) { /* load */
11255 store_reg(s, rd, tmp);
11256 } else {
11257 tcg_temp_free_i32(tmp);
11259 tcg_temp_free_i32(addr);
11260 break;
11262 case 6:
11263 /* load/store word immediate offset */
11264 rd = insn & 7;
11265 rn = (insn >> 3) & 7;
11266 addr = load_reg(s, rn);
11267 val = (insn >> 4) & 0x7c;
11268 tcg_gen_addi_i32(addr, addr, val);
11270 if (insn & (1 << 11)) {
11271 /* load */
11272 tmp = tcg_temp_new_i32();
11273 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11274 store_reg(s, rd, tmp);
11275 } else {
11276 /* store */
11277 tmp = load_reg(s, rd);
11278 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11279 tcg_temp_free_i32(tmp);
11281 tcg_temp_free_i32(addr);
11282 break;
11284 case 7:
11285 /* load/store byte immediate offset */
11286 rd = insn & 7;
11287 rn = (insn >> 3) & 7;
11288 addr = load_reg(s, rn);
11289 val = (insn >> 6) & 0x1f;
11290 tcg_gen_addi_i32(addr, addr, val);
11292 if (insn & (1 << 11)) {
11293 /* load */
11294 tmp = tcg_temp_new_i32();
11295 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
11296 store_reg(s, rd, tmp);
11297 } else {
11298 /* store */
11299 tmp = load_reg(s, rd);
11300 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
11301 tcg_temp_free_i32(tmp);
11303 tcg_temp_free_i32(addr);
11304 break;
11306 case 8:
11307 /* load/store halfword immediate offset */
11308 rd = insn & 7;
11309 rn = (insn >> 3) & 7;
11310 addr = load_reg(s, rn);
11311 val = (insn >> 5) & 0x3e;
11312 tcg_gen_addi_i32(addr, addr, val);
11314 if (insn & (1 << 11)) {
11315 /* load */
11316 tmp = tcg_temp_new_i32();
11317 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
11318 store_reg(s, rd, tmp);
11319 } else {
11320 /* store */
11321 tmp = load_reg(s, rd);
11322 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
11323 tcg_temp_free_i32(tmp);
11325 tcg_temp_free_i32(addr);
11326 break;
11328 case 9:
11329 /* load/store from stack */
11330 rd = (insn >> 8) & 7;
11331 addr = load_reg(s, 13);
11332 val = (insn & 0xff) * 4;
11333 tcg_gen_addi_i32(addr, addr, val);
11335 if (insn & (1 << 11)) {
11336 /* load */
11337 tmp = tcg_temp_new_i32();
11338 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11339 store_reg(s, rd, tmp);
11340 } else {
11341 /* store */
11342 tmp = load_reg(s, rd);
11343 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11344 tcg_temp_free_i32(tmp);
11346 tcg_temp_free_i32(addr);
11347 break;
11349 case 10:
11350 /* add to high reg */
11351 rd = (insn >> 8) & 7;
11352 if (insn & (1 << 11)) {
11353 /* SP */
11354 tmp = load_reg(s, 13);
11355 } else {
11356 /* PC. bit 1 is ignored. */
11357 tmp = tcg_temp_new_i32();
11358 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
11360 val = (insn & 0xff) * 4;
11361 tcg_gen_addi_i32(tmp, tmp, val);
11362 store_reg(s, rd, tmp);
11363 break;
11365 case 11:
11366 /* misc */
11367 op = (insn >> 8) & 0xf;
11368 switch (op) {
11369 case 0:
11370 /* adjust stack pointer */
11371 tmp = load_reg(s, 13);
11372 val = (insn & 0x7f) * 4;
11373 if (insn & (1 << 7))
11374 val = -(int32_t)val;
11375 tcg_gen_addi_i32(tmp, tmp, val);
11376 store_reg(s, 13, tmp);
11377 break;
11379 case 2: /* sign/zero extend. */
11380 ARCH(6);
11381 rd = insn & 7;
11382 rm = (insn >> 3) & 7;
11383 tmp = load_reg(s, rm);
11384 switch ((insn >> 6) & 3) {
11385 case 0: gen_sxth(tmp); break;
11386 case 1: gen_sxtb(tmp); break;
11387 case 2: gen_uxth(tmp); break;
11388 case 3: gen_uxtb(tmp); break;
11390 store_reg(s, rd, tmp);
11391 break;
11392 case 4: case 5: case 0xc: case 0xd:
11393 /* push/pop */
11394 addr = load_reg(s, 13);
11395 if (insn & (1 << 8))
11396 offset = 4;
11397 else
11398 offset = 0;
11399 for (i = 0; i < 8; i++) {
11400 if (insn & (1 << i))
11401 offset += 4;
11403 if ((insn & (1 << 11)) == 0) {
11404 tcg_gen_addi_i32(addr, addr, -offset);
11406 for (i = 0; i < 8; i++) {
11407 if (insn & (1 << i)) {
11408 if (insn & (1 << 11)) {
11409 /* pop */
11410 tmp = tcg_temp_new_i32();
11411 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11412 store_reg(s, i, tmp);
11413 } else {
11414 /* push */
11415 tmp = load_reg(s, i);
11416 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11417 tcg_temp_free_i32(tmp);
11419 /* advance to the next address. */
11420 tcg_gen_addi_i32(addr, addr, 4);
11423 TCGV_UNUSED_I32(tmp);
11424 if (insn & (1 << 8)) {
11425 if (insn & (1 << 11)) {
11426 /* pop pc */
11427 tmp = tcg_temp_new_i32();
11428 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11429 /* don't set the pc until the rest of the instruction
11430 has completed */
11431 } else {
11432 /* push lr */
11433 tmp = load_reg(s, 14);
11434 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11435 tcg_temp_free_i32(tmp);
11437 tcg_gen_addi_i32(addr, addr, 4);
11439 if ((insn & (1 << 11)) == 0) {
11440 tcg_gen_addi_i32(addr, addr, -offset);
11442 /* write back the new stack pointer */
11443 store_reg(s, 13, addr);
11444 /* set the new PC value */
11445 if ((insn & 0x0900) == 0x0900) {
11446 store_reg_from_load(s, 15, tmp);
11447 }
11448 break;
11450 case 1: case 3: case 9: case 11: /* cbz/cbnz */
11451 rm = insn & 7;
11452 tmp = load_reg(s, rm);
11453 s->condlabel = gen_new_label();
11454 s->condjmp = 1;
11455 if (insn & (1 << 11))
11456 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
11457 else
11458 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
11459 tcg_temp_free_i32(tmp);
11460 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11461 val = (uint32_t)s->pc + 2;
11462 val += offset;
11463 gen_jmp(s, val);
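/* Worked example: 0xb1a3 is CBZ r3, <label>, with i:imm5 = 0b010100, so
 * offset = 40 and the branch target is s->pc + 2 + 40 (i.e. the insn
 * address + 4 + 40), matching the architectural behaviour.
 */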
11464 break;
11466 case 15: /* IT, nop-hint. */
11467 if ((insn & 0xf) == 0) {
11468 gen_nop_hint(s, (insn >> 4) & 0xf);
11469 break;
11470 }
11471 /* If Then. */
11472 s->condexec_cond = (insn >> 4) & 0xe;
11473 s->condexec_mask = insn & 0x1f;
11474 /* No actual code generated for this insn, just setup state. */
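/* For example: IT EQ is 0xbf08, which leaves condexec_cond = 0 (EQ) and
 * condexec_mask = 0x08; the translator loop then advances this state as
 * each insn in the IT block is emitted.
 */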
11475 break;
11477 case 0xe: /* bkpt */
11479 int imm8 = extract32(insn, 0, 8);
11480 ARCH(5);
11481 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11482 default_exception_el(s));
11483 break;
11486 case 0xa: /* rev, and hlt */
11488 int op1 = extract32(insn, 6, 2);
11490 if (op1 == 2) {
11491 /* HLT */
11492 int imm6 = extract32(insn, 0, 6);
11494 gen_hlt(s, imm6);
11495 break;
11498 /* Otherwise this is rev */
11499 ARCH(6);
11500 rn = (insn >> 3) & 0x7;
11501 rd = insn & 0x7;
11502 tmp = load_reg(s, rn);
11503 switch (op1) {
11504 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
11505 case 1: gen_rev16(tmp); break;
11506 case 3: gen_revsh(tmp); break;
11507 default:
11508 g_assert_not_reached();
11510 store_reg(s, rd, tmp);
11511 break;
11514 case 6:
11515 switch ((insn >> 5) & 7) {
11516 case 2:
11517 /* setend */
11518 ARCH(6);
11519 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11520 gen_helper_setend(cpu_env);
11521 s->is_jmp = DISAS_UPDATE;
11523 break;
11524 case 3:
11525 /* cps */
11526 ARCH(6);
11527 if (IS_USER(s)) {
11528 break;
11530 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11531 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11532 /* FAULTMASK */
11533 if (insn & 1) {
11534 addr = tcg_const_i32(19);
11535 gen_helper_v7m_msr(cpu_env, addr, tmp);
11536 tcg_temp_free_i32(addr);
11538 /* PRIMASK */
11539 if (insn & 2) {
11540 addr = tcg_const_i32(16);
11541 gen_helper_v7m_msr(cpu_env, addr, tmp);
11542 tcg_temp_free_i32(addr);
11544 tcg_temp_free_i32(tmp);
11545 gen_lookup_tb(s);
11546 } else {
11547 if (insn & (1 << 4)) {
11548 shift = CPSR_A | CPSR_I | CPSR_F;
11549 } else {
11550 shift = 0;
11552 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
11554 break;
11555 default:
11556 goto undef;
11558 break;
11560 default:
11561 goto undef;
11563 break;
11565 case 12:
11567 /* load/store multiple */
11568 TCGv_i32 loaded_var;
11569 TCGV_UNUSED_I32(loaded_var);
11570 rn = (insn >> 8) & 0x7;
11571 addr = load_reg(s, rn);
11572 for (i = 0; i < 8; i++) {
11573 if (insn & (1 << i)) {
11574 if (insn & (1 << 11)) {
11575 /* load */
11576 tmp = tcg_temp_new_i32();
11577 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11578 if (i == rn) {
11579 loaded_var = tmp;
11580 } else {
11581 store_reg(s, i, tmp);
11583 } else {
11584 /* store */
11585 tmp = load_reg(s, i);
11586 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11587 tcg_temp_free_i32(tmp);
11589 /* advance to the next address */
11590 tcg_gen_addi_i32(addr, addr, 4);
11593 if ((insn & (1 << rn)) == 0) {
11594 /* base reg not in list: base register writeback */
11595 store_reg(s, rn, addr);
11596 } else {
11597 /* base reg in list: if load, complete it now */
11598 if (insn & (1 << 11)) {
11599 store_reg(s, rn, loaded_var);
11601 tcg_temp_free_i32(addr);
11603 break;
11605 case 13:
11606 /* conditional branch or swi */
11607 cond = (insn >> 8) & 0xf;
11608 if (cond == 0xe)
11609 goto undef;
11611 if (cond == 0xf) {
11612 /* swi */
11613 gen_set_pc_im(s, s->pc);
11614 s->svc_imm = extract32(insn, 0, 8);
11615 s->is_jmp = DISAS_SWI;
11616 break;
11618 /* generate a conditional jump to next instruction */
11619 s->condlabel = gen_new_label();
11620 arm_gen_test_cc(cond ^ 1, s->condlabel);
11621 s->condjmp = 1;
11623 /* jump to the offset */
11624 val = (uint32_t)s->pc + 2;
11625 offset = ((int32_t)insn << 24) >> 24;
11626 val += offset << 1;
11627 gen_jmp(s, val);
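/* Illustrative case: 0xd0fe is "beq ." -- cond = 0 (EQ) and imm8 = 0xfe sign
 * extends to -2, so val = s->pc + 2 - 4, i.e. the address of this insn.
 */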
11628 break;
11630 case 14:
11631 if (insn & (1 << 11)) {
11632 if (disas_thumb2_insn(env, s, insn))
11633 goto undef32;
11634 break;
11636 /* unconditional branch */
11637 val = (uint32_t)s->pc;
11638 offset = ((int32_t)insn << 21) >> 21;
11639 val += (offset << 1) + 2;
11640 gen_jmp(s, val);
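/* Illustrative case: 0xe7fe is the classic "b ." idle loop -- the 11-bit
 * immediate 0x7fe sign extends to -2, so val = s->pc + (-2 << 1) + 2,
 * i.e. back to the address of the branch itself.
 */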
11641 break;
11643 case 15:
11644 if (disas_thumb2_insn(env, s, insn))
11645 goto undef32;
11646 break;
11648 return;
11649 undef32:
11650 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11651 default_exception_el(s));
11652 return;
11653 illegal_op:
11654 undef:
11655 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11656 default_exception_el(s));
11659 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11660 {
11661 /* Return true if the insn at dc->pc might cross a page boundary.
11662 * (False positives are OK, false negatives are not.)
11663 */
11664 uint16_t insn;
11666 if ((s->pc & 3) == 0) {
11667 /* At a 4-aligned address we can't be crossing a page */
11668 return false;
11669 }
11671 /* This must be a Thumb insn */
11672 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
11674 if ((insn >> 11) >= 0x1d) {
11675 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
11676 * first half of a 32-bit Thumb insn. Thumb-1 cores might
11677 * end up actually treating this as two 16-bit insns (see the
11678 * code at the start of disas_thumb2_insn()) but we don't bother
11679 * to check for that as it is unlikely, and false positives here
11680 * are harmless.
11681 */
11682 return true;
11683 }
11684 /* Definitely a 16-bit insn, can't be crossing a page. */
11685 return false;
11686 }
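/* Examples of the check above: 0xf3bf (first halfword of a 32-bit barrier
 * insn) has (insn >> 11) == 0x1e, so it reports a possible crossing;
 * 0x4770 (bx lr) has (insn >> 11) == 0x08, so it cannot cross.
 */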
11688 /* generate intermediate code for basic block 'tb'. */
11689 void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
11691 ARMCPU *cpu = arm_env_get_cpu(env);
11692 CPUState *cs = CPU(cpu);
11693 DisasContext dc1, *dc = &dc1;
11694 target_ulong pc_start;
11695 target_ulong next_page_start;
11696 int num_insns;
11697 int max_insns;
11698 bool end_of_page;
11700 /* generate intermediate code */
11702 /* The A64 decoder has its own top level loop, because it doesn't need
11703 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
11705 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
11706 gen_intermediate_code_a64(cpu, tb);
11707 return;
11710 pc_start = tb->pc;
11712 dc->tb = tb;
11714 dc->is_jmp = DISAS_NEXT;
11715 dc->pc = pc_start;
11716 dc->singlestep_enabled = cs->singlestep_enabled;
11717 dc->condjmp = 0;
11719 dc->aarch64 = 0;
11720 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11721 * there is no secure EL1, so we route exceptions to EL3.
11723 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11724 !arm_el_is_aa64(env, 3);
11725 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
11726 dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags);
11727 dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
11728 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
11729 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
11730 dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
11731 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
11732 #if !defined(CONFIG_USER_ONLY)
11733 dc->user = (dc->current_el == 0);
11734 #endif
11735 dc->ns = ARM_TBFLAG_NS(tb->flags);
11736 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
11737 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
11738 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
11739 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
11740 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
11741 dc->cp_regs = cpu->cp_regs;
11742 dc->features = env->features;
11744 /* Single step state. The code-generation logic here is:
11745 * SS_ACTIVE == 0:
11746 * generate code with no special handling for single-stepping (except
11747 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11748 * this happens anyway because those changes are all system register or
11749 * PSTATE writes).
11750 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11751 * emit code for one insn
11752 * emit code to clear PSTATE.SS
11753 * emit code to generate software step exception for completed step
11754 * end TB (as usual for having generated an exception)
11755 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11756 * emit code to generate a software step exception
11757 * end the TB
11758 */
11759 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
11760 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
11761 dc->is_ldex = false;
11762 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
11764 cpu_F0s = tcg_temp_new_i32();
11765 cpu_F1s = tcg_temp_new_i32();
11766 cpu_F0d = tcg_temp_new_i64();
11767 cpu_F1d = tcg_temp_new_i64();
11768 cpu_V0 = cpu_F0d;
11769 cpu_V1 = cpu_F1d;
11770 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
11771 cpu_M0 = tcg_temp_new_i64();
11772 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
11773 num_insns = 0;
11774 max_insns = tb->cflags & CF_COUNT_MASK;
11775 if (max_insns == 0) {
11776 max_insns = CF_COUNT_MASK;
11778 if (max_insns > TCG_MAX_INSNS) {
11779 max_insns = TCG_MAX_INSNS;
11782 gen_tb_start(tb);
11784 tcg_clear_temp_count();
11786 /* A note on handling of the condexec (IT) bits:
11788 * We want to avoid the overhead of having to write the updated condexec
11789 * bits back to the CPUARMState for every instruction in an IT block. So:
11790 * (1) if the condexec bits are not already zero then we write
11791 * zero back into the CPUARMState now. This avoids complications trying
11792 * to do it at the end of the block. (For example if we don't do this
11793 * it's hard to identify whether we can safely skip writing condexec
11794 * at the end of the TB, which we definitely want to do for the case
11795 * where a TB doesn't do anything with the IT state at all.)
11796 * (2) if we are going to leave the TB then we call gen_set_condexec()
11797 * which will write the correct value into CPUARMState if zero is wrong.
11798 * This is done both for leaving the TB at the end, and for leaving
11799 * it because of an exception we know will happen, which is done in
11800 * gen_exception_insn(). The latter is necessary because we need to
11801 * leave the TB with the PC/IT state just prior to execution of the
11802 * instruction which caused the exception.
11803 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
11804 * then the CPUARMState will be wrong and we need to reset it.
11805 * This is handled in the same way as restoration of the
11806 * PC in these situations; we save the value of the condexec bits
11807 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
11808 * then uses this to restore them after an exception.
11810 * Note that there are no instructions which can read the condexec
11811 * bits, and none which can write non-static values to them, so
11812 * we don't need to care about whether CPUARMState is correct in the
11813 * middle of a TB.
11814 */
11816 /* Reset the conditional execution bits immediately. This avoids
11817 complications trying to do it at the end of the block. */
11818 if (dc->condexec_mask || dc->condexec_cond)
11819 {
11820 TCGv_i32 tmp = tcg_temp_new_i32();
11821 tcg_gen_movi_i32(tmp, 0);
11822 store_cpu_field(tmp, condexec_bits);
11823 }
11824 do {
11825 tcg_gen_insn_start(dc->pc,
11826 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
11828 num_insns++;
11830 #ifdef CONFIG_USER_ONLY
11831 /* Intercept jump to the magic kernel page. */
11832 if (dc->pc >= 0xffff0000) {
11833 /* We always get here via a jump, so know we are not in a
11834 conditional execution block. */
11835 gen_exception_internal(EXCP_KERNEL_TRAP);
11836 dc->is_jmp = DISAS_EXC;
11837 break;
11839 #else
11840 if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
11841 /* We always get here via a jump, so know we are not in a
11842 conditional execution block. */
11843 gen_exception_internal(EXCP_EXCEPTION_EXIT);
11844 dc->is_jmp = DISAS_EXC;
11845 break;
11847 #endif
11849 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
11850 CPUBreakpoint *bp;
11851 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
11852 if (bp->pc == dc->pc) {
11853 if (bp->flags & BP_CPU) {
11854 gen_set_condexec(dc);
11855 gen_set_pc_im(dc, dc->pc);
11856 gen_helper_check_breakpoints(cpu_env);
11857 /* End the TB early; it's likely not going to be executed */
11858 dc->is_jmp = DISAS_UPDATE;
11859 } else {
11860 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
11861 /* The address covered by the breakpoint must be
11862 included in [tb->pc, tb->pc + tb->size) in order
11863 for it to be properly cleared -- thus we
11864 increment the PC here so that the logic setting
11865 tb->size below does the right thing. */
11866 /* TODO: Advance PC by correct instruction length to
11867 * avoid disassembler error messages */
11868 dc->pc += 2;
11869 goto done_generating;
11870 }
11871 break;
11872 }
11873 }
11874 }
11876 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
11877 gen_io_start();
11880 if (dc->ss_active && !dc->pstate_ss) {
11881 /* Singlestep state is Active-pending.
11882 * If we're in this state at the start of a TB then either
11883 * a) we just took an exception to an EL which is being debugged
11884 * and this is the first insn in the exception handler
11885 * b) debug exceptions were masked and we just unmasked them
11886 * without changing EL (eg by clearing PSTATE.D)
11887 * In either case we're going to take a swstep exception in the
11888 * "did not step an insn" case, and so the syndrome ISV and EX
11889 * bits should be zero.
11890 */
11891 assert(num_insns == 1);
11892 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
11893 default_exception_el(dc));
11894 goto done_generating;
11897 if (dc->thumb) {
11898 disas_thumb_insn(env, dc);
11899 if (dc->condexec_mask) {
11900 dc->condexec_cond = (dc->condexec_cond & 0xe)
11901 | ((dc->condexec_mask >> 4) & 1);
11902 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
11903 if (dc->condexec_mask == 0) {
11904 dc->condexec_cond = 0;
11907 } else {
11908 unsigned int insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
11909 dc->pc += 4;
11910 disas_arm_insn(dc, insn);
11913 if (dc->condjmp && !dc->is_jmp) {
11914 gen_set_label(dc->condlabel);
11915 dc->condjmp = 0;
11918 if (tcg_check_temp_count()) {
11919 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
11920 dc->pc);
11923 /* Translation stops when a conditional branch is encountered.
11924 * Otherwise the subsequent code could get translated several times.
11925 * Also stop translation when a page boundary is reached. This
11926 * ensures prefetch aborts occur at the right place. */
11928 /* We want to stop the TB if the next insn starts in a new page,
11929 * or if it spans between this page and the next. This means that
11930 * if we're looking at the last halfword in the page we need to
11931 * see if it's a 16-bit Thumb insn (which will fit in this TB)
11932 * or a 32-bit Thumb insn (which won't).
11933 * This is to avoid generating a silly TB with a single 16-bit insn
11934 * in it at the end of this page (which would execute correctly
11935 * but isn't very efficient).
11936 */
11937 end_of_page = (dc->pc >= next_page_start) ||
11938 ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));
11940 } while (!dc->is_jmp && !tcg_op_buf_full() &&
11941 !cs->singlestep_enabled &&
11942 !singlestep &&
11943 !dc->ss_active &&
11944 !end_of_page &&
11945 num_insns < max_insns);
11947 if (tb->cflags & CF_LAST_IO) {
11948 if (dc->condjmp) {
11949 /* FIXME: This can theoretically happen with self-modifying
11950 code. */
11951 cpu_abort(cs, "IO on conditional branch instruction");
11953 gen_io_end();
11956 /* At this stage dc->condjmp will only be set when the skipped
11957 instruction was a conditional branch or trap, and the PC has
11958 already been written. */
11959 if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
11960 /* Unconditional and "condition passed" instruction codepath. */
11961 gen_set_condexec(dc);
11962 switch (dc->is_jmp) {
11963 case DISAS_SWI:
11964 gen_ss_advance(dc);
11965 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11966 default_exception_el(dc));
11967 break;
11968 case DISAS_HVC:
11969 gen_ss_advance(dc);
11970 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
11971 break;
11972 case DISAS_SMC:
11973 gen_ss_advance(dc);
11974 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
11975 break;
11976 case DISAS_NEXT:
11977 case DISAS_UPDATE:
11978 gen_set_pc_im(dc, dc->pc);
11979 /* fall through */
11980 default:
11981 if (dc->ss_active) {
11982 gen_step_complete_exception(dc);
11983 } else {
11984 /* FIXME: Single stepping a WFI insn will not halt
11985 the CPU. */
11986 gen_exception_internal(EXCP_DEBUG);
11989 if (dc->condjmp) {
11990 /* "Condition failed" instruction codepath. */
11991 gen_set_label(dc->condlabel);
11992 gen_set_condexec(dc);
11993 gen_set_pc_im(dc, dc->pc);
11994 if (dc->ss_active) {
11995 gen_step_complete_exception(dc);
11996 } else {
11997 gen_exception_internal(EXCP_DEBUG);
12000 } else {
12001 /* While branches must always occur at the end of an IT block,
12002 there are a few other things that can cause us to terminate
12003 the TB in the middle of an IT block:
12004 - Exception generating instructions (bkpt, swi, undefined).
12005 - Page boundaries.
12006 - Hardware watchpoints.
12007 Hardware breakpoints have already been handled and skip this code.
12008 */
12009 gen_set_condexec(dc);
12010 switch(dc->is_jmp) {
12011 case DISAS_NEXT:
12012 gen_goto_tb(dc, 1, dc->pc);
12013 break;
12014 case DISAS_UPDATE:
12015 gen_set_pc_im(dc, dc->pc);
12016 /* fall through */
12017 case DISAS_JUMP:
12018 default:
12019 /* indicate that the hash table must be used to find the next TB */
12020 tcg_gen_exit_tb(0);
12021 break;
12022 case DISAS_TB_JUMP:
12023 /* nothing more to generate */
12024 break;
12025 case DISAS_WFI:
12026 gen_helper_wfi(cpu_env);
12027 /* The helper doesn't necessarily throw an exception, but we
12028 * must go back to the main loop to check for interrupts anyway.
12029 */
12030 tcg_gen_exit_tb(0);
12031 break;
12032 case DISAS_WFE:
12033 gen_helper_wfe(cpu_env);
12034 break;
12035 case DISAS_YIELD:
12036 gen_helper_yield(cpu_env);
12037 break;
12038 case DISAS_SWI:
12039 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12040 default_exception_el(dc));
12041 break;
12042 case DISAS_HVC:
12043 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
12044 break;
12045 case DISAS_SMC:
12046 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
12047 break;
12049 if (dc->condjmp) {
12050 gen_set_label(dc->condlabel);
12051 gen_set_condexec(dc);
12052 gen_goto_tb(dc, 1, dc->pc);
12053 dc->condjmp = 0;
12057 done_generating:
12058 gen_tb_end(tb, num_insns);
12060 #ifdef DEBUG_DISAS
12061 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
12062 qemu_log_in_addr_range(pc_start)) {
12063 qemu_log("----------------\n");
12064 qemu_log("IN: %s\n", lookup_symbol(pc_start));
12065 log_target_disas(cs, pc_start, dc->pc - pc_start,
12066 dc->thumb | (dc->sctlr_b << 1));
12067 qemu_log("\n");
12069 #endif
12070 tb->size = dc->pc - pc_start;
12071 tb->icount = num_insns;
12072 }
12074 static const char *cpu_mode_names[16] = {
12075 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
12076 "???", "???", "hyp", "und", "???", "???", "???", "sys"
12077 };
12079 void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
12080 int flags)
12081 {
12082 ARMCPU *cpu = ARM_CPU(cs);
12083 CPUARMState *env = &cpu->env;
12084 int i;
12085 uint32_t psr;
12086 const char *ns_status;
12088 if (is_a64(env)) {
12089 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
12090 return;
12093 for (i = 0; i < 16; i++) {
12094 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
12095 if ((i % 4) == 3)
12096 cpu_fprintf(f, "\n");
12097 else
12098 cpu_fprintf(f, " ");
12100 psr = cpsr_read(env);
12102 if (arm_feature(env, ARM_FEATURE_EL3) &&
12103 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
12104 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
12105 } else {
12106 ns_status = "";
12109 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
12110 psr,
12111 psr & (1 << 31) ? 'N' : '-',
12112 psr & (1 << 30) ? 'Z' : '-',
12113 psr & (1 << 29) ? 'C' : '-',
12114 psr & (1 << 28) ? 'V' : '-',
12115 psr & CPSR_T ? 'T' : 'A',
12116 ns_status,
12117 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
12119 if (flags & CPU_DUMP_FPU) {
12120 int numvfpregs = 0;
12121 if (arm_feature(env, ARM_FEATURE_VFP)) {
12122 numvfpregs += 16;
12124 if (arm_feature(env, ARM_FEATURE_VFP3)) {
12125 numvfpregs += 16;
12127 for (i = 0; i < numvfpregs; i++) {
12128 uint64_t v = float64_val(env->vfp.regs[i]);
12129 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
12130 i * 2, (uint32_t)v,
12131 i * 2 + 1, (uint32_t)(v >> 32),
12132 i, v);
12134 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
12138 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12139 target_ulong *data)
12140 {
12141 if (is_a64(env)) {
12142 env->pc = data[0];
12143 env->condexec_bits = 0;
12144 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
12145 } else {
12146 env->regs[15] = data[0];
12147 env->condexec_bits = data[1];
12148 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;