[qemu.git] / target-arm / translate.c
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "tcg-op.h"
27 #include "qemu/log.h"
28 #include "qemu/bitops.h"
29 #include "arm_ldst.h"
31 #include "exec/helper-proto.h"
32 #include "exec/helper-gen.h"
34 #include "trace-tcg.h"
35 #include "exec/log.h"
38 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
39 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
40 /* currently all emulated v5 cores are also v5TE, so don't bother */
41 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
42 #define ENABLE_ARCH_5J 0
43 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
44 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
45 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
46 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
47 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
49 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
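/* For example, a decode path that requires Thumb-2 can start with ARCH(6T2);
 * on a core without that feature the decoder branches to illegal_op and an
 * UNDEFINED exception is raised instead of translating the instruction. */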
51 #include "translate.h"
53 #if defined(CONFIG_USER_ONLY)
54 #define IS_USER(s) 1
55 #else
56 #define IS_USER(s) (s->user)
57 #endif
59 TCGv_env cpu_env;
60 /* We reuse the same 64-bit temporaries for efficiency. */
61 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
62 static TCGv_i32 cpu_R[16];
63 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
64 TCGv_i64 cpu_exclusive_addr;
65 TCGv_i64 cpu_exclusive_val;
66 #ifdef CONFIG_USER_ONLY
67 TCGv_i64 cpu_exclusive_test;
68 TCGv_i32 cpu_exclusive_info;
69 #endif
71 /* FIXME: These should be removed. */
72 static TCGv_i32 cpu_F0s, cpu_F1s;
73 static TCGv_i64 cpu_F0d, cpu_F1d;
75 #include "exec/gen-icount.h"
77 static const char *regnames[] =
78 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
79 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
81 /* initialize TCG globals. */
82 void arm_translate_init(void)
84 int i;
86 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
88 for (i = 0; i < 16; i++) {
89 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
90 offsetof(CPUARMState, regs[i]),
91 regnames[i]);
93 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
94 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
95 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
96 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
98 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
99 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
100 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
101 offsetof(CPUARMState, exclusive_val), "exclusive_val");
102 #ifdef CONFIG_USER_ONLY
103 cpu_exclusive_test = tcg_global_mem_new_i64(cpu_env,
104 offsetof(CPUARMState, exclusive_test), "exclusive_test");
105 cpu_exclusive_info = tcg_global_mem_new_i32(cpu_env,
106 offsetof(CPUARMState, exclusive_info), "exclusive_info");
107 #endif
109 a64_translate_init();
112 static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
114 /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
115 * insns:
116 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
117 * otherwise, access as if at PL0.
119 switch (s->mmu_idx) {
120 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
121 case ARMMMUIdx_S12NSE0:
122 case ARMMMUIdx_S12NSE1:
123 return ARMMMUIdx_S12NSE0;
124 case ARMMMUIdx_S1E3:
125 case ARMMMUIdx_S1SE0:
126 case ARMMMUIdx_S1SE1:
127 return ARMMMUIdx_S1SE0;
128 case ARMMMUIdx_S2NS:
129 default:
130 g_assert_not_reached();
134 static inline TCGv_i32 load_cpu_offset(int offset)
136 TCGv_i32 tmp = tcg_temp_new_i32();
137 tcg_gen_ld_i32(tmp, cpu_env, offset);
138 return tmp;
141 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
143 static inline void store_cpu_offset(TCGv_i32 var, int offset)
145 tcg_gen_st_i32(var, cpu_env, offset);
146 tcg_temp_free_i32(var);
149 #define store_cpu_field(var, name) \
150 store_cpu_offset(var, offsetof(CPUARMState, name))
152 /* Set a variable to the value of a CPU register. */
153 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
155 if (reg == 15) {
156 uint32_t addr;
157 /* normally, since we updated PC, we need only to add one insn */
158 if (s->thumb)
159 addr = (long)s->pc + 2;
160 else
161 addr = (long)s->pc + 4;
162 tcg_gen_movi_i32(var, addr);
163 } else {
164 tcg_gen_mov_i32(var, cpu_R[reg]);
168 /* Create a new temporary and set it to the value of a CPU register. */
169 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
171 TCGv_i32 tmp = tcg_temp_new_i32();
172 load_reg_var(s, tmp, reg);
173 return tmp;
176 /* Set a CPU register. The source must be a temporary and will be
177 marked as dead. */
178 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
180 if (reg == 15) {
181 tcg_gen_andi_i32(var, var, ~1);
182 s->is_jmp = DISAS_JUMP;
184 tcg_gen_mov_i32(cpu_R[reg], var);
185 tcg_temp_free_i32(var);
188 /* Value extensions. */
189 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
190 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
191 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
192 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
194 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
195 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
198 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
200 TCGv_i32 tmp_mask = tcg_const_i32(mask);
201 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
202 tcg_temp_free_i32(tmp_mask);
204 /* Set NZCV flags from the high 4 bits of var. */
205 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
207 static void gen_exception_internal(int excp)
209 TCGv_i32 tcg_excp = tcg_const_i32(excp);
211 assert(excp_is_internal(excp));
212 gen_helper_exception_internal(cpu_env, tcg_excp);
213 tcg_temp_free_i32(tcg_excp);
216 static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
218 TCGv_i32 tcg_excp = tcg_const_i32(excp);
219 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
220 TCGv_i32 tcg_el = tcg_const_i32(target_el);
222 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
223 tcg_syn, tcg_el);
225 tcg_temp_free_i32(tcg_el);
226 tcg_temp_free_i32(tcg_syn);
227 tcg_temp_free_i32(tcg_excp);
230 static void gen_ss_advance(DisasContext *s)
232 /* If the singlestep state is Active-not-pending, advance to
233 * Active-pending.
235 if (s->ss_active) {
236 s->pstate_ss = 0;
237 gen_helper_clear_pstate_ss(cpu_env);
241 static void gen_step_complete_exception(DisasContext *s)
243 /* We have just completed a step of an insn. Move from Active-not-pending
244 * to Active-pending, and then also take the swstep exception.
245 * This corresponds to making the (IMPDEF) choice to prioritize
246 * swstep exceptions over asynchronous exceptions taken to an exception
247 * level where debug is disabled. This choice has the advantage that
248 * we do not need to maintain internal state corresponding to the
249 * ISV/EX syndrome bits between completion of the step and generation
250 * of the exception, and our syndrome information is always correct.
252 gen_ss_advance(s);
253 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
254 default_exception_el(s));
255 s->is_jmp = DISAS_EXC;
258 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
260 TCGv_i32 tmp1 = tcg_temp_new_i32();
261 TCGv_i32 tmp2 = tcg_temp_new_i32();
262 tcg_gen_ext16s_i32(tmp1, a);
263 tcg_gen_ext16s_i32(tmp2, b);
264 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
265 tcg_temp_free_i32(tmp2);
266 tcg_gen_sari_i32(a, a, 16);
267 tcg_gen_sari_i32(b, b, 16);
268 tcg_gen_mul_i32(b, b, a);
269 tcg_gen_mov_i32(a, tmp1);
270 tcg_temp_free_i32(tmp1);
273 /* Byteswap each halfword. */
274 static void gen_rev16(TCGv_i32 var)
276 TCGv_i32 tmp = tcg_temp_new_i32();
277 tcg_gen_shri_i32(tmp, var, 8);
278 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
279 tcg_gen_shli_i32(var, var, 8);
280 tcg_gen_andi_i32(var, var, 0xff00ff00);
281 tcg_gen_or_i32(var, var, tmp);
282 tcg_temp_free_i32(tmp);
285 /* Byteswap low halfword and sign extend. */
286 static void gen_revsh(TCGv_i32 var)
288 tcg_gen_ext16u_i32(var, var);
289 tcg_gen_bswap16_i32(var, var);
290 tcg_gen_ext16s_i32(var, var);
293 /* Unsigned bitfield extract. */
294 static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
296 if (shift)
297 tcg_gen_shri_i32(var, var, shift);
298 tcg_gen_andi_i32(var, var, mask);
301 /* Signed bitfield extract. */
302 static void gen_sbfx(TCGv_i32 var, int shift, int width)
304 uint32_t signbit;
306 if (shift)
307 tcg_gen_sari_i32(var, var, shift);
308 if (shift + width < 32) {
309 signbit = 1u << (width - 1);
310 tcg_gen_andi_i32(var, var, (1u << width) - 1);
311 tcg_gen_xori_i32(var, var, signbit);
312 tcg_gen_subi_i32(var, var, signbit);
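/* Worked example: extracting a 4-bit field at bit 0 from 0x0000000c
 * (binary 1100, i.e. -4 as a 4-bit signed value): signbit is 0x8, the
 * mask leaves 0xc, the xor gives 0x4 and subtracting the sign bit yields
 * 0xfffffffc, i.e. -4. The xor/sub pair is a branch-free sign extension
 * of the masked field. */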
316 /* Return (b << 32) + a. Mark inputs as dead */
317 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
319 TCGv_i64 tmp64 = tcg_temp_new_i64();
321 tcg_gen_extu_i32_i64(tmp64, b);
322 tcg_temp_free_i32(b);
323 tcg_gen_shli_i64(tmp64, tmp64, 32);
324 tcg_gen_add_i64(a, tmp64, a);
326 tcg_temp_free_i64(tmp64);
327 return a;
330 /* Return (b << 32) - a. Mark inputs as dead. */
331 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
333 TCGv_i64 tmp64 = tcg_temp_new_i64();
335 tcg_gen_extu_i32_i64(tmp64, b);
336 tcg_temp_free_i32(b);
337 tcg_gen_shli_i64(tmp64, tmp64, 32);
338 tcg_gen_sub_i64(a, tmp64, a);
340 tcg_temp_free_i64(tmp64);
341 return a;
344 /* 32x32->64 multiply. Marks inputs as dead. */
345 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
347 TCGv_i32 lo = tcg_temp_new_i32();
348 TCGv_i32 hi = tcg_temp_new_i32();
349 TCGv_i64 ret;
351 tcg_gen_mulu2_i32(lo, hi, a, b);
352 tcg_temp_free_i32(a);
353 tcg_temp_free_i32(b);
355 ret = tcg_temp_new_i64();
356 tcg_gen_concat_i32_i64(ret, lo, hi);
357 tcg_temp_free_i32(lo);
358 tcg_temp_free_i32(hi);
360 return ret;
363 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
365 TCGv_i32 lo = tcg_temp_new_i32();
366 TCGv_i32 hi = tcg_temp_new_i32();
367 TCGv_i64 ret;
369 tcg_gen_muls2_i32(lo, hi, a, b);
370 tcg_temp_free_i32(a);
371 tcg_temp_free_i32(b);
373 ret = tcg_temp_new_i64();
374 tcg_gen_concat_i32_i64(ret, lo, hi);
375 tcg_temp_free_i32(lo);
376 tcg_temp_free_i32(hi);
378 return ret;
381 /* Swap low and high halfwords. */
382 static void gen_swap_half(TCGv_i32 var)
384 TCGv_i32 tmp = tcg_temp_new_i32();
385 tcg_gen_shri_i32(tmp, var, 16);
386 tcg_gen_shli_i32(var, var, 16);
387 tcg_gen_or_i32(var, var, tmp);
388 tcg_temp_free_i32(tmp);
391 /* Dual 16-bit add. Result is placed in t0; t1 is marked as dead.
392 tmp = (t0 ^ t1) & 0x8000;
393 t0 &= ~0x8000;
394 t1 &= ~0x8000;
395 t0 = (t0 + t1) ^ tmp;
398 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
400 TCGv_i32 tmp = tcg_temp_new_i32();
401 tcg_gen_xor_i32(tmp, t0, t1);
402 tcg_gen_andi_i32(tmp, tmp, 0x8000);
403 tcg_gen_andi_i32(t0, t0, ~0x8000);
404 tcg_gen_andi_i32(t1, t1, ~0x8000);
405 tcg_gen_add_i32(t0, t0, t1);
406 tcg_gen_xor_i32(t0, t0, tmp);
407 tcg_temp_free_i32(tmp);
408 tcg_temp_free_i32(t1);
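/* Clearing bit 15 of both operands before the add prevents any carry out
 * of the low halfword from reaching the high halfword; the final xor with
 * tmp (the xor of the two discarded bits, kept in bit 15) then restores
 * the correct value of bit 15 of the low result. */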
411 /* Set CF to the top bit of var. */
412 static void gen_set_CF_bit31(TCGv_i32 var)
414 tcg_gen_shri_i32(cpu_CF, var, 31);
417 /* Set N and Z flags from var. */
418 static inline void gen_logic_CC(TCGv_i32 var)
420 tcg_gen_mov_i32(cpu_NF, var);
421 tcg_gen_mov_i32(cpu_ZF, var);
424 /* T0 += T1 + CF. */
425 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
427 tcg_gen_add_i32(t0, t0, t1);
428 tcg_gen_add_i32(t0, t0, cpu_CF);
431 /* dest = T0 + T1 + CF. */
432 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
434 tcg_gen_add_i32(dest, t0, t1);
435 tcg_gen_add_i32(dest, dest, cpu_CF);
438 /* dest = T0 - T1 + CF - 1. */
439 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
441 tcg_gen_sub_i32(dest, t0, t1);
442 tcg_gen_add_i32(dest, dest, cpu_CF);
443 tcg_gen_subi_i32(dest, dest, 1);
446 /* dest = T0 + T1. Compute C, N, V and Z flags */
447 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
449 TCGv_i32 tmp = tcg_temp_new_i32();
450 tcg_gen_movi_i32(tmp, 0);
451 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
452 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
453 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
454 tcg_gen_xor_i32(tmp, t0, t1);
455 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
456 tcg_temp_free_i32(tmp);
457 tcg_gen_mov_i32(dest, cpu_NF);
460 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
461 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
463 TCGv_i32 tmp = tcg_temp_new_i32();
464 if (TCG_TARGET_HAS_add2_i32) {
465 tcg_gen_movi_i32(tmp, 0);
466 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
467 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
468 } else {
469 TCGv_i64 q0 = tcg_temp_new_i64();
470 TCGv_i64 q1 = tcg_temp_new_i64();
471 tcg_gen_extu_i32_i64(q0, t0);
472 tcg_gen_extu_i32_i64(q1, t1);
473 tcg_gen_add_i64(q0, q0, q1);
474 tcg_gen_extu_i32_i64(q1, cpu_CF);
475 tcg_gen_add_i64(q0, q0, q1);
476 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
477 tcg_temp_free_i64(q0);
478 tcg_temp_free_i64(q1);
480 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
481 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
482 tcg_gen_xor_i32(tmp, t0, t1);
483 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
484 tcg_temp_free_i32(tmp);
485 tcg_gen_mov_i32(dest, cpu_NF);
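/* When the TCG backend has no add2 op, the fallback above widens the
 * operands and the incoming carry to 64 bits, does one 64-bit add and
 * splits the 33-bit sum: the low word becomes NF (and hence ZF and the
 * destination) and bit 32 becomes the new CF. */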
488 /* dest = T0 - T1. Compute C, N, V and Z flags */
489 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
491 TCGv_i32 tmp;
492 tcg_gen_sub_i32(cpu_NF, t0, t1);
493 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
494 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
495 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
496 tmp = tcg_temp_new_i32();
497 tcg_gen_xor_i32(tmp, t0, t1);
498 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
499 tcg_temp_free_i32(tmp);
500 tcg_gen_mov_i32(dest, cpu_NF);
503 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
504 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
506 TCGv_i32 tmp = tcg_temp_new_i32();
507 tcg_gen_not_i32(tmp, t1);
508 gen_adc_CC(dest, t0, tmp);
509 tcg_temp_free_i32(tmp);
512 #define GEN_SHIFT(name) \
513 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
515 TCGv_i32 tmp1, tmp2, tmp3; \
516 tmp1 = tcg_temp_new_i32(); \
517 tcg_gen_andi_i32(tmp1, t1, 0xff); \
518 tmp2 = tcg_const_i32(0); \
519 tmp3 = tcg_const_i32(0x1f); \
520 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
521 tcg_temp_free_i32(tmp3); \
522 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
523 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
524 tcg_temp_free_i32(tmp2); \
525 tcg_temp_free_i32(tmp1); \
527 GEN_SHIFT(shl)
528 GEN_SHIFT(shr)
529 #undef GEN_SHIFT
531 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
533 TCGv_i32 tmp1, tmp2;
534 tmp1 = tcg_temp_new_i32();
535 tcg_gen_andi_i32(tmp1, t1, 0xff);
536 tmp2 = tcg_const_i32(0x1f);
537 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
538 tcg_temp_free_i32(tmp2);
539 tcg_gen_sar_i32(dest, t0, tmp1);
540 tcg_temp_free_i32(tmp1);
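/* These variable-shift helpers implement the ARM semantics for
 * register-specified shifts: only the bottom byte of the shift register
 * is used, LSL/LSR by more than 31 produce zero (the movcond substitutes
 * a zero source operand), and ASR clamps the amount to 31 so that shifts
 * of 32 or more replicate the sign bit. Flag-setting variants use the
 * shl_cc/shr_cc/sar_cc helpers instead (see gen_arm_shift_reg below). */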
543 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
545 TCGv_i32 c0 = tcg_const_i32(0);
546 TCGv_i32 tmp = tcg_temp_new_i32();
547 tcg_gen_neg_i32(tmp, src);
548 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
549 tcg_temp_free_i32(c0);
550 tcg_temp_free_i32(tmp);
553 static void shifter_out_im(TCGv_i32 var, int shift)
555 if (shift == 0) {
556 tcg_gen_andi_i32(cpu_CF, var, 1);
557 } else {
558 tcg_gen_shri_i32(cpu_CF, var, shift);
559 if (shift != 31) {
560 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
565 /* Shift by immediate. Includes special handling for shift == 0. */
566 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
567 int shift, int flags)
569 switch (shiftop) {
570 case 0: /* LSL */
571 if (shift != 0) {
572 if (flags)
573 shifter_out_im(var, 32 - shift);
574 tcg_gen_shli_i32(var, var, shift);
576 break;
577 case 1: /* LSR */
578 if (shift == 0) {
579 if (flags) {
580 tcg_gen_shri_i32(cpu_CF, var, 31);
582 tcg_gen_movi_i32(var, 0);
583 } else {
584 if (flags)
585 shifter_out_im(var, shift - 1);
586 tcg_gen_shri_i32(var, var, shift);
588 break;
589 case 2: /* ASR */
590 if (shift == 0)
591 shift = 32;
592 if (flags)
593 shifter_out_im(var, shift - 1);
594 if (shift == 32)
595 shift = 31;
596 tcg_gen_sari_i32(var, var, shift);
597 break;
598 case 3: /* ROR/RRX */
599 if (shift != 0) {
600 if (flags)
601 shifter_out_im(var, shift - 1);
602 tcg_gen_rotri_i32(var, var, shift); break;
603 } else {
604 TCGv_i32 tmp = tcg_temp_new_i32();
605 tcg_gen_shli_i32(tmp, cpu_CF, 31);
606 if (flags)
607 shifter_out_im(var, 0);
608 tcg_gen_shri_i32(var, var, 1);
609 tcg_gen_or_i32(var, var, tmp);
610 tcg_temp_free_i32(tmp);
615 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
616 TCGv_i32 shift, int flags)
618 if (flags) {
619 switch (shiftop) {
620 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
621 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
622 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
623 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
625 } else {
626 switch (shiftop) {
627 case 0:
628 gen_shl(var, var, shift);
629 break;
630 case 1:
631 gen_shr(var, var, shift);
632 break;
633 case 2:
634 gen_sar(var, var, shift);
635 break;
636 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
637 tcg_gen_rotr_i32(var, var, shift); break;
640 tcg_temp_free_i32(shift);
643 #define PAS_OP(pfx) \
644 switch (op2) { \
645 case 0: gen_pas_helper(glue(pfx,add16)); break; \
646 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
647 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
648 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
649 case 4: gen_pas_helper(glue(pfx,add8)); break; \
650 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
652 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
654 TCGv_ptr tmp;
656 switch (op1) {
657 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
658 case 1:
659 tmp = tcg_temp_new_ptr();
660 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
661 PAS_OP(s)
662 tcg_temp_free_ptr(tmp);
663 break;
664 case 5:
665 tmp = tcg_temp_new_ptr();
666 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
667 PAS_OP(u)
668 tcg_temp_free_ptr(tmp);
669 break;
670 #undef gen_pas_helper
671 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
672 case 2:
673 PAS_OP(q);
674 break;
675 case 3:
676 PAS_OP(sh);
677 break;
678 case 6:
679 PAS_OP(uq);
680 break;
681 case 7:
682 PAS_OP(uh);
683 break;
684 #undef gen_pas_helper
687 #undef PAS_OP
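/* The PAS_OP/gen_pas_helper pair expands into calls to the parallel
 * add/subtract helpers; for example op1 == 1, op2 == 0 (the signed,
 * GE-flag-setting form) becomes gen_helper_sadd16(a, a, b, tmp) with tmp
 * pointing at the GE flags in CPUARMState, while the saturating and
 * halving forms take no GE pointer. */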
689 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
690 #define PAS_OP(pfx) \
691 switch (op1) { \
692 case 0: gen_pas_helper(glue(pfx,add8)); break; \
693 case 1: gen_pas_helper(glue(pfx,add16)); break; \
694 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
695 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
696 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
697 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
699 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
701 TCGv_ptr tmp;
703 switch (op2) {
704 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
705 case 0:
706 tmp = tcg_temp_new_ptr();
707 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
708 PAS_OP(s)
709 tcg_temp_free_ptr(tmp);
710 break;
711 case 4:
712 tmp = tcg_temp_new_ptr();
713 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
714 PAS_OP(u)
715 tcg_temp_free_ptr(tmp);
716 break;
717 #undef gen_pas_helper
718 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
719 case 1:
720 PAS_OP(q);
721 break;
722 case 2:
723 PAS_OP(sh);
724 break;
725 case 5:
726 PAS_OP(uq);
727 break;
728 case 6:
729 PAS_OP(uh);
730 break;
731 #undef gen_pas_helper
734 #undef PAS_OP
737 * Generate a conditional based on ARM condition code cc.
738 * This is common between ARM and AArch64 targets.
740 void arm_test_cc(DisasCompare *cmp, int cc)
742 TCGv_i32 value;
743 TCGCond cond;
744 bool global = true;
746 switch (cc) {
747 case 0: /* eq: Z */
748 case 1: /* ne: !Z */
749 cond = TCG_COND_EQ;
750 value = cpu_ZF;
751 break;
753 case 2: /* cs: C */
754 case 3: /* cc: !C */
755 cond = TCG_COND_NE;
756 value = cpu_CF;
757 break;
759 case 4: /* mi: N */
760 case 5: /* pl: !N */
761 cond = TCG_COND_LT;
762 value = cpu_NF;
763 break;
765 case 6: /* vs: V */
766 case 7: /* vc: !V */
767 cond = TCG_COND_LT;
768 value = cpu_VF;
769 break;
771 case 8: /* hi: C && !Z */
772 case 9: /* ls: !C || Z -> !(C && !Z) */
773 cond = TCG_COND_NE;
774 value = tcg_temp_new_i32();
775 global = false;
776 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
777 ZF is non-zero for !Z; so AND the two subexpressions. */
778 tcg_gen_neg_i32(value, cpu_CF);
779 tcg_gen_and_i32(value, value, cpu_ZF);
780 break;
782 case 10: /* ge: N == V -> N ^ V == 0 */
783 case 11: /* lt: N != V -> N ^ V != 0 */
784 /* Since we're only interested in the sign bit, == 0 is >= 0. */
785 cond = TCG_COND_GE;
786 value = tcg_temp_new_i32();
787 global = false;
788 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
789 break;
791 case 12: /* gt: !Z && N == V */
792 case 13: /* le: Z || N != V */
793 cond = TCG_COND_NE;
794 value = tcg_temp_new_i32();
795 global = false;
796 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
797 * the sign bit then AND with ZF to yield the result. */
798 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
799 tcg_gen_sari_i32(value, value, 31);
800 tcg_gen_andc_i32(value, cpu_ZF, value);
801 break;
803 case 14: /* always */
804 case 15: /* always */
805 /* Use the ALWAYS condition, which will fold early.
806 * It doesn't matter what we use for the value. */
807 cond = TCG_COND_ALWAYS;
808 value = cpu_ZF;
809 goto no_invert;
811 default:
812 fprintf(stderr, "Bad condition code 0x%x\n", cc);
813 abort();
816 if (cc & 1) {
817 cond = tcg_invert_cond(cond);
820 no_invert:
821 cmp->cond = cond;
822 cmp->value = value;
823 cmp->value_global = global;
826 void arm_free_cc(DisasCompare *cmp)
828 if (!cmp->value_global) {
829 tcg_temp_free_i32(cmp->value);
833 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
835 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
838 void arm_gen_test_cc(int cc, TCGLabel *label)
840 DisasCompare cmp;
841 arm_test_cc(&cmp, cc);
842 arm_jump_cc(&cmp, label);
843 arm_free_cc(&cmp);
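/* Typical use in the decoder: a conditional instruction is translated by
 * branching over its body with the inverted condition, e.g.
 * arm_gen_test_cc(cond ^ 1, s->condlabel), so the body only executes when
 * the condition holds. */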
846 static const uint8_t table_logic_cc[16] = {
847 1, /* and */
848 1, /* xor */
849 0, /* sub */
850 0, /* rsb */
851 0, /* add */
852 0, /* adc */
853 0, /* sbc */
854 0, /* rsc */
855 1, /* andl */
856 1, /* xorl */
857 0, /* cmp */
858 0, /* cmn */
859 1, /* orr */
860 1, /* mov */
861 1, /* bic */
862 1, /* mvn */
865 /* Set PC and Thumb state from an immediate address. */
866 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
868 TCGv_i32 tmp;
870 s->is_jmp = DISAS_JUMP;
871 if (s->thumb != (addr & 1)) {
872 tmp = tcg_temp_new_i32();
873 tcg_gen_movi_i32(tmp, addr & 1);
874 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
875 tcg_temp_free_i32(tmp);
877 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
880 /* Set PC and Thumb state from var. var is marked as dead. */
881 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
883 s->is_jmp = DISAS_JUMP;
884 tcg_gen_andi_i32(cpu_R[15], var, ~1);
885 tcg_gen_andi_i32(var, var, 1);
886 store_cpu_field(var, thumb);
889 /* Variant of store_reg which uses branch&exchange logic when storing
890 to r15 in ARM architecture v7 and above. The source must be a temporary
891 and will be marked as dead. */
892 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
894 if (reg == 15 && ENABLE_ARCH_7) {
895 gen_bx(s, var);
896 } else {
897 store_reg(s, reg, var);
901 /* Variant of store_reg which uses branch&exchange logic when storing
902 * to r15 in ARM architecture v5T and above. This is used for storing
903 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
904 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
905 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
907 if (reg == 15 && ENABLE_ARCH_5) {
908 gen_bx(s, var);
909 } else {
910 store_reg(s, reg, var);
914 #ifdef CONFIG_USER_ONLY
915 #define IS_USER_ONLY 1
916 #else
917 #define IS_USER_ONLY 0
918 #endif
920 /* Abstractions of "generate code to do a guest load/store for
921 * AArch32", where a vaddr is always 32 bits (and is zero
922 * extended if we're a 64 bit core) and data is also
923 * 32 bits unless specifically doing a 64 bit access.
924 * These functions work like tcg_gen_qemu_{ld,st}* except
925 * that the address argument is TCGv_i32 rather than TCGv.
927 #if TARGET_LONG_BITS == 32
929 #define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
930 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
931 TCGv_i32 addr, int index) \
933 TCGMemOp opc = (OPC) | s->be_data; \
934 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
935 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
936 TCGv addr_be = tcg_temp_new(); \
937 tcg_gen_xori_i32(addr_be, addr, BE32_XOR); \
938 tcg_gen_qemu_ld_i32(val, addr_be, index, opc); \
939 tcg_temp_free(addr_be); \
940 return; \
942 tcg_gen_qemu_ld_i32(val, addr, index, opc); \
945 #define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
946 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
947 TCGv_i32 addr, int index) \
949 TCGMemOp opc = (OPC) | s->be_data; \
950 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
951 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
952 TCGv addr_be = tcg_temp_new(); \
953 tcg_gen_xori_i32(addr_be, addr, BE32_XOR); \
954 tcg_gen_qemu_st_i32(val, addr_be, index, opc); \
955 tcg_temp_free(addr_be); \
956 return; \
958 tcg_gen_qemu_st_i32(val, addr, index, opc); \
961 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
962 TCGv_i32 addr, int index)
964 TCGMemOp opc = MO_Q | s->be_data;
965 tcg_gen_qemu_ld_i64(val, addr, index, opc);
966 /* Not needed for user-mode BE32, where we use MO_BE instead. */
967 if (!IS_USER_ONLY && s->sctlr_b) {
968 tcg_gen_rotri_i64(val, val, 32);
972 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
973 TCGv_i32 addr, int index)
975 TCGMemOp opc = MO_Q | s->be_data;
976 /* Not needed for user-mode BE32, where we use MO_BE instead. */
977 if (!IS_USER_ONLY && s->sctlr_b) {
978 TCGv_i64 tmp = tcg_temp_new_i64();
979 tcg_gen_rotri_i64(tmp, val, 32);
980 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
981 tcg_temp_free_i64(tmp);
982 return;
984 tcg_gen_qemu_st_i64(val, addr, index, opc);
987 #else
989 #define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
990 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
991 TCGv_i32 addr, int index) \
993 TCGMemOp opc = (OPC) | s->be_data; \
994 TCGv addr64 = tcg_temp_new(); \
995 tcg_gen_extu_i32_i64(addr64, addr); \
996 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
997 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
998 tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
1000 tcg_gen_qemu_ld_i32(val, addr64, index, opc); \
1001 tcg_temp_free(addr64); \
1004 #define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
1005 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1006 TCGv_i32 addr, int index) \
1008 TCGMemOp opc = (OPC) | s->be_data; \
1009 TCGv addr64 = tcg_temp_new(); \
1010 tcg_gen_extu_i32_i64(addr64, addr); \
1011 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
1012 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
1013 tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
1015 tcg_gen_qemu_st_i32(val, addr64, index, opc); \
1016 tcg_temp_free(addr64); \
1019 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1020 TCGv_i32 addr, int index)
1022 TCGMemOp opc = MO_Q | s->be_data;
1023 TCGv addr64 = tcg_temp_new();
1024 tcg_gen_extu_i32_i64(addr64, addr);
1025 tcg_gen_qemu_ld_i64(val, addr64, index, opc);
1027 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1028 if (!IS_USER_ONLY && s->sctlr_b) {
1029 tcg_gen_rotri_i64(val, val, 32);
1031 tcg_temp_free(addr64);
1034 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1035 TCGv_i32 addr, int index)
1037 TCGMemOp opc = MO_Q | s->be_data;
1038 TCGv addr64 = tcg_temp_new();
1039 tcg_gen_extu_i32_i64(addr64, addr);
1041 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1042 if (!IS_USER_ONLY && s->sctlr_b) {
1043 TCGv tmp = tcg_temp_new();
1044 tcg_gen_rotri_i64(tmp, val, 32);
1045 tcg_gen_qemu_st_i64(tmp, addr64, index, opc);
1046 tcg_temp_free(tmp);
1047 } else {
1048 tcg_gen_qemu_st_i64(val, addr64, index, opc);
1050 tcg_temp_free(addr64);
1053 #endif
1055 DO_GEN_LD(8s, MO_SB, 3)
1056 DO_GEN_LD(8u, MO_UB, 3)
1057 DO_GEN_LD(16s, MO_SW, 2)
1058 DO_GEN_LD(16u, MO_UW, 2)
1059 DO_GEN_LD(32u, MO_UL, 0)
1060 /* 'a' variants include an alignment check */
1061 DO_GEN_LD(16ua, MO_UW | MO_ALIGN, 2)
1062 DO_GEN_LD(32ua, MO_UL | MO_ALIGN, 0)
1063 DO_GEN_ST(8, MO_UB, 3)
1064 DO_GEN_ST(16, MO_UW, 2)
1065 DO_GEN_ST(32, MO_UL, 0)
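/* Each DO_GEN_LD/DO_GEN_ST instantiation above defines one accessor; e.g.
 * DO_GEN_LD(16u, MO_UW, 2) produces gen_aa32_ld16u(), a zero-extending
 * halfword load that honours s->be_data for byte order and, in BE32 system
 * mode (SCTLR.B set), XORs the low address bits (2 here) to model the
 * word-invariant byte-lane swap. 64-bit accesses instead rotate the value
 * by 32 bits when SCTLR.B is set. */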
1067 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
1069 tcg_gen_movi_i32(cpu_R[15], val);
1072 static inline void gen_hvc(DisasContext *s, int imm16)
1074 /* The pre HVC helper handles cases when HVC gets trapped
1075 * as an undefined insn by runtime configuration (i.e. before
1076 * the insn really executes).
1078 gen_set_pc_im(s, s->pc - 4);
1079 gen_helper_pre_hvc(cpu_env);
1080 /* Otherwise we will treat this as a real exception which
1081 * happens after execution of the insn. (The distinction matters
1082 * for the PC value reported to the exception handler and also
1083 * for single stepping.)
1085 s->svc_imm = imm16;
1086 gen_set_pc_im(s, s->pc);
1087 s->is_jmp = DISAS_HVC;
1090 static inline void gen_smc(DisasContext *s)
1092 /* As with HVC, we may take an exception either before or after
1093 * the insn executes.
1095 TCGv_i32 tmp;
1097 gen_set_pc_im(s, s->pc - 4);
1098 tmp = tcg_const_i32(syn_aa32_smc());
1099 gen_helper_pre_smc(cpu_env, tmp);
1100 tcg_temp_free_i32(tmp);
1101 gen_set_pc_im(s, s->pc);
1102 s->is_jmp = DISAS_SMC;
1105 static inline void
1106 gen_set_condexec (DisasContext *s)
1108 if (s->condexec_mask) {
1109 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
1110 TCGv_i32 tmp = tcg_temp_new_i32();
1111 tcg_gen_movi_i32(tmp, val);
1112 store_cpu_field(tmp, condexec_bits);
1116 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1118 gen_set_condexec(s);
1119 gen_set_pc_im(s, s->pc - offset);
1120 gen_exception_internal(excp);
1121 s->is_jmp = DISAS_JUMP;
1124 static void gen_exception_insn(DisasContext *s, int offset, int excp,
1125 int syn, uint32_t target_el)
1127 gen_set_condexec(s);
1128 gen_set_pc_im(s, s->pc - offset);
1129 gen_exception(excp, syn, target_el);
1130 s->is_jmp = DISAS_JUMP;
1133 /* Force a TB lookup after an instruction that changes the CPU state. */
1134 static inline void gen_lookup_tb(DisasContext *s)
1136 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
1137 s->is_jmp = DISAS_JUMP;
1140 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1141 TCGv_i32 var)
1143 int val, rm, shift, shiftop;
1144 TCGv_i32 offset;
1146 if (!(insn & (1 << 25))) {
1147 /* immediate */
1148 val = insn & 0xfff;
1149 if (!(insn & (1 << 23)))
1150 val = -val;
1151 if (val != 0)
1152 tcg_gen_addi_i32(var, var, val);
1153 } else {
1154 /* shift/register */
1155 rm = (insn) & 0xf;
1156 shift = (insn >> 7) & 0x1f;
1157 shiftop = (insn >> 5) & 3;
1158 offset = load_reg(s, rm);
1159 gen_arm_shift_im(offset, shiftop, shift, 0);
1160 if (!(insn & (1 << 23)))
1161 tcg_gen_sub_i32(var, var, offset);
1162 else
1163 tcg_gen_add_i32(var, var, offset);
1164 tcg_temp_free_i32(offset);
1168 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1169 int extra, TCGv_i32 var)
1171 int val, rm;
1172 TCGv_i32 offset;
1174 if (insn & (1 << 22)) {
1175 /* immediate */
1176 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1177 if (!(insn & (1 << 23)))
1178 val = -val;
1179 val += extra;
1180 if (val != 0)
1181 tcg_gen_addi_i32(var, var, val);
1182 } else {
1183 /* register */
1184 if (extra)
1185 tcg_gen_addi_i32(var, var, extra);
1186 rm = (insn) & 0xf;
1187 offset = load_reg(s, rm);
1188 if (!(insn & (1 << 23)))
1189 tcg_gen_sub_i32(var, var, offset);
1190 else
1191 tcg_gen_add_i32(var, var, offset);
1192 tcg_temp_free_i32(offset);
1196 static TCGv_ptr get_fpstatus_ptr(int neon)
1198 TCGv_ptr statusptr = tcg_temp_new_ptr();
1199 int offset;
1200 if (neon) {
1201 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1202 } else {
1203 offset = offsetof(CPUARMState, vfp.fp_status);
1205 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1206 return statusptr;
1209 #define VFP_OP2(name) \
1210 static inline void gen_vfp_##name(int dp) \
1212 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1213 if (dp) { \
1214 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1215 } else { \
1216 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1218 tcg_temp_free_ptr(fpst); \
1221 VFP_OP2(add)
1222 VFP_OP2(sub)
1223 VFP_OP2(mul)
1224 VFP_OP2(div)
1226 #undef VFP_OP2
1228 static inline void gen_vfp_F1_mul(int dp)
1230 /* Like gen_vfp_mul() but put result in F1 */
1231 TCGv_ptr fpst = get_fpstatus_ptr(0);
1232 if (dp) {
1233 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1234 } else {
1235 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1237 tcg_temp_free_ptr(fpst);
1240 static inline void gen_vfp_F1_neg(int dp)
1242 /* Like gen_vfp_neg() but put result in F1 */
1243 if (dp) {
1244 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1245 } else {
1246 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1250 static inline void gen_vfp_abs(int dp)
1252 if (dp)
1253 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1254 else
1255 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1258 static inline void gen_vfp_neg(int dp)
1260 if (dp)
1261 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1262 else
1263 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1266 static inline void gen_vfp_sqrt(int dp)
1268 if (dp)
1269 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1270 else
1271 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1274 static inline void gen_vfp_cmp(int dp)
1276 if (dp)
1277 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1278 else
1279 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1282 static inline void gen_vfp_cmpe(int dp)
1284 if (dp)
1285 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1286 else
1287 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1290 static inline void gen_vfp_F1_ld0(int dp)
1292 if (dp)
1293 tcg_gen_movi_i64(cpu_F1d, 0);
1294 else
1295 tcg_gen_movi_i32(cpu_F1s, 0);
1298 #define VFP_GEN_ITOF(name) \
1299 static inline void gen_vfp_##name(int dp, int neon) \
1301 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1302 if (dp) { \
1303 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1304 } else { \
1305 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1307 tcg_temp_free_ptr(statusptr); \
1310 VFP_GEN_ITOF(uito)
1311 VFP_GEN_ITOF(sito)
1312 #undef VFP_GEN_ITOF
1314 #define VFP_GEN_FTOI(name) \
1315 static inline void gen_vfp_##name(int dp, int neon) \
1317 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1318 if (dp) { \
1319 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1320 } else { \
1321 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1323 tcg_temp_free_ptr(statusptr); \
1326 VFP_GEN_FTOI(toui)
1327 VFP_GEN_FTOI(touiz)
1328 VFP_GEN_FTOI(tosi)
1329 VFP_GEN_FTOI(tosiz)
1330 #undef VFP_GEN_FTOI
1332 #define VFP_GEN_FIX(name, round) \
1333 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1335 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1336 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1337 if (dp) { \
1338 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1339 statusptr); \
1340 } else { \
1341 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1342 statusptr); \
1344 tcg_temp_free_i32(tmp_shift); \
1345 tcg_temp_free_ptr(statusptr); \
1347 VFP_GEN_FIX(tosh, _round_to_zero)
1348 VFP_GEN_FIX(tosl, _round_to_zero)
1349 VFP_GEN_FIX(touh, _round_to_zero)
1350 VFP_GEN_FIX(toul, _round_to_zero)
1351 VFP_GEN_FIX(shto, )
1352 VFP_GEN_FIX(slto, )
1353 VFP_GEN_FIX(uhto, )
1354 VFP_GEN_FIX(ulto, )
1355 #undef VFP_GEN_FIX
1357 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1359 if (dp) {
1360 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
1361 } else {
1362 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
1366 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1368 if (dp) {
1369 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
1370 } else {
1371 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
1375 static inline long
1376 vfp_reg_offset (int dp, int reg)
1378 if (dp)
1379 return offsetof(CPUARMState, vfp.regs[reg]);
1380 else if (reg & 1) {
1381 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1382 + offsetof(CPU_DoubleU, l.upper);
1383 } else {
1384 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1385 + offsetof(CPU_DoubleU, l.lower);
1389 /* Return the offset of a 32-bit piece of a NEON register.
1390 zero is the least significant end of the register. */
1391 static inline long
1392 neon_reg_offset (int reg, int n)
1394 int sreg;
1395 sreg = reg * 2 + n;
1396 return vfp_reg_offset(0, sreg);
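/* Pass 0 is the least significant 32-bit half of D register "reg" and
 * pass 1 the most significant half, matching the mapping used by
 * vfp_reg_offset() for single-precision registers. */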
1399 static TCGv_i32 neon_load_reg(int reg, int pass)
1401 TCGv_i32 tmp = tcg_temp_new_i32();
1402 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1403 return tmp;
1406 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1408 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1409 tcg_temp_free_i32(var);
1412 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1414 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1417 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1419 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1422 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1423 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1424 #define tcg_gen_st_f32 tcg_gen_st_i32
1425 #define tcg_gen_st_f64 tcg_gen_st_i64
1427 static inline void gen_mov_F0_vreg(int dp, int reg)
1429 if (dp)
1430 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1431 else
1432 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1435 static inline void gen_mov_F1_vreg(int dp, int reg)
1437 if (dp)
1438 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1439 else
1440 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1443 static inline void gen_mov_vreg_F0(int dp, int reg)
1445 if (dp)
1446 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1447 else
1448 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1451 #define ARM_CP_RW_BIT (1 << 20)
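/* Bit 20 of a coprocessor instruction selects the transfer direction:
 * set for reads (MRC/MRRC-style moves into core registers and coprocessor
 * loads such as WLDR), clear for the corresponding writes and stores. */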
1453 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1455 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1458 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1460 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1463 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1465 TCGv_i32 var = tcg_temp_new_i32();
1466 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1467 return var;
1470 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1472 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1473 tcg_temp_free_i32(var);
1476 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1478 iwmmxt_store_reg(cpu_M0, rn);
1481 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1483 iwmmxt_load_reg(cpu_M0, rn);
1486 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1488 iwmmxt_load_reg(cpu_V1, rn);
1489 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1492 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1494 iwmmxt_load_reg(cpu_V1, rn);
1495 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1498 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1500 iwmmxt_load_reg(cpu_V1, rn);
1501 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1504 #define IWMMXT_OP(name) \
1505 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1507 iwmmxt_load_reg(cpu_V1, rn); \
1508 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1511 #define IWMMXT_OP_ENV(name) \
1512 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1514 iwmmxt_load_reg(cpu_V1, rn); \
1515 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1518 #define IWMMXT_OP_ENV_SIZE(name) \
1519 IWMMXT_OP_ENV(name##b) \
1520 IWMMXT_OP_ENV(name##w) \
1521 IWMMXT_OP_ENV(name##l)
1523 #define IWMMXT_OP_ENV1(name) \
1524 static inline void gen_op_iwmmxt_##name##_M0(void) \
1526 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1529 IWMMXT_OP(maddsq)
1530 IWMMXT_OP(madduq)
1531 IWMMXT_OP(sadb)
1532 IWMMXT_OP(sadw)
1533 IWMMXT_OP(mulslw)
1534 IWMMXT_OP(mulshw)
1535 IWMMXT_OP(mululw)
1536 IWMMXT_OP(muluhw)
1537 IWMMXT_OP(macsw)
1538 IWMMXT_OP(macuw)
1540 IWMMXT_OP_ENV_SIZE(unpackl)
1541 IWMMXT_OP_ENV_SIZE(unpackh)
1543 IWMMXT_OP_ENV1(unpacklub)
1544 IWMMXT_OP_ENV1(unpackluw)
1545 IWMMXT_OP_ENV1(unpacklul)
1546 IWMMXT_OP_ENV1(unpackhub)
1547 IWMMXT_OP_ENV1(unpackhuw)
1548 IWMMXT_OP_ENV1(unpackhul)
1549 IWMMXT_OP_ENV1(unpacklsb)
1550 IWMMXT_OP_ENV1(unpacklsw)
1551 IWMMXT_OP_ENV1(unpacklsl)
1552 IWMMXT_OP_ENV1(unpackhsb)
1553 IWMMXT_OP_ENV1(unpackhsw)
1554 IWMMXT_OP_ENV1(unpackhsl)
1556 IWMMXT_OP_ENV_SIZE(cmpeq)
1557 IWMMXT_OP_ENV_SIZE(cmpgtu)
1558 IWMMXT_OP_ENV_SIZE(cmpgts)
1560 IWMMXT_OP_ENV_SIZE(mins)
1561 IWMMXT_OP_ENV_SIZE(minu)
1562 IWMMXT_OP_ENV_SIZE(maxs)
1563 IWMMXT_OP_ENV_SIZE(maxu)
1565 IWMMXT_OP_ENV_SIZE(subn)
1566 IWMMXT_OP_ENV_SIZE(addn)
1567 IWMMXT_OP_ENV_SIZE(subu)
1568 IWMMXT_OP_ENV_SIZE(addu)
1569 IWMMXT_OP_ENV_SIZE(subs)
1570 IWMMXT_OP_ENV_SIZE(adds)
1572 IWMMXT_OP_ENV(avgb0)
1573 IWMMXT_OP_ENV(avgb1)
1574 IWMMXT_OP_ENV(avgw0)
1575 IWMMXT_OP_ENV(avgw1)
1577 IWMMXT_OP_ENV(packuw)
1578 IWMMXT_OP_ENV(packul)
1579 IWMMXT_OP_ENV(packuq)
1580 IWMMXT_OP_ENV(packsw)
1581 IWMMXT_OP_ENV(packsl)
1582 IWMMXT_OP_ENV(packsq)
1584 static void gen_op_iwmmxt_set_mup(void)
1586 TCGv_i32 tmp;
1587 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1588 tcg_gen_ori_i32(tmp, tmp, 2);
1589 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1592 static void gen_op_iwmmxt_set_cup(void)
1594 TCGv_i32 tmp;
1595 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1596 tcg_gen_ori_i32(tmp, tmp, 1);
1597 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1600 static void gen_op_iwmmxt_setpsr_nz(void)
1602 TCGv_i32 tmp = tcg_temp_new_i32();
1603 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1604 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1607 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1609 iwmmxt_load_reg(cpu_V1, rn);
1610 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1611 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1614 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1615 TCGv_i32 dest)
1617 int rd;
1618 uint32_t offset;
1619 TCGv_i32 tmp;
1621 rd = (insn >> 16) & 0xf;
1622 tmp = load_reg(s, rd);
1624 offset = (insn & 0xff) << ((insn >> 7) & 2);
1625 if (insn & (1 << 24)) {
1626 /* Pre indexed */
1627 if (insn & (1 << 23))
1628 tcg_gen_addi_i32(tmp, tmp, offset);
1629 else
1630 tcg_gen_addi_i32(tmp, tmp, -offset);
1631 tcg_gen_mov_i32(dest, tmp);
1632 if (insn & (1 << 21))
1633 store_reg(s, rd, tmp);
1634 else
1635 tcg_temp_free_i32(tmp);
1636 } else if (insn & (1 << 21)) {
1637 /* Post indexed */
1638 tcg_gen_mov_i32(dest, tmp);
1639 if (insn & (1 << 23))
1640 tcg_gen_addi_i32(tmp, tmp, offset);
1641 else
1642 tcg_gen_addi_i32(tmp, tmp, -offset);
1643 store_reg(s, rd, tmp);
1644 } else if (!(insn & (1 << 23)))
1645 return 1;
1646 return 0;
1649 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1651 int rd = (insn >> 0) & 0xf;
1652 TCGv_i32 tmp;
1654 if (insn & (1 << 8)) {
1655 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1656 return 1;
1657 } else {
1658 tmp = iwmmxt_load_creg(rd);
1660 } else {
1661 tmp = tcg_temp_new_i32();
1662 iwmmxt_load_reg(cpu_V0, rd);
1663 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1665 tcg_gen_andi_i32(tmp, tmp, mask);
1666 tcg_gen_mov_i32(dest, tmp);
1667 tcg_temp_free_i32(tmp);
1668 return 0;
1671 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1672 (i.e. an undefined instruction). */
1673 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1675 int rd, wrd;
1676 int rdhi, rdlo, rd0, rd1, i;
1677 TCGv_i32 addr;
1678 TCGv_i32 tmp, tmp2, tmp3;
1680 if ((insn & 0x0e000e00) == 0x0c000000) {
1681 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1682 wrd = insn & 0xf;
1683 rdlo = (insn >> 12) & 0xf;
1684 rdhi = (insn >> 16) & 0xf;
1685 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1686 iwmmxt_load_reg(cpu_V0, wrd);
1687 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1688 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1689 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1690 } else { /* TMCRR */
1691 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1692 iwmmxt_store_reg(cpu_V0, wrd);
1693 gen_op_iwmmxt_set_mup();
1695 return 0;
1698 wrd = (insn >> 12) & 0xf;
1699 addr = tcg_temp_new_i32();
1700 if (gen_iwmmxt_address(s, insn, addr)) {
1701 tcg_temp_free_i32(addr);
1702 return 1;
1704 if (insn & ARM_CP_RW_BIT) {
1705 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1706 tmp = tcg_temp_new_i32();
1707 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1708 iwmmxt_store_creg(wrd, tmp);
1709 } else {
1710 i = 1;
1711 if (insn & (1 << 8)) {
1712 if (insn & (1 << 22)) { /* WLDRD */
1713 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1714 i = 0;
1715 } else { /* WLDRW wRd */
1716 tmp = tcg_temp_new_i32();
1717 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1719 } else {
1720 tmp = tcg_temp_new_i32();
1721 if (insn & (1 << 22)) { /* WLDRH */
1722 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1723 } else { /* WLDRB */
1724 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1727 if (i) {
1728 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1729 tcg_temp_free_i32(tmp);
1731 gen_op_iwmmxt_movq_wRn_M0(wrd);
1733 } else {
1734 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1735 tmp = iwmmxt_load_creg(wrd);
1736 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1737 } else {
1738 gen_op_iwmmxt_movq_M0_wRn(wrd);
1739 tmp = tcg_temp_new_i32();
1740 if (insn & (1 << 8)) {
1741 if (insn & (1 << 22)) { /* WSTRD */
1742 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1743 } else { /* WSTRW wRd */
1744 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1745 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1747 } else {
1748 if (insn & (1 << 22)) { /* WSTRH */
1749 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1750 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1751 } else { /* WSTRB */
1752 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1753 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1757 tcg_temp_free_i32(tmp);
1759 tcg_temp_free_i32(addr);
1760 return 0;
1763 if ((insn & 0x0f000000) != 0x0e000000)
1764 return 1;
1766 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1767 case 0x000: /* WOR */
1768 wrd = (insn >> 12) & 0xf;
1769 rd0 = (insn >> 0) & 0xf;
1770 rd1 = (insn >> 16) & 0xf;
1771 gen_op_iwmmxt_movq_M0_wRn(rd0);
1772 gen_op_iwmmxt_orq_M0_wRn(rd1);
1773 gen_op_iwmmxt_setpsr_nz();
1774 gen_op_iwmmxt_movq_wRn_M0(wrd);
1775 gen_op_iwmmxt_set_mup();
1776 gen_op_iwmmxt_set_cup();
1777 break;
1778 case 0x011: /* TMCR */
1779 if (insn & 0xf)
1780 return 1;
1781 rd = (insn >> 12) & 0xf;
1782 wrd = (insn >> 16) & 0xf;
1783 switch (wrd) {
1784 case ARM_IWMMXT_wCID:
1785 case ARM_IWMMXT_wCASF:
1786 break;
1787 case ARM_IWMMXT_wCon:
1788 gen_op_iwmmxt_set_cup();
1789 /* Fall through. */
1790 case ARM_IWMMXT_wCSSF:
1791 tmp = iwmmxt_load_creg(wrd);
1792 tmp2 = load_reg(s, rd);
1793 tcg_gen_andc_i32(tmp, tmp, tmp2);
1794 tcg_temp_free_i32(tmp2);
1795 iwmmxt_store_creg(wrd, tmp);
1796 break;
1797 case ARM_IWMMXT_wCGR0:
1798 case ARM_IWMMXT_wCGR1:
1799 case ARM_IWMMXT_wCGR2:
1800 case ARM_IWMMXT_wCGR3:
1801 gen_op_iwmmxt_set_cup();
1802 tmp = load_reg(s, rd);
1803 iwmmxt_store_creg(wrd, tmp);
1804 break;
1805 default:
1806 return 1;
1808 break;
1809 case 0x100: /* WXOR */
1810 wrd = (insn >> 12) & 0xf;
1811 rd0 = (insn >> 0) & 0xf;
1812 rd1 = (insn >> 16) & 0xf;
1813 gen_op_iwmmxt_movq_M0_wRn(rd0);
1814 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1815 gen_op_iwmmxt_setpsr_nz();
1816 gen_op_iwmmxt_movq_wRn_M0(wrd);
1817 gen_op_iwmmxt_set_mup();
1818 gen_op_iwmmxt_set_cup();
1819 break;
1820 case 0x111: /* TMRC */
1821 if (insn & 0xf)
1822 return 1;
1823 rd = (insn >> 12) & 0xf;
1824 wrd = (insn >> 16) & 0xf;
1825 tmp = iwmmxt_load_creg(wrd);
1826 store_reg(s, rd, tmp);
1827 break;
1828 case 0x300: /* WANDN */
1829 wrd = (insn >> 12) & 0xf;
1830 rd0 = (insn >> 0) & 0xf;
1831 rd1 = (insn >> 16) & 0xf;
1832 gen_op_iwmmxt_movq_M0_wRn(rd0);
1833 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1834 gen_op_iwmmxt_andq_M0_wRn(rd1);
1835 gen_op_iwmmxt_setpsr_nz();
1836 gen_op_iwmmxt_movq_wRn_M0(wrd);
1837 gen_op_iwmmxt_set_mup();
1838 gen_op_iwmmxt_set_cup();
1839 break;
1840 case 0x200: /* WAND */
1841 wrd = (insn >> 12) & 0xf;
1842 rd0 = (insn >> 0) & 0xf;
1843 rd1 = (insn >> 16) & 0xf;
1844 gen_op_iwmmxt_movq_M0_wRn(rd0);
1845 gen_op_iwmmxt_andq_M0_wRn(rd1);
1846 gen_op_iwmmxt_setpsr_nz();
1847 gen_op_iwmmxt_movq_wRn_M0(wrd);
1848 gen_op_iwmmxt_set_mup();
1849 gen_op_iwmmxt_set_cup();
1850 break;
1851 case 0x810: case 0xa10: /* WMADD */
1852 wrd = (insn >> 12) & 0xf;
1853 rd0 = (insn >> 0) & 0xf;
1854 rd1 = (insn >> 16) & 0xf;
1855 gen_op_iwmmxt_movq_M0_wRn(rd0);
1856 if (insn & (1 << 21))
1857 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1858 else
1859 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1860 gen_op_iwmmxt_movq_wRn_M0(wrd);
1861 gen_op_iwmmxt_set_mup();
1862 break;
1863 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1864 wrd = (insn >> 12) & 0xf;
1865 rd0 = (insn >> 16) & 0xf;
1866 rd1 = (insn >> 0) & 0xf;
1867 gen_op_iwmmxt_movq_M0_wRn(rd0);
1868 switch ((insn >> 22) & 3) {
1869 case 0:
1870 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1871 break;
1872 case 1:
1873 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1874 break;
1875 case 2:
1876 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1877 break;
1878 case 3:
1879 return 1;
1881 gen_op_iwmmxt_movq_wRn_M0(wrd);
1882 gen_op_iwmmxt_set_mup();
1883 gen_op_iwmmxt_set_cup();
1884 break;
1885 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1886 wrd = (insn >> 12) & 0xf;
1887 rd0 = (insn >> 16) & 0xf;
1888 rd1 = (insn >> 0) & 0xf;
1889 gen_op_iwmmxt_movq_M0_wRn(rd0);
1890 switch ((insn >> 22) & 3) {
1891 case 0:
1892 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1893 break;
1894 case 1:
1895 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1896 break;
1897 case 2:
1898 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1899 break;
1900 case 3:
1901 return 1;
1903 gen_op_iwmmxt_movq_wRn_M0(wrd);
1904 gen_op_iwmmxt_set_mup();
1905 gen_op_iwmmxt_set_cup();
1906 break;
1907 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1908 wrd = (insn >> 12) & 0xf;
1909 rd0 = (insn >> 16) & 0xf;
1910 rd1 = (insn >> 0) & 0xf;
1911 gen_op_iwmmxt_movq_M0_wRn(rd0);
1912 if (insn & (1 << 22))
1913 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1914 else
1915 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1916 if (!(insn & (1 << 20)))
1917 gen_op_iwmmxt_addl_M0_wRn(wrd);
1918 gen_op_iwmmxt_movq_wRn_M0(wrd);
1919 gen_op_iwmmxt_set_mup();
1920 break;
1921 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1922 wrd = (insn >> 12) & 0xf;
1923 rd0 = (insn >> 16) & 0xf;
1924 rd1 = (insn >> 0) & 0xf;
1925 gen_op_iwmmxt_movq_M0_wRn(rd0);
1926 if (insn & (1 << 21)) {
1927 if (insn & (1 << 20))
1928 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1929 else
1930 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1931 } else {
1932 if (insn & (1 << 20))
1933 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1934 else
1935 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1937 gen_op_iwmmxt_movq_wRn_M0(wrd);
1938 gen_op_iwmmxt_set_mup();
1939 break;
1940 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1941 wrd = (insn >> 12) & 0xf;
1942 rd0 = (insn >> 16) & 0xf;
1943 rd1 = (insn >> 0) & 0xf;
1944 gen_op_iwmmxt_movq_M0_wRn(rd0);
1945 if (insn & (1 << 21))
1946 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1947 else
1948 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1949 if (!(insn & (1 << 20))) {
1950 iwmmxt_load_reg(cpu_V1, wrd);
1951 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1953 gen_op_iwmmxt_movq_wRn_M0(wrd);
1954 gen_op_iwmmxt_set_mup();
1955 break;
1956 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1957 wrd = (insn >> 12) & 0xf;
1958 rd0 = (insn >> 16) & 0xf;
1959 rd1 = (insn >> 0) & 0xf;
1960 gen_op_iwmmxt_movq_M0_wRn(rd0);
1961 switch ((insn >> 22) & 3) {
1962 case 0:
1963 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1964 break;
1965 case 1:
1966 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1967 break;
1968 case 2:
1969 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1970 break;
1971 case 3:
1972 return 1;
1974 gen_op_iwmmxt_movq_wRn_M0(wrd);
1975 gen_op_iwmmxt_set_mup();
1976 gen_op_iwmmxt_set_cup();
1977 break;
1978 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1979 wrd = (insn >> 12) & 0xf;
1980 rd0 = (insn >> 16) & 0xf;
1981 rd1 = (insn >> 0) & 0xf;
1982 gen_op_iwmmxt_movq_M0_wRn(rd0);
1983 if (insn & (1 << 22)) {
1984 if (insn & (1 << 20))
1985 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1986 else
1987 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1988 } else {
1989 if (insn & (1 << 20))
1990 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1991 else
1992 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1994 gen_op_iwmmxt_movq_wRn_M0(wrd);
1995 gen_op_iwmmxt_set_mup();
1996 gen_op_iwmmxt_set_cup();
1997 break;
1998 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1999 wrd = (insn >> 12) & 0xf;
2000 rd0 = (insn >> 16) & 0xf;
2001 rd1 = (insn >> 0) & 0xf;
2002 gen_op_iwmmxt_movq_M0_wRn(rd0);
2003 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2004 tcg_gen_andi_i32(tmp, tmp, 7);
2005 iwmmxt_load_reg(cpu_V1, rd1);
2006 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2007 tcg_temp_free_i32(tmp);
2008 gen_op_iwmmxt_movq_wRn_M0(wrd);
2009 gen_op_iwmmxt_set_mup();
2010 break;
2011 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2012 if (((insn >> 6) & 3) == 3)
2013 return 1;
2014 rd = (insn >> 12) & 0xf;
2015 wrd = (insn >> 16) & 0xf;
2016 tmp = load_reg(s, rd);
2017 gen_op_iwmmxt_movq_M0_wRn(wrd);
2018 switch ((insn >> 6) & 3) {
2019 case 0:
2020 tmp2 = tcg_const_i32(0xff);
2021 tmp3 = tcg_const_i32((insn & 7) << 3);
2022 break;
2023 case 1:
2024 tmp2 = tcg_const_i32(0xffff);
2025 tmp3 = tcg_const_i32((insn & 3) << 4);
2026 break;
2027 case 2:
2028 tmp2 = tcg_const_i32(0xffffffff);
2029 tmp3 = tcg_const_i32((insn & 1) << 5);
2030 break;
2031 default:
2032 TCGV_UNUSED_I32(tmp2);
2033 TCGV_UNUSED_I32(tmp3);
2035 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2036 tcg_temp_free_i32(tmp3);
2037 tcg_temp_free_i32(tmp2);
2038 tcg_temp_free_i32(tmp);
2039 gen_op_iwmmxt_movq_wRn_M0(wrd);
2040 gen_op_iwmmxt_set_mup();
2041 break;
2042 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2043 rd = (insn >> 12) & 0xf;
2044 wrd = (insn >> 16) & 0xf;
2045 if (rd == 15 || ((insn >> 22) & 3) == 3)
2046 return 1;
2047 gen_op_iwmmxt_movq_M0_wRn(wrd);
2048 tmp = tcg_temp_new_i32();
2049 switch ((insn >> 22) & 3) {
2050 case 0:
2051 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2052 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2053 if (insn & 8) {
2054 tcg_gen_ext8s_i32(tmp, tmp);
2055 } else {
2056 tcg_gen_andi_i32(tmp, tmp, 0xff);
2058 break;
2059 case 1:
2060 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2061 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2062 if (insn & 8) {
2063 tcg_gen_ext16s_i32(tmp, tmp);
2064 } else {
2065 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2067 break;
2068 case 2:
2069 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2070 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2071 break;
2073 store_reg(s, rd, tmp);
2074 break;
2075 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2076 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2077 return 1;
2078 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2079 switch ((insn >> 22) & 3) {
2080 case 0:
2081 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2082 break;
2083 case 1:
2084 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2085 break;
2086 case 2:
2087 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2088 break;
2090 tcg_gen_shli_i32(tmp, tmp, 28);
2091 gen_set_nzcv(tmp);
2092 tcg_temp_free_i32(tmp);
2093 break;
2094 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2095 if (((insn >> 6) & 3) == 3)
2096 return 1;
2097 rd = (insn >> 12) & 0xf;
2098 wrd = (insn >> 16) & 0xf;
2099 tmp = load_reg(s, rd);
2100 switch ((insn >> 6) & 3) {
2101 case 0:
2102 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2103 break;
2104 case 1:
2105 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2106 break;
2107 case 2:
2108 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2109 break;
2111 tcg_temp_free_i32(tmp);
2112 gen_op_iwmmxt_movq_wRn_M0(wrd);
2113 gen_op_iwmmxt_set_mup();
2114 break;
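/* TANDC and TORC (further below) reduce the per-element SIMD condition
 * flags held in wCASF by AND or OR respectively, so the resulting CPSR
 * NZCV flags reflect "true for all elements" / "true for any element".
 */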
2115 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2116 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2117 return 1;
2118 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2119 tmp2 = tcg_temp_new_i32();
2120 tcg_gen_mov_i32(tmp2, tmp);
2121 switch ((insn >> 22) & 3) {
2122 case 0:
2123 for (i = 0; i < 7; i ++) {
2124 tcg_gen_shli_i32(tmp2, tmp2, 4);
2125 tcg_gen_and_i32(tmp, tmp, tmp2);
2127 break;
2128 case 1:
2129 for (i = 0; i < 3; i ++) {
2130 tcg_gen_shli_i32(tmp2, tmp2, 8);
2131 tcg_gen_and_i32(tmp, tmp, tmp2);
2133 break;
2134 case 2:
2135 tcg_gen_shli_i32(tmp2, tmp2, 16);
2136 tcg_gen_and_i32(tmp, tmp, tmp2);
2137 break;
2139 gen_set_nzcv(tmp);
2140 tcg_temp_free_i32(tmp2);
2141 tcg_temp_free_i32(tmp);
2142 break;
2143 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2144 wrd = (insn >> 12) & 0xf;
2145 rd0 = (insn >> 16) & 0xf;
2146 gen_op_iwmmxt_movq_M0_wRn(rd0);
2147 switch ((insn >> 22) & 3) {
2148 case 0:
2149 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2150 break;
2151 case 1:
2152 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2153 break;
2154 case 2:
2155 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2156 break;
2157 case 3:
2158 return 1;
2160 gen_op_iwmmxt_movq_wRn_M0(wrd);
2161 gen_op_iwmmxt_set_mup();
2162 break;
2163 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2164 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2165 return 1;
2166 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2167 tmp2 = tcg_temp_new_i32();
2168 tcg_gen_mov_i32(tmp2, tmp);
2169 switch ((insn >> 22) & 3) {
2170 case 0:
2171 for (i = 0; i < 7; i ++) {
2172 tcg_gen_shli_i32(tmp2, tmp2, 4);
2173 tcg_gen_or_i32(tmp, tmp, tmp2);
2175 break;
2176 case 1:
2177 for (i = 0; i < 3; i ++) {
2178 tcg_gen_shli_i32(tmp2, tmp2, 8);
2179 tcg_gen_or_i32(tmp, tmp, tmp2);
2181 break;
2182 case 2:
2183 tcg_gen_shli_i32(tmp2, tmp2, 16);
2184 tcg_gen_or_i32(tmp, tmp, tmp2);
2185 break;
2187 gen_set_nzcv(tmp);
2188 tcg_temp_free_i32(tmp2);
2189 tcg_temp_free_i32(tmp);
2190 break;
2191 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2192 rd = (insn >> 12) & 0xf;
2193 rd0 = (insn >> 16) & 0xf;
2194 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2195 return 1;
2196 gen_op_iwmmxt_movq_M0_wRn(rd0);
2197 tmp = tcg_temp_new_i32();
2198 switch ((insn >> 22) & 3) {
2199 case 0:
2200 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2201 break;
2202 case 1:
2203 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2204 break;
2205 case 2:
2206 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2207 break;
2209 store_reg(s, rd, tmp);
2210 break;
2211 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2212 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2213 wrd = (insn >> 12) & 0xf;
2214 rd0 = (insn >> 16) & 0xf;
2215 rd1 = (insn >> 0) & 0xf;
2216 gen_op_iwmmxt_movq_M0_wRn(rd0);
2217 switch ((insn >> 22) & 3) {
2218 case 0:
2219 if (insn & (1 << 21))
2220 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2221 else
2222 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2223 break;
2224 case 1:
2225 if (insn & (1 << 21))
2226 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2227 else
2228 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2229 break;
2230 case 2:
2231 if (insn & (1 << 21))
2232 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2233 else
2234 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2235 break;
2236 case 3:
2237 return 1;
2239 gen_op_iwmmxt_movq_wRn_M0(wrd);
2240 gen_op_iwmmxt_set_mup();
2241 gen_op_iwmmxt_set_cup();
2242 break;
2243 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2244 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2245 wrd = (insn >> 12) & 0xf;
2246 rd0 = (insn >> 16) & 0xf;
2247 gen_op_iwmmxt_movq_M0_wRn(rd0);
2248 switch ((insn >> 22) & 3) {
2249 case 0:
2250 if (insn & (1 << 21))
2251 gen_op_iwmmxt_unpacklsb_M0();
2252 else
2253 gen_op_iwmmxt_unpacklub_M0();
2254 break;
2255 case 1:
2256 if (insn & (1 << 21))
2257 gen_op_iwmmxt_unpacklsw_M0();
2258 else
2259 gen_op_iwmmxt_unpackluw_M0();
2260 break;
2261 case 2:
2262 if (insn & (1 << 21))
2263 gen_op_iwmmxt_unpacklsl_M0();
2264 else
2265 gen_op_iwmmxt_unpacklul_M0();
2266 break;
2267 case 3:
2268 return 1;
2270 gen_op_iwmmxt_movq_wRn_M0(wrd);
2271 gen_op_iwmmxt_set_mup();
2272 gen_op_iwmmxt_set_cup();
2273 break;
2274 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2275 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2276 wrd = (insn >> 12) & 0xf;
2277 rd0 = (insn >> 16) & 0xf;
2278 gen_op_iwmmxt_movq_M0_wRn(rd0);
2279 switch ((insn >> 22) & 3) {
2280 case 0:
2281 if (insn & (1 << 21))
2282 gen_op_iwmmxt_unpackhsb_M0();
2283 else
2284 gen_op_iwmmxt_unpackhub_M0();
2285 break;
2286 case 1:
2287 if (insn & (1 << 21))
2288 gen_op_iwmmxt_unpackhsw_M0();
2289 else
2290 gen_op_iwmmxt_unpackhuw_M0();
2291 break;
2292 case 2:
2293 if (insn & (1 << 21))
2294 gen_op_iwmmxt_unpackhsl_M0();
2295 else
2296 gen_op_iwmmxt_unpackhul_M0();
2297 break;
2298 case 3:
2299 return 1;
2301 gen_op_iwmmxt_movq_wRn_M0(wrd);
2302 gen_op_iwmmxt_set_mup();
2303 gen_op_iwmmxt_set_cup();
2304 break;
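/* For the shift group below (WSRL, WSRA, WSLL, WROR) the shift count is
 * fetched and range-checked by gen_iwmmxt_shift(); a byte element size
 * (size field 0) is not valid for these insns and UNDEFs.
 */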
2305 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2306 case 0x214: case 0x614: case 0xa14: case 0xe14:
2307 if (((insn >> 22) & 3) == 0)
2308 return 1;
2309 wrd = (insn >> 12) & 0xf;
2310 rd0 = (insn >> 16) & 0xf;
2311 gen_op_iwmmxt_movq_M0_wRn(rd0);
2312 tmp = tcg_temp_new_i32();
2313 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2314 tcg_temp_free_i32(tmp);
2315 return 1;
2317 switch ((insn >> 22) & 3) {
2318 case 1:
2319 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2320 break;
2321 case 2:
2322 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2323 break;
2324 case 3:
2325 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2326 break;
2328 tcg_temp_free_i32(tmp);
2329 gen_op_iwmmxt_movq_wRn_M0(wrd);
2330 gen_op_iwmmxt_set_mup();
2331 gen_op_iwmmxt_set_cup();
2332 break;
2333 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2334 case 0x014: case 0x414: case 0x814: case 0xc14:
2335 if (((insn >> 22) & 3) == 0)
2336 return 1;
2337 wrd = (insn >> 12) & 0xf;
2338 rd0 = (insn >> 16) & 0xf;
2339 gen_op_iwmmxt_movq_M0_wRn(rd0);
2340 tmp = tcg_temp_new_i32();
2341 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2342 tcg_temp_free_i32(tmp);
2343 return 1;
2345 switch ((insn >> 22) & 3) {
2346 case 1:
2347 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2348 break;
2349 case 2:
2350 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2351 break;
2352 case 3:
2353 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2354 break;
2356 tcg_temp_free_i32(tmp);
2357 gen_op_iwmmxt_movq_wRn_M0(wrd);
2358 gen_op_iwmmxt_set_mup();
2359 gen_op_iwmmxt_set_cup();
2360 break;
2361 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2362 case 0x114: case 0x514: case 0x914: case 0xd14:
2363 if (((insn >> 22) & 3) == 0)
2364 return 1;
2365 wrd = (insn >> 12) & 0xf;
2366 rd0 = (insn >> 16) & 0xf;
2367 gen_op_iwmmxt_movq_M0_wRn(rd0);
2368 tmp = tcg_temp_new_i32();
2369 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2370 tcg_temp_free_i32(tmp);
2371 return 1;
2373 switch ((insn >> 22) & 3) {
2374 case 1:
2375 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2376 break;
2377 case 2:
2378 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2379 break;
2380 case 3:
2381 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2382 break;
2384 tcg_temp_free_i32(tmp);
2385 gen_op_iwmmxt_movq_wRn_M0(wrd);
2386 gen_op_iwmmxt_set_mup();
2387 gen_op_iwmmxt_set_cup();
2388 break;
2389 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2390 case 0x314: case 0x714: case 0xb14: case 0xf14:
2391 if (((insn >> 22) & 3) == 0)
2392 return 1;
2393 wrd = (insn >> 12) & 0xf;
2394 rd0 = (insn >> 16) & 0xf;
2395 gen_op_iwmmxt_movq_M0_wRn(rd0);
2396 tmp = tcg_temp_new_i32();
2397 switch ((insn >> 22) & 3) {
2398 case 1:
2399 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2400 tcg_temp_free_i32(tmp);
2401 return 1;
2403 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2404 break;
2405 case 2:
2406 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2407 tcg_temp_free_i32(tmp);
2408 return 1;
2410 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2411 break;
2412 case 3:
2413 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2414 tcg_temp_free_i32(tmp);
2415 return 1;
2417 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2418 break;
2420 tcg_temp_free_i32(tmp);
2421 gen_op_iwmmxt_movq_wRn_M0(wrd);
2422 gen_op_iwmmxt_set_mup();
2423 gen_op_iwmmxt_set_cup();
2424 break;
2425 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2426 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2427 wrd = (insn >> 12) & 0xf;
2428 rd0 = (insn >> 16) & 0xf;
2429 rd1 = (insn >> 0) & 0xf;
2430 gen_op_iwmmxt_movq_M0_wRn(rd0);
2431 switch ((insn >> 22) & 3) {
2432 case 0:
2433 if (insn & (1 << 21))
2434 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2435 else
2436 gen_op_iwmmxt_minub_M0_wRn(rd1);
2437 break;
2438 case 1:
2439 if (insn & (1 << 21))
2440 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2441 else
2442 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2443 break;
2444 case 2:
2445 if (insn & (1 << 21))
2446 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2447 else
2448 gen_op_iwmmxt_minul_M0_wRn(rd1);
2449 break;
2450 case 3:
2451 return 1;
2453 gen_op_iwmmxt_movq_wRn_M0(wrd);
2454 gen_op_iwmmxt_set_mup();
2455 break;
2456 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2457 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2458 wrd = (insn >> 12) & 0xf;
2459 rd0 = (insn >> 16) & 0xf;
2460 rd1 = (insn >> 0) & 0xf;
2461 gen_op_iwmmxt_movq_M0_wRn(rd0);
2462 switch ((insn >> 22) & 3) {
2463 case 0:
2464 if (insn & (1 << 21))
2465 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2466 else
2467 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2468 break;
2469 case 1:
2470 if (insn & (1 << 21))
2471 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2472 else
2473 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2474 break;
2475 case 2:
2476 if (insn & (1 << 21))
2477 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2478 else
2479 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2480 break;
2481 case 3:
2482 return 1;
2484 gen_op_iwmmxt_movq_wRn_M0(wrd);
2485 gen_op_iwmmxt_set_mup();
2486 break;
2487 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2488 case 0x402: case 0x502: case 0x602: case 0x702:
2489 wrd = (insn >> 12) & 0xf;
2490 rd0 = (insn >> 16) & 0xf;
2491 rd1 = (insn >> 0) & 0xf;
2492 gen_op_iwmmxt_movq_M0_wRn(rd0);
2493 tmp = tcg_const_i32((insn >> 20) & 3);
2494 iwmmxt_load_reg(cpu_V1, rd1);
2495 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2496 tcg_temp_free_i32(tmp);
2497 gen_op_iwmmxt_movq_wRn_M0(wrd);
2498 gen_op_iwmmxt_set_mup();
2499 break;
2500 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2501 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2502 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2503 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2504 wrd = (insn >> 12) & 0xf;
2505 rd0 = (insn >> 16) & 0xf;
2506 rd1 = (insn >> 0) & 0xf;
2507 gen_op_iwmmxt_movq_M0_wRn(rd0);
2508 switch ((insn >> 20) & 0xf) {
2509 case 0x0:
2510 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2511 break;
2512 case 0x1:
2513 gen_op_iwmmxt_subub_M0_wRn(rd1);
2514 break;
2515 case 0x3:
2516 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2517 break;
2518 case 0x4:
2519 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2520 break;
2521 case 0x5:
2522 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2523 break;
2524 case 0x7:
2525 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2526 break;
2527 case 0x8:
2528 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2529 break;
2530 case 0x9:
2531 gen_op_iwmmxt_subul_M0_wRn(rd1);
2532 break;
2533 case 0xb:
2534 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2535 break;
2536 default:
2537 return 1;
2539 gen_op_iwmmxt_movq_wRn_M0(wrd);
2540 gen_op_iwmmxt_set_mup();
2541 gen_op_iwmmxt_set_cup();
2542 break;
2543 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2544 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2545 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2546 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2547 wrd = (insn >> 12) & 0xf;
2548 rd0 = (insn >> 16) & 0xf;
2549 gen_op_iwmmxt_movq_M0_wRn(rd0);
2550 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2551 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2552 tcg_temp_free_i32(tmp);
2553 gen_op_iwmmxt_movq_wRn_M0(wrd);
2554 gen_op_iwmmxt_set_mup();
2555 gen_op_iwmmxt_set_cup();
2556 break;
2557 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2558 case 0x418: case 0x518: case 0x618: case 0x718:
2559 case 0x818: case 0x918: case 0xa18: case 0xb18:
2560 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2561 wrd = (insn >> 12) & 0xf;
2562 rd0 = (insn >> 16) & 0xf;
2563 rd1 = (insn >> 0) & 0xf;
2564 gen_op_iwmmxt_movq_M0_wRn(rd0);
2565 switch ((insn >> 20) & 0xf) {
2566 case 0x0:
2567 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2568 break;
2569 case 0x1:
2570 gen_op_iwmmxt_addub_M0_wRn(rd1);
2571 break;
2572 case 0x3:
2573 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2574 break;
2575 case 0x4:
2576 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2577 break;
2578 case 0x5:
2579 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2580 break;
2581 case 0x7:
2582 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2583 break;
2584 case 0x8:
2585 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2586 break;
2587 case 0x9:
2588 gen_op_iwmmxt_addul_M0_wRn(rd1);
2589 break;
2590 case 0xb:
2591 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2592 break;
2593 default:
2594 return 1;
2596 gen_op_iwmmxt_movq_wRn_M0(wrd);
2597 gen_op_iwmmxt_set_mup();
2598 gen_op_iwmmxt_set_cup();
2599 break;
2600 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2601 case 0x408: case 0x508: case 0x608: case 0x708:
2602 case 0x808: case 0x908: case 0xa08: case 0xb08:
2603 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2604 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2605 return 1;
2606 wrd = (insn >> 12) & 0xf;
2607 rd0 = (insn >> 16) & 0xf;
2608 rd1 = (insn >> 0) & 0xf;
2609 gen_op_iwmmxt_movq_M0_wRn(rd0);
2610 switch ((insn >> 22) & 3) {
2611 case 1:
2612 if (insn & (1 << 21))
2613 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2614 else
2615 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2616 break;
2617 case 2:
2618 if (insn & (1 << 21))
2619 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2620 else
2621 gen_op_iwmmxt_packul_M0_wRn(rd1);
2622 break;
2623 case 3:
2624 if (insn & (1 << 21))
2625 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2626 else
2627 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2628 break;
2630 gen_op_iwmmxt_movq_wRn_M0(wrd);
2631 gen_op_iwmmxt_set_mup();
2632 gen_op_iwmmxt_set_cup();
2633 break;
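/* TMIA, TMIAPH and TMIAxy below multiply two ARM core registers
 * (32x32, packed 16x16 pairs, or selected halfwords) and accumulate
 * the result into the 64-bit wMMX register selected by bits [8:5].
 */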
2634 case 0x201: case 0x203: case 0x205: case 0x207:
2635 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2636 case 0x211: case 0x213: case 0x215: case 0x217:
2637 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2638 wrd = (insn >> 5) & 0xf;
2639 rd0 = (insn >> 12) & 0xf;
2640 rd1 = (insn >> 0) & 0xf;
2641 if (rd0 == 0xf || rd1 == 0xf)
2642 return 1;
2643 gen_op_iwmmxt_movq_M0_wRn(wrd);
2644 tmp = load_reg(s, rd0);
2645 tmp2 = load_reg(s, rd1);
2646 switch ((insn >> 16) & 0xf) {
2647 case 0x0: /* TMIA */
2648 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2649 break;
2650 case 0x8: /* TMIAPH */
2651 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2652 break;
2653 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2654 if (insn & (1 << 16))
2655 tcg_gen_shri_i32(tmp, tmp, 16);
2656 if (insn & (1 << 17))
2657 tcg_gen_shri_i32(tmp2, tmp2, 16);
2658 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2659 break;
2660 default:
2661 tcg_temp_free_i32(tmp2);
2662 tcg_temp_free_i32(tmp);
2663 return 1;
2665 tcg_temp_free_i32(tmp2);
2666 tcg_temp_free_i32(tmp);
2667 gen_op_iwmmxt_movq_wRn_M0(wrd);
2668 gen_op_iwmmxt_set_mup();
2669 break;
2670 default:
2671 return 1;
2674 return 0;
2677 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2678 (i.e. an undefined instruction). */
2679 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2681 int acc, rd0, rd1, rdhi, rdlo;
2682 TCGv_i32 tmp, tmp2;
2684 if ((insn & 0x0ff00f10) == 0x0e200010) {
2685 /* Multiply with Internal Accumulate Format */
2686 rd0 = (insn >> 12) & 0xf;
2687 rd1 = insn & 0xf;
2688 acc = (insn >> 5) & 7;
2690 if (acc != 0)
2691 return 1;
2693 tmp = load_reg(s, rd0);
2694 tmp2 = load_reg(s, rd1);
2695 switch ((insn >> 16) & 0xf) {
2696 case 0x0: /* MIA */
2697 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2698 break;
2699 case 0x8: /* MIAPH */
2700 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2701 break;
2702 case 0xc: /* MIABB */
2703 case 0xd: /* MIABT */
2704 case 0xe: /* MIATB */
2705 case 0xf: /* MIATT */
2706 if (insn & (1 << 16))
2707 tcg_gen_shri_i32(tmp, tmp, 16);
2708 if (insn & (1 << 17))
2709 tcg_gen_shri_i32(tmp2, tmp2, 16);
2710 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2711 break;
2712 default:
2713 return 1;
2715 tcg_temp_free_i32(tmp2);
2716 tcg_temp_free_i32(tmp);
2718 gen_op_iwmmxt_movq_wRn_M0(acc);
2719 return 0;
2722 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2723 /* Internal Accumulator Access Format */
2724 rdhi = (insn >> 16) & 0xf;
2725 rdlo = (insn >> 12) & 0xf;
2726 acc = insn & 7;
2728 if (acc != 0)
2729 return 1;
2731 if (insn & ARM_CP_RW_BIT) { /* MRA */
2732 iwmmxt_load_reg(cpu_V0, acc);
2733 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2734 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2735 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
2736 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2737 } else { /* MAR */
2738 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2739 iwmmxt_store_reg(cpu_V0, acc);
2741 return 0;
2744 return 1;
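/* The macros below extract VFP register numbers from an instruction.
 * Single-precision registers use a 5-bit number whose low bit comes
 * from the single-bit field; double-precision registers take their
 * high bit from that field, but only when VFP3 (32 D registers) is
 * present; otherwise the bit must be zero or the insn UNDEFs.
 */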
2747 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2748 #define VFP_SREG(insn, bigbit, smallbit) \
2749 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2750 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2751 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2752 reg = (((insn) >> (bigbit)) & 0x0f) \
2753 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2754 } else { \
2755 if (insn & (1 << (smallbit))) \
2756 return 1; \
2757 reg = ((insn) >> (bigbit)) & 0x0f; \
2758 }} while (0)
2760 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2761 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2762 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2763 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2764 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2765 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2767 /* Move between integer and VFP cores. */
2768 static TCGv_i32 gen_vfp_mrs(void)
2770 TCGv_i32 tmp = tcg_temp_new_i32();
2771 tcg_gen_mov_i32(tmp, cpu_F0s);
2772 return tmp;
2775 static void gen_vfp_msr(TCGv_i32 tmp)
2777 tcg_gen_mov_i32(cpu_F0s, tmp);
2778 tcg_temp_free_i32(tmp);
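/* The gen_neon_dup_* helpers below replicate a byte or halfword across
 * all lanes of a 32-bit value; they are used for VDUP and for the
 * "load single element and replicate" forms.
 */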
2781 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2783 TCGv_i32 tmp = tcg_temp_new_i32();
2784 if (shift)
2785 tcg_gen_shri_i32(var, var, shift);
2786 tcg_gen_ext8u_i32(var, var);
2787 tcg_gen_shli_i32(tmp, var, 8);
2788 tcg_gen_or_i32(var, var, tmp);
2789 tcg_gen_shli_i32(tmp, var, 16);
2790 tcg_gen_or_i32(var, var, tmp);
2791 tcg_temp_free_i32(tmp);
2794 static void gen_neon_dup_low16(TCGv_i32 var)
2796 TCGv_i32 tmp = tcg_temp_new_i32();
2797 tcg_gen_ext16u_i32(var, var);
2798 tcg_gen_shli_i32(tmp, var, 16);
2799 tcg_gen_or_i32(var, var, tmp);
2800 tcg_temp_free_i32(tmp);
2803 static void gen_neon_dup_high16(TCGv_i32 var)
2805 TCGv_i32 tmp = tcg_temp_new_i32();
2806 tcg_gen_andi_i32(var, var, 0xffff0000);
2807 tcg_gen_shri_i32(tmp, var, 16);
2808 tcg_gen_or_i32(var, var, tmp);
2809 tcg_temp_free_i32(tmp);
2812 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2814 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2815 TCGv_i32 tmp = tcg_temp_new_i32();
2816 switch (size) {
2817 case 0:
2818 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2819 gen_neon_dup_u8(tmp, 0);
2820 break;
2821 case 1:
2822 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
2823 gen_neon_dup_low16(tmp);
2824 break;
2825 case 2:
2826 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2827 break;
2828 default: /* Avoid compiler warnings. */
2829 abort();
2831 return tmp;
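/* VSEL: set Vd = Vn if the condition encoded in bits [21:20] (EQ, VS,
 * GE or GT) holds, else Vd = Vm.  The condition is evaluated from the
 * cached NZCV flag values using movcond, so no branch is generated.
 */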
2834 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2835 uint32_t dp)
2837 uint32_t cc = extract32(insn, 20, 2);
2839 if (dp) {
2840 TCGv_i64 frn, frm, dest;
2841 TCGv_i64 tmp, zero, zf, nf, vf;
2843 zero = tcg_const_i64(0);
2845 frn = tcg_temp_new_i64();
2846 frm = tcg_temp_new_i64();
2847 dest = tcg_temp_new_i64();
2849 zf = tcg_temp_new_i64();
2850 nf = tcg_temp_new_i64();
2851 vf = tcg_temp_new_i64();
2853 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2854 tcg_gen_ext_i32_i64(nf, cpu_NF);
2855 tcg_gen_ext_i32_i64(vf, cpu_VF);
2857 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2858 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2859 switch (cc) {
2860 case 0: /* eq: Z */
2861 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2862 frn, frm);
2863 break;
2864 case 1: /* vs: V */
2865 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
2866 frn, frm);
2867 break;
2868 case 2: /* ge: N == V -> N ^ V == 0 */
2869 tmp = tcg_temp_new_i64();
2870 tcg_gen_xor_i64(tmp, vf, nf);
2871 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2872 frn, frm);
2873 tcg_temp_free_i64(tmp);
2874 break;
2875 case 3: /* gt: !Z && N == V */
2876 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
2877 frn, frm);
2878 tmp = tcg_temp_new_i64();
2879 tcg_gen_xor_i64(tmp, vf, nf);
2880 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2881 dest, frm);
2882 tcg_temp_free_i64(tmp);
2883 break;
2885 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2886 tcg_temp_free_i64(frn);
2887 tcg_temp_free_i64(frm);
2888 tcg_temp_free_i64(dest);
2890 tcg_temp_free_i64(zf);
2891 tcg_temp_free_i64(nf);
2892 tcg_temp_free_i64(vf);
2894 tcg_temp_free_i64(zero);
2895 } else {
2896 TCGv_i32 frn, frm, dest;
2897 TCGv_i32 tmp, zero;
2899 zero = tcg_const_i32(0);
2901 frn = tcg_temp_new_i32();
2902 frm = tcg_temp_new_i32();
2903 dest = tcg_temp_new_i32();
2904 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2905 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2906 switch (cc) {
2907 case 0: /* eq: Z */
2908 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
2909 frn, frm);
2910 break;
2911 case 1: /* vs: V */
2912 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
2913 frn, frm);
2914 break;
2915 case 2: /* ge: N == V -> N ^ V == 0 */
2916 tmp = tcg_temp_new_i32();
2917 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2918 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2919 frn, frm);
2920 tcg_temp_free_i32(tmp);
2921 break;
2922 case 3: /* gt: !Z && N == V */
2923 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
2924 frn, frm);
2925 tmp = tcg_temp_new_i32();
2926 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2927 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2928 dest, frm);
2929 tcg_temp_free_i32(tmp);
2930 break;
2932 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2933 tcg_temp_free_i32(frn);
2934 tcg_temp_free_i32(frm);
2935 tcg_temp_free_i32(dest);
2937 tcg_temp_free_i32(zero);
2940 return 0;
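/* VMAXNM/VMINNM: floating-point maximum/minimum number.  Bit 6 selects
 * min vs max; the vfp_minnum and vfp_maxnum helpers give the IEEE
 * 754-2008 minNum/maxNum behaviour, where a quiet NaN input is
 * effectively replaced by the other, numeric, operand.
 */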
2943 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2944 uint32_t rm, uint32_t dp)
2946 uint32_t vmin = extract32(insn, 6, 1);
2947 TCGv_ptr fpst = get_fpstatus_ptr(0);
2949 if (dp) {
2950 TCGv_i64 frn, frm, dest;
2952 frn = tcg_temp_new_i64();
2953 frm = tcg_temp_new_i64();
2954 dest = tcg_temp_new_i64();
2956 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2957 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2958 if (vmin) {
2959 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
2960 } else {
2961 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
2963 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2964 tcg_temp_free_i64(frn);
2965 tcg_temp_free_i64(frm);
2966 tcg_temp_free_i64(dest);
2967 } else {
2968 TCGv_i32 frn, frm, dest;
2970 frn = tcg_temp_new_i32();
2971 frm = tcg_temp_new_i32();
2972 dest = tcg_temp_new_i32();
2974 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2975 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2976 if (vmin) {
2977 gen_helper_vfp_minnums(dest, frn, frm, fpst);
2978 } else {
2979 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
2981 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2982 tcg_temp_free_i32(frn);
2983 tcg_temp_free_i32(frm);
2984 tcg_temp_free_i32(dest);
2987 tcg_temp_free_ptr(fpst);
2988 return 0;
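/* VRINTA/VRINTN/VRINTP/VRINTM: round to an integral value in floating
 * point, using the rounding mode from the instruction rather than the
 * one in FPSCR.  set_rmode swaps in the new mode (writing back the
 * previous one), so the second call afterwards restores the original.
 */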
2991 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2992 int rounding)
2994 TCGv_ptr fpst = get_fpstatus_ptr(0);
2995 TCGv_i32 tcg_rmode;
2997 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2998 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3000 if (dp) {
3001 TCGv_i64 tcg_op;
3002 TCGv_i64 tcg_res;
3003 tcg_op = tcg_temp_new_i64();
3004 tcg_res = tcg_temp_new_i64();
3005 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3006 gen_helper_rintd(tcg_res, tcg_op, fpst);
3007 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3008 tcg_temp_free_i64(tcg_op);
3009 tcg_temp_free_i64(tcg_res);
3010 } else {
3011 TCGv_i32 tcg_op;
3012 TCGv_i32 tcg_res;
3013 tcg_op = tcg_temp_new_i32();
3014 tcg_res = tcg_temp_new_i32();
3015 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3016 gen_helper_rints(tcg_res, tcg_op, fpst);
3017 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3018 tcg_temp_free_i32(tcg_op);
3019 tcg_temp_free_i32(tcg_res);
3022 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3023 tcg_temp_free_i32(tcg_rmode);
3025 tcg_temp_free_ptr(fpst);
3026 return 0;
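/* VCVTA/VCVTN/VCVTP/VCVTM: float to integer conversion with the
 * rounding mode taken from the instruction.  Bit 7 selects signed vs
 * unsigned; the integer result always lands in a single-precision
 * register, even for a double-precision source.
 */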
3029 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3030 int rounding)
3032 bool is_signed = extract32(insn, 7, 1);
3033 TCGv_ptr fpst = get_fpstatus_ptr(0);
3034 TCGv_i32 tcg_rmode, tcg_shift;
3036 tcg_shift = tcg_const_i32(0);
3038 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3039 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3041 if (dp) {
3042 TCGv_i64 tcg_double, tcg_res;
3043 TCGv_i32 tcg_tmp;
3044 /* Rd is encoded as a single precision register even when the source
3045  * is double precision.
3046  */
3047 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3048 tcg_double = tcg_temp_new_i64();
3049 tcg_res = tcg_temp_new_i64();
3050 tcg_tmp = tcg_temp_new_i32();
3051 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3052 if (is_signed) {
3053 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3054 } else {
3055 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3057 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
3058 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3059 tcg_temp_free_i32(tcg_tmp);
3060 tcg_temp_free_i64(tcg_res);
3061 tcg_temp_free_i64(tcg_double);
3062 } else {
3063 TCGv_i32 tcg_single, tcg_res;
3064 tcg_single = tcg_temp_new_i32();
3065 tcg_res = tcg_temp_new_i32();
3066 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3067 if (is_signed) {
3068 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3069 } else {
3070 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3072 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3073 tcg_temp_free_i32(tcg_res);
3074 tcg_temp_free_i32(tcg_single);
3077 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3078 tcg_temp_free_i32(tcg_rmode);
3080 tcg_temp_free_i32(tcg_shift);
3082 tcg_temp_free_ptr(fpst);
3084 return 0;
3087 /* Table for converting the most common AArch32 encoding of
3088 * rounding mode to arm_fprounding order (which matches the
3089  * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3090  */
3091 static const uint8_t fp_decode_rm[] = {
3092 FPROUNDING_TIEAWAY,
3093 FPROUNDING_TIEEVEN,
3094 FPROUNDING_POSINF,
3095 FPROUNDING_NEGINF,
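/* Dispatch the VFP encodings new in v8 (VSEL, VMAXNM/VMINNM, VRINT
 * and VCVT with explicit rounding mode).  These sit in the encoding
 * space that older cores treat as unconditional, so they are only
 * accepted when ARM_FEATURE_V8 is set.
 */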
3098 static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
3100 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3102 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3103 return 1;
3106 if (dp) {
3107 VFP_DREG_D(rd, insn);
3108 VFP_DREG_N(rn, insn);
3109 VFP_DREG_M(rm, insn);
3110 } else {
3111 rd = VFP_SREG_D(insn);
3112 rn = VFP_SREG_N(insn);
3113 rm = VFP_SREG_M(insn);
3116 if ((insn & 0x0f800e50) == 0x0e000a00) {
3117 return handle_vsel(insn, rd, rn, rm, dp);
3118 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3119 return handle_vminmaxnm(insn, rd, rn, rm, dp);
3120 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3121 /* VRINTA, VRINTN, VRINTP, VRINTM */
3122 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3123 return handle_vrint(insn, rd, rm, dp, rounding);
3124 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3125 /* VCVTA, VCVTN, VCVTP, VCVTM */
3126 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3127 return handle_vcvt(insn, rd, rm, dp, rounding);
3129 return 1;
3132 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3133 (i.e. an undefined instruction). */
3134 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3136 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3137 int dp, veclen;
3138 TCGv_i32 addr;
3139 TCGv_i32 tmp;
3140 TCGv_i32 tmp2;
3142 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
3143 return 1;
3146 /* FIXME: this access check should not take precedence over UNDEF
3147 * for invalid encodings; we will generate incorrect syndrome information
3148  * for attempts to execute invalid vfp/neon encodings with FP disabled.
3149  */
3150 if (s->fp_excp_el) {
3151 gen_exception_insn(s, 4, EXCP_UDEF,
3152 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
3153 return 0;
3156 if (!s->vfp_enabled) {
3157 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3158 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3159 return 1;
3160 rn = (insn >> 16) & 0xf;
3161 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3162 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
3163 return 1;
3167 if (extract32(insn, 28, 4) == 0xf) {
3168 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3169  * only used in v8 and above.
3170  */
3171 return disas_vfp_v8_insn(s, insn);
3174 dp = ((insn & 0xf00) == 0xb00);
3175 switch ((insn >> 24) & 0xf) {
3176 case 0xe:
3177 if (insn & (1 << 4)) {
3178 /* single register transfer */
3179 rd = (insn >> 12) & 0xf;
3180 if (dp) {
3181 int size;
3182 int pass;
3184 VFP_DREG_N(rn, insn);
3185 if (insn & 0xf)
3186 return 1;
3187 if (insn & 0x00c00060
3188 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
3189 return 1;
3192 pass = (insn >> 21) & 1;
3193 if (insn & (1 << 22)) {
3194 size = 0;
3195 offset = ((insn >> 5) & 3) * 8;
3196 } else if (insn & (1 << 5)) {
3197 size = 1;
3198 offset = (insn & (1 << 6)) ? 16 : 0;
3199 } else {
3200 size = 2;
3201 offset = 0;
3203 if (insn & ARM_CP_RW_BIT) {
3204 /* vfp->arm */
3205 tmp = neon_load_reg(rn, pass);
3206 switch (size) {
3207 case 0:
3208 if (offset)
3209 tcg_gen_shri_i32(tmp, tmp, offset);
3210 if (insn & (1 << 23))
3211 gen_uxtb(tmp);
3212 else
3213 gen_sxtb(tmp);
3214 break;
3215 case 1:
3216 if (insn & (1 << 23)) {
3217 if (offset) {
3218 tcg_gen_shri_i32(tmp, tmp, 16);
3219 } else {
3220 gen_uxth(tmp);
3222 } else {
3223 if (offset) {
3224 tcg_gen_sari_i32(tmp, tmp, 16);
3225 } else {
3226 gen_sxth(tmp);
3229 break;
3230 case 2:
3231 break;
3233 store_reg(s, rd, tmp);
3234 } else {
3235 /* arm->vfp */
3236 tmp = load_reg(s, rd);
3237 if (insn & (1 << 23)) {
3238 /* VDUP */
3239 if (size == 0) {
3240 gen_neon_dup_u8(tmp, 0);
3241 } else if (size == 1) {
3242 gen_neon_dup_low16(tmp);
3244 for (n = 0; n <= pass * 2; n++) {
3245 tmp2 = tcg_temp_new_i32();
3246 tcg_gen_mov_i32(tmp2, tmp);
3247 neon_store_reg(rn, n, tmp2);
3249 neon_store_reg(rn, n, tmp);
3250 } else {
3251 /* VMOV */
3252 switch (size) {
3253 case 0:
3254 tmp2 = neon_load_reg(rn, pass);
3255 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
3256 tcg_temp_free_i32(tmp2);
3257 break;
3258 case 1:
3259 tmp2 = neon_load_reg(rn, pass);
3260 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
3261 tcg_temp_free_i32(tmp2);
3262 break;
3263 case 2:
3264 break;
3266 neon_store_reg(rn, pass, tmp);
3269 } else { /* !dp */
3270 if ((insn & 0x6f) != 0x00)
3271 return 1;
3272 rn = VFP_SREG_N(insn);
3273 if (insn & ARM_CP_RW_BIT) {
3274 /* vfp->arm */
3275 if (insn & (1 << 21)) {
3276 /* system register */
3277 rn >>= 1;
3279 switch (rn) {
3280 case ARM_VFP_FPSID:
3281 /* VFP2 allows access to FSID from userspace.
3282 VFP3 restricts all id registers to privileged
3283 accesses. */
3284 if (IS_USER(s)
3285 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3286 return 1;
3288 tmp = load_cpu_field(vfp.xregs[rn]);
3289 break;
3290 case ARM_VFP_FPEXC:
3291 if (IS_USER(s))
3292 return 1;
3293 tmp = load_cpu_field(vfp.xregs[rn]);
3294 break;
3295 case ARM_VFP_FPINST:
3296 case ARM_VFP_FPINST2:
3297 /* Not present in VFP3. */
3298 if (IS_USER(s)
3299 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3300 return 1;
3302 tmp = load_cpu_field(vfp.xregs[rn]);
3303 break;
3304 case ARM_VFP_FPSCR:
3305 if (rd == 15) {
3306 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3307 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3308 } else {
3309 tmp = tcg_temp_new_i32();
3310 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3312 break;
3313 case ARM_VFP_MVFR2:
3314 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3315 return 1;
3317 /* fall through */
3318 case ARM_VFP_MVFR0:
3319 case ARM_VFP_MVFR1:
3320 if (IS_USER(s)
3321 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
3322 return 1;
3324 tmp = load_cpu_field(vfp.xregs[rn]);
3325 break;
3326 default:
3327 return 1;
3329 } else {
3330 gen_mov_F0_vreg(0, rn);
3331 tmp = gen_vfp_mrs();
3333 if (rd == 15) {
3334 /* Set the 4 flag bits in the CPSR. */
3335 gen_set_nzcv(tmp);
3336 tcg_temp_free_i32(tmp);
3337 } else {
3338 store_reg(s, rd, tmp);
3340 } else {
3341 /* arm->vfp */
3342 if (insn & (1 << 21)) {
3343 rn >>= 1;
3344 /* system register */
3345 switch (rn) {
3346 case ARM_VFP_FPSID:
3347 case ARM_VFP_MVFR0:
3348 case ARM_VFP_MVFR1:
3349 /* Writes are ignored. */
3350 break;
3351 case ARM_VFP_FPSCR:
3352 tmp = load_reg(s, rd);
3353 gen_helper_vfp_set_fpscr(cpu_env, tmp);
3354 tcg_temp_free_i32(tmp);
3355 gen_lookup_tb(s);
3356 break;
3357 case ARM_VFP_FPEXC:
3358 if (IS_USER(s))
3359 return 1;
3360 /* TODO: VFP subarchitecture support.
3361 * For now, keep the EN bit only */
3362 tmp = load_reg(s, rd);
3363 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
3364 store_cpu_field(tmp, vfp.xregs[rn]);
3365 gen_lookup_tb(s);
3366 break;
3367 case ARM_VFP_FPINST:
3368 case ARM_VFP_FPINST2:
3369 if (IS_USER(s)) {
3370 return 1;
3372 tmp = load_reg(s, rd);
3373 store_cpu_field(tmp, vfp.xregs[rn]);
3374 break;
3375 default:
3376 return 1;
3378 } else {
3379 tmp = load_reg(s, rd);
3380 gen_vfp_msr(tmp);
3381 gen_mov_vreg_F0(0, rn);
3385 } else {
3386 /* data processing */
3387 /* The opcode is in bits 23, 21, 20 and 6. */
3388 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3389 if (dp) {
3390 if (op == 15) {
3391 /* rn is opcode */
3392 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3393 } else {
3394 /* rn is register number */
3395 VFP_DREG_N(rn, insn);
3398 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3399 ((rn & 0x1e) == 0x6))) {
3400 /* Integer or single/half precision destination. */
3401 rd = VFP_SREG_D(insn);
3402 } else {
3403 VFP_DREG_D(rd, insn);
3405 if (op == 15 &&
3406 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3407 ((rn & 0x1e) == 0x4))) {
3408 /* VCVT from int or half precision is always from S reg
3409 * regardless of dp bit. VCVT with immediate frac_bits
3410  * has same format as SREG_M.
3411  */
3412 rm = VFP_SREG_M(insn);
3413 } else {
3414 VFP_DREG_M(rm, insn);
3416 } else {
3417 rn = VFP_SREG_N(insn);
3418 if (op == 15 && rn == 15) {
3419 /* Double precision destination. */
3420 VFP_DREG_D(rd, insn);
3421 } else {
3422 rd = VFP_SREG_D(insn);
3424 /* NB that we implicitly rely on the encoding for the frac_bits
3425  * in VCVT of fixed to float being the same as that of an SREG_M
3426  */
3427 rm = VFP_SREG_M(insn);
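/* VFP short vectors: if FPSCR.LEN is nonzero a data-processing insn
 * iterates over several consecutive registers.  delta_d/delta_m are
 * the per-iteration register strides and bank_mask isolates the
 * register bank; a destination in bank 0 always means a scalar op,
 * and a scalar rm gives a mixed scalar/vector operation.
 */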
3430 veclen = s->vec_len;
3431 if (op == 15 && rn > 3)
3432 veclen = 0;
3434 /* Shut up compiler warnings. */
3435 delta_m = 0;
3436 delta_d = 0;
3437 bank_mask = 0;
3439 if (veclen > 0) {
3440 if (dp)
3441 bank_mask = 0xc;
3442 else
3443 bank_mask = 0x18;
3445 /* Figure out what type of vector operation this is. */
3446 if ((rd & bank_mask) == 0) {
3447 /* scalar */
3448 veclen = 0;
3449 } else {
3450 if (dp)
3451 delta_d = (s->vec_stride >> 1) + 1;
3452 else
3453 delta_d = s->vec_stride + 1;
3455 if ((rm & bank_mask) == 0) {
3456 /* mixed scalar/vector */
3457 delta_m = 0;
3458 } else {
3459 /* vector */
3460 delta_m = delta_d;
3465 /* Load the initial operands. */
3466 if (op == 15) {
3467 switch (rn) {
3468 case 16:
3469 case 17:
3470 /* Integer source */
3471 gen_mov_F0_vreg(0, rm);
3472 break;
3473 case 8:
3474 case 9:
3475 /* Compare */
3476 gen_mov_F0_vreg(dp, rd);
3477 gen_mov_F1_vreg(dp, rm);
3478 break;
3479 case 10:
3480 case 11:
3481 /* Compare with zero */
3482 gen_mov_F0_vreg(dp, rd);
3483 gen_vfp_F1_ld0(dp);
3484 break;
3485 case 20:
3486 case 21:
3487 case 22:
3488 case 23:
3489 case 28:
3490 case 29:
3491 case 30:
3492 case 31:
3493 /* Source and destination the same. */
3494 gen_mov_F0_vreg(dp, rd);
3495 break;
3496 case 4:
3497 case 5:
3498 case 6:
3499 case 7:
3500 /* VCVTB, VCVTT: only present with the halfprec extension
3501 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3502  * (we choose to UNDEF)
3503  */
3504 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3505 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
3506 return 1;
3508 if (!extract32(rn, 1, 1)) {
3509 /* Half precision source. */
3510 gen_mov_F0_vreg(0, rm);
3511 break;
3513 /* Otherwise fall through */
3514 default:
3515 /* One source operand. */
3516 gen_mov_F0_vreg(dp, rm);
3517 break;
3519 } else {
3520 /* Two source operands. */
3521 gen_mov_F0_vreg(dp, rn);
3522 gen_mov_F1_vreg(dp, rm);
3525 for (;;) {
3526 /* Perform the calculation. */
3527 switch (op) {
3528 case 0: /* VMLA: fd + (fn * fm) */
3529 /* Note that order of inputs to the add matters for NaNs */
3530 gen_vfp_F1_mul(dp);
3531 gen_mov_F0_vreg(dp, rd);
3532 gen_vfp_add(dp);
3533 break;
3534 case 1: /* VMLS: fd + -(fn * fm) */
3535 gen_vfp_mul(dp);
3536 gen_vfp_F1_neg(dp);
3537 gen_mov_F0_vreg(dp, rd);
3538 gen_vfp_add(dp);
3539 break;
3540 case 2: /* VNMLS: -fd + (fn * fm) */
3541 /* Note that it isn't valid to replace (-A + B) with (B - A)
3542 * or similar plausible looking simplifications
3543  * because this will give wrong results for NaNs.
3544  */
3545 gen_vfp_F1_mul(dp);
3546 gen_mov_F0_vreg(dp, rd);
3547 gen_vfp_neg(dp);
3548 gen_vfp_add(dp);
3549 break;
3550 case 3: /* VNMLA: -fd + -(fn * fm) */
3551 gen_vfp_mul(dp);
3552 gen_vfp_F1_neg(dp);
3553 gen_mov_F0_vreg(dp, rd);
3554 gen_vfp_neg(dp);
3555 gen_vfp_add(dp);
3556 break;
3557 case 4: /* mul: fn * fm */
3558 gen_vfp_mul(dp);
3559 break;
3560 case 5: /* nmul: -(fn * fm) */
3561 gen_vfp_mul(dp);
3562 gen_vfp_neg(dp);
3563 break;
3564 case 6: /* add: fn + fm */
3565 gen_vfp_add(dp);
3566 break;
3567 case 7: /* sub: fn - fm */
3568 gen_vfp_sub(dp);
3569 break;
3570 case 8: /* div: fn / fm */
3571 gen_vfp_div(dp);
3572 break;
3573 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3574 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3575 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3576 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3577 /* These are fused multiply-add, and must be done as one
3578 * floating point operation with no rounding between the
3579 * multiplication and addition steps.
3580 * NB that doing the negations here as separate steps is
3581  * correct: an input NaN should come out with its sign bit
3582  * flipped if it is a negated-input.
3583  */
3584 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
3585 return 1;
3587 if (dp) {
3588 TCGv_ptr fpst;
3589 TCGv_i64 frd;
3590 if (op & 1) {
3591 /* VFNMS, VFMS */
3592 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3594 frd = tcg_temp_new_i64();
3595 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3596 if (op & 2) {
3597 /* VFNMA, VFNMS */
3598 gen_helper_vfp_negd(frd, frd);
3600 fpst = get_fpstatus_ptr(0);
3601 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3602 cpu_F1d, frd, fpst);
3603 tcg_temp_free_ptr(fpst);
3604 tcg_temp_free_i64(frd);
3605 } else {
3606 TCGv_ptr fpst;
3607 TCGv_i32 frd;
3608 if (op & 1) {
3609 /* VFNMS, VFMS */
3610 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3612 frd = tcg_temp_new_i32();
3613 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3614 if (op & 2) {
3615 gen_helper_vfp_negs(frd, frd);
3617 fpst = get_fpstatus_ptr(0);
3618 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3619 cpu_F1s, frd, fpst);
3620 tcg_temp_free_ptr(fpst);
3621 tcg_temp_free_i32(frd);
3623 break;
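/* case 14 below materialises a VFP immediate (VMOV #imm): the 8-bit
 * encoded immediate is expanded (as in the ARM ARM VFPExpandImm
 * pseudocode) into the top bits of a single or double precision value.
 */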
3624 case 14: /* fconst */
3625 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3626 return 1;
3629 n = (insn << 12) & 0x80000000;
3630 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3631 if (dp) {
3632 if (i & 0x40)
3633 i |= 0x3f80;
3634 else
3635 i |= 0x4000;
3636 n |= i << 16;
3637 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3638 } else {
3639 if (i & 0x40)
3640 i |= 0x780;
3641 else
3642 i |= 0x800;
3643 n |= i << 19;
3644 tcg_gen_movi_i32(cpu_F0s, n);
3646 break;
3647 case 15: /* extension space */
3648 switch (rn) {
3649 case 0: /* cpy */
3650 /* no-op */
3651 break;
3652 case 1: /* abs */
3653 gen_vfp_abs(dp);
3654 break;
3655 case 2: /* neg */
3656 gen_vfp_neg(dp);
3657 break;
3658 case 3: /* sqrt */
3659 gen_vfp_sqrt(dp);
3660 break;
3661 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3662 tmp = gen_vfp_mrs();
3663 tcg_gen_ext16u_i32(tmp, tmp);
3664 if (dp) {
3665 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3666 cpu_env);
3667 } else {
3668 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3669 cpu_env);
3671 tcg_temp_free_i32(tmp);
3672 break;
3673 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3674 tmp = gen_vfp_mrs();
3675 tcg_gen_shri_i32(tmp, tmp, 16);
3676 if (dp) {
3677 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3678 cpu_env);
3679 } else {
3680 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3681 cpu_env);
3683 tcg_temp_free_i32(tmp);
3684 break;
3685 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3686 tmp = tcg_temp_new_i32();
3687 if (dp) {
3688 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3689 cpu_env);
3690 } else {
3691 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3692 cpu_env);
3694 gen_mov_F0_vreg(0, rd);
3695 tmp2 = gen_vfp_mrs();
3696 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3697 tcg_gen_or_i32(tmp, tmp, tmp2);
3698 tcg_temp_free_i32(tmp2);
3699 gen_vfp_msr(tmp);
3700 break;
3701 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3702 tmp = tcg_temp_new_i32();
3703 if (dp) {
3704 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3705 cpu_env);
3706 } else {
3707 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3708 cpu_env);
3710 tcg_gen_shli_i32(tmp, tmp, 16);
3711 gen_mov_F0_vreg(0, rd);
3712 tmp2 = gen_vfp_mrs();
3713 tcg_gen_ext16u_i32(tmp2, tmp2);
3714 tcg_gen_or_i32(tmp, tmp, tmp2);
3715 tcg_temp_free_i32(tmp2);
3716 gen_vfp_msr(tmp);
3717 break;
3718 case 8: /* cmp */
3719 gen_vfp_cmp(dp);
3720 break;
3721 case 9: /* cmpe */
3722 gen_vfp_cmpe(dp);
3723 break;
3724 case 10: /* cmpz */
3725 gen_vfp_cmp(dp);
3726 break;
3727 case 11: /* cmpez */
3728 gen_vfp_F1_ld0(dp);
3729 gen_vfp_cmpe(dp);
3730 break;
3731 case 12: /* vrintr */
3733 TCGv_ptr fpst = get_fpstatus_ptr(0);
3734 if (dp) {
3735 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3736 } else {
3737 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3739 tcg_temp_free_ptr(fpst);
3740 break;
3742 case 13: /* vrintz */
3744 TCGv_ptr fpst = get_fpstatus_ptr(0);
3745 TCGv_i32 tcg_rmode;
3746 tcg_rmode = tcg_const_i32(float_round_to_zero);
3747 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3748 if (dp) {
3749 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3750 } else {
3751 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3753 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3754 tcg_temp_free_i32(tcg_rmode);
3755 tcg_temp_free_ptr(fpst);
3756 break;
3758 case 14: /* vrintx */
3760 TCGv_ptr fpst = get_fpstatus_ptr(0);
3761 if (dp) {
3762 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3763 } else {
3764 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3766 tcg_temp_free_ptr(fpst);
3767 break;
3769 case 15: /* single<->double conversion */
3770 if (dp)
3771 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3772 else
3773 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3774 break;
3775 case 16: /* fuito */
3776 gen_vfp_uito(dp, 0);
3777 break;
3778 case 17: /* fsito */
3779 gen_vfp_sito(dp, 0);
3780 break;
3781 case 20: /* fshto */
3782 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3783 return 1;
3785 gen_vfp_shto(dp, 16 - rm, 0);
3786 break;
3787 case 21: /* fslto */
3788 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3789 return 1;
3791 gen_vfp_slto(dp, 32 - rm, 0);
3792 break;
3793 case 22: /* fuhto */
3794 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3795 return 1;
3797 gen_vfp_uhto(dp, 16 - rm, 0);
3798 break;
3799 case 23: /* fulto */
3800 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3801 return 1;
3803 gen_vfp_ulto(dp, 32 - rm, 0);
3804 break;
3805 case 24: /* ftoui */
3806 gen_vfp_toui(dp, 0);
3807 break;
3808 case 25: /* ftouiz */
3809 gen_vfp_touiz(dp, 0);
3810 break;
3811 case 26: /* ftosi */
3812 gen_vfp_tosi(dp, 0);
3813 break;
3814 case 27: /* ftosiz */
3815 gen_vfp_tosiz(dp, 0);
3816 break;
3817 case 28: /* ftosh */
3818 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3819 return 1;
3821 gen_vfp_tosh(dp, 16 - rm, 0);
3822 break;
3823 case 29: /* ftosl */
3824 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3825 return 1;
3827 gen_vfp_tosl(dp, 32 - rm, 0);
3828 break;
3829 case 30: /* ftouh */
3830 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3831 return 1;
3833 gen_vfp_touh(dp, 16 - rm, 0);
3834 break;
3835 case 31: /* ftoul */
3836 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3837 return 1;
3839 gen_vfp_toul(dp, 32 - rm, 0);
3840 break;
3841 default: /* undefined */
3842 return 1;
3844 break;
3845 default: /* undefined */
3846 return 1;
3849 /* Write back the result. */
3850 if (op == 15 && (rn >= 8 && rn <= 11)) {
3851 /* Comparison, do nothing. */
3852 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3853 (rn & 0x1e) == 0x6)) {
3854 /* VCVT double to int: always integer result.
3855 * VCVT double to half precision is always a single
3856  * precision result.
3857  */
3858 gen_mov_vreg_F0(0, rd);
3859 } else if (op == 15 && rn == 15) {
3860 /* conversion */
3861 gen_mov_vreg_F0(!dp, rd);
3862 } else {
3863 gen_mov_vreg_F0(dp, rd);
3866 /* break out of the loop if we have finished */
3867 if (veclen == 0)
3868 break;
3870 if (op == 15 && delta_m == 0) {
3871 /* single source one-many */
3872 while (veclen--) {
3873 rd = ((rd + delta_d) & (bank_mask - 1))
3874 | (rd & bank_mask);
3875 gen_mov_vreg_F0(dp, rd);
3877 break;
3879 /* Setup the next operands. */
3880 veclen--;
3881 rd = ((rd + delta_d) & (bank_mask - 1))
3882 | (rd & bank_mask);
3884 if (op == 15) {
3885 /* One source operand. */
3886 rm = ((rm + delta_m) & (bank_mask - 1))
3887 | (rm & bank_mask);
3888 gen_mov_F0_vreg(dp, rm);
3889 } else {
3890 /* Two source operands. */
3891 rn = ((rn + delta_d) & (bank_mask - 1))
3892 | (rn & bank_mask);
3893 gen_mov_F0_vreg(dp, rn);
3894 if (delta_m) {
3895 rm = ((rm + delta_m) & (bank_mask - 1))
3896 | (rm & bank_mask);
3897 gen_mov_F1_vreg(dp, rm);
3902 break;
3903 case 0xc:
3904 case 0xd:
3905 if ((insn & 0x03e00000) == 0x00400000) {
3906 /* two-register transfer */
3907 rn = (insn >> 16) & 0xf;
3908 rd = (insn >> 12) & 0xf;
3909 if (dp) {
3910 VFP_DREG_M(rm, insn);
3911 } else {
3912 rm = VFP_SREG_M(insn);
3915 if (insn & ARM_CP_RW_BIT) {
3916 /* vfp->arm */
3917 if (dp) {
3918 gen_mov_F0_vreg(0, rm * 2);
3919 tmp = gen_vfp_mrs();
3920 store_reg(s, rd, tmp);
3921 gen_mov_F0_vreg(0, rm * 2 + 1);
3922 tmp = gen_vfp_mrs();
3923 store_reg(s, rn, tmp);
3924 } else {
3925 gen_mov_F0_vreg(0, rm);
3926 tmp = gen_vfp_mrs();
3927 store_reg(s, rd, tmp);
3928 gen_mov_F0_vreg(0, rm + 1);
3929 tmp = gen_vfp_mrs();
3930 store_reg(s, rn, tmp);
3932 } else {
3933 /* arm->vfp */
3934 if (dp) {
3935 tmp = load_reg(s, rd);
3936 gen_vfp_msr(tmp);
3937 gen_mov_vreg_F0(0, rm * 2);
3938 tmp = load_reg(s, rn);
3939 gen_vfp_msr(tmp);
3940 gen_mov_vreg_F0(0, rm * 2 + 1);
3941 } else {
3942 tmp = load_reg(s, rd);
3943 gen_vfp_msr(tmp);
3944 gen_mov_vreg_F0(0, rm);
3945 tmp = load_reg(s, rn);
3946 gen_vfp_msr(tmp);
3947 gen_mov_vreg_F0(0, rm + 1);
3950 } else {
3951 /* Load/store */
3952 rn = (insn >> 16) & 0xf;
3953 if (dp)
3954 VFP_DREG_D(rd, insn);
3955 else
3956 rd = VFP_SREG_D(insn);
3957 if ((insn & 0x01200000) == 0x01000000) {
3958 /* Single load/store */
3959 offset = (insn & 0xff) << 2;
3960 if ((insn & (1 << 23)) == 0)
3961 offset = -offset;
3962 if (s->thumb && rn == 15) {
3963 /* This is actually UNPREDICTABLE */
3964 addr = tcg_temp_new_i32();
3965 tcg_gen_movi_i32(addr, s->pc & ~2);
3966 } else {
3967 addr = load_reg(s, rn);
3969 tcg_gen_addi_i32(addr, addr, offset);
3970 if (insn & (1 << 20)) {
3971 gen_vfp_ld(s, dp, addr);
3972 gen_mov_vreg_F0(dp, rd);
3973 } else {
3974 gen_mov_F0_vreg(dp, rd);
3975 gen_vfp_st(s, dp, addr);
3977 tcg_temp_free_i32(addr);
3978 } else {
3979 /* load/store multiple */
3980 int w = insn & (1 << 21);
3981 if (dp)
3982 n = (insn >> 1) & 0x7f;
3983 else
3984 n = insn & 0xff;
3986 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3987 /* P == U , W == 1 => UNDEF */
3988 return 1;
3990 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3991 /* UNPREDICTABLE cases for bad immediates: we choose to
3992  * UNDEF to avoid generating huge numbers of TCG ops
3993  */
3994 return 1;
3996 if (rn == 15 && w) {
3997 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3998 return 1;
4001 if (s->thumb && rn == 15) {
4002 /* This is actually UNPREDICTABLE */
4003 addr = tcg_temp_new_i32();
4004 tcg_gen_movi_i32(addr, s->pc & ~2);
4005 } else {
4006 addr = load_reg(s, rn);
4008 if (insn & (1 << 24)) /* pre-decrement */
4009 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
4011 if (dp)
4012 offset = 8;
4013 else
4014 offset = 4;
4015 for (i = 0; i < n; i++) {
4016 if (insn & ARM_CP_RW_BIT) {
4017 /* load */
4018 gen_vfp_ld(s, dp, addr);
4019 gen_mov_vreg_F0(dp, rd + i);
4020 } else {
4021 /* store */
4022 gen_mov_F0_vreg(dp, rd + i);
4023 gen_vfp_st(s, dp, addr);
4025 tcg_gen_addi_i32(addr, addr, offset);
4027 if (w) {
4028 /* writeback */
4029 if (insn & (1 << 24))
4030 offset = -offset * n;
4031 else if (dp && (insn & 1))
4032 offset = 4;
4033 else
4034 offset = 0;
4036 if (offset != 0)
4037 tcg_gen_addi_i32(addr, addr, offset);
4038 store_reg(s, rn, addr);
4039 } else {
4040 tcg_temp_free_i32(addr);
4044 break;
4045 default:
4046 /* Should never happen. */
4047 return 1;
4049 return 0;
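/* Direct block chaining with goto_tb is only used when the destination
 * lies on the same guest page as the start of the TB or as the current
 * instruction; otherwise we set the PC and exit to the main loop so the
 * destination is looked up afresh.
 */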
4052 static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
4054 #ifndef CONFIG_USER_ONLY
4055 return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
4056 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4057 #else
4058 return true;
4059 #endif
4062 static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
4064 if (use_goto_tb(s, dest)) {
4065 tcg_gen_goto_tb(n);
4066 gen_set_pc_im(s, dest);
4067 tcg_gen_exit_tb((uintptr_t)s->tb + n);
4068 } else {
4069 gen_set_pc_im(s, dest);
4070 tcg_gen_exit_tb(0);
4074 static inline void gen_jmp (DisasContext *s, uint32_t dest)
4076 if (unlikely(s->singlestep_enabled || s->ss_active)) {
4077 /* An indirect jump so that we still trigger the debug exception. */
4078 if (s->thumb)
4079 dest |= 1;
4080 gen_bx_im(s, dest);
4081 } else {
4082 gen_goto_tb(s, 0, dest);
4083 s->is_jmp = DISAS_TB_JUMP;
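/* gen_mulxy: 16x16 -> 32 signed multiply used by the SMUL<x><y> family;
 * x and y pick the top (1) or bottom (0) halfword of each operand.
 */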
4087 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
4089 if (x)
4090 tcg_gen_sari_i32(t0, t0, 16);
4091 else
4092 gen_sxth(t0);
4093 if (y)
4094 tcg_gen_sari_i32(t1, t1, 16);
4095 else
4096 gen_sxth(t1);
4097 tcg_gen_mul_i32(t0, t0, t1);
4100 /* Return the mask of PSR bits set by a MSR instruction. */
4101 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4103 uint32_t mask;
4105 mask = 0;
4106 if (flags & (1 << 0))
4107 mask |= 0xff;
4108 if (flags & (1 << 1))
4109 mask |= 0xff00;
4110 if (flags & (1 << 2))
4111 mask |= 0xff0000;
4112 if (flags & (1 << 3))
4113 mask |= 0xff000000;
4115 /* Mask out undefined bits. */
4116 mask &= ~CPSR_RESERVED;
4117 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
4118 mask &= ~CPSR_T;
4120 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
4121 mask &= ~CPSR_Q; /* V5TE in reality*/
4123 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
4124 mask &= ~(CPSR_E | CPSR_GE);
4126 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
4127 mask &= ~CPSR_IT;
4129 /* Mask out execution state and reserved bits. */
4130 if (!spsr) {
4131 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4133 /* Mask out privileged bits. */
4134 if (IS_USER(s))
4135 mask &= CPSR_USER;
4136 return mask;
4139 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
4140 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
4142 TCGv_i32 tmp;
4143 if (spsr) {
4144 /* ??? This is also undefined in system mode. */
4145 if (IS_USER(s))
4146 return 1;
4148 tmp = load_cpu_field(spsr);
4149 tcg_gen_andi_i32(tmp, tmp, ~mask);
4150 tcg_gen_andi_i32(t0, t0, mask);
4151 tcg_gen_or_i32(tmp, tmp, t0);
4152 store_cpu_field(tmp, spsr);
4153 } else {
4154 gen_set_cpsr(t0, mask);
4156 tcg_temp_free_i32(t0);
4157 gen_lookup_tb(s);
4158 return 0;
4161 /* Returns nonzero if access to the PSR is not permitted. */
4162 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4164 TCGv_i32 tmp;
4165 tmp = tcg_temp_new_i32();
4166 tcg_gen_movi_i32(tmp, val);
4167 return gen_set_psr(s, mask, spsr, tmp);
4170 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
4171 int *tgtmode, int *regno)
4173 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4174 * the target mode and register number, and identify the various
4175 * unpredictable cases.
4176 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4177 * + executed in user mode
4178 * + using R15 as the src/dest register
4179 * + accessing an unimplemented register
4180 * + accessing a register that's inaccessible at current PL/security state*
4181 * + accessing a register that you could access with a different insn
4182 * We choose to UNDEF in all these cases.
4183 * Since we don't know which of the various AArch32 modes we are in
4184 * we have to defer some checks to runtime.
4185 * Accesses to Monitor mode registers from Secure EL1 (which implies
4186 * that EL3 is AArch64) must trap to EL3.
4188 * If the access checks fail this function will emit code to take
4189 * an exception and return false. Otherwise it will return true,
4190  * and set *tgtmode and *regno appropriately.
4191  */
4192 int exc_target = default_exception_el(s);
4194 /* These instructions are present only in ARMv8, or in ARMv7 with the
4195  * Virtualization Extensions.
4196  */
4197 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4198 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
4199 goto undef;
4202 if (IS_USER(s) || rn == 15) {
4203 goto undef;
4206 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4207  * of registers into (r, sysm).
4208  */
4209 if (r) {
4210 /* SPSRs for other modes */
4211 switch (sysm) {
4212 case 0xe: /* SPSR_fiq */
4213 *tgtmode = ARM_CPU_MODE_FIQ;
4214 break;
4215 case 0x10: /* SPSR_irq */
4216 *tgtmode = ARM_CPU_MODE_IRQ;
4217 break;
4218 case 0x12: /* SPSR_svc */
4219 *tgtmode = ARM_CPU_MODE_SVC;
4220 break;
4221 case 0x14: /* SPSR_abt */
4222 *tgtmode = ARM_CPU_MODE_ABT;
4223 break;
4224 case 0x16: /* SPSR_und */
4225 *tgtmode = ARM_CPU_MODE_UND;
4226 break;
4227 case 0x1c: /* SPSR_mon */
4228 *tgtmode = ARM_CPU_MODE_MON;
4229 break;
4230 case 0x1e: /* SPSR_hyp */
4231 *tgtmode = ARM_CPU_MODE_HYP;
4232 break;
4233 default: /* unallocated */
4234 goto undef;
4236 /* We arbitrarily assign SPSR a register number of 16. */
4237 *regno = 16;
4238 } else {
4239 /* general purpose registers for other modes */
4240 switch (sysm) {
4241 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4242 *tgtmode = ARM_CPU_MODE_USR;
4243 *regno = sysm + 8;
4244 break;
4245 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4246 *tgtmode = ARM_CPU_MODE_FIQ;
4247 *regno = sysm;
4248 break;
4249 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4250 *tgtmode = ARM_CPU_MODE_IRQ;
4251 *regno = sysm & 1 ? 13 : 14;
4252 break;
4253 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4254 *tgtmode = ARM_CPU_MODE_SVC;
4255 *regno = sysm & 1 ? 13 : 14;
4256 break;
4257 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4258 *tgtmode = ARM_CPU_MODE_ABT;
4259 *regno = sysm & 1 ? 13 : 14;
4260 break;
4261 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4262 *tgtmode = ARM_CPU_MODE_UND;
4263 *regno = sysm & 1 ? 13 : 14;
4264 break;
4265 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4266 *tgtmode = ARM_CPU_MODE_MON;
4267 *regno = sysm & 1 ? 13 : 14;
4268 break;
4269 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4270 *tgtmode = ARM_CPU_MODE_HYP;
4271 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4272 *regno = sysm & 1 ? 13 : 17;
4273 break;
4274 default: /* unallocated */
4275 goto undef;
4279 /* Catch the 'accessing inaccessible register' cases we can detect
4280 * at translate time.
4282 switch (*tgtmode) {
4283 case ARM_CPU_MODE_MON:
4284 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
4285 goto undef;
4287 if (s->current_el == 1) {
4288 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4289 * then accesses to Mon registers trap to EL3
4291 exc_target = 3;
4292 goto undef;
4294 break;
4295 case ARM_CPU_MODE_HYP:
4296 /* Note that we can forbid accesses from EL2 here because they
4297 * must be from Hyp mode itself
4299 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
4300 goto undef;
4302 break;
4303 default:
4304 break;
4307 return true;
4309 undef:
4310 /* If we get here then some access check did not pass */
4311 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
4312 return false;
4315 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4317 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4318 int tgtmode = 0, regno = 0;
4320 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4321 return;
4324 /* Sync state because msr_banked() can raise exceptions */
4325 gen_set_condexec(s);
4326 gen_set_pc_im(s, s->pc - 4);
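/* Both the A32 and T32 encodings of MSR (banked) are 4 bytes, and s->pc has
 * already been advanced past the instruction, so s->pc - 4 should be the
 * address of the insn itself, which is what the exception path in the
 * helper needs to see.
 */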
4327 tcg_reg = load_reg(s, rn);
4328 tcg_tgtmode = tcg_const_i32(tgtmode);
4329 tcg_regno = tcg_const_i32(regno);
4330 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4331 tcg_temp_free_i32(tcg_tgtmode);
4332 tcg_temp_free_i32(tcg_regno);
4333 tcg_temp_free_i32(tcg_reg);
4334 s->is_jmp = DISAS_UPDATE;
4337 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4339 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4340 int tgtmode = 0, regno = 0;
4342 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4343 return;
4346 /* Sync state because mrs_banked() can raise exceptions */
4347 gen_set_condexec(s);
4348 gen_set_pc_im(s, s->pc - 4);
4349 tcg_reg = tcg_temp_new_i32();
4350 tcg_tgtmode = tcg_const_i32(tgtmode);
4351 tcg_regno = tcg_const_i32(regno);
4352 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4353 tcg_temp_free_i32(tcg_tgtmode);
4354 tcg_temp_free_i32(tcg_regno);
4355 store_reg(s, rn, tcg_reg);
4356 s->is_jmp = DISAS_UPDATE;
4359 /* Generate an old-style exception return. Marks pc as dead. */
4360 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4362 TCGv_i32 tmp;
4363 store_reg(s, 15, pc);
4364 tmp = load_cpu_field(spsr);
4365 gen_helper_cpsr_write_eret(cpu_env, tmp);
4366 tcg_temp_free_i32(tmp);
4367 s->is_jmp = DISAS_JUMP;
4370 /* Generate a v6 exception return. Marks both values as dead. */
4371 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
4373 gen_helper_cpsr_write_eret(cpu_env, cpsr);
4374 tcg_temp_free_i32(cpsr);
4375 store_reg(s, 15, pc);
4376 s->is_jmp = DISAS_JUMP;
4379 static void gen_nop_hint(DisasContext *s, int val)
4381 switch (val) {
4382 case 1: /* yield */
4383 gen_set_pc_im(s, s->pc);
4384 s->is_jmp = DISAS_YIELD;
4385 break;
4386 case 3: /* wfi */
4387 gen_set_pc_im(s, s->pc);
4388 s->is_jmp = DISAS_WFI;
4389 break;
4390 case 2: /* wfe */
4391 gen_set_pc_im(s, s->pc);
4392 s->is_jmp = DISAS_WFE;
4393 break;
4394 case 4: /* sev */
4395 case 5: /* sevl */
4396 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
4397 default: /* nop */
4398 break;
4402 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
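/* CPU_V001 packs the usual "dest, src1, src2" operand triple for the 64-bit
 * work registers, so e.g. tcg_gen_add_i64(CPU_V001) expands to
 * tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1).
 */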
4404 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
4406 switch (size) {
4407 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4408 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4409 case 2: tcg_gen_add_i32(t0, t0, t1); break;
4410 default: abort();
4414 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
4416 switch (size) {
4417 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4418 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4419 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
4420 default: return;
4424 /* 32-bit pairwise ops end up the same as the elementwise versions. */
4425 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4426 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4427 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4428 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
4430 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
4431 switch ((size << 1) | u) { \
4432 case 0: \
4433 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
4434 break; \
4435 case 1: \
4436 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
4437 break; \
4438 case 2: \
4439 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
4440 break; \
4441 case 3: \
4442 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
4443 break; \
4444 case 4: \
4445 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
4446 break; \
4447 case 5: \
4448 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
4449 break; \
4450 default: return 1; \
4451 }} while (0)
4453 #define GEN_NEON_INTEGER_OP(name) do { \
4454 switch ((size << 1) | u) { \
4455 case 0: \
4456 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
4457 break; \
4458 case 1: \
4459 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
4460 break; \
4461 case 2: \
4462 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
4463 break; \
4464 case 3: \
4465 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
4466 break; \
4467 case 4: \
4468 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
4469 break; \
4470 case 5: \
4471 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
4472 break; \
4473 default: return 1; \
4474 }} while (0)
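/* As an example of how these dispatch on (size, u): with size == 1 and
 * u == 0, GEN_NEON_INTEGER_OP(hadd) expands to
 *     gen_helper_neon_hadd_s16(tmp, tmp, tmp2);
 * and GEN_NEON_INTEGER_OP_ENV(qadd) to
 *     gen_helper_neon_qadd_s16(tmp, cpu_env, tmp, tmp2);
 * With size == 3 neither macro has a matching case, so the containing
 * function returns 1 and the insn UNDEFs.
 */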
4476 static TCGv_i32 neon_load_scratch(int scratch)
4478 TCGv_i32 tmp = tcg_temp_new_i32();
4479 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4480 return tmp;
4483 static void neon_store_scratch(int scratch, TCGv_i32 var)
4485 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4486 tcg_temp_free_i32(var);
4489 static inline TCGv_i32 neon_get_scalar(int size, int reg)
4491 TCGv_i32 tmp;
4492 if (size == 1) {
4493 tmp = neon_load_reg(reg & 7, reg >> 4);
4494 if (reg & 8) {
4495 gen_neon_dup_high16(tmp);
4496 } else {
4497 gen_neon_dup_low16(tmp);
4499 } else {
4500 tmp = neon_load_reg(reg & 15, reg >> 4);
4502 return tmp;
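/* For example, with size == 1 and reg == 0x0b: bits [2:0] select d3, bit 4
 * (clear here) selects the low 32-bit word of it, and bit 3 (set) means the
 * upper 16-bit lane of that word is duplicated across the returned value.
 * For size == 2 the 32-bit word selected by bits [3:0] and bit 4 is
 * returned unchanged.
 */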
4505 static int gen_neon_unzip(int rd, int rm, int size, int q)
4507 TCGv_i32 tmp, tmp2;
4508 if (!q && size == 2) {
4509 return 1;
4511 tmp = tcg_const_i32(rd);
4512 tmp2 = tcg_const_i32(rm);
4513 if (q) {
4514 switch (size) {
4515 case 0:
4516 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
4517 break;
4518 case 1:
4519 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
4520 break;
4521 case 2:
4522 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
4523 break;
4524 default:
4525 abort();
4527 } else {
4528 switch (size) {
4529 case 0:
4530 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
4531 break;
4532 case 1:
4533 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
4534 break;
4535 default:
4536 abort();
4539 tcg_temp_free_i32(tmp);
4540 tcg_temp_free_i32(tmp2);
4541 return 0;
4544 static int gen_neon_zip(int rd, int rm, int size, int q)
4546 TCGv_i32 tmp, tmp2;
4547 if (!q && size == 2) {
4548 return 1;
4550 tmp = tcg_const_i32(rd);
4551 tmp2 = tcg_const_i32(rm);
4552 if (q) {
4553 switch (size) {
4554 case 0:
4555 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
4556 break;
4557 case 1:
4558 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
4559 break;
4560 case 2:
4561 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
4562 break;
4563 default:
4564 abort();
4566 } else {
4567 switch (size) {
4568 case 0:
4569 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
4570 break;
4571 case 1:
4572 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
4573 break;
4574 default:
4575 abort();
4578 tcg_temp_free_i32(tmp);
4579 tcg_temp_free_i32(tmp2);
4580 return 0;
4583 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
4585 TCGv_i32 rd, tmp;
4587 rd = tcg_temp_new_i32();
4588 tmp = tcg_temp_new_i32();
4590 tcg_gen_shli_i32(rd, t0, 8);
4591 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4592 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4593 tcg_gen_or_i32(rd, rd, tmp);
4595 tcg_gen_shri_i32(t1, t1, 8);
4596 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4597 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4598 tcg_gen_or_i32(t1, t1, tmp);
4599 tcg_gen_mov_i32(t0, rd);
4601 tcg_temp_free_i32(tmp);
4602 tcg_temp_free_i32(rd);
4605 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
4607 TCGv_i32 rd, tmp;
4609 rd = tcg_temp_new_i32();
4610 tmp = tcg_temp_new_i32();
4612 tcg_gen_shli_i32(rd, t0, 16);
4613 tcg_gen_andi_i32(tmp, t1, 0xffff);
4614 tcg_gen_or_i32(rd, rd, tmp);
4615 tcg_gen_shri_i32(t1, t1, 16);
4616 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4617 tcg_gen_or_i32(t1, t1, tmp);
4618 tcg_gen_mov_i32(t0, rd);
4620 tcg_temp_free_i32(tmp);
4621 tcg_temp_free_i32(rd);
4625 static struct {
4626 int nregs;
4627 int interleave;
4628 int spacing;
4629 } neon_ls_element_type[11] = {
4630 {4, 4, 1},
4631 {4, 4, 2},
4632 {4, 1, 1},
4633 {4, 2, 1},
4634 {3, 3, 1},
4635 {3, 3, 2},
4636 {3, 1, 1},
4637 {1, 1, 1},
4638 {2, 2, 1},
4639 {2, 2, 2},
4640 {2, 1, 1}
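/* The index into this table is the 'type' field (insn bits [11:8]) of a
 * "load/store multiple structures" insn, so for instance entry 7 ({1,1,1})
 * describes the one-register VLD1/VST1 form, entry 10 ({2,1,1}) the
 * two-register VLD1/VST1 form, and entries 0 and 1 the VLD4/VST4 forms with
 * register spacing 1 and 2 respectively (per the v7 ARM ARM type encodings).
 */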
4643 /* Translate a NEON load/store element instruction. Return nonzero if the
4644 instruction is invalid. */
4645 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
4647 int rd, rn, rm;
4648 int op;
4649 int nregs;
4650 int interleave;
4651 int spacing;
4652 int stride;
4653 int size;
4654 int reg;
4655 int pass;
4656 int load;
4657 int shift;
4658 int n;
4659 TCGv_i32 addr;
4660 TCGv_i32 tmp;
4661 TCGv_i32 tmp2;
4662 TCGv_i64 tmp64;
4664 /* FIXME: this access check should not take precedence over UNDEF
4665 * for invalid encodings; we will generate incorrect syndrome information
4666 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4668 if (s->fp_excp_el) {
4669 gen_exception_insn(s, 4, EXCP_UDEF,
4670 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
4671 return 0;
4674 if (!s->vfp_enabled)
4675 return 1;
4676 VFP_DREG_D(rd, insn);
4677 rn = (insn >> 16) & 0xf;
4678 rm = insn & 0xf;
4679 load = (insn & (1 << 21)) != 0;
4680 if ((insn & (1 << 23)) == 0) {
4681 /* Load store all elements. */
4682 op = (insn >> 8) & 0xf;
4683 size = (insn >> 6) & 3;
4684 if (op > 10)
4685 return 1;
4686 /* Catch UNDEF cases for bad values of align field */
4687 switch (op & 0xc) {
4688 case 4:
4689 if (((insn >> 5) & 1) == 1) {
4690 return 1;
4692 break;
4693 case 8:
4694 if (((insn >> 4) & 3) == 3) {
4695 return 1;
4697 break;
4698 default:
4699 break;
4701 nregs = neon_ls_element_type[op].nregs;
4702 interleave = neon_ls_element_type[op].interleave;
4703 spacing = neon_ls_element_type[op].spacing;
4704 if (size == 3 && (interleave | spacing) != 1)
4705 return 1;
4706 addr = tcg_temp_new_i32();
4707 load_reg_var(s, addr, rn);
4708 stride = (1 << size) * interleave;
4709 for (reg = 0; reg < nregs; reg++) {
4710 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
4711 load_reg_var(s, addr, rn);
4712 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
4713 } else if (interleave == 2 && nregs == 4 && reg == 2) {
4714 load_reg_var(s, addr, rn);
4715 tcg_gen_addi_i32(addr, addr, 1 << size);
4717 if (size == 3) {
4718 tmp64 = tcg_temp_new_i64();
4719 if (load) {
4720 gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
4721 neon_store_reg64(tmp64, rd);
4722 } else {
4723 neon_load_reg64(tmp64, rd);
4724 gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
4726 tcg_temp_free_i64(tmp64);
4727 tcg_gen_addi_i32(addr, addr, stride);
4728 } else {
4729 for (pass = 0; pass < 2; pass++) {
4730 if (size == 2) {
4731 if (load) {
4732 tmp = tcg_temp_new_i32();
4733 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
4734 neon_store_reg(rd, pass, tmp);
4735 } else {
4736 tmp = neon_load_reg(rd, pass);
4737 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
4738 tcg_temp_free_i32(tmp);
4740 tcg_gen_addi_i32(addr, addr, stride);
4741 } else if (size == 1) {
4742 if (load) {
4743 tmp = tcg_temp_new_i32();
4744 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
4745 tcg_gen_addi_i32(addr, addr, stride);
4746 tmp2 = tcg_temp_new_i32();
4747 gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
4748 tcg_gen_addi_i32(addr, addr, stride);
4749 tcg_gen_shli_i32(tmp2, tmp2, 16);
4750 tcg_gen_or_i32(tmp, tmp, tmp2);
4751 tcg_temp_free_i32(tmp2);
4752 neon_store_reg(rd, pass, tmp);
4753 } else {
4754 tmp = neon_load_reg(rd, pass);
4755 tmp2 = tcg_temp_new_i32();
4756 tcg_gen_shri_i32(tmp2, tmp, 16);
4757 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
4758 tcg_temp_free_i32(tmp);
4759 tcg_gen_addi_i32(addr, addr, stride);
4760 gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
4761 tcg_temp_free_i32(tmp2);
4762 tcg_gen_addi_i32(addr, addr, stride);
4764 } else /* size == 0 */ {
4765 if (load) {
4766 TCGV_UNUSED_I32(tmp2);
4767 for (n = 0; n < 4; n++) {
4768 tmp = tcg_temp_new_i32();
4769 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
4770 tcg_gen_addi_i32(addr, addr, stride);
4771 if (n == 0) {
4772 tmp2 = tmp;
4773 } else {
4774 tcg_gen_shli_i32(tmp, tmp, n * 8);
4775 tcg_gen_or_i32(tmp2, tmp2, tmp);
4776 tcg_temp_free_i32(tmp);
4779 neon_store_reg(rd, pass, tmp2);
4780 } else {
4781 tmp2 = neon_load_reg(rd, pass);
4782 for (n = 0; n < 4; n++) {
4783 tmp = tcg_temp_new_i32();
4784 if (n == 0) {
4785 tcg_gen_mov_i32(tmp, tmp2);
4786 } else {
4787 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4789 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
4790 tcg_temp_free_i32(tmp);
4791 tcg_gen_addi_i32(addr, addr, stride);
4793 tcg_temp_free_i32(tmp2);
4798 rd += spacing;
4800 tcg_temp_free_i32(addr);
4801 stride = nregs * 8;
4802 } else {
4803 size = (insn >> 10) & 3;
4804 if (size == 3) {
4805 /* Load single element to all lanes. */
4806 int a = (insn >> 4) & 1;
4807 if (!load) {
4808 return 1;
4810 size = (insn >> 6) & 3;
4811 nregs = ((insn >> 8) & 3) + 1;
4813 if (size == 3) {
4814 if (nregs != 4 || a == 0) {
4815 return 1;
4817 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4818 size = 2;
4820 if (nregs == 1 && a == 1 && size == 0) {
4821 return 1;
4823 if (nregs == 3 && a == 1) {
4824 return 1;
4826 addr = tcg_temp_new_i32();
4827 load_reg_var(s, addr, rn);
4828 if (nregs == 1) {
4829 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4830 tmp = gen_load_and_replicate(s, addr, size);
4831 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4832 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4833 if (insn & (1 << 5)) {
4834 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4835 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4837 tcg_temp_free_i32(tmp);
4838 } else {
4839 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4840 stride = (insn & (1 << 5)) ? 2 : 1;
4841 for (reg = 0; reg < nregs; reg++) {
4842 tmp = gen_load_and_replicate(s, addr, size);
4843 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4844 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4845 tcg_temp_free_i32(tmp);
4846 tcg_gen_addi_i32(addr, addr, 1 << size);
4847 rd += stride;
4850 tcg_temp_free_i32(addr);
4851 stride = (1 << size) * nregs;
4852 } else {
4853 /* Single element. */
4854 int idx = (insn >> 4) & 0xf;
4855 pass = (insn >> 7) & 1;
4856 switch (size) {
4857 case 0:
4858 shift = ((insn >> 5) & 3) * 8;
4859 stride = 1;
4860 break;
4861 case 1:
4862 shift = ((insn >> 6) & 1) * 16;
4863 stride = (insn & (1 << 5)) ? 2 : 1;
4864 break;
4865 case 2:
4866 shift = 0;
4867 stride = (insn & (1 << 6)) ? 2 : 1;
4868 break;
4869 default:
4870 abort();
4872 nregs = ((insn >> 8) & 3) + 1;
4873 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4874 switch (nregs) {
4875 case 1:
4876 if (((idx & (1 << size)) != 0) ||
4877 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4878 return 1;
4880 break;
4881 case 3:
4882 if ((idx & 1) != 0) {
4883 return 1;
4885 /* fall through */
4886 case 2:
4887 if (size == 2 && (idx & 2) != 0) {
4888 return 1;
4890 break;
4891 case 4:
4892 if ((size == 2) && ((idx & 3) == 3)) {
4893 return 1;
4895 break;
4896 default:
4897 abort();
4899 if ((rd + stride * (nregs - 1)) > 31) {
4900 /* Attempts to write off the end of the register file
4901 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4902 * the neon_load_reg() would write off the end of the array.
4904 return 1;
4906 addr = tcg_temp_new_i32();
4907 load_reg_var(s, addr, rn);
4908 for (reg = 0; reg < nregs; reg++) {
4909 if (load) {
4910 tmp = tcg_temp_new_i32();
4911 switch (size) {
4912 case 0:
4913 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
4914 break;
4915 case 1:
4916 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
4917 break;
4918 case 2:
4919 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
4920 break;
4921 default: /* Avoid compiler warnings. */
4922 abort();
4924 if (size != 2) {
4925 tmp2 = neon_load_reg(rd, pass);
4926 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4927 shift, size ? 16 : 8);
4928 tcg_temp_free_i32(tmp2);
4930 neon_store_reg(rd, pass, tmp);
4931 } else { /* Store */
4932 tmp = neon_load_reg(rd, pass);
4933 if (shift)
4934 tcg_gen_shri_i32(tmp, tmp, shift);
4935 switch (size) {
4936 case 0:
4937 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
4938 break;
4939 case 1:
4940 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
4941 break;
4942 case 2:
4943 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
4944 break;
4946 tcg_temp_free_i32(tmp);
4948 rd += stride;
4949 tcg_gen_addi_i32(addr, addr, 1 << size);
4951 tcg_temp_free_i32(addr);
4952 stride = nregs * (1 << size);
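/* Base register writeback, common to all the forms above: rm == 15 means no
 * writeback, rm == 13 means post-increment by the transfer size computed in
 * stride (the "[Rn]!" form), and any other rm adds that register to the
 * base (the "[Rn], Rm" form).
 */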
4955 if (rm != 15) {
4956 TCGv_i32 base;
4958 base = load_reg(s, rn);
4959 if (rm == 13) {
4960 tcg_gen_addi_i32(base, base, stride);
4961 } else {
4962 TCGv_i32 index;
4963 index = load_reg(s, rm);
4964 tcg_gen_add_i32(base, base, index);
4965 tcg_temp_free_i32(index);
4967 store_reg(s, rn, base);
4969 return 0;
4972 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4973 static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
4975 tcg_gen_and_i32(t, t, c);
4976 tcg_gen_andc_i32(f, f, c);
4977 tcg_gen_or_i32(dest, t, f);
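/* i.e. dest = (t & c) | (f & ~c); VBSL, VBIT and VBIF are all generated
 * from this by permuting which operand supplies t, f and c (see the
 * NEON_3R_LOGIC cases below).
 */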
4980 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
4982 switch (size) {
4983 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4984 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4985 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
4986 default: abort();
4990 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
4992 switch (size) {
4993 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4994 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4995 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4996 default: abort();
5000 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
5002 switch (size) {
5003 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5004 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5005 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
5006 default: abort();
5010 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
5012 switch (size) {
5013 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5014 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5015 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
5016 default: abort();
5020 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
5021 int q, int u)
5023 if (q) {
5024 if (u) {
5025 switch (size) {
5026 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5027 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5028 default: abort();
5030 } else {
5031 switch (size) {
5032 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5033 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5034 default: abort();
5037 } else {
5038 if (u) {
5039 switch (size) {
5040 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5041 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
5042 default: abort();
5044 } else {
5045 switch (size) {
5046 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5047 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5048 default: abort();
5054 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
5056 if (u) {
5057 switch (size) {
5058 case 0: gen_helper_neon_widen_u8(dest, src); break;
5059 case 1: gen_helper_neon_widen_u16(dest, src); break;
5060 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5061 default: abort();
5063 } else {
5064 switch (size) {
5065 case 0: gen_helper_neon_widen_s8(dest, src); break;
5066 case 1: gen_helper_neon_widen_s16(dest, src); break;
5067 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5068 default: abort();
5071 tcg_temp_free_i32(src);
5074 static inline void gen_neon_addl(int size)
5076 switch (size) {
5077 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5078 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5079 case 2: tcg_gen_add_i64(CPU_V001); break;
5080 default: abort();
5084 static inline void gen_neon_subl(int size)
5086 switch (size) {
5087 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5088 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5089 case 2: tcg_gen_sub_i64(CPU_V001); break;
5090 default: abort();
5094 static inline void gen_neon_negl(TCGv_i64 var, int size)
5096 switch (size) {
5097 case 0: gen_helper_neon_negl_u16(var, var); break;
5098 case 1: gen_helper_neon_negl_u32(var, var); break;
5099 case 2:
5100 tcg_gen_neg_i64(var, var);
5101 break;
5102 default: abort();
5106 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
5108 switch (size) {
5109 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5110 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
5111 default: abort();
5115 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5116 int size, int u)
5118 TCGv_i64 tmp;
5120 switch ((size << 1) | u) {
5121 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5122 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5123 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5124 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5125 case 4:
5126 tmp = gen_muls_i64_i32(a, b);
5127 tcg_gen_mov_i64(dest, tmp);
5128 tcg_temp_free_i64(tmp);
5129 break;
5130 case 5:
5131 tmp = gen_mulu_i64_i32(a, b);
5132 tcg_gen_mov_i64(dest, tmp);
5133 tcg_temp_free_i64(tmp);
5134 break;
5135 default: abort();
5138 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5139 Don't forget to clean them now. */
5140 if (size < 2) {
5141 tcg_temp_free_i32(a);
5142 tcg_temp_free_i32(b);
5146 static void gen_neon_narrow_op(int op, int u, int size,
5147 TCGv_i32 dest, TCGv_i64 src)
5149 if (op) {
5150 if (u) {
5151 gen_neon_unarrow_sats(size, dest, src);
5152 } else {
5153 gen_neon_narrow(size, dest, src);
5155 } else {
5156 if (u) {
5157 gen_neon_narrow_satu(size, dest, src);
5158 } else {
5159 gen_neon_narrow_sats(size, dest, src);
5164 /* Symbolic constants for op fields for Neon 3-register same-length.
5165 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
5166 * table A7-9.
5168 #define NEON_3R_VHADD 0
5169 #define NEON_3R_VQADD 1
5170 #define NEON_3R_VRHADD 2
5171 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
5172 #define NEON_3R_VHSUB 4
5173 #define NEON_3R_VQSUB 5
5174 #define NEON_3R_VCGT 6
5175 #define NEON_3R_VCGE 7
5176 #define NEON_3R_VSHL 8
5177 #define NEON_3R_VQSHL 9
5178 #define NEON_3R_VRSHL 10
5179 #define NEON_3R_VQRSHL 11
5180 #define NEON_3R_VMAX 12
5181 #define NEON_3R_VMIN 13
5182 #define NEON_3R_VABD 14
5183 #define NEON_3R_VABA 15
5184 #define NEON_3R_VADD_VSUB 16
5185 #define NEON_3R_VTST_VCEQ 17
5186 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
5187 #define NEON_3R_VMUL 19
5188 #define NEON_3R_VPMAX 20
5189 #define NEON_3R_VPMIN 21
5190 #define NEON_3R_VQDMULH_VQRDMULH 22
5191 #define NEON_3R_VPADD 23
5192 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
5193 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
5194 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
5195 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
5196 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
5197 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
5198 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
5199 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
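/* These op values are built as (insn[11:8] << 1) | insn[4], matching the
 * "op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1)" decode below. For example
 * integer VADD/VSUB have bits [11:8] = 0b1000 and bit 4 = 0, giving
 * NEON_3R_VADD_VSUB == 16, with the U bit then choosing between add and
 * subtract.
 */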
5201 static const uint8_t neon_3r_sizes[] = {
5202 [NEON_3R_VHADD] = 0x7,
5203 [NEON_3R_VQADD] = 0xf,
5204 [NEON_3R_VRHADD] = 0x7,
5205 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
5206 [NEON_3R_VHSUB] = 0x7,
5207 [NEON_3R_VQSUB] = 0xf,
5208 [NEON_3R_VCGT] = 0x7,
5209 [NEON_3R_VCGE] = 0x7,
5210 [NEON_3R_VSHL] = 0xf,
5211 [NEON_3R_VQSHL] = 0xf,
5212 [NEON_3R_VRSHL] = 0xf,
5213 [NEON_3R_VQRSHL] = 0xf,
5214 [NEON_3R_VMAX] = 0x7,
5215 [NEON_3R_VMIN] = 0x7,
5216 [NEON_3R_VABD] = 0x7,
5217 [NEON_3R_VABA] = 0x7,
5218 [NEON_3R_VADD_VSUB] = 0xf,
5219 [NEON_3R_VTST_VCEQ] = 0x7,
5220 [NEON_3R_VML] = 0x7,
5221 [NEON_3R_VMUL] = 0x7,
5222 [NEON_3R_VPMAX] = 0x7,
5223 [NEON_3R_VPMIN] = 0x7,
5224 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
5225 [NEON_3R_VPADD] = 0x7,
5226 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
5227 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
5228 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
5229 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
5230 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
5231 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
5232 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
5233 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
5236 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
5237 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
5238 * table A7-13.
5240 #define NEON_2RM_VREV64 0
5241 #define NEON_2RM_VREV32 1
5242 #define NEON_2RM_VREV16 2
5243 #define NEON_2RM_VPADDL 4
5244 #define NEON_2RM_VPADDL_U 5
5245 #define NEON_2RM_AESE 6 /* Includes AESD */
5246 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
5247 #define NEON_2RM_VCLS 8
5248 #define NEON_2RM_VCLZ 9
5249 #define NEON_2RM_VCNT 10
5250 #define NEON_2RM_VMVN 11
5251 #define NEON_2RM_VPADAL 12
5252 #define NEON_2RM_VPADAL_U 13
5253 #define NEON_2RM_VQABS 14
5254 #define NEON_2RM_VQNEG 15
5255 #define NEON_2RM_VCGT0 16
5256 #define NEON_2RM_VCGE0 17
5257 #define NEON_2RM_VCEQ0 18
5258 #define NEON_2RM_VCLE0 19
5259 #define NEON_2RM_VCLT0 20
5260 #define NEON_2RM_SHA1H 21
5261 #define NEON_2RM_VABS 22
5262 #define NEON_2RM_VNEG 23
5263 #define NEON_2RM_VCGT0_F 24
5264 #define NEON_2RM_VCGE0_F 25
5265 #define NEON_2RM_VCEQ0_F 26
5266 #define NEON_2RM_VCLE0_F 27
5267 #define NEON_2RM_VCLT0_F 28
5268 #define NEON_2RM_VABS_F 30
5269 #define NEON_2RM_VNEG_F 31
5270 #define NEON_2RM_VSWP 32
5271 #define NEON_2RM_VTRN 33
5272 #define NEON_2RM_VUZP 34
5273 #define NEON_2RM_VZIP 35
5274 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
5275 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
5276 #define NEON_2RM_VSHLL 38
5277 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
5278 #define NEON_2RM_VRINTN 40
5279 #define NEON_2RM_VRINTX 41
5280 #define NEON_2RM_VRINTA 42
5281 #define NEON_2RM_VRINTZ 43
5282 #define NEON_2RM_VCVT_F16_F32 44
5283 #define NEON_2RM_VRINTM 45
5284 #define NEON_2RM_VCVT_F32_F16 46
5285 #define NEON_2RM_VRINTP 47
5286 #define NEON_2RM_VCVTAU 48
5287 #define NEON_2RM_VCVTAS 49
5288 #define NEON_2RM_VCVTNU 50
5289 #define NEON_2RM_VCVTNS 51
5290 #define NEON_2RM_VCVTPU 52
5291 #define NEON_2RM_VCVTPS 53
5292 #define NEON_2RM_VCVTMU 54
5293 #define NEON_2RM_VCVTMS 55
5294 #define NEON_2RM_VRECPE 56
5295 #define NEON_2RM_VRSQRTE 57
5296 #define NEON_2RM_VRECPE_F 58
5297 #define NEON_2RM_VRSQRTE_F 59
5298 #define NEON_2RM_VCVT_FS 60
5299 #define NEON_2RM_VCVT_FU 61
5300 #define NEON_2RM_VCVT_SF 62
5301 #define NEON_2RM_VCVT_UF 63
5303 static int neon_2rm_is_float_op(int op)
5305 /* Return true if this neon 2reg-misc op is float-to-float */
5306 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
5307 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
5308 op == NEON_2RM_VRINTM ||
5309 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
5310 op >= NEON_2RM_VRECPE_F);
5313 /* Each entry in this array has bit n set if the insn allows
5314 * size value n (otherwise it will UNDEF). Since unallocated
5315 * op values will have no bits set they always UNDEF.
5317 static const uint8_t neon_2rm_sizes[] = {
5318 [NEON_2RM_VREV64] = 0x7,
5319 [NEON_2RM_VREV32] = 0x3,
5320 [NEON_2RM_VREV16] = 0x1,
5321 [NEON_2RM_VPADDL] = 0x7,
5322 [NEON_2RM_VPADDL_U] = 0x7,
5323 [NEON_2RM_AESE] = 0x1,
5324 [NEON_2RM_AESMC] = 0x1,
5325 [NEON_2RM_VCLS] = 0x7,
5326 [NEON_2RM_VCLZ] = 0x7,
5327 [NEON_2RM_VCNT] = 0x1,
5328 [NEON_2RM_VMVN] = 0x1,
5329 [NEON_2RM_VPADAL] = 0x7,
5330 [NEON_2RM_VPADAL_U] = 0x7,
5331 [NEON_2RM_VQABS] = 0x7,
5332 [NEON_2RM_VQNEG] = 0x7,
5333 [NEON_2RM_VCGT0] = 0x7,
5334 [NEON_2RM_VCGE0] = 0x7,
5335 [NEON_2RM_VCEQ0] = 0x7,
5336 [NEON_2RM_VCLE0] = 0x7,
5337 [NEON_2RM_VCLT0] = 0x7,
5338 [NEON_2RM_SHA1H] = 0x4,
5339 [NEON_2RM_VABS] = 0x7,
5340 [NEON_2RM_VNEG] = 0x7,
5341 [NEON_2RM_VCGT0_F] = 0x4,
5342 [NEON_2RM_VCGE0_F] = 0x4,
5343 [NEON_2RM_VCEQ0_F] = 0x4,
5344 [NEON_2RM_VCLE0_F] = 0x4,
5345 [NEON_2RM_VCLT0_F] = 0x4,
5346 [NEON_2RM_VABS_F] = 0x4,
5347 [NEON_2RM_VNEG_F] = 0x4,
5348 [NEON_2RM_VSWP] = 0x1,
5349 [NEON_2RM_VTRN] = 0x7,
5350 [NEON_2RM_VUZP] = 0x7,
5351 [NEON_2RM_VZIP] = 0x7,
5352 [NEON_2RM_VMOVN] = 0x7,
5353 [NEON_2RM_VQMOVN] = 0x7,
5354 [NEON_2RM_VSHLL] = 0x7,
5355 [NEON_2RM_SHA1SU1] = 0x4,
5356 [NEON_2RM_VRINTN] = 0x4,
5357 [NEON_2RM_VRINTX] = 0x4,
5358 [NEON_2RM_VRINTA] = 0x4,
5359 [NEON_2RM_VRINTZ] = 0x4,
5360 [NEON_2RM_VCVT_F16_F32] = 0x2,
5361 [NEON_2RM_VRINTM] = 0x4,
5362 [NEON_2RM_VCVT_F32_F16] = 0x2,
5363 [NEON_2RM_VRINTP] = 0x4,
5364 [NEON_2RM_VCVTAU] = 0x4,
5365 [NEON_2RM_VCVTAS] = 0x4,
5366 [NEON_2RM_VCVTNU] = 0x4,
5367 [NEON_2RM_VCVTNS] = 0x4,
5368 [NEON_2RM_VCVTPU] = 0x4,
5369 [NEON_2RM_VCVTPS] = 0x4,
5370 [NEON_2RM_VCVTMU] = 0x4,
5371 [NEON_2RM_VCVTMS] = 0x4,
5372 [NEON_2RM_VRECPE] = 0x4,
5373 [NEON_2RM_VRSQRTE] = 0x4,
5374 [NEON_2RM_VRECPE_F] = 0x4,
5375 [NEON_2RM_VRSQRTE_F] = 0x4,
5376 [NEON_2RM_VCVT_FS] = 0x4,
5377 [NEON_2RM_VCVT_FU] = 0x4,
5378 [NEON_2RM_VCVT_SF] = 0x4,
5379 [NEON_2RM_VCVT_UF] = 0x4,
5382 /* Translate a NEON data processing instruction. Return nonzero if the
5383 instruction is invalid.
5384 We process data in a mixture of 32-bit and 64-bit chunks.
5385 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
5387 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
5389 int op;
5390 int q;
5391 int rd, rn, rm;
5392 int size;
5393 int shift;
5394 int pass;
5395 int count;
5396 int pairwise;
5397 int u;
5398 uint32_t imm, mask;
5399 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
5400 TCGv_i64 tmp64;
5402 /* FIXME: this access check should not take precedence over UNDEF
5403 * for invalid encodings; we will generate incorrect syndrome information
5404 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5406 if (s->fp_excp_el) {
5407 gen_exception_insn(s, 4, EXCP_UDEF,
5408 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
5409 return 0;
5412 if (!s->vfp_enabled)
5413 return 1;
5414 q = (insn & (1 << 6)) != 0;
5415 u = (insn >> 24) & 1;
5416 VFP_DREG_D(rd, insn);
5417 VFP_DREG_N(rn, insn);
5418 VFP_DREG_M(rm, insn);
5419 size = (insn >> 20) & 3;
5420 if ((insn & (1 << 23)) == 0) {
5421 /* Three register same length. */
5422 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
5423 /* Catch invalid op and bad size combinations: UNDEF */
5424 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5425 return 1;
5427 /* All insns of this form UNDEF for either this condition or the
5428 * superset of cases "Q==1"; we catch the latter later.
5430 if (q && ((rd | rn | rm) & 1)) {
5431 return 1;
5434 * The SHA-1/SHA-256 3-register instructions require special treatment
5435 * here, as their size field is overloaded as an op type selector, and
5436 * they all consume their input in a single pass.
5438 if (op == NEON_3R_SHA) {
5439 if (!q) {
5440 return 1;
5442 if (!u) { /* SHA-1 */
5443 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
5444 return 1;
5446 tmp = tcg_const_i32(rd);
5447 tmp2 = tcg_const_i32(rn);
5448 tmp3 = tcg_const_i32(rm);
5449 tmp4 = tcg_const_i32(size);
5450 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5451 tcg_temp_free_i32(tmp4);
5452 } else { /* SHA-256 */
5453 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
5454 return 1;
5456 tmp = tcg_const_i32(rd);
5457 tmp2 = tcg_const_i32(rn);
5458 tmp3 = tcg_const_i32(rm);
5459 switch (size) {
5460 case 0:
5461 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5462 break;
5463 case 1:
5464 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5465 break;
5466 case 2:
5467 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5468 break;
5471 tcg_temp_free_i32(tmp);
5472 tcg_temp_free_i32(tmp2);
5473 tcg_temp_free_i32(tmp3);
5474 return 0;
5476 if (size == 3 && op != NEON_3R_LOGIC) {
5477 /* 64-bit element instructions. */
5478 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5479 neon_load_reg64(cpu_V0, rn + pass);
5480 neon_load_reg64(cpu_V1, rm + pass);
5481 switch (op) {
5482 case NEON_3R_VQADD:
5483 if (u) {
5484 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5485 cpu_V0, cpu_V1);
5486 } else {
5487 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5488 cpu_V0, cpu_V1);
5490 break;
5491 case NEON_3R_VQSUB:
5492 if (u) {
5493 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5494 cpu_V0, cpu_V1);
5495 } else {
5496 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5497 cpu_V0, cpu_V1);
5499 break;
5500 case NEON_3R_VSHL:
5501 if (u) {
5502 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5503 } else {
5504 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5506 break;
5507 case NEON_3R_VQSHL:
5508 if (u) {
5509 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5510 cpu_V1, cpu_V0);
5511 } else {
5512 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5513 cpu_V1, cpu_V0);
5515 break;
5516 case NEON_3R_VRSHL:
5517 if (u) {
5518 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
5519 } else {
5520 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5522 break;
5523 case NEON_3R_VQRSHL:
5524 if (u) {
5525 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5526 cpu_V1, cpu_V0);
5527 } else {
5528 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5529 cpu_V1, cpu_V0);
5531 break;
5532 case NEON_3R_VADD_VSUB:
5533 if (u) {
5534 tcg_gen_sub_i64(CPU_V001);
5535 } else {
5536 tcg_gen_add_i64(CPU_V001);
5538 break;
5539 default:
5540 abort();
5542 neon_store_reg64(cpu_V0, rd + pass);
5544 return 0;
5546 pairwise = 0;
5547 switch (op) {
5548 case NEON_3R_VSHL:
5549 case NEON_3R_VQSHL:
5550 case NEON_3R_VRSHL:
5551 case NEON_3R_VQRSHL:
5553 int rtmp;
5554 /* Shift instruction operands are reversed. */
5555 rtmp = rn;
5556 rn = rm;
5557 rm = rtmp;
5559 break;
5560 case NEON_3R_VPADD:
5561 if (u) {
5562 return 1;
5564 /* Fall through */
5565 case NEON_3R_VPMAX:
5566 case NEON_3R_VPMIN:
5567 pairwise = 1;
5568 break;
5569 case NEON_3R_FLOAT_ARITH:
5570 pairwise = (u && size < 2); /* if VPADD (float) */
5571 break;
5572 case NEON_3R_FLOAT_MINMAX:
5573 pairwise = u; /* if VPMIN/VPMAX (float) */
5574 break;
5575 case NEON_3R_FLOAT_CMP:
5576 if (!u && size) {
5577 /* no encoding for U=0 C=1x */
5578 return 1;
5580 break;
5581 case NEON_3R_FLOAT_ACMP:
5582 if (!u) {
5583 return 1;
5585 break;
5586 case NEON_3R_FLOAT_MISC:
5587 /* VMAXNM/VMINNM in ARMv8 */
5588 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
5589 return 1;
5591 break;
5592 case NEON_3R_VMUL:
5593 if (u && (size != 0)) {
5594 /* UNDEF on invalid size for polynomial subcase */
5595 return 1;
5597 break;
5598 case NEON_3R_VFM:
5599 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
5600 return 1;
5602 break;
5603 default:
5604 break;
5607 if (pairwise && q) {
5608 /* All the pairwise insns UNDEF if Q is set */
5609 return 1;
5612 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5614 if (pairwise) {
5615 /* Pairwise. */
5616 if (pass < 1) {
5617 tmp = neon_load_reg(rn, 0);
5618 tmp2 = neon_load_reg(rn, 1);
5619 } else {
5620 tmp = neon_load_reg(rm, 0);
5621 tmp2 = neon_load_reg(rm, 1);
5623 } else {
5624 /* Elementwise. */
5625 tmp = neon_load_reg(rn, pass);
5626 tmp2 = neon_load_reg(rm, pass);
5628 switch (op) {
5629 case NEON_3R_VHADD:
5630 GEN_NEON_INTEGER_OP(hadd);
5631 break;
5632 case NEON_3R_VQADD:
5633 GEN_NEON_INTEGER_OP_ENV(qadd);
5634 break;
5635 case NEON_3R_VRHADD:
5636 GEN_NEON_INTEGER_OP(rhadd);
5637 break;
5638 case NEON_3R_LOGIC: /* Logic ops. */
5639 switch ((u << 2) | size) {
5640 case 0: /* VAND */
5641 tcg_gen_and_i32(tmp, tmp, tmp2);
5642 break;
5643 case 1: /* BIC */
5644 tcg_gen_andc_i32(tmp, tmp, tmp2);
5645 break;
5646 case 2: /* VORR */
5647 tcg_gen_or_i32(tmp, tmp, tmp2);
5648 break;
5649 case 3: /* VORN */
5650 tcg_gen_orc_i32(tmp, tmp, tmp2);
5651 break;
5652 case 4: /* VEOR */
5653 tcg_gen_xor_i32(tmp, tmp, tmp2);
5654 break;
5655 case 5: /* VBSL */
5656 tmp3 = neon_load_reg(rd, pass);
5657 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
5658 tcg_temp_free_i32(tmp3);
5659 break;
5660 case 6: /* VBIT */
5661 tmp3 = neon_load_reg(rd, pass);
5662 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
5663 tcg_temp_free_i32(tmp3);
5664 break;
5665 case 7: /* VBIF */
5666 tmp3 = neon_load_reg(rd, pass);
5667 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
5668 tcg_temp_free_i32(tmp3);
5669 break;
5671 break;
5672 case NEON_3R_VHSUB:
5673 GEN_NEON_INTEGER_OP(hsub);
5674 break;
5675 case NEON_3R_VQSUB:
5676 GEN_NEON_INTEGER_OP_ENV(qsub);
5677 break;
5678 case NEON_3R_VCGT:
5679 GEN_NEON_INTEGER_OP(cgt);
5680 break;
5681 case NEON_3R_VCGE:
5682 GEN_NEON_INTEGER_OP(cge);
5683 break;
5684 case NEON_3R_VSHL:
5685 GEN_NEON_INTEGER_OP(shl);
5686 break;
5687 case NEON_3R_VQSHL:
5688 GEN_NEON_INTEGER_OP_ENV(qshl);
5689 break;
5690 case NEON_3R_VRSHL:
5691 GEN_NEON_INTEGER_OP(rshl);
5692 break;
5693 case NEON_3R_VQRSHL:
5694 GEN_NEON_INTEGER_OP_ENV(qrshl);
5695 break;
5696 case NEON_3R_VMAX:
5697 GEN_NEON_INTEGER_OP(max);
5698 break;
5699 case NEON_3R_VMIN:
5700 GEN_NEON_INTEGER_OP(min);
5701 break;
5702 case NEON_3R_VABD:
5703 GEN_NEON_INTEGER_OP(abd);
5704 break;
5705 case NEON_3R_VABA:
5706 GEN_NEON_INTEGER_OP(abd);
5707 tcg_temp_free_i32(tmp2);
5708 tmp2 = neon_load_reg(rd, pass);
5709 gen_neon_add(size, tmp, tmp2);
5710 break;
5711 case NEON_3R_VADD_VSUB:
5712 if (!u) { /* VADD */
5713 gen_neon_add(size, tmp, tmp2);
5714 } else { /* VSUB */
5715 switch (size) {
5716 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5717 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5718 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
5719 default: abort();
5722 break;
5723 case NEON_3R_VTST_VCEQ:
5724 if (!u) { /* VTST */
5725 switch (size) {
5726 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5727 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5728 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
5729 default: abort();
5731 } else { /* VCEQ */
5732 switch (size) {
5733 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5734 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5735 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5736 default: abort();
5739 break;
5740 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
5741 switch (size) {
5742 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5743 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5744 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5745 default: abort();
5747 tcg_temp_free_i32(tmp2);
5748 tmp2 = neon_load_reg(rd, pass);
5749 if (u) { /* VMLS */
5750 gen_neon_rsb(size, tmp, tmp2);
5751 } else { /* VMLA */
5752 gen_neon_add(size, tmp, tmp2);
5754 break;
5755 case NEON_3R_VMUL:
5756 if (u) { /* polynomial */
5757 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
5758 } else { /* Integer */
5759 switch (size) {
5760 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5761 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5762 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5763 default: abort();
5766 break;
5767 case NEON_3R_VPMAX:
5768 GEN_NEON_INTEGER_OP(pmax);
5769 break;
5770 case NEON_3R_VPMIN:
5771 GEN_NEON_INTEGER_OP(pmin);
5772 break;
5773 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
5774 if (!u) { /* VQDMULH */
5775 switch (size) {
5776 case 1:
5777 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5778 break;
5779 case 2:
5780 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5781 break;
5782 default: abort();
5784 } else { /* VQRDMULH */
5785 switch (size) {
5786 case 1:
5787 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5788 break;
5789 case 2:
5790 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5791 break;
5792 default: abort();
5795 break;
5796 case NEON_3R_VPADD:
5797 switch (size) {
5798 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5799 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5800 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
5801 default: abort();
5803 break;
5804 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
5806 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5807 switch ((u << 2) | size) {
5808 case 0: /* VADD */
5809 case 4: /* VPADD */
5810 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5811 break;
5812 case 2: /* VSUB */
5813 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
5814 break;
5815 case 6: /* VABD */
5816 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5817 break;
5818 default:
5819 abort();
5821 tcg_temp_free_ptr(fpstatus);
5822 break;
5824 case NEON_3R_FLOAT_MULTIPLY:
5826 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5827 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5828 if (!u) {
5829 tcg_temp_free_i32(tmp2);
5830 tmp2 = neon_load_reg(rd, pass);
5831 if (size == 0) {
5832 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5833 } else {
5834 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5837 tcg_temp_free_ptr(fpstatus);
5838 break;
5840 case NEON_3R_FLOAT_CMP:
5842 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5843 if (!u) {
5844 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
5845 } else {
5846 if (size == 0) {
5847 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5848 } else {
5849 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5852 tcg_temp_free_ptr(fpstatus);
5853 break;
5855 case NEON_3R_FLOAT_ACMP:
5857 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5858 if (size == 0) {
5859 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5860 } else {
5861 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5863 tcg_temp_free_ptr(fpstatus);
5864 break;
5866 case NEON_3R_FLOAT_MINMAX:
5868 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5869 if (size == 0) {
5870 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
5871 } else {
5872 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
5874 tcg_temp_free_ptr(fpstatus);
5875 break;
5877 case NEON_3R_FLOAT_MISC:
5878 if (u) {
5879 /* VMAXNM/VMINNM */
5880 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5881 if (size == 0) {
5882 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
5883 } else {
5884 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
5886 tcg_temp_free_ptr(fpstatus);
5887 } else {
5888 if (size == 0) {
5889 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5890 } else {
5891 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5894 break;
5895 case NEON_3R_VFM:
5897 /* VFMA, VFMS: fused multiply-add */
5898 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5899 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5900 if (size) {
5901 /* VFMS */
5902 gen_helper_vfp_negs(tmp, tmp);
5904 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5905 tcg_temp_free_i32(tmp3);
5906 tcg_temp_free_ptr(fpstatus);
5907 break;
5909 default:
5910 abort();
5912 tcg_temp_free_i32(tmp2);
5914 /* Save the result. For elementwise operations we can put it
5915 straight into the destination register. For pairwise operations
5916 we have to be careful to avoid clobbering the source operands. */
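/* Only rd == rm needs the scratch staging: for a pairwise op pass 0
 * consumes both halves of rn and pass 1 both halves of rm, so writing the
 * pass 0 result straight into rd would corrupt rm before pass 1 reads it
 * (overwriting rn is harmless because it is not read again).
 */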
5917 if (pairwise && rd == rm) {
5918 neon_store_scratch(pass, tmp);
5919 } else {
5920 neon_store_reg(rd, pass, tmp);
5923 } /* for pass */
5924 if (pairwise && rd == rm) {
5925 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5926 tmp = neon_load_scratch(pass);
5927 neon_store_reg(rd, pass, tmp);
5930 /* End of 3 register same size operations. */
5931 } else if (insn & (1 << 4)) {
5932 if ((insn & 0x00380080) != 0) {
5933 /* Two registers and shift. */
5934 op = (insn >> 8) & 0xf;
5935 if (insn & (1 << 7)) {
5936 /* 64-bit shift. */
5937 if (op > 7) {
5938 return 1;
5940 size = 3;
5941 } else {
5942 size = 2;
5943 while ((insn & (1 << (size + 19))) == 0)
5944 size--;
5946 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5947 /* To avoid excessive duplication of ops we implement shift
5948 by immediate using the variable shift operations. */
5949 if (op < 8) {
5950 /* Shift by immediate:
5951 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5952 if (q && ((rd | rm) & 1)) {
5953 return 1;
5955 if (!u && (op == 4 || op == 6)) {
5956 return 1;
5958 /* Right shifts are encoded as N - shift, where N is the
5959 element size in bits. */
5960 if (op <= 4)
5961 shift = shift - (1 << (size + 3));
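/* For example a 32-bit VSHR #5 is encoded with imm6 = 64 - 5 = 59, so the
 * field extracted above is 0b11011 (27) and the subtraction turns it into
 * shift = 27 - 32 = -5; the variable-shift helpers used below treat a
 * negative count as a right shift.
 */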
5962 if (size == 3) {
5963 count = q + 1;
5964 } else {
5965 count = q ? 4: 2;
5967 switch (size) {
5968 case 0:
5969 imm = (uint8_t) shift;
5970 imm |= imm << 8;
5971 imm |= imm << 16;
5972 break;
5973 case 1:
5974 imm = (uint16_t) shift;
5975 imm |= imm << 16;
5976 break;
5977 case 2:
5978 case 3:
5979 imm = shift;
5980 break;
5981 default:
5982 abort();
5985 for (pass = 0; pass < count; pass++) {
5986 if (size == 3) {
5987 neon_load_reg64(cpu_V0, rm + pass);
5988 tcg_gen_movi_i64(cpu_V1, imm);
5989 switch (op) {
5990 case 0: /* VSHR */
5991 case 1: /* VSRA */
5992 if (u)
5993 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5994 else
5995 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
5996 break;
5997 case 2: /* VRSHR */
5998 case 3: /* VRSRA */
5999 if (u)
6000 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
6001 else
6002 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
6003 break;
6004 case 4: /* VSRI */
6005 case 5: /* VSHL, VSLI */
6006 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6007 break;
6008 case 6: /* VQSHLU */
6009 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6010 cpu_V0, cpu_V1);
6011 break;
6012 case 7: /* VQSHL */
6013 if (u) {
6014 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
6015 cpu_V0, cpu_V1);
6016 } else {
6017 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
6018 cpu_V0, cpu_V1);
6020 break;
6022 if (op == 1 || op == 3) {
6023 /* Accumulate. */
6024 neon_load_reg64(cpu_V1, rd + pass);
6025 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6026 } else if (op == 4 || (op == 5 && u)) {
6027 /* Insert */
6028 neon_load_reg64(cpu_V1, rd + pass);
6029 uint64_t mask;
6030 if (shift < -63 || shift > 63) {
6031 mask = 0;
6032 } else {
6033 if (op == 4) {
6034 mask = 0xffffffffffffffffull >> -shift;
6035 } else {
6036 mask = 0xffffffffffffffffull << shift;
6039 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6040 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6042 neon_store_reg64(cpu_V0, rd + pass);
6043 } else { /* size < 3 */
6044 /* Operands in tmp and tmp2. */
6045 tmp = neon_load_reg(rm, pass);
6046 tmp2 = tcg_temp_new_i32();
6047 tcg_gen_movi_i32(tmp2, imm);
6048 switch (op) {
6049 case 0: /* VSHR */
6050 case 1: /* VSRA */
6051 GEN_NEON_INTEGER_OP(shl);
6052 break;
6053 case 2: /* VRSHR */
6054 case 3: /* VRSRA */
6055 GEN_NEON_INTEGER_OP(rshl);
6056 break;
6057 case 4: /* VSRI */
6058 case 5: /* VSHL, VSLI */
6059 switch (size) {
6060 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6061 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6062 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
6063 default: abort();
6065 break;
6066 case 6: /* VQSHLU */
6067 switch (size) {
6068 case 0:
6069 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6070 tmp, tmp2);
6071 break;
6072 case 1:
6073 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6074 tmp, tmp2);
6075 break;
6076 case 2:
6077 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6078 tmp, tmp2);
6079 break;
6080 default:
6081 abort();
6083 break;
6084 case 7: /* VQSHL */
6085 GEN_NEON_INTEGER_OP_ENV(qshl);
6086 break;
6088 tcg_temp_free_i32(tmp2);
6090 if (op == 1 || op == 3) {
6091 /* Accumulate. */
6092 tmp2 = neon_load_reg(rd, pass);
6093 gen_neon_add(size, tmp, tmp2);
6094 tcg_temp_free_i32(tmp2);
6095 } else if (op == 4 || (op == 5 && u)) {
6096 /* Insert */
6097 switch (size) {
6098 case 0:
6099 if (op == 4)
6100 mask = 0xff >> -shift;
6101 else
6102 mask = (uint8_t)(0xff << shift);
6103 mask |= mask << 8;
6104 mask |= mask << 16;
6105 break;
6106 case 1:
6107 if (op == 4)
6108 mask = 0xffff >> -shift;
6109 else
6110 mask = (uint16_t)(0xffff << shift);
6111 mask |= mask << 16;
6112 break;
6113 case 2:
6114 if (shift < -31 || shift > 31) {
6115 mask = 0;
6116 } else {
6117 if (op == 4)
6118 mask = 0xffffffffu >> -shift;
6119 else
6120 mask = 0xffffffffu << shift;
6122 break;
6123 default:
6124 abort();
6126 tmp2 = neon_load_reg(rd, pass);
6127 tcg_gen_andi_i32(tmp, tmp, mask);
6128 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
6129 tcg_gen_or_i32(tmp, tmp, tmp2);
6130 tcg_temp_free_i32(tmp2);
6132 neon_store_reg(rd, pass, tmp);
6134 } /* for pass */
6135 } else if (op < 10) {
6136 /* Shift by immediate and narrow:
6137 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
6138 int input_unsigned = (op == 8) ? !u : u;
6139 if (rm & 1) {
6140 return 1;
6142 shift = shift - (1 << (size + 3));
6143 size++;
6144 if (size == 3) {
6145 tmp64 = tcg_const_i64(shift);
6146 neon_load_reg64(cpu_V0, rm);
6147 neon_load_reg64(cpu_V1, rm + 1);
6148 for (pass = 0; pass < 2; pass++) {
6149 TCGv_i64 in;
6150 if (pass == 0) {
6151 in = cpu_V0;
6152 } else {
6153 in = cpu_V1;
6155 if (q) {
6156 if (input_unsigned) {
6157 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
6158 } else {
6159 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
6161 } else {
6162 if (input_unsigned) {
6163 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
6164 } else {
6165 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
6168 tmp = tcg_temp_new_i32();
6169 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6170 neon_store_reg(rd, pass, tmp);
6171 } /* for pass */
6172 tcg_temp_free_i64(tmp64);
6173 } else {
6174 if (size == 1) {
6175 imm = (uint16_t)shift;
6176 imm |= imm << 16;
6177 } else {
6178 /* size == 2 */
6179 imm = (uint32_t)shift;
6181 tmp2 = tcg_const_i32(imm);
6182 tmp4 = neon_load_reg(rm + 1, 0);
6183 tmp5 = neon_load_reg(rm + 1, 1);
6184 for (pass = 0; pass < 2; pass++) {
6185 if (pass == 0) {
6186 tmp = neon_load_reg(rm, 0);
6187 } else {
6188 tmp = tmp4;
6190 gen_neon_shift_narrow(size, tmp, tmp2, q,
6191 input_unsigned);
6192 if (pass == 0) {
6193 tmp3 = neon_load_reg(rm, 1);
6194 } else {
6195 tmp3 = tmp5;
6197 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6198 input_unsigned);
6199 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
6200 tcg_temp_free_i32(tmp);
6201 tcg_temp_free_i32(tmp3);
6202 tmp = tcg_temp_new_i32();
6203 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6204 neon_store_reg(rd, pass, tmp);
6205 } /* for pass */
6206 tcg_temp_free_i32(tmp2);
6208 } else if (op == 10) {
6209 /* VSHLL, VMOVL */
6210 if (q || (rd & 1)) {
6211 return 1;
6213 tmp = neon_load_reg(rm, 0);
6214 tmp2 = neon_load_reg(rm, 1);
6215 for (pass = 0; pass < 2; pass++) {
6216 if (pass == 1)
6217 tmp = tmp2;
6219 gen_neon_widen(cpu_V0, tmp, size, u);
6221 if (shift != 0) {
6222 /* The shift is less than the width of the source
6223 type, so we can just shift the whole register. */
6224 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
6225 /* Widen the result of shift: we need to clear
6226 * the potential overflow bits resulting from
6227 * left bits of the narrow input appearing as
6228 * right bits of the left neighbouring narrow
6229 * input. */
6230 if (size < 2 || !u) {
6231 uint64_t imm64;
6232 if (size == 0) {
6233 imm = (0xffu >> (8 - shift));
6234 imm |= imm << 16;
6235 } else if (size == 1) {
6236 imm = 0xffff >> (16 - shift);
6237 } else {
6238 /* size == 2 */
6239 imm = 0xffffffff >> (32 - shift);
6241 if (size < 2) {
6242 imm64 = imm | (((uint64_t)imm) << 32);
6243 } else {
6244 imm64 = imm;
6246 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
6249 neon_store_reg64(cpu_V0, rd + pass);
6251 } else if (op >= 14) {
6252 /* VCVT fixed-point. */
6253 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6254 return 1;
6256 /* We have already masked out the must-be-1 top bit of imm6,
6257 * hence this 32-shift where the ARM ARM has 64-imm6.
6259 shift = 32 - shift;
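/* e.g. imm6 = 0b100101 (37): the extracted field is 5, so shift becomes
 * 32 - 5 = 27 fraction bits, the same result as the architectural
 * 64 - imm6.
 */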
6260 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6261 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
6262 if (!(op & 1)) {
6263 if (u)
6264 gen_vfp_ulto(0, shift, 1);
6265 else
6266 gen_vfp_slto(0, shift, 1);
6267 } else {
6268 if (u)
6269 gen_vfp_toul(0, shift, 1);
6270 else
6271 gen_vfp_tosl(0, shift, 1);
6273 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
6275 } else {
6276 return 1;
6278 } else { /* (insn & 0x00380080) == 0 */
6279 int invert;
6280 if (q && (rd & 1)) {
6281 return 1;
6284 op = (insn >> 8) & 0xf;
6285 /* One register and immediate. */
6286 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6287 invert = (insn & (1 << 5)) != 0;
6288 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6289 * We choose to not special-case this and will behave as if a
6290 * valid constant encoding of 0 had been given.
6292 switch (op) {
6293 case 0: case 1:
6294 /* no-op */
6295 break;
6296 case 2: case 3:
6297 imm <<= 8;
6298 break;
6299 case 4: case 5:
6300 imm <<= 16;
6301 break;
6302 case 6: case 7:
6303 imm <<= 24;
6304 break;
6305 case 8: case 9:
6306 imm |= imm << 16;
6307 break;
6308 case 10: case 11:
6309 imm = (imm << 8) | (imm << 24);
6310 break;
6311 case 12:
6312 imm = (imm << 8) | 0xff;
6313 break;
6314 case 13:
6315 imm = (imm << 16) | 0xffff;
6316 break;
6317 case 14:
6318 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6319 if (invert)
6320 imm = ~imm;
6321 break;
6322 case 15:
6323 if (invert) {
6324 return 1;
6326 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6327 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6328 break;
6330 if (invert)
6331 imm = ~imm;
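/* For example op (cmode) == 12 with an 8-bit immediate of 0xab expands to
 * the 32-bit pattern 0x0000abff, and op == 14 with invert set is handled
 * specially in the loop below, turning each immediate bit into a 0x00 or
 * 0xff byte of the result.
 */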
6333 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6334 if (op & 1 && op < 12) {
6335 tmp = neon_load_reg(rd, pass);
6336 if (invert) {
6337 /* The immediate value has already been inverted, so
6338 BIC becomes AND. */
6339 tcg_gen_andi_i32(tmp, tmp, imm);
6340 } else {
6341 tcg_gen_ori_i32(tmp, tmp, imm);
6343 } else {
6344 /* VMOV, VMVN. */
6345 tmp = tcg_temp_new_i32();
6346 if (op == 14 && invert) {
6347 int n;
6348 uint32_t val;
6349 val = 0;
6350 for (n = 0; n < 4; n++) {
6351 if (imm & (1 << (n + (pass & 1) * 4)))
6352 val |= 0xff << (n * 8);
6354 tcg_gen_movi_i32(tmp, val);
6355 } else {
6356 tcg_gen_movi_i32(tmp, imm);
6359 neon_store_reg(rd, pass, tmp);
6362 } else { /* (insn & 0x00800010 == 0x00800000) */
6363 if (size != 3) {
6364 op = (insn >> 8) & 0xf;
6365 if ((insn & (1 << 6)) == 0) {
6366 /* Three registers of different lengths. */
6367 int src1_wide;
6368 int src2_wide;
6369 int prewiden;
6370 /* undefreq: bit 0 : UNDEF if size == 0
6371 * bit 1 : UNDEF if size == 1
6372 * bit 2 : UNDEF if size == 2
6373 * bit 3 : UNDEF if U == 1
6374 * Note that [2:0] set implies 'always UNDEF'
6376 int undefreq;
6377 /* prewiden, src1_wide, src2_wide, undefreq */
6378 static const int neon_3reg_wide[16][4] = {
6379 {1, 0, 0, 0}, /* VADDL */
6380 {1, 1, 0, 0}, /* VADDW */
6381 {1, 0, 0, 0}, /* VSUBL */
6382 {1, 1, 0, 0}, /* VSUBW */
6383 {0, 1, 1, 0}, /* VADDHN */
6384 {0, 0, 0, 0}, /* VABAL */
6385 {0, 1, 1, 0}, /* VSUBHN */
6386 {0, 0, 0, 0}, /* VABDL */
6387 {0, 0, 0, 0}, /* VMLAL */
6388 {0, 0, 0, 9}, /* VQDMLAL */
6389 {0, 0, 0, 0}, /* VMLSL */
6390 {0, 0, 0, 9}, /* VQDMLSL */
6391 {0, 0, 0, 0}, /* Integer VMULL */
6392 {0, 0, 0, 1}, /* VQDMULL */
6393 {0, 0, 0, 0xa}, /* Polynomial VMULL */
6394 {0, 0, 0, 7}, /* Reserved: always UNDEF */
6397 prewiden = neon_3reg_wide[op][0];
6398 src1_wide = neon_3reg_wide[op][1];
6399 src2_wide = neon_3reg_wide[op][2];
6400 undefreq = neon_3reg_wide[op][3];
6402 if ((undefreq & (1 << size)) ||
6403 ((undefreq & 8) && u)) {
6404 return 1;
6406 if ((src1_wide && (rn & 1)) ||
6407 (src2_wide && (rm & 1)) ||
6408 (!src2_wide && (rd & 1))) {
6409 return 1;
6412 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6413 * outside the loop below as it only performs a single pass.
6415 if (op == 14 && size == 2) {
6416 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6418 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
6419 return 1;
6421 tcg_rn = tcg_temp_new_i64();
6422 tcg_rm = tcg_temp_new_i64();
6423 tcg_rd = tcg_temp_new_i64();
6424 neon_load_reg64(tcg_rn, rn);
6425 neon_load_reg64(tcg_rm, rm);
6426 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6427 neon_store_reg64(tcg_rd, rd);
6428 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6429 neon_store_reg64(tcg_rd, rd + 1);
6430 tcg_temp_free_i64(tcg_rn);
6431 tcg_temp_free_i64(tcg_rm);
6432 tcg_temp_free_i64(tcg_rd);
6433 return 0;
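/* Every other op in this group is handled by the two-pass loop below: each
 * pass consumes one 32-bit half of the narrow source(s), widening it into
 * cpu_V0/cpu_V1 as needed, and produces one 64-bit half of the wide result
 * (or, for the VADDHN/VSUBHN narrowing forms, one 32-bit half of the
 * narrow result).
 */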
6436 /* Avoid overlapping operands. Wide source operands are
6437 always aligned so will never overlap with wide
6438 destinations in problematic ways. */
6439 if (rd == rm && !src2_wide) {
6440 tmp = neon_load_reg(rm, 1);
6441 neon_store_scratch(2, tmp);
6442 } else if (rd == rn && !src1_wide) {
6443 tmp = neon_load_reg(rn, 1);
6444 neon_store_scratch(2, tmp);
6446 TCGV_UNUSED_I32(tmp3);
6447 for (pass = 0; pass < 2; pass++) {
6448 if (src1_wide) {
6449 neon_load_reg64(cpu_V0, rn + pass);
6450 TCGV_UNUSED_I32(tmp);
6451 } else {
6452 if (pass == 1 && rd == rn) {
6453 tmp = neon_load_scratch(2);
6454 } else {
6455 tmp = neon_load_reg(rn, pass);
6457 if (prewiden) {
6458 gen_neon_widen(cpu_V0, tmp, size, u);
6461 if (src2_wide) {
6462 neon_load_reg64(cpu_V1, rm + pass);
6463 TCGV_UNUSED_I32(tmp2);
6464 } else {
6465 if (pass == 1 && rd == rm) {
6466 tmp2 = neon_load_scratch(2);
6467 } else {
6468 tmp2 = neon_load_reg(rm, pass);
6470 if (prewiden) {
6471 gen_neon_widen(cpu_V1, tmp2, size, u);
6474 switch (op) {
6475 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6476 gen_neon_addl(size);
6477 break;
6478 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6479 gen_neon_subl(size);
6480 break;
6481 case 5: case 7: /* VABAL, VABDL */
6482 switch ((size << 1) | u) {
6483 case 0:
6484 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6485 break;
6486 case 1:
6487 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6488 break;
6489 case 2:
6490 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6491 break;
6492 case 3:
6493 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6494 break;
6495 case 4:
6496 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6497 break;
6498 case 5:
6499 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6500 break;
6501 default: abort();
6503 tcg_temp_free_i32(tmp2);
6504 tcg_temp_free_i32(tmp);
6505 break;
6506 case 8: case 9: case 10: case 11: case 12: case 13:
6507 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6508 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6509 break;
6510 case 14: /* Polynomial VMULL */
6511 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
6512 tcg_temp_free_i32(tmp2);
6513 tcg_temp_free_i32(tmp);
6514 break;
6515 default: /* 15 is RESERVED: caught earlier */
6516 abort();
6518 if (op == 13) {
6519 /* VQDMULL */
6520 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6521 neon_store_reg64(cpu_V0, rd + pass);
6522 } else if (op == 5 || (op >= 8 && op <= 11)) {
6523 /* Accumulate. */
6524 neon_load_reg64(cpu_V1, rd + pass);
6525 switch (op) {
6526 case 10: /* VMLSL */
6527 gen_neon_negl(cpu_V0, size);
6528 /* Fall through */
6529 case 5: case 8: /* VABAL, VMLAL */
6530 gen_neon_addl(size);
6531 break;
6532 case 9: case 11: /* VQDMLAL, VQDMLSL */
6533 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6534 if (op == 11) {
6535 gen_neon_negl(cpu_V0, size);
6537 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6538 break;
6539 default:
6540 abort();
6542 neon_store_reg64(cpu_V0, rd + pass);
6543 } else if (op == 4 || op == 6) {
6544 /* Narrowing operation. */
6545 tmp = tcg_temp_new_i32();
6546 if (!u) {
6547 switch (size) {
6548 case 0:
6549 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6550 break;
6551 case 1:
6552 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6553 break;
6554 case 2:
6555 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6556 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6557 break;
6558 default: abort();
6560 } else {
6561 switch (size) {
6562 case 0:
6563 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6564 break;
6565 case 1:
6566 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6567 break;
6568 case 2:
6569 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6570 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6571 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6572 break;
6573 default: abort();
6576 if (pass == 0) {
6577 tmp3 = tmp;
6578 } else {
6579 neon_store_reg(rd, 0, tmp3);
6580 neon_store_reg(rd, 1, tmp);
6582 } else {
6583 /* Write back the result. */
6584 neon_store_reg64(cpu_V0, rd + pass);
6587 } else {
6588 /* Two registers and a scalar. NB that for ops of this form
6589 * the ARM ARM labels bit 24 as Q, but it is in our variable
6590 * 'u', not 'q'.
6592 if (size == 0) {
6593 return 1;
6595 switch (op) {
6596 case 1: /* Floating point VMLA scalar */
6597 case 5: /* Floating point VMLS scalar */
6598 case 9: /* Floating point VMUL scalar */
6599 if (size == 1) {
6600 return 1;
6602 /* fall through */
6603 case 0: /* Integer VMLA scalar */
6604 case 4: /* Integer VMLS scalar */
6605 case 8: /* Integer VMUL scalar */
6606 case 12: /* VQDMULH scalar */
6607 case 13: /* VQRDMULH scalar */
6608 if (u && ((rd | rn) & 1)) {
6609 return 1;
6611 tmp = neon_get_scalar(size, rm);
6612 neon_store_scratch(0, tmp);
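/* The scalar operand is kept in a scratch slot and reloaded on every pass,
 * because tmp is handed over to neon_store_reg() at the end of each pass.
 */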
6613 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6614 tmp = neon_load_scratch(0);
6615 tmp2 = neon_load_reg(rn, pass);
6616 if (op == 12) {
6617 if (size == 1) {
6618 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6619 } else {
6620 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6622 } else if (op == 13) {
6623 if (size == 1) {
6624 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6625 } else {
6626 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6628 } else if (op & 1) {
6629 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6630 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6631 tcg_temp_free_ptr(fpstatus);
6632 } else {
6633 switch (size) {
6634 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6635 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6636 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6637 default: abort();
6640 tcg_temp_free_i32(tmp2);
6641 if (op < 8) {
6642 /* Accumulate. */
6643 tmp2 = neon_load_reg(rd, pass);
6644 switch (op) {
6645 case 0:
6646 gen_neon_add(size, tmp, tmp2);
6647 break;
6648 case 1:
6650 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6651 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6652 tcg_temp_free_ptr(fpstatus);
6653 break;
6655 case 4:
6656 gen_neon_rsb(size, tmp, tmp2);
6657 break;
6658 case 5:
6660 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6661 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6662 tcg_temp_free_ptr(fpstatus);
6663 break;
6665 default:
6666 abort();
6668 tcg_temp_free_i32(tmp2);
6670 neon_store_reg(rd, pass, tmp);
6672 break;
6673 case 3: /* VQDMLAL scalar */
6674 case 7: /* VQDMLSL scalar */
6675 case 11: /* VQDMULL scalar */
6676 if (u == 1) {
6677 return 1;
6679 /* fall through */
6680 case 2: /* VMLAL scalar */
6681 case 6: /* VMLSL scalar */
6682 case 10: /* VMULL scalar */
6683 if (rd & 1) {
6684 return 1;
6686 tmp2 = neon_get_scalar(size, rm);
6687 /* We need a copy of tmp2 because gen_neon_mull
6688 * deletes it during pass 0. */
6689 tmp4 = tcg_temp_new_i32();
6690 tcg_gen_mov_i32(tmp4, tmp2);
6691 tmp3 = neon_load_reg(rn, 1);
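/* rn's top half is also loaded up front: pass 0 writes its 64-bit result
 * to rd, which may alias rn, so rn[1] has to be read before it can be
 * clobbered.
 */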
6693 for (pass = 0; pass < 2; pass++) {
6694 if (pass == 0) {
6695 tmp = neon_load_reg(rn, 0);
6696 } else {
6697 tmp = tmp3;
6698 tmp2 = tmp4;
6700 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6701 if (op != 11) {
6702 neon_load_reg64(cpu_V1, rd + pass);
6704 switch (op) {
6705 case 6:
6706 gen_neon_negl(cpu_V0, size);
6707 /* Fall through */
6708 case 2:
6709 gen_neon_addl(size);
6710 break;
6711 case 3: case 7:
6712 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6713 if (op == 7) {
6714 gen_neon_negl(cpu_V0, size);
6716 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6717 break;
6718 case 10:
6719 /* no-op */
6720 break;
6721 case 11:
6722 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6723 break;
6724 default:
6725 abort();
6727 neon_store_reg64(cpu_V0, rd + pass);
6731 break;
6732 default: /* 14 and 15 are RESERVED */
6733 return 1;
6736 } else { /* size == 3 */
6737 if (!u) {
6738 /* Extract. */
6739 imm = (insn >> 8) & 0xf;
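/* VEXT: the result is the concatenation Vm:Vn shifted right by imm bytes,
 * with Vn supplying the low-order bytes (e.g. imm == 3 takes bytes 3..7 of
 * Dn followed by bytes 0..2 of Dm).  Values of imm above 7 only make sense
 * for the quad (128-bit) form.
 */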
6741 if (imm > 7 && !q)
6742 return 1;
6744 if (q && ((rd | rn | rm) & 1)) {
6745 return 1;
6748 if (imm == 0) {
6749 neon_load_reg64(cpu_V0, rn);
6750 if (q) {
6751 neon_load_reg64(cpu_V1, rn + 1);
6753 } else if (imm == 8) {
6754 neon_load_reg64(cpu_V0, rn + 1);
6755 if (q) {
6756 neon_load_reg64(cpu_V1, rm);
6758 } else if (q) {
6759 tmp64 = tcg_temp_new_i64();
6760 if (imm < 8) {
6761 neon_load_reg64(cpu_V0, rn);
6762 neon_load_reg64(tmp64, rn + 1);
6763 } else {
6764 neon_load_reg64(cpu_V0, rn + 1);
6765 neon_load_reg64(tmp64, rm);
6767 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
6768 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
6769 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6770 if (imm < 8) {
6771 neon_load_reg64(cpu_V1, rm);
6772 } else {
6773 neon_load_reg64(cpu_V1, rm + 1);
6774 imm -= 8;
6776 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6777 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6778 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
6779 tcg_temp_free_i64(tmp64);
6780 } else {
6781 /* Non-quad case: extract directly from the single Dm:Dn pair. */
6782 neon_load_reg64(cpu_V0, rn);
6783 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
6784 neon_load_reg64(cpu_V1, rm);
6785 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6786 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6788 neon_store_reg64(cpu_V0, rd);
6789 if (q) {
6790 neon_store_reg64(cpu_V1, rd + 1);
6792 } else if ((insn & (1 << 11)) == 0) {
6793 /* Two register misc. */
6794 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6795 size = (insn >> 18) & 3;
6796 /* UNDEF for unknown op values and bad op-size combinations */
6797 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6798 return 1;
6800 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6801 q && ((rm | rd) & 1)) {
6802 return 1;
6804 switch (op) {
6805 case NEON_2RM_VREV64:
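/* VREV64: reverse the elements within each 64-bit doubleword.  This is
 * done by swapping the two 32-bit halves and, for 8/16-bit elements, also
 * reversing within each half.
 */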
6806 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6807 tmp = neon_load_reg(rm, pass * 2);
6808 tmp2 = neon_load_reg(rm, pass * 2 + 1);
6809 switch (size) {
6810 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6811 case 1: gen_swap_half(tmp); break;
6812 case 2: /* no-op */ break;
6813 default: abort();
6815 neon_store_reg(rd, pass * 2 + 1, tmp);
6816 if (size == 2) {
6817 neon_store_reg(rd, pass * 2, tmp2);
6818 } else {
6819 switch (size) {
6820 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6821 case 1: gen_swap_half(tmp2); break;
6822 default: abort();
6824 neon_store_reg(rd, pass * 2, tmp2);
6827 break;
6828 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6829 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
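/* Pairwise add long: widen each half of the source vector, add adjacent
 * element pairs, and for VPADAL additionally accumulate into the existing
 * destination value.
 */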
6830 for (pass = 0; pass < q + 1; pass++) {
6831 tmp = neon_load_reg(rm, pass * 2);
6832 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6833 tmp = neon_load_reg(rm, pass * 2 + 1);
6834 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6835 switch (size) {
6836 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6837 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6838 case 2: tcg_gen_add_i64(CPU_V001); break;
6839 default: abort();
6841 if (op >= NEON_2RM_VPADAL) {
6842 /* Accumulate. */
6843 neon_load_reg64(cpu_V1, rd + pass);
6844 gen_neon_addl(size);
6846 neon_store_reg64(cpu_V0, rd + pass);
6848 break;
6849 case NEON_2RM_VTRN:
6850 if (size == 2) {
6851 int n;
6852 for (n = 0; n < (q ? 4 : 2); n += 2) {
6853 tmp = neon_load_reg(rm, n);
6854 tmp2 = neon_load_reg(rd, n + 1);
6855 neon_store_reg(rm, n, tmp2);
6856 neon_store_reg(rd, n + 1, tmp);
6858 } else {
6859 goto elementwise;
6861 break;
6862 case NEON_2RM_VUZP:
6863 if (gen_neon_unzip(rd, rm, size, q)) {
6864 return 1;
6866 break;
6867 case NEON_2RM_VZIP:
6868 if (gen_neon_zip(rd, rm, size, q)) {
6869 return 1;
6871 break;
6872 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6873 /* also VQMOVUN; op field and mnemonics don't line up */
6874 if (rm & 1) {
6875 return 1;
6877 TCGV_UNUSED_I32(tmp2);
6878 for (pass = 0; pass < 2; pass++) {
6879 neon_load_reg64(cpu_V0, rm + pass);
6880 tmp = tcg_temp_new_i32();
6881 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6882 tmp, cpu_V0);
6883 if (pass == 0) {
6884 tmp2 = tmp;
6885 } else {
6886 neon_store_reg(rd, 0, tmp2);
6887 neon_store_reg(rd, 1, tmp);
6890 break;
6891 case NEON_2RM_VSHLL:
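/* VSHLL with shift amount equal to the element size (the only form encoded
 * in the two-reg-misc space): widen each element, then shift it left by
 * its original width.
 */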
6892 if (q || (rd & 1)) {
6893 return 1;
6895 tmp = neon_load_reg(rm, 0);
6896 tmp2 = neon_load_reg(rm, 1);
6897 for (pass = 0; pass < 2; pass++) {
6898 if (pass == 1)
6899 tmp = tmp2;
6900 gen_neon_widen(cpu_V0, tmp, size, 1);
6901 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
6902 neon_store_reg64(cpu_V0, rd + pass);
6904 break;
6905 case NEON_2RM_VCVT_F16_F32:
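/* VCVT.F16.F32: convert the four single-precision elements of the source
 * (a Q register's worth of input) to half precision and pack the results
 * pairwise into the destination D register.
 */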
6906 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
6907 q || (rm & 1)) {
6908 return 1;
6910 tmp = tcg_temp_new_i32();
6911 tmp2 = tcg_temp_new_i32();
6912 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
6913 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6914 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
6915 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6916 tcg_gen_shli_i32(tmp2, tmp2, 16);
6917 tcg_gen_or_i32(tmp2, tmp2, tmp);
6918 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
6919 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6920 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6921 neon_store_reg(rd, 0, tmp2);
6922 tmp2 = tcg_temp_new_i32();
6923 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6924 tcg_gen_shli_i32(tmp2, tmp2, 16);
6925 tcg_gen_or_i32(tmp2, tmp2, tmp);
6926 neon_store_reg(rd, 1, tmp2);
6927 tcg_temp_free_i32(tmp);
6928 break;
6929 case NEON_2RM_VCVT_F32_F16:
6930 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
6931 q || (rd & 1)) {
6932 return 1;
6934 tmp3 = tcg_temp_new_i32();
6935 tmp = neon_load_reg(rm, 0);
6936 tmp2 = neon_load_reg(rm, 1);
6937 tcg_gen_ext16u_i32(tmp3, tmp);
6938 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6939 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6940 tcg_gen_shri_i32(tmp3, tmp, 16);
6941 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6942 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
6943 tcg_temp_free_i32(tmp);
6944 tcg_gen_ext16u_i32(tmp3, tmp2);
6945 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6946 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6947 tcg_gen_shri_i32(tmp3, tmp2, 16);
6948 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6949 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
6950 tcg_temp_free_i32(tmp2);
6951 tcg_temp_free_i32(tmp3);
6952 break;
6953 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6954 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
6955 || ((rm | rd) & 1)) {
6956 return 1;
6958 tmp = tcg_const_i32(rd);
6959 tmp2 = tcg_const_i32(rm);
6961 /* Bit 6 is the lowest opcode bit; it distinguishes between
6962 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6964 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6966 if (op == NEON_2RM_AESE) {
6967 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6968 } else {
6969 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6971 tcg_temp_free_i32(tmp);
6972 tcg_temp_free_i32(tmp2);
6973 tcg_temp_free_i32(tmp3);
6974 break;
6975 case NEON_2RM_SHA1H:
6976 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
6977 || ((rm | rd) & 1)) {
6978 return 1;
6980 tmp = tcg_const_i32(rd);
6981 tmp2 = tcg_const_i32(rm);
6983 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
6985 tcg_temp_free_i32(tmp);
6986 tcg_temp_free_i32(tmp2);
6987 break;
6988 case NEON_2RM_SHA1SU1:
6989 if ((rm | rd) & 1) {
6990 return 1;
6992 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6993 if (q) {
6994 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
6995 return 1;
6997 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
6998 return 1;
7000 tmp = tcg_const_i32(rd);
7001 tmp2 = tcg_const_i32(rm);
7002 if (q) {
7003 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
7004 } else {
7005 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
7007 tcg_temp_free_i32(tmp);
7008 tcg_temp_free_i32(tmp2);
7009 break;
7010 default:
7011 elementwise:
7012 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7013 if (neon_2rm_is_float_op(op)) {
7014 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7015 neon_reg_offset(rm, pass));
7016 TCGV_UNUSED_I32(tmp);
7017 } else {
7018 tmp = neon_load_reg(rm, pass);
7020 switch (op) {
7021 case NEON_2RM_VREV32:
7022 switch (size) {
7023 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7024 case 1: gen_swap_half(tmp); break;
7025 default: abort();
7027 break;
7028 case NEON_2RM_VREV16:
7029 gen_rev16(tmp);
7030 break;
7031 case NEON_2RM_VCLS:
7032 switch (size) {
7033 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7034 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7035 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
7036 default: abort();
7038 break;
7039 case NEON_2RM_VCLZ:
7040 switch (size) {
7041 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7042 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7043 case 2: gen_helper_clz(tmp, tmp); break;
7044 default: abort();
7046 break;
7047 case NEON_2RM_VCNT:
7048 gen_helper_neon_cnt_u8(tmp, tmp);
7049 break;
7050 case NEON_2RM_VMVN:
7051 tcg_gen_not_i32(tmp, tmp);
7052 break;
7053 case NEON_2RM_VQABS:
7054 switch (size) {
7055 case 0:
7056 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7057 break;
7058 case 1:
7059 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7060 break;
7061 case 2:
7062 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7063 break;
7064 default: abort();
7066 break;
7067 case NEON_2RM_VQNEG:
7068 switch (size) {
7069 case 0:
7070 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7071 break;
7072 case 1:
7073 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7074 break;
7075 case 2:
7076 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7077 break;
7078 default: abort();
7080 break;
7081 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
7082 tmp2 = tcg_const_i32(0);
7083 switch(size) {
7084 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7085 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7086 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
7087 default: abort();
7089 tcg_temp_free_i32(tmp2);
7090 if (op == NEON_2RM_VCLE0) {
7091 tcg_gen_not_i32(tmp, tmp);
7093 break;
7094 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
7095 tmp2 = tcg_const_i32(0);
7096 switch(size) {
7097 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7098 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7099 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
7100 default: abort();
7102 tcg_temp_free_i32(tmp2);
7103 if (op == NEON_2RM_VCLT0) {
7104 tcg_gen_not_i32(tmp, tmp);
7106 break;
7107 case NEON_2RM_VCEQ0:
7108 tmp2 = tcg_const_i32(0);
7109 switch(size) {
7110 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7111 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7112 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
7113 default: abort();
7115 tcg_temp_free_i32(tmp2);
7116 break;
7117 case NEON_2RM_VABS:
7118 switch(size) {
7119 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7120 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7121 case 2: tcg_gen_abs_i32(tmp, tmp); break;
7122 default: abort();
7124 break;
7125 case NEON_2RM_VNEG:
7126 tmp2 = tcg_const_i32(0);
7127 gen_neon_rsb(size, tmp, tmp2);
7128 tcg_temp_free_i32(tmp2);
7129 break;
7130 case NEON_2RM_VCGT0_F:
7132 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7133 tmp2 = tcg_const_i32(0);
7134 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
7135 tcg_temp_free_i32(tmp2);
7136 tcg_temp_free_ptr(fpstatus);
7137 break;
7139 case NEON_2RM_VCGE0_F:
7141 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7142 tmp2 = tcg_const_i32(0);
7143 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
7144 tcg_temp_free_i32(tmp2);
7145 tcg_temp_free_ptr(fpstatus);
7146 break;
7148 case NEON_2RM_VCEQ0_F:
7150 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7151 tmp2 = tcg_const_i32(0);
7152 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
7153 tcg_temp_free_i32(tmp2);
7154 tcg_temp_free_ptr(fpstatus);
7155 break;
7157 case NEON_2RM_VCLE0_F:
7159 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7160 tmp2 = tcg_const_i32(0);
7161 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
7162 tcg_temp_free_i32(tmp2);
7163 tcg_temp_free_ptr(fpstatus);
7164 break;
7166 case NEON_2RM_VCLT0_F:
7168 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7169 tmp2 = tcg_const_i32(0);
7170 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
7171 tcg_temp_free_i32(tmp2);
7172 tcg_temp_free_ptr(fpstatus);
7173 break;
7175 case NEON_2RM_VABS_F:
7176 gen_vfp_abs(0);
7177 break;
7178 case NEON_2RM_VNEG_F:
7179 gen_vfp_neg(0);
7180 break;
7181 case NEON_2RM_VSWP:
7182 tmp2 = neon_load_reg(rd, pass);
7183 neon_store_reg(rm, pass, tmp2);
7184 break;
7185 case NEON_2RM_VTRN:
7186 tmp2 = neon_load_reg(rd, pass);
7187 switch (size) {
7188 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7189 case 1: gen_neon_trn_u16(tmp, tmp2); break;
7190 default: abort();
7192 neon_store_reg(rm, pass, tmp2);
7193 break;
7194 case NEON_2RM_VRINTN:
7195 case NEON_2RM_VRINTA:
7196 case NEON_2RM_VRINTM:
7197 case NEON_2RM_VRINTP:
7198 case NEON_2RM_VRINTZ:
7200 TCGv_i32 tcg_rmode;
7201 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7202 int rmode;
7204 if (op == NEON_2RM_VRINTZ) {
7205 rmode = FPROUNDING_ZERO;
7206 } else {
7207 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7210 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7211 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7212 cpu_env);
7213 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7214 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7215 cpu_env);
7216 tcg_temp_free_ptr(fpstatus);
7217 tcg_temp_free_i32(tcg_rmode);
7218 break;
7220 case NEON_2RM_VRINTX:
7222 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7223 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7224 tcg_temp_free_ptr(fpstatus);
7225 break;
7227 case NEON_2RM_VCVTAU:
7228 case NEON_2RM_VCVTAS:
7229 case NEON_2RM_VCVTNU:
7230 case NEON_2RM_VCVTNS:
7231 case NEON_2RM_VCVTPU:
7232 case NEON_2RM_VCVTPS:
7233 case NEON_2RM_VCVTMU:
7234 case NEON_2RM_VCVTMS:
7236 bool is_signed = !extract32(insn, 7, 1);
7237 TCGv_ptr fpst = get_fpstatus_ptr(1);
7238 TCGv_i32 tcg_rmode, tcg_shift;
7239 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7241 tcg_shift = tcg_const_i32(0);
7242 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7243 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7244 cpu_env);
7246 if (is_signed) {
7247 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7248 tcg_shift, fpst);
7249 } else {
7250 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7251 tcg_shift, fpst);
7254 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7255 cpu_env);
7256 tcg_temp_free_i32(tcg_rmode);
7257 tcg_temp_free_i32(tcg_shift);
7258 tcg_temp_free_ptr(fpst);
7259 break;
7261 case NEON_2RM_VRECPE:
7263 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7264 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7265 tcg_temp_free_ptr(fpstatus);
7266 break;
7268 case NEON_2RM_VRSQRTE:
7270 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7271 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7272 tcg_temp_free_ptr(fpstatus);
7273 break;
7275 case NEON_2RM_VRECPE_F:
7277 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7278 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7279 tcg_temp_free_ptr(fpstatus);
7280 break;
7282 case NEON_2RM_VRSQRTE_F:
7284 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7285 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7286 tcg_temp_free_ptr(fpstatus);
7287 break;
7289 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
7290 gen_vfp_sito(0, 1);
7291 break;
7292 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
7293 gen_vfp_uito(0, 1);
7294 break;
7295 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
7296 gen_vfp_tosiz(0, 1);
7297 break;
7298 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
7299 gen_vfp_touiz(0, 1);
7300 break;
7301 default:
7302 /* Reserved op values were caught by the
7303 * neon_2rm_sizes[] check earlier.
7305 abort();
7307 if (neon_2rm_is_float_op(op)) {
7308 tcg_gen_st_f32(cpu_F0s, cpu_env,
7309 neon_reg_offset(rd, pass));
7310 } else {
7311 neon_store_reg(rd, pass, tmp);
7314 break;
7316 } else if ((insn & (1 << 10)) == 0) {
7317 /* VTBL, VTBX. */
7318 int n = ((insn >> 8) & 3) + 1;
7319 if ((rn + n) > 32) {
7320 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7321 * helper function running off the end of the register file.
7323 return 1;
7325 n <<= 3;
7326 if (insn & (1 << 6)) {
7327 tmp = neon_load_reg(rd, 0);
7328 } else {
7329 tmp = tcg_temp_new_i32();
7330 tcg_gen_movi_i32(tmp, 0);
7332 tmp2 = neon_load_reg(rm, 0);
7333 tmp4 = tcg_const_i32(rn);
7334 tmp5 = tcg_const_i32(n);
7335 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7336 tcg_temp_free_i32(tmp);
7337 if (insn & (1 << 6)) {
7338 tmp = neon_load_reg(rd, 1);
7339 } else {
7340 tmp = tcg_temp_new_i32();
7341 tcg_gen_movi_i32(tmp, 0);
7343 tmp3 = neon_load_reg(rm, 1);
7344 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
7345 tcg_temp_free_i32(tmp5);
7346 tcg_temp_free_i32(tmp4);
7347 neon_store_reg(rd, 0, tmp2);
7348 neon_store_reg(rd, 1, tmp3);
7349 tcg_temp_free_i32(tmp);
7350 } else if ((insn & 0x380) == 0) {
7351 /* VDUP */
7352 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7353 return 1;
7355 if (insn & (1 << 19)) {
7356 tmp = neon_load_reg(rm, 1);
7357 } else {
7358 tmp = neon_load_reg(rm, 0);
7360 if (insn & (1 << 16)) {
7361 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
7362 } else if (insn & (1 << 17)) {
7363 if ((insn >> 18) & 1)
7364 gen_neon_dup_high16(tmp);
7365 else
7366 gen_neon_dup_low16(tmp);
7368 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7369 tmp2 = tcg_temp_new_i32();
7370 tcg_gen_mov_i32(tmp2, tmp);
7371 neon_store_reg(rd, pass, tmp2);
7373 tcg_temp_free_i32(tmp);
7374 } else {
7375 return 1;
7379 return 0;
7382 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
7384 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7385 const ARMCPRegInfo *ri;
7387 cpnum = (insn >> 8) & 0xf;
7389 /* First check for coprocessor space used for XScale/iwMMXt insns */
7390 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
7391 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7392 return 1;
7394 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7395 return disas_iwmmxt_insn(s, insn);
7396 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7397 return disas_dsp_insn(s, insn);
7399 return 1;
7402 /* Otherwise treat as a generic register access */
7403 is64 = (insn & (1 << 25)) == 0;
7404 if (!is64 && ((insn & (1 << 4)) == 0)) {
7405 /* cdp */
7406 return 1;
7409 crm = insn & 0xf;
7410 if (is64) {
7411 crn = 0;
7412 opc1 = (insn >> 4) & 0xf;
7413 opc2 = 0;
7414 rt2 = (insn >> 16) & 0xf;
7415 } else {
7416 crn = (insn >> 16) & 0xf;
7417 opc1 = (insn >> 21) & 7;
7418 opc2 = (insn >> 5) & 7;
7419 rt2 = 0;
7421 isread = (insn >> 20) & 1;
7422 rt = (insn >> 12) & 0xf;
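/* For the 64-bit transfers (MCRR/MRRC) the coprocessor register is named
 * by opc1 in bits [7:4] with Rt2 in [19:16]; for MCR/MRC it is opc1
 * [23:21], opc2 [7:5] and CRn [19:16].  ENCODE_CP_REG() folds the decoded
 * fields into a single lookup key for the search below.
 */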
7424 ri = get_arm_cp_reginfo(s->cp_regs,
7425 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
7426 if (ri) {
7427 /* Check access permissions */
7428 if (!cp_access_ok(s->current_el, ri, isread)) {
7429 return 1;
7432 if (ri->accessfn ||
7433 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
7434 /* Emit code to perform further access permissions checks at
7435 * runtime; this may result in an exception.
7436 * Note that on XScale all cp0..cp13 registers do an access check
7437 * call in order to handle c15_cpar.
7439 TCGv_ptr tmpptr;
7440 TCGv_i32 tcg_syn, tcg_isread;
7441 uint32_t syndrome;
7443 /* Note that since we are an implementation which takes an
7444 * exception on a trapped conditional instruction only if the
7445 * instruction passes its condition code check, we can take
7446 * advantage of the clause in the ARM ARM that allows us to set
7447 * the COND field in the instruction to 0xE in all cases.
7448 * We could fish the actual condition out of the insn (ARM)
7449 * or the condexec bits (Thumb) but it isn't necessary.
7451 switch (cpnum) {
7452 case 14:
7453 if (is64) {
7454 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7455 isread, false);
7456 } else {
7457 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7458 rt, isread, false);
7460 break;
7461 case 15:
7462 if (is64) {
7463 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7464 isread, false);
7465 } else {
7466 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7467 rt, isread, false);
7469 break;
7470 default:
7471 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7472 * so this can only happen if this is an ARMv7 or earlier CPU,
7473 * in which case the syndrome information won't actually be
7474 * guest visible.
7476 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
7477 syndrome = syn_uncategorized();
7478 break;
7481 gen_set_condexec(s);
7482 gen_set_pc_im(s, s->pc - 4);
7483 tmpptr = tcg_const_ptr(ri);
7484 tcg_syn = tcg_const_i32(syndrome);
7485 tcg_isread = tcg_const_i32(isread);
7486 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7487 tcg_isread);
7488 tcg_temp_free_ptr(tmpptr);
7489 tcg_temp_free_i32(tcg_syn);
7490 tcg_temp_free_i32(tcg_isread);
7493 /* Handle special cases first */
7494 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7495 case ARM_CP_NOP:
7496 return 0;
7497 case ARM_CP_WFI:
7498 if (isread) {
7499 return 1;
7501 gen_set_pc_im(s, s->pc);
7502 s->is_jmp = DISAS_WFI;
7503 return 0;
7504 default:
7505 break;
7508 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7509 gen_io_start();
7512 if (isread) {
7513 /* Read */
7514 if (is64) {
7515 TCGv_i64 tmp64;
7516 TCGv_i32 tmp;
7517 if (ri->type & ARM_CP_CONST) {
7518 tmp64 = tcg_const_i64(ri->resetvalue);
7519 } else if (ri->readfn) {
7520 TCGv_ptr tmpptr;
7521 tmp64 = tcg_temp_new_i64();
7522 tmpptr = tcg_const_ptr(ri);
7523 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7524 tcg_temp_free_ptr(tmpptr);
7525 } else {
7526 tmp64 = tcg_temp_new_i64();
7527 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7529 tmp = tcg_temp_new_i32();
7530 tcg_gen_extrl_i64_i32(tmp, tmp64);
7531 store_reg(s, rt, tmp);
7532 tcg_gen_shri_i64(tmp64, tmp64, 32);
7533 tmp = tcg_temp_new_i32();
7534 tcg_gen_extrl_i64_i32(tmp, tmp64);
7535 tcg_temp_free_i64(tmp64);
7536 store_reg(s, rt2, tmp);
7537 } else {
7538 TCGv_i32 tmp;
7539 if (ri->type & ARM_CP_CONST) {
7540 tmp = tcg_const_i32(ri->resetvalue);
7541 } else if (ri->readfn) {
7542 TCGv_ptr tmpptr;
7543 tmp = tcg_temp_new_i32();
7544 tmpptr = tcg_const_ptr(ri);
7545 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7546 tcg_temp_free_ptr(tmpptr);
7547 } else {
7548 tmp = load_cpu_offset(ri->fieldoffset);
7550 if (rt == 15) {
7551 /* A destination register of r15 for 32-bit loads sets
7552 * the condition codes from the high 4 bits of the value
7554 gen_set_nzcv(tmp);
7555 tcg_temp_free_i32(tmp);
7556 } else {
7557 store_reg(s, rt, tmp);
7560 } else {
7561 /* Write */
7562 if (ri->type & ARM_CP_CONST) {
7563 /* If not forbidden by access permissions, treat as WI */
7564 return 0;
7567 if (is64) {
7568 TCGv_i32 tmplo, tmphi;
7569 TCGv_i64 tmp64 = tcg_temp_new_i64();
7570 tmplo = load_reg(s, rt);
7571 tmphi = load_reg(s, rt2);
7572 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7573 tcg_temp_free_i32(tmplo);
7574 tcg_temp_free_i32(tmphi);
7575 if (ri->writefn) {
7576 TCGv_ptr tmpptr = tcg_const_ptr(ri);
7577 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7578 tcg_temp_free_ptr(tmpptr);
7579 } else {
7580 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7582 tcg_temp_free_i64(tmp64);
7583 } else {
7584 if (ri->writefn) {
7585 TCGv_i32 tmp;
7586 TCGv_ptr tmpptr;
7587 tmp = load_reg(s, rt);
7588 tmpptr = tcg_const_ptr(ri);
7589 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7590 tcg_temp_free_ptr(tmpptr);
7591 tcg_temp_free_i32(tmp);
7592 } else {
7593 TCGv_i32 tmp = load_reg(s, rt);
7594 store_cpu_offset(tmp, ri->fieldoffset);
7599 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7600 /* I/O operations must end the TB here (whether read or write) */
7601 gen_io_end();
7602 gen_lookup_tb(s);
7603 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
7604 /* We default to ending the TB on a coprocessor register write,
7605 * but allow this to be suppressed by the register definition
7606 * (usually only necessary to work around guest bugs).
7608 gen_lookup_tb(s);
7611 return 0;
7614 /* Unknown register; this might be a guest error or a QEMU
7615 * unimplemented feature.
7617 if (is64) {
7618 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7619 "64 bit system register cp:%d opc1: %d crm:%d "
7620 "(%s)\n",
7621 isread ? "read" : "write", cpnum, opc1, crm,
7622 s->ns ? "non-secure" : "secure");
7623 } else {
7624 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7625 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7626 "(%s)\n",
7627 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7628 s->ns ? "non-secure" : "secure");
7631 return 1;
7635 /* Store a 64-bit value to a register pair. Clobbers val. */
7636 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
7638 TCGv_i32 tmp;
7639 tmp = tcg_temp_new_i32();
7640 tcg_gen_extrl_i64_i32(tmp, val);
7641 store_reg(s, rlow, tmp);
7642 tmp = tcg_temp_new_i32();
7643 tcg_gen_shri_i64(val, val, 32);
7644 tcg_gen_extrl_i64_i32(tmp, val);
7645 store_reg(s, rhigh, tmp);
7648 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
7649 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
7651 TCGv_i64 tmp;
7652 TCGv_i32 tmp2;
7654 /* Load value and extend to 64 bits. */
7655 tmp = tcg_temp_new_i64();
7656 tmp2 = load_reg(s, rlow);
7657 tcg_gen_extu_i32_i64(tmp, tmp2);
7658 tcg_temp_free_i32(tmp2);
7659 tcg_gen_add_i64(val, val, tmp);
7660 tcg_temp_free_i64(tmp);
7663 /* load and add a 64-bit value from a register pair. */
7664 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
7666 TCGv_i64 tmp;
7667 TCGv_i32 tmpl;
7668 TCGv_i32 tmph;
7670 /* Load 64-bit value rd:rn. */
7671 tmpl = load_reg(s, rlow);
7672 tmph = load_reg(s, rhigh);
7673 tmp = tcg_temp_new_i64();
7674 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7675 tcg_temp_free_i32(tmpl);
7676 tcg_temp_free_i32(tmph);
7677 tcg_gen_add_i64(val, val, tmp);
7678 tcg_temp_free_i64(tmp);
7681 /* Set N and Z flags from hi|lo. */
7682 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
7684 tcg_gen_mov_i32(cpu_NF, hi);
7685 tcg_gen_or_i32(cpu_ZF, lo, hi);
7688 /* Load/Store exclusive instructions are implemented by remembering
7689 the value/address loaded, and seeing if these are the same
7690 when the store is performed. This should be sufficient to implement
7691 the architecturally mandated semantics, and avoids having to monitor
7692 regular stores.
7694 In system emulation mode only one CPU will be running at once, so
7695 this sequence is effectively atomic. In user emulation mode we
7696 throw an exception and handle the atomic operation elsewhere. */
7697 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
7698 TCGv_i32 addr, int size)
7700 TCGv_i32 tmp = tcg_temp_new_i32();
7702 s->is_ldex = true;
7704 switch (size) {
7705 case 0:
7706 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
7707 break;
7708 case 1:
7709 gen_aa32_ld16ua(s, tmp, addr, get_mem_index(s));
7710 break;
7711 case 2:
7712 case 3:
7713 gen_aa32_ld32ua(s, tmp, addr, get_mem_index(s));
7714 break;
7715 default:
7716 abort();
7719 if (size == 3) {
7720 TCGv_i32 tmp2 = tcg_temp_new_i32();
7721 TCGv_i32 tmp3 = tcg_temp_new_i32();
7723 tcg_gen_addi_i32(tmp2, addr, 4);
7724 gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
7725 tcg_temp_free_i32(tmp2);
7726 tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
7727 store_reg(s, rt2, tmp3);
7728 } else {
7729 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
7732 store_reg(s, rt, tmp);
7733 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
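/* The exclusive monitor state is just this (address, value) pair held in
 * cpu_exclusive_addr/cpu_exclusive_val: gen_clrex() below invalidates it by
 * setting the address to -1, and gen_store_exclusive() only performs the
 * store if both still match.
 */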
7736 static void gen_clrex(DisasContext *s)
7738 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7741 #ifdef CONFIG_USER_ONLY
7742 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7743 TCGv_i32 addr, int size)
7745 tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
7746 tcg_gen_movi_i32(cpu_exclusive_info,
7747 size | (rd << 4) | (rt << 8) | (rt2 << 12));
7748 gen_exception_internal_insn(s, 4, EXCP_STREX);
7750 #else
7751 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7752 TCGv_i32 addr, int size)
7754 TCGv_i32 tmp;
7755 TCGv_i64 val64, extaddr;
7756 TCGLabel *done_label;
7757 TCGLabel *fail_label;
7759 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7760 [addr] = {Rt};
7761 {Rd} = 0;
7762 } else {
7763 {Rd} = 1;
7764 } */
7765 fail_label = gen_new_label();
7766 done_label = gen_new_label();
7767 extaddr = tcg_temp_new_i64();
7768 tcg_gen_extu_i32_i64(extaddr, addr);
7769 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7770 tcg_temp_free_i64(extaddr);
7772 tmp = tcg_temp_new_i32();
7773 switch (size) {
7774 case 0:
7775 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
7776 break;
7777 case 1:
7778 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
7779 break;
7780 case 2:
7781 case 3:
7782 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
7783 break;
7784 default:
7785 abort();
7788 val64 = tcg_temp_new_i64();
7789 if (size == 3) {
7790 TCGv_i32 tmp2 = tcg_temp_new_i32();
7791 TCGv_i32 tmp3 = tcg_temp_new_i32();
7792 tcg_gen_addi_i32(tmp2, addr, 4);
7793 gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
7794 tcg_temp_free_i32(tmp2);
7795 tcg_gen_concat_i32_i64(val64, tmp, tmp3);
7796 tcg_temp_free_i32(tmp3);
7797 } else {
7798 tcg_gen_extu_i32_i64(val64, tmp);
7800 tcg_temp_free_i32(tmp);
7802 tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
7803 tcg_temp_free_i64(val64);
7805 tmp = load_reg(s, rt);
7806 switch (size) {
7807 case 0:
7808 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
7809 break;
7810 case 1:
7811 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
7812 break;
7813 case 2:
7814 case 3:
7815 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7816 break;
7817 default:
7818 abort();
7820 tcg_temp_free_i32(tmp);
7821 if (size == 3) {
7822 tcg_gen_addi_i32(addr, addr, 4);
7823 tmp = load_reg(s, rt2);
7824 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7825 tcg_temp_free_i32(tmp);
7827 tcg_gen_movi_i32(cpu_R[rd], 0);
7828 tcg_gen_br(done_label);
7829 gen_set_label(fail_label);
7830 tcg_gen_movi_i32(cpu_R[rd], 1);
7831 gen_set_label(done_label);
7832 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7834 #endif
7836 /* gen_srs:
7837 * @env: CPUARMState
7838 * @s: DisasContext
7839 * @mode: mode field from insn (which stack to store to)
7840 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7841 * @writeback: true if writeback bit set
7843 * Generate code for the SRS (Store Return State) insn.
7845 static void gen_srs(DisasContext *s,
7846 uint32_t mode, uint32_t amode, bool writeback)
7848 int32_t offset;
7849 TCGv_i32 addr, tmp;
7850 bool undef = false;
7852 /* SRS is:
7853 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
7854 * and specified mode is monitor mode
7855 * - UNDEFINED in Hyp mode
7856 * - UNPREDICTABLE in User or System mode
7857 * - UNPREDICTABLE if the specified mode is:
7858 * -- not implemented
7859 * -- not a valid mode number
7860 * -- a mode that's at a higher exception level
7861 * -- Monitor, if we are Non-secure
7862 * For the UNPREDICTABLE cases we choose to UNDEF.
7864 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
7865 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
7866 return;
7869 if (s->current_el == 0 || s->current_el == 2) {
7870 undef = true;
7873 switch (mode) {
7874 case ARM_CPU_MODE_USR:
7875 case ARM_CPU_MODE_FIQ:
7876 case ARM_CPU_MODE_IRQ:
7877 case ARM_CPU_MODE_SVC:
7878 case ARM_CPU_MODE_ABT:
7879 case ARM_CPU_MODE_UND:
7880 case ARM_CPU_MODE_SYS:
7881 break;
7882 case ARM_CPU_MODE_HYP:
7883 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
7884 undef = true;
7886 break;
7887 case ARM_CPU_MODE_MON:
7888 /* No need to check specifically for "are we non-secure" because
7889 * we've already made EL0 UNDEF and handled the trap for S-EL1;
7890 * so if this isn't EL3 then we must be non-secure.
7892 if (s->current_el != 3) {
7893 undef = true;
7895 break;
7896 default:
7897 undef = true;
7900 if (undef) {
7901 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
7902 default_exception_el(s));
7903 return;
7906 addr = tcg_temp_new_i32();
7907 tmp = tcg_const_i32(mode);
7908 /* get_r13_banked() will raise an exception if called from System mode */
7909 gen_set_condexec(s);
7910 gen_set_pc_im(s, s->pc - 4);
7911 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7912 tcg_temp_free_i32(tmp);
7913 switch (amode) {
7914 case 0: /* DA */
7915 offset = -4;
7916 break;
7917 case 1: /* IA */
7918 offset = 0;
7919 break;
7920 case 2: /* DB */
7921 offset = -8;
7922 break;
7923 case 3: /* IB */
7924 offset = 4;
7925 break;
7926 default:
7927 abort();
7929 tcg_gen_addi_i32(addr, addr, offset);
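/* SRS stores LR at [addr] and SPSR at [addr + 4]; the offset applied above
 * positions addr relative to the banked SP as the DA/IA/DB/IB addressing
 * mode requires.
 */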
7930 tmp = load_reg(s, 14);
7931 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7932 tcg_temp_free_i32(tmp);
7933 tmp = load_cpu_field(spsr);
7934 tcg_gen_addi_i32(addr, addr, 4);
7935 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7936 tcg_temp_free_i32(tmp);
7937 if (writeback) {
7938 switch (amode) {
7939 case 0:
7940 offset = -8;
7941 break;
7942 case 1:
7943 offset = 4;
7944 break;
7945 case 2:
7946 offset = -4;
7947 break;
7948 case 3:
7949 offset = 0;
7950 break;
7951 default:
7952 abort();
7954 tcg_gen_addi_i32(addr, addr, offset);
7955 tmp = tcg_const_i32(mode);
7956 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7957 tcg_temp_free_i32(tmp);
7959 tcg_temp_free_i32(addr);
7960 s->is_jmp = DISAS_UPDATE;
7963 static void disas_arm_insn(DisasContext *s, unsigned int insn)
7965 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
7966 TCGv_i32 tmp;
7967 TCGv_i32 tmp2;
7968 TCGv_i32 tmp3;
7969 TCGv_i32 addr;
7970 TCGv_i64 tmp64;
7972 /* M variants do not implement ARM mode. */
7973 if (arm_dc_feature(s, ARM_FEATURE_M)) {
7974 goto illegal_op;
7976 cond = insn >> 28;
7977 if (cond == 0xf){
7978 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7979 * choose to UNDEF. In ARMv5 and above the space is used
7980 * for miscellaneous unconditional instructions.
7982 ARCH(5);
7984 /* Unconditional instructions. */
7985 if (((insn >> 25) & 7) == 1) {
7986 /* NEON Data processing. */
7987 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7988 goto illegal_op;
7991 if (disas_neon_data_insn(s, insn)) {
7992 goto illegal_op;
7994 return;
7996 if ((insn & 0x0f100000) == 0x04000000) {
7997 /* NEON load/store. */
7998 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7999 goto illegal_op;
8002 if (disas_neon_ls_insn(s, insn)) {
8003 goto illegal_op;
8005 return;
8007 if ((insn & 0x0f000e10) == 0x0e000a00) {
8008 /* VFP. */
8009 if (disas_vfp_insn(s, insn)) {
8010 goto illegal_op;
8012 return;
8014 if (((insn & 0x0f30f000) == 0x0510f000) ||
8015 ((insn & 0x0f30f010) == 0x0710f000)) {
8016 if ((insn & (1 << 22)) == 0) {
8017 /* PLDW; v7MP */
8018 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
8019 goto illegal_op;
8022 /* Otherwise PLD; v5TE+ */
8023 ARCH(5TE);
8024 return;
8026 if (((insn & 0x0f70f000) == 0x0450f000) ||
8027 ((insn & 0x0f70f010) == 0x0650f000)) {
8028 ARCH(7);
8029 return; /* PLI; V7 */
8031 if (((insn & 0x0f700000) == 0x04100000) ||
8032 ((insn & 0x0f700010) == 0x06100000)) {
8033 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
8034 goto illegal_op;
8036 return; /* v7MP: Unallocated memory hint: must NOP */
8039 if ((insn & 0x0ffffdff) == 0x01010000) {
8040 ARCH(6);
8041 /* setend */
8042 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8043 gen_helper_setend(cpu_env);
8044 s->is_jmp = DISAS_UPDATE;
8046 return;
8047 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8048 switch ((insn >> 4) & 0xf) {
8049 case 1: /* clrex */
8050 ARCH(6K);
8051 gen_clrex(s);
8052 return;
8053 case 4: /* dsb */
8054 case 5: /* dmb */
8055 ARCH(7);
8056 /* We don't emulate caches so these are a no-op. */
8057 return;
8058 case 6: /* isb */
8059 /* We need to break the TB after this insn to execute
8060 * self-modifying code correctly and also to take
8061 * any pending interrupts immediately.
8063 gen_lookup_tb(s);
8064 return;
8065 default:
8066 goto illegal_op;
8068 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8069 /* srs */
8070 ARCH(6);
8071 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
8072 return;
8073 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
8074 /* rfe */
8075 int32_t offset;
8076 if (IS_USER(s))
8077 goto illegal_op;
8078 ARCH(6);
8079 rn = (insn >> 16) & 0xf;
8080 addr = load_reg(s, rn);
8081 i = (insn >> 23) & 3;
8082 switch (i) {
8083 case 0: offset = -4; break; /* DA */
8084 case 1: offset = 0; break; /* IA */
8085 case 2: offset = -8; break; /* DB */
8086 case 3: offset = 4; break; /* IB */
8087 default: abort();
8089 if (offset)
8090 tcg_gen_addi_i32(addr, addr, offset);
8091 /* Load PC into tmp and CPSR into tmp2. */
8092 tmp = tcg_temp_new_i32();
8093 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8094 tcg_gen_addi_i32(addr, addr, 4);
8095 tmp2 = tcg_temp_new_i32();
8096 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
8097 if (insn & (1 << 21)) {
8098 /* Base writeback. */
8099 switch (i) {
8100 case 0: offset = -8; break;
8101 case 1: offset = 4; break;
8102 case 2: offset = -4; break;
8103 case 3: offset = 0; break;
8104 default: abort();
8106 if (offset)
8107 tcg_gen_addi_i32(addr, addr, offset);
8108 store_reg(s, rn, addr);
8109 } else {
8110 tcg_temp_free_i32(addr);
8112 gen_rfe(s, tmp, tmp2);
8113 return;
8114 } else if ((insn & 0x0e000000) == 0x0a000000) {
8115 /* branch link and change to thumb (blx <offset>) */
8116 int32_t offset;
8118 val = (uint32_t)s->pc;
8119 tmp = tcg_temp_new_i32();
8120 tcg_gen_movi_i32(tmp, val);
8121 store_reg(s, 14, tmp);
8122 /* Sign-extend the 24-bit offset */
8123 offset = (((int32_t)insn) << 8) >> 8;
8124 /* offset * 4 + bit24 * 2 + (thumb bit) */
8125 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8126 /* pipeline offset */
8127 val += 4;
8128 /* protected by ARCH(5); above, near the start of uncond block */
8129 gen_bx_im(s, val);
8130 return;
8131 } else if ((insn & 0x0e000f00) == 0x0c000100) {
8132 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
8133 /* iWMMXt register transfer. */
8134 if (extract32(s->c15_cpar, 1, 1)) {
8135 if (!disas_iwmmxt_insn(s, insn)) {
8136 return;
8140 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8141 /* Coprocessor double register transfer. */
8142 ARCH(5TE);
8143 } else if ((insn & 0x0f000010) == 0x0e000010) {
8144 /* Additional coprocessor register transfer. */
8145 } else if ((insn & 0x0ff10020) == 0x01000000) {
8146 uint32_t mask;
8147 uint32_t val;
8148 /* cps (privileged) */
8149 if (IS_USER(s))
8150 return;
8151 mask = val = 0;
8152 if (insn & (1 << 19)) {
8153 if (insn & (1 << 8))
8154 mask |= CPSR_A;
8155 if (insn & (1 << 7))
8156 mask |= CPSR_I;
8157 if (insn & (1 << 6))
8158 mask |= CPSR_F;
8159 if (insn & (1 << 18))
8160 val |= mask;
8162 if (insn & (1 << 17)) {
8163 mask |= CPSR_M;
8164 val |= (insn & 0x1f);
8166 if (mask) {
8167 gen_set_psr_im(s, mask, 0, val);
8169 return;
8171 goto illegal_op;
8173 if (cond != 0xe) {
8174 /* If the condition is not 'always execute', generate a conditional jump
8175 to the next instruction. */
8176 s->condlabel = gen_new_label();
8177 arm_gen_test_cc(cond ^ 1, s->condlabel);
8178 s->condjmp = 1;
8180 if ((insn & 0x0f900000) == 0x03000000) {
8181 if ((insn & (1 << 21)) == 0) {
8182 ARCH(6T2);
8183 rd = (insn >> 12) & 0xf;
8184 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
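/* The 16-bit immediate is imm4:imm12 (insn[19:16] concatenated with
 * insn[11:0]); MOVW zero-extends it into rd, MOVT replaces only the top
 * halfword.
 */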
8185 if ((insn & (1 << 22)) == 0) {
8186 /* MOVW */
8187 tmp = tcg_temp_new_i32();
8188 tcg_gen_movi_i32(tmp, val);
8189 } else {
8190 /* MOVT */
8191 tmp = load_reg(s, rd);
8192 tcg_gen_ext16u_i32(tmp, tmp);
8193 tcg_gen_ori_i32(tmp, tmp, val << 16);
8195 store_reg(s, rd, tmp);
8196 } else {
8197 if (((insn >> 12) & 0xf) != 0xf)
8198 goto illegal_op;
8199 if (((insn >> 16) & 0xf) == 0) {
8200 gen_nop_hint(s, insn & 0xff);
8201 } else {
8202 /* CPSR = immediate */
8203 val = insn & 0xff;
8204 shift = ((insn >> 8) & 0xf) * 2;
8205 if (shift)
8206 val = (val >> shift) | (val << (32 - shift));
8207 i = ((insn & (1 << 22)) != 0);
8208 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8209 i, val)) {
8210 goto illegal_op;
8214 } else if ((insn & 0x0f900000) == 0x01000000
8215 && (insn & 0x00000090) != 0x00000090) {
8216 /* miscellaneous instructions */
8217 op1 = (insn >> 21) & 3;
8218 sh = (insn >> 4) & 0xf;
8219 rm = insn & 0xf;
8220 switch (sh) {
8221 case 0x0: /* MSR, MRS */
8222 if (insn & (1 << 9)) {
8223 /* MSR (banked) and MRS (banked) */
8224 int sysm = extract32(insn, 16, 4) |
8225 (extract32(insn, 8, 1) << 4);
8226 int r = extract32(insn, 22, 1);
8228 if (op1 & 1) {
8229 /* MSR (banked) */
8230 gen_msr_banked(s, r, sysm, rm);
8231 } else {
8232 /* MRS (banked) */
8233 int rd = extract32(insn, 12, 4);
8235 gen_mrs_banked(s, r, sysm, rd);
8237 break;
8240 /* MSR, MRS (for PSRs) */
8241 if (op1 & 1) {
8242 /* PSR = reg */
8243 tmp = load_reg(s, rm);
8244 i = ((op1 & 2) != 0);
8245 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
8246 goto illegal_op;
8247 } else {
8248 /* reg = PSR */
8249 rd = (insn >> 12) & 0xf;
8250 if (op1 & 2) {
8251 if (IS_USER(s))
8252 goto illegal_op;
8253 tmp = load_cpu_field(spsr);
8254 } else {
8255 tmp = tcg_temp_new_i32();
8256 gen_helper_cpsr_read(tmp, cpu_env);
8258 store_reg(s, rd, tmp);
8260 break;
8261 case 0x1:
8262 if (op1 == 1) {
8263 /* branch/exchange thumb (bx). */
8264 ARCH(4T);
8265 tmp = load_reg(s, rm);
8266 gen_bx(s, tmp);
8267 } else if (op1 == 3) {
8268 /* clz */
8269 ARCH(5);
8270 rd = (insn >> 12) & 0xf;
8271 tmp = load_reg(s, rm);
8272 gen_helper_clz(tmp, tmp);
8273 store_reg(s, rd, tmp);
8274 } else {
8275 goto illegal_op;
8277 break;
8278 case 0x2:
8279 if (op1 == 1) {
8280 ARCH(5J); /* bxj */
8281 /* Trivial implementation equivalent to bx. */
8282 tmp = load_reg(s, rm);
8283 gen_bx(s, tmp);
8284 } else {
8285 goto illegal_op;
8287 break;
8288 case 0x3:
8289 if (op1 != 1)
8290 goto illegal_op;
8292 ARCH(5);
8293 /* branch link/exchange thumb (blx) */
8294 tmp = load_reg(s, rm);
8295 tmp2 = tcg_temp_new_i32();
8296 tcg_gen_movi_i32(tmp2, s->pc);
8297 store_reg(s, 14, tmp2);
8298 gen_bx(s, tmp);
8299 break;
8300 case 0x4:
8302 /* crc32/crc32c */
8303 uint32_t c = extract32(insn, 8, 4);
8305 /* Check this CPU supports ARMv8 CRC instructions.
8306 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8307 * Bits 8, 10 and 11 should be zero.
8309 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
8310 (c & 0xd) != 0) {
8311 goto illegal_op;
8314 rn = extract32(insn, 16, 4);
8315 rd = extract32(insn, 12, 4);
8317 tmp = load_reg(s, rn);
8318 tmp2 = load_reg(s, rm);
8319 if (op1 == 0) {
8320 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8321 } else if (op1 == 1) {
8322 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8324 tmp3 = tcg_const_i32(1 << op1);
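/* op1 selects the operand width (1 << op1 bytes, i.e. CRC32B/H/W) and bit 1
 * of the opcode field selects the CRC-32C (Castagnoli) polynomial variant.
 */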
8325 if (c & 0x2) {
8326 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8327 } else {
8328 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8330 tcg_temp_free_i32(tmp2);
8331 tcg_temp_free_i32(tmp3);
8332 store_reg(s, rd, tmp);
8333 break;
8335 case 0x5: /* saturating add/subtract */
8336 ARCH(5TE);
8337 rd = (insn >> 12) & 0xf;
8338 rn = (insn >> 16) & 0xf;
8339 tmp = load_reg(s, rm);
8340 tmp2 = load_reg(s, rn);
8341 if (op1 & 2)
8342 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
8343 if (op1 & 1)
8344 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
8345 else
8346 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8347 tcg_temp_free_i32(tmp2);
8348 store_reg(s, rd, tmp);
8349 break;
8350 case 7:
8352 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
8353 switch (op1) {
8354 case 1:
8355 /* bkpt */
8356 ARCH(5);
8357 gen_exception_insn(s, 4, EXCP_BKPT,
8358 syn_aa32_bkpt(imm16, false),
8359 default_exception_el(s));
8360 break;
8361 case 2:
8362 /* Hypervisor call (v7) */
8363 ARCH(7);
8364 if (IS_USER(s)) {
8365 goto illegal_op;
8367 gen_hvc(s, imm16);
8368 break;
8369 case 3:
8370 /* Secure monitor call (v6+) */
8371 ARCH(6K);
8372 if (IS_USER(s)) {
8373 goto illegal_op;
8375 gen_smc(s);
8376 break;
8377 default:
8378 goto illegal_op;
8380 break;
8382 case 0x8: /* signed multiply */
8383 case 0xa:
8384 case 0xc:
8385 case 0xe:
8386 ARCH(5TE);
8387 rs = (insn >> 8) & 0xf;
8388 rn = (insn >> 12) & 0xf;
8389 rd = (insn >> 16) & 0xf;
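/* op1 == 1 is the SMULW/SMLAW form ((32 x 16) >> 16); the remaining
 * encodings are 16 x 16 SMUL/SMLA variants, with op1 == 2 (SMLAL)
 * accumulating into a 64-bit register pair.
 */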
8390 if (op1 == 1) {
8391 /* (32 * 16) >> 16 */
8392 tmp = load_reg(s, rm);
8393 tmp2 = load_reg(s, rs);
8394 if (sh & 4)
8395 tcg_gen_sari_i32(tmp2, tmp2, 16);
8396 else
8397 gen_sxth(tmp2);
8398 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8399 tcg_gen_shri_i64(tmp64, tmp64, 16);
8400 tmp = tcg_temp_new_i32();
8401 tcg_gen_extrl_i64_i32(tmp, tmp64);
8402 tcg_temp_free_i64(tmp64);
8403 if ((sh & 2) == 0) {
8404 tmp2 = load_reg(s, rn);
8405 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8406 tcg_temp_free_i32(tmp2);
8408 store_reg(s, rd, tmp);
8409 } else {
8410 /* 16 * 16 */
8411 tmp = load_reg(s, rm);
8412 tmp2 = load_reg(s, rs);
8413 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
8414 tcg_temp_free_i32(tmp2);
8415 if (op1 == 2) {
8416 tmp64 = tcg_temp_new_i64();
8417 tcg_gen_ext_i32_i64(tmp64, tmp);
8418 tcg_temp_free_i32(tmp);
8419 gen_addq(s, tmp64, rn, rd);
8420 gen_storeq_reg(s, rn, rd, tmp64);
8421 tcg_temp_free_i64(tmp64);
8422 } else {
8423 if (op1 == 0) {
8424 tmp2 = load_reg(s, rn);
8425 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8426 tcg_temp_free_i32(tmp2);
8428 store_reg(s, rd, tmp);
8431 break;
8432 default:
8433 goto illegal_op;
8435 } else if (((insn & 0x0e000000) == 0 &&
8436 (insn & 0x00000090) != 0x90) ||
8437 ((insn & 0x0e000000) == (1 << 25))) {
8438 int set_cc, logic_cc, shiftop;
8440 op1 = (insn >> 21) & 0xf;
8441 set_cc = (insn >> 20) & 1;
8442 logic_cc = table_logic_cc[op1] & set_cc;
8444 /* data processing instruction */
8445 if (insn & (1 << 25)) {
8446 /* immediate operand */
8447 val = insn & 0xff;
8448 shift = ((insn >> 8) & 0xf) * 2;
8449 if (shift) {
8450 val = (val >> shift) | (val << (32 - shift));
8452 tmp2 = tcg_temp_new_i32();
8453 tcg_gen_movi_i32(tmp2, val);
8454 if (logic_cc && shift) {
8455 gen_set_CF_bit31(tmp2);
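/* For logical ops with a non-zero rotation, the shifter carry-out is bit 31
 * of the rotated immediate, which becomes the C flag.
 */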
8457 } else {
8458 /* register */
8459 rm = (insn) & 0xf;
8460 tmp2 = load_reg(s, rm);
8461 shiftop = (insn >> 5) & 3;
8462 if (!(insn & (1 << 4))) {
8463 shift = (insn >> 7) & 0x1f;
8464 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8465 } else {
8466 rs = (insn >> 8) & 0xf;
8467 tmp = load_reg(s, rs);
8468 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
8471 if (op1 != 0x0f && op1 != 0x0d) {
8472 rn = (insn >> 16) & 0xf;
8473 tmp = load_reg(s, rn);
8474 } else {
8475 TCGV_UNUSED_I32(tmp);
8477 rd = (insn >> 12) & 0xf;
8478 switch(op1) {
8479 case 0x00:
8480 tcg_gen_and_i32(tmp, tmp, tmp2);
8481 if (logic_cc) {
8482 gen_logic_CC(tmp);
8484 store_reg_bx(s, rd, tmp);
8485 break;
8486 case 0x01:
8487 tcg_gen_xor_i32(tmp, tmp, tmp2);
8488 if (logic_cc) {
8489 gen_logic_CC(tmp);
8491 store_reg_bx(s, rd, tmp);
8492 break;
8493 case 0x02:
8494 if (set_cc && rd == 15) {
8495 /* SUBS r15, ... is used for exception return. */
8496 if (IS_USER(s)) {
8497 goto illegal_op;
8499 gen_sub_CC(tmp, tmp, tmp2);
8500 gen_exception_return(s, tmp);
8501 } else {
8502 if (set_cc) {
8503 gen_sub_CC(tmp, tmp, tmp2);
8504 } else {
8505 tcg_gen_sub_i32(tmp, tmp, tmp2);
8507 store_reg_bx(s, rd, tmp);
8509 break;
8510 case 0x03:
8511 if (set_cc) {
8512 gen_sub_CC(tmp, tmp2, tmp);
8513 } else {
8514 tcg_gen_sub_i32(tmp, tmp2, tmp);
8516 store_reg_bx(s, rd, tmp);
8517 break;
8518 case 0x04:
8519 if (set_cc) {
8520 gen_add_CC(tmp, tmp, tmp2);
8521 } else {
8522 tcg_gen_add_i32(tmp, tmp, tmp2);
8524 store_reg_bx(s, rd, tmp);
8525 break;
8526 case 0x05:
8527 if (set_cc) {
8528 gen_adc_CC(tmp, tmp, tmp2);
8529 } else {
8530 gen_add_carry(tmp, tmp, tmp2);
8532 store_reg_bx(s, rd, tmp);
8533 break;
8534 case 0x06:
8535 if (set_cc) {
8536 gen_sbc_CC(tmp, tmp, tmp2);
8537 } else {
8538 gen_sub_carry(tmp, tmp, tmp2);
8540 store_reg_bx(s, rd, tmp);
8541 break;
8542 case 0x07:
8543 if (set_cc) {
8544 gen_sbc_CC(tmp, tmp2, tmp);
8545 } else {
8546 gen_sub_carry(tmp, tmp2, tmp);
8548 store_reg_bx(s, rd, tmp);
8549 break;
8550 case 0x08:
8551 if (set_cc) {
8552 tcg_gen_and_i32(tmp, tmp, tmp2);
8553 gen_logic_CC(tmp);
8555 tcg_temp_free_i32(tmp);
8556 break;
8557 case 0x09:
8558 if (set_cc) {
8559 tcg_gen_xor_i32(tmp, tmp, tmp2);
8560 gen_logic_CC(tmp);
8562 tcg_temp_free_i32(tmp);
8563 break;
8564 case 0x0a:
8565 if (set_cc) {
8566 gen_sub_CC(tmp, tmp, tmp2);
8568 tcg_temp_free_i32(tmp);
8569 break;
8570 case 0x0b:
8571 if (set_cc) {
8572 gen_add_CC(tmp, tmp, tmp2);
8574 tcg_temp_free_i32(tmp);
8575 break;
8576 case 0x0c:
8577 tcg_gen_or_i32(tmp, tmp, tmp2);
8578 if (logic_cc) {
8579 gen_logic_CC(tmp);
8581 store_reg_bx(s, rd, tmp);
8582 break;
8583 case 0x0d:
8584 if (logic_cc && rd == 15) {
8585 /* MOVS r15, ... is used for exception return. */
8586 if (IS_USER(s)) {
8587 goto illegal_op;
8589 gen_exception_return(s, tmp2);
8590 } else {
8591 if (logic_cc) {
8592 gen_logic_CC(tmp2);
8594 store_reg_bx(s, rd, tmp2);
8596 break;
8597 case 0x0e:
8598 tcg_gen_andc_i32(tmp, tmp, tmp2);
8599 if (logic_cc) {
8600 gen_logic_CC(tmp);
8602 store_reg_bx(s, rd, tmp);
8603 break;
8604 default:
8605 case 0x0f:
8606 tcg_gen_not_i32(tmp2, tmp2);
8607 if (logic_cc) {
8608 gen_logic_CC(tmp2);
8610 store_reg_bx(s, rd, tmp2);
8611 break;
8613 if (op1 != 0x0f && op1 != 0x0d) {
8614 tcg_temp_free_i32(tmp2);
8616 } else {
8617 /* other instructions */
8618 op1 = (insn >> 24) & 0xf;
8619 switch(op1) {
8620 case 0x0:
8621 case 0x1:
8622 /* multiplies, extra load/stores */
8623 sh = (insn >> 5) & 3;
8624 if (sh == 0) {
8625 if (op1 == 0x0) {
8626 rd = (insn >> 16) & 0xf;
8627 rn = (insn >> 12) & 0xf;
8628 rs = (insn >> 8) & 0xf;
8629 rm = (insn) & 0xf;
8630 op1 = (insn >> 20) & 0xf;
8631 switch (op1) {
8632 case 0: case 1: case 2: case 3: case 6:
8633 /* 32 bit mul */
8634 tmp = load_reg(s, rs);
8635 tmp2 = load_reg(s, rm);
8636 tcg_gen_mul_i32(tmp, tmp, tmp2);
8637 tcg_temp_free_i32(tmp2);
8638 if (insn & (1 << 22)) {
8639 /* Subtract (mls) */
8640 ARCH(6T2);
8641 tmp2 = load_reg(s, rn);
8642 tcg_gen_sub_i32(tmp, tmp2, tmp);
8643 tcg_temp_free_i32(tmp2);
8644 } else if (insn & (1 << 21)) {
8645 /* Add */
8646 tmp2 = load_reg(s, rn);
8647 tcg_gen_add_i32(tmp, tmp, tmp2);
8648 tcg_temp_free_i32(tmp2);
8650 if (insn & (1 << 20))
8651 gen_logic_CC(tmp);
8652 store_reg(s, rd, tmp);
8653 break;
8654 case 4:
8655 /* 64 bit mul double accumulate (UMAAL) */
8656 ARCH(6);
8657 tmp = load_reg(s, rs);
8658 tmp2 = load_reg(s, rm);
8659 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8660 gen_addq_lo(s, tmp64, rn);
8661 gen_addq_lo(s, tmp64, rd);
8662 gen_storeq_reg(s, rn, rd, tmp64);
8663 tcg_temp_free_i64(tmp64);
8664 break;
8665 case 8: case 9: case 10: case 11:
8666 case 12: case 13: case 14: case 15:
8667 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8668 tmp = load_reg(s, rs);
8669 tmp2 = load_reg(s, rm);
8670 if (insn & (1 << 22)) {
8671 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8672 } else {
8673 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8675 if (insn & (1 << 21)) { /* mult accumulate */
8676 TCGv_i32 al = load_reg(s, rn);
8677 TCGv_i32 ah = load_reg(s, rd);
8678 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
8679 tcg_temp_free_i32(al);
8680 tcg_temp_free_i32(ah);
8682 if (insn & (1 << 20)) {
8683 gen_logicq_cc(tmp, tmp2);
8685 store_reg(s, rn, tmp);
8686 store_reg(s, rd, tmp2);
8687 break;
8688 default:
8689 goto illegal_op;
8691 } else {
8692 rn = (insn >> 16) & 0xf;
8693 rd = (insn >> 12) & 0xf;
8694 if (insn & (1 << 23)) {
8695 /* load/store exclusive */
8696 int op2 = (insn >> 8) & 3;
8697 op1 = (insn >> 21) & 0x3;
8699 switch (op2) {
8700 case 0: /* lda/stl */
8701 if (op1 == 1) {
8702 goto illegal_op;
8704 ARCH(8);
8705 break;
8706 case 1: /* reserved */
8707 goto illegal_op;
8708 case 2: /* ldaex/stlex */
8709 ARCH(8);
8710 break;
8711 case 3: /* ldrex/strex */
8712 if (op1) {
8713 ARCH(6K);
8714 } else {
8715 ARCH(6);
8717 break;
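/* For the exclusive and acquire/release forms, op1 selects the access
 * size: 0 = word, 1 = doubleword, 2 = byte, 3 = halfword (doubleword is
 * invalid for the plain lda/stl group, as checked above). */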
8720 addr = tcg_temp_local_new_i32();
8721 load_reg_var(s, addr, rn);
8723 /* Since the emulation does not have barriers,
8724 the acquire/release semantics need no special
8725 handling */
8726 if (op2 == 0) {
8727 if (insn & (1 << 20)) {
8728 tmp = tcg_temp_new_i32();
8729 switch (op1) {
8730 case 0: /* lda */
8731 gen_aa32_ld32u(s, tmp, addr,
8732 get_mem_index(s));
8733 break;
8734 case 2: /* ldab */
8735 gen_aa32_ld8u(s, tmp, addr,
8736 get_mem_index(s));
8737 break;
8738 case 3: /* ldah */
8739 gen_aa32_ld16u(s, tmp, addr,
8740 get_mem_index(s));
8741 break;
8742 default:
8743 abort();
8745 store_reg(s, rd, tmp);
8746 } else {
8747 rm = insn & 0xf;
8748 tmp = load_reg(s, rm);
8749 switch (op1) {
8750 case 0: /* stl */
8751 gen_aa32_st32(s, tmp, addr,
8752 get_mem_index(s));
8753 break;
8754 case 2: /* stlb */
8755 gen_aa32_st8(s, tmp, addr,
8756 get_mem_index(s));
8757 break;
8758 case 3: /* stlh */
8759 gen_aa32_st16(s, tmp, addr,
8760 get_mem_index(s));
8761 break;
8762 default:
8763 abort();
8765 tcg_temp_free_i32(tmp);
8767 } else if (insn & (1 << 20)) {
8768 switch (op1) {
8769 case 0: /* ldrex */
8770 gen_load_exclusive(s, rd, 15, addr, 2);
8771 break;
8772 case 1: /* ldrexd */
8773 gen_load_exclusive(s, rd, rd + 1, addr, 3);
8774 break;
8775 case 2: /* ldrexb */
8776 gen_load_exclusive(s, rd, 15, addr, 0);
8777 break;
8778 case 3: /* ldrexh */
8779 gen_load_exclusive(s, rd, 15, addr, 1);
8780 break;
8781 default:
8782 abort();
8784 } else {
8785 rm = insn & 0xf;
8786 switch (op1) {
8787 case 0: /* strex */
8788 gen_store_exclusive(s, rd, rm, 15, addr, 2);
8789 break;
8790 case 1: /* strexd */
8791 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
8792 break;
8793 case 2: /* strexb */
8794 gen_store_exclusive(s, rd, rm, 15, addr, 0);
8795 break;
8796 case 3: /* strexh */
8797 gen_store_exclusive(s, rd, rm, 15, addr, 1);
8798 break;
8799 default:
8800 abort();
8803 tcg_temp_free_i32(addr);
8804 } else {
8805 /* SWP instruction */
8806 rm = (insn) & 0xf;
8808 /* ??? This is not really atomic. However we know
8809 we never have multiple CPUs running in parallel,
8810 so it is good enough. */
8811 addr = load_reg(s, rn);
8812 tmp = load_reg(s, rm);
8813 tmp2 = tcg_temp_new_i32();
8814 if (insn & (1 << 22)) {
8815 gen_aa32_ld8u(s, tmp2, addr, get_mem_index(s));
8816 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
8817 } else {
8818 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
8819 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8821 tcg_temp_free_i32(tmp);
8822 tcg_temp_free_i32(addr);
8823 store_reg(s, rd, tmp2);
8826 } else {
8827 int address_offset;
8828 bool load = insn & (1 << 20);
8829 bool doubleword = false;
8830 /* Misc load/store */
8831 rn = (insn >> 16) & 0xf;
8832 rd = (insn >> 12) & 0xf;
8834 if (!load && (sh & 2)) {
8835 /* doubleword */
8836 ARCH(5TE);
8837 if (rd & 1) {
8838 /* UNPREDICTABLE; we choose to UNDEF */
8839 goto illegal_op;
8841 load = (sh & 1) == 0;
8842 doubleword = true;
8845 addr = load_reg(s, rn);
8846 if (insn & (1 << 24))
8847 gen_add_datah_offset(s, insn, 0, addr);
8848 address_offset = 0;
8850 if (doubleword) {
8851 if (!load) {
8852 /* store */
8853 tmp = load_reg(s, rd);
8854 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8855 tcg_temp_free_i32(tmp);
8856 tcg_gen_addi_i32(addr, addr, 4);
8857 tmp = load_reg(s, rd + 1);
8858 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8859 tcg_temp_free_i32(tmp);
8860 } else {
8861 /* load */
8862 tmp = tcg_temp_new_i32();
8863 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8864 store_reg(s, rd, tmp);
8865 tcg_gen_addi_i32(addr, addr, 4);
8866 tmp = tcg_temp_new_i32();
8867 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8868 rd++;
8870 address_offset = -4;
8871 } else if (load) {
8872 /* load */
8873 tmp = tcg_temp_new_i32();
8874 switch (sh) {
8875 case 1:
8876 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8877 break;
8878 case 2:
8879 gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
8880 break;
8881 default:
8882 case 3:
8883 gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
8884 break;
8886 } else {
8887 /* store */
8888 tmp = load_reg(s, rd);
8889 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
8890 tcg_temp_free_i32(tmp);
8892 /* Perform base writeback before the loaded value to
8893 ensure correct behavior with overlapping index registers.
8894 ldrd with base writeback is undefined if the
8895 destination and index registers overlap. */
8896 if (!(insn & (1 << 24))) {
8897 gen_add_datah_offset(s, insn, address_offset, addr);
8898 store_reg(s, rn, addr);
8899 } else if (insn & (1 << 21)) {
8900 if (address_offset)
8901 tcg_gen_addi_i32(addr, addr, address_offset);
8902 store_reg(s, rn, addr);
8903 } else {
8904 tcg_temp_free_i32(addr);
8906 if (load) {
8907 /* Complete the load. */
8908 store_reg(s, rd, tmp);
8911 break;
8912 case 0x4:
8913 case 0x5:
8914 goto do_ldst;
8915 case 0x6:
8916 case 0x7:
8917 if (insn & (1 << 4)) {
8918 ARCH(6);
8919 /* Armv6 Media instructions. */
8920 rm = insn & 0xf;
8921 rn = (insn >> 16) & 0xf;
8922 rd = (insn >> 12) & 0xf;
8923 rs = (insn >> 8) & 0xf;
8924 switch ((insn >> 23) & 3) {
8925 case 0: /* Parallel add/subtract. */
8926 op1 = (insn >> 20) & 7;
8927 tmp = load_reg(s, rn);
8928 tmp2 = load_reg(s, rm);
8929 sh = (insn >> 5) & 7;
8930 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8931 goto illegal_op;
8932 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
8933 tcg_temp_free_i32(tmp2);
8934 store_reg(s, rd, tmp);
8935 break;
8936 case 1:
8937 if ((insn & 0x00700020) == 0) {
8938 /* Halfword pack. */
8939 tmp = load_reg(s, rn);
8940 tmp2 = load_reg(s, rm);
8941 shift = (insn >> 7) & 0x1f;
8942 if (insn & (1 << 6)) {
8943 /* pkhtb */
8944 if (shift == 0)
8945 shift = 31;
8946 tcg_gen_sari_i32(tmp2, tmp2, shift);
8947 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8948 tcg_gen_ext16u_i32(tmp2, tmp2);
8949 } else {
8950 /* pkhbt */
8951 if (shift)
8952 tcg_gen_shli_i32(tmp2, tmp2, shift);
8953 tcg_gen_ext16u_i32(tmp, tmp);
8954 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8956 tcg_gen_or_i32(tmp, tmp, tmp2);
8957 tcg_temp_free_i32(tmp2);
8958 store_reg(s, rd, tmp);
8959 } else if ((insn & 0x00200020) == 0x00200000) {
8960 /* [us]sat */
8961 tmp = load_reg(s, rm);
8962 shift = (insn >> 7) & 0x1f;
8963 if (insn & (1 << 6)) {
8964 if (shift == 0)
8965 shift = 31;
8966 tcg_gen_sari_i32(tmp, tmp, shift);
8967 } else {
8968 tcg_gen_shli_i32(tmp, tmp, shift);
8970 sh = (insn >> 16) & 0x1f;
8971 tmp2 = tcg_const_i32(sh);
8972 if (insn & (1 << 22))
8973 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
8974 else
8975 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
8976 tcg_temp_free_i32(tmp2);
8977 store_reg(s, rd, tmp);
8978 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8979 /* [us]sat16 */
8980 tmp = load_reg(s, rm);
8981 sh = (insn >> 16) & 0x1f;
8982 tmp2 = tcg_const_i32(sh);
8983 if (insn & (1 << 22))
8984 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
8985 else
8986 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
8987 tcg_temp_free_i32(tmp2);
8988 store_reg(s, rd, tmp);
8989 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8990 /* Select bytes. */
8991 tmp = load_reg(s, rn);
8992 tmp2 = load_reg(s, rm);
8993 tmp3 = tcg_temp_new_i32();
8994 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8995 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8996 tcg_temp_free_i32(tmp3);
8997 tcg_temp_free_i32(tmp2);
8998 store_reg(s, rd, tmp);
8999 } else if ((insn & 0x000003e0) == 0x00000060) {
9000 tmp = load_reg(s, rm);
9001 shift = (insn >> 10) & 3;
9002 /* ??? In many cases it's not necessary to do a
9003 rotate, a shift is sufficient. */
9004 if (shift != 0)
9005 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9006 op1 = (insn >> 20) & 7;
9007 switch (op1) {
9008 case 0: gen_sxtb16(tmp); break;
9009 case 2: gen_sxtb(tmp); break;
9010 case 3: gen_sxth(tmp); break;
9011 case 4: gen_uxtb16(tmp); break;
9012 case 6: gen_uxtb(tmp); break;
9013 case 7: gen_uxth(tmp); break;
9014 default: goto illegal_op;
9016 if (rn != 15) {
9017 tmp2 = load_reg(s, rn);
9018 if ((op1 & 3) == 0) {
9019 gen_add16(tmp, tmp2);
9020 } else {
9021 tcg_gen_add_i32(tmp, tmp, tmp2);
9022 tcg_temp_free_i32(tmp2);
9025 store_reg(s, rd, tmp);
9026 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9027 /* rev */
9028 tmp = load_reg(s, rm);
9029 if (insn & (1 << 22)) {
9030 if (insn & (1 << 7)) {
9031 gen_revsh(tmp);
9032 } else {
9033 ARCH(6T2);
9034 gen_helper_rbit(tmp, tmp);
9036 } else {
9037 if (insn & (1 << 7))
9038 gen_rev16(tmp);
9039 else
9040 tcg_gen_bswap32_i32(tmp, tmp);
9042 store_reg(s, rd, tmp);
9043 } else {
9044 goto illegal_op;
9046 break;
9047 case 2: /* Multiplies (Type 3). */
9048 switch ((insn >> 20) & 0x7) {
9049 case 5:
9050 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9051 /* op2 not 00x or 11x : UNDEF */
9052 goto illegal_op;
9054 /* Signed multiply most significant [accumulate].
9055 (SMMUL, SMMLA, SMMLS) */
9056 tmp = load_reg(s, rm);
9057 tmp2 = load_reg(s, rs);
9058 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9060 if (rd != 15) {
9061 tmp = load_reg(s, rd);
9062 if (insn & (1 << 6)) {
9063 tmp64 = gen_subq_msw(tmp64, tmp);
9064 } else {
9065 tmp64 = gen_addq_msw(tmp64, tmp);
9068 if (insn & (1 << 5)) {
9069 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9071 tcg_gen_shri_i64(tmp64, tmp64, 32);
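/* Bit 5 is the R (round) bit: adding 0x80000000 before the 32-bit right
 * shift rounds the 64-bit product to nearest instead of truncating. */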
9072 tmp = tcg_temp_new_i32();
9073 tcg_gen_extrl_i64_i32(tmp, tmp64);
9074 tcg_temp_free_i64(tmp64);
9075 store_reg(s, rn, tmp);
9076 break;
9077 case 0:
9078 case 4:
9079 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9080 if (insn & (1 << 7)) {
9081 goto illegal_op;
9083 tmp = load_reg(s, rm);
9084 tmp2 = load_reg(s, rs);
9085 if (insn & (1 << 5))
9086 gen_swap_half(tmp2);
9087 gen_smul_dual(tmp, tmp2);
9088 if (insn & (1 << 22)) {
9089 /* smlald, smlsld */
9090 TCGv_i64 tmp64_2;
9092 tmp64 = tcg_temp_new_i64();
9093 tmp64_2 = tcg_temp_new_i64();
9094 tcg_gen_ext_i32_i64(tmp64, tmp);
9095 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
9096 tcg_temp_free_i32(tmp);
9097 tcg_temp_free_i32(tmp2);
9098 if (insn & (1 << 6)) {
9099 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9100 } else {
9101 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9103 tcg_temp_free_i64(tmp64_2);
9104 gen_addq(s, tmp64, rd, rn);
9105 gen_storeq_reg(s, rd, rn, tmp64);
9106 tcg_temp_free_i64(tmp64);
9107 } else {
9108 /* smuad, smusd, smlad, smlsd */
9109 if (insn & (1 << 6)) {
9110 /* This subtraction cannot overflow. */
9111 tcg_gen_sub_i32(tmp, tmp, tmp2);
9112 } else {
9113 /* This addition cannot overflow 32 bits;
9114 * however it may overflow considered as a
9115 * signed operation, in which case we must set
9116 * the Q flag.
9117 */
9118 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9120 tcg_temp_free_i32(tmp2);
9121 if (rd != 15)
9123 tmp2 = load_reg(s, rd);
9124 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9125 tcg_temp_free_i32(tmp2);
9127 store_reg(s, rn, tmp);
9129 break;
9130 case 1:
9131 case 3:
9132 /* SDIV, UDIV */
9133 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
9134 goto illegal_op;
9136 if (((insn >> 5) & 7) || (rd != 15)) {
9137 goto illegal_op;
9139 tmp = load_reg(s, rm);
9140 tmp2 = load_reg(s, rs);
9141 if (insn & (1 << 21)) {
9142 gen_helper_udiv(tmp, tmp, tmp2);
9143 } else {
9144 gen_helper_sdiv(tmp, tmp, tmp2);
9146 tcg_temp_free_i32(tmp2);
9147 store_reg(s, rn, tmp);
9148 break;
9149 default:
9150 goto illegal_op;
9152 break;
9153 case 3:
9154 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9155 switch (op1) {
9156 case 0: /* Unsigned sum of absolute differences. */
9157 ARCH(6);
9158 tmp = load_reg(s, rm);
9159 tmp2 = load_reg(s, rs);
9160 gen_helper_usad8(tmp, tmp, tmp2);
9161 tcg_temp_free_i32(tmp2);
9162 if (rd != 15) {
9163 tmp2 = load_reg(s, rd);
9164 tcg_gen_add_i32(tmp, tmp, tmp2);
9165 tcg_temp_free_i32(tmp2);
9167 store_reg(s, rn, tmp);
9168 break;
9169 case 0x20: case 0x24: case 0x28: case 0x2c:
9170 /* Bitfield insert/clear. */
9171 ARCH(6T2);
9172 shift = (insn >> 7) & 0x1f;
9173 i = (insn >> 16) & 0x1f;
9174 if (i < shift) {
9175 /* UNPREDICTABLE; we choose to UNDEF */
9176 goto illegal_op;
9178 i = i + 1 - shift;
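/* i now holds the field width in bits; the rm == 15 case below is BFC
 * (bitfield clear), which inserts zeroes instead of register bits. */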
9179 if (rm == 15) {
9180 tmp = tcg_temp_new_i32();
9181 tcg_gen_movi_i32(tmp, 0);
9182 } else {
9183 tmp = load_reg(s, rm);
9185 if (i != 32) {
9186 tmp2 = load_reg(s, rd);
9187 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
9188 tcg_temp_free_i32(tmp2);
9190 store_reg(s, rd, tmp);
9191 break;
9192 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9193 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
9194 ARCH(6T2);
9195 tmp = load_reg(s, rm);
9196 shift = (insn >> 7) & 0x1f;
9197 i = ((insn >> 16) & 0x1f) + 1;
9198 if (shift + i > 32)
9199 goto illegal_op;
9200 if (i < 32) {
9201 if (op1 & 0x20) {
9202 gen_ubfx(tmp, shift, (1u << i) - 1);
9203 } else {
9204 gen_sbfx(tmp, shift, i);
9207 store_reg(s, rd, tmp);
9208 break;
9209 default:
9210 goto illegal_op;
9212 break;
9214 break;
9216 do_ldst:
9217 /* Check for undefined extension instructions
9218 * per the ARM Bible IE:
9219 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9220 */
9221 sh = (0xf << 20) | (0xf << 4);
9222 if (op1 == 0x7 && ((insn & sh) == sh))
9224 goto illegal_op;
9226 /* load/store byte/word */
9227 rn = (insn >> 16) & 0xf;
9228 rd = (insn >> 12) & 0xf;
9229 tmp2 = load_reg(s, rn);
9230 if ((insn & 0x01200000) == 0x00200000) {
9231 /* ldrt/strt */
9232 i = get_a32_user_mem_index(s);
9233 } else {
9234 i = get_mem_index(s);
9236 if (insn & (1 << 24))
9237 gen_add_data_offset(s, insn, tmp2);
9238 if (insn & (1 << 20)) {
9239 /* load */
9240 tmp = tcg_temp_new_i32();
9241 if (insn & (1 << 22)) {
9242 gen_aa32_ld8u(s, tmp, tmp2, i);
9243 } else {
9244 gen_aa32_ld32u(s, tmp, tmp2, i);
9246 } else {
9247 /* store */
9248 tmp = load_reg(s, rd);
9249 if (insn & (1 << 22)) {
9250 gen_aa32_st8(s, tmp, tmp2, i);
9251 } else {
9252 gen_aa32_st32(s, tmp, tmp2, i);
9254 tcg_temp_free_i32(tmp);
9256 if (!(insn & (1 << 24))) {
9257 gen_add_data_offset(s, insn, tmp2);
9258 store_reg(s, rn, tmp2);
9259 } else if (insn & (1 << 21)) {
9260 store_reg(s, rn, tmp2);
9261 } else {
9262 tcg_temp_free_i32(tmp2);
9264 if (insn & (1 << 20)) {
9265 /* Complete the load. */
9266 store_reg_from_load(s, rd, tmp);
9268 break;
9269 case 0x08:
9270 case 0x09:
9272 int j, n, loaded_base;
9273 bool exc_return = false;
9274 bool is_load = extract32(insn, 20, 1);
9275 bool user = false;
9276 TCGv_i32 loaded_var;
9277 /* load/store multiple words */
9278 /* XXX: store correct base if write back */
9279 if (insn & (1 << 22)) {
9280 /* LDM (user), LDM (exception return) and STM (user) */
9281 if (IS_USER(s))
9282 goto illegal_op; /* only usable in supervisor mode */
9284 if (is_load && extract32(insn, 15, 1)) {
9285 exc_return = true;
9286 } else {
9287 user = true;
9290 rn = (insn >> 16) & 0xf;
9291 addr = load_reg(s, rn);
9293 /* compute total size */
9294 loaded_base = 0;
9295 TCGV_UNUSED_I32(loaded_var);
9296 n = 0;
9297 for(i=0;i<16;i++) {
9298 if (insn & (1 << i))
9299 n++;
9301 /* XXX: test invalid n == 0 case ? */
9302 if (insn & (1 << 23)) {
9303 if (insn & (1 << 24)) {
9304 /* pre increment */
9305 tcg_gen_addi_i32(addr, addr, 4);
9306 } else {
9307 /* post increment */
9309 } else {
9310 if (insn & (1 << 24)) {
9311 /* pre decrement */
9312 tcg_gen_addi_i32(addr, addr, -(n * 4));
9313 } else {
9314 /* post decrement */
9315 if (n != 1)
9316 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
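/* Whatever the addressing mode, the transfer loop below always walks
 * upwards from the lowest address; the adjustments above turn the
 * decrement (DB/DA) forms into that canonical start address. */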
9319 j = 0;
9320 for(i=0;i<16;i++) {
9321 if (insn & (1 << i)) {
9322 if (is_load) {
9323 /* load */
9324 tmp = tcg_temp_new_i32();
9325 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9326 if (user) {
9327 tmp2 = tcg_const_i32(i);
9328 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
9329 tcg_temp_free_i32(tmp2);
9330 tcg_temp_free_i32(tmp);
9331 } else if (i == rn) {
9332 loaded_var = tmp;
9333 loaded_base = 1;
9334 } else {
9335 store_reg_from_load(s, i, tmp);
9337 } else {
9338 /* store */
9339 if (i == 15) {
9340 /* special case: r15 = PC + 8 */
9341 val = (long)s->pc + 4;
9342 tmp = tcg_temp_new_i32();
9343 tcg_gen_movi_i32(tmp, val);
9344 } else if (user) {
9345 tmp = tcg_temp_new_i32();
9346 tmp2 = tcg_const_i32(i);
9347 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
9348 tcg_temp_free_i32(tmp2);
9349 } else {
9350 tmp = load_reg(s, i);
9352 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9353 tcg_temp_free_i32(tmp);
9355 j++;
9356 /* no need to add after the last transfer */
9357 if (j != n)
9358 tcg_gen_addi_i32(addr, addr, 4);
9361 if (insn & (1 << 21)) {
9362 /* write back */
9363 if (insn & (1 << 23)) {
9364 if (insn & (1 << 24)) {
9365 /* pre increment */
9366 } else {
9367 /* post increment */
9368 tcg_gen_addi_i32(addr, addr, 4);
9370 } else {
9371 if (insn & (1 << 24)) {
9372 /* pre decrement */
9373 if (n != 1)
9374 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9375 } else {
9376 /* post decrement */
9377 tcg_gen_addi_i32(addr, addr, -(n * 4));
9380 store_reg(s, rn, addr);
9381 } else {
9382 tcg_temp_free_i32(addr);
9384 if (loaded_base) {
9385 store_reg(s, rn, loaded_var);
9387 if (exc_return) {
9388 /* Restore CPSR from SPSR. */
9389 tmp = load_cpu_field(spsr);
9390 gen_helper_cpsr_write_eret(cpu_env, tmp);
9391 tcg_temp_free_i32(tmp);
9392 s->is_jmp = DISAS_JUMP;
9395 break;
9396 case 0xa:
9397 case 0xb:
9399 int32_t offset;
9401 /* branch (and link) */
9402 val = (int32_t)s->pc;
9403 if (insn & (1 << 24)) {
9404 tmp = tcg_temp_new_i32();
9405 tcg_gen_movi_i32(tmp, val);
9406 store_reg(s, 14, tmp);
9408 offset = sextract32(insn << 2, 0, 26);
9409 val += offset + 4;
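/* The 24-bit immediate is shifted left by two and sign-extended to a
 * 26-bit offset; the branch base is the instruction address + 8, and as
 * val (s->pc) is already the instruction address + 4, the extra +4 here
 * supplies the remainder. */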
9410 gen_jmp(s, val);
9412 break;
9413 case 0xc:
9414 case 0xd:
9415 case 0xe:
9416 if (((insn >> 8) & 0xe) == 10) {
9417 /* VFP. */
9418 if (disas_vfp_insn(s, insn)) {
9419 goto illegal_op;
9421 } else if (disas_coproc_insn(s, insn)) {
9422 /* Coprocessor. */
9423 goto illegal_op;
9425 break;
9426 case 0xf:
9427 /* swi */
9428 gen_set_pc_im(s, s->pc);
9429 s->svc_imm = extract32(insn, 0, 24);
9430 s->is_jmp = DISAS_SWI;
9431 break;
9432 default:
9433 illegal_op:
9434 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9435 default_exception_el(s));
9436 break;
9441 /* Return true if this is a Thumb-2 logical op. */
9442 static int
9443 thumb2_logic_op(int op)
9444 {
9445 return (op < 8);
9446 }
9448 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9449 then set condition code flags based on the result of the operation.
9450 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9451 to the high bit of T1.
9452 Returns zero if the opcode is valid. */
9454 static int
9455 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9456 TCGv_i32 t0, TCGv_i32 t1)
9458 int logic_cc;
9460 logic_cc = 0;
9461 switch (op) {
9462 case 0: /* and */
9463 tcg_gen_and_i32(t0, t0, t1);
9464 logic_cc = conds;
9465 break;
9466 case 1: /* bic */
9467 tcg_gen_andc_i32(t0, t0, t1);
9468 logic_cc = conds;
9469 break;
9470 case 2: /* orr */
9471 tcg_gen_or_i32(t0, t0, t1);
9472 logic_cc = conds;
9473 break;
9474 case 3: /* orn */
9475 tcg_gen_orc_i32(t0, t0, t1);
9476 logic_cc = conds;
9477 break;
9478 case 4: /* eor */
9479 tcg_gen_xor_i32(t0, t0, t1);
9480 logic_cc = conds;
9481 break;
9482 case 8: /* add */
9483 if (conds)
9484 gen_add_CC(t0, t0, t1);
9485 else
9486 tcg_gen_add_i32(t0, t0, t1);
9487 break;
9488 case 10: /* adc */
9489 if (conds)
9490 gen_adc_CC(t0, t0, t1);
9491 else
9492 gen_adc(t0, t1);
9493 break;
9494 case 11: /* sbc */
9495 if (conds) {
9496 gen_sbc_CC(t0, t0, t1);
9497 } else {
9498 gen_sub_carry(t0, t0, t1);
9500 break;
9501 case 13: /* sub */
9502 if (conds)
9503 gen_sub_CC(t0, t0, t1);
9504 else
9505 tcg_gen_sub_i32(t0, t0, t1);
9506 break;
9507 case 14: /* rsb */
9508 if (conds)
9509 gen_sub_CC(t0, t1, t0);
9510 else
9511 tcg_gen_sub_i32(t0, t1, t0);
9512 break;
9513 default: /* 5, 6, 7, 9, 12, 15. */
9514 return 1;
9516 if (logic_cc) {
9517 gen_logic_CC(t0);
9518 if (shifter_out)
9519 gen_set_CF_bit31(t1);
9521 return 0;
9524 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9525 is not legal. */
9526 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9528 uint32_t insn, imm, shift, offset;
9529 uint32_t rd, rn, rm, rs;
9530 TCGv_i32 tmp;
9531 TCGv_i32 tmp2;
9532 TCGv_i32 tmp3;
9533 TCGv_i32 addr;
9534 TCGv_i64 tmp64;
9535 int op;
9536 int shiftop;
9537 int conds;
9538 int logic_cc;
9540 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9541 || arm_dc_feature(s, ARM_FEATURE_M))) {
9542 /* Thumb-1 cores may need to treat bl and blx as a pair of
9543 16-bit instructions to get correct prefetch abort behavior. */
9544 insn = insn_hw1;
9545 if ((insn & (1 << 12)) == 0) {
9546 ARCH(5);
9547 /* Second half of blx. */
9548 offset = ((insn & 0x7ff) << 1);
9549 tmp = load_reg(s, 14);
9550 tcg_gen_addi_i32(tmp, tmp, offset);
9551 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9553 tmp2 = tcg_temp_new_i32();
9554 tcg_gen_movi_i32(tmp2, s->pc | 1);
9555 store_reg(s, 14, tmp2);
9556 gen_bx(s, tmp);
9557 return 0;
9559 if (insn & (1 << 11)) {
9560 /* Second half of bl. */
9561 offset = ((insn & 0x7ff) << 1) | 1;
9562 tmp = load_reg(s, 14);
9563 tcg_gen_addi_i32(tmp, tmp, offset);
9565 tmp2 = tcg_temp_new_i32();
9566 tcg_gen_movi_i32(tmp2, s->pc | 1);
9567 store_reg(s, 14, tmp2);
9568 gen_bx(s, tmp);
9569 return 0;
9571 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9572 /* Instruction spans a page boundary. Implement it as two
9573 16-bit instructions in case the second half causes a
9574 prefetch abort. */
9575 offset = ((int32_t)insn << 21) >> 9;
9576 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9577 return 0;
9579 /* Fall through to 32-bit decode. */
9582 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
9583 s->pc += 2;
9584 insn |= (uint32_t)insn_hw1 << 16;
9586 if ((insn & 0xf800e800) != 0xf000e800) {
9587 ARCH(6T2);
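/* Apart from the 32-bit BL/BLX encodings matched by the mask above,
 * every 32-bit Thumb instruction requires Thumb-2 support. */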
9590 rn = (insn >> 16) & 0xf;
9591 rs = (insn >> 12) & 0xf;
9592 rd = (insn >> 8) & 0xf;
9593 rm = insn & 0xf;
9594 switch ((insn >> 25) & 0xf) {
9595 case 0: case 1: case 2: case 3:
9596 /* 16-bit instructions. Should never happen. */
9597 abort();
9598 case 4:
9599 if (insn & (1 << 22)) {
9600 /* Other load/store, table branch. */
9601 if (insn & 0x01200000) {
9602 /* Load/store doubleword. */
9603 if (rn == 15) {
9604 addr = tcg_temp_new_i32();
9605 tcg_gen_movi_i32(addr, s->pc & ~3);
9606 } else {
9607 addr = load_reg(s, rn);
9609 offset = (insn & 0xff) * 4;
9610 if ((insn & (1 << 23)) == 0)
9611 offset = -offset;
9612 if (insn & (1 << 24)) {
9613 tcg_gen_addi_i32(addr, addr, offset);
9614 offset = 0;
9616 if (insn & (1 << 20)) {
9617 /* ldrd */
9618 tmp = tcg_temp_new_i32();
9619 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9620 store_reg(s, rs, tmp);
9621 tcg_gen_addi_i32(addr, addr, 4);
9622 tmp = tcg_temp_new_i32();
9623 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9624 store_reg(s, rd, tmp);
9625 } else {
9626 /* strd */
9627 tmp = load_reg(s, rs);
9628 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9629 tcg_temp_free_i32(tmp);
9630 tcg_gen_addi_i32(addr, addr, 4);
9631 tmp = load_reg(s, rd);
9632 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9633 tcg_temp_free_i32(tmp);
9635 if (insn & (1 << 21)) {
9636 /* Base writeback. */
9637 if (rn == 15)
9638 goto illegal_op;
9639 tcg_gen_addi_i32(addr, addr, offset - 4);
9640 store_reg(s, rn, addr);
9641 } else {
9642 tcg_temp_free_i32(addr);
9644 } else if ((insn & (1 << 23)) == 0) {
9645 /* Load/store exclusive word. */
9646 addr = tcg_temp_local_new_i32();
9647 load_reg_var(s, addr, rn);
9648 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
9649 if (insn & (1 << 20)) {
9650 gen_load_exclusive(s, rs, 15, addr, 2);
9651 } else {
9652 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9654 tcg_temp_free_i32(addr);
9655 } else if ((insn & (7 << 5)) == 0) {
9656 /* Table Branch. */
9657 if (rn == 15) {
9658 addr = tcg_temp_new_i32();
9659 tcg_gen_movi_i32(addr, s->pc);
9660 } else {
9661 addr = load_reg(s, rn);
9663 tmp = load_reg(s, rm);
9664 tcg_gen_add_i32(addr, addr, tmp);
9665 if (insn & (1 << 4)) {
9666 /* tbh */
9667 tcg_gen_add_i32(addr, addr, tmp);
9668 tcg_temp_free_i32(tmp);
9669 tmp = tcg_temp_new_i32();
9670 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9671 } else { /* tbb */
9672 tcg_temp_free_i32(tmp);
9673 tmp = tcg_temp_new_i32();
9674 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9676 tcg_temp_free_i32(addr);
9677 tcg_gen_shli_i32(tmp, tmp, 1);
9678 tcg_gen_addi_i32(tmp, tmp, s->pc);
9679 store_reg(s, 15, tmp);
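/* TBB/TBH: the table entry is a forward halfword count, so it is doubled
 * and added to the PC of the following instruction (s->pc already points
 * there) before being written to r15, which performs the branch. */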
9680 } else {
9681 int op2 = (insn >> 6) & 0x3;
9682 op = (insn >> 4) & 0x3;
9683 switch (op2) {
9684 case 0:
9685 goto illegal_op;
9686 case 1:
9687 /* Load/store exclusive byte/halfword/doubleword */
9688 if (op == 2) {
9689 goto illegal_op;
9691 ARCH(7);
9692 break;
9693 case 2:
9694 /* Load-acquire/store-release */
9695 if (op == 3) {
9696 goto illegal_op;
9698 /* Fall through */
9699 case 3:
9700 /* Load-acquire/store-release exclusive */
9701 ARCH(8);
9702 break;
9704 addr = tcg_temp_local_new_i32();
9705 load_reg_var(s, addr, rn);
9706 if (!(op2 & 1)) {
9707 if (insn & (1 << 20)) {
9708 tmp = tcg_temp_new_i32();
9709 switch (op) {
9710 case 0: /* ldab */
9711 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9712 break;
9713 case 1: /* ldah */
9714 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9715 break;
9716 case 2: /* lda */
9717 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9718 break;
9719 default:
9720 abort();
9722 store_reg(s, rs, tmp);
9723 } else {
9724 tmp = load_reg(s, rs);
9725 switch (op) {
9726 case 0: /* stlb */
9727 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
9728 break;
9729 case 1: /* stlh */
9730 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
9731 break;
9732 case 2: /* stl */
9733 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9734 break;
9735 default:
9736 abort();
9738 tcg_temp_free_i32(tmp);
9740 } else if (insn & (1 << 20)) {
9741 gen_load_exclusive(s, rs, rd, addr, op);
9742 } else {
9743 gen_store_exclusive(s, rm, rs, rd, addr, op);
9745 tcg_temp_free_i32(addr);
9747 } else {
9748 /* Load/store multiple, RFE, SRS. */
9749 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
9750 /* RFE, SRS: not available in user mode or on M profile */
9751 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9752 goto illegal_op;
9754 if (insn & (1 << 20)) {
9755 /* rfe */
9756 addr = load_reg(s, rn);
9757 if ((insn & (1 << 24)) == 0)
9758 tcg_gen_addi_i32(addr, addr, -8);
9759 /* Load PC into tmp and CPSR into tmp2. */
9760 tmp = tcg_temp_new_i32();
9761 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9762 tcg_gen_addi_i32(addr, addr, 4);
9763 tmp2 = tcg_temp_new_i32();
9764 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9765 if (insn & (1 << 21)) {
9766 /* Base writeback. */
9767 if (insn & (1 << 24)) {
9768 tcg_gen_addi_i32(addr, addr, 4);
9769 } else {
9770 tcg_gen_addi_i32(addr, addr, -4);
9772 store_reg(s, rn, addr);
9773 } else {
9774 tcg_temp_free_i32(addr);
9776 gen_rfe(s, tmp, tmp2);
9777 } else {
9778 /* srs */
9779 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9780 insn & (1 << 21));
9782 } else {
9783 int i, loaded_base = 0;
9784 TCGv_i32 loaded_var;
9785 /* Load/store multiple. */
9786 addr = load_reg(s, rn);
9787 offset = 0;
9788 for (i = 0; i < 16; i++) {
9789 if (insn & (1 << i))
9790 offset += 4;
9792 if (insn & (1 << 24)) {
9793 tcg_gen_addi_i32(addr, addr, -offset);
9796 TCGV_UNUSED_I32(loaded_var);
9797 for (i = 0; i < 16; i++) {
9798 if ((insn & (1 << i)) == 0)
9799 continue;
9800 if (insn & (1 << 20)) {
9801 /* Load. */
9802 tmp = tcg_temp_new_i32();
9803 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9804 if (i == 15) {
9805 gen_bx(s, tmp);
9806 } else if (i == rn) {
9807 loaded_var = tmp;
9808 loaded_base = 1;
9809 } else {
9810 store_reg(s, i, tmp);
9812 } else {
9813 /* Store. */
9814 tmp = load_reg(s, i);
9815 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9816 tcg_temp_free_i32(tmp);
9818 tcg_gen_addi_i32(addr, addr, 4);
9820 if (loaded_base) {
9821 store_reg(s, rn, loaded_var);
9823 if (insn & (1 << 21)) {
9824 /* Base register writeback. */
9825 if (insn & (1 << 24)) {
9826 tcg_gen_addi_i32(addr, addr, -offset);
9828 /* Fault if writeback register is in register list. */
9829 if (insn & (1 << rn))
9830 goto illegal_op;
9831 store_reg(s, rn, addr);
9832 } else {
9833 tcg_temp_free_i32(addr);
9837 break;
9838 case 5:
9840 op = (insn >> 21) & 0xf;
9841 if (op == 6) {
9842 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9843 goto illegal_op;
9845 /* Halfword pack. */
9846 tmp = load_reg(s, rn);
9847 tmp2 = load_reg(s, rm);
9848 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9849 if (insn & (1 << 5)) {
9850 /* pkhtb */
9851 if (shift == 0)
9852 shift = 31;
9853 tcg_gen_sari_i32(tmp2, tmp2, shift);
9854 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9855 tcg_gen_ext16u_i32(tmp2, tmp2);
9856 } else {
9857 /* pkhbt */
9858 if (shift)
9859 tcg_gen_shli_i32(tmp2, tmp2, shift);
9860 tcg_gen_ext16u_i32(tmp, tmp);
9861 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9863 tcg_gen_or_i32(tmp, tmp, tmp2);
9864 tcg_temp_free_i32(tmp2);
9865 store_reg(s, rd, tmp);
9866 } else {
9867 /* Data processing register constant shift. */
9868 if (rn == 15) {
9869 tmp = tcg_temp_new_i32();
9870 tcg_gen_movi_i32(tmp, 0);
9871 } else {
9872 tmp = load_reg(s, rn);
9874 tmp2 = load_reg(s, rm);
9876 shiftop = (insn >> 4) & 3;
9877 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9878 conds = (insn & (1 << 20)) != 0;
9879 logic_cc = (conds && thumb2_logic_op(op));
9880 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9881 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9882 goto illegal_op;
9883 tcg_temp_free_i32(tmp2);
9884 if (rd != 15) {
9885 store_reg(s, rd, tmp);
9886 } else {
9887 tcg_temp_free_i32(tmp);
9890 break;
9891 case 13: /* Misc data processing. */
9892 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9893 if (op < 4 && (insn & 0xf000) != 0xf000)
9894 goto illegal_op;
9895 switch (op) {
9896 case 0: /* Register controlled shift. */
9897 tmp = load_reg(s, rn);
9898 tmp2 = load_reg(s, rm);
9899 if ((insn & 0x70) != 0)
9900 goto illegal_op;
9901 op = (insn >> 21) & 3;
9902 logic_cc = (insn & (1 << 20)) != 0;
9903 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9904 if (logic_cc)
9905 gen_logic_CC(tmp);
9906 store_reg_bx(s, rd, tmp);
9907 break;
9908 case 1: /* Sign/zero extend. */
9909 op = (insn >> 20) & 7;
9910 switch (op) {
9911 case 0: /* SXTAH, SXTH */
9912 case 1: /* UXTAH, UXTH */
9913 case 4: /* SXTAB, SXTB */
9914 case 5: /* UXTAB, UXTB */
9915 break;
9916 case 2: /* SXTAB16, SXTB16 */
9917 case 3: /* UXTAB16, UXTB16 */
9918 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9919 goto illegal_op;
9921 break;
9922 default:
9923 goto illegal_op;
9925 if (rn != 15) {
9926 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9927 goto illegal_op;
9930 tmp = load_reg(s, rm);
9931 shift = (insn >> 4) & 3;
9932 /* ??? In many cases it's not necessary to do a
9933 rotate, a shift is sufficient. */
9934 if (shift != 0)
9935 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9936 op = (insn >> 20) & 7;
9937 switch (op) {
9938 case 0: gen_sxth(tmp); break;
9939 case 1: gen_uxth(tmp); break;
9940 case 2: gen_sxtb16(tmp); break;
9941 case 3: gen_uxtb16(tmp); break;
9942 case 4: gen_sxtb(tmp); break;
9943 case 5: gen_uxtb(tmp); break;
9944 default:
9945 g_assert_not_reached();
9947 if (rn != 15) {
9948 tmp2 = load_reg(s, rn);
9949 if ((op >> 1) == 1) {
9950 gen_add16(tmp, tmp2);
9951 } else {
9952 tcg_gen_add_i32(tmp, tmp, tmp2);
9953 tcg_temp_free_i32(tmp2);
9956 store_reg(s, rd, tmp);
9957 break;
9958 case 2: /* SIMD add/subtract. */
9959 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9960 goto illegal_op;
9962 op = (insn >> 20) & 7;
9963 shift = (insn >> 4) & 7;
9964 if ((op & 3) == 3 || (shift & 3) == 3)
9965 goto illegal_op;
9966 tmp = load_reg(s, rn);
9967 tmp2 = load_reg(s, rm);
9968 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
9969 tcg_temp_free_i32(tmp2);
9970 store_reg(s, rd, tmp);
9971 break;
9972 case 3: /* Other data processing. */
9973 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9974 if (op < 4) {
9975 /* Saturating add/subtract. */
9976 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9977 goto illegal_op;
9979 tmp = load_reg(s, rn);
9980 tmp2 = load_reg(s, rm);
9981 if (op & 1)
9982 gen_helper_double_saturate(tmp, cpu_env, tmp);
9983 if (op & 2)
9984 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9985 else
9986 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
9987 tcg_temp_free_i32(tmp2);
9988 } else {
9989 switch (op) {
9990 case 0x0a: /* rbit */
9991 case 0x08: /* rev */
9992 case 0x09: /* rev16 */
9993 case 0x0b: /* revsh */
9994 case 0x18: /* clz */
9995 break;
9996 case 0x10: /* sel */
9997 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9998 goto illegal_op;
10000 break;
10001 case 0x20: /* crc32/crc32c */
10002 case 0x21:
10003 case 0x22:
10004 case 0x28:
10005 case 0x29:
10006 case 0x2a:
10007 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10008 goto illegal_op;
10010 break;
10011 default:
10012 goto illegal_op;
10014 tmp = load_reg(s, rn);
10015 switch (op) {
10016 case 0x0a: /* rbit */
10017 gen_helper_rbit(tmp, tmp);
10018 break;
10019 case 0x08: /* rev */
10020 tcg_gen_bswap32_i32(tmp, tmp);
10021 break;
10022 case 0x09: /* rev16 */
10023 gen_rev16(tmp);
10024 break;
10025 case 0x0b: /* revsh */
10026 gen_revsh(tmp);
10027 break;
10028 case 0x10: /* sel */
10029 tmp2 = load_reg(s, rm);
10030 tmp3 = tcg_temp_new_i32();
10031 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
10032 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
10033 tcg_temp_free_i32(tmp3);
10034 tcg_temp_free_i32(tmp2);
10035 break;
10036 case 0x18: /* clz */
10037 gen_helper_clz(tmp, tmp);
10038 break;
10039 case 0x20:
10040 case 0x21:
10041 case 0x22:
10042 case 0x28:
10043 case 0x29:
10044 case 0x2a:
10046 /* crc32/crc32c */
10047 uint32_t sz = op & 0x3;
10048 uint32_t c = op & 0x8;
10050 tmp2 = load_reg(s, rm);
10051 if (sz == 0) {
10052 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10053 } else if (sz == 1) {
10054 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10056 tmp3 = tcg_const_i32(1 << sz);
10057 if (c) {
10058 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10059 } else {
10060 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10062 tcg_temp_free_i32(tmp2);
10063 tcg_temp_free_i32(tmp3);
10064 break;
10066 default:
10067 g_assert_not_reached();
10070 store_reg(s, rd, tmp);
10071 break;
10072 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
10073 switch ((insn >> 20) & 7) {
10074 case 0: /* 32 x 32 -> 32 */
10075 case 7: /* Unsigned sum of absolute differences. */
10076 break;
10077 case 1: /* 16 x 16 -> 32 */
10078 case 2: /* Dual multiply add. */
10079 case 3: /* 32 * 16 -> 32msb */
10080 case 4: /* Dual multiply subtract. */
10081 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10082 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10083 goto illegal_op;
10085 break;
10087 op = (insn >> 4) & 0xf;
10088 tmp = load_reg(s, rn);
10089 tmp2 = load_reg(s, rm);
10090 switch ((insn >> 20) & 7) {
10091 case 0: /* 32 x 32 -> 32 */
10092 tcg_gen_mul_i32(tmp, tmp, tmp2);
10093 tcg_temp_free_i32(tmp2);
10094 if (rs != 15) {
10095 tmp2 = load_reg(s, rs);
10096 if (op)
10097 tcg_gen_sub_i32(tmp, tmp2, tmp);
10098 else
10099 tcg_gen_add_i32(tmp, tmp, tmp2);
10100 tcg_temp_free_i32(tmp2);
10102 break;
10103 case 1: /* 16 x 16 -> 32 */
10104 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10105 tcg_temp_free_i32(tmp2);
10106 if (rs != 15) {
10107 tmp2 = load_reg(s, rs);
10108 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10109 tcg_temp_free_i32(tmp2);
10111 break;
10112 case 2: /* Dual multiply add. */
10113 case 4: /* Dual multiply subtract. */
10114 if (op)
10115 gen_swap_half(tmp2);
10116 gen_smul_dual(tmp, tmp2);
10117 if (insn & (1 << 22)) {
10118 /* This subtraction cannot overflow. */
10119 tcg_gen_sub_i32(tmp, tmp, tmp2);
10120 } else {
10121 /* This addition cannot overflow 32 bits;
10122 * however it may overflow considered as a signed
10123 * operation, in which case we must set the Q flag.
10124 */
10125 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10127 tcg_temp_free_i32(tmp2);
10128 if (rs != 15)
10130 tmp2 = load_reg(s, rs);
10131 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10132 tcg_temp_free_i32(tmp2);
10134 break;
10135 case 3: /* 32 * 16 -> 32msb */
10136 if (op)
10137 tcg_gen_sari_i32(tmp2, tmp2, 16);
10138 else
10139 gen_sxth(tmp2);
10140 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10141 tcg_gen_shri_i64(tmp64, tmp64, 16);
10142 tmp = tcg_temp_new_i32();
10143 tcg_gen_extrl_i64_i32(tmp, tmp64);
10144 tcg_temp_free_i64(tmp64);
10145 if (rs != 15)
10147 tmp2 = load_reg(s, rs);
10148 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10149 tcg_temp_free_i32(tmp2);
10151 break;
10152 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10153 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10154 if (rs != 15) {
10155 tmp = load_reg(s, rs);
10156 if (insn & (1 << 20)) {
10157 tmp64 = gen_addq_msw(tmp64, tmp);
10158 } else {
10159 tmp64 = gen_subq_msw(tmp64, tmp);
10162 if (insn & (1 << 4)) {
10163 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10165 tcg_gen_shri_i64(tmp64, tmp64, 32);
10166 tmp = tcg_temp_new_i32();
10167 tcg_gen_extrl_i64_i32(tmp, tmp64);
10168 tcg_temp_free_i64(tmp64);
10169 break;
10170 case 7: /* Unsigned sum of absolute differences. */
10171 gen_helper_usad8(tmp, tmp, tmp2);
10172 tcg_temp_free_i32(tmp2);
10173 if (rs != 15) {
10174 tmp2 = load_reg(s, rs);
10175 tcg_gen_add_i32(tmp, tmp, tmp2);
10176 tcg_temp_free_i32(tmp2);
10178 break;
10180 store_reg(s, rd, tmp);
10181 break;
10182 case 6: case 7: /* 64-bit multiply, Divide. */
10183 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
10184 tmp = load_reg(s, rn);
10185 tmp2 = load_reg(s, rm);
10186 if ((op & 0x50) == 0x10) {
10187 /* sdiv, udiv */
10188 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
10189 goto illegal_op;
10191 if (op & 0x20)
10192 gen_helper_udiv(tmp, tmp, tmp2);
10193 else
10194 gen_helper_sdiv(tmp, tmp, tmp2);
10195 tcg_temp_free_i32(tmp2);
10196 store_reg(s, rd, tmp);
10197 } else if ((op & 0xe) == 0xc) {
10198 /* Dual multiply accumulate long. */
10199 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10200 tcg_temp_free_i32(tmp);
10201 tcg_temp_free_i32(tmp2);
10202 goto illegal_op;
10204 if (op & 1)
10205 gen_swap_half(tmp2);
10206 gen_smul_dual(tmp, tmp2);
10207 if (op & 0x10) {
10208 tcg_gen_sub_i32(tmp, tmp, tmp2);
10209 } else {
10210 tcg_gen_add_i32(tmp, tmp, tmp2);
10212 tcg_temp_free_i32(tmp2);
10213 /* BUGFIX */
10214 tmp64 = tcg_temp_new_i64();
10215 tcg_gen_ext_i32_i64(tmp64, tmp);
10216 tcg_temp_free_i32(tmp);
10217 gen_addq(s, tmp64, rs, rd);
10218 gen_storeq_reg(s, rs, rd, tmp64);
10219 tcg_temp_free_i64(tmp64);
10220 } else {
10221 if (op & 0x20) {
10222 /* Unsigned 64-bit multiply */
10223 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
10224 } else {
10225 if (op & 8) {
10226 /* smlalxy */
10227 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10228 tcg_temp_free_i32(tmp2);
10229 tcg_temp_free_i32(tmp);
10230 goto illegal_op;
10232 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10233 tcg_temp_free_i32(tmp2);
10234 tmp64 = tcg_temp_new_i64();
10235 tcg_gen_ext_i32_i64(tmp64, tmp);
10236 tcg_temp_free_i32(tmp);
10237 } else {
10238 /* Signed 64-bit multiply */
10239 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10242 if (op & 4) {
10243 /* umaal */
10244 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10245 tcg_temp_free_i64(tmp64);
10246 goto illegal_op;
10248 gen_addq_lo(s, tmp64, rs);
10249 gen_addq_lo(s, tmp64, rd);
10250 } else if (op & 0x40) {
10251 /* 64-bit accumulate. */
10252 gen_addq(s, tmp64, rs, rd);
10254 gen_storeq_reg(s, rs, rd, tmp64);
10255 tcg_temp_free_i64(tmp64);
10257 break;
10259 break;
10260 case 6: case 7: case 14: case 15:
10261 /* Coprocessor. */
10262 if (((insn >> 24) & 3) == 3) {
10263 /* Translate into the equivalent ARM encoding. */
10264 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
10265 if (disas_neon_data_insn(s, insn)) {
10266 goto illegal_op;
10268 } else if (((insn >> 8) & 0xe) == 10) {
10269 if (disas_vfp_insn(s, insn)) {
10270 goto illegal_op;
10272 } else {
10273 if (insn & (1 << 28))
10274 goto illegal_op;
10275 if (disas_coproc_insn(s, insn)) {
10276 goto illegal_op;
10279 break;
10280 case 8: case 9: case 10: case 11:
10281 if (insn & (1 << 15)) {
10282 /* Branches, misc control. */
10283 if (insn & 0x5000) {
10284 /* Unconditional branch. */
10285 /* signextend(hw1[10:0]) -> offset[:12]. */
10286 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10287 /* hw1[10:0] -> offset[11:1]. */
10288 offset |= (insn & 0x7ff) << 1;
10289 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10290 offset[24:22] already have the same value because of the
10291 sign extension above. */
10292 offset ^= ((~insn) & (1 << 13)) << 10;
10293 offset ^= ((~insn) & (1 << 11)) << 11;
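/* These two XORs implement I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S)
 * from the T4 branch encoding (hw2 bit 13 is J1, bit 11 is J2); the sign
 * bit S was already propagated into offset[23:22] by the arithmetic
 * shift above. */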
10295 if (insn & (1 << 14)) {
10296 /* Branch and link. */
10297 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
10300 offset += s->pc;
10301 if (insn & (1 << 12)) {
10302 /* b/bl */
10303 gen_jmp(s, offset);
10304 } else {
10305 /* blx */
10306 offset &= ~(uint32_t)2;
10307 /* thumb2 bx, no need to check */
10308 gen_bx_im(s, offset);
10310 } else if (((insn >> 23) & 7) == 7) {
10311 /* Misc control */
10312 if (insn & (1 << 13))
10313 goto illegal_op;
10315 if (insn & (1 << 26)) {
10316 if (!(insn & (1 << 20))) {
10317 /* Hypervisor call (v7) */
10318 int imm16 = extract32(insn, 16, 4) << 12
10319 | extract32(insn, 0, 12);
10320 ARCH(7);
10321 if (IS_USER(s)) {
10322 goto illegal_op;
10324 gen_hvc(s, imm16);
10325 } else {
10326 /* Secure monitor call (v6+) */
10327 ARCH(6K);
10328 if (IS_USER(s)) {
10329 goto illegal_op;
10331 gen_smc(s);
10333 } else {
10334 op = (insn >> 20) & 7;
10335 switch (op) {
10336 case 0: /* msr cpsr. */
10337 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10338 tmp = load_reg(s, rn);
10339 addr = tcg_const_i32(insn & 0xff);
10340 gen_helper_v7m_msr(cpu_env, addr, tmp);
10341 tcg_temp_free_i32(addr);
10342 tcg_temp_free_i32(tmp);
10343 gen_lookup_tb(s);
10344 break;
10346 /* fall through */
10347 case 1: /* msr spsr. */
10348 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10349 goto illegal_op;
10352 if (extract32(insn, 5, 1)) {
10353 /* MSR (banked) */
10354 int sysm = extract32(insn, 8, 4) |
10355 (extract32(insn, 4, 1) << 4);
10356 int r = op & 1;
10358 gen_msr_banked(s, r, sysm, rm);
10359 break;
10362 /* MSR (for PSRs) */
10363 tmp = load_reg(s, rn);
10364 if (gen_set_psr(s,
10365 msr_mask(s, (insn >> 8) & 0xf, op == 1),
10366 op == 1, tmp))
10367 goto illegal_op;
10368 break;
10369 case 2: /* cps, nop-hint. */
10370 if (((insn >> 8) & 7) == 0) {
10371 gen_nop_hint(s, insn & 0xff);
10373 /* Implemented as NOP in user mode. */
10374 if (IS_USER(s))
10375 break;
10376 offset = 0;
10377 imm = 0;
10378 if (insn & (1 << 10)) {
10379 if (insn & (1 << 7))
10380 offset |= CPSR_A;
10381 if (insn & (1 << 6))
10382 offset |= CPSR_I;
10383 if (insn & (1 << 5))
10384 offset |= CPSR_F;
10385 if (insn & (1 << 9))
10386 imm = CPSR_A | CPSR_I | CPSR_F;
10388 if (insn & (1 << 8)) {
10389 offset |= 0x1f;
10390 imm |= (insn & 0x1f);
10392 if (offset) {
10393 gen_set_psr_im(s, offset, 0, imm);
10395 break;
10396 case 3: /* Special control operations. */
10397 ARCH(7);
10398 op = (insn >> 4) & 0xf;
10399 switch (op) {
10400 case 2: /* clrex */
10401 gen_clrex(s);
10402 break;
10403 case 4: /* dsb */
10404 case 5: /* dmb */
10405 /* These execute as NOPs. */
10406 break;
10407 case 6: /* isb */
10408 /* We need to break the TB after this insn
10409 * to execute self-modifying code correctly
10410 * and also to take any pending interrupts
10411 * immediately.
10412 */
10413 gen_lookup_tb(s);
10414 break;
10415 default:
10416 goto illegal_op;
10418 break;
10419 case 4: /* bxj */
10420 /* Trivial implementation equivalent to bx. */
10421 tmp = load_reg(s, rn);
10422 gen_bx(s, tmp);
10423 break;
10424 case 5: /* Exception return. */
10425 if (IS_USER(s)) {
10426 goto illegal_op;
10428 if (rn != 14 || rd != 15) {
10429 goto illegal_op;
10431 tmp = load_reg(s, rn);
10432 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10433 gen_exception_return(s, tmp);
10434 break;
10435 case 6: /* MRS */
10436 if (extract32(insn, 5, 1)) {
10437 /* MRS (banked) */
10438 int sysm = extract32(insn, 16, 4) |
10439 (extract32(insn, 4, 1) << 4);
10441 gen_mrs_banked(s, 0, sysm, rd);
10442 break;
10445 /* mrs cpsr */
10446 tmp = tcg_temp_new_i32();
10447 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10448 addr = tcg_const_i32(insn & 0xff);
10449 gen_helper_v7m_mrs(tmp, cpu_env, addr);
10450 tcg_temp_free_i32(addr);
10451 } else {
10452 gen_helper_cpsr_read(tmp, cpu_env);
10454 store_reg(s, rd, tmp);
10455 break;
10456 case 7: /* MRS */
10457 if (extract32(insn, 5, 1)) {
10458 /* MRS (banked) */
10459 int sysm = extract32(insn, 16, 4) |
10460 (extract32(insn, 4, 1) << 4);
10462 gen_mrs_banked(s, 1, sysm, rd);
10463 break;
10466 /* mrs spsr. */
10467 /* Not accessible in user mode. */
10468 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
10469 goto illegal_op;
10471 tmp = load_cpu_field(spsr);
10472 store_reg(s, rd, tmp);
10473 break;
10476 } else {
10477 /* Conditional branch. */
10478 op = (insn >> 22) & 0xf;
10479 /* Generate a conditional jump to next instruction. */
10480 s->condlabel = gen_new_label();
10481 arm_gen_test_cc(op ^ 1, s->condlabel);
10482 s->condjmp = 1;
10484 /* offset[11:1] = insn[10:0] */
10485 offset = (insn & 0x7ff) << 1;
10486 /* offset[17:12] = insn[21:16]. */
10487 offset |= (insn & 0x003f0000) >> 4;
10488 /* offset[31:20] = insn[26]. */
10489 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10490 /* offset[18] = insn[13]. */
10491 offset |= (insn & (1 << 13)) << 5;
10492 /* offset[19] = insn[11]. */
10493 offset |= (insn & (1 << 11)) << 8;
10495 /* jump to the offset */
10496 gen_jmp(s, s->pc + offset);
10498 } else {
10499 /* Data processing immediate. */
10500 if (insn & (1 << 25)) {
10501 if (insn & (1 << 24)) {
10502 if (insn & (1 << 20))
10503 goto illegal_op;
10504 /* Bitfield/Saturate. */
10505 op = (insn >> 21) & 7;
10506 imm = insn & 0x1f;
10507 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10508 if (rn == 15) {
10509 tmp = tcg_temp_new_i32();
10510 tcg_gen_movi_i32(tmp, 0);
10511 } else {
10512 tmp = load_reg(s, rn);
10514 switch (op) {
10515 case 2: /* Signed bitfield extract. */
10516 imm++;
10517 if (shift + imm > 32)
10518 goto illegal_op;
10519 if (imm < 32)
10520 gen_sbfx(tmp, shift, imm);
10521 break;
10522 case 6: /* Unsigned bitfield extract. */
10523 imm++;
10524 if (shift + imm > 32)
10525 goto illegal_op;
10526 if (imm < 32)
10527 gen_ubfx(tmp, shift, (1u << imm) - 1);
10528 break;
10529 case 3: /* Bitfield insert/clear. */
10530 if (imm < shift)
10531 goto illegal_op;
10532 imm = imm + 1 - shift;
10533 if (imm != 32) {
10534 tmp2 = load_reg(s, rd);
10535 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
10536 tcg_temp_free_i32(tmp2);
10538 break;
10539 case 7:
10540 goto illegal_op;
10541 default: /* Saturate. */
10542 if (shift) {
10543 if (op & 1)
10544 tcg_gen_sari_i32(tmp, tmp, shift);
10545 else
10546 tcg_gen_shli_i32(tmp, tmp, shift);
10548 tmp2 = tcg_const_i32(imm);
10549 if (op & 4) {
10550 /* Unsigned. */
10551 if ((op & 1) && shift == 0) {
10552 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10553 tcg_temp_free_i32(tmp);
10554 tcg_temp_free_i32(tmp2);
10555 goto illegal_op;
10557 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
10558 } else {
10559 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
10561 } else {
10562 /* Signed. */
10563 if ((op & 1) && shift == 0) {
10564 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10565 tcg_temp_free_i32(tmp);
10566 tcg_temp_free_i32(tmp2);
10567 goto illegal_op;
10569 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
10570 } else {
10571 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
10574 tcg_temp_free_i32(tmp2);
10575 break;
10577 store_reg(s, rd, tmp);
10578 } else {
10579 imm = ((insn & 0x04000000) >> 15)
10580 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10581 if (insn & (1 << 22)) {
10582 /* 16-bit immediate. */
10583 imm |= (insn >> 4) & 0xf000;
10584 if (insn & (1 << 23)) {
10585 /* movt */
10586 tmp = load_reg(s, rd);
10587 tcg_gen_ext16u_i32(tmp, tmp);
10588 tcg_gen_ori_i32(tmp, tmp, imm << 16);
10589 } else {
10590 /* movw */
10591 tmp = tcg_temp_new_i32();
10592 tcg_gen_movi_i32(tmp, imm);
10594 } else {
10595 /* Add/sub 12-bit immediate. */
10596 if (rn == 15) {
10597 offset = s->pc & ~(uint32_t)3;
10598 if (insn & (1 << 23))
10599 offset -= imm;
10600 else
10601 offset += imm;
10602 tmp = tcg_temp_new_i32();
10603 tcg_gen_movi_i32(tmp, offset);
10604 } else {
10605 tmp = load_reg(s, rn);
10606 if (insn & (1 << 23))
10607 tcg_gen_subi_i32(tmp, tmp, imm);
10608 else
10609 tcg_gen_addi_i32(tmp, tmp, imm);
10612 store_reg(s, rd, tmp);
10614 } else {
10615 int shifter_out = 0;
10616 /* modified 12-bit immediate. */
10617 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10618 imm = (insn & 0xff);
10619 switch (shift) {
10620 case 0: /* XY */
10621 /* Nothing to do. */
10622 break;
10623 case 1: /* 00XY00XY */
10624 imm |= imm << 16;
10625 break;
10626 case 2: /* XY00XY00 */
10627 imm |= imm << 16;
10628 imm <<= 8;
10629 break;
10630 case 3: /* XYXYXYXY */
10631 imm |= imm << 16;
10632 imm |= imm << 8;
10633 break;
10634 default: /* Rotated constant. */
10635 shift = (shift << 1) | (imm >> 7);
10636 imm |= 0x80;
10637 imm = imm << (32 - shift);
10638 shifter_out = 1;
10639 break;
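/* Illustrative example: shift = 3 with imm8 = 0xab yields the replicated
 * constant 0xabababab; shift values of 4 and above select the rotated
 * form, where bit 7 of the immediate is forced to 1 and the byte is
 * rotated into position by the 5-bit rotation amount. */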
10641 tmp2 = tcg_temp_new_i32();
10642 tcg_gen_movi_i32(tmp2, imm);
10643 rn = (insn >> 16) & 0xf;
10644 if (rn == 15) {
10645 tmp = tcg_temp_new_i32();
10646 tcg_gen_movi_i32(tmp, 0);
10647 } else {
10648 tmp = load_reg(s, rn);
10650 op = (insn >> 21) & 0xf;
10651 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
10652 shifter_out, tmp, tmp2))
10653 goto illegal_op;
10654 tcg_temp_free_i32(tmp2);
10655 rd = (insn >> 8) & 0xf;
10656 if (rd != 15) {
10657 store_reg(s, rd, tmp);
10658 } else {
10659 tcg_temp_free_i32(tmp);
10663 break;
10664 case 12: /* Load/store single data item. */
10666 int postinc = 0;
10667 int writeback = 0;
10668 int memidx;
10669 if ((insn & 0x01100000) == 0x01000000) {
10670 if (disas_neon_ls_insn(s, insn)) {
10671 goto illegal_op;
10673 break;
10675 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10676 if (rs == 15) {
10677 if (!(insn & (1 << 20))) {
10678 goto illegal_op;
10680 if (op != 2) {
10681 /* Byte or halfword load space with dest == r15 : memory hints.
10682 * Catch them early so we don't emit pointless addressing code.
10683 * This space is a mix of:
10684 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10685 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10686 * cores)
10687 * unallocated hints, which must be treated as NOPs
10688 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10689 * which is easiest for the decoding logic
10690 * Some space which must UNDEF
10691 */
10692 int op1 = (insn >> 23) & 3;
10693 int op2 = (insn >> 6) & 0x3f;
10694 if (op & 2) {
10695 goto illegal_op;
10697 if (rn == 15) {
10698 /* UNPREDICTABLE, unallocated hint or
10699 * PLD/PLDW/PLI (literal)
10700 */
10701 return 0;
10703 if (op1 & 1) {
10704 return 0; /* PLD/PLDW/PLI or unallocated hint */
10706 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
10707 return 0; /* PLD/PLDW/PLI or unallocated hint */
10709 /* UNDEF space, or an UNPREDICTABLE */
10710 return 1;
10713 memidx = get_mem_index(s);
10714 if (rn == 15) {
10715 addr = tcg_temp_new_i32();
10716 /* PC relative. */
10717 /* s->pc has already been incremented by 4. */
10718 imm = s->pc & 0xfffffffc;
10719 if (insn & (1 << 23))
10720 imm += insn & 0xfff;
10721 else
10722 imm -= insn & 0xfff;
10723 tcg_gen_movi_i32(addr, imm);
10724 } else {
10725 addr = load_reg(s, rn);
10726 if (insn & (1 << 23)) {
10727 /* Positive offset. */
10728 imm = insn & 0xfff;
10729 tcg_gen_addi_i32(addr, addr, imm);
10730 } else {
10731 imm = insn & 0xff;
10732 switch ((insn >> 8) & 0xf) {
10733 case 0x0: /* Shifted Register. */
10734 shift = (insn >> 4) & 0xf;
10735 if (shift > 3) {
10736 tcg_temp_free_i32(addr);
10737 goto illegal_op;
10739 tmp = load_reg(s, rm);
10740 if (shift)
10741 tcg_gen_shli_i32(tmp, tmp, shift);
10742 tcg_gen_add_i32(addr, addr, tmp);
10743 tcg_temp_free_i32(tmp);
10744 break;
10745 case 0xc: /* Negative offset. */
10746 tcg_gen_addi_i32(addr, addr, -imm);
10747 break;
10748 case 0xe: /* User privilege. */
10749 tcg_gen_addi_i32(addr, addr, imm);
10750 memidx = get_a32_user_mem_index(s);
10751 break;
10752 case 0x9: /* Post-decrement. */
10753 imm = -imm;
10754 /* Fall through. */
10755 case 0xb: /* Post-increment. */
10756 postinc = 1;
10757 writeback = 1;
10758 break;
10759 case 0xd: /* Pre-decrement. */
10760 imm = -imm;
10761 /* Fall through. */
10762 case 0xf: /* Pre-increment. */
10763 tcg_gen_addi_i32(addr, addr, imm);
10764 writeback = 1;
10765 break;
10766 default:
10767 tcg_temp_free_i32(addr);
10768 goto illegal_op;
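/* At this point addr holds the address to use for the access (already
 * adjusted for the pre-indexed forms), imm is the offset to apply
 * afterwards for the post-indexed forms, and memidx selects the user or
 * privileged memory index. */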
10772 if (insn & (1 << 20)) {
10773 /* Load. */
10774 tmp = tcg_temp_new_i32();
10775 switch (op) {
10776 case 0:
10777 gen_aa32_ld8u(s, tmp, addr, memidx);
10778 break;
10779 case 4:
10780 gen_aa32_ld8s(s, tmp, addr, memidx);
10781 break;
10782 case 1:
10783 gen_aa32_ld16u(s, tmp, addr, memidx);
10784 break;
10785 case 5:
10786 gen_aa32_ld16s(s, tmp, addr, memidx);
10787 break;
10788 case 2:
10789 gen_aa32_ld32u(s, tmp, addr, memidx);
10790 break;
10791 default:
10792 tcg_temp_free_i32(tmp);
10793 tcg_temp_free_i32(addr);
10794 goto illegal_op;
10796 if (rs == 15) {
10797 gen_bx(s, tmp);
10798 } else {
10799 store_reg(s, rs, tmp);
10801 } else {
10802 /* Store. */
10803 tmp = load_reg(s, rs);
10804 switch (op) {
10805 case 0:
10806 gen_aa32_st8(s, tmp, addr, memidx);
10807 break;
10808 case 1:
10809 gen_aa32_st16(s, tmp, addr, memidx);
10810 break;
10811 case 2:
10812 gen_aa32_st32(s, tmp, addr, memidx);
10813 break;
10814 default:
10815 tcg_temp_free_i32(tmp);
10816 tcg_temp_free_i32(addr);
10817 goto illegal_op;
10819 tcg_temp_free_i32(tmp);
10821 if (postinc)
10822 tcg_gen_addi_i32(addr, addr, imm);
10823 if (writeback) {
10824 store_reg(s, rn, addr);
10825 } else {
10826 tcg_temp_free_i32(addr);
10829 break;
10830 default:
10831 goto illegal_op;
10833 return 0;
10834 illegal_op:
10835 return 1;
10838 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
10840 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10841 int32_t offset;
10842 int i;
10843 TCGv_i32 tmp;
10844 TCGv_i32 tmp2;
10845 TCGv_i32 addr;
10847 if (s->condexec_mask) {
10848 cond = s->condexec_cond;
10849 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10850 s->condlabel = gen_new_label();
10851 arm_gen_test_cc(cond ^ 1, s->condlabel);
10852 s->condjmp = 1;
10856 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
10857 s->pc += 2;
10859 switch (insn >> 12) {
10860 case 0: case 1:
10862 rd = insn & 7;
10863 op = (insn >> 11) & 3;
10864 if (op == 3) {
10865 /* add/subtract */
10866 rn = (insn >> 3) & 7;
10867 tmp = load_reg(s, rn);
10868 if (insn & (1 << 10)) {
10869 /* immediate */
10870 tmp2 = tcg_temp_new_i32();
10871 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
10872 } else {
10873 /* reg */
10874 rm = (insn >> 6) & 7;
10875 tmp2 = load_reg(s, rm);
10877 if (insn & (1 << 9)) {
10878 if (s->condexec_mask)
10879 tcg_gen_sub_i32(tmp, tmp, tmp2);
10880 else
10881 gen_sub_CC(tmp, tmp, tmp2);
10882 } else {
10883 if (s->condexec_mask)
10884 tcg_gen_add_i32(tmp, tmp, tmp2);
10885 else
10886 gen_add_CC(tmp, tmp, tmp2);
10888 tcg_temp_free_i32(tmp2);
10889 store_reg(s, rd, tmp);
10890 } else {
10891 /* shift immediate */
10892 rm = (insn >> 3) & 7;
10893 shift = (insn >> 6) & 0x1f;
10894 tmp = load_reg(s, rm);
10895 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10896 if (!s->condexec_mask)
10897 gen_logic_CC(tmp);
10898 store_reg(s, rd, tmp);
10900 break;
10901 case 2: case 3:
10902 /* arithmetic large immediate */
10903 op = (insn >> 11) & 3;
10904 rd = (insn >> 8) & 0x7;
10905 if (op == 0) { /* mov */
10906 tmp = tcg_temp_new_i32();
10907 tcg_gen_movi_i32(tmp, insn & 0xff);
10908 if (!s->condexec_mask)
10909 gen_logic_CC(tmp);
10910 store_reg(s, rd, tmp);
10911 } else {
10912 tmp = load_reg(s, rd);
10913 tmp2 = tcg_temp_new_i32();
10914 tcg_gen_movi_i32(tmp2, insn & 0xff);
10915 switch (op) {
10916 case 1: /* cmp */
10917 gen_sub_CC(tmp, tmp, tmp2);
10918 tcg_temp_free_i32(tmp);
10919 tcg_temp_free_i32(tmp2);
10920 break;
10921 case 2: /* add */
10922 if (s->condexec_mask)
10923 tcg_gen_add_i32(tmp, tmp, tmp2);
10924 else
10925 gen_add_CC(tmp, tmp, tmp2);
10926 tcg_temp_free_i32(tmp2);
10927 store_reg(s, rd, tmp);
10928 break;
10929 case 3: /* sub */
10930 if (s->condexec_mask)
10931 tcg_gen_sub_i32(tmp, tmp, tmp2);
10932 else
10933 gen_sub_CC(tmp, tmp, tmp2);
10934 tcg_temp_free_i32(tmp2);
10935 store_reg(s, rd, tmp);
10936 break;
10939 break;
10940 case 4:
10941 if (insn & (1 << 11)) {
10942 rd = (insn >> 8) & 7;
10943 /* load pc-relative. Bit 1 of PC is ignored. */
10944 val = s->pc + 2 + ((insn & 0xff) * 4);
10945 val &= ~(uint32_t)2;
10946 addr = tcg_temp_new_i32();
10947 tcg_gen_movi_i32(addr, val);
10948 tmp = tcg_temp_new_i32();
10949 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10950 tcg_temp_free_i32(addr);
10951 store_reg(s, rd, tmp);
10952 break;
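/* Editor's note: illustrative sketch, not part of translate.c; names are
 * hypothetical.  The 16-bit LDR (literal) address computed above:
 * 'insn_addr' is the address of the 16-bit instruction, the PC reads as
 * insn_addr + 4, and bit 1 of the result is ignored. */
static inline uint32_t t16_ldr_literal_address(uint32_t insn_addr,
                                               uint32_t imm8)
{
    return ((insn_addr + 4) + imm8 * 4) & ~(uint32_t)2;
}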
10954 if (insn & (1 << 10)) {
10955 /* data processing extended or blx */
10956 rd = (insn & 7) | ((insn >> 4) & 8);
10957 rm = (insn >> 3) & 0xf;
10958 op = (insn >> 8) & 3;
10959 switch (op) {
10960 case 0: /* add */
10961 tmp = load_reg(s, rd);
10962 tmp2 = load_reg(s, rm);
10963 tcg_gen_add_i32(tmp, tmp, tmp2);
10964 tcg_temp_free_i32(tmp2);
10965 store_reg(s, rd, tmp);
10966 break;
10967 case 1: /* cmp */
10968 tmp = load_reg(s, rd);
10969 tmp2 = load_reg(s, rm);
10970 gen_sub_CC(tmp, tmp, tmp2);
10971 tcg_temp_free_i32(tmp2);
10972 tcg_temp_free_i32(tmp);
10973 break;
10974 case 2: /* mov/cpy */
10975 tmp = load_reg(s, rm);
10976 store_reg(s, rd, tmp);
10977 break;
10978 case 3: /* branch [and link] exchange thumb register */
10979 tmp = load_reg(s, rm);
10980 if (insn & (1 << 7)) {
10981 ARCH(5);
10982 val = (uint32_t)s->pc | 1;
10983 tmp2 = tcg_temp_new_i32();
10984 tcg_gen_movi_i32(tmp2, val);
10985 store_reg(s, 14, tmp2);
10987 /* already thumb, no need to check */
10988 gen_bx(s, tmp);
10989 break;
10991 break;
10994 /* data processing register */
10995 rd = insn & 7;
10996 rm = (insn >> 3) & 7;
10997 op = (insn >> 6) & 0xf;
10998 if (op == 2 || op == 3 || op == 4 || op == 7) {
10999 /* the shift/rotate ops want the operands backwards */
11000 val = rm;
11001 rm = rd;
11002 rd = val;
11003 val = 1;
11004 } else {
11005 val = 0;
11008 if (op == 9) { /* neg */
11009 tmp = tcg_temp_new_i32();
11010 tcg_gen_movi_i32(tmp, 0);
11011 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11012 tmp = load_reg(s, rd);
11013 } else {
11014 TCGV_UNUSED_I32(tmp);
11017 tmp2 = load_reg(s, rm);
11018 switch (op) {
11019 case 0x0: /* and */
11020 tcg_gen_and_i32(tmp, tmp, tmp2);
11021 if (!s->condexec_mask)
11022 gen_logic_CC(tmp);
11023 break;
11024 case 0x1: /* eor */
11025 tcg_gen_xor_i32(tmp, tmp, tmp2);
11026 if (!s->condexec_mask)
11027 gen_logic_CC(tmp);
11028 break;
11029 case 0x2: /* lsl */
11030 if (s->condexec_mask) {
11031 gen_shl(tmp2, tmp2, tmp);
11032 } else {
11033 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
11034 gen_logic_CC(tmp2);
11036 break;
11037 case 0x3: /* lsr */
11038 if (s->condexec_mask) {
11039 gen_shr(tmp2, tmp2, tmp);
11040 } else {
11041 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
11042 gen_logic_CC(tmp2);
11044 break;
11045 case 0x4: /* asr */
11046 if (s->condexec_mask) {
11047 gen_sar(tmp2, tmp2, tmp);
11048 } else {
11049 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
11050 gen_logic_CC(tmp2);
11052 break;
11053 case 0x5: /* adc */
11054 if (s->condexec_mask) {
11055 gen_adc(tmp, tmp2);
11056 } else {
11057 gen_adc_CC(tmp, tmp, tmp2);
11059 break;
11060 case 0x6: /* sbc */
11061 if (s->condexec_mask) {
11062 gen_sub_carry(tmp, tmp, tmp2);
11063 } else {
11064 gen_sbc_CC(tmp, tmp, tmp2);
11066 break;
11067 case 0x7: /* ror */
11068 if (s->condexec_mask) {
11069 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11070 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
11071 } else {
11072 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
11073 gen_logic_CC(tmp2);
11075 break;
11076 case 0x8: /* tst */
11077 tcg_gen_and_i32(tmp, tmp, tmp2);
11078 gen_logic_CC(tmp);
11079 rd = 16;
11080 break;
11081 case 0x9: /* neg */
11082 if (s->condexec_mask)
11083 tcg_gen_neg_i32(tmp, tmp2);
11084 else
11085 gen_sub_CC(tmp, tmp, tmp2);
11086 break;
11087 case 0xa: /* cmp */
11088 gen_sub_CC(tmp, tmp, tmp2);
11089 rd = 16;
11090 break;
11091 case 0xb: /* cmn */
11092 gen_add_CC(tmp, tmp, tmp2);
11093 rd = 16;
11094 break;
11095 case 0xc: /* orr */
11096 tcg_gen_or_i32(tmp, tmp, tmp2);
11097 if (!s->condexec_mask)
11098 gen_logic_CC(tmp);
11099 break;
11100 case 0xd: /* mul */
11101 tcg_gen_mul_i32(tmp, tmp, tmp2);
11102 if (!s->condexec_mask)
11103 gen_logic_CC(tmp);
11104 break;
11105 case 0xe: /* bic */
11106 tcg_gen_andc_i32(tmp, tmp, tmp2);
11107 if (!s->condexec_mask)
11108 gen_logic_CC(tmp);
11109 break;
11110 case 0xf: /* mvn */
11111 tcg_gen_not_i32(tmp2, tmp2);
11112 if (!s->condexec_mask)
11113 gen_logic_CC(tmp2);
11114 val = 1;
11115 rm = rd;
11116 break;
11118 if (rd != 16) {
11119 if (val) {
11120 store_reg(s, rm, tmp2);
11121 if (op != 0xf)
11122 tcg_temp_free_i32(tmp);
11123 } else {
11124 store_reg(s, rd, tmp);
11125 tcg_temp_free_i32(tmp2);
11127 } else {
11128 tcg_temp_free_i32(tmp);
11129 tcg_temp_free_i32(tmp2);
11131 break;
11133 case 5:
11134 /* load/store register offset. */
11135 rd = insn & 7;
11136 rn = (insn >> 3) & 7;
11137 rm = (insn >> 6) & 7;
11138 op = (insn >> 9) & 7;
11139 addr = load_reg(s, rn);
11140 tmp = load_reg(s, rm);
11141 tcg_gen_add_i32(addr, addr, tmp);
11142 tcg_temp_free_i32(tmp);
11144 if (op < 3) { /* store */
11145 tmp = load_reg(s, rd);
11146 } else {
11147 tmp = tcg_temp_new_i32();
11150 switch (op) {
11151 case 0: /* str */
11152 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11153 break;
11154 case 1: /* strh */
11155 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
11156 break;
11157 case 2: /* strb */
11158 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
11159 break;
11160 case 3: /* ldrsb */
11161 gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
11162 break;
11163 case 4: /* ldr */
11164 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11165 break;
11166 case 5: /* ldrh */
11167 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
11168 break;
11169 case 6: /* ldrb */
11170 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
11171 break;
11172 case 7: /* ldrsh */
11173 gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
11174 break;
11176 if (op >= 3) { /* load */
11177 store_reg(s, rd, tmp);
11178 } else {
11179 tcg_temp_free_i32(tmp);
11181 tcg_temp_free_i32(addr);
11182 break;
11184 case 6:
11185 /* load/store word immediate offset */
11186 rd = insn & 7;
11187 rn = (insn >> 3) & 7;
11188 addr = load_reg(s, rn);
11189 val = (insn >> 4) & 0x7c;
11190 tcg_gen_addi_i32(addr, addr, val);
11192 if (insn & (1 << 11)) {
11193 /* load */
11194 tmp = tcg_temp_new_i32();
11195 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11196 store_reg(s, rd, tmp);
11197 } else {
11198 /* store */
11199 tmp = load_reg(s, rd);
11200 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11201 tcg_temp_free_i32(tmp);
11203 tcg_temp_free_i32(addr);
11204 break;
11206 case 7:
11207 /* load/store byte immediate offset */
11208 rd = insn & 7;
11209 rn = (insn >> 3) & 7;
11210 addr = load_reg(s, rn);
11211 val = (insn >> 6) & 0x1f;
11212 tcg_gen_addi_i32(addr, addr, val);
11214 if (insn & (1 << 11)) {
11215 /* load */
11216 tmp = tcg_temp_new_i32();
11217 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
11218 store_reg(s, rd, tmp);
11219 } else {
11220 /* store */
11221 tmp = load_reg(s, rd);
11222 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
11223 tcg_temp_free_i32(tmp);
11225 tcg_temp_free_i32(addr);
11226 break;
11228 case 8:
11229 /* load/store halfword immediate offset */
11230 rd = insn & 7;
11231 rn = (insn >> 3) & 7;
11232 addr = load_reg(s, rn);
11233 val = (insn >> 5) & 0x3e;
11234 tcg_gen_addi_i32(addr, addr, val);
11236 if (insn & (1 << 11)) {
11237 /* load */
11238 tmp = tcg_temp_new_i32();
11239 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
11240 store_reg(s, rd, tmp);
11241 } else {
11242 /* store */
11243 tmp = load_reg(s, rd);
11244 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
11245 tcg_temp_free_i32(tmp);
11247 tcg_temp_free_i32(addr);
11248 break;
11250 case 9:
11251 /* load/store from stack */
11252 rd = (insn >> 8) & 7;
11253 addr = load_reg(s, 13);
11254 val = (insn & 0xff) * 4;
11255 tcg_gen_addi_i32(addr, addr, val);
11257 if (insn & (1 << 11)) {
11258 /* load */
11259 tmp = tcg_temp_new_i32();
11260 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11261 store_reg(s, rd, tmp);
11262 } else {
11263 /* store */
11264 tmp = load_reg(s, rd);
11265 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11266 tcg_temp_free_i32(tmp);
11268 tcg_temp_free_i32(addr);
11269 break;
11271 case 10:
11272 /* add to high reg */
11273 rd = (insn >> 8) & 7;
11274 if (insn & (1 << 11)) {
11275 /* SP */
11276 tmp = load_reg(s, 13);
11277 } else {
11278 /* PC. bit 1 is ignored. */
11279 tmp = tcg_temp_new_i32();
11280 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
11282 val = (insn & 0xff) * 4;
11283 tcg_gen_addi_i32(tmp, tmp, val);
11284 store_reg(s, rd, tmp);
11285 break;
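/* Editor's note: illustrative sketch, not part of translate.c; names are
 * hypothetical.  The "add to high reg" value computed above (16-bit
 * ADD SP-relative / ADR): imm8*4 added to SP when bit 11 is set, otherwise
 * to the PC (insn_addr + 4) with bit 1 ignored. */
static inline uint32_t t16_add_hi_value(uint32_t insn_addr, uint32_t sp,
                                        uint32_t insn)
{
    uint32_t imm = (insn & 0xff) * 4;
    uint32_t base = (insn & (1 << 11)) ? sp
                                       : ((insn_addr + 4) & ~(uint32_t)2);
    return base + imm;
}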
11287 case 11:
11288 /* misc */
11289 op = (insn >> 8) & 0xf;
11290 switch (op) {
11291 case 0:
11292 /* adjust stack pointer */
11293 tmp = load_reg(s, 13);
11294 val = (insn & 0x7f) * 4;
11295 if (insn & (1 << 7))
11296 val = -(int32_t)val;
11297 tcg_gen_addi_i32(tmp, tmp, val);
11298 store_reg(s, 13, tmp);
11299 break;
11301 case 2: /* sign/zero extend. */
11302 ARCH(6);
11303 rd = insn & 7;
11304 rm = (insn >> 3) & 7;
11305 tmp = load_reg(s, rm);
11306 switch ((insn >> 6) & 3) {
11307 case 0: gen_sxth(tmp); break;
11308 case 1: gen_sxtb(tmp); break;
11309 case 2: gen_uxth(tmp); break;
11310 case 3: gen_uxtb(tmp); break;
11312 store_reg(s, rd, tmp);
11313 break;
11314 case 4: case 5: case 0xc: case 0xd:
11315 /* push/pop */
11316 addr = load_reg(s, 13);
11317 if (insn & (1 << 8))
11318 offset = 4;
11319 else
11320 offset = 0;
11321 for (i = 0; i < 8; i++) {
11322 if (insn & (1 << i))
11323 offset += 4;
11325 if ((insn & (1 << 11)) == 0) {
11326 tcg_gen_addi_i32(addr, addr, -offset);
11328 for (i = 0; i < 8; i++) {
11329 if (insn & (1 << i)) {
11330 if (insn & (1 << 11)) {
11331 /* pop */
11332 tmp = tcg_temp_new_i32();
11333 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11334 store_reg(s, i, tmp);
11335 } else {
11336 /* push */
11337 tmp = load_reg(s, i);
11338 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11339 tcg_temp_free_i32(tmp);
11341 /* advance to the next address. */
11342 tcg_gen_addi_i32(addr, addr, 4);
11345 TCGV_UNUSED_I32(tmp);
11346 if (insn & (1 << 8)) {
11347 if (insn & (1 << 11)) {
11348 /* pop pc */
11349 tmp = tcg_temp_new_i32();
11350 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11351 /* don't set the pc until the rest of the instruction
11352 has completed */
11353 } else {
11354 /* push lr */
11355 tmp = load_reg(s, 14);
11356 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11357 tcg_temp_free_i32(tmp);
11359 tcg_gen_addi_i32(addr, addr, 4);
11361 if ((insn & (1 << 11)) == 0) {
11362 tcg_gen_addi_i32(addr, addr, -offset);
11364 /* write back the new stack pointer */
11365 store_reg(s, 13, addr);
11366 /* set the new PC value */
11367 if ((insn & 0x0900) == 0x0900) {
11368 store_reg_from_load(s, 15, tmp);
11370 break;
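/* Editor's note: illustrative sketch, not part of translate.c; helper name
 * is hypothetical.  The total stack adjustment computed above for the 16-bit
 * PUSH/POP encodings: four bytes per low register in the list, plus four
 * more when bit 8 (LR for PUSH, PC for POP) is set. */
static inline int t16_push_pop_bytes(uint32_t insn)
{
    int i, bytes = (insn & (1 << 8)) ? 4 : 0;

    for (i = 0; i < 8; i++) {
        if (insn & (1 << i)) {
            bytes += 4;
        }
    }
    return bytes;
}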
11372 case 1: case 3: case 9: case 11: /* cbz/cbnz */
11373 rm = insn & 7;
11374 tmp = load_reg(s, rm);
11375 s->condlabel = gen_new_label();
11376 s->condjmp = 1;
11377 if (insn & (1 << 11))
11378 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
11379 else
11380 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
11381 tcg_temp_free_i32(tmp);
11382 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11383 val = (uint32_t)s->pc + 2;
11384 val += offset;
11385 gen_jmp(s, val);
11386 break;
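/* Editor's note: illustrative sketch, not part of translate.c; helper name
 * is hypothetical.  The CBZ/CBNZ branch target computed above: the offset is
 * the zero-extended field i:imm5:'0' taken from insn[9] and insn[7:3], added
 * to the PC (insn_addr + 4). */
static inline uint32_t t16_cbz_target(uint32_t insn_addr, uint32_t insn)
{
    uint32_t offset = ((insn & 0xf8) >> 2) | ((insn & 0x200) >> 3);
    return insn_addr + 4 + offset;
}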
11388 case 15: /* IT, nop-hint. */
11389 if ((insn & 0xf) == 0) {
11390 gen_nop_hint(s, (insn >> 4) & 0xf);
11391 break;
11393 /* If Then. */
11394 s->condexec_cond = (insn >> 4) & 0xe;
11395 s->condexec_mask = insn & 0x1f;
11396 /* No actual code generated for this insn, just set up state. */
11397 break;
11399 case 0xe: /* bkpt */
11401 int imm8 = extract32(insn, 0, 8);
11402 ARCH(5);
11403 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11404 default_exception_el(s));
11405 break;
11408 case 0xa: /* rev */
11409 ARCH(6);
11410 rn = (insn >> 3) & 0x7;
11411 rd = insn & 0x7;
11412 tmp = load_reg(s, rn);
11413 switch ((insn >> 6) & 3) {
11414 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
11415 case 1: gen_rev16(tmp); break;
11416 case 3: gen_revsh(tmp); break;
11417 default: goto illegal_op;
11419 store_reg(s, rd, tmp);
11420 break;
11422 case 6:
11423 switch ((insn >> 5) & 7) {
11424 case 2:
11425 /* setend */
11426 ARCH(6);
11427 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11428 gen_helper_setend(cpu_env);
11429 s->is_jmp = DISAS_UPDATE;
11431 break;
11432 case 3:
11433 /* cps */
11434 ARCH(6);
11435 if (IS_USER(s)) {
11436 break;
11438 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11439 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11440 /* FAULTMASK */
11441 if (insn & 1) {
11442 addr = tcg_const_i32(19);
11443 gen_helper_v7m_msr(cpu_env, addr, tmp);
11444 tcg_temp_free_i32(addr);
11446 /* PRIMASK */
11447 if (insn & 2) {
11448 addr = tcg_const_i32(16);
11449 gen_helper_v7m_msr(cpu_env, addr, tmp);
11450 tcg_temp_free_i32(addr);
11452 tcg_temp_free_i32(tmp);
11453 gen_lookup_tb(s);
11454 } else {
11455 if (insn & (1 << 4)) {
11456 shift = CPSR_A | CPSR_I | CPSR_F;
11457 } else {
11458 shift = 0;
11460 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
11462 break;
11463 default:
11464 goto undef;
11466 break;
11468 default:
11469 goto undef;
11471 break;
11473 case 12:
11475 /* load/store multiple */
11476 TCGv_i32 loaded_var;
11477 TCGV_UNUSED_I32(loaded_var);
11478 rn = (insn >> 8) & 0x7;
11479 addr = load_reg(s, rn);
11480 for (i = 0; i < 8; i++) {
11481 if (insn & (1 << i)) {
11482 if (insn & (1 << 11)) {
11483 /* load */
11484 tmp = tcg_temp_new_i32();
11485 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11486 if (i == rn) {
11487 loaded_var = tmp;
11488 } else {
11489 store_reg(s, i, tmp);
11491 } else {
11492 /* store */
11493 tmp = load_reg(s, i);
11494 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11495 tcg_temp_free_i32(tmp);
11497 /* advance to the next address */
11498 tcg_gen_addi_i32(addr, addr, 4);
11501 if ((insn & (1 << rn)) == 0) {
11502 /* base reg not in list: base register writeback */
11503 store_reg(s, rn, addr);
11504 } else {
11505 /* base reg in list: if load, complete it now */
11506 if (insn & (1 << 11)) {
11507 store_reg(s, rn, loaded_var);
11509 tcg_temp_free_i32(addr);
11511 break;
11513 case 13:
11514 /* conditional branch or swi */
11515 cond = (insn >> 8) & 0xf;
11516 if (cond == 0xe)
11517 goto undef;
11519 if (cond == 0xf) {
11520 /* swi */
11521 gen_set_pc_im(s, s->pc);
11522 s->svc_imm = extract32(insn, 0, 8);
11523 s->is_jmp = DISAS_SWI;
11524 break;
11526 /* generate a conditional jump to next instruction */
11527 s->condlabel = gen_new_label();
11528 arm_gen_test_cc(cond ^ 1, s->condlabel);
11529 s->condjmp = 1;
11531 /* jump to the offset */
11532 val = (uint32_t)s->pc + 2;
11533 offset = ((int32_t)insn << 24) >> 24;
11534 val += offset << 1;
11535 gen_jmp(s, val);
11536 break;
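/* Editor's note: illustrative sketch, not part of translate.c; names are
 * hypothetical.  The branch targets computed above (conditional, imm8) and
 * in case 14 below (unconditional, imm11): a sign-extended halfword offset
 * added to PC = insn_addr + 4. */
static inline uint32_t t16_bcond_target(uint32_t insn_addr, uint32_t insn)
{
    int32_t imm8 = (int32_t)(insn << 24) >> 24;    /* sign-extend insn[7:0] */
    return insn_addr + 4 + imm8 * 2;
}

static inline uint32_t t16_b_target(uint32_t insn_addr, uint32_t insn)
{
    int32_t imm11 = (int32_t)(insn << 21) >> 21;   /* sign-extend insn[10:0] */
    return insn_addr + 4 + imm11 * 2;
}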
11538 case 14:
11539 if (insn & (1 << 11)) {
11540 if (disas_thumb2_insn(env, s, insn))
11541 goto undef32;
11542 break;
11544 /* unconditional branch */
11545 val = (uint32_t)s->pc;
11546 offset = ((int32_t)insn << 21) >> 21;
11547 val += (offset << 1) + 2;
11548 gen_jmp(s, val);
11549 break;
11551 case 15:
11552 if (disas_thumb2_insn(env, s, insn))
11553 goto undef32;
11554 break;
11556 return;
11557 undef32:
11558 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11559 default_exception_el(s));
11560 return;
11561 illegal_op:
11562 undef:
11563 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11564 default_exception_el(s));
11567 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11569 /* Return true if the insn at dc->pc might cross a page boundary.
11570 * (False positives are OK, false negatives are not.)
11572 uint16_t insn;
11574 if ((s->pc & 3) == 0) {
11575 /* At a 4-aligned address we can't be crossing a page */
11576 return false;
11579 /* This must be a Thumb insn */
11580 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
11582 if ((insn >> 11) >= 0x1d) {
11583 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
11584 * first half of a 32-bit Thumb insn. Thumb-1 cores might
11585 * end up actually treating this as two 16-bit insns (see the
11586 * code at the start of disas_thumb2_insn()) but we don't bother
11587 * to check for that as it is unlikely, and false positives here
11588 * are harmless.
11590 return true;
11592 /* Definitely a 16-bit insn, can't be crossing a page. */
11593 return false;
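/* Editor's note: illustrative sketch, not part of translate.c; helper name
 * is hypothetical.  The halfword classification used above: in Thumb, a
 * first halfword whose top five bits are 0b11101, 0b11110 or 0b11111 starts
 * a 32-bit instruction; anything else is a complete 16-bit instruction. */
static inline bool thumb_first_halfword_is_32bit(uint16_t hw)
{
    return (hw >> 11) >= 0x1d;
}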
11596 /* generate intermediate code for basic block 'tb'. */
11597 void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
11599 ARMCPU *cpu = arm_env_get_cpu(env);
11600 CPUState *cs = CPU(cpu);
11601 DisasContext dc1, *dc = &dc1;
11602 target_ulong pc_start;
11603 target_ulong next_page_start;
11604 int num_insns;
11605 int max_insns;
11606 bool end_of_page;
11608 /* generate intermediate code */
11610 /* The A64 decoder has its own top level loop, because it doesn't need
11611 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
11613 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
11614 gen_intermediate_code_a64(cpu, tb);
11615 return;
11618 pc_start = tb->pc;
11620 dc->tb = tb;
11622 dc->is_jmp = DISAS_NEXT;
11623 dc->pc = pc_start;
11624 dc->singlestep_enabled = cs->singlestep_enabled;
11625 dc->condjmp = 0;
11627 dc->aarch64 = 0;
11628 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11629 * there is no secure EL1, so we route exceptions to EL3.
11631 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11632 !arm_el_is_aa64(env, 3);
11633 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
11634 dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags);
11635 dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
11636 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
11637 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
11638 dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
11639 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
11640 #if !defined(CONFIG_USER_ONLY)
11641 dc->user = (dc->current_el == 0);
11642 #endif
11643 dc->ns = ARM_TBFLAG_NS(tb->flags);
11644 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
11645 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
11646 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
11647 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
11648 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
11649 dc->cp_regs = cpu->cp_regs;
11650 dc->features = env->features;
11652 /* Single step state. The code-generation logic here is:
11653 * SS_ACTIVE == 0:
11654 * generate code with no special handling for single-stepping (except
11655 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11656 * this happens anyway because those changes are all system register or
11657 * PSTATE writes).
11658 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11659 * emit code for one insn
11660 * emit code to clear PSTATE.SS
11661 * emit code to generate software step exception for completed step
11662 * end TB (as usual for having generated an exception)
11663 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11664 * emit code to generate a software step exception
11665 * end the TB
11667 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
11668 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
11669 dc->is_ldex = false;
11670 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
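/* Editor's note: illustrative sketch, not part of translate.c; enum and
 * helper names are hypothetical.  The three single-step cases described in
 * the comment above, reduced to a decision helper. */
typedef enum {
    STEP_NONE,          /* SS_ACTIVE == 0: translate normally */
    STEP_NOT_PENDING,   /* SS_ACTIVE == 1, PSTATE.SS == 1: one insn, then
                         * clear SS and raise the step exception */
    STEP_PENDING        /* SS_ACTIVE == 1, PSTATE.SS == 0: raise the step
                         * exception before executing anything */
} step_kind;

static inline step_kind step_state(bool ss_active, bool pstate_ss)
{
    if (!ss_active) {
        return STEP_NONE;
    }
    return pstate_ss ? STEP_NOT_PENDING : STEP_PENDING;
}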
11672 cpu_F0s = tcg_temp_new_i32();
11673 cpu_F1s = tcg_temp_new_i32();
11674 cpu_F0d = tcg_temp_new_i64();
11675 cpu_F1d = tcg_temp_new_i64();
11676 cpu_V0 = cpu_F0d;
11677 cpu_V1 = cpu_F1d;
11678 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
11679 cpu_M0 = tcg_temp_new_i64();
11680 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
11681 num_insns = 0;
11682 max_insns = tb->cflags & CF_COUNT_MASK;
11683 if (max_insns == 0) {
11684 max_insns = CF_COUNT_MASK;
11686 if (max_insns > TCG_MAX_INSNS) {
11687 max_insns = TCG_MAX_INSNS;
11690 gen_tb_start(tb);
11692 tcg_clear_temp_count();
11694 /* A note on handling of the condexec (IT) bits:
11696 * We want to avoid the overhead of having to write the updated condexec
11697 * bits back to the CPUARMState for every instruction in an IT block. So:
11698 * (1) if the condexec bits are not already zero then we write
11699 * zero back into the CPUARMState now. This avoids complications trying
11700 * to do it at the end of the block. (For example if we don't do this
11701 * it's hard to identify whether we can safely skip writing condexec
11702 * at the end of the TB, which we definitely want to do for the case
11703 * where a TB doesn't do anything with the IT state at all.)
11704 * (2) if we are going to leave the TB then we call gen_set_condexec()
11705 * which will write the correct value into CPUARMState if zero is wrong.
11706 * This is done both for leaving the TB at the end, and for leaving
11707 * it because of an exception we know will happen, which is done in
11708 * gen_exception_insn(). The latter is necessary because we need to
11709 * leave the TB with the PC/IT state just prior to execution of the
11710 * instruction which caused the exception.
11711 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
11712 * then the CPUARMState will be wrong and we need to reset it.
11713 * This is handled in the same way as restoration of the
11714 * PC in these situations; we save the value of the condexec bits
11715 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
11716 * then uses this to restore them after an exception.
11718 * Note that there are no instructions which can read the condexec
11719 * bits, and none which can write non-static values to them, so
11720 * we don't need to care about whether CPUARMState is correct in the
11721 * middle of a TB.
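/* Editor's note: illustrative sketch, not part of translate.c; struct and
 * helper names are hypothetical.  How the split condexec state is advanced
 * per instruction further down in the translation loop: the next then/else
 * bit sits in bit 4 of the mask and is shifted into bit 0 of the condition
 * after each instruction (including the IT instruction itself). */
struct it_state {
    uint32_t cond;      /* corresponds to dc->condexec_cond */
    uint32_t mask;      /* corresponds to dc->condexec_mask */
};

static inline void it_advance(struct it_state *it)
{
    if (it->mask) {
        /* Shift the next then/else bit into bit 0 of the condition. */
        it->cond = (it->cond & 0xe) | ((it->mask >> 4) & 1);
        it->mask = (it->mask << 1) & 0x1f;
        if (it->mask == 0) {
            it->cond = 0;           /* IT block finished */
        }
    }
}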
11724 /* Reset the conditional execution bits immediately. This avoids
11725 complications trying to do it at the end of the block. */
11726 if (dc->condexec_mask || dc->condexec_cond)
11728 TCGv_i32 tmp = tcg_temp_new_i32();
11729 tcg_gen_movi_i32(tmp, 0);
11730 store_cpu_field(tmp, condexec_bits);
11732 do {
11733 tcg_gen_insn_start(dc->pc,
11734 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1));
11735 num_insns++;
11737 #ifdef CONFIG_USER_ONLY
11738 /* Intercept jump to the magic kernel page. */
11739 if (dc->pc >= 0xffff0000) {
11740 /* We always get here via a jump, so we know we are not in a
11741 conditional execution block. */
11742 gen_exception_internal(EXCP_KERNEL_TRAP);
11743 dc->is_jmp = DISAS_EXC;
11744 break;
11746 #else
11747 if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
11748 /* We always get here via a jump, so we know we are not in a
11749 conditional execution block. */
11750 gen_exception_internal(EXCP_EXCEPTION_EXIT);
11751 dc->is_jmp = DISAS_EXC;
11752 break;
11754 #endif
11756 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
11757 CPUBreakpoint *bp;
11758 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
11759 if (bp->pc == dc->pc) {
11760 if (bp->flags & BP_CPU) {
11761 gen_set_condexec(dc);
11762 gen_set_pc_im(dc, dc->pc);
11763 gen_helper_check_breakpoints(cpu_env);
11764 /* End the TB early; it's likely not going to be executed */
11765 dc->is_jmp = DISAS_UPDATE;
11766 } else {
11767 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
11768 /* The address covered by the breakpoint must be
11769 included in [tb->pc, tb->pc + tb->size) in order
11770 for it to be properly cleared -- thus we
11771 increment the PC here so that the logic setting
11772 tb->size below does the right thing. */
11773 /* TODO: Advance PC by correct instruction length to
11774 * avoid disassembler error messages */
11775 dc->pc += 2;
11776 goto done_generating;
11778 break;
11783 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
11784 gen_io_start();
11787 if (dc->ss_active && !dc->pstate_ss) {
11788 /* Singlestep state is Active-pending.
11789 * If we're in this state at the start of a TB then either
11790 * a) we just took an exception to an EL which is being debugged
11791 * and this is the first insn in the exception handler
11792 * b) debug exceptions were masked and we just unmasked them
11793 * without changing EL (eg by clearing PSTATE.D)
11794 * In either case we're going to take a swstep exception in the
11795 * "did not step an insn" case, and so the syndrome ISV and EX
11796 * bits should be zero.
11798 assert(num_insns == 1);
11799 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
11800 default_exception_el(dc));
11801 goto done_generating;
11804 if (dc->thumb) {
11805 disas_thumb_insn(env, dc);
11806 if (dc->condexec_mask) {
11807 dc->condexec_cond = (dc->condexec_cond & 0xe)
11808 | ((dc->condexec_mask >> 4) & 1);
11809 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
11810 if (dc->condexec_mask == 0) {
11811 dc->condexec_cond = 0;
11814 } else {
11815 unsigned int insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
11816 dc->pc += 4;
11817 disas_arm_insn(dc, insn);
11820 if (dc->condjmp && !dc->is_jmp) {
11821 gen_set_label(dc->condlabel);
11822 dc->condjmp = 0;
11825 if (tcg_check_temp_count()) {
11826 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
11827 dc->pc);
11830 /* Translation stops when a conditional branch is encountered.
11831 * Otherwise the subsequent code could get translated several times.
11832 * Also stop translation when a page boundary is reached. This
11833 * ensures prefetch aborts occur at the right place. */
11835 /* We want to stop the TB if the next insn starts in a new page,
11836 * or if it spans between this page and the next. This means that
11837 * if we're looking at the last halfword in the page we need to
11838 * see if it's a 16-bit Thumb insn (which will fit in this TB)
11839 * or a 32-bit Thumb insn (which won't).
11840 * This is to avoid generating a silly TB with a single 16-bit insn
11841 * in it at the end of this page (which would execute correctly
11842 * but isn't very efficient).
11844 end_of_page = (dc->pc >= next_page_start) ||
11845 ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));
11847 } while (!dc->is_jmp && !tcg_op_buf_full() &&
11848 !cs->singlestep_enabled &&
11849 !singlestep &&
11850 !dc->ss_active &&
11851 !end_of_page &&
11852 num_insns < max_insns);
11854 if (tb->cflags & CF_LAST_IO) {
11855 if (dc->condjmp) {
11856 /* FIXME: This can theoretically happen with self-modifying
11857 code. */
11858 cpu_abort(cs, "IO on conditional branch instruction");
11860 gen_io_end();
11863 /* At this stage dc->condjmp will only be set when the skipped
11864 instruction was a conditional branch or trap, and the PC has
11865 already been written. */
11866 if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
11867 /* Unconditional and "condition passed" instruction codepath. */
11868 gen_set_condexec(dc);
11869 switch (dc->is_jmp) {
11870 case DISAS_SWI:
11871 gen_ss_advance(dc);
11872 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11873 default_exception_el(dc));
11874 break;
11875 case DISAS_HVC:
11876 gen_ss_advance(dc);
11877 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
11878 break;
11879 case DISAS_SMC:
11880 gen_ss_advance(dc);
11881 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
11882 break;
11883 case DISAS_NEXT:
11884 case DISAS_UPDATE:
11885 gen_set_pc_im(dc, dc->pc);
11886 /* fall through */
11887 default:
11888 if (dc->ss_active) {
11889 gen_step_complete_exception(dc);
11890 } else {
11891 /* FIXME: Single stepping a WFI insn will not halt
11892 the CPU. */
11893 gen_exception_internal(EXCP_DEBUG);
11896 if (dc->condjmp) {
11897 /* "Condition failed" instruction codepath. */
11898 gen_set_label(dc->condlabel);
11899 gen_set_condexec(dc);
11900 gen_set_pc_im(dc, dc->pc);
11901 if (dc->ss_active) {
11902 gen_step_complete_exception(dc);
11903 } else {
11904 gen_exception_internal(EXCP_DEBUG);
11907 } else {
11908 /* While branches must always occur at the end of an IT block,
11909 there are a few other things that can cause us to terminate
11910 the TB in the middle of an IT block:
11911 - Exception generating instructions (bkpt, swi, undefined).
11912 - Page boundaries.
11913 - Hardware watchpoints.
11914 Hardware breakpoints have already been handled and skip this code.
11916 gen_set_condexec(dc);
11917 switch (dc->is_jmp) {
11918 case DISAS_NEXT:
11919 gen_goto_tb(dc, 1, dc->pc);
11920 break;
11921 case DISAS_UPDATE:
11922 gen_set_pc_im(dc, dc->pc);
11923 /* fall through */
11924 case DISAS_JUMP:
11925 default:
11926 /* indicate that the hash table must be used to find the next TB */
11927 tcg_gen_exit_tb(0);
11928 break;
11929 case DISAS_TB_JUMP:
11930 /* nothing more to generate */
11931 break;
11932 case DISAS_WFI:
11933 gen_helper_wfi(cpu_env);
11934 /* The helper doesn't necessarily throw an exception, but we
11935 * must go back to the main loop to check for interrupts anyway.
11937 tcg_gen_exit_tb(0);
11938 break;
11939 case DISAS_WFE:
11940 gen_helper_wfe(cpu_env);
11941 break;
11942 case DISAS_YIELD:
11943 gen_helper_yield(cpu_env);
11944 break;
11945 case DISAS_SWI:
11946 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11947 default_exception_el(dc));
11948 break;
11949 case DISAS_HVC:
11950 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
11951 break;
11952 case DISAS_SMC:
11953 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
11954 break;
11956 if (dc->condjmp) {
11957 gen_set_label(dc->condlabel);
11958 gen_set_condexec(dc);
11959 gen_goto_tb(dc, 1, dc->pc);
11960 dc->condjmp = 0;
11964 done_generating:
11965 gen_tb_end(tb, num_insns);
11967 #ifdef DEBUG_DISAS
11968 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
11969 qemu_log_in_addr_range(pc_start)) {
11970 qemu_log("----------------\n");
11971 qemu_log("IN: %s\n", lookup_symbol(pc_start));
11972 log_target_disas(cs, pc_start, dc->pc - pc_start,
11973 dc->thumb | (dc->sctlr_b << 1));
11974 qemu_log("\n");
11976 #endif
11977 tb->size = dc->pc - pc_start;
11978 tb->icount = num_insns;
11981 static const char *cpu_mode_names[16] = {
11982 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
11983 "???", "???", "hyp", "und", "???", "???", "???", "sys"
11986 void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
11987 int flags)
11989 ARMCPU *cpu = ARM_CPU(cs);
11990 CPUARMState *env = &cpu->env;
11991 int i;
11992 uint32_t psr;
11993 const char *ns_status;
11995 if (is_a64(env)) {
11996 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
11997 return;
12000 for (i = 0; i < 16; i++) {
12001 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
12002 if ((i % 4) == 3)
12003 cpu_fprintf(f, "\n");
12004 else
12005 cpu_fprintf(f, " ");
12007 psr = cpsr_read(env);
12009 if (arm_feature(env, ARM_FEATURE_EL3) &&
12010 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
12011 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
12012 } else {
12013 ns_status = "";
12016 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
12017 psr,
12018 psr & (1 << 31) ? 'N' : '-',
12019 psr & (1 << 30) ? 'Z' : '-',
12020 psr & (1 << 29) ? 'C' : '-',
12021 psr & (1 << 28) ? 'V' : '-',
12022 psr & CPSR_T ? 'T' : 'A',
12023 ns_status,
12024 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
12026 if (flags & CPU_DUMP_FPU) {
12027 int numvfpregs = 0;
12028 if (arm_feature(env, ARM_FEATURE_VFP)) {
12029 numvfpregs += 16;
12031 if (arm_feature(env, ARM_FEATURE_VFP3)) {
12032 numvfpregs += 16;
12034 for (i = 0; i < numvfpregs; i++) {
12035 uint64_t v = float64_val(env->vfp.regs[i]);
12036 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
12037 i * 2, (uint32_t)v,
12038 i * 2 + 1, (uint32_t)(v >> 32),
12039 i, v);
12041 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
12045 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12046 target_ulong *data)
12048 if (is_a64(env)) {
12049 env->pc = data[0];
12050 env->condexec_bits = 0;
12051 } else {
12052 env->regs[15] = data[0];
12053 env->condexec_bits = data[1];