atomic: introduce smp_mb_acquire and smp_mb_release
[qemu.git] / target-arm / translate.c
blob164b52a0d0683c46e56fc288ec353200c021f8c2
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
27 #include "tcg-op.h"
28 #include "qemu/log.h"
29 #include "qemu/bitops.h"
30 #include "arm_ldst.h"
32 #include "exec/helper-proto.h"
33 #include "exec/helper-gen.h"
35 #include "trace-tcg.h"
36 #include "exec/log.h"
/* Architecture-feature convenience tests.  Each expands to a query on the
 * DisasContext's cached feature bits; 's' must be in scope at the use site. */
#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0 /* Jazelle is never implemented here */
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

/* Bail out to the 'illegal_op' label of the enclosing decoder if the
 * required architecture level is not present. */
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
TCGv_env cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
/* One TCG global per AArch32 core register; index 15 is the PC. */
static TCGv_i32 cpu_R[16];
/* Condition flags, kept in separate 32-bit globals (see gen_*_CC). */
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
/* Exclusive-monitor state for LDREX/STREX emulation. */
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;
#ifdef CONFIG_USER_ONLY
TCGv_i64 cpu_exclusive_test;
TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME: These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

/* Debug names for the 16 core registers, indexed like cpu_R[]. */
static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
82 /* initialize TCG globals. */
83 void arm_translate_init(void)
85 int i;
87 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
88 tcg_ctx.tcg_env = cpu_env;
90 for (i = 0; i < 16; i++) {
91 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
92 offsetof(CPUARMState, regs[i]),
93 regnames[i]);
95 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
96 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
97 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
98 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
100 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
101 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
102 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
103 offsetof(CPUARMState, exclusive_val), "exclusive_val");
104 #ifdef CONFIG_USER_ONLY
105 cpu_exclusive_test = tcg_global_mem_new_i64(cpu_env,
106 offsetof(CPUARMState, exclusive_test), "exclusive_test");
107 cpu_exclusive_info = tcg_global_mem_new_i32(cpu_env,
108 offsetof(CPUARMState, exclusive_info), "exclusive_info");
109 #endif
111 a64_translate_init();
static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
{
    /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return ARMMMUIdx_S12NSE0;
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return ARMMMUIdx_S1SE0;
    case ARMMMUIdx_S2NS:        /* stage-2 index never appears as s->mmu_idx */
    default:
        g_assert_not_reached();
    }
}
136 static inline TCGv_i32 load_cpu_offset(int offset)
138 TCGv_i32 tmp = tcg_temp_new_i32();
139 tcg_gen_ld_i32(tmp, cpu_env, offset);
140 return tmp;
143 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
145 static inline void store_cpu_offset(TCGv_i32 var, int offset)
147 tcg_gen_st_i32(var, cpu_env, offset);
148 tcg_temp_free_i32(var);
151 #define store_cpu_field(var, name) \
152 store_cpu_offset(var, offsetof(CPUARMState, name))
154 /* Set a variable to the value of a CPU register. */
155 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
157 if (reg == 15) {
158 uint32_t addr;
159 /* normally, since we updated PC, we need only to add one insn */
160 if (s->thumb)
161 addr = (long)s->pc + 2;
162 else
163 addr = (long)s->pc + 4;
164 tcg_gen_movi_i32(var, addr);
165 } else {
166 tcg_gen_mov_i32(var, cpu_R[reg]);
170 /* Create a new temporary and set it to the value of a CPU register. */
171 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
173 TCGv_i32 tmp = tcg_temp_new_i32();
174 load_reg_var(s, tmp, reg);
175 return tmp;
178 /* Set a CPU register. The source must be a temporary and will be
179 marked as dead. */
180 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
182 if (reg == 15) {
183 /* In Thumb mode, we must ignore bit 0.
184 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
185 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
186 * We choose to ignore [1:0] in ARM mode for all architecture versions.
188 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
189 s->is_jmp = DISAS_JUMP;
191 tcg_gen_mov_i32(cpu_R[reg], var);
192 tcg_temp_free_i32(var);
/* Value extensions: in-place zero/sign extension of the low byte/halfword. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
/* Dual 8->16 extensions go through helpers. */
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

/* Write 'var' to the CPSR fields selected by 'mask' via the cpsr_write
 * helper; 'var' itself is not consumed. */
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
214 static void gen_exception_internal(int excp)
216 TCGv_i32 tcg_excp = tcg_const_i32(excp);
218 assert(excp_is_internal(excp));
219 gen_helper_exception_internal(cpu_env, tcg_excp);
220 tcg_temp_free_i32(tcg_excp);
223 static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
225 TCGv_i32 tcg_excp = tcg_const_i32(excp);
226 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
227 TCGv_i32 tcg_el = tcg_const_i32(target_el);
229 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
230 tcg_syn, tcg_el);
232 tcg_temp_free_i32(tcg_el);
233 tcg_temp_free_i32(tcg_syn);
234 tcg_temp_free_i32(tcg_excp);
237 static void gen_ss_advance(DisasContext *s)
239 /* If the singlestep state is Active-not-pending, advance to
240 * Active-pending.
242 if (s->ss_active) {
243 s->pstate_ss = 0;
244 gen_helper_clear_pstate_ss(cpu_env);
248 static void gen_step_complete_exception(DisasContext *s)
250 /* We just completed step of an insn. Move from Active-not-pending
251 * to Active-pending, and then also take the swstep exception.
252 * This corresponds to making the (IMPDEF) choice to prioritize
253 * swstep exceptions over asynchronous exceptions taken to an exception
254 * level where debug is disabled. This choice has the advantage that
255 * we do not need to maintain internal state corresponding to the
256 * ISV/EX syndrome bits between completion of the step and generation
257 * of the exception, and our syndrome information is always correct.
259 gen_ss_advance(s);
260 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
261 default_exception_el(s));
262 s->is_jmp = DISAS_EXC;
265 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
267 TCGv_i32 tmp1 = tcg_temp_new_i32();
268 TCGv_i32 tmp2 = tcg_temp_new_i32();
269 tcg_gen_ext16s_i32(tmp1, a);
270 tcg_gen_ext16s_i32(tmp2, b);
271 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
272 tcg_temp_free_i32(tmp2);
273 tcg_gen_sari_i32(a, a, 16);
274 tcg_gen_sari_i32(b, b, 16);
275 tcg_gen_mul_i32(b, b, a);
276 tcg_gen_mov_i32(a, tmp1);
277 tcg_temp_free_i32(tmp1);
280 /* Byteswap each halfword. */
281 static void gen_rev16(TCGv_i32 var)
283 TCGv_i32 tmp = tcg_temp_new_i32();
284 tcg_gen_shri_i32(tmp, var, 8);
285 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
286 tcg_gen_shli_i32(var, var, 8);
287 tcg_gen_andi_i32(var, var, 0xff00ff00);
288 tcg_gen_or_i32(var, var, tmp);
289 tcg_temp_free_i32(tmp);
292 /* Byteswap low halfword and sign extend. */
293 static void gen_revsh(TCGv_i32 var)
295 tcg_gen_ext16u_i32(var, var);
296 tcg_gen_bswap16_i32(var, var);
297 tcg_gen_ext16s_i32(var, var);
300 /* Unsigned bitfield extract. */
301 static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
303 if (shift)
304 tcg_gen_shri_i32(var, var, shift);
305 tcg_gen_andi_i32(var, var, mask);
308 /* Signed bitfield extract. */
309 static void gen_sbfx(TCGv_i32 var, int shift, int width)
311 uint32_t signbit;
313 if (shift)
314 tcg_gen_sari_i32(var, var, shift);
315 if (shift + width < 32) {
316 signbit = 1u << (width - 1);
317 tcg_gen_andi_i32(var, var, (1u << width) - 1);
318 tcg_gen_xori_i32(var, var, signbit);
319 tcg_gen_subi_i32(var, var, signbit);
323 /* Return (b << 32) + a. Mark inputs as dead */
324 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
326 TCGv_i64 tmp64 = tcg_temp_new_i64();
328 tcg_gen_extu_i32_i64(tmp64, b);
329 tcg_temp_free_i32(b);
330 tcg_gen_shli_i64(tmp64, tmp64, 32);
331 tcg_gen_add_i64(a, tmp64, a);
333 tcg_temp_free_i64(tmp64);
334 return a;
337 /* Return (b << 32) - a. Mark inputs as dead. */
338 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
340 TCGv_i64 tmp64 = tcg_temp_new_i64();
342 tcg_gen_extu_i32_i64(tmp64, b);
343 tcg_temp_free_i32(b);
344 tcg_gen_shli_i64(tmp64, tmp64, 32);
345 tcg_gen_sub_i64(a, tmp64, a);
347 tcg_temp_free_i64(tmp64);
348 return a;
351 /* 32x32->64 multiply. Marks inputs as dead. */
352 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
354 TCGv_i32 lo = tcg_temp_new_i32();
355 TCGv_i32 hi = tcg_temp_new_i32();
356 TCGv_i64 ret;
358 tcg_gen_mulu2_i32(lo, hi, a, b);
359 tcg_temp_free_i32(a);
360 tcg_temp_free_i32(b);
362 ret = tcg_temp_new_i64();
363 tcg_gen_concat_i32_i64(ret, lo, hi);
364 tcg_temp_free_i32(lo);
365 tcg_temp_free_i32(hi);
367 return ret;
370 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
372 TCGv_i32 lo = tcg_temp_new_i32();
373 TCGv_i32 hi = tcg_temp_new_i32();
374 TCGv_i64 ret;
376 tcg_gen_muls2_i32(lo, hi, a, b);
377 tcg_temp_free_i32(a);
378 tcg_temp_free_i32(b);
380 ret = tcg_temp_new_i64();
381 tcg_gen_concat_i32_i64(ret, lo, hi);
382 tcg_temp_free_i32(lo);
383 tcg_temp_free_i32(hi);
385 return ret;
388 /* Swap low and high halfwords. */
389 static void gen_swap_half(TCGv_i32 var)
391 TCGv_i32 tmp = tcg_temp_new_i32();
392 tcg_gen_shri_i32(tmp, var, 16);
393 tcg_gen_shli_i32(var, var, 16);
394 tcg_gen_or_i32(var, var, tmp);
395 tcg_temp_free_i32(tmp);
398 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
399 tmp = (t0 ^ t1) & 0x8000;
400 t0 &= ~0x8000;
401 t1 &= ~0x8000;
402 t0 = (t0 + t1) ^ tmp;
405 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
407 TCGv_i32 tmp = tcg_temp_new_i32();
408 tcg_gen_xor_i32(tmp, t0, t1);
409 tcg_gen_andi_i32(tmp, tmp, 0x8000);
410 tcg_gen_andi_i32(t0, t0, ~0x8000);
411 tcg_gen_andi_i32(t1, t1, ~0x8000);
412 tcg_gen_add_i32(t0, t0, t1);
413 tcg_gen_xor_i32(t0, t0, tmp);
414 tcg_temp_free_i32(tmp);
415 tcg_temp_free_i32(t1);
418 /* Set CF to the top bit of var. */
419 static void gen_set_CF_bit31(TCGv_i32 var)
421 tcg_gen_shri_i32(cpu_CF, var, 31);
424 /* Set N and Z flags from var. */
425 static inline void gen_logic_CC(TCGv_i32 var)
427 tcg_gen_mov_i32(cpu_NF, var);
428 tcg_gen_mov_i32(cpu_ZF, var);
431 /* T0 += T1 + CF. */
432 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
434 tcg_gen_add_i32(t0, t0, t1);
435 tcg_gen_add_i32(t0, t0, cpu_CF);
438 /* dest = T0 + T1 + CF. */
439 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
441 tcg_gen_add_i32(dest, t0, t1);
442 tcg_gen_add_i32(dest, dest, cpu_CF);
445 /* dest = T0 - T1 + CF - 1. */
446 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
448 tcg_gen_sub_i32(dest, t0, t1);
449 tcg_gen_add_i32(dest, dest, cpu_CF);
450 tcg_gen_subi_i32(dest, dest, 1);
/* dest = T0 + T1.  Compute C, N, V and Z flags.  */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    /* Double-word add of (t0,0)+(t1,0): the high result word is the
     * carry out of the 32-bit sum.  */
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V = (result ^ t0) & ~(t0 ^ t1): overflow iff the operands agreed
     * in sign but the result differs.  */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    /* dest written last so it may alias t0 or t1.  */
    tcg_gen_mov_i32(dest, cpu_NF);
}
/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags.  */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        /* Two chained double-word adds: t0 + CF first, then + t1,
         * accumulating the carry in cpu_CF.  */
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        /* Fallback: widen to 64 bits, add everything, then split the
         * result into NF (low) and CF (high/carry).  */
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* Same signed-overflow formula as gen_add_CC.  */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    /* dest written last so it may alias t0 or t1.  */
    tcg_gen_mov_i32(dest, cpu_NF);
}
/* dest = T0 - T1.  Compute C, N, V and Z flags.  */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* ARM subtraction carry is "no borrow": C = (t0 >= t1) unsigned.  */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    /* V = (result ^ t0) & (t0 ^ t1): overflow iff the operands differed
     * in sign and the result's sign differs from t0.  */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    /* dest written last so it may alias t0 or t1.  */
    tcg_gen_mov_i32(dest, cpu_NF);
}
510 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
511 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
513 TCGv_i32 tmp = tcg_temp_new_i32();
514 tcg_gen_not_i32(tmp, t1);
515 gen_adc_CC(dest, t0, tmp);
516 tcg_temp_free_i32(tmp);
/* Variable-count logical shifts with ARM semantics: only the low byte of
 * the count is used, and any count > 31 yields zero (selected via the
 * movcond before the count is masked to 5 bits).  */
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)       \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
538 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
540 TCGv_i32 tmp1, tmp2;
541 tmp1 = tcg_temp_new_i32();
542 tcg_gen_andi_i32(tmp1, t1, 0xff);
543 tmp2 = tcg_const_i32(0x1f);
544 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
545 tcg_temp_free_i32(tmp2);
546 tcg_gen_sar_i32(dest, t0, tmp1);
547 tcg_temp_free_i32(tmp1);
550 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
552 TCGv_i32 c0 = tcg_const_i32(0);
553 TCGv_i32 tmp = tcg_temp_new_i32();
554 tcg_gen_neg_i32(tmp, src);
555 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
556 tcg_temp_free_i32(c0);
557 tcg_temp_free_i32(tmp);
560 static void shifter_out_im(TCGv_i32 var, int shift)
562 if (shift == 0) {
563 tcg_gen_andi_i32(cpu_CF, var, 1);
564 } else {
565 tcg_gen_shri_i32(cpu_CF, var, shift);
566 if (shift != 31) {
567 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
/* Shift by immediate.  Includes special handling for shift == 0,
 * where the ARM encodings mean LSR/ASR #32 and RRX respectively.
 * If 'flags' is set, CF is updated with the shifter carry-out. */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            /* encoding for LSR #32: result 0, CF = old bit 31 */
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;   /* encoding for ASR #32 */
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;   /* sari by 31 gives the same result as by 32 */
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            /* shift == 0 encodes RRX: rotate right through carry */
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}
/* Shift by register.  With 'flags' set, dispatch to helpers that also
 * compute CF; otherwise use the local gen_shl/shr/sar generators.
 * 'shift' is consumed (freed) in all cases. */
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: /* rotate: only the low 5 bits of the count matter */
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
/* Expand to the parallel add/sub helper call selected by op2, with the
 * given saturation/halving prefix.  gen_pas_helper is (re)defined below
 * to supply the right argument list for each helper family. */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
/* op1 selects the operation family (signed/unsigned x plain/q/sh/uq/uh);
 * op2 selects the operation within the family (see PAS_OP above). */
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
    /* The s/u families additionally write the GE flags via 'tmp'. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
/* Same structure as the ARM variant above, but Thumb-2 swaps the roles
 * of op1 (operation within family) and op2 (family selector). */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
    /* The s/u families additionally write the GE flags via 'tmp'. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 * Fills in *cmp; if cmp->value_global is false the value is a local
 * temporary that the caller must release with arm_free_cc().
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    /* Odd condition codes are the negation of the preceding even one. */
    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}
833 void arm_free_cc(DisasCompare *cmp)
835 if (!cmp->value_global) {
836 tcg_temp_free_i32(cmp->value);
840 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
842 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
845 void arm_gen_test_cc(int cc, TCGLabel *label)
847 DisasCompare cmp;
848 arm_test_cc(&cmp, cc);
849 arm_jump_cc(&cmp, label);
850 arm_free_cc(&cmp);
/* Indexed by data-processing opcode: 1 if the S-bit form sets only the
 * N/Z (logic) flags, 0 if it computes full arithmetic flags. */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
872 /* Set PC and Thumb state from an immediate address. */
873 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
875 TCGv_i32 tmp;
877 s->is_jmp = DISAS_JUMP;
878 if (s->thumb != (addr & 1)) {
879 tmp = tcg_temp_new_i32();
880 tcg_gen_movi_i32(tmp, addr & 1);
881 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
882 tcg_temp_free_i32(tmp);
884 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
887 /* Set PC and Thumb state from var. var is marked as dead. */
888 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
890 s->is_jmp = DISAS_JUMP;
891 tcg_gen_andi_i32(cpu_R[15], var, ~1);
892 tcg_gen_andi_i32(var, var, 1);
893 store_cpu_field(var, thumb);
896 /* Variant of store_reg which uses branch&exchange logic when storing
897 to r15 in ARM architecture v7 and above. The source must be a temporary
898 and will be marked as dead. */
899 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
901 if (reg == 15 && ENABLE_ARCH_7) {
902 gen_bx(s, var);
903 } else {
904 store_reg(s, reg, var);
908 /* Variant of store_reg which uses branch&exchange logic when storing
909 * to r15 in ARM architecture v5T and above. This is used for storing
910 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
911 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
912 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
914 if (reg == 15 && ENABLE_ARCH_5) {
915 gen_bx(s, var);
916 } else {
917 store_reg(s, reg, var);
#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */
#if TARGET_LONG_BITS == 32

/* BE32_XOR is the address xor needed for legacy BE32 byte/halfword
 * accesses (3 for bytes, 2 for halfwords, 0 for words). */
#define DO_GEN_LD(SUFF, OPC, BE32_XOR)                                   \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 addr, int index)           \
{                                                                        \
    TCGMemOp opc = (OPC) | s->be_data;                                   \
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */    \
    if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) {                       \
        TCGv addr_be = tcg_temp_new();                                   \
        tcg_gen_xori_i32(addr_be, addr, BE32_XOR);                       \
        tcg_gen_qemu_ld_i32(val, addr_be, index, opc);                   \
        tcg_temp_free(addr_be);                                          \
        return;                                                          \
    }                                                                    \
    tcg_gen_qemu_ld_i32(val, addr, index, opc);                          \
}

#define DO_GEN_ST(SUFF, OPC, BE32_XOR)                                   \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 addr, int index)           \
{                                                                        \
    TCGMemOp opc = (OPC) | s->be_data;                                   \
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */    \
    if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) {                       \
        TCGv addr_be = tcg_temp_new();                                   \
        tcg_gen_xori_i32(addr_be, addr, BE32_XOR);                       \
        tcg_gen_qemu_st_i32(val, addr_be, index, opc);                   \
        tcg_temp_free(addr_be);                                          \
        return;                                                          \
    }                                                                    \
    tcg_gen_qemu_st_i32(val, addr, index, opc);                          \
}

static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 addr, int index)
{
    TCGMemOp opc = MO_Q | s->be_data;
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        /* BE32 swaps the two words of a doubleword.  */
        tcg_gen_rotri_i64(val, val, 32);
    }
}

static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 addr, int index)
{
    TCGMemOp opc = MO_Q | s->be_data;
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
        return;
    }
    tcg_gen_qemu_st_i64(val, addr, index, opc);
}
#else

/* 64-bit-vaddr variants: the 32-bit guest address must be zero-extended
 * into a TCGv (host target_ulong) before the memory op. */
#define DO_GEN_LD(SUFF, OPC, BE32_XOR)                                   \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 addr, int index)           \
{                                                                        \
    TCGMemOp opc = (OPC) | s->be_data;                                   \
    TCGv addr64 = tcg_temp_new();                                        \
    tcg_gen_extu_i32_i64(addr64, addr);                                  \
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */    \
    if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) {                       \
        tcg_gen_xori_i64(addr64, addr64, BE32_XOR);                      \
    }                                                                    \
    tcg_gen_qemu_ld_i32(val, addr64, index, opc);                        \
    tcg_temp_free(addr64);                                               \
}

#define DO_GEN_ST(SUFF, OPC, BE32_XOR)                                   \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 addr, int index)           \
{                                                                        \
    TCGMemOp opc = (OPC) | s->be_data;                                   \
    TCGv addr64 = tcg_temp_new();                                        \
    tcg_gen_extu_i32_i64(addr64, addr);                                  \
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */    \
    if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) {                       \
        tcg_gen_xori_i64(addr64, addr64, BE32_XOR);                      \
    }                                                                    \
    tcg_gen_qemu_st_i32(val, addr64, index, opc);                        \
    tcg_temp_free(addr64);                                               \
}

static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 addr, int index)
{
    TCGMemOp opc = MO_Q | s->be_data;
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_ld_i64(val, addr64, index, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        /* BE32 swaps the two words of a doubleword.  */
        tcg_gen_rotri_i64(val, val, 32);
    }
    tcg_temp_free(addr64);
}

static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 addr, int index)
{
    TCGMemOp opc = MO_Q | s->be_data;
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv tmp = tcg_temp_new();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr64, index, opc);
        tcg_temp_free(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr64, index, opc);
    }
    tcg_temp_free(addr64);
}

#endif
/* Instantiate the load/store generators.  The third argument is the
 * BE32 address xor: 3 for byte accesses, 2 for halfwords, 0 for words. */
DO_GEN_LD(8s, MO_SB, 3)
DO_GEN_LD(8u, MO_UB, 3)
DO_GEN_LD(16s, MO_SW, 2)
DO_GEN_LD(16u, MO_UW, 2)
DO_GEN_LD(32u, MO_UL, 0)
/* 'a' variants include an alignment check */
DO_GEN_LD(16ua, MO_UW | MO_ALIGN, 2)
DO_GEN_LD(32ua, MO_UL | MO_ALIGN, 0)
DO_GEN_ST(8, MO_UB, 3)
DO_GEN_ST(16, MO_UW, 2)
DO_GEN_ST(32, MO_UL, 0)
1074 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
1076 tcg_gen_movi_i32(cpu_R[15], val);
1079 static inline void gen_hvc(DisasContext *s, int imm16)
1081 /* The pre HVC helper handles cases when HVC gets trapped
1082 * as an undefined insn by runtime configuration (ie before
1083 * the insn really executes).
1085 gen_set_pc_im(s, s->pc - 4);
1086 gen_helper_pre_hvc(cpu_env);
1087 /* Otherwise we will treat this as a real exception which
1088 * happens after execution of the insn. (The distinction matters
1089 * for the PC value reported to the exception handler and also
1090 * for single stepping.)
1092 s->svc_imm = imm16;
1093 gen_set_pc_im(s, s->pc);
1094 s->is_jmp = DISAS_HVC;
1097 static inline void gen_smc(DisasContext *s)
1099 /* As with HVC, we may take an exception either before or after
1100 * the insn executes.
1102 TCGv_i32 tmp;
1104 gen_set_pc_im(s, s->pc - 4);
1105 tmp = tcg_const_i32(syn_aa32_smc());
1106 gen_helper_pre_smc(cpu_env, tmp);
1107 tcg_temp_free_i32(tmp);
1108 gen_set_pc_im(s, s->pc);
1109 s->is_jmp = DISAS_SMC;
1112 static inline void
1113 gen_set_condexec (DisasContext *s)
1115 if (s->condexec_mask) {
1116 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
1117 TCGv_i32 tmp = tcg_temp_new_i32();
1118 tcg_gen_movi_i32(tmp, val);
1119 store_cpu_field(tmp, condexec_bits);
1123 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1125 gen_set_condexec(s);
1126 gen_set_pc_im(s, s->pc - offset);
1127 gen_exception_internal(excp);
1128 s->is_jmp = DISAS_JUMP;
1131 static void gen_exception_insn(DisasContext *s, int offset, int excp,
1132 int syn, uint32_t target_el)
1134 gen_set_condexec(s);
1135 gen_set_pc_im(s, s->pc - offset);
1136 gen_exception(excp, syn, target_el);
1137 s->is_jmp = DISAS_JUMP;
1140 /* Force a TB lookup after an instruction that changes the CPU state. */
1141 static inline void gen_lookup_tb(DisasContext *s)
1143 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
1144 s->is_jmp = DISAS_JUMP;
1147 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1148 TCGv_i32 var)
1150 int val, rm, shift, shiftop;
1151 TCGv_i32 offset;
1153 if (!(insn & (1 << 25))) {
1154 /* immediate */
1155 val = insn & 0xfff;
1156 if (!(insn & (1 << 23)))
1157 val = -val;
1158 if (val != 0)
1159 tcg_gen_addi_i32(var, var, val);
1160 } else {
1161 /* shift/register */
1162 rm = (insn) & 0xf;
1163 shift = (insn >> 7) & 0x1f;
1164 shiftop = (insn >> 5) & 3;
1165 offset = load_reg(s, rm);
1166 gen_arm_shift_im(offset, shiftop, shift, 0);
1167 if (!(insn & (1 << 23)))
1168 tcg_gen_sub_i32(var, var, offset);
1169 else
1170 tcg_gen_add_i32(var, var, offset);
1171 tcg_temp_free_i32(offset);
1175 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1176 int extra, TCGv_i32 var)
1178 int val, rm;
1179 TCGv_i32 offset;
1181 if (insn & (1 << 22)) {
1182 /* immediate */
1183 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1184 if (!(insn & (1 << 23)))
1185 val = -val;
1186 val += extra;
1187 if (val != 0)
1188 tcg_gen_addi_i32(var, var, val);
1189 } else {
1190 /* register */
1191 if (extra)
1192 tcg_gen_addi_i32(var, var, extra);
1193 rm = (insn) & 0xf;
1194 offset = load_reg(s, rm);
1195 if (!(insn & (1 << 23)))
1196 tcg_gen_sub_i32(var, var, offset);
1197 else
1198 tcg_gen_add_i32(var, var, offset);
1199 tcg_temp_free_i32(offset);
1203 static TCGv_ptr get_fpstatus_ptr(int neon)
1205 TCGv_ptr statusptr = tcg_temp_new_ptr();
1206 int offset;
1207 if (neon) {
1208 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1209 } else {
1210 offset = offsetof(CPUARMState, vfp.fp_status);
1212 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1213 return statusptr;
1216 #define VFP_OP2(name) \
1217 static inline void gen_vfp_##name(int dp) \
1219 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1220 if (dp) { \
1221 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1222 } else { \
1223 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1225 tcg_temp_free_ptr(fpst); \
1228 VFP_OP2(add)
1229 VFP_OP2(sub)
1230 VFP_OP2(mul)
1231 VFP_OP2(div)
1233 #undef VFP_OP2
1235 static inline void gen_vfp_F1_mul(int dp)
1237 /* Like gen_vfp_mul() but put result in F1 */
1238 TCGv_ptr fpst = get_fpstatus_ptr(0);
1239 if (dp) {
1240 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1241 } else {
1242 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1244 tcg_temp_free_ptr(fpst);
1247 static inline void gen_vfp_F1_neg(int dp)
1249 /* Like gen_vfp_neg() but put result in F1 */
1250 if (dp) {
1251 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1252 } else {
1253 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1257 static inline void gen_vfp_abs(int dp)
1259 if (dp)
1260 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1261 else
1262 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1265 static inline void gen_vfp_neg(int dp)
1267 if (dp)
1268 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1269 else
1270 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1273 static inline void gen_vfp_sqrt(int dp)
1275 if (dp)
1276 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1277 else
1278 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1281 static inline void gen_vfp_cmp(int dp)
1283 if (dp)
1284 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1285 else
1286 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1289 static inline void gen_vfp_cmpe(int dp)
1291 if (dp)
1292 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1293 else
1294 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1297 static inline void gen_vfp_F1_ld0(int dp)
1299 if (dp)
1300 tcg_gen_movi_i64(cpu_F1d, 0);
1301 else
1302 tcg_gen_movi_i32(cpu_F1s, 0);
1305 #define VFP_GEN_ITOF(name) \
1306 static inline void gen_vfp_##name(int dp, int neon) \
1308 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1309 if (dp) { \
1310 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1311 } else { \
1312 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1314 tcg_temp_free_ptr(statusptr); \
1317 VFP_GEN_ITOF(uito)
1318 VFP_GEN_ITOF(sito)
1319 #undef VFP_GEN_ITOF
1321 #define VFP_GEN_FTOI(name) \
1322 static inline void gen_vfp_##name(int dp, int neon) \
1324 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1325 if (dp) { \
1326 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1327 } else { \
1328 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1330 tcg_temp_free_ptr(statusptr); \
1333 VFP_GEN_FTOI(toui)
1334 VFP_GEN_FTOI(touiz)
1335 VFP_GEN_FTOI(tosi)
1336 VFP_GEN_FTOI(tosiz)
1337 #undef VFP_GEN_FTOI
1339 #define VFP_GEN_FIX(name, round) \
1340 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1342 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1343 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1344 if (dp) { \
1345 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1346 statusptr); \
1347 } else { \
1348 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1349 statusptr); \
1351 tcg_temp_free_i32(tmp_shift); \
1352 tcg_temp_free_ptr(statusptr); \
1354 VFP_GEN_FIX(tosh, _round_to_zero)
1355 VFP_GEN_FIX(tosl, _round_to_zero)
1356 VFP_GEN_FIX(touh, _round_to_zero)
1357 VFP_GEN_FIX(toul, _round_to_zero)
1358 VFP_GEN_FIX(shto, )
1359 VFP_GEN_FIX(slto, )
1360 VFP_GEN_FIX(uhto, )
1361 VFP_GEN_FIX(ulto, )
1362 #undef VFP_GEN_FIX
1364 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1366 if (dp) {
1367 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
1368 } else {
1369 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
1373 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1375 if (dp) {
1376 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
1377 } else {
1378 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
1382 static inline long
1383 vfp_reg_offset (int dp, int reg)
1385 if (dp)
1386 return offsetof(CPUARMState, vfp.regs[reg]);
1387 else if (reg & 1) {
1388 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1389 + offsetof(CPU_DoubleU, l.upper);
1390 } else {
1391 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1392 + offsetof(CPU_DoubleU, l.lower);
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1406 static TCGv_i32 neon_load_reg(int reg, int pass)
1408 TCGv_i32 tmp = tcg_temp_new_i32();
1409 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1410 return tmp;
1413 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1415 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1416 tcg_temp_free_i32(var);
1419 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1421 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1424 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1426 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
/* VFP "float" loads/stores are just the plain i32/i64 TCG memory ops. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1434 static inline void gen_mov_F0_vreg(int dp, int reg)
1436 if (dp)
1437 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1438 else
1439 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1442 static inline void gen_mov_F1_vreg(int dp, int reg)
1444 if (dp)
1445 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1446 else
1447 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1450 static inline void gen_mov_vreg_F0(int dp, int reg)
1452 if (dp)
1453 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1454 else
1455 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1458 #define ARM_CP_RW_BIT (1 << 20)
1460 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1462 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1465 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1467 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1470 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1472 TCGv_i32 var = tcg_temp_new_i32();
1473 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1474 return var;
1477 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1479 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1480 tcg_temp_free_i32(var);
1483 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1485 iwmmxt_store_reg(cpu_M0, rn);
1488 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1490 iwmmxt_load_reg(cpu_M0, rn);
1493 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1495 iwmmxt_load_reg(cpu_V1, rn);
1496 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1499 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1501 iwmmxt_load_reg(cpu_V1, rn);
1502 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1505 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1507 iwmmxt_load_reg(cpu_V1, rn);
1508 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1511 #define IWMMXT_OP(name) \
1512 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1514 iwmmxt_load_reg(cpu_V1, rn); \
1515 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1518 #define IWMMXT_OP_ENV(name) \
1519 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1521 iwmmxt_load_reg(cpu_V1, rn); \
1522 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1525 #define IWMMXT_OP_ENV_SIZE(name) \
1526 IWMMXT_OP_ENV(name##b) \
1527 IWMMXT_OP_ENV(name##w) \
1528 IWMMXT_OP_ENV(name##l)
1530 #define IWMMXT_OP_ENV1(name) \
1531 static inline void gen_op_iwmmxt_##name##_M0(void) \
1533 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1536 IWMMXT_OP(maddsq)
1537 IWMMXT_OP(madduq)
1538 IWMMXT_OP(sadb)
1539 IWMMXT_OP(sadw)
1540 IWMMXT_OP(mulslw)
1541 IWMMXT_OP(mulshw)
1542 IWMMXT_OP(mululw)
1543 IWMMXT_OP(muluhw)
1544 IWMMXT_OP(macsw)
1545 IWMMXT_OP(macuw)
1547 IWMMXT_OP_ENV_SIZE(unpackl)
1548 IWMMXT_OP_ENV_SIZE(unpackh)
1550 IWMMXT_OP_ENV1(unpacklub)
1551 IWMMXT_OP_ENV1(unpackluw)
1552 IWMMXT_OP_ENV1(unpacklul)
1553 IWMMXT_OP_ENV1(unpackhub)
1554 IWMMXT_OP_ENV1(unpackhuw)
1555 IWMMXT_OP_ENV1(unpackhul)
1556 IWMMXT_OP_ENV1(unpacklsb)
1557 IWMMXT_OP_ENV1(unpacklsw)
1558 IWMMXT_OP_ENV1(unpacklsl)
1559 IWMMXT_OP_ENV1(unpackhsb)
1560 IWMMXT_OP_ENV1(unpackhsw)
1561 IWMMXT_OP_ENV1(unpackhsl)
1563 IWMMXT_OP_ENV_SIZE(cmpeq)
1564 IWMMXT_OP_ENV_SIZE(cmpgtu)
1565 IWMMXT_OP_ENV_SIZE(cmpgts)
1567 IWMMXT_OP_ENV_SIZE(mins)
1568 IWMMXT_OP_ENV_SIZE(minu)
1569 IWMMXT_OP_ENV_SIZE(maxs)
1570 IWMMXT_OP_ENV_SIZE(maxu)
1572 IWMMXT_OP_ENV_SIZE(subn)
1573 IWMMXT_OP_ENV_SIZE(addn)
1574 IWMMXT_OP_ENV_SIZE(subu)
1575 IWMMXT_OP_ENV_SIZE(addu)
1576 IWMMXT_OP_ENV_SIZE(subs)
1577 IWMMXT_OP_ENV_SIZE(adds)
1579 IWMMXT_OP_ENV(avgb0)
1580 IWMMXT_OP_ENV(avgb1)
1581 IWMMXT_OP_ENV(avgw0)
1582 IWMMXT_OP_ENV(avgw1)
1584 IWMMXT_OP_ENV(packuw)
1585 IWMMXT_OP_ENV(packul)
1586 IWMMXT_OP_ENV(packuq)
1587 IWMMXT_OP_ENV(packsw)
1588 IWMMXT_OP_ENV(packsl)
1589 IWMMXT_OP_ENV(packsq)
1591 static void gen_op_iwmmxt_set_mup(void)
1593 TCGv_i32 tmp;
1594 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1595 tcg_gen_ori_i32(tmp, tmp, 2);
1596 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1599 static void gen_op_iwmmxt_set_cup(void)
1601 TCGv_i32 tmp;
1602 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1603 tcg_gen_ori_i32(tmp, tmp, 1);
1604 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1607 static void gen_op_iwmmxt_setpsr_nz(void)
1609 TCGv_i32 tmp = tcg_temp_new_i32();
1610 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1611 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1614 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1616 iwmmxt_load_reg(cpu_V1, rn);
1617 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1618 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1621 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1622 TCGv_i32 dest)
1624 int rd;
1625 uint32_t offset;
1626 TCGv_i32 tmp;
1628 rd = (insn >> 16) & 0xf;
1629 tmp = load_reg(s, rd);
1631 offset = (insn & 0xff) << ((insn >> 7) & 2);
1632 if (insn & (1 << 24)) {
1633 /* Pre indexed */
1634 if (insn & (1 << 23))
1635 tcg_gen_addi_i32(tmp, tmp, offset);
1636 else
1637 tcg_gen_addi_i32(tmp, tmp, -offset);
1638 tcg_gen_mov_i32(dest, tmp);
1639 if (insn & (1 << 21))
1640 store_reg(s, rd, tmp);
1641 else
1642 tcg_temp_free_i32(tmp);
1643 } else if (insn & (1 << 21)) {
1644 /* Post indexed */
1645 tcg_gen_mov_i32(dest, tmp);
1646 if (insn & (1 << 23))
1647 tcg_gen_addi_i32(tmp, tmp, offset);
1648 else
1649 tcg_gen_addi_i32(tmp, tmp, -offset);
1650 store_reg(s, rd, tmp);
1651 } else if (!(insn & (1 << 23)))
1652 return 1;
1653 return 0;
1656 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1658 int rd = (insn >> 0) & 0xf;
1659 TCGv_i32 tmp;
1661 if (insn & (1 << 8)) {
1662 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1663 return 1;
1664 } else {
1665 tmp = iwmmxt_load_creg(rd);
1667 } else {
1668 tmp = tcg_temp_new_i32();
1669 iwmmxt_load_reg(cpu_V0, rd);
1670 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1672 tcg_gen_andi_i32(tmp, tmp, mask);
1673 tcg_gen_mov_i32(dest, tmp);
1674 tcg_temp_free_i32(tmp);
1675 return 0;
1678 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1679 (ie. an undefined instruction). */
1680 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1682 int rd, wrd;
1683 int rdhi, rdlo, rd0, rd1, i;
1684 TCGv_i32 addr;
1685 TCGv_i32 tmp, tmp2, tmp3;
1687 if ((insn & 0x0e000e00) == 0x0c000000) {
1688 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1689 wrd = insn & 0xf;
1690 rdlo = (insn >> 12) & 0xf;
1691 rdhi = (insn >> 16) & 0xf;
1692 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1693 iwmmxt_load_reg(cpu_V0, wrd);
1694 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1695 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1696 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1697 } else { /* TMCRR */
1698 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1699 iwmmxt_store_reg(cpu_V0, wrd);
1700 gen_op_iwmmxt_set_mup();
1702 return 0;
1705 wrd = (insn >> 12) & 0xf;
1706 addr = tcg_temp_new_i32();
1707 if (gen_iwmmxt_address(s, insn, addr)) {
1708 tcg_temp_free_i32(addr);
1709 return 1;
1711 if (insn & ARM_CP_RW_BIT) {
1712 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1713 tmp = tcg_temp_new_i32();
1714 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1715 iwmmxt_store_creg(wrd, tmp);
1716 } else {
1717 i = 1;
1718 if (insn & (1 << 8)) {
1719 if (insn & (1 << 22)) { /* WLDRD */
1720 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1721 i = 0;
1722 } else { /* WLDRW wRd */
1723 tmp = tcg_temp_new_i32();
1724 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1726 } else {
1727 tmp = tcg_temp_new_i32();
1728 if (insn & (1 << 22)) { /* WLDRH */
1729 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1730 } else { /* WLDRB */
1731 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1734 if (i) {
1735 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1736 tcg_temp_free_i32(tmp);
1738 gen_op_iwmmxt_movq_wRn_M0(wrd);
1740 } else {
1741 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1742 tmp = iwmmxt_load_creg(wrd);
1743 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1744 } else {
1745 gen_op_iwmmxt_movq_M0_wRn(wrd);
1746 tmp = tcg_temp_new_i32();
1747 if (insn & (1 << 8)) {
1748 if (insn & (1 << 22)) { /* WSTRD */
1749 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1750 } else { /* WSTRW wRd */
1751 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1752 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1754 } else {
1755 if (insn & (1 << 22)) { /* WSTRH */
1756 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1757 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1758 } else { /* WSTRB */
1759 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1760 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1764 tcg_temp_free_i32(tmp);
1766 tcg_temp_free_i32(addr);
1767 return 0;
1770 if ((insn & 0x0f000000) != 0x0e000000)
1771 return 1;
1773 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1774 case 0x000: /* WOR */
1775 wrd = (insn >> 12) & 0xf;
1776 rd0 = (insn >> 0) & 0xf;
1777 rd1 = (insn >> 16) & 0xf;
1778 gen_op_iwmmxt_movq_M0_wRn(rd0);
1779 gen_op_iwmmxt_orq_M0_wRn(rd1);
1780 gen_op_iwmmxt_setpsr_nz();
1781 gen_op_iwmmxt_movq_wRn_M0(wrd);
1782 gen_op_iwmmxt_set_mup();
1783 gen_op_iwmmxt_set_cup();
1784 break;
1785 case 0x011: /* TMCR */
1786 if (insn & 0xf)
1787 return 1;
1788 rd = (insn >> 12) & 0xf;
1789 wrd = (insn >> 16) & 0xf;
1790 switch (wrd) {
1791 case ARM_IWMMXT_wCID:
1792 case ARM_IWMMXT_wCASF:
1793 break;
1794 case ARM_IWMMXT_wCon:
1795 gen_op_iwmmxt_set_cup();
1796 /* Fall through. */
1797 case ARM_IWMMXT_wCSSF:
1798 tmp = iwmmxt_load_creg(wrd);
1799 tmp2 = load_reg(s, rd);
1800 tcg_gen_andc_i32(tmp, tmp, tmp2);
1801 tcg_temp_free_i32(tmp2);
1802 iwmmxt_store_creg(wrd, tmp);
1803 break;
1804 case ARM_IWMMXT_wCGR0:
1805 case ARM_IWMMXT_wCGR1:
1806 case ARM_IWMMXT_wCGR2:
1807 case ARM_IWMMXT_wCGR3:
1808 gen_op_iwmmxt_set_cup();
1809 tmp = load_reg(s, rd);
1810 iwmmxt_store_creg(wrd, tmp);
1811 break;
1812 default:
1813 return 1;
1815 break;
1816 case 0x100: /* WXOR */
1817 wrd = (insn >> 12) & 0xf;
1818 rd0 = (insn >> 0) & 0xf;
1819 rd1 = (insn >> 16) & 0xf;
1820 gen_op_iwmmxt_movq_M0_wRn(rd0);
1821 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1822 gen_op_iwmmxt_setpsr_nz();
1823 gen_op_iwmmxt_movq_wRn_M0(wrd);
1824 gen_op_iwmmxt_set_mup();
1825 gen_op_iwmmxt_set_cup();
1826 break;
1827 case 0x111: /* TMRC */
1828 if (insn & 0xf)
1829 return 1;
1830 rd = (insn >> 12) & 0xf;
1831 wrd = (insn >> 16) & 0xf;
1832 tmp = iwmmxt_load_creg(wrd);
1833 store_reg(s, rd, tmp);
1834 break;
1835 case 0x300: /* WANDN */
1836 wrd = (insn >> 12) & 0xf;
1837 rd0 = (insn >> 0) & 0xf;
1838 rd1 = (insn >> 16) & 0xf;
1839 gen_op_iwmmxt_movq_M0_wRn(rd0);
1840 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1841 gen_op_iwmmxt_andq_M0_wRn(rd1);
1842 gen_op_iwmmxt_setpsr_nz();
1843 gen_op_iwmmxt_movq_wRn_M0(wrd);
1844 gen_op_iwmmxt_set_mup();
1845 gen_op_iwmmxt_set_cup();
1846 break;
1847 case 0x200: /* WAND */
1848 wrd = (insn >> 12) & 0xf;
1849 rd0 = (insn >> 0) & 0xf;
1850 rd1 = (insn >> 16) & 0xf;
1851 gen_op_iwmmxt_movq_M0_wRn(rd0);
1852 gen_op_iwmmxt_andq_M0_wRn(rd1);
1853 gen_op_iwmmxt_setpsr_nz();
1854 gen_op_iwmmxt_movq_wRn_M0(wrd);
1855 gen_op_iwmmxt_set_mup();
1856 gen_op_iwmmxt_set_cup();
1857 break;
1858 case 0x810: case 0xa10: /* WMADD */
1859 wrd = (insn >> 12) & 0xf;
1860 rd0 = (insn >> 0) & 0xf;
1861 rd1 = (insn >> 16) & 0xf;
1862 gen_op_iwmmxt_movq_M0_wRn(rd0);
1863 if (insn & (1 << 21))
1864 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1865 else
1866 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1867 gen_op_iwmmxt_movq_wRn_M0(wrd);
1868 gen_op_iwmmxt_set_mup();
1869 break;
1870 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1871 wrd = (insn >> 12) & 0xf;
1872 rd0 = (insn >> 16) & 0xf;
1873 rd1 = (insn >> 0) & 0xf;
1874 gen_op_iwmmxt_movq_M0_wRn(rd0);
1875 switch ((insn >> 22) & 3) {
1876 case 0:
1877 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1878 break;
1879 case 1:
1880 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1881 break;
1882 case 2:
1883 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1884 break;
1885 case 3:
1886 return 1;
1888 gen_op_iwmmxt_movq_wRn_M0(wrd);
1889 gen_op_iwmmxt_set_mup();
1890 gen_op_iwmmxt_set_cup();
1891 break;
1892 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1893 wrd = (insn >> 12) & 0xf;
1894 rd0 = (insn >> 16) & 0xf;
1895 rd1 = (insn >> 0) & 0xf;
1896 gen_op_iwmmxt_movq_M0_wRn(rd0);
1897 switch ((insn >> 22) & 3) {
1898 case 0:
1899 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1900 break;
1901 case 1:
1902 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1903 break;
1904 case 2:
1905 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1906 break;
1907 case 3:
1908 return 1;
1910 gen_op_iwmmxt_movq_wRn_M0(wrd);
1911 gen_op_iwmmxt_set_mup();
1912 gen_op_iwmmxt_set_cup();
1913 break;
1914 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1915 wrd = (insn >> 12) & 0xf;
1916 rd0 = (insn >> 16) & 0xf;
1917 rd1 = (insn >> 0) & 0xf;
1918 gen_op_iwmmxt_movq_M0_wRn(rd0);
1919 if (insn & (1 << 22))
1920 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1921 else
1922 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1923 if (!(insn & (1 << 20)))
1924 gen_op_iwmmxt_addl_M0_wRn(wrd);
1925 gen_op_iwmmxt_movq_wRn_M0(wrd);
1926 gen_op_iwmmxt_set_mup();
1927 break;
1928 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1929 wrd = (insn >> 12) & 0xf;
1930 rd0 = (insn >> 16) & 0xf;
1931 rd1 = (insn >> 0) & 0xf;
1932 gen_op_iwmmxt_movq_M0_wRn(rd0);
1933 if (insn & (1 << 21)) {
1934 if (insn & (1 << 20))
1935 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1936 else
1937 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1938 } else {
1939 if (insn & (1 << 20))
1940 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1941 else
1942 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1944 gen_op_iwmmxt_movq_wRn_M0(wrd);
1945 gen_op_iwmmxt_set_mup();
1946 break;
1947 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1948 wrd = (insn >> 12) & 0xf;
1949 rd0 = (insn >> 16) & 0xf;
1950 rd1 = (insn >> 0) & 0xf;
1951 gen_op_iwmmxt_movq_M0_wRn(rd0);
1952 if (insn & (1 << 21))
1953 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1954 else
1955 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1956 if (!(insn & (1 << 20))) {
1957 iwmmxt_load_reg(cpu_V1, wrd);
1958 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1960 gen_op_iwmmxt_movq_wRn_M0(wrd);
1961 gen_op_iwmmxt_set_mup();
1962 break;
1963 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1964 wrd = (insn >> 12) & 0xf;
1965 rd0 = (insn >> 16) & 0xf;
1966 rd1 = (insn >> 0) & 0xf;
1967 gen_op_iwmmxt_movq_M0_wRn(rd0);
1968 switch ((insn >> 22) & 3) {
1969 case 0:
1970 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1971 break;
1972 case 1:
1973 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1974 break;
1975 case 2:
1976 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1977 break;
1978 case 3:
1979 return 1;
1981 gen_op_iwmmxt_movq_wRn_M0(wrd);
1982 gen_op_iwmmxt_set_mup();
1983 gen_op_iwmmxt_set_cup();
1984 break;
1985 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1986 wrd = (insn >> 12) & 0xf;
1987 rd0 = (insn >> 16) & 0xf;
1988 rd1 = (insn >> 0) & 0xf;
1989 gen_op_iwmmxt_movq_M0_wRn(rd0);
1990 if (insn & (1 << 22)) {
1991 if (insn & (1 << 20))
1992 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1993 else
1994 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1995 } else {
1996 if (insn & (1 << 20))
1997 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1998 else
1999 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2001 gen_op_iwmmxt_movq_wRn_M0(wrd);
2002 gen_op_iwmmxt_set_mup();
2003 gen_op_iwmmxt_set_cup();
2004 break;
2005 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2006 wrd = (insn >> 12) & 0xf;
2007 rd0 = (insn >> 16) & 0xf;
2008 rd1 = (insn >> 0) & 0xf;
2009 gen_op_iwmmxt_movq_M0_wRn(rd0);
2010 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2011 tcg_gen_andi_i32(tmp, tmp, 7);
2012 iwmmxt_load_reg(cpu_V1, rd1);
2013 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2014 tcg_temp_free_i32(tmp);
2015 gen_op_iwmmxt_movq_wRn_M0(wrd);
2016 gen_op_iwmmxt_set_mup();
2017 break;
2018 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2019 if (((insn >> 6) & 3) == 3)
2020 return 1;
2021 rd = (insn >> 12) & 0xf;
2022 wrd = (insn >> 16) & 0xf;
2023 tmp = load_reg(s, rd);
2024 gen_op_iwmmxt_movq_M0_wRn(wrd);
2025 switch ((insn >> 6) & 3) {
2026 case 0:
2027 tmp2 = tcg_const_i32(0xff);
2028 tmp3 = tcg_const_i32((insn & 7) << 3);
2029 break;
2030 case 1:
2031 tmp2 = tcg_const_i32(0xffff);
2032 tmp3 = tcg_const_i32((insn & 3) << 4);
2033 break;
2034 case 2:
2035 tmp2 = tcg_const_i32(0xffffffff);
2036 tmp3 = tcg_const_i32((insn & 1) << 5);
2037 break;
2038 default:
2039 TCGV_UNUSED_I32(tmp2);
2040 TCGV_UNUSED_I32(tmp3);
2042 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2043 tcg_temp_free_i32(tmp3);
2044 tcg_temp_free_i32(tmp2);
2045 tcg_temp_free_i32(tmp);
2046 gen_op_iwmmxt_movq_wRn_M0(wrd);
2047 gen_op_iwmmxt_set_mup();
2048 break;
2049 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2050 rd = (insn >> 12) & 0xf;
2051 wrd = (insn >> 16) & 0xf;
2052 if (rd == 15 || ((insn >> 22) & 3) == 3)
2053 return 1;
2054 gen_op_iwmmxt_movq_M0_wRn(wrd);
2055 tmp = tcg_temp_new_i32();
2056 switch ((insn >> 22) & 3) {
2057 case 0:
2058 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2059 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2060 if (insn & 8) {
2061 tcg_gen_ext8s_i32(tmp, tmp);
2062 } else {
2063 tcg_gen_andi_i32(tmp, tmp, 0xff);
2065 break;
2066 case 1:
2067 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2068 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2069 if (insn & 8) {
2070 tcg_gen_ext16s_i32(tmp, tmp);
2071 } else {
2072 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2074 break;
2075 case 2:
2076 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2077 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2078 break;
2080 store_reg(s, rd, tmp);
2081 break;
2082 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2083 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2084 return 1;
2085 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2086 switch ((insn >> 22) & 3) {
2087 case 0:
2088 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2089 break;
2090 case 1:
2091 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2092 break;
2093 case 2:
2094 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2095 break;
2097 tcg_gen_shli_i32(tmp, tmp, 28);
2098 gen_set_nzcv(tmp);
2099 tcg_temp_free_i32(tmp);
2100 break;
2101 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2102 if (((insn >> 6) & 3) == 3)
2103 return 1;
2104 rd = (insn >> 12) & 0xf;
2105 wrd = (insn >> 16) & 0xf;
2106 tmp = load_reg(s, rd);
2107 switch ((insn >> 6) & 3) {
2108 case 0:
2109 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2110 break;
2111 case 1:
2112 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2113 break;
2114 case 2:
2115 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2116 break;
2118 tcg_temp_free_i32(tmp);
2119 gen_op_iwmmxt_movq_wRn_M0(wrd);
2120 gen_op_iwmmxt_set_mup();
2121 break;
2122 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2123 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2124 return 1;
2125 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2126 tmp2 = tcg_temp_new_i32();
2127 tcg_gen_mov_i32(tmp2, tmp);
2128 switch ((insn >> 22) & 3) {
2129 case 0:
2130 for (i = 0; i < 7; i ++) {
2131 tcg_gen_shli_i32(tmp2, tmp2, 4);
2132 tcg_gen_and_i32(tmp, tmp, tmp2);
2134 break;
2135 case 1:
2136 for (i = 0; i < 3; i ++) {
2137 tcg_gen_shli_i32(tmp2, tmp2, 8);
2138 tcg_gen_and_i32(tmp, tmp, tmp2);
2140 break;
2141 case 2:
2142 tcg_gen_shli_i32(tmp2, tmp2, 16);
2143 tcg_gen_and_i32(tmp, tmp, tmp2);
2144 break;
2146 gen_set_nzcv(tmp);
2147 tcg_temp_free_i32(tmp2);
2148 tcg_temp_free_i32(tmp);
2149 break;
2150 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2151 wrd = (insn >> 12) & 0xf;
2152 rd0 = (insn >> 16) & 0xf;
2153 gen_op_iwmmxt_movq_M0_wRn(rd0);
2154 switch ((insn >> 22) & 3) {
2155 case 0:
2156 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2157 break;
2158 case 1:
2159 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2160 break;
2161 case 2:
2162 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2163 break;
2164 case 3:
2165 return 1;
2167 gen_op_iwmmxt_movq_wRn_M0(wrd);
2168 gen_op_iwmmxt_set_mup();
2169 break;
2170 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2171 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2172 return 1;
2173 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2174 tmp2 = tcg_temp_new_i32();
2175 tcg_gen_mov_i32(tmp2, tmp);
2176 switch ((insn >> 22) & 3) {
2177 case 0:
2178 for (i = 0; i < 7; i ++) {
2179 tcg_gen_shli_i32(tmp2, tmp2, 4);
2180 tcg_gen_or_i32(tmp, tmp, tmp2);
2182 break;
2183 case 1:
2184 for (i = 0; i < 3; i ++) {
2185 tcg_gen_shli_i32(tmp2, tmp2, 8);
2186 tcg_gen_or_i32(tmp, tmp, tmp2);
2188 break;
2189 case 2:
2190 tcg_gen_shli_i32(tmp2, tmp2, 16);
2191 tcg_gen_or_i32(tmp, tmp, tmp2);
2192 break;
2194 gen_set_nzcv(tmp);
2195 tcg_temp_free_i32(tmp2);
2196 tcg_temp_free_i32(tmp);
2197 break;
2198 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2199 rd = (insn >> 12) & 0xf;
2200 rd0 = (insn >> 16) & 0xf;
2201 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2202 return 1;
2203 gen_op_iwmmxt_movq_M0_wRn(rd0);
2204 tmp = tcg_temp_new_i32();
2205 switch ((insn >> 22) & 3) {
2206 case 0:
2207 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2208 break;
2209 case 1:
2210 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2211 break;
2212 case 2:
2213 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2214 break;
2216 store_reg(s, rd, tmp);
2217 break;
2218 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2219 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2220 wrd = (insn >> 12) & 0xf;
2221 rd0 = (insn >> 16) & 0xf;
2222 rd1 = (insn >> 0) & 0xf;
2223 gen_op_iwmmxt_movq_M0_wRn(rd0);
2224 switch ((insn >> 22) & 3) {
2225 case 0:
2226 if (insn & (1 << 21))
2227 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2228 else
2229 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2230 break;
2231 case 1:
2232 if (insn & (1 << 21))
2233 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2234 else
2235 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2236 break;
2237 case 2:
2238 if (insn & (1 << 21))
2239 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2240 else
2241 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2242 break;
2243 case 3:
2244 return 1;
2246 gen_op_iwmmxt_movq_wRn_M0(wrd);
2247 gen_op_iwmmxt_set_mup();
2248 gen_op_iwmmxt_set_cup();
2249 break;
2250 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2251 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2252 wrd = (insn >> 12) & 0xf;
2253 rd0 = (insn >> 16) & 0xf;
2254 gen_op_iwmmxt_movq_M0_wRn(rd0);
2255 switch ((insn >> 22) & 3) {
2256 case 0:
2257 if (insn & (1 << 21))
2258 gen_op_iwmmxt_unpacklsb_M0();
2259 else
2260 gen_op_iwmmxt_unpacklub_M0();
2261 break;
2262 case 1:
2263 if (insn & (1 << 21))
2264 gen_op_iwmmxt_unpacklsw_M0();
2265 else
2266 gen_op_iwmmxt_unpackluw_M0();
2267 break;
2268 case 2:
2269 if (insn & (1 << 21))
2270 gen_op_iwmmxt_unpacklsl_M0();
2271 else
2272 gen_op_iwmmxt_unpacklul_M0();
2273 break;
2274 case 3:
2275 return 1;
2277 gen_op_iwmmxt_movq_wRn_M0(wrd);
2278 gen_op_iwmmxt_set_mup();
2279 gen_op_iwmmxt_set_cup();
2280 break;
2281 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2282 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2283 wrd = (insn >> 12) & 0xf;
2284 rd0 = (insn >> 16) & 0xf;
2285 gen_op_iwmmxt_movq_M0_wRn(rd0);
2286 switch ((insn >> 22) & 3) {
2287 case 0:
2288 if (insn & (1 << 21))
2289 gen_op_iwmmxt_unpackhsb_M0();
2290 else
2291 gen_op_iwmmxt_unpackhub_M0();
2292 break;
2293 case 1:
2294 if (insn & (1 << 21))
2295 gen_op_iwmmxt_unpackhsw_M0();
2296 else
2297 gen_op_iwmmxt_unpackhuw_M0();
2298 break;
2299 case 2:
2300 if (insn & (1 << 21))
2301 gen_op_iwmmxt_unpackhsl_M0();
2302 else
2303 gen_op_iwmmxt_unpackhul_M0();
2304 break;
2305 case 3:
2306 return 1;
2308 gen_op_iwmmxt_movq_wRn_M0(wrd);
2309 gen_op_iwmmxt_set_mup();
2310 gen_op_iwmmxt_set_cup();
2311 break;
2312 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2313 case 0x214: case 0x614: case 0xa14: case 0xe14:
2314 if (((insn >> 22) & 3) == 0)
2315 return 1;
2316 wrd = (insn >> 12) & 0xf;
2317 rd0 = (insn >> 16) & 0xf;
2318 gen_op_iwmmxt_movq_M0_wRn(rd0);
2319 tmp = tcg_temp_new_i32();
2320 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2321 tcg_temp_free_i32(tmp);
2322 return 1;
2324 switch ((insn >> 22) & 3) {
2325 case 1:
2326 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2327 break;
2328 case 2:
2329 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2330 break;
2331 case 3:
2332 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2333 break;
2335 tcg_temp_free_i32(tmp);
2336 gen_op_iwmmxt_movq_wRn_M0(wrd);
2337 gen_op_iwmmxt_set_mup();
2338 gen_op_iwmmxt_set_cup();
2339 break;
2340 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2341 case 0x014: case 0x414: case 0x814: case 0xc14:
2342 if (((insn >> 22) & 3) == 0)
2343 return 1;
2344 wrd = (insn >> 12) & 0xf;
2345 rd0 = (insn >> 16) & 0xf;
2346 gen_op_iwmmxt_movq_M0_wRn(rd0);
2347 tmp = tcg_temp_new_i32();
2348 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2349 tcg_temp_free_i32(tmp);
2350 return 1;
2352 switch ((insn >> 22) & 3) {
2353 case 1:
2354 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2355 break;
2356 case 2:
2357 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2358 break;
2359 case 3:
2360 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2361 break;
2363 tcg_temp_free_i32(tmp);
2364 gen_op_iwmmxt_movq_wRn_M0(wrd);
2365 gen_op_iwmmxt_set_mup();
2366 gen_op_iwmmxt_set_cup();
2367 break;
2368 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2369 case 0x114: case 0x514: case 0x914: case 0xd14:
2370 if (((insn >> 22) & 3) == 0)
2371 return 1;
2372 wrd = (insn >> 12) & 0xf;
2373 rd0 = (insn >> 16) & 0xf;
2374 gen_op_iwmmxt_movq_M0_wRn(rd0);
2375 tmp = tcg_temp_new_i32();
2376 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2377 tcg_temp_free_i32(tmp);
2378 return 1;
2380 switch ((insn >> 22) & 3) {
2381 case 1:
2382 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2383 break;
2384 case 2:
2385 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2386 break;
2387 case 3:
2388 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2389 break;
2391 tcg_temp_free_i32(tmp);
2392 gen_op_iwmmxt_movq_wRn_M0(wrd);
2393 gen_op_iwmmxt_set_mup();
2394 gen_op_iwmmxt_set_cup();
2395 break;
2396 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2397 case 0x314: case 0x714: case 0xb14: case 0xf14:
2398 if (((insn >> 22) & 3) == 0)
2399 return 1;
2400 wrd = (insn >> 12) & 0xf;
2401 rd0 = (insn >> 16) & 0xf;
2402 gen_op_iwmmxt_movq_M0_wRn(rd0);
2403 tmp = tcg_temp_new_i32();
2404 switch ((insn >> 22) & 3) {
2405 case 1:
2406 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2407 tcg_temp_free_i32(tmp);
2408 return 1;
2410 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2411 break;
2412 case 2:
2413 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2414 tcg_temp_free_i32(tmp);
2415 return 1;
2417 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2418 break;
2419 case 3:
2420 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2421 tcg_temp_free_i32(tmp);
2422 return 1;
2424 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2425 break;
2427 tcg_temp_free_i32(tmp);
2428 gen_op_iwmmxt_movq_wRn_M0(wrd);
2429 gen_op_iwmmxt_set_mup();
2430 gen_op_iwmmxt_set_cup();
2431 break;
2432 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2433 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2434 wrd = (insn >> 12) & 0xf;
2435 rd0 = (insn >> 16) & 0xf;
2436 rd1 = (insn >> 0) & 0xf;
2437 gen_op_iwmmxt_movq_M0_wRn(rd0);
2438 switch ((insn >> 22) & 3) {
2439 case 0:
2440 if (insn & (1 << 21))
2441 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2442 else
2443 gen_op_iwmmxt_minub_M0_wRn(rd1);
2444 break;
2445 case 1:
2446 if (insn & (1 << 21))
2447 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2448 else
2449 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2450 break;
2451 case 2:
2452 if (insn & (1 << 21))
2453 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2454 else
2455 gen_op_iwmmxt_minul_M0_wRn(rd1);
2456 break;
2457 case 3:
2458 return 1;
2460 gen_op_iwmmxt_movq_wRn_M0(wrd);
2461 gen_op_iwmmxt_set_mup();
2462 break;
2463 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2464 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2465 wrd = (insn >> 12) & 0xf;
2466 rd0 = (insn >> 16) & 0xf;
2467 rd1 = (insn >> 0) & 0xf;
2468 gen_op_iwmmxt_movq_M0_wRn(rd0);
2469 switch ((insn >> 22) & 3) {
2470 case 0:
2471 if (insn & (1 << 21))
2472 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2473 else
2474 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2475 break;
2476 case 1:
2477 if (insn & (1 << 21))
2478 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2479 else
2480 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2481 break;
2482 case 2:
2483 if (insn & (1 << 21))
2484 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2485 else
2486 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2487 break;
2488 case 3:
2489 return 1;
2491 gen_op_iwmmxt_movq_wRn_M0(wrd);
2492 gen_op_iwmmxt_set_mup();
2493 break;
2494 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2495 case 0x402: case 0x502: case 0x602: case 0x702:
2496 wrd = (insn >> 12) & 0xf;
2497 rd0 = (insn >> 16) & 0xf;
2498 rd1 = (insn >> 0) & 0xf;
2499 gen_op_iwmmxt_movq_M0_wRn(rd0);
2500 tmp = tcg_const_i32((insn >> 20) & 3);
2501 iwmmxt_load_reg(cpu_V1, rd1);
2502 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2503 tcg_temp_free_i32(tmp);
2504 gen_op_iwmmxt_movq_wRn_M0(wrd);
2505 gen_op_iwmmxt_set_mup();
2506 break;
2507 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2508 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2509 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2510 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2511 wrd = (insn >> 12) & 0xf;
2512 rd0 = (insn >> 16) & 0xf;
2513 rd1 = (insn >> 0) & 0xf;
2514 gen_op_iwmmxt_movq_M0_wRn(rd0);
2515 switch ((insn >> 20) & 0xf) {
2516 case 0x0:
2517 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2518 break;
2519 case 0x1:
2520 gen_op_iwmmxt_subub_M0_wRn(rd1);
2521 break;
2522 case 0x3:
2523 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2524 break;
2525 case 0x4:
2526 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2527 break;
2528 case 0x5:
2529 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2530 break;
2531 case 0x7:
2532 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2533 break;
2534 case 0x8:
2535 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2536 break;
2537 case 0x9:
2538 gen_op_iwmmxt_subul_M0_wRn(rd1);
2539 break;
2540 case 0xb:
2541 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2542 break;
2543 default:
2544 return 1;
2546 gen_op_iwmmxt_movq_wRn_M0(wrd);
2547 gen_op_iwmmxt_set_mup();
2548 gen_op_iwmmxt_set_cup();
2549 break;
2550 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2551 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2552 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2553 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2554 wrd = (insn >> 12) & 0xf;
2555 rd0 = (insn >> 16) & 0xf;
2556 gen_op_iwmmxt_movq_M0_wRn(rd0);
2557 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2558 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2559 tcg_temp_free_i32(tmp);
2560 gen_op_iwmmxt_movq_wRn_M0(wrd);
2561 gen_op_iwmmxt_set_mup();
2562 gen_op_iwmmxt_set_cup();
2563 break;
2564 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2565 case 0x418: case 0x518: case 0x618: case 0x718:
2566 case 0x818: case 0x918: case 0xa18: case 0xb18:
2567 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2568 wrd = (insn >> 12) & 0xf;
2569 rd0 = (insn >> 16) & 0xf;
2570 rd1 = (insn >> 0) & 0xf;
2571 gen_op_iwmmxt_movq_M0_wRn(rd0);
2572 switch ((insn >> 20) & 0xf) {
2573 case 0x0:
2574 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2575 break;
2576 case 0x1:
2577 gen_op_iwmmxt_addub_M0_wRn(rd1);
2578 break;
2579 case 0x3:
2580 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2581 break;
2582 case 0x4:
2583 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2584 break;
2585 case 0x5:
2586 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2587 break;
2588 case 0x7:
2589 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2590 break;
2591 case 0x8:
2592 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2593 break;
2594 case 0x9:
2595 gen_op_iwmmxt_addul_M0_wRn(rd1);
2596 break;
2597 case 0xb:
2598 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2599 break;
2600 default:
2601 return 1;
2603 gen_op_iwmmxt_movq_wRn_M0(wrd);
2604 gen_op_iwmmxt_set_mup();
2605 gen_op_iwmmxt_set_cup();
2606 break;
2607 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2608 case 0x408: case 0x508: case 0x608: case 0x708:
2609 case 0x808: case 0x908: case 0xa08: case 0xb08:
2610 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2611 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2612 return 1;
2613 wrd = (insn >> 12) & 0xf;
2614 rd0 = (insn >> 16) & 0xf;
2615 rd1 = (insn >> 0) & 0xf;
2616 gen_op_iwmmxt_movq_M0_wRn(rd0);
2617 switch ((insn >> 22) & 3) {
2618 case 1:
2619 if (insn & (1 << 21))
2620 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2621 else
2622 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2623 break;
2624 case 2:
2625 if (insn & (1 << 21))
2626 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2627 else
2628 gen_op_iwmmxt_packul_M0_wRn(rd1);
2629 break;
2630 case 3:
2631 if (insn & (1 << 21))
2632 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2633 else
2634 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2635 break;
2637 gen_op_iwmmxt_movq_wRn_M0(wrd);
2638 gen_op_iwmmxt_set_mup();
2639 gen_op_iwmmxt_set_cup();
2640 break;
2641 case 0x201: case 0x203: case 0x205: case 0x207:
2642 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2643 case 0x211: case 0x213: case 0x215: case 0x217:
2644 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2645 wrd = (insn >> 5) & 0xf;
2646 rd0 = (insn >> 12) & 0xf;
2647 rd1 = (insn >> 0) & 0xf;
2648 if (rd0 == 0xf || rd1 == 0xf)
2649 return 1;
2650 gen_op_iwmmxt_movq_M0_wRn(wrd);
2651 tmp = load_reg(s, rd0);
2652 tmp2 = load_reg(s, rd1);
2653 switch ((insn >> 16) & 0xf) {
2654 case 0x0: /* TMIA */
2655 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2656 break;
2657 case 0x8: /* TMIAPH */
2658 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2659 break;
2660 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2661 if (insn & (1 << 16))
2662 tcg_gen_shri_i32(tmp, tmp, 16);
2663 if (insn & (1 << 17))
2664 tcg_gen_shri_i32(tmp2, tmp2, 16);
2665 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2666 break;
2667 default:
2668 tcg_temp_free_i32(tmp2);
2669 tcg_temp_free_i32(tmp);
2670 return 1;
2672 tcg_temp_free_i32(tmp2);
2673 tcg_temp_free_i32(tmp);
2674 gen_op_iwmmxt_movq_wRn_M0(wrd);
2675 gen_op_iwmmxt_set_mup();
2676 break;
2677 default:
2678 return 1;
2681 return 0;
2684 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2685 (ie. an undefined instruction). */
2686 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2688 int acc, rd0, rd1, rdhi, rdlo;
2689 TCGv_i32 tmp, tmp2;
2691 if ((insn & 0x0ff00f10) == 0x0e200010) {
2692 /* Multiply with Internal Accumulate Format */
2693 rd0 = (insn >> 12) & 0xf;
2694 rd1 = insn & 0xf;
2695 acc = (insn >> 5) & 7;
2697 if (acc != 0)
2698 return 1;
2700 tmp = load_reg(s, rd0);
2701 tmp2 = load_reg(s, rd1);
2702 switch ((insn >> 16) & 0xf) {
2703 case 0x0: /* MIA */
2704 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2705 break;
2706 case 0x8: /* MIAPH */
2707 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2708 break;
2709 case 0xc: /* MIABB */
2710 case 0xd: /* MIABT */
2711 case 0xe: /* MIATB */
2712 case 0xf: /* MIATT */
2713 if (insn & (1 << 16))
2714 tcg_gen_shri_i32(tmp, tmp, 16);
2715 if (insn & (1 << 17))
2716 tcg_gen_shri_i32(tmp2, tmp2, 16);
2717 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2718 break;
2719 default:
2720 return 1;
2722 tcg_temp_free_i32(tmp2);
2723 tcg_temp_free_i32(tmp);
2725 gen_op_iwmmxt_movq_wRn_M0(acc);
2726 return 0;
2729 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2730 /* Internal Accumulator Access Format */
2731 rdhi = (insn >> 16) & 0xf;
2732 rdlo = (insn >> 12) & 0xf;
2733 acc = insn & 7;
2735 if (acc != 0)
2736 return 1;
2738 if (insn & ARM_CP_RW_BIT) { /* MRA */
2739 iwmmxt_load_reg(cpu_V0, acc);
2740 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2741 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2742 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
2743 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2744 } else { /* MAR */
2745 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2746 iwmmxt_store_reg(cpu_V0, acc);
2748 return 0;
2751 return 1;
/* Shift right when n is positive, left by -n otherwise; lets VFP_SREG
   handle a "big" field whose low bit sits below bit 1.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision register number: four "big" bits plus one
   low "small" bit.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision register number into reg.  The "small" bit
   is the top bit of the register number and is only valid with VFP3
   (pre-VFP3 cores have just 16 D registers, so a set bit UNDEFs).  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Per-operand (D = destination, N/M = sources) field positions.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2774 /* Move between integer and VFP cores. */
2775 static TCGv_i32 gen_vfp_mrs(void)
2777 TCGv_i32 tmp = tcg_temp_new_i32();
2778 tcg_gen_mov_i32(tmp, cpu_F0s);
2779 return tmp;
2782 static void gen_vfp_msr(TCGv_i32 tmp)
2784 tcg_gen_mov_i32(cpu_F0s, tmp);
2785 tcg_temp_free_i32(tmp);
2788 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2790 TCGv_i32 tmp = tcg_temp_new_i32();
2791 if (shift)
2792 tcg_gen_shri_i32(var, var, shift);
2793 tcg_gen_ext8u_i32(var, var);
2794 tcg_gen_shli_i32(tmp, var, 8);
2795 tcg_gen_or_i32(var, var, tmp);
2796 tcg_gen_shli_i32(tmp, var, 16);
2797 tcg_gen_or_i32(var, var, tmp);
2798 tcg_temp_free_i32(tmp);
2801 static void gen_neon_dup_low16(TCGv_i32 var)
2803 TCGv_i32 tmp = tcg_temp_new_i32();
2804 tcg_gen_ext16u_i32(var, var);
2805 tcg_gen_shli_i32(tmp, var, 16);
2806 tcg_gen_or_i32(var, var, tmp);
2807 tcg_temp_free_i32(tmp);
2810 static void gen_neon_dup_high16(TCGv_i32 var)
2812 TCGv_i32 tmp = tcg_temp_new_i32();
2813 tcg_gen_andi_i32(var, var, 0xffff0000);
2814 tcg_gen_shri_i32(tmp, var, 16);
2815 tcg_gen_or_i32(var, var, tmp);
2816 tcg_temp_free_i32(tmp);
2819 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2821 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2822 TCGv_i32 tmp = tcg_temp_new_i32();
2823 switch (size) {
2824 case 0:
2825 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2826 gen_neon_dup_u8(tmp, 0);
2827 break;
2828 case 1:
2829 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
2830 gen_neon_dup_low16(tmp);
2831 break;
2832 case 2:
2833 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2834 break;
2835 default: /* Avoid compiler warnings. */
2836 abort();
2838 return tmp;
2841 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2842 uint32_t dp)
2844 uint32_t cc = extract32(insn, 20, 2);
2846 if (dp) {
2847 TCGv_i64 frn, frm, dest;
2848 TCGv_i64 tmp, zero, zf, nf, vf;
2850 zero = tcg_const_i64(0);
2852 frn = tcg_temp_new_i64();
2853 frm = tcg_temp_new_i64();
2854 dest = tcg_temp_new_i64();
2856 zf = tcg_temp_new_i64();
2857 nf = tcg_temp_new_i64();
2858 vf = tcg_temp_new_i64();
2860 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2861 tcg_gen_ext_i32_i64(nf, cpu_NF);
2862 tcg_gen_ext_i32_i64(vf, cpu_VF);
2864 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2865 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2866 switch (cc) {
2867 case 0: /* eq: Z */
2868 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2869 frn, frm);
2870 break;
2871 case 1: /* vs: V */
2872 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
2873 frn, frm);
2874 break;
2875 case 2: /* ge: N == V -> N ^ V == 0 */
2876 tmp = tcg_temp_new_i64();
2877 tcg_gen_xor_i64(tmp, vf, nf);
2878 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2879 frn, frm);
2880 tcg_temp_free_i64(tmp);
2881 break;
2882 case 3: /* gt: !Z && N == V */
2883 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
2884 frn, frm);
2885 tmp = tcg_temp_new_i64();
2886 tcg_gen_xor_i64(tmp, vf, nf);
2887 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2888 dest, frm);
2889 tcg_temp_free_i64(tmp);
2890 break;
2892 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2893 tcg_temp_free_i64(frn);
2894 tcg_temp_free_i64(frm);
2895 tcg_temp_free_i64(dest);
2897 tcg_temp_free_i64(zf);
2898 tcg_temp_free_i64(nf);
2899 tcg_temp_free_i64(vf);
2901 tcg_temp_free_i64(zero);
2902 } else {
2903 TCGv_i32 frn, frm, dest;
2904 TCGv_i32 tmp, zero;
2906 zero = tcg_const_i32(0);
2908 frn = tcg_temp_new_i32();
2909 frm = tcg_temp_new_i32();
2910 dest = tcg_temp_new_i32();
2911 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2912 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2913 switch (cc) {
2914 case 0: /* eq: Z */
2915 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
2916 frn, frm);
2917 break;
2918 case 1: /* vs: V */
2919 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
2920 frn, frm);
2921 break;
2922 case 2: /* ge: N == V -> N ^ V == 0 */
2923 tmp = tcg_temp_new_i32();
2924 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2925 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2926 frn, frm);
2927 tcg_temp_free_i32(tmp);
2928 break;
2929 case 3: /* gt: !Z && N == V */
2930 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
2931 frn, frm);
2932 tmp = tcg_temp_new_i32();
2933 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2934 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2935 dest, frm);
2936 tcg_temp_free_i32(tmp);
2937 break;
2939 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2940 tcg_temp_free_i32(frn);
2941 tcg_temp_free_i32(frm);
2942 tcg_temp_free_i32(dest);
2944 tcg_temp_free_i32(zero);
2947 return 0;
2950 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2951 uint32_t rm, uint32_t dp)
2953 uint32_t vmin = extract32(insn, 6, 1);
2954 TCGv_ptr fpst = get_fpstatus_ptr(0);
2956 if (dp) {
2957 TCGv_i64 frn, frm, dest;
2959 frn = tcg_temp_new_i64();
2960 frm = tcg_temp_new_i64();
2961 dest = tcg_temp_new_i64();
2963 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2964 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2965 if (vmin) {
2966 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
2967 } else {
2968 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
2970 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2971 tcg_temp_free_i64(frn);
2972 tcg_temp_free_i64(frm);
2973 tcg_temp_free_i64(dest);
2974 } else {
2975 TCGv_i32 frn, frm, dest;
2977 frn = tcg_temp_new_i32();
2978 frm = tcg_temp_new_i32();
2979 dest = tcg_temp_new_i32();
2981 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2982 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2983 if (vmin) {
2984 gen_helper_vfp_minnums(dest, frn, frm, fpst);
2985 } else {
2986 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
2988 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2989 tcg_temp_free_i32(frn);
2990 tcg_temp_free_i32(frm);
2991 tcg_temp_free_i32(dest);
2994 tcg_temp_free_ptr(fpst);
2995 return 0;
2998 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2999 int rounding)
3001 TCGv_ptr fpst = get_fpstatus_ptr(0);
3002 TCGv_i32 tcg_rmode;
3004 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3005 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3007 if (dp) {
3008 TCGv_i64 tcg_op;
3009 TCGv_i64 tcg_res;
3010 tcg_op = tcg_temp_new_i64();
3011 tcg_res = tcg_temp_new_i64();
3012 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3013 gen_helper_rintd(tcg_res, tcg_op, fpst);
3014 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3015 tcg_temp_free_i64(tcg_op);
3016 tcg_temp_free_i64(tcg_res);
3017 } else {
3018 TCGv_i32 tcg_op;
3019 TCGv_i32 tcg_res;
3020 tcg_op = tcg_temp_new_i32();
3021 tcg_res = tcg_temp_new_i32();
3022 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3023 gen_helper_rints(tcg_res, tcg_op, fpst);
3024 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3025 tcg_temp_free_i32(tcg_op);
3026 tcg_temp_free_i32(tcg_res);
3029 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3030 tcg_temp_free_i32(tcg_rmode);
3032 tcg_temp_free_ptr(fpst);
3033 return 0;
3036 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3037 int rounding)
3039 bool is_signed = extract32(insn, 7, 1);
3040 TCGv_ptr fpst = get_fpstatus_ptr(0);
3041 TCGv_i32 tcg_rmode, tcg_shift;
3043 tcg_shift = tcg_const_i32(0);
3045 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3046 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3048 if (dp) {
3049 TCGv_i64 tcg_double, tcg_res;
3050 TCGv_i32 tcg_tmp;
3051 /* Rd is encoded as a single precision register even when the source
3052 * is double precision.
3054 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3055 tcg_double = tcg_temp_new_i64();
3056 tcg_res = tcg_temp_new_i64();
3057 tcg_tmp = tcg_temp_new_i32();
3058 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3059 if (is_signed) {
3060 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3061 } else {
3062 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3064 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
3065 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3066 tcg_temp_free_i32(tcg_tmp);
3067 tcg_temp_free_i64(tcg_res);
3068 tcg_temp_free_i64(tcg_double);
3069 } else {
3070 TCGv_i32 tcg_single, tcg_res;
3071 tcg_single = tcg_temp_new_i32();
3072 tcg_res = tcg_temp_new_i32();
3073 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3074 if (is_signed) {
3075 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3076 } else {
3077 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3079 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3080 tcg_temp_free_i32(tcg_res);
3081 tcg_temp_free_i32(tcg_single);
3084 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3085 tcg_temp_free_i32(tcg_rmode);
3087 tcg_temp_free_i32(tcg_shift);
3089 tcg_temp_free_ptr(fpst);
3091 return 0;
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 * Index is the 2-bit RM field; order here is deliberate and must
 * not be changed.
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
3105 static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
3107 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3109 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3110 return 1;
3113 if (dp) {
3114 VFP_DREG_D(rd, insn);
3115 VFP_DREG_N(rn, insn);
3116 VFP_DREG_M(rm, insn);
3117 } else {
3118 rd = VFP_SREG_D(insn);
3119 rn = VFP_SREG_N(insn);
3120 rm = VFP_SREG_M(insn);
3123 if ((insn & 0x0f800e50) == 0x0e000a00) {
3124 return handle_vsel(insn, rd, rn, rm, dp);
3125 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3126 return handle_vminmaxnm(insn, rd, rn, rm, dp);
3127 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3128 /* VRINTA, VRINTN, VRINTP, VRINTM */
3129 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3130 return handle_vrint(insn, rd, rm, dp, rounding);
3131 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3132 /* VCVTA, VCVTN, VCVTP, VCVTM */
3133 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3134 return handle_vcvt(insn, rd, rm, dp, rounding);
3136 return 1;
3139 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3140 (ie. an undefined instruction). */
3141 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3143 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3144 int dp, veclen;
3145 TCGv_i32 addr;
3146 TCGv_i32 tmp;
3147 TCGv_i32 tmp2;
3149 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
3150 return 1;
3153 /* FIXME: this access check should not take precedence over UNDEF
3154 * for invalid encodings; we will generate incorrect syndrome information
3155 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3157 if (s->fp_excp_el) {
3158 gen_exception_insn(s, 4, EXCP_UDEF,
3159 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
3160 return 0;
3163 if (!s->vfp_enabled) {
3164 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3165 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3166 return 1;
3167 rn = (insn >> 16) & 0xf;
3168 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3169 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
3170 return 1;
3174 if (extract32(insn, 28, 4) == 0xf) {
3175 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3176 * only used in v8 and above.
3178 return disas_vfp_v8_insn(s, insn);
3181 dp = ((insn & 0xf00) == 0xb00);
3182 switch ((insn >> 24) & 0xf) {
3183 case 0xe:
3184 if (insn & (1 << 4)) {
3185 /* single register transfer */
3186 rd = (insn >> 12) & 0xf;
3187 if (dp) {
3188 int size;
3189 int pass;
3191 VFP_DREG_N(rn, insn);
3192 if (insn & 0xf)
3193 return 1;
3194 if (insn & 0x00c00060
3195 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
3196 return 1;
3199 pass = (insn >> 21) & 1;
3200 if (insn & (1 << 22)) {
3201 size = 0;
3202 offset = ((insn >> 5) & 3) * 8;
3203 } else if (insn & (1 << 5)) {
3204 size = 1;
3205 offset = (insn & (1 << 6)) ? 16 : 0;
3206 } else {
3207 size = 2;
3208 offset = 0;
3210 if (insn & ARM_CP_RW_BIT) {
3211 /* vfp->arm */
3212 tmp = neon_load_reg(rn, pass);
3213 switch (size) {
3214 case 0:
3215 if (offset)
3216 tcg_gen_shri_i32(tmp, tmp, offset);
3217 if (insn & (1 << 23))
3218 gen_uxtb(tmp);
3219 else
3220 gen_sxtb(tmp);
3221 break;
3222 case 1:
3223 if (insn & (1 << 23)) {
3224 if (offset) {
3225 tcg_gen_shri_i32(tmp, tmp, 16);
3226 } else {
3227 gen_uxth(tmp);
3229 } else {
3230 if (offset) {
3231 tcg_gen_sari_i32(tmp, tmp, 16);
3232 } else {
3233 gen_sxth(tmp);
3236 break;
3237 case 2:
3238 break;
3240 store_reg(s, rd, tmp);
3241 } else {
3242 /* arm->vfp */
3243 tmp = load_reg(s, rd);
3244 if (insn & (1 << 23)) {
3245 /* VDUP */
3246 if (size == 0) {
3247 gen_neon_dup_u8(tmp, 0);
3248 } else if (size == 1) {
3249 gen_neon_dup_low16(tmp);
3251 for (n = 0; n <= pass * 2; n++) {
3252 tmp2 = tcg_temp_new_i32();
3253 tcg_gen_mov_i32(tmp2, tmp);
3254 neon_store_reg(rn, n, tmp2);
3256 neon_store_reg(rn, n, tmp);
3257 } else {
3258 /* VMOV */
3259 switch (size) {
3260 case 0:
3261 tmp2 = neon_load_reg(rn, pass);
3262 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
3263 tcg_temp_free_i32(tmp2);
3264 break;
3265 case 1:
3266 tmp2 = neon_load_reg(rn, pass);
3267 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
3268 tcg_temp_free_i32(tmp2);
3269 break;
3270 case 2:
3271 break;
3273 neon_store_reg(rn, pass, tmp);
3276 } else { /* !dp */
3277 if ((insn & 0x6f) != 0x00)
3278 return 1;
3279 rn = VFP_SREG_N(insn);
3280 if (insn & ARM_CP_RW_BIT) {
3281 /* vfp->arm */
3282 if (insn & (1 << 21)) {
3283 /* system register */
3284 rn >>= 1;
3286 switch (rn) {
3287 case ARM_VFP_FPSID:
3288 /* VFP2 allows access to FSID from userspace.
3289 VFP3 restricts all id registers to privileged
3290 accesses. */
3291 if (IS_USER(s)
3292 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3293 return 1;
3295 tmp = load_cpu_field(vfp.xregs[rn]);
3296 break;
3297 case ARM_VFP_FPEXC:
3298 if (IS_USER(s))
3299 return 1;
3300 tmp = load_cpu_field(vfp.xregs[rn]);
3301 break;
3302 case ARM_VFP_FPINST:
3303 case ARM_VFP_FPINST2:
3304 /* Not present in VFP3. */
3305 if (IS_USER(s)
3306 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3307 return 1;
3309 tmp = load_cpu_field(vfp.xregs[rn]);
3310 break;
3311 case ARM_VFP_FPSCR:
3312 if (rd == 15) {
3313 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3314 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3315 } else {
3316 tmp = tcg_temp_new_i32();
3317 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3319 break;
3320 case ARM_VFP_MVFR2:
3321 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3322 return 1;
3324 /* fall through */
3325 case ARM_VFP_MVFR0:
3326 case ARM_VFP_MVFR1:
3327 if (IS_USER(s)
3328 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
3329 return 1;
3331 tmp = load_cpu_field(vfp.xregs[rn]);
3332 break;
3333 default:
3334 return 1;
3336 } else {
3337 gen_mov_F0_vreg(0, rn);
3338 tmp = gen_vfp_mrs();
3340 if (rd == 15) {
3341 /* Set the 4 flag bits in the CPSR. */
3342 gen_set_nzcv(tmp);
3343 tcg_temp_free_i32(tmp);
3344 } else {
3345 store_reg(s, rd, tmp);
3347 } else {
3348 /* arm->vfp */
3349 if (insn & (1 << 21)) {
3350 rn >>= 1;
3351 /* system register */
3352 switch (rn) {
3353 case ARM_VFP_FPSID:
3354 case ARM_VFP_MVFR0:
3355 case ARM_VFP_MVFR1:
3356 /* Writes are ignored. */
3357 break;
3358 case ARM_VFP_FPSCR:
3359 tmp = load_reg(s, rd);
3360 gen_helper_vfp_set_fpscr(cpu_env, tmp);
3361 tcg_temp_free_i32(tmp);
3362 gen_lookup_tb(s);
3363 break;
3364 case ARM_VFP_FPEXC:
3365 if (IS_USER(s))
3366 return 1;
3367 /* TODO: VFP subarchitecture support.
3368 * For now, keep the EN bit only */
3369 tmp = load_reg(s, rd);
3370 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
3371 store_cpu_field(tmp, vfp.xregs[rn]);
3372 gen_lookup_tb(s);
3373 break;
3374 case ARM_VFP_FPINST:
3375 case ARM_VFP_FPINST2:
3376 if (IS_USER(s)) {
3377 return 1;
3379 tmp = load_reg(s, rd);
3380 store_cpu_field(tmp, vfp.xregs[rn]);
3381 break;
3382 default:
3383 return 1;
3385 } else {
3386 tmp = load_reg(s, rd);
3387 gen_vfp_msr(tmp);
3388 gen_mov_vreg_F0(0, rn);
3392 } else {
3393 /* data processing */
3394 /* The opcode is in bits 23, 21, 20 and 6. */
3395 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3396 if (dp) {
3397 if (op == 15) {
3398 /* rn is opcode */
3399 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3400 } else {
3401 /* rn is register number */
3402 VFP_DREG_N(rn, insn);
3405 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3406 ((rn & 0x1e) == 0x6))) {
3407 /* Integer or single/half precision destination. */
3408 rd = VFP_SREG_D(insn);
3409 } else {
3410 VFP_DREG_D(rd, insn);
3412 if (op == 15 &&
3413 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3414 ((rn & 0x1e) == 0x4))) {
3415 /* VCVT from int or half precision is always from S reg
3416 * regardless of dp bit. VCVT with immediate frac_bits
3417 * has same format as SREG_M.
3419 rm = VFP_SREG_M(insn);
3420 } else {
3421 VFP_DREG_M(rm, insn);
3423 } else {
3424 rn = VFP_SREG_N(insn);
3425 if (op == 15 && rn == 15) {
3426 /* Double precision destination. */
3427 VFP_DREG_D(rd, insn);
3428 } else {
3429 rd = VFP_SREG_D(insn);
3431 /* NB that we implicitly rely on the encoding for the frac_bits
3432 * in VCVT of fixed to float being the same as that of an SREG_M
3434 rm = VFP_SREG_M(insn);
3437 veclen = s->vec_len;
3438 if (op == 15 && rn > 3)
3439 veclen = 0;
3441 /* Shut up compiler warnings. */
3442 delta_m = 0;
3443 delta_d = 0;
3444 bank_mask = 0;
3446 if (veclen > 0) {
3447 if (dp)
3448 bank_mask = 0xc;
3449 else
3450 bank_mask = 0x18;
3452 /* Figure out what type of vector operation this is. */
3453 if ((rd & bank_mask) == 0) {
3454 /* scalar */
3455 veclen = 0;
3456 } else {
3457 if (dp)
3458 delta_d = (s->vec_stride >> 1) + 1;
3459 else
3460 delta_d = s->vec_stride + 1;
3462 if ((rm & bank_mask) == 0) {
3463 /* mixed scalar/vector */
3464 delta_m = 0;
3465 } else {
3466 /* vector */
3467 delta_m = delta_d;
3472 /* Load the initial operands. */
3473 if (op == 15) {
3474 switch (rn) {
3475 case 16:
3476 case 17:
3477 /* Integer source */
3478 gen_mov_F0_vreg(0, rm);
3479 break;
3480 case 8:
3481 case 9:
3482 /* Compare */
3483 gen_mov_F0_vreg(dp, rd);
3484 gen_mov_F1_vreg(dp, rm);
3485 break;
3486 case 10:
3487 case 11:
3488 /* Compare with zero */
3489 gen_mov_F0_vreg(dp, rd);
3490 gen_vfp_F1_ld0(dp);
3491 break;
3492 case 20:
3493 case 21:
3494 case 22:
3495 case 23:
3496 case 28:
3497 case 29:
3498 case 30:
3499 case 31:
3500 /* Source and destination the same. */
3501 gen_mov_F0_vreg(dp, rd);
3502 break;
3503 case 4:
3504 case 5:
3505 case 6:
3506 case 7:
3507 /* VCVTB, VCVTT: only present with the halfprec extension
3508 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3509 * (we choose to UNDEF)
3511 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3512 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
3513 return 1;
3515 if (!extract32(rn, 1, 1)) {
3516 /* Half precision source. */
3517 gen_mov_F0_vreg(0, rm);
3518 break;
3520 /* Otherwise fall through */
3521 default:
3522 /* One source operand. */
3523 gen_mov_F0_vreg(dp, rm);
3524 break;
3526 } else {
3527 /* Two source operands. */
3528 gen_mov_F0_vreg(dp, rn);
3529 gen_mov_F1_vreg(dp, rm);
3532 for (;;) {
3533 /* Perform the calculation. */
3534 switch (op) {
3535 case 0: /* VMLA: fd + (fn * fm) */
3536 /* Note that order of inputs to the add matters for NaNs */
3537 gen_vfp_F1_mul(dp);
3538 gen_mov_F0_vreg(dp, rd);
3539 gen_vfp_add(dp);
3540 break;
3541 case 1: /* VMLS: fd + -(fn * fm) */
3542 gen_vfp_mul(dp);
3543 gen_vfp_F1_neg(dp);
3544 gen_mov_F0_vreg(dp, rd);
3545 gen_vfp_add(dp);
3546 break;
3547 case 2: /* VNMLS: -fd + (fn * fm) */
3548 /* Note that it isn't valid to replace (-A + B) with (B - A)
3549 * or similar plausible looking simplifications
3550 * because this will give wrong results for NaNs.
3552 gen_vfp_F1_mul(dp);
3553 gen_mov_F0_vreg(dp, rd);
3554 gen_vfp_neg(dp);
3555 gen_vfp_add(dp);
3556 break;
3557 case 3: /* VNMLA: -fd + -(fn * fm) */
3558 gen_vfp_mul(dp);
3559 gen_vfp_F1_neg(dp);
3560 gen_mov_F0_vreg(dp, rd);
3561 gen_vfp_neg(dp);
3562 gen_vfp_add(dp);
3563 break;
3564 case 4: /* mul: fn * fm */
3565 gen_vfp_mul(dp);
3566 break;
3567 case 5: /* nmul: -(fn * fm) */
3568 gen_vfp_mul(dp);
3569 gen_vfp_neg(dp);
3570 break;
3571 case 6: /* add: fn + fm */
3572 gen_vfp_add(dp);
3573 break;
3574 case 7: /* sub: fn - fm */
3575 gen_vfp_sub(dp);
3576 break;
3577 case 8: /* div: fn / fm */
3578 gen_vfp_div(dp);
3579 break;
3580 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3581 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3582 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3583 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3584 /* These are fused multiply-add, and must be done as one
3585 * floating point operation with no rounding between the
3586 * multiplication and addition steps.
3587 * NB that doing the negations here as separate steps is
3588 * correct : an input NaN should come out with its sign bit
3589 * flipped if it is a negated-input.
3591 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
3592 return 1;
3594 if (dp) {
3595 TCGv_ptr fpst;
3596 TCGv_i64 frd;
3597 if (op & 1) {
3598 /* VFNMS, VFMS */
3599 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3601 frd = tcg_temp_new_i64();
3602 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3603 if (op & 2) {
3604 /* VFNMA, VFNMS */
3605 gen_helper_vfp_negd(frd, frd);
3607 fpst = get_fpstatus_ptr(0);
3608 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3609 cpu_F1d, frd, fpst);
3610 tcg_temp_free_ptr(fpst);
3611 tcg_temp_free_i64(frd);
3612 } else {
3613 TCGv_ptr fpst;
3614 TCGv_i32 frd;
3615 if (op & 1) {
3616 /* VFNMS, VFMS */
3617 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3619 frd = tcg_temp_new_i32();
3620 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3621 if (op & 2) {
3622 gen_helper_vfp_negs(frd, frd);
3624 fpst = get_fpstatus_ptr(0);
3625 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3626 cpu_F1s, frd, fpst);
3627 tcg_temp_free_ptr(fpst);
3628 tcg_temp_free_i32(frd);
3630 break;
3631 case 14: /* fconst */
3632 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3633 return 1;
3636 n = (insn << 12) & 0x80000000;
3637 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3638 if (dp) {
3639 if (i & 0x40)
3640 i |= 0x3f80;
3641 else
3642 i |= 0x4000;
3643 n |= i << 16;
3644 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3645 } else {
3646 if (i & 0x40)
3647 i |= 0x780;
3648 else
3649 i |= 0x800;
3650 n |= i << 19;
3651 tcg_gen_movi_i32(cpu_F0s, n);
3653 break;
3654 case 15: /* extension space */
3655 switch (rn) {
3656 case 0: /* cpy */
3657 /* no-op */
3658 break;
3659 case 1: /* abs */
3660 gen_vfp_abs(dp);
3661 break;
3662 case 2: /* neg */
3663 gen_vfp_neg(dp);
3664 break;
3665 case 3: /* sqrt */
3666 gen_vfp_sqrt(dp);
3667 break;
3668 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3669 tmp = gen_vfp_mrs();
3670 tcg_gen_ext16u_i32(tmp, tmp);
3671 if (dp) {
3672 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3673 cpu_env);
3674 } else {
3675 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3676 cpu_env);
3678 tcg_temp_free_i32(tmp);
3679 break;
3680 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3681 tmp = gen_vfp_mrs();
3682 tcg_gen_shri_i32(tmp, tmp, 16);
3683 if (dp) {
3684 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3685 cpu_env);
3686 } else {
3687 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3688 cpu_env);
3690 tcg_temp_free_i32(tmp);
3691 break;
3692 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3693 tmp = tcg_temp_new_i32();
3694 if (dp) {
3695 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3696 cpu_env);
3697 } else {
3698 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3699 cpu_env);
3701 gen_mov_F0_vreg(0, rd);
3702 tmp2 = gen_vfp_mrs();
3703 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3704 tcg_gen_or_i32(tmp, tmp, tmp2);
3705 tcg_temp_free_i32(tmp2);
3706 gen_vfp_msr(tmp);
3707 break;
3708 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3709 tmp = tcg_temp_new_i32();
3710 if (dp) {
3711 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3712 cpu_env);
3713 } else {
3714 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3715 cpu_env);
3717 tcg_gen_shli_i32(tmp, tmp, 16);
3718 gen_mov_F0_vreg(0, rd);
3719 tmp2 = gen_vfp_mrs();
3720 tcg_gen_ext16u_i32(tmp2, tmp2);
3721 tcg_gen_or_i32(tmp, tmp, tmp2);
3722 tcg_temp_free_i32(tmp2);
3723 gen_vfp_msr(tmp);
3724 break;
3725 case 8: /* cmp */
3726 gen_vfp_cmp(dp);
3727 break;
3728 case 9: /* cmpe */
3729 gen_vfp_cmpe(dp);
3730 break;
3731 case 10: /* cmpz */
3732 gen_vfp_cmp(dp);
3733 break;
3734 case 11: /* cmpez */
3735 gen_vfp_F1_ld0(dp);
3736 gen_vfp_cmpe(dp);
3737 break;
3738 case 12: /* vrintr */
3740 TCGv_ptr fpst = get_fpstatus_ptr(0);
3741 if (dp) {
3742 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3743 } else {
3744 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3746 tcg_temp_free_ptr(fpst);
3747 break;
3749 case 13: /* vrintz */
3751 TCGv_ptr fpst = get_fpstatus_ptr(0);
3752 TCGv_i32 tcg_rmode;
3753 tcg_rmode = tcg_const_i32(float_round_to_zero);
3754 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3755 if (dp) {
3756 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3757 } else {
3758 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3760 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3761 tcg_temp_free_i32(tcg_rmode);
3762 tcg_temp_free_ptr(fpst);
3763 break;
3765 case 14: /* vrintx */
3767 TCGv_ptr fpst = get_fpstatus_ptr(0);
3768 if (dp) {
3769 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3770 } else {
3771 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3773 tcg_temp_free_ptr(fpst);
3774 break;
3776 case 15: /* single<->double conversion */
3777 if (dp)
3778 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3779 else
3780 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3781 break;
3782 case 16: /* fuito */
3783 gen_vfp_uito(dp, 0);
3784 break;
3785 case 17: /* fsito */
3786 gen_vfp_sito(dp, 0);
3787 break;
3788 case 20: /* fshto */
3789 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3790 return 1;
3792 gen_vfp_shto(dp, 16 - rm, 0);
3793 break;
3794 case 21: /* fslto */
3795 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3796 return 1;
3798 gen_vfp_slto(dp, 32 - rm, 0);
3799 break;
3800 case 22: /* fuhto */
3801 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3802 return 1;
3804 gen_vfp_uhto(dp, 16 - rm, 0);
3805 break;
3806 case 23: /* fulto */
3807 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3808 return 1;
3810 gen_vfp_ulto(dp, 32 - rm, 0);
3811 break;
3812 case 24: /* ftoui */
3813 gen_vfp_toui(dp, 0);
3814 break;
3815 case 25: /* ftouiz */
3816 gen_vfp_touiz(dp, 0);
3817 break;
3818 case 26: /* ftosi */
3819 gen_vfp_tosi(dp, 0);
3820 break;
3821 case 27: /* ftosiz */
3822 gen_vfp_tosiz(dp, 0);
3823 break;
3824 case 28: /* ftosh */
3825 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3826 return 1;
3828 gen_vfp_tosh(dp, 16 - rm, 0);
3829 break;
3830 case 29: /* ftosl */
3831 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3832 return 1;
3834 gen_vfp_tosl(dp, 32 - rm, 0);
3835 break;
3836 case 30: /* ftouh */
3837 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3838 return 1;
3840 gen_vfp_touh(dp, 16 - rm, 0);
3841 break;
3842 case 31: /* ftoul */
3843 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3844 return 1;
3846 gen_vfp_toul(dp, 32 - rm, 0);
3847 break;
3848 default: /* undefined */
3849 return 1;
3851 break;
3852 default: /* undefined */
3853 return 1;
3856 /* Write back the result. */
3857 if (op == 15 && (rn >= 8 && rn <= 11)) {
3858 /* Comparison, do nothing. */
3859 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3860 (rn & 0x1e) == 0x6)) {
3861 /* VCVT double to int: always integer result.
3862 * VCVT double to half precision is always a single
3863 * precision result.
3865 gen_mov_vreg_F0(0, rd);
3866 } else if (op == 15 && rn == 15) {
3867 /* conversion */
3868 gen_mov_vreg_F0(!dp, rd);
3869 } else {
3870 gen_mov_vreg_F0(dp, rd);
3873 /* break out of the loop if we have finished */
3874 if (veclen == 0)
3875 break;
3877 if (op == 15 && delta_m == 0) {
3878 /* single source one-many */
3879 while (veclen--) {
3880 rd = ((rd + delta_d) & (bank_mask - 1))
3881 | (rd & bank_mask);
3882 gen_mov_vreg_F0(dp, rd);
3884 break;
3886 /* Setup the next operands. */
3887 veclen--;
3888 rd = ((rd + delta_d) & (bank_mask - 1))
3889 | (rd & bank_mask);
3891 if (op == 15) {
3892 /* One source operand. */
3893 rm = ((rm + delta_m) & (bank_mask - 1))
3894 | (rm & bank_mask);
3895 gen_mov_F0_vreg(dp, rm);
3896 } else {
3897 /* Two source operands. */
3898 rn = ((rn + delta_d) & (bank_mask - 1))
3899 | (rn & bank_mask);
3900 gen_mov_F0_vreg(dp, rn);
3901 if (delta_m) {
3902 rm = ((rm + delta_m) & (bank_mask - 1))
3903 | (rm & bank_mask);
3904 gen_mov_F1_vreg(dp, rm);
3909 break;
3910 case 0xc:
3911 case 0xd:
3912 if ((insn & 0x03e00000) == 0x00400000) {
3913 /* two-register transfer */
3914 rn = (insn >> 16) & 0xf;
3915 rd = (insn >> 12) & 0xf;
3916 if (dp) {
3917 VFP_DREG_M(rm, insn);
3918 } else {
3919 rm = VFP_SREG_M(insn);
3922 if (insn & ARM_CP_RW_BIT) {
3923 /* vfp->arm */
3924 if (dp) {
3925 gen_mov_F0_vreg(0, rm * 2);
3926 tmp = gen_vfp_mrs();
3927 store_reg(s, rd, tmp);
3928 gen_mov_F0_vreg(0, rm * 2 + 1);
3929 tmp = gen_vfp_mrs();
3930 store_reg(s, rn, tmp);
3931 } else {
3932 gen_mov_F0_vreg(0, rm);
3933 tmp = gen_vfp_mrs();
3934 store_reg(s, rd, tmp);
3935 gen_mov_F0_vreg(0, rm + 1);
3936 tmp = gen_vfp_mrs();
3937 store_reg(s, rn, tmp);
3939 } else {
3940 /* arm->vfp */
3941 if (dp) {
3942 tmp = load_reg(s, rd);
3943 gen_vfp_msr(tmp);
3944 gen_mov_vreg_F0(0, rm * 2);
3945 tmp = load_reg(s, rn);
3946 gen_vfp_msr(tmp);
3947 gen_mov_vreg_F0(0, rm * 2 + 1);
3948 } else {
3949 tmp = load_reg(s, rd);
3950 gen_vfp_msr(tmp);
3951 gen_mov_vreg_F0(0, rm);
3952 tmp = load_reg(s, rn);
3953 gen_vfp_msr(tmp);
3954 gen_mov_vreg_F0(0, rm + 1);
3957 } else {
3958 /* Load/store */
3959 rn = (insn >> 16) & 0xf;
3960 if (dp)
3961 VFP_DREG_D(rd, insn);
3962 else
3963 rd = VFP_SREG_D(insn);
3964 if ((insn & 0x01200000) == 0x01000000) {
3965 /* Single load/store */
3966 offset = (insn & 0xff) << 2;
3967 if ((insn & (1 << 23)) == 0)
3968 offset = -offset;
3969 if (s->thumb && rn == 15) {
3970 /* This is actually UNPREDICTABLE */
3971 addr = tcg_temp_new_i32();
3972 tcg_gen_movi_i32(addr, s->pc & ~2);
3973 } else {
3974 addr = load_reg(s, rn);
3976 tcg_gen_addi_i32(addr, addr, offset);
3977 if (insn & (1 << 20)) {
3978 gen_vfp_ld(s, dp, addr);
3979 gen_mov_vreg_F0(dp, rd);
3980 } else {
3981 gen_mov_F0_vreg(dp, rd);
3982 gen_vfp_st(s, dp, addr);
3984 tcg_temp_free_i32(addr);
3985 } else {
3986 /* load/store multiple */
3987 int w = insn & (1 << 21);
3988 if (dp)
3989 n = (insn >> 1) & 0x7f;
3990 else
3991 n = insn & 0xff;
3993 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3994 /* P == U , W == 1 => UNDEF */
3995 return 1;
3997 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3998 /* UNPREDICTABLE cases for bad immediates: we choose to
3999 * UNDEF to avoid generating huge numbers of TCG ops
4001 return 1;
4003 if (rn == 15 && w) {
4004 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4005 return 1;
4008 if (s->thumb && rn == 15) {
4009 /* This is actually UNPREDICTABLE */
4010 addr = tcg_temp_new_i32();
4011 tcg_gen_movi_i32(addr, s->pc & ~2);
4012 } else {
4013 addr = load_reg(s, rn);
4015 if (insn & (1 << 24)) /* pre-decrement */
4016 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
4018 if (dp)
4019 offset = 8;
4020 else
4021 offset = 4;
4022 for (i = 0; i < n; i++) {
4023 if (insn & ARM_CP_RW_BIT) {
4024 /* load */
4025 gen_vfp_ld(s, dp, addr);
4026 gen_mov_vreg_F0(dp, rd + i);
4027 } else {
4028 /* store */
4029 gen_mov_F0_vreg(dp, rd + i);
4030 gen_vfp_st(s, dp, addr);
4032 tcg_gen_addi_i32(addr, addr, offset);
4034 if (w) {
4035 /* writeback */
4036 if (insn & (1 << 24))
4037 offset = -offset * n;
4038 else if (dp && (insn & 1))
4039 offset = 4;
4040 else
4041 offset = 0;
4043 if (offset != 0)
4044 tcg_gen_addi_i32(addr, addr, offset);
4045 store_reg(s, rn, addr);
4046 } else {
4047 tcg_temp_free_i32(addr);
4051 break;
4052 default:
4053 /* Should never happen. */
4054 return 1;
4056 return 0;
4059 static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
4061 #ifndef CONFIG_USER_ONLY
4062 return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
4063 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4064 #else
4065 return true;
4066 #endif
4069 static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
4071 if (use_goto_tb(s, dest)) {
4072 tcg_gen_goto_tb(n);
4073 gen_set_pc_im(s, dest);
4074 tcg_gen_exit_tb((uintptr_t)s->tb + n);
4075 } else {
4076 gen_set_pc_im(s, dest);
4077 tcg_gen_exit_tb(0);
4081 static inline void gen_jmp (DisasContext *s, uint32_t dest)
4083 if (unlikely(s->singlestep_enabled || s->ss_active)) {
4084 /* An indirect jump so that we still trigger the debug exception. */
4085 if (s->thumb)
4086 dest |= 1;
4087 gen_bx_im(s, dest);
4088 } else {
4089 gen_goto_tb(s, 0, dest);
4090 s->is_jmp = DISAS_TB_JUMP;
4094 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
4096 if (x)
4097 tcg_gen_sari_i32(t0, t0, 16);
4098 else
4099 gen_sxth(t0);
4100 if (y)
4101 tcg_gen_sari_i32(t1, t1, 16);
4102 else
4103 gen_sxth(t1);
4104 tcg_gen_mul_i32(t0, t0, t1);
4107 /* Return the mask of PSR bits set by a MSR instruction. */
4108 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4110 uint32_t mask;
4112 mask = 0;
4113 if (flags & (1 << 0))
4114 mask |= 0xff;
4115 if (flags & (1 << 1))
4116 mask |= 0xff00;
4117 if (flags & (1 << 2))
4118 mask |= 0xff0000;
4119 if (flags & (1 << 3))
4120 mask |= 0xff000000;
4122 /* Mask out undefined bits. */
4123 mask &= ~CPSR_RESERVED;
4124 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
4125 mask &= ~CPSR_T;
4127 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
4128 mask &= ~CPSR_Q; /* V5TE in reality*/
4130 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
4131 mask &= ~(CPSR_E | CPSR_GE);
4133 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
4134 mask &= ~CPSR_IT;
4136 /* Mask out execution state and reserved bits. */
4137 if (!spsr) {
4138 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4140 /* Mask out privileged bits. */
4141 if (IS_USER(s))
4142 mask &= CPSR_USER;
4143 return mask;
4146 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
4147 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
4149 TCGv_i32 tmp;
4150 if (spsr) {
4151 /* ??? This is also undefined in system mode. */
4152 if (IS_USER(s))
4153 return 1;
4155 tmp = load_cpu_field(spsr);
4156 tcg_gen_andi_i32(tmp, tmp, ~mask);
4157 tcg_gen_andi_i32(t0, t0, mask);
4158 tcg_gen_or_i32(tmp, tmp, t0);
4159 store_cpu_field(tmp, spsr);
4160 } else {
4161 gen_set_cpsr(t0, mask);
4163 tcg_temp_free_i32(t0);
4164 gen_lookup_tb(s);
4165 return 0;
4168 /* Returns nonzero if access to the PSR is not permitted. */
4169 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4171 TCGv_i32 tmp;
4172 tmp = tcg_temp_new_i32();
4173 tcg_gen_movi_i32(tmp, val);
4174 return gen_set_psr(s, mask, spsr, tmp);
4177 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
4178 int *tgtmode, int *regno)
4180 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4181 * the target mode and register number, and identify the various
4182 * unpredictable cases.
4183 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4184 * + executed in user mode
4185 * + using R15 as the src/dest register
4186 * + accessing an unimplemented register
4187 * + accessing a register that's inaccessible at current PL/security state*
4188 * + accessing a register that you could access with a different insn
4189 * We choose to UNDEF in all these cases.
4190 * Since we don't know which of the various AArch32 modes we are in
4191 * we have to defer some checks to runtime.
4192 * Accesses to Monitor mode registers from Secure EL1 (which implies
4193 * that EL3 is AArch64) must trap to EL3.
4195 * If the access checks fail this function will emit code to take
4196 * an exception and return false. Otherwise it will return true,
4197 * and set *tgtmode and *regno appropriately.
4199 int exc_target = default_exception_el(s);
4201 /* These instructions are present only in ARMv8, or in ARMv7 with the
4202 * Virtualization Extensions.
4204 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4205 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
4206 goto undef;
4209 if (IS_USER(s) || rn == 15) {
4210 goto undef;
4213 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4214 * of registers into (r, sysm).
4216 if (r) {
4217 /* SPSRs for other modes */
4218 switch (sysm) {
4219 case 0xe: /* SPSR_fiq */
4220 *tgtmode = ARM_CPU_MODE_FIQ;
4221 break;
4222 case 0x10: /* SPSR_irq */
4223 *tgtmode = ARM_CPU_MODE_IRQ;
4224 break;
4225 case 0x12: /* SPSR_svc */
4226 *tgtmode = ARM_CPU_MODE_SVC;
4227 break;
4228 case 0x14: /* SPSR_abt */
4229 *tgtmode = ARM_CPU_MODE_ABT;
4230 break;
4231 case 0x16: /* SPSR_und */
4232 *tgtmode = ARM_CPU_MODE_UND;
4233 break;
4234 case 0x1c: /* SPSR_mon */
4235 *tgtmode = ARM_CPU_MODE_MON;
4236 break;
4237 case 0x1e: /* SPSR_hyp */
4238 *tgtmode = ARM_CPU_MODE_HYP;
4239 break;
4240 default: /* unallocated */
4241 goto undef;
4243 /* We arbitrarily assign SPSR a register number of 16. */
4244 *regno = 16;
4245 } else {
4246 /* general purpose registers for other modes */
4247 switch (sysm) {
4248 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4249 *tgtmode = ARM_CPU_MODE_USR;
4250 *regno = sysm + 8;
4251 break;
4252 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4253 *tgtmode = ARM_CPU_MODE_FIQ;
4254 *regno = sysm;
4255 break;
4256 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4257 *tgtmode = ARM_CPU_MODE_IRQ;
4258 *regno = sysm & 1 ? 13 : 14;
4259 break;
4260 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4261 *tgtmode = ARM_CPU_MODE_SVC;
4262 *regno = sysm & 1 ? 13 : 14;
4263 break;
4264 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4265 *tgtmode = ARM_CPU_MODE_ABT;
4266 *regno = sysm & 1 ? 13 : 14;
4267 break;
4268 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4269 *tgtmode = ARM_CPU_MODE_UND;
4270 *regno = sysm & 1 ? 13 : 14;
4271 break;
4272 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4273 *tgtmode = ARM_CPU_MODE_MON;
4274 *regno = sysm & 1 ? 13 : 14;
4275 break;
4276 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4277 *tgtmode = ARM_CPU_MODE_HYP;
4278 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4279 *regno = sysm & 1 ? 13 : 17;
4280 break;
4281 default: /* unallocated */
4282 goto undef;
4286 /* Catch the 'accessing inaccessible register' cases we can detect
4287 * at translate time.
4289 switch (*tgtmode) {
4290 case ARM_CPU_MODE_MON:
4291 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
4292 goto undef;
4294 if (s->current_el == 1) {
4295 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4296 * then accesses to Mon registers trap to EL3
4298 exc_target = 3;
4299 goto undef;
4301 break;
4302 case ARM_CPU_MODE_HYP:
4303 /* Note that we can forbid accesses from EL2 here because they
4304 * must be from Hyp mode itself
4306 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
4307 goto undef;
4309 break;
4310 default:
4311 break;
4314 return true;
4316 undef:
4317 /* If we get here then some access check did not pass */
4318 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
4319 return false;
4322 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4324 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4325 int tgtmode = 0, regno = 0;
4327 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4328 return;
4331 /* Sync state because msr_banked() can raise exceptions */
4332 gen_set_condexec(s);
4333 gen_set_pc_im(s, s->pc - 4);
4334 tcg_reg = load_reg(s, rn);
4335 tcg_tgtmode = tcg_const_i32(tgtmode);
4336 tcg_regno = tcg_const_i32(regno);
4337 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4338 tcg_temp_free_i32(tcg_tgtmode);
4339 tcg_temp_free_i32(tcg_regno);
4340 tcg_temp_free_i32(tcg_reg);
4341 s->is_jmp = DISAS_UPDATE;
4344 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4346 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4347 int tgtmode = 0, regno = 0;
4349 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4350 return;
4353 /* Sync state because mrs_banked() can raise exceptions */
4354 gen_set_condexec(s);
4355 gen_set_pc_im(s, s->pc - 4);
4356 tcg_reg = tcg_temp_new_i32();
4357 tcg_tgtmode = tcg_const_i32(tgtmode);
4358 tcg_regno = tcg_const_i32(regno);
4359 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4360 tcg_temp_free_i32(tcg_tgtmode);
4361 tcg_temp_free_i32(tcg_regno);
4362 store_reg(s, rn, tcg_reg);
4363 s->is_jmp = DISAS_UPDATE;
4366 /* Store value to PC as for an exception return (ie don't
4367 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
4368 * will do the masking based on the new value of the Thumb bit.
4370 static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
4372 tcg_gen_mov_i32(cpu_R[15], pc);
4373 tcg_temp_free_i32(pc);
4376 /* Generate a v6 exception return. Marks both values as dead. */
4377 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
4379 store_pc_exc_ret(s, pc);
4380 /* The cpsr_write_eret helper will mask the low bits of PC
4381 * appropriately depending on the new Thumb bit, so it must
4382 * be called after storing the new PC.
4384 gen_helper_cpsr_write_eret(cpu_env, cpsr);
4385 tcg_temp_free_i32(cpsr);
4386 s->is_jmp = DISAS_JUMP;
4389 /* Generate an old-style exception return. Marks pc as dead. */
4390 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4392 gen_rfe(s, pc, load_cpu_field(spsr));
4395 static void gen_nop_hint(DisasContext *s, int val)
4397 switch (val) {
4398 case 1: /* yield */
4399 gen_set_pc_im(s, s->pc);
4400 s->is_jmp = DISAS_YIELD;
4401 break;
4402 case 3: /* wfi */
4403 gen_set_pc_im(s, s->pc);
4404 s->is_jmp = DISAS_WFI;
4405 break;
4406 case 2: /* wfe */
4407 gen_set_pc_im(s, s->pc);
4408 s->is_jmp = DISAS_WFE;
4409 break;
4410 case 4: /* sev */
4411 case 5: /* sevl */
4412 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
4413 default: /* nop */
4414 break;
4418 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
4420 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
4422 switch (size) {
4423 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4424 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4425 case 2: tcg_gen_add_i32(t0, t0, t1); break;
4426 default: abort();
4430 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
4432 switch (size) {
4433 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4434 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4435 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
4436 default: return;
/* 32-bit pairwise ops end up the same as the elementwise versions. */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* Invoke the cpu_env-taking NEON integer helper for "name", choosing
 * the signed/unsigned 8/16/32-bit variant from the in-scope locals
 * "size" and "u" ("tmp"/"tmp2" are the operands); performs "return 1"
 * for combinations with no helper.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV, but for helpers not needing cpu_env. */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4492 static TCGv_i32 neon_load_scratch(int scratch)
4494 TCGv_i32 tmp = tcg_temp_new_i32();
4495 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4496 return tmp;
4499 static void neon_store_scratch(int scratch, TCGv_i32 var)
4501 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4502 tcg_temp_free_i32(var);
4505 static inline TCGv_i32 neon_get_scalar(int size, int reg)
4507 TCGv_i32 tmp;
4508 if (size == 1) {
4509 tmp = neon_load_reg(reg & 7, reg >> 4);
4510 if (reg & 8) {
4511 gen_neon_dup_high16(tmp);
4512 } else {
4513 gen_neon_dup_low16(tmp);
4515 } else {
4516 tmp = neon_load_reg(reg & 15, reg >> 4);
4518 return tmp;
4521 static int gen_neon_unzip(int rd, int rm, int size, int q)
4523 TCGv_i32 tmp, tmp2;
4524 if (!q && size == 2) {
4525 return 1;
4527 tmp = tcg_const_i32(rd);
4528 tmp2 = tcg_const_i32(rm);
4529 if (q) {
4530 switch (size) {
4531 case 0:
4532 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
4533 break;
4534 case 1:
4535 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
4536 break;
4537 case 2:
4538 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
4539 break;
4540 default:
4541 abort();
4543 } else {
4544 switch (size) {
4545 case 0:
4546 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
4547 break;
4548 case 1:
4549 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
4550 break;
4551 default:
4552 abort();
4555 tcg_temp_free_i32(tmp);
4556 tcg_temp_free_i32(tmp2);
4557 return 0;
4560 static int gen_neon_zip(int rd, int rm, int size, int q)
4562 TCGv_i32 tmp, tmp2;
4563 if (!q && size == 2) {
4564 return 1;
4566 tmp = tcg_const_i32(rd);
4567 tmp2 = tcg_const_i32(rm);
4568 if (q) {
4569 switch (size) {
4570 case 0:
4571 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
4572 break;
4573 case 1:
4574 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
4575 break;
4576 case 2:
4577 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
4578 break;
4579 default:
4580 abort();
4582 } else {
4583 switch (size) {
4584 case 0:
4585 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
4586 break;
4587 case 1:
4588 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
4589 break;
4590 default:
4591 abort();
4594 tcg_temp_free_i32(tmp);
4595 tcg_temp_free_i32(tmp2);
4596 return 0;
4599 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
4601 TCGv_i32 rd, tmp;
4603 rd = tcg_temp_new_i32();
4604 tmp = tcg_temp_new_i32();
4606 tcg_gen_shli_i32(rd, t0, 8);
4607 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4608 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4609 tcg_gen_or_i32(rd, rd, tmp);
4611 tcg_gen_shri_i32(t1, t1, 8);
4612 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4613 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4614 tcg_gen_or_i32(t1, t1, tmp);
4615 tcg_gen_mov_i32(t0, rd);
4617 tcg_temp_free_i32(tmp);
4618 tcg_temp_free_i32(rd);
4621 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
4623 TCGv_i32 rd, tmp;
4625 rd = tcg_temp_new_i32();
4626 tmp = tcg_temp_new_i32();
4628 tcg_gen_shli_i32(rd, t0, 16);
4629 tcg_gen_andi_i32(tmp, t1, 0xffff);
4630 tcg_gen_or_i32(rd, rd, tmp);
4631 tcg_gen_shri_i32(t1, t1, 16);
4632 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4633 tcg_gen_or_i32(t1, t1, tmp);
4634 tcg_gen_mov_i32(t0, rd);
4636 tcg_temp_free_i32(tmp);
4637 tcg_temp_free_i32(rd);
/* Layout table for NEON "load/store multiple structures", indexed by
 * the instruction's op field (values 0..10; larger values are
 * rejected by the decoder): number of registers transferred, element
 * interleave factor, and register spacing.
 *
 * The table is read-only, so declare it const.
 */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1},
};
4659 /* Translate a NEON load/store element instruction. Return nonzero if the
4660 instruction is invalid. */
4661 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
4663 int rd, rn, rm;
4664 int op;
4665 int nregs;
4666 int interleave;
4667 int spacing;
4668 int stride;
4669 int size;
4670 int reg;
4671 int pass;
4672 int load;
4673 int shift;
4674 int n;
4675 TCGv_i32 addr;
4676 TCGv_i32 tmp;
4677 TCGv_i32 tmp2;
4678 TCGv_i64 tmp64;
4680 /* FIXME: this access check should not take precedence over UNDEF
4681 * for invalid encodings; we will generate incorrect syndrome information
4682 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4684 if (s->fp_excp_el) {
4685 gen_exception_insn(s, 4, EXCP_UDEF,
4686 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
4687 return 0;
4690 if (!s->vfp_enabled)
4691 return 1;
4692 VFP_DREG_D(rd, insn);
4693 rn = (insn >> 16) & 0xf;
4694 rm = insn & 0xf;
4695 load = (insn & (1 << 21)) != 0;
4696 if ((insn & (1 << 23)) == 0) {
4697 /* Load store all elements. */
4698 op = (insn >> 8) & 0xf;
4699 size = (insn >> 6) & 3;
4700 if (op > 10)
4701 return 1;
4702 /* Catch UNDEF cases for bad values of align field */
4703 switch (op & 0xc) {
4704 case 4:
4705 if (((insn >> 5) & 1) == 1) {
4706 return 1;
4708 break;
4709 case 8:
4710 if (((insn >> 4) & 3) == 3) {
4711 return 1;
4713 break;
4714 default:
4715 break;
4717 nregs = neon_ls_element_type[op].nregs;
4718 interleave = neon_ls_element_type[op].interleave;
4719 spacing = neon_ls_element_type[op].spacing;
4720 if (size == 3 && (interleave | spacing) != 1)
4721 return 1;
4722 addr = tcg_temp_new_i32();
4723 load_reg_var(s, addr, rn);
4724 stride = (1 << size) * interleave;
4725 for (reg = 0; reg < nregs; reg++) {
4726 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
4727 load_reg_var(s, addr, rn);
4728 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
4729 } else if (interleave == 2 && nregs == 4 && reg == 2) {
4730 load_reg_var(s, addr, rn);
4731 tcg_gen_addi_i32(addr, addr, 1 << size);
4733 if (size == 3) {
4734 tmp64 = tcg_temp_new_i64();
4735 if (load) {
4736 gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
4737 neon_store_reg64(tmp64, rd);
4738 } else {
4739 neon_load_reg64(tmp64, rd);
4740 gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
4742 tcg_temp_free_i64(tmp64);
4743 tcg_gen_addi_i32(addr, addr, stride);
4744 } else {
4745 for (pass = 0; pass < 2; pass++) {
4746 if (size == 2) {
4747 if (load) {
4748 tmp = tcg_temp_new_i32();
4749 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
4750 neon_store_reg(rd, pass, tmp);
4751 } else {
4752 tmp = neon_load_reg(rd, pass);
4753 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
4754 tcg_temp_free_i32(tmp);
4756 tcg_gen_addi_i32(addr, addr, stride);
4757 } else if (size == 1) {
4758 if (load) {
4759 tmp = tcg_temp_new_i32();
4760 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
4761 tcg_gen_addi_i32(addr, addr, stride);
4762 tmp2 = tcg_temp_new_i32();
4763 gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
4764 tcg_gen_addi_i32(addr, addr, stride);
4765 tcg_gen_shli_i32(tmp2, tmp2, 16);
4766 tcg_gen_or_i32(tmp, tmp, tmp2);
4767 tcg_temp_free_i32(tmp2);
4768 neon_store_reg(rd, pass, tmp);
4769 } else {
4770 tmp = neon_load_reg(rd, pass);
4771 tmp2 = tcg_temp_new_i32();
4772 tcg_gen_shri_i32(tmp2, tmp, 16);
4773 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
4774 tcg_temp_free_i32(tmp);
4775 tcg_gen_addi_i32(addr, addr, stride);
4776 gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
4777 tcg_temp_free_i32(tmp2);
4778 tcg_gen_addi_i32(addr, addr, stride);
4780 } else /* size == 0 */ {
4781 if (load) {
4782 TCGV_UNUSED_I32(tmp2);
4783 for (n = 0; n < 4; n++) {
4784 tmp = tcg_temp_new_i32();
4785 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
4786 tcg_gen_addi_i32(addr, addr, stride);
4787 if (n == 0) {
4788 tmp2 = tmp;
4789 } else {
4790 tcg_gen_shli_i32(tmp, tmp, n * 8);
4791 tcg_gen_or_i32(tmp2, tmp2, tmp);
4792 tcg_temp_free_i32(tmp);
4795 neon_store_reg(rd, pass, tmp2);
4796 } else {
4797 tmp2 = neon_load_reg(rd, pass);
4798 for (n = 0; n < 4; n++) {
4799 tmp = tcg_temp_new_i32();
4800 if (n == 0) {
4801 tcg_gen_mov_i32(tmp, tmp2);
4802 } else {
4803 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4805 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
4806 tcg_temp_free_i32(tmp);
4807 tcg_gen_addi_i32(addr, addr, stride);
4809 tcg_temp_free_i32(tmp2);
4814 rd += spacing;
4816 tcg_temp_free_i32(addr);
4817 stride = nregs * 8;
4818 } else {
4819 size = (insn >> 10) & 3;
4820 if (size == 3) {
4821 /* Load single element to all lanes. */
4822 int a = (insn >> 4) & 1;
4823 if (!load) {
4824 return 1;
4826 size = (insn >> 6) & 3;
4827 nregs = ((insn >> 8) & 3) + 1;
4829 if (size == 3) {
4830 if (nregs != 4 || a == 0) {
4831 return 1;
4833 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4834 size = 2;
4836 if (nregs == 1 && a == 1 && size == 0) {
4837 return 1;
4839 if (nregs == 3 && a == 1) {
4840 return 1;
4842 addr = tcg_temp_new_i32();
4843 load_reg_var(s, addr, rn);
4844 if (nregs == 1) {
4845 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4846 tmp = gen_load_and_replicate(s, addr, size);
4847 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4848 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4849 if (insn & (1 << 5)) {
4850 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4851 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4853 tcg_temp_free_i32(tmp);
4854 } else {
4855 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4856 stride = (insn & (1 << 5)) ? 2 : 1;
4857 for (reg = 0; reg < nregs; reg++) {
4858 tmp = gen_load_and_replicate(s, addr, size);
4859 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4860 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4861 tcg_temp_free_i32(tmp);
4862 tcg_gen_addi_i32(addr, addr, 1 << size);
4863 rd += stride;
4866 tcg_temp_free_i32(addr);
4867 stride = (1 << size) * nregs;
4868 } else {
4869 /* Single element. */
4870 int idx = (insn >> 4) & 0xf;
4871 pass = (insn >> 7) & 1;
4872 switch (size) {
4873 case 0:
4874 shift = ((insn >> 5) & 3) * 8;
4875 stride = 1;
4876 break;
4877 case 1:
4878 shift = ((insn >> 6) & 1) * 16;
4879 stride = (insn & (1 << 5)) ? 2 : 1;
4880 break;
4881 case 2:
4882 shift = 0;
4883 stride = (insn & (1 << 6)) ? 2 : 1;
4884 break;
4885 default:
4886 abort();
4888 nregs = ((insn >> 8) & 3) + 1;
4889 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4890 switch (nregs) {
4891 case 1:
4892 if (((idx & (1 << size)) != 0) ||
4893 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4894 return 1;
4896 break;
4897 case 3:
4898 if ((idx & 1) != 0) {
4899 return 1;
4901 /* fall through */
4902 case 2:
4903 if (size == 2 && (idx & 2) != 0) {
4904 return 1;
4906 break;
4907 case 4:
4908 if ((size == 2) && ((idx & 3) == 3)) {
4909 return 1;
4911 break;
4912 default:
4913 abort();
4915 if ((rd + stride * (nregs - 1)) > 31) {
4916 /* Attempts to write off the end of the register file
4917 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4918 * the neon_load_reg() would write off the end of the array.
4920 return 1;
4922 addr = tcg_temp_new_i32();
4923 load_reg_var(s, addr, rn);
4924 for (reg = 0; reg < nregs; reg++) {
4925 if (load) {
4926 tmp = tcg_temp_new_i32();
4927 switch (size) {
4928 case 0:
4929 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
4930 break;
4931 case 1:
4932 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
4933 break;
4934 case 2:
4935 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
4936 break;
4937 default: /* Avoid compiler warnings. */
4938 abort();
4940 if (size != 2) {
4941 tmp2 = neon_load_reg(rd, pass);
4942 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4943 shift, size ? 16 : 8);
4944 tcg_temp_free_i32(tmp2);
4946 neon_store_reg(rd, pass, tmp);
4947 } else { /* Store */
4948 tmp = neon_load_reg(rd, pass);
4949 if (shift)
4950 tcg_gen_shri_i32(tmp, tmp, shift);
4951 switch (size) {
4952 case 0:
4953 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
4954 break;
4955 case 1:
4956 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
4957 break;
4958 case 2:
4959 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
4960 break;
4962 tcg_temp_free_i32(tmp);
4964 rd += stride;
4965 tcg_gen_addi_i32(addr, addr, 1 << size);
4967 tcg_temp_free_i32(addr);
4968 stride = nregs * (1 << size);
4971 if (rm != 15) {
4972 TCGv_i32 base;
4974 base = load_reg(s, rn);
4975 if (rm == 13) {
4976 tcg_gen_addi_i32(base, base, stride);
4977 } else {
4978 TCGv_i32 index;
4979 index = load_reg(s, rm);
4980 tcg_gen_add_i32(base, base, index);
4981 tcg_temp_free_i32(index);
4983 store_reg(s, rn, base);
4985 return 0;
4988 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4989 static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
4991 tcg_gen_and_i32(t, t, c);
4992 tcg_gen_andc_i32(f, f, c);
4993 tcg_gen_or_i32(dest, t, f);
4996 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
4998 switch (size) {
4999 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5000 case 1: gen_helper_neon_narrow_u16(dest, src); break;
5001 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
5002 default: abort();
5006 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
5008 switch (size) {
5009 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5010 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5011 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
5012 default: abort();
5016 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
5018 switch (size) {
5019 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5020 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5021 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
5022 default: abort();
5026 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
5028 switch (size) {
5029 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5030 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5031 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
5032 default: abort();
5036 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
5037 int q, int u)
5039 if (q) {
5040 if (u) {
5041 switch (size) {
5042 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5043 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5044 default: abort();
5046 } else {
5047 switch (size) {
5048 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5049 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5050 default: abort();
5053 } else {
5054 if (u) {
5055 switch (size) {
5056 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5057 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
5058 default: abort();
5060 } else {
5061 switch (size) {
5062 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5063 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5064 default: abort();
5070 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
5072 if (u) {
5073 switch (size) {
5074 case 0: gen_helper_neon_widen_u8(dest, src); break;
5075 case 1: gen_helper_neon_widen_u16(dest, src); break;
5076 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5077 default: abort();
5079 } else {
5080 switch (size) {
5081 case 0: gen_helper_neon_widen_s8(dest, src); break;
5082 case 1: gen_helper_neon_widen_s16(dest, src); break;
5083 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5084 default: abort();
5087 tcg_temp_free_i32(src);
5090 static inline void gen_neon_addl(int size)
5092 switch (size) {
5093 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5094 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5095 case 2: tcg_gen_add_i64(CPU_V001); break;
5096 default: abort();
5100 static inline void gen_neon_subl(int size)
5102 switch (size) {
5103 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5104 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5105 case 2: tcg_gen_sub_i64(CPU_V001); break;
5106 default: abort();
5110 static inline void gen_neon_negl(TCGv_i64 var, int size)
5112 switch (size) {
5113 case 0: gen_helper_neon_negl_u16(var, var); break;
5114 case 1: gen_helper_neon_negl_u32(var, var); break;
5115 case 2:
5116 tcg_gen_neg_i64(var, var);
5117 break;
5118 default: abort();
5122 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
5124 switch (size) {
5125 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5126 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
5127 default: abort();
5131 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5132 int size, int u)
5134 TCGv_i64 tmp;
5136 switch ((size << 1) | u) {
5137 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5138 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5139 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5140 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5141 case 4:
5142 tmp = gen_muls_i64_i32(a, b);
5143 tcg_gen_mov_i64(dest, tmp);
5144 tcg_temp_free_i64(tmp);
5145 break;
5146 case 5:
5147 tmp = gen_mulu_i64_i32(a, b);
5148 tcg_gen_mov_i64(dest, tmp);
5149 tcg_temp_free_i64(tmp);
5150 break;
5151 default: abort();
5154 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5155 Don't forget to clean them now. */
5156 if (size < 2) {
5157 tcg_temp_free_i32(a);
5158 tcg_temp_free_i32(b);
5162 static void gen_neon_narrow_op(int op, int u, int size,
5163 TCGv_i32 dest, TCGv_i64 src)
5165 if (op) {
5166 if (u) {
5167 gen_neon_unarrow_sats(size, dest, src);
5168 } else {
5169 gen_neon_narrow(size, dest, src);
5171 } else {
5172 if (u) {
5173 gen_neon_narrow_satu(size, dest, src);
5174 } else {
5175 gen_neon_narrow_sats(size, dest, src);
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
/* For the shift ops (VSHL..VQRSHL) the rn/rm operands are swapped
 * at translate time; see disas_neon_data_insn().
 */
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
/* Each entry in this array has bit n set if the 3-reg-same-length insn
 * allows size value n (otherwise it will UNDEF); disas_neon_data_insn()
 * checks (neon_3r_sizes[op] & (1 << size)).  Unallocated op values have
 * no bits set and so always UNDEF.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 * Gaps in the numbering (e.g. 3 and 29) are unallocated encodings;
 * they get no size bits in neon_2rm_sizes[] below and so always UNDEF.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
5319 static int neon_2rm_is_float_op(int op)
5321 /* Return true if this neon 2reg-misc op is float-to-float */
5322 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
5323 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
5324 op == NEON_2RM_VRINTM ||
5325 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
5326 op >= NEON_2RM_VRECPE_F);
5329 static bool neon_2rm_is_v8_op(int op)
5331 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5332 switch (op) {
5333 case NEON_2RM_VRINTN:
5334 case NEON_2RM_VRINTA:
5335 case NEON_2RM_VRINTM:
5336 case NEON_2RM_VRINTP:
5337 case NEON_2RM_VRINTZ:
5338 case NEON_2RM_VRINTX:
5339 case NEON_2RM_VCVTAU:
5340 case NEON_2RM_VCVTAS:
5341 case NEON_2RM_VCVTNU:
5342 case NEON_2RM_VCVTNS:
5343 case NEON_2RM_VCVTPU:
5344 case NEON_2RM_VCVTPS:
5345 case NEON_2RM_VCVTMU:
5346 case NEON_2RM_VCVTMS:
5347 return true;
5348 default:
5349 return false;
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
5422 /* Translate a NEON data processing instruction. Return nonzero if the
5423 instruction is invalid.
5424 We process data in a mixture of 32-bit and 64-bit chunks.
5425 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
5427 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
5429 int op;
5430 int q;
5431 int rd, rn, rm;
5432 int size;
5433 int shift;
5434 int pass;
5435 int count;
5436 int pairwise;
5437 int u;
5438 uint32_t imm, mask;
5439 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
5440 TCGv_i64 tmp64;
5442 /* FIXME: this access check should not take precedence over UNDEF
5443 * for invalid encodings; we will generate incorrect syndrome information
5444 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5446 if (s->fp_excp_el) {
5447 gen_exception_insn(s, 4, EXCP_UDEF,
5448 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
5449 return 0;
5452 if (!s->vfp_enabled)
5453 return 1;
5454 q = (insn & (1 << 6)) != 0;
5455 u = (insn >> 24) & 1;
5456 VFP_DREG_D(rd, insn);
5457 VFP_DREG_N(rn, insn);
5458 VFP_DREG_M(rm, insn);
5459 size = (insn >> 20) & 3;
5460 if ((insn & (1 << 23)) == 0) {
5461 /* Three register same length. */
5462 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
5463 /* Catch invalid op and bad size combinations: UNDEF */
5464 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5465 return 1;
5467 /* All insns of this form UNDEF for either this condition or the
5468 * superset of cases "Q==1"; we catch the latter later.
5470 if (q && ((rd | rn | rm) & 1)) {
5471 return 1;
5474 * The SHA-1/SHA-256 3-register instructions require special treatment
5475 * here, as their size field is overloaded as an op type selector, and
5476 * they all consume their input in a single pass.
5478 if (op == NEON_3R_SHA) {
5479 if (!q) {
5480 return 1;
5482 if (!u) { /* SHA-1 */
5483 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
5484 return 1;
5486 tmp = tcg_const_i32(rd);
5487 tmp2 = tcg_const_i32(rn);
5488 tmp3 = tcg_const_i32(rm);
5489 tmp4 = tcg_const_i32(size);
5490 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5491 tcg_temp_free_i32(tmp4);
5492 } else { /* SHA-256 */
5493 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
5494 return 1;
5496 tmp = tcg_const_i32(rd);
5497 tmp2 = tcg_const_i32(rn);
5498 tmp3 = tcg_const_i32(rm);
5499 switch (size) {
5500 case 0:
5501 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5502 break;
5503 case 1:
5504 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5505 break;
5506 case 2:
5507 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5508 break;
5511 tcg_temp_free_i32(tmp);
5512 tcg_temp_free_i32(tmp2);
5513 tcg_temp_free_i32(tmp3);
5514 return 0;
5516 if (size == 3 && op != NEON_3R_LOGIC) {
5517 /* 64-bit element instructions. */
5518 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5519 neon_load_reg64(cpu_V0, rn + pass);
5520 neon_load_reg64(cpu_V1, rm + pass);
5521 switch (op) {
5522 case NEON_3R_VQADD:
5523 if (u) {
5524 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5525 cpu_V0, cpu_V1);
5526 } else {
5527 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5528 cpu_V0, cpu_V1);
5530 break;
5531 case NEON_3R_VQSUB:
5532 if (u) {
5533 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5534 cpu_V0, cpu_V1);
5535 } else {
5536 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5537 cpu_V0, cpu_V1);
5539 break;
5540 case NEON_3R_VSHL:
5541 if (u) {
5542 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5543 } else {
5544 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5546 break;
5547 case NEON_3R_VQSHL:
5548 if (u) {
5549 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5550 cpu_V1, cpu_V0);
5551 } else {
5552 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5553 cpu_V1, cpu_V0);
5555 break;
5556 case NEON_3R_VRSHL:
5557 if (u) {
5558 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
5559 } else {
5560 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5562 break;
5563 case NEON_3R_VQRSHL:
5564 if (u) {
5565 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5566 cpu_V1, cpu_V0);
5567 } else {
5568 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5569 cpu_V1, cpu_V0);
5571 break;
5572 case NEON_3R_VADD_VSUB:
5573 if (u) {
5574 tcg_gen_sub_i64(CPU_V001);
5575 } else {
5576 tcg_gen_add_i64(CPU_V001);
5578 break;
5579 default:
5580 abort();
5582 neon_store_reg64(cpu_V0, rd + pass);
5584 return 0;
5586 pairwise = 0;
5587 switch (op) {
5588 case NEON_3R_VSHL:
5589 case NEON_3R_VQSHL:
5590 case NEON_3R_VRSHL:
5591 case NEON_3R_VQRSHL:
5593 int rtmp;
5594 /* Shift instruction operands are reversed. */
5595 rtmp = rn;
5596 rn = rm;
5597 rm = rtmp;
5599 break;
5600 case NEON_3R_VPADD:
5601 if (u) {
5602 return 1;
5604 /* Fall through */
5605 case NEON_3R_VPMAX:
5606 case NEON_3R_VPMIN:
5607 pairwise = 1;
5608 break;
5609 case NEON_3R_FLOAT_ARITH:
5610 pairwise = (u && size < 2); /* if VPADD (float) */
5611 break;
5612 case NEON_3R_FLOAT_MINMAX:
5613 pairwise = u; /* if VPMIN/VPMAX (float) */
5614 break;
5615 case NEON_3R_FLOAT_CMP:
5616 if (!u && size) {
5617 /* no encoding for U=0 C=1x */
5618 return 1;
5620 break;
5621 case NEON_3R_FLOAT_ACMP:
5622 if (!u) {
5623 return 1;
5625 break;
5626 case NEON_3R_FLOAT_MISC:
5627 /* VMAXNM/VMINNM in ARMv8 */
5628 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
5629 return 1;
5631 break;
5632 case NEON_3R_VMUL:
5633 if (u && (size != 0)) {
5634 /* UNDEF on invalid size for polynomial subcase */
5635 return 1;
5637 break;
5638 case NEON_3R_VFM:
5639 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
5640 return 1;
5642 break;
5643 default:
5644 break;
5647 if (pairwise && q) {
5648 /* All the pairwise insns UNDEF if Q is set */
5649 return 1;
5652 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5654 if (pairwise) {
5655 /* Pairwise. */
5656 if (pass < 1) {
5657 tmp = neon_load_reg(rn, 0);
5658 tmp2 = neon_load_reg(rn, 1);
5659 } else {
5660 tmp = neon_load_reg(rm, 0);
5661 tmp2 = neon_load_reg(rm, 1);
5663 } else {
5664 /* Elementwise. */
5665 tmp = neon_load_reg(rn, pass);
5666 tmp2 = neon_load_reg(rm, pass);
5668 switch (op) {
5669 case NEON_3R_VHADD:
5670 GEN_NEON_INTEGER_OP(hadd);
5671 break;
5672 case NEON_3R_VQADD:
5673 GEN_NEON_INTEGER_OP_ENV(qadd);
5674 break;
5675 case NEON_3R_VRHADD:
5676 GEN_NEON_INTEGER_OP(rhadd);
5677 break;
5678 case NEON_3R_LOGIC: /* Logic ops. */
5679 switch ((u << 2) | size) {
5680 case 0: /* VAND */
5681 tcg_gen_and_i32(tmp, tmp, tmp2);
5682 break;
5683 case 1: /* BIC */
5684 tcg_gen_andc_i32(tmp, tmp, tmp2);
5685 break;
5686 case 2: /* VORR */
5687 tcg_gen_or_i32(tmp, tmp, tmp2);
5688 break;
5689 case 3: /* VORN */
5690 tcg_gen_orc_i32(tmp, tmp, tmp2);
5691 break;
5692 case 4: /* VEOR */
5693 tcg_gen_xor_i32(tmp, tmp, tmp2);
5694 break;
5695 case 5: /* VBSL */
5696 tmp3 = neon_load_reg(rd, pass);
5697 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
5698 tcg_temp_free_i32(tmp3);
5699 break;
5700 case 6: /* VBIT */
5701 tmp3 = neon_load_reg(rd, pass);
5702 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
5703 tcg_temp_free_i32(tmp3);
5704 break;
5705 case 7: /* VBIF */
5706 tmp3 = neon_load_reg(rd, pass);
5707 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
5708 tcg_temp_free_i32(tmp3);
5709 break;
5711 break;
5712 case NEON_3R_VHSUB:
5713 GEN_NEON_INTEGER_OP(hsub);
5714 break;
5715 case NEON_3R_VQSUB:
5716 GEN_NEON_INTEGER_OP_ENV(qsub);
5717 break;
5718 case NEON_3R_VCGT:
5719 GEN_NEON_INTEGER_OP(cgt);
5720 break;
5721 case NEON_3R_VCGE:
5722 GEN_NEON_INTEGER_OP(cge);
5723 break;
5724 case NEON_3R_VSHL:
5725 GEN_NEON_INTEGER_OP(shl);
5726 break;
5727 case NEON_3R_VQSHL:
5728 GEN_NEON_INTEGER_OP_ENV(qshl);
5729 break;
5730 case NEON_3R_VRSHL:
5731 GEN_NEON_INTEGER_OP(rshl);
5732 break;
5733 case NEON_3R_VQRSHL:
5734 GEN_NEON_INTEGER_OP_ENV(qrshl);
5735 break;
5736 case NEON_3R_VMAX:
5737 GEN_NEON_INTEGER_OP(max);
5738 break;
5739 case NEON_3R_VMIN:
5740 GEN_NEON_INTEGER_OP(min);
5741 break;
5742 case NEON_3R_VABD:
5743 GEN_NEON_INTEGER_OP(abd);
5744 break;
5745 case NEON_3R_VABA:
5746 GEN_NEON_INTEGER_OP(abd);
5747 tcg_temp_free_i32(tmp2);
5748 tmp2 = neon_load_reg(rd, pass);
5749 gen_neon_add(size, tmp, tmp2);
5750 break;
5751 case NEON_3R_VADD_VSUB:
5752 if (!u) { /* VADD */
5753 gen_neon_add(size, tmp, tmp2);
5754 } else { /* VSUB */
5755 switch (size) {
5756 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5757 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5758 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
5759 default: abort();
5762 break;
5763 case NEON_3R_VTST_VCEQ:
5764 if (!u) { /* VTST */
5765 switch (size) {
5766 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5767 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5768 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
5769 default: abort();
5771 } else { /* VCEQ */
5772 switch (size) {
5773 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5774 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5775 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5776 default: abort();
5779 break;
5780 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
5781 switch (size) {
5782 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5783 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5784 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5785 default: abort();
5787 tcg_temp_free_i32(tmp2);
5788 tmp2 = neon_load_reg(rd, pass);
5789 if (u) { /* VMLS */
5790 gen_neon_rsb(size, tmp, tmp2);
5791 } else { /* VMLA */
5792 gen_neon_add(size, tmp, tmp2);
5794 break;
5795 case NEON_3R_VMUL:
5796 if (u) { /* polynomial */
5797 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
5798 } else { /* Integer */
5799 switch (size) {
5800 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5801 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5802 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5803 default: abort();
5806 break;
5807 case NEON_3R_VPMAX:
5808 GEN_NEON_INTEGER_OP(pmax);
5809 break;
5810 case NEON_3R_VPMIN:
5811 GEN_NEON_INTEGER_OP(pmin);
5812 break;
5813 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
5814 if (!u) { /* VQDMULH */
5815 switch (size) {
5816 case 1:
5817 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5818 break;
5819 case 2:
5820 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5821 break;
5822 default: abort();
5824 } else { /* VQRDMULH */
5825 switch (size) {
5826 case 1:
5827 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5828 break;
5829 case 2:
5830 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5831 break;
5832 default: abort();
5835 break;
5836 case NEON_3R_VPADD:
5837 switch (size) {
5838 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5839 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5840 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
5841 default: abort();
5843 break;
5844 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
5846 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5847 switch ((u << 2) | size) {
5848 case 0: /* VADD */
5849 case 4: /* VPADD */
5850 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5851 break;
5852 case 2: /* VSUB */
5853 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
5854 break;
5855 case 6: /* VABD */
5856 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5857 break;
5858 default:
5859 abort();
5861 tcg_temp_free_ptr(fpstatus);
5862 break;
5864 case NEON_3R_FLOAT_MULTIPLY:
5866 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5867 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5868 if (!u) {
5869 tcg_temp_free_i32(tmp2);
5870 tmp2 = neon_load_reg(rd, pass);
5871 if (size == 0) {
5872 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5873 } else {
5874 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5877 tcg_temp_free_ptr(fpstatus);
5878 break;
5880 case NEON_3R_FLOAT_CMP:
5882 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5883 if (!u) {
5884 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
5885 } else {
5886 if (size == 0) {
5887 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5888 } else {
5889 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5892 tcg_temp_free_ptr(fpstatus);
5893 break;
5895 case NEON_3R_FLOAT_ACMP:
5897 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5898 if (size == 0) {
5899 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5900 } else {
5901 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5903 tcg_temp_free_ptr(fpstatus);
5904 break;
5906 case NEON_3R_FLOAT_MINMAX:
5908 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5909 if (size == 0) {
5910 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
5911 } else {
5912 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
5914 tcg_temp_free_ptr(fpstatus);
5915 break;
5917 case NEON_3R_FLOAT_MISC:
5918 if (u) {
5919 /* VMAXNM/VMINNM */
5920 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5921 if (size == 0) {
5922 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
5923 } else {
5924 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
5926 tcg_temp_free_ptr(fpstatus);
5927 } else {
5928 if (size == 0) {
5929 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5930 } else {
5931 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5934 break;
5935 case NEON_3R_VFM:
5937 /* VFMA, VFMS: fused multiply-add */
5938 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5939 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5940 if (size) {
5941 /* VFMS */
5942 gen_helper_vfp_negs(tmp, tmp);
5944 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5945 tcg_temp_free_i32(tmp3);
5946 tcg_temp_free_ptr(fpstatus);
5947 break;
5949 default:
5950 abort();
5952 tcg_temp_free_i32(tmp2);
5954 /* Save the result. For elementwise operations we can put it
5955 straight into the destination register. For pairwise operations
5956 we have to be careful to avoid clobbering the source operands. */
5957 if (pairwise && rd == rm) {
5958 neon_store_scratch(pass, tmp);
5959 } else {
5960 neon_store_reg(rd, pass, tmp);
5963 } /* for pass */
5964 if (pairwise && rd == rm) {
5965 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5966 tmp = neon_load_scratch(pass);
5967 neon_store_reg(rd, pass, tmp);
5970 /* End of 3 register same size operations. */
5971 } else if (insn & (1 << 4)) {
5972 if ((insn & 0x00380080) != 0) {
5973 /* Two registers and shift. */
5974 op = (insn >> 8) & 0xf;
5975 if (insn & (1 << 7)) {
5976 /* 64-bit shift. */
5977 if (op > 7) {
5978 return 1;
5980 size = 3;
5981 } else {
5982 size = 2;
5983 while ((insn & (1 << (size + 19))) == 0)
5984 size--;
5986 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5987 /* To avoid excessive duplication of ops we implement shift
5988 by immediate using the variable shift operations. */
5989 if (op < 8) {
5990 /* Shift by immediate:
5991 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5992 if (q && ((rd | rm) & 1)) {
5993 return 1;
5995 if (!u && (op == 4 || op == 6)) {
5996 return 1;
5998 /* Right shifts are encoded as N - shift, where N is the
5999 element size in bits. */
6000 if (op <= 4)
6001 shift = shift - (1 << (size + 3));
6002 if (size == 3) {
6003 count = q + 1;
6004 } else {
6005 count = q ? 4: 2;
6007 switch (size) {
6008 case 0:
6009 imm = (uint8_t) shift;
6010 imm |= imm << 8;
6011 imm |= imm << 16;
6012 break;
6013 case 1:
6014 imm = (uint16_t) shift;
6015 imm |= imm << 16;
6016 break;
6017 case 2:
6018 case 3:
6019 imm = shift;
6020 break;
6021 default:
6022 abort();
6025 for (pass = 0; pass < count; pass++) {
6026 if (size == 3) {
6027 neon_load_reg64(cpu_V0, rm + pass);
6028 tcg_gen_movi_i64(cpu_V1, imm);
6029 switch (op) {
6030 case 0: /* VSHR */
6031 case 1: /* VSRA */
6032 if (u)
6033 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6034 else
6035 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
6036 break;
6037 case 2: /* VRSHR */
6038 case 3: /* VRSRA */
6039 if (u)
6040 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
6041 else
6042 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
6043 break;
6044 case 4: /* VSRI */
6045 case 5: /* VSHL, VSLI */
6046 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6047 break;
6048 case 6: /* VQSHLU */
6049 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6050 cpu_V0, cpu_V1);
6051 break;
6052 case 7: /* VQSHL */
6053 if (u) {
6054 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
6055 cpu_V0, cpu_V1);
6056 } else {
6057 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
6058 cpu_V0, cpu_V1);
6060 break;
6062 if (op == 1 || op == 3) {
6063 /* Accumulate. */
6064 neon_load_reg64(cpu_V1, rd + pass);
6065 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6066 } else if (op == 4 || (op == 5 && u)) {
6067 /* Insert */
6068 neon_load_reg64(cpu_V1, rd + pass);
6069 uint64_t mask;
6070 if (shift < -63 || shift > 63) {
6071 mask = 0;
6072 } else {
6073 if (op == 4) {
6074 mask = 0xffffffffffffffffull >> -shift;
6075 } else {
6076 mask = 0xffffffffffffffffull << shift;
6079 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6080 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6082 neon_store_reg64(cpu_V0, rd + pass);
6083 } else { /* size < 3 */
6084 /* Operands in T0 and T1. */
6085 tmp = neon_load_reg(rm, pass);
6086 tmp2 = tcg_temp_new_i32();
6087 tcg_gen_movi_i32(tmp2, imm);
6088 switch (op) {
6089 case 0: /* VSHR */
6090 case 1: /* VSRA */
6091 GEN_NEON_INTEGER_OP(shl);
6092 break;
6093 case 2: /* VRSHR */
6094 case 3: /* VRSRA */
6095 GEN_NEON_INTEGER_OP(rshl);
6096 break;
6097 case 4: /* VSRI */
6098 case 5: /* VSHL, VSLI */
6099 switch (size) {
6100 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6101 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6102 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
6103 default: abort();
6105 break;
6106 case 6: /* VQSHLU */
6107 switch (size) {
6108 case 0:
6109 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6110 tmp, tmp2);
6111 break;
6112 case 1:
6113 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6114 tmp, tmp2);
6115 break;
6116 case 2:
6117 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6118 tmp, tmp2);
6119 break;
6120 default:
6121 abort();
6123 break;
6124 case 7: /* VQSHL */
6125 GEN_NEON_INTEGER_OP_ENV(qshl);
6126 break;
6128 tcg_temp_free_i32(tmp2);
6130 if (op == 1 || op == 3) {
6131 /* Accumulate. */
6132 tmp2 = neon_load_reg(rd, pass);
6133 gen_neon_add(size, tmp, tmp2);
6134 tcg_temp_free_i32(tmp2);
6135 } else if (op == 4 || (op == 5 && u)) {
6136 /* Insert */
6137 switch (size) {
6138 case 0:
6139 if (op == 4)
6140 mask = 0xff >> -shift;
6141 else
6142 mask = (uint8_t)(0xff << shift);
6143 mask |= mask << 8;
6144 mask |= mask << 16;
6145 break;
6146 case 1:
6147 if (op == 4)
6148 mask = 0xffff >> -shift;
6149 else
6150 mask = (uint16_t)(0xffff << shift);
6151 mask |= mask << 16;
6152 break;
6153 case 2:
6154 if (shift < -31 || shift > 31) {
6155 mask = 0;
6156 } else {
6157 if (op == 4)
6158 mask = 0xffffffffu >> -shift;
6159 else
6160 mask = 0xffffffffu << shift;
6162 break;
6163 default:
6164 abort();
6166 tmp2 = neon_load_reg(rd, pass);
6167 tcg_gen_andi_i32(tmp, tmp, mask);
6168 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
6169 tcg_gen_or_i32(tmp, tmp, tmp2);
6170 tcg_temp_free_i32(tmp2);
6172 neon_store_reg(rd, pass, tmp);
6174 } /* for pass */
6175 } else if (op < 10) {
6176 /* Shift by immediate and narrow:
6177 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
6178 int input_unsigned = (op == 8) ? !u : u;
6179 if (rm & 1) {
6180 return 1;
6182 shift = shift - (1 << (size + 3));
6183 size++;
6184 if (size == 3) {
6185 tmp64 = tcg_const_i64(shift);
6186 neon_load_reg64(cpu_V0, rm);
6187 neon_load_reg64(cpu_V1, rm + 1);
6188 for (pass = 0; pass < 2; pass++) {
6189 TCGv_i64 in;
6190 if (pass == 0) {
6191 in = cpu_V0;
6192 } else {
6193 in = cpu_V1;
6195 if (q) {
6196 if (input_unsigned) {
6197 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
6198 } else {
6199 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
6201 } else {
6202 if (input_unsigned) {
6203 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
6204 } else {
6205 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
6208 tmp = tcg_temp_new_i32();
6209 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6210 neon_store_reg(rd, pass, tmp);
6211 } /* for pass */
6212 tcg_temp_free_i64(tmp64);
6213 } else {
6214 if (size == 1) {
6215 imm = (uint16_t)shift;
6216 imm |= imm << 16;
6217 } else {
6218 /* size == 2 */
6219 imm = (uint32_t)shift;
6221 tmp2 = tcg_const_i32(imm);
6222 tmp4 = neon_load_reg(rm + 1, 0);
6223 tmp5 = neon_load_reg(rm + 1, 1);
6224 for (pass = 0; pass < 2; pass++) {
6225 if (pass == 0) {
6226 tmp = neon_load_reg(rm, 0);
6227 } else {
6228 tmp = tmp4;
6230 gen_neon_shift_narrow(size, tmp, tmp2, q,
6231 input_unsigned);
6232 if (pass == 0) {
6233 tmp3 = neon_load_reg(rm, 1);
6234 } else {
6235 tmp3 = tmp5;
6237 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6238 input_unsigned);
6239 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
6240 tcg_temp_free_i32(tmp);
6241 tcg_temp_free_i32(tmp3);
6242 tmp = tcg_temp_new_i32();
6243 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6244 neon_store_reg(rd, pass, tmp);
6245 } /* for pass */
6246 tcg_temp_free_i32(tmp2);
6248 } else if (op == 10) {
6249 /* VSHLL, VMOVL */
6250 if (q || (rd & 1)) {
6251 return 1;
6253 tmp = neon_load_reg(rm, 0);
6254 tmp2 = neon_load_reg(rm, 1);
6255 for (pass = 0; pass < 2; pass++) {
6256 if (pass == 1)
6257 tmp = tmp2;
6259 gen_neon_widen(cpu_V0, tmp, size, u);
6261 if (shift != 0) {
6262 /* The shift is less than the width of the source
6263 type, so we can just shift the whole register. */
6264 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
6265 /* Widen the result of shift: we need to clear
6266 * the potential overflow bits resulting from
6267 * left bits of the narrow input appearing as
 6268                          * right bits of the left neighbour's narrow
6269 * input. */
6270 if (size < 2 || !u) {
6271 uint64_t imm64;
6272 if (size == 0) {
6273 imm = (0xffu >> (8 - shift));
6274 imm |= imm << 16;
6275 } else if (size == 1) {
6276 imm = 0xffff >> (16 - shift);
6277 } else {
6278 /* size == 2 */
6279 imm = 0xffffffff >> (32 - shift);
6281 if (size < 2) {
6282 imm64 = imm | (((uint64_t)imm) << 32);
6283 } else {
6284 imm64 = imm;
6286 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
6289 neon_store_reg64(cpu_V0, rd + pass);
6291 } else if (op >= 14) {
6292 /* VCVT fixed-point. */
6293 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6294 return 1;
6296 /* We have already masked out the must-be-1 top bit of imm6,
6297 * hence this 32-shift where the ARM ARM has 64-imm6.
6299 shift = 32 - shift;
6300 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6301 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
6302 if (!(op & 1)) {
6303 if (u)
6304 gen_vfp_ulto(0, shift, 1);
6305 else
6306 gen_vfp_slto(0, shift, 1);
6307 } else {
6308 if (u)
6309 gen_vfp_toul(0, shift, 1);
6310 else
6311 gen_vfp_tosl(0, shift, 1);
6313 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
6315 } else {
6316 return 1;
6318 } else { /* (insn & 0x00380080) == 0 */
6319 int invert;
6320 if (q && (rd & 1)) {
6321 return 1;
6324 op = (insn >> 8) & 0xf;
6325 /* One register and immediate. */
6326 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6327 invert = (insn & (1 << 5)) != 0;
6328 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6329 * We choose to not special-case this and will behave as if a
6330 * valid constant encoding of 0 had been given.
6332 switch (op) {
6333 case 0: case 1:
6334 /* no-op */
6335 break;
6336 case 2: case 3:
6337 imm <<= 8;
6338 break;
6339 case 4: case 5:
6340 imm <<= 16;
6341 break;
6342 case 6: case 7:
6343 imm <<= 24;
6344 break;
6345 case 8: case 9:
6346 imm |= imm << 16;
6347 break;
6348 case 10: case 11:
6349 imm = (imm << 8) | (imm << 24);
6350 break;
6351 case 12:
6352 imm = (imm << 8) | 0xff;
6353 break;
6354 case 13:
6355 imm = (imm << 16) | 0xffff;
6356 break;
6357 case 14:
6358 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6359 if (invert)
6360 imm = ~imm;
6361 break;
6362 case 15:
6363 if (invert) {
6364 return 1;
6366 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6367 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6368 break;
6370 if (invert)
6371 imm = ~imm;
6373 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6374 if (op & 1 && op < 12) {
6375 tmp = neon_load_reg(rd, pass);
6376 if (invert) {
6377 /* The immediate value has already been inverted, so
6378 BIC becomes AND. */
6379 tcg_gen_andi_i32(tmp, tmp, imm);
6380 } else {
6381 tcg_gen_ori_i32(tmp, tmp, imm);
6383 } else {
6384 /* VMOV, VMVN. */
6385 tmp = tcg_temp_new_i32();
6386 if (op == 14 && invert) {
6387 int n;
6388 uint32_t val;
6389 val = 0;
6390 for (n = 0; n < 4; n++) {
6391 if (imm & (1 << (n + (pass & 1) * 4)))
6392 val |= 0xff << (n * 8);
6394 tcg_gen_movi_i32(tmp, val);
6395 } else {
6396 tcg_gen_movi_i32(tmp, imm);
6399 neon_store_reg(rd, pass, tmp);
6402 } else { /* (insn & 0x00800010 == 0x00800000) */
6403 if (size != 3) {
6404 op = (insn >> 8) & 0xf;
6405 if ((insn & (1 << 6)) == 0) {
6406 /* Three registers of different lengths. */
6407 int src1_wide;
6408 int src2_wide;
6409 int prewiden;
6410 /* undefreq: bit 0 : UNDEF if size == 0
6411 * bit 1 : UNDEF if size == 1
6412 * bit 2 : UNDEF if size == 2
6413 * bit 3 : UNDEF if U == 1
6414 * Note that [2:0] set implies 'always UNDEF'
6416 int undefreq;
6417 /* prewiden, src1_wide, src2_wide, undefreq */
6418 static const int neon_3reg_wide[16][4] = {
6419 {1, 0, 0, 0}, /* VADDL */
6420 {1, 1, 0, 0}, /* VADDW */
6421 {1, 0, 0, 0}, /* VSUBL */
6422 {1, 1, 0, 0}, /* VSUBW */
6423 {0, 1, 1, 0}, /* VADDHN */
6424 {0, 0, 0, 0}, /* VABAL */
6425 {0, 1, 1, 0}, /* VSUBHN */
6426 {0, 0, 0, 0}, /* VABDL */
6427 {0, 0, 0, 0}, /* VMLAL */
6428 {0, 0, 0, 9}, /* VQDMLAL */
6429 {0, 0, 0, 0}, /* VMLSL */
6430 {0, 0, 0, 9}, /* VQDMLSL */
6431 {0, 0, 0, 0}, /* Integer VMULL */
6432 {0, 0, 0, 1}, /* VQDMULL */
6433 {0, 0, 0, 0xa}, /* Polynomial VMULL */
6434 {0, 0, 0, 7}, /* Reserved: always UNDEF */
6437 prewiden = neon_3reg_wide[op][0];
6438 src1_wide = neon_3reg_wide[op][1];
6439 src2_wide = neon_3reg_wide[op][2];
6440 undefreq = neon_3reg_wide[op][3];
6442 if ((undefreq & (1 << size)) ||
6443 ((undefreq & 8) && u)) {
6444 return 1;
6446 if ((src1_wide && (rn & 1)) ||
6447 (src2_wide && (rm & 1)) ||
6448 (!src2_wide && (rd & 1))) {
6449 return 1;
6452 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6453 * outside the loop below as it only performs a single pass.
6455 if (op == 14 && size == 2) {
6456 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6458 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
6459 return 1;
6461 tcg_rn = tcg_temp_new_i64();
6462 tcg_rm = tcg_temp_new_i64();
6463 tcg_rd = tcg_temp_new_i64();
6464 neon_load_reg64(tcg_rn, rn);
6465 neon_load_reg64(tcg_rm, rm);
6466 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6467 neon_store_reg64(tcg_rd, rd);
6468 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6469 neon_store_reg64(tcg_rd, rd + 1);
6470 tcg_temp_free_i64(tcg_rn);
6471 tcg_temp_free_i64(tcg_rm);
6472 tcg_temp_free_i64(tcg_rd);
6473 return 0;
6476 /* Avoid overlapping operands. Wide source operands are
6477 always aligned so will never overlap with wide
6478 destinations in problematic ways. */
6479 if (rd == rm && !src2_wide) {
6480 tmp = neon_load_reg(rm, 1);
6481 neon_store_scratch(2, tmp);
6482 } else if (rd == rn && !src1_wide) {
6483 tmp = neon_load_reg(rn, 1);
6484 neon_store_scratch(2, tmp);
6486 TCGV_UNUSED_I32(tmp3);
6487 for (pass = 0; pass < 2; pass++) {
6488 if (src1_wide) {
6489 neon_load_reg64(cpu_V0, rn + pass);
6490 TCGV_UNUSED_I32(tmp);
6491 } else {
6492 if (pass == 1 && rd == rn) {
6493 tmp = neon_load_scratch(2);
6494 } else {
6495 tmp = neon_load_reg(rn, pass);
6497 if (prewiden) {
6498 gen_neon_widen(cpu_V0, tmp, size, u);
6501 if (src2_wide) {
6502 neon_load_reg64(cpu_V1, rm + pass);
6503 TCGV_UNUSED_I32(tmp2);
6504 } else {
6505 if (pass == 1 && rd == rm) {
6506 tmp2 = neon_load_scratch(2);
6507 } else {
6508 tmp2 = neon_load_reg(rm, pass);
6510 if (prewiden) {
6511 gen_neon_widen(cpu_V1, tmp2, size, u);
6514 switch (op) {
6515 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6516 gen_neon_addl(size);
6517 break;
6518 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6519 gen_neon_subl(size);
6520 break;
6521 case 5: case 7: /* VABAL, VABDL */
6522 switch ((size << 1) | u) {
6523 case 0:
6524 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6525 break;
6526 case 1:
6527 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6528 break;
6529 case 2:
6530 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6531 break;
6532 case 3:
6533 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6534 break;
6535 case 4:
6536 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6537 break;
6538 case 5:
6539 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6540 break;
6541 default: abort();
6543 tcg_temp_free_i32(tmp2);
6544 tcg_temp_free_i32(tmp);
6545 break;
6546 case 8: case 9: case 10: case 11: case 12: case 13:
6547 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6548 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6549 break;
6550 case 14: /* Polynomial VMULL */
6551 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
6552 tcg_temp_free_i32(tmp2);
6553 tcg_temp_free_i32(tmp);
6554 break;
6555 default: /* 15 is RESERVED: caught earlier */
6556 abort();
6558 if (op == 13) {
6559 /* VQDMULL */
6560 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6561 neon_store_reg64(cpu_V0, rd + pass);
6562 } else if (op == 5 || (op >= 8 && op <= 11)) {
6563 /* Accumulate. */
6564 neon_load_reg64(cpu_V1, rd + pass);
6565 switch (op) {
6566 case 10: /* VMLSL */
6567 gen_neon_negl(cpu_V0, size);
6568 /* Fall through */
6569 case 5: case 8: /* VABAL, VMLAL */
6570 gen_neon_addl(size);
6571 break;
6572 case 9: case 11: /* VQDMLAL, VQDMLSL */
6573 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6574 if (op == 11) {
6575 gen_neon_negl(cpu_V0, size);
6577 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6578 break;
6579 default:
6580 abort();
6582 neon_store_reg64(cpu_V0, rd + pass);
6583 } else if (op == 4 || op == 6) {
6584 /* Narrowing operation. */
6585 tmp = tcg_temp_new_i32();
6586 if (!u) {
6587 switch (size) {
6588 case 0:
6589 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6590 break;
6591 case 1:
6592 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6593 break;
6594 case 2:
6595 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6596 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6597 break;
6598 default: abort();
6600 } else {
6601 switch (size) {
6602 case 0:
6603 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6604 break;
6605 case 1:
6606 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6607 break;
6608 case 2:
6609 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6610 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6611 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6612 break;
6613 default: abort();
6616 if (pass == 0) {
6617 tmp3 = tmp;
6618 } else {
6619 neon_store_reg(rd, 0, tmp3);
6620 neon_store_reg(rd, 1, tmp);
6622 } else {
6623 /* Write back the result. */
6624 neon_store_reg64(cpu_V0, rd + pass);
6627 } else {
6628 /* Two registers and a scalar. NB that for ops of this form
6629 * the ARM ARM labels bit 24 as Q, but it is in our variable
6630 * 'u', not 'q'.
6632 if (size == 0) {
6633 return 1;
6635 switch (op) {
6636 case 1: /* Float VMLA scalar */
6637 case 5: /* Floating point VMLS scalar */
6638 case 9: /* Floating point VMUL scalar */
6639 if (size == 1) {
6640 return 1;
6642 /* fall through */
6643 case 0: /* Integer VMLA scalar */
6644 case 4: /* Integer VMLS scalar */
6645 case 8: /* Integer VMUL scalar */
6646 case 12: /* VQDMULH scalar */
6647 case 13: /* VQRDMULH scalar */
6648 if (u && ((rd | rn) & 1)) {
6649 return 1;
6651 tmp = neon_get_scalar(size, rm);
6652 neon_store_scratch(0, tmp);
6653 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6654 tmp = neon_load_scratch(0);
6655 tmp2 = neon_load_reg(rn, pass);
6656 if (op == 12) {
6657 if (size == 1) {
6658 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6659 } else {
6660 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6662 } else if (op == 13) {
6663 if (size == 1) {
6664 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6665 } else {
6666 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6668 } else if (op & 1) {
6669 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6670 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6671 tcg_temp_free_ptr(fpstatus);
6672 } else {
6673 switch (size) {
6674 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6675 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6676 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6677 default: abort();
6680 tcg_temp_free_i32(tmp2);
6681 if (op < 8) {
6682 /* Accumulate. */
6683 tmp2 = neon_load_reg(rd, pass);
6684 switch (op) {
6685 case 0:
6686 gen_neon_add(size, tmp, tmp2);
6687 break;
6688 case 1:
6690 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6691 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6692 tcg_temp_free_ptr(fpstatus);
6693 break;
6695 case 4:
6696 gen_neon_rsb(size, tmp, tmp2);
6697 break;
6698 case 5:
6700 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6701 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6702 tcg_temp_free_ptr(fpstatus);
6703 break;
6705 default:
6706 abort();
6708 tcg_temp_free_i32(tmp2);
6710 neon_store_reg(rd, pass, tmp);
6712 break;
6713 case 3: /* VQDMLAL scalar */
6714 case 7: /* VQDMLSL scalar */
6715 case 11: /* VQDMULL scalar */
6716 if (u == 1) {
6717 return 1;
6719 /* fall through */
 6720                 case 2: /* VMLAL scalar */
6721 case 6: /* VMLSL scalar */
6722 case 10: /* VMULL scalar */
6723 if (rd & 1) {
6724 return 1;
6726 tmp2 = neon_get_scalar(size, rm);
6727 /* We need a copy of tmp2 because gen_neon_mull
6728 * deletes it during pass 0. */
6729 tmp4 = tcg_temp_new_i32();
6730 tcg_gen_mov_i32(tmp4, tmp2);
6731 tmp3 = neon_load_reg(rn, 1);
6733 for (pass = 0; pass < 2; pass++) {
6734 if (pass == 0) {
6735 tmp = neon_load_reg(rn, 0);
6736 } else {
6737 tmp = tmp3;
6738 tmp2 = tmp4;
6740 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6741 if (op != 11) {
6742 neon_load_reg64(cpu_V1, rd + pass);
6744 switch (op) {
6745 case 6:
6746 gen_neon_negl(cpu_V0, size);
6747 /* Fall through */
6748 case 2:
6749 gen_neon_addl(size);
6750 break;
6751 case 3: case 7:
6752 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6753 if (op == 7) {
6754 gen_neon_negl(cpu_V0, size);
6756 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6757 break;
6758 case 10:
6759 /* no-op */
6760 break;
6761 case 11:
6762 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6763 break;
6764 default:
6765 abort();
6767 neon_store_reg64(cpu_V0, rd + pass);
6771 break;
6772 default: /* 14 and 15 are RESERVED */
6773 return 1;
6776 } else { /* size == 3 */
6777 if (!u) {
6778 /* Extract. */
6779 imm = (insn >> 8) & 0xf;
6781 if (imm > 7 && !q)
6782 return 1;
6784 if (q && ((rd | rn | rm) & 1)) {
6785 return 1;
6788 if (imm == 0) {
6789 neon_load_reg64(cpu_V0, rn);
6790 if (q) {
6791 neon_load_reg64(cpu_V1, rn + 1);
6793 } else if (imm == 8) {
6794 neon_load_reg64(cpu_V0, rn + 1);
6795 if (q) {
6796 neon_load_reg64(cpu_V1, rm);
6798 } else if (q) {
6799 tmp64 = tcg_temp_new_i64();
6800 if (imm < 8) {
6801 neon_load_reg64(cpu_V0, rn);
6802 neon_load_reg64(tmp64, rn + 1);
6803 } else {
6804 neon_load_reg64(cpu_V0, rn + 1);
6805 neon_load_reg64(tmp64, rm);
6807 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
6808 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
6809 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6810 if (imm < 8) {
6811 neon_load_reg64(cpu_V1, rm);
6812 } else {
6813 neon_load_reg64(cpu_V1, rm + 1);
6814 imm -= 8;
6816 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6817 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6818 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
6819 tcg_temp_free_i64(tmp64);
6820 } else {
6821 /* BUGFIX */
6822 neon_load_reg64(cpu_V0, rn);
6823 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
6824 neon_load_reg64(cpu_V1, rm);
6825 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6826 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6828 neon_store_reg64(cpu_V0, rd);
6829 if (q) {
6830 neon_store_reg64(cpu_V1, rd + 1);
6832 } else if ((insn & (1 << 11)) == 0) {
6833 /* Two register misc. */
6834 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6835 size = (insn >> 18) & 3;
6836 /* UNDEF for unknown op values and bad op-size combinations */
6837 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6838 return 1;
6840 if (neon_2rm_is_v8_op(op) &&
6841 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6842 return 1;
6844 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6845 q && ((rm | rd) & 1)) {
6846 return 1;
6848 switch (op) {
6849 case NEON_2RM_VREV64:
6850 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6851 tmp = neon_load_reg(rm, pass * 2);
6852 tmp2 = neon_load_reg(rm, pass * 2 + 1);
6853 switch (size) {
6854 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6855 case 1: gen_swap_half(tmp); break;
6856 case 2: /* no-op */ break;
6857 default: abort();
6859 neon_store_reg(rd, pass * 2 + 1, tmp);
6860 if (size == 2) {
6861 neon_store_reg(rd, pass * 2, tmp2);
6862 } else {
6863 switch (size) {
6864 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6865 case 1: gen_swap_half(tmp2); break;
6866 default: abort();
6868 neon_store_reg(rd, pass * 2, tmp2);
6871 break;
6872 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6873 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
6874 for (pass = 0; pass < q + 1; pass++) {
6875 tmp = neon_load_reg(rm, pass * 2);
6876 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6877 tmp = neon_load_reg(rm, pass * 2 + 1);
6878 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6879 switch (size) {
6880 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6881 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6882 case 2: tcg_gen_add_i64(CPU_V001); break;
6883 default: abort();
6885 if (op >= NEON_2RM_VPADAL) {
6886 /* Accumulate. */
6887 neon_load_reg64(cpu_V1, rd + pass);
6888 gen_neon_addl(size);
6890 neon_store_reg64(cpu_V0, rd + pass);
6892 break;
6893 case NEON_2RM_VTRN:
6894 if (size == 2) {
6895 int n;
6896 for (n = 0; n < (q ? 4 : 2); n += 2) {
6897 tmp = neon_load_reg(rm, n);
6898 tmp2 = neon_load_reg(rd, n + 1);
6899 neon_store_reg(rm, n, tmp2);
6900 neon_store_reg(rd, n + 1, tmp);
6902 } else {
6903 goto elementwise;
6905 break;
6906 case NEON_2RM_VUZP:
6907 if (gen_neon_unzip(rd, rm, size, q)) {
6908 return 1;
6910 break;
6911 case NEON_2RM_VZIP:
6912 if (gen_neon_zip(rd, rm, size, q)) {
6913 return 1;
6915 break;
6916 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6917 /* also VQMOVUN; op field and mnemonics don't line up */
6918 if (rm & 1) {
6919 return 1;
6921 TCGV_UNUSED_I32(tmp2);
6922 for (pass = 0; pass < 2; pass++) {
6923 neon_load_reg64(cpu_V0, rm + pass);
6924 tmp = tcg_temp_new_i32();
6925 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6926 tmp, cpu_V0);
6927 if (pass == 0) {
6928 tmp2 = tmp;
6929 } else {
6930 neon_store_reg(rd, 0, tmp2);
6931 neon_store_reg(rd, 1, tmp);
6934 break;
6935 case NEON_2RM_VSHLL:
6936 if (q || (rd & 1)) {
6937 return 1;
6939 tmp = neon_load_reg(rm, 0);
6940 tmp2 = neon_load_reg(rm, 1);
6941 for (pass = 0; pass < 2; pass++) {
6942 if (pass == 1)
6943 tmp = tmp2;
6944 gen_neon_widen(cpu_V0, tmp, size, 1);
6945 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
6946 neon_store_reg64(cpu_V0, rd + pass);
6948 break;
6949 case NEON_2RM_VCVT_F16_F32:
6950 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
6951 q || (rm & 1)) {
6952 return 1;
6954 tmp = tcg_temp_new_i32();
6955 tmp2 = tcg_temp_new_i32();
6956 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
6957 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6958 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
6959 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6960 tcg_gen_shli_i32(tmp2, tmp2, 16);
6961 tcg_gen_or_i32(tmp2, tmp2, tmp);
6962 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
6963 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6964 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6965 neon_store_reg(rd, 0, tmp2);
6966 tmp2 = tcg_temp_new_i32();
6967 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6968 tcg_gen_shli_i32(tmp2, tmp2, 16);
6969 tcg_gen_or_i32(tmp2, tmp2, tmp);
6970 neon_store_reg(rd, 1, tmp2);
6971 tcg_temp_free_i32(tmp);
6972 break;
6973 case NEON_2RM_VCVT_F32_F16:
6974 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
6975 q || (rd & 1)) {
6976 return 1;
6978 tmp3 = tcg_temp_new_i32();
6979 tmp = neon_load_reg(rm, 0);
6980 tmp2 = neon_load_reg(rm, 1);
6981 tcg_gen_ext16u_i32(tmp3, tmp);
6982 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6983 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6984 tcg_gen_shri_i32(tmp3, tmp, 16);
6985 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6986 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
6987 tcg_temp_free_i32(tmp);
6988 tcg_gen_ext16u_i32(tmp3, tmp2);
6989 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6990 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6991 tcg_gen_shri_i32(tmp3, tmp2, 16);
6992 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6993 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
6994 tcg_temp_free_i32(tmp2);
6995 tcg_temp_free_i32(tmp3);
6996 break;
6997 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6998 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
6999 || ((rm | rd) & 1)) {
7000 return 1;
7002 tmp = tcg_const_i32(rd);
7003 tmp2 = tcg_const_i32(rm);
7005 /* Bit 6 is the lowest opcode bit; it distinguishes between
7006 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7008 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7010 if (op == NEON_2RM_AESE) {
7011 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
7012 } else {
7013 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
7015 tcg_temp_free_i32(tmp);
7016 tcg_temp_free_i32(tmp2);
7017 tcg_temp_free_i32(tmp3);
7018 break;
7019 case NEON_2RM_SHA1H:
7020 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
7021 || ((rm | rd) & 1)) {
7022 return 1;
7024 tmp = tcg_const_i32(rd);
7025 tmp2 = tcg_const_i32(rm);
7027 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
7029 tcg_temp_free_i32(tmp);
7030 tcg_temp_free_i32(tmp2);
7031 break;
7032 case NEON_2RM_SHA1SU1:
7033 if ((rm | rd) & 1) {
7034 return 1;
7036 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7037 if (q) {
7038 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
7039 return 1;
7041 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
7042 return 1;
7044 tmp = tcg_const_i32(rd);
7045 tmp2 = tcg_const_i32(rm);
7046 if (q) {
7047 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
7048 } else {
7049 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
7051 tcg_temp_free_i32(tmp);
7052 tcg_temp_free_i32(tmp2);
7053 break;
7054 default:
7055 elementwise:
7056 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7057 if (neon_2rm_is_float_op(op)) {
7058 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7059 neon_reg_offset(rm, pass));
7060 TCGV_UNUSED_I32(tmp);
7061 } else {
7062 tmp = neon_load_reg(rm, pass);
7064 switch (op) {
7065 case NEON_2RM_VREV32:
7066 switch (size) {
7067 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7068 case 1: gen_swap_half(tmp); break;
7069 default: abort();
7071 break;
7072 case NEON_2RM_VREV16:
7073 gen_rev16(tmp);
7074 break;
7075 case NEON_2RM_VCLS:
7076 switch (size) {
7077 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7078 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7079 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
7080 default: abort();
7082 break;
7083 case NEON_2RM_VCLZ:
7084 switch (size) {
7085 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7086 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7087 case 2: gen_helper_clz(tmp, tmp); break;
7088 default: abort();
7090 break;
7091 case NEON_2RM_VCNT:
7092 gen_helper_neon_cnt_u8(tmp, tmp);
7093 break;
7094 case NEON_2RM_VMVN:
7095 tcg_gen_not_i32(tmp, tmp);
7096 break;
7097 case NEON_2RM_VQABS:
7098 switch (size) {
7099 case 0:
7100 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7101 break;
7102 case 1:
7103 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7104 break;
7105 case 2:
7106 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7107 break;
7108 default: abort();
7110 break;
7111 case NEON_2RM_VQNEG:
7112 switch (size) {
7113 case 0:
7114 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7115 break;
7116 case 1:
7117 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7118 break;
7119 case 2:
7120 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7121 break;
7122 default: abort();
7124 break;
7125 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
7126 tmp2 = tcg_const_i32(0);
7127 switch(size) {
7128 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7129 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7130 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
7131 default: abort();
7133 tcg_temp_free_i32(tmp2);
7134 if (op == NEON_2RM_VCLE0) {
7135 tcg_gen_not_i32(tmp, tmp);
7137 break;
7138 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
7139 tmp2 = tcg_const_i32(0);
7140 switch(size) {
7141 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7142 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7143 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
7144 default: abort();
7146 tcg_temp_free_i32(tmp2);
7147 if (op == NEON_2RM_VCLT0) {
7148 tcg_gen_not_i32(tmp, tmp);
7150 break;
7151 case NEON_2RM_VCEQ0:
7152 tmp2 = tcg_const_i32(0);
7153 switch(size) {
7154 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7155 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7156 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
7157 default: abort();
7159 tcg_temp_free_i32(tmp2);
7160 break;
7161 case NEON_2RM_VABS:
7162 switch(size) {
7163 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7164 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7165 case 2: tcg_gen_abs_i32(tmp, tmp); break;
7166 default: abort();
7168 break;
7169 case NEON_2RM_VNEG:
7170 tmp2 = tcg_const_i32(0);
7171 gen_neon_rsb(size, tmp, tmp2);
7172 tcg_temp_free_i32(tmp2);
7173 break;
7174 case NEON_2RM_VCGT0_F:
7176 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7177 tmp2 = tcg_const_i32(0);
7178 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
7179 tcg_temp_free_i32(tmp2);
7180 tcg_temp_free_ptr(fpstatus);
7181 break;
7183 case NEON_2RM_VCGE0_F:
7185 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7186 tmp2 = tcg_const_i32(0);
7187 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
7188 tcg_temp_free_i32(tmp2);
7189 tcg_temp_free_ptr(fpstatus);
7190 break;
7192 case NEON_2RM_VCEQ0_F:
7194 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7195 tmp2 = tcg_const_i32(0);
7196 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
7197 tcg_temp_free_i32(tmp2);
7198 tcg_temp_free_ptr(fpstatus);
7199 break;
7201 case NEON_2RM_VCLE0_F:
7203 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7204 tmp2 = tcg_const_i32(0);
7205 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
7206 tcg_temp_free_i32(tmp2);
7207 tcg_temp_free_ptr(fpstatus);
7208 break;
7210 case NEON_2RM_VCLT0_F:
7212 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7213 tmp2 = tcg_const_i32(0);
7214 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
7215 tcg_temp_free_i32(tmp2);
7216 tcg_temp_free_ptr(fpstatus);
7217 break;
7219 case NEON_2RM_VABS_F:
7220 gen_vfp_abs(0);
7221 break;
7222 case NEON_2RM_VNEG_F:
7223 gen_vfp_neg(0);
7224 break;
7225 case NEON_2RM_VSWP:
7226 tmp2 = neon_load_reg(rd, pass);
7227 neon_store_reg(rm, pass, tmp2);
7228 break;
7229 case NEON_2RM_VTRN:
7230 tmp2 = neon_load_reg(rd, pass);
7231 switch (size) {
7232 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7233 case 1: gen_neon_trn_u16(tmp, tmp2); break;
7234 default: abort();
7236 neon_store_reg(rm, pass, tmp2);
7237 break;
7238 case NEON_2RM_VRINTN:
7239 case NEON_2RM_VRINTA:
7240 case NEON_2RM_VRINTM:
7241 case NEON_2RM_VRINTP:
7242 case NEON_2RM_VRINTZ:
7244 TCGv_i32 tcg_rmode;
7245 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7246 int rmode;
7248 if (op == NEON_2RM_VRINTZ) {
7249 rmode = FPROUNDING_ZERO;
7250 } else {
7251 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7254 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7255 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7256 cpu_env);
7257 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7258 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7259 cpu_env);
7260 tcg_temp_free_ptr(fpstatus);
7261 tcg_temp_free_i32(tcg_rmode);
7262 break;
7264 case NEON_2RM_VRINTX:
7266 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7267 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7268 tcg_temp_free_ptr(fpstatus);
7269 break;
7271 case NEON_2RM_VCVTAU:
7272 case NEON_2RM_VCVTAS:
7273 case NEON_2RM_VCVTNU:
7274 case NEON_2RM_VCVTNS:
7275 case NEON_2RM_VCVTPU:
7276 case NEON_2RM_VCVTPS:
7277 case NEON_2RM_VCVTMU:
7278 case NEON_2RM_VCVTMS:
7280 bool is_signed = !extract32(insn, 7, 1);
7281 TCGv_ptr fpst = get_fpstatus_ptr(1);
7282 TCGv_i32 tcg_rmode, tcg_shift;
7283 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7285 tcg_shift = tcg_const_i32(0);
7286 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7287 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7288 cpu_env);
7290 if (is_signed) {
7291 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7292 tcg_shift, fpst);
7293 } else {
7294 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7295 tcg_shift, fpst);
7298 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7299 cpu_env);
7300 tcg_temp_free_i32(tcg_rmode);
7301 tcg_temp_free_i32(tcg_shift);
7302 tcg_temp_free_ptr(fpst);
7303 break;
7305 case NEON_2RM_VRECPE:
7307 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7308 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7309 tcg_temp_free_ptr(fpstatus);
7310 break;
7312 case NEON_2RM_VRSQRTE:
7314 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7315 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7316 tcg_temp_free_ptr(fpstatus);
7317 break;
7319 case NEON_2RM_VRECPE_F:
7321 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7322 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7323 tcg_temp_free_ptr(fpstatus);
7324 break;
7326 case NEON_2RM_VRSQRTE_F:
7328 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7329 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7330 tcg_temp_free_ptr(fpstatus);
7331 break;
7333 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
7334 gen_vfp_sito(0, 1);
7335 break;
7336 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
7337 gen_vfp_uito(0, 1);
7338 break;
7339 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
7340 gen_vfp_tosiz(0, 1);
7341 break;
7342 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
7343 gen_vfp_touiz(0, 1);
7344 break;
7345 default:
7346 /* Reserved op values were caught by the
7347 * neon_2rm_sizes[] check earlier.
7349 abort();
7351 if (neon_2rm_is_float_op(op)) {
7352 tcg_gen_st_f32(cpu_F0s, cpu_env,
7353 neon_reg_offset(rd, pass));
7354 } else {
7355 neon_store_reg(rd, pass, tmp);
7358 break;
7360 } else if ((insn & (1 << 10)) == 0) {
7361 /* VTBL, VTBX. */
7362 int n = ((insn >> 8) & 3) + 1;
7363 if ((rn + n) > 32) {
7364 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7365 * helper function running off the end of the register file.
7367 return 1;
7369 n <<= 3;
7370 if (insn & (1 << 6)) {
7371 tmp = neon_load_reg(rd, 0);
7372 } else {
7373 tmp = tcg_temp_new_i32();
7374 tcg_gen_movi_i32(tmp, 0);
7376 tmp2 = neon_load_reg(rm, 0);
7377 tmp4 = tcg_const_i32(rn);
7378 tmp5 = tcg_const_i32(n);
7379 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7380 tcg_temp_free_i32(tmp);
7381 if (insn & (1 << 6)) {
7382 tmp = neon_load_reg(rd, 1);
7383 } else {
7384 tmp = tcg_temp_new_i32();
7385 tcg_gen_movi_i32(tmp, 0);
7387 tmp3 = neon_load_reg(rm, 1);
7388 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
7389 tcg_temp_free_i32(tmp5);
7390 tcg_temp_free_i32(tmp4);
7391 neon_store_reg(rd, 0, tmp2);
7392 neon_store_reg(rd, 1, tmp3);
7393 tcg_temp_free_i32(tmp);
7394 } else if ((insn & 0x380) == 0) {
7395 /* VDUP */
7396 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7397 return 1;
7399 if (insn & (1 << 19)) {
7400 tmp = neon_load_reg(rm, 1);
7401 } else {
7402 tmp = neon_load_reg(rm, 0);
7404 if (insn & (1 << 16)) {
7405 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
7406 } else if (insn & (1 << 17)) {
7407 if ((insn >> 18) & 1)
7408 gen_neon_dup_high16(tmp);
7409 else
7410 gen_neon_dup_low16(tmp);
7412 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7413 tmp2 = tcg_temp_new_i32();
7414 tcg_gen_mov_i32(tmp2, tmp);
7415 neon_store_reg(rd, pass, tmp2);
7417 tcg_temp_free_i32(tmp);
7418 } else {
7419 return 1;
7423 return 0;
/* Decode and emit code for a generic coprocessor instruction
 * (MRC/MCR/MRRC/MCRR and the XScale/iwMMXt coprocessor spaces).
 * Returns 0 on success; a nonzero return makes the caller treat the
 * instruction as illegal/UNDEF.
 */
7426 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
7428     int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7429     const ARMCPRegInfo *ri;
7431     cpnum = (insn >> 8) & 0xf;
7433     /* First check for coprocessor space used for XScale/iwMMXt insns */
7434     if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
7435         if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7436             return 1;
7438         if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7439             return disas_iwmmxt_insn(s, insn);
7440         } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7441             return disas_dsp_insn(s, insn);
7443         return 1;
7446     /* Otherwise treat as a generic register access */
7447     is64 = (insn & (1 << 25)) == 0;
7448     if (!is64 && ((insn & (1 << 4)) == 0)) {
7449         /* cdp */
7450         return 1;
7453     crm = insn & 0xf;
     /* Field layout differs between the 64-bit (MRRC/MCRR) and 32-bit
      * (MRC/MCR) encodings.
      */
7454     if (is64) {
7455         crn = 0;
7456         opc1 = (insn >> 4) & 0xf;
7457         opc2 = 0;
7458         rt2 = (insn >> 16) & 0xf;
7459     } else {
7460         crn = (insn >> 16) & 0xf;
7461         opc1 = (insn >> 21) & 7;
7462         opc2 = (insn >> 5) & 7;
7463         rt2 = 0;
7465     isread = (insn >> 20) & 1;
7466     rt = (insn >> 12) & 0xf;
     /* Look up the register description for this encoding. */
7468     ri = get_arm_cp_reginfo(s->cp_regs,
7469             ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
7470     if (ri) {
7471         /* Check access permissions */
7472         if (!cp_access_ok(s->current_el, ri, isread)) {
7473             return 1;
7476         if (ri->accessfn ||
7477             (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
7478             /* Emit code to perform further access permissions checks at
7479              * runtime; this may result in an exception.
7480              * Note that on XScale all cp0..c13 registers do an access check
7481              * call in order to handle c15_cpar.
7483             TCGv_ptr tmpptr;
7484             TCGv_i32 tcg_syn, tcg_isread;
7485             uint32_t syndrome;
7487             /* Note that since we are an implementation which takes an
7488              * exception on a trapped conditional instruction only if the
7489              * instruction passes its condition code check, we can take
7490              * advantage of the clause in the ARM ARM that allows us to set
7491              * the COND field in the instruction to 0xE in all cases.
7492              * We could fish the actual condition out of the insn (ARM)
7493              * or the condexec bits (Thumb) but it isn't necessary.
7495             switch (cpnum) {
7496             case 14:
7497                 if (is64) {
7498                     syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7499                                                  isread, false);
7500                 } else {
7501                     syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7502                                                 rt, isread, false);
7504                 break;
7505             case 15:
7506                 if (is64) {
7507                     syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7508                                                  isread, false);
7509                 } else {
7510                     syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7511                                                 rt, isread, false);
7513                 break;
7514             default:
7515                 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7516                  * so this can only happen if this is an ARMv7 or earlier CPU,
7517                  * in which case the syndrome information won't actually be
7518                  * guest visible.
7520                 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
7521                 syndrome = syn_uncategorized();
7522                 break;
             /* Sync condexec state and PC so the helper can raise a
              * precise exception if the runtime check fails.
              */
7525             gen_set_condexec(s);
7526             gen_set_pc_im(s, s->pc - 4);
7527             tmpptr = tcg_const_ptr(ri);
7528             tcg_syn = tcg_const_i32(syndrome);
7529             tcg_isread = tcg_const_i32(isread);
7530             gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7531                                            tcg_isread);
7532             tcg_temp_free_ptr(tmpptr);
7533             tcg_temp_free_i32(tcg_syn);
7534             tcg_temp_free_i32(tcg_isread);
7537         /* Handle special cases first */
7538         switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7539         case ARM_CP_NOP:
7540             return 0;
7541         case ARM_CP_WFI:
7542             if (isread) {
7543                 return 1;
7545             gen_set_pc_im(s, s->pc);
7546             s->is_jmp = DISAS_WFI;
7547             return 0;
7548         default:
7549             break;
         /* In icount mode, accesses to I/O registers must be bracketed by
          * gen_io_start()/gen_io_end() (the latter emitted further below).
          */
7552         if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7553             gen_io_start();
7556         if (isread) {
7557             /* Read */
7558             if (is64) {
7559                 TCGv_i64 tmp64;
7560                 TCGv_i32 tmp;
                 /* Value source: constant, read function, or direct
                  * load from the CPU state field.
                  */
7561                 if (ri->type & ARM_CP_CONST) {
7562                     tmp64 = tcg_const_i64(ri->resetvalue);
7563                 } else if (ri->readfn) {
7564                     TCGv_ptr tmpptr;
7565                     tmp64 = tcg_temp_new_i64();
7566                     tmpptr = tcg_const_ptr(ri);
7567                     gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7568                     tcg_temp_free_ptr(tmpptr);
7569                 } else {
7570                     tmp64 = tcg_temp_new_i64();
7571                     tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                 /* Split the 64-bit result into rt (low) and rt2 (high). */
7573                 tmp = tcg_temp_new_i32();
7574                 tcg_gen_extrl_i64_i32(tmp, tmp64);
7575                 store_reg(s, rt, tmp);
7576                 tcg_gen_shri_i64(tmp64, tmp64, 32);
7577                 tmp = tcg_temp_new_i32();
7578                 tcg_gen_extrl_i64_i32(tmp, tmp64);
7579                 tcg_temp_free_i64(tmp64);
7580                 store_reg(s, rt2, tmp);
7581             } else {
7582                 TCGv_i32 tmp;
7583                 if (ri->type & ARM_CP_CONST) {
7584                     tmp = tcg_const_i32(ri->resetvalue);
7585                 } else if (ri->readfn) {
7586                     TCGv_ptr tmpptr;
7587                     tmp = tcg_temp_new_i32();
7588                     tmpptr = tcg_const_ptr(ri);
7589                     gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7590                     tcg_temp_free_ptr(tmpptr);
7591                 } else {
7592                     tmp = load_cpu_offset(ri->fieldoffset);
7594                 if (rt == 15) {
7595                     /* Destination register of r15 for 32 bit loads sets
7596                      * the condition codes from the high 4 bits of the value
7598                     gen_set_nzcv(tmp);
7599                     tcg_temp_free_i32(tmp);
7600                 } else {
7601                     store_reg(s, rt, tmp);
7604         } else {
7605             /* Write */
7606             if (ri->type & ARM_CP_CONST) {
7607                 /* If not forbidden by access permissions, treat as WI */
7608                 return 0;
7611             if (is64) {
7612                 TCGv_i32 tmplo, tmphi;
7613                 TCGv_i64 tmp64 = tcg_temp_new_i64();
7614                 tmplo = load_reg(s, rt);
7615                 tmphi = load_reg(s, rt2);
7616                 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7617                 tcg_temp_free_i32(tmplo);
7618                 tcg_temp_free_i32(tmphi);
7619                 if (ri->writefn) {
7620                     TCGv_ptr tmpptr = tcg_const_ptr(ri);
7621                     gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7622                     tcg_temp_free_ptr(tmpptr);
7623                 } else {
7624                     tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7626                 tcg_temp_free_i64(tmp64);
7627             } else {
7628                 if (ri->writefn) {
7629                     TCGv_i32 tmp;
7630                     TCGv_ptr tmpptr;
7631                     tmp = load_reg(s, rt);
7632                     tmpptr = tcg_const_ptr(ri);
7633                     gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7634                     tcg_temp_free_ptr(tmpptr);
7635                     tcg_temp_free_i32(tmp);
7636                 } else {
7637                     TCGv_i32 tmp = load_reg(s, rt);
7638                     store_cpu_offset(tmp, ri->fieldoffset);
7643         if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7644             /* I/O operations must end the TB here (whether read or write) */
7645             gen_io_end();
7646             gen_lookup_tb(s);
7647         } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
7648             /* We default to ending the TB on a coprocessor register write,
7649              * but allow this to be suppressed by the register definition
7650              * (usually only necessary to work around guest bugs).
7652             gen_lookup_tb(s);
7655         return 0;
7658     /* Unknown register; this might be a guest error or a QEMU
7659      * unimplemented feature.
7661     if (is64) {
7662         qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7663                       "64 bit system register cp:%d opc1: %d crm:%d "
7664                       "(%s)\n",
7665                       isread ? "read" : "write", cpnum, opc1, crm,
7666                       s->ns ? "non-secure" : "secure");
7667     } else {
7668         qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7669                       "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7670                       "(%s)\n",
7671                       isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7672                       s->ns ? "non-secure" : "secure");
7675     return 1;
7679 /* Store a 64-bit value to a register pair. Clobbers val. */
7680 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
7682 TCGv_i32 tmp;
7683 tmp = tcg_temp_new_i32();
7684 tcg_gen_extrl_i64_i32(tmp, val);
7685 store_reg(s, rlow, tmp);
7686 tmp = tcg_temp_new_i32();
7687 tcg_gen_shri_i64(val, val, 32);
7688 tcg_gen_extrl_i64_i32(tmp, val);
7689 store_reg(s, rhigh, tmp);
7692 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
7693 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
7695 TCGv_i64 tmp;
7696 TCGv_i32 tmp2;
7698 /* Load value and extend to 64 bits. */
7699 tmp = tcg_temp_new_i64();
7700 tmp2 = load_reg(s, rlow);
7701 tcg_gen_extu_i32_i64(tmp, tmp2);
7702 tcg_temp_free_i32(tmp2);
7703 tcg_gen_add_i64(val, val, tmp);
7704 tcg_temp_free_i64(tmp);
7707 /* load and add a 64-bit value from a register pair. */
7708 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
7710 TCGv_i64 tmp;
7711 TCGv_i32 tmpl;
7712 TCGv_i32 tmph;
7714 /* Load 64-bit value rd:rn. */
7715 tmpl = load_reg(s, rlow);
7716 tmph = load_reg(s, rhigh);
7717 tmp = tcg_temp_new_i64();
7718 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7719 tcg_temp_free_i32(tmpl);
7720 tcg_temp_free_i32(tmph);
7721 tcg_gen_add_i64(val, val, tmp);
7722 tcg_temp_free_i64(tmp);
7725 /* Set N and Z flags from hi|lo. */
7726 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
7728 tcg_gen_mov_i32(cpu_NF, hi);
7729 tcg_gen_or_i32(cpu_ZF, lo, hi);
7732 /* Load/Store exclusive instructions are implemented by remembering
7733 the value/address loaded, and seeing if these are the same
7734 when the store is performed. This should be sufficient to implement
7735 the architecturally mandated semantics, and avoids having to monitor
7736 regular stores.
7738 In system emulation mode only one CPU will be running at once, so
7739 this sequence is effectively atomic. In user emulation mode we
7740 throw an exception and handle the atomic operation elsewhere. */
7741 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
7742                                TCGv_i32 addr, int size)
7744     TCGv_i32 tmp = tcg_temp_new_i32();
     /* Mark this insn as a load-exclusive for the translator. */
7746     s->is_ldex = true;
7748     switch (size) {
7749     case 0:
7750         gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
7751         break;
7752     case 1:
7753         gen_aa32_ld16ua(s, tmp, addr, get_mem_index(s));
7754         break;
7755     case 2:
7756     case 3:
7757         gen_aa32_ld32ua(s, tmp, addr, get_mem_index(s));
7758         break;
7759     default:
7760         abort();
     /* size == 3 is LDREXD: the second word comes from addr + 4 and the
      * whole pair is remembered in cpu_exclusive_val as one 64-bit value.
      */
7763     if (size == 3) {
7764         TCGv_i32 tmp2 = tcg_temp_new_i32();
7765         TCGv_i32 tmp3 = tcg_temp_new_i32();
7767         tcg_gen_addi_i32(tmp2, addr, 4);
7768         gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
7769         tcg_temp_free_i32(tmp2);
7770         tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
7771         store_reg(s, rt2, tmp3);
7772     } else {
7773         tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
     /* Record the address so the matching store-exclusive can check it. */
7776     store_reg(s, rt, tmp);
7777     tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
/* CLREX: clear the exclusive monitor by invalidating the recorded
 * exclusive address (-1 never matches a real address).
 */
7780 static void gen_clrex(DisasContext *s)
7782     tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7785 #ifdef CONFIG_USER_ONLY
/* User-mode emulation variant: record the store-exclusive operands in
 * cpu_exclusive_test/cpu_exclusive_info and raise EXCP_STREX so the
 * atomic compare-and-store is performed outside the TB.
 */
7786 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7787                                 TCGv_i32 addr, int size)
7789     tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
     /* Pack size and the three register numbers into one word. */
7790     tcg_gen_movi_i32(cpu_exclusive_info,
7791                      size | (rd << 4) | (rt << 8) | (rt2 << 12));
7792     gen_exception_internal_insn(s, 4, EXCP_STREX);
7794 #else
/* System emulation variant of store-exclusive: compare the remembered
 * exclusive address and value against the current state of memory, and
 * only perform the store (and set Rd = 0) if both still match;
 * otherwise set Rd = 1. Either way the exclusive monitor is cleared.
 */
7795 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7796                                 TCGv_i32 addr, int size)
7798     TCGv_i32 tmp;
7799     TCGv_i64 val64, extaddr;
7800     TCGLabel *done_label;
7801     TCGLabel *fail_label;
7803     /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7804          [addr] = {Rt};
7805          {Rd} = 0;
7806        } else {
7807          {Rd} = 1;
7808        } */
7809     fail_label = gen_new_label();
7810     done_label = gen_new_label();
     /* First check: the address must match the recorded exclusive address. */
7811     extaddr = tcg_temp_new_i64();
7812     tcg_gen_extu_i32_i64(extaddr, addr);
7813     tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7814     tcg_temp_free_i64(extaddr);
     /* Second check: reload the memory at [addr] and compare against the
      * value recorded by the matching load-exclusive.
      */
7816     tmp = tcg_temp_new_i32();
7817     switch (size) {
7818     case 0:
7819         gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
7820         break;
7821     case 1:
7822         gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
7823         break;
7824     case 2:
7825     case 3:
7826         gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
7827         break;
7828     default:
7829         abort();
7832     val64 = tcg_temp_new_i64();
7833     if (size == 3) {
7834         TCGv_i32 tmp2 = tcg_temp_new_i32();
7835         TCGv_i32 tmp3 = tcg_temp_new_i32();
7836         tcg_gen_addi_i32(tmp2, addr, 4);
7837         gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
7838         tcg_temp_free_i32(tmp2);
7839         tcg_gen_concat_i32_i64(val64, tmp, tmp3);
7840         tcg_temp_free_i32(tmp3);
7841     } else {
7842         tcg_gen_extu_i32_i64(val64, tmp);
7844     tcg_temp_free_i32(tmp);
7846     tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
7847     tcg_temp_free_i64(val64);
     /* Both checks passed: perform the store(s) and report success. */
7849     tmp = load_reg(s, rt);
7850     switch (size) {
7851     case 0:
7852         gen_aa32_st8(s, tmp, addr, get_mem_index(s));
7853         break;
7854     case 1:
7855         gen_aa32_st16(s, tmp, addr, get_mem_index(s));
7856         break;
7857     case 2:
7858     case 3:
7859         gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7860         break;
7861     default:
7862         abort();
7864     tcg_temp_free_i32(tmp);
     /* For STREXD, also store the second word of the pair at addr + 4. */
7865     if (size == 3) {
7866         tcg_gen_addi_i32(addr, addr, 4);
7867         tmp = load_reg(s, rt2);
7868         gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7869         tcg_temp_free_i32(tmp);
7871     tcg_gen_movi_i32(cpu_R[rd], 0);
7872     tcg_gen_br(done_label);
7873     gen_set_label(fail_label);
7874     tcg_gen_movi_i32(cpu_R[rd], 1);
7875     gen_set_label(done_label);
     /* Success or failure, the exclusive monitor is always cleared. */
7876     tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7878 #endif
7880 /* gen_srs:
7881 * @env: CPUARMState
7882 * @s: DisasContext
7883 * @mode: mode field from insn (which stack to store to)
7884 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7885 * @writeback: true if writeback bit set
7887 * Generate code for the SRS (Store Return State) insn.
7889 static void gen_srs(DisasContext *s,
7890                     uint32_t mode, uint32_t amode, bool writeback)
7892     int32_t offset;
7893     TCGv_i32 addr, tmp;
7894     bool undef = false;
7896     /* SRS is:
7897      * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
7898      *   and specified mode is monitor mode
7899      * - UNDEFINED in Hyp mode
7900      * - UNPREDICTABLE in User or System mode
7901      * - UNPREDICTABLE if the specified mode is:
7902      * -- not implemented
7903      * -- not a valid mode number
7904      * -- a mode that's at a higher exception level
7905      * -- Monitor, if we are Non-secure
7906      * For the UNPREDICTABLE cases we choose to UNDEF.
7908     if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
7909         gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
7910         return;
7913     if (s->current_el == 0 || s->current_el == 2) {
7914         undef = true;
     /* Validate the target mode number; unknown modes UNDEF. */
7917     switch (mode) {
7918     case ARM_CPU_MODE_USR:
7919     case ARM_CPU_MODE_FIQ:
7920     case ARM_CPU_MODE_IRQ:
7921     case ARM_CPU_MODE_SVC:
7922     case ARM_CPU_MODE_ABT:
7923     case ARM_CPU_MODE_UND:
7924     case ARM_CPU_MODE_SYS:
7925         break;
7926     case ARM_CPU_MODE_HYP:
7927         if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
7928             undef = true;
7930         break;
7931     case ARM_CPU_MODE_MON:
7932         /* No need to check specifically for "are we non-secure" because
7933          * we've already made EL0 UNDEF and handled the trap for S-EL1;
7934          * so if this isn't EL3 then we must be non-secure.
7936         if (s->current_el != 3) {
7937             undef = true;
7939         break;
7940     default:
7941         undef = true;
7944     if (undef) {
7945         gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
7946                            default_exception_el(s));
7947         return;
7950     addr = tcg_temp_new_i32();
7951     tmp = tcg_const_i32(mode);
7952     /* get_r13_banked() will raise an exception if called from System mode */
7953     gen_set_condexec(s);
7954     gen_set_pc_im(s, s->pc - 4);
7955     gen_helper_get_r13_banked(addr, cpu_env, tmp);
7956     tcg_temp_free_i32(tmp);
     /* Initial adjustment of the banked SP per the addressing mode. */
7957     switch (amode) {
7958     case 0: /* DA */
7959         offset = -4;
7960         break;
7961     case 1: /* IA */
7962         offset = 0;
7963         break;
7964     case 2: /* DB */
7965         offset = -8;
7966         break;
7967     case 3: /* IB */
7968         offset = 4;
7969         break;
7970     default:
7971         abort();
7973     tcg_gen_addi_i32(addr, addr, offset);
     /* Store LR, then SPSR, at consecutive words. */
7974     tmp = load_reg(s, 14);
7975     gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7976     tcg_temp_free_i32(tmp);
7977     tmp = load_cpu_field(spsr);
7978     tcg_gen_addi_i32(addr, addr, 4);
7979     gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7980     tcg_temp_free_i32(tmp);
7981     if (writeback) {
         /* Final SP adjustment (relative to the address of the second
          * word stored) for each addressing mode.
          */
7982         switch (amode) {
7983         case 0:
7984             offset = -8;
7985             break;
7986         case 1:
7987             offset = 4;
7988             break;
7989         case 2:
7990             offset = -4;
7991             break;
7992         case 3:
7993             offset = 0;
7994             break;
7995         default:
7996             abort();
7998         tcg_gen_addi_i32(addr, addr, offset);
7999         tmp = tcg_const_i32(mode);
8000         gen_helper_set_r13_banked(cpu_env, tmp, addr);
8001         tcg_temp_free_i32(tmp);
8003     tcg_temp_free_i32(addr);
8004     s->is_jmp = DISAS_UPDATE;
8007 static void disas_arm_insn(DisasContext *s, unsigned int insn)
8009 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
8010 TCGv_i32 tmp;
8011 TCGv_i32 tmp2;
8012 TCGv_i32 tmp3;
8013 TCGv_i32 addr;
8014 TCGv_i64 tmp64;
8016 /* M variants do not implement ARM mode. */
8017 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8018 goto illegal_op;
8020 cond = insn >> 28;
8021 if (cond == 0xf){
8022 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8023 * choose to UNDEF. In ARMv5 and above the space is used
8024 * for miscellaneous unconditional instructions.
8026 ARCH(5);
8028 /* Unconditional instructions. */
8029 if (((insn >> 25) & 7) == 1) {
8030 /* NEON Data processing. */
8031 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
8032 goto illegal_op;
8035 if (disas_neon_data_insn(s, insn)) {
8036 goto illegal_op;
8038 return;
8040 if ((insn & 0x0f100000) == 0x04000000) {
8041 /* NEON load/store. */
8042 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
8043 goto illegal_op;
8046 if (disas_neon_ls_insn(s, insn)) {
8047 goto illegal_op;
8049 return;
8051 if ((insn & 0x0f000e10) == 0x0e000a00) {
8052 /* VFP. */
8053 if (disas_vfp_insn(s, insn)) {
8054 goto illegal_op;
8056 return;
8058 if (((insn & 0x0f30f000) == 0x0510f000) ||
8059 ((insn & 0x0f30f010) == 0x0710f000)) {
8060 if ((insn & (1 << 22)) == 0) {
8061 /* PLDW; v7MP */
8062 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
8063 goto illegal_op;
8066 /* Otherwise PLD; v5TE+ */
8067 ARCH(5TE);
8068 return;
8070 if (((insn & 0x0f70f000) == 0x0450f000) ||
8071 ((insn & 0x0f70f010) == 0x0650f000)) {
8072 ARCH(7);
8073 return; /* PLI; V7 */
8075 if (((insn & 0x0f700000) == 0x04100000) ||
8076 ((insn & 0x0f700010) == 0x06100000)) {
8077 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
8078 goto illegal_op;
8080 return; /* v7MP: Unallocated memory hint: must NOP */
8083 if ((insn & 0x0ffffdff) == 0x01010000) {
8084 ARCH(6);
8085 /* setend */
8086 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8087 gen_helper_setend(cpu_env);
8088 s->is_jmp = DISAS_UPDATE;
8090 return;
8091 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8092 switch ((insn >> 4) & 0xf) {
8093 case 1: /* clrex */
8094 ARCH(6K);
8095 gen_clrex(s);
8096 return;
8097 case 4: /* dsb */
8098 case 5: /* dmb */
8099 ARCH(7);
8100 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
8101 return;
8102 case 6: /* isb */
8103 /* We need to break the TB after this insn to execute
8104 * self-modifying code correctly and also to take
8105 * any pending interrupts immediately.
8107 gen_lookup_tb(s);
8108 return;
8109 default:
8110 goto illegal_op;
8112 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8113 /* srs */
8114 ARCH(6);
8115 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
8116 return;
8117 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
8118 /* rfe */
8119 int32_t offset;
8120 if (IS_USER(s))
8121 goto illegal_op;
8122 ARCH(6);
8123 rn = (insn >> 16) & 0xf;
8124 addr = load_reg(s, rn);
8125 i = (insn >> 23) & 3;
8126 switch (i) {
8127 case 0: offset = -4; break; /* DA */
8128 case 1: offset = 0; break; /* IA */
8129 case 2: offset = -8; break; /* DB */
8130 case 3: offset = 4; break; /* IB */
8131 default: abort();
8133 if (offset)
8134 tcg_gen_addi_i32(addr, addr, offset);
8135 /* Load PC into tmp and CPSR into tmp2. */
8136 tmp = tcg_temp_new_i32();
8137 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8138 tcg_gen_addi_i32(addr, addr, 4);
8139 tmp2 = tcg_temp_new_i32();
8140 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
8141 if (insn & (1 << 21)) {
8142 /* Base writeback. */
8143 switch (i) {
8144 case 0: offset = -8; break;
8145 case 1: offset = 4; break;
8146 case 2: offset = -4; break;
8147 case 3: offset = 0; break;
8148 default: abort();
8150 if (offset)
8151 tcg_gen_addi_i32(addr, addr, offset);
8152 store_reg(s, rn, addr);
8153 } else {
8154 tcg_temp_free_i32(addr);
8156 gen_rfe(s, tmp, tmp2);
8157 return;
8158 } else if ((insn & 0x0e000000) == 0x0a000000) {
8159 /* branch link and change to thumb (blx <offset>) */
8160 int32_t offset;
8162 val = (uint32_t)s->pc;
8163 tmp = tcg_temp_new_i32();
8164 tcg_gen_movi_i32(tmp, val);
8165 store_reg(s, 14, tmp);
8166 /* Sign-extend the 24-bit offset */
8167 offset = (((int32_t)insn) << 8) >> 8;
8168 /* offset * 4 + bit24 * 2 + (thumb bit) */
8169 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8170 /* pipeline offset */
8171 val += 4;
8172 /* protected by ARCH(5); above, near the start of uncond block */
8173 gen_bx_im(s, val);
8174 return;
8175 } else if ((insn & 0x0e000f00) == 0x0c000100) {
8176 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
8177 /* iWMMXt register transfer. */
8178 if (extract32(s->c15_cpar, 1, 1)) {
8179 if (!disas_iwmmxt_insn(s, insn)) {
8180 return;
8184 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8185 /* Coprocessor double register transfer. */
8186 ARCH(5TE);
8187 } else if ((insn & 0x0f000010) == 0x0e000010) {
8188 /* Additional coprocessor register transfer. */
8189 } else if ((insn & 0x0ff10020) == 0x01000000) {
8190 uint32_t mask;
8191 uint32_t val;
8192 /* cps (privileged) */
8193 if (IS_USER(s))
8194 return;
8195 mask = val = 0;
8196 if (insn & (1 << 19)) {
8197 if (insn & (1 << 8))
8198 mask |= CPSR_A;
8199 if (insn & (1 << 7))
8200 mask |= CPSR_I;
8201 if (insn & (1 << 6))
8202 mask |= CPSR_F;
8203 if (insn & (1 << 18))
8204 val |= mask;
8206 if (insn & (1 << 17)) {
8207 mask |= CPSR_M;
8208 val |= (insn & 0x1f);
8210 if (mask) {
8211 gen_set_psr_im(s, mask, 0, val);
8213 return;
8215 goto illegal_op;
8217 if (cond != 0xe) {
8218 /* if not always execute, we generate a conditional jump to
8219 next instruction */
8220 s->condlabel = gen_new_label();
8221 arm_gen_test_cc(cond ^ 1, s->condlabel);
8222 s->condjmp = 1;
8224 if ((insn & 0x0f900000) == 0x03000000) {
8225 if ((insn & (1 << 21)) == 0) {
8226 ARCH(6T2);
8227 rd = (insn >> 12) & 0xf;
8228 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8229 if ((insn & (1 << 22)) == 0) {
8230 /* MOVW */
8231 tmp = tcg_temp_new_i32();
8232 tcg_gen_movi_i32(tmp, val);
8233 } else {
8234 /* MOVT */
8235 tmp = load_reg(s, rd);
8236 tcg_gen_ext16u_i32(tmp, tmp);
8237 tcg_gen_ori_i32(tmp, tmp, val << 16);
8239 store_reg(s, rd, tmp);
8240 } else {
8241 if (((insn >> 12) & 0xf) != 0xf)
8242 goto illegal_op;
8243 if (((insn >> 16) & 0xf) == 0) {
8244 gen_nop_hint(s, insn & 0xff);
8245 } else {
8246 /* CPSR = immediate */
8247 val = insn & 0xff;
8248 shift = ((insn >> 8) & 0xf) * 2;
8249 if (shift)
8250 val = (val >> shift) | (val << (32 - shift));
8251 i = ((insn & (1 << 22)) != 0);
8252 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8253 i, val)) {
8254 goto illegal_op;
8258 } else if ((insn & 0x0f900000) == 0x01000000
8259 && (insn & 0x00000090) != 0x00000090) {
8260 /* miscellaneous instructions */
8261 op1 = (insn >> 21) & 3;
8262 sh = (insn >> 4) & 0xf;
8263 rm = insn & 0xf;
8264 switch (sh) {
8265 case 0x0: /* MSR, MRS */
8266 if (insn & (1 << 9)) {
8267 /* MSR (banked) and MRS (banked) */
8268 int sysm = extract32(insn, 16, 4) |
8269 (extract32(insn, 8, 1) << 4);
8270 int r = extract32(insn, 22, 1);
8272 if (op1 & 1) {
8273 /* MSR (banked) */
8274 gen_msr_banked(s, r, sysm, rm);
8275 } else {
8276 /* MRS (banked) */
8277 int rd = extract32(insn, 12, 4);
8279 gen_mrs_banked(s, r, sysm, rd);
8281 break;
8284 /* MSR, MRS (for PSRs) */
8285 if (op1 & 1) {
8286 /* PSR = reg */
8287 tmp = load_reg(s, rm);
8288 i = ((op1 & 2) != 0);
8289 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
8290 goto illegal_op;
8291 } else {
8292 /* reg = PSR */
8293 rd = (insn >> 12) & 0xf;
8294 if (op1 & 2) {
8295 if (IS_USER(s))
8296 goto illegal_op;
8297 tmp = load_cpu_field(spsr);
8298 } else {
8299 tmp = tcg_temp_new_i32();
8300 gen_helper_cpsr_read(tmp, cpu_env);
8302 store_reg(s, rd, tmp);
8304 break;
8305 case 0x1:
8306 if (op1 == 1) {
8307 /* branch/exchange thumb (bx). */
8308 ARCH(4T);
8309 tmp = load_reg(s, rm);
8310 gen_bx(s, tmp);
8311 } else if (op1 == 3) {
8312 /* clz */
8313 ARCH(5);
8314 rd = (insn >> 12) & 0xf;
8315 tmp = load_reg(s, rm);
8316 gen_helper_clz(tmp, tmp);
8317 store_reg(s, rd, tmp);
8318 } else {
8319 goto illegal_op;
8321 break;
8322 case 0x2:
8323 if (op1 == 1) {
8324 ARCH(5J); /* bxj */
8325 /* Trivial implementation equivalent to bx. */
8326 tmp = load_reg(s, rm);
8327 gen_bx(s, tmp);
8328 } else {
8329 goto illegal_op;
8331 break;
8332 case 0x3:
8333 if (op1 != 1)
8334 goto illegal_op;
8336 ARCH(5);
8337 /* branch link/exchange thumb (blx) */
8338 tmp = load_reg(s, rm);
8339 tmp2 = tcg_temp_new_i32();
8340 tcg_gen_movi_i32(tmp2, s->pc);
8341 store_reg(s, 14, tmp2);
8342 gen_bx(s, tmp);
8343 break;
8344 case 0x4:
8346 /* crc32/crc32c */
8347 uint32_t c = extract32(insn, 8, 4);
8349 /* Check this CPU supports ARMv8 CRC instructions.
8350 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8351 * Bits 8, 10 and 11 should be zero.
8353 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
8354 (c & 0xd) != 0) {
8355 goto illegal_op;
8358 rn = extract32(insn, 16, 4);
8359 rd = extract32(insn, 12, 4);
8361 tmp = load_reg(s, rn);
8362 tmp2 = load_reg(s, rm);
8363 if (op1 == 0) {
8364 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8365 } else if (op1 == 1) {
8366 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8368 tmp3 = tcg_const_i32(1 << op1);
8369 if (c & 0x2) {
8370 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8371 } else {
8372 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8374 tcg_temp_free_i32(tmp2);
8375 tcg_temp_free_i32(tmp3);
8376 store_reg(s, rd, tmp);
8377 break;
8379 case 0x5: /* saturating add/subtract */
8380 ARCH(5TE);
8381 rd = (insn >> 12) & 0xf;
8382 rn = (insn >> 16) & 0xf;
8383 tmp = load_reg(s, rm);
8384 tmp2 = load_reg(s, rn);
8385 if (op1 & 2)
8386 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
8387 if (op1 & 1)
8388 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
8389 else
8390 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8391 tcg_temp_free_i32(tmp2);
8392 store_reg(s, rd, tmp);
8393 break;
8394 case 7:
8396 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
8397 switch (op1) {
8398 case 1:
8399 /* bkpt */
8400 ARCH(5);
8401 gen_exception_insn(s, 4, EXCP_BKPT,
8402 syn_aa32_bkpt(imm16, false),
8403 default_exception_el(s));
8404 break;
8405 case 2:
8406 /* Hypervisor call (v7) */
8407 ARCH(7);
8408 if (IS_USER(s)) {
8409 goto illegal_op;
8411 gen_hvc(s, imm16);
8412 break;
8413 case 3:
8414 /* Secure monitor call (v6+) */
8415 ARCH(6K);
8416 if (IS_USER(s)) {
8417 goto illegal_op;
8419 gen_smc(s);
8420 break;
8421 default:
8422 goto illegal_op;
8424 break;
8426 case 0x8: /* signed multiply */
8427 case 0xa:
8428 case 0xc:
8429 case 0xe:
8430 ARCH(5TE);
8431 rs = (insn >> 8) & 0xf;
8432 rn = (insn >> 12) & 0xf;
8433 rd = (insn >> 16) & 0xf;
8434 if (op1 == 1) {
8435 /* (32 * 16) >> 16 */
8436 tmp = load_reg(s, rm);
8437 tmp2 = load_reg(s, rs);
8438 if (sh & 4)
8439 tcg_gen_sari_i32(tmp2, tmp2, 16);
8440 else
8441 gen_sxth(tmp2);
8442 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8443 tcg_gen_shri_i64(tmp64, tmp64, 16);
8444 tmp = tcg_temp_new_i32();
8445 tcg_gen_extrl_i64_i32(tmp, tmp64);
8446 tcg_temp_free_i64(tmp64);
8447 if ((sh & 2) == 0) {
8448 tmp2 = load_reg(s, rn);
8449 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8450 tcg_temp_free_i32(tmp2);
8452 store_reg(s, rd, tmp);
8453 } else {
8454 /* 16 * 16 */
8455 tmp = load_reg(s, rm);
8456 tmp2 = load_reg(s, rs);
8457 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
8458 tcg_temp_free_i32(tmp2);
8459 if (op1 == 2) {
8460 tmp64 = tcg_temp_new_i64();
8461 tcg_gen_ext_i32_i64(tmp64, tmp);
8462 tcg_temp_free_i32(tmp);
8463 gen_addq(s, tmp64, rn, rd);
8464 gen_storeq_reg(s, rn, rd, tmp64);
8465 tcg_temp_free_i64(tmp64);
8466 } else {
8467 if (op1 == 0) {
8468 tmp2 = load_reg(s, rn);
8469 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8470 tcg_temp_free_i32(tmp2);
8472 store_reg(s, rd, tmp);
8475 break;
8476 default:
8477 goto illegal_op;
8479 } else if (((insn & 0x0e000000) == 0 &&
8480 (insn & 0x00000090) != 0x90) ||
8481 ((insn & 0x0e000000) == (1 << 25))) {
8482 int set_cc, logic_cc, shiftop;
8484 op1 = (insn >> 21) & 0xf;
8485 set_cc = (insn >> 20) & 1;
8486 logic_cc = table_logic_cc[op1] & set_cc;
8488 /* data processing instruction */
8489 if (insn & (1 << 25)) {
8490 /* immediate operand */
8491 val = insn & 0xff;
8492 shift = ((insn >> 8) & 0xf) * 2;
8493 if (shift) {
8494 val = (val >> shift) | (val << (32 - shift));
8496 tmp2 = tcg_temp_new_i32();
8497 tcg_gen_movi_i32(tmp2, val);
8498 if (logic_cc && shift) {
8499 gen_set_CF_bit31(tmp2);
8501 } else {
8502 /* register */
8503 rm = (insn) & 0xf;
8504 tmp2 = load_reg(s, rm);
8505 shiftop = (insn >> 5) & 3;
8506 if (!(insn & (1 << 4))) {
8507 shift = (insn >> 7) & 0x1f;
8508 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8509 } else {
8510 rs = (insn >> 8) & 0xf;
8511 tmp = load_reg(s, rs);
8512 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
8515 if (op1 != 0x0f && op1 != 0x0d) {
8516 rn = (insn >> 16) & 0xf;
8517 tmp = load_reg(s, rn);
8518 } else {
8519 TCGV_UNUSED_I32(tmp);
8521 rd = (insn >> 12) & 0xf;
8522 switch(op1) {
8523 case 0x00:
8524 tcg_gen_and_i32(tmp, tmp, tmp2);
8525 if (logic_cc) {
8526 gen_logic_CC(tmp);
8528 store_reg_bx(s, rd, tmp);
8529 break;
8530 case 0x01:
8531 tcg_gen_xor_i32(tmp, tmp, tmp2);
8532 if (logic_cc) {
8533 gen_logic_CC(tmp);
8535 store_reg_bx(s, rd, tmp);
8536 break;
8537 case 0x02:
8538 if (set_cc && rd == 15) {
8539 /* SUBS r15, ... is used for exception return. */
8540 if (IS_USER(s)) {
8541 goto illegal_op;
8543 gen_sub_CC(tmp, tmp, tmp2);
8544 gen_exception_return(s, tmp);
8545 } else {
8546 if (set_cc) {
8547 gen_sub_CC(tmp, tmp, tmp2);
8548 } else {
8549 tcg_gen_sub_i32(tmp, tmp, tmp2);
8551 store_reg_bx(s, rd, tmp);
8553 break;
8554 case 0x03:
8555 if (set_cc) {
8556 gen_sub_CC(tmp, tmp2, tmp);
8557 } else {
8558 tcg_gen_sub_i32(tmp, tmp2, tmp);
8560 store_reg_bx(s, rd, tmp);
8561 break;
8562 case 0x04:
8563 if (set_cc) {
8564 gen_add_CC(tmp, tmp, tmp2);
8565 } else {
8566 tcg_gen_add_i32(tmp, tmp, tmp2);
8568 store_reg_bx(s, rd, tmp);
8569 break;
8570 case 0x05:
8571 if (set_cc) {
8572 gen_adc_CC(tmp, tmp, tmp2);
8573 } else {
8574 gen_add_carry(tmp, tmp, tmp2);
8576 store_reg_bx(s, rd, tmp);
8577 break;
8578 case 0x06:
8579 if (set_cc) {
8580 gen_sbc_CC(tmp, tmp, tmp2);
8581 } else {
8582 gen_sub_carry(tmp, tmp, tmp2);
8584 store_reg_bx(s, rd, tmp);
8585 break;
8586 case 0x07:
8587 if (set_cc) {
8588 gen_sbc_CC(tmp, tmp2, tmp);
8589 } else {
8590 gen_sub_carry(tmp, tmp2, tmp);
8592 store_reg_bx(s, rd, tmp);
8593 break;
8594 case 0x08:
8595 if (set_cc) {
8596 tcg_gen_and_i32(tmp, tmp, tmp2);
8597 gen_logic_CC(tmp);
8599 tcg_temp_free_i32(tmp);
8600 break;
8601 case 0x09:
8602 if (set_cc) {
8603 tcg_gen_xor_i32(tmp, tmp, tmp2);
8604 gen_logic_CC(tmp);
8606 tcg_temp_free_i32(tmp);
8607 break;
8608 case 0x0a:
8609 if (set_cc) {
8610 gen_sub_CC(tmp, tmp, tmp2);
8612 tcg_temp_free_i32(tmp);
8613 break;
8614 case 0x0b:
8615 if (set_cc) {
8616 gen_add_CC(tmp, tmp, tmp2);
8618 tcg_temp_free_i32(tmp);
8619 break;
8620 case 0x0c:
8621 tcg_gen_or_i32(tmp, tmp, tmp2);
8622 if (logic_cc) {
8623 gen_logic_CC(tmp);
8625 store_reg_bx(s, rd, tmp);
8626 break;
8627 case 0x0d:
8628 if (logic_cc && rd == 15) {
8629 /* MOVS r15, ... is used for exception return. */
8630 if (IS_USER(s)) {
8631 goto illegal_op;
8633 gen_exception_return(s, tmp2);
8634 } else {
8635 if (logic_cc) {
8636 gen_logic_CC(tmp2);
8638 store_reg_bx(s, rd, tmp2);
8640 break;
8641 case 0x0e:
8642 tcg_gen_andc_i32(tmp, tmp, tmp2);
8643 if (logic_cc) {
8644 gen_logic_CC(tmp);
8646 store_reg_bx(s, rd, tmp);
8647 break;
8648 default:
8649 case 0x0f:
8650 tcg_gen_not_i32(tmp2, tmp2);
8651 if (logic_cc) {
8652 gen_logic_CC(tmp2);
8654 store_reg_bx(s, rd, tmp2);
8655 break;
8657 if (op1 != 0x0f && op1 != 0x0d) {
8658 tcg_temp_free_i32(tmp2);
8660 } else {
8661 /* other instructions */
8662 op1 = (insn >> 24) & 0xf;
8663 switch(op1) {
8664 case 0x0:
8665 case 0x1:
8666 /* multiplies, extra load/stores */
8667 sh = (insn >> 5) & 3;
8668 if (sh == 0) {
8669 if (op1 == 0x0) {
8670 rd = (insn >> 16) & 0xf;
8671 rn = (insn >> 12) & 0xf;
8672 rs = (insn >> 8) & 0xf;
8673 rm = (insn) & 0xf;
8674 op1 = (insn >> 20) & 0xf;
8675 switch (op1) {
8676 case 0: case 1: case 2: case 3: case 6:
8677 /* 32 bit mul */
8678 tmp = load_reg(s, rs);
8679 tmp2 = load_reg(s, rm);
8680 tcg_gen_mul_i32(tmp, tmp, tmp2);
8681 tcg_temp_free_i32(tmp2);
8682 if (insn & (1 << 22)) {
8683 /* Subtract (mls) */
8684 ARCH(6T2);
8685 tmp2 = load_reg(s, rn);
8686 tcg_gen_sub_i32(tmp, tmp2, tmp);
8687 tcg_temp_free_i32(tmp2);
8688 } else if (insn & (1 << 21)) {
8689 /* Add */
8690 tmp2 = load_reg(s, rn);
8691 tcg_gen_add_i32(tmp, tmp, tmp2);
8692 tcg_temp_free_i32(tmp2);
8694 if (insn & (1 << 20))
8695 gen_logic_CC(tmp);
8696 store_reg(s, rd, tmp);
8697 break;
8698 case 4:
8699 /* 64 bit mul double accumulate (UMAAL) */
8700 ARCH(6);
8701 tmp = load_reg(s, rs);
8702 tmp2 = load_reg(s, rm);
8703 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8704 gen_addq_lo(s, tmp64, rn);
8705 gen_addq_lo(s, tmp64, rd);
8706 gen_storeq_reg(s, rn, rd, tmp64);
8707 tcg_temp_free_i64(tmp64);
8708 break;
8709 case 8: case 9: case 10: case 11:
8710 case 12: case 13: case 14: case 15:
8711 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8712 tmp = load_reg(s, rs);
8713 tmp2 = load_reg(s, rm);
8714 if (insn & (1 << 22)) {
8715 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8716 } else {
8717 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8719 if (insn & (1 << 21)) { /* mult accumulate */
8720 TCGv_i32 al = load_reg(s, rn);
8721 TCGv_i32 ah = load_reg(s, rd);
8722 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
8723 tcg_temp_free_i32(al);
8724 tcg_temp_free_i32(ah);
8726 if (insn & (1 << 20)) {
8727 gen_logicq_cc(tmp, tmp2);
8729 store_reg(s, rn, tmp);
8730 store_reg(s, rd, tmp2);
8731 break;
8732 default:
8733 goto illegal_op;
8735 } else {
8736 rn = (insn >> 16) & 0xf;
8737 rd = (insn >> 12) & 0xf;
8738 if (insn & (1 << 23)) {
8739 /* load/store exclusive */
8740 int op2 = (insn >> 8) & 3;
8741 op1 = (insn >> 21) & 0x3;
8743 switch (op2) {
8744 case 0: /* lda/stl */
8745 if (op1 == 1) {
8746 goto illegal_op;
8748 ARCH(8);
8749 break;
8750 case 1: /* reserved */
8751 goto illegal_op;
8752 case 2: /* ldaex/stlex */
8753 ARCH(8);
8754 break;
8755 case 3: /* ldrex/strex */
8756 if (op1) {
8757 ARCH(6K);
8758 } else {
8759 ARCH(6);
8761 break;
8764 addr = tcg_temp_local_new_i32();
8765 load_reg_var(s, addr, rn);
8767 /* Since the emulation does not have barriers,
8768 the acquire/release semantics need no special
8769 handling */
8770 if (op2 == 0) {
8771 if (insn & (1 << 20)) {
8772 tmp = tcg_temp_new_i32();
8773 switch (op1) {
8774 case 0: /* lda */
8775 gen_aa32_ld32u(s, tmp, addr,
8776 get_mem_index(s));
8777 break;
8778 case 2: /* ldab */
8779 gen_aa32_ld8u(s, tmp, addr,
8780 get_mem_index(s));
8781 break;
8782 case 3: /* ldah */
8783 gen_aa32_ld16u(s, tmp, addr,
8784 get_mem_index(s));
8785 break;
8786 default:
8787 abort();
8789 store_reg(s, rd, tmp);
8790 } else {
8791 rm = insn & 0xf;
8792 tmp = load_reg(s, rm);
8793 switch (op1) {
8794 case 0: /* stl */
8795 gen_aa32_st32(s, tmp, addr,
8796 get_mem_index(s));
8797 break;
8798 case 2: /* stlb */
8799 gen_aa32_st8(s, tmp, addr,
8800 get_mem_index(s));
8801 break;
8802 case 3: /* stlh */
8803 gen_aa32_st16(s, tmp, addr,
8804 get_mem_index(s));
8805 break;
8806 default:
8807 abort();
8809 tcg_temp_free_i32(tmp);
8811 } else if (insn & (1 << 20)) {
8812 switch (op1) {
8813 case 0: /* ldrex */
8814 gen_load_exclusive(s, rd, 15, addr, 2);
8815 break;
8816 case 1: /* ldrexd */
8817 gen_load_exclusive(s, rd, rd + 1, addr, 3);
8818 break;
8819 case 2: /* ldrexb */
8820 gen_load_exclusive(s, rd, 15, addr, 0);
8821 break;
8822 case 3: /* ldrexh */
8823 gen_load_exclusive(s, rd, 15, addr, 1);
8824 break;
8825 default:
8826 abort();
8828 } else {
8829 rm = insn & 0xf;
8830 switch (op1) {
8831 case 0: /* strex */
8832 gen_store_exclusive(s, rd, rm, 15, addr, 2);
8833 break;
8834 case 1: /* strexd */
8835 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
8836 break;
8837 case 2: /* strexb */
8838 gen_store_exclusive(s, rd, rm, 15, addr, 0);
8839 break;
8840 case 3: /* strexh */
8841 gen_store_exclusive(s, rd, rm, 15, addr, 1);
8842 break;
8843 default:
8844 abort();
8847 tcg_temp_free_i32(addr);
8848 } else {
8849 /* SWP instruction */
8850 rm = (insn) & 0xf;
8852 /* ??? This is not really atomic. However we know
8853 we never have multiple CPUs running in parallel,
8854 so it is good enough. */
8855 addr = load_reg(s, rn);
8856 tmp = load_reg(s, rm);
8857 tmp2 = tcg_temp_new_i32();
8858 if (insn & (1 << 22)) {
8859 gen_aa32_ld8u(s, tmp2, addr, get_mem_index(s));
8860 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
8861 } else {
8862 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
8863 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8865 tcg_temp_free_i32(tmp);
8866 tcg_temp_free_i32(addr);
8867 store_reg(s, rd, tmp2);
8870 } else {
8871 int address_offset;
8872 bool load = insn & (1 << 20);
8873 bool doubleword = false;
8874 /* Misc load/store */
8875 rn = (insn >> 16) & 0xf;
8876 rd = (insn >> 12) & 0xf;
8878 if (!load && (sh & 2)) {
8879 /* doubleword */
8880 ARCH(5TE);
8881 if (rd & 1) {
8882 /* UNPREDICTABLE; we choose to UNDEF */
8883 goto illegal_op;
8885 load = (sh & 1) == 0;
8886 doubleword = true;
8889 addr = load_reg(s, rn);
8890 if (insn & (1 << 24))
8891 gen_add_datah_offset(s, insn, 0, addr);
8892 address_offset = 0;
8894 if (doubleword) {
8895 if (!load) {
8896 /* store */
8897 tmp = load_reg(s, rd);
8898 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8899 tcg_temp_free_i32(tmp);
8900 tcg_gen_addi_i32(addr, addr, 4);
8901 tmp = load_reg(s, rd + 1);
8902 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8903 tcg_temp_free_i32(tmp);
8904 } else {
8905 /* load */
8906 tmp = tcg_temp_new_i32();
8907 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8908 store_reg(s, rd, tmp);
8909 tcg_gen_addi_i32(addr, addr, 4);
8910 tmp = tcg_temp_new_i32();
8911 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8912 rd++;
8914 address_offset = -4;
8915 } else if (load) {
8916 /* load */
8917 tmp = tcg_temp_new_i32();
8918 switch (sh) {
8919 case 1:
8920 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8921 break;
8922 case 2:
8923 gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
8924 break;
8925 default:
8926 case 3:
8927 gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
8928 break;
8930 } else {
8931 /* store */
8932 tmp = load_reg(s, rd);
8933 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
8934 tcg_temp_free_i32(tmp);
8936 /* Perform base writeback before the loaded value to
8937 ensure correct behavior with overlapping index registers.
8938 ldrd with base writeback is undefined if the
8939 destination and index registers overlap. */
8940 if (!(insn & (1 << 24))) {
8941 gen_add_datah_offset(s, insn, address_offset, addr);
8942 store_reg(s, rn, addr);
8943 } else if (insn & (1 << 21)) {
8944 if (address_offset)
8945 tcg_gen_addi_i32(addr, addr, address_offset);
8946 store_reg(s, rn, addr);
8947 } else {
8948 tcg_temp_free_i32(addr);
8950 if (load) {
8951 /* Complete the load. */
8952 store_reg(s, rd, tmp);
8955 break;
8956 case 0x4:
8957 case 0x5:
8958 goto do_ldst;
8959 case 0x6:
8960 case 0x7:
8961 if (insn & (1 << 4)) {
8962 ARCH(6);
8963 /* Armv6 Media instructions. */
8964 rm = insn & 0xf;
8965 rn = (insn >> 16) & 0xf;
8966 rd = (insn >> 12) & 0xf;
8967 rs = (insn >> 8) & 0xf;
8968 switch ((insn >> 23) & 3) {
8969 case 0: /* Parallel add/subtract. */
8970 op1 = (insn >> 20) & 7;
8971 tmp = load_reg(s, rn);
8972 tmp2 = load_reg(s, rm);
8973 sh = (insn >> 5) & 7;
8974 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8975 goto illegal_op;
8976 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
8977 tcg_temp_free_i32(tmp2);
8978 store_reg(s, rd, tmp);
8979 break;
8980 case 1:
8981 if ((insn & 0x00700020) == 0) {
8982 /* Halfword pack. */
8983 tmp = load_reg(s, rn);
8984 tmp2 = load_reg(s, rm);
8985 shift = (insn >> 7) & 0x1f;
8986 if (insn & (1 << 6)) {
8987 /* pkhtb */
8988 if (shift == 0)
8989 shift = 31;
8990 tcg_gen_sari_i32(tmp2, tmp2, shift);
8991 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8992 tcg_gen_ext16u_i32(tmp2, tmp2);
8993 } else {
8994 /* pkhbt */
8995 if (shift)
8996 tcg_gen_shli_i32(tmp2, tmp2, shift);
8997 tcg_gen_ext16u_i32(tmp, tmp);
8998 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9000 tcg_gen_or_i32(tmp, tmp, tmp2);
9001 tcg_temp_free_i32(tmp2);
9002 store_reg(s, rd, tmp);
9003 } else if ((insn & 0x00200020) == 0x00200000) {
9004 /* [us]sat */
9005 tmp = load_reg(s, rm);
9006 shift = (insn >> 7) & 0x1f;
9007 if (insn & (1 << 6)) {
9008 if (shift == 0)
9009 shift = 31;
9010 tcg_gen_sari_i32(tmp, tmp, shift);
9011 } else {
9012 tcg_gen_shli_i32(tmp, tmp, shift);
9014 sh = (insn >> 16) & 0x1f;
9015 tmp2 = tcg_const_i32(sh);
9016 if (insn & (1 << 22))
9017 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
9018 else
9019 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
9020 tcg_temp_free_i32(tmp2);
9021 store_reg(s, rd, tmp);
9022 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9023 /* [us]sat16 */
9024 tmp = load_reg(s, rm);
9025 sh = (insn >> 16) & 0x1f;
9026 tmp2 = tcg_const_i32(sh);
9027 if (insn & (1 << 22))
9028 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9029 else
9030 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9031 tcg_temp_free_i32(tmp2);
9032 store_reg(s, rd, tmp);
9033 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9034 /* Select bytes. */
9035 tmp = load_reg(s, rn);
9036 tmp2 = load_reg(s, rm);
9037 tmp3 = tcg_temp_new_i32();
9038 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
9039 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
9040 tcg_temp_free_i32(tmp3);
9041 tcg_temp_free_i32(tmp2);
9042 store_reg(s, rd, tmp);
9043 } else if ((insn & 0x000003e0) == 0x00000060) {
9044 tmp = load_reg(s, rm);
9045 shift = (insn >> 10) & 3;
9046 /* ??? In many cases it's not necessary to do a
9047 rotate, a shift is sufficient. */
9048 if (shift != 0)
9049 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9050 op1 = (insn >> 20) & 7;
9051 switch (op1) {
9052 case 0: gen_sxtb16(tmp); break;
9053 case 2: gen_sxtb(tmp); break;
9054 case 3: gen_sxth(tmp); break;
9055 case 4: gen_uxtb16(tmp); break;
9056 case 6: gen_uxtb(tmp); break;
9057 case 7: gen_uxth(tmp); break;
9058 default: goto illegal_op;
9060 if (rn != 15) {
9061 tmp2 = load_reg(s, rn);
9062 if ((op1 & 3) == 0) {
9063 gen_add16(tmp, tmp2);
9064 } else {
9065 tcg_gen_add_i32(tmp, tmp, tmp2);
9066 tcg_temp_free_i32(tmp2);
9069 store_reg(s, rd, tmp);
9070 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9071 /* rev */
9072 tmp = load_reg(s, rm);
9073 if (insn & (1 << 22)) {
9074 if (insn & (1 << 7)) {
9075 gen_revsh(tmp);
9076 } else {
9077 ARCH(6T2);
9078 gen_helper_rbit(tmp, tmp);
9080 } else {
9081 if (insn & (1 << 7))
9082 gen_rev16(tmp);
9083 else
9084 tcg_gen_bswap32_i32(tmp, tmp);
9086 store_reg(s, rd, tmp);
9087 } else {
9088 goto illegal_op;
9090 break;
9091 case 2: /* Multiplies (Type 3). */
9092 switch ((insn >> 20) & 0x7) {
9093 case 5:
9094 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9095 /* op2 not 00x or 11x : UNDEF */
9096 goto illegal_op;
9098 /* Signed multiply most significant [accumulate].
9099 (SMMUL, SMMLA, SMMLS) */
9100 tmp = load_reg(s, rm);
9101 tmp2 = load_reg(s, rs);
9102 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9104 if (rd != 15) {
9105 tmp = load_reg(s, rd);
9106 if (insn & (1 << 6)) {
9107 tmp64 = gen_subq_msw(tmp64, tmp);
9108 } else {
9109 tmp64 = gen_addq_msw(tmp64, tmp);
9112 if (insn & (1 << 5)) {
9113 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9115 tcg_gen_shri_i64(tmp64, tmp64, 32);
9116 tmp = tcg_temp_new_i32();
9117 tcg_gen_extrl_i64_i32(tmp, tmp64);
9118 tcg_temp_free_i64(tmp64);
9119 store_reg(s, rn, tmp);
9120 break;
9121 case 0:
9122 case 4:
9123 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9124 if (insn & (1 << 7)) {
9125 goto illegal_op;
9127 tmp = load_reg(s, rm);
9128 tmp2 = load_reg(s, rs);
9129 if (insn & (1 << 5))
9130 gen_swap_half(tmp2);
9131 gen_smul_dual(tmp, tmp2);
9132 if (insn & (1 << 22)) {
9133 /* smlald, smlsld */
9134 TCGv_i64 tmp64_2;
9136 tmp64 = tcg_temp_new_i64();
9137 tmp64_2 = tcg_temp_new_i64();
9138 tcg_gen_ext_i32_i64(tmp64, tmp);
9139 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
9140 tcg_temp_free_i32(tmp);
9141 tcg_temp_free_i32(tmp2);
9142 if (insn & (1 << 6)) {
9143 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9144 } else {
9145 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9147 tcg_temp_free_i64(tmp64_2);
9148 gen_addq(s, tmp64, rd, rn);
9149 gen_storeq_reg(s, rd, rn, tmp64);
9150 tcg_temp_free_i64(tmp64);
9151 } else {
9152 /* smuad, smusd, smlad, smlsd */
9153 if (insn & (1 << 6)) {
9154 /* This subtraction cannot overflow. */
9155 tcg_gen_sub_i32(tmp, tmp, tmp2);
9156 } else {
9157 /* This addition cannot overflow 32 bits;
9158 * however it may overflow considered as a
9159 * signed operation, in which case we must set
9160 * the Q flag.
9162 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9164 tcg_temp_free_i32(tmp2);
9165 if (rd != 15)
9167 tmp2 = load_reg(s, rd);
9168 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9169 tcg_temp_free_i32(tmp2);
9171 store_reg(s, rn, tmp);
9173 break;
9174 case 1:
9175 case 3:
9176 /* SDIV, UDIV */
9177 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
9178 goto illegal_op;
9180 if (((insn >> 5) & 7) || (rd != 15)) {
9181 goto illegal_op;
9183 tmp = load_reg(s, rm);
9184 tmp2 = load_reg(s, rs);
9185 if (insn & (1 << 21)) {
9186 gen_helper_udiv(tmp, tmp, tmp2);
9187 } else {
9188 gen_helper_sdiv(tmp, tmp, tmp2);
9190 tcg_temp_free_i32(tmp2);
9191 store_reg(s, rn, tmp);
9192 break;
9193 default:
9194 goto illegal_op;
9196 break;
9197 case 3:
9198 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9199 switch (op1) {
9200 case 0: /* Unsigned sum of absolute differences. */
9201 ARCH(6);
9202 tmp = load_reg(s, rm);
9203 tmp2 = load_reg(s, rs);
9204 gen_helper_usad8(tmp, tmp, tmp2);
9205 tcg_temp_free_i32(tmp2);
9206 if (rd != 15) {
9207 tmp2 = load_reg(s, rd);
9208 tcg_gen_add_i32(tmp, tmp, tmp2);
9209 tcg_temp_free_i32(tmp2);
9211 store_reg(s, rn, tmp);
9212 break;
9213 case 0x20: case 0x24: case 0x28: case 0x2c:
9214 /* Bitfield insert/clear. */
9215 ARCH(6T2);
9216 shift = (insn >> 7) & 0x1f;
9217 i = (insn >> 16) & 0x1f;
9218 if (i < shift) {
9219 /* UNPREDICTABLE; we choose to UNDEF */
9220 goto illegal_op;
9222 i = i + 1 - shift;
9223 if (rm == 15) {
9224 tmp = tcg_temp_new_i32();
9225 tcg_gen_movi_i32(tmp, 0);
9226 } else {
9227 tmp = load_reg(s, rm);
9229 if (i != 32) {
9230 tmp2 = load_reg(s, rd);
9231 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
9232 tcg_temp_free_i32(tmp2);
9234 store_reg(s, rd, tmp);
9235 break;
9236 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9237 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
9238 ARCH(6T2);
9239 tmp = load_reg(s, rm);
9240 shift = (insn >> 7) & 0x1f;
9241 i = ((insn >> 16) & 0x1f) + 1;
9242 if (shift + i > 32)
9243 goto illegal_op;
9244 if (i < 32) {
9245 if (op1 & 0x20) {
9246 gen_ubfx(tmp, shift, (1u << i) - 1);
9247 } else {
9248 gen_sbfx(tmp, shift, i);
9251 store_reg(s, rd, tmp);
9252 break;
9253 default:
9254 goto illegal_op;
9256 break;
9258 break;
9260 do_ldst:
9261 /* Check for undefined extension instructions
9262 * per the ARM Bible IE:
9263 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9265 sh = (0xf << 20) | (0xf << 4);
9266 if (op1 == 0x7 && ((insn & sh) == sh))
9268 goto illegal_op;
9270 /* load/store byte/word */
9271 rn = (insn >> 16) & 0xf;
9272 rd = (insn >> 12) & 0xf;
9273 tmp2 = load_reg(s, rn);
9274 if ((insn & 0x01200000) == 0x00200000) {
9275 /* ldrt/strt */
9276 i = get_a32_user_mem_index(s);
9277 } else {
9278 i = get_mem_index(s);
9280 if (insn & (1 << 24))
9281 gen_add_data_offset(s, insn, tmp2);
9282 if (insn & (1 << 20)) {
9283 /* load */
9284 tmp = tcg_temp_new_i32();
9285 if (insn & (1 << 22)) {
9286 gen_aa32_ld8u(s, tmp, tmp2, i);
9287 } else {
9288 gen_aa32_ld32u(s, tmp, tmp2, i);
9290 } else {
9291 /* store */
9292 tmp = load_reg(s, rd);
9293 if (insn & (1 << 22)) {
9294 gen_aa32_st8(s, tmp, tmp2, i);
9295 } else {
9296 gen_aa32_st32(s, tmp, tmp2, i);
9298 tcg_temp_free_i32(tmp);
9300 if (!(insn & (1 << 24))) {
9301 gen_add_data_offset(s, insn, tmp2);
9302 store_reg(s, rn, tmp2);
9303 } else if (insn & (1 << 21)) {
9304 store_reg(s, rn, tmp2);
9305 } else {
9306 tcg_temp_free_i32(tmp2);
9308 if (insn & (1 << 20)) {
9309 /* Complete the load. */
9310 store_reg_from_load(s, rd, tmp);
9312 break;
9313 case 0x08:
9314 case 0x09:
9316 int j, n, loaded_base;
9317 bool exc_return = false;
9318 bool is_load = extract32(insn, 20, 1);
9319 bool user = false;
9320 TCGv_i32 loaded_var;
9321 /* load/store multiple words */
9322 /* XXX: store correct base if write back */
9323 if (insn & (1 << 22)) {
9324 /* LDM (user), LDM (exception return) and STM (user) */
9325 if (IS_USER(s))
9326 goto illegal_op; /* only usable in supervisor mode */
9328 if (is_load && extract32(insn, 15, 1)) {
9329 exc_return = true;
9330 } else {
9331 user = true;
9334 rn = (insn >> 16) & 0xf;
9335 addr = load_reg(s, rn);
9337 /* compute total size */
9338 loaded_base = 0;
9339 TCGV_UNUSED_I32(loaded_var);
9340 n = 0;
9341 for(i=0;i<16;i++) {
9342 if (insn & (1 << i))
9343 n++;
9345 /* XXX: test invalid n == 0 case ? */
9346 if (insn & (1 << 23)) {
9347 if (insn & (1 << 24)) {
9348 /* pre increment */
9349 tcg_gen_addi_i32(addr, addr, 4);
9350 } else {
9351 /* post increment */
9353 } else {
9354 if (insn & (1 << 24)) {
9355 /* pre decrement */
9356 tcg_gen_addi_i32(addr, addr, -(n * 4));
9357 } else {
9358 /* post decrement */
9359 if (n != 1)
9360 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9363 j = 0;
9364 for(i=0;i<16;i++) {
9365 if (insn & (1 << i)) {
9366 if (is_load) {
9367 /* load */
9368 tmp = tcg_temp_new_i32();
9369 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9370 if (user) {
9371 tmp2 = tcg_const_i32(i);
9372 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
9373 tcg_temp_free_i32(tmp2);
9374 tcg_temp_free_i32(tmp);
9375 } else if (i == rn) {
9376 loaded_var = tmp;
9377 loaded_base = 1;
9378 } else if (rn == 15 && exc_return) {
9379 store_pc_exc_ret(s, tmp);
9380 } else {
9381 store_reg_from_load(s, i, tmp);
9383 } else {
9384 /* store */
9385 if (i == 15) {
9386 /* special case: r15 = PC + 8 */
9387 val = (long)s->pc + 4;
9388 tmp = tcg_temp_new_i32();
9389 tcg_gen_movi_i32(tmp, val);
9390 } else if (user) {
9391 tmp = tcg_temp_new_i32();
9392 tmp2 = tcg_const_i32(i);
9393 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
9394 tcg_temp_free_i32(tmp2);
9395 } else {
9396 tmp = load_reg(s, i);
9398 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9399 tcg_temp_free_i32(tmp);
9401 j++;
9402 /* no need to add after the last transfer */
9403 if (j != n)
9404 tcg_gen_addi_i32(addr, addr, 4);
9407 if (insn & (1 << 21)) {
9408 /* write back */
9409 if (insn & (1 << 23)) {
9410 if (insn & (1 << 24)) {
9411 /* pre increment */
9412 } else {
9413 /* post increment */
9414 tcg_gen_addi_i32(addr, addr, 4);
9416 } else {
9417 if (insn & (1 << 24)) {
9418 /* pre decrement */
9419 if (n != 1)
9420 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9421 } else {
9422 /* post decrement */
9423 tcg_gen_addi_i32(addr, addr, -(n * 4));
9426 store_reg(s, rn, addr);
9427 } else {
9428 tcg_temp_free_i32(addr);
9430 if (loaded_base) {
9431 store_reg(s, rn, loaded_var);
9433 if (exc_return) {
9434 /* Restore CPSR from SPSR. */
9435 tmp = load_cpu_field(spsr);
9436 gen_helper_cpsr_write_eret(cpu_env, tmp);
9437 tcg_temp_free_i32(tmp);
9438 s->is_jmp = DISAS_JUMP;
9441 break;
9442 case 0xa:
9443 case 0xb:
9445 int32_t offset;
9447 /* branch (and link) */
9448 val = (int32_t)s->pc;
9449 if (insn & (1 << 24)) {
9450 tmp = tcg_temp_new_i32();
9451 tcg_gen_movi_i32(tmp, val);
9452 store_reg(s, 14, tmp);
9454 offset = sextract32(insn << 2, 0, 26);
9455 val += offset + 4;
9456 gen_jmp(s, val);
9458 break;
9459 case 0xc:
9460 case 0xd:
9461 case 0xe:
9462 if (((insn >> 8) & 0xe) == 10) {
9463 /* VFP. */
9464 if (disas_vfp_insn(s, insn)) {
9465 goto illegal_op;
9467 } else if (disas_coproc_insn(s, insn)) {
9468 /* Coprocessor. */
9469 goto illegal_op;
9471 break;
9472 case 0xf:
9473 /* swi */
9474 gen_set_pc_im(s, s->pc);
9475 s->svc_imm = extract32(insn, 0, 24);
9476 s->is_jmp = DISAS_SWI;
9477 break;
9478 default:
9479 illegal_op:
9480 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9481 default_exception_el(s));
9482 break;
/* Return true (nonzero) if OP is one of the Thumb-2 logical data-processing
   opcodes (AND/BIC/ORR/ORN/EOR and the test forms), i.e. opcode values
   below 8.  These are the ops whose flag-setting variants update N/Z/C
   via gen_logic_CC() rather than the arithmetic flag helpers.  */
static int
thumb2_logic_op(int op)
{
    return op <= 7;
}
9494 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9495 then set condition code flags based on the result of the operation.
9496 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9497 to the high bit of T1.
9498 Returns zero if the opcode is valid. */
9500 static int
9501 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9502 TCGv_i32 t0, TCGv_i32 t1)
9504 int logic_cc;
9506 logic_cc = 0;
9507 switch (op) {
9508 case 0: /* and */
9509 tcg_gen_and_i32(t0, t0, t1);
9510 logic_cc = conds;
9511 break;
9512 case 1: /* bic */
9513 tcg_gen_andc_i32(t0, t0, t1);
9514 logic_cc = conds;
9515 break;
9516 case 2: /* orr */
9517 tcg_gen_or_i32(t0, t0, t1);
9518 logic_cc = conds;
9519 break;
9520 case 3: /* orn */
9521 tcg_gen_orc_i32(t0, t0, t1);
9522 logic_cc = conds;
9523 break;
9524 case 4: /* eor */
9525 tcg_gen_xor_i32(t0, t0, t1);
9526 logic_cc = conds;
9527 break;
9528 case 8: /* add */
9529 if (conds)
9530 gen_add_CC(t0, t0, t1);
9531 else
9532 tcg_gen_add_i32(t0, t0, t1);
9533 break;
9534 case 10: /* adc */
9535 if (conds)
9536 gen_adc_CC(t0, t0, t1);
9537 else
9538 gen_adc(t0, t1);
9539 break;
9540 case 11: /* sbc */
9541 if (conds) {
9542 gen_sbc_CC(t0, t0, t1);
9543 } else {
9544 gen_sub_carry(t0, t0, t1);
9546 break;
9547 case 13: /* sub */
9548 if (conds)
9549 gen_sub_CC(t0, t0, t1);
9550 else
9551 tcg_gen_sub_i32(t0, t0, t1);
9552 break;
9553 case 14: /* rsb */
9554 if (conds)
9555 gen_sub_CC(t0, t1, t0);
9556 else
9557 tcg_gen_sub_i32(t0, t1, t0);
9558 break;
9559 default: /* 5, 6, 7, 9, 12, 15. */
9560 return 1;
9562 if (logic_cc) {
9563 gen_logic_CC(t0);
9564 if (shifter_out)
9565 gen_set_CF_bit31(t1);
9567 return 0;
9570 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9571 is not legal. */
9572 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9574 uint32_t insn, imm, shift, offset;
9575 uint32_t rd, rn, rm, rs;
9576 TCGv_i32 tmp;
9577 TCGv_i32 tmp2;
9578 TCGv_i32 tmp3;
9579 TCGv_i32 addr;
9580 TCGv_i64 tmp64;
9581 int op;
9582 int shiftop;
9583 int conds;
9584 int logic_cc;
9586 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9587 || arm_dc_feature(s, ARM_FEATURE_M))) {
9588 /* Thumb-1 cores may need to treat bl and blx as a pair of
9589 16-bit instructions to get correct prefetch abort behavior. */
9590 insn = insn_hw1;
9591 if ((insn & (1 << 12)) == 0) {
9592 ARCH(5);
9593 /* Second half of blx. */
9594 offset = ((insn & 0x7ff) << 1);
9595 tmp = load_reg(s, 14);
9596 tcg_gen_addi_i32(tmp, tmp, offset);
9597 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9599 tmp2 = tcg_temp_new_i32();
9600 tcg_gen_movi_i32(tmp2, s->pc | 1);
9601 store_reg(s, 14, tmp2);
9602 gen_bx(s, tmp);
9603 return 0;
9605 if (insn & (1 << 11)) {
9606 /* Second half of bl. */
9607 offset = ((insn & 0x7ff) << 1) | 1;
9608 tmp = load_reg(s, 14);
9609 tcg_gen_addi_i32(tmp, tmp, offset);
9611 tmp2 = tcg_temp_new_i32();
9612 tcg_gen_movi_i32(tmp2, s->pc | 1);
9613 store_reg(s, 14, tmp2);
9614 gen_bx(s, tmp);
9615 return 0;
9617 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9618 /* Instruction spans a page boundary. Implement it as two
9619 16-bit instructions in case the second half causes an
9620 prefetch abort. */
9621 offset = ((int32_t)insn << 21) >> 9;
9622 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9623 return 0;
9625 /* Fall through to 32-bit decode. */
9628 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
9629 s->pc += 2;
9630 insn |= (uint32_t)insn_hw1 << 16;
9632 if ((insn & 0xf800e800) != 0xf000e800) {
9633 ARCH(6T2);
9636 rn = (insn >> 16) & 0xf;
9637 rs = (insn >> 12) & 0xf;
9638 rd = (insn >> 8) & 0xf;
9639 rm = insn & 0xf;
9640 switch ((insn >> 25) & 0xf) {
9641 case 0: case 1: case 2: case 3:
9642 /* 16-bit instructions. Should never happen. */
9643 abort();
9644 case 4:
9645 if (insn & (1 << 22)) {
9646 /* Other load/store, table branch. */
9647 if (insn & 0x01200000) {
9648 /* Load/store doubleword. */
9649 if (rn == 15) {
9650 addr = tcg_temp_new_i32();
9651 tcg_gen_movi_i32(addr, s->pc & ~3);
9652 } else {
9653 addr = load_reg(s, rn);
9655 offset = (insn & 0xff) * 4;
9656 if ((insn & (1 << 23)) == 0)
9657 offset = -offset;
9658 if (insn & (1 << 24)) {
9659 tcg_gen_addi_i32(addr, addr, offset);
9660 offset = 0;
9662 if (insn & (1 << 20)) {
9663 /* ldrd */
9664 tmp = tcg_temp_new_i32();
9665 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9666 store_reg(s, rs, tmp);
9667 tcg_gen_addi_i32(addr, addr, 4);
9668 tmp = tcg_temp_new_i32();
9669 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9670 store_reg(s, rd, tmp);
9671 } else {
9672 /* strd */
9673 tmp = load_reg(s, rs);
9674 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9675 tcg_temp_free_i32(tmp);
9676 tcg_gen_addi_i32(addr, addr, 4);
9677 tmp = load_reg(s, rd);
9678 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9679 tcg_temp_free_i32(tmp);
9681 if (insn & (1 << 21)) {
9682 /* Base writeback. */
9683 if (rn == 15)
9684 goto illegal_op;
9685 tcg_gen_addi_i32(addr, addr, offset - 4);
9686 store_reg(s, rn, addr);
9687 } else {
9688 tcg_temp_free_i32(addr);
9690 } else if ((insn & (1 << 23)) == 0) {
9691 /* Load/store exclusive word. */
9692 addr = tcg_temp_local_new_i32();
9693 load_reg_var(s, addr, rn);
9694 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
9695 if (insn & (1 << 20)) {
9696 gen_load_exclusive(s, rs, 15, addr, 2);
9697 } else {
9698 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9700 tcg_temp_free_i32(addr);
9701 } else if ((insn & (7 << 5)) == 0) {
9702 /* Table Branch. */
9703 if (rn == 15) {
9704 addr = tcg_temp_new_i32();
9705 tcg_gen_movi_i32(addr, s->pc);
9706 } else {
9707 addr = load_reg(s, rn);
9709 tmp = load_reg(s, rm);
9710 tcg_gen_add_i32(addr, addr, tmp);
9711 if (insn & (1 << 4)) {
9712 /* tbh */
9713 tcg_gen_add_i32(addr, addr, tmp);
9714 tcg_temp_free_i32(tmp);
9715 tmp = tcg_temp_new_i32();
9716 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9717 } else { /* tbb */
9718 tcg_temp_free_i32(tmp);
9719 tmp = tcg_temp_new_i32();
9720 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9722 tcg_temp_free_i32(addr);
9723 tcg_gen_shli_i32(tmp, tmp, 1);
9724 tcg_gen_addi_i32(tmp, tmp, s->pc);
9725 store_reg(s, 15, tmp);
9726 } else {
9727 int op2 = (insn >> 6) & 0x3;
9728 op = (insn >> 4) & 0x3;
9729 switch (op2) {
9730 case 0:
9731 goto illegal_op;
9732 case 1:
9733 /* Load/store exclusive byte/halfword/doubleword */
9734 if (op == 2) {
9735 goto illegal_op;
9737 ARCH(7);
9738 break;
9739 case 2:
9740 /* Load-acquire/store-release */
9741 if (op == 3) {
9742 goto illegal_op;
9744 /* Fall through */
9745 case 3:
9746 /* Load-acquire/store-release exclusive */
9747 ARCH(8);
9748 break;
9750 addr = tcg_temp_local_new_i32();
9751 load_reg_var(s, addr, rn);
9752 if (!(op2 & 1)) {
9753 if (insn & (1 << 20)) {
9754 tmp = tcg_temp_new_i32();
9755 switch (op) {
9756 case 0: /* ldab */
9757 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9758 break;
9759 case 1: /* ldah */
9760 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9761 break;
9762 case 2: /* lda */
9763 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9764 break;
9765 default:
9766 abort();
9768 store_reg(s, rs, tmp);
9769 } else {
9770 tmp = load_reg(s, rs);
9771 switch (op) {
9772 case 0: /* stlb */
9773 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
9774 break;
9775 case 1: /* stlh */
9776 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
9777 break;
9778 case 2: /* stl */
9779 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9780 break;
9781 default:
9782 abort();
9784 tcg_temp_free_i32(tmp);
9786 } else if (insn & (1 << 20)) {
9787 gen_load_exclusive(s, rs, rd, addr, op);
9788 } else {
9789 gen_store_exclusive(s, rm, rs, rd, addr, op);
9791 tcg_temp_free_i32(addr);
9793 } else {
9794 /* Load/store multiple, RFE, SRS. */
9795 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
9796 /* RFE, SRS: not available in user mode or on M profile */
9797 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9798 goto illegal_op;
9800 if (insn & (1 << 20)) {
9801 /* rfe */
9802 addr = load_reg(s, rn);
9803 if ((insn & (1 << 24)) == 0)
9804 tcg_gen_addi_i32(addr, addr, -8);
9805 /* Load PC into tmp and CPSR into tmp2. */
9806 tmp = tcg_temp_new_i32();
9807 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9808 tcg_gen_addi_i32(addr, addr, 4);
9809 tmp2 = tcg_temp_new_i32();
9810 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9811 if (insn & (1 << 21)) {
9812 /* Base writeback. */
9813 if (insn & (1 << 24)) {
9814 tcg_gen_addi_i32(addr, addr, 4);
9815 } else {
9816 tcg_gen_addi_i32(addr, addr, -4);
9818 store_reg(s, rn, addr);
9819 } else {
9820 tcg_temp_free_i32(addr);
9822 gen_rfe(s, tmp, tmp2);
9823 } else {
9824 /* srs */
9825 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9826 insn & (1 << 21));
9828 } else {
9829 int i, loaded_base = 0;
9830 TCGv_i32 loaded_var;
9831 /* Load/store multiple. */
9832 addr = load_reg(s, rn);
9833 offset = 0;
9834 for (i = 0; i < 16; i++) {
9835 if (insn & (1 << i))
9836 offset += 4;
9838 if (insn & (1 << 24)) {
9839 tcg_gen_addi_i32(addr, addr, -offset);
9842 TCGV_UNUSED_I32(loaded_var);
9843 for (i = 0; i < 16; i++) {
9844 if ((insn & (1 << i)) == 0)
9845 continue;
9846 if (insn & (1 << 20)) {
9847 /* Load. */
9848 tmp = tcg_temp_new_i32();
9849 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9850 if (i == 15) {
9851 gen_bx(s, tmp);
9852 } else if (i == rn) {
9853 loaded_var = tmp;
9854 loaded_base = 1;
9855 } else {
9856 store_reg(s, i, tmp);
9858 } else {
9859 /* Store. */
9860 tmp = load_reg(s, i);
9861 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9862 tcg_temp_free_i32(tmp);
9864 tcg_gen_addi_i32(addr, addr, 4);
9866 if (loaded_base) {
9867 store_reg(s, rn, loaded_var);
9869 if (insn & (1 << 21)) {
9870 /* Base register writeback. */
9871 if (insn & (1 << 24)) {
9872 tcg_gen_addi_i32(addr, addr, -offset);
9874 /* Fault if writeback register is in register list. */
9875 if (insn & (1 << rn))
9876 goto illegal_op;
9877 store_reg(s, rn, addr);
9878 } else {
9879 tcg_temp_free_i32(addr);
9883 break;
9884 case 5:
9886 op = (insn >> 21) & 0xf;
9887 if (op == 6) {
9888 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9889 goto illegal_op;
9891 /* Halfword pack. */
9892 tmp = load_reg(s, rn);
9893 tmp2 = load_reg(s, rm);
9894 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9895 if (insn & (1 << 5)) {
9896 /* pkhtb */
9897 if (shift == 0)
9898 shift = 31;
9899 tcg_gen_sari_i32(tmp2, tmp2, shift);
9900 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9901 tcg_gen_ext16u_i32(tmp2, tmp2);
9902 } else {
9903 /* pkhbt */
9904 if (shift)
9905 tcg_gen_shli_i32(tmp2, tmp2, shift);
9906 tcg_gen_ext16u_i32(tmp, tmp);
9907 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9909 tcg_gen_or_i32(tmp, tmp, tmp2);
9910 tcg_temp_free_i32(tmp2);
9911 store_reg(s, rd, tmp);
9912 } else {
9913 /* Data processing register constant shift. */
9914 if (rn == 15) {
9915 tmp = tcg_temp_new_i32();
9916 tcg_gen_movi_i32(tmp, 0);
9917 } else {
9918 tmp = load_reg(s, rn);
9920 tmp2 = load_reg(s, rm);
9922 shiftop = (insn >> 4) & 3;
9923 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9924 conds = (insn & (1 << 20)) != 0;
9925 logic_cc = (conds && thumb2_logic_op(op));
9926 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9927 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9928 goto illegal_op;
9929 tcg_temp_free_i32(tmp2);
9930 if (rd != 15) {
9931 store_reg(s, rd, tmp);
9932 } else {
9933 tcg_temp_free_i32(tmp);
9936 break;
9937 case 13: /* Misc data processing. */
9938 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9939 if (op < 4 && (insn & 0xf000) != 0xf000)
9940 goto illegal_op;
9941 switch (op) {
9942 case 0: /* Register controlled shift. */
9943 tmp = load_reg(s, rn);
9944 tmp2 = load_reg(s, rm);
9945 if ((insn & 0x70) != 0)
9946 goto illegal_op;
9947 op = (insn >> 21) & 3;
9948 logic_cc = (insn & (1 << 20)) != 0;
9949 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9950 if (logic_cc)
9951 gen_logic_CC(tmp);
9952 store_reg_bx(s, rd, tmp);
9953 break;
9954 case 1: /* Sign/zero extend. */
9955 op = (insn >> 20) & 7;
9956 switch (op) {
9957 case 0: /* SXTAH, SXTH */
9958 case 1: /* UXTAH, UXTH */
9959 case 4: /* SXTAB, SXTB */
9960 case 5: /* UXTAB, UXTB */
9961 break;
9962 case 2: /* SXTAB16, SXTB16 */
9963 case 3: /* UXTAB16, UXTB16 */
9964 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9965 goto illegal_op;
9967 break;
9968 default:
9969 goto illegal_op;
9971 if (rn != 15) {
9972 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9973 goto illegal_op;
9976 tmp = load_reg(s, rm);
9977 shift = (insn >> 4) & 3;
9978 /* ??? In many cases it's not necessary to do a
9979 rotate, a shift is sufficient. */
9980 if (shift != 0)
9981 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9982 op = (insn >> 20) & 7;
9983 switch (op) {
9984 case 0: gen_sxth(tmp); break;
9985 case 1: gen_uxth(tmp); break;
9986 case 2: gen_sxtb16(tmp); break;
9987 case 3: gen_uxtb16(tmp); break;
9988 case 4: gen_sxtb(tmp); break;
9989 case 5: gen_uxtb(tmp); break;
9990 default:
9991 g_assert_not_reached();
9993 if (rn != 15) {
9994 tmp2 = load_reg(s, rn);
9995 if ((op >> 1) == 1) {
9996 gen_add16(tmp, tmp2);
9997 } else {
9998 tcg_gen_add_i32(tmp, tmp, tmp2);
9999 tcg_temp_free_i32(tmp2);
10002 store_reg(s, rd, tmp);
10003 break;
10004 case 2: /* SIMD add/subtract. */
10005 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10006 goto illegal_op;
10008 op = (insn >> 20) & 7;
10009 shift = (insn >> 4) & 7;
10010 if ((op & 3) == 3 || (shift & 3) == 3)
10011 goto illegal_op;
10012 tmp = load_reg(s, rn);
10013 tmp2 = load_reg(s, rm);
10014 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
10015 tcg_temp_free_i32(tmp2);
10016 store_reg(s, rd, tmp);
10017 break;
10018 case 3: /* Other data processing. */
10019 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10020 if (op < 4) {
10021 /* Saturating add/subtract. */
10022 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10023 goto illegal_op;
10025 tmp = load_reg(s, rn);
10026 tmp2 = load_reg(s, rm);
10027 if (op & 1)
10028 gen_helper_double_saturate(tmp, cpu_env, tmp);
10029 if (op & 2)
10030 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
10031 else
10032 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
10033 tcg_temp_free_i32(tmp2);
10034 } else {
10035 switch (op) {
10036 case 0x0a: /* rbit */
10037 case 0x08: /* rev */
10038 case 0x09: /* rev16 */
10039 case 0x0b: /* revsh */
10040 case 0x18: /* clz */
10041 break;
10042 case 0x10: /* sel */
10043 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10044 goto illegal_op;
10046 break;
10047 case 0x20: /* crc32/crc32c */
10048 case 0x21:
10049 case 0x22:
10050 case 0x28:
10051 case 0x29:
10052 case 0x2a:
10053 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10054 goto illegal_op;
10056 break;
10057 default:
10058 goto illegal_op;
10060 tmp = load_reg(s, rn);
10061 switch (op) {
10062 case 0x0a: /* rbit */
10063 gen_helper_rbit(tmp, tmp);
10064 break;
10065 case 0x08: /* rev */
10066 tcg_gen_bswap32_i32(tmp, tmp);
10067 break;
10068 case 0x09: /* rev16 */
10069 gen_rev16(tmp);
10070 break;
10071 case 0x0b: /* revsh */
10072 gen_revsh(tmp);
10073 break;
10074 case 0x10: /* sel */
10075 tmp2 = load_reg(s, rm);
10076 tmp3 = tcg_temp_new_i32();
10077 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
10078 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
10079 tcg_temp_free_i32(tmp3);
10080 tcg_temp_free_i32(tmp2);
10081 break;
10082 case 0x18: /* clz */
10083 gen_helper_clz(tmp, tmp);
10084 break;
10085 case 0x20:
10086 case 0x21:
10087 case 0x22:
10088 case 0x28:
10089 case 0x29:
10090 case 0x2a:
10092 /* crc32/crc32c */
10093 uint32_t sz = op & 0x3;
10094 uint32_t c = op & 0x8;
10096 tmp2 = load_reg(s, rm);
10097 if (sz == 0) {
10098 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10099 } else if (sz == 1) {
10100 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10102 tmp3 = tcg_const_i32(1 << sz);
10103 if (c) {
10104 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10105 } else {
10106 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10108 tcg_temp_free_i32(tmp2);
10109 tcg_temp_free_i32(tmp3);
10110 break;
10112 default:
10113 g_assert_not_reached();
10116 store_reg(s, rd, tmp);
10117 break;
10118 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
10119 switch ((insn >> 20) & 7) {
10120 case 0: /* 32 x 32 -> 32 */
10121 case 7: /* Unsigned sum of absolute differences. */
10122 break;
10123 case 1: /* 16 x 16 -> 32 */
10124 case 2: /* Dual multiply add. */
10125 case 3: /* 32 * 16 -> 32msb */
10126 case 4: /* Dual multiply subtract. */
10127 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10128 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10129 goto illegal_op;
10131 break;
10133 op = (insn >> 4) & 0xf;
10134 tmp = load_reg(s, rn);
10135 tmp2 = load_reg(s, rm);
10136 switch ((insn >> 20) & 7) {
10137 case 0: /* 32 x 32 -> 32 */
10138 tcg_gen_mul_i32(tmp, tmp, tmp2);
10139 tcg_temp_free_i32(tmp2);
10140 if (rs != 15) {
10141 tmp2 = load_reg(s, rs);
10142 if (op)
10143 tcg_gen_sub_i32(tmp, tmp2, tmp);
10144 else
10145 tcg_gen_add_i32(tmp, tmp, tmp2);
10146 tcg_temp_free_i32(tmp2);
10148 break;
10149 case 1: /* 16 x 16 -> 32 */
10150 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10151 tcg_temp_free_i32(tmp2);
10152 if (rs != 15) {
10153 tmp2 = load_reg(s, rs);
10154 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10155 tcg_temp_free_i32(tmp2);
10157 break;
10158 case 2: /* Dual multiply add. */
10159 case 4: /* Dual multiply subtract. */
10160 if (op)
10161 gen_swap_half(tmp2);
10162 gen_smul_dual(tmp, tmp2);
10163 if (insn & (1 << 22)) {
10164 /* This subtraction cannot overflow. */
10165 tcg_gen_sub_i32(tmp, tmp, tmp2);
10166 } else {
10167 /* This addition cannot overflow 32 bits;
10168 * however it may overflow considered as a signed
10169 * operation, in which case we must set the Q flag.
10171 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10173 tcg_temp_free_i32(tmp2);
10174 if (rs != 15)
10176 tmp2 = load_reg(s, rs);
10177 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10178 tcg_temp_free_i32(tmp2);
10180 break;
10181 case 3: /* 32 * 16 -> 32msb */
10182 if (op)
10183 tcg_gen_sari_i32(tmp2, tmp2, 16);
10184 else
10185 gen_sxth(tmp2);
10186 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10187 tcg_gen_shri_i64(tmp64, tmp64, 16);
10188 tmp = tcg_temp_new_i32();
10189 tcg_gen_extrl_i64_i32(tmp, tmp64);
10190 tcg_temp_free_i64(tmp64);
10191 if (rs != 15)
10193 tmp2 = load_reg(s, rs);
10194 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10195 tcg_temp_free_i32(tmp2);
10197 break;
10198 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10199 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10200 if (rs != 15) {
10201 tmp = load_reg(s, rs);
10202 if (insn & (1 << 20)) {
10203 tmp64 = gen_addq_msw(tmp64, tmp);
10204 } else {
10205 tmp64 = gen_subq_msw(tmp64, tmp);
10208 if (insn & (1 << 4)) {
10209 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10211 tcg_gen_shri_i64(tmp64, tmp64, 32);
10212 tmp = tcg_temp_new_i32();
10213 tcg_gen_extrl_i64_i32(tmp, tmp64);
10214 tcg_temp_free_i64(tmp64);
10215 break;
10216 case 7: /* Unsigned sum of absolute differences. */
10217 gen_helper_usad8(tmp, tmp, tmp2);
10218 tcg_temp_free_i32(tmp2);
10219 if (rs != 15) {
10220 tmp2 = load_reg(s, rs);
10221 tcg_gen_add_i32(tmp, tmp, tmp2);
10222 tcg_temp_free_i32(tmp2);
10224 break;
10226 store_reg(s, rd, tmp);
10227 break;
10228 case 6: case 7: /* 64-bit multiply, Divide. */
10229 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
10230 tmp = load_reg(s, rn);
10231 tmp2 = load_reg(s, rm);
10232 if ((op & 0x50) == 0x10) {
10233 /* sdiv, udiv */
10234 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
10235 goto illegal_op;
10237 if (op & 0x20)
10238 gen_helper_udiv(tmp, tmp, tmp2);
10239 else
10240 gen_helper_sdiv(tmp, tmp, tmp2);
10241 tcg_temp_free_i32(tmp2);
10242 store_reg(s, rd, tmp);
10243 } else if ((op & 0xe) == 0xc) {
10244 /* Dual multiply accumulate long. */
10245 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10246 tcg_temp_free_i32(tmp);
10247 tcg_temp_free_i32(tmp2);
10248 goto illegal_op;
10250 if (op & 1)
10251 gen_swap_half(tmp2);
10252 gen_smul_dual(tmp, tmp2);
10253 if (op & 0x10) {
10254 tcg_gen_sub_i32(tmp, tmp, tmp2);
10255 } else {
10256 tcg_gen_add_i32(tmp, tmp, tmp2);
10258 tcg_temp_free_i32(tmp2);
10259 /* BUGFIX */
10260 tmp64 = tcg_temp_new_i64();
10261 tcg_gen_ext_i32_i64(tmp64, tmp);
10262 tcg_temp_free_i32(tmp);
10263 gen_addq(s, tmp64, rs, rd);
10264 gen_storeq_reg(s, rs, rd, tmp64);
10265 tcg_temp_free_i64(tmp64);
10266 } else {
10267 if (op & 0x20) {
10268 /* Unsigned 64-bit multiply */
10269 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
10270 } else {
10271 if (op & 8) {
10272 /* smlalxy */
10273 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10274 tcg_temp_free_i32(tmp2);
10275 tcg_temp_free_i32(tmp);
10276 goto illegal_op;
10278 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10279 tcg_temp_free_i32(tmp2);
10280 tmp64 = tcg_temp_new_i64();
10281 tcg_gen_ext_i32_i64(tmp64, tmp);
10282 tcg_temp_free_i32(tmp);
10283 } else {
10284 /* Signed 64-bit multiply */
10285 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10288 if (op & 4) {
10289 /* umaal */
10290 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10291 tcg_temp_free_i64(tmp64);
10292 goto illegal_op;
10294 gen_addq_lo(s, tmp64, rs);
10295 gen_addq_lo(s, tmp64, rd);
10296 } else if (op & 0x40) {
10297 /* 64-bit accumulate. */
10298 gen_addq(s, tmp64, rs, rd);
10300 gen_storeq_reg(s, rs, rd, tmp64);
10301 tcg_temp_free_i64(tmp64);
10303 break;
10305 break;
10306 case 6: case 7: case 14: case 15:
10307 /* Coprocessor. */
10308 if (((insn >> 24) & 3) == 3) {
10309 /* Translate into the equivalent ARM encoding. */
10310 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
10311 if (disas_neon_data_insn(s, insn)) {
10312 goto illegal_op;
10314 } else if (((insn >> 8) & 0xe) == 10) {
10315 if (disas_vfp_insn(s, insn)) {
10316 goto illegal_op;
10318 } else {
10319 if (insn & (1 << 28))
10320 goto illegal_op;
10321 if (disas_coproc_insn(s, insn)) {
10322 goto illegal_op;
10325 break;
10326 case 8: case 9: case 10: case 11:
10327 if (insn & (1 << 15)) {
10328 /* Branches, misc control. */
10329 if (insn & 0x5000) {
10330 /* Unconditional branch. */
10331 /* signextend(hw1[10:0]) -> offset[:12]. */
10332 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10333 /* hw1[10:0] -> offset[11:1]. */
10334 offset |= (insn & 0x7ff) << 1;
10335 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10336 offset[24:22] already have the same value because of the
10337 sign extension above. */
10338 offset ^= ((~insn) & (1 << 13)) << 10;
10339 offset ^= ((~insn) & (1 << 11)) << 11;
10341 if (insn & (1 << 14)) {
10342 /* Branch and link. */
10343 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
10346 offset += s->pc;
10347 if (insn & (1 << 12)) {
10348 /* b/bl */
10349 gen_jmp(s, offset);
10350 } else {
10351 /* blx */
10352 offset &= ~(uint32_t)2;
10353 /* thumb2 bx, no need to check */
10354 gen_bx_im(s, offset);
10356 } else if (((insn >> 23) & 7) == 7) {
10357 /* Misc control */
10358 if (insn & (1 << 13))
10359 goto illegal_op;
10361 if (insn & (1 << 26)) {
10362 if (!(insn & (1 << 20))) {
10363 /* Hypervisor call (v7) */
10364 int imm16 = extract32(insn, 16, 4) << 12
10365 | extract32(insn, 0, 12);
10366 ARCH(7);
10367 if (IS_USER(s)) {
10368 goto illegal_op;
10370 gen_hvc(s, imm16);
10371 } else {
10372 /* Secure monitor call (v6+) */
10373 ARCH(6K);
10374 if (IS_USER(s)) {
10375 goto illegal_op;
10377 gen_smc(s);
10379 } else {
10380 op = (insn >> 20) & 7;
10381 switch (op) {
10382 case 0: /* msr cpsr. */
10383 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10384 tmp = load_reg(s, rn);
10385 addr = tcg_const_i32(insn & 0xff);
10386 gen_helper_v7m_msr(cpu_env, addr, tmp);
10387 tcg_temp_free_i32(addr);
10388 tcg_temp_free_i32(tmp);
10389 gen_lookup_tb(s);
10390 break;
10392 /* fall through */
10393 case 1: /* msr spsr. */
10394 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10395 goto illegal_op;
10398 if (extract32(insn, 5, 1)) {
10399 /* MSR (banked) */
10400 int sysm = extract32(insn, 8, 4) |
10401 (extract32(insn, 4, 1) << 4);
10402 int r = op & 1;
10404 gen_msr_banked(s, r, sysm, rm);
10405 break;
10408 /* MSR (for PSRs) */
10409 tmp = load_reg(s, rn);
10410 if (gen_set_psr(s,
10411 msr_mask(s, (insn >> 8) & 0xf, op == 1),
10412 op == 1, tmp))
10413 goto illegal_op;
10414 break;
10415 case 2: /* cps, nop-hint. */
10416 if (((insn >> 8) & 7) == 0) {
10417 gen_nop_hint(s, insn & 0xff);
10419 /* Implemented as NOP in user mode. */
10420 if (IS_USER(s))
10421 break;
10422 offset = 0;
10423 imm = 0;
10424 if (insn & (1 << 10)) {
10425 if (insn & (1 << 7))
10426 offset |= CPSR_A;
10427 if (insn & (1 << 6))
10428 offset |= CPSR_I;
10429 if (insn & (1 << 5))
10430 offset |= CPSR_F;
10431 if (insn & (1 << 9))
10432 imm = CPSR_A | CPSR_I | CPSR_F;
10434 if (insn & (1 << 8)) {
10435 offset |= 0x1f;
10436 imm |= (insn & 0x1f);
10438 if (offset) {
10439 gen_set_psr_im(s, offset, 0, imm);
10441 break;
10442 case 3: /* Special control operations. */
10443 ARCH(7);
10444 op = (insn >> 4) & 0xf;
10445 switch (op) {
10446 case 2: /* clrex */
10447 gen_clrex(s);
10448 break;
10449 case 4: /* dsb */
10450 case 5: /* dmb */
10451 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
10452 break;
10453 case 6: /* isb */
10454 /* We need to break the TB after this insn
10455 * to execute self-modifying code correctly
10456 * and also to take any pending interrupts
10457 * immediately.
10459 gen_lookup_tb(s);
10460 break;
10461 default:
10462 goto illegal_op;
10464 break;
10465 case 4: /* bxj */
10466 /* Trivial implementation equivalent to bx. */
10467 tmp = load_reg(s, rn);
10468 gen_bx(s, tmp);
10469 break;
10470 case 5: /* Exception return. */
10471 if (IS_USER(s)) {
10472 goto illegal_op;
10474 if (rn != 14 || rd != 15) {
10475 goto illegal_op;
10477 tmp = load_reg(s, rn);
10478 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10479 gen_exception_return(s, tmp);
10480 break;
10481 case 6: /* MRS */
10482 if (extract32(insn, 5, 1)) {
10483 /* MRS (banked) */
10484 int sysm = extract32(insn, 16, 4) |
10485 (extract32(insn, 4, 1) << 4);
10487 gen_mrs_banked(s, 0, sysm, rd);
10488 break;
10491 /* mrs cpsr */
10492 tmp = tcg_temp_new_i32();
10493 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10494 addr = tcg_const_i32(insn & 0xff);
10495 gen_helper_v7m_mrs(tmp, cpu_env, addr);
10496 tcg_temp_free_i32(addr);
10497 } else {
10498 gen_helper_cpsr_read(tmp, cpu_env);
10500 store_reg(s, rd, tmp);
10501 break;
10502 case 7: /* MRS */
10503 if (extract32(insn, 5, 1)) {
10504 /* MRS (banked) */
10505 int sysm = extract32(insn, 16, 4) |
10506 (extract32(insn, 4, 1) << 4);
10508 gen_mrs_banked(s, 1, sysm, rd);
10509 break;
10512 /* mrs spsr. */
10513 /* Not accessible in user mode. */
10514 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
10515 goto illegal_op;
10517 tmp = load_cpu_field(spsr);
10518 store_reg(s, rd, tmp);
10519 break;
10522 } else {
10523 /* Conditional branch. */
10524 op = (insn >> 22) & 0xf;
10525 /* Generate a conditional jump to next instruction. */
10526 s->condlabel = gen_new_label();
10527 arm_gen_test_cc(op ^ 1, s->condlabel);
10528 s->condjmp = 1;
10530 /* offset[11:1] = insn[10:0] */
10531 offset = (insn & 0x7ff) << 1;
10532 /* offset[17:12] = insn[21:16]. */
10533 offset |= (insn & 0x003f0000) >> 4;
10534 /* offset[31:20] = insn[26]. */
10535 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10536 /* offset[18] = insn[13]. */
10537 offset |= (insn & (1 << 13)) << 5;
10538 /* offset[19] = insn[11]. */
10539 offset |= (insn & (1 << 11)) << 8;
10541 /* jump to the offset */
10542 gen_jmp(s, s->pc + offset);
10544 } else {
10545 /* Data processing immediate. */
10546 if (insn & (1 << 25)) {
10547 if (insn & (1 << 24)) {
10548 if (insn & (1 << 20))
10549 goto illegal_op;
10550 /* Bitfield/Saturate. */
10551 op = (insn >> 21) & 7;
10552 imm = insn & 0x1f;
10553 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10554 if (rn == 15) {
10555 tmp = tcg_temp_new_i32();
10556 tcg_gen_movi_i32(tmp, 0);
10557 } else {
10558 tmp = load_reg(s, rn);
10560 switch (op) {
10561 case 2: /* Signed bitfield extract. */
10562 imm++;
10563 if (shift + imm > 32)
10564 goto illegal_op;
10565 if (imm < 32)
10566 gen_sbfx(tmp, shift, imm);
10567 break;
10568 case 6: /* Unsigned bitfield extract. */
10569 imm++;
10570 if (shift + imm > 32)
10571 goto illegal_op;
10572 if (imm < 32)
10573 gen_ubfx(tmp, shift, (1u << imm) - 1);
10574 break;
10575 case 3: /* Bitfield insert/clear. */
10576 if (imm < shift)
10577 goto illegal_op;
10578 imm = imm + 1 - shift;
10579 if (imm != 32) {
10580 tmp2 = load_reg(s, rd);
10581 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
10582 tcg_temp_free_i32(tmp2);
10584 break;
10585 case 7:
10586 goto illegal_op;
10587 default: /* Saturate. */
10588 if (shift) {
10589 if (op & 1)
10590 tcg_gen_sari_i32(tmp, tmp, shift);
10591 else
10592 tcg_gen_shli_i32(tmp, tmp, shift);
10594 tmp2 = tcg_const_i32(imm);
10595 if (op & 4) {
10596 /* Unsigned. */
10597 if ((op & 1) && shift == 0) {
10598 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10599 tcg_temp_free_i32(tmp);
10600 tcg_temp_free_i32(tmp2);
10601 goto illegal_op;
10603 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
10604 } else {
10605 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
10607 } else {
10608 /* Signed. */
10609 if ((op & 1) && shift == 0) {
10610 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10611 tcg_temp_free_i32(tmp);
10612 tcg_temp_free_i32(tmp2);
10613 goto illegal_op;
10615 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
10616 } else {
10617 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
10620 tcg_temp_free_i32(tmp2);
10621 break;
10623 store_reg(s, rd, tmp);
10624 } else {
10625 imm = ((insn & 0x04000000) >> 15)
10626 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10627 if (insn & (1 << 22)) {
10628 /* 16-bit immediate. */
10629 imm |= (insn >> 4) & 0xf000;
10630 if (insn & (1 << 23)) {
10631 /* movt */
10632 tmp = load_reg(s, rd);
10633 tcg_gen_ext16u_i32(tmp, tmp);
10634 tcg_gen_ori_i32(tmp, tmp, imm << 16);
10635 } else {
10636 /* movw */
10637 tmp = tcg_temp_new_i32();
10638 tcg_gen_movi_i32(tmp, imm);
10640 } else {
10641 /* Add/sub 12-bit immediate. */
10642 if (rn == 15) {
10643 offset = s->pc & ~(uint32_t)3;
10644 if (insn & (1 << 23))
10645 offset -= imm;
10646 else
10647 offset += imm;
10648 tmp = tcg_temp_new_i32();
10649 tcg_gen_movi_i32(tmp, offset);
10650 } else {
10651 tmp = load_reg(s, rn);
10652 if (insn & (1 << 23))
10653 tcg_gen_subi_i32(tmp, tmp, imm);
10654 else
10655 tcg_gen_addi_i32(tmp, tmp, imm);
10658 store_reg(s, rd, tmp);
10660 } else {
10661 int shifter_out = 0;
10662 /* modified 12-bit immediate. */
10663 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10664 imm = (insn & 0xff);
10665 switch (shift) {
10666 case 0: /* XY */
10667 /* Nothing to do. */
10668 break;
10669 case 1: /* 00XY00XY */
10670 imm |= imm << 16;
10671 break;
10672 case 2: /* XY00XY00 */
10673 imm |= imm << 16;
10674 imm <<= 8;
10675 break;
10676 case 3: /* XYXYXYXY */
10677 imm |= imm << 16;
10678 imm |= imm << 8;
10679 break;
10680 default: /* Rotated constant. */
10681 shift = (shift << 1) | (imm >> 7);
10682 imm |= 0x80;
10683 imm = imm << (32 - shift);
10684 shifter_out = 1;
10685 break;
10687 tmp2 = tcg_temp_new_i32();
10688 tcg_gen_movi_i32(tmp2, imm);
10689 rn = (insn >> 16) & 0xf;
10690 if (rn == 15) {
10691 tmp = tcg_temp_new_i32();
10692 tcg_gen_movi_i32(tmp, 0);
10693 } else {
10694 tmp = load_reg(s, rn);
10696 op = (insn >> 21) & 0xf;
10697 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
10698 shifter_out, tmp, tmp2))
10699 goto illegal_op;
10700 tcg_temp_free_i32(tmp2);
10701 rd = (insn >> 8) & 0xf;
10702 if (rd != 15) {
10703 store_reg(s, rd, tmp);
10704 } else {
10705 tcg_temp_free_i32(tmp);
10709 break;
10710 case 12: /* Load/store single data item. */
10712 int postinc = 0;
10713 int writeback = 0;
10714 int memidx;
10715 if ((insn & 0x01100000) == 0x01000000) {
10716 if (disas_neon_ls_insn(s, insn)) {
10717 goto illegal_op;
10719 break;
10721 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10722 if (rs == 15) {
10723 if (!(insn & (1 << 20))) {
10724 goto illegal_op;
10726 if (op != 2) {
10727 /* Byte or halfword load space with dest == r15 : memory hints.
10728 * Catch them early so we don't emit pointless addressing code.
10729 * This space is a mix of:
10730 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10731 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10732 * cores)
10733 * unallocated hints, which must be treated as NOPs
10734 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10735 * which is easiest for the decoding logic
10736 * Some space which must UNDEF
10738 int op1 = (insn >> 23) & 3;
10739 int op2 = (insn >> 6) & 0x3f;
10740 if (op & 2) {
10741 goto illegal_op;
10743 if (rn == 15) {
10744 /* UNPREDICTABLE, unallocated hint or
10745 * PLD/PLDW/PLI (literal)
10747 return 0;
10749 if (op1 & 1) {
10750 return 0; /* PLD/PLDW/PLI or unallocated hint */
10752 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
10753 return 0; /* PLD/PLDW/PLI or unallocated hint */
10755 /* UNDEF space, or an UNPREDICTABLE */
10756 return 1;
10759 memidx = get_mem_index(s);
10760 if (rn == 15) {
10761 addr = tcg_temp_new_i32();
10762 /* PC relative. */
10763 /* s->pc has already been incremented by 4. */
10764 imm = s->pc & 0xfffffffc;
10765 if (insn & (1 << 23))
10766 imm += insn & 0xfff;
10767 else
10768 imm -= insn & 0xfff;
10769 tcg_gen_movi_i32(addr, imm);
10770 } else {
10771 addr = load_reg(s, rn);
10772 if (insn & (1 << 23)) {
10773 /* Positive offset. */
10774 imm = insn & 0xfff;
10775 tcg_gen_addi_i32(addr, addr, imm);
10776 } else {
10777 imm = insn & 0xff;
10778 switch ((insn >> 8) & 0xf) {
10779 case 0x0: /* Shifted Register. */
10780 shift = (insn >> 4) & 0xf;
10781 if (shift > 3) {
10782 tcg_temp_free_i32(addr);
10783 goto illegal_op;
10785 tmp = load_reg(s, rm);
10786 if (shift)
10787 tcg_gen_shli_i32(tmp, tmp, shift);
10788 tcg_gen_add_i32(addr, addr, tmp);
10789 tcg_temp_free_i32(tmp);
10790 break;
10791 case 0xc: /* Negative offset. */
10792 tcg_gen_addi_i32(addr, addr, -imm);
10793 break;
10794 case 0xe: /* User privilege. */
10795 tcg_gen_addi_i32(addr, addr, imm);
10796 memidx = get_a32_user_mem_index(s);
10797 break;
10798 case 0x9: /* Post-decrement. */
10799 imm = -imm;
10800 /* Fall through. */
10801 case 0xb: /* Post-increment. */
10802 postinc = 1;
10803 writeback = 1;
10804 break;
10805 case 0xd: /* Pre-decrement. */
10806 imm = -imm;
10807 /* Fall through. */
10808 case 0xf: /* Pre-increment. */
10809 tcg_gen_addi_i32(addr, addr, imm);
10810 writeback = 1;
10811 break;
10812 default:
10813 tcg_temp_free_i32(addr);
10814 goto illegal_op;
10818 if (insn & (1 << 20)) {
10819 /* Load. */
10820 tmp = tcg_temp_new_i32();
10821 switch (op) {
10822 case 0:
10823 gen_aa32_ld8u(s, tmp, addr, memidx);
10824 break;
10825 case 4:
10826 gen_aa32_ld8s(s, tmp, addr, memidx);
10827 break;
10828 case 1:
10829 gen_aa32_ld16u(s, tmp, addr, memidx);
10830 break;
10831 case 5:
10832 gen_aa32_ld16s(s, tmp, addr, memidx);
10833 break;
10834 case 2:
10835 gen_aa32_ld32u(s, tmp, addr, memidx);
10836 break;
10837 default:
10838 tcg_temp_free_i32(tmp);
10839 tcg_temp_free_i32(addr);
10840 goto illegal_op;
10842 if (rs == 15) {
10843 gen_bx(s, tmp);
10844 } else {
10845 store_reg(s, rs, tmp);
10847 } else {
10848 /* Store. */
10849 tmp = load_reg(s, rs);
10850 switch (op) {
10851 case 0:
10852 gen_aa32_st8(s, tmp, addr, memidx);
10853 break;
10854 case 1:
10855 gen_aa32_st16(s, tmp, addr, memidx);
10856 break;
10857 case 2:
10858 gen_aa32_st32(s, tmp, addr, memidx);
10859 break;
10860 default:
10861 tcg_temp_free_i32(tmp);
10862 tcg_temp_free_i32(addr);
10863 goto illegal_op;
10865 tcg_temp_free_i32(tmp);
10867 if (postinc)
10868 tcg_gen_addi_i32(addr, addr, imm);
10869 if (writeback) {
10870 store_reg(s, rn, addr);
10871 } else {
10872 tcg_temp_free_i32(addr);
10875 break;
10876 default:
10877 goto illegal_op;
10879 return 0;
10880 illegal_op:
10881 return 1;
/* Decode and translate one 16-bit Thumb (Thumb-1) instruction at s->pc,
 * emitting TCG ops into the current translation block.  32-bit Thumb-2
 * encodings (and BL/BLX pairs) are handed off to disas_thumb2_insn().
 * On an undefined encoding this generates an EXCP_UDEF exception insn.
 */
static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 addr;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            /* Inside an IT block: branch over this insn if the condition
             * fails; the label is resolved by the caller's main loop.
             */
            s->condlabel = gen_new_label();
            arm_gen_test_cc(cond ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    insn = arm_lduw_code(env, s->pc, s->sctlr_b);
    s->pc += 2;

    switch (insn >> 12) {
    case 0: case 1:

        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /* add/subtract */
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            }
            /* Inside an IT block flags are not set, so the cheaper
             * non-flag-setting op can be used.
             */
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
            } else {
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* cmp */
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                break;
            case 2: /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* sub */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
                break;
            case 3:/* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    /* BLX: record the return address (with Thumb bit) in LR */
                    ARCH(5);
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                }
                /* already thumb, no need to check */
                gen_bx(s, tmp);
                break;
            }
            break;
        }

        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            TCGV_UNUSED_I32(tmp);
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask) {
                gen_adc(tmp, tmp2);
            } else {
                gen_adc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask) {
                gen_sub_carry(tmp, tmp, tmp2);
            } else {
                gen_sbc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;    /* rd = 16 means "no destination register write" */
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_sub_CC(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_sub_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_add_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            /* result lives in tmp2: route the store through the val path */
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                /* shift/rotate/mvn: result is in tmp2 */
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            /* compare/test ops: no register writeback, free both temps */
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;

    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) { /* store */
            tmp = load_reg(s, rd);
        } else {
            tmp = tcg_temp_new_i32();
        }

        switch (op) {
        case 0: /* str */
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            break;
        case 1: /* strh */
            gen_aa32_st16(s, tmp, addr, get_mem_index(s));
            break;
        case 2: /* strb */
            gen_aa32_st8(s, tmp, addr, get_mem_index(s));
            break;
        case 3: /* ldrsb */
            gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
            break;
        case 4: /* ldr */
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            break;
        case 5: /* ldrh */
            gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
            break;
        case 6: /* ldrb */
            gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
            break;
        case 7: /* ldrsh */
            gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
            break;
        }
        if (op >= 3) { /* load */
            store_reg(s, rd, tmp);
        } else {
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;   /* imm5 scaled by 4 */
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st8(s, tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;   /* imm5 scaled by 2 */
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st16(s, tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;

        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            /* offset = total size of the transfer, including LR/PC slot */
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                /* push: pre-decrement SP before storing */
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED_I32(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                /* push: addr walked up during stores; rewind to new SP */
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(s, 15, tmp);
            }
            break;

        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;

        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;

        case 0xe: /* bkpt */
        {
            int imm8 = extract32(insn, 0, 8);
            ARCH(5);
            gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
                               default_exception_el(s));
            break;
        }

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;

        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
                    /* endianness actually changes: flush and retranslate */
                    gen_helper_setend(cpu_env);
                    s->is_jmp = DISAS_UPDATE;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    break;      /* CPS is a NOP in user mode */
                }
                if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
    {
        /* load/store multiple */
        TCGv_i32 loaded_var;
        TCGV_UNUSED_I32(loaded_var);
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    if (i == rn) {
                        /* defer the base-register load until after writeback
                           decision below */
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }

    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 8);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        arm_gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;   /* sign-extend imm8 */
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;   /* sign-extend imm11 */
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
11613 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11615 /* Return true if the insn at dc->pc might cross a page boundary.
11616 * (False positives are OK, false negatives are not.)
11618 uint16_t insn;
11620 if ((s->pc & 3) == 0) {
11621 /* At a 4-aligned address we can't be crossing a page */
11622 return false;
11625 /* This must be a Thumb insn */
11626 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
11628 if ((insn >> 11) >= 0x1d) {
11629 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
11630 * First half of a 32-bit Thumb insn. Thumb-1 cores might
11631 * end up actually treating this as two 16-bit insns (see the
11632 * code at the start of disas_thumb2_insn()) but we don't bother
11633 * to check for that as it is unlikely, and false positives here
11634 * are harmless.
11636 return true;
11638 /* Definitely a 16-bit insn, can't be crossing a page. */
11639 return false;
/* generate intermediate code for basic block 'tb'.  */
/* Top-level A32/T32 translation loop: initialises a DisasContext from the
 * TB flags, then decodes instructions until the TB must end (branch, page
 * boundary, op buffer full, singlestep, or insn budget exhausted), and
 * finally emits the TB epilogue for whichever exit reason applies.
 */
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;
    bool end_of_page;

    /* generate intermediate code */

    /* The A64 decoder has its own top level loop, because it doesn't need
     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
     */
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        gen_intermediate_code_a64(cpu, tb);
        return;
    }

    pc_start = tb->pc;

    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags);
    dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
    dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
        /* Record PC and IT-state so restore_state_to_opc() can recover them */
        tcg_gen_insn_start(dc->pc,
                           (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                           0);
        num_insns++;

#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            CPUBreakpoint *bp;
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (bp->flags & BP_CPU) {
                        gen_set_condexec(dc);
                        gen_set_pc_im(dc, dc->pc);
                        gen_helper_check_breakpoints(cpu_env);
                        /* End the TB early; it's likely not going to be executed */
                        dc->is_jmp = DISAS_UPDATE;
                    } else {
                        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
                        /* The address covered by the breakpoint must be
                           included in [tb->pc, tb->pc + tb->size) in order
                           to for it to be properly cleared -- thus we
                           increment the PC here so that the logic setting
                           tb->size below does the right thing.  */
                        /* TODO: Advance PC by correct instruction length to
                         * avoid disassembler error messages */
                        dc->pc += 2;
                        goto done_generating;
                    }
                    break;
                }
            }
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (dc->ss_active && !dc->pstate_ss) {
            /* Singlestep state is Active-pending.
             * If we're in this state at the start of a TB then either
             *  a) we just took an exception to an EL which is being debugged
             *     and this is the first insn in the exception handler
             *  b) debug exceptions were masked and we just unmasked them
             *     without changing EL (eg by clearing PSTATE.D)
             * In either case we're going to take a swstep exception in the
             * "did not step an insn" case, and so the syndrome ISV and EX
             * bits should be zero.
             */
            assert(num_insns == 1);
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                          default_exception_el(dc));
            goto done_generating;
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            /* Advance the IT-block state machine after each Thumb insn */
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            unsigned int insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
            dc->pc += 4;
            disas_arm_insn(dc, insn);
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */

        /* We want to stop the TB if the next insn starts in a new page,
         * or if it spans between this page and the next. This means that
         * if we're looking at the last halfword in the page we need to
         * see if it's a 16-bit Thumb insn (which will fit in this TB)
         * or a 32-bit Thumb insn (which won't).
         * This is to avoid generating a silly TB with a single 16-bit insn
         * in it at the end of this page (which would execute correctly
         * but isn't very efficient).
         */
        end_of_page = (dc->pc >= next_page_start) ||
            ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));

    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             !dc->ss_active &&
             !end_of_page &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code.  */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
        /* Unconditional and "condition passed" instruction codepath.  */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                /* FIXME: Single stepping a WFI insn will not halt
                   the CPU.  */
                gen_exception_internal(EXCP_DEBUG);
            }
        }
        if (dc->condjmp) {
            /* "Condition failed" instruction codepath.  */
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_set_pc_im(dc, dc->pc);
            if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                gen_exception_internal(EXCP_DEBUG);
            }
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        case DISAS_JUMP:
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
        qemu_log_in_addr_range(pc_start)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->sctlr_b << 1));
        qemu_log("\n");
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}
/* Printable names for the 16 CPSR.M mode field encodings, indexed by
 * (psr & 0xf); "???" marks encodings with no architected mode.
 */
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
  "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
12033 void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
12034 int flags)
12036 ARMCPU *cpu = ARM_CPU(cs);
12037 CPUARMState *env = &cpu->env;
12038 int i;
12039 uint32_t psr;
12040 const char *ns_status;
12042 if (is_a64(env)) {
12043 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
12044 return;
12047 for(i=0;i<16;i++) {
12048 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
12049 if ((i % 4) == 3)
12050 cpu_fprintf(f, "\n");
12051 else
12052 cpu_fprintf(f, " ");
12054 psr = cpsr_read(env);
12056 if (arm_feature(env, ARM_FEATURE_EL3) &&
12057 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
12058 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
12059 } else {
12060 ns_status = "";
12063 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
12064 psr,
12065 psr & (1 << 31) ? 'N' : '-',
12066 psr & (1 << 30) ? 'Z' : '-',
12067 psr & (1 << 29) ? 'C' : '-',
12068 psr & (1 << 28) ? 'V' : '-',
12069 psr & CPSR_T ? 'T' : 'A',
12070 ns_status,
12071 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
12073 if (flags & CPU_DUMP_FPU) {
12074 int numvfpregs = 0;
12075 if (arm_feature(env, ARM_FEATURE_VFP)) {
12076 numvfpregs += 16;
12078 if (arm_feature(env, ARM_FEATURE_VFP3)) {
12079 numvfpregs += 16;
12081 for (i = 0; i < numvfpregs; i++) {
12082 uint64_t v = float64_val(env->vfp.regs[i]);
12083 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
12084 i * 2, (uint32_t)v,
12085 i * 2 + 1, (uint32_t)(v >> 32),
12086 i, v);
12088 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
/* Restore CPU state from the per-insn data recorded by tcg_gen_insn_start():
 * data[0] is the PC, data[1] the packed condexec (IT) bits (A32/T32 only),
 * and data[2] the upper syndrome word shifted into exception.syndrome.
 */
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}