1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
27 #include "tcg-op.h"
28 #include "qemu/log.h"
29 #include "qemu/bitops.h"
30 #include "arm_ldst.h"
32 #include "exec/helper-proto.h"
33 #include "exec/helper-gen.h"
35 #include "trace-tcg.h"
36 #include "exec/log.h"
39 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
40 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
41 /* currently all emulated v5 cores are also v5TE, so don't bother */
42 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
43 #define ENABLE_ARCH_5J 0
44 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
45 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
46 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
47 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
48 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
50 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
52 #include "translate.h"
54 #if defined(CONFIG_USER_ONLY)
55 #define IS_USER(s) 1
56 #else
57 #define IS_USER(s) (s->user)
58 #endif
60 TCGv_env cpu_env;
61 /* We reuse the same 64-bit temporaries for efficiency. */
62 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
63 static TCGv_i32 cpu_R[16];
64 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
65 TCGv_i64 cpu_exclusive_addr;
66 TCGv_i64 cpu_exclusive_val;
67 #ifdef CONFIG_USER_ONLY
68 TCGv_i64 cpu_exclusive_test;
69 TCGv_i32 cpu_exclusive_info;
70 #endif
72 /* FIXME: These should be removed. */
73 static TCGv_i32 cpu_F0s, cpu_F1s;
74 static TCGv_i64 cpu_F0d, cpu_F1d;
76 #include "exec/gen-icount.h"
78 static const char *regnames[] =
79 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
80 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
82 /* initialize TCG globals. */
83 void arm_translate_init(void)
85 int i;
87 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
88 tcg_ctx.tcg_env = cpu_env;
90 for (i = 0; i < 16; i++) {
91 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
92 offsetof(CPUARMState, regs[i]),
93 regnames[i]);
95 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
96 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
97 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
98 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
100 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
101 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
102 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
103 offsetof(CPUARMState, exclusive_val), "exclusive_val");
104 #ifdef CONFIG_USER_ONLY
105 cpu_exclusive_test = tcg_global_mem_new_i64(cpu_env,
106 offsetof(CPUARMState, exclusive_test), "exclusive_test");
107 cpu_exclusive_info = tcg_global_mem_new_i32(cpu_env,
108 offsetof(CPUARMState, exclusive_info), "exclusive_info");
109 #endif
111 a64_translate_init();
114 static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
116 /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
117 * insns:
118 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
119 * otherwise, access as if at PL0.
121 switch (s->mmu_idx) {
122 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
123 case ARMMMUIdx_S12NSE0:
124 case ARMMMUIdx_S12NSE1:
125 return ARMMMUIdx_S12NSE0;
126 case ARMMMUIdx_S1E3:
127 case ARMMMUIdx_S1SE0:
128 case ARMMMUIdx_S1SE1:
129 return ARMMMUIdx_S1SE0;
130 case ARMMMUIdx_S2NS:
131 default:
132 g_assert_not_reached();
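/* Worked example of the mapping above: an LDRT/STRT executed at Secure
 * EL1 (ARMMMUIdx_S1SE1) performs its access with the Secure EL0 regime
 * (ARMMMUIdx_S1SE0), and the non-secure cases map to S12NSE0 in the same
 * way; stage 2 (S2NS) can never be the regime an A32/T32 insn runs in,
 * hence the assertion.
 */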
136 static inline TCGv_i32 load_cpu_offset(int offset)
138 TCGv_i32 tmp = tcg_temp_new_i32();
139 tcg_gen_ld_i32(tmp, cpu_env, offset);
140 return tmp;
143 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
145 static inline void store_cpu_offset(TCGv_i32 var, int offset)
147 tcg_gen_st_i32(var, cpu_env, offset);
148 tcg_temp_free_i32(var);
151 #define store_cpu_field(var, name) \
152 store_cpu_offset(var, offsetof(CPUARMState, name))
154 /* Set a variable to the value of a CPU register. */
155 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
157 if (reg == 15) {
158 uint32_t addr;
159 /* normally, since we updated PC, we need only to add one insn */
160 if (s->thumb)
161 addr = (long)s->pc + 2;
162 else
163 addr = (long)s->pc + 4;
164 tcg_gen_movi_i32(var, addr);
165 } else {
166 tcg_gen_mov_i32(var, cpu_R[reg]);
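/* Example of the PC special case above: for an ARM insn at address
 * 0x1000 the architectural read of r15 yields 0x1008 (insn address + 8).
 * By this point s->pc has already been advanced past the insn to 0x1004,
 * so adding one more insn length (4, or 2 in Thumb state) produces the
 * correct value.
 */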
170 /* Create a new temporary and set it to the value of a CPU register. */
171 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
173 TCGv_i32 tmp = tcg_temp_new_i32();
174 load_reg_var(s, tmp, reg);
175 return tmp;
178 /* Set a CPU register. The source must be a temporary and will be
179 marked as dead. */
180 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
182 if (reg == 15) {
183 /* In Thumb mode, we must ignore bit 0.
184 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
185 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
186 * We choose to ignore [1:0] in ARM mode for all architecture versions.
188 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
189 s->is_jmp = DISAS_JUMP;
191 tcg_gen_mov_i32(cpu_R[reg], var);
192 tcg_temp_free_i32(var);
195 /* Value extensions. */
196 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
197 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
198 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
199 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
201 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
202 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
205 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
207 TCGv_i32 tmp_mask = tcg_const_i32(mask);
208 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
209 tcg_temp_free_i32(tmp_mask);
211 /* Set NZCV flags from the high 4 bits of var. */
212 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
214 static void gen_exception_internal(int excp)
216 TCGv_i32 tcg_excp = tcg_const_i32(excp);
218 assert(excp_is_internal(excp));
219 gen_helper_exception_internal(cpu_env, tcg_excp);
220 tcg_temp_free_i32(tcg_excp);
223 static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
225 TCGv_i32 tcg_excp = tcg_const_i32(excp);
226 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
227 TCGv_i32 tcg_el = tcg_const_i32(target_el);
229 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
230 tcg_syn, tcg_el);
232 tcg_temp_free_i32(tcg_el);
233 tcg_temp_free_i32(tcg_syn);
234 tcg_temp_free_i32(tcg_excp);
237 static void gen_ss_advance(DisasContext *s)
239 /* If the singlestep state is Active-not-pending, advance to
240 * Active-pending.
242 if (s->ss_active) {
243 s->pstate_ss = 0;
244 gen_helper_clear_pstate_ss(cpu_env);
248 static void gen_step_complete_exception(DisasContext *s)
250 /* We just completed step of an insn. Move from Active-not-pending
251 * to Active-pending, and then also take the swstep exception.
252 * This corresponds to making the (IMPDEF) choice to prioritize
253 * swstep exceptions over asynchronous exceptions taken to an exception
254 * level where debug is disabled. This choice has the advantage that
255 * we do not need to maintain internal state corresponding to the
256 * ISV/EX syndrome bits between completion of the step and generation
257 * of the exception, and our syndrome information is always correct.
259 gen_ss_advance(s);
260 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
261 default_exception_el(s));
262 s->is_jmp = DISAS_EXC;
265 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
267 TCGv_i32 tmp1 = tcg_temp_new_i32();
268 TCGv_i32 tmp2 = tcg_temp_new_i32();
269 tcg_gen_ext16s_i32(tmp1, a);
270 tcg_gen_ext16s_i32(tmp2, b);
271 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
272 tcg_temp_free_i32(tmp2);
273 tcg_gen_sari_i32(a, a, 16);
274 tcg_gen_sari_i32(b, b, 16);
275 tcg_gen_mul_i32(b, b, a);
276 tcg_gen_mov_i32(a, tmp1);
277 tcg_temp_free_i32(tmp1);
280 /* Byteswap each halfword. */
281 static void gen_rev16(TCGv_i32 var)
283 TCGv_i32 tmp = tcg_temp_new_i32();
284 tcg_gen_shri_i32(tmp, var, 8);
285 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
286 tcg_gen_shli_i32(var, var, 8);
287 tcg_gen_andi_i32(var, var, 0xff00ff00);
288 tcg_gen_or_i32(var, var, tmp);
289 tcg_temp_free_i32(tmp);
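/* Illustrative sketch (plain C, not part of the translator) of what the
 * TCG ops above compute: REV16 swaps the two bytes within each 16-bit
 * half independently.
 *
 *   static inline uint32_t rev16_sketch(uint32_t x)
 *   {
 *       return ((x >> 8) & 0x00ff00ffu) | ((x << 8) & 0xff00ff00u);
 *   }
 *
 * e.g. 0x11223344 -> 0x22114433.
 */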
292 /* Byteswap low halfword and sign extend. */
293 static void gen_revsh(TCGv_i32 var)
295 tcg_gen_ext16u_i32(var, var);
296 tcg_gen_bswap16_i32(var, var);
297 tcg_gen_ext16s_i32(var, var);
300 /* Unsigned bitfield extract. */
301 static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
303 if (shift)
304 tcg_gen_shri_i32(var, var, shift);
305 tcg_gen_andi_i32(var, var, mask);
308 /* Signed bitfield extract. */
309 static void gen_sbfx(TCGv_i32 var, int shift, int width)
311 uint32_t signbit;
313 if (shift)
314 tcg_gen_sari_i32(var, var, shift);
315 if (shift + width < 32) {
316 signbit = 1u << (width - 1);
317 tcg_gen_andi_i32(var, var, (1u << width) - 1);
318 tcg_gen_xori_i32(var, var, signbit);
319 tcg_gen_subi_i32(var, var, signbit);
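/* The xor/sub pair above is the usual sign-extension trick: for a value
 * already masked to its low 'width' bits, (x ^ signbit) - signbit copies
 * bit (width - 1) into all higher bits.  Example with width == 8:
 * x = 0xff -> (0xff ^ 0x80) - 0x80 = 0x7f - 0x80 = 0xffffffff (-1).
 * When the field extends up to bit 31 (shift + width == 32) the
 * arithmetic shift alone has already done the sign extension, so the
 * extra work is skipped.
 */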
323 /* Return (b << 32) + a. Mark inputs as dead */
324 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
326 TCGv_i64 tmp64 = tcg_temp_new_i64();
328 tcg_gen_extu_i32_i64(tmp64, b);
329 tcg_temp_free_i32(b);
330 tcg_gen_shli_i64(tmp64, tmp64, 32);
331 tcg_gen_add_i64(a, tmp64, a);
333 tcg_temp_free_i64(tmp64);
334 return a;
337 /* Return (b << 32) - a. Mark inputs as dead. */
338 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
340 TCGv_i64 tmp64 = tcg_temp_new_i64();
342 tcg_gen_extu_i32_i64(tmp64, b);
343 tcg_temp_free_i32(b);
344 tcg_gen_shli_i64(tmp64, tmp64, 32);
345 tcg_gen_sub_i64(a, tmp64, a);
347 tcg_temp_free_i64(tmp64);
348 return a;
351 /* 32x32->64 multiply. Marks inputs as dead. */
352 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
354 TCGv_i32 lo = tcg_temp_new_i32();
355 TCGv_i32 hi = tcg_temp_new_i32();
356 TCGv_i64 ret;
358 tcg_gen_mulu2_i32(lo, hi, a, b);
359 tcg_temp_free_i32(a);
360 tcg_temp_free_i32(b);
362 ret = tcg_temp_new_i64();
363 tcg_gen_concat_i32_i64(ret, lo, hi);
364 tcg_temp_free_i32(lo);
365 tcg_temp_free_i32(hi);
367 return ret;
370 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
372 TCGv_i32 lo = tcg_temp_new_i32();
373 TCGv_i32 hi = tcg_temp_new_i32();
374 TCGv_i64 ret;
376 tcg_gen_muls2_i32(lo, hi, a, b);
377 tcg_temp_free_i32(a);
378 tcg_temp_free_i32(b);
380 ret = tcg_temp_new_i64();
381 tcg_gen_concat_i32_i64(ret, lo, hi);
382 tcg_temp_free_i32(lo);
383 tcg_temp_free_i32(hi);
385 return ret;
388 /* Swap low and high halfwords. */
389 static void gen_swap_half(TCGv_i32 var)
391 TCGv_i32 tmp = tcg_temp_new_i32();
392 tcg_gen_shri_i32(tmp, var, 16);
393 tcg_gen_shli_i32(var, var, 16);
394 tcg_gen_or_i32(var, var, tmp);
395 tcg_temp_free_i32(tmp);
398 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
399 tmp = (t0 ^ t1) & 0x8000;
400 t0 &= ~0x8000;
401 t1 &= ~0x8000;
402 t0 = (t0 + t1) ^ tmp;
405 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
407 TCGv_i32 tmp = tcg_temp_new_i32();
408 tcg_gen_xor_i32(tmp, t0, t1);
409 tcg_gen_andi_i32(tmp, tmp, 0x8000);
410 tcg_gen_andi_i32(t0, t0, ~0x8000);
411 tcg_gen_andi_i32(t1, t1, ~0x8000);
412 tcg_gen_add_i32(t0, t0, t1);
413 tcg_gen_xor_i32(t0, t0, tmp);
414 tcg_temp_free_i32(tmp);
415 tcg_temp_free_i32(t1);
418 /* Set CF to the top bit of var. */
419 static void gen_set_CF_bit31(TCGv_i32 var)
421 tcg_gen_shri_i32(cpu_CF, var, 31);
424 /* Set N and Z flags from var. */
425 static inline void gen_logic_CC(TCGv_i32 var)
427 tcg_gen_mov_i32(cpu_NF, var);
428 tcg_gen_mov_i32(cpu_ZF, var);
431 /* T0 += T1 + CF. */
432 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
434 tcg_gen_add_i32(t0, t0, t1);
435 tcg_gen_add_i32(t0, t0, cpu_CF);
438 /* dest = T0 + T1 + CF. */
439 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
441 tcg_gen_add_i32(dest, t0, t1);
442 tcg_gen_add_i32(dest, dest, cpu_CF);
445 /* dest = T0 - T1 + CF - 1. */
446 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
448 tcg_gen_sub_i32(dest, t0, t1);
449 tcg_gen_add_i32(dest, dest, cpu_CF);
450 tcg_gen_subi_i32(dest, dest, 1);
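/* This matches the architectural SBC definition T0 + ~T1 + CF: in two's
 * complement ~T1 == -T1 - 1, so T0 + ~T1 + CF == T0 - T1 + CF - 1, which
 * is exactly the three operations above.
 */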
453 /* dest = T0 + T1. Compute C, N, V and Z flags */
454 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
456 TCGv_i32 tmp = tcg_temp_new_i32();
457 tcg_gen_movi_i32(tmp, 0);
458 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
459 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
460 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
461 tcg_gen_xor_i32(tmp, t0, t1);
462 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
463 tcg_temp_free_i32(tmp);
464 tcg_gen_mov_i32(dest, cpu_NF);
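/* After the add2 above, cpu_NF holds the 32-bit result and cpu_CF the
 * carry-out.  The xor/andc pair computes (result ^ t0) & ~(t0 ^ t1),
 * whose sign bit is set exactly when the operands had equal signs but
 * the result's sign differs, i.e. signed overflow (VF is only ever
 * tested by bit 31).  Example: 0x7fffffff + 1 = 0x80000000 sets V, since
 * (res ^ t0) = 0xffffffff and ~(t0 ^ t1) = 0x80000001 share bit 31.
 */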
467 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
468 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
470 TCGv_i32 tmp = tcg_temp_new_i32();
471 if (TCG_TARGET_HAS_add2_i32) {
472 tcg_gen_movi_i32(tmp, 0);
473 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
474 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
475 } else {
476 TCGv_i64 q0 = tcg_temp_new_i64();
477 TCGv_i64 q1 = tcg_temp_new_i64();
478 tcg_gen_extu_i32_i64(q0, t0);
479 tcg_gen_extu_i32_i64(q1, t1);
480 tcg_gen_add_i64(q0, q0, q1);
481 tcg_gen_extu_i32_i64(q1, cpu_CF);
482 tcg_gen_add_i64(q0, q0, q1);
483 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
484 tcg_temp_free_i64(q0);
485 tcg_temp_free_i64(q1);
487 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
488 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
489 tcg_gen_xor_i32(tmp, t0, t1);
490 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
491 tcg_temp_free_i32(tmp);
492 tcg_gen_mov_i32(dest, cpu_NF);
495 /* dest = T0 - T1. Compute C, N, V and Z flags */
496 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
498 TCGv_i32 tmp;
499 tcg_gen_sub_i32(cpu_NF, t0, t1);
500 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
501 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
502 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
503 tmp = tcg_temp_new_i32();
504 tcg_gen_xor_i32(tmp, t0, t1);
505 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
506 tcg_temp_free_i32(tmp);
507 tcg_gen_mov_i32(dest, cpu_NF);
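/* For subtraction the ARM carry flag is "no borrow": C = 1 iff t0 >= t1
 * as unsigned values, hence the GEU setcond.  Overflow here is
 * (result ^ t0) & (t0 ^ t1): operands of opposite sign whose result sign
 * differs from t0, e.g. 0x80000000 - 1 = 0x7fffffff sets V.
 */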
510 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
511 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
513 TCGv_i32 tmp = tcg_temp_new_i32();
514 tcg_gen_not_i32(tmp, t1);
515 gen_adc_CC(dest, t0, tmp);
516 tcg_temp_free_i32(tmp);
519 #define GEN_SHIFT(name) \
520 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
522 TCGv_i32 tmp1, tmp2, tmp3; \
523 tmp1 = tcg_temp_new_i32(); \
524 tcg_gen_andi_i32(tmp1, t1, 0xff); \
525 tmp2 = tcg_const_i32(0); \
526 tmp3 = tcg_const_i32(0x1f); \
527 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
528 tcg_temp_free_i32(tmp3); \
529 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
530 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
531 tcg_temp_free_i32(tmp2); \
532 tcg_temp_free_i32(tmp1); \
534 GEN_SHIFT(shl)
535 GEN_SHIFT(shr)
536 #undef GEN_SHIFT
538 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
540 TCGv_i32 tmp1, tmp2;
541 tmp1 = tcg_temp_new_i32();
542 tcg_gen_andi_i32(tmp1, t1, 0xff);
543 tmp2 = tcg_const_i32(0x1f);
544 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
545 tcg_temp_free_i32(tmp2);
546 tcg_gen_sar_i32(dest, t0, tmp1);
547 tcg_temp_free_i32(tmp1);
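/* Register-specified shifts use only the bottom byte of the shift
 * register.  GEN_SHIFT above zeroes the operand once the amount exceeds
 * 31 (so LSL/LSR by 32..255 give 0), while gen_sar clamps the amount to
 * 31 so large ASR amounts replicate the sign bit.  Plain-C sketch of the
 * intended semantics (illustrative only, assuming the host compiler's
 * signed right shift is arithmetic):
 *
 *   static inline uint32_t lsr_reg_sketch(uint32_t x, uint32_t amt)
 *   {
 *       amt &= 0xff;
 *       return amt > 31 ? 0 : x >> amt;
 *   }
 *
 *   static inline int32_t asr_reg_sketch(int32_t x, uint32_t amt)
 *   {
 *       amt &= 0xff;
 *       return x >> (amt > 31 ? 31 : amt);
 *   }
 */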
550 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
552 TCGv_i32 c0 = tcg_const_i32(0);
553 TCGv_i32 tmp = tcg_temp_new_i32();
554 tcg_gen_neg_i32(tmp, src);
555 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
556 tcg_temp_free_i32(c0);
557 tcg_temp_free_i32(tmp);
560 static void shifter_out_im(TCGv_i32 var, int shift)
562 if (shift == 0) {
563 tcg_gen_andi_i32(cpu_CF, var, 1);
564 } else {
565 tcg_gen_shri_i32(cpu_CF, var, shift);
566 if (shift != 31) {
567 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
572 /* Shift by immediate. Includes special handling for shift == 0. */
573 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
574 int shift, int flags)
576 switch (shiftop) {
577 case 0: /* LSL */
578 if (shift != 0) {
579 if (flags)
580 shifter_out_im(var, 32 - shift);
581 tcg_gen_shli_i32(var, var, shift);
583 break;
584 case 1: /* LSR */
585 if (shift == 0) {
586 if (flags) {
587 tcg_gen_shri_i32(cpu_CF, var, 31);
589 tcg_gen_movi_i32(var, 0);
590 } else {
591 if (flags)
592 shifter_out_im(var, shift - 1);
593 tcg_gen_shri_i32(var, var, shift);
595 break;
596 case 2: /* ASR */
597 if (shift == 0)
598 shift = 32;
599 if (flags)
600 shifter_out_im(var, shift - 1);
601 if (shift == 32)
602 shift = 31;
603 tcg_gen_sari_i32(var, var, shift);
604 break;
605 case 3: /* ROR/RRX */
606 if (shift != 0) {
607 if (flags)
608 shifter_out_im(var, shift - 1);
609 tcg_gen_rotri_i32(var, var, shift); break;
610 } else {
611 TCGv_i32 tmp = tcg_temp_new_i32();
612 tcg_gen_shli_i32(tmp, cpu_CF, 31);
613 if (flags)
614 shifter_out_im(var, 0);
615 tcg_gen_shri_i32(var, var, 1);
616 tcg_gen_or_i32(var, var, tmp);
617 tcg_temp_free_i32(tmp);
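/* The shift == 0 special cases above follow the immediate-shift
 * encodings: LSR #0 really means LSR #32 (result 0, C = old bit 31),
 * ASR #0 means ASR #32 (result is all copies of the sign bit, done here
 * by shifting by 31 after capturing C), and ROR #0 encodes RRX, a
 * one-bit rotate right through the carry flag.
 */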
622 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
623 TCGv_i32 shift, int flags)
625 if (flags) {
626 switch (shiftop) {
627 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
628 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
629 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
630 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
632 } else {
633 switch (shiftop) {
634 case 0:
635 gen_shl(var, var, shift);
636 break;
637 case 1:
638 gen_shr(var, var, shift);
639 break;
640 case 2:
641 gen_sar(var, var, shift);
642 break;
643 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
644 tcg_gen_rotr_i32(var, var, shift); break;
647 tcg_temp_free_i32(shift);
650 #define PAS_OP(pfx) \
651 switch (op2) { \
652 case 0: gen_pas_helper(glue(pfx,add16)); break; \
653 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
654 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
655 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
656 case 4: gen_pas_helper(glue(pfx,add8)); break; \
657 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
659 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
661 TCGv_ptr tmp;
663 switch (op1) {
664 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
665 case 1:
666 tmp = tcg_temp_new_ptr();
667 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
668 PAS_OP(s)
669 tcg_temp_free_ptr(tmp);
670 break;
671 case 5:
672 tmp = tcg_temp_new_ptr();
673 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
674 PAS_OP(u)
675 tcg_temp_free_ptr(tmp);
676 break;
677 #undef gen_pas_helper
678 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
679 case 2:
680 PAS_OP(q);
681 break;
682 case 3:
683 PAS_OP(sh);
684 break;
685 case 6:
686 PAS_OP(uq);
687 break;
688 case 7:
689 PAS_OP(uh);
690 break;
691 #undef gen_pas_helper
694 #undef PAS_OP
696 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
697 #define PAS_OP(pfx) \
698 switch (op1) { \
699 case 0: gen_pas_helper(glue(pfx,add8)); break; \
700 case 1: gen_pas_helper(glue(pfx,add16)); break; \
701 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
702 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
703 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
704 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
706 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
708 TCGv_ptr tmp;
710 switch (op2) {
711 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
712 case 0:
713 tmp = tcg_temp_new_ptr();
714 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
715 PAS_OP(s)
716 tcg_temp_free_ptr(tmp);
717 break;
718 case 4:
719 tmp = tcg_temp_new_ptr();
720 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
721 PAS_OP(u)
722 tcg_temp_free_ptr(tmp);
723 break;
724 #undef gen_pas_helper
725 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
726 case 1:
727 PAS_OP(q);
728 break;
729 case 2:
730 PAS_OP(sh);
731 break;
732 case 5:
733 PAS_OP(uq);
734 break;
735 case 6:
736 PAS_OP(uh);
737 break;
738 #undef gen_pas_helper
741 #undef PAS_OP
744 * Generate a conditional based on ARM condition code cc.
745 * This is common between ARM and Aarch64 targets.
747 void arm_test_cc(DisasCompare *cmp, int cc)
749 TCGv_i32 value;
750 TCGCond cond;
751 bool global = true;
753 switch (cc) {
754 case 0: /* eq: Z */
755 case 1: /* ne: !Z */
756 cond = TCG_COND_EQ;
757 value = cpu_ZF;
758 break;
760 case 2: /* cs: C */
761 case 3: /* cc: !C */
762 cond = TCG_COND_NE;
763 value = cpu_CF;
764 break;
766 case 4: /* mi: N */
767 case 5: /* pl: !N */
768 cond = TCG_COND_LT;
769 value = cpu_NF;
770 break;
772 case 6: /* vs: V */
773 case 7: /* vc: !V */
774 cond = TCG_COND_LT;
775 value = cpu_VF;
776 break;
778 case 8: /* hi: C && !Z */
779 case 9: /* ls: !C || Z -> !(C && !Z) */
780 cond = TCG_COND_NE;
781 value = tcg_temp_new_i32();
782 global = false;
783 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
784 ZF is non-zero for !Z; so AND the two subexpressions. */
785 tcg_gen_neg_i32(value, cpu_CF);
786 tcg_gen_and_i32(value, value, cpu_ZF);
787 break;
789 case 10: /* ge: N == V -> N ^ V == 0 */
790 case 11: /* lt: N != V -> N ^ V != 0 */
791 /* Since we're only interested in the sign bit, == 0 is >= 0. */
792 cond = TCG_COND_GE;
793 value = tcg_temp_new_i32();
794 global = false;
795 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
796 break;
798 case 12: /* gt: !Z && N == V */
799 case 13: /* le: Z || N != V */
800 cond = TCG_COND_NE;
801 value = tcg_temp_new_i32();
802 global = false;
803 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
804 * the sign bit then AND with ZF to yield the result. */
805 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
806 tcg_gen_sari_i32(value, value, 31);
807 tcg_gen_andc_i32(value, cpu_ZF, value);
808 break;
810 case 14: /* always */
811 case 15: /* always */
812 /* Use the ALWAYS condition, which will fold early.
813 * It doesn't matter what we use for the value. */
814 cond = TCG_COND_ALWAYS;
815 value = cpu_ZF;
816 goto no_invert;
818 default:
819 fprintf(stderr, "Bad condition code 0x%x\n", cc);
820 abort();
823 if (cc & 1) {
824 cond = tcg_invert_cond(cond);
827 no_invert:
828 cmp->cond = cond;
829 cmp->value = value;
830 cmp->value_global = global;
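/* Example of the comparison built above: for cc == 8 (hi) the value is
 * (-CF) & ZF, which is non-zero exactly when C is set and Z is clear,
 * tested with TCG_COND_NE; cc == 9 (ls) is the same value with the
 * condition inverted to EQ by the final "cc & 1" step.
 */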
833 void arm_free_cc(DisasCompare *cmp)
835 if (!cmp->value_global) {
836 tcg_temp_free_i32(cmp->value);
840 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
842 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
845 void arm_gen_test_cc(int cc, TCGLabel *label)
847 DisasCompare cmp;
848 arm_test_cc(&cmp, cc);
849 arm_jump_cc(&cmp, label);
850 arm_free_cc(&cmp);
853 static const uint8_t table_logic_cc[16] = {
854 1, /* and */
855 1, /* xor */
856 0, /* sub */
857 0, /* rsb */
858 0, /* add */
859 0, /* adc */
860 0, /* sbc */
861 0, /* rsc */
862 1, /* andl */
863 1, /* xorl */
864 0, /* cmp */
865 0, /* cmn */
866 1, /* orr */
867 1, /* mov */
868 1, /* bic */
869 1, /* mvn */
872 /* Set PC and Thumb state from an immediate address. */
873 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
875 TCGv_i32 tmp;
877 s->is_jmp = DISAS_JUMP;
878 if (s->thumb != (addr & 1)) {
879 tmp = tcg_temp_new_i32();
880 tcg_gen_movi_i32(tmp, addr & 1);
881 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
882 tcg_temp_free_i32(tmp);
884 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
887 /* Set PC and Thumb state from var. var is marked as dead. */
888 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
890 s->is_jmp = DISAS_JUMP;
891 tcg_gen_andi_i32(cpu_R[15], var, ~1);
892 tcg_gen_andi_i32(var, var, 1);
893 store_cpu_field(var, thumb);
896 /* Variant of store_reg which uses branch&exchange logic when storing
897 to r15 in ARM architecture v7 and above. The source must be a temporary
898 and will be marked as dead. */
899 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
901 if (reg == 15 && ENABLE_ARCH_7) {
902 gen_bx(s, var);
903 } else {
904 store_reg(s, reg, var);
908 /* Variant of store_reg which uses branch&exchange logic when storing
909 * to r15 in ARM architecture v5T and above. This is used for storing
910 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
911 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
912 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
914 if (reg == 15 && ENABLE_ARCH_5) {
915 gen_bx(s, var);
916 } else {
917 store_reg(s, reg, var);
921 #ifdef CONFIG_USER_ONLY
922 #define IS_USER_ONLY 1
923 #else
924 #define IS_USER_ONLY 0
925 #endif
927 /* Abstractions of "generate code to do a guest load/store for
928 * AArch32", where a vaddr is always 32 bits (and is zero
929 * extended if we're a 64 bit core) and data is also
930 * 32 bits unless specifically doing a 64 bit access.
931 * These functions work like tcg_gen_qemu_{ld,st}* except
932 * that the address argument is TCGv_i32 rather than TCGv.
934 #if TARGET_LONG_BITS == 32
936 #define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
937 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
938 TCGv_i32 addr, int index) \
940 TCGMemOp opc = (OPC) | s->be_data; \
941 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
942 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
943 TCGv addr_be = tcg_temp_new(); \
944 tcg_gen_xori_i32(addr_be, addr, BE32_XOR); \
945 tcg_gen_qemu_ld_i32(val, addr_be, index, opc); \
946 tcg_temp_free(addr_be); \
947 return; \
949 tcg_gen_qemu_ld_i32(val, addr, index, opc); \
952 #define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
953 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
954 TCGv_i32 addr, int index) \
956 TCGMemOp opc = (OPC) | s->be_data; \
957 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
958 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
959 TCGv addr_be = tcg_temp_new(); \
960 tcg_gen_xori_i32(addr_be, addr, BE32_XOR); \
961 tcg_gen_qemu_st_i32(val, addr_be, index, opc); \
962 tcg_temp_free(addr_be); \
963 return; \
965 tcg_gen_qemu_st_i32(val, addr, index, opc); \
968 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
969 TCGv_i32 addr, int index)
971 TCGMemOp opc = MO_Q | s->be_data;
972 tcg_gen_qemu_ld_i64(val, addr, index, opc);
973 /* Not needed for user-mode BE32, where we use MO_BE instead. */
974 if (!IS_USER_ONLY && s->sctlr_b) {
975 tcg_gen_rotri_i64(val, val, 32);
979 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
980 TCGv_i32 addr, int index)
982 TCGMemOp opc = MO_Q | s->be_data;
983 /* Not needed for user-mode BE32, where we use MO_BE instead. */
984 if (!IS_USER_ONLY && s->sctlr_b) {
985 TCGv_i64 tmp = tcg_temp_new_i64();
986 tcg_gen_rotri_i64(tmp, val, 32);
987 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
988 tcg_temp_free_i64(tmp);
989 return;
991 tcg_gen_qemu_st_i64(val, addr, index, opc);
994 #else
996 #define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
997 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
998 TCGv_i32 addr, int index) \
1000 TCGMemOp opc = (OPC) | s->be_data; \
1001 TCGv addr64 = tcg_temp_new(); \
1002 tcg_gen_extu_i32_i64(addr64, addr); \
1003 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
1004 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
1005 tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
1007 tcg_gen_qemu_ld_i32(val, addr64, index, opc); \
1008 tcg_temp_free(addr64); \
1011 #define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
1012 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1013 TCGv_i32 addr, int index) \
1015 TCGMemOp opc = (OPC) | s->be_data; \
1016 TCGv addr64 = tcg_temp_new(); \
1017 tcg_gen_extu_i32_i64(addr64, addr); \
1018 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
1019 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
1020 tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
1022 tcg_gen_qemu_st_i32(val, addr64, index, opc); \
1023 tcg_temp_free(addr64); \
1026 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1027 TCGv_i32 addr, int index)
1029 TCGMemOp opc = MO_Q | s->be_data;
1030 TCGv addr64 = tcg_temp_new();
1031 tcg_gen_extu_i32_i64(addr64, addr);
1032 tcg_gen_qemu_ld_i64(val, addr64, index, opc);
1034 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1035 if (!IS_USER_ONLY && s->sctlr_b) {
1036 tcg_gen_rotri_i64(val, val, 32);
1038 tcg_temp_free(addr64);
1041 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1042 TCGv_i32 addr, int index)
1044 TCGMemOp opc = MO_Q | s->be_data;
1045 TCGv addr64 = tcg_temp_new();
1046 tcg_gen_extu_i32_i64(addr64, addr);
1048 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1049 if (!IS_USER_ONLY && s->sctlr_b) {
1050 TCGv tmp = tcg_temp_new();
1051 tcg_gen_rotri_i64(tmp, val, 32);
1052 tcg_gen_qemu_st_i64(tmp, addr64, index, opc);
1053 tcg_temp_free(tmp);
1054 } else {
1055 tcg_gen_qemu_st_i64(val, addr64, index, opc);
1057 tcg_temp_free(addr64);
1060 #endif
1062 DO_GEN_LD(8s, MO_SB, 3)
1063 DO_GEN_LD(8u, MO_UB, 3)
1064 DO_GEN_LD(16s, MO_SW, 2)
1065 DO_GEN_LD(16u, MO_UW, 2)
1066 DO_GEN_LD(32u, MO_UL, 0)
1067 /* 'a' variants include an alignment check */
1068 DO_GEN_LD(16ua, MO_UW | MO_ALIGN, 2)
1069 DO_GEN_LD(32ua, MO_UL | MO_ALIGN, 0)
1070 DO_GEN_ST(8, MO_UB, 3)
1071 DO_GEN_ST(16, MO_UW, 2)
1072 DO_GEN_ST(32, MO_UL, 0)
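/* The BE32_XOR argument implements SCTLR.B ("BE32") big-endian mode for
 * system emulation: when s->sctlr_b is set, byte accesses are performed
 * at address ^ 3 and halfword accesses at address ^ 2, while aligned
 * word accesses need no adjustment (XOR of 0); 64-bit accesses instead
 * swap the two words by rotating the value, as in gen_aa32_ld64/st64
 * above.  User-mode BE32 is handled by selecting MO_BE data instead.
 */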
1074 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
1076 tcg_gen_movi_i32(cpu_R[15], val);
1079 static inline void gen_hvc(DisasContext *s, int imm16)
1081 /* The pre HVC helper handles cases when HVC gets trapped
1082 * as an undefined insn by runtime configuration (ie before
1083 * the insn really executes).
1085 gen_set_pc_im(s, s->pc - 4);
1086 gen_helper_pre_hvc(cpu_env);
1087 /* Otherwise we will treat this as a real exception which
1088 * happens after execution of the insn. (The distinction matters
1089 * for the PC value reported to the exception handler and also
1090 * for single stepping.)
1092 s->svc_imm = imm16;
1093 gen_set_pc_im(s, s->pc);
1094 s->is_jmp = DISAS_HVC;
1097 static inline void gen_smc(DisasContext *s)
1099 /* As with HVC, we may take an exception either before or after
1100 * the insn executes.
1102 TCGv_i32 tmp;
1104 gen_set_pc_im(s, s->pc - 4);
1105 tmp = tcg_const_i32(syn_aa32_smc());
1106 gen_helper_pre_smc(cpu_env, tmp);
1107 tcg_temp_free_i32(tmp);
1108 gen_set_pc_im(s, s->pc);
1109 s->is_jmp = DISAS_SMC;
1112 static inline void
1113 gen_set_condexec (DisasContext *s)
1115 if (s->condexec_mask) {
1116 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
1117 TCGv_i32 tmp = tcg_temp_new_i32();
1118 tcg_gen_movi_i32(tmp, val);
1119 store_cpu_field(tmp, condexec_bits);
1123 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1125 gen_set_condexec(s);
1126 gen_set_pc_im(s, s->pc - offset);
1127 gen_exception_internal(excp);
1128 s->is_jmp = DISAS_JUMP;
1131 static void gen_exception_insn(DisasContext *s, int offset, int excp,
1132 int syn, uint32_t target_el)
1134 gen_set_condexec(s);
1135 gen_set_pc_im(s, s->pc - offset);
1136 gen_exception(excp, syn, target_el);
1137 s->is_jmp = DISAS_JUMP;
1140 /* Force a TB lookup after an instruction that changes the CPU state. */
1141 static inline void gen_lookup_tb(DisasContext *s)
1143 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
1144 s->is_jmp = DISAS_JUMP;
1147 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1148 TCGv_i32 var)
1150 int val, rm, shift, shiftop;
1151 TCGv_i32 offset;
1153 if (!(insn & (1 << 25))) {
1154 /* immediate */
1155 val = insn & 0xfff;
1156 if (!(insn & (1 << 23)))
1157 val = -val;
1158 if (val != 0)
1159 tcg_gen_addi_i32(var, var, val);
1160 } else {
1161 /* shift/register */
1162 rm = (insn) & 0xf;
1163 shift = (insn >> 7) & 0x1f;
1164 shiftop = (insn >> 5) & 3;
1165 offset = load_reg(s, rm);
1166 gen_arm_shift_im(offset, shiftop, shift, 0);
1167 if (!(insn & (1 << 23)))
1168 tcg_gen_sub_i32(var, var, offset);
1169 else
1170 tcg_gen_add_i32(var, var, offset);
1171 tcg_temp_free_i32(offset);
1175 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1176 int extra, TCGv_i32 var)
1178 int val, rm;
1179 TCGv_i32 offset;
1181 if (insn & (1 << 22)) {
1182 /* immediate */
1183 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1184 if (!(insn & (1 << 23)))
1185 val = -val;
1186 val += extra;
1187 if (val != 0)
1188 tcg_gen_addi_i32(var, var, val);
1189 } else {
1190 /* register */
1191 if (extra)
1192 tcg_gen_addi_i32(var, var, extra);
1193 rm = (insn) & 0xf;
1194 offset = load_reg(s, rm);
1195 if (!(insn & (1 << 23)))
1196 tcg_gen_sub_i32(var, var, offset);
1197 else
1198 tcg_gen_add_i32(var, var, offset);
1199 tcg_temp_free_i32(offset);
1203 static TCGv_ptr get_fpstatus_ptr(int neon)
1205 TCGv_ptr statusptr = tcg_temp_new_ptr();
1206 int offset;
1207 if (neon) {
1208 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1209 } else {
1210 offset = offsetof(CPUARMState, vfp.fp_status);
1212 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1213 return statusptr;
1216 #define VFP_OP2(name) \
1217 static inline void gen_vfp_##name(int dp) \
1219 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1220 if (dp) { \
1221 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1222 } else { \
1223 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1225 tcg_temp_free_ptr(fpst); \
1228 VFP_OP2(add)
1229 VFP_OP2(sub)
1230 VFP_OP2(mul)
1231 VFP_OP2(div)
1233 #undef VFP_OP2
1235 static inline void gen_vfp_F1_mul(int dp)
1237 /* Like gen_vfp_mul() but put result in F1 */
1238 TCGv_ptr fpst = get_fpstatus_ptr(0);
1239 if (dp) {
1240 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1241 } else {
1242 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1244 tcg_temp_free_ptr(fpst);
1247 static inline void gen_vfp_F1_neg(int dp)
1249 /* Like gen_vfp_neg() but put result in F1 */
1250 if (dp) {
1251 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1252 } else {
1253 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1257 static inline void gen_vfp_abs(int dp)
1259 if (dp)
1260 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1261 else
1262 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1265 static inline void gen_vfp_neg(int dp)
1267 if (dp)
1268 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1269 else
1270 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1273 static inline void gen_vfp_sqrt(int dp)
1275 if (dp)
1276 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1277 else
1278 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1281 static inline void gen_vfp_cmp(int dp)
1283 if (dp)
1284 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1285 else
1286 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1289 static inline void gen_vfp_cmpe(int dp)
1291 if (dp)
1292 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1293 else
1294 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1297 static inline void gen_vfp_F1_ld0(int dp)
1299 if (dp)
1300 tcg_gen_movi_i64(cpu_F1d, 0);
1301 else
1302 tcg_gen_movi_i32(cpu_F1s, 0);
1305 #define VFP_GEN_ITOF(name) \
1306 static inline void gen_vfp_##name(int dp, int neon) \
1308 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1309 if (dp) { \
1310 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1311 } else { \
1312 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1314 tcg_temp_free_ptr(statusptr); \
1317 VFP_GEN_ITOF(uito)
1318 VFP_GEN_ITOF(sito)
1319 #undef VFP_GEN_ITOF
1321 #define VFP_GEN_FTOI(name) \
1322 static inline void gen_vfp_##name(int dp, int neon) \
1324 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1325 if (dp) { \
1326 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1327 } else { \
1328 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1330 tcg_temp_free_ptr(statusptr); \
1333 VFP_GEN_FTOI(toui)
1334 VFP_GEN_FTOI(touiz)
1335 VFP_GEN_FTOI(tosi)
1336 VFP_GEN_FTOI(tosiz)
1337 #undef VFP_GEN_FTOI
1339 #define VFP_GEN_FIX(name, round) \
1340 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1342 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1343 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1344 if (dp) { \
1345 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1346 statusptr); \
1347 } else { \
1348 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1349 statusptr); \
1351 tcg_temp_free_i32(tmp_shift); \
1352 tcg_temp_free_ptr(statusptr); \
1354 VFP_GEN_FIX(tosh, _round_to_zero)
1355 VFP_GEN_FIX(tosl, _round_to_zero)
1356 VFP_GEN_FIX(touh, _round_to_zero)
1357 VFP_GEN_FIX(toul, _round_to_zero)
1358 VFP_GEN_FIX(shto, )
1359 VFP_GEN_FIX(slto, )
1360 VFP_GEN_FIX(uhto, )
1361 VFP_GEN_FIX(ulto, )
1362 #undef VFP_GEN_FIX
1364 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1366 if (dp) {
1367 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
1368 } else {
1369 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
1373 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1375 if (dp) {
1376 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
1377 } else {
1378 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
1382 static inline long
1383 vfp_reg_offset (int dp, int reg)
1385 if (dp)
1386 return offsetof(CPUARMState, vfp.regs[reg]);
1387 else if (reg & 1) {
1388 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1389 + offsetof(CPU_DoubleU, l.upper);
1390 } else {
1391 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1392 + offsetof(CPU_DoubleU, l.lower);
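/* The single-precision registers alias the double-precision bank:
 * s[2n] and s[2n+1] are the low and high 32-bit halves of d[n], with
 * CPU_DoubleU's l.lower/l.upper fields taking care of host endianness.
 * For example vfp_reg_offset(0, 3) is the offset of the upper word of
 * vfp.regs[1], i.e. s3 is the top half of d1.
 */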
1396 /* Return the offset of a 32-bit piece of a NEON register.
1397 zero is the least significant end of the register. */
1398 static inline long
1399 neon_reg_offset (int reg, int n)
1401 int sreg;
1402 sreg = reg * 2 + n;
1403 return vfp_reg_offset(0, sreg);
1406 static TCGv_i32 neon_load_reg(int reg, int pass)
1408 TCGv_i32 tmp = tcg_temp_new_i32();
1409 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1410 return tmp;
1413 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1415 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1416 tcg_temp_free_i32(var);
1419 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1421 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1424 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1426 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1429 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1430 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1431 #define tcg_gen_st_f32 tcg_gen_st_i32
1432 #define tcg_gen_st_f64 tcg_gen_st_i64
1434 static inline void gen_mov_F0_vreg(int dp, int reg)
1436 if (dp)
1437 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1438 else
1439 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1442 static inline void gen_mov_F1_vreg(int dp, int reg)
1444 if (dp)
1445 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1446 else
1447 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1450 static inline void gen_mov_vreg_F0(int dp, int reg)
1452 if (dp)
1453 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1454 else
1455 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1458 #define ARM_CP_RW_BIT (1 << 20)
1460 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1462 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1465 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1467 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1470 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1472 TCGv_i32 var = tcg_temp_new_i32();
1473 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1474 return var;
1477 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1479 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1480 tcg_temp_free_i32(var);
1483 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1485 iwmmxt_store_reg(cpu_M0, rn);
1488 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1490 iwmmxt_load_reg(cpu_M0, rn);
1493 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1495 iwmmxt_load_reg(cpu_V1, rn);
1496 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1499 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1501 iwmmxt_load_reg(cpu_V1, rn);
1502 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1505 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1507 iwmmxt_load_reg(cpu_V1, rn);
1508 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1511 #define IWMMXT_OP(name) \
1512 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1514 iwmmxt_load_reg(cpu_V1, rn); \
1515 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1518 #define IWMMXT_OP_ENV(name) \
1519 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1521 iwmmxt_load_reg(cpu_V1, rn); \
1522 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1525 #define IWMMXT_OP_ENV_SIZE(name) \
1526 IWMMXT_OP_ENV(name##b) \
1527 IWMMXT_OP_ENV(name##w) \
1528 IWMMXT_OP_ENV(name##l)
1530 #define IWMMXT_OP_ENV1(name) \
1531 static inline void gen_op_iwmmxt_##name##_M0(void) \
1533 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1536 IWMMXT_OP(maddsq)
1537 IWMMXT_OP(madduq)
1538 IWMMXT_OP(sadb)
1539 IWMMXT_OP(sadw)
1540 IWMMXT_OP(mulslw)
1541 IWMMXT_OP(mulshw)
1542 IWMMXT_OP(mululw)
1543 IWMMXT_OP(muluhw)
1544 IWMMXT_OP(macsw)
1545 IWMMXT_OP(macuw)
1547 IWMMXT_OP_ENV_SIZE(unpackl)
1548 IWMMXT_OP_ENV_SIZE(unpackh)
1550 IWMMXT_OP_ENV1(unpacklub)
1551 IWMMXT_OP_ENV1(unpackluw)
1552 IWMMXT_OP_ENV1(unpacklul)
1553 IWMMXT_OP_ENV1(unpackhub)
1554 IWMMXT_OP_ENV1(unpackhuw)
1555 IWMMXT_OP_ENV1(unpackhul)
1556 IWMMXT_OP_ENV1(unpacklsb)
1557 IWMMXT_OP_ENV1(unpacklsw)
1558 IWMMXT_OP_ENV1(unpacklsl)
1559 IWMMXT_OP_ENV1(unpackhsb)
1560 IWMMXT_OP_ENV1(unpackhsw)
1561 IWMMXT_OP_ENV1(unpackhsl)
1563 IWMMXT_OP_ENV_SIZE(cmpeq)
1564 IWMMXT_OP_ENV_SIZE(cmpgtu)
1565 IWMMXT_OP_ENV_SIZE(cmpgts)
1567 IWMMXT_OP_ENV_SIZE(mins)
1568 IWMMXT_OP_ENV_SIZE(minu)
1569 IWMMXT_OP_ENV_SIZE(maxs)
1570 IWMMXT_OP_ENV_SIZE(maxu)
1572 IWMMXT_OP_ENV_SIZE(subn)
1573 IWMMXT_OP_ENV_SIZE(addn)
1574 IWMMXT_OP_ENV_SIZE(subu)
1575 IWMMXT_OP_ENV_SIZE(addu)
1576 IWMMXT_OP_ENV_SIZE(subs)
1577 IWMMXT_OP_ENV_SIZE(adds)
1579 IWMMXT_OP_ENV(avgb0)
1580 IWMMXT_OP_ENV(avgb1)
1581 IWMMXT_OP_ENV(avgw0)
1582 IWMMXT_OP_ENV(avgw1)
1584 IWMMXT_OP_ENV(packuw)
1585 IWMMXT_OP_ENV(packul)
1586 IWMMXT_OP_ENV(packuq)
1587 IWMMXT_OP_ENV(packsw)
1588 IWMMXT_OP_ENV(packsl)
1589 IWMMXT_OP_ENV(packsq)
1591 static void gen_op_iwmmxt_set_mup(void)
1593 TCGv_i32 tmp;
1594 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1595 tcg_gen_ori_i32(tmp, tmp, 2);
1596 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1599 static void gen_op_iwmmxt_set_cup(void)
1601 TCGv_i32 tmp;
1602 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1603 tcg_gen_ori_i32(tmp, tmp, 1);
1604 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1607 static void gen_op_iwmmxt_setpsr_nz(void)
1609 TCGv_i32 tmp = tcg_temp_new_i32();
1610 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1611 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1614 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1616 iwmmxt_load_reg(cpu_V1, rn);
1617 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1618 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1621 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1622 TCGv_i32 dest)
1624 int rd;
1625 uint32_t offset;
1626 TCGv_i32 tmp;
1628 rd = (insn >> 16) & 0xf;
1629 tmp = load_reg(s, rd);
1631 offset = (insn & 0xff) << ((insn >> 7) & 2);
1632 if (insn & (1 << 24)) {
1633 /* Pre indexed */
1634 if (insn & (1 << 23))
1635 tcg_gen_addi_i32(tmp, tmp, offset);
1636 else
1637 tcg_gen_addi_i32(tmp, tmp, -offset);
1638 tcg_gen_mov_i32(dest, tmp);
1639 if (insn & (1 << 21))
1640 store_reg(s, rd, tmp);
1641 else
1642 tcg_temp_free_i32(tmp);
1643 } else if (insn & (1 << 21)) {
1644 /* Post indexed */
1645 tcg_gen_mov_i32(dest, tmp);
1646 if (insn & (1 << 23))
1647 tcg_gen_addi_i32(tmp, tmp, offset);
1648 else
1649 tcg_gen_addi_i32(tmp, tmp, -offset);
1650 store_reg(s, rd, tmp);
1651 } else if (!(insn & (1 << 23)))
1652 return 1;
1653 return 0;
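/* Addressing-mode summary for gen_iwmmxt_address() above: the 8-bit
 * immediate is scaled by 1 or 4 depending on insn bit 8 (that is what
 * the "(insn >> 7) & 2" shift amount selects), bit 23 chooses add vs
 * subtract, bit 24 selects pre-indexing (with optional base writeback
 * via bit 21), and bit 21 alone selects post-indexing with writeback;
 * the remaining form is only accepted when bit 23 is set.
 */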
1656 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1658 int rd = (insn >> 0) & 0xf;
1659 TCGv_i32 tmp;
1661 if (insn & (1 << 8)) {
1662 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1663 return 1;
1664 } else {
1665 tmp = iwmmxt_load_creg(rd);
1667 } else {
1668 tmp = tcg_temp_new_i32();
1669 iwmmxt_load_reg(cpu_V0, rd);
1670 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1672 tcg_gen_andi_i32(tmp, tmp, mask);
1673 tcg_gen_mov_i32(dest, tmp);
1674 tcg_temp_free_i32(tmp);
1675 return 0;
1678 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1679 (i.e. an undefined instruction). */
1680 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1682 int rd, wrd;
1683 int rdhi, rdlo, rd0, rd1, i;
1684 TCGv_i32 addr;
1685 TCGv_i32 tmp, tmp2, tmp3;
1687 if ((insn & 0x0e000e00) == 0x0c000000) {
1688 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1689 wrd = insn & 0xf;
1690 rdlo = (insn >> 12) & 0xf;
1691 rdhi = (insn >> 16) & 0xf;
1692 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1693 iwmmxt_load_reg(cpu_V0, wrd);
1694 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1695 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1696 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1697 } else { /* TMCRR */
1698 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1699 iwmmxt_store_reg(cpu_V0, wrd);
1700 gen_op_iwmmxt_set_mup();
1702 return 0;
1705 wrd = (insn >> 12) & 0xf;
1706 addr = tcg_temp_new_i32();
1707 if (gen_iwmmxt_address(s, insn, addr)) {
1708 tcg_temp_free_i32(addr);
1709 return 1;
1711 if (insn & ARM_CP_RW_BIT) {
1712 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1713 tmp = tcg_temp_new_i32();
1714 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1715 iwmmxt_store_creg(wrd, tmp);
1716 } else {
1717 i = 1;
1718 if (insn & (1 << 8)) {
1719 if (insn & (1 << 22)) { /* WLDRD */
1720 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1721 i = 0;
1722 } else { /* WLDRW wRd */
1723 tmp = tcg_temp_new_i32();
1724 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1726 } else {
1727 tmp = tcg_temp_new_i32();
1728 if (insn & (1 << 22)) { /* WLDRH */
1729 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1730 } else { /* WLDRB */
1731 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1734 if (i) {
1735 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1736 tcg_temp_free_i32(tmp);
1738 gen_op_iwmmxt_movq_wRn_M0(wrd);
1740 } else {
1741 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1742 tmp = iwmmxt_load_creg(wrd);
1743 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1744 } else {
1745 gen_op_iwmmxt_movq_M0_wRn(wrd);
1746 tmp = tcg_temp_new_i32();
1747 if (insn & (1 << 8)) {
1748 if (insn & (1 << 22)) { /* WSTRD */
1749 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1750 } else { /* WSTRW wRd */
1751 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1752 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1754 } else {
1755 if (insn & (1 << 22)) { /* WSTRH */
1756 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1757 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1758 } else { /* WSTRB */
1759 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1760 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1764 tcg_temp_free_i32(tmp);
1766 tcg_temp_free_i32(addr);
1767 return 0;
1770 if ((insn & 0x0f000000) != 0x0e000000)
1771 return 1;
1773 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1774 case 0x000: /* WOR */
1775 wrd = (insn >> 12) & 0xf;
1776 rd0 = (insn >> 0) & 0xf;
1777 rd1 = (insn >> 16) & 0xf;
1778 gen_op_iwmmxt_movq_M0_wRn(rd0);
1779 gen_op_iwmmxt_orq_M0_wRn(rd1);
1780 gen_op_iwmmxt_setpsr_nz();
1781 gen_op_iwmmxt_movq_wRn_M0(wrd);
1782 gen_op_iwmmxt_set_mup();
1783 gen_op_iwmmxt_set_cup();
1784 break;
1785 case 0x011: /* TMCR */
1786 if (insn & 0xf)
1787 return 1;
1788 rd = (insn >> 12) & 0xf;
1789 wrd = (insn >> 16) & 0xf;
1790 switch (wrd) {
1791 case ARM_IWMMXT_wCID:
1792 case ARM_IWMMXT_wCASF:
1793 break;
1794 case ARM_IWMMXT_wCon:
1795 gen_op_iwmmxt_set_cup();
1796 /* Fall through. */
1797 case ARM_IWMMXT_wCSSF:
1798 tmp = iwmmxt_load_creg(wrd);
1799 tmp2 = load_reg(s, rd);
1800 tcg_gen_andc_i32(tmp, tmp, tmp2);
1801 tcg_temp_free_i32(tmp2);
1802 iwmmxt_store_creg(wrd, tmp);
1803 break;
1804 case ARM_IWMMXT_wCGR0:
1805 case ARM_IWMMXT_wCGR1:
1806 case ARM_IWMMXT_wCGR2:
1807 case ARM_IWMMXT_wCGR3:
1808 gen_op_iwmmxt_set_cup();
1809 tmp = load_reg(s, rd);
1810 iwmmxt_store_creg(wrd, tmp);
1811 break;
1812 default:
1813 return 1;
1815 break;
1816 case 0x100: /* WXOR */
1817 wrd = (insn >> 12) & 0xf;
1818 rd0 = (insn >> 0) & 0xf;
1819 rd1 = (insn >> 16) & 0xf;
1820 gen_op_iwmmxt_movq_M0_wRn(rd0);
1821 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1822 gen_op_iwmmxt_setpsr_nz();
1823 gen_op_iwmmxt_movq_wRn_M0(wrd);
1824 gen_op_iwmmxt_set_mup();
1825 gen_op_iwmmxt_set_cup();
1826 break;
1827 case 0x111: /* TMRC */
1828 if (insn & 0xf)
1829 return 1;
1830 rd = (insn >> 12) & 0xf;
1831 wrd = (insn >> 16) & 0xf;
1832 tmp = iwmmxt_load_creg(wrd);
1833 store_reg(s, rd, tmp);
1834 break;
1835 case 0x300: /* WANDN */
1836 wrd = (insn >> 12) & 0xf;
1837 rd0 = (insn >> 0) & 0xf;
1838 rd1 = (insn >> 16) & 0xf;
1839 gen_op_iwmmxt_movq_M0_wRn(rd0);
1840 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1841 gen_op_iwmmxt_andq_M0_wRn(rd1);
1842 gen_op_iwmmxt_setpsr_nz();
1843 gen_op_iwmmxt_movq_wRn_M0(wrd);
1844 gen_op_iwmmxt_set_mup();
1845 gen_op_iwmmxt_set_cup();
1846 break;
1847 case 0x200: /* WAND */
1848 wrd = (insn >> 12) & 0xf;
1849 rd0 = (insn >> 0) & 0xf;
1850 rd1 = (insn >> 16) & 0xf;
1851 gen_op_iwmmxt_movq_M0_wRn(rd0);
1852 gen_op_iwmmxt_andq_M0_wRn(rd1);
1853 gen_op_iwmmxt_setpsr_nz();
1854 gen_op_iwmmxt_movq_wRn_M0(wrd);
1855 gen_op_iwmmxt_set_mup();
1856 gen_op_iwmmxt_set_cup();
1857 break;
1858 case 0x810: case 0xa10: /* WMADD */
1859 wrd = (insn >> 12) & 0xf;
1860 rd0 = (insn >> 0) & 0xf;
1861 rd1 = (insn >> 16) & 0xf;
1862 gen_op_iwmmxt_movq_M0_wRn(rd0);
1863 if (insn & (1 << 21))
1864 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1865 else
1866 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1867 gen_op_iwmmxt_movq_wRn_M0(wrd);
1868 gen_op_iwmmxt_set_mup();
1869 break;
1870 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1871 wrd = (insn >> 12) & 0xf;
1872 rd0 = (insn >> 16) & 0xf;
1873 rd1 = (insn >> 0) & 0xf;
1874 gen_op_iwmmxt_movq_M0_wRn(rd0);
1875 switch ((insn >> 22) & 3) {
1876 case 0:
1877 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1878 break;
1879 case 1:
1880 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1881 break;
1882 case 2:
1883 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1884 break;
1885 case 3:
1886 return 1;
1888 gen_op_iwmmxt_movq_wRn_M0(wrd);
1889 gen_op_iwmmxt_set_mup();
1890 gen_op_iwmmxt_set_cup();
1891 break;
1892 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1893 wrd = (insn >> 12) & 0xf;
1894 rd0 = (insn >> 16) & 0xf;
1895 rd1 = (insn >> 0) & 0xf;
1896 gen_op_iwmmxt_movq_M0_wRn(rd0);
1897 switch ((insn >> 22) & 3) {
1898 case 0:
1899 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1900 break;
1901 case 1:
1902 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1903 break;
1904 case 2:
1905 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1906 break;
1907 case 3:
1908 return 1;
1910 gen_op_iwmmxt_movq_wRn_M0(wrd);
1911 gen_op_iwmmxt_set_mup();
1912 gen_op_iwmmxt_set_cup();
1913 break;
1914 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1915 wrd = (insn >> 12) & 0xf;
1916 rd0 = (insn >> 16) & 0xf;
1917 rd1 = (insn >> 0) & 0xf;
1918 gen_op_iwmmxt_movq_M0_wRn(rd0);
1919 if (insn & (1 << 22))
1920 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1921 else
1922 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1923 if (!(insn & (1 << 20)))
1924 gen_op_iwmmxt_addl_M0_wRn(wrd);
1925 gen_op_iwmmxt_movq_wRn_M0(wrd);
1926 gen_op_iwmmxt_set_mup();
1927 break;
1928 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1929 wrd = (insn >> 12) & 0xf;
1930 rd0 = (insn >> 16) & 0xf;
1931 rd1 = (insn >> 0) & 0xf;
1932 gen_op_iwmmxt_movq_M0_wRn(rd0);
1933 if (insn & (1 << 21)) {
1934 if (insn & (1 << 20))
1935 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1936 else
1937 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1938 } else {
1939 if (insn & (1 << 20))
1940 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1941 else
1942 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1944 gen_op_iwmmxt_movq_wRn_M0(wrd);
1945 gen_op_iwmmxt_set_mup();
1946 break;
1947 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1948 wrd = (insn >> 12) & 0xf;
1949 rd0 = (insn >> 16) & 0xf;
1950 rd1 = (insn >> 0) & 0xf;
1951 gen_op_iwmmxt_movq_M0_wRn(rd0);
1952 if (insn & (1 << 21))
1953 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1954 else
1955 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1956 if (!(insn & (1 << 20))) {
1957 iwmmxt_load_reg(cpu_V1, wrd);
1958 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1960 gen_op_iwmmxt_movq_wRn_M0(wrd);
1961 gen_op_iwmmxt_set_mup();
1962 break;
1963 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1964 wrd = (insn >> 12) & 0xf;
1965 rd0 = (insn >> 16) & 0xf;
1966 rd1 = (insn >> 0) & 0xf;
1967 gen_op_iwmmxt_movq_M0_wRn(rd0);
1968 switch ((insn >> 22) & 3) {
1969 case 0:
1970 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1971 break;
1972 case 1:
1973 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1974 break;
1975 case 2:
1976 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1977 break;
1978 case 3:
1979 return 1;
1981 gen_op_iwmmxt_movq_wRn_M0(wrd);
1982 gen_op_iwmmxt_set_mup();
1983 gen_op_iwmmxt_set_cup();
1984 break;
1985 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1986 wrd = (insn >> 12) & 0xf;
1987 rd0 = (insn >> 16) & 0xf;
1988 rd1 = (insn >> 0) & 0xf;
1989 gen_op_iwmmxt_movq_M0_wRn(rd0);
1990 if (insn & (1 << 22)) {
1991 if (insn & (1 << 20))
1992 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1993 else
1994 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1995 } else {
1996 if (insn & (1 << 20))
1997 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1998 else
1999 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2001 gen_op_iwmmxt_movq_wRn_M0(wrd);
2002 gen_op_iwmmxt_set_mup();
2003 gen_op_iwmmxt_set_cup();
2004 break;
2005 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2006 wrd = (insn >> 12) & 0xf;
2007 rd0 = (insn >> 16) & 0xf;
2008 rd1 = (insn >> 0) & 0xf;
2009 gen_op_iwmmxt_movq_M0_wRn(rd0);
2010 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2011 tcg_gen_andi_i32(tmp, tmp, 7);
2012 iwmmxt_load_reg(cpu_V1, rd1);
2013 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2014 tcg_temp_free_i32(tmp);
2015 gen_op_iwmmxt_movq_wRn_M0(wrd);
2016 gen_op_iwmmxt_set_mup();
2017 break;
2018 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2019 if (((insn >> 6) & 3) == 3)
2020 return 1;
2021 rd = (insn >> 12) & 0xf;
2022 wrd = (insn >> 16) & 0xf;
2023 tmp = load_reg(s, rd);
2024 gen_op_iwmmxt_movq_M0_wRn(wrd);
2025 switch ((insn >> 6) & 3) {
2026 case 0:
2027 tmp2 = tcg_const_i32(0xff);
2028 tmp3 = tcg_const_i32((insn & 7) << 3);
2029 break;
2030 case 1:
2031 tmp2 = tcg_const_i32(0xffff);
2032 tmp3 = tcg_const_i32((insn & 3) << 4);
2033 break;
2034 case 2:
2035 tmp2 = tcg_const_i32(0xffffffff);
2036 tmp3 = tcg_const_i32((insn & 1) << 5);
2037 break;
2038 default:
2039 TCGV_UNUSED_I32(tmp2);
2040 TCGV_UNUSED_I32(tmp3);
2042 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2043 tcg_temp_free_i32(tmp3);
2044 tcg_temp_free_i32(tmp2);
2045 tcg_temp_free_i32(tmp);
2046 gen_op_iwmmxt_movq_wRn_M0(wrd);
2047 gen_op_iwmmxt_set_mup();
2048 break;
2049 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2050 rd = (insn >> 12) & 0xf;
2051 wrd = (insn >> 16) & 0xf;
2052 if (rd == 15 || ((insn >> 22) & 3) == 3)
2053 return 1;
2054 gen_op_iwmmxt_movq_M0_wRn(wrd);
2055 tmp = tcg_temp_new_i32();
2056 switch ((insn >> 22) & 3) {
2057 case 0:
2058 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2059 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2060 if (insn & 8) {
2061 tcg_gen_ext8s_i32(tmp, tmp);
2062 } else {
2063 tcg_gen_andi_i32(tmp, tmp, 0xff);
2065 break;
2066 case 1:
2067 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2068 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2069 if (insn & 8) {
2070 tcg_gen_ext16s_i32(tmp, tmp);
2071 } else {
2072 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2074 break;
2075 case 2:
2076 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2077 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2078 break;
2080 store_reg(s, rd, tmp);
2081 break;
2082 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2083 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2084 return 1;
2085 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2086 switch ((insn >> 22) & 3) {
2087 case 0:
2088 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2089 break;
2090 case 1:
2091 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2092 break;
2093 case 2:
2094 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2095 break;
2097 tcg_gen_shli_i32(tmp, tmp, 28);
2098 gen_set_nzcv(tmp);
2099 tcg_temp_free_i32(tmp);
2100 break;
2101 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2102 if (((insn >> 6) & 3) == 3)
2103 return 1;
2104 rd = (insn >> 12) & 0xf;
2105 wrd = (insn >> 16) & 0xf;
2106 tmp = load_reg(s, rd);
2107 switch ((insn >> 6) & 3) {
2108 case 0:
2109 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2110 break;
2111 case 1:
2112 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2113 break;
2114 case 2:
2115 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2116 break;
2118 tcg_temp_free_i32(tmp);
2119 gen_op_iwmmxt_movq_wRn_M0(wrd);
2120 gen_op_iwmmxt_set_mup();
2121 break;
2122 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2123 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2124 return 1;
2125 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2126 tmp2 = tcg_temp_new_i32();
2127 tcg_gen_mov_i32(tmp2, tmp);
2128 switch ((insn >> 22) & 3) {
2129 case 0:
2130 for (i = 0; i < 7; i ++) {
2131 tcg_gen_shli_i32(tmp2, tmp2, 4);
2132 tcg_gen_and_i32(tmp, tmp, tmp2);
2134 break;
2135 case 1:
2136 for (i = 0; i < 3; i ++) {
2137 tcg_gen_shli_i32(tmp2, tmp2, 8);
2138 tcg_gen_and_i32(tmp, tmp, tmp2);
2140 break;
2141 case 2:
2142 tcg_gen_shli_i32(tmp2, tmp2, 16);
2143 tcg_gen_and_i32(tmp, tmp, tmp2);
2144 break;
2146 gen_set_nzcv(tmp);
2147 tcg_temp_free_i32(tmp2);
2148 tcg_temp_free_i32(tmp);
2149 break;
2150 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2151 wrd = (insn >> 12) & 0xf;
2152 rd0 = (insn >> 16) & 0xf;
2153 gen_op_iwmmxt_movq_M0_wRn(rd0);
2154 switch ((insn >> 22) & 3) {
2155 case 0:
2156 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2157 break;
2158 case 1:
2159 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2160 break;
2161 case 2:
2162 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2163 break;
2164 case 3:
2165 return 1;
2167 gen_op_iwmmxt_movq_wRn_M0(wrd);
2168 gen_op_iwmmxt_set_mup();
2169 break;
2170 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2171 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2172 return 1;
2173 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2174 tmp2 = tcg_temp_new_i32();
2175 tcg_gen_mov_i32(tmp2, tmp);
2176 switch ((insn >> 22) & 3) {
2177 case 0:
2178 for (i = 0; i < 7; i ++) {
2179 tcg_gen_shli_i32(tmp2, tmp2, 4);
2180 tcg_gen_or_i32(tmp, tmp, tmp2);
2182 break;
2183 case 1:
2184 for (i = 0; i < 3; i ++) {
2185 tcg_gen_shli_i32(tmp2, tmp2, 8);
2186 tcg_gen_or_i32(tmp, tmp, tmp2);
2188 break;
2189 case 2:
2190 tcg_gen_shli_i32(tmp2, tmp2, 16);
2191 tcg_gen_or_i32(tmp, tmp, tmp2);
2192 break;
2194 gen_set_nzcv(tmp);
2195 tcg_temp_free_i32(tmp2);
2196 tcg_temp_free_i32(tmp);
2197 break;
2198 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2199 rd = (insn >> 12) & 0xf;
2200 rd0 = (insn >> 16) & 0xf;
2201 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2202 return 1;
2203 gen_op_iwmmxt_movq_M0_wRn(rd0);
2204 tmp = tcg_temp_new_i32();
2205 switch ((insn >> 22) & 3) {
2206 case 0:
2207 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2208 break;
2209 case 1:
2210 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2211 break;
2212 case 2:
2213 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2214 break;
2216 store_reg(s, rd, tmp);
2217 break;
2218 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2219 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2220 wrd = (insn >> 12) & 0xf;
2221 rd0 = (insn >> 16) & 0xf;
2222 rd1 = (insn >> 0) & 0xf;
2223 gen_op_iwmmxt_movq_M0_wRn(rd0);
2224 switch ((insn >> 22) & 3) {
2225 case 0:
2226 if (insn & (1 << 21))
2227 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2228 else
2229 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2230 break;
2231 case 1:
2232 if (insn & (1 << 21))
2233 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2234 else
2235 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2236 break;
2237 case 2:
2238 if (insn & (1 << 21))
2239 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2240 else
2241 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2242 break;
2243 case 3:
2244 return 1;
2246 gen_op_iwmmxt_movq_wRn_M0(wrd);
2247 gen_op_iwmmxt_set_mup();
2248 gen_op_iwmmxt_set_cup();
2249 break;
2250 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2251 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2252 wrd = (insn >> 12) & 0xf;
2253 rd0 = (insn >> 16) & 0xf;
2254 gen_op_iwmmxt_movq_M0_wRn(rd0);
2255 switch ((insn >> 22) & 3) {
2256 case 0:
2257 if (insn & (1 << 21))
2258 gen_op_iwmmxt_unpacklsb_M0();
2259 else
2260 gen_op_iwmmxt_unpacklub_M0();
2261 break;
2262 case 1:
2263 if (insn & (1 << 21))
2264 gen_op_iwmmxt_unpacklsw_M0();
2265 else
2266 gen_op_iwmmxt_unpackluw_M0();
2267 break;
2268 case 2:
2269 if (insn & (1 << 21))
2270 gen_op_iwmmxt_unpacklsl_M0();
2271 else
2272 gen_op_iwmmxt_unpacklul_M0();
2273 break;
2274 case 3:
2275 return 1;
2277 gen_op_iwmmxt_movq_wRn_M0(wrd);
2278 gen_op_iwmmxt_set_mup();
2279 gen_op_iwmmxt_set_cup();
2280 break;
2281 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2282 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2283 wrd = (insn >> 12) & 0xf;
2284 rd0 = (insn >> 16) & 0xf;
2285 gen_op_iwmmxt_movq_M0_wRn(rd0);
2286 switch ((insn >> 22) & 3) {
2287 case 0:
2288 if (insn & (1 << 21))
2289 gen_op_iwmmxt_unpackhsb_M0();
2290 else
2291 gen_op_iwmmxt_unpackhub_M0();
2292 break;
2293 case 1:
2294 if (insn & (1 << 21))
2295 gen_op_iwmmxt_unpackhsw_M0();
2296 else
2297 gen_op_iwmmxt_unpackhuw_M0();
2298 break;
2299 case 2:
2300 if (insn & (1 << 21))
2301 gen_op_iwmmxt_unpackhsl_M0();
2302 else
2303 gen_op_iwmmxt_unpackhul_M0();
2304 break;
2305 case 3:
2306 return 1;
2308 gen_op_iwmmxt_movq_wRn_M0(wrd);
2309 gen_op_iwmmxt_set_mup();
2310 gen_op_iwmmxt_set_cup();
2311 break;
2312 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2313 case 0x214: case 0x614: case 0xa14: case 0xe14:
2314 if (((insn >> 22) & 3) == 0)
2315 return 1;
2316 wrd = (insn >> 12) & 0xf;
2317 rd0 = (insn >> 16) & 0xf;
2318 gen_op_iwmmxt_movq_M0_wRn(rd0);
2319 tmp = tcg_temp_new_i32();
2320 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2321 tcg_temp_free_i32(tmp);
2322 return 1;
2324 switch ((insn >> 22) & 3) {
2325 case 1:
2326 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2327 break;
2328 case 2:
2329 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2330 break;
2331 case 3:
2332 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2333 break;
2335 tcg_temp_free_i32(tmp);
2336 gen_op_iwmmxt_movq_wRn_M0(wrd);
2337 gen_op_iwmmxt_set_mup();
2338 gen_op_iwmmxt_set_cup();
2339 break;
2340 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2341 case 0x014: case 0x414: case 0x814: case 0xc14:
2342 if (((insn >> 22) & 3) == 0)
2343 return 1;
2344 wrd = (insn >> 12) & 0xf;
2345 rd0 = (insn >> 16) & 0xf;
2346 gen_op_iwmmxt_movq_M0_wRn(rd0);
2347 tmp = tcg_temp_new_i32();
2348 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2349 tcg_temp_free_i32(tmp);
2350 return 1;
2352 switch ((insn >> 22) & 3) {
2353 case 1:
2354 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2355 break;
2356 case 2:
2357 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2358 break;
2359 case 3:
2360 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2361 break;
2363 tcg_temp_free_i32(tmp);
2364 gen_op_iwmmxt_movq_wRn_M0(wrd);
2365 gen_op_iwmmxt_set_mup();
2366 gen_op_iwmmxt_set_cup();
2367 break;
2368 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2369 case 0x114: case 0x514: case 0x914: case 0xd14:
2370 if (((insn >> 22) & 3) == 0)
2371 return 1;
2372 wrd = (insn >> 12) & 0xf;
2373 rd0 = (insn >> 16) & 0xf;
2374 gen_op_iwmmxt_movq_M0_wRn(rd0);
2375 tmp = tcg_temp_new_i32();
2376 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2377 tcg_temp_free_i32(tmp);
2378 return 1;
2380 switch ((insn >> 22) & 3) {
2381 case 1:
2382 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2383 break;
2384 case 2:
2385 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2386 break;
2387 case 3:
2388 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2389 break;
2391 tcg_temp_free_i32(tmp);
2392 gen_op_iwmmxt_movq_wRn_M0(wrd);
2393 gen_op_iwmmxt_set_mup();
2394 gen_op_iwmmxt_set_cup();
2395 break;
2396 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2397 case 0x314: case 0x714: case 0xb14: case 0xf14:
2398 if (((insn >> 22) & 3) == 0)
2399 return 1;
2400 wrd = (insn >> 12) & 0xf;
2401 rd0 = (insn >> 16) & 0xf;
2402 gen_op_iwmmxt_movq_M0_wRn(rd0);
2403 tmp = tcg_temp_new_i32();
2404 switch ((insn >> 22) & 3) {
2405 case 1:
2406 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2407 tcg_temp_free_i32(tmp);
2408 return 1;
2410 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2411 break;
2412 case 2:
2413 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2414 tcg_temp_free_i32(tmp);
2415 return 1;
2417 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2418 break;
2419 case 3:
2420 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2421 tcg_temp_free_i32(tmp);
2422 return 1;
2424 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2425 break;
2427 tcg_temp_free_i32(tmp);
2428 gen_op_iwmmxt_movq_wRn_M0(wrd);
2429 gen_op_iwmmxt_set_mup();
2430 gen_op_iwmmxt_set_cup();
2431 break;
2432 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2433 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2434 wrd = (insn >> 12) & 0xf;
2435 rd0 = (insn >> 16) & 0xf;
2436 rd1 = (insn >> 0) & 0xf;
2437 gen_op_iwmmxt_movq_M0_wRn(rd0);
2438 switch ((insn >> 22) & 3) {
2439 case 0:
2440 if (insn & (1 << 21))
2441 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2442 else
2443 gen_op_iwmmxt_minub_M0_wRn(rd1);
2444 break;
2445 case 1:
2446 if (insn & (1 << 21))
2447 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2448 else
2449 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2450 break;
2451 case 2:
2452 if (insn & (1 << 21))
2453 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2454 else
2455 gen_op_iwmmxt_minul_M0_wRn(rd1);
2456 break;
2457 case 3:
2458 return 1;
2460 gen_op_iwmmxt_movq_wRn_M0(wrd);
2461 gen_op_iwmmxt_set_mup();
2462 break;
2463 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2464 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2465 wrd = (insn >> 12) & 0xf;
2466 rd0 = (insn >> 16) & 0xf;
2467 rd1 = (insn >> 0) & 0xf;
2468 gen_op_iwmmxt_movq_M0_wRn(rd0);
2469 switch ((insn >> 22) & 3) {
2470 case 0:
2471 if (insn & (1 << 21))
2472 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2473 else
2474 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2475 break;
2476 case 1:
2477 if (insn & (1 << 21))
2478 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2479 else
2480 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2481 break;
2482 case 2:
2483 if (insn & (1 << 21))
2484 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2485 else
2486 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2487 break;
2488 case 3:
2489 return 1;
2491 gen_op_iwmmxt_movq_wRn_M0(wrd);
2492 gen_op_iwmmxt_set_mup();
2493 break;
2494 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2495 case 0x402: case 0x502: case 0x602: case 0x702:
2496 wrd = (insn >> 12) & 0xf;
2497 rd0 = (insn >> 16) & 0xf;
2498 rd1 = (insn >> 0) & 0xf;
2499 gen_op_iwmmxt_movq_M0_wRn(rd0);
2500 tmp = tcg_const_i32((insn >> 20) & 3);
2501 iwmmxt_load_reg(cpu_V1, rd1);
2502 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2503 tcg_temp_free_i32(tmp);
2504 gen_op_iwmmxt_movq_wRn_M0(wrd);
2505 gen_op_iwmmxt_set_mup();
2506 break;
2507 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2508 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2509 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2510 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2511 wrd = (insn >> 12) & 0xf;
2512 rd0 = (insn >> 16) & 0xf;
2513 rd1 = (insn >> 0) & 0xf;
2514 gen_op_iwmmxt_movq_M0_wRn(rd0);
2515 switch ((insn >> 20) & 0xf) {
2516 case 0x0:
2517 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2518 break;
2519 case 0x1:
2520 gen_op_iwmmxt_subub_M0_wRn(rd1);
2521 break;
2522 case 0x3:
2523 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2524 break;
2525 case 0x4:
2526 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2527 break;
2528 case 0x5:
2529 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2530 break;
2531 case 0x7:
2532 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2533 break;
2534 case 0x8:
2535 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2536 break;
2537 case 0x9:
2538 gen_op_iwmmxt_subul_M0_wRn(rd1);
2539 break;
2540 case 0xb:
2541 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2542 break;
2543 default:
2544 return 1;
2546 gen_op_iwmmxt_movq_wRn_M0(wrd);
2547 gen_op_iwmmxt_set_mup();
2548 gen_op_iwmmxt_set_cup();
2549 break;
2550 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2551 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2552 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2553 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2554 wrd = (insn >> 12) & 0xf;
2555 rd0 = (insn >> 16) & 0xf;
2556 gen_op_iwmmxt_movq_M0_wRn(rd0);
2557 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2558 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2559 tcg_temp_free_i32(tmp);
2560 gen_op_iwmmxt_movq_wRn_M0(wrd);
2561 gen_op_iwmmxt_set_mup();
2562 gen_op_iwmmxt_set_cup();
2563 break;
2564 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2565 case 0x418: case 0x518: case 0x618: case 0x718:
2566 case 0x818: case 0x918: case 0xa18: case 0xb18:
2567 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2568 wrd = (insn >> 12) & 0xf;
2569 rd0 = (insn >> 16) & 0xf;
2570 rd1 = (insn >> 0) & 0xf;
2571 gen_op_iwmmxt_movq_M0_wRn(rd0);
2572 switch ((insn >> 20) & 0xf) {
2573 case 0x0:
2574 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2575 break;
2576 case 0x1:
2577 gen_op_iwmmxt_addub_M0_wRn(rd1);
2578 break;
2579 case 0x3:
2580 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2581 break;
2582 case 0x4:
2583 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2584 break;
2585 case 0x5:
2586 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2587 break;
2588 case 0x7:
2589 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2590 break;
2591 case 0x8:
2592 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2593 break;
2594 case 0x9:
2595 gen_op_iwmmxt_addul_M0_wRn(rd1);
2596 break;
2597 case 0xb:
2598 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2599 break;
2600 default:
2601 return 1;
2603 gen_op_iwmmxt_movq_wRn_M0(wrd);
2604 gen_op_iwmmxt_set_mup();
2605 gen_op_iwmmxt_set_cup();
2606 break;
2607 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2608 case 0x408: case 0x508: case 0x608: case 0x708:
2609 case 0x808: case 0x908: case 0xa08: case 0xb08:
2610 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2611 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2612 return 1;
2613 wrd = (insn >> 12) & 0xf;
2614 rd0 = (insn >> 16) & 0xf;
2615 rd1 = (insn >> 0) & 0xf;
2616 gen_op_iwmmxt_movq_M0_wRn(rd0);
2617 switch ((insn >> 22) & 3) {
2618 case 1:
2619 if (insn & (1 << 21))
2620 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2621 else
2622 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2623 break;
2624 case 2:
2625 if (insn & (1 << 21))
2626 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2627 else
2628 gen_op_iwmmxt_packul_M0_wRn(rd1);
2629 break;
2630 case 3:
2631 if (insn & (1 << 21))
2632 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2633 else
2634 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2635 break;
2637 gen_op_iwmmxt_movq_wRn_M0(wrd);
2638 gen_op_iwmmxt_set_mup();
2639 gen_op_iwmmxt_set_cup();
2640 break;
2641 case 0x201: case 0x203: case 0x205: case 0x207:
2642 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2643 case 0x211: case 0x213: case 0x215: case 0x217:
2644 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2645 wrd = (insn >> 5) & 0xf;
2646 rd0 = (insn >> 12) & 0xf;
2647 rd1 = (insn >> 0) & 0xf;
2648 if (rd0 == 0xf || rd1 == 0xf)
2649 return 1;
2650 gen_op_iwmmxt_movq_M0_wRn(wrd);
2651 tmp = load_reg(s, rd0);
2652 tmp2 = load_reg(s, rd1);
2653 switch ((insn >> 16) & 0xf) {
2654 case 0x0: /* TMIA */
2655 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2656 break;
2657 case 0x8: /* TMIAPH */
2658 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2659 break;
2660 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2661 if (insn & (1 << 16))
2662 tcg_gen_shri_i32(tmp, tmp, 16);
2663 if (insn & (1 << 17))
2664 tcg_gen_shri_i32(tmp2, tmp2, 16);
2665 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2666 break;
2667 default:
2668 tcg_temp_free_i32(tmp2);
2669 tcg_temp_free_i32(tmp);
2670 return 1;
2672 tcg_temp_free_i32(tmp2);
2673 tcg_temp_free_i32(tmp);
2674 gen_op_iwmmxt_movq_wRn_M0(wrd);
2675 gen_op_iwmmxt_set_mup();
2676 break;
2677 default:
2678 return 1;
2681 return 0;
2684 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2685    (i.e. an undefined instruction). */
2686 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2688 int acc, rd0, rd1, rdhi, rdlo;
2689 TCGv_i32 tmp, tmp2;
2691 if ((insn & 0x0ff00f10) == 0x0e200010) {
2692 /* Multiply with Internal Accumulate Format */
2693 rd0 = (insn >> 12) & 0xf;
2694 rd1 = insn & 0xf;
2695 acc = (insn >> 5) & 7;
2697 if (acc != 0)
2698 return 1;
2700 tmp = load_reg(s, rd0);
2701 tmp2 = load_reg(s, rd1);
2702 switch ((insn >> 16) & 0xf) {
2703 case 0x0: /* MIA */
2704 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2705 break;
2706 case 0x8: /* MIAPH */
2707 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2708 break;
2709 case 0xc: /* MIABB */
2710 case 0xd: /* MIABT */
2711 case 0xe: /* MIATB */
2712 case 0xf: /* MIATT */
2713 if (insn & (1 << 16))
2714 tcg_gen_shri_i32(tmp, tmp, 16);
2715 if (insn & (1 << 17))
2716 tcg_gen_shri_i32(tmp2, tmp2, 16);
2717 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2718 break;
2719 default:
2720 return 1;
2722 tcg_temp_free_i32(tmp2);
2723 tcg_temp_free_i32(tmp);
2725 gen_op_iwmmxt_movq_wRn_M0(acc);
2726 return 0;
2729 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2730 /* Internal Accumulator Access Format */
2731 rdhi = (insn >> 16) & 0xf;
2732 rdlo = (insn >> 12) & 0xf;
2733 acc = insn & 7;
2735 if (acc != 0)
2736 return 1;
2738 if (insn & ARM_CP_RW_BIT) { /* MRA */
2739 iwmmxt_load_reg(cpu_V0, acc);
2740 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2741 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2742 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
2743 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2744 } else { /* MAR */
2745 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2746 iwmmxt_store_reg(cpu_V0, acc);
2748 return 0;
2751 return 1;
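/* Worked example (illustrative) of the accumulator access decoded above:
 * for an MRA of acc0, RdLo receives acc0[31:0] and RdHi receives
 * acc0[39:32] -- the shift by 32 exposes the top bits and the mask
 * (1 << (40 - 32)) - 1 == 0xff keeps only the eight valid accumulator
 * bits.  MAR does the inverse, concatenating RdHi:RdLo back into the
 * 64-bit accumulator register.
 */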
2754 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2755 #define VFP_SREG(insn, bigbit, smallbit) \
2756 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2757 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2758 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2759 reg = (((insn) >> (bigbit)) & 0x0f) \
2760 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2761 } else { \
2762 if (insn & (1 << (smallbit))) \
2763 return 1; \
2764 reg = ((insn) >> (bigbit)) & 0x0f; \
2765 }} while (0)
2767 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2768 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2769 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2770 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2771 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2772 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
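/* Worked example (illustrative) of the field decoding above, assuming an
 * encoding with Vd (bits [15:12]) = 0b0101 and D (bit 22) = 1:
 *   VFP_SREG_D(insn) = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1)
 *                    = 0b01010 | 0b1 = 11, i.e. S11 (Vd:D ordering);
 *   VFP_DREG_D(rd, insn) with VFP3 yields rd = 0b10101 = 21, i.e. D21
 *                    (D becomes bit 4 of the index, Vd the low four bits).
 * Without VFP3 a set D/N/M bit in a double-precision field does not name a
 * valid register and the macro makes the enclosing function return 1 (UNDEF).
 */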
2774 /* Move between integer and VFP cores. */
2775 static TCGv_i32 gen_vfp_mrs(void)
2777 TCGv_i32 tmp = tcg_temp_new_i32();
2778 tcg_gen_mov_i32(tmp, cpu_F0s);
2779 return tmp;
2782 static void gen_vfp_msr(TCGv_i32 tmp)
2784 tcg_gen_mov_i32(cpu_F0s, tmp);
2785 tcg_temp_free_i32(tmp);
2788 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2790 TCGv_i32 tmp = tcg_temp_new_i32();
2791 if (shift)
2792 tcg_gen_shri_i32(var, var, shift);
2793 tcg_gen_ext8u_i32(var, var);
2794 tcg_gen_shli_i32(tmp, var, 8);
2795 tcg_gen_or_i32(var, var, tmp);
2796 tcg_gen_shli_i32(tmp, var, 16);
2797 tcg_gen_or_i32(var, var, tmp);
2798 tcg_temp_free_i32(tmp);
2801 static void gen_neon_dup_low16(TCGv_i32 var)
2803 TCGv_i32 tmp = tcg_temp_new_i32();
2804 tcg_gen_ext16u_i32(var, var);
2805 tcg_gen_shli_i32(tmp, var, 16);
2806 tcg_gen_or_i32(var, var, tmp);
2807 tcg_temp_free_i32(tmp);
2810 static void gen_neon_dup_high16(TCGv_i32 var)
2812 TCGv_i32 tmp = tcg_temp_new_i32();
2813 tcg_gen_andi_i32(var, var, 0xffff0000);
2814 tcg_gen_shri_i32(tmp, var, 16);
2815 tcg_gen_or_i32(var, var, tmp);
2816 tcg_temp_free_i32(tmp);
2819 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2822     /* Load a single Neon element and replicate into a 32-bit TCG reg */
2822 TCGv_i32 tmp = tcg_temp_new_i32();
2823 switch (size) {
2824 case 0:
2825 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2826 gen_neon_dup_u8(tmp, 0);
2827 break;
2828 case 1:
2829 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
2830 gen_neon_dup_low16(tmp);
2831 break;
2832 case 2:
2833 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2834 break;
2835 default: /* Avoid compiler warnings. */
2836 abort();
2838 return tmp;
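/* Illustrative values for the replication helpers above: with size == 0 a
 * loaded byte 0x5a becomes 0x5a5a5a5a (gen_neon_dup_u8 ORs in a copy
 * shifted left by 8, then a copy of that result shifted left by 16); with
 * size == 1 a halfword 0x1234 becomes 0x12341234 via gen_neon_dup_low16;
 * size == 2 returns the loaded word unchanged.
 */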
2841 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2842 uint32_t dp)
2844 uint32_t cc = extract32(insn, 20, 2);
2846 if (dp) {
2847 TCGv_i64 frn, frm, dest;
2848 TCGv_i64 tmp, zero, zf, nf, vf;
2850 zero = tcg_const_i64(0);
2852 frn = tcg_temp_new_i64();
2853 frm = tcg_temp_new_i64();
2854 dest = tcg_temp_new_i64();
2856 zf = tcg_temp_new_i64();
2857 nf = tcg_temp_new_i64();
2858 vf = tcg_temp_new_i64();
2860 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2861 tcg_gen_ext_i32_i64(nf, cpu_NF);
2862 tcg_gen_ext_i32_i64(vf, cpu_VF);
2864 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2865 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2866 switch (cc) {
2867 case 0: /* eq: Z */
2868 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2869 frn, frm);
2870 break;
2871 case 1: /* vs: V */
2872 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
2873 frn, frm);
2874 break;
2875 case 2: /* ge: N == V -> N ^ V == 0 */
2876 tmp = tcg_temp_new_i64();
2877 tcg_gen_xor_i64(tmp, vf, nf);
2878 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2879 frn, frm);
2880 tcg_temp_free_i64(tmp);
2881 break;
2882 case 3: /* gt: !Z && N == V */
2883 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
2884 frn, frm);
2885 tmp = tcg_temp_new_i64();
2886 tcg_gen_xor_i64(tmp, vf, nf);
2887 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2888 dest, frm);
2889 tcg_temp_free_i64(tmp);
2890 break;
2892 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2893 tcg_temp_free_i64(frn);
2894 tcg_temp_free_i64(frm);
2895 tcg_temp_free_i64(dest);
2897 tcg_temp_free_i64(zf);
2898 tcg_temp_free_i64(nf);
2899 tcg_temp_free_i64(vf);
2901 tcg_temp_free_i64(zero);
2902 } else {
2903 TCGv_i32 frn, frm, dest;
2904 TCGv_i32 tmp, zero;
2906 zero = tcg_const_i32(0);
2908 frn = tcg_temp_new_i32();
2909 frm = tcg_temp_new_i32();
2910 dest = tcg_temp_new_i32();
2911 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2912 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2913 switch (cc) {
2914 case 0: /* eq: Z */
2915 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
2916 frn, frm);
2917 break;
2918 case 1: /* vs: V */
2919 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
2920 frn, frm);
2921 break;
2922 case 2: /* ge: N == V -> N ^ V == 0 */
2923 tmp = tcg_temp_new_i32();
2924 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2925 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2926 frn, frm);
2927 tcg_temp_free_i32(tmp);
2928 break;
2929 case 3: /* gt: !Z && N == V */
2930 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
2931 frn, frm);
2932 tmp = tcg_temp_new_i32();
2933 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2934 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2935 dest, frm);
2936 tcg_temp_free_i32(tmp);
2937 break;
2939 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2940 tcg_temp_free_i32(frn);
2941 tcg_temp_free_i32(frm);
2942 tcg_temp_free_i32(dest);
2944 tcg_temp_free_i32(zero);
2947 return 0;
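/* Summary (illustrative) of the VSEL condition mapping implemented above:
 *   cc = 0 -> EQ: select frn when ZF == 0 (i.e. the Z flag is set)
 *   cc = 1 -> VS: select frn when VF < 0 (i.e. the V flag is set)
 *   cc = 2 -> GE: select frn when N == V, tested as (VF ^ NF) >= 0
 *   cc = 3 -> GT: as GE, but additionally requiring ZF != 0
 * The double-precision path widens the 32-bit flag values first (ZF
 * zero-extended, NF and VF sign-extended) so the same signed comparisons
 * can be reused on 64-bit temporaries.
 */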
2950 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2951 uint32_t rm, uint32_t dp)
2953 uint32_t vmin = extract32(insn, 6, 1);
2954 TCGv_ptr fpst = get_fpstatus_ptr(0);
2956 if (dp) {
2957 TCGv_i64 frn, frm, dest;
2959 frn = tcg_temp_new_i64();
2960 frm = tcg_temp_new_i64();
2961 dest = tcg_temp_new_i64();
2963 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2964 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2965 if (vmin) {
2966 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
2967 } else {
2968 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
2970 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2971 tcg_temp_free_i64(frn);
2972 tcg_temp_free_i64(frm);
2973 tcg_temp_free_i64(dest);
2974 } else {
2975 TCGv_i32 frn, frm, dest;
2977 frn = tcg_temp_new_i32();
2978 frm = tcg_temp_new_i32();
2979 dest = tcg_temp_new_i32();
2981 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2982 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2983 if (vmin) {
2984 gen_helper_vfp_minnums(dest, frn, frm, fpst);
2985 } else {
2986 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
2988 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2989 tcg_temp_free_i32(frn);
2990 tcg_temp_free_i32(frm);
2991 tcg_temp_free_i32(dest);
2994 tcg_temp_free_ptr(fpst);
2995 return 0;
2998 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2999 int rounding)
3001 TCGv_ptr fpst = get_fpstatus_ptr(0);
3002 TCGv_i32 tcg_rmode;
3004 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3005 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3007 if (dp) {
3008 TCGv_i64 tcg_op;
3009 TCGv_i64 tcg_res;
3010 tcg_op = tcg_temp_new_i64();
3011 tcg_res = tcg_temp_new_i64();
3012 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3013 gen_helper_rintd(tcg_res, tcg_op, fpst);
3014 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3015 tcg_temp_free_i64(tcg_op);
3016 tcg_temp_free_i64(tcg_res);
3017 } else {
3018 TCGv_i32 tcg_op;
3019 TCGv_i32 tcg_res;
3020 tcg_op = tcg_temp_new_i32();
3021 tcg_res = tcg_temp_new_i32();
3022 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3023 gen_helper_rints(tcg_res, tcg_op, fpst);
3024 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3025 tcg_temp_free_i32(tcg_op);
3026 tcg_temp_free_i32(tcg_res);
3029 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3030 tcg_temp_free_i32(tcg_rmode);
3032 tcg_temp_free_ptr(fpst);
3033 return 0;
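/* Note on the rounding-mode handling above (the same pattern is used by
 * handle_vcvt below): gen_helper_set_rmode installs the requested rounding
 * mode and hands back the previous one in tcg_rmode, so the second call
 * restores whatever mode was in effect before the VRINT* instruction.
 */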
3036 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3037 int rounding)
3039 bool is_signed = extract32(insn, 7, 1);
3040 TCGv_ptr fpst = get_fpstatus_ptr(0);
3041 TCGv_i32 tcg_rmode, tcg_shift;
3043 tcg_shift = tcg_const_i32(0);
3045 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3046 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3048 if (dp) {
3049 TCGv_i64 tcg_double, tcg_res;
3050 TCGv_i32 tcg_tmp;
3051         /* Rd is encoded as a single-precision register even when the source
3052 * is double precision.
3054 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3055 tcg_double = tcg_temp_new_i64();
3056 tcg_res = tcg_temp_new_i64();
3057 tcg_tmp = tcg_temp_new_i32();
3058 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3059 if (is_signed) {
3060 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3061 } else {
3062 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3064 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
3065 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3066 tcg_temp_free_i32(tcg_tmp);
3067 tcg_temp_free_i64(tcg_res);
3068 tcg_temp_free_i64(tcg_double);
3069 } else {
3070 TCGv_i32 tcg_single, tcg_res;
3071 tcg_single = tcg_temp_new_i32();
3072 tcg_res = tcg_temp_new_i32();
3073 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3074 if (is_signed) {
3075 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3076 } else {
3077 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3079 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3080 tcg_temp_free_i32(tcg_res);
3081 tcg_temp_free_i32(tcg_single);
3084 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3085 tcg_temp_free_i32(tcg_rmode);
3087 tcg_temp_free_i32(tcg_shift);
3089 tcg_temp_free_ptr(fpst);
3091 return 0;
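/* Worked example (illustrative) of the rd remapping above: the result of a
 * double-precision VCVT to integer is written to a single-precision
 * register, so the D-style index (D:Vd) is converted back to the S-style
 * Vd:D layout.  E.g. rd = 21 (0b10101):
 *   ((21 << 1) & 0x1e) | ((21 >> 4) & 1) = 0b01010 | 0b1 = 11, i.e. S11.
 */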
3094 /* Table for converting the most common AArch32 encoding of
3095 * rounding mode to arm_fprounding order (which matches the
3096 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3098 static const uint8_t fp_decode_rm[] = {
3099 FPROUNDING_TIEAWAY,
3100 FPROUNDING_TIEEVEN,
3101 FPROUNDING_POSINF,
3102 FPROUNDING_NEGINF,
3105 static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
3107 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3109 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3110 return 1;
3113 if (dp) {
3114 VFP_DREG_D(rd, insn);
3115 VFP_DREG_N(rn, insn);
3116 VFP_DREG_M(rm, insn);
3117 } else {
3118 rd = VFP_SREG_D(insn);
3119 rn = VFP_SREG_N(insn);
3120 rm = VFP_SREG_M(insn);
3123 if ((insn & 0x0f800e50) == 0x0e000a00) {
3124 return handle_vsel(insn, rd, rn, rm, dp);
3125 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3126 return handle_vminmaxnm(insn, rd, rn, rm, dp);
3127 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3128 /* VRINTA, VRINTN, VRINTP, VRINTM */
3129 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3130 return handle_vrint(insn, rd, rm, dp, rounding);
3131 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3132 /* VCVTA, VCVTN, VCVTP, VCVTM */
3133 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3134 return handle_vcvt(insn, rd, rm, dp, rounding);
3136 return 1;
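/* Illustrative decode of the rounding-mode field used above: insn bits
 * [17:16] index fp_decode_rm, so RM = 0b10 selects FPROUNDING_POSINF
 * (VRINTP/VCVTP) and RM = 0b11 selects FPROUNDING_NEGINF (VRINTM/VCVTM),
 * matching the FPDecodeRM() ordering referenced above.
 */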
3139 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3140    (i.e. an undefined instruction). */
3141 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3143 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3144 int dp, veclen;
3145 TCGv_i32 addr;
3146 TCGv_i32 tmp;
3147 TCGv_i32 tmp2;
3149 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
3150 return 1;
3153 /* FIXME: this access check should not take precedence over UNDEF
3154 * for invalid encodings; we will generate incorrect syndrome information
3155 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3157 if (s->fp_excp_el) {
3158 gen_exception_insn(s, 4, EXCP_UDEF,
3159 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
3160 return 0;
3163 if (!s->vfp_enabled) {
3164 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3165 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3166 return 1;
3167 rn = (insn >> 16) & 0xf;
3168 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3169 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
3170 return 1;
3174 if (extract32(insn, 28, 4) == 0xf) {
3175 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3176 * only used in v8 and above.
3178 return disas_vfp_v8_insn(s, insn);
3181 dp = ((insn & 0xf00) == 0xb00);
3182 switch ((insn >> 24) & 0xf) {
3183 case 0xe:
3184 if (insn & (1 << 4)) {
3185 /* single register transfer */
3186 rd = (insn >> 12) & 0xf;
3187 if (dp) {
3188 int size;
3189 int pass;
3191 VFP_DREG_N(rn, insn);
3192 if (insn & 0xf)
3193 return 1;
3194 if (insn & 0x00c00060
3195 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
3196 return 1;
3199 pass = (insn >> 21) & 1;
3200 if (insn & (1 << 22)) {
3201 size = 0;
3202 offset = ((insn >> 5) & 3) * 8;
3203 } else if (insn & (1 << 5)) {
3204 size = 1;
3205 offset = (insn & (1 << 6)) ? 16 : 0;
3206 } else {
3207 size = 2;
3208 offset = 0;
3210 if (insn & ARM_CP_RW_BIT) {
3211 /* vfp->arm */
3212 tmp = neon_load_reg(rn, pass);
3213 switch (size) {
3214 case 0:
3215 if (offset)
3216 tcg_gen_shri_i32(tmp, tmp, offset);
3217 if (insn & (1 << 23))
3218 gen_uxtb(tmp);
3219 else
3220 gen_sxtb(tmp);
3221 break;
3222 case 1:
3223 if (insn & (1 << 23)) {
3224 if (offset) {
3225 tcg_gen_shri_i32(tmp, tmp, 16);
3226 } else {
3227 gen_uxth(tmp);
3229 } else {
3230 if (offset) {
3231 tcg_gen_sari_i32(tmp, tmp, 16);
3232 } else {
3233 gen_sxth(tmp);
3236 break;
3237 case 2:
3238 break;
3240 store_reg(s, rd, tmp);
3241 } else {
3242 /* arm->vfp */
3243 tmp = load_reg(s, rd);
3244 if (insn & (1 << 23)) {
3245 /* VDUP */
3246 if (size == 0) {
3247 gen_neon_dup_u8(tmp, 0);
3248 } else if (size == 1) {
3249 gen_neon_dup_low16(tmp);
3251 for (n = 0; n <= pass * 2; n++) {
3252 tmp2 = tcg_temp_new_i32();
3253 tcg_gen_mov_i32(tmp2, tmp);
3254 neon_store_reg(rn, n, tmp2);
3256 neon_store_reg(rn, n, tmp);
3257 } else {
3258 /* VMOV */
3259 switch (size) {
3260 case 0:
3261 tmp2 = neon_load_reg(rn, pass);
3262 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
3263 tcg_temp_free_i32(tmp2);
3264 break;
3265 case 1:
3266 tmp2 = neon_load_reg(rn, pass);
3267 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
3268 tcg_temp_free_i32(tmp2);
3269 break;
3270 case 2:
3271 break;
3273 neon_store_reg(rn, pass, tmp);
3276 } else { /* !dp */
3277 if ((insn & 0x6f) != 0x00)
3278 return 1;
3279 rn = VFP_SREG_N(insn);
3280 if (insn & ARM_CP_RW_BIT) {
3281 /* vfp->arm */
3282 if (insn & (1 << 21)) {
3283 /* system register */
3284 rn >>= 1;
3286 switch (rn) {
3287 case ARM_VFP_FPSID:
3288                     /* VFP2 allows access to FPSID from userspace.
3289                        VFP3 restricts all ID registers to privileged
3290 accesses. */
3291 if (IS_USER(s)
3292 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3293 return 1;
3295 tmp = load_cpu_field(vfp.xregs[rn]);
3296 break;
3297 case ARM_VFP_FPEXC:
3298 if (IS_USER(s))
3299 return 1;
3300 tmp = load_cpu_field(vfp.xregs[rn]);
3301 break;
3302 case ARM_VFP_FPINST:
3303 case ARM_VFP_FPINST2:
3304 /* Not present in VFP3. */
3305 if (IS_USER(s)
3306 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3307 return 1;
3309 tmp = load_cpu_field(vfp.xregs[rn]);
3310 break;
3311 case ARM_VFP_FPSCR:
3312 if (rd == 15) {
3313 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3314 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3315 } else {
3316 tmp = tcg_temp_new_i32();
3317 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3319 break;
3320 case ARM_VFP_MVFR2:
3321 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3322 return 1;
3324 /* fall through */
3325 case ARM_VFP_MVFR0:
3326 case ARM_VFP_MVFR1:
3327 if (IS_USER(s)
3328 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
3329 return 1;
3331 tmp = load_cpu_field(vfp.xregs[rn]);
3332 break;
3333 default:
3334 return 1;
3336 } else {
3337 gen_mov_F0_vreg(0, rn);
3338 tmp = gen_vfp_mrs();
3340 if (rd == 15) {
3341 /* Set the 4 flag bits in the CPSR. */
3342 gen_set_nzcv(tmp);
3343 tcg_temp_free_i32(tmp);
3344 } else {
3345 store_reg(s, rd, tmp);
3347 } else {
3348 /* arm->vfp */
3349 if (insn & (1 << 21)) {
3350 rn >>= 1;
3351 /* system register */
3352 switch (rn) {
3353 case ARM_VFP_FPSID:
3354 case ARM_VFP_MVFR0:
3355 case ARM_VFP_MVFR1:
3356 /* Writes are ignored. */
3357 break;
3358 case ARM_VFP_FPSCR:
3359 tmp = load_reg(s, rd);
3360 gen_helper_vfp_set_fpscr(cpu_env, tmp);
3361 tcg_temp_free_i32(tmp);
3362 gen_lookup_tb(s);
3363 break;
3364 case ARM_VFP_FPEXC:
3365 if (IS_USER(s))
3366 return 1;
3367 /* TODO: VFP subarchitecture support.
3368                      * For now, keep the EN bit only. */
3369 tmp = load_reg(s, rd);
3370 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
3371 store_cpu_field(tmp, vfp.xregs[rn]);
3372 gen_lookup_tb(s);
3373 break;
3374 case ARM_VFP_FPINST:
3375 case ARM_VFP_FPINST2:
3376 if (IS_USER(s)) {
3377 return 1;
3379 tmp = load_reg(s, rd);
3380 store_cpu_field(tmp, vfp.xregs[rn]);
3381 break;
3382 default:
3383 return 1;
3385 } else {
3386 tmp = load_reg(s, rd);
3387 gen_vfp_msr(tmp);
3388 gen_mov_vreg_F0(0, rn);
3392 } else {
3393 /* data processing */
3394 /* The opcode is in bits 23, 21, 20 and 6. */
3395 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3396 if (dp) {
3397 if (op == 15) {
3398 /* rn is opcode */
3399 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3400 } else {
3401 /* rn is register number */
3402 VFP_DREG_N(rn, insn);
3405 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3406 ((rn & 0x1e) == 0x6))) {
3407 /* Integer or single/half precision destination. */
3408 rd = VFP_SREG_D(insn);
3409 } else {
3410 VFP_DREG_D(rd, insn);
3412 if (op == 15 &&
3413 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3414 ((rn & 0x1e) == 0x4))) {
3415 /* VCVT from int or half precision is always from S reg
3416                  * regardless of the dp bit. VCVT with immediate frac_bits
3417                  * has the same format as SREG_M.
3419 rm = VFP_SREG_M(insn);
3420 } else {
3421 VFP_DREG_M(rm, insn);
3423 } else {
3424 rn = VFP_SREG_N(insn);
3425 if (op == 15 && rn == 15) {
3426 /* Double precision destination. */
3427 VFP_DREG_D(rd, insn);
3428 } else {
3429 rd = VFP_SREG_D(insn);
3431             /* NB that we implicitly rely on the encoding of the frac_bits
3432              * in a fixed-to-float VCVT being the same as that of an SREG_M
3434 rm = VFP_SREG_M(insn);
3437 veclen = s->vec_len;
3438 if (op == 15 && rn > 3)
3439 veclen = 0;
3441 /* Shut up compiler warnings. */
3442 delta_m = 0;
3443 delta_d = 0;
3444 bank_mask = 0;
3446 if (veclen > 0) {
3447 if (dp)
3448 bank_mask = 0xc;
3449 else
3450 bank_mask = 0x18;
3452 /* Figure out what type of vector operation this is. */
3453 if ((rd & bank_mask) == 0) {
3454 /* scalar */
3455 veclen = 0;
3456 } else {
3457 if (dp)
3458 delta_d = (s->vec_stride >> 1) + 1;
3459 else
3460 delta_d = s->vec_stride + 1;
3462 if ((rm & bank_mask) == 0) {
3463 /* mixed scalar/vector */
3464 delta_m = 0;
3465 } else {
3466 /* vector */
3467 delta_m = delta_d;
3472 /* Load the initial operands. */
3473 if (op == 15) {
3474 switch (rn) {
3475 case 16:
3476 case 17:
3477 /* Integer source */
3478 gen_mov_F0_vreg(0, rm);
3479 break;
3480 case 8:
3481 case 9:
3482 /* Compare */
3483 gen_mov_F0_vreg(dp, rd);
3484 gen_mov_F1_vreg(dp, rm);
3485 break;
3486 case 10:
3487 case 11:
3488 /* Compare with zero */
3489 gen_mov_F0_vreg(dp, rd);
3490 gen_vfp_F1_ld0(dp);
3491 break;
3492 case 20:
3493 case 21:
3494 case 22:
3495 case 23:
3496 case 28:
3497 case 29:
3498 case 30:
3499 case 31:
3500 /* Source and destination the same. */
3501 gen_mov_F0_vreg(dp, rd);
3502 break;
3503 case 4:
3504 case 5:
3505 case 6:
3506 case 7:
3507                 /* VCVTB, VCVTT: only present with the half-precision extension
3508 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3509 * (we choose to UNDEF)
3511 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3512 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
3513 return 1;
3515 if (!extract32(rn, 1, 1)) {
3516 /* Half precision source. */
3517 gen_mov_F0_vreg(0, rm);
3518 break;
3520 /* Otherwise fall through */
3521 default:
3522 /* One source operand. */
3523 gen_mov_F0_vreg(dp, rm);
3524 break;
3526 } else {
3527 /* Two source operands. */
3528 gen_mov_F0_vreg(dp, rn);
3529 gen_mov_F1_vreg(dp, rm);
3532 for (;;) {
3533 /* Perform the calculation. */
3534 switch (op) {
3535 case 0: /* VMLA: fd + (fn * fm) */
3536 /* Note that order of inputs to the add matters for NaNs */
3537 gen_vfp_F1_mul(dp);
3538 gen_mov_F0_vreg(dp, rd);
3539 gen_vfp_add(dp);
3540 break;
3541 case 1: /* VMLS: fd + -(fn * fm) */
3542 gen_vfp_mul(dp);
3543 gen_vfp_F1_neg(dp);
3544 gen_mov_F0_vreg(dp, rd);
3545 gen_vfp_add(dp);
3546 break;
3547 case 2: /* VNMLS: -fd + (fn * fm) */
3548 /* Note that it isn't valid to replace (-A + B) with (B - A)
3549 * or similar plausible looking simplifications
3550 * because this will give wrong results for NaNs.
3552 gen_vfp_F1_mul(dp);
3553 gen_mov_F0_vreg(dp, rd);
3554 gen_vfp_neg(dp);
3555 gen_vfp_add(dp);
3556 break;
3557 case 3: /* VNMLA: -fd + -(fn * fm) */
3558 gen_vfp_mul(dp);
3559 gen_vfp_F1_neg(dp);
3560 gen_mov_F0_vreg(dp, rd);
3561 gen_vfp_neg(dp);
3562 gen_vfp_add(dp);
3563 break;
3564 case 4: /* mul: fn * fm */
3565 gen_vfp_mul(dp);
3566 break;
3567 case 5: /* nmul: -(fn * fm) */
3568 gen_vfp_mul(dp);
3569 gen_vfp_neg(dp);
3570 break;
3571 case 6: /* add: fn + fm */
3572 gen_vfp_add(dp);
3573 break;
3574 case 7: /* sub: fn - fm */
3575 gen_vfp_sub(dp);
3576 break;
3577 case 8: /* div: fn / fm */
3578 gen_vfp_div(dp);
3579 break;
3580 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3581 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3582 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3583 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3584 /* These are fused multiply-add, and must be done as one
3585 * floating point operation with no rounding between the
3586 * multiplication and addition steps.
3587 * NB that doing the negations here as separate steps is
3588                  * correct: an input NaN should come out with its sign bit
3589                  * flipped if it is a negated input.
3591 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
3592 return 1;
3594 if (dp) {
3595 TCGv_ptr fpst;
3596 TCGv_i64 frd;
3597 if (op & 1) {
3598 /* VFNMS, VFMS */
3599 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3601 frd = tcg_temp_new_i64();
3602 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3603 if (op & 2) {
3604 /* VFNMA, VFNMS */
3605 gen_helper_vfp_negd(frd, frd);
3607 fpst = get_fpstatus_ptr(0);
3608 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3609 cpu_F1d, frd, fpst);
3610 tcg_temp_free_ptr(fpst);
3611 tcg_temp_free_i64(frd);
3612 } else {
3613 TCGv_ptr fpst;
3614 TCGv_i32 frd;
3615 if (op & 1) {
3616 /* VFNMS, VFMS */
3617 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3619 frd = tcg_temp_new_i32();
3620 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3621 if (op & 2) {
3622 gen_helper_vfp_negs(frd, frd);
3624 fpst = get_fpstatus_ptr(0);
3625 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3626 cpu_F1s, frd, fpst);
3627 tcg_temp_free_ptr(fpst);
3628 tcg_temp_free_i32(frd);
3630 break;
3631 case 14: /* fconst */
3632 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3633 return 1;
3636 n = (insn << 12) & 0x80000000;
3637 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3638 if (dp) {
3639 if (i & 0x40)
3640 i |= 0x3f80;
3641 else
3642 i |= 0x4000;
3643 n |= i << 16;
3644 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3645 } else {
3646 if (i & 0x40)
3647 i |= 0x780;
3648 else
3649 i |= 0x800;
3650 n |= i << 19;
3651 tcg_gen_movi_i32(cpu_F0s, n);
3653 break;
3654 case 15: /* extension space */
3655 switch (rn) {
3656 case 0: /* cpy */
3657 /* no-op */
3658 break;
3659 case 1: /* abs */
3660 gen_vfp_abs(dp);
3661 break;
3662 case 2: /* neg */
3663 gen_vfp_neg(dp);
3664 break;
3665 case 3: /* sqrt */
3666 gen_vfp_sqrt(dp);
3667 break;
3668 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3669 tmp = gen_vfp_mrs();
3670 tcg_gen_ext16u_i32(tmp, tmp);
3671 if (dp) {
3672 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3673 cpu_env);
3674 } else {
3675 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3676 cpu_env);
3678 tcg_temp_free_i32(tmp);
3679 break;
3680 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3681 tmp = gen_vfp_mrs();
3682 tcg_gen_shri_i32(tmp, tmp, 16);
3683 if (dp) {
3684 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3685 cpu_env);
3686 } else {
3687 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3688 cpu_env);
3690 tcg_temp_free_i32(tmp);
3691 break;
3692 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3693 tmp = tcg_temp_new_i32();
3694 if (dp) {
3695 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3696 cpu_env);
3697 } else {
3698 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3699 cpu_env);
3701 gen_mov_F0_vreg(0, rd);
3702 tmp2 = gen_vfp_mrs();
3703 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3704 tcg_gen_or_i32(tmp, tmp, tmp2);
3705 tcg_temp_free_i32(tmp2);
3706 gen_vfp_msr(tmp);
3707 break;
3708 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3709 tmp = tcg_temp_new_i32();
3710 if (dp) {
3711 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3712 cpu_env);
3713 } else {
3714 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3715 cpu_env);
3717 tcg_gen_shli_i32(tmp, tmp, 16);
3718 gen_mov_F0_vreg(0, rd);
3719 tmp2 = gen_vfp_mrs();
3720 tcg_gen_ext16u_i32(tmp2, tmp2);
3721 tcg_gen_or_i32(tmp, tmp, tmp2);
3722 tcg_temp_free_i32(tmp2);
3723 gen_vfp_msr(tmp);
3724 break;
3725 case 8: /* cmp */
3726 gen_vfp_cmp(dp);
3727 break;
3728 case 9: /* cmpe */
3729 gen_vfp_cmpe(dp);
3730 break;
3731 case 10: /* cmpz */
3732 gen_vfp_cmp(dp);
3733 break;
3734 case 11: /* cmpez */
3735 gen_vfp_F1_ld0(dp);
3736 gen_vfp_cmpe(dp);
3737 break;
3738 case 12: /* vrintr */
3740 TCGv_ptr fpst = get_fpstatus_ptr(0);
3741 if (dp) {
3742 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3743 } else {
3744 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3746 tcg_temp_free_ptr(fpst);
3747 break;
3749 case 13: /* vrintz */
3751 TCGv_ptr fpst = get_fpstatus_ptr(0);
3752 TCGv_i32 tcg_rmode;
3753 tcg_rmode = tcg_const_i32(float_round_to_zero);
3754 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3755 if (dp) {
3756 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3757 } else {
3758 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3760 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3761 tcg_temp_free_i32(tcg_rmode);
3762 tcg_temp_free_ptr(fpst);
3763 break;
3765 case 14: /* vrintx */
3767 TCGv_ptr fpst = get_fpstatus_ptr(0);
3768 if (dp) {
3769 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3770 } else {
3771 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3773 tcg_temp_free_ptr(fpst);
3774 break;
3776 case 15: /* single<->double conversion */
3777 if (dp)
3778 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3779 else
3780 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3781 break;
3782 case 16: /* fuito */
3783 gen_vfp_uito(dp, 0);
3784 break;
3785 case 17: /* fsito */
3786 gen_vfp_sito(dp, 0);
3787 break;
3788 case 20: /* fshto */
3789 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3790 return 1;
3792 gen_vfp_shto(dp, 16 - rm, 0);
3793 break;
3794 case 21: /* fslto */
3795 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3796 return 1;
3798 gen_vfp_slto(dp, 32 - rm, 0);
3799 break;
3800 case 22: /* fuhto */
3801 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3802 return 1;
3804 gen_vfp_uhto(dp, 16 - rm, 0);
3805 break;
3806 case 23: /* fulto */
3807 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3808 return 1;
3810 gen_vfp_ulto(dp, 32 - rm, 0);
3811 break;
3812 case 24: /* ftoui */
3813 gen_vfp_toui(dp, 0);
3814 break;
3815 case 25: /* ftouiz */
3816 gen_vfp_touiz(dp, 0);
3817 break;
3818 case 26: /* ftosi */
3819 gen_vfp_tosi(dp, 0);
3820 break;
3821 case 27: /* ftosiz */
3822 gen_vfp_tosiz(dp, 0);
3823 break;
3824 case 28: /* ftosh */
3825 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3826 return 1;
3828 gen_vfp_tosh(dp, 16 - rm, 0);
3829 break;
3830 case 29: /* ftosl */
3831 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3832 return 1;
3834 gen_vfp_tosl(dp, 32 - rm, 0);
3835 break;
3836 case 30: /* ftouh */
3837 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3838 return 1;
3840 gen_vfp_touh(dp, 16 - rm, 0);
3841 break;
3842 case 31: /* ftoul */
3843 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3844 return 1;
3846 gen_vfp_toul(dp, 32 - rm, 0);
3847 break;
3848 default: /* undefined */
3849 return 1;
3851 break;
3852 default: /* undefined */
3853 return 1;
3856 /* Write back the result. */
3857 if (op == 15 && (rn >= 8 && rn <= 11)) {
3858 /* Comparison, do nothing. */
3859 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3860 (rn & 0x1e) == 0x6)) {
3861                 /* VCVT double to int: the result is always an integer.
3862                  * VCVT double to half precision always produces a
3863                  * single-precision result.
3865 gen_mov_vreg_F0(0, rd);
3866 } else if (op == 15 && rn == 15) {
3867 /* conversion */
3868 gen_mov_vreg_F0(!dp, rd);
3869 } else {
3870 gen_mov_vreg_F0(dp, rd);
3873 /* break out of the loop if we have finished */
3874 if (veclen == 0)
3875 break;
3877 if (op == 15 && delta_m == 0) {
3878 /* single source one-many */
3879 while (veclen--) {
3880 rd = ((rd + delta_d) & (bank_mask - 1))
3881 | (rd & bank_mask);
3882 gen_mov_vreg_F0(dp, rd);
3884 break;
3886             /* Set up the next operands. */
3887 veclen--;
3888 rd = ((rd + delta_d) & (bank_mask - 1))
3889 | (rd & bank_mask);
3891 if (op == 15) {
3892 /* One source operand. */
3893 rm = ((rm + delta_m) & (bank_mask - 1))
3894 | (rm & bank_mask);
3895 gen_mov_F0_vreg(dp, rm);
3896 } else {
3897 /* Two source operands. */
3898 rn = ((rn + delta_d) & (bank_mask - 1))
3899 | (rn & bank_mask);
3900 gen_mov_F0_vreg(dp, rn);
3901 if (delta_m) {
3902 rm = ((rm + delta_m) & (bank_mask - 1))
3903 | (rm & bank_mask);
3904 gen_mov_F1_vreg(dp, rm);
3909 break;
3910 case 0xc:
3911 case 0xd:
3912 if ((insn & 0x03e00000) == 0x00400000) {
3913 /* two-register transfer */
3914 rn = (insn >> 16) & 0xf;
3915 rd = (insn >> 12) & 0xf;
3916 if (dp) {
3917 VFP_DREG_M(rm, insn);
3918 } else {
3919 rm = VFP_SREG_M(insn);
3922 if (insn & ARM_CP_RW_BIT) {
3923 /* vfp->arm */
3924 if (dp) {
3925 gen_mov_F0_vreg(0, rm * 2);
3926 tmp = gen_vfp_mrs();
3927 store_reg(s, rd, tmp);
3928 gen_mov_F0_vreg(0, rm * 2 + 1);
3929 tmp = gen_vfp_mrs();
3930 store_reg(s, rn, tmp);
3931 } else {
3932 gen_mov_F0_vreg(0, rm);
3933 tmp = gen_vfp_mrs();
3934 store_reg(s, rd, tmp);
3935 gen_mov_F0_vreg(0, rm + 1);
3936 tmp = gen_vfp_mrs();
3937 store_reg(s, rn, tmp);
3939 } else {
3940 /* arm->vfp */
3941 if (dp) {
3942 tmp = load_reg(s, rd);
3943 gen_vfp_msr(tmp);
3944 gen_mov_vreg_F0(0, rm * 2);
3945 tmp = load_reg(s, rn);
3946 gen_vfp_msr(tmp);
3947 gen_mov_vreg_F0(0, rm * 2 + 1);
3948 } else {
3949 tmp = load_reg(s, rd);
3950 gen_vfp_msr(tmp);
3951 gen_mov_vreg_F0(0, rm);
3952 tmp = load_reg(s, rn);
3953 gen_vfp_msr(tmp);
3954 gen_mov_vreg_F0(0, rm + 1);
3957 } else {
3958 /* Load/store */
3959 rn = (insn >> 16) & 0xf;
3960 if (dp)
3961 VFP_DREG_D(rd, insn);
3962 else
3963 rd = VFP_SREG_D(insn);
3964 if ((insn & 0x01200000) == 0x01000000) {
3965 /* Single load/store */
3966 offset = (insn & 0xff) << 2;
3967 if ((insn & (1 << 23)) == 0)
3968 offset = -offset;
3969 if (s->thumb && rn == 15) {
3970 /* This is actually UNPREDICTABLE */
3971 addr = tcg_temp_new_i32();
3972 tcg_gen_movi_i32(addr, s->pc & ~2);
3973 } else {
3974 addr = load_reg(s, rn);
3976 tcg_gen_addi_i32(addr, addr, offset);
3977 if (insn & (1 << 20)) {
3978 gen_vfp_ld(s, dp, addr);
3979 gen_mov_vreg_F0(dp, rd);
3980 } else {
3981 gen_mov_F0_vreg(dp, rd);
3982 gen_vfp_st(s, dp, addr);
3984 tcg_temp_free_i32(addr);
3985 } else {
3986 /* load/store multiple */
3987 int w = insn & (1 << 21);
3988 if (dp)
3989 n = (insn >> 1) & 0x7f;
3990 else
3991 n = insn & 0xff;
3993 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3994                     /* P == U, W == 1 => UNDEF */
3995 return 1;
3997 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3998 /* UNPREDICTABLE cases for bad immediates: we choose to
3999 * UNDEF to avoid generating huge numbers of TCG ops
4001 return 1;
4003 if (rn == 15 && w) {
4004 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4005 return 1;
4008 if (s->thumb && rn == 15) {
4009 /* This is actually UNPREDICTABLE */
4010 addr = tcg_temp_new_i32();
4011 tcg_gen_movi_i32(addr, s->pc & ~2);
4012 } else {
4013 addr = load_reg(s, rn);
4015 if (insn & (1 << 24)) /* pre-decrement */
4016 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
4018 if (dp)
4019 offset = 8;
4020 else
4021 offset = 4;
4022 for (i = 0; i < n; i++) {
4023 if (insn & ARM_CP_RW_BIT) {
4024 /* load */
4025 gen_vfp_ld(s, dp, addr);
4026 gen_mov_vreg_F0(dp, rd + i);
4027 } else {
4028 /* store */
4029 gen_mov_F0_vreg(dp, rd + i);
4030 gen_vfp_st(s, dp, addr);
4032 tcg_gen_addi_i32(addr, addr, offset);
4034 if (w) {
4035 /* writeback */
4036 if (insn & (1 << 24))
4037 offset = -offset * n;
4038 else if (dp && (insn & 1))
4039 offset = 4;
4040 else
4041 offset = 0;
4043 if (offset != 0)
4044 tcg_gen_addi_i32(addr, addr, offset);
4045 store_reg(s, rn, addr);
4046 } else {
4047 tcg_temp_free_i32(addr);
4051 break;
4052 default:
4053 /* Should never happen. */
4054 return 1;
4056 return 0;
4059 static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
4061 #ifndef CONFIG_USER_ONLY
4062 return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
4063 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4064 #else
4065 return true;
4066 #endif
4069 static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
4071 if (use_goto_tb(s, dest)) {
4072 tcg_gen_goto_tb(n);
4073 gen_set_pc_im(s, dest);
4074 tcg_gen_exit_tb((uintptr_t)s->tb + n);
4075 } else {
4076 gen_set_pc_im(s, dest);
4077 tcg_gen_exit_tb(0);
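/* Note on the exit values above: when direct chaining is allowed,
 * tcg_gen_exit_tb((uintptr_t)s->tb + n) returns to the main loop with the
 * source TB pointer plus the jump-slot index n encoded in the low bits, so
 * the loop can patch this TB to branch straight to its successor next time;
 * tcg_gen_exit_tb(0) asks for an ordinary (unchained) TB lookup instead.
 */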
4081 static inline void gen_jmp (DisasContext *s, uint32_t dest)
4083 if (unlikely(s->singlestep_enabled || s->ss_active)) {
4084 /* An indirect jump so that we still trigger the debug exception. */
4085 if (s->thumb)
4086 dest |= 1;
4087 gen_bx_im(s, dest);
4088 } else {
4089 gen_goto_tb(s, 0, dest);
4090 s->is_jmp = DISAS_TB_JUMP;
4094 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
4096 if (x)
4097 tcg_gen_sari_i32(t0, t0, 16);
4098 else
4099 gen_sxth(t0);
4100 if (y)
4101 tcg_gen_sari_i32(t1, t1, 16);
4102 else
4103 gen_sxth(t1);
4104 tcg_gen_mul_i32(t0, t0, t1);
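/* Worked example (illustrative): with x = 1 and y = 0 the code above
 * computes (int16_t)(t0 >> 16) * (int16_t)t1, i.e. the sign-extended top
 * halfword of t0 times the sign-extended bottom halfword of t1.
 */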
4107 /* Return the mask of PSR bits set by a MSR instruction. */
4108 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4110 uint32_t mask;
4112 mask = 0;
4113 if (flags & (1 << 0))
4114 mask |= 0xff;
4115 if (flags & (1 << 1))
4116 mask |= 0xff00;
4117 if (flags & (1 << 2))
4118 mask |= 0xff0000;
4119 if (flags & (1 << 3))
4120 mask |= 0xff000000;
4122 /* Mask out undefined bits. */
4123 mask &= ~CPSR_RESERVED;
4124 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
4125 mask &= ~CPSR_T;
4127 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
4128         mask &= ~CPSR_Q; /* V5TE in reality */
4130 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
4131 mask &= ~(CPSR_E | CPSR_GE);
4133 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
4134 mask &= ~CPSR_IT;
4136 /* Mask out execution state and reserved bits. */
4137 if (!spsr) {
4138 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4140 /* Mask out privileged bits. */
4141 if (IS_USER(s))
4142 mask &= CPSR_USER;
4143 return mask;
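/* Worked example (illustrative): an MSR whose field mask is 0b1001 (the
 * "c" and "f" fields) starts with mask = 0xff0000ff; the code above then
 * strips reserved bits, clears CPSR_T/CPSR_Q/CPSR_E/CPSR_GE/CPSR_IT on
 * cores lacking the corresponding features, drops the execution state bits
 * for CPSR (as opposed to SPSR) writes, and in user mode reduces the mask
 * to CPSR_USER.
 */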
4146 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
4147 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
4149 TCGv_i32 tmp;
4150 if (spsr) {
4151 /* ??? This is also undefined in system mode. */
4152 if (IS_USER(s))
4153 return 1;
4155 tmp = load_cpu_field(spsr);
4156 tcg_gen_andi_i32(tmp, tmp, ~mask);
4157 tcg_gen_andi_i32(t0, t0, mask);
4158 tcg_gen_or_i32(tmp, tmp, t0);
4159 store_cpu_field(tmp, spsr);
4160 } else {
4161 gen_set_cpsr(t0, mask);
4163 tcg_temp_free_i32(t0);
4164 gen_lookup_tb(s);
4165 return 0;
4168 /* Returns nonzero if access to the PSR is not permitted. */
4169 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4171 TCGv_i32 tmp;
4172 tmp = tcg_temp_new_i32();
4173 tcg_gen_movi_i32(tmp, val);
4174 return gen_set_psr(s, mask, spsr, tmp);
4177 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
4178 int *tgtmode, int *regno)
4180 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4181 * the target mode and register number, and identify the various
4182 * unpredictable cases.
4183 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4184 * + executed in user mode
4185 * + using R15 as the src/dest register
4186 * + accessing an unimplemented register
4187 * + accessing a register that's inaccessible at current PL/security state*
4188 * + accessing a register that you could access with a different insn
4189 * We choose to UNDEF in all these cases.
4190 * Since we don't know which of the various AArch32 modes we are in
4191 * we have to defer some checks to runtime.
4192 * Accesses to Monitor mode registers from Secure EL1 (which implies
4193 * that EL3 is AArch64) must trap to EL3.
4195 * If the access checks fail this function will emit code to take
4196 * an exception and return false. Otherwise it will return true,
4197 * and set *tgtmode and *regno appropriately.
4199 int exc_target = default_exception_el(s);
4201 /* These instructions are present only in ARMv8, or in ARMv7 with the
4202 * Virtualization Extensions.
4204 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4205 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
4206 goto undef;
4209 if (IS_USER(s) || rn == 15) {
4210 goto undef;
4213 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4214 * of registers into (r, sysm).
4216 if (r) {
4217 /* SPSRs for other modes */
4218 switch (sysm) {
4219 case 0xe: /* SPSR_fiq */
4220 *tgtmode = ARM_CPU_MODE_FIQ;
4221 break;
4222 case 0x10: /* SPSR_irq */
4223 *tgtmode = ARM_CPU_MODE_IRQ;
4224 break;
4225 case 0x12: /* SPSR_svc */
4226 *tgtmode = ARM_CPU_MODE_SVC;
4227 break;
4228 case 0x14: /* SPSR_abt */
4229 *tgtmode = ARM_CPU_MODE_ABT;
4230 break;
4231 case 0x16: /* SPSR_und */
4232 *tgtmode = ARM_CPU_MODE_UND;
4233 break;
4234 case 0x1c: /* SPSR_mon */
4235 *tgtmode = ARM_CPU_MODE_MON;
4236 break;
4237 case 0x1e: /* SPSR_hyp */
4238 *tgtmode = ARM_CPU_MODE_HYP;
4239 break;
4240 default: /* unallocated */
4241 goto undef;
4243 /* We arbitrarily assign SPSR a register number of 16. */
4244 *regno = 16;
4245 } else {
4246 /* general purpose registers for other modes */
4247 switch (sysm) {
4248 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4249 *tgtmode = ARM_CPU_MODE_USR;
4250 *regno = sysm + 8;
4251 break;
4252 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4253 *tgtmode = ARM_CPU_MODE_FIQ;
4254 *regno = sysm;
4255 break;
4256 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4257 *tgtmode = ARM_CPU_MODE_IRQ;
4258 *regno = sysm & 1 ? 13 : 14;
4259 break;
4260 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4261 *tgtmode = ARM_CPU_MODE_SVC;
4262 *regno = sysm & 1 ? 13 : 14;
4263 break;
4264 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4265 *tgtmode = ARM_CPU_MODE_ABT;
4266 *regno = sysm & 1 ? 13 : 14;
4267 break;
4268 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4269 *tgtmode = ARM_CPU_MODE_UND;
4270 *regno = sysm & 1 ? 13 : 14;
4271 break;
4272 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4273 *tgtmode = ARM_CPU_MODE_MON;
4274 *regno = sysm & 1 ? 13 : 14;
4275 break;
4276 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4277 *tgtmode = ARM_CPU_MODE_HYP;
4278 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4279 *regno = sysm & 1 ? 13 : 17;
4280 break;
4281 default: /* unallocated */
4282 goto undef;
4286 /* Catch the 'accessing inaccessible register' cases we can detect
4287 * at translate time.
4289 switch (*tgtmode) {
4290 case ARM_CPU_MODE_MON:
4291 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
4292 goto undef;
4294 if (s->current_el == 1) {
4295 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4296 * then accesses to Mon registers trap to EL3
4298 exc_target = 3;
4299 goto undef;
4301 break;
4302 case ARM_CPU_MODE_HYP:
4303 /* Note that we can forbid accesses from EL2 here because they
4304 * must be from Hyp mode itself
4306 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
4307 goto undef;
4309 break;
4310 default:
4311 break;
4314 return true;
4316 undef:
4317 /* If we get here then some access check did not pass */
4318 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
4319 return false;
4322 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4324 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4325 int tgtmode = 0, regno = 0;
4327 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4328 return;
4331 /* Sync state because msr_banked() can raise exceptions */
4332 gen_set_condexec(s);
4333 gen_set_pc_im(s, s->pc - 4);
4334 tcg_reg = load_reg(s, rn);
4335 tcg_tgtmode = tcg_const_i32(tgtmode);
4336 tcg_regno = tcg_const_i32(regno);
4337 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4338 tcg_temp_free_i32(tcg_tgtmode);
4339 tcg_temp_free_i32(tcg_regno);
4340 tcg_temp_free_i32(tcg_reg);
4341 s->is_jmp = DISAS_UPDATE;
4344 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4346 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4347 int tgtmode = 0, regno = 0;
4349 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4350 return;
4353 /* Sync state because mrs_banked() can raise exceptions */
4354 gen_set_condexec(s);
4355 gen_set_pc_im(s, s->pc - 4);
4356 tcg_reg = tcg_temp_new_i32();
4357 tcg_tgtmode = tcg_const_i32(tgtmode);
4358 tcg_regno = tcg_const_i32(regno);
4359 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4360 tcg_temp_free_i32(tcg_tgtmode);
4361 tcg_temp_free_i32(tcg_regno);
4362 store_reg(s, rn, tcg_reg);
4363 s->is_jmp = DISAS_UPDATE;
4366 /* Generate an old-style exception return. Marks pc as dead. */
4367 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4369 TCGv_i32 tmp;
4370 store_reg(s, 15, pc);
4371 tmp = load_cpu_field(spsr);
4372 gen_helper_cpsr_write_eret(cpu_env, tmp);
4373 tcg_temp_free_i32(tmp);
4374 s->is_jmp = DISAS_JUMP;
4377 /* Generate a v6 exception return. Marks both values as dead. */
4378 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
4380 gen_helper_cpsr_write_eret(cpu_env, cpsr);
4381 tcg_temp_free_i32(cpsr);
4382 store_reg(s, 15, pc);
4383 s->is_jmp = DISAS_JUMP;
4386 static void gen_nop_hint(DisasContext *s, int val)
4388 switch (val) {
4389 case 1: /* yield */
4390 gen_set_pc_im(s, s->pc);
4391 s->is_jmp = DISAS_YIELD;
4392 break;
4393 case 3: /* wfi */
4394 gen_set_pc_im(s, s->pc);
4395 s->is_jmp = DISAS_WFI;
4396 break;
4397 case 2: /* wfe */
4398 gen_set_pc_im(s, s->pc);
4399 s->is_jmp = DISAS_WFE;
4400 break;
4401 case 4: /* sev */
4402 case 5: /* sevl */
4403 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
4404 default: /* nop */
4405 break;
4409 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
4411 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
4413 switch (size) {
4414 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4415 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4416 case 2: tcg_gen_add_i32(t0, t0, t1); break;
4417 default: abort();
4421 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
4423 switch (size) {
4424 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4425 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4426 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
4427 default: return;
4431 /* 32-bit pairwise ops end up the same as the elementwise versions. */
4432 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4433 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4434 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4435 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
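/* The GEN_NEON_INTEGER_OP* macros below dispatch on ((size << 1) | u):
 * size 0/1/2 selects 8/16/32-bit elements and u selects the unsigned
 * variant, giving the six cases s8, u8, s16, u16, s32, u32. The operands
 * are expected in tmp and tmp2, and any other size/u combination makes
 * the enclosing function "return 1" (i.e. UNDEF).
 */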
4437 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
4438 switch ((size << 1) | u) { \
4439 case 0: \
4440 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
4441 break; \
4442 case 1: \
4443 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
4444 break; \
4445 case 2: \
4446 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
4447 break; \
4448 case 3: \
4449 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
4450 break; \
4451 case 4: \
4452 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
4453 break; \
4454 case 5: \
4455 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
4456 break; \
4457 default: return 1; \
4458 }} while (0)
4460 #define GEN_NEON_INTEGER_OP(name) do { \
4461 switch ((size << 1) | u) { \
4462 case 0: \
4463 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
4464 break; \
4465 case 1: \
4466 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
4467 break; \
4468 case 2: \
4469 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
4470 break; \
4471 case 3: \
4472 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
4473 break; \
4474 case 4: \
4475 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
4476 break; \
4477 case 5: \
4478 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
4479 break; \
4480 default: return 1; \
4481 }} while (0)
4483 static TCGv_i32 neon_load_scratch(int scratch)
4485 TCGv_i32 tmp = tcg_temp_new_i32();
4486 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4487 return tmp;
4490 static void neon_store_scratch(int scratch, TCGv_i32 var)
4492 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4493 tcg_temp_free_i32(var);
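/* Load the 32-bit word containing a scalar operand: for 16-bit scalars
 * (size == 1) the selected half is duplicated into both halves of the
 * word so the ordinary elementwise helpers can operate on it.
 */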
4496 static inline TCGv_i32 neon_get_scalar(int size, int reg)
4498 TCGv_i32 tmp;
4499 if (size == 1) {
4500 tmp = neon_load_reg(reg & 7, reg >> 4);
4501 if (reg & 8) {
4502 gen_neon_dup_high16(tmp);
4503 } else {
4504 gen_neon_dup_low16(tmp);
4506 } else {
4507 tmp = neon_load_reg(reg & 15, reg >> 4);
4509 return tmp;
4512 static int gen_neon_unzip(int rd, int rm, int size, int q)
4514 TCGv_i32 tmp, tmp2;
4515 if (!q && size == 2) {
4516 return 1;
4518 tmp = tcg_const_i32(rd);
4519 tmp2 = tcg_const_i32(rm);
4520 if (q) {
4521 switch (size) {
4522 case 0:
4523 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
4524 break;
4525 case 1:
4526 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
4527 break;
4528 case 2:
4529 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
4530 break;
4531 default:
4532 abort();
4534 } else {
4535 switch (size) {
4536 case 0:
4537 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
4538 break;
4539 case 1:
4540 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
4541 break;
4542 default:
4543 abort();
4546 tcg_temp_free_i32(tmp);
4547 tcg_temp_free_i32(tmp2);
4548 return 0;
4551 static int gen_neon_zip(int rd, int rm, int size, int q)
4553 TCGv_i32 tmp, tmp2;
4554 if (!q && size == 2) {
4555 return 1;
4557 tmp = tcg_const_i32(rd);
4558 tmp2 = tcg_const_i32(rm);
4559 if (q) {
4560 switch (size) {
4561 case 0:
4562 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
4563 break;
4564 case 1:
4565 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
4566 break;
4567 case 2:
4568 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
4569 break;
4570 default:
4571 abort();
4573 } else {
4574 switch (size) {
4575 case 0:
4576 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
4577 break;
4578 case 1:
4579 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
4580 break;
4581 default:
4582 abort();
4585 tcg_temp_free_i32(tmp);
4586 tcg_temp_free_i32(tmp2);
4587 return 0;
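/* In-place element transpose helpers used by the 2-register-misc VTRN
 * operation: gen_neon_trn_u8 works on the 8-bit elements of a pair of
 * 32-bit values, gen_neon_trn_u16 on their 16-bit halves.
 */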
4590 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
4592 TCGv_i32 rd, tmp;
4594 rd = tcg_temp_new_i32();
4595 tmp = tcg_temp_new_i32();
4597 tcg_gen_shli_i32(rd, t0, 8);
4598 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4599 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4600 tcg_gen_or_i32(rd, rd, tmp);
4602 tcg_gen_shri_i32(t1, t1, 8);
4603 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4604 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4605 tcg_gen_or_i32(t1, t1, tmp);
4606 tcg_gen_mov_i32(t0, rd);
4608 tcg_temp_free_i32(tmp);
4609 tcg_temp_free_i32(rd);
4612 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
4614 TCGv_i32 rd, tmp;
4616 rd = tcg_temp_new_i32();
4617 tmp = tcg_temp_new_i32();
4619 tcg_gen_shli_i32(rd, t0, 16);
4620 tcg_gen_andi_i32(tmp, t1, 0xffff);
4621 tcg_gen_or_i32(rd, rd, tmp);
4622 tcg_gen_shri_i32(t1, t1, 16);
4623 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4624 tcg_gen_or_i32(t1, t1, tmp);
4625 tcg_gen_mov_i32(t0, rd);
4627 tcg_temp_free_i32(tmp);
4628 tcg_temp_free_i32(rd);
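/* Properties of the VLDn/VSTn "multiple structures" forms, indexed by
 * the op field of the instruction: number of registers transferred,
 * element interleave pattern, and spacing between the D registers used.
 */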
4632 static struct {
4633 int nregs;
4634 int interleave;
4635 int spacing;
4636 } neon_ls_element_type[11] = {
4637 {4, 4, 1},
4638 {4, 4, 2},
4639 {4, 1, 1},
4640 {4, 2, 1},
4641 {3, 3, 1},
4642 {3, 3, 2},
4643 {3, 1, 1},
4644 {1, 1, 1},
4645 {2, 2, 1},
4646 {2, 2, 2},
4647 {2, 1, 1}
4650 /* Translate a NEON load/store element instruction. Return nonzero if the
4651 instruction is invalid. */
4652 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
4654 int rd, rn, rm;
4655 int op;
4656 int nregs;
4657 int interleave;
4658 int spacing;
4659 int stride;
4660 int size;
4661 int reg;
4662 int pass;
4663 int load;
4664 int shift;
4665 int n;
4666 TCGv_i32 addr;
4667 TCGv_i32 tmp;
4668 TCGv_i32 tmp2;
4669 TCGv_i64 tmp64;
4671 /* FIXME: this access check should not take precedence over UNDEF
4672 * for invalid encodings; we will generate incorrect syndrome information
4673 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4675 if (s->fp_excp_el) {
4676 gen_exception_insn(s, 4, EXCP_UDEF,
4677 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
4678 return 0;
4681 if (!s->vfp_enabled)
4682 return 1;
4683 VFP_DREG_D(rd, insn);
4684 rn = (insn >> 16) & 0xf;
4685 rm = insn & 0xf;
4686 load = (insn & (1 << 21)) != 0;
4687 if ((insn & (1 << 23)) == 0) {
4688 /* Load store all elements. */
4689 op = (insn >> 8) & 0xf;
4690 size = (insn >> 6) & 3;
4691 if (op > 10)
4692 return 1;
4693 /* Catch UNDEF cases for bad values of align field */
4694 switch (op & 0xc) {
4695 case 4:
4696 if (((insn >> 5) & 1) == 1) {
4697 return 1;
4699 break;
4700 case 8:
4701 if (((insn >> 4) & 3) == 3) {
4702 return 1;
4704 break;
4705 default:
4706 break;
4708 nregs = neon_ls_element_type[op].nregs;
4709 interleave = neon_ls_element_type[op].interleave;
4710 spacing = neon_ls_element_type[op].spacing;
4711 if (size == 3 && (interleave | spacing) != 1)
4712 return 1;
4713 addr = tcg_temp_new_i32();
4714 load_reg_var(s, addr, rn);
4715 stride = (1 << size) * interleave;
4716 for (reg = 0; reg < nregs; reg++) {
4717 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
4718 load_reg_var(s, addr, rn);
4719 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
4720 } else if (interleave == 2 && nregs == 4 && reg == 2) {
4721 load_reg_var(s, addr, rn);
4722 tcg_gen_addi_i32(addr, addr, 1 << size);
4724 if (size == 3) {
4725 tmp64 = tcg_temp_new_i64();
4726 if (load) {
4727 gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
4728 neon_store_reg64(tmp64, rd);
4729 } else {
4730 neon_load_reg64(tmp64, rd);
4731 gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
4733 tcg_temp_free_i64(tmp64);
4734 tcg_gen_addi_i32(addr, addr, stride);
4735 } else {
4736 for (pass = 0; pass < 2; pass++) {
4737 if (size == 2) {
4738 if (load) {
4739 tmp = tcg_temp_new_i32();
4740 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
4741 neon_store_reg(rd, pass, tmp);
4742 } else {
4743 tmp = neon_load_reg(rd, pass);
4744 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
4745 tcg_temp_free_i32(tmp);
4747 tcg_gen_addi_i32(addr, addr, stride);
4748 } else if (size == 1) {
4749 if (load) {
4750 tmp = tcg_temp_new_i32();
4751 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
4752 tcg_gen_addi_i32(addr, addr, stride);
4753 tmp2 = tcg_temp_new_i32();
4754 gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
4755 tcg_gen_addi_i32(addr, addr, stride);
4756 tcg_gen_shli_i32(tmp2, tmp2, 16);
4757 tcg_gen_or_i32(tmp, tmp, tmp2);
4758 tcg_temp_free_i32(tmp2);
4759 neon_store_reg(rd, pass, tmp);
4760 } else {
4761 tmp = neon_load_reg(rd, pass);
4762 tmp2 = tcg_temp_new_i32();
4763 tcg_gen_shri_i32(tmp2, tmp, 16);
4764 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
4765 tcg_temp_free_i32(tmp);
4766 tcg_gen_addi_i32(addr, addr, stride);
4767 gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
4768 tcg_temp_free_i32(tmp2);
4769 tcg_gen_addi_i32(addr, addr, stride);
4771 } else /* size == 0 */ {
4772 if (load) {
4773 TCGV_UNUSED_I32(tmp2);
4774 for (n = 0; n < 4; n++) {
4775 tmp = tcg_temp_new_i32();
4776 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
4777 tcg_gen_addi_i32(addr, addr, stride);
4778 if (n == 0) {
4779 tmp2 = tmp;
4780 } else {
4781 tcg_gen_shli_i32(tmp, tmp, n * 8);
4782 tcg_gen_or_i32(tmp2, tmp2, tmp);
4783 tcg_temp_free_i32(tmp);
4786 neon_store_reg(rd, pass, tmp2);
4787 } else {
4788 tmp2 = neon_load_reg(rd, pass);
4789 for (n = 0; n < 4; n++) {
4790 tmp = tcg_temp_new_i32();
4791 if (n == 0) {
4792 tcg_gen_mov_i32(tmp, tmp2);
4793 } else {
4794 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4796 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
4797 tcg_temp_free_i32(tmp);
4798 tcg_gen_addi_i32(addr, addr, stride);
4800 tcg_temp_free_i32(tmp2);
4805 rd += spacing;
4807 tcg_temp_free_i32(addr);
4808 stride = nregs * 8;
4809 } else {
4810 size = (insn >> 10) & 3;
4811 if (size == 3) {
4812 /* Load single element to all lanes. */
4813 int a = (insn >> 4) & 1;
4814 if (!load) {
4815 return 1;
4817 size = (insn >> 6) & 3;
4818 nregs = ((insn >> 8) & 3) + 1;
4820 if (size == 3) {
4821 if (nregs != 4 || a == 0) {
4822 return 1;
4824 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4825 size = 2;
4827 if (nregs == 1 && a == 1 && size == 0) {
4828 return 1;
4830 if (nregs == 3 && a == 1) {
4831 return 1;
4833 addr = tcg_temp_new_i32();
4834 load_reg_var(s, addr, rn);
4835 if (nregs == 1) {
4836 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4837 tmp = gen_load_and_replicate(s, addr, size);
4838 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4839 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4840 if (insn & (1 << 5)) {
4841 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4842 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4844 tcg_temp_free_i32(tmp);
4845 } else {
4846 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4847 stride = (insn & (1 << 5)) ? 2 : 1;
4848 for (reg = 0; reg < nregs; reg++) {
4849 tmp = gen_load_and_replicate(s, addr, size);
4850 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4851 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4852 tcg_temp_free_i32(tmp);
4853 tcg_gen_addi_i32(addr, addr, 1 << size);
4854 rd += stride;
4857 tcg_temp_free_i32(addr);
4858 stride = (1 << size) * nregs;
4859 } else {
4860 /* Single element. */
4861 int idx = (insn >> 4) & 0xf;
4862 pass = (insn >> 7) & 1;
4863 switch (size) {
4864 case 0:
4865 shift = ((insn >> 5) & 3) * 8;
4866 stride = 1;
4867 break;
4868 case 1:
4869 shift = ((insn >> 6) & 1) * 16;
4870 stride = (insn & (1 << 5)) ? 2 : 1;
4871 break;
4872 case 2:
4873 shift = 0;
4874 stride = (insn & (1 << 6)) ? 2 : 1;
4875 break;
4876 default:
4877 abort();
4879 nregs = ((insn >> 8) & 3) + 1;
4880 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4881 switch (nregs) {
4882 case 1:
4883 if (((idx & (1 << size)) != 0) ||
4884 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4885 return 1;
4887 break;
4888 case 3:
4889 if ((idx & 1) != 0) {
4890 return 1;
4892 /* fall through */
4893 case 2:
4894 if (size == 2 && (idx & 2) != 0) {
4895 return 1;
4897 break;
4898 case 4:
4899 if ((size == 2) && ((idx & 3) == 3)) {
4900 return 1;
4902 break;
4903 default:
4904 abort();
4906 if ((rd + stride * (nregs - 1)) > 31) {
4907 /* Attempts to write off the end of the register file
4908 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4909 * the neon_load_reg()/neon_store_reg() accesses would run off the end of the array.
4911 return 1;
4913 addr = tcg_temp_new_i32();
4914 load_reg_var(s, addr, rn);
4915 for (reg = 0; reg < nregs; reg++) {
4916 if (load) {
4917 tmp = tcg_temp_new_i32();
4918 switch (size) {
4919 case 0:
4920 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
4921 break;
4922 case 1:
4923 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
4924 break;
4925 case 2:
4926 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
4927 break;
4928 default: /* Avoid compiler warnings. */
4929 abort();
4931 if (size != 2) {
4932 tmp2 = neon_load_reg(rd, pass);
4933 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4934 shift, size ? 16 : 8);
4935 tcg_temp_free_i32(tmp2);
4937 neon_store_reg(rd, pass, tmp);
4938 } else { /* Store */
4939 tmp = neon_load_reg(rd, pass);
4940 if (shift)
4941 tcg_gen_shri_i32(tmp, tmp, shift);
4942 switch (size) {
4943 case 0:
4944 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
4945 break;
4946 case 1:
4947 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
4948 break;
4949 case 2:
4950 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
4951 break;
4953 tcg_temp_free_i32(tmp);
4955 rd += stride;
4956 tcg_gen_addi_i32(addr, addr, 1 << size);
4958 tcg_temp_free_i32(addr);
4959 stride = nregs * (1 << size);
4962 if (rm != 15) {
4963 TCGv_i32 base;
4965 base = load_reg(s, rn);
4966 if (rm == 13) {
4967 tcg_gen_addi_i32(base, base, stride);
4968 } else {
4969 TCGv_i32 index;
4970 index = load_reg(s, rm);
4971 tcg_gen_add_i32(base, base, index);
4972 tcg_temp_free_i32(index);
4974 store_reg(s, rn, base);
4976 return 0;
4979 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4980 static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
4982 tcg_gen_and_i32(t, t, c);
4983 tcg_gen_andc_i32(f, f, c);
4984 tcg_gen_or_i32(dest, t, f);
4987 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
4989 switch (size) {
4990 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4991 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4992 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
4993 default: abort();
4997 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
4999 switch (size) {
5000 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5001 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5002 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
5003 default: abort();
5007 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
5009 switch (size) {
5010 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5011 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5012 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
5013 default: abort();
5017 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
5019 switch (size) {
5020 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5021 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5022 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
5023 default: abort();
5027 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
5028 int q, int u)
5030 if (q) {
5031 if (u) {
5032 switch (size) {
5033 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5034 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5035 default: abort();
5037 } else {
5038 switch (size) {
5039 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5040 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5041 default: abort();
5044 } else {
5045 if (u) {
5046 switch (size) {
5047 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5048 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
5049 default: abort();
5051 } else {
5052 switch (size) {
5053 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5054 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5055 default: abort();
5061 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
5063 if (u) {
5064 switch (size) {
5065 case 0: gen_helper_neon_widen_u8(dest, src); break;
5066 case 1: gen_helper_neon_widen_u16(dest, src); break;
5067 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5068 default: abort();
5070 } else {
5071 switch (size) {
5072 case 0: gen_helper_neon_widen_s8(dest, src); break;
5073 case 1: gen_helper_neon_widen_s16(dest, src); break;
5074 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5075 default: abort();
5078 tcg_temp_free_i32(src);
5081 static inline void gen_neon_addl(int size)
5083 switch (size) {
5084 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5085 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5086 case 2: tcg_gen_add_i64(CPU_V001); break;
5087 default: abort();
5091 static inline void gen_neon_subl(int size)
5093 switch (size) {
5094 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5095 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5096 case 2: tcg_gen_sub_i64(CPU_V001); break;
5097 default: abort();
5101 static inline void gen_neon_negl(TCGv_i64 var, int size)
5103 switch (size) {
5104 case 0: gen_helper_neon_negl_u16(var, var); break;
5105 case 1: gen_helper_neon_negl_u32(var, var); break;
5106 case 2:
5107 tcg_gen_neg_i64(var, var);
5108 break;
5109 default: abort();
5113 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
5115 switch (size) {
5116 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5117 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
5118 default: abort();
5122 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5123 int size, int u)
5125 TCGv_i64 tmp;
5127 switch ((size << 1) | u) {
5128 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5129 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5130 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5131 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5132 case 4:
5133 tmp = gen_muls_i64_i32(a, b);
5134 tcg_gen_mov_i64(dest, tmp);
5135 tcg_temp_free_i64(tmp);
5136 break;
5137 case 5:
5138 tmp = gen_mulu_i64_i32(a, b);
5139 tcg_gen_mov_i64(dest, tmp);
5140 tcg_temp_free_i64(tmp);
5141 break;
5142 default: abort();
5145 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5146 Don't forget to clean them now. */
5147 if (size < 2) {
5148 tcg_temp_free_i32(a);
5149 tcg_temp_free_i32(b);
5153 static void gen_neon_narrow_op(int op, int u, int size,
5154 TCGv_i32 dest, TCGv_i64 src)
5156 if (op) {
5157 if (u) {
5158 gen_neon_unarrow_sats(size, dest, src);
5159 } else {
5160 gen_neon_narrow(size, dest, src);
5162 } else {
5163 if (u) {
5164 gen_neon_narrow_satu(size, dest, src);
5165 } else {
5166 gen_neon_narrow_sats(size, dest, src);
5171 /* Symbolic constants for op fields for Neon 3-register same-length.
5172 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
5173 * table A7-9.
5175 #define NEON_3R_VHADD 0
5176 #define NEON_3R_VQADD 1
5177 #define NEON_3R_VRHADD 2
5178 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
5179 #define NEON_3R_VHSUB 4
5180 #define NEON_3R_VQSUB 5
5181 #define NEON_3R_VCGT 6
5182 #define NEON_3R_VCGE 7
5183 #define NEON_3R_VSHL 8
5184 #define NEON_3R_VQSHL 9
5185 #define NEON_3R_VRSHL 10
5186 #define NEON_3R_VQRSHL 11
5187 #define NEON_3R_VMAX 12
5188 #define NEON_3R_VMIN 13
5189 #define NEON_3R_VABD 14
5190 #define NEON_3R_VABA 15
5191 #define NEON_3R_VADD_VSUB 16
5192 #define NEON_3R_VTST_VCEQ 17
5193 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
5194 #define NEON_3R_VMUL 19
5195 #define NEON_3R_VPMAX 20
5196 #define NEON_3R_VPMIN 21
5197 #define NEON_3R_VQDMULH_VQRDMULH 22
5198 #define NEON_3R_VPADD 23
5199 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
5200 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
5201 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
5202 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
5203 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
5204 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
5205 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
5206 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
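/* Each entry below has bit n set if the corresponding 3-reg-same op is
 * allowed for size value n; a size whose bit is clear UNDEFs.
 */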
5208 static const uint8_t neon_3r_sizes[] = {
5209 [NEON_3R_VHADD] = 0x7,
5210 [NEON_3R_VQADD] = 0xf,
5211 [NEON_3R_VRHADD] = 0x7,
5212 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
5213 [NEON_3R_VHSUB] = 0x7,
5214 [NEON_3R_VQSUB] = 0xf,
5215 [NEON_3R_VCGT] = 0x7,
5216 [NEON_3R_VCGE] = 0x7,
5217 [NEON_3R_VSHL] = 0xf,
5218 [NEON_3R_VQSHL] = 0xf,
5219 [NEON_3R_VRSHL] = 0xf,
5220 [NEON_3R_VQRSHL] = 0xf,
5221 [NEON_3R_VMAX] = 0x7,
5222 [NEON_3R_VMIN] = 0x7,
5223 [NEON_3R_VABD] = 0x7,
5224 [NEON_3R_VABA] = 0x7,
5225 [NEON_3R_VADD_VSUB] = 0xf,
5226 [NEON_3R_VTST_VCEQ] = 0x7,
5227 [NEON_3R_VML] = 0x7,
5228 [NEON_3R_VMUL] = 0x7,
5229 [NEON_3R_VPMAX] = 0x7,
5230 [NEON_3R_VPMIN] = 0x7,
5231 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
5232 [NEON_3R_VPADD] = 0x7,
5233 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
5234 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
5235 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
5236 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
5237 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
5238 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
5239 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
5240 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
5243 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
5244 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
5245 * table A7-13.
5247 #define NEON_2RM_VREV64 0
5248 #define NEON_2RM_VREV32 1
5249 #define NEON_2RM_VREV16 2
5250 #define NEON_2RM_VPADDL 4
5251 #define NEON_2RM_VPADDL_U 5
5252 #define NEON_2RM_AESE 6 /* Includes AESD */
5253 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
5254 #define NEON_2RM_VCLS 8
5255 #define NEON_2RM_VCLZ 9
5256 #define NEON_2RM_VCNT 10
5257 #define NEON_2RM_VMVN 11
5258 #define NEON_2RM_VPADAL 12
5259 #define NEON_2RM_VPADAL_U 13
5260 #define NEON_2RM_VQABS 14
5261 #define NEON_2RM_VQNEG 15
5262 #define NEON_2RM_VCGT0 16
5263 #define NEON_2RM_VCGE0 17
5264 #define NEON_2RM_VCEQ0 18
5265 #define NEON_2RM_VCLE0 19
5266 #define NEON_2RM_VCLT0 20
5267 #define NEON_2RM_SHA1H 21
5268 #define NEON_2RM_VABS 22
5269 #define NEON_2RM_VNEG 23
5270 #define NEON_2RM_VCGT0_F 24
5271 #define NEON_2RM_VCGE0_F 25
5272 #define NEON_2RM_VCEQ0_F 26
5273 #define NEON_2RM_VCLE0_F 27
5274 #define NEON_2RM_VCLT0_F 28
5275 #define NEON_2RM_VABS_F 30
5276 #define NEON_2RM_VNEG_F 31
5277 #define NEON_2RM_VSWP 32
5278 #define NEON_2RM_VTRN 33
5279 #define NEON_2RM_VUZP 34
5280 #define NEON_2RM_VZIP 35
5281 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
5282 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
5283 #define NEON_2RM_VSHLL 38
5284 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
5285 #define NEON_2RM_VRINTN 40
5286 #define NEON_2RM_VRINTX 41
5287 #define NEON_2RM_VRINTA 42
5288 #define NEON_2RM_VRINTZ 43
5289 #define NEON_2RM_VCVT_F16_F32 44
5290 #define NEON_2RM_VRINTM 45
5291 #define NEON_2RM_VCVT_F32_F16 46
5292 #define NEON_2RM_VRINTP 47
5293 #define NEON_2RM_VCVTAU 48
5294 #define NEON_2RM_VCVTAS 49
5295 #define NEON_2RM_VCVTNU 50
5296 #define NEON_2RM_VCVTNS 51
5297 #define NEON_2RM_VCVTPU 52
5298 #define NEON_2RM_VCVTPS 53
5299 #define NEON_2RM_VCVTMU 54
5300 #define NEON_2RM_VCVTMS 55
5301 #define NEON_2RM_VRECPE 56
5302 #define NEON_2RM_VRSQRTE 57
5303 #define NEON_2RM_VRECPE_F 58
5304 #define NEON_2RM_VRSQRTE_F 59
5305 #define NEON_2RM_VCVT_FS 60
5306 #define NEON_2RM_VCVT_FU 61
5307 #define NEON_2RM_VCVT_SF 62
5308 #define NEON_2RM_VCVT_UF 63
5310 static int neon_2rm_is_float_op(int op)
5312 /* Return true if this neon 2reg-misc op is float-to-float */
5313 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
5314 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
5315 op == NEON_2RM_VRINTM ||
5316 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
5317 op >= NEON_2RM_VRECPE_F);
5320 static bool neon_2rm_is_v8_op(int op)
5322 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5323 switch (op) {
5324 case NEON_2RM_VRINTN:
5325 case NEON_2RM_VRINTA:
5326 case NEON_2RM_VRINTM:
5327 case NEON_2RM_VRINTP:
5328 case NEON_2RM_VRINTZ:
5329 case NEON_2RM_VRINTX:
5330 case NEON_2RM_VCVTAU:
5331 case NEON_2RM_VCVTAS:
5332 case NEON_2RM_VCVTNU:
5333 case NEON_2RM_VCVTNS:
5334 case NEON_2RM_VCVTPU:
5335 case NEON_2RM_VCVTPS:
5336 case NEON_2RM_VCVTMU:
5337 case NEON_2RM_VCVTMS:
5338 return true;
5339 default:
5340 return false;
5344 /* Each entry in this array has bit n set if the insn allows
5345 * size value n (otherwise it will UNDEF). Since unallocated
5346 * op values will have no bits set they always UNDEF.
5348 static const uint8_t neon_2rm_sizes[] = {
5349 [NEON_2RM_VREV64] = 0x7,
5350 [NEON_2RM_VREV32] = 0x3,
5351 [NEON_2RM_VREV16] = 0x1,
5352 [NEON_2RM_VPADDL] = 0x7,
5353 [NEON_2RM_VPADDL_U] = 0x7,
5354 [NEON_2RM_AESE] = 0x1,
5355 [NEON_2RM_AESMC] = 0x1,
5356 [NEON_2RM_VCLS] = 0x7,
5357 [NEON_2RM_VCLZ] = 0x7,
5358 [NEON_2RM_VCNT] = 0x1,
5359 [NEON_2RM_VMVN] = 0x1,
5360 [NEON_2RM_VPADAL] = 0x7,
5361 [NEON_2RM_VPADAL_U] = 0x7,
5362 [NEON_2RM_VQABS] = 0x7,
5363 [NEON_2RM_VQNEG] = 0x7,
5364 [NEON_2RM_VCGT0] = 0x7,
5365 [NEON_2RM_VCGE0] = 0x7,
5366 [NEON_2RM_VCEQ0] = 0x7,
5367 [NEON_2RM_VCLE0] = 0x7,
5368 [NEON_2RM_VCLT0] = 0x7,
5369 [NEON_2RM_SHA1H] = 0x4,
5370 [NEON_2RM_VABS] = 0x7,
5371 [NEON_2RM_VNEG] = 0x7,
5372 [NEON_2RM_VCGT0_F] = 0x4,
5373 [NEON_2RM_VCGE0_F] = 0x4,
5374 [NEON_2RM_VCEQ0_F] = 0x4,
5375 [NEON_2RM_VCLE0_F] = 0x4,
5376 [NEON_2RM_VCLT0_F] = 0x4,
5377 [NEON_2RM_VABS_F] = 0x4,
5378 [NEON_2RM_VNEG_F] = 0x4,
5379 [NEON_2RM_VSWP] = 0x1,
5380 [NEON_2RM_VTRN] = 0x7,
5381 [NEON_2RM_VUZP] = 0x7,
5382 [NEON_2RM_VZIP] = 0x7,
5383 [NEON_2RM_VMOVN] = 0x7,
5384 [NEON_2RM_VQMOVN] = 0x7,
5385 [NEON_2RM_VSHLL] = 0x7,
5386 [NEON_2RM_SHA1SU1] = 0x4,
5387 [NEON_2RM_VRINTN] = 0x4,
5388 [NEON_2RM_VRINTX] = 0x4,
5389 [NEON_2RM_VRINTA] = 0x4,
5390 [NEON_2RM_VRINTZ] = 0x4,
5391 [NEON_2RM_VCVT_F16_F32] = 0x2,
5392 [NEON_2RM_VRINTM] = 0x4,
5393 [NEON_2RM_VCVT_F32_F16] = 0x2,
5394 [NEON_2RM_VRINTP] = 0x4,
5395 [NEON_2RM_VCVTAU] = 0x4,
5396 [NEON_2RM_VCVTAS] = 0x4,
5397 [NEON_2RM_VCVTNU] = 0x4,
5398 [NEON_2RM_VCVTNS] = 0x4,
5399 [NEON_2RM_VCVTPU] = 0x4,
5400 [NEON_2RM_VCVTPS] = 0x4,
5401 [NEON_2RM_VCVTMU] = 0x4,
5402 [NEON_2RM_VCVTMS] = 0x4,
5403 [NEON_2RM_VRECPE] = 0x4,
5404 [NEON_2RM_VRSQRTE] = 0x4,
5405 [NEON_2RM_VRECPE_F] = 0x4,
5406 [NEON_2RM_VRSQRTE_F] = 0x4,
5407 [NEON_2RM_VCVT_FS] = 0x4,
5408 [NEON_2RM_VCVT_FU] = 0x4,
5409 [NEON_2RM_VCVT_SF] = 0x4,
5410 [NEON_2RM_VCVT_UF] = 0x4,
5413 /* Translate a NEON data processing instruction. Return nonzero if the
5414 instruction is invalid.
5415 We process data in a mixture of 32-bit and 64-bit chunks.
5416 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
5418 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
5420 int op;
5421 int q;
5422 int rd, rn, rm;
5423 int size;
5424 int shift;
5425 int pass;
5426 int count;
5427 int pairwise;
5428 int u;
5429 uint32_t imm, mask;
5430 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
5431 TCGv_i64 tmp64;
5433 /* FIXME: this access check should not take precedence over UNDEF
5434 * for invalid encodings; we will generate incorrect syndrome information
5435 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5437 if (s->fp_excp_el) {
5438 gen_exception_insn(s, 4, EXCP_UDEF,
5439 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
5440 return 0;
5443 if (!s->vfp_enabled)
5444 return 1;
5445 q = (insn & (1 << 6)) != 0;
5446 u = (insn >> 24) & 1;
5447 VFP_DREG_D(rd, insn);
5448 VFP_DREG_N(rn, insn);
5449 VFP_DREG_M(rm, insn);
5450 size = (insn >> 20) & 3;
5451 if ((insn & (1 << 23)) == 0) {
5452 /* Three register same length. */
5453 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
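/* op here is insn bits [11:8,4], i.e. the NEON_3R_* values defined above. */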
5454 /* Catch invalid op and bad size combinations: UNDEF */
5455 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5456 return 1;
5458 /* All insns of this form UNDEF for either this condition or the
5459 * superset of cases "Q==1"; we catch the latter later.
5461 if (q && ((rd | rn | rm) & 1)) {
5462 return 1;
5465 * The SHA-1/SHA-256 3-register instructions require special treatment
5466 * here, as their size field is overloaded as an op type selector, and
5467 * they all consume their input in a single pass.
5469 if (op == NEON_3R_SHA) {
5470 if (!q) {
5471 return 1;
5473 if (!u) { /* SHA-1 */
5474 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
5475 return 1;
5477 tmp = tcg_const_i32(rd);
5478 tmp2 = tcg_const_i32(rn);
5479 tmp3 = tcg_const_i32(rm);
5480 tmp4 = tcg_const_i32(size);
5481 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5482 tcg_temp_free_i32(tmp4);
5483 } else { /* SHA-256 */
5484 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
5485 return 1;
5487 tmp = tcg_const_i32(rd);
5488 tmp2 = tcg_const_i32(rn);
5489 tmp3 = tcg_const_i32(rm);
5490 switch (size) {
5491 case 0:
5492 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5493 break;
5494 case 1:
5495 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5496 break;
5497 case 2:
5498 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5499 break;
5502 tcg_temp_free_i32(tmp);
5503 tcg_temp_free_i32(tmp2);
5504 tcg_temp_free_i32(tmp3);
5505 return 0;
5507 if (size == 3 && op != NEON_3R_LOGIC) {
5508 /* 64-bit element instructions. */
5509 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5510 neon_load_reg64(cpu_V0, rn + pass);
5511 neon_load_reg64(cpu_V1, rm + pass);
5512 switch (op) {
5513 case NEON_3R_VQADD:
5514 if (u) {
5515 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5516 cpu_V0, cpu_V1);
5517 } else {
5518 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5519 cpu_V0, cpu_V1);
5521 break;
5522 case NEON_3R_VQSUB:
5523 if (u) {
5524 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5525 cpu_V0, cpu_V1);
5526 } else {
5527 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5528 cpu_V0, cpu_V1);
5530 break;
5531 case NEON_3R_VSHL:
5532 if (u) {
5533 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5534 } else {
5535 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5537 break;
5538 case NEON_3R_VQSHL:
5539 if (u) {
5540 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5541 cpu_V1, cpu_V0);
5542 } else {
5543 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5544 cpu_V1, cpu_V0);
5546 break;
5547 case NEON_3R_VRSHL:
5548 if (u) {
5549 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
5550 } else {
5551 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5553 break;
5554 case NEON_3R_VQRSHL:
5555 if (u) {
5556 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5557 cpu_V1, cpu_V0);
5558 } else {
5559 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5560 cpu_V1, cpu_V0);
5562 break;
5563 case NEON_3R_VADD_VSUB:
5564 if (u) {
5565 tcg_gen_sub_i64(CPU_V001);
5566 } else {
5567 tcg_gen_add_i64(CPU_V001);
5569 break;
5570 default:
5571 abort();
5573 neon_store_reg64(cpu_V0, rd + pass);
5575 return 0;
5577 pairwise = 0;
5578 switch (op) {
5579 case NEON_3R_VSHL:
5580 case NEON_3R_VQSHL:
5581 case NEON_3R_VRSHL:
5582 case NEON_3R_VQRSHL:
5584 int rtmp;
5585 /* Shift instruction operands are reversed. */
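/* For these ops the value to be shifted is in Vm and the shift count
 * in Vn, the reverse of the (value, count) order the generic code
 * below assumes for (rn, rm), hence the swap.
 */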
5586 rtmp = rn;
5587 rn = rm;
5588 rm = rtmp;
5590 break;
5591 case NEON_3R_VPADD:
5592 if (u) {
5593 return 1;
5595 /* Fall through */
5596 case NEON_3R_VPMAX:
5597 case NEON_3R_VPMIN:
5598 pairwise = 1;
5599 break;
5600 case NEON_3R_FLOAT_ARITH:
5601 pairwise = (u && size < 2); /* if VPADD (float) */
5602 break;
5603 case NEON_3R_FLOAT_MINMAX:
5604 pairwise = u; /* if VPMIN/VPMAX (float) */
5605 break;
5606 case NEON_3R_FLOAT_CMP:
5607 if (!u && size) {
5608 /* no encoding for U=0 C=1x */
5609 return 1;
5611 break;
5612 case NEON_3R_FLOAT_ACMP:
5613 if (!u) {
5614 return 1;
5616 break;
5617 case NEON_3R_FLOAT_MISC:
5618 /* VMAXNM/VMINNM in ARMv8 */
5619 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
5620 return 1;
5622 break;
5623 case NEON_3R_VMUL:
5624 if (u && (size != 0)) {
5625 /* UNDEF on invalid size for polynomial subcase */
5626 return 1;
5628 break;
5629 case NEON_3R_VFM:
5630 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
5631 return 1;
5633 break;
5634 default:
5635 break;
5638 if (pairwise && q) {
5639 /* All the pairwise insns UNDEF if Q is set */
5640 return 1;
5643 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5645 if (pairwise) {
5646 /* Pairwise. */
5647 if (pass < 1) {
5648 tmp = neon_load_reg(rn, 0);
5649 tmp2 = neon_load_reg(rn, 1);
5650 } else {
5651 tmp = neon_load_reg(rm, 0);
5652 tmp2 = neon_load_reg(rm, 1);
5654 } else {
5655 /* Elementwise. */
5656 tmp = neon_load_reg(rn, pass);
5657 tmp2 = neon_load_reg(rm, pass);
5659 switch (op) {
5660 case NEON_3R_VHADD:
5661 GEN_NEON_INTEGER_OP(hadd);
5662 break;
5663 case NEON_3R_VQADD:
5664 GEN_NEON_INTEGER_OP_ENV(qadd);
5665 break;
5666 case NEON_3R_VRHADD:
5667 GEN_NEON_INTEGER_OP(rhadd);
5668 break;
5669 case NEON_3R_LOGIC: /* Logic ops. */
5670 switch ((u << 2) | size) {
5671 case 0: /* VAND */
5672 tcg_gen_and_i32(tmp, tmp, tmp2);
5673 break;
5674 case 1: /* BIC */
5675 tcg_gen_andc_i32(tmp, tmp, tmp2);
5676 break;
5677 case 2: /* VORR */
5678 tcg_gen_or_i32(tmp, tmp, tmp2);
5679 break;
5680 case 3: /* VORN */
5681 tcg_gen_orc_i32(tmp, tmp, tmp2);
5682 break;
5683 case 4: /* VEOR */
5684 tcg_gen_xor_i32(tmp, tmp, tmp2);
5685 break;
5686 case 5: /* VBSL */
5687 tmp3 = neon_load_reg(rd, pass);
5688 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
5689 tcg_temp_free_i32(tmp3);
5690 break;
5691 case 6: /* VBIT */
5692 tmp3 = neon_load_reg(rd, pass);
5693 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
5694 tcg_temp_free_i32(tmp3);
5695 break;
5696 case 7: /* VBIF */
5697 tmp3 = neon_load_reg(rd, pass);
5698 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
5699 tcg_temp_free_i32(tmp3);
5700 break;
5702 break;
5703 case NEON_3R_VHSUB:
5704 GEN_NEON_INTEGER_OP(hsub);
5705 break;
5706 case NEON_3R_VQSUB:
5707 GEN_NEON_INTEGER_OP_ENV(qsub);
5708 break;
5709 case NEON_3R_VCGT:
5710 GEN_NEON_INTEGER_OP(cgt);
5711 break;
5712 case NEON_3R_VCGE:
5713 GEN_NEON_INTEGER_OP(cge);
5714 break;
5715 case NEON_3R_VSHL:
5716 GEN_NEON_INTEGER_OP(shl);
5717 break;
5718 case NEON_3R_VQSHL:
5719 GEN_NEON_INTEGER_OP_ENV(qshl);
5720 break;
5721 case NEON_3R_VRSHL:
5722 GEN_NEON_INTEGER_OP(rshl);
5723 break;
5724 case NEON_3R_VQRSHL:
5725 GEN_NEON_INTEGER_OP_ENV(qrshl);
5726 break;
5727 case NEON_3R_VMAX:
5728 GEN_NEON_INTEGER_OP(max);
5729 break;
5730 case NEON_3R_VMIN:
5731 GEN_NEON_INTEGER_OP(min);
5732 break;
5733 case NEON_3R_VABD:
5734 GEN_NEON_INTEGER_OP(abd);
5735 break;
5736 case NEON_3R_VABA:
5737 GEN_NEON_INTEGER_OP(abd);
5738 tcg_temp_free_i32(tmp2);
5739 tmp2 = neon_load_reg(rd, pass);
5740 gen_neon_add(size, tmp, tmp2);
5741 break;
5742 case NEON_3R_VADD_VSUB:
5743 if (!u) { /* VADD */
5744 gen_neon_add(size, tmp, tmp2);
5745 } else { /* VSUB */
5746 switch (size) {
5747 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5748 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5749 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
5750 default: abort();
5753 break;
5754 case NEON_3R_VTST_VCEQ:
5755 if (!u) { /* VTST */
5756 switch (size) {
5757 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5758 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5759 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
5760 default: abort();
5762 } else { /* VCEQ */
5763 switch (size) {
5764 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5765 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5766 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5767 default: abort();
5770 break;
5771 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
5772 switch (size) {
5773 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5774 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5775 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5776 default: abort();
5778 tcg_temp_free_i32(tmp2);
5779 tmp2 = neon_load_reg(rd, pass);
5780 if (u) { /* VMLS */
5781 gen_neon_rsb(size, tmp, tmp2);
5782 } else { /* VMLA */
5783 gen_neon_add(size, tmp, tmp2);
5785 break;
5786 case NEON_3R_VMUL:
5787 if (u) { /* polynomial */
5788 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
5789 } else { /* Integer */
5790 switch (size) {
5791 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5792 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5793 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5794 default: abort();
5797 break;
5798 case NEON_3R_VPMAX:
5799 GEN_NEON_INTEGER_OP(pmax);
5800 break;
5801 case NEON_3R_VPMIN:
5802 GEN_NEON_INTEGER_OP(pmin);
5803 break;
5804 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
5805 if (!u) { /* VQDMULH */
5806 switch (size) {
5807 case 1:
5808 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5809 break;
5810 case 2:
5811 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5812 break;
5813 default: abort();
5815 } else { /* VQRDMULH */
5816 switch (size) {
5817 case 1:
5818 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5819 break;
5820 case 2:
5821 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5822 break;
5823 default: abort();
5826 break;
5827 case NEON_3R_VPADD:
5828 switch (size) {
5829 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5830 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5831 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
5832 default: abort();
5834 break;
5835 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
5837 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5838 switch ((u << 2) | size) {
5839 case 0: /* VADD */
5840 case 4: /* VPADD */
5841 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5842 break;
5843 case 2: /* VSUB */
5844 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
5845 break;
5846 case 6: /* VABD */
5847 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5848 break;
5849 default:
5850 abort();
5852 tcg_temp_free_ptr(fpstatus);
5853 break;
5855 case NEON_3R_FLOAT_MULTIPLY:
5857 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5858 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5859 if (!u) {
5860 tcg_temp_free_i32(tmp2);
5861 tmp2 = neon_load_reg(rd, pass);
5862 if (size == 0) {
5863 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5864 } else {
5865 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5868 tcg_temp_free_ptr(fpstatus);
5869 break;
5871 case NEON_3R_FLOAT_CMP:
5873 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5874 if (!u) {
5875 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
5876 } else {
5877 if (size == 0) {
5878 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5879 } else {
5880 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5883 tcg_temp_free_ptr(fpstatus);
5884 break;
5886 case NEON_3R_FLOAT_ACMP:
5888 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5889 if (size == 0) {
5890 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5891 } else {
5892 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5894 tcg_temp_free_ptr(fpstatus);
5895 break;
5897 case NEON_3R_FLOAT_MINMAX:
5899 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5900 if (size == 0) {
5901 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
5902 } else {
5903 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
5905 tcg_temp_free_ptr(fpstatus);
5906 break;
5908 case NEON_3R_FLOAT_MISC:
5909 if (u) {
5910 /* VMAXNM/VMINNM */
5911 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5912 if (size == 0) {
5913 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
5914 } else {
5915 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
5917 tcg_temp_free_ptr(fpstatus);
5918 } else {
5919 if (size == 0) {
5920 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5921 } else {
5922 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5925 break;
5926 case NEON_3R_VFM:
5928 /* VFMA, VFMS: fused multiply-add */
5929 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5930 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5931 if (size) {
5932 /* VFMS */
5933 gen_helper_vfp_negs(tmp, tmp);
5935 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5936 tcg_temp_free_i32(tmp3);
5937 tcg_temp_free_ptr(fpstatus);
5938 break;
5940 default:
5941 abort();
5943 tcg_temp_free_i32(tmp2);
5945 /* Save the result. For elementwise operations we can put it
5946 straight into the destination register. For pairwise operations
5947 we have to be careful to avoid clobbering the source operands. */
5948 if (pairwise && rd == rm) {
5949 neon_store_scratch(pass, tmp);
5950 } else {
5951 neon_store_reg(rd, pass, tmp);
5954 } /* for pass */
5955 if (pairwise && rd == rm) {
5956 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5957 tmp = neon_load_scratch(pass);
5958 neon_store_reg(rd, pass, tmp);
5961 /* End of 3 register same size operations. */
5962 } else if (insn & (1 << 4)) {
5963 if ((insn & 0x00380080) != 0) {
5964 /* Two registers and shift. */
5965 op = (insn >> 8) & 0xf;
5966 if (insn & (1 << 7)) {
5967 /* 64-bit shift. */
5968 if (op > 7) {
5969 return 1;
5971 size = 3;
5972 } else {
5973 size = 2;
5974 while ((insn & (1 << (size + 19))) == 0)
5975 size--;
5977 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5978 /* To avoid excessive duplication of ops we implement shift
5979 by immediate using the variable shift operations. */
5980 if (op < 8) {
5981 /* Shift by immediate:
5982 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5983 if (q && ((rd | rm) & 1)) {
5984 return 1;
5986 if (!u && (op == 4 || op == 6)) {
5987 return 1;
5989 /* Right shifts are encoded as N - shift, where N is the
5990 element size in bits. */
5991 if (op <= 4)
5992 shift = shift - (1 << (size + 3));
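/* For the right-shift ops (VSHR, VSRA, VRSHR, VRSRA, VSRI) this makes
 * 'shift' negative; the variable shift helpers used below interpret a
 * negative count as a right shift by that amount.
 */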
5993 if (size == 3) {
5994 count = q + 1;
5995 } else {
5996 count = q ? 4: 2;
5998 switch (size) {
5999 case 0:
6000 imm = (uint8_t) shift;
6001 imm |= imm << 8;
6002 imm |= imm << 16;
6003 break;
6004 case 1:
6005 imm = (uint16_t) shift;
6006 imm |= imm << 16;
6007 break;
6008 case 2:
6009 case 3:
6010 imm = shift;
6011 break;
6012 default:
6013 abort();
6016 for (pass = 0; pass < count; pass++) {
6017 if (size == 3) {
6018 neon_load_reg64(cpu_V0, rm + pass);
6019 tcg_gen_movi_i64(cpu_V1, imm);
6020 switch (op) {
6021 case 0: /* VSHR */
6022 case 1: /* VSRA */
6023 if (u)
6024 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6025 else
6026 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
6027 break;
6028 case 2: /* VRSHR */
6029 case 3: /* VRSRA */
6030 if (u)
6031 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
6032 else
6033 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
6034 break;
6035 case 4: /* VSRI */
6036 case 5: /* VSHL, VSLI */
6037 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6038 break;
6039 case 6: /* VQSHLU */
6040 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6041 cpu_V0, cpu_V1);
6042 break;
6043 case 7: /* VQSHL */
6044 if (u) {
6045 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
6046 cpu_V0, cpu_V1);
6047 } else {
6048 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
6049 cpu_V0, cpu_V1);
6051 break;
6053 if (op == 1 || op == 3) {
6054 /* Accumulate. */
6055 neon_load_reg64(cpu_V1, rd + pass);
6056 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6057 } else if (op == 4 || (op == 5 && u)) {
6058 /* Insert */
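/* VSRI/VSLI only replace the destination bits that the shifted value
 * lands in: build a mask of those bits (the low bits for the right
 * shift VSRI, the high bits for the left shift VSLI) and preserve the
 * remaining bits of the old destination.
 */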
6059 neon_load_reg64(cpu_V1, rd + pass);
6060 uint64_t mask;
6061 if (shift < -63 || shift > 63) {
6062 mask = 0;
6063 } else {
6064 if (op == 4) {
6065 mask = 0xffffffffffffffffull >> -shift;
6066 } else {
6067 mask = 0xffffffffffffffffull << shift;
6070 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6071 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6073 neon_store_reg64(cpu_V0, rd + pass);
6074 } else { /* size < 3 */
6075 /* Operands in T0 and T1. */
6076 tmp = neon_load_reg(rm, pass);
6077 tmp2 = tcg_temp_new_i32();
6078 tcg_gen_movi_i32(tmp2, imm);
6079 switch (op) {
6080 case 0: /* VSHR */
6081 case 1: /* VSRA */
6082 GEN_NEON_INTEGER_OP(shl);
6083 break;
6084 case 2: /* VRSHR */
6085 case 3: /* VRSRA */
6086 GEN_NEON_INTEGER_OP(rshl);
6087 break;
6088 case 4: /* VSRI */
6089 case 5: /* VSHL, VSLI */
6090 switch (size) {
6091 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6092 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6093 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
6094 default: abort();
6096 break;
6097 case 6: /* VQSHLU */
6098 switch (size) {
6099 case 0:
6100 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6101 tmp, tmp2);
6102 break;
6103 case 1:
6104 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6105 tmp, tmp2);
6106 break;
6107 case 2:
6108 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6109 tmp, tmp2);
6110 break;
6111 default:
6112 abort();
6114 break;
6115 case 7: /* VQSHL */
6116 GEN_NEON_INTEGER_OP_ENV(qshl);
6117 break;
6119 tcg_temp_free_i32(tmp2);
6121 if (op == 1 || op == 3) {
6122 /* Accumulate. */
6123 tmp2 = neon_load_reg(rd, pass);
6124 gen_neon_add(size, tmp, tmp2);
6125 tcg_temp_free_i32(tmp2);
6126 } else if (op == 4 || (op == 5 && u)) {
6127 /* Insert */
6128 switch (size) {
6129 case 0:
6130 if (op == 4)
6131 mask = 0xff >> -shift;
6132 else
6133 mask = (uint8_t)(0xff << shift);
6134 mask |= mask << 8;
6135 mask |= mask << 16;
6136 break;
6137 case 1:
6138 if (op == 4)
6139 mask = 0xffff >> -shift;
6140 else
6141 mask = (uint16_t)(0xffff << shift);
6142 mask |= mask << 16;
6143 break;
6144 case 2:
6145 if (shift < -31 || shift > 31) {
6146 mask = 0;
6147 } else {
6148 if (op == 4)
6149 mask = 0xffffffffu >> -shift;
6150 else
6151 mask = 0xffffffffu << shift;
6153 break;
6154 default:
6155 abort();
6157 tmp2 = neon_load_reg(rd, pass);
6158 tcg_gen_andi_i32(tmp, tmp, mask);
6159 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
6160 tcg_gen_or_i32(tmp, tmp, tmp2);
6161 tcg_temp_free_i32(tmp2);
6163 neon_store_reg(rd, pass, tmp);
6165 } /* for pass */
6166 } else if (op < 10) {
6167 /* Shift by immediate and narrow:
6168 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
6169 int input_unsigned = (op == 8) ? !u : u;
6170 if (rm & 1) {
6171 return 1;
6173 shift = shift - (1 << (size + 3));
6174 size++;
6175 if (size == 3) {
6176 tmp64 = tcg_const_i64(shift);
6177 neon_load_reg64(cpu_V0, rm);
6178 neon_load_reg64(cpu_V1, rm + 1);
6179 for (pass = 0; pass < 2; pass++) {
6180 TCGv_i64 in;
6181 if (pass == 0) {
6182 in = cpu_V0;
6183 } else {
6184 in = cpu_V1;
6186 if (q) {
6187 if (input_unsigned) {
6188 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
6189 } else {
6190 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
6192 } else {
6193 if (input_unsigned) {
6194 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
6195 } else {
6196 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
6199 tmp = tcg_temp_new_i32();
6200 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6201 neon_store_reg(rd, pass, tmp);
6202 } /* for pass */
6203 tcg_temp_free_i64(tmp64);
6204 } else {
6205 if (size == 1) {
6206 imm = (uint16_t)shift;
6207 imm |= imm << 16;
6208 } else {
6209 /* size == 2 */
6210 imm = (uint32_t)shift;
6212 tmp2 = tcg_const_i32(imm);
6213 tmp4 = neon_load_reg(rm + 1, 0);
6214 tmp5 = neon_load_reg(rm + 1, 1);
6215 for (pass = 0; pass < 2; pass++) {
6216 if (pass == 0) {
6217 tmp = neon_load_reg(rm, 0);
6218 } else {
6219 tmp = tmp4;
6221 gen_neon_shift_narrow(size, tmp, tmp2, q,
6222 input_unsigned);
6223 if (pass == 0) {
6224 tmp3 = neon_load_reg(rm, 1);
6225 } else {
6226 tmp3 = tmp5;
6228 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6229 input_unsigned);
6230 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
6231 tcg_temp_free_i32(tmp);
6232 tcg_temp_free_i32(tmp3);
6233 tmp = tcg_temp_new_i32();
6234 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6235 neon_store_reg(rd, pass, tmp);
6236 } /* for pass */
6237 tcg_temp_free_i32(tmp2);
6239 } else if (op == 10) {
6240 /* VSHLL, VMOVL */
6241 if (q || (rd & 1)) {
6242 return 1;
6244 tmp = neon_load_reg(rm, 0);
6245 tmp2 = neon_load_reg(rm, 1);
6246 for (pass = 0; pass < 2; pass++) {
6247 if (pass == 1)
6248 tmp = tmp2;
6250 gen_neon_widen(cpu_V0, tmp, size, u);
6252 if (shift != 0) {
6253 /* The shift is less than the width of the source
6254 type, so we can just shift the whole register. */
6255 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
6256 /* Widen the result of shift: we need to clear
6257 * the potential overflow bits resulting from
6258 * left bits of the narrow input appearing as
6259 * right bits of the left neighbour narrow
6260 * input. */
6261 if (size < 2 || !u) {
6262 uint64_t imm64;
6263 if (size == 0) {
6264 imm = (0xffu >> (8 - shift));
6265 imm |= imm << 16;
6266 } else if (size == 1) {
6267 imm = 0xffff >> (16 - shift);
6268 } else {
6269 /* size == 2 */
6270 imm = 0xffffffff >> (32 - shift);
6272 if (size < 2) {
6273 imm64 = imm | (((uint64_t)imm) << 32);
6274 } else {
6275 imm64 = imm;
6277 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
6280 neon_store_reg64(cpu_V0, rd + pass);
6282 } else if (op >= 14) {
6283 /* VCVT fixed-point. */
6284 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6285 return 1;
6287 /* We have already masked out the must-be-1 top bit of imm6,
6288 * hence this 32-shift where the ARM ARM has 64-imm6.
6290 shift = 32 - shift;
6291 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6292 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
6293 if (!(op & 1)) {
6294 if (u)
6295 gen_vfp_ulto(0, shift, 1);
6296 else
6297 gen_vfp_slto(0, shift, 1);
6298 } else {
6299 if (u)
6300 gen_vfp_toul(0, shift, 1);
6301 else
6302 gen_vfp_tosl(0, shift, 1);
6304 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
6306 } else {
6307 return 1;
6309 } else { /* (insn & 0x00380080) == 0 */
6310 int invert;
6311 if (q && (rd & 1)) {
6312 return 1;
6315 op = (insn >> 8) & 0xf;
6316 /* One register and immediate. */
6317 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6318 invert = (insn & (1 << 5)) != 0;
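/* The switch below expands the 8-bit immediate into the replicated
 * patterns of the AdvSIMD modified-immediate encoding selected by op
 * (the cmode field); case 15 produces the expanded floating-point
 * constant form.
 */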
6319 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6320 * We choose to not special-case this and will behave as if a
6321 * valid constant encoding of 0 had been given.
6323 switch (op) {
6324 case 0: case 1:
6325 /* no-op */
6326 break;
6327 case 2: case 3:
6328 imm <<= 8;
6329 break;
6330 case 4: case 5:
6331 imm <<= 16;
6332 break;
6333 case 6: case 7:
6334 imm <<= 24;
6335 break;
6336 case 8: case 9:
6337 imm |= imm << 16;
6338 break;
6339 case 10: case 11:
6340 imm = (imm << 8) | (imm << 24);
6341 break;
6342 case 12:
6343 imm = (imm << 8) | 0xff;
6344 break;
6345 case 13:
6346 imm = (imm << 16) | 0xffff;
6347 break;
6348 case 14:
6349 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6350 if (invert)
6351 imm = ~imm;
6352 break;
6353 case 15:
6354 if (invert) {
6355 return 1;
6357 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6358 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6359 break;
6361 if (invert)
6362 imm = ~imm;
6364 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6365 if (op & 1 && op < 12) {
6366 tmp = neon_load_reg(rd, pass);
6367 if (invert) {
6368 /* The immediate value has already been inverted, so
6369 BIC becomes AND. */
6370 tcg_gen_andi_i32(tmp, tmp, imm);
6371 } else {
6372 tcg_gen_ori_i32(tmp, tmp, imm);
6374 } else {
6375 /* VMOV, VMVN. */
6376 tmp = tcg_temp_new_i32();
6377 if (op == 14 && invert) {
6378 int n;
6379 uint32_t val;
6380 val = 0;
6381 for (n = 0; n < 4; n++) {
6382 if (imm & (1 << (n + (pass & 1) * 4)))
6383 val |= 0xff << (n * 8);
6385 tcg_gen_movi_i32(tmp, val);
6386 } else {
6387 tcg_gen_movi_i32(tmp, imm);
6390 neon_store_reg(rd, pass, tmp);
6393 } else { /* (insn & 0x00800010 == 0x00800000) */
6394 if (size != 3) {
6395 op = (insn >> 8) & 0xf;
6396 if ((insn & (1 << 6)) == 0) {
6397 /* Three registers of different lengths. */
6398 int src1_wide;
6399 int src2_wide;
6400 int prewiden;
6401 /* undefreq: bit 0 : UNDEF if size == 0
6402 * bit 1 : UNDEF if size == 1
6403 * bit 2 : UNDEF if size == 2
6404 * bit 3 : UNDEF if U == 1
6405 * Note that [2:0] set implies 'always UNDEF'
6407 int undefreq;
6408 /* prewiden, src1_wide, src2_wide, undefreq */
6409 static const int neon_3reg_wide[16][4] = {
6410 {1, 0, 0, 0}, /* VADDL */
6411 {1, 1, 0, 0}, /* VADDW */
6412 {1, 0, 0, 0}, /* VSUBL */
6413 {1, 1, 0, 0}, /* VSUBW */
6414 {0, 1, 1, 0}, /* VADDHN */
6415 {0, 0, 0, 0}, /* VABAL */
6416 {0, 1, 1, 0}, /* VSUBHN */
6417 {0, 0, 0, 0}, /* VABDL */
6418 {0, 0, 0, 0}, /* VMLAL */
6419 {0, 0, 0, 9}, /* VQDMLAL */
6420 {0, 0, 0, 0}, /* VMLSL */
6421 {0, 0, 0, 9}, /* VQDMLSL */
6422 {0, 0, 0, 0}, /* Integer VMULL */
6423 {0, 0, 0, 1}, /* VQDMULL */
6424 {0, 0, 0, 0xa}, /* Polynomial VMULL */
6425 {0, 0, 0, 7}, /* Reserved: always UNDEF */
6428 prewiden = neon_3reg_wide[op][0];
6429 src1_wide = neon_3reg_wide[op][1];
6430 src2_wide = neon_3reg_wide[op][2];
6431 undefreq = neon_3reg_wide[op][3];
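/* Reject encodings that the table marks UNDEF for this size or for U == 1,
 * and require even register numbers for any operand accessed as a 128-bit
 * quantity: the wide sources and, for non-narrowing ops, the destination. */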
6433 if ((undefreq & (1 << size)) ||
6434 ((undefreq & 8) && u)) {
6435 return 1;
6437 if ((src1_wide && (rn & 1)) ||
6438 (src2_wide && (rm & 1)) ||
6439 (!src2_wide && (rd & 1))) {
6440 return 1;
6443 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6444 * outside the loop below as it only performs a single pass.
6446 if (op == 14 && size == 2) {
6447 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6449 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
6450 return 1;
6452 tcg_rn = tcg_temp_new_i64();
6453 tcg_rm = tcg_temp_new_i64();
6454 tcg_rd = tcg_temp_new_i64();
6455 neon_load_reg64(tcg_rn, rn);
6456 neon_load_reg64(tcg_rm, rm);
6457 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6458 neon_store_reg64(tcg_rd, rd);
6459 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6460 neon_store_reg64(tcg_rd, rd + 1);
6461 tcg_temp_free_i64(tcg_rn);
6462 tcg_temp_free_i64(tcg_rm);
6463 tcg_temp_free_i64(tcg_rd);
6464 return 0;
6467 /* Avoid overlapping operands. Wide source operands are
6468 always aligned so will never overlap with wide
6469 destinations in problematic ways. */
6470 if (rd == rm && !src2_wide) {
6471 tmp = neon_load_reg(rm, 1);
6472 neon_store_scratch(2, tmp);
6473 } else if (rd == rn && !src1_wide) {
6474 tmp = neon_load_reg(rn, 1);
6475 neon_store_scratch(2, tmp);
6477 TCGV_UNUSED_I32(tmp3);
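/* One pass per 64-bit half of the wide result. Wide sources are loaded
 * straight into cpu_V0/cpu_V1; narrow sources are loaded as 32-bit temps
 * and widened here only when 'prewiden' is set (otherwise the helper for
 * the op widens its 32-bit inputs itself). */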
6478 for (pass = 0; pass < 2; pass++) {
6479 if (src1_wide) {
6480 neon_load_reg64(cpu_V0, rn + pass);
6481 TCGV_UNUSED_I32(tmp);
6482 } else {
6483 if (pass == 1 && rd == rn) {
6484 tmp = neon_load_scratch(2);
6485 } else {
6486 tmp = neon_load_reg(rn, pass);
6488 if (prewiden) {
6489 gen_neon_widen(cpu_V0, tmp, size, u);
6492 if (src2_wide) {
6493 neon_load_reg64(cpu_V1, rm + pass);
6494 TCGV_UNUSED_I32(tmp2);
6495 } else {
6496 if (pass == 1 && rd == rm) {
6497 tmp2 = neon_load_scratch(2);
6498 } else {
6499 tmp2 = neon_load_reg(rm, pass);
6501 if (prewiden) {
6502 gen_neon_widen(cpu_V1, tmp2, size, u);
6505 switch (op) {
6506 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6507 gen_neon_addl(size);
6508 break;
6509 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6510 gen_neon_subl(size);
6511 break;
6512 case 5: case 7: /* VABAL, VABDL */
6513 switch ((size << 1) | u) {
6514 case 0:
6515 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6516 break;
6517 case 1:
6518 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6519 break;
6520 case 2:
6521 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6522 break;
6523 case 3:
6524 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6525 break;
6526 case 4:
6527 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6528 break;
6529 case 5:
6530 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6531 break;
6532 default: abort();
6534 tcg_temp_free_i32(tmp2);
6535 tcg_temp_free_i32(tmp);
6536 break;
6537 case 8: case 9: case 10: case 11: case 12: case 13:
6538 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6539 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6540 break;
6541 case 14: /* Polynomial VMULL */
6542 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
6543 tcg_temp_free_i32(tmp2);
6544 tcg_temp_free_i32(tmp);
6545 break;
6546 default: /* 15 is RESERVED: caught earlier */
6547 abort();
6549 if (op == 13) {
6550 /* VQDMULL */
6551 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6552 neon_store_reg64(cpu_V0, rd + pass);
6553 } else if (op == 5 || (op >= 8 && op <= 11)) {
6554 /* Accumulate. */
6555 neon_load_reg64(cpu_V1, rd + pass);
6556 switch (op) {
6557 case 10: /* VMLSL */
6558 gen_neon_negl(cpu_V0, size);
6559 /* Fall through */
6560 case 5: case 8: /* VABAL, VMLAL */
6561 gen_neon_addl(size);
6562 break;
6563 case 9: case 11: /* VQDMLAL, VQDMLSL */
6564 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6565 if (op == 11) {
6566 gen_neon_negl(cpu_V0, size);
6568 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6569 break;
6570 default:
6571 abort();
6573 neon_store_reg64(cpu_V0, rd + pass);
6574 } else if (op == 4 || op == 6) {
6575 /* Narrowing operation. */
6576 tmp = tcg_temp_new_i32();
6577 if (!u) {
6578 switch (size) {
6579 case 0:
6580 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6581 break;
6582 case 1:
6583 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6584 break;
6585 case 2:
6586 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6587 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6588 break;
6589 default: abort();
6591 } else {
6592 switch (size) {
6593 case 0:
6594 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6595 break;
6596 case 1:
6597 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6598 break;
6599 case 2:
6600 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6601 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6602 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6603 break;
6604 default: abort();
6607 if (pass == 0) {
6608 tmp3 = tmp;
6609 } else {
6610 neon_store_reg(rd, 0, tmp3);
6611 neon_store_reg(rd, 1, tmp);
6613 } else {
6614 /* Write back the result. */
6615 neon_store_reg64(cpu_V0, rd + pass);
6618 } else {
6619 /* Two registers and a scalar. NB that for ops of this form
6620 * the ARM ARM labels bit 24 as Q, but it is in our variable
6621 * 'u', not 'q'.
6623 if (size == 0) {
6624 return 1;
6626 switch (op) {
6627 case 1: /* Floating point VMLA scalar */
6628 case 5: /* Floating point VMLS scalar */
6629 case 9: /* Floating point VMUL scalar */
6630 if (size == 1) {
6631 return 1;
6633 /* fall through */
6634 case 0: /* Integer VMLA scalar */
6635 case 4: /* Integer VMLS scalar */
6636 case 8: /* Integer VMUL scalar */
6637 case 12: /* VQDMULH scalar */
6638 case 13: /* VQRDMULH scalar */
6639 if (u && ((rd | rn) & 1)) {
6640 return 1;
6642 tmp = neon_get_scalar(size, rm);
6643 neon_store_scratch(0, tmp);
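/* The scalar operand is fetched once and parked in a scratch slot; it is
 * reloaded at the top of each pass because the operation overwrites the
 * temporary. */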
6644 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6645 tmp = neon_load_scratch(0);
6646 tmp2 = neon_load_reg(rn, pass);
6647 if (op == 12) {
6648 if (size == 1) {
6649 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6650 } else {
6651 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6653 } else if (op == 13) {
6654 if (size == 1) {
6655 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6656 } else {
6657 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6659 } else if (op & 1) {
6660 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6661 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6662 tcg_temp_free_ptr(fpstatus);
6663 } else {
6664 switch (size) {
6665 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6666 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6667 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6668 default: abort();
6671 tcg_temp_free_i32(tmp2);
6672 if (op < 8) {
6673 /* Accumulate. */
6674 tmp2 = neon_load_reg(rd, pass);
6675 switch (op) {
6676 case 0:
6677 gen_neon_add(size, tmp, tmp2);
6678 break;
6679 case 1:
6681 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6682 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6683 tcg_temp_free_ptr(fpstatus);
6684 break;
6686 case 4:
6687 gen_neon_rsb(size, tmp, tmp2);
6688 break;
6689 case 5:
6691 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6692 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6693 tcg_temp_free_ptr(fpstatus);
6694 break;
6696 default:
6697 abort();
6699 tcg_temp_free_i32(tmp2);
6701 neon_store_reg(rd, pass, tmp);
6703 break;
6704 case 3: /* VQDMLAL scalar */
6705 case 7: /* VQDMLSL scalar */
6706 case 11: /* VQDMULL scalar */
6707 if (u == 1) {
6708 return 1;
6710 /* fall through */
6711 case 2: /* VMLAL scalar */
6712 case 6: /* VMLSL scalar */
6713 case 10: /* VMULL scalar */
6714 if (rd & 1) {
6715 return 1;
6717 tmp2 = neon_get_scalar(size, rm);
6718 /* We need a copy of tmp2 because gen_neon_mull
6719 * deletes it during pass 0. */
6720 tmp4 = tcg_temp_new_i32();
6721 tcg_gen_mov_i32(tmp4, tmp2);
6722 tmp3 = neon_load_reg(rn, 1);
6724 for (pass = 0; pass < 2; pass++) {
6725 if (pass == 0) {
6726 tmp = neon_load_reg(rn, 0);
6727 } else {
6728 tmp = tmp3;
6729 tmp2 = tmp4;
6731 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6732 if (op != 11) {
6733 neon_load_reg64(cpu_V1, rd + pass);
6735 switch (op) {
6736 case 6:
6737 gen_neon_negl(cpu_V0, size);
6738 /* Fall through */
6739 case 2:
6740 gen_neon_addl(size);
6741 break;
6742 case 3: case 7:
6743 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6744 if (op == 7) {
6745 gen_neon_negl(cpu_V0, size);
6747 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6748 break;
6749 case 10:
6750 /* no-op */
6751 break;
6752 case 11:
6753 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6754 break;
6755 default:
6756 abort();
6758 neon_store_reg64(cpu_V0, rd + pass);
6762 break;
6763 default: /* 14 and 15 are RESERVED */
6764 return 1;
6767 } else { /* size == 3 */
6768 if (!u) {
6769 /* Extract. */
6770 imm = (insn >> 8) & 0xf;
6772 if (imm > 7 && !q)
6773 return 1;
6775 if (q && ((rd | rn | rm) & 1)) {
6776 return 1;
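/* VEXT extracts a contiguous run of bytes starting at byte offset 'imm'
 * from the concatenation of the source registers (rn supplying the lower
 * bytes). imm == 0 and imm == 8 reduce to plain register copies; the
 * general case is built from 64-bit shifts and ORs below. */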
6779 if (imm == 0) {
6780 neon_load_reg64(cpu_V0, rn);
6781 if (q) {
6782 neon_load_reg64(cpu_V1, rn + 1);
6784 } else if (imm == 8) {
6785 neon_load_reg64(cpu_V0, rn + 1);
6786 if (q) {
6787 neon_load_reg64(cpu_V1, rm);
6789 } else if (q) {
6790 tmp64 = tcg_temp_new_i64();
6791 if (imm < 8) {
6792 neon_load_reg64(cpu_V0, rn);
6793 neon_load_reg64(tmp64, rn + 1);
6794 } else {
6795 neon_load_reg64(cpu_V0, rn + 1);
6796 neon_load_reg64(tmp64, rm);
6798 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
6799 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
6800 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6801 if (imm < 8) {
6802 neon_load_reg64(cpu_V1, rm);
6803 } else {
6804 neon_load_reg64(cpu_V1, rm + 1);
6805 imm -= 8;
6807 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6808 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6809 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
6810 tcg_temp_free_i64(tmp64);
6811 } else {
6812 /* BUGFIX */
6813 neon_load_reg64(cpu_V0, rn);
6814 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
6815 neon_load_reg64(cpu_V1, rm);
6816 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6817 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6819 neon_store_reg64(cpu_V0, rd);
6820 if (q) {
6821 neon_store_reg64(cpu_V1, rd + 1);
6823 } else if ((insn & (1 << 11)) == 0) {
6824 /* Two register misc. */
6825 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6826 size = (insn >> 18) & 3;
6827 /* UNDEF for unknown op values and bad op-size combinations */
6828 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6829 return 1;
6831 if (neon_2rm_is_v8_op(op) &&
6832 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6833 return 1;
6835 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6836 q && ((rm | rd) & 1)) {
6837 return 1;
6839 switch (op) {
6840 case NEON_2RM_VREV64:
6841 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6842 tmp = neon_load_reg(rm, pass * 2);
6843 tmp2 = neon_load_reg(rm, pass * 2 + 1);
6844 switch (size) {
6845 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6846 case 1: gen_swap_half(tmp); break;
6847 case 2: /* no-op */ break;
6848 default: abort();
6850 neon_store_reg(rd, pass * 2 + 1, tmp);
6851 if (size == 2) {
6852 neon_store_reg(rd, pass * 2, tmp2);
6853 } else {
6854 switch (size) {
6855 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6856 case 1: gen_swap_half(tmp2); break;
6857 default: abort();
6859 neon_store_reg(rd, pass * 2, tmp2);
6862 break;
6863 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6864 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
6865 for (pass = 0; pass < q + 1; pass++) {
6866 tmp = neon_load_reg(rm, pass * 2);
6867 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6868 tmp = neon_load_reg(rm, pass * 2 + 1);
6869 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6870 switch (size) {
6871 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6872 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6873 case 2: tcg_gen_add_i64(CPU_V001); break;
6874 default: abort();
6876 if (op >= NEON_2RM_VPADAL) {
6877 /* Accumulate. */
6878 neon_load_reg64(cpu_V1, rd + pass);
6879 gen_neon_addl(size);
6881 neon_store_reg64(cpu_V0, rd + pass);
6883 break;
6884 case NEON_2RM_VTRN:
6885 if (size == 2) {
6886 int n;
6887 for (n = 0; n < (q ? 4 : 2); n += 2) {
6888 tmp = neon_load_reg(rm, n);
6889 tmp2 = neon_load_reg(rd, n + 1);
6890 neon_store_reg(rm, n, tmp2);
6891 neon_store_reg(rd, n + 1, tmp);
6893 } else {
6894 goto elementwise;
6896 break;
6897 case NEON_2RM_VUZP:
6898 if (gen_neon_unzip(rd, rm, size, q)) {
6899 return 1;
6901 break;
6902 case NEON_2RM_VZIP:
6903 if (gen_neon_zip(rd, rm, size, q)) {
6904 return 1;
6906 break;
6907 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6908 /* also VQMOVUN; op field and mnemonics don't line up */
6909 if (rm & 1) {
6910 return 1;
6912 TCGV_UNUSED_I32(tmp2);
6913 for (pass = 0; pass < 2; pass++) {
6914 neon_load_reg64(cpu_V0, rm + pass);
6915 tmp = tcg_temp_new_i32();
6916 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6917 tmp, cpu_V0);
6918 if (pass == 0) {
6919 tmp2 = tmp;
6920 } else {
6921 neon_store_reg(rd, 0, tmp2);
6922 neon_store_reg(rd, 1, tmp);
6925 break;
6926 case NEON_2RM_VSHLL:
6927 if (q || (rd & 1)) {
6928 return 1;
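/* Load both halves of the input up front so that the 64-bit store to
 * rd on pass 0 cannot clobber the second input word when rd == rm. */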
6930 tmp = neon_load_reg(rm, 0);
6931 tmp2 = neon_load_reg(rm, 1);
6932 for (pass = 0; pass < 2; pass++) {
6933 if (pass == 1)
6934 tmp = tmp2;
6935 gen_neon_widen(cpu_V0, tmp, size, 1);
6936 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
6937 neon_store_reg64(cpu_V0, rd + pass);
6939 break;
6940 case NEON_2RM_VCVT_F16_F32:
6941 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
6942 q || (rm & 1)) {
6943 return 1;
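/* Convert four single-precision inputs to half precision and pack the
 * 16-bit results in pairs into the two 32-bit words of the destination. */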
6945 tmp = tcg_temp_new_i32();
6946 tmp2 = tcg_temp_new_i32();
6947 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
6948 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6949 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
6950 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6951 tcg_gen_shli_i32(tmp2, tmp2, 16);
6952 tcg_gen_or_i32(tmp2, tmp2, tmp);
6953 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
6954 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6955 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6956 neon_store_reg(rd, 0, tmp2);
6957 tmp2 = tcg_temp_new_i32();
6958 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6959 tcg_gen_shli_i32(tmp2, tmp2, 16);
6960 tcg_gen_or_i32(tmp2, tmp2, tmp);
6961 neon_store_reg(rd, 1, tmp2);
6962 tcg_temp_free_i32(tmp);
6963 break;
6964 case NEON_2RM_VCVT_F32_F16:
6965 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
6966 q || (rd & 1)) {
6967 return 1;
6969 tmp3 = tcg_temp_new_i32();
6970 tmp = neon_load_reg(rm, 0);
6971 tmp2 = neon_load_reg(rm, 1);
6972 tcg_gen_ext16u_i32(tmp3, tmp);
6973 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6974 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6975 tcg_gen_shri_i32(tmp3, tmp, 16);
6976 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6977 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
6978 tcg_temp_free_i32(tmp);
6979 tcg_gen_ext16u_i32(tmp3, tmp2);
6980 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6981 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6982 tcg_gen_shri_i32(tmp3, tmp2, 16);
6983 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6984 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
6985 tcg_temp_free_i32(tmp2);
6986 tcg_temp_free_i32(tmp3);
6987 break;
6988 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6989 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
6990 || ((rm | rd) & 1)) {
6991 return 1;
6993 tmp = tcg_const_i32(rd);
6994 tmp2 = tcg_const_i32(rm);
6996 /* Bit 6 is the lowest opcode bit; it distinguishes between
6997 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6999 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7001 if (op == NEON_2RM_AESE) {
7002 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
7003 } else {
7004 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
7006 tcg_temp_free_i32(tmp);
7007 tcg_temp_free_i32(tmp2);
7008 tcg_temp_free_i32(tmp3);
7009 break;
7010 case NEON_2RM_SHA1H:
7011 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
7012 || ((rm | rd) & 1)) {
7013 return 1;
7015 tmp = tcg_const_i32(rd);
7016 tmp2 = tcg_const_i32(rm);
7018 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
7020 tcg_temp_free_i32(tmp);
7021 tcg_temp_free_i32(tmp2);
7022 break;
7023 case NEON_2RM_SHA1SU1:
7024 if ((rm | rd) & 1) {
7025 return 1;
7027 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7028 if (q) {
7029 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
7030 return 1;
7032 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
7033 return 1;
7035 tmp = tcg_const_i32(rd);
7036 tmp2 = tcg_const_i32(rm);
7037 if (q) {
7038 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
7039 } else {
7040 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
7042 tcg_temp_free_i32(tmp);
7043 tcg_temp_free_i32(tmp2);
7044 break;
7045 default:
7046 elementwise:
7047 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7048 if (neon_2rm_is_float_op(op)) {
7049 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7050 neon_reg_offset(rm, pass));
7051 TCGV_UNUSED_I32(tmp);
7052 } else {
7053 tmp = neon_load_reg(rm, pass);
7055 switch (op) {
7056 case NEON_2RM_VREV32:
7057 switch (size) {
7058 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7059 case 1: gen_swap_half(tmp); break;
7060 default: abort();
7062 break;
7063 case NEON_2RM_VREV16:
7064 gen_rev16(tmp);
7065 break;
7066 case NEON_2RM_VCLS:
7067 switch (size) {
7068 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7069 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7070 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
7071 default: abort();
7073 break;
7074 case NEON_2RM_VCLZ:
7075 switch (size) {
7076 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7077 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7078 case 2: gen_helper_clz(tmp, tmp); break;
7079 default: abort();
7081 break;
7082 case NEON_2RM_VCNT:
7083 gen_helper_neon_cnt_u8(tmp, tmp);
7084 break;
7085 case NEON_2RM_VMVN:
7086 tcg_gen_not_i32(tmp, tmp);
7087 break;
7088 case NEON_2RM_VQABS:
7089 switch (size) {
7090 case 0:
7091 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7092 break;
7093 case 1:
7094 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7095 break;
7096 case 2:
7097 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7098 break;
7099 default: abort();
7101 break;
7102 case NEON_2RM_VQNEG:
7103 switch (size) {
7104 case 0:
7105 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7106 break;
7107 case 1:
7108 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7109 break;
7110 case 2:
7111 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7112 break;
7113 default: abort();
7115 break;
7116 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
7117 tmp2 = tcg_const_i32(0);
7118 switch(size) {
7119 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7120 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7121 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
7122 default: abort();
7124 tcg_temp_free_i32(tmp2);
7125 if (op == NEON_2RM_VCLE0) {
7126 tcg_gen_not_i32(tmp, tmp);
7128 break;
7129 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
7130 tmp2 = tcg_const_i32(0);
7131 switch(size) {
7132 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7133 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7134 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
7135 default: abort();
7137 tcg_temp_free_i32(tmp2);
7138 if (op == NEON_2RM_VCLT0) {
7139 tcg_gen_not_i32(tmp, tmp);
7141 break;
7142 case NEON_2RM_VCEQ0:
7143 tmp2 = tcg_const_i32(0);
7144 switch(size) {
7145 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7146 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7147 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
7148 default: abort();
7150 tcg_temp_free_i32(tmp2);
7151 break;
7152 case NEON_2RM_VABS:
7153 switch(size) {
7154 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7155 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7156 case 2: tcg_gen_abs_i32(tmp, tmp); break;
7157 default: abort();
7159 break;
7160 case NEON_2RM_VNEG:
7161 tmp2 = tcg_const_i32(0);
7162 gen_neon_rsb(size, tmp, tmp2);
7163 tcg_temp_free_i32(tmp2);
7164 break;
7165 case NEON_2RM_VCGT0_F:
7167 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7168 tmp2 = tcg_const_i32(0);
7169 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
7170 tcg_temp_free_i32(tmp2);
7171 tcg_temp_free_ptr(fpstatus);
7172 break;
7174 case NEON_2RM_VCGE0_F:
7176 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7177 tmp2 = tcg_const_i32(0);
7178 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
7179 tcg_temp_free_i32(tmp2);
7180 tcg_temp_free_ptr(fpstatus);
7181 break;
7183 case NEON_2RM_VCEQ0_F:
7185 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7186 tmp2 = tcg_const_i32(0);
7187 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
7188 tcg_temp_free_i32(tmp2);
7189 tcg_temp_free_ptr(fpstatus);
7190 break;
7192 case NEON_2RM_VCLE0_F:
7194 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7195 tmp2 = tcg_const_i32(0);
7196 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
7197 tcg_temp_free_i32(tmp2);
7198 tcg_temp_free_ptr(fpstatus);
7199 break;
7201 case NEON_2RM_VCLT0_F:
7203 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7204 tmp2 = tcg_const_i32(0);
7205 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
7206 tcg_temp_free_i32(tmp2);
7207 tcg_temp_free_ptr(fpstatus);
7208 break;
7210 case NEON_2RM_VABS_F:
7211 gen_vfp_abs(0);
7212 break;
7213 case NEON_2RM_VNEG_F:
7214 gen_vfp_neg(0);
7215 break;
7216 case NEON_2RM_VSWP:
7217 tmp2 = neon_load_reg(rd, pass);
7218 neon_store_reg(rm, pass, tmp2);
7219 break;
7220 case NEON_2RM_VTRN:
7221 tmp2 = neon_load_reg(rd, pass);
7222 switch (size) {
7223 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7224 case 1: gen_neon_trn_u16(tmp, tmp2); break;
7225 default: abort();
7227 neon_store_reg(rm, pass, tmp2);
7228 break;
7229 case NEON_2RM_VRINTN:
7230 case NEON_2RM_VRINTA:
7231 case NEON_2RM_VRINTM:
7232 case NEON_2RM_VRINTP:
7233 case NEON_2RM_VRINTZ:
7235 TCGv_i32 tcg_rmode;
7236 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7237 int rmode;
7239 if (op == NEON_2RM_VRINTZ) {
7240 rmode = FPROUNDING_ZERO;
7241 } else {
7242 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7245 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7246 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7247 cpu_env);
7248 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7249 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7250 cpu_env);
7251 tcg_temp_free_ptr(fpstatus);
7252 tcg_temp_free_i32(tcg_rmode);
7253 break;
7255 case NEON_2RM_VRINTX:
7257 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7258 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7259 tcg_temp_free_ptr(fpstatus);
7260 break;
7262 case NEON_2RM_VCVTAU:
7263 case NEON_2RM_VCVTAS:
7264 case NEON_2RM_VCVTNU:
7265 case NEON_2RM_VCVTNS:
7266 case NEON_2RM_VCVTPU:
7267 case NEON_2RM_VCVTPS:
7268 case NEON_2RM_VCVTMU:
7269 case NEON_2RM_VCVTMS:
7271 bool is_signed = !extract32(insn, 7, 1);
7272 TCGv_ptr fpst = get_fpstatus_ptr(1);
7273 TCGv_i32 tcg_rmode, tcg_shift;
7274 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7276 tcg_shift = tcg_const_i32(0);
7277 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7278 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7279 cpu_env);
7281 if (is_signed) {
7282 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7283 tcg_shift, fpst);
7284 } else {
7285 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7286 tcg_shift, fpst);
7289 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7290 cpu_env);
7291 tcg_temp_free_i32(tcg_rmode);
7292 tcg_temp_free_i32(tcg_shift);
7293 tcg_temp_free_ptr(fpst);
7294 break;
7296 case NEON_2RM_VRECPE:
7298 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7299 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7300 tcg_temp_free_ptr(fpstatus);
7301 break;
7303 case NEON_2RM_VRSQRTE:
7305 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7306 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7307 tcg_temp_free_ptr(fpstatus);
7308 break;
7310 case NEON_2RM_VRECPE_F:
7312 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7313 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7314 tcg_temp_free_ptr(fpstatus);
7315 break;
7317 case NEON_2RM_VRSQRTE_F:
7319 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7320 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7321 tcg_temp_free_ptr(fpstatus);
7322 break;
7324 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
7325 gen_vfp_sito(0, 1);
7326 break;
7327 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
7328 gen_vfp_uito(0, 1);
7329 break;
7330 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
7331 gen_vfp_tosiz(0, 1);
7332 break;
7333 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
7334 gen_vfp_touiz(0, 1);
7335 break;
7336 default:
7337 /* Reserved op values were caught by the
7338 * neon_2rm_sizes[] check earlier.
7340 abort();
7342 if (neon_2rm_is_float_op(op)) {
7343 tcg_gen_st_f32(cpu_F0s, cpu_env,
7344 neon_reg_offset(rd, pass));
7345 } else {
7346 neon_store_reg(rd, pass, tmp);
7349 break;
7351 } else if ((insn & (1 << 10)) == 0) {
7352 /* VTBL, VTBX. */
7353 int n = ((insn >> 8) & 3) + 1;
7354 if ((rn + n) > 32) {
7355 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7356 * helper function running off the end of the register file.
7358 return 1;
7360 n <<= 3;
7361 if (insn & (1 << 6)) {
7362 tmp = neon_load_reg(rd, 0);
7363 } else {
7364 tmp = tcg_temp_new_i32();
7365 tcg_gen_movi_i32(tmp, 0);
7367 tmp2 = neon_load_reg(rm, 0);
7368 tmp4 = tcg_const_i32(rn);
7369 tmp5 = tcg_const_i32(n);
7370 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7371 tcg_temp_free_i32(tmp);
7372 if (insn & (1 << 6)) {
7373 tmp = neon_load_reg(rd, 1);
7374 } else {
7375 tmp = tcg_temp_new_i32();
7376 tcg_gen_movi_i32(tmp, 0);
7378 tmp3 = neon_load_reg(rm, 1);
7379 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
7380 tcg_temp_free_i32(tmp5);
7381 tcg_temp_free_i32(tmp4);
7382 neon_store_reg(rd, 0, tmp2);
7383 neon_store_reg(rd, 1, tmp3);
7384 tcg_temp_free_i32(tmp);
7385 } else if ((insn & 0x380) == 0) {
7386 /* VDUP */
7387 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7388 return 1;
7390 if (insn & (1 << 19)) {
7391 tmp = neon_load_reg(rm, 1);
7392 } else {
7393 tmp = neon_load_reg(rm, 0);
7395 if (insn & (1 << 16)) {
7396 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
7397 } else if (insn & (1 << 17)) {
7398 if ((insn >> 18) & 1)
7399 gen_neon_dup_high16(tmp);
7400 else
7401 gen_neon_dup_low16(tmp);
7403 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7404 tmp2 = tcg_temp_new_i32();
7405 tcg_gen_mov_i32(tmp2, tmp);
7406 neon_store_reg(rd, pass, tmp2);
7408 tcg_temp_free_i32(tmp);
7409 } else {
7410 return 1;
7414 return 0;
7417 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
7419 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7420 const ARMCPRegInfo *ri;
7422 cpnum = (insn >> 8) & 0xf;
7424 /* First check for coprocessor space used for XScale/iwMMXt insns */
7425 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
7426 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7427 return 1;
7429 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7430 return disas_iwmmxt_insn(s, insn);
7431 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7432 return disas_dsp_insn(s, insn);
7434 return 1;
7437 /* Otherwise treat as a generic register access */
7438 is64 = (insn & (1 << 25)) == 0;
7439 if (!is64 && ((insn & (1 << 4)) == 0)) {
7440 /* cdp */
7441 return 1;
7444 crm = insn & 0xf;
7445 if (is64) {
7446 crn = 0;
7447 opc1 = (insn >> 4) & 0xf;
7448 opc2 = 0;
7449 rt2 = (insn >> 16) & 0xf;
7450 } else {
7451 crn = (insn >> 16) & 0xf;
7452 opc1 = (insn >> 21) & 7;
7453 opc2 = (insn >> 5) & 7;
7454 rt2 = 0;
7456 isread = (insn >> 20) & 1;
7457 rt = (insn >> 12) & 0xf;
7459 ri = get_arm_cp_reginfo(s->cp_regs,
7460 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
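/* A matching register definition exists: check the static access
 * permissions, emit any runtime access-check call, handle the special
 * register types (NOP, WFI), then generate the actual read or write. */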
7461 if (ri) {
7462 /* Check access permissions */
7463 if (!cp_access_ok(s->current_el, ri, isread)) {
7464 return 1;
7467 if (ri->accessfn ||
7468 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
7469 /* Emit code to perform further access permissions checks at
7470 * runtime; this may result in an exception.
7471 * Note that on XScale all cp0..c13 registers do an access check
7472 * call in order to handle c15_cpar.
7474 TCGv_ptr tmpptr;
7475 TCGv_i32 tcg_syn, tcg_isread;
7476 uint32_t syndrome;
7478 /* Note that since we are an implementation which takes an
7479 * exception on a trapped conditional instruction only if the
7480 * instruction passes its condition code check, we can take
7481 * advantage of the clause in the ARM ARM that allows us to set
7482 * the COND field in the instruction to 0xE in all cases.
7483 * We could fish the actual condition out of the insn (ARM)
7484 * or the condexec bits (Thumb) but it isn't necessary.
7486 switch (cpnum) {
7487 case 14:
7488 if (is64) {
7489 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7490 isread, false);
7491 } else {
7492 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7493 rt, isread, false);
7495 break;
7496 case 15:
7497 if (is64) {
7498 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7499 isread, false);
7500 } else {
7501 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7502 rt, isread, false);
7504 break;
7505 default:
7506 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7507 * so this can only happen if this is an ARMv7 or earlier CPU,
7508 * in which case the syndrome information won't actually be
7509 * guest visible.
7511 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
7512 syndrome = syn_uncategorized();
7513 break;
7516 gen_set_condexec(s);
7517 gen_set_pc_im(s, s->pc - 4);
7518 tmpptr = tcg_const_ptr(ri);
7519 tcg_syn = tcg_const_i32(syndrome);
7520 tcg_isread = tcg_const_i32(isread);
7521 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7522 tcg_isread);
7523 tcg_temp_free_ptr(tmpptr);
7524 tcg_temp_free_i32(tcg_syn);
7525 tcg_temp_free_i32(tcg_isread);
7528 /* Handle special cases first */
7529 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7530 case ARM_CP_NOP:
7531 return 0;
7532 case ARM_CP_WFI:
7533 if (isread) {
7534 return 1;
7536 gen_set_pc_im(s, s->pc);
7537 s->is_jmp = DISAS_WFI;
7538 return 0;
7539 default:
7540 break;
7543 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7544 gen_io_start();
7547 if (isread) {
7548 /* Read */
7549 if (is64) {
7550 TCGv_i64 tmp64;
7551 TCGv_i32 tmp;
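/* 64-bit read: obtain the value (reset constant, readfn helper, or a
 * direct load from the CPU state field) and split it into rt (low word)
 * and rt2 (high word). */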
7552 if (ri->type & ARM_CP_CONST) {
7553 tmp64 = tcg_const_i64(ri->resetvalue);
7554 } else if (ri->readfn) {
7555 TCGv_ptr tmpptr;
7556 tmp64 = tcg_temp_new_i64();
7557 tmpptr = tcg_const_ptr(ri);
7558 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7559 tcg_temp_free_ptr(tmpptr);
7560 } else {
7561 tmp64 = tcg_temp_new_i64();
7562 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7564 tmp = tcg_temp_new_i32();
7565 tcg_gen_extrl_i64_i32(tmp, tmp64);
7566 store_reg(s, rt, tmp);
7567 tcg_gen_shri_i64(tmp64, tmp64, 32);
7568 tmp = tcg_temp_new_i32();
7569 tcg_gen_extrl_i64_i32(tmp, tmp64);
7570 tcg_temp_free_i64(tmp64);
7571 store_reg(s, rt2, tmp);
7572 } else {
7573 TCGv_i32 tmp;
7574 if (ri->type & ARM_CP_CONST) {
7575 tmp = tcg_const_i32(ri->resetvalue);
7576 } else if (ri->readfn) {
7577 TCGv_ptr tmpptr;
7578 tmp = tcg_temp_new_i32();
7579 tmpptr = tcg_const_ptr(ri);
7580 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7581 tcg_temp_free_ptr(tmpptr);
7582 } else {
7583 tmp = load_cpu_offset(ri->fieldoffset);
7585 if (rt == 15) {
7586 /* A destination register of r15 for 32-bit loads sets
7587 * the condition codes from the high 4 bits of the value. */
7589 gen_set_nzcv(tmp);
7590 tcg_temp_free_i32(tmp);
7591 } else {
7592 store_reg(s, rt, tmp);
7595 } else {
7596 /* Write */
7597 if (ri->type & ARM_CP_CONST) {
7598 /* If not forbidden by access permissions, treat as WI */
7599 return 0;
7602 if (is64) {
7603 TCGv_i32 tmplo, tmphi;
7604 TCGv_i64 tmp64 = tcg_temp_new_i64();
7605 tmplo = load_reg(s, rt);
7606 tmphi = load_reg(s, rt2);
7607 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7608 tcg_temp_free_i32(tmplo);
7609 tcg_temp_free_i32(tmphi);
7610 if (ri->writefn) {
7611 TCGv_ptr tmpptr = tcg_const_ptr(ri);
7612 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7613 tcg_temp_free_ptr(tmpptr);
7614 } else {
7615 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7617 tcg_temp_free_i64(tmp64);
7618 } else {
7619 if (ri->writefn) {
7620 TCGv_i32 tmp;
7621 TCGv_ptr tmpptr;
7622 tmp = load_reg(s, rt);
7623 tmpptr = tcg_const_ptr(ri);
7624 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7625 tcg_temp_free_ptr(tmpptr);
7626 tcg_temp_free_i32(tmp);
7627 } else {
7628 TCGv_i32 tmp = load_reg(s, rt);
7629 store_cpu_offset(tmp, ri->fieldoffset);
7634 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7635 /* I/O operations must end the TB here (whether read or write) */
7636 gen_io_end();
7637 gen_lookup_tb(s);
7638 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
7639 /* We default to ending the TB on a coprocessor register write,
7640 * but allow this to be suppressed by the register definition
7641 * (usually only necessary to work around guest bugs).
7643 gen_lookup_tb(s);
7646 return 0;
7649 /* Unknown register; this might be a guest error or a QEMU
7650 * unimplemented feature.
7652 if (is64) {
7653 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7654 "64 bit system register cp:%d opc1: %d crm:%d "
7655 "(%s)\n",
7656 isread ? "read" : "write", cpnum, opc1, crm,
7657 s->ns ? "non-secure" : "secure");
7658 } else {
7659 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7660 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7661 "(%s)\n",
7662 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7663 s->ns ? "non-secure" : "secure");
7666 return 1;
7670 /* Store a 64-bit value to a register pair. Clobbers val. */
7671 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
7673 TCGv_i32 tmp;
7674 tmp = tcg_temp_new_i32();
7675 tcg_gen_extrl_i64_i32(tmp, val);
7676 store_reg(s, rlow, tmp);
7677 tmp = tcg_temp_new_i32();
7678 tcg_gen_shri_i64(val, val, 32);
7679 tcg_gen_extrl_i64_i32(tmp, val);
7680 store_reg(s, rhigh, tmp);
7683 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
7684 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
7686 TCGv_i64 tmp;
7687 TCGv_i32 tmp2;
7689 /* Load value and extend to 64 bits. */
7690 tmp = tcg_temp_new_i64();
7691 tmp2 = load_reg(s, rlow);
7692 tcg_gen_extu_i32_i64(tmp, tmp2);
7693 tcg_temp_free_i32(tmp2);
7694 tcg_gen_add_i64(val, val, tmp);
7695 tcg_temp_free_i64(tmp);
7698 /* load and add a 64-bit value from a register pair. */
7699 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
7701 TCGv_i64 tmp;
7702 TCGv_i32 tmpl;
7703 TCGv_i32 tmph;
7705 /* Load the 64-bit value from the register pair (rhigh:rlow). */
7706 tmpl = load_reg(s, rlow);
7707 tmph = load_reg(s, rhigh);
7708 tmp = tcg_temp_new_i64();
7709 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7710 tcg_temp_free_i32(tmpl);
7711 tcg_temp_free_i32(tmph);
7712 tcg_gen_add_i64(val, val, tmp);
7713 tcg_temp_free_i64(tmp);
7716 /* Set N and Z flags from hi|lo. */
7717 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
7719 tcg_gen_mov_i32(cpu_NF, hi);
7720 tcg_gen_or_i32(cpu_ZF, lo, hi);
7723 /* Load/Store exclusive instructions are implemented by remembering
7724 the value/address loaded, and seeing if these are the same
7725 when the store is performed. This should be sufficient to implement
7726 the architecturally mandated semantics, and avoids having to monitor
7727 regular stores.
7729 In system emulation mode only one CPU will be running at once, so
7730 this sequence is effectively atomic. In user emulation mode we
7731 throw an exception and handle the atomic operation elsewhere. */
7732 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
7733 TCGv_i32 addr, int size)
7735 TCGv_i32 tmp = tcg_temp_new_i32();
7737 s->is_ldex = true;
7739 switch (size) {
7740 case 0:
7741 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
7742 break;
7743 case 1:
7744 gen_aa32_ld16ua(s, tmp, addr, get_mem_index(s));
7745 break;
7746 case 2:
7747 case 3:
7748 gen_aa32_ld32ua(s, tmp, addr, get_mem_index(s));
7749 break;
7750 default:
7751 abort();
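/* For the doubleword case also load the word at addr + 4 into rt2 and
 * record the full 64-bit value in cpu_exclusive_val; for smaller sizes the
 * single loaded value is zero-extended into it. */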
7754 if (size == 3) {
7755 TCGv_i32 tmp2 = tcg_temp_new_i32();
7756 TCGv_i32 tmp3 = tcg_temp_new_i32();
7758 tcg_gen_addi_i32(tmp2, addr, 4);
7759 gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
7760 tcg_temp_free_i32(tmp2);
7761 tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
7762 store_reg(s, rt2, tmp3);
7763 } else {
7764 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
7767 store_reg(s, rt, tmp);
7768 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
7771 static void gen_clrex(DisasContext *s)
7773 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7776 #ifdef CONFIG_USER_ONLY
7777 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7778 TCGv_i32 addr, int size)
7780 tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
7781 tcg_gen_movi_i32(cpu_exclusive_info,
7782 size | (rd << 4) | (rt << 8) | (rt2 << 12));
7783 gen_exception_internal_insn(s, 4, EXCP_STREX);
7785 #else
7786 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7787 TCGv_i32 addr, int size)
7789 TCGv_i32 tmp;
7790 TCGv_i64 val64, extaddr;
7791 TCGLabel *done_label;
7792 TCGLabel *fail_label;
7794 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7795 [addr] = {Rt};
7796 {Rd} = 0;
7797 } else {
7798 {Rd} = 1;
7799 } */
7800 fail_label = gen_new_label();
7801 done_label = gen_new_label();
7802 extaddr = tcg_temp_new_i64();
7803 tcg_gen_extu_i32_i64(extaddr, addr);
7804 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7805 tcg_temp_free_i64(extaddr);
7807 tmp = tcg_temp_new_i32();
7808 switch (size) {
7809 case 0:
7810 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
7811 break;
7812 case 1:
7813 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
7814 break;
7815 case 2:
7816 case 3:
7817 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
7818 break;
7819 default:
7820 abort();
7823 val64 = tcg_temp_new_i64();
7824 if (size == 3) {
7825 TCGv_i32 tmp2 = tcg_temp_new_i32();
7826 TCGv_i32 tmp3 = tcg_temp_new_i32();
7827 tcg_gen_addi_i32(tmp2, addr, 4);
7828 gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
7829 tcg_temp_free_i32(tmp2);
7830 tcg_gen_concat_i32_i64(val64, tmp, tmp3);
7831 tcg_temp_free_i32(tmp3);
7832 } else {
7833 tcg_gen_extu_i32_i64(val64, tmp);
7835 tcg_temp_free_i32(tmp);
7837 tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
7838 tcg_temp_free_i64(val64);
7840 tmp = load_reg(s, rt);
7841 switch (size) {
7842 case 0:
7843 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
7844 break;
7845 case 1:
7846 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
7847 break;
7848 case 2:
7849 case 3:
7850 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7851 break;
7852 default:
7853 abort();
7855 tcg_temp_free_i32(tmp);
7856 if (size == 3) {
7857 tcg_gen_addi_i32(addr, addr, 4);
7858 tmp = load_reg(s, rt2);
7859 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7860 tcg_temp_free_i32(tmp);
7862 tcg_gen_movi_i32(cpu_R[rd], 0);
7863 tcg_gen_br(done_label);
7864 gen_set_label(fail_label);
7865 tcg_gen_movi_i32(cpu_R[rd], 1);
7866 gen_set_label(done_label);
7867 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7869 #endif
7871 /* gen_srs:
7872 * @env: CPUARMState
7873 * @s: DisasContext
7874 * @mode: mode field from insn (which stack to store to)
7875 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7876 * @writeback: true if writeback bit set
7878 * Generate code for the SRS (Store Return State) insn.
7880 static void gen_srs(DisasContext *s,
7881 uint32_t mode, uint32_t amode, bool writeback)
7883 int32_t offset;
7884 TCGv_i32 addr, tmp;
7885 bool undef = false;
7887 /* SRS is:
7888 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
7889 * and specified mode is monitor mode
7890 * - UNDEFINED in Hyp mode
7891 * - UNPREDICTABLE in User or System mode
7892 * - UNPREDICTABLE if the specified mode is:
7893 * -- not implemented
7894 * -- not a valid mode number
7895 * -- a mode that's at a higher exception level
7896 * -- Monitor, if we are Non-secure
7897 * For the UNPREDICTABLE cases we choose to UNDEF.
7899 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
7900 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
7901 return;
7904 if (s->current_el == 0 || s->current_el == 2) {
7905 undef = true;
7908 switch (mode) {
7909 case ARM_CPU_MODE_USR:
7910 case ARM_CPU_MODE_FIQ:
7911 case ARM_CPU_MODE_IRQ:
7912 case ARM_CPU_MODE_SVC:
7913 case ARM_CPU_MODE_ABT:
7914 case ARM_CPU_MODE_UND:
7915 case ARM_CPU_MODE_SYS:
7916 break;
7917 case ARM_CPU_MODE_HYP:
7918 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
7919 undef = true;
7921 break;
7922 case ARM_CPU_MODE_MON:
7923 /* No need to check specifically for "are we non-secure" because
7924 * we've already made EL0 UNDEF and handled the trap for S-EL1;
7925 * so if this isn't EL3 then we must be non-secure.
7927 if (s->current_el != 3) {
7928 undef = true;
7930 break;
7931 default:
7932 undef = true;
7935 if (undef) {
7936 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
7937 default_exception_el(s));
7938 return;
7941 addr = tcg_temp_new_i32();
7942 tmp = tcg_const_i32(mode);
7943 /* get_r13_banked() will raise an exception if called from System mode */
7944 gen_set_condexec(s);
7945 gen_set_pc_im(s, s->pc - 4);
7946 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7947 tcg_temp_free_i32(tmp);
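/* Adjust the banked SP so that 'addr' points at the lower of the two words
 * to be stored (LR at addr, SPSR at addr + 4), according to the addressing
 * mode. */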
7948 switch (amode) {
7949 case 0: /* DA */
7950 offset = -4;
7951 break;
7952 case 1: /* IA */
7953 offset = 0;
7954 break;
7955 case 2: /* DB */
7956 offset = -8;
7957 break;
7958 case 3: /* IB */
7959 offset = 4;
7960 break;
7961 default:
7962 abort();
7964 tcg_gen_addi_i32(addr, addr, offset);
7965 tmp = load_reg(s, 14);
7966 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7967 tcg_temp_free_i32(tmp);
7968 tmp = load_cpu_field(spsr);
7969 tcg_gen_addi_i32(addr, addr, 4);
7970 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
7971 tcg_temp_free_i32(tmp);
7972 if (writeback) {
7973 switch (amode) {
7974 case 0:
7975 offset = -8;
7976 break;
7977 case 1:
7978 offset = 4;
7979 break;
7980 case 2:
7981 offset = -4;
7982 break;
7983 case 3:
7984 offset = 0;
7985 break;
7986 default:
7987 abort();
7989 tcg_gen_addi_i32(addr, addr, offset);
7990 tmp = tcg_const_i32(mode);
7991 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7992 tcg_temp_free_i32(tmp);
7994 tcg_temp_free_i32(addr);
7995 s->is_jmp = DISAS_UPDATE;
7998 static void disas_arm_insn(DisasContext *s, unsigned int insn)
8000 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
8001 TCGv_i32 tmp;
8002 TCGv_i32 tmp2;
8003 TCGv_i32 tmp3;
8004 TCGv_i32 addr;
8005 TCGv_i64 tmp64;
8007 /* M variants do not implement ARM mode. */
8008 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8009 goto illegal_op;
8011 cond = insn >> 28;
8012 if (cond == 0xf){
8013 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8014 * choose to UNDEF. In ARMv5 and above the space is used
8015 * for miscellaneous unconditional instructions.
8017 ARCH(5);
8019 /* Unconditional instructions. */
8020 if (((insn >> 25) & 7) == 1) {
8021 /* NEON Data processing. */
8022 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
8023 goto illegal_op;
8026 if (disas_neon_data_insn(s, insn)) {
8027 goto illegal_op;
8029 return;
8031 if ((insn & 0x0f100000) == 0x04000000) {
8032 /* NEON load/store. */
8033 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
8034 goto illegal_op;
8037 if (disas_neon_ls_insn(s, insn)) {
8038 goto illegal_op;
8040 return;
8042 if ((insn & 0x0f000e10) == 0x0e000a00) {
8043 /* VFP. */
8044 if (disas_vfp_insn(s, insn)) {
8045 goto illegal_op;
8047 return;
8049 if (((insn & 0x0f30f000) == 0x0510f000) ||
8050 ((insn & 0x0f30f010) == 0x0710f000)) {
8051 if ((insn & (1 << 22)) == 0) {
8052 /* PLDW; v7MP */
8053 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
8054 goto illegal_op;
8057 /* Otherwise PLD; v5TE+ */
8058 ARCH(5TE);
8059 return;
8061 if (((insn & 0x0f70f000) == 0x0450f000) ||
8062 ((insn & 0x0f70f010) == 0x0650f000)) {
8063 ARCH(7);
8064 return; /* PLI; V7 */
8066 if (((insn & 0x0f700000) == 0x04100000) ||
8067 ((insn & 0x0f700010) == 0x06100000)) {
8068 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
8069 goto illegal_op;
8071 return; /* v7MP: Unallocated memory hint: must NOP */
8074 if ((insn & 0x0ffffdff) == 0x01010000) {
8075 ARCH(6);
8076 /* setend */
8077 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8078 gen_helper_setend(cpu_env);
8079 s->is_jmp = DISAS_UPDATE;
8081 return;
8082 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8083 switch ((insn >> 4) & 0xf) {
8084 case 1: /* clrex */
8085 ARCH(6K);
8086 gen_clrex(s);
8087 return;
8088 case 4: /* dsb */
8089 case 5: /* dmb */
8090 ARCH(7);
8091 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
8092 return;
8093 case 6: /* isb */
8094 /* We need to break the TB after this insn to execute
8095 * self-modifying code correctly and also to take
8096 * any pending interrupts immediately.
8098 gen_lookup_tb(s);
8099 return;
8100 default:
8101 goto illegal_op;
8103 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8104 /* srs */
8105 ARCH(6);
8106 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
8107 return;
8108 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
8109 /* rfe */
8110 int32_t offset;
8111 if (IS_USER(s))
8112 goto illegal_op;
8113 ARCH(6);
8114 rn = (insn >> 16) & 0xf;
8115 addr = load_reg(s, rn);
8116 i = (insn >> 23) & 3;
8117 switch (i) {
8118 case 0: offset = -4; break; /* DA */
8119 case 1: offset = 0; break; /* IA */
8120 case 2: offset = -8; break; /* DB */
8121 case 3: offset = 4; break; /* IB */
8122 default: abort();
8124 if (offset)
8125 tcg_gen_addi_i32(addr, addr, offset);
8126 /* Load PC into tmp and CPSR into tmp2. */
8127 tmp = tcg_temp_new_i32();
8128 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8129 tcg_gen_addi_i32(addr, addr, 4);
8130 tmp2 = tcg_temp_new_i32();
8131 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
8132 if (insn & (1 << 21)) {
8133 /* Base writeback. */
8134 switch (i) {
8135 case 0: offset = -8; break;
8136 case 1: offset = 4; break;
8137 case 2: offset = -4; break;
8138 case 3: offset = 0; break;
8139 default: abort();
8141 if (offset)
8142 tcg_gen_addi_i32(addr, addr, offset);
8143 store_reg(s, rn, addr);
8144 } else {
8145 tcg_temp_free_i32(addr);
8147 gen_rfe(s, tmp, tmp2);
8148 return;
8149 } else if ((insn & 0x0e000000) == 0x0a000000) {
8150 /* branch link and change to thumb (blx <offset>) */
8151 int32_t offset;
8153 val = (uint32_t)s->pc;
8154 tmp = tcg_temp_new_i32();
8155 tcg_gen_movi_i32(tmp, val);
8156 store_reg(s, 14, tmp);
8157 /* Sign-extend the 24-bit offset */
8158 offset = (((int32_t)insn) << 8) >> 8;
8159 /* offset * 4 + bit24 * 2 + (thumb bit) */
8160 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8161 /* pipeline offset */
8162 val += 4;
8163 /* protected by ARCH(5); above, near the start of uncond block */
8164 gen_bx_im(s, val);
8165 return;
8166 } else if ((insn & 0x0e000f00) == 0x0c000100) {
8167 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
8168 /* iWMMXt register transfer. */
8169 if (extract32(s->c15_cpar, 1, 1)) {
8170 if (!disas_iwmmxt_insn(s, insn)) {
8171 return;
8175 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8176 /* Coprocessor double register transfer. */
8177 ARCH(5TE);
8178 } else if ((insn & 0x0f000010) == 0x0e000010) {
8179 /* Additional coprocessor register transfer. */
8180 } else if ((insn & 0x0ff10020) == 0x01000000) {
8181 uint32_t mask;
8182 uint32_t val;
8183 /* cps (privileged) */
8184 if (IS_USER(s))
8185 return;
8186 mask = val = 0;
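/* Build the CPSR mask/value pair: bit 19 enables the A/I/F updates (with
 * bit 18 choosing whether those bits are set or cleared), and bit 17
 * enables a mode change to the mode number in bits [4:0]. */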
8187 if (insn & (1 << 19)) {
8188 if (insn & (1 << 8))
8189 mask |= CPSR_A;
8190 if (insn & (1 << 7))
8191 mask |= CPSR_I;
8192 if (insn & (1 << 6))
8193 mask |= CPSR_F;
8194 if (insn & (1 << 18))
8195 val |= mask;
8197 if (insn & (1 << 17)) {
8198 mask |= CPSR_M;
8199 val |= (insn & 0x1f);
8201 if (mask) {
8202 gen_set_psr_im(s, mask, 0, val);
8204 return;
8206 goto illegal_op;
8208 if (cond != 0xe) {
8209 /* If the condition is not "always", generate a conditional jump to
8210 the next instruction. */
8211 s->condlabel = gen_new_label();
8212 arm_gen_test_cc(cond ^ 1, s->condlabel);
8213 s->condjmp = 1;
8215 if ((insn & 0x0f900000) == 0x03000000) {
8216 if ((insn & (1 << 21)) == 0) {
8217 ARCH(6T2);
8218 rd = (insn >> 12) & 0xf;
8219 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8220 if ((insn & (1 << 22)) == 0) {
8221 /* MOVW */
8222 tmp = tcg_temp_new_i32();
8223 tcg_gen_movi_i32(tmp, val);
8224 } else {
8225 /* MOVT */
8226 tmp = load_reg(s, rd);
8227 tcg_gen_ext16u_i32(tmp, tmp);
8228 tcg_gen_ori_i32(tmp, tmp, val << 16);
8230 store_reg(s, rd, tmp);
8231 } else {
8232 if (((insn >> 12) & 0xf) != 0xf)
8233 goto illegal_op;
8234 if (((insn >> 16) & 0xf) == 0) {
8235 gen_nop_hint(s, insn & 0xff);
8236 } else {
8237 /* CPSR = immediate */
8238 val = insn & 0xff;
8239 shift = ((insn >> 8) & 0xf) * 2;
8240 if (shift)
8241 val = (val >> shift) | (val << (32 - shift));
8242 i = ((insn & (1 << 22)) != 0);
8243 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8244 i, val)) {
8245 goto illegal_op;
8249 } else if ((insn & 0x0f900000) == 0x01000000
8250 && (insn & 0x00000090) != 0x00000090) {
8251 /* miscellaneous instructions */
8252 op1 = (insn >> 21) & 3;
8253 sh = (insn >> 4) & 0xf;
8254 rm = insn & 0xf;
8255 switch (sh) {
8256 case 0x0: /* MSR, MRS */
8257 if (insn & (1 << 9)) {
8258 /* MSR (banked) and MRS (banked) */
8259 int sysm = extract32(insn, 16, 4) |
8260 (extract32(insn, 8, 1) << 4);
8261 int r = extract32(insn, 22, 1);
8263 if (op1 & 1) {
8264 /* MSR (banked) */
8265 gen_msr_banked(s, r, sysm, rm);
8266 } else {
8267 /* MRS (banked) */
8268 int rd = extract32(insn, 12, 4);
8270 gen_mrs_banked(s, r, sysm, rd);
8272 break;
8275 /* MSR, MRS (for PSRs) */
8276 if (op1 & 1) {
8277 /* PSR = reg */
8278 tmp = load_reg(s, rm);
8279 i = ((op1 & 2) != 0);
8280 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
8281 goto illegal_op;
8282 } else {
8283 /* reg = PSR */
8284 rd = (insn >> 12) & 0xf;
8285 if (op1 & 2) {
8286 if (IS_USER(s))
8287 goto illegal_op;
8288 tmp = load_cpu_field(spsr);
8289 } else {
8290 tmp = tcg_temp_new_i32();
8291 gen_helper_cpsr_read(tmp, cpu_env);
8293 store_reg(s, rd, tmp);
8295 break;
8296 case 0x1:
8297 if (op1 == 1) {
8298 /* branch/exchange thumb (bx). */
8299 ARCH(4T);
8300 tmp = load_reg(s, rm);
8301 gen_bx(s, tmp);
8302 } else if (op1 == 3) {
8303 /* clz */
8304 ARCH(5);
8305 rd = (insn >> 12) & 0xf;
8306 tmp = load_reg(s, rm);
8307 gen_helper_clz(tmp, tmp);
8308 store_reg(s, rd, tmp);
8309 } else {
8310 goto illegal_op;
8312 break;
8313 case 0x2:
8314 if (op1 == 1) {
8315 ARCH(5J); /* bxj */
8316 /* Trivial implementation equivalent to bx. */
8317 tmp = load_reg(s, rm);
8318 gen_bx(s, tmp);
8319 } else {
8320 goto illegal_op;
8322 break;
8323 case 0x3:
8324 if (op1 != 1)
8325 goto illegal_op;
8327 ARCH(5);
8328 /* branch link/exchange thumb (blx) */
8329 tmp = load_reg(s, rm);
8330 tmp2 = tcg_temp_new_i32();
8331 tcg_gen_movi_i32(tmp2, s->pc);
8332 store_reg(s, 14, tmp2);
8333 gen_bx(s, tmp);
8334 break;
8335 case 0x4:
8337 /* crc32/crc32c */
8338 uint32_t c = extract32(insn, 8, 4);
8340 /* Check this CPU supports ARMv8 CRC instructions.
8341 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8342 * Bits 8, 10 and 11 should be zero.
8344 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
8345 (c & 0xd) != 0) {
8346 goto illegal_op;
8349 rn = extract32(insn, 16, 4);
8350 rd = extract32(insn, 12, 4);
8352 tmp = load_reg(s, rn);
8353 tmp2 = load_reg(s, rm);
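/* Narrow the second operand to the element size selected by op1 (byte,
 * halfword or word), then call the crc32/crc32c helper with that size. */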
8354 if (op1 == 0) {
8355 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8356 } else if (op1 == 1) {
8357 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8359 tmp3 = tcg_const_i32(1 << op1);
8360 if (c & 0x2) {
8361 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8362 } else {
8363 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8365 tcg_temp_free_i32(tmp2);
8366 tcg_temp_free_i32(tmp3);
8367 store_reg(s, rd, tmp);
8368 break;
8370 case 0x5: /* saturating add/subtract */
8371 ARCH(5TE);
8372 rd = (insn >> 12) & 0xf;
8373 rn = (insn >> 16) & 0xf;
8374 tmp = load_reg(s, rm);
8375 tmp2 = load_reg(s, rn);
8376 if (op1 & 2)
8377 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
8378 if (op1 & 1)
8379 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
8380 else
8381 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8382 tcg_temp_free_i32(tmp2);
8383 store_reg(s, rd, tmp);
8384 break;
8385 case 7:
8387 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
8388 switch (op1) {
8389 case 1:
8390 /* bkpt */
8391 ARCH(5);
8392 gen_exception_insn(s, 4, EXCP_BKPT,
8393 syn_aa32_bkpt(imm16, false),
8394 default_exception_el(s));
8395 break;
8396 case 2:
8397 /* Hypervisor call (v7) */
8398 ARCH(7);
8399 if (IS_USER(s)) {
8400 goto illegal_op;
8402 gen_hvc(s, imm16);
8403 break;
8404 case 3:
8405 /* Secure monitor call (v6+) */
8406 ARCH(6K);
8407 if (IS_USER(s)) {
8408 goto illegal_op;
8410 gen_smc(s);
8411 break;
8412 default:
8413 goto illegal_op;
8415 break;
8417 case 0x8: /* signed multiply */
8418 case 0xa:
8419 case 0xc:
8420 case 0xe:
8421 ARCH(5TE);
8422 rs = (insn >> 8) & 0xf;
8423 rn = (insn >> 12) & 0xf;
8424 rd = (insn >> 16) & 0xf;
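/* op1 == 1 is the 32x16 "word by halfword" multiply (SMULW/SMLAW, result
 * taken from bits [47:16] of the product); the other encodings are 16x16
 * multiplies, with op1 == 2 accumulating into a 64-bit register pair and
 * op1 == 0 accumulating into a 32-bit register. */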
8425 if (op1 == 1) {
8426 /* (32 * 16) >> 16 */
8427 tmp = load_reg(s, rm);
8428 tmp2 = load_reg(s, rs);
8429 if (sh & 4)
8430 tcg_gen_sari_i32(tmp2, tmp2, 16);
8431 else
8432 gen_sxth(tmp2);
8433 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8434 tcg_gen_shri_i64(tmp64, tmp64, 16);
8435 tmp = tcg_temp_new_i32();
8436 tcg_gen_extrl_i64_i32(tmp, tmp64);
8437 tcg_temp_free_i64(tmp64);
8438 if ((sh & 2) == 0) {
8439 tmp2 = load_reg(s, rn);
8440 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8441 tcg_temp_free_i32(tmp2);
8443 store_reg(s, rd, tmp);
8444 } else {
8445 /* 16 * 16 */
8446 tmp = load_reg(s, rm);
8447 tmp2 = load_reg(s, rs);
8448 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
8449 tcg_temp_free_i32(tmp2);
8450 if (op1 == 2) {
8451 tmp64 = tcg_temp_new_i64();
8452 tcg_gen_ext_i32_i64(tmp64, tmp);
8453 tcg_temp_free_i32(tmp);
8454 gen_addq(s, tmp64, rn, rd);
8455 gen_storeq_reg(s, rn, rd, tmp64);
8456 tcg_temp_free_i64(tmp64);
8457 } else {
8458 if (op1 == 0) {
8459 tmp2 = load_reg(s, rn);
8460 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8461 tcg_temp_free_i32(tmp2);
8463 store_reg(s, rd, tmp);
8466 break;
8467 default:
8468 goto illegal_op;
8470 } else if (((insn & 0x0e000000) == 0 &&
8471 (insn & 0x00000090) != 0x90) ||
8472 ((insn & 0x0e000000) == (1 << 25))) {
8473 int set_cc, logic_cc, shiftop;
8475 op1 = (insn >> 21) & 0xf;
8476 set_cc = (insn >> 20) & 1;
8477 logic_cc = table_logic_cc[op1] & set_cc;
8479 /* data processing instruction */
8480 if (insn & (1 << 25)) {
8481 /* immediate operand */
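/* The modified immediate is an 8-bit value rotated right by twice the
 * 4-bit rotate field in bits [11:8]. */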
8482 val = insn & 0xff;
8483 shift = ((insn >> 8) & 0xf) * 2;
8484 if (shift) {
8485 val = (val >> shift) | (val << (32 - shift));
8487 tmp2 = tcg_temp_new_i32();
8488 tcg_gen_movi_i32(tmp2, val);
8489 if (logic_cc && shift) {
8490 gen_set_CF_bit31(tmp2);
8492 } else {
8493 /* register */
8494 rm = (insn) & 0xf;
8495 tmp2 = load_reg(s, rm);
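/* Bit 4 selects between a shift by immediate (amount in bits [11:7]) and a
 * shift by register (Rs in bits [11:8]); bits [6:5] give the shift type
 * (LSL/LSR/ASR/ROR). */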
8496 shiftop = (insn >> 5) & 3;
8497 if (!(insn & (1 << 4))) {
8498 shift = (insn >> 7) & 0x1f;
8499 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8500 } else {
8501 rs = (insn >> 8) & 0xf;
8502 tmp = load_reg(s, rs);
8503 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
8506 if (op1 != 0x0f && op1 != 0x0d) {
8507 rn = (insn >> 16) & 0xf;
8508 tmp = load_reg(s, rn);
8509 } else {
8510 TCGV_UNUSED_I32(tmp);
8512 rd = (insn >> 12) & 0xf;
8513 switch(op1) {
8514 case 0x00:
8515 tcg_gen_and_i32(tmp, tmp, tmp2);
8516 if (logic_cc) {
8517 gen_logic_CC(tmp);
8519 store_reg_bx(s, rd, tmp);
8520 break;
8521 case 0x01:
8522 tcg_gen_xor_i32(tmp, tmp, tmp2);
8523 if (logic_cc) {
8524 gen_logic_CC(tmp);
8526 store_reg_bx(s, rd, tmp);
8527 break;
8528 case 0x02:
8529 if (set_cc && rd == 15) {
8530 /* SUBS r15, ... is used for exception return. */
8531 if (IS_USER(s)) {
8532 goto illegal_op;
8534 gen_sub_CC(tmp, tmp, tmp2);
8535 gen_exception_return(s, tmp);
8536 } else {
8537 if (set_cc) {
8538 gen_sub_CC(tmp, tmp, tmp2);
8539 } else {
8540 tcg_gen_sub_i32(tmp, tmp, tmp2);
8542 store_reg_bx(s, rd, tmp);
8544 break;
8545 case 0x03:
8546 if (set_cc) {
8547 gen_sub_CC(tmp, tmp2, tmp);
8548 } else {
8549 tcg_gen_sub_i32(tmp, tmp2, tmp);
8551 store_reg_bx(s, rd, tmp);
8552 break;
8553 case 0x04:
8554 if (set_cc) {
8555 gen_add_CC(tmp, tmp, tmp2);
8556 } else {
8557 tcg_gen_add_i32(tmp, tmp, tmp2);
8559 store_reg_bx(s, rd, tmp);
8560 break;
8561 case 0x05:
8562 if (set_cc) {
8563 gen_adc_CC(tmp, tmp, tmp2);
8564 } else {
8565 gen_add_carry(tmp, tmp, tmp2);
8567 store_reg_bx(s, rd, tmp);
8568 break;
8569 case 0x06:
8570 if (set_cc) {
8571 gen_sbc_CC(tmp, tmp, tmp2);
8572 } else {
8573 gen_sub_carry(tmp, tmp, tmp2);
8575 store_reg_bx(s, rd, tmp);
8576 break;
8577 case 0x07:
8578 if (set_cc) {
8579 gen_sbc_CC(tmp, tmp2, tmp);
8580 } else {
8581 gen_sub_carry(tmp, tmp2, tmp);
8583 store_reg_bx(s, rd, tmp);
8584 break;
8585 case 0x08:
8586 if (set_cc) {
8587 tcg_gen_and_i32(tmp, tmp, tmp2);
8588 gen_logic_CC(tmp);
8590 tcg_temp_free_i32(tmp);
8591 break;
8592 case 0x09:
8593 if (set_cc) {
8594 tcg_gen_xor_i32(tmp, tmp, tmp2);
8595 gen_logic_CC(tmp);
8597 tcg_temp_free_i32(tmp);
8598 break;
8599 case 0x0a:
8600 if (set_cc) {
8601 gen_sub_CC(tmp, tmp, tmp2);
8603 tcg_temp_free_i32(tmp);
8604 break;
8605 case 0x0b:
8606 if (set_cc) {
8607 gen_add_CC(tmp, tmp, tmp2);
8609 tcg_temp_free_i32(tmp);
8610 break;
8611 case 0x0c:
8612 tcg_gen_or_i32(tmp, tmp, tmp2);
8613 if (logic_cc) {
8614 gen_logic_CC(tmp);
8616 store_reg_bx(s, rd, tmp);
8617 break;
8618 case 0x0d:
8619 if (logic_cc && rd == 15) {
8620 /* MOVS r15, ... is used for exception return. */
8621 if (IS_USER(s)) {
8622 goto illegal_op;
8624 gen_exception_return(s, tmp2);
8625 } else {
8626 if (logic_cc) {
8627 gen_logic_CC(tmp2);
8629 store_reg_bx(s, rd, tmp2);
8631 break;
8632 case 0x0e:
8633 tcg_gen_andc_i32(tmp, tmp, tmp2);
8634 if (logic_cc) {
8635 gen_logic_CC(tmp);
8637 store_reg_bx(s, rd, tmp);
8638 break;
8639 default:
8640 case 0x0f:
8641 tcg_gen_not_i32(tmp2, tmp2);
8642 if (logic_cc) {
8643 gen_logic_CC(tmp2);
8645 store_reg_bx(s, rd, tmp2);
8646 break;
8648 if (op1 != 0x0f && op1 != 0x0d) {
8649 tcg_temp_free_i32(tmp2);
8651 } else {
8652 /* other instructions */
8653 op1 = (insn >> 24) & 0xf;
8654 switch(op1) {
8655 case 0x0:
8656 case 0x1:
8657 /* multiplies, extra load/stores */
8658 sh = (insn >> 5) & 3;
8659 if (sh == 0) {
8660 if (op1 == 0x0) {
8661 rd = (insn >> 16) & 0xf;
8662 rn = (insn >> 12) & 0xf;
8663 rs = (insn >> 8) & 0xf;
8664 rm = (insn) & 0xf;
8665 op1 = (insn >> 20) & 0xf;
8666 switch (op1) {
8667 case 0: case 1: case 2: case 3: case 6:
8668 /* 32 bit mul */
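/* MUL/MLA/MLS: bit 22 selects subtract (MLS), bit 21 accumulate (MLA), bit 20 the S flag. */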
8669 tmp = load_reg(s, rs);
8670 tmp2 = load_reg(s, rm);
8671 tcg_gen_mul_i32(tmp, tmp, tmp2);
8672 tcg_temp_free_i32(tmp2);
8673 if (insn & (1 << 22)) {
8674 /* Subtract (mls) */
8675 ARCH(6T2);
8676 tmp2 = load_reg(s, rn);
8677 tcg_gen_sub_i32(tmp, tmp2, tmp);
8678 tcg_temp_free_i32(tmp2);
8679 } else if (insn & (1 << 21)) {
8680 /* Add */
8681 tmp2 = load_reg(s, rn);
8682 tcg_gen_add_i32(tmp, tmp, tmp2);
8683 tcg_temp_free_i32(tmp2);
8685 if (insn & (1 << 20))
8686 gen_logic_CC(tmp);
8687 store_reg(s, rd, tmp);
8688 break;
8689 case 4:
8690 /* 64 bit mul double accumulate (UMAAL) */
8691 ARCH(6);
8692 tmp = load_reg(s, rs);
8693 tmp2 = load_reg(s, rm);
8694 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8695 gen_addq_lo(s, tmp64, rn);
8696 gen_addq_lo(s, tmp64, rd);
8697 gen_storeq_reg(s, rn, rd, tmp64);
8698 tcg_temp_free_i64(tmp64);
8699 break;
8700 case 8: case 9: case 10: case 11:
8701 case 12: case 13: case 14: case 15:
8702 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
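/* Bit 22 selects signed (SMULL/SMLAL) vs unsigned (UMULL/UMLAL),
   bit 21 accumulate, bit 20 the S flag. */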
8703 tmp = load_reg(s, rs);
8704 tmp2 = load_reg(s, rm);
8705 if (insn & (1 << 22)) {
8706 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8707 } else {
8708 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8710 if (insn & (1 << 21)) { /* mult accumulate */
8711 TCGv_i32 al = load_reg(s, rn);
8712 TCGv_i32 ah = load_reg(s, rd);
8713 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
8714 tcg_temp_free_i32(al);
8715 tcg_temp_free_i32(ah);
8717 if (insn & (1 << 20)) {
8718 gen_logicq_cc(tmp, tmp2);
8720 store_reg(s, rn, tmp);
8721 store_reg(s, rd, tmp2);
8722 break;
8723 default:
8724 goto illegal_op;
8726 } else {
8727 rn = (insn >> 16) & 0xf;
8728 rd = (insn >> 12) & 0xf;
8729 if (insn & (1 << 23)) {
8730 /* load/store exclusive */
8731 int op2 = (insn >> 8) & 3;
8732 op1 = (insn >> 21) & 0x3;
8734 switch (op2) {
8735 case 0: /* lda/stl */
8736 if (op1 == 1) {
8737 goto illegal_op;
8739 ARCH(8);
8740 break;
8741 case 1: /* reserved */
8742 goto illegal_op;
8743 case 2: /* ldaex/stlex */
8744 ARCH(8);
8745 break;
8746 case 3: /* ldrex/strex */
8747 if (op1) {
8748 ARCH(6K);
8749 } else {
8750 ARCH(6);
8752 break;
8755 addr = tcg_temp_local_new_i32();
8756 load_reg_var(s, addr, rn);
8758 /* Since the emulation does not have barriers,
8759 the acquire/release semantics need no special
8760 handling */
8761 if (op2 == 0) {
8762 if (insn & (1 << 20)) {
8763 tmp = tcg_temp_new_i32();
8764 switch (op1) {
8765 case 0: /* lda */
8766 gen_aa32_ld32u(s, tmp, addr,
8767 get_mem_index(s));
8768 break;
8769 case 2: /* ldab */
8770 gen_aa32_ld8u(s, tmp, addr,
8771 get_mem_index(s));
8772 break;
8773 case 3: /* ldah */
8774 gen_aa32_ld16u(s, tmp, addr,
8775 get_mem_index(s));
8776 break;
8777 default:
8778 abort();
8780 store_reg(s, rd, tmp);
8781 } else {
8782 rm = insn & 0xf;
8783 tmp = load_reg(s, rm);
8784 switch (op1) {
8785 case 0: /* stl */
8786 gen_aa32_st32(s, tmp, addr,
8787 get_mem_index(s));
8788 break;
8789 case 2: /* stlb */
8790 gen_aa32_st8(s, tmp, addr,
8791 get_mem_index(s));
8792 break;
8793 case 3: /* stlh */
8794 gen_aa32_st16(s, tmp, addr,
8795 get_mem_index(s));
8796 break;
8797 default:
8798 abort();
8800 tcg_temp_free_i32(tmp);
8802 } else if (insn & (1 << 20)) {
8803 switch (op1) {
8804 case 0: /* ldrex */
8805 gen_load_exclusive(s, rd, 15, addr, 2);
8806 break;
8807 case 1: /* ldrexd */
8808 gen_load_exclusive(s, rd, rd + 1, addr, 3);
8809 break;
8810 case 2: /* ldrexb */
8811 gen_load_exclusive(s, rd, 15, addr, 0);
8812 break;
8813 case 3: /* ldrexh */
8814 gen_load_exclusive(s, rd, 15, addr, 1);
8815 break;
8816 default:
8817 abort();
8819 } else {
8820 rm = insn & 0xf;
8821 switch (op1) {
8822 case 0: /* strex */
8823 gen_store_exclusive(s, rd, rm, 15, addr, 2);
8824 break;
8825 case 1: /* strexd */
8826 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
8827 break;
8828 case 2: /* strexb */
8829 gen_store_exclusive(s, rd, rm, 15, addr, 0);
8830 break;
8831 case 3: /* strexh */
8832 gen_store_exclusive(s, rd, rm, 15, addr, 1);
8833 break;
8834 default:
8835 abort();
8838 tcg_temp_free_i32(addr);
8839 } else {
8840 /* SWP instruction */
8841 rm = (insn) & 0xf;
8843 /* ??? This is not really atomic. However we know
8844 we never have multiple CPUs running in parallel,
8845 so it is good enough. */
8846 addr = load_reg(s, rn);
8847 tmp = load_reg(s, rm);
8848 tmp2 = tcg_temp_new_i32();
8849 if (insn & (1 << 22)) {
8850 gen_aa32_ld8u(s, tmp2, addr, get_mem_index(s));
8851 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
8852 } else {
8853 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
8854 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8856 tcg_temp_free_i32(tmp);
8857 tcg_temp_free_i32(addr);
8858 store_reg(s, rd, tmp2);
8861 } else {
8862 int address_offset;
8863 bool load = insn & (1 << 20);
8864 bool doubleword = false;
8865 /* Misc load/store */
8866 rn = (insn >> 16) & 0xf;
8867 rd = (insn >> 12) & 0xf;
8869 if (!load && (sh & 2)) {
8870 /* doubleword */
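/* LDRD/STRD: sh bit 0 selects the store form; Rd must be even and the
   register pair is Rd, Rd+1. */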
8871 ARCH(5TE);
8872 if (rd & 1) {
8873 /* UNPREDICTABLE; we choose to UNDEF */
8874 goto illegal_op;
8876 load = (sh & 1) == 0;
8877 doubleword = true;
8880 addr = load_reg(s, rn);
8881 if (insn & (1 << 24))
8882 gen_add_datah_offset(s, insn, 0, addr);
8883 address_offset = 0;
8885 if (doubleword) {
8886 if (!load) {
8887 /* store */
8888 tmp = load_reg(s, rd);
8889 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8890 tcg_temp_free_i32(tmp);
8891 tcg_gen_addi_i32(addr, addr, 4);
8892 tmp = load_reg(s, rd + 1);
8893 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8894 tcg_temp_free_i32(tmp);
8895 } else {
8896 /* load */
8897 tmp = tcg_temp_new_i32();
8898 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8899 store_reg(s, rd, tmp);
8900 tcg_gen_addi_i32(addr, addr, 4);
8901 tmp = tcg_temp_new_i32();
8902 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8903 rd++;
8905 address_offset = -4;
8906 } else if (load) {
8907 /* load */
8908 tmp = tcg_temp_new_i32();
8909 switch (sh) {
8910 case 1:
8911 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8912 break;
8913 case 2:
8914 gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
8915 break;
8916 default:
8917 case 3:
8918 gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
8919 break;
8921 } else {
8922 /* store */
8923 tmp = load_reg(s, rd);
8924 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
8925 tcg_temp_free_i32(tmp);
8927 /* Perform base writeback before storing the loaded value to
8928 ensure correct behavior with overlapping index registers.
8929 ldrd with base writeback is undefined if the
8930 destination and index registers overlap. */
8931 if (!(insn & (1 << 24))) {
8932 gen_add_datah_offset(s, insn, address_offset, addr);
8933 store_reg(s, rn, addr);
8934 } else if (insn & (1 << 21)) {
8935 if (address_offset)
8936 tcg_gen_addi_i32(addr, addr, address_offset);
8937 store_reg(s, rn, addr);
8938 } else {
8939 tcg_temp_free_i32(addr);
8941 if (load) {
8942 /* Complete the load. */
8943 store_reg(s, rd, tmp);
8946 break;
8947 case 0x4:
8948 case 0x5:
8949 goto do_ldst;
8950 case 0x6:
8951 case 0x7:
8952 if (insn & (1 << 4)) {
8953 ARCH(6);
8954 /* Armv6 Media instructions. */
8955 rm = insn & 0xf;
8956 rn = (insn >> 16) & 0xf;
8957 rd = (insn >> 12) & 0xf;
8958 rs = (insn >> 8) & 0xf;
8959 switch ((insn >> 23) & 3) {
8960 case 0: /* Parallel add/subtract. */
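/* op1 selects the signed/unsigned/saturating/halving variant and sh the
   operation (ADD16/ASX/SAX/SUB16/ADD8/SUB8); other encodings are UNDEFined. */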
8961 op1 = (insn >> 20) & 7;
8962 tmp = load_reg(s, rn);
8963 tmp2 = load_reg(s, rm);
8964 sh = (insn >> 5) & 7;
8965 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8966 goto illegal_op;
8967 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
8968 tcg_temp_free_i32(tmp2);
8969 store_reg(s, rd, tmp);
8970 break;
8971 case 1:
8972 if ((insn & 0x00700020) == 0) {
8973 /* Halfword pack. */
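/* PKHBT keeps Rn[15:0] and takes the top half from Rm shifted left;
   PKHTB keeps Rn[31:16] and takes the bottom half from Rm shifted right. */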
8974 tmp = load_reg(s, rn);
8975 tmp2 = load_reg(s, rm);
8976 shift = (insn >> 7) & 0x1f;
8977 if (insn & (1 << 6)) {
8978 /* pkhtb */
8979 if (shift == 0)
8980 shift = 31;
8981 tcg_gen_sari_i32(tmp2, tmp2, shift);
8982 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8983 tcg_gen_ext16u_i32(tmp2, tmp2);
8984 } else {
8985 /* pkhbt */
8986 if (shift)
8987 tcg_gen_shli_i32(tmp2, tmp2, shift);
8988 tcg_gen_ext16u_i32(tmp, tmp);
8989 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8991 tcg_gen_or_i32(tmp, tmp, tmp2);
8992 tcg_temp_free_i32(tmp2);
8993 store_reg(s, rd, tmp);
8994 } else if ((insn & 0x00200020) == 0x00200000) {
8995 /* [us]sat */
8996 tmp = load_reg(s, rm);
8997 shift = (insn >> 7) & 0x1f;
8998 if (insn & (1 << 6)) {
8999 if (shift == 0)
9000 shift = 31;
9001 tcg_gen_sari_i32(tmp, tmp, shift);
9002 } else {
9003 tcg_gen_shli_i32(tmp, tmp, shift);
9005 sh = (insn >> 16) & 0x1f;
9006 tmp2 = tcg_const_i32(sh);
9007 if (insn & (1 << 22))
9008 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
9009 else
9010 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
9011 tcg_temp_free_i32(tmp2);
9012 store_reg(s, rd, tmp);
9013 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9014 /* [us]sat16 */
9015 tmp = load_reg(s, rm);
9016 sh = (insn >> 16) & 0x1f;
9017 tmp2 = tcg_const_i32(sh);
9018 if (insn & (1 << 22))
9019 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9020 else
9021 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9022 tcg_temp_free_i32(tmp2);
9023 store_reg(s, rd, tmp);
9024 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9025 /* Select bytes. */
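/* SEL: pick each result byte from Rn or Rm according to the GE flags. */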
9026 tmp = load_reg(s, rn);
9027 tmp2 = load_reg(s, rm);
9028 tmp3 = tcg_temp_new_i32();
9029 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
9030 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
9031 tcg_temp_free_i32(tmp3);
9032 tcg_temp_free_i32(tmp2);
9033 store_reg(s, rd, tmp);
9034 } else if ((insn & 0x000003e0) == 0x00000060) {
9035 tmp = load_reg(s, rm);
9036 shift = (insn >> 10) & 3;
9037 /* ??? In many cases it's not necessary to do a
9038 rotate; a shift is sufficient. */
9039 if (shift != 0)
9040 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9041 op1 = (insn >> 20) & 7;
9042 switch (op1) {
9043 case 0: gen_sxtb16(tmp); break;
9044 case 2: gen_sxtb(tmp); break;
9045 case 3: gen_sxth(tmp); break;
9046 case 4: gen_uxtb16(tmp); break;
9047 case 6: gen_uxtb(tmp); break;
9048 case 7: gen_uxth(tmp); break;
9049 default: goto illegal_op;
9051 if (rn != 15) {
9052 tmp2 = load_reg(s, rn);
9053 if ((op1 & 3) == 0) {
9054 gen_add16(tmp, tmp2);
9055 } else {
9056 tcg_gen_add_i32(tmp, tmp, tmp2);
9057 tcg_temp_free_i32(tmp2);
9060 store_reg(s, rd, tmp);
9061 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9062 /* rev */
9063 tmp = load_reg(s, rm);
9064 if (insn & (1 << 22)) {
9065 if (insn & (1 << 7)) {
9066 gen_revsh(tmp);
9067 } else {
9068 ARCH(6T2);
9069 gen_helper_rbit(tmp, tmp);
9071 } else {
9072 if (insn & (1 << 7))
9073 gen_rev16(tmp);
9074 else
9075 tcg_gen_bswap32_i32(tmp, tmp);
9077 store_reg(s, rd, tmp);
9078 } else {
9079 goto illegal_op;
9081 break;
9082 case 2: /* Multiplies (Type 3). */
9083 switch ((insn >> 20) & 0x7) {
9084 case 5:
9085 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9086 /* op2 not 00x or 11x : UNDEF */
9087 goto illegal_op;
9089 /* Signed multiply most significant [accumulate].
9090 (SMMUL, SMMLA, SMMLS) */
9091 tmp = load_reg(s, rm);
9092 tmp2 = load_reg(s, rs);
9093 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9095 if (rd != 15) {
9096 tmp = load_reg(s, rd);
9097 if (insn & (1 << 6)) {
9098 tmp64 = gen_subq_msw(tmp64, tmp);
9099 } else {
9100 tmp64 = gen_addq_msw(tmp64, tmp);
9103 if (insn & (1 << 5)) {
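/* The R bit: round by adding 0x80000000 before taking the high 32 bits. */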
9104 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9106 tcg_gen_shri_i64(tmp64, tmp64, 32);
9107 tmp = tcg_temp_new_i32();
9108 tcg_gen_extrl_i64_i32(tmp, tmp64);
9109 tcg_temp_free_i64(tmp64);
9110 store_reg(s, rn, tmp);
9111 break;
9112 case 0:
9113 case 4:
9114 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9115 if (insn & (1 << 7)) {
9116 goto illegal_op;
9118 tmp = load_reg(s, rm);
9119 tmp2 = load_reg(s, rs);
9120 if (insn & (1 << 5))
9121 gen_swap_half(tmp2);
9122 gen_smul_dual(tmp, tmp2);
9123 if (insn & (1 << 22)) {
9124 /* smlald, smlsld */
9125 TCGv_i64 tmp64_2;
9127 tmp64 = tcg_temp_new_i64();
9128 tmp64_2 = tcg_temp_new_i64();
9129 tcg_gen_ext_i32_i64(tmp64, tmp);
9130 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
9131 tcg_temp_free_i32(tmp);
9132 tcg_temp_free_i32(tmp2);
9133 if (insn & (1 << 6)) {
9134 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9135 } else {
9136 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9138 tcg_temp_free_i64(tmp64_2);
9139 gen_addq(s, tmp64, rd, rn);
9140 gen_storeq_reg(s, rd, rn, tmp64);
9141 tcg_temp_free_i64(tmp64);
9142 } else {
9143 /* smuad, smusd, smlad, smlsd */
9144 if (insn & (1 << 6)) {
9145 /* This subtraction cannot overflow. */
9146 tcg_gen_sub_i32(tmp, tmp, tmp2);
9147 } else {
9148 /* This addition cannot overflow 32 bits;
9149 * however it may overflow when considered as a
9150 * signed operation, in which case we must set
9151 * the Q flag. */
9153 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9155 tcg_temp_free_i32(tmp2);
9156 if (rd != 15)
9158 tmp2 = load_reg(s, rd);
9159 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9160 tcg_temp_free_i32(tmp2);
9162 store_reg(s, rn, tmp);
9164 break;
9165 case 1:
9166 case 3:
9167 /* SDIV, UDIV */
9168 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
9169 goto illegal_op;
9171 if (((insn >> 5) & 7) || (rd != 15)) {
9172 goto illegal_op;
9174 tmp = load_reg(s, rm);
9175 tmp2 = load_reg(s, rs);
9176 if (insn & (1 << 21)) {
9177 gen_helper_udiv(tmp, tmp, tmp2);
9178 } else {
9179 gen_helper_sdiv(tmp, tmp, tmp2);
9181 tcg_temp_free_i32(tmp2);
9182 store_reg(s, rn, tmp);
9183 break;
9184 default:
9185 goto illegal_op;
9187 break;
9188 case 3:
9189 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9190 switch (op1) {
9191 case 0: /* Unsigned sum of absolute differences. */
9192 ARCH(6);
9193 tmp = load_reg(s, rm);
9194 tmp2 = load_reg(s, rs);
9195 gen_helper_usad8(tmp, tmp, tmp2);
9196 tcg_temp_free_i32(tmp2);
9197 if (rd != 15) {
9198 tmp2 = load_reg(s, rd);
9199 tcg_gen_add_i32(tmp, tmp, tmp2);
9200 tcg_temp_free_i32(tmp2);
9202 store_reg(s, rn, tmp);
9203 break;
9204 case 0x20: case 0x24: case 0x28: case 0x2c:
9205 /* Bitfield insert/clear. */
9206 ARCH(6T2);
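/* BFI/BFC: bits [11:7] give the LSB, bits [20:16] the MSB;
   Rm == 15 means BFC (insert zeroes). */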
9207 shift = (insn >> 7) & 0x1f;
9208 i = (insn >> 16) & 0x1f;
9209 if (i < shift) {
9210 /* UNPREDICTABLE; we choose to UNDEF */
9211 goto illegal_op;
9213 i = i + 1 - shift;
9214 if (rm == 15) {
9215 tmp = tcg_temp_new_i32();
9216 tcg_gen_movi_i32(tmp, 0);
9217 } else {
9218 tmp = load_reg(s, rm);
9220 if (i != 32) {
9221 tmp2 = load_reg(s, rd);
9222 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
9223 tcg_temp_free_i32(tmp2);
9225 store_reg(s, rd, tmp);
9226 break;
9227 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9228 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
9229 ARCH(6T2);
9230 tmp = load_reg(s, rm);
9231 shift = (insn >> 7) & 0x1f;
9232 i = ((insn >> 16) & 0x1f) + 1;
9233 if (shift + i > 32)
9234 goto illegal_op;
9235 if (i < 32) {
9236 if (op1 & 0x20) {
9237 gen_ubfx(tmp, shift, (1u << i) - 1);
9238 } else {
9239 gen_sbfx(tmp, shift, i);
9242 store_reg(s, rd, tmp);
9243 break;
9244 default:
9245 goto illegal_op;
9247 break;
9249 break;
9251 do_ldst:
9252 /* Check for undefined extension instructions
9253 * per the ARM Bible, i.e.:
9254 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx */
9256 sh = (0xf << 20) | (0xf << 4);
9257 if (op1 == 0x7 && ((insn & sh) == sh))
9259 goto illegal_op;
9261 /* load/store byte/word */
9262 rn = (insn >> 16) & 0xf;
9263 rd = (insn >> 12) & 0xf;
9264 tmp2 = load_reg(s, rn);
9265 if ((insn & 0x01200000) == 0x00200000) {
9266 /* ldrt/strt */
9267 i = get_a32_user_mem_index(s);
9268 } else {
9269 i = get_mem_index(s);
9271 if (insn & (1 << 24))
9272 gen_add_data_offset(s, insn, tmp2);
9273 if (insn & (1 << 20)) {
9274 /* load */
9275 tmp = tcg_temp_new_i32();
9276 if (insn & (1 << 22)) {
9277 gen_aa32_ld8u(s, tmp, tmp2, i);
9278 } else {
9279 gen_aa32_ld32u(s, tmp, tmp2, i);
9281 } else {
9282 /* store */
9283 tmp = load_reg(s, rd);
9284 if (insn & (1 << 22)) {
9285 gen_aa32_st8(s, tmp, tmp2, i);
9286 } else {
9287 gen_aa32_st32(s, tmp, tmp2, i);
9289 tcg_temp_free_i32(tmp);
9291 if (!(insn & (1 << 24))) {
9292 gen_add_data_offset(s, insn, tmp2);
9293 store_reg(s, rn, tmp2);
9294 } else if (insn & (1 << 21)) {
9295 store_reg(s, rn, tmp2);
9296 } else {
9297 tcg_temp_free_i32(tmp2);
9299 if (insn & (1 << 20)) {
9300 /* Complete the load. */
9301 store_reg_from_load(s, rd, tmp);
9303 break;
9304 case 0x08:
9305 case 0x09:
9307 int j, n, loaded_base;
9308 bool exc_return = false;
9309 bool is_load = extract32(insn, 20, 1);
9310 bool user = false;
9311 TCGv_i32 loaded_var;
9312 /* load/store multiple words */
9313 /* XXX: store correct base if write back */
9314 if (insn & (1 << 22)) {
9315 /* LDM (user), LDM (exception return) and STM (user) */
9316 if (IS_USER(s))
9317 goto illegal_op; /* only usable in supervisor mode */
9319 if (is_load && extract32(insn, 15, 1)) {
9320 exc_return = true;
9321 } else {
9322 user = true;
9325 rn = (insn >> 16) & 0xf;
9326 addr = load_reg(s, rn);
9328 /* compute total size */
9329 loaded_base = 0;
9330 TCGV_UNUSED_I32(loaded_var);
9331 n = 0;
9332 for(i=0;i<16;i++) {
9333 if (insn & (1 << i))
9334 n++;
9336 /* XXX: test invalid n == 0 case ? */
9337 if (insn & (1 << 23)) {
9338 if (insn & (1 << 24)) {
9339 /* pre increment */
9340 tcg_gen_addi_i32(addr, addr, 4);
9341 } else {
9342 /* post increment */
9344 } else {
9345 if (insn & (1 << 24)) {
9346 /* pre decrement */
9347 tcg_gen_addi_i32(addr, addr, -(n * 4));
9348 } else {
9349 /* post decrement */
9350 if (n != 1)
9351 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9354 j = 0;
9355 for(i=0;i<16;i++) {
9356 if (insn & (1 << i)) {
9357 if (is_load) {
9358 /* load */
9359 tmp = tcg_temp_new_i32();
9360 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9361 if (user) {
9362 tmp2 = tcg_const_i32(i);
9363 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
9364 tcg_temp_free_i32(tmp2);
9365 tcg_temp_free_i32(tmp);
9366 } else if (i == rn) {
9367 loaded_var = tmp;
9368 loaded_base = 1;
9369 } else {
9370 store_reg_from_load(s, i, tmp);
9372 } else {
9373 /* store */
9374 if (i == 15) {
9375 /* special case: r15 = PC + 8 */
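/* s->pc has already advanced past this insn, so +4 yields the architectural PC + 8. */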
9376 val = (long)s->pc + 4;
9377 tmp = tcg_temp_new_i32();
9378 tcg_gen_movi_i32(tmp, val);
9379 } else if (user) {
9380 tmp = tcg_temp_new_i32();
9381 tmp2 = tcg_const_i32(i);
9382 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
9383 tcg_temp_free_i32(tmp2);
9384 } else {
9385 tmp = load_reg(s, i);
9387 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9388 tcg_temp_free_i32(tmp);
9390 j++;
9391 /* no need to add after the last transfer */
9392 if (j != n)
9393 tcg_gen_addi_i32(addr, addr, 4);
9396 if (insn & (1 << 21)) {
9397 /* write back */
9398 if (insn & (1 << 23)) {
9399 if (insn & (1 << 24)) {
9400 /* pre increment */
9401 } else {
9402 /* post increment */
9403 tcg_gen_addi_i32(addr, addr, 4);
9405 } else {
9406 if (insn & (1 << 24)) {
9407 /* pre decrement */
9408 if (n != 1)
9409 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9410 } else {
9411 /* post decrement */
9412 tcg_gen_addi_i32(addr, addr, -(n * 4));
9415 store_reg(s, rn, addr);
9416 } else {
9417 tcg_temp_free_i32(addr);
9419 if (loaded_base) {
9420 store_reg(s, rn, loaded_var);
9422 if (exc_return) {
9423 /* Restore CPSR from SPSR. */
9424 tmp = load_cpu_field(spsr);
9425 gen_helper_cpsr_write_eret(cpu_env, tmp);
9426 tcg_temp_free_i32(tmp);
9427 s->is_jmp = DISAS_JUMP;
9430 break;
9431 case 0xa:
9432 case 0xb:
9434 int32_t offset;
9436 /* branch (and link) */
9437 val = (int32_t)s->pc;
9438 if (insn & (1 << 24)) {
9439 tmp = tcg_temp_new_i32();
9440 tcg_gen_movi_i32(tmp, val);
9441 store_reg(s, 14, tmp);
9443 offset = sextract32(insn << 2, 0, 26);
9444 val += offset + 4;
9445 gen_jmp(s, val);
9447 break;
9448 case 0xc:
9449 case 0xd:
9450 case 0xe:
9451 if (((insn >> 8) & 0xe) == 10) {
9452 /* VFP. */
9453 if (disas_vfp_insn(s, insn)) {
9454 goto illegal_op;
9456 } else if (disas_coproc_insn(s, insn)) {
9457 /* Coprocessor. */
9458 goto illegal_op;
9460 break;
9461 case 0xf:
9462 /* swi */
9463 gen_set_pc_im(s, s->pc);
9464 s->svc_imm = extract32(insn, 0, 24);
9465 s->is_jmp = DISAS_SWI;
9466 break;
9467 default:
9468 illegal_op:
9469 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9470 default_exception_el(s));
9471 break;
9476 /* Return true if this is a Thumb-2 logical op. */
9477 static int
9478 thumb2_logic_op(int op)
9480 return (op < 8);
9483 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9484 then set condition code flags based on the result of the operation.
9485 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9486 to the high bit of T1.
9487 Returns zero if the opcode is valid. */
9489 static int
9490 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9491 TCGv_i32 t0, TCGv_i32 t1)
9493 int logic_cc;
9495 logic_cc = 0;
9496 switch (op) {
9497 case 0: /* and */
9498 tcg_gen_and_i32(t0, t0, t1);
9499 logic_cc = conds;
9500 break;
9501 case 1: /* bic */
9502 tcg_gen_andc_i32(t0, t0, t1);
9503 logic_cc = conds;
9504 break;
9505 case 2: /* orr */
9506 tcg_gen_or_i32(t0, t0, t1);
9507 logic_cc = conds;
9508 break;
9509 case 3: /* orn */
9510 tcg_gen_orc_i32(t0, t0, t1);
9511 logic_cc = conds;
9512 break;
9513 case 4: /* eor */
9514 tcg_gen_xor_i32(t0, t0, t1);
9515 logic_cc = conds;
9516 break;
9517 case 8: /* add */
9518 if (conds)
9519 gen_add_CC(t0, t0, t1);
9520 else
9521 tcg_gen_add_i32(t0, t0, t1);
9522 break;
9523 case 10: /* adc */
9524 if (conds)
9525 gen_adc_CC(t0, t0, t1);
9526 else
9527 gen_adc(t0, t1);
9528 break;
9529 case 11: /* sbc */
9530 if (conds) {
9531 gen_sbc_CC(t0, t0, t1);
9532 } else {
9533 gen_sub_carry(t0, t0, t1);
9535 break;
9536 case 13: /* sub */
9537 if (conds)
9538 gen_sub_CC(t0, t0, t1);
9539 else
9540 tcg_gen_sub_i32(t0, t0, t1);
9541 break;
9542 case 14: /* rsb */
9543 if (conds)
9544 gen_sub_CC(t0, t1, t0);
9545 else
9546 tcg_gen_sub_i32(t0, t1, t0);
9547 break;
9548 default: /* 5, 6, 7, 9, 12, 15. */
9549 return 1;
9551 if (logic_cc) {
9552 gen_logic_CC(t0);
9553 if (shifter_out)
9554 gen_set_CF_bit31(t1);
9556 return 0;
9559 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9560 is not legal. */
9561 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9563 uint32_t insn, imm, shift, offset;
9564 uint32_t rd, rn, rm, rs;
9565 TCGv_i32 tmp;
9566 TCGv_i32 tmp2;
9567 TCGv_i32 tmp3;
9568 TCGv_i32 addr;
9569 TCGv_i64 tmp64;
9570 int op;
9571 int shiftop;
9572 int conds;
9573 int logic_cc;
9575 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9576 || arm_dc_feature(s, ARM_FEATURE_M))) {
9577 /* Thumb-1 cores may need to treat bl and blx as a pair of
9578 16-bit instructions to get correct prefetch abort behavior. */
9579 insn = insn_hw1;
9580 if ((insn & (1 << 12)) == 0) {
9581 ARCH(5);
9582 /* Second half of blx. */
9583 offset = ((insn & 0x7ff) << 1);
9584 tmp = load_reg(s, 14);
9585 tcg_gen_addi_i32(tmp, tmp, offset);
9586 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9588 tmp2 = tcg_temp_new_i32();
9589 tcg_gen_movi_i32(tmp2, s->pc | 1);
9590 store_reg(s, 14, tmp2);
9591 gen_bx(s, tmp);
9592 return 0;
9594 if (insn & (1 << 11)) {
9595 /* Second half of bl. */
9596 offset = ((insn & 0x7ff) << 1) | 1;
9597 tmp = load_reg(s, 14);
9598 tcg_gen_addi_i32(tmp, tmp, offset);
9600 tmp2 = tcg_temp_new_i32();
9601 tcg_gen_movi_i32(tmp2, s->pc | 1);
9602 store_reg(s, 14, tmp2);
9603 gen_bx(s, tmp);
9604 return 0;
9606 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9607 /* Instruction spans a page boundary. Implement it as two
9608 16-bit instructions in case the second half causes a
9609 prefetch abort. */
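/* First half of BL/BLX: set LR to PC plus the sign-extended upper offset;
   the second halfword adds the remaining offset and branches. */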
9610 offset = ((int32_t)insn << 21) >> 9;
9611 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9612 return 0;
9614 /* Fall through to 32-bit decode. */
9617 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
9618 s->pc += 2;
9619 insn |= (uint32_t)insn_hw1 << 16;
9621 if ((insn & 0xf800e800) != 0xf000e800) {
9622 ARCH(6T2);
9625 rn = (insn >> 16) & 0xf;
9626 rs = (insn >> 12) & 0xf;
9627 rd = (insn >> 8) & 0xf;
9628 rm = insn & 0xf;
9629 switch ((insn >> 25) & 0xf) {
9630 case 0: case 1: case 2: case 3:
9631 /* 16-bit instructions. Should never happen. */
9632 abort();
9633 case 4:
9634 if (insn & (1 << 22)) {
9635 /* Other load/store, table branch. */
9636 if (insn & 0x01200000) {
9637 /* Load/store doubleword. */
9638 if (rn == 15) {
9639 addr = tcg_temp_new_i32();
9640 tcg_gen_movi_i32(addr, s->pc & ~3);
9641 } else {
9642 addr = load_reg(s, rn);
9644 offset = (insn & 0xff) * 4;
9645 if ((insn & (1 << 23)) == 0)
9646 offset = -offset;
9647 if (insn & (1 << 24)) {
9648 tcg_gen_addi_i32(addr, addr, offset);
9649 offset = 0;
9651 if (insn & (1 << 20)) {
9652 /* ldrd */
9653 tmp = tcg_temp_new_i32();
9654 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9655 store_reg(s, rs, tmp);
9656 tcg_gen_addi_i32(addr, addr, 4);
9657 tmp = tcg_temp_new_i32();
9658 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9659 store_reg(s, rd, tmp);
9660 } else {
9661 /* strd */
9662 tmp = load_reg(s, rs);
9663 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9664 tcg_temp_free_i32(tmp);
9665 tcg_gen_addi_i32(addr, addr, 4);
9666 tmp = load_reg(s, rd);
9667 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9668 tcg_temp_free_i32(tmp);
9670 if (insn & (1 << 21)) {
9671 /* Base writeback. */
9672 if (rn == 15)
9673 goto illegal_op;
9674 tcg_gen_addi_i32(addr, addr, offset - 4);
9675 store_reg(s, rn, addr);
9676 } else {
9677 tcg_temp_free_i32(addr);
9679 } else if ((insn & (1 << 23)) == 0) {
9680 /* Load/store exclusive word. */
9681 addr = tcg_temp_local_new_i32();
9682 load_reg_var(s, addr, rn);
9683 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
9684 if (insn & (1 << 20)) {
9685 gen_load_exclusive(s, rs, 15, addr, 2);
9686 } else {
9687 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9689 tcg_temp_free_i32(addr);
9690 } else if ((insn & (7 << 5)) == 0) {
9691 /* Table Branch. */
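/* TBB/TBH: load a byte (TBB) or halfword (TBH) from the table at Rn indexed
   by Rm (scaled by 2 for TBH); the value is doubled and added to PC to form
   the branch target. */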
9692 if (rn == 15) {
9693 addr = tcg_temp_new_i32();
9694 tcg_gen_movi_i32(addr, s->pc);
9695 } else {
9696 addr = load_reg(s, rn);
9698 tmp = load_reg(s, rm);
9699 tcg_gen_add_i32(addr, addr, tmp);
9700 if (insn & (1 << 4)) {
9701 /* tbh */
9702 tcg_gen_add_i32(addr, addr, tmp);
9703 tcg_temp_free_i32(tmp);
9704 tmp = tcg_temp_new_i32();
9705 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9706 } else { /* tbb */
9707 tcg_temp_free_i32(tmp);
9708 tmp = tcg_temp_new_i32();
9709 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9711 tcg_temp_free_i32(addr);
9712 tcg_gen_shli_i32(tmp, tmp, 1);
9713 tcg_gen_addi_i32(tmp, tmp, s->pc);
9714 store_reg(s, 15, tmp);
9715 } else {
9716 int op2 = (insn >> 6) & 0x3;
9717 op = (insn >> 4) & 0x3;
9718 switch (op2) {
9719 case 0:
9720 goto illegal_op;
9721 case 1:
9722 /* Load/store exclusive byte/halfword/doubleword */
9723 if (op == 2) {
9724 goto illegal_op;
9726 ARCH(7);
9727 break;
9728 case 2:
9729 /* Load-acquire/store-release */
9730 if (op == 3) {
9731 goto illegal_op;
9733 /* Fall through */
9734 case 3:
9735 /* Load-acquire/store-release exclusive */
9736 ARCH(8);
9737 break;
9739 addr = tcg_temp_local_new_i32();
9740 load_reg_var(s, addr, rn);
9741 if (!(op2 & 1)) {
9742 if (insn & (1 << 20)) {
9743 tmp = tcg_temp_new_i32();
9744 switch (op) {
9745 case 0: /* ldab */
9746 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9747 break;
9748 case 1: /* ldah */
9749 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9750 break;
9751 case 2: /* lda */
9752 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9753 break;
9754 default:
9755 abort();
9757 store_reg(s, rs, tmp);
9758 } else {
9759 tmp = load_reg(s, rs);
9760 switch (op) {
9761 case 0: /* stlb */
9762 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
9763 break;
9764 case 1: /* stlh */
9765 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
9766 break;
9767 case 2: /* stl */
9768 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9769 break;
9770 default:
9771 abort();
9773 tcg_temp_free_i32(tmp);
9775 } else if (insn & (1 << 20)) {
9776 gen_load_exclusive(s, rs, rd, addr, op);
9777 } else {
9778 gen_store_exclusive(s, rm, rs, rd, addr, op);
9780 tcg_temp_free_i32(addr);
9782 } else {
9783 /* Load/store multiple, RFE, SRS. */
9784 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
9785 /* RFE, SRS: not available in user mode or on M profile */
9786 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9787 goto illegal_op;
9789 if (insn & (1 << 20)) {
9790 /* rfe */
9791 addr = load_reg(s, rn);
9792 if ((insn & (1 << 24)) == 0)
9793 tcg_gen_addi_i32(addr, addr, -8);
9794 /* Load PC into tmp and CPSR into tmp2. */
9795 tmp = tcg_temp_new_i32();
9796 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9797 tcg_gen_addi_i32(addr, addr, 4);
9798 tmp2 = tcg_temp_new_i32();
9799 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9800 if (insn & (1 << 21)) {
9801 /* Base writeback. */
9802 if (insn & (1 << 24)) {
9803 tcg_gen_addi_i32(addr, addr, 4);
9804 } else {
9805 tcg_gen_addi_i32(addr, addr, -4);
9807 store_reg(s, rn, addr);
9808 } else {
9809 tcg_temp_free_i32(addr);
9811 gen_rfe(s, tmp, tmp2);
9812 } else {
9813 /* srs */
9814 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9815 insn & (1 << 21));
9817 } else {
9818 int i, loaded_base = 0;
9819 TCGv_i32 loaded_var;
9820 /* Load/store multiple. */
9821 addr = load_reg(s, rn);
9822 offset = 0;
9823 for (i = 0; i < 16; i++) {
9824 if (insn & (1 << i))
9825 offset += 4;
9827 if (insn & (1 << 24)) {
9828 tcg_gen_addi_i32(addr, addr, -offset);
9831 TCGV_UNUSED_I32(loaded_var);
9832 for (i = 0; i < 16; i++) {
9833 if ((insn & (1 << i)) == 0)
9834 continue;
9835 if (insn & (1 << 20)) {
9836 /* Load. */
9837 tmp = tcg_temp_new_i32();
9838 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9839 if (i == 15) {
9840 gen_bx(s, tmp);
9841 } else if (i == rn) {
9842 loaded_var = tmp;
9843 loaded_base = 1;
9844 } else {
9845 store_reg(s, i, tmp);
9847 } else {
9848 /* Store. */
9849 tmp = load_reg(s, i);
9850 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9851 tcg_temp_free_i32(tmp);
9853 tcg_gen_addi_i32(addr, addr, 4);
9855 if (loaded_base) {
9856 store_reg(s, rn, loaded_var);
9858 if (insn & (1 << 21)) {
9859 /* Base register writeback. */
9860 if (insn & (1 << 24)) {
9861 tcg_gen_addi_i32(addr, addr, -offset);
9863 /* Fault if writeback register is in register list. */
9864 if (insn & (1 << rn))
9865 goto illegal_op;
9866 store_reg(s, rn, addr);
9867 } else {
9868 tcg_temp_free_i32(addr);
9872 break;
9873 case 5:
9875 op = (insn >> 21) & 0xf;
9876 if (op == 6) {
9877 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9878 goto illegal_op;
9880 /* Halfword pack. */
9881 tmp = load_reg(s, rn);
9882 tmp2 = load_reg(s, rm);
9883 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9884 if (insn & (1 << 5)) {
9885 /* pkhtb */
9886 if (shift == 0)
9887 shift = 31;
9888 tcg_gen_sari_i32(tmp2, tmp2, shift);
9889 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9890 tcg_gen_ext16u_i32(tmp2, tmp2);
9891 } else {
9892 /* pkhbt */
9893 if (shift)
9894 tcg_gen_shli_i32(tmp2, tmp2, shift);
9895 tcg_gen_ext16u_i32(tmp, tmp);
9896 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9898 tcg_gen_or_i32(tmp, tmp, tmp2);
9899 tcg_temp_free_i32(tmp2);
9900 store_reg(s, rd, tmp);
9901 } else {
9902 /* Data processing register constant shift. */
9903 if (rn == 15) {
9904 tmp = tcg_temp_new_i32();
9905 tcg_gen_movi_i32(tmp, 0);
9906 } else {
9907 tmp = load_reg(s, rn);
9909 tmp2 = load_reg(s, rm);
9911 shiftop = (insn >> 4) & 3;
9912 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9913 conds = (insn & (1 << 20)) != 0;
9914 logic_cc = (conds && thumb2_logic_op(op));
9915 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9916 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9917 goto illegal_op;
9918 tcg_temp_free_i32(tmp2);
9919 if (rd != 15) {
9920 store_reg(s, rd, tmp);
9921 } else {
9922 tcg_temp_free_i32(tmp);
9925 break;
9926 case 13: /* Misc data processing. */
9927 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9928 if (op < 4 && (insn & 0xf000) != 0xf000)
9929 goto illegal_op;
9930 switch (op) {
9931 case 0: /* Register controlled shift. */
9932 tmp = load_reg(s, rn);
9933 tmp2 = load_reg(s, rm);
9934 if ((insn & 0x70) != 0)
9935 goto illegal_op;
9936 op = (insn >> 21) & 3;
9937 logic_cc = (insn & (1 << 20)) != 0;
9938 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9939 if (logic_cc)
9940 gen_logic_CC(tmp);
9941 store_reg_bx(s, rd, tmp);
9942 break;
9943 case 1: /* Sign/zero extend. */
9944 op = (insn >> 20) & 7;
9945 switch (op) {
9946 case 0: /* SXTAH, SXTH */
9947 case 1: /* UXTAH, UXTH */
9948 case 4: /* SXTAB, SXTB */
9949 case 5: /* UXTAB, UXTB */
9950 break;
9951 case 2: /* SXTAB16, SXTB16 */
9952 case 3: /* UXTAB16, UXTB16 */
9953 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9954 goto illegal_op;
9956 break;
9957 default:
9958 goto illegal_op;
9960 if (rn != 15) {
9961 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9962 goto illegal_op;
9965 tmp = load_reg(s, rm);
9966 shift = (insn >> 4) & 3;
9967 /* ??? In many cases it's not necessary to do a
9968 rotate; a shift is sufficient. */
9969 if (shift != 0)
9970 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9971 op = (insn >> 20) & 7;
9972 switch (op) {
9973 case 0: gen_sxth(tmp); break;
9974 case 1: gen_uxth(tmp); break;
9975 case 2: gen_sxtb16(tmp); break;
9976 case 3: gen_uxtb16(tmp); break;
9977 case 4: gen_sxtb(tmp); break;
9978 case 5: gen_uxtb(tmp); break;
9979 default:
9980 g_assert_not_reached();
9982 if (rn != 15) {
9983 tmp2 = load_reg(s, rn);
9984 if ((op >> 1) == 1) {
9985 gen_add16(tmp, tmp2);
9986 } else {
9987 tcg_gen_add_i32(tmp, tmp, tmp2);
9988 tcg_temp_free_i32(tmp2);
9991 store_reg(s, rd, tmp);
9992 break;
9993 case 2: /* SIMD add/subtract. */
9994 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9995 goto illegal_op;
9997 op = (insn >> 20) & 7;
9998 shift = (insn >> 4) & 7;
9999 if ((op & 3) == 3 || (shift & 3) == 3)
10000 goto illegal_op;
10001 tmp = load_reg(s, rn);
10002 tmp2 = load_reg(s, rm);
10003 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
10004 tcg_temp_free_i32(tmp2);
10005 store_reg(s, rd, tmp);
10006 break;
10007 case 3: /* Other data processing. */
10008 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10009 if (op < 4) {
10010 /* Saturating add/subtract. */
10011 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10012 goto illegal_op;
10014 tmp = load_reg(s, rn);
10015 tmp2 = load_reg(s, rm);
10016 if (op & 1)
10017 gen_helper_double_saturate(tmp, cpu_env, tmp);
10018 if (op & 2)
10019 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
10020 else
10021 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
10022 tcg_temp_free_i32(tmp2);
10023 } else {
10024 switch (op) {
10025 case 0x0a: /* rbit */
10026 case 0x08: /* rev */
10027 case 0x09: /* rev16 */
10028 case 0x0b: /* revsh */
10029 case 0x18: /* clz */
10030 break;
10031 case 0x10: /* sel */
10032 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10033 goto illegal_op;
10035 break;
10036 case 0x20: /* crc32/crc32c */
10037 case 0x21:
10038 case 0x22:
10039 case 0x28:
10040 case 0x29:
10041 case 0x2a:
10042 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10043 goto illegal_op;
10045 break;
10046 default:
10047 goto illegal_op;
10049 tmp = load_reg(s, rn);
10050 switch (op) {
10051 case 0x0a: /* rbit */
10052 gen_helper_rbit(tmp, tmp);
10053 break;
10054 case 0x08: /* rev */
10055 tcg_gen_bswap32_i32(tmp, tmp);
10056 break;
10057 case 0x09: /* rev16 */
10058 gen_rev16(tmp);
10059 break;
10060 case 0x0b: /* revsh */
10061 gen_revsh(tmp);
10062 break;
10063 case 0x10: /* sel */
10064 tmp2 = load_reg(s, rm);
10065 tmp3 = tcg_temp_new_i32();
10066 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
10067 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
10068 tcg_temp_free_i32(tmp3);
10069 tcg_temp_free_i32(tmp2);
10070 break;
10071 case 0x18: /* clz */
10072 gen_helper_clz(tmp, tmp);
10073 break;
10074 case 0x20:
10075 case 0x21:
10076 case 0x22:
10077 case 0x28:
10078 case 0x29:
10079 case 0x2a:
10081 /* crc32/crc32c */
10082 uint32_t sz = op & 0x3;
10083 uint32_t c = op & 0x8;
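/* sz selects the operand size (1 << sz bytes); c selects crc32c (Castagnoli) vs crc32. */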
10085 tmp2 = load_reg(s, rm);
10086 if (sz == 0) {
10087 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10088 } else if (sz == 1) {
10089 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10091 tmp3 = tcg_const_i32(1 << sz);
10092 if (c) {
10093 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10094 } else {
10095 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10097 tcg_temp_free_i32(tmp2);
10098 tcg_temp_free_i32(tmp3);
10099 break;
10101 default:
10102 g_assert_not_reached();
10105 store_reg(s, rd, tmp);
10106 break;
10107 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
10108 switch ((insn >> 20) & 7) {
10109 case 0: /* 32 x 32 -> 32 */
10110 case 7: /* Unsigned sum of absolute differences. */
10111 break;
10112 case 1: /* 16 x 16 -> 32 */
10113 case 2: /* Dual multiply add. */
10114 case 3: /* 32 * 16 -> 32msb */
10115 case 4: /* Dual multiply subtract. */
10116 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10117 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10118 goto illegal_op;
10120 break;
10122 op = (insn >> 4) & 0xf;
10123 tmp = load_reg(s, rn);
10124 tmp2 = load_reg(s, rm);
10125 switch ((insn >> 20) & 7) {
10126 case 0: /* 32 x 32 -> 32 */
10127 tcg_gen_mul_i32(tmp, tmp, tmp2);
10128 tcg_temp_free_i32(tmp2);
10129 if (rs != 15) {
10130 tmp2 = load_reg(s, rs);
10131 if (op)
10132 tcg_gen_sub_i32(tmp, tmp2, tmp);
10133 else
10134 tcg_gen_add_i32(tmp, tmp, tmp2);
10135 tcg_temp_free_i32(tmp2);
10137 break;
10138 case 1: /* 16 x 16 -> 32 */
10139 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10140 tcg_temp_free_i32(tmp2);
10141 if (rs != 15) {
10142 tmp2 = load_reg(s, rs);
10143 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10144 tcg_temp_free_i32(tmp2);
10146 break;
10147 case 2: /* Dual multiply add. */
10148 case 4: /* Dual multiply subtract. */
10149 if (op)
10150 gen_swap_half(tmp2);
10151 gen_smul_dual(tmp, tmp2);
10152 if (insn & (1 << 22)) {
10153 /* This subtraction cannot overflow. */
10154 tcg_gen_sub_i32(tmp, tmp, tmp2);
10155 } else {
10156 /* This addition cannot overflow 32 bits;
10157 * however it may overflow when considered as a signed
10158 * operation, in which case we must set the Q flag. */
10160 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10162 tcg_temp_free_i32(tmp2);
10163 if (rs != 15)
10165 tmp2 = load_reg(s, rs);
10166 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10167 tcg_temp_free_i32(tmp2);
10169 break;
10170 case 3: /* 32 * 16 -> 32msb */
10171 if (op)
10172 tcg_gen_sari_i32(tmp2, tmp2, 16);
10173 else
10174 gen_sxth(tmp2);
10175 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10176 tcg_gen_shri_i64(tmp64, tmp64, 16);
10177 tmp = tcg_temp_new_i32();
10178 tcg_gen_extrl_i64_i32(tmp, tmp64);
10179 tcg_temp_free_i64(tmp64);
10180 if (rs != 15)
10182 tmp2 = load_reg(s, rs);
10183 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10184 tcg_temp_free_i32(tmp2);
10186 break;
10187 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10188 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10189 if (rs != 15) {
10190 tmp = load_reg(s, rs);
10191 if (insn & (1 << 20)) {
10192 tmp64 = gen_addq_msw(tmp64, tmp);
10193 } else {
10194 tmp64 = gen_subq_msw(tmp64, tmp);
10197 if (insn & (1 << 4)) {
10198 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10200 tcg_gen_shri_i64(tmp64, tmp64, 32);
10201 tmp = tcg_temp_new_i32();
10202 tcg_gen_extrl_i64_i32(tmp, tmp64);
10203 tcg_temp_free_i64(tmp64);
10204 break;
10205 case 7: /* Unsigned sum of absolute differences. */
10206 gen_helper_usad8(tmp, tmp, tmp2);
10207 tcg_temp_free_i32(tmp2);
10208 if (rs != 15) {
10209 tmp2 = load_reg(s, rs);
10210 tcg_gen_add_i32(tmp, tmp, tmp2);
10211 tcg_temp_free_i32(tmp2);
10213 break;
10215 store_reg(s, rd, tmp);
10216 break;
10217 case 6: case 7: /* 64-bit multiply, Divide. */
10218 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
10219 tmp = load_reg(s, rn);
10220 tmp2 = load_reg(s, rm);
10221 if ((op & 0x50) == 0x10) {
10222 /* sdiv, udiv */
10223 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
10224 goto illegal_op;
10226 if (op & 0x20)
10227 gen_helper_udiv(tmp, tmp, tmp2);
10228 else
10229 gen_helper_sdiv(tmp, tmp, tmp2);
10230 tcg_temp_free_i32(tmp2);
10231 store_reg(s, rd, tmp);
10232 } else if ((op & 0xe) == 0xc) {
10233 /* Dual multiply accumulate long. */
10234 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10235 tcg_temp_free_i32(tmp);
10236 tcg_temp_free_i32(tmp2);
10237 goto illegal_op;
10239 if (op & 1)
10240 gen_swap_half(tmp2);
10241 gen_smul_dual(tmp, tmp2);
10242 if (op & 0x10) {
10243 tcg_gen_sub_i32(tmp, tmp, tmp2);
10244 } else {
10245 tcg_gen_add_i32(tmp, tmp, tmp2);
10247 tcg_temp_free_i32(tmp2);
10248 /* BUGFIX */
10249 tmp64 = tcg_temp_new_i64();
10250 tcg_gen_ext_i32_i64(tmp64, tmp);
10251 tcg_temp_free_i32(tmp);
10252 gen_addq(s, tmp64, rs, rd);
10253 gen_storeq_reg(s, rs, rd, tmp64);
10254 tcg_temp_free_i64(tmp64);
10255 } else {
10256 if (op & 0x20) {
10257 /* Unsigned 64-bit multiply */
10258 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
10259 } else {
10260 if (op & 8) {
10261 /* smlalxy */
10262 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10263 tcg_temp_free_i32(tmp2);
10264 tcg_temp_free_i32(tmp);
10265 goto illegal_op;
10267 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10268 tcg_temp_free_i32(tmp2);
10269 tmp64 = tcg_temp_new_i64();
10270 tcg_gen_ext_i32_i64(tmp64, tmp);
10271 tcg_temp_free_i32(tmp);
10272 } else {
10273 /* Signed 64-bit multiply */
10274 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10277 if (op & 4) {
10278 /* umaal */
10279 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10280 tcg_temp_free_i64(tmp64);
10281 goto illegal_op;
10283 gen_addq_lo(s, tmp64, rs);
10284 gen_addq_lo(s, tmp64, rd);
10285 } else if (op & 0x40) {
10286 /* 64-bit accumulate. */
10287 gen_addq(s, tmp64, rs, rd);
10289 gen_storeq_reg(s, rs, rd, tmp64);
10290 tcg_temp_free_i64(tmp64);
10292 break;
10294 break;
10295 case 6: case 7: case 14: case 15:
10296 /* Coprocessor. */
10297 if (((insn >> 24) & 3) == 3) {
10298 /* Translate into the equivalent ARM encoding. */
10299 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
10300 if (disas_neon_data_insn(s, insn)) {
10301 goto illegal_op;
10303 } else if (((insn >> 8) & 0xe) == 10) {
10304 if (disas_vfp_insn(s, insn)) {
10305 goto illegal_op;
10307 } else {
10308 if (insn & (1 << 28))
10309 goto illegal_op;
10310 if (disas_coproc_insn(s, insn)) {
10311 goto illegal_op;
10314 break;
10315 case 8: case 9: case 10: case 11:
10316 if (insn & (1 << 15)) {
10317 /* Branches, misc control. */
10318 if (insn & 0x5000) {
10319 /* Unconditional branch. */
10320 /* signextend(hw1[10:0]) -> offset[:12]. */
10321 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10322 /* hw1[10:0] -> offset[11:1]. */
10323 offset |= (insn & 0x7ff) << 1;
10324 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10325 offset[24:22] already have the same value because of the
10326 sign extension above. */
10327 offset ^= ((~insn) & (1 << 13)) << 10;
10328 offset ^= ((~insn) & (1 << 11)) << 11;
10330 if (insn & (1 << 14)) {
10331 /* Branch and link. */
10332 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
10335 offset += s->pc;
10336 if (insn & (1 << 12)) {
10337 /* b/bl */
10338 gen_jmp(s, offset);
10339 } else {
10340 /* blx */
10341 offset &= ~(uint32_t)2;
10342 /* thumb2 bx, no need to check */
10343 gen_bx_im(s, offset);
10345 } else if (((insn >> 23) & 7) == 7) {
10346 /* Misc control */
10347 if (insn & (1 << 13))
10348 goto illegal_op;
10350 if (insn & (1 << 26)) {
10351 if (!(insn & (1 << 20))) {
10352 /* Hypervisor call (v7) */
10353 int imm16 = extract32(insn, 16, 4) << 12
10354 | extract32(insn, 0, 12);
10355 ARCH(7);
10356 if (IS_USER(s)) {
10357 goto illegal_op;
10359 gen_hvc(s, imm16);
10360 } else {
10361 /* Secure monitor call (v6+) */
10362 ARCH(6K);
10363 if (IS_USER(s)) {
10364 goto illegal_op;
10366 gen_smc(s);
10368 } else {
10369 op = (insn >> 20) & 7;
10370 switch (op) {
10371 case 0: /* msr cpsr. */
10372 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10373 tmp = load_reg(s, rn);
10374 addr = tcg_const_i32(insn & 0xff);
10375 gen_helper_v7m_msr(cpu_env, addr, tmp);
10376 tcg_temp_free_i32(addr);
10377 tcg_temp_free_i32(tmp);
10378 gen_lookup_tb(s);
10379 break;
10381 /* fall through */
10382 case 1: /* msr spsr. */
10383 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10384 goto illegal_op;
10387 if (extract32(insn, 5, 1)) {
10388 /* MSR (banked) */
10389 int sysm = extract32(insn, 8, 4) |
10390 (extract32(insn, 4, 1) << 4);
10391 int r = op & 1;
10393 gen_msr_banked(s, r, sysm, rm);
10394 break;
10397 /* MSR (for PSRs) */
10398 tmp = load_reg(s, rn);
10399 if (gen_set_psr(s,
10400 msr_mask(s, (insn >> 8) & 0xf, op == 1),
10401 op == 1, tmp))
10402 goto illegal_op;
10403 break;
10404 case 2: /* cps, nop-hint. */
10405 if (((insn >> 8) & 7) == 0) {
10406 gen_nop_hint(s, insn & 0xff);
10408 /* Implemented as NOP in user mode. */
10409 if (IS_USER(s))
10410 break;
10411 offset = 0;
10412 imm = 0;
10413 if (insn & (1 << 10)) {
10414 if (insn & (1 << 7))
10415 offset |= CPSR_A;
10416 if (insn & (1 << 6))
10417 offset |= CPSR_I;
10418 if (insn & (1 << 5))
10419 offset |= CPSR_F;
10420 if (insn & (1 << 9))
10421 imm = CPSR_A | CPSR_I | CPSR_F;
10423 if (insn & (1 << 8)) {
10424 offset |= 0x1f;
10425 imm |= (insn & 0x1f);
10427 if (offset) {
10428 gen_set_psr_im(s, offset, 0, imm);
10430 break;
10431 case 3: /* Special control operations. */
10432 ARCH(7);
10433 op = (insn >> 4) & 0xf;
10434 switch (op) {
10435 case 2: /* clrex */
10436 gen_clrex(s);
10437 break;
10438 case 4: /* dsb */
10439 case 5: /* dmb */
10440 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
10441 break;
10442 case 6: /* isb */
10443 /* We need to break the TB after this insn
10444 * to execute self-modifying code correctly
10445 * and also to take any pending interrupts
10446 * immediately. */
10448 gen_lookup_tb(s);
10449 break;
10450 default:
10451 goto illegal_op;
10453 break;
10454 case 4: /* bxj */
10455 /* Trivial implementation equivalent to bx. */
10456 tmp = load_reg(s, rn);
10457 gen_bx(s, tmp);
10458 break;
10459 case 5: /* Exception return. */
10460 if (IS_USER(s)) {
10461 goto illegal_op;
10463 if (rn != 14 || rd != 15) {
10464 goto illegal_op;
10466 tmp = load_reg(s, rn);
10467 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10468 gen_exception_return(s, tmp);
10469 break;
10470 case 6: /* MRS */
10471 if (extract32(insn, 5, 1)) {
10472 /* MRS (banked) */
10473 int sysm = extract32(insn, 16, 4) |
10474 (extract32(insn, 4, 1) << 4);
10476 gen_mrs_banked(s, 0, sysm, rd);
10477 break;
10480 /* mrs cpsr */
10481 tmp = tcg_temp_new_i32();
10482 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10483 addr = tcg_const_i32(insn & 0xff);
10484 gen_helper_v7m_mrs(tmp, cpu_env, addr);
10485 tcg_temp_free_i32(addr);
10486 } else {
10487 gen_helper_cpsr_read(tmp, cpu_env);
10489 store_reg(s, rd, tmp);
10490 break;
10491 case 7: /* MRS */
10492 if (extract32(insn, 5, 1)) {
10493 /* MRS (banked) */
10494 int sysm = extract32(insn, 16, 4) |
10495 (extract32(insn, 4, 1) << 4);
10497 gen_mrs_banked(s, 1, sysm, rd);
10498 break;
10501 /* mrs spsr. */
10502 /* Not accessible in user mode. */
10503 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
10504 goto illegal_op;
10506 tmp = load_cpu_field(spsr);
10507 store_reg(s, rd, tmp);
10508 break;
10511 } else {
10512 /* Conditional branch. */
10513 op = (insn >> 22) & 0xf;
10514 /* Generate a conditional jump to next instruction. */
10515 s->condlabel = gen_new_label();
10516 arm_gen_test_cc(op ^ 1, s->condlabel);
10517 s->condjmp = 1;
10519 /* offset[11:1] = insn[10:0] */
10520 offset = (insn & 0x7ff) << 1;
10521 /* offset[17:12] = insn[21:16]. */
10522 offset |= (insn & 0x003f0000) >> 4;
10523 /* offset[31:20] = insn[26]. */
10524 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10525 /* offset[18] = insn[13]. */
10526 offset |= (insn & (1 << 13)) << 5;
10527 /* offset[19] = insn[11]. */
10528 offset |= (insn & (1 << 11)) << 8;
10530 /* jump to the offset */
10531 gen_jmp(s, s->pc + offset);
10533 } else {
10534 /* Data processing immediate. */
10535 if (insn & (1 << 25)) {
10536 if (insn & (1 << 24)) {
10537 if (insn & (1 << 20))
10538 goto illegal_op;
10539 /* Bitfield/Saturate. */
10540 op = (insn >> 21) & 7;
10541 imm = insn & 0x1f;
10542 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10543 if (rn == 15) {
10544 tmp = tcg_temp_new_i32();
10545 tcg_gen_movi_i32(tmp, 0);
10546 } else {
10547 tmp = load_reg(s, rn);
10549 switch (op) {
10550 case 2: /* Signed bitfield extract. */
10551 imm++;
10552 if (shift + imm > 32)
10553 goto illegal_op;
10554 if (imm < 32)
10555 gen_sbfx(tmp, shift, imm);
10556 break;
10557 case 6: /* Unsigned bitfield extract. */
10558 imm++;
10559 if (shift + imm > 32)
10560 goto illegal_op;
10561 if (imm < 32)
10562 gen_ubfx(tmp, shift, (1u << imm) - 1);
10563 break;
10564 case 3: /* Bitfield insert/clear. */
10565 if (imm < shift)
10566 goto illegal_op;
10567 imm = imm + 1 - shift;
10568 if (imm != 32) {
10569 tmp2 = load_reg(s, rd);
10570 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
10571 tcg_temp_free_i32(tmp2);
10573 break;
10574 case 7:
10575 goto illegal_op;
10576 default: /* Saturate. */
10577 if (shift) {
10578 if (op & 1)
10579 tcg_gen_sari_i32(tmp, tmp, shift);
10580 else
10581 tcg_gen_shli_i32(tmp, tmp, shift);
10583 tmp2 = tcg_const_i32(imm);
10584 if (op & 4) {
10585 /* Unsigned. */
10586 if ((op & 1) && shift == 0) {
10587 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10588 tcg_temp_free_i32(tmp);
10589 tcg_temp_free_i32(tmp2);
10590 goto illegal_op;
10592 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
10593 } else {
10594 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
10596 } else {
10597 /* Signed. */
10598 if ((op & 1) && shift == 0) {
10599 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10600 tcg_temp_free_i32(tmp);
10601 tcg_temp_free_i32(tmp2);
10602 goto illegal_op;
10604 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
10605 } else {
10606 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
10609 tcg_temp_free_i32(tmp2);
10610 break;
10612 store_reg(s, rd, tmp);
10613 } else {
10614 imm = ((insn & 0x04000000) >> 15)
10615 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10616 if (insn & (1 << 22)) {
10617 /* 16-bit immediate. */
10618 imm |= (insn >> 4) & 0xf000;
10619 if (insn & (1 << 23)) {
10620 /* movt */
10621 tmp = load_reg(s, rd);
10622 tcg_gen_ext16u_i32(tmp, tmp);
10623 tcg_gen_ori_i32(tmp, tmp, imm << 16);
10624 } else {
10625 /* movw */
10626 tmp = tcg_temp_new_i32();
10627 tcg_gen_movi_i32(tmp, imm);
10629 } else {
10630 /* Add/sub 12-bit immediate. */
10631 if (rn == 15) {
10632 offset = s->pc & ~(uint32_t)3;
10633 if (insn & (1 << 23))
10634 offset -= imm;
10635 else
10636 offset += imm;
10637 tmp = tcg_temp_new_i32();
10638 tcg_gen_movi_i32(tmp, offset);
10639 } else {
10640 tmp = load_reg(s, rn);
10641 if (insn & (1 << 23))
10642 tcg_gen_subi_i32(tmp, tmp, imm);
10643 else
10644 tcg_gen_addi_i32(tmp, tmp, imm);
10647 store_reg(s, rd, tmp);
10649 } else {
10650 int shifter_out = 0;
10651 /* modified 12-bit immediate. */
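/* T32 modified immediate: i:imm3 values 0-3 replicate the 8-bit value across
   the word; otherwise the value (with bit 7 forced to 1) is rotated right by i:imm3:a. */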
10652 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10653 imm = (insn & 0xff);
10654 switch (shift) {
10655 case 0: /* XY */
10656 /* Nothing to do. */
10657 break;
10658 case 1: /* 00XY00XY */
10659 imm |= imm << 16;
10660 break;
10661 case 2: /* XY00XY00 */
10662 imm |= imm << 16;
10663 imm <<= 8;
10664 break;
10665 case 3: /* XYXYXYXY */
10666 imm |= imm << 16;
10667 imm |= imm << 8;
10668 break;
10669 default: /* Rotated constant. */
10670 shift = (shift << 1) | (imm >> 7);
10671 imm |= 0x80;
10672 imm = imm << (32 - shift);
10673 shifter_out = 1;
10674 break;
10676 tmp2 = tcg_temp_new_i32();
10677 tcg_gen_movi_i32(tmp2, imm);
10678 rn = (insn >> 16) & 0xf;
10679 if (rn == 15) {
10680 tmp = tcg_temp_new_i32();
10681 tcg_gen_movi_i32(tmp, 0);
10682 } else {
10683 tmp = load_reg(s, rn);
10685 op = (insn >> 21) & 0xf;
10686 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
10687 shifter_out, tmp, tmp2))
10688 goto illegal_op;
10689 tcg_temp_free_i32(tmp2);
10690 rd = (insn >> 8) & 0xf;
10691 if (rd != 15) {
10692 store_reg(s, rd, tmp);
10693 } else {
10694 tcg_temp_free_i32(tmp);
10698 break;
10699 case 12: /* Load/store single data item. */
10701 int postinc = 0;
10702 int writeback = 0;
10703 int memidx;
10704 if ((insn & 0x01100000) == 0x01000000) {
10705 if (disas_neon_ls_insn(s, insn)) {
10706 goto illegal_op;
10708 break;
10710 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10711 if (rs == 15) {
10712 if (!(insn & (1 << 20))) {
10713 goto illegal_op;
10715 if (op != 2) {
10716 /* Byte or halfword load space with dest == r15 : memory hints.
10717 * Catch them early so we don't emit pointless addressing code.
10718 * This space is a mix of:
10719 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10720 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10721 * cores)
10722 * unallocated hints, which must be treated as NOPs
10723 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10724 * which is easiest for the decoding logic
10725 * Some space which must UNDEF */
10727 int op1 = (insn >> 23) & 3;
10728 int op2 = (insn >> 6) & 0x3f;
10729 if (op & 2) {
10730 goto illegal_op;
10732 if (rn == 15) {
10733 /* UNPREDICTABLE, unallocated hint or
10734 * PLD/PLDW/PLI (literal) */
10736 return 0;
10738 if (op1 & 1) {
10739 return 0; /* PLD/PLDW/PLI or unallocated hint */
10741 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
10742 return 0; /* PLD/PLDW/PLI or unallocated hint */
10744 /* UNDEF space, or an UNPREDICTABLE */
10745 return 1;
10748 memidx = get_mem_index(s);
10749 if (rn == 15) {
10750 addr = tcg_temp_new_i32();
10751 /* PC relative. */
10752 /* s->pc has already been incremented by 4. */
10753 imm = s->pc & 0xfffffffc;
10754 if (insn & (1 << 23))
10755 imm += insn & 0xfff;
10756 else
10757 imm -= insn & 0xfff;
10758 tcg_gen_movi_i32(addr, imm);
10759 } else {
10760 addr = load_reg(s, rn);
10761 if (insn & (1 << 23)) {
10762 /* Positive offset. */
10763 imm = insn & 0xfff;
10764 tcg_gen_addi_i32(addr, addr, imm);
10765 } else {
10766 imm = insn & 0xff;
10767 switch ((insn >> 8) & 0xf) {
10768 case 0x0: /* Shifted Register. */
10769 shift = (insn >> 4) & 0xf;
10770 if (shift > 3) {
10771 tcg_temp_free_i32(addr);
10772 goto illegal_op;
10774 tmp = load_reg(s, rm);
10775 if (shift)
10776 tcg_gen_shli_i32(tmp, tmp, shift);
10777 tcg_gen_add_i32(addr, addr, tmp);
10778 tcg_temp_free_i32(tmp);
10779 break;
10780 case 0xc: /* Negative offset. */
10781 tcg_gen_addi_i32(addr, addr, -imm);
10782 break;
10783 case 0xe: /* User privilege. */
10784 tcg_gen_addi_i32(addr, addr, imm);
10785 memidx = get_a32_user_mem_index(s);
10786 break;
10787 case 0x9: /* Post-decrement. */
10788 imm = -imm;
10789 /* Fall through. */
10790 case 0xb: /* Post-increment. */
10791 postinc = 1;
10792 writeback = 1;
10793 break;
10794 case 0xd: /* Pre-decrement. */
10795 imm = -imm;
10796 /* Fall through. */
10797 case 0xf: /* Pre-increment. */
10798 tcg_gen_addi_i32(addr, addr, imm);
10799 writeback = 1;
10800 break;
10801 default:
10802 tcg_temp_free_i32(addr);
10803 goto illegal_op;
10807 if (insn & (1 << 20)) {
10808 /* Load. */
10809 tmp = tcg_temp_new_i32();
10810 switch (op) {
10811 case 0:
10812 gen_aa32_ld8u(s, tmp, addr, memidx);
10813 break;
10814 case 4:
10815 gen_aa32_ld8s(s, tmp, addr, memidx);
10816 break;
10817 case 1:
10818 gen_aa32_ld16u(s, tmp, addr, memidx);
10819 break;
10820 case 5:
10821 gen_aa32_ld16s(s, tmp, addr, memidx);
10822 break;
10823 case 2:
10824 gen_aa32_ld32u(s, tmp, addr, memidx);
10825 break;
10826 default:
10827 tcg_temp_free_i32(tmp);
10828 tcg_temp_free_i32(addr);
10829 goto illegal_op;
10831 if (rs == 15) {
10832 gen_bx(s, tmp);
10833 } else {
10834 store_reg(s, rs, tmp);
10836 } else {
10837 /* Store. */
10838 tmp = load_reg(s, rs);
10839 switch (op) {
10840 case 0:
10841 gen_aa32_st8(s, tmp, addr, memidx);
10842 break;
10843 case 1:
10844 gen_aa32_st16(s, tmp, addr, memidx);
10845 break;
10846 case 2:
10847 gen_aa32_st32(s, tmp, addr, memidx);
10848 break;
10849 default:
10850 tcg_temp_free_i32(tmp);
10851 tcg_temp_free_i32(addr);
10852 goto illegal_op;
10854 tcg_temp_free_i32(tmp);
10856 if (postinc)
10857 tcg_gen_addi_i32(addr, addr, imm);
10858 if (writeback) {
10859 store_reg(s, rn, addr);
10860 } else {
10861 tcg_temp_free_i32(addr);
10864 break;
10865 default:
10866 goto illegal_op;
10868 return 0;
10869 illegal_op:
10870 return 1;
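/* Decode and translate one 16-bit Thumb instruction; the 32-bit Thumb
 * encodings (top halfword 0xe800-0xffff) are handed off to
 * disas_thumb2_insn() from cases 14 and 15 below.
 */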
10873 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
10875 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10876 int32_t offset;
10877 int i;
10878 TCGv_i32 tmp;
10879 TCGv_i32 tmp2;
10880 TCGv_i32 addr;
10882 if (s->condexec_mask) {
10883 cond = s->condexec_cond;
10884 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10885 s->condlabel = gen_new_label();
10886 arm_gen_test_cc(cond ^ 1, s->condlabel);
10887 s->condjmp = 1;
10891 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
10892 s->pc += 2;
10894 switch (insn >> 12) {
10895 case 0: case 1:
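/* shift by immediate (lsl/lsr/asr), or add/subtract when op == 3 below */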
10897 rd = insn & 7;
10898 op = (insn >> 11) & 3;
10899 if (op == 3) {
10900 /* add/subtract */
10901 rn = (insn >> 3) & 7;
10902 tmp = load_reg(s, rn);
10903 if (insn & (1 << 10)) {
10904 /* immediate */
10905 tmp2 = tcg_temp_new_i32();
10906 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
10907 } else {
10908 /* reg */
10909 rm = (insn >> 6) & 7;
10910 tmp2 = load_reg(s, rm);
10912 if (insn & (1 << 9)) {
10913 if (s->condexec_mask)
10914 tcg_gen_sub_i32(tmp, tmp, tmp2);
10915 else
10916 gen_sub_CC(tmp, tmp, tmp2);
10917 } else {
10918 if (s->condexec_mask)
10919 tcg_gen_add_i32(tmp, tmp, tmp2);
10920 else
10921 gen_add_CC(tmp, tmp, tmp2);
10923 tcg_temp_free_i32(tmp2);
10924 store_reg(s, rd, tmp);
10925 } else {
10926 /* shift immediate */
10927 rm = (insn >> 3) & 7;
10928 shift = (insn >> 6) & 0x1f;
10929 tmp = load_reg(s, rm);
10930 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10931 if (!s->condexec_mask)
10932 gen_logic_CC(tmp);
10933 store_reg(s, rd, tmp);
10935 break;
10936 case 2: case 3:
10937 /* move/compare/add/subtract with 8-bit immediate */
10938 op = (insn >> 11) & 3;
10939 rd = (insn >> 8) & 0x7;
10940 if (op == 0) { /* mov */
10941 tmp = tcg_temp_new_i32();
10942 tcg_gen_movi_i32(tmp, insn & 0xff);
10943 if (!s->condexec_mask)
10944 gen_logic_CC(tmp);
10945 store_reg(s, rd, tmp);
10946 } else {
10947 tmp = load_reg(s, rd);
10948 tmp2 = tcg_temp_new_i32();
10949 tcg_gen_movi_i32(tmp2, insn & 0xff);
10950 switch (op) {
10951 case 1: /* cmp */
10952 gen_sub_CC(tmp, tmp, tmp2);
10953 tcg_temp_free_i32(tmp);
10954 tcg_temp_free_i32(tmp2);
10955 break;
10956 case 2: /* add */
10957 if (s->condexec_mask)
10958 tcg_gen_add_i32(tmp, tmp, tmp2);
10959 else
10960 gen_add_CC(tmp, tmp, tmp2);
10961 tcg_temp_free_i32(tmp2);
10962 store_reg(s, rd, tmp);
10963 break;
10964 case 3: /* sub */
10965 if (s->condexec_mask)
10966 tcg_gen_sub_i32(tmp, tmp, tmp2);
10967 else
10968 gen_sub_CC(tmp, tmp, tmp2);
10969 tcg_temp_free_i32(tmp2);
10970 store_reg(s, rd, tmp);
10971 break;
10974 break;
10975 case 4:
10976 if (insn & (1 << 11)) {
10977 rd = (insn >> 8) & 7;
10978 /* load pc-relative. Bit 1 of PC is ignored. */
10979 val = s->pc + 2 + ((insn & 0xff) * 4);
10980 val &= ~(uint32_t)2;
10981 addr = tcg_temp_new_i32();
10982 tcg_gen_movi_i32(addr, val);
10983 tmp = tcg_temp_new_i32();
10984 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10985 tcg_temp_free_i32(addr);
10986 store_reg(s, rd, tmp);
10987 break;
10989 if (insn & (1 << 10)) {
10990 /* data processing extended or blx */
10991 rd = (insn & 7) | ((insn >> 4) & 8);
10992 rm = (insn >> 3) & 0xf;
10993 op = (insn >> 8) & 3;
10994 switch (op) {
10995 case 0: /* add */
10996 tmp = load_reg(s, rd);
10997 tmp2 = load_reg(s, rm);
10998 tcg_gen_add_i32(tmp, tmp, tmp2);
10999 tcg_temp_free_i32(tmp2);
11000 store_reg(s, rd, tmp);
11001 break;
11002 case 1: /* cmp */
11003 tmp = load_reg(s, rd);
11004 tmp2 = load_reg(s, rm);
11005 gen_sub_CC(tmp, tmp, tmp2);
11006 tcg_temp_free_i32(tmp2);
11007 tcg_temp_free_i32(tmp);
11008 break;
11009 case 2: /* mov/cpy */
11010 tmp = load_reg(s, rm);
11011 store_reg(s, rd, tmp);
11012 break;
11013 case 3:/* branch [and link] exchange thumb register */
11014 tmp = load_reg(s, rm);
11015 if (insn & (1 << 7)) {
11016 ARCH(5);
11017 val = (uint32_t)s->pc | 1;
11018 tmp2 = tcg_temp_new_i32();
11019 tcg_gen_movi_i32(tmp2, val);
11020 store_reg(s, 14, tmp2);
11022 /* already thumb, no need to check */
11023 gen_bx(s, tmp);
11024 break;
11026 break;
11029 /* data processing register */
11030 rd = insn & 7;
11031 rm = (insn >> 3) & 7;
11032 op = (insn >> 6) & 0xf;
11033 if (op == 2 || op == 3 || op == 4 || op == 7) {
11034 /* the shift/rotate ops want the operands backwards */
11035 val = rm;
11036 rm = rd;
11037 rd = val;
11038 val = 1;
11039 } else {
11040 val = 0;
11043 if (op == 9) { /* neg */
11044 tmp = tcg_temp_new_i32();
11045 tcg_gen_movi_i32(tmp, 0);
11046 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11047 tmp = load_reg(s, rd);
11048 } else {
11049 TCGV_UNUSED_I32(tmp);
11052 tmp2 = load_reg(s, rm);
11053 switch (op) {
11054 case 0x0: /* and */
11055 tcg_gen_and_i32(tmp, tmp, tmp2);
11056 if (!s->condexec_mask)
11057 gen_logic_CC(tmp);
11058 break;
11059 case 0x1: /* eor */
11060 tcg_gen_xor_i32(tmp, tmp, tmp2);
11061 if (!s->condexec_mask)
11062 gen_logic_CC(tmp);
11063 break;
11064 case 0x2: /* lsl */
11065 if (s->condexec_mask) {
11066 gen_shl(tmp2, tmp2, tmp);
11067 } else {
11068 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
11069 gen_logic_CC(tmp2);
11071 break;
11072 case 0x3: /* lsr */
11073 if (s->condexec_mask) {
11074 gen_shr(tmp2, tmp2, tmp);
11075 } else {
11076 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
11077 gen_logic_CC(tmp2);
11079 break;
11080 case 0x4: /* asr */
11081 if (s->condexec_mask) {
11082 gen_sar(tmp2, tmp2, tmp);
11083 } else {
11084 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
11085 gen_logic_CC(tmp2);
11087 break;
11088 case 0x5: /* adc */
11089 if (s->condexec_mask) {
11090 gen_adc(tmp, tmp2);
11091 } else {
11092 gen_adc_CC(tmp, tmp, tmp2);
11094 break;
11095 case 0x6: /* sbc */
11096 if (s->condexec_mask) {
11097 gen_sub_carry(tmp, tmp, tmp2);
11098 } else {
11099 gen_sbc_CC(tmp, tmp, tmp2);
11101 break;
11102 case 0x7: /* ror */
11103 if (s->condexec_mask) {
11104 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11105 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
11106 } else {
11107 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
11108 gen_logic_CC(tmp2);
11110 break;
11111 case 0x8: /* tst */
11112 tcg_gen_and_i32(tmp, tmp, tmp2);
11113 gen_logic_CC(tmp);
11114 rd = 16;
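/* rd == 16 marks ops with no destination register; the writeback
 * code at the end of this block checks for it.
 */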
11115 break;
11116 case 0x9: /* neg */
11117 if (s->condexec_mask)
11118 tcg_gen_neg_i32(tmp, tmp2);
11119 else
11120 gen_sub_CC(tmp, tmp, tmp2);
11121 break;
11122 case 0xa: /* cmp */
11123 gen_sub_CC(tmp, tmp, tmp2);
11124 rd = 16;
11125 break;
11126 case 0xb: /* cmn */
11127 gen_add_CC(tmp, tmp, tmp2);
11128 rd = 16;
11129 break;
11130 case 0xc: /* orr */
11131 tcg_gen_or_i32(tmp, tmp, tmp2);
11132 if (!s->condexec_mask)
11133 gen_logic_CC(tmp);
11134 break;
11135 case 0xd: /* mul */
11136 tcg_gen_mul_i32(tmp, tmp, tmp2);
11137 if (!s->condexec_mask)
11138 gen_logic_CC(tmp);
11139 break;
11140 case 0xe: /* bic */
11141 tcg_gen_andc_i32(tmp, tmp, tmp2);
11142 if (!s->condexec_mask)
11143 gen_logic_CC(tmp);
11144 break;
11145 case 0xf: /* mvn */
11146 tcg_gen_not_i32(tmp2, tmp2);
11147 if (!s->condexec_mask)
11148 gen_logic_CC(tmp2);
11149 val = 1;
11150 rm = rd;
11151 break;
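/* For the swapped-operand shifts/rotates and mvn, the result was
 * computed into tmp2 (val == 1); everything else left it in tmp.
 */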
11153 if (rd != 16) {
11154 if (val) {
11155 store_reg(s, rm, tmp2);
11156 if (op != 0xf)
11157 tcg_temp_free_i32(tmp);
11158 } else {
11159 store_reg(s, rd, tmp);
11160 tcg_temp_free_i32(tmp2);
11162 } else {
11163 tcg_temp_free_i32(tmp);
11164 tcg_temp_free_i32(tmp2);
11166 break;
11168 case 5:
11169 /* load/store register offset. */
11170 rd = insn & 7;
11171 rn = (insn >> 3) & 7;
11172 rm = (insn >> 6) & 7;
11173 op = (insn >> 9) & 7;
11174 addr = load_reg(s, rn);
11175 tmp = load_reg(s, rm);
11176 tcg_gen_add_i32(addr, addr, tmp);
11177 tcg_temp_free_i32(tmp);
11179 if (op < 3) { /* store */
11180 tmp = load_reg(s, rd);
11181 } else {
11182 tmp = tcg_temp_new_i32();
11185 switch (op) {
11186 case 0: /* str */
11187 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11188 break;
11189 case 1: /* strh */
11190 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
11191 break;
11192 case 2: /* strb */
11193 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
11194 break;
11195 case 3: /* ldrsb */
11196 gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
11197 break;
11198 case 4: /* ldr */
11199 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11200 break;
11201 case 5: /* ldrh */
11202 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
11203 break;
11204 case 6: /* ldrb */
11205 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
11206 break;
11207 case 7: /* ldrsh */
11208 gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
11209 break;
11211 if (op >= 3) { /* load */
11212 store_reg(s, rd, tmp);
11213 } else {
11214 tcg_temp_free_i32(tmp);
11216 tcg_temp_free_i32(addr);
11217 break;
11219 case 6:
11220 /* load/store word immediate offset */
11221 rd = insn & 7;
11222 rn = (insn >> 3) & 7;
11223 addr = load_reg(s, rn);
11224 val = (insn >> 4) & 0x7c;
11225 tcg_gen_addi_i32(addr, addr, val);
11227 if (insn & (1 << 11)) {
11228 /* load */
11229 tmp = tcg_temp_new_i32();
11230 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11231 store_reg(s, rd, tmp);
11232 } else {
11233 /* store */
11234 tmp = load_reg(s, rd);
11235 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11236 tcg_temp_free_i32(tmp);
11238 tcg_temp_free_i32(addr);
11239 break;
11241 case 7:
11242 /* load/store byte immediate offset */
11243 rd = insn & 7;
11244 rn = (insn >> 3) & 7;
11245 addr = load_reg(s, rn);
11246 val = (insn >> 6) & 0x1f;
11247 tcg_gen_addi_i32(addr, addr, val);
11249 if (insn & (1 << 11)) {
11250 /* load */
11251 tmp = tcg_temp_new_i32();
11252 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
11253 store_reg(s, rd, tmp);
11254 } else {
11255 /* store */
11256 tmp = load_reg(s, rd);
11257 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
11258 tcg_temp_free_i32(tmp);
11260 tcg_temp_free_i32(addr);
11261 break;
11263 case 8:
11264 /* load/store halfword immediate offset */
11265 rd = insn & 7;
11266 rn = (insn >> 3) & 7;
11267 addr = load_reg(s, rn);
11268 val = (insn >> 5) & 0x3e;
11269 tcg_gen_addi_i32(addr, addr, val);
11271 if (insn & (1 << 11)) {
11272 /* load */
11273 tmp = tcg_temp_new_i32();
11274 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
11275 store_reg(s, rd, tmp);
11276 } else {
11277 /* store */
11278 tmp = load_reg(s, rd);
11279 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
11280 tcg_temp_free_i32(tmp);
11282 tcg_temp_free_i32(addr);
11283 break;
11285 case 9:
11286 /* load/store from stack */
11287 rd = (insn >> 8) & 7;
11288 addr = load_reg(s, 13);
11289 val = (insn & 0xff) * 4;
11290 tcg_gen_addi_i32(addr, addr, val);
11292 if (insn & (1 << 11)) {
11293 /* load */
11294 tmp = tcg_temp_new_i32();
11295 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11296 store_reg(s, rd, tmp);
11297 } else {
11298 /* store */
11299 tmp = load_reg(s, rd);
11300 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11301 tcg_temp_free_i32(tmp);
11303 tcg_temp_free_i32(addr);
11304 break;
11306 case 10:
11307 /* pc- or sp-relative address generation: add an immediate to SP or PC, result to a low reg */
11308 rd = (insn >> 8) & 7;
11309 if (insn & (1 << 11)) {
11310 /* SP */
11311 tmp = load_reg(s, 13);
11312 } else {
11313 /* PC. bit 1 is ignored. */
11314 tmp = tcg_temp_new_i32();
11315 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
11317 val = (insn & 0xff) * 4;
11318 tcg_gen_addi_i32(tmp, tmp, val);
11319 store_reg(s, rd, tmp);
11320 break;
11322 case 11:
11323 /* misc */
11324 op = (insn >> 8) & 0xf;
11325 switch (op) {
11326 case 0:
11327 /* adjust stack pointer */
11328 tmp = load_reg(s, 13);
11329 val = (insn & 0x7f) * 4;
11330 if (insn & (1 << 7))
11331 val = -(int32_t)val;
11332 tcg_gen_addi_i32(tmp, tmp, val);
11333 store_reg(s, 13, tmp);
11334 break;
11336 case 2: /* sign/zero extend. */
11337 ARCH(6);
11338 rd = insn & 7;
11339 rm = (insn >> 3) & 7;
11340 tmp = load_reg(s, rm);
11341 switch ((insn >> 6) & 3) {
11342 case 0: gen_sxth(tmp); break;
11343 case 1: gen_sxtb(tmp); break;
11344 case 2: gen_uxth(tmp); break;
11345 case 3: gen_uxtb(tmp); break;
11347 store_reg(s, rd, tmp);
11348 break;
11349 case 4: case 5: case 0xc: case 0xd:
11350 /* push/pop */
11351 addr = load_reg(s, 13);
11352 if (insn & (1 << 8))
11353 offset = 4;
11354 else
11355 offset = 0;
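/* offset ends up as the total number of bytes pushed/popped: one word
 * per list register, plus one word for LR/PC when bit 8 is set.
 */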
11356 for (i = 0; i < 8; i++) {
11357 if (insn & (1 << i))
11358 offset += 4;
11360 if ((insn & (1 << 11)) == 0) {
11361 tcg_gen_addi_i32(addr, addr, -offset);
11363 for (i = 0; i < 8; i++) {
11364 if (insn & (1 << i)) {
11365 if (insn & (1 << 11)) {
11366 /* pop */
11367 tmp = tcg_temp_new_i32();
11368 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11369 store_reg(s, i, tmp);
11370 } else {
11371 /* push */
11372 tmp = load_reg(s, i);
11373 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11374 tcg_temp_free_i32(tmp);
11376 /* advance to the next address. */
11377 tcg_gen_addi_i32(addr, addr, 4);
11380 TCGV_UNUSED_I32(tmp);
11381 if (insn & (1 << 8)) {
11382 if (insn & (1 << 11)) {
11383 /* pop pc */
11384 tmp = tcg_temp_new_i32();
11385 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11386 /* don't set the pc until the rest of the instruction
11387 has completed */
11388 } else {
11389 /* push lr */
11390 tmp = load_reg(s, 14);
11391 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11392 tcg_temp_free_i32(tmp);
11394 tcg_gen_addi_i32(addr, addr, 4);
11396 if ((insn & (1 << 11)) == 0) {
11397 tcg_gen_addi_i32(addr, addr, -offset);
11399 /* write back the new stack pointer */
11400 store_reg(s, 13, addr);
11401 /* set the new PC value */
11402 if ((insn & 0x0900) == 0x0900) {
11403 store_reg_from_load(s, 15, tmp);
11405 break;
11407 case 1: case 3: case 9: case 11: /* cbz/cbnz */
11408 rm = insn & 7;
11409 tmp = load_reg(s, rm);
11410 s->condlabel = gen_new_label();
11411 s->condjmp = 1;
11412 if (insn & (1 << 11))
11413 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
11414 else
11415 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
11416 tcg_temp_free_i32(tmp);
11417 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
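/* branch offset is i:imm5:'0' -- imm5 from bits [7:3], i from bit 9 */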
11418 val = (uint32_t)s->pc + 2;
11419 val += offset;
11420 gen_jmp(s, val);
11421 break;
11423 case 15: /* IT, nop-hint. */
11424 if ((insn & 0xf) == 0) {
11425 gen_nop_hint(s, (insn >> 4) & 0xf);
11426 break;
11428 /* If Then. */
11429 s->condexec_cond = (insn >> 4) & 0xe;
11430 s->condexec_mask = insn & 0x1f;
11431 /* No actual code generated for this insn; it just sets up state. */
11432 break;
11434 case 0xe: /* bkpt */
11436 int imm8 = extract32(insn, 0, 8);
11437 ARCH(5);
11438 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11439 default_exception_el(s));
11440 break;
11443 case 0xa: /* rev */
11444 ARCH(6);
11445 rn = (insn >> 3) & 0x7;
11446 rd = insn & 0x7;
11447 tmp = load_reg(s, rn);
11448 switch ((insn >> 6) & 3) {
11449 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
11450 case 1: gen_rev16(tmp); break;
11451 case 3: gen_revsh(tmp); break;
11452 default: goto illegal_op;
11454 store_reg(s, rd, tmp);
11455 break;
11457 case 6:
11458 switch ((insn >> 5) & 7) {
11459 case 2:
11460 /* setend */
11461 ARCH(6);
11462 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11463 gen_helper_setend(cpu_env);
11464 s->is_jmp = DISAS_UPDATE;
11466 break;
11467 case 3:
11468 /* cps */
11469 ARCH(6);
11470 if (IS_USER(s)) {
11471 break;
11473 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11474 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11475 /* FAULTMASK */
11476 if (insn & 1) {
11477 addr = tcg_const_i32(19);
11478 gen_helper_v7m_msr(cpu_env, addr, tmp);
11479 tcg_temp_free_i32(addr);
11481 /* PRIMASK */
11482 if (insn & 2) {
11483 addr = tcg_const_i32(16);
11484 gen_helper_v7m_msr(cpu_env, addr, tmp);
11485 tcg_temp_free_i32(addr);
11487 tcg_temp_free_i32(tmp);
11488 gen_lookup_tb(s);
11489 } else {
11490 if (insn & (1 << 4)) {
11491 shift = CPSR_A | CPSR_I | CPSR_F;
11492 } else {
11493 shift = 0;
11495 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
11497 break;
11498 default:
11499 goto undef;
11501 break;
11503 default:
11504 goto undef;
11506 break;
11508 case 12:
11510 /* load/store multiple */
11511 TCGv_i32 loaded_var;
11512 TCGV_UNUSED_I32(loaded_var);
11513 rn = (insn >> 8) & 0x7;
11514 addr = load_reg(s, rn);
11515 for (i = 0; i < 8; i++) {
11516 if (insn & (1 << i)) {
11517 if (insn & (1 << 11)) {
11518 /* load */
11519 tmp = tcg_temp_new_i32();
11520 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
11521 if (i == rn) {
11522 loaded_var = tmp;
11523 } else {
11524 store_reg(s, i, tmp);
11526 } else {
11527 /* store */
11528 tmp = load_reg(s, i);
11529 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
11530 tcg_temp_free_i32(tmp);
11532 /* advance to the next address */
11533 tcg_gen_addi_i32(addr, addr, 4);
11536 if ((insn & (1 << rn)) == 0) {
11537 /* base reg not in list: base register writeback */
11538 store_reg(s, rn, addr);
11539 } else {
11540 /* base reg in list: if load, complete it now */
11541 if (insn & (1 << 11)) {
11542 store_reg(s, rn, loaded_var);
11544 tcg_temp_free_i32(addr);
11546 break;
11548 case 13:
11549 /* conditional branch or swi */
11550 cond = (insn >> 8) & 0xf;
11551 if (cond == 0xe)
11552 goto undef;
11554 if (cond == 0xf) {
11555 /* swi */
11556 gen_set_pc_im(s, s->pc);
11557 s->svc_imm = extract32(insn, 0, 8);
11558 s->is_jmp = DISAS_SWI;
11559 break;
11561 /* generate a conditional jump to next instruction */
11562 s->condlabel = gen_new_label();
11563 arm_gen_test_cc(cond ^ 1, s->condlabel);
11564 s->condjmp = 1;
11566 /* jump to the offset */
11567 val = (uint32_t)s->pc + 2;
11568 offset = ((int32_t)insn << 24) >> 24;
11569 val += offset << 1;
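/* (insn & 0xff is sign-extended and scaled by 2, giving a range of
 * -256..+254 bytes from the next insn's address)
 */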
11570 gen_jmp(s, val);
11571 break;
11573 case 14:
11574 if (insn & (1 << 11)) {
11575 if (disas_thumb2_insn(env, s, insn))
11576 goto undef32;
11577 break;
11579 /* unconditional branch */
11580 val = (uint32_t)s->pc;
11581 offset = ((int32_t)insn << 21) >> 21;
11582 val += (offset << 1) + 2;
11583 gen_jmp(s, val);
11584 break;
11586 case 15:
11587 if (disas_thumb2_insn(env, s, insn))
11588 goto undef32;
11589 break;
11591 return;
11592 undef32:
11593 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11594 default_exception_el(s));
11595 return;
11596 illegal_op:
11597 undef:
11598 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11599 default_exception_el(s));
11602 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11604 /* Return true if the insn at dc->pc might cross a page boundary.
11605 * (False positives are OK, false negatives are not.)
11607 uint16_t insn;
11609 if ((s->pc & 3) == 0) {
11610 /* At a 4-aligned address we can't be crossing a page */
11611 return false;
11614 /* This must be a Thumb insn */
11615 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
11617 if ((insn >> 11) >= 0x1d) {
11618 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
11619 * first half of a 32-bit Thumb insn. Thumb-1 cores might
11620 * end up actually treating this as two 16-bit insns (see the
11621 * code at the start of disas_thumb2_insn()) but we don't bother
11622 * to check for that as it is unlikely, and false positives here
11623 * are harmless.
11625 return true;
11627 /* Definitely a 16-bit insn, can't be crossing a page. */
11628 return false;
11631 /* generate intermediate code for basic block 'tb'. */
11632 void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
11634 ARMCPU *cpu = arm_env_get_cpu(env);
11635 CPUState *cs = CPU(cpu);
11636 DisasContext dc1, *dc = &dc1;
11637 target_ulong pc_start;
11638 target_ulong next_page_start;
11639 int num_insns;
11640 int max_insns;
11641 bool end_of_page;
11643 /* generate intermediate code */
11645 /* The A64 decoder has its own top level loop, because it doesn't need
11646 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
11648 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
11649 gen_intermediate_code_a64(cpu, tb);
11650 return;
11653 pc_start = tb->pc;
11655 dc->tb = tb;
11657 dc->is_jmp = DISAS_NEXT;
11658 dc->pc = pc_start;
11659 dc->singlestep_enabled = cs->singlestep_enabled;
11660 dc->condjmp = 0;
11662 dc->aarch64 = 0;
11663 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11664 * there is no secure EL1, so we route exceptions to EL3.
11666 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11667 !arm_el_is_aa64(env, 3);
11668 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
11669 dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags);
11670 dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
11671 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
11672 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
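/* Note the mask is kept shifted up by one bit inside the DisasContext;
 * it is shifted back down when recorded via tcg_gen_insn_start().
 */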
11673 dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
11674 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
11675 #if !defined(CONFIG_USER_ONLY)
11676 dc->user = (dc->current_el == 0);
11677 #endif
11678 dc->ns = ARM_TBFLAG_NS(tb->flags);
11679 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
11680 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
11681 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
11682 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
11683 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
11684 dc->cp_regs = cpu->cp_regs;
11685 dc->features = env->features;
11687 /* Single step state. The code-generation logic here is:
11688 * SS_ACTIVE == 0:
11689 * generate code with no special handling for single-stepping (except
11690 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11691 * this happens anyway because those changes are all system register or
11692 * PSTATE writes).
11693 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11694 * emit code for one insn
11695 * emit code to clear PSTATE.SS
11696 * emit code to generate software step exception for completed step
11697 * end TB (as usual for having generated an exception)
11698 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11699 * emit code to generate a software step exception
11700 * end the TB
11702 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
11703 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
11704 dc->is_ldex = false;
11705 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
11707 cpu_F0s = tcg_temp_new_i32();
11708 cpu_F1s = tcg_temp_new_i32();
11709 cpu_F0d = tcg_temp_new_i64();
11710 cpu_F1d = tcg_temp_new_i64();
11711 cpu_V0 = cpu_F0d;
11712 cpu_V1 = cpu_F1d;
11713 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
11714 cpu_M0 = tcg_temp_new_i64();
11715 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
11716 num_insns = 0;
11717 max_insns = tb->cflags & CF_COUNT_MASK;
11718 if (max_insns == 0) {
11719 max_insns = CF_COUNT_MASK;
11721 if (max_insns > TCG_MAX_INSNS) {
11722 max_insns = TCG_MAX_INSNS;
11725 gen_tb_start(tb);
11727 tcg_clear_temp_count();
11729 /* A note on handling of the condexec (IT) bits:
11731 * We want to avoid the overhead of having to write the updated condexec
11732 * bits back to the CPUARMState for every instruction in an IT block. So:
11733 * (1) if the condexec bits are not already zero then we write
11734 * zero back into the CPUARMState now. This avoids complications trying
11735 * to do it at the end of the block. (For example if we don't do this
11736 * it's hard to identify whether we can safely skip writing condexec
11737 * at the end of the TB, which we definitely want to do for the case
11738 * where a TB doesn't do anything with the IT state at all.)
11739 * (2) if we are going to leave the TB then we call gen_set_condexec()
11740 * which will write the correct value into CPUARMState if zero is wrong.
11741 * This is done both for leaving the TB at the end, and for leaving
11742 * it because of an exception we know will happen, which is done in
11743 * gen_exception_insn(). The latter is necessary because we need to
11744 * leave the TB with the PC/IT state just prior to execution of the
11745 * instruction which caused the exception.
11746 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
11747 * then the CPUARMState will be wrong and we need to reset it.
11748 * This is handled in the same way as restoration of the
11749 * PC in these situations; we save the value of the condexec bits
11750 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
11751 * then uses this to restore them after an exception.
11753 * Note that there are no instructions which can read the condexec
11754 * bits, and none which can write non-static values to them, so
11755 * we don't need to care about whether CPUARMState is correct in the
11756 * middle of a TB.
11759 /* Reset the conditional execution bits immediately. This avoids
11760 complications trying to do it at the end of the block. */
11761 if (dc->condexec_mask || dc->condexec_cond)
11763 TCGv_i32 tmp = tcg_temp_new_i32();
11764 tcg_gen_movi_i32(tmp, 0);
11765 store_cpu_field(tmp, condexec_bits);
11767 do {
11768 tcg_gen_insn_start(dc->pc,
11769 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
11771 num_insns++;
11773 #ifdef CONFIG_USER_ONLY
11774 /* Intercept jump to the magic kernel page. */
11775 if (dc->pc >= 0xffff0000) {
11776 /* We always get here via a jump, so we know we are not in a
11777 conditional execution block. */
11778 gen_exception_internal(EXCP_KERNEL_TRAP);
11779 dc->is_jmp = DISAS_EXC;
11780 break;
11782 #else
11783 if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
11784 /* We always get here via a jump, so we know we are not in a
11785 conditional execution block. */
11786 gen_exception_internal(EXCP_EXCEPTION_EXIT);
11787 dc->is_jmp = DISAS_EXC;
11788 break;
11790 #endif
11792 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
11793 CPUBreakpoint *bp;
11794 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
11795 if (bp->pc == dc->pc) {
11796 if (bp->flags & BP_CPU) {
11797 gen_set_condexec(dc);
11798 gen_set_pc_im(dc, dc->pc);
11799 gen_helper_check_breakpoints(cpu_env);
11800 /* End the TB early; it's likely not going to be executed */
11801 dc->is_jmp = DISAS_UPDATE;
11802 } else {
11803 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
11804 /* The address covered by the breakpoint must be
11805 included in [tb->pc, tb->pc + tb->size) in order
11806 for it to be properly cleared -- thus we
11807 increment the PC here so that the logic setting
11808 tb->size below does the right thing. */
11809 /* TODO: Advance PC by correct instruction length to
11810 * avoid disassembler error messages */
11811 dc->pc += 2;
11812 goto done_generating;
11814 break;
11819 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
11820 gen_io_start();
11823 if (dc->ss_active && !dc->pstate_ss) {
11824 /* Singlestep state is Active-pending.
11825 * If we're in this state at the start of a TB then either
11826 * a) we just took an exception to an EL which is being debugged
11827 * and this is the first insn in the exception handler
11828 * b) debug exceptions were masked and we just unmasked them
11829 * without changing EL (eg by clearing PSTATE.D)
11830 * In either case we're going to take a swstep exception in the
11831 * "did not step an insn" case, and so the syndrome ISV and EX
11832 * bits should be zero.
11834 assert(num_insns == 1);
11835 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
11836 default_exception_el(dc));
11837 goto done_generating;
11840 if (dc->thumb) {
11841 disas_thumb_insn(env, dc);
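/* Advance the IT block state after each Thumb insn: fold the top bit
 * of the mask into the low bit of the condition, then shift the mask
 * up by one.
 */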
11842 if (dc->condexec_mask) {
11843 dc->condexec_cond = (dc->condexec_cond & 0xe)
11844 | ((dc->condexec_mask >> 4) & 1);
11845 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
11846 if (dc->condexec_mask == 0) {
11847 dc->condexec_cond = 0;
11850 } else {
11851 unsigned int insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
11852 dc->pc += 4;
11853 disas_arm_insn(dc, insn);
11856 if (dc->condjmp && !dc->is_jmp) {
11857 gen_set_label(dc->condlabel);
11858 dc->condjmp = 0;
11861 if (tcg_check_temp_count()) {
11862 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
11863 dc->pc);
11866 /* Translation stops when a conditional branch is encountered.
11867 * Otherwise the subsequent code could get translated several times.
11868 * Also stop translation when a page boundary is reached. This
11869 * ensures prefetch aborts occur at the right place. */
11871 /* We want to stop the TB if the next insn starts in a new page,
11872 * or if it spans between this page and the next. This means that
11873 * if we're looking at the last halfword in the page we need to
11874 * see if it's a 16-bit Thumb insn (which will fit in this TB)
11875 * or a 32-bit Thumb insn (which won't).
11876 * This is to avoid generating a silly TB with a single 16-bit insn
11877 * in it at the end of this page (which would execute correctly
11878 * but isn't very efficient).
11880 end_of_page = (dc->pc >= next_page_start) ||
11881 ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));
11883 } while (!dc->is_jmp && !tcg_op_buf_full() &&
11884 !cs->singlestep_enabled &&
11885 !singlestep &&
11886 !dc->ss_active &&
11887 !end_of_page &&
11888 num_insns < max_insns);
11890 if (tb->cflags & CF_LAST_IO) {
11891 if (dc->condjmp) {
11892 /* FIXME: This can theoretically happen with self-modifying
11893 code. */
11894 cpu_abort(cs, "IO on conditional branch instruction");
11896 gen_io_end();
11899 /* At this stage dc->condjmp will only be set when the skipped
11900 instruction was a conditional branch or trap, and the PC has
11901 already been written. */
11902 if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
11903 /* Unconditional and "condition passed" instruction codepath. */
11904 gen_set_condexec(dc);
11905 switch (dc->is_jmp) {
11906 case DISAS_SWI:
11907 gen_ss_advance(dc);
11908 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11909 default_exception_el(dc));
11910 break;
11911 case DISAS_HVC:
11912 gen_ss_advance(dc);
11913 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
11914 break;
11915 case DISAS_SMC:
11916 gen_ss_advance(dc);
11917 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
11918 break;
11919 case DISAS_NEXT:
11920 case DISAS_UPDATE:
11921 gen_set_pc_im(dc, dc->pc);
11922 /* fall through */
11923 default:
11924 if (dc->ss_active) {
11925 gen_step_complete_exception(dc);
11926 } else {
11927 /* FIXME: Single stepping a WFI insn will not halt
11928 the CPU. */
11929 gen_exception_internal(EXCP_DEBUG);
11932 if (dc->condjmp) {
11933 /* "Condition failed" instruction codepath. */
11934 gen_set_label(dc->condlabel);
11935 gen_set_condexec(dc);
11936 gen_set_pc_im(dc, dc->pc);
11937 if (dc->ss_active) {
11938 gen_step_complete_exception(dc);
11939 } else {
11940 gen_exception_internal(EXCP_DEBUG);
11943 } else {
11944 /* While branches must always occur at the end of an IT block,
11945 there are a few other things that can cause us to terminate
11946 the TB in the middle of an IT block:
11947 - Exception generating instructions (bkpt, swi, undefined).
11948 - Page boundaries.
11949 - Hardware watchpoints.
11950 Hardware breakpoints have already been handled and skip this code.
11952 gen_set_condexec(dc);
11953 switch (dc->is_jmp) {
11954 case DISAS_NEXT:
11955 gen_goto_tb(dc, 1, dc->pc);
11956 break;
11957 case DISAS_UPDATE:
11958 gen_set_pc_im(dc, dc->pc);
11959 /* fall through */
11960 case DISAS_JUMP:
11961 default:
11962 /* indicate that the hash table must be used to find the next TB */
11963 tcg_gen_exit_tb(0);
11964 break;
11965 case DISAS_TB_JUMP:
11966 /* nothing more to generate */
11967 break;
11968 case DISAS_WFI:
11969 gen_helper_wfi(cpu_env);
11970 /* The helper doesn't necessarily throw an exception, but we
11971 * must go back to the main loop to check for interrupts anyway.
11973 tcg_gen_exit_tb(0);
11974 break;
11975 case DISAS_WFE:
11976 gen_helper_wfe(cpu_env);
11977 break;
11978 case DISAS_YIELD:
11979 gen_helper_yield(cpu_env);
11980 break;
11981 case DISAS_SWI:
11982 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11983 default_exception_el(dc));
11984 break;
11985 case DISAS_HVC:
11986 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
11987 break;
11988 case DISAS_SMC:
11989 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
11990 break;
11992 if (dc->condjmp) {
11993 gen_set_label(dc->condlabel);
11994 gen_set_condexec(dc);
11995 gen_goto_tb(dc, 1, dc->pc);
11996 dc->condjmp = 0;
12000 done_generating:
12001 gen_tb_end(tb, num_insns);
12003 #ifdef DEBUG_DISAS
12004 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
12005 qemu_log_in_addr_range(pc_start)) {
12006 qemu_log("----------------\n");
12007 qemu_log("IN: %s\n", lookup_symbol(pc_start));
12008 log_target_disas(cs, pc_start, dc->pc - pc_start,
12009 dc->thumb | (dc->sctlr_b << 1));
12010 qemu_log("\n");
12012 #endif
12013 tb->size = dc->pc - pc_start;
12014 tb->icount = num_insns;
12017 static const char *cpu_mode_names[16] = {
12018 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
12019 "???", "???", "hyp", "und", "???", "???", "???", "sys"
12022 void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
12023 int flags)
12025 ARMCPU *cpu = ARM_CPU(cs);
12026 CPUARMState *env = &cpu->env;
12027 int i;
12028 uint32_t psr;
12029 const char *ns_status;
12031 if (is_a64(env)) {
12032 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
12033 return;
12036 for (i = 0; i < 16; i++) {
12037 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
12038 if ((i % 4) == 3)
12039 cpu_fprintf(f, "\n");
12040 else
12041 cpu_fprintf(f, " ");
12043 psr = cpsr_read(env);
12045 if (arm_feature(env, ARM_FEATURE_EL3) &&
12046 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
12047 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
12048 } else {
12049 ns_status = "";
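/* PSR dump: NZCV flags, T (Thumb) or A (ARM) state, NS/S security
 * status, the mode name, and 32 vs legacy 26-bit mode (from M[4]).
 */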
12052 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
12053 psr,
12054 psr & (1 << 31) ? 'N' : '-',
12055 psr & (1 << 30) ? 'Z' : '-',
12056 psr & (1 << 29) ? 'C' : '-',
12057 psr & (1 << 28) ? 'V' : '-',
12058 psr & CPSR_T ? 'T' : 'A',
12059 ns_status,
12060 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
12062 if (flags & CPU_DUMP_FPU) {
12063 int numvfpregs = 0;
12064 if (arm_feature(env, ARM_FEATURE_VFP)) {
12065 numvfpregs += 16;
12067 if (arm_feature(env, ARM_FEATURE_VFP3)) {
12068 numvfpregs += 16;
12070 for (i = 0; i < numvfpregs; i++) {
12071 uint64_t v = float64_val(env->vfp.regs[i]);
12072 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
12073 i * 2, (uint32_t)v,
12074 i * 2 + 1, (uint32_t)(v >> 32),
12075 i, v);
12077 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
12081 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12082 target_ulong *data)
12084 if (is_a64(env)) {
12085 env->pc = data[0];
12086 env->condexec_bits = 0;
12087 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
12088 } else {
12089 env->regs[15] = data[0];
12090 env->condexec_bits = data[1];
12091 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;