/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "arm_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;
#ifdef CONFIG_USER_ONLY
TCGv_i64 cpu_exclusive_test;
TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(cpu_env,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

    a64_translate_init();
}

static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
{
    /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return ARMMMUIdx_S12NSE0;
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return ARMMMUIdx_S1SE0;
    case ARMMMUIdx_S2NS:
    default:
        g_assert_not_reached();
    }
}
134 static inline TCGv_i32 load_cpu_offset(int offset)
136 TCGv_i32 tmp = tcg_temp_new_i32();
137 tcg_gen_ld_i32(tmp, cpu_env, offset);
138 return tmp;
141 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
143 static inline void store_cpu_offset(TCGv_i32 var, int offset)
145 tcg_gen_st_i32(var, cpu_env, offset);
146 tcg_temp_free_i32(var);
149 #define store_cpu_field(var, name) \
150 store_cpu_offset(var, offsetof(CPUARMState, name))
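
/* As a usage illustration: load_cpu_field(thumb) expands to
 * load_cpu_offset(offsetof(CPUARMState, thumb)) and returns a fresh
 * temporary holding env->thumb; store_cpu_field() is the mirror image
 * and consumes (frees) the temporary passed to it.
 */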

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
188 /* Value extensions. */
189 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
190 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
191 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
192 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
194 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
195 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
198 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
200 TCGv_i32 tmp_mask = tcg_const_i32(mask);
201 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
202 tcg_temp_free_i32(tmp_mask);
204 /* Set NZCV flags from the high 4 bits of var. */
205 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed a step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->is_jmp = DISAS_EXC;
}

static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
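
/* Worked example for gen_sbfx(): extracting the 8-bit signed field in
 * bits [11:4] uses shift = 4, width = 8.  After the arithmetic shift,
 * signbit = 0x80; masking with 0xff, XORing with 0x80 and subtracting
 * 0x80 sign-extends the field ((x ^ s) - s is the usual branch-free
 * sign-extension identity).
 */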

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
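
/* Note on the carry convention used above: ARM defines the C flag after
 * a subtraction as NOT borrow, so subtract-with-carry is T0 - T1 - !C,
 * which is exactly the T0 - T1 + CF - 1 computed by gen_sub_carry().
 */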

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
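
/* gen_adc_CC() has two strategies for the carry out: when the backend
 * has add2, each addition is done as a double-word pair whose high word
 * lands directly in cpu_CF; otherwise both operands and the carry are
 * widened to 64 bits, summed, and bit 32 of the result is split back
 * out into cpu_CF.  The N/Z/V computation is shared with gen_add_CC().
 */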

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)       \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
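
/* The movcond in GEN_SHIFT implements the ARM rule for register-specified
 * shifts: only the bottom byte of the shift register matters, and amounts
 * of 32 or more produce 0 for LSL/LSR.  gen_sar() below instead clamps the
 * amount to 31, because an arithmetic shift by 32 or more gives the same
 * result (all sign bits) as a shift by 31.
 */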

static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}

static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}

static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }

static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
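
/* As an illustration of the expansion: for op1 == 1 (signed, GE-setting)
 * and op2 == 0, the code above becomes gen_helper_sadd16(a, a, b, tmp),
 * with tmp pointing at env->GE so the helper can record the per-lane
 * results later consumed by the SEL instruction.
 */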

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }

static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and AArch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}

void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
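
/* arm_gen_test_cc() shows the intended DisasCompare life cycle: build the
 * comparison, branch on it, then free it.  value_global records whether
 * the compared value is a CPU flag global (which must not be freed) or a
 * temporary created by arm_test_cc().
 */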

static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a
   temporary and will be marked as dead.  */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above.  This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function.  */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */
#if TARGET_LONG_BITS == 32

#define DO_GEN_LD(SUFF, OPC)                                                  \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index)  \
{                                                                             \
    tcg_gen_qemu_ld_i32(val, addr, index, (OPC));                             \
}

#define DO_GEN_ST(SUFF, OPC)                                                  \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index)  \
{                                                                             \
    tcg_gen_qemu_st_i32(val, addr, index, (OPC));                             \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
}

#else

#define DO_GEN_LD(SUFF, OPC)                                                  \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index)  \
{                                                                             \
    TCGv addr64 = tcg_temp_new();                                             \
    tcg_gen_extu_i32_i64(addr64, addr);                                       \
    tcg_gen_qemu_ld_i32(val, addr64, index, OPC);                             \
    tcg_temp_free(addr64);                                                    \
}

#define DO_GEN_ST(SUFF, OPC)                                                  \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index)  \
{                                                                             \
    TCGv addr64 = tcg_temp_new();                                             \
    tcg_gen_extu_i32_i64(addr64, addr);                                       \
    tcg_gen_qemu_st_i32(val, addr64, index, OPC);                             \
    tcg_temp_free(addr64);                                                    \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

#endif

DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_TESW)
DO_GEN_LD(16u, MO_TEUW)
DO_GEN_LD(32u, MO_TEUL)
/* 'a' variants include an alignment check */
DO_GEN_LD(16ua, MO_TEUW | MO_ALIGN)
DO_GEN_LD(32ua, MO_TEUL | MO_ALIGN)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_TEUW)
DO_GEN_ST(32, MO_TEUL)
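
/* Each DO_GEN_LD/DO_GEN_ST line above stamps out one accessor: e.g.
 * DO_GEN_LD(16u, MO_TEUW) defines gen_aa32_ld16u(), a zero-extending
 * 16-bit load using a target-endian (MO_TE*) memop.  On 64-bit-address
 * targets the macro also widens the 32-bit guest address first.
 */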
995 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
997 tcg_gen_movi_i32(cpu_R[15], val);
1000 static inline void gen_hvc(DisasContext *s, int imm16)
1002 /* The pre HVC helper handles cases when HVC gets trapped
1003 * as an undefined insn by runtime configuration (ie before
1004 * the insn really executes).
1006 gen_set_pc_im(s, s->pc - 4);
1007 gen_helper_pre_hvc(cpu_env);
1008 /* Otherwise we will treat this as a real exception which
1009 * happens after execution of the insn. (The distinction matters
1010 * for the PC value reported to the exception handler and also
1011 * for single stepping.)
1013 s->svc_imm = imm16;
1014 gen_set_pc_im(s, s->pc);
1015 s->is_jmp = DISAS_HVC;
1018 static inline void gen_smc(DisasContext *s)
1020 /* As with HVC, we may take an exception either before or after
1021 * the insn executes.
1023 TCGv_i32 tmp;
1025 gen_set_pc_im(s, s->pc - 4);
1026 tmp = tcg_const_i32(syn_aa32_smc());
1027 gen_helper_pre_smc(cpu_env, tmp);
1028 tcg_temp_free_i32(tmp);
1029 gen_set_pc_im(s, s->pc);
1030 s->is_jmp = DISAS_SMC;

static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_JUMP;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->is_jmp = DISAS_JUMP;
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_JUMP;
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
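
/* Both offset generators above decode the standard ARM addressing-mode
 * fields: bit 23 (U) selects add versus subtract, while bit 25 (for the
 * word/byte form) or bit 22 (for the halfword form) chooses between an
 * immediate and a possibly shifted register offset.
 */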
1124 static TCGv_ptr get_fpstatus_ptr(int neon)
1126 TCGv_ptr statusptr = tcg_temp_new_ptr();
1127 int offset;
1128 if (neon) {
1129 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1130 } else {
1131 offset = offsetof(CPUARMState, vfp.fp_status);
1133 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1134 return statusptr;
1137 #define VFP_OP2(name) \
1138 static inline void gen_vfp_##name(int dp) \
1140 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1141 if (dp) { \
1142 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1143 } else { \
1144 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1146 tcg_temp_free_ptr(fpst); \
1149 VFP_OP2(add)
1150 VFP_OP2(sub)
1151 VFP_OP2(mul)
1152 VFP_OP2(div)
1154 #undef VFP_OP2
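
/* VFP_OP2(add) above expands to gen_vfp_add(int dp), which applies
 * gen_helper_vfp_addd() or gen_helper_vfp_adds() to the implicit F0/F1
 * operand registers using the current fp_status rounding/exception state.
 */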

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

#define VFP_GEN_ITOF(name)                                        \
static inline void gen_vfp_##name(int dp, int neon)               \
{                                                                 \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                  \
    if (dp) {                                                     \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr);    \
    } else {                                                      \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);    \
    }                                                             \
    tcg_temp_free_ptr(statusptr);                                 \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name)                                        \
static inline void gen_vfp_##name(int dp, int neon)               \
{                                                                 \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                  \
    if (dp) {                                                     \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr);    \
    } else {                                                      \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);    \
    }                                                             \
    tcg_temp_free_ptr(statusptr);                                 \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name, round)                                       \
static inline void gen_vfp_##name(int dp, int shift, int neon)         \
{                                                                      \
    TCGv_i32 tmp_shift = tcg_const_i32(shift);                         \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                       \
    if (dp) {                                                          \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift,   \
                                        statusptr);                    \
    } else {                                                           \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift,   \
                                        statusptr);                    \
    }                                                                  \
    tcg_temp_free_i32(tmp_shift);                                      \
    tcg_temp_free_ptr(statusptr);                                      \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
    }
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
    }
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
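
/* Example: neon_reg_offset(1, 0) is single-precision register 2, i.e. the
 * low half of D1 (vfp.regs[1] + l.lower); pass 1 would select its high half.
 */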

static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT   (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name)                                         \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn)        \
{                                                               \
    iwmmxt_load_reg(cpu_V1, rn);                                \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1);           \
}

#define IWMMXT_OP_ENV(name)                                     \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn)        \
{                                                               \
    iwmmxt_load_reg(cpu_V1, rn);                                \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1);  \
}

#define IWMMXT_OP_ENV_SIZE(name)                                \
IWMMXT_OP_ENV(name##b)                                          \
IWMMXT_OP_ENV(name##w)                                          \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name)                                    \
static inline void gen_op_iwmmxt_##name##_M0(void)              \
{                                                               \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0);          \
}
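
/* For example, IWMMXT_OP_ENV_SIZE(cmpeq) below generates the three ops
 * gen_op_iwmmxt_cmpeqb_M0_wRn(), ..._cmpeqw_... and ..._cmpeql_..., each
 * of which loads wRn into cpu_V1 and calls the matching env-taking helper
 * with cpu_M0 as both destination and first operand.
 */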

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
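
/* gen_iwmmxt_address() follows the coprocessor load/store encoding:
 * bit 24 selects pre-indexing, bit 23 the offset sign (up/down), and
 * bit 21 base writeback; the 8-bit immediate is scaled by 4 when bit 8
 * is set.  A form with neither pre-indexing nor writeback is only
 * accepted with the up bit set, otherwise it is treated as undefined.
 */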

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                         /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                            /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                          /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {                     /* WLDRD */
                        gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
                        i = 0;
                    } else {                                    /* WLDRW wRd */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    }
                } else {
                    tmp = tcg_temp_new_i32();
                    if (insn & (1 << 22)) {                     /* WLDRH */
                        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                    } else {                                    /* WLDRB */
                        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                          /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_aa32_st32(tmp, addr, get_mem_index(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {                     /* WSTRD */
                        gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
                    } else {                                    /* WSTRW wRd */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                    }
                } else {
                    if (insn & (1 << 22)) {                     /* WSTRH */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st16(tmp, addr, get_mem_index(s));
                    } else {                                    /* WSTRB */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st8(tmp, addr, get_mem_index(s));
                    }
                }
            }
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                                 /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                                 /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                                 /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                                 /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                                 /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                                 /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                                     /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:             /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:             /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:             /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:             /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:             /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:             /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:             /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:             /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:             /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED_I32(tmp2);
            TCGV_UNUSED_I32(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free_i32(tmp3);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:             /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:             /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:             /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:             /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:             /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:             /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
2119 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2120 rd = (insn >> 12) & 0xf;
2121 rd0 = (insn >> 16) & 0xf;
2122 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2123 return 1;
2124 gen_op_iwmmxt_movq_M0_wRn(rd0);
2125 tmp = tcg_temp_new_i32();
2126 switch ((insn >> 22) & 3) {
2127 case 0:
2128 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2129 break;
2130 case 1:
2131 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2132 break;
2133 case 2:
2134 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2135 break;
2136 }
2137 store_reg(s, rd, tmp);
2138 break;
2139 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2140 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2141 wrd = (insn >> 12) & 0xf;
2142 rd0 = (insn >> 16) & 0xf;
2143 rd1 = (insn >> 0) & 0xf;
2144 gen_op_iwmmxt_movq_M0_wRn(rd0);
2145 switch ((insn >> 22) & 3) {
2146 case 0:
2147 if (insn & (1 << 21))
2148 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2149 else
2150 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2151 break;
2152 case 1:
2153 if (insn & (1 << 21))
2154 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2155 else
2156 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2157 break;
2158 case 2:
2159 if (insn & (1 << 21))
2160 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2161 else
2162 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2163 break;
2164 case 3:
2165 return 1;
2166 }
2167 gen_op_iwmmxt_movq_wRn_M0(wrd);
2168 gen_op_iwmmxt_set_mup();
2169 gen_op_iwmmxt_set_cup();
2170 break;
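/* As in WCMPGT above, most two-operand iwmmxt ops encode the element size in bits [23:22] (0 = bytes, 1 = halfwords, 2 = words, 3 = reserved -> UNDEF) and use bit 21 to choose between the signed and unsigned helpers. */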
2171 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2172 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2173 wrd = (insn >> 12) & 0xf;
2174 rd0 = (insn >> 16) & 0xf;
2175 gen_op_iwmmxt_movq_M0_wRn(rd0);
2176 switch ((insn >> 22) & 3) {
2177 case 0:
2178 if (insn & (1 << 21))
2179 gen_op_iwmmxt_unpacklsb_M0();
2180 else
2181 gen_op_iwmmxt_unpacklub_M0();
2182 break;
2183 case 1:
2184 if (insn & (1 << 21))
2185 gen_op_iwmmxt_unpacklsw_M0();
2186 else
2187 gen_op_iwmmxt_unpackluw_M0();
2188 break;
2189 case 2:
2190 if (insn & (1 << 21))
2191 gen_op_iwmmxt_unpacklsl_M0();
2192 else
2193 gen_op_iwmmxt_unpacklul_M0();
2194 break;
2195 case 3:
2196 return 1;
2197 }
2198 gen_op_iwmmxt_movq_wRn_M0(wrd);
2199 gen_op_iwmmxt_set_mup();
2200 gen_op_iwmmxt_set_cup();
2201 break;
2202 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2203 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2204 wrd = (insn >> 12) & 0xf;
2205 rd0 = (insn >> 16) & 0xf;
2206 gen_op_iwmmxt_movq_M0_wRn(rd0);
2207 switch ((insn >> 22) & 3) {
2208 case 0:
2209 if (insn & (1 << 21))
2210 gen_op_iwmmxt_unpackhsb_M0();
2211 else
2212 gen_op_iwmmxt_unpackhub_M0();
2213 break;
2214 case 1:
2215 if (insn & (1 << 21))
2216 gen_op_iwmmxt_unpackhsw_M0();
2217 else
2218 gen_op_iwmmxt_unpackhuw_M0();
2219 break;
2220 case 2:
2221 if (insn & (1 << 21))
2222 gen_op_iwmmxt_unpackhsl_M0();
2223 else
2224 gen_op_iwmmxt_unpackhul_M0();
2225 break;
2226 case 3:
2227 return 1;
2228 }
2229 gen_op_iwmmxt_movq_wRn_M0(wrd);
2230 gen_op_iwmmxt_set_mup();
2231 gen_op_iwmmxt_set_cup();
2232 break;
2233 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2234 case 0x214: case 0x614: case 0xa14: case 0xe14:
2235 if (((insn >> 22) & 3) == 0)
2236 return 1;
2237 wrd = (insn >> 12) & 0xf;
2238 rd0 = (insn >> 16) & 0xf;
2239 gen_op_iwmmxt_movq_M0_wRn(rd0);
2240 tmp = tcg_temp_new_i32();
2241 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2242 tcg_temp_free_i32(tmp);
2243 return 1;
2244 }
2245 switch ((insn >> 22) & 3) {
2246 case 1:
2247 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2248 break;
2249 case 2:
2250 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2251 break;
2252 case 3:
2253 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2254 break;
2255 }
2256 tcg_temp_free_i32(tmp);
2257 gen_op_iwmmxt_movq_wRn_M0(wrd);
2258 gen_op_iwmmxt_set_mup();
2259 gen_op_iwmmxt_set_cup();
2260 break;
2261 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2262 case 0x014: case 0x414: case 0x814: case 0xc14:
2263 if (((insn >> 22) & 3) == 0)
2264 return 1;
2265 wrd = (insn >> 12) & 0xf;
2266 rd0 = (insn >> 16) & 0xf;
2267 gen_op_iwmmxt_movq_M0_wRn(rd0);
2268 tmp = tcg_temp_new_i32();
2269 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2270 tcg_temp_free_i32(tmp);
2271 return 1;
2272 }
2273 switch ((insn >> 22) & 3) {
2274 case 1:
2275 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2276 break;
2277 case 2:
2278 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2279 break;
2280 case 3:
2281 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2282 break;
2283 }
2284 tcg_temp_free_i32(tmp);
2285 gen_op_iwmmxt_movq_wRn_M0(wrd);
2286 gen_op_iwmmxt_set_mup();
2287 gen_op_iwmmxt_set_cup();
2288 break;
2289 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2290 case 0x114: case 0x514: case 0x914: case 0xd14:
2291 if (((insn >> 22) & 3) == 0)
2292 return 1;
2293 wrd = (insn >> 12) & 0xf;
2294 rd0 = (insn >> 16) & 0xf;
2295 gen_op_iwmmxt_movq_M0_wRn(rd0);
2296 tmp = tcg_temp_new_i32();
2297 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2298 tcg_temp_free_i32(tmp);
2299 return 1;
2300 }
2301 switch ((insn >> 22) & 3) {
2302 case 1:
2303 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2304 break;
2305 case 2:
2306 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2307 break;
2308 case 3:
2309 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2310 break;
2311 }
2312 tcg_temp_free_i32(tmp);
2313 gen_op_iwmmxt_movq_wRn_M0(wrd);
2314 gen_op_iwmmxt_set_mup();
2315 gen_op_iwmmxt_set_cup();
2316 break;
2317 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2318 case 0x314: case 0x714: case 0xb14: case 0xf14:
2319 if (((insn >> 22) & 3) == 0)
2320 return 1;
2321 wrd = (insn >> 12) & 0xf;
2322 rd0 = (insn >> 16) & 0xf;
2323 gen_op_iwmmxt_movq_M0_wRn(rd0);
2324 tmp = tcg_temp_new_i32();
2325 switch ((insn >> 22) & 3) {
2326 case 1:
2327 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2328 tcg_temp_free_i32(tmp);
2329 return 1;
2330 }
2331 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2332 break;
2333 case 2:
2334 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2335 tcg_temp_free_i32(tmp);
2336 return 1;
2337 }
2338 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2339 break;
2340 case 3:
2341 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2342 tcg_temp_free_i32(tmp);
2343 return 1;
2344 }
2345 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2346 break;
2347 }
2348 tcg_temp_free_i32(tmp);
2349 gen_op_iwmmxt_movq_wRn_M0(wrd);
2350 gen_op_iwmmxt_set_mup();
2351 gen_op_iwmmxt_set_cup();
2352 break;
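/* Note the different masks passed to gen_iwmmxt_shift(): the WSRL/WSRA/WSLL cases accept a full 8-bit shift count (0xff), while WROR above limits the count to the element width (0xf/0x1f/0x3f for h/w/q). */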
2353 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2354 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2355 wrd = (insn >> 12) & 0xf;
2356 rd0 = (insn >> 16) & 0xf;
2357 rd1 = (insn >> 0) & 0xf;
2358 gen_op_iwmmxt_movq_M0_wRn(rd0);
2359 switch ((insn >> 22) & 3) {
2360 case 0:
2361 if (insn & (1 << 21))
2362 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2363 else
2364 gen_op_iwmmxt_minub_M0_wRn(rd1);
2365 break;
2366 case 1:
2367 if (insn & (1 << 21))
2368 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2369 else
2370 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2371 break;
2372 case 2:
2373 if (insn & (1 << 21))
2374 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2375 else
2376 gen_op_iwmmxt_minul_M0_wRn(rd1);
2377 break;
2378 case 3:
2379 return 1;
2380 }
2381 gen_op_iwmmxt_movq_wRn_M0(wrd);
2382 gen_op_iwmmxt_set_mup();
2383 break;
2384 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2385 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2386 wrd = (insn >> 12) & 0xf;
2387 rd0 = (insn >> 16) & 0xf;
2388 rd1 = (insn >> 0) & 0xf;
2389 gen_op_iwmmxt_movq_M0_wRn(rd0);
2390 switch ((insn >> 22) & 3) {
2391 case 0:
2392 if (insn & (1 << 21))
2393 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2394 else
2395 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2396 break;
2397 case 1:
2398 if (insn & (1 << 21))
2399 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2400 else
2401 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2402 break;
2403 case 2:
2404 if (insn & (1 << 21))
2405 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2406 else
2407 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2408 break;
2409 case 3:
2410 return 1;
2411 }
2412 gen_op_iwmmxt_movq_wRn_M0(wrd);
2413 gen_op_iwmmxt_set_mup();
2414 break;
2415 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2416 case 0x402: case 0x502: case 0x602: case 0x702:
2417 wrd = (insn >> 12) & 0xf;
2418 rd0 = (insn >> 16) & 0xf;
2419 rd1 = (insn >> 0) & 0xf;
2420 gen_op_iwmmxt_movq_M0_wRn(rd0);
2421 tmp = tcg_const_i32((insn >> 20) & 3);
2422 iwmmxt_load_reg(cpu_V1, rd1);
2423 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2424 tcg_temp_free_i32(tmp);
2425 gen_op_iwmmxt_movq_wRn_M0(wrd);
2426 gen_op_iwmmxt_set_mup();
2427 break;
2428 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2429 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2430 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2431 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2432 wrd = (insn >> 12) & 0xf;
2433 rd0 = (insn >> 16) & 0xf;
2434 rd1 = (insn >> 0) & 0xf;
2435 gen_op_iwmmxt_movq_M0_wRn(rd0);
2436 switch ((insn >> 20) & 0xf) {
2437 case 0x0:
2438 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2439 break;
2440 case 0x1:
2441 gen_op_iwmmxt_subub_M0_wRn(rd1);
2442 break;
2443 case 0x3:
2444 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2445 break;
2446 case 0x4:
2447 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2448 break;
2449 case 0x5:
2450 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2451 break;
2452 case 0x7:
2453 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2454 break;
2455 case 0x8:
2456 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2457 break;
2458 case 0x9:
2459 gen_op_iwmmxt_subul_M0_wRn(rd1);
2460 break;
2461 case 0xb:
2462 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2463 break;
2464 default:
2465 return 1;
2466 }
2467 gen_op_iwmmxt_movq_wRn_M0(wrd);
2468 gen_op_iwmmxt_set_mup();
2469 gen_op_iwmmxt_set_cup();
2470 break;
2471 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2472 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2473 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2474 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2475 wrd = (insn >> 12) & 0xf;
2476 rd0 = (insn >> 16) & 0xf;
2477 gen_op_iwmmxt_movq_M0_wRn(rd0);
2478 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2479 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2480 tcg_temp_free_i32(tmp);
2481 gen_op_iwmmxt_movq_wRn_M0(wrd);
2482 gen_op_iwmmxt_set_mup();
2483 gen_op_iwmmxt_set_cup();
2484 break;
2485 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2486 case 0x418: case 0x518: case 0x618: case 0x718:
2487 case 0x818: case 0x918: case 0xa18: case 0xb18:
2488 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2489 wrd = (insn >> 12) & 0xf;
2490 rd0 = (insn >> 16) & 0xf;
2491 rd1 = (insn >> 0) & 0xf;
2492 gen_op_iwmmxt_movq_M0_wRn(rd0);
2493 switch ((insn >> 20) & 0xf) {
2494 case 0x0:
2495 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2496 break;
2497 case 0x1:
2498 gen_op_iwmmxt_addub_M0_wRn(rd1);
2499 break;
2500 case 0x3:
2501 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2502 break;
2503 case 0x4:
2504 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2505 break;
2506 case 0x5:
2507 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2508 break;
2509 case 0x7:
2510 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2511 break;
2512 case 0x8:
2513 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2514 break;
2515 case 0x9:
2516 gen_op_iwmmxt_addul_M0_wRn(rd1);
2517 break;
2518 case 0xb:
2519 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2520 break;
2521 default:
2522 return 1;
2523 }
2524 gen_op_iwmmxt_movq_wRn_M0(wrd);
2525 gen_op_iwmmxt_set_mup();
2526 gen_op_iwmmxt_set_cup();
2527 break;
2528 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2529 case 0x408: case 0x508: case 0x608: case 0x708:
2530 case 0x808: case 0x908: case 0xa08: case 0xb08:
2531 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2532 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2533 return 1;
2534 wrd = (insn >> 12) & 0xf;
2535 rd0 = (insn >> 16) & 0xf;
2536 rd1 = (insn >> 0) & 0xf;
2537 gen_op_iwmmxt_movq_M0_wRn(rd0);
2538 switch ((insn >> 22) & 3) {
2539 case 1:
2540 if (insn & (1 << 21))
2541 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2542 else
2543 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2544 break;
2545 case 2:
2546 if (insn & (1 << 21))
2547 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2548 else
2549 gen_op_iwmmxt_packul_M0_wRn(rd1);
2550 break;
2551 case 3:
2552 if (insn & (1 << 21))
2553 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2554 else
2555 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2556 break;
2557 }
2558 gen_op_iwmmxt_movq_wRn_M0(wrd);
2559 gen_op_iwmmxt_set_mup();
2560 gen_op_iwmmxt_set_cup();
2561 break;
2562 case 0x201: case 0x203: case 0x205: case 0x207:
2563 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2564 case 0x211: case 0x213: case 0x215: case 0x217:
2565 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2566 wrd = (insn >> 5) & 0xf;
2567 rd0 = (insn >> 12) & 0xf;
2568 rd1 = (insn >> 0) & 0xf;
2569 if (rd0 == 0xf || rd1 == 0xf)
2570 return 1;
2571 gen_op_iwmmxt_movq_M0_wRn(wrd);
2572 tmp = load_reg(s, rd0);
2573 tmp2 = load_reg(s, rd1);
2574 switch ((insn >> 16) & 0xf) {
2575 case 0x0: /* TMIA */
2576 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2577 break;
2578 case 0x8: /* TMIAPH */
2579 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2580 break;
2581 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2582 if (insn & (1 << 16))
2583 tcg_gen_shri_i32(tmp, tmp, 16);
2584 if (insn & (1 << 17))
2585 tcg_gen_shri_i32(tmp2, tmp2, 16);
2586 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2587 break;
2588 default:
2589 tcg_temp_free_i32(tmp2);
2590 tcg_temp_free_i32(tmp);
2591 return 1;
2592 }
2593 tcg_temp_free_i32(tmp2);
2594 tcg_temp_free_i32(tmp);
2595 gen_op_iwmmxt_movq_wRn_M0(wrd);
2596 gen_op_iwmmxt_set_mup();
2597 break;
2598 default:
2599 return 1;
2600 }
2602 return 0;
2603 }
2605 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2606 (i.e. an undefined instruction). */
2607 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2608 {
2609 int acc, rd0, rd1, rdhi, rdlo;
2610 TCGv_i32 tmp, tmp2;
2612 if ((insn & 0x0ff00f10) == 0x0e200010) {
2613 /* Multiply with Internal Accumulate Format */
2614 rd0 = (insn >> 12) & 0xf;
2615 rd1 = insn & 0xf;
2616 acc = (insn >> 5) & 7;
2618 if (acc != 0)
2619 return 1;
2621 tmp = load_reg(s, rd0);
2622 tmp2 = load_reg(s, rd1);
2623 switch ((insn >> 16) & 0xf) {
2624 case 0x0: /* MIA */
2625 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2626 break;
2627 case 0x8: /* MIAPH */
2628 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2629 break;
2630 case 0xc: /* MIABB */
2631 case 0xd: /* MIABT */
2632 case 0xe: /* MIATB */
2633 case 0xf: /* MIATT */
2634 if (insn & (1 << 16))
2635 tcg_gen_shri_i32(tmp, tmp, 16);
2636 if (insn & (1 << 17))
2637 tcg_gen_shri_i32(tmp2, tmp2, 16);
2638 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2639 break;
2640 default:
2641 return 1;
2642 }
2643 tcg_temp_free_i32(tmp2);
2644 tcg_temp_free_i32(tmp);
2646 gen_op_iwmmxt_movq_wRn_M0(acc);
2647 return 0;
2648 }
2650 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2651 /* Internal Accumulator Access Format */
2652 rdhi = (insn >> 16) & 0xf;
2653 rdlo = (insn >> 12) & 0xf;
2654 acc = insn & 7;
2656 if (acc != 0)
2657 return 1;
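/* The XScale internal accumulator acc0 is only 40 bits wide, which is why the MRA path below masks RdHi down to (1 << (40 - 32)) - 1 after splitting the 64-bit value across RdLo/RdHi. */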
2659 if (insn & ARM_CP_RW_BIT) { /* MRA */
2660 iwmmxt_load_reg(cpu_V0, acc);
2661 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2662 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2663 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
2664 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2665 } else { /* MAR */
2666 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2667 iwmmxt_store_reg(cpu_V0, acc);
2668 }
2669 return 0;
2670 }
2672 return 1;
2673 }
2675 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2676 #define VFP_SREG(insn, bigbit, smallbit) \
2677 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2678 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2679 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2680 reg = (((insn) >> (bigbit)) & 0x0f) \
2681 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2682 } else { \
2683 if (insn & (1 << (smallbit))) \
2684 return 1; \
2685 reg = ((insn) >> (bigbit)) & 0x0f; \
2686 }} while (0)
2688 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2689 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2690 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2691 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2692 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2693 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
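/* VFP register numbers are encoded as a 4-bit field plus one extra bit: for single precision the extra bit is the low bit of the 5-bit S register number, while for double precision it selects D16-D31, which exist only from VFP3 on (hence the UNDEF in VFP_DREG for earlier versions). */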
2695 /* Move between integer and VFP cores. */
2696 static TCGv_i32 gen_vfp_mrs(void)
2697 {
2698 TCGv_i32 tmp = tcg_temp_new_i32();
2699 tcg_gen_mov_i32(tmp, cpu_F0s);
2700 return tmp;
2701 }
2703 static void gen_vfp_msr(TCGv_i32 tmp)
2704 {
2705 tcg_gen_mov_i32(cpu_F0s, tmp);
2706 tcg_temp_free_i32(tmp);
2707 }
2709 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2710 {
2711 TCGv_i32 tmp = tcg_temp_new_i32();
2712 if (shift)
2713 tcg_gen_shri_i32(var, var, shift);
2714 tcg_gen_ext8u_i32(var, var);
2715 tcg_gen_shli_i32(tmp, var, 8);
2716 tcg_gen_or_i32(var, var, tmp);
2717 tcg_gen_shli_i32(tmp, var, 16);
2718 tcg_gen_or_i32(var, var, tmp);
2719 tcg_temp_free_i32(tmp);
2720 }
2722 static void gen_neon_dup_low16(TCGv_i32 var)
2723 {
2724 TCGv_i32 tmp = tcg_temp_new_i32();
2725 tcg_gen_ext16u_i32(var, var);
2726 tcg_gen_shli_i32(tmp, var, 16);
2727 tcg_gen_or_i32(var, var, tmp);
2728 tcg_temp_free_i32(tmp);
2729 }
2731 static void gen_neon_dup_high16(TCGv_i32 var)
2732 {
2733 TCGv_i32 tmp = tcg_temp_new_i32();
2734 tcg_gen_andi_i32(var, var, 0xffff0000);
2735 tcg_gen_shri_i32(tmp, var, 16);
2736 tcg_gen_or_i32(var, var, tmp);
2737 tcg_temp_free_i32(tmp);
2738 }
2740 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2741 {
2742 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2743 TCGv_i32 tmp = tcg_temp_new_i32();
2744 switch (size) {
2745 case 0:
2746 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2747 gen_neon_dup_u8(tmp, 0);
2748 break;
2749 case 1:
2750 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2751 gen_neon_dup_low16(tmp);
2752 break;
2753 case 2:
2754 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2755 break;
2756 default: /* Avoid compiler warnings. */
2757 abort();
2758 }
2759 return tmp;
2760 }
2762 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2763 uint32_t dp)
2764 {
2765 uint32_t cc = extract32(insn, 20, 2);
2767 if (dp) {
2768 TCGv_i64 frn, frm, dest;
2769 TCGv_i64 tmp, zero, zf, nf, vf;
2771 zero = tcg_const_i64(0);
2773 frn = tcg_temp_new_i64();
2774 frm = tcg_temp_new_i64();
2775 dest = tcg_temp_new_i64();
2777 zf = tcg_temp_new_i64();
2778 nf = tcg_temp_new_i64();
2779 vf = tcg_temp_new_i64();
2781 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2782 tcg_gen_ext_i32_i64(nf, cpu_NF);
2783 tcg_gen_ext_i32_i64(vf, cpu_VF);
2785 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2786 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2787 switch (cc) {
2788 case 0: /* eq: Z */
2789 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2790 frn, frm);
2791 break;
2792 case 1: /* vs: V */
2793 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
2794 frn, frm);
2795 break;
2796 case 2: /* ge: N == V -> N ^ V == 0 */
2797 tmp = tcg_temp_new_i64();
2798 tcg_gen_xor_i64(tmp, vf, nf);
2799 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2800 frn, frm);
2801 tcg_temp_free_i64(tmp);
2802 break;
2803 case 3: /* gt: !Z && N == V */
2804 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
2805 frn, frm);
2806 tmp = tcg_temp_new_i64();
2807 tcg_gen_xor_i64(tmp, vf, nf);
2808 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2809 dest, frm);
2810 tcg_temp_free_i64(tmp);
2811 break;
2812 }
2813 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2814 tcg_temp_free_i64(frn);
2815 tcg_temp_free_i64(frm);
2816 tcg_temp_free_i64(dest);
2818 tcg_temp_free_i64(zf);
2819 tcg_temp_free_i64(nf);
2820 tcg_temp_free_i64(vf);
2822 tcg_temp_free_i64(zero);
2823 } else {
2824 TCGv_i32 frn, frm, dest;
2825 TCGv_i32 tmp, zero;
2827 zero = tcg_const_i32(0);
2829 frn = tcg_temp_new_i32();
2830 frm = tcg_temp_new_i32();
2831 dest = tcg_temp_new_i32();
2832 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2833 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2834 switch (cc) {
2835 case 0: /* eq: Z */
2836 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
2837 frn, frm);
2838 break;
2839 case 1: /* vs: V */
2840 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
2841 frn, frm);
2842 break;
2843 case 2: /* ge: N == V -> N ^ V == 0 */
2844 tmp = tcg_temp_new_i32();
2845 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2846 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2847 frn, frm);
2848 tcg_temp_free_i32(tmp);
2849 break;
2850 case 3: /* gt: !Z && N == V */
2851 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
2852 frn, frm);
2853 tmp = tcg_temp_new_i32();
2854 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2855 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2856 dest, frm);
2857 tcg_temp_free_i32(tmp);
2858 break;
2859 }
2860 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2861 tcg_temp_free_i32(frn);
2862 tcg_temp_free_i32(frm);
2863 tcg_temp_free_i32(dest);
2865 tcg_temp_free_i32(zero);
2866 }
2868 return 0;
2869 }
2871 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2872 uint32_t rm, uint32_t dp)
2873 {
2874 uint32_t vmin = extract32(insn, 6, 1);
2875 TCGv_ptr fpst = get_fpstatus_ptr(0);
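/* The vfp_minnum/maxnum helpers implement the IEEE 754-2008 minNum/maxNum operations used by VMINNM/VMAXNM: if exactly one operand is a quiet NaN, the numeric operand is returned rather than the NaN. */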
2877 if (dp) {
2878 TCGv_i64 frn, frm, dest;
2880 frn = tcg_temp_new_i64();
2881 frm = tcg_temp_new_i64();
2882 dest = tcg_temp_new_i64();
2884 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2885 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2886 if (vmin) {
2887 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
2888 } else {
2889 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
2890 }
2891 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2892 tcg_temp_free_i64(frn);
2893 tcg_temp_free_i64(frm);
2894 tcg_temp_free_i64(dest);
2895 } else {
2896 TCGv_i32 frn, frm, dest;
2898 frn = tcg_temp_new_i32();
2899 frm = tcg_temp_new_i32();
2900 dest = tcg_temp_new_i32();
2902 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2903 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2904 if (vmin) {
2905 gen_helper_vfp_minnums(dest, frn, frm, fpst);
2906 } else {
2907 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
2908 }
2909 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2910 tcg_temp_free_i32(frn);
2911 tcg_temp_free_i32(frm);
2912 tcg_temp_free_i32(dest);
2913 }
2915 tcg_temp_free_ptr(fpst);
2916 return 0;
2917 }
2919 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2920 int rounding)
2921 {
2922 TCGv_ptr fpst = get_fpstatus_ptr(0);
2923 TCGv_i32 tcg_rmode;
2925 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2926 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2928 if (dp) {
2929 TCGv_i64 tcg_op;
2930 TCGv_i64 tcg_res;
2931 tcg_op = tcg_temp_new_i64();
2932 tcg_res = tcg_temp_new_i64();
2933 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2934 gen_helper_rintd(tcg_res, tcg_op, fpst);
2935 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2936 tcg_temp_free_i64(tcg_op);
2937 tcg_temp_free_i64(tcg_res);
2938 } else {
2939 TCGv_i32 tcg_op;
2940 TCGv_i32 tcg_res;
2941 tcg_op = tcg_temp_new_i32();
2942 tcg_res = tcg_temp_new_i32();
2943 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2944 gen_helper_rints(tcg_res, tcg_op, fpst);
2945 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2946 tcg_temp_free_i32(tcg_op);
2947 tcg_temp_free_i32(tcg_res);
2948 }
2950 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2951 tcg_temp_free_i32(tcg_rmode);
2953 tcg_temp_free_ptr(fpst);
2954 return 0;
2955 }
2957 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2958 int rounding)
2959 {
2960 bool is_signed = extract32(insn, 7, 1);
2961 TCGv_ptr fpst = get_fpstatus_ptr(0);
2962 TCGv_i32 tcg_rmode, tcg_shift;
2964 tcg_shift = tcg_const_i32(0);
2966 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2967 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
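/* set_rmode installs the requested rounding mode in the fp status and hands back the previous mode in tcg_rmode; the mirror-image call near the end of the function restores it. */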
2969 if (dp) {
2970 TCGv_i64 tcg_double, tcg_res;
2971 TCGv_i32 tcg_tmp;
2972 /* Rd is encoded as a single precision register even when the source
2973 * is double precision.
2974 */
2975 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
2976 tcg_double = tcg_temp_new_i64();
2977 tcg_res = tcg_temp_new_i64();
2978 tcg_tmp = tcg_temp_new_i32();
2979 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
2980 if (is_signed) {
2981 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
2982 } else {
2983 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
2984 }
2985 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
2986 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
2987 tcg_temp_free_i32(tcg_tmp);
2988 tcg_temp_free_i64(tcg_res);
2989 tcg_temp_free_i64(tcg_double);
2990 } else {
2991 TCGv_i32 tcg_single, tcg_res;
2992 tcg_single = tcg_temp_new_i32();
2993 tcg_res = tcg_temp_new_i32();
2994 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
2995 if (is_signed) {
2996 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
2997 } else {
2998 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
2999 }
3000 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3001 tcg_temp_free_i32(tcg_res);
3002 tcg_temp_free_i32(tcg_single);
3003 }
3005 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3006 tcg_temp_free_i32(tcg_rmode);
3008 tcg_temp_free_i32(tcg_shift);
3010 tcg_temp_free_ptr(fpst);
3012 return 0;
3013 }
3015 /* Table for converting the most common AArch32 encoding of
3016 * rounding mode to arm_fprounding order (which matches the
3017 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3018 */
3019 static const uint8_t fp_decode_rm[] = {
3020 FPROUNDING_TIEAWAY,
3021 FPROUNDING_TIEEVEN,
3022 FPROUNDING_POSINF,
3023 FPROUNDING_NEGINF,
3024 };
3026 static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
3027 {
3028 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3030 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3031 return 1;
3032 }
3034 if (dp) {
3035 VFP_DREG_D(rd, insn);
3036 VFP_DREG_N(rn, insn);
3037 VFP_DREG_M(rm, insn);
3038 } else {
3039 rd = VFP_SREG_D(insn);
3040 rn = VFP_SREG_N(insn);
3041 rm = VFP_SREG_M(insn);
3042 }
3044 if ((insn & 0x0f800e50) == 0x0e000a00) {
3045 return handle_vsel(insn, rd, rn, rm, dp);
3046 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3047 return handle_vminmaxnm(insn, rd, rn, rm, dp);
3048 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3049 /* VRINTA, VRINTN, VRINTP, VRINTM */
3050 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3051 return handle_vrint(insn, rd, rm, dp, rounding);
3052 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3053 /* VCVTA, VCVTN, VCVTP, VCVTM */
3054 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3055 return handle_vcvt(insn, rd, rm, dp, rounding);
3056 }
3057 return 1;
3058 }
3060 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3061 (i.e. an undefined instruction). */
3062 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3063 {
3064 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3065 int dp, veclen;
3066 TCGv_i32 addr;
3067 TCGv_i32 tmp;
3068 TCGv_i32 tmp2;
3070 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
3071 return 1;
3072 }
3074 /* FIXME: this access check should not take precedence over UNDEF
3075 * for invalid encodings; we will generate incorrect syndrome information
3076 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3077 */
3078 if (s->fp_excp_el) {
3079 gen_exception_insn(s, 4, EXCP_UDEF,
3080 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
3081 return 0;
3082 }
3084 if (!s->vfp_enabled) {
3085 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3086 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3087 return 1;
3088 rn = (insn >> 16) & 0xf;
3089 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3090 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
3091 return 1;
3092 }
3093 }
3095 if (extract32(insn, 28, 4) == 0xf) {
3096 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3097 * only used in v8 and above.
3098 */
3099 return disas_vfp_v8_insn(s, insn);
3100 }
3102 dp = ((insn & 0xf00) == 0xb00);
3103 switch ((insn >> 24) & 0xf) {
3104 case 0xe:
3105 if (insn & (1 << 4)) {
3106 /* single register transfer */
3107 rd = (insn >> 12) & 0xf;
3108 if (dp) {
3109 int size;
3110 int pass;
3112 VFP_DREG_N(rn, insn);
3113 if (insn & 0xf)
3114 return 1;
3115 if (insn & 0x00c00060
3116 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
3117 return 1;
3120 pass = (insn >> 21) & 1;
3121 if (insn & (1 << 22)) {
3122 size = 0;
3123 offset = ((insn >> 5) & 3) * 8;
3124 } else if (insn & (1 << 5)) {
3125 size = 1;
3126 offset = (insn & (1 << 6)) ? 16 : 0;
3127 } else {
3128 size = 2;
3129 offset = 0;
3131 if (insn & ARM_CP_RW_BIT) {
3132 /* vfp->arm */
3133 tmp = neon_load_reg(rn, pass);
3134 switch (size) {
3135 case 0:
3136 if (offset)
3137 tcg_gen_shri_i32(tmp, tmp, offset);
3138 if (insn & (1 << 23))
3139 gen_uxtb(tmp);
3140 else
3141 gen_sxtb(tmp);
3142 break;
3143 case 1:
3144 if (insn & (1 << 23)) {
3145 if (offset) {
3146 tcg_gen_shri_i32(tmp, tmp, 16);
3147 } else {
3148 gen_uxth(tmp);
3149 }
3150 } else {
3151 if (offset) {
3152 tcg_gen_sari_i32(tmp, tmp, 16);
3153 } else {
3154 gen_sxth(tmp);
3155 }
3156 }
3157 break;
3158 case 2:
3159 break;
3160 }
3161 store_reg(s, rd, tmp);
3162 } else {
3163 /* arm->vfp */
3164 tmp = load_reg(s, rd);
3165 if (insn & (1 << 23)) {
3166 /* VDUP */
3167 if (size == 0) {
3168 gen_neon_dup_u8(tmp, 0);
3169 } else if (size == 1) {
3170 gen_neon_dup_low16(tmp);
3171 }
3172 for (n = 0; n <= pass * 2; n++) {
3173 tmp2 = tcg_temp_new_i32();
3174 tcg_gen_mov_i32(tmp2, tmp);
3175 neon_store_reg(rn, n, tmp2);
3176 }
3177 neon_store_reg(rn, n, tmp);
3178 } else {
3179 /* VMOV */
3180 switch (size) {
3181 case 0:
3182 tmp2 = neon_load_reg(rn, pass);
3183 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
3184 tcg_temp_free_i32(tmp2);
3185 break;
3186 case 1:
3187 tmp2 = neon_load_reg(rn, pass);
3188 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
3189 tcg_temp_free_i32(tmp2);
3190 break;
3191 case 2:
3192 break;
3193 }
3194 neon_store_reg(rn, pass, tmp);
3195 }
3196 }
3197 } else { /* !dp */
3198 if ((insn & 0x6f) != 0x00)
3199 return 1;
3200 rn = VFP_SREG_N(insn);
3201 if (insn & ARM_CP_RW_BIT) {
3202 /* vfp->arm */
3203 if (insn & (1 << 21)) {
3204 /* system register */
3205 rn >>= 1;
3207 switch (rn) {
3208 case ARM_VFP_FPSID:
3209 /* VFP2 allows access to FPSID from userspace.
3210 VFP3 restricts all id registers to privileged
3211 accesses. */
3212 if (IS_USER(s)
3213 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3214 return 1;
3215 }
3216 tmp = load_cpu_field(vfp.xregs[rn]);
3217 break;
3218 case ARM_VFP_FPEXC:
3219 if (IS_USER(s))
3220 return 1;
3221 tmp = load_cpu_field(vfp.xregs[rn]);
3222 break;
3223 case ARM_VFP_FPINST:
3224 case ARM_VFP_FPINST2:
3225 /* Not present in VFP3. */
3226 if (IS_USER(s)
3227 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3228 return 1;
3229 }
3230 tmp = load_cpu_field(vfp.xregs[rn]);
3231 break;
3232 case ARM_VFP_FPSCR:
3233 if (rd == 15) {
3234 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3235 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3236 } else {
3237 tmp = tcg_temp_new_i32();
3238 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3239 }
3240 break;
3241 case ARM_VFP_MVFR2:
3242 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3243 return 1;
3244 }
3245 /* fall through */
3246 case ARM_VFP_MVFR0:
3247 case ARM_VFP_MVFR1:
3248 if (IS_USER(s)
3249 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
3250 return 1;
3251 }
3252 tmp = load_cpu_field(vfp.xregs[rn]);
3253 break;
3254 default:
3255 return 1;
3256 }
3257 } else {
3258 gen_mov_F0_vreg(0, rn);
3259 tmp = gen_vfp_mrs();
3260 }
3261 if (rd == 15) {
3262 /* Set the 4 flag bits in the CPSR. */
3263 gen_set_nzcv(tmp);
3264 tcg_temp_free_i32(tmp);
3265 } else {
3266 store_reg(s, rd, tmp);
3267 }
3268 } else {
3269 /* arm->vfp */
3270 if (insn & (1 << 21)) {
3271 rn >>= 1;
3272 /* system register */
3273 switch (rn) {
3274 case ARM_VFP_FPSID:
3275 case ARM_VFP_MVFR0:
3276 case ARM_VFP_MVFR1:
3277 /* Writes are ignored. */
3278 break;
3279 case ARM_VFP_FPSCR:
3280 tmp = load_reg(s, rd);
3281 gen_helper_vfp_set_fpscr(cpu_env, tmp);
3282 tcg_temp_free_i32(tmp);
3283 gen_lookup_tb(s);
3284 break;
3285 case ARM_VFP_FPEXC:
3286 if (IS_USER(s))
3287 return 1;
3288 /* TODO: VFP subarchitecture support.
3289 * For now, keep the EN bit only */
3290 tmp = load_reg(s, rd);
3291 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
3292 store_cpu_field(tmp, vfp.xregs[rn]);
3293 gen_lookup_tb(s);
3294 break;
3295 case ARM_VFP_FPINST:
3296 case ARM_VFP_FPINST2:
3297 if (IS_USER(s)) {
3298 return 1;
3299 }
3300 tmp = load_reg(s, rd);
3301 store_cpu_field(tmp, vfp.xregs[rn]);
3302 break;
3303 default:
3304 return 1;
3306 } else {
3307 tmp = load_reg(s, rd);
3308 gen_vfp_msr(tmp);
3309 gen_mov_vreg_F0(0, rn);
3310 }
3311 }
3312 }
3313 } else {
3314 /* data processing */
3315 /* The opcode is in bits 23, 21, 20 and 6. */
3316 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3317 if (dp) {
3318 if (op == 15) {
3319 /* rn is opcode */
3320 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3321 } else {
3322 /* rn is register number */
3323 VFP_DREG_N(rn, insn);
3326 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3327 ((rn & 0x1e) == 0x6))) {
3328 /* Integer or single/half precision destination. */
3329 rd = VFP_SREG_D(insn);
3330 } else {
3331 VFP_DREG_D(rd, insn);
3332 }
3333 if (op == 15 &&
3334 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3335 ((rn & 0x1e) == 0x4))) {
3336 /* VCVT from int or half precision is always from S reg
3337 * regardless of dp bit. VCVT with immediate frac_bits
3338 * has same format as SREG_M.
3339 */
3340 rm = VFP_SREG_M(insn);
3341 } else {
3342 VFP_DREG_M(rm, insn);
3343 }
3344 } else {
3345 rn = VFP_SREG_N(insn);
3346 if (op == 15 && rn == 15) {
3347 /* Double precision destination. */
3348 VFP_DREG_D(rd, insn);
3349 } else {
3350 rd = VFP_SREG_D(insn);
3351 }
3352 /* NB that we implicitly rely on the encoding for the frac_bits
3353 * in VCVT of fixed to float being the same as that of an SREG_M
3354 */
3355 rm = VFP_SREG_M(insn);
3356 }
3358 veclen = s->vec_len;
3359 if (op == 15 && rn > 3)
3360 veclen = 0;
3362 /* Shut up compiler warnings. */
3363 delta_m = 0;
3364 delta_d = 0;
3365 bank_mask = 0;
3367 if (veclen > 0) {
3368 if (dp)
3369 bank_mask = 0xc;
3370 else
3371 bank_mask = 0x18;
3373 /* Figure out what type of vector operation this is. */
3374 if ((rd & bank_mask) == 0) {
3375 /* scalar */
3376 veclen = 0;
3377 } else {
3378 if (dp)
3379 delta_d = (s->vec_stride >> 1) + 1;
3380 else
3381 delta_d = s->vec_stride + 1;
3383 if ((rm & bank_mask) == 0) {
3384 /* mixed scalar/vector */
3385 delta_m = 0;
3386 } else {
3387 /* vector */
3388 delta_m = delta_d;
3389 }
3390 }
3391 }
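/* With a nonzero vector length the operation below is repeated veclen times, stepping the D/M register indices by delta_d/delta_m within their bank each time round the for(;;) loop. */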
3393 /* Load the initial operands. */
3394 if (op == 15) {
3395 switch (rn) {
3396 case 16:
3397 case 17:
3398 /* Integer source */
3399 gen_mov_F0_vreg(0, rm);
3400 break;
3401 case 8:
3402 case 9:
3403 /* Compare */
3404 gen_mov_F0_vreg(dp, rd);
3405 gen_mov_F1_vreg(dp, rm);
3406 break;
3407 case 10:
3408 case 11:
3409 /* Compare with zero */
3410 gen_mov_F0_vreg(dp, rd);
3411 gen_vfp_F1_ld0(dp);
3412 break;
3413 case 20:
3414 case 21:
3415 case 22:
3416 case 23:
3417 case 28:
3418 case 29:
3419 case 30:
3420 case 31:
3421 /* Source and destination the same. */
3422 gen_mov_F0_vreg(dp, rd);
3423 break;
3424 case 4:
3425 case 5:
3426 case 6:
3427 case 7:
3428 /* VCVTB, VCVTT: only present with the halfprec extension
3429 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3430 * (we choose to UNDEF)
3431 */
3432 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3433 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
3434 return 1;
3436 if (!extract32(rn, 1, 1)) {
3437 /* Half precision source. */
3438 gen_mov_F0_vreg(0, rm);
3439 break;
3440 }
3441 /* Otherwise fall through */
3442 default:
3443 /* One source operand. */
3444 gen_mov_F0_vreg(dp, rm);
3445 break;
3446 }
3447 } else {
3448 /* Two source operands. */
3449 gen_mov_F0_vreg(dp, rn);
3450 gen_mov_F1_vreg(dp, rm);
3451 }
3453 for (;;) {
3454 /* Perform the calculation. */
3455 switch (op) {
3456 case 0: /* VMLA: fd + (fn * fm) */
3457 /* Note that order of inputs to the add matters for NaNs */
3458 gen_vfp_F1_mul(dp);
3459 gen_mov_F0_vreg(dp, rd);
3460 gen_vfp_add(dp);
3461 break;
3462 case 1: /* VMLS: fd + -(fn * fm) */
3463 gen_vfp_mul(dp);
3464 gen_vfp_F1_neg(dp);
3465 gen_mov_F0_vreg(dp, rd);
3466 gen_vfp_add(dp);
3467 break;
3468 case 2: /* VNMLS: -fd + (fn * fm) */
3469 /* Note that it isn't valid to replace (-A + B) with (B - A)
3470 * or similar plausible looking simplifications
3471 * because this will give wrong results for NaNs.
3472 */
3473 gen_vfp_F1_mul(dp);
3474 gen_mov_F0_vreg(dp, rd);
3475 gen_vfp_neg(dp);
3476 gen_vfp_add(dp);
3477 break;
3478 case 3: /* VNMLA: -fd + -(fn * fm) */
3479 gen_vfp_mul(dp);
3480 gen_vfp_F1_neg(dp);
3481 gen_mov_F0_vreg(dp, rd);
3482 gen_vfp_neg(dp);
3483 gen_vfp_add(dp);
3484 break;
3485 case 4: /* mul: fn * fm */
3486 gen_vfp_mul(dp);
3487 break;
3488 case 5: /* nmul: -(fn * fm) */
3489 gen_vfp_mul(dp);
3490 gen_vfp_neg(dp);
3491 break;
3492 case 6: /* add: fn + fm */
3493 gen_vfp_add(dp);
3494 break;
3495 case 7: /* sub: fn - fm */
3496 gen_vfp_sub(dp);
3497 break;
3498 case 8: /* div: fn / fm */
3499 gen_vfp_div(dp);
3500 break;
3501 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3502 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3503 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3504 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3505 /* These are fused multiply-add, and must be done as one
3506 * floating point operation with no rounding between the
3507 * multiplication and addition steps.
3508 * NB that doing the negations here as separate steps is
3509 * correct: an input NaN should come out with its sign bit
3510 * flipped if it is a negated-input.
3511 */
3512 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
3513 return 1;
3515 if (dp) {
3516 TCGv_ptr fpst;
3517 TCGv_i64 frd;
3518 if (op & 1) {
3519 /* VFNMS, VFMS */
3520 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3521 }
3522 frd = tcg_temp_new_i64();
3523 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3524 if (op & 2) {
3525 /* VFNMA, VFNMS */
3526 gen_helper_vfp_negd(frd, frd);
3527 }
3528 fpst = get_fpstatus_ptr(0);
3529 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3530 cpu_F1d, frd, fpst);
3531 tcg_temp_free_ptr(fpst);
3532 tcg_temp_free_i64(frd);
3533 } else {
3534 TCGv_ptr fpst;
3535 TCGv_i32 frd;
3536 if (op & 1) {
3537 /* VFNMS, VFMS */
3538 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3539 }
3540 frd = tcg_temp_new_i32();
3541 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3542 if (op & 2) {
3543 gen_helper_vfp_negs(frd, frd);
3544 }
3545 fpst = get_fpstatus_ptr(0);
3546 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3547 cpu_F1s, frd, fpst);
3548 tcg_temp_free_ptr(fpst);
3549 tcg_temp_free_i32(frd);
3550 }
3551 break;
3552 case 14: /* fconst */
3553 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3554 return 1;
3555 }
3557 n = (insn << 12) & 0x80000000;
3558 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3559 if (dp) {
3560 if (i & 0x40)
3561 i |= 0x3f80;
3562 else
3563 i |= 0x4000;
3564 n |= i << 16;
3565 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3566 } else {
3567 if (i & 0x40)
3568 i |= 0x780;
3569 else
3570 i |= 0x800;
3571 n |= i << 19;
3572 tcg_gen_movi_i32(cpu_F0s, n);
3573 }
3574 break;
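/* (The fconst case implements VFPExpandImm from the ARM ARM: the 8-bit immediate expands to a sign bit, a small exponent range selected by bit 6 of the immediate, and a 4-bit fraction.) */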
3575 case 15: /* extension space */
3576 switch (rn) {
3577 case 0: /* cpy */
3578 /* no-op */
3579 break;
3580 case 1: /* abs */
3581 gen_vfp_abs(dp);
3582 break;
3583 case 2: /* neg */
3584 gen_vfp_neg(dp);
3585 break;
3586 case 3: /* sqrt */
3587 gen_vfp_sqrt(dp);
3588 break;
3589 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3590 tmp = gen_vfp_mrs();
3591 tcg_gen_ext16u_i32(tmp, tmp);
3592 if (dp) {
3593 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3594 cpu_env);
3595 } else {
3596 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3597 cpu_env);
3598 }
3599 tcg_temp_free_i32(tmp);
3600 break;
3601 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3602 tmp = gen_vfp_mrs();
3603 tcg_gen_shri_i32(tmp, tmp, 16);
3604 if (dp) {
3605 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3606 cpu_env);
3607 } else {
3608 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3609 cpu_env);
3610 }
3611 tcg_temp_free_i32(tmp);
3612 break;
3613 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3614 tmp = tcg_temp_new_i32();
3615 if (dp) {
3616 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3617 cpu_env);
3618 } else {
3619 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3620 cpu_env);
3621 }
3622 gen_mov_F0_vreg(0, rd);
3623 tmp2 = gen_vfp_mrs();
3624 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3625 tcg_gen_or_i32(tmp, tmp, tmp2);
3626 tcg_temp_free_i32(tmp2);
3627 gen_vfp_msr(tmp);
3628 break;
3629 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3630 tmp = tcg_temp_new_i32();
3631 if (dp) {
3632 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3633 cpu_env);
3634 } else {
3635 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3636 cpu_env);
3637 }
3638 tcg_gen_shli_i32(tmp, tmp, 16);
3639 gen_mov_F0_vreg(0, rd);
3640 tmp2 = gen_vfp_mrs();
3641 tcg_gen_ext16u_i32(tmp2, tmp2);
3642 tcg_gen_or_i32(tmp, tmp, tmp2);
3643 tcg_temp_free_i32(tmp2);
3644 gen_vfp_msr(tmp);
3645 break;
3646 case 8: /* cmp */
3647 gen_vfp_cmp(dp);
3648 break;
3649 case 9: /* cmpe */
3650 gen_vfp_cmpe(dp);
3651 break;
3652 case 10: /* cmpz */
3653 gen_vfp_cmp(dp);
3654 break;
3655 case 11: /* cmpez */
3656 gen_vfp_F1_ld0(dp);
3657 gen_vfp_cmpe(dp);
3658 break;
3659 case 12: /* vrintr */
3660 {
3661 TCGv_ptr fpst = get_fpstatus_ptr(0);
3662 if (dp) {
3663 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3664 } else {
3665 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3666 }
3667 tcg_temp_free_ptr(fpst);
3668 break;
3669 }
3670 case 13: /* vrintz */
3671 {
3672 TCGv_ptr fpst = get_fpstatus_ptr(0);
3673 TCGv_i32 tcg_rmode;
3674 tcg_rmode = tcg_const_i32(float_round_to_zero);
3675 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3676 if (dp) {
3677 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3678 } else {
3679 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3680 }
3681 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3682 tcg_temp_free_i32(tcg_rmode);
3683 tcg_temp_free_ptr(fpst);
3684 break;
3685 }
3686 case 14: /* vrintx */
3687 {
3688 TCGv_ptr fpst = get_fpstatus_ptr(0);
3689 if (dp) {
3690 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3691 } else {
3692 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3693 }
3694 tcg_temp_free_ptr(fpst);
3695 break;
3696 }
3697 case 15: /* single<->double conversion */
3698 if (dp)
3699 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3700 else
3701 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3702 break;
3703 case 16: /* fuito */
3704 gen_vfp_uito(dp, 0);
3705 break;
3706 case 17: /* fsito */
3707 gen_vfp_sito(dp, 0);
3708 break;
3709 case 20: /* fshto */
3710 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3711 return 1;
3712 }
3713 gen_vfp_shto(dp, 16 - rm, 0);
3714 break;
3715 case 21: /* fslto */
3716 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3717 return 1;
3718 }
3719 gen_vfp_slto(dp, 32 - rm, 0);
3720 break;
3721 case 22: /* fuhto */
3722 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3723 return 1;
3724 }
3725 gen_vfp_uhto(dp, 16 - rm, 0);
3726 break;
3727 case 23: /* fulto */
3728 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3729 return 1;
3730 }
3731 gen_vfp_ulto(dp, 32 - rm, 0);
3732 break;
3733 case 24: /* ftoui */
3734 gen_vfp_toui(dp, 0);
3735 break;
3736 case 25: /* ftouiz */
3737 gen_vfp_touiz(dp, 0);
3738 break;
3739 case 26: /* ftosi */
3740 gen_vfp_tosi(dp, 0);
3741 break;
3742 case 27: /* ftosiz */
3743 gen_vfp_tosiz(dp, 0);
3744 break;
3745 case 28: /* ftosh */
3746 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3747 return 1;
3748 }
3749 gen_vfp_tosh(dp, 16 - rm, 0);
3750 break;
3751 case 29: /* ftosl */
3752 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3753 return 1;
3754 }
3755 gen_vfp_tosl(dp, 32 - rm, 0);
3756 break;
3757 case 30: /* ftouh */
3758 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3759 return 1;
3760 }
3761 gen_vfp_touh(dp, 16 - rm, 0);
3762 break;
3763 case 31: /* ftoul */
3764 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3765 return 1;
3766 }
3767 gen_vfp_toul(dp, 32 - rm, 0);
3768 break;
3769 default: /* undefined */
3770 return 1;
3771 }
3772 break;
3773 default: /* undefined */
3774 return 1;
3775 }
3777 /* Write back the result. */
3778 if (op == 15 && (rn >= 8 && rn <= 11)) {
3779 /* Comparison, do nothing. */
3780 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3781 (rn & 0x1e) == 0x6)) {
3782 /* VCVT double to int: always integer result.
3783 * VCVT double to half precision is always a single
3784 * precision result.
3785 */
3786 gen_mov_vreg_F0(0, rd);
3787 } else if (op == 15 && rn == 15) {
3788 /* conversion */
3789 gen_mov_vreg_F0(!dp, rd);
3790 } else {
3791 gen_mov_vreg_F0(dp, rd);
3792 }
3794 /* break out of the loop if we have finished */
3795 if (veclen == 0)
3796 break;
3798 if (op == 15 && delta_m == 0) {
3799 /* single source one-many */
3800 while (veclen--) {
3801 rd = ((rd + delta_d) & (bank_mask - 1))
3802 | (rd & bank_mask);
3803 gen_mov_vreg_F0(dp, rd);
3805 break;
3806 }
3807 /* Set up the next operands. */
3808 veclen--;
3809 rd = ((rd + delta_d) & (bank_mask - 1))
3810 | (rd & bank_mask);
3812 if (op == 15) {
3813 /* One source operand. */
3814 rm = ((rm + delta_m) & (bank_mask - 1))
3815 | (rm & bank_mask);
3816 gen_mov_F0_vreg(dp, rm);
3817 } else {
3818 /* Two source operands. */
3819 rn = ((rn + delta_d) & (bank_mask - 1))
3820 | (rn & bank_mask);
3821 gen_mov_F0_vreg(dp, rn);
3822 if (delta_m) {
3823 rm = ((rm + delta_m) & (bank_mask - 1))
3824 | (rm & bank_mask);
3825 gen_mov_F1_vreg(dp, rm);
3826 }
3827 }
3828 }
3830 break;
3831 case 0xc:
3832 case 0xd:
3833 if ((insn & 0x03e00000) == 0x00400000) {
3834 /* two-register transfer */
3835 rn = (insn >> 16) & 0xf;
3836 rd = (insn >> 12) & 0xf;
3837 if (dp) {
3838 VFP_DREG_M(rm, insn);
3839 } else {
3840 rm = VFP_SREG_M(insn);
3841 }
3843 if (insn & ARM_CP_RW_BIT) {
3844 /* vfp->arm */
3845 if (dp) {
3846 gen_mov_F0_vreg(0, rm * 2);
3847 tmp = gen_vfp_mrs();
3848 store_reg(s, rd, tmp);
3849 gen_mov_F0_vreg(0, rm * 2 + 1);
3850 tmp = gen_vfp_mrs();
3851 store_reg(s, rn, tmp);
3852 } else {
3853 gen_mov_F0_vreg(0, rm);
3854 tmp = gen_vfp_mrs();
3855 store_reg(s, rd, tmp);
3856 gen_mov_F0_vreg(0, rm + 1);
3857 tmp = gen_vfp_mrs();
3858 store_reg(s, rn, tmp);
3859 }
3860 } else {
3861 /* arm->vfp */
3862 if (dp) {
3863 tmp = load_reg(s, rd);
3864 gen_vfp_msr(tmp);
3865 gen_mov_vreg_F0(0, rm * 2);
3866 tmp = load_reg(s, rn);
3867 gen_vfp_msr(tmp);
3868 gen_mov_vreg_F0(0, rm * 2 + 1);
3869 } else {
3870 tmp = load_reg(s, rd);
3871 gen_vfp_msr(tmp);
3872 gen_mov_vreg_F0(0, rm);
3873 tmp = load_reg(s, rn);
3874 gen_vfp_msr(tmp);
3875 gen_mov_vreg_F0(0, rm + 1);
3876 }
3877 }
3878 } else {
3879 /* Load/store */
3880 rn = (insn >> 16) & 0xf;
3881 if (dp)
3882 VFP_DREG_D(rd, insn);
3883 else
3884 rd = VFP_SREG_D(insn);
3885 if ((insn & 0x01200000) == 0x01000000) {
3886 /* Single load/store */
3887 offset = (insn & 0xff) << 2;
3888 if ((insn & (1 << 23)) == 0)
3889 offset = -offset;
3890 if (s->thumb && rn == 15) {
3891 /* This is actually UNPREDICTABLE */
3892 addr = tcg_temp_new_i32();
3893 tcg_gen_movi_i32(addr, s->pc & ~2);
3894 } else {
3895 addr = load_reg(s, rn);
3896 }
3897 tcg_gen_addi_i32(addr, addr, offset);
3898 if (insn & (1 << 20)) {
3899 gen_vfp_ld(s, dp, addr);
3900 gen_mov_vreg_F0(dp, rd);
3901 } else {
3902 gen_mov_F0_vreg(dp, rd);
3903 gen_vfp_st(s, dp, addr);
3904 }
3905 tcg_temp_free_i32(addr);
3906 } else {
3907 /* load/store multiple */
3908 int w = insn & (1 << 21);
3909 if (dp)
3910 n = (insn >> 1) & 0x7f;
3911 else
3912 n = insn & 0xff;
3914 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3915 /* P == U , W == 1 => UNDEF */
3916 return 1;
3917 }
3918 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3919 /* UNPREDICTABLE cases for bad immediates: we choose to
3920 * UNDEF to avoid generating huge numbers of TCG ops
3921 */
3922 return 1;
3923 }
3924 if (rn == 15 && w) {
3925 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3926 return 1;
3927 }
3929 if (s->thumb && rn == 15) {
3930 /* This is actually UNPREDICTABLE */
3931 addr = tcg_temp_new_i32();
3932 tcg_gen_movi_i32(addr, s->pc & ~2);
3933 } else {
3934 addr = load_reg(s, rn);
3935 }
3936 if (insn & (1 << 24)) /* pre-decrement */
3937 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3939 if (dp)
3940 offset = 8;
3941 else
3942 offset = 4;
3943 for (i = 0; i < n; i++) {
3944 if (insn & ARM_CP_RW_BIT) {
3945 /* load */
3946 gen_vfp_ld(s, dp, addr);
3947 gen_mov_vreg_F0(dp, rd + i);
3948 } else {
3949 /* store */
3950 gen_mov_F0_vreg(dp, rd + i);
3951 gen_vfp_st(s, dp, addr);
3952 }
3953 tcg_gen_addi_i32(addr, addr, offset);
3954 }
3955 if (w) {
3956 /* writeback */
3957 if (insn & (1 << 24))
3958 offset = -offset * n;
3959 else if (dp && (insn & 1))
3960 offset = 4;
3961 else
3962 offset = 0;
3964 if (offset != 0)
3965 tcg_gen_addi_i32(addr, addr, offset);
3966 store_reg(s, rn, addr);
3967 } else {
3968 tcg_temp_free_i32(addr);
3969 }
3970 }
3972 break;
3973 default:
3974 /* Should never happen. */
3975 return 1;
3976 }
3977 return 0;
3978 }
3980 static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
3981 {
3982 TranslationBlock *tb;
3984 tb = s->tb;
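/* Direct block chaining via goto_tb is only used when the destination lies on the same guest page as this TB; otherwise we just update the PC and return to the main loop to look the next TB up. */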
3985 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3986 tcg_gen_goto_tb(n);
3987 gen_set_pc_im(s, dest);
3988 tcg_gen_exit_tb((uintptr_t)tb + n);
3989 } else {
3990 gen_set_pc_im(s, dest);
3991 tcg_gen_exit_tb(0);
3992 }
3993 }
3995 static inline void gen_jmp(DisasContext *s, uint32_t dest)
3996 {
3997 if (unlikely(s->singlestep_enabled || s->ss_active)) {
3998 /* An indirect jump so that we still trigger the debug exception. */
3999 if (s->thumb)
4000 dest |= 1;
4001 gen_bx_im(s, dest);
4002 } else {
4003 gen_goto_tb(s, 0, dest);
4004 s->is_jmp = DISAS_TB_JUMP;
4005 }
4006 }
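/* gen_mulxy generates the signed 16x16->32 multiply step shared by the SMULxy/SMLAxy family: x and y select the top (1) or bottom (0) halfword of each operand. */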
4008 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
4009 {
4010 if (x)
4011 tcg_gen_sari_i32(t0, t0, 16);
4012 else
4013 gen_sxth(t0);
4014 if (y)
4015 tcg_gen_sari_i32(t1, t1, 16);
4016 else
4017 gen_sxth(t1);
4018 tcg_gen_mul_i32(t0, t0, t1);
4019 }
4021 /* Return the mask of PSR bits set by an MSR instruction. */
4022 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4023 {
4024 uint32_t mask;
4026 mask = 0;
4027 if (flags & (1 << 0))
4028 mask |= 0xff;
4029 if (flags & (1 << 1))
4030 mask |= 0xff00;
4031 if (flags & (1 << 2))
4032 mask |= 0xff0000;
4033 if (flags & (1 << 3))
4034 mask |= 0xff000000;
4036 /* Mask out undefined bits. */
4037 mask &= ~CPSR_RESERVED;
4038 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
4039 mask &= ~CPSR_T;
4040 }
4041 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
4042 mask &= ~CPSR_Q; /* V5TE in reality */
4043 }
4044 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
4045 mask &= ~(CPSR_E | CPSR_GE);
4046 }
4047 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
4048 mask &= ~CPSR_IT;
4049 }
4050 /* Mask out execution state and reserved bits. */
4051 if (!spsr) {
4052 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4053 }
4054 /* Mask out privileged bits. */
4055 if (IS_USER(s))
4056 mask &= CPSR_USER;
4057 return mask;
4058 }
4060 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
4061 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
4062 {
4063 TCGv_i32 tmp;
4064 if (spsr) {
4065 /* ??? This is also undefined in system mode. */
4066 if (IS_USER(s))
4067 return 1;
4069 tmp = load_cpu_field(spsr);
4070 tcg_gen_andi_i32(tmp, tmp, ~mask);
4071 tcg_gen_andi_i32(t0, t0, mask);
4072 tcg_gen_or_i32(tmp, tmp, t0);
4073 store_cpu_field(tmp, spsr);
4074 } else {
4075 gen_set_cpsr(t0, mask);
4076 }
4077 tcg_temp_free_i32(t0);
4078 gen_lookup_tb(s);
4079 return 0;
4080 }
4082 /* Returns nonzero if access to the PSR is not permitted. */
4083 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4084 {
4085 TCGv_i32 tmp;
4086 tmp = tcg_temp_new_i32();
4087 tcg_gen_movi_i32(tmp, val);
4088 return gen_set_psr(s, mask, spsr, tmp);
4089 }
4091 /* Generate an old-style exception return. Marks pc as dead. */
4092 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4093 {
4094 TCGv_i32 tmp;
4095 store_reg(s, 15, pc);
4096 tmp = load_cpu_field(spsr);
4097 gen_set_cpsr(tmp, CPSR_ERET_MASK);
4098 tcg_temp_free_i32(tmp);
4099 s->is_jmp = DISAS_JUMP;
4100 }
4102 /* Generate a v6 exception return. Marks both values as dead. */
4103 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
4104 {
4105 gen_set_cpsr(cpsr, CPSR_ERET_MASK);
4106 tcg_temp_free_i32(cpsr);
4107 store_reg(s, 15, pc);
4108 s->is_jmp = DISAS_JUMP;
4109 }
4111 static void gen_nop_hint(DisasContext *s, int val)
4112 {
4113 switch (val) {
4114 case 1: /* yield */
4115 gen_set_pc_im(s, s->pc);
4116 s->is_jmp = DISAS_YIELD;
4117 break;
4118 case 3: /* wfi */
4119 gen_set_pc_im(s, s->pc);
4120 s->is_jmp = DISAS_WFI;
4121 break;
4122 case 2: /* wfe */
4123 gen_set_pc_im(s, s->pc);
4124 s->is_jmp = DISAS_WFE;
4125 break;
4126 case 4: /* sev */
4127 case 5: /* sevl */
4128 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
4129 default: /* nop */
4130 break;
4131 }
4132 }
4134 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
4136 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
4137 {
4138 switch (size) {
4139 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4140 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4141 case 2: tcg_gen_add_i32(t0, t0, t1); break;
4142 default: abort();
4143 }
4144 }
4146 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
4147 {
4148 switch (size) {
4149 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4150 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4151 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
4152 default: return;
4153 }
4154 }
4156 /* 32-bit pairwise ops end up the same as the elementwise versions. */
4157 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4158 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4159 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4160 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
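/* For 32-bit elements each 'pair' is just the two values being combined, so the pairwise max/min helpers can simply alias the elementwise ones. */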
4162 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
4163 switch ((size << 1) | u) { \
4164 case 0: \
4165 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
4166 break; \
4167 case 1: \
4168 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
4169 break; \
4170 case 2: \
4171 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
4172 break; \
4173 case 3: \
4174 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
4175 break; \
4176 case 4: \
4177 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
4178 break; \
4179 case 5: \
4180 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
4181 break; \
4182 default: return 1; \
4183 }} while (0)
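/* Both macros dispatch on ((size << 1) | u): size 0/1/2 picks 8/16/32-bit elements and u picks the unsigned variant; size 3 (64 bit) has no helper here and falls through to the default, i.e. UNDEF. */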
4185 #define GEN_NEON_INTEGER_OP(name) do { \
4186 switch ((size << 1) | u) { \
4187 case 0: \
4188 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
4189 break; \
4190 case 1: \
4191 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
4192 break; \
4193 case 2: \
4194 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
4195 break; \
4196 case 3: \
4197 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
4198 break; \
4199 case 4: \
4200 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
4201 break; \
4202 case 5: \
4203 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
4204 break; \
4205 default: return 1; \
4206 }} while (0)
4208 static TCGv_i32 neon_load_scratch(int scratch)
4209 {
4210 TCGv_i32 tmp = tcg_temp_new_i32();
4211 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4212 return tmp;
4213 }
4215 static void neon_store_scratch(int scratch, TCGv_i32 var)
4216 {
4217 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4218 tcg_temp_free_i32(var);
4219 }
4221 static inline TCGv_i32 neon_get_scalar(int size, int reg)
4222 {
4223 TCGv_i32 tmp;
4224 if (size == 1) {
4225 tmp = neon_load_reg(reg & 7, reg >> 4);
4226 if (reg & 8) {
4227 gen_neon_dup_high16(tmp);
4228 } else {
4229 gen_neon_dup_low16(tmp);
4230 }
4231 } else {
4232 tmp = neon_load_reg(reg & 15, reg >> 4);
4233 }
4234 return tmp;
4235 }
4237 static int gen_neon_unzip(int rd, int rm, int size, int q)
4238 {
4239 TCGv_i32 tmp, tmp2;
4240 if (!q && size == 2) {
4241 return 1;
4242 }
4243 tmp = tcg_const_i32(rd);
4244 tmp2 = tcg_const_i32(rm);
4245 if (q) {
4246 switch (size) {
4247 case 0:
4248 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
4249 break;
4250 case 1:
4251 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
4252 break;
4253 case 2:
4254 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
4255 break;
4256 default:
4257 abort();
4258 }
4259 } else {
4260 switch (size) {
4261 case 0:
4262 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
4263 break;
4264 case 1:
4265 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
4266 break;
4267 default:
4268 abort();
4269 }
4270 }
4271 tcg_temp_free_i32(tmp);
4272 tcg_temp_free_i32(tmp2);
4273 return 0;
4274 }
4276 static int gen_neon_zip(int rd, int rm, int size, int q)
4277 {
4278 TCGv_i32 tmp, tmp2;
4279 if (!q && size == 2) {
4280 return 1;
4281 }
4282 tmp = tcg_const_i32(rd);
4283 tmp2 = tcg_const_i32(rm);
4284 if (q) {
4285 switch (size) {
4286 case 0:
4287 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
4288 break;
4289 case 1:
4290 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
4291 break;
4292 case 2:
4293 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
4294 break;
4295 default:
4296 abort();
4297 }
4298 } else {
4299 switch (size) {
4300 case 0:
4301 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
4302 break;
4303 case 1:
4304 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
4305 break;
4306 default:
4307 abort();
4310 tcg_temp_free_i32(tmp);
4311 tcg_temp_free_i32(tmp2);
4312 return 0;
4313 }
4315 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
4316 {
4317 TCGv_i32 rd, tmp;
4319 rd = tcg_temp_new_i32();
4320 tmp = tcg_temp_new_i32();
4322 tcg_gen_shli_i32(rd, t0, 8);
4323 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4324 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4325 tcg_gen_or_i32(rd, rd, tmp);
4327 tcg_gen_shri_i32(t1, t1, 8);
4328 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4329 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4330 tcg_gen_or_i32(t1, t1, tmp);
4331 tcg_gen_mov_i32(t0, rd);
4333 tcg_temp_free_i32(tmp);
4334 tcg_temp_free_i32(rd);
4337 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
4339 TCGv_i32 rd, tmp;
4341 rd = tcg_temp_new_i32();
4342 tmp = tcg_temp_new_i32();
4344 tcg_gen_shli_i32(rd, t0, 16);
4345 tcg_gen_andi_i32(tmp, t1, 0xffff);
4346 tcg_gen_or_i32(rd, rd, tmp);
4347 tcg_gen_shri_i32(t1, t1, 16);
4348 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4349 tcg_gen_or_i32(t1, t1, tmp);
4350 tcg_gen_mov_i32(t0, rd);
4352 tcg_temp_free_i32(tmp);
4353 tcg_temp_free_i32(rd);
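/* Register-list shapes for the VLD/VST "multiple structures" forms,
 * indexed by the op field of the instruction: nregs D registers are
 * transferred, with elements interleaved across groups of
 * 'interleave' registers spaced 'spacing' apart.  Per the ARM ARM
 * encoding, e.g. op == 7 ({1, 1, 1}) is the one-register VLD1/VST1
 * and op == 0 ({4, 4, 1}) the four-register interleaved VLD4/VST4. */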
4357 static struct {
4358 int nregs;
4359 int interleave;
4360 int spacing;
4361 } neon_ls_element_type[11] = {
4362 {4, 4, 1},
4363 {4, 4, 2},
4364 {4, 1, 1},
4365 {4, 2, 1},
4366 {3, 3, 1},
4367 {3, 3, 2},
4368 {3, 1, 1},
4369 {1, 1, 1},
4370 {2, 2, 1},
4371 {2, 2, 2},
4372 {2, 1, 1}
4375 /* Translate a NEON load/store element instruction. Return nonzero if the
4376 instruction is invalid. */
4377 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
4379 int rd, rn, rm;
4380 int op;
4381 int nregs;
4382 int interleave;
4383 int spacing;
4384 int stride;
4385 int size;
4386 int reg;
4387 int pass;
4388 int load;
4389 int shift;
4390 int n;
4391 TCGv_i32 addr;
4392 TCGv_i32 tmp;
4393 TCGv_i32 tmp2;
4394 TCGv_i64 tmp64;
4396 /* FIXME: this access check should not take precedence over UNDEF
4397 * for invalid encodings; we will generate incorrect syndrome information
4398 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4400 if (s->fp_excp_el) {
4401 gen_exception_insn(s, 4, EXCP_UDEF,
4402 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
4403 return 0;
4406 if (!s->vfp_enabled)
4407 return 1;
4408 VFP_DREG_D(rd, insn);
4409 rn = (insn >> 16) & 0xf;
4410 rm = insn & 0xf;
4411 load = (insn & (1 << 21)) != 0;
4412 if ((insn & (1 << 23)) == 0) {
4413 /* Load store all elements. */
4414 op = (insn >> 8) & 0xf;
4415 size = (insn >> 6) & 3;
4416 if (op > 10)
4417 return 1;
4418 /* Catch UNDEF cases for bad values of align field */
4419 switch (op & 0xc) {
4420 case 4:
4421 if (((insn >> 5) & 1) == 1) {
4422 return 1;
4424 break;
4425 case 8:
4426 if (((insn >> 4) & 3) == 3) {
4427 return 1;
4429 break;
4430 default:
4431 break;
4433 nregs = neon_ls_element_type[op].nregs;
4434 interleave = neon_ls_element_type[op].interleave;
4435 spacing = neon_ls_element_type[op].spacing;
4436 if (size == 3 && (interleave | spacing) != 1)
4437 return 1;
4438 addr = tcg_temp_new_i32();
4439 load_reg_var(s, addr, rn);
4440 stride = (1 << size) * interleave;
4441 for (reg = 0; reg < nregs; reg++) {
4442 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
4443 load_reg_var(s, addr, rn);
4444 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
4445 } else if (interleave == 2 && nregs == 4 && reg == 2) {
4446 load_reg_var(s, addr, rn);
4447 tcg_gen_addi_i32(addr, addr, 1 << size);
4449 if (size == 3) {
4450 tmp64 = tcg_temp_new_i64();
4451 if (load) {
4452 gen_aa32_ld64(tmp64, addr, get_mem_index(s));
4453 neon_store_reg64(tmp64, rd);
4454 } else {
4455 neon_load_reg64(tmp64, rd);
4456 gen_aa32_st64(tmp64, addr, get_mem_index(s));
4458 tcg_temp_free_i64(tmp64);
4459 tcg_gen_addi_i32(addr, addr, stride);
4460 } else {
4461 for (pass = 0; pass < 2; pass++) {
4462 if (size == 2) {
4463 if (load) {
4464 tmp = tcg_temp_new_i32();
4465 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
4466 neon_store_reg(rd, pass, tmp);
4467 } else {
4468 tmp = neon_load_reg(rd, pass);
4469 gen_aa32_st32(tmp, addr, get_mem_index(s));
4470 tcg_temp_free_i32(tmp);
4472 tcg_gen_addi_i32(addr, addr, stride);
4473 } else if (size == 1) {
4474 if (load) {
4475 tmp = tcg_temp_new_i32();
4476 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
4477 tcg_gen_addi_i32(addr, addr, stride);
4478 tmp2 = tcg_temp_new_i32();
4479 gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
4480 tcg_gen_addi_i32(addr, addr, stride);
4481 tcg_gen_shli_i32(tmp2, tmp2, 16);
4482 tcg_gen_or_i32(tmp, tmp, tmp2);
4483 tcg_temp_free_i32(tmp2);
4484 neon_store_reg(rd, pass, tmp);
4485 } else {
4486 tmp = neon_load_reg(rd, pass);
4487 tmp2 = tcg_temp_new_i32();
4488 tcg_gen_shri_i32(tmp2, tmp, 16);
4489 gen_aa32_st16(tmp, addr, get_mem_index(s));
4490 tcg_temp_free_i32(tmp);
4491 tcg_gen_addi_i32(addr, addr, stride);
4492 gen_aa32_st16(tmp2, addr, get_mem_index(s));
4493 tcg_temp_free_i32(tmp2);
4494 tcg_gen_addi_i32(addr, addr, stride);
4496 } else /* size == 0 */ {
4497 if (load) {
4498 TCGV_UNUSED_I32(tmp2);
4499 for (n = 0; n < 4; n++) {
4500 tmp = tcg_temp_new_i32();
4501 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
4502 tcg_gen_addi_i32(addr, addr, stride);
4503 if (n == 0) {
4504 tmp2 = tmp;
4505 } else {
4506 tcg_gen_shli_i32(tmp, tmp, n * 8);
4507 tcg_gen_or_i32(tmp2, tmp2, tmp);
4508 tcg_temp_free_i32(tmp);
4511 neon_store_reg(rd, pass, tmp2);
4512 } else {
4513 tmp2 = neon_load_reg(rd, pass);
4514 for (n = 0; n < 4; n++) {
4515 tmp = tcg_temp_new_i32();
4516 if (n == 0) {
4517 tcg_gen_mov_i32(tmp, tmp2);
4518 } else {
4519 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4521 gen_aa32_st8(tmp, addr, get_mem_index(s));
4522 tcg_temp_free_i32(tmp);
4523 tcg_gen_addi_i32(addr, addr, stride);
4525 tcg_temp_free_i32(tmp2);
4530 rd += spacing;
4532 tcg_temp_free_i32(addr);
4533 stride = nregs * 8;
4534 } else {
4535 size = (insn >> 10) & 3;
4536 if (size == 3) {
4537 /* Load single element to all lanes. */
4538 int a = (insn >> 4) & 1;
4539 if (!load) {
4540 return 1;
4542 size = (insn >> 6) & 3;
4543 nregs = ((insn >> 8) & 3) + 1;
4545 if (size == 3) {
4546 if (nregs != 4 || a == 0) {
4547 return 1;
4549 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4550 size = 2;
4552 if (nregs == 1 && a == 1 && size == 0) {
4553 return 1;
4555 if (nregs == 3 && a == 1) {
4556 return 1;
4558 addr = tcg_temp_new_i32();
4559 load_reg_var(s, addr, rn);
4560 if (nregs == 1) {
4561 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4562 tmp = gen_load_and_replicate(s, addr, size);
4563 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4564 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4565 if (insn & (1 << 5)) {
4566 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4567 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4569 tcg_temp_free_i32(tmp);
4570 } else {
4571 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4572 stride = (insn & (1 << 5)) ? 2 : 1;
4573 for (reg = 0; reg < nregs; reg++) {
4574 tmp = gen_load_and_replicate(s, addr, size);
4575 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4576 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4577 tcg_temp_free_i32(tmp);
4578 tcg_gen_addi_i32(addr, addr, 1 << size);
4579 rd += stride;
4582 tcg_temp_free_i32(addr);
4583 stride = (1 << size) * nregs;
4584 } else {
4585 /* Single element. */
4586 int idx = (insn >> 4) & 0xf;
4587 pass = (insn >> 7) & 1;
4588 switch (size) {
4589 case 0:
4590 shift = ((insn >> 5) & 3) * 8;
4591 stride = 1;
4592 break;
4593 case 1:
4594 shift = ((insn >> 6) & 1) * 16;
4595 stride = (insn & (1 << 5)) ? 2 : 1;
4596 break;
4597 case 2:
4598 shift = 0;
4599 stride = (insn & (1 << 6)) ? 2 : 1;
4600 break;
4601 default:
4602 abort();
4604 nregs = ((insn >> 8) & 3) + 1;
4605 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4606 switch (nregs) {
4607 case 1:
4608 if (((idx & (1 << size)) != 0) ||
4609 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4610 return 1;
4612 break;
4613 case 3:
4614 if ((idx & 1) != 0) {
4615 return 1;
4617 /* fall through */
4618 case 2:
4619 if (size == 2 && (idx & 2) != 0) {
4620 return 1;
4622 break;
4623 case 4:
4624 if ((size == 2) && ((idx & 3) == 3)) {
4625 return 1;
4627 break;
4628 default:
4629 abort();
4631 if ((rd + stride * (nregs - 1)) > 31) {
4632 /* Attempts to write off the end of the register file
4633 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4634 * the neon_load_reg() would write off the end of the array.
4636 return 1;
4638 addr = tcg_temp_new_i32();
4639 load_reg_var(s, addr, rn);
4640 for (reg = 0; reg < nregs; reg++) {
4641 if (load) {
4642 tmp = tcg_temp_new_i32();
4643 switch (size) {
4644 case 0:
4645 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
4646 break;
4647 case 1:
4648 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
4649 break;
4650 case 2:
4651 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
4652 break;
4653 default: /* Avoid compiler warnings. */
4654 abort();
4656 if (size != 2) {
4657 tmp2 = neon_load_reg(rd, pass);
4658 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4659 shift, size ? 16 : 8);
4660 tcg_temp_free_i32(tmp2);
4662 neon_store_reg(rd, pass, tmp);
4663 } else { /* Store */
4664 tmp = neon_load_reg(rd, pass);
4665 if (shift)
4666 tcg_gen_shri_i32(tmp, tmp, shift);
4667 switch (size) {
4668 case 0:
4669 gen_aa32_st8(tmp, addr, get_mem_index(s));
4670 break;
4671 case 1:
4672 gen_aa32_st16(tmp, addr, get_mem_index(s));
4673 break;
4674 case 2:
4675 gen_aa32_st32(tmp, addr, get_mem_index(s));
4676 break;
4678 tcg_temp_free_i32(tmp);
4680 rd += stride;
4681 tcg_gen_addi_i32(addr, addr, 1 << size);
4683 tcg_temp_free_i32(addr);
4684 stride = nregs * (1 << size);
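/* Base register writeback: rm == 15 means no writeback, rm == 13
 * requests post-increment by the amount just transferred, and any
 * other rm post-increments by that index register's value. */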
4687 if (rm != 15) {
4688 TCGv_i32 base;
4690 base = load_reg(s, rn);
4691 if (rm == 13) {
4692 tcg_gen_addi_i32(base, base, stride);
4693 } else {
4694 TCGv_i32 index;
4695 index = load_reg(s, rm);
4696 tcg_gen_add_i32(base, base, index);
4697 tcg_temp_free_i32(index);
4699 store_reg(s, rn, base);
4701 return 0;
4704 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4705 static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
4707 tcg_gen_and_i32(t, t, c);
4708 tcg_gen_andc_i32(f, f, c);
4709 tcg_gen_or_i32(dest, t, f);
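/* Expressed as bit operations this is dest = (t & c) | (f & ~c): each
 * set bit of c selects the corresponding bit of t, each clear bit the
 * bit of f.  The VBSL/VBIT/VBIF cases below reuse it with the three
 * operands permuted. */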
4712 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
4714 switch (size) {
4715 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4716 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4717 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
4718 default: abort();
4722 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
4724 switch (size) {
4725 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4726 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4727 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4728 default: abort();
4732 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
4734 switch (size) {
4735 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4736 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4737 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4738 default: abort();
4742 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
4744 switch (size) {
4745 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4746 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4747 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4748 default: abort();
4752 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
4753 int q, int u)
4755 if (q) {
4756 if (u) {
4757 switch (size) {
4758 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4759 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4760 default: abort();
4762 } else {
4763 switch (size) {
4764 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4765 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4766 default: abort();
4769 } else {
4770 if (u) {
4771 switch (size) {
4772 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4773 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4774 default: abort();
4776 } else {
4777 switch (size) {
4778 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4779 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4780 default: abort();
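/* Widen each element of a 32-bit value into the 64-bit dest, sign- or
 * zero-extending according to u.  Note that the 32-bit source
 * temporary is freed here, so callers must not reuse it. */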
4786 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
4788 if (u) {
4789 switch (size) {
4790 case 0: gen_helper_neon_widen_u8(dest, src); break;
4791 case 1: gen_helper_neon_widen_u16(dest, src); break;
4792 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4793 default: abort();
4795 } else {
4796 switch (size) {
4797 case 0: gen_helper_neon_widen_s8(dest, src); break;
4798 case 1: gen_helper_neon_widen_s16(dest, src); break;
4799 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4800 default: abort();
4803 tcg_temp_free_i32(src);
4806 static inline void gen_neon_addl(int size)
4808 switch (size) {
4809 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4810 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4811 case 2: tcg_gen_add_i64(CPU_V001); break;
4812 default: abort();
4816 static inline void gen_neon_subl(int size)
4818 switch (size) {
4819 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4820 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4821 case 2: tcg_gen_sub_i64(CPU_V001); break;
4822 default: abort();
4826 static inline void gen_neon_negl(TCGv_i64 var, int size)
4828 switch (size) {
4829 case 0: gen_helper_neon_negl_u16(var, var); break;
4830 case 1: gen_helper_neon_negl_u32(var, var); break;
4831 case 2:
4832 tcg_gen_neg_i64(var, var);
4833 break;
4834 default: abort();
4838 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4840 switch (size) {
4841 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4842 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4843 default: abort();
4847 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4848 int size, int u)
4850 TCGv_i64 tmp;
4852 switch ((size << 1) | u) {
4853 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4854 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4855 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4856 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4857 case 4:
4858 tmp = gen_muls_i64_i32(a, b);
4859 tcg_gen_mov_i64(dest, tmp);
4860 tcg_temp_free_i64(tmp);
4861 break;
4862 case 5:
4863 tmp = gen_mulu_i64_i32(a, b);
4864 tcg_gen_mov_i64(dest, tmp);
4865 tcg_temp_free_i64(tmp);
4866 break;
4867 default: abort();
4870 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4871 Don't forget to free them now. */
4872 if (size < 2) {
4873 tcg_temp_free_i32(a);
4874 tcg_temp_free_i32(b);
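/* Select one of the four narrowing behaviours: op && u is a
 * saturating signed-to-unsigned narrow (the VQMOVUN/VQSHRUN style
 * ops), op && !u a plain truncating narrow, !op && u a saturating
 * unsigned narrow and !op && !u a saturating signed narrow. */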
4878 static void gen_neon_narrow_op(int op, int u, int size,
4879 TCGv_i32 dest, TCGv_i64 src)
4881 if (op) {
4882 if (u) {
4883 gen_neon_unarrow_sats(size, dest, src);
4884 } else {
4885 gen_neon_narrow(size, dest, src);
4887 } else {
4888 if (u) {
4889 gen_neon_narrow_satu(size, dest, src);
4890 } else {
4891 gen_neon_narrow_sats(size, dest, src);
4896 /* Symbolic constants for op fields for Neon 3-register same-length.
4897 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4898 * table A7-9.
4900 #define NEON_3R_VHADD 0
4901 #define NEON_3R_VQADD 1
4902 #define NEON_3R_VRHADD 2
4903 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4904 #define NEON_3R_VHSUB 4
4905 #define NEON_3R_VQSUB 5
4906 #define NEON_3R_VCGT 6
4907 #define NEON_3R_VCGE 7
4908 #define NEON_3R_VSHL 8
4909 #define NEON_3R_VQSHL 9
4910 #define NEON_3R_VRSHL 10
4911 #define NEON_3R_VQRSHL 11
4912 #define NEON_3R_VMAX 12
4913 #define NEON_3R_VMIN 13
4914 #define NEON_3R_VABD 14
4915 #define NEON_3R_VABA 15
4916 #define NEON_3R_VADD_VSUB 16
4917 #define NEON_3R_VTST_VCEQ 17
4918 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4919 #define NEON_3R_VMUL 19
4920 #define NEON_3R_VPMAX 20
4921 #define NEON_3R_VPMIN 21
4922 #define NEON_3R_VQDMULH_VQRDMULH 22
4923 #define NEON_3R_VPADD 23
4924 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
4925 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4926 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4927 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4928 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4929 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4930 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4931 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
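/* Each entry in this array has bit n set if the insn allows size
 * value n (otherwise it will UNDEF); e.g. 0x6 for VQDMULH/VQRDMULH
 * permits only 16- and 32-bit elements. */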
4933 static const uint8_t neon_3r_sizes[] = {
4934 [NEON_3R_VHADD] = 0x7,
4935 [NEON_3R_VQADD] = 0xf,
4936 [NEON_3R_VRHADD] = 0x7,
4937 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4938 [NEON_3R_VHSUB] = 0x7,
4939 [NEON_3R_VQSUB] = 0xf,
4940 [NEON_3R_VCGT] = 0x7,
4941 [NEON_3R_VCGE] = 0x7,
4942 [NEON_3R_VSHL] = 0xf,
4943 [NEON_3R_VQSHL] = 0xf,
4944 [NEON_3R_VRSHL] = 0xf,
4945 [NEON_3R_VQRSHL] = 0xf,
4946 [NEON_3R_VMAX] = 0x7,
4947 [NEON_3R_VMIN] = 0x7,
4948 [NEON_3R_VABD] = 0x7,
4949 [NEON_3R_VABA] = 0x7,
4950 [NEON_3R_VADD_VSUB] = 0xf,
4951 [NEON_3R_VTST_VCEQ] = 0x7,
4952 [NEON_3R_VML] = 0x7,
4953 [NEON_3R_VMUL] = 0x7,
4954 [NEON_3R_VPMAX] = 0x7,
4955 [NEON_3R_VPMIN] = 0x7,
4956 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4957 [NEON_3R_VPADD] = 0x7,
4958 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
4959 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
4960 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4961 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4962 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4963 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4964 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4965 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
4968 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4969 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4970 * table A7-13.
4972 #define NEON_2RM_VREV64 0
4973 #define NEON_2RM_VREV32 1
4974 #define NEON_2RM_VREV16 2
4975 #define NEON_2RM_VPADDL 4
4976 #define NEON_2RM_VPADDL_U 5
4977 #define NEON_2RM_AESE 6 /* Includes AESD */
4978 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
4979 #define NEON_2RM_VCLS 8
4980 #define NEON_2RM_VCLZ 9
4981 #define NEON_2RM_VCNT 10
4982 #define NEON_2RM_VMVN 11
4983 #define NEON_2RM_VPADAL 12
4984 #define NEON_2RM_VPADAL_U 13
4985 #define NEON_2RM_VQABS 14
4986 #define NEON_2RM_VQNEG 15
4987 #define NEON_2RM_VCGT0 16
4988 #define NEON_2RM_VCGE0 17
4989 #define NEON_2RM_VCEQ0 18
4990 #define NEON_2RM_VCLE0 19
4991 #define NEON_2RM_VCLT0 20
4992 #define NEON_2RM_SHA1H 21
4993 #define NEON_2RM_VABS 22
4994 #define NEON_2RM_VNEG 23
4995 #define NEON_2RM_VCGT0_F 24
4996 #define NEON_2RM_VCGE0_F 25
4997 #define NEON_2RM_VCEQ0_F 26
4998 #define NEON_2RM_VCLE0_F 27
4999 #define NEON_2RM_VCLT0_F 28
5000 #define NEON_2RM_VABS_F 30
5001 #define NEON_2RM_VNEG_F 31
5002 #define NEON_2RM_VSWP 32
5003 #define NEON_2RM_VTRN 33
5004 #define NEON_2RM_VUZP 34
5005 #define NEON_2RM_VZIP 35
5006 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
5007 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
5008 #define NEON_2RM_VSHLL 38
5009 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
5010 #define NEON_2RM_VRINTN 40
5011 #define NEON_2RM_VRINTX 41
5012 #define NEON_2RM_VRINTA 42
5013 #define NEON_2RM_VRINTZ 43
5014 #define NEON_2RM_VCVT_F16_F32 44
5015 #define NEON_2RM_VRINTM 45
5016 #define NEON_2RM_VCVT_F32_F16 46
5017 #define NEON_2RM_VRINTP 47
5018 #define NEON_2RM_VCVTAU 48
5019 #define NEON_2RM_VCVTAS 49
5020 #define NEON_2RM_VCVTNU 50
5021 #define NEON_2RM_VCVTNS 51
5022 #define NEON_2RM_VCVTPU 52
5023 #define NEON_2RM_VCVTPS 53
5024 #define NEON_2RM_VCVTMU 54
5025 #define NEON_2RM_VCVTMS 55
5026 #define NEON_2RM_VRECPE 56
5027 #define NEON_2RM_VRSQRTE 57
5028 #define NEON_2RM_VRECPE_F 58
5029 #define NEON_2RM_VRSQRTE_F 59
5030 #define NEON_2RM_VCVT_FS 60
5031 #define NEON_2RM_VCVT_FU 61
5032 #define NEON_2RM_VCVT_SF 62
5033 #define NEON_2RM_VCVT_UF 63
5035 static int neon_2rm_is_float_op(int op)
5037 /* Return true if this neon 2reg-misc op is float-to-float */
5038 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
5039 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
5040 op == NEON_2RM_VRINTM ||
5041 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
5042 op >= NEON_2RM_VRECPE_F);
5045 /* Each entry in this array has bit n set if the insn allows
5046 * size value n (otherwise it will UNDEF). Since unallocated
5047 * op values will have no bits set they always UNDEF.
5049 static const uint8_t neon_2rm_sizes[] = {
5050 [NEON_2RM_VREV64] = 0x7,
5051 [NEON_2RM_VREV32] = 0x3,
5052 [NEON_2RM_VREV16] = 0x1,
5053 [NEON_2RM_VPADDL] = 0x7,
5054 [NEON_2RM_VPADDL_U] = 0x7,
5055 [NEON_2RM_AESE] = 0x1,
5056 [NEON_2RM_AESMC] = 0x1,
5057 [NEON_2RM_VCLS] = 0x7,
5058 [NEON_2RM_VCLZ] = 0x7,
5059 [NEON_2RM_VCNT] = 0x1,
5060 [NEON_2RM_VMVN] = 0x1,
5061 [NEON_2RM_VPADAL] = 0x7,
5062 [NEON_2RM_VPADAL_U] = 0x7,
5063 [NEON_2RM_VQABS] = 0x7,
5064 [NEON_2RM_VQNEG] = 0x7,
5065 [NEON_2RM_VCGT0] = 0x7,
5066 [NEON_2RM_VCGE0] = 0x7,
5067 [NEON_2RM_VCEQ0] = 0x7,
5068 [NEON_2RM_VCLE0] = 0x7,
5069 [NEON_2RM_VCLT0] = 0x7,
5070 [NEON_2RM_SHA1H] = 0x4,
5071 [NEON_2RM_VABS] = 0x7,
5072 [NEON_2RM_VNEG] = 0x7,
5073 [NEON_2RM_VCGT0_F] = 0x4,
5074 [NEON_2RM_VCGE0_F] = 0x4,
5075 [NEON_2RM_VCEQ0_F] = 0x4,
5076 [NEON_2RM_VCLE0_F] = 0x4,
5077 [NEON_2RM_VCLT0_F] = 0x4,
5078 [NEON_2RM_VABS_F] = 0x4,
5079 [NEON_2RM_VNEG_F] = 0x4,
5080 [NEON_2RM_VSWP] = 0x1,
5081 [NEON_2RM_VTRN] = 0x7,
5082 [NEON_2RM_VUZP] = 0x7,
5083 [NEON_2RM_VZIP] = 0x7,
5084 [NEON_2RM_VMOVN] = 0x7,
5085 [NEON_2RM_VQMOVN] = 0x7,
5086 [NEON_2RM_VSHLL] = 0x7,
5087 [NEON_2RM_SHA1SU1] = 0x4,
5088 [NEON_2RM_VRINTN] = 0x4,
5089 [NEON_2RM_VRINTX] = 0x4,
5090 [NEON_2RM_VRINTA] = 0x4,
5091 [NEON_2RM_VRINTZ] = 0x4,
5092 [NEON_2RM_VCVT_F16_F32] = 0x2,
5093 [NEON_2RM_VRINTM] = 0x4,
5094 [NEON_2RM_VCVT_F32_F16] = 0x2,
5095 [NEON_2RM_VRINTP] = 0x4,
5096 [NEON_2RM_VCVTAU] = 0x4,
5097 [NEON_2RM_VCVTAS] = 0x4,
5098 [NEON_2RM_VCVTNU] = 0x4,
5099 [NEON_2RM_VCVTNS] = 0x4,
5100 [NEON_2RM_VCVTPU] = 0x4,
5101 [NEON_2RM_VCVTPS] = 0x4,
5102 [NEON_2RM_VCVTMU] = 0x4,
5103 [NEON_2RM_VCVTMS] = 0x4,
5104 [NEON_2RM_VRECPE] = 0x4,
5105 [NEON_2RM_VRSQRTE] = 0x4,
5106 [NEON_2RM_VRECPE_F] = 0x4,
5107 [NEON_2RM_VRSQRTE_F] = 0x4,
5108 [NEON_2RM_VCVT_FS] = 0x4,
5109 [NEON_2RM_VCVT_FU] = 0x4,
5110 [NEON_2RM_VCVT_SF] = 0x4,
5111 [NEON_2RM_VCVT_UF] = 0x4,
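/* For example, every op that neon_2rm_is_float_op() reports as
 * float-to-float is 0x4 (single precision only), while VREV16 is 0x1
 * (bytes only) and the half-precision conversions are 0x2. */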
5114 /* Translate a NEON data processing instruction. Return nonzero if the
5115 instruction is invalid.
5116 We process data in a mixture of 32-bit and 64-bit chunks.
5117 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
5119 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
5121 int op;
5122 int q;
5123 int rd, rn, rm;
5124 int size;
5125 int shift;
5126 int pass;
5127 int count;
5128 int pairwise;
5129 int u;
5130 uint32_t imm, mask;
5131 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
5132 TCGv_i64 tmp64;
5134 /* FIXME: this access check should not take precedence over UNDEF
5135 * for invalid encodings; we will generate incorrect syndrome information
5136 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5138 if (s->fp_excp_el) {
5139 gen_exception_insn(s, 4, EXCP_UDEF,
5140 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
5141 return 0;
5144 if (!s->vfp_enabled)
5145 return 1;
5146 q = (insn & (1 << 6)) != 0;
5147 u = (insn >> 24) & 1;
5148 VFP_DREG_D(rd, insn);
5149 VFP_DREG_N(rn, insn);
5150 VFP_DREG_M(rm, insn);
5151 size = (insn >> 20) & 3;
5152 if ((insn & (1 << 23)) == 0) {
5153 /* Three register same length. */
5154 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
5155 /* Catch invalid op and bad size combinations: UNDEF */
5156 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5157 return 1;
5159 /* All insns of this form UNDEF for either this condition or the
5160 * superset of cases "Q==1"; we catch the latter later.
5162 if (q && ((rd | rn | rm) & 1)) {
5163 return 1;
5166 * The SHA-1/SHA-256 3-register instructions require special treatment
5167 * here, as their size field is overloaded as an op type selector, and
5168 * they all consume their input in a single pass.
5170 if (op == NEON_3R_SHA) {
5171 if (!q) {
5172 return 1;
5174 if (!u) { /* SHA-1 */
5175 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
5176 return 1;
5178 tmp = tcg_const_i32(rd);
5179 tmp2 = tcg_const_i32(rn);
5180 tmp3 = tcg_const_i32(rm);
5181 tmp4 = tcg_const_i32(size);
5182 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5183 tcg_temp_free_i32(tmp4);
5184 } else { /* SHA-256 */
5185 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
5186 return 1;
5188 tmp = tcg_const_i32(rd);
5189 tmp2 = tcg_const_i32(rn);
5190 tmp3 = tcg_const_i32(rm);
5191 switch (size) {
5192 case 0:
5193 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5194 break;
5195 case 1:
5196 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5197 break;
5198 case 2:
5199 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5200 break;
5203 tcg_temp_free_i32(tmp);
5204 tcg_temp_free_i32(tmp2);
5205 tcg_temp_free_i32(tmp3);
5206 return 0;
5208 if (size == 3 && op != NEON_3R_LOGIC) {
5209 /* 64-bit element instructions. */
5210 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5211 neon_load_reg64(cpu_V0, rn + pass);
5212 neon_load_reg64(cpu_V1, rm + pass);
5213 switch (op) {
5214 case NEON_3R_VQADD:
5215 if (u) {
5216 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5217 cpu_V0, cpu_V1);
5218 } else {
5219 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5220 cpu_V0, cpu_V1);
5222 break;
5223 case NEON_3R_VQSUB:
5224 if (u) {
5225 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5226 cpu_V0, cpu_V1);
5227 } else {
5228 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5229 cpu_V0, cpu_V1);
5231 break;
5232 case NEON_3R_VSHL:
5233 if (u) {
5234 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5235 } else {
5236 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5238 break;
5239 case NEON_3R_VQSHL:
5240 if (u) {
5241 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5242 cpu_V1, cpu_V0);
5243 } else {
5244 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5245 cpu_V1, cpu_V0);
5247 break;
5248 case NEON_3R_VRSHL:
5249 if (u) {
5250 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
5251 } else {
5252 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5254 break;
5255 case NEON_3R_VQRSHL:
5256 if (u) {
5257 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5258 cpu_V1, cpu_V0);
5259 } else {
5260 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5261 cpu_V1, cpu_V0);
5263 break;
5264 case NEON_3R_VADD_VSUB:
5265 if (u) {
5266 tcg_gen_sub_i64(CPU_V001);
5267 } else {
5268 tcg_gen_add_i64(CPU_V001);
5270 break;
5271 default:
5272 abort();
5274 neon_store_reg64(cpu_V0, rd + pass);
5276 return 0;
5278 pairwise = 0;
5279 switch (op) {
5280 case NEON_3R_VSHL:
5281 case NEON_3R_VQSHL:
5282 case NEON_3R_VRSHL:
5283 case NEON_3R_VQRSHL:
5285 int rtmp;
5286 /* Shift instruction operands are reversed. */
5287 rtmp = rn;
5288 rn = rm;
5289 rm = rtmp;
5291 break;
5292 case NEON_3R_VPADD:
5293 if (u) {
5294 return 1;
5296 /* Fall through */
5297 case NEON_3R_VPMAX:
5298 case NEON_3R_VPMIN:
5299 pairwise = 1;
5300 break;
5301 case NEON_3R_FLOAT_ARITH:
5302 pairwise = (u && size < 2); /* if VPADD (float) */
5303 break;
5304 case NEON_3R_FLOAT_MINMAX:
5305 pairwise = u; /* if VPMIN/VPMAX (float) */
5306 break;
5307 case NEON_3R_FLOAT_CMP:
5308 if (!u && size) {
5309 /* no encoding for U=0 C=1x */
5310 return 1;
5312 break;
5313 case NEON_3R_FLOAT_ACMP:
5314 if (!u) {
5315 return 1;
5317 break;
5318 case NEON_3R_FLOAT_MISC:
5319 /* VMAXNM/VMINNM in ARMv8 */
5320 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
5321 return 1;
5323 break;
5324 case NEON_3R_VMUL:
5325 if (u && (size != 0)) {
5326 /* UNDEF on invalid size for polynomial subcase */
5327 return 1;
5329 break;
5330 case NEON_3R_VFM:
5331 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
5332 return 1;
5334 break;
5335 default:
5336 break;
5339 if (pairwise && q) {
5340 /* All the pairwise insns UNDEF if Q is set */
5341 return 1;
5344 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5346 if (pairwise) {
5347 /* Pairwise. */
5348 if (pass < 1) {
5349 tmp = neon_load_reg(rn, 0);
5350 tmp2 = neon_load_reg(rn, 1);
5351 } else {
5352 tmp = neon_load_reg(rm, 0);
5353 tmp2 = neon_load_reg(rm, 1);
5355 } else {
5356 /* Elementwise. */
5357 tmp = neon_load_reg(rn, pass);
5358 tmp2 = neon_load_reg(rm, pass);
5360 switch (op) {
5361 case NEON_3R_VHADD:
5362 GEN_NEON_INTEGER_OP(hadd);
5363 break;
5364 case NEON_3R_VQADD:
5365 GEN_NEON_INTEGER_OP_ENV(qadd);
5366 break;
5367 case NEON_3R_VRHADD:
5368 GEN_NEON_INTEGER_OP(rhadd);
5369 break;
5370 case NEON_3R_LOGIC: /* Logic ops. */
5371 switch ((u << 2) | size) {
5372 case 0: /* VAND */
5373 tcg_gen_and_i32(tmp, tmp, tmp2);
5374 break;
5375 case 1: /* BIC */
5376 tcg_gen_andc_i32(tmp, tmp, tmp2);
5377 break;
5378 case 2: /* VORR */
5379 tcg_gen_or_i32(tmp, tmp, tmp2);
5380 break;
5381 case 3: /* VORN */
5382 tcg_gen_orc_i32(tmp, tmp, tmp2);
5383 break;
5384 case 4: /* VEOR */
5385 tcg_gen_xor_i32(tmp, tmp, tmp2);
5386 break;
5387 case 5: /* VBSL */
5388 tmp3 = neon_load_reg(rd, pass);
5389 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
5390 tcg_temp_free_i32(tmp3);
5391 break;
5392 case 6: /* VBIT */
5393 tmp3 = neon_load_reg(rd, pass);
5394 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
5395 tcg_temp_free_i32(tmp3);
5396 break;
5397 case 7: /* VBIF */
5398 tmp3 = neon_load_reg(rd, pass);
5399 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
5400 tcg_temp_free_i32(tmp3);
5401 break;
5403 break;
5404 case NEON_3R_VHSUB:
5405 GEN_NEON_INTEGER_OP(hsub);
5406 break;
5407 case NEON_3R_VQSUB:
5408 GEN_NEON_INTEGER_OP_ENV(qsub);
5409 break;
5410 case NEON_3R_VCGT:
5411 GEN_NEON_INTEGER_OP(cgt);
5412 break;
5413 case NEON_3R_VCGE:
5414 GEN_NEON_INTEGER_OP(cge);
5415 break;
5416 case NEON_3R_VSHL:
5417 GEN_NEON_INTEGER_OP(shl);
5418 break;
5419 case NEON_3R_VQSHL:
5420 GEN_NEON_INTEGER_OP_ENV(qshl);
5421 break;
5422 case NEON_3R_VRSHL:
5423 GEN_NEON_INTEGER_OP(rshl);
5424 break;
5425 case NEON_3R_VQRSHL:
5426 GEN_NEON_INTEGER_OP_ENV(qrshl);
5427 break;
5428 case NEON_3R_VMAX:
5429 GEN_NEON_INTEGER_OP(max);
5430 break;
5431 case NEON_3R_VMIN:
5432 GEN_NEON_INTEGER_OP(min);
5433 break;
5434 case NEON_3R_VABD:
5435 GEN_NEON_INTEGER_OP(abd);
5436 break;
5437 case NEON_3R_VABA:
5438 GEN_NEON_INTEGER_OP(abd);
5439 tcg_temp_free_i32(tmp2);
5440 tmp2 = neon_load_reg(rd, pass);
5441 gen_neon_add(size, tmp, tmp2);
5442 break;
5443 case NEON_3R_VADD_VSUB:
5444 if (!u) { /* VADD */
5445 gen_neon_add(size, tmp, tmp2);
5446 } else { /* VSUB */
5447 switch (size) {
5448 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5449 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5450 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
5451 default: abort();
5454 break;
5455 case NEON_3R_VTST_VCEQ:
5456 if (!u) { /* VTST */
5457 switch (size) {
5458 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5459 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5460 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
5461 default: abort();
5463 } else { /* VCEQ */
5464 switch (size) {
5465 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5466 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5467 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5468 default: abort();
5471 break;
5472 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
5473 switch (size) {
5474 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5475 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5476 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5477 default: abort();
5479 tcg_temp_free_i32(tmp2);
5480 tmp2 = neon_load_reg(rd, pass);
5481 if (u) { /* VMLS */
5482 gen_neon_rsb(size, tmp, tmp2);
5483 } else { /* VMLA */
5484 gen_neon_add(size, tmp, tmp2);
5486 break;
5487 case NEON_3R_VMUL:
5488 if (u) { /* polynomial */
5489 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
5490 } else { /* Integer */
5491 switch (size) {
5492 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5493 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5494 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5495 default: abort();
5498 break;
5499 case NEON_3R_VPMAX:
5500 GEN_NEON_INTEGER_OP(pmax);
5501 break;
5502 case NEON_3R_VPMIN:
5503 GEN_NEON_INTEGER_OP(pmin);
5504 break;
5505 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
5506 if (!u) { /* VQDMULH */
5507 switch (size) {
5508 case 1:
5509 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5510 break;
5511 case 2:
5512 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5513 break;
5514 default: abort();
5516 } else { /* VQRDMULH */
5517 switch (size) {
5518 case 1:
5519 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5520 break;
5521 case 2:
5522 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5523 break;
5524 default: abort();
5527 break;
5528 case NEON_3R_VPADD:
5529 switch (size) {
5530 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5531 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5532 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
5533 default: abort();
5535 break;
5536 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
5538 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5539 switch ((u << 2) | size) {
5540 case 0: /* VADD */
5541 case 4: /* VPADD */
5542 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5543 break;
5544 case 2: /* VSUB */
5545 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
5546 break;
5547 case 6: /* VABD */
5548 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5549 break;
5550 default:
5551 abort();
5553 tcg_temp_free_ptr(fpstatus);
5554 break;
5556 case NEON_3R_FLOAT_MULTIPLY:
5558 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5559 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5560 if (!u) {
5561 tcg_temp_free_i32(tmp2);
5562 tmp2 = neon_load_reg(rd, pass);
5563 if (size == 0) {
5564 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5565 } else {
5566 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5569 tcg_temp_free_ptr(fpstatus);
5570 break;
5572 case NEON_3R_FLOAT_CMP:
5574 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5575 if (!u) {
5576 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
5577 } else {
5578 if (size == 0) {
5579 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5580 } else {
5581 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5584 tcg_temp_free_ptr(fpstatus);
5585 break;
5587 case NEON_3R_FLOAT_ACMP:
5589 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5590 if (size == 0) {
5591 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5592 } else {
5593 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5595 tcg_temp_free_ptr(fpstatus);
5596 break;
5598 case NEON_3R_FLOAT_MINMAX:
5600 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5601 if (size == 0) {
5602 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
5603 } else {
5604 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
5606 tcg_temp_free_ptr(fpstatus);
5607 break;
5609 case NEON_3R_FLOAT_MISC:
5610 if (u) {
5611 /* VMAXNM/VMINNM */
5612 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5613 if (size == 0) {
5614 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
5615 } else {
5616 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
5618 tcg_temp_free_ptr(fpstatus);
5619 } else {
5620 if (size == 0) {
5621 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5622 } else {
5623 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5626 break;
5627 case NEON_3R_VFM:
5629 /* VFMA, VFMS: fused multiply-add */
5630 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5631 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5632 if (size) {
5633 /* VFMS */
5634 gen_helper_vfp_negs(tmp, tmp);
5636 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5637 tcg_temp_free_i32(tmp3);
5638 tcg_temp_free_ptr(fpstatus);
5639 break;
5641 default:
5642 abort();
5644 tcg_temp_free_i32(tmp2);
5646 /* Save the result. For elementwise operations we can put it
5647 straight into the destination register. For pairwise operations
5648 we have to be careful to avoid clobbering the source operands. */
5649 if (pairwise && rd == rm) {
5650 neon_store_scratch(pass, tmp);
5651 } else {
5652 neon_store_reg(rd, pass, tmp);
5655 } /* for pass */
5656 if (pairwise && rd == rm) {
5657 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5658 tmp = neon_load_scratch(pass);
5659 neon_store_reg(rd, pass, tmp);
5662 /* End of 3 register same size operations. */
5663 } else if (insn & (1 << 4)) {
5664 if ((insn & 0x00380080) != 0) {
5665 /* Two registers and shift. */
5666 op = (insn >> 8) & 0xf;
5667 if (insn & (1 << 7)) {
5668 /* 64-bit shift. */
5669 if (op > 7) {
5670 return 1;
5672 size = 3;
5673 } else {
5674 size = 2;
5675 while ((insn & (1 << (size + 19))) == 0)
5676 size--;
5678 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5679 /* To avoid excessive duplication of ops we implement shift
5680 by immediate using the variable shift operations. */
5681 if (op < 8) {
5682 /* Shift by immediate:
5683 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5684 if (q && ((rd | rm) & 1)) {
5685 return 1;
5687 if (!u && (op == 4 || op == 6)) {
5688 return 1;
5690 /* Right shifts are encoded as N - shift, where N is the
5691 element size in bits. */
5692 if (op <= 4)
5693 shift = shift - (1 << (size + 3));
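/* For example, VSHR.S8 #3 arrives here with a shift field of 5;
 * 5 - 8 = -3, and the variable shift helpers treat a negative count
 * as a right shift. */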
5694 if (size == 3) {
5695 count = q + 1;
5696 } else {
5697 count = q ? 4 : 2;
5699 switch (size) {
5700 case 0:
5701 imm = (uint8_t) shift;
5702 imm |= imm << 8;
5703 imm |= imm << 16;
5704 break;
5705 case 1:
5706 imm = (uint16_t) shift;
5707 imm |= imm << 16;
5708 break;
5709 case 2:
5710 case 3:
5711 imm = shift;
5712 break;
5713 default:
5714 abort();
5717 for (pass = 0; pass < count; pass++) {
5718 if (size == 3) {
5719 neon_load_reg64(cpu_V0, rm + pass);
5720 tcg_gen_movi_i64(cpu_V1, imm);
5721 switch (op) {
5722 case 0: /* VSHR */
5723 case 1: /* VSRA */
5724 if (u)
5725 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5726 else
5727 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
5728 break;
5729 case 2: /* VRSHR */
5730 case 3: /* VRSRA */
5731 if (u)
5732 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5733 else
5734 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5735 break;
5736 case 4: /* VSRI */
5737 case 5: /* VSHL, VSLI */
5738 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5739 break;
5740 case 6: /* VQSHLU */
5741 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5742 cpu_V0, cpu_V1);
5743 break;
5744 case 7: /* VQSHL */
5745 if (u) {
5746 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5747 cpu_V0, cpu_V1);
5748 } else {
5749 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5750 cpu_V0, cpu_V1);
5752 break;
5754 if (op == 1 || op == 3) {
5755 /* Accumulate. */
5756 neon_load_reg64(cpu_V1, rd + pass);
5757 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5758 } else if (op == 4 || (op == 5 && u)) {
5759 /* Insert */
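/* 'mask' covers the bits produced by the shifted source; the
 * complementary bits are preserved from the destination, giving the
 * shift-and-insert semantics of VSRI and VSLI. */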
5760 neon_load_reg64(cpu_V1, rd + pass);
5761 uint64_t mask;
5762 if (shift < -63 || shift > 63) {
5763 mask = 0;
5764 } else {
5765 if (op == 4) {
5766 mask = 0xffffffffffffffffull >> -shift;
5767 } else {
5768 mask = 0xffffffffffffffffull << shift;
5771 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5772 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5774 neon_store_reg64(cpu_V0, rd + pass);
5775 } else { /* size < 3 */
5776 /* Operands in T0 and T1. */
5777 tmp = neon_load_reg(rm, pass);
5778 tmp2 = tcg_temp_new_i32();
5779 tcg_gen_movi_i32(tmp2, imm);
5780 switch (op) {
5781 case 0: /* VSHR */
5782 case 1: /* VSRA */
5783 GEN_NEON_INTEGER_OP(shl);
5784 break;
5785 case 2: /* VRSHR */
5786 case 3: /* VRSRA */
5787 GEN_NEON_INTEGER_OP(rshl);
5788 break;
5789 case 4: /* VSRI */
5790 case 5: /* VSHL, VSLI */
5791 switch (size) {
5792 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5793 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5794 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5795 default: abort();
5797 break;
5798 case 6: /* VQSHLU */
5799 switch (size) {
5800 case 0:
5801 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5802 tmp, tmp2);
5803 break;
5804 case 1:
5805 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5806 tmp, tmp2);
5807 break;
5808 case 2:
5809 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5810 tmp, tmp2);
5811 break;
5812 default:
5813 abort();
5815 break;
5816 case 7: /* VQSHL */
5817 GEN_NEON_INTEGER_OP_ENV(qshl);
5818 break;
5820 tcg_temp_free_i32(tmp2);
5822 if (op == 1 || op == 3) {
5823 /* Accumulate. */
5824 tmp2 = neon_load_reg(rd, pass);
5825 gen_neon_add(size, tmp, tmp2);
5826 tcg_temp_free_i32(tmp2);
5827 } else if (op == 4 || (op == 5 && u)) {
5828 /* Insert */
5829 switch (size) {
5830 case 0:
5831 if (op == 4)
5832 mask = 0xff >> -shift;
5833 else
5834 mask = (uint8_t)(0xff << shift);
5835 mask |= mask << 8;
5836 mask |= mask << 16;
5837 break;
5838 case 1:
5839 if (op == 4)
5840 mask = 0xffff >> -shift;
5841 else
5842 mask = (uint16_t)(0xffff << shift);
5843 mask |= mask << 16;
5844 break;
5845 case 2:
5846 if (shift < -31 || shift > 31) {
5847 mask = 0;
5848 } else {
5849 if (op == 4)
5850 mask = 0xffffffffu >> -shift;
5851 else
5852 mask = 0xffffffffu << shift;
5854 break;
5855 default:
5856 abort();
5858 tmp2 = neon_load_reg(rd, pass);
5859 tcg_gen_andi_i32(tmp, tmp, mask);
5860 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5861 tcg_gen_or_i32(tmp, tmp, tmp2);
5862 tcg_temp_free_i32(tmp2);
5864 neon_store_reg(rd, pass, tmp);
5866 } /* for pass */
5867 } else if (op < 10) {
5868 /* Shift by immediate and narrow:
5869 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5870 int input_unsigned = (op == 8) ? !u : u;
5871 if (rm & 1) {
5872 return 1;
5874 shift = shift - (1 << (size + 3));
5875 size++;
5876 if (size == 3) {
5877 tmp64 = tcg_const_i64(shift);
5878 neon_load_reg64(cpu_V0, rm);
5879 neon_load_reg64(cpu_V1, rm + 1);
5880 for (pass = 0; pass < 2; pass++) {
5881 TCGv_i64 in;
5882 if (pass == 0) {
5883 in = cpu_V0;
5884 } else {
5885 in = cpu_V1;
5887 if (q) {
5888 if (input_unsigned) {
5889 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5890 } else {
5891 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5893 } else {
5894 if (input_unsigned) {
5895 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5896 } else {
5897 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5900 tmp = tcg_temp_new_i32();
5901 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5902 neon_store_reg(rd, pass, tmp);
5903 } /* for pass */
5904 tcg_temp_free_i64(tmp64);
5905 } else {
5906 if (size == 1) {
5907 imm = (uint16_t)shift;
5908 imm |= imm << 16;
5909 } else {
5910 /* size == 2 */
5911 imm = (uint32_t)shift;
5913 tmp2 = tcg_const_i32(imm);
5914 tmp4 = neon_load_reg(rm + 1, 0);
5915 tmp5 = neon_load_reg(rm + 1, 1);
5916 for (pass = 0; pass < 2; pass++) {
5917 if (pass == 0) {
5918 tmp = neon_load_reg(rm, 0);
5919 } else {
5920 tmp = tmp4;
5922 gen_neon_shift_narrow(size, tmp, tmp2, q,
5923 input_unsigned);
5924 if (pass == 0) {
5925 tmp3 = neon_load_reg(rm, 1);
5926 } else {
5927 tmp3 = tmp5;
5929 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5930 input_unsigned);
5931 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5932 tcg_temp_free_i32(tmp);
5933 tcg_temp_free_i32(tmp3);
5934 tmp = tcg_temp_new_i32();
5935 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5936 neon_store_reg(rd, pass, tmp);
5937 } /* for pass */
5938 tcg_temp_free_i32(tmp2);
5940 } else if (op == 10) {
5941 /* VSHLL, VMOVL */
5942 if (q || (rd & 1)) {
5943 return 1;
5945 tmp = neon_load_reg(rm, 0);
5946 tmp2 = neon_load_reg(rm, 1);
5947 for (pass = 0; pass < 2; pass++) {
5948 if (pass == 1)
5949 tmp = tmp2;
5951 gen_neon_widen(cpu_V0, tmp, size, u);
5953 if (shift != 0) {
5954 /* The shift is less than the width of the source
5955 type, so we can just shift the whole register. */
5956 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5957 /* Widen the result of shift: we need to clear
5958 * the potential overflow bits resulting from
5959 * left bits of the narrow input appearing as
5960 * right bits of the left neighbour narrow
5961 * input. */
5962 if (size < 2 || !u) {
5963 uint64_t imm64;
5964 if (size == 0) {
5965 imm = (0xffu >> (8 - shift));
5966 imm |= imm << 16;
5967 } else if (size == 1) {
5968 imm = 0xffff >> (16 - shift);
5969 } else {
5970 /* size == 2 */
5971 imm = 0xffffffff >> (32 - shift);
5973 if (size < 2) {
5974 imm64 = imm | (((uint64_t)imm) << 32);
5975 } else {
5976 imm64 = imm;
5978 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5981 neon_store_reg64(cpu_V0, rd + pass);
5983 } else if (op >= 14) {
5984 /* VCVT fixed-point. */
5985 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5986 return 1;
5988 /* We have already masked out the must-be-1 top bit of imm6,
5989 * hence this 32-shift where the ARM ARM has 64-imm6.
5991 shift = 32 - shift;
5992 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5993 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5994 if (!(op & 1)) {
5995 if (u)
5996 gen_vfp_ulto(0, shift, 1);
5997 else
5998 gen_vfp_slto(0, shift, 1);
5999 } else {
6000 if (u)
6001 gen_vfp_toul(0, shift, 1);
6002 else
6003 gen_vfp_tosl(0, shift, 1);
6005 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
6007 } else {
6008 return 1;
6010 } else { /* (insn & 0x00380080) == 0 */
6011 int invert;
6012 if (q && (rd & 1)) {
6013 return 1;
6016 op = (insn >> 8) & 0xf;
6017 /* One register and immediate. */
6018 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6019 invert = (insn & (1 << 5)) != 0;
6020 /* Note that op = 2,3,4,5,6,7,10,11,12,13 with imm == 0 is UNPREDICTABLE.
6021 * We choose to not special-case this and will behave as if a
6022 * valid constant encoding of 0 had been given.
6024 switch (op) {
6025 case 0: case 1:
6026 /* no-op */
6027 break;
6028 case 2: case 3:
6029 imm <<= 8;
6030 break;
6031 case 4: case 5:
6032 imm <<= 16;
6033 break;
6034 case 6: case 7:
6035 imm <<= 24;
6036 break;
6037 case 8: case 9:
6038 imm |= imm << 16;
6039 break;
6040 case 10: case 11:
6041 imm = (imm << 8) | (imm << 24);
6042 break;
6043 case 12:
6044 imm = (imm << 8) | 0xff;
6045 break;
6046 case 13:
6047 imm = (imm << 16) | 0xffff;
6048 break;
6049 case 14:
6050 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6051 if (invert)
6052 imm = ~imm;
6053 break;
6054 case 15:
6055 if (invert) {
6056 return 1;
6058 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6059 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6060 break;
6062 if (invert)
6063 imm = ~imm;
6065 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6066 if (op & 1 && op < 12) {
6067 tmp = neon_load_reg(rd, pass);
6068 if (invert) {
6069 /* The immediate value has already been inverted, so
6070 BIC becomes AND. */
6071 tcg_gen_andi_i32(tmp, tmp, imm);
6072 } else {
6073 tcg_gen_ori_i32(tmp, tmp, imm);
6075 } else {
6076 /* VMOV, VMVN. */
6077 tmp = tcg_temp_new_i32();
6078 if (op == 14 && invert) {
6079 int n;
6080 uint32_t val;
6081 val = 0;
6082 for (n = 0; n < 4; n++) {
6083 if (imm & (1 << (n + (pass & 1) * 4)))
6084 val |= 0xff << (n * 8);
6086 tcg_gen_movi_i32(tmp, val);
6087 } else {
6088 tcg_gen_movi_i32(tmp, imm);
6091 neon_store_reg(rd, pass, tmp);
6094 } else { /* (insn & 0x00800010 == 0x00800000) */
6095 if (size != 3) {
6096 op = (insn >> 8) & 0xf;
6097 if ((insn & (1 << 6)) == 0) {
6098 /* Three registers of different lengths. */
6099 int src1_wide;
6100 int src2_wide;
6101 int prewiden;
6102 /* undefreq: bit 0 : UNDEF if size == 0
6103 * bit 1 : UNDEF if size == 1
6104 * bit 2 : UNDEF if size == 2
6105 * bit 3 : UNDEF if U == 1
6106 * Note that [2:0] set implies 'always UNDEF'
6108 int undefreq;
6109 /* prewiden, src1_wide, src2_wide, undefreq */
6110 static const int neon_3reg_wide[16][4] = {
6111 {1, 0, 0, 0}, /* VADDL */
6112 {1, 1, 0, 0}, /* VADDW */
6113 {1, 0, 0, 0}, /* VSUBL */
6114 {1, 1, 0, 0}, /* VSUBW */
6115 {0, 1, 1, 0}, /* VADDHN */
6116 {0, 0, 0, 0}, /* VABAL */
6117 {0, 1, 1, 0}, /* VSUBHN */
6118 {0, 0, 0, 0}, /* VABDL */
6119 {0, 0, 0, 0}, /* VMLAL */
6120 {0, 0, 0, 9}, /* VQDMLAL */
6121 {0, 0, 0, 0}, /* VMLSL */
6122 {0, 0, 0, 9}, /* VQDMLSL */
6123 {0, 0, 0, 0}, /* Integer VMULL */
6124 {0, 0, 0, 1}, /* VQDMULL */
6125 {0, 0, 0, 0xa}, /* Polynomial VMULL */
6126 {0, 0, 0, 7}, /* Reserved: always UNDEF */
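/* For example, VQDMLAL's undefreq of 9 (bits 0 and 3) makes it UNDEF
 * for size == 0 or U == 1, and the polynomial VMULL value 0xa rejects
 * size == 1 and U == 1; its size == 2 (VMULL.P64) form is handled
 * separately below. */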
6129 prewiden = neon_3reg_wide[op][0];
6130 src1_wide = neon_3reg_wide[op][1];
6131 src2_wide = neon_3reg_wide[op][2];
6132 undefreq = neon_3reg_wide[op][3];
6134 if ((undefreq & (1 << size)) ||
6135 ((undefreq & 8) && u)) {
6136 return 1;
6138 if ((src1_wide && (rn & 1)) ||
6139 (src2_wide && (rm & 1)) ||
6140 (!src2_wide && (rd & 1))) {
6141 return 1;
6144 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6145 * outside the loop below as it only performs a single pass.
6147 if (op == 14 && size == 2) {
6148 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6150 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
6151 return 1;
6153 tcg_rn = tcg_temp_new_i64();
6154 tcg_rm = tcg_temp_new_i64();
6155 tcg_rd = tcg_temp_new_i64();
6156 neon_load_reg64(tcg_rn, rn);
6157 neon_load_reg64(tcg_rm, rm);
6158 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6159 neon_store_reg64(tcg_rd, rd);
6160 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6161 neon_store_reg64(tcg_rd, rd + 1);
6162 tcg_temp_free_i64(tcg_rn);
6163 tcg_temp_free_i64(tcg_rm);
6164 tcg_temp_free_i64(tcg_rd);
6165 return 0;
6168 /* Avoid overlapping operands. Wide source operands are
6169 always aligned so will never overlap with wide
6170 destinations in problematic ways. */
6171 if (rd == rm && !src2_wide) {
6172 tmp = neon_load_reg(rm, 1);
6173 neon_store_scratch(2, tmp);
6174 } else if (rd == rn && !src1_wide) {
6175 tmp = neon_load_reg(rn, 1);
6176 neon_store_scratch(2, tmp);
6178 TCGV_UNUSED_I32(tmp3);
6179 for (pass = 0; pass < 2; pass++) {
6180 if (src1_wide) {
6181 neon_load_reg64(cpu_V0, rn + pass);
6182 TCGV_UNUSED_I32(tmp);
6183 } else {
6184 if (pass == 1 && rd == rn) {
6185 tmp = neon_load_scratch(2);
6186 } else {
6187 tmp = neon_load_reg(rn, pass);
6189 if (prewiden) {
6190 gen_neon_widen(cpu_V0, tmp, size, u);
6193 if (src2_wide) {
6194 neon_load_reg64(cpu_V1, rm + pass);
6195 TCGV_UNUSED_I32(tmp2);
6196 } else {
6197 if (pass == 1 && rd == rm) {
6198 tmp2 = neon_load_scratch(2);
6199 } else {
6200 tmp2 = neon_load_reg(rm, pass);
6202 if (prewiden) {
6203 gen_neon_widen(cpu_V1, tmp2, size, u);
6206 switch (op) {
6207 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6208 gen_neon_addl(size);
6209 break;
6210 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6211 gen_neon_subl(size);
6212 break;
6213 case 5: case 7: /* VABAL, VABDL */
6214 switch ((size << 1) | u) {
6215 case 0:
6216 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6217 break;
6218 case 1:
6219 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6220 break;
6221 case 2:
6222 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6223 break;
6224 case 3:
6225 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6226 break;
6227 case 4:
6228 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6229 break;
6230 case 5:
6231 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6232 break;
6233 default: abort();
6235 tcg_temp_free_i32(tmp2);
6236 tcg_temp_free_i32(tmp);
6237 break;
6238 case 8: case 9: case 10: case 11: case 12: case 13:
6239 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6240 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6241 break;
6242 case 14: /* Polynomial VMULL */
6243 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
6244 tcg_temp_free_i32(tmp2);
6245 tcg_temp_free_i32(tmp);
6246 break;
6247 default: /* 15 is RESERVED: caught earlier */
6248 abort();
6250 if (op == 13) {
6251 /* VQDMULL */
6252 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6253 neon_store_reg64(cpu_V0, rd + pass);
6254 } else if (op == 5 || (op >= 8 && op <= 11)) {
6255 /* Accumulate. */
6256 neon_load_reg64(cpu_V1, rd + pass);
6257 switch (op) {
6258 case 10: /* VMLSL */
6259 gen_neon_negl(cpu_V0, size);
6260 /* Fall through */
6261 case 5: case 8: /* VABAL, VMLAL */
6262 gen_neon_addl(size);
6263 break;
6264 case 9: case 11: /* VQDMLAL, VQDMLSL */
6265 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6266 if (op == 11) {
6267 gen_neon_negl(cpu_V0, size);
6269 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6270 break;
6271 default:
6272 abort();
6274 neon_store_reg64(cpu_V0, rd + pass);
6275 } else if (op == 4 || op == 6) {
6276 /* Narrowing operation. */
6277 tmp = tcg_temp_new_i32();
6278 if (!u) {
6279 switch (size) {
6280 case 0:
6281 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6282 break;
6283 case 1:
6284 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6285 break;
6286 case 2:
6287 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6288 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6289 break;
6290 default: abort();
6292 } else {
6293 switch (size) {
6294 case 0:
6295 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6296 break;
6297 case 1:
6298 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6299 break;
6300 case 2:
6301 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6302 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6303 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6304 break;
6305 default: abort();
6308 if (pass == 0) {
6309 tmp3 = tmp;
6310 } else {
6311 neon_store_reg(rd, 0, tmp3);
6312 neon_store_reg(rd, 1, tmp);
6314 } else {
6315 /* Write back the result. */
6316 neon_store_reg64(cpu_V0, rd + pass);
6319 } else {
6320 /* Two registers and a scalar. NB that for ops of this form
6321 * the ARM ARM labels bit 24 as Q, but it is in our variable
6322 * 'u', not 'q'.
6324 if (size == 0) {
6325 return 1;
6327 switch (op) {
6328 case 1: /* Float VMLA scalar */
6329 case 5: /* Floating point VMLS scalar */
6330 case 9: /* Floating point VMUL scalar */
6331 if (size == 1) {
6332 return 1;
6334 /* fall through */
6335 case 0: /* Integer VMLA scalar */
6336 case 4: /* Integer VMLS scalar */
6337 case 8: /* Integer VMUL scalar */
6338 case 12: /* VQDMULH scalar */
6339 case 13: /* VQRDMULH scalar */
6340 if (u && ((rd | rn) & 1)) {
6341 return 1;
6343 tmp = neon_get_scalar(size, rm);
6344 neon_store_scratch(0, tmp);
6345 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6346 tmp = neon_load_scratch(0);
6347 tmp2 = neon_load_reg(rn, pass);
6348 if (op == 12) {
6349 if (size == 1) {
6350 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6351 } else {
6352 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6354 } else if (op == 13) {
6355 if (size == 1) {
6356 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6357 } else {
6358 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6360 } else if (op & 1) {
6361 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6362 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6363 tcg_temp_free_ptr(fpstatus);
6364 } else {
6365 switch (size) {
6366 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6367 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6368 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6369 default: abort();
6372 tcg_temp_free_i32(tmp2);
6373 if (op < 8) {
6374 /* Accumulate. */
6375 tmp2 = neon_load_reg(rd, pass);
6376 switch (op) {
6377 case 0:
6378 gen_neon_add(size, tmp, tmp2);
6379 break;
6380 case 1:
6382 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6383 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6384 tcg_temp_free_ptr(fpstatus);
6385 break;
6387 case 4:
6388 gen_neon_rsb(size, tmp, tmp2);
6389 break;
6390 case 5:
6392 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6393 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6394 tcg_temp_free_ptr(fpstatus);
6395 break;
6397 default:
6398 abort();
6400 tcg_temp_free_i32(tmp2);
6402 neon_store_reg(rd, pass, tmp);
6404 break;
6405 case 3: /* VQDMLAL scalar */
6406 case 7: /* VQDMLSL scalar */
6407 case 11: /* VQDMULL scalar */
6408 if (u == 1) {
6409 return 1;
6411 /* fall through */
6412 case 2: /* VMLAL scalar */
6413 case 6: /* VMLSL scalar */
6414 case 10: /* VMULL scalar */
6415 if (rd & 1) {
6416 return 1;
6418 tmp2 = neon_get_scalar(size, rm);
6419 /* We need a copy of tmp2 because gen_neon_mull
6420 * frees it during pass 0. */
6421 tmp4 = tcg_temp_new_i32();
6422 tcg_gen_mov_i32(tmp4, tmp2);
6423 tmp3 = neon_load_reg(rn, 1);
6425 for (pass = 0; pass < 2; pass++) {
6426 if (pass == 0) {
6427 tmp = neon_load_reg(rn, 0);
6428 } else {
6429 tmp = tmp3;
6430 tmp2 = tmp4;
6432 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6433 if (op != 11) {
6434 neon_load_reg64(cpu_V1, rd + pass);
6436 switch (op) {
6437 case 6:
6438 gen_neon_negl(cpu_V0, size);
6439 /* Fall through */
6440 case 2:
6441 gen_neon_addl(size);
6442 break;
6443 case 3: case 7:
6444 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6445 if (op == 7) {
6446 gen_neon_negl(cpu_V0, size);
6448 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6449 break;
6450 case 10:
6451 /* no-op */
6452 break;
6453 case 11:
6454 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6455 break;
6456 default:
6457 abort();
6459 neon_store_reg64(cpu_V0, rd + pass);
6463 break;
6464 default: /* 14 and 15 are RESERVED */
6465 return 1;
6468 } else { /* size == 3 */
6469 if (!u) {
6470 /* Extract. */
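/* VEXT extracts a (q ? 16 : 8)-byte window starting at byte imm from
 * the concatenation of Vn (low half) and Vm (high half). */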
6471 imm = (insn >> 8) & 0xf;
6473 if (imm > 7 && !q)
6474 return 1;
6476 if (q && ((rd | rn | rm) & 1)) {
6477 return 1;
6480 if (imm == 0) {
6481 neon_load_reg64(cpu_V0, rn);
6482 if (q) {
6483 neon_load_reg64(cpu_V1, rn + 1);
6485 } else if (imm == 8) {
6486 neon_load_reg64(cpu_V0, rn + 1);
6487 if (q) {
6488 neon_load_reg64(cpu_V1, rm);
6490 } else if (q) {
6491 tmp64 = tcg_temp_new_i64();
6492 if (imm < 8) {
6493 neon_load_reg64(cpu_V0, rn);
6494 neon_load_reg64(tmp64, rn + 1);
6495 } else {
6496 neon_load_reg64(cpu_V0, rn + 1);
6497 neon_load_reg64(tmp64, rm);
6499 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
6500 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
6501 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6502 if (imm < 8) {
6503 neon_load_reg64(cpu_V1, rm);
6504 } else {
6505 neon_load_reg64(cpu_V1, rm + 1);
6506 imm -= 8;
6508 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6509 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6510 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
6511 tcg_temp_free_i64(tmp64);
6512 } else {
6513 /* BUGFIX */
6514 neon_load_reg64(cpu_V0, rn);
6515 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
6516 neon_load_reg64(cpu_V1, rm);
6517 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6518 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6520 neon_store_reg64(cpu_V0, rd);
6521 if (q) {
6522 neon_store_reg64(cpu_V1, rd + 1);
6524 } else if ((insn & (1 << 11)) == 0) {
6525 /* Two register misc. */
6526 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6527 size = (insn >> 18) & 3;
6528 /* UNDEF for unknown op values and bad op-size combinations */
6529 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6530 return 1;
6532 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6533 q && ((rm | rd) & 1)) {
6534 return 1;
6536 switch (op) {
6537 case NEON_2RM_VREV64:
6538 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6539 tmp = neon_load_reg(rm, pass * 2);
6540 tmp2 = neon_load_reg(rm, pass * 2 + 1);
6541 switch (size) {
6542 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6543 case 1: gen_swap_half(tmp); break;
6544 case 2: /* no-op */ break;
6545 default: abort();
6547 neon_store_reg(rd, pass * 2 + 1, tmp);
6548 if (size == 2) {
6549 neon_store_reg(rd, pass * 2, tmp2);
6550 } else {
6551 switch (size) {
6552 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6553 case 1: gen_swap_half(tmp2); break;
6554 default: abort();
6556 neon_store_reg(rd, pass * 2, tmp2);
6559 break;
6560 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6561 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
6562 for (pass = 0; pass < q + 1; pass++) {
6563 tmp = neon_load_reg(rm, pass * 2);
6564 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6565 tmp = neon_load_reg(rm, pass * 2 + 1);
6566 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6567 switch (size) {
6568 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6569 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6570 case 2: tcg_gen_add_i64(CPU_V001); break;
6571 default: abort();
6573 if (op >= NEON_2RM_VPADAL) {
6574 /* Accumulate. */
6575 neon_load_reg64(cpu_V1, rd + pass);
6576 gen_neon_addl(size);
6578 neon_store_reg64(cpu_V0, rd + pass);
6580 break;
6581 case NEON_2RM_VTRN:
6582 if (size == 2) {
6583 int n;
6584 for (n = 0; n < (q ? 4 : 2); n += 2) {
6585 tmp = neon_load_reg(rm, n);
6586 tmp2 = neon_load_reg(rd, n + 1);
6587 neon_store_reg(rm, n, tmp2);
6588 neon_store_reg(rd, n + 1, tmp);
6590 } else {
6591 goto elementwise;
6593 break;
6594 case NEON_2RM_VUZP:
6595 if (gen_neon_unzip(rd, rm, size, q)) {
6596 return 1;
6598 break;
6599 case NEON_2RM_VZIP:
6600 if (gen_neon_zip(rd, rm, size, q)) {
6601 return 1;
6603 break;
6604 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6605 /* also VQMOVUN; op field and mnemonics don't line up */
6606 if (rm & 1) {
6607 return 1;
6609 TCGV_UNUSED_I32(tmp2);
6610 for (pass = 0; pass < 2; pass++) {
6611 neon_load_reg64(cpu_V0, rm + pass);
6612 tmp = tcg_temp_new_i32();
6613 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6614 tmp, cpu_V0);
6615 if (pass == 0) {
6616 tmp2 = tmp;
6617 } else {
6618 neon_store_reg(rd, 0, tmp2);
6619 neon_store_reg(rd, 1, tmp);
6622 break;
6623 case NEON_2RM_VSHLL:
6624 if (q || (rd & 1)) {
6625 return 1;
6627 tmp = neon_load_reg(rm, 0);
6628 tmp2 = neon_load_reg(rm, 1);
6629 for (pass = 0; pass < 2; pass++) {
6630 if (pass == 1)
6631 tmp = tmp2;
6632 gen_neon_widen(cpu_V0, tmp, size, 1);
6633 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
6634 neon_store_reg64(cpu_V0, rd + pass);
6636 break;
6637 case NEON_2RM_VCVT_F16_F32:
6638 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
6639 q || (rm & 1)) {
6640 return 1;
6642 tmp = tcg_temp_new_i32();
6643 tmp2 = tcg_temp_new_i32();
6644 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
6645 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6646 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
6647 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6648 tcg_gen_shli_i32(tmp2, tmp2, 16);
6649 tcg_gen_or_i32(tmp2, tmp2, tmp);
6650 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
6651 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6652 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6653 neon_store_reg(rd, 0, tmp2);
6654 tmp2 = tcg_temp_new_i32();
6655 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6656 tcg_gen_shli_i32(tmp2, tmp2, 16);
6657 tcg_gen_or_i32(tmp2, tmp2, tmp);
6658 neon_store_reg(rd, 1, tmp2);
6659 tcg_temp_free_i32(tmp);
6660 break;
6661 case NEON_2RM_VCVT_F32_F16:
6662 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
6663 q || (rd & 1)) {
6664 return 1;
6666 tmp3 = tcg_temp_new_i32();
6667 tmp = neon_load_reg(rm, 0);
6668 tmp2 = neon_load_reg(rm, 1);
6669 tcg_gen_ext16u_i32(tmp3, tmp);
6670 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6671 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6672 tcg_gen_shri_i32(tmp3, tmp, 16);
6673 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6674 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
6675 tcg_temp_free_i32(tmp);
6676 tcg_gen_ext16u_i32(tmp3, tmp2);
6677 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6678 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6679 tcg_gen_shri_i32(tmp3, tmp2, 16);
6680 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6681 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
6682 tcg_temp_free_i32(tmp2);
6683 tcg_temp_free_i32(tmp3);
6684 break;
6685 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6686 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
6687 || ((rm | rd) & 1)) {
6688 return 1;
6690 tmp = tcg_const_i32(rd);
6691 tmp2 = tcg_const_i32(rm);
6693 /* Bit 6 is the lowest opcode bit; it distinguishes between
6694 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6696 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6698 if (op == NEON_2RM_AESE) {
6699 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6700 } else {
6701 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6703 tcg_temp_free_i32(tmp);
6704 tcg_temp_free_i32(tmp2);
6705 tcg_temp_free_i32(tmp3);
6706 break;
6707 case NEON_2RM_SHA1H:
6708 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
6709 || ((rm | rd) & 1)) {
6710 return 1;
6712 tmp = tcg_const_i32(rd);
6713 tmp2 = tcg_const_i32(rm);
6715 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
6717 tcg_temp_free_i32(tmp);
6718 tcg_temp_free_i32(tmp2);
6719 break;
6720 case NEON_2RM_SHA1SU1:
6721 if ((rm | rd) & 1) {
6722 return 1;
6724 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6725 if (q) {
6726 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
6727 return 1;
6729 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
6730 return 1;
6732 tmp = tcg_const_i32(rd);
6733 tmp2 = tcg_const_i32(rm);
6734 if (q) {
6735 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
6736 } else {
6737 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
6739 tcg_temp_free_i32(tmp);
6740 tcg_temp_free_i32(tmp2);
6741 break;
6742 default:
6743 elementwise:
6744 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6745 if (neon_2rm_is_float_op(op)) {
6746 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6747 neon_reg_offset(rm, pass));
6748 TCGV_UNUSED_I32(tmp);
6749 } else {
6750 tmp = neon_load_reg(rm, pass);
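/* Note: the float ops below operate in place on cpu_F0s and leave
 * tmp unused; the integer ops operate on tmp. The store-back at the
 * bottom of this loop makes the same neon_2rm_is_float_op()
 * distinction. */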
6752 switch (op) {
6753 case NEON_2RM_VREV32:
6754 switch (size) {
6755 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6756 case 1: gen_swap_half(tmp); break;
6757 default: abort();
6759 break;
6760 case NEON_2RM_VREV16:
6761 gen_rev16(tmp);
6762 break;
6763 case NEON_2RM_VCLS:
6764 switch (size) {
6765 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6766 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6767 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
6768 default: abort();
6770 break;
6771 case NEON_2RM_VCLZ:
6772 switch (size) {
6773 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6774 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6775 case 2: gen_helper_clz(tmp, tmp); break;
6776 default: abort();
6778 break;
6779 case NEON_2RM_VCNT:
6780 gen_helper_neon_cnt_u8(tmp, tmp);
6781 break;
6782 case NEON_2RM_VMVN:
6783 tcg_gen_not_i32(tmp, tmp);
6784 break;
6785 case NEON_2RM_VQABS:
6786 switch (size) {
6787 case 0:
6788 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6789 break;
6790 case 1:
6791 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6792 break;
6793 case 2:
6794 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6795 break;
6796 default: abort();
6798 break;
6799 case NEON_2RM_VQNEG:
6800 switch (size) {
6801 case 0:
6802 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6803 break;
6804 case 1:
6805 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6806 break;
6807 case 2:
6808 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6809 break;
6810 default: abort();
6812 break;
6813 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6814 tmp2 = tcg_const_i32(0);
6815 switch(size) {
6816 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6817 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6818 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6819 default: abort();
6821 tcg_temp_free_i32(tmp2);
6822 if (op == NEON_2RM_VCLE0) {
6823 tcg_gen_not_i32(tmp, tmp);
6825 break;
6826 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6827 tmp2 = tcg_const_i32(0);
6828 switch(size) {
6829 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6830 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6831 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6832 default: abort();
6834 tcg_temp_free_i32(tmp2);
6835 if (op == NEON_2RM_VCLT0) {
6836 tcg_gen_not_i32(tmp, tmp);
6838 break;
6839 case NEON_2RM_VCEQ0:
6840 tmp2 = tcg_const_i32(0);
6841 switch(size) {
6842 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6843 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6844 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6845 default: abort();
6847 tcg_temp_free_i32(tmp2);
6848 break;
6849 case NEON_2RM_VABS:
6850 switch(size) {
6851 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6852 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6853 case 2: tcg_gen_abs_i32(tmp, tmp); break;
6854 default: abort();
6856 break;
6857 case NEON_2RM_VNEG:
6858 tmp2 = tcg_const_i32(0);
6859 gen_neon_rsb(size, tmp, tmp2);
6860 tcg_temp_free_i32(tmp2);
6861 break;
6862 case NEON_2RM_VCGT0_F:
6864 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6865 tmp2 = tcg_const_i32(0);
6866 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6867 tcg_temp_free_i32(tmp2);
6868 tcg_temp_free_ptr(fpstatus);
6869 break;
6871 case NEON_2RM_VCGE0_F:
6873 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6874 tmp2 = tcg_const_i32(0);
6875 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6876 tcg_temp_free_i32(tmp2);
6877 tcg_temp_free_ptr(fpstatus);
6878 break;
6880 case NEON_2RM_VCEQ0_F:
6882 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6883 tmp2 = tcg_const_i32(0);
6884 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6885 tcg_temp_free_i32(tmp2);
6886 tcg_temp_free_ptr(fpstatus);
6887 break;
6889 case NEON_2RM_VCLE0_F:
6891 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6892 tmp2 = tcg_const_i32(0);
6893 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6894 tcg_temp_free_i32(tmp2);
6895 tcg_temp_free_ptr(fpstatus);
6896 break;
6898 case NEON_2RM_VCLT0_F:
6900 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6901 tmp2 = tcg_const_i32(0);
6902 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6903 tcg_temp_free_i32(tmp2);
6904 tcg_temp_free_ptr(fpstatus);
6905 break;
6907 case NEON_2RM_VABS_F:
6908 gen_vfp_abs(0);
6909 break;
6910 case NEON_2RM_VNEG_F:
6911 gen_vfp_neg(0);
6912 break;
6913 case NEON_2RM_VSWP:
6914 tmp2 = neon_load_reg(rd, pass);
6915 neon_store_reg(rm, pass, tmp2);
6916 break;
6917 case NEON_2RM_VTRN:
6918 tmp2 = neon_load_reg(rd, pass);
6919 switch (size) {
6920 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6921 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6922 default: abort();
6924 neon_store_reg(rm, pass, tmp2);
6925 break;
6926 case NEON_2RM_VRINTN:
6927 case NEON_2RM_VRINTA:
6928 case NEON_2RM_VRINTM:
6929 case NEON_2RM_VRINTP:
6930 case NEON_2RM_VRINTZ:
6932 TCGv_i32 tcg_rmode;
6933 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6934 int rmode;
6936 if (op == NEON_2RM_VRINTZ) {
6937 rmode = FPROUNDING_ZERO;
6938 } else {
6939 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6942 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6943 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6944 cpu_env);
6945 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
6946 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6947 cpu_env);
6948 tcg_temp_free_ptr(fpstatus);
6949 tcg_temp_free_i32(tcg_rmode);
6950 break;
6952 case NEON_2RM_VRINTX:
6954 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6955 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
6956 tcg_temp_free_ptr(fpstatus);
6957 break;
6959 case NEON_2RM_VCVTAU:
6960 case NEON_2RM_VCVTAS:
6961 case NEON_2RM_VCVTNU:
6962 case NEON_2RM_VCVTNS:
6963 case NEON_2RM_VCVTPU:
6964 case NEON_2RM_VCVTPS:
6965 case NEON_2RM_VCVTMU:
6966 case NEON_2RM_VCVTMS:
6968 bool is_signed = !extract32(insn, 7, 1);
6969 TCGv_ptr fpst = get_fpstatus_ptr(1);
6970 TCGv_i32 tcg_rmode, tcg_shift;
6971 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6973 tcg_shift = tcg_const_i32(0);
6974 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6975 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6976 cpu_env);
6978 if (is_signed) {
6979 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
6980 tcg_shift, fpst);
6981 } else {
6982 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
6983 tcg_shift, fpst);
6986 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6987 cpu_env);
6988 tcg_temp_free_i32(tcg_rmode);
6989 tcg_temp_free_i32(tcg_shift);
6990 tcg_temp_free_ptr(fpst);
6991 break;
6993 case NEON_2RM_VRECPE:
6995 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6996 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6997 tcg_temp_free_ptr(fpstatus);
6998 break;
7000 case NEON_2RM_VRSQRTE:
7002 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7003 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7004 tcg_temp_free_ptr(fpstatus);
7005 break;
7007 case NEON_2RM_VRECPE_F:
7009 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7010 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7011 tcg_temp_free_ptr(fpstatus);
7012 break;
7014 case NEON_2RM_VRSQRTE_F:
7016 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7017 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7018 tcg_temp_free_ptr(fpstatus);
7019 break;
7021 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
7022 gen_vfp_sito(0, 1);
7023 break;
7024 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
7025 gen_vfp_uito(0, 1);
7026 break;
7027 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
7028 gen_vfp_tosiz(0, 1);
7029 break;
7030 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
7031 gen_vfp_touiz(0, 1);
7032 break;
7033 default:
7034 /* Reserved op values were caught by the
7035 * neon_2rm_sizes[] check earlier.
7037 abort();
7039 if (neon_2rm_is_float_op(op)) {
7040 tcg_gen_st_f32(cpu_F0s, cpu_env,
7041 neon_reg_offset(rd, pass));
7042 } else {
7043 neon_store_reg(rd, pass, tmp);
7046 break;
7048 } else if ((insn & (1 << 10)) == 0) {
7049 /* VTBL, VTBX. */
7050 int n = ((insn >> 8) & 3) + 1;
7051 if ((rn + n) > 32) {
7052 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7053 * helper function running off the end of the register file.
7055 return 1;
7057 n <<= 3;
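/* n is now the length of the table in bytes (list registers * 8),
 * which is what the tbl helper below is assumed to expect. */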
7058 if (insn & (1 << 6)) {
7059 tmp = neon_load_reg(rd, 0);
7060 } else {
7061 tmp = tcg_temp_new_i32();
7062 tcg_gen_movi_i32(tmp, 0);
7064 tmp2 = neon_load_reg(rm, 0);
7065 tmp4 = tcg_const_i32(rn);
7066 tmp5 = tcg_const_i32(n);
7067 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7068 tcg_temp_free_i32(tmp);
7069 if (insn & (1 << 6)) {
7070 tmp = neon_load_reg(rd, 1);
7071 } else {
7072 tmp = tcg_temp_new_i32();
7073 tcg_gen_movi_i32(tmp, 0);
7075 tmp3 = neon_load_reg(rm, 1);
7076 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
7077 tcg_temp_free_i32(tmp5);
7078 tcg_temp_free_i32(tmp4);
7079 neon_store_reg(rd, 0, tmp2);
7080 neon_store_reg(rd, 1, tmp3);
7081 tcg_temp_free_i32(tmp);
7082 } else if ((insn & 0x380) == 0) {
7083 /* VDUP */
7084 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7085 return 1;
7087 if (insn & (1 << 19)) {
7088 tmp = neon_load_reg(rm, 1);
7089 } else {
7090 tmp = neon_load_reg(rm, 0);
7092 if (insn & (1 << 16)) {
7093 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
7094 } else if (insn & (1 << 17)) {
7095 if ((insn >> 18) & 1)
7096 gen_neon_dup_high16(tmp);
7097 else
7098 gen_neon_dup_low16(tmp);
7100 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7101 tmp2 = tcg_temp_new_i32();
7102 tcg_gen_mov_i32(tmp2, tmp);
7103 neon_store_reg(rd, pass, tmp2);
7105 tcg_temp_free_i32(tmp);
7106 } else {
7107 return 1;
7111 return 0;
7114 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
7116 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7117 const ARMCPRegInfo *ri;
7119 cpnum = (insn >> 8) & 0xf;
7121 /* First check for coprocessor space used for XScale/iwMMXt insns */
7122 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
7123 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7124 return 1;
7126 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7127 return disas_iwmmxt_insn(s, insn);
7128 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7129 return disas_dsp_insn(s, insn);
7131 return 1;
7134 /* Otherwise treat as a generic register access */
7135 is64 = (insn & (1 << 25)) == 0;
7136 if (!is64 && ((insn & (1 << 4)) == 0)) {
7137 /* cdp */
7138 return 1;
7141 crm = insn & 0xf;
7142 if (is64) {
7143 crn = 0;
7144 opc1 = (insn >> 4) & 0xf;
7145 opc2 = 0;
7146 rt2 = (insn >> 16) & 0xf;
7147 } else {
7148 crn = (insn >> 16) & 0xf;
7149 opc1 = (insn >> 21) & 7;
7150 opc2 = (insn >> 5) & 7;
7151 rt2 = 0;
7153 isread = (insn >> 20) & 1;
7154 rt = (insn >> 12) & 0xf;
7156 ri = get_arm_cp_reginfo(s->cp_regs,
7157 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
7158 if (ri) {
7159 /* Check access permissions */
7160 if (!cp_access_ok(s->current_el, ri, isread)) {
7161 return 1;
7164 if (ri->accessfn ||
7165 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
7166 /* Emit code to perform further access permissions checks at
7167 * runtime; this may result in an exception.
7168 * Note that on XScale all cp0..cp13 registers do an access check
7169 * call in order to handle c15_cpar.
7171 TCGv_ptr tmpptr;
7172 TCGv_i32 tcg_syn, tcg_isread;
7173 uint32_t syndrome;
7175 /* Note that since we are an implementation which takes an
7176 * exception on a trapped conditional instruction only if the
7177 * instruction passes its condition code check, we can take
7178 * advantage of the clause in the ARM ARM that allows us to set
7179 * the COND field in the instruction to 0xE in all cases.
7180 * We could fish the actual condition out of the insn (ARM)
7181 * or the condexec bits (Thumb) but it isn't necessary.
7183 switch (cpnum) {
7184 case 14:
7185 if (is64) {
7186 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7187 isread, false);
7188 } else {
7189 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7190 rt, isread, false);
7192 break;
7193 case 15:
7194 if (is64) {
7195 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7196 isread, false);
7197 } else {
7198 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7199 rt, isread, false);
7201 break;
7202 default:
7203 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7204 * so this can only happen if this is an ARMv7 or earlier CPU,
7205 * in which case the syndrome information won't actually be
7206 * guest visible.
7208 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
7209 syndrome = syn_uncategorized();
7210 break;
7213 gen_set_condexec(s);
7214 gen_set_pc_im(s, s->pc - 4);
7215 tmpptr = tcg_const_ptr(ri);
7216 tcg_syn = tcg_const_i32(syndrome);
7217 tcg_isread = tcg_const_i32(isread);
7218 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7219 tcg_isread);
7220 tcg_temp_free_ptr(tmpptr);
7221 tcg_temp_free_i32(tcg_syn);
7222 tcg_temp_free_i32(tcg_isread);
7225 /* Handle special cases first */
7226 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7227 case ARM_CP_NOP:
7228 return 0;
7229 case ARM_CP_WFI:
7230 if (isread) {
7231 return 1;
7233 gen_set_pc_im(s, s->pc);
7234 s->is_jmp = DISAS_WFI;
7235 return 0;
7236 default:
7237 break;
7240 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7241 gen_io_start();
7244 if (isread) {
7245 /* Read */
7246 if (is64) {
7247 TCGv_i64 tmp64;
7248 TCGv_i32 tmp;
7249 if (ri->type & ARM_CP_CONST) {
7250 tmp64 = tcg_const_i64(ri->resetvalue);
7251 } else if (ri->readfn) {
7252 TCGv_ptr tmpptr;
7253 tmp64 = tcg_temp_new_i64();
7254 tmpptr = tcg_const_ptr(ri);
7255 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7256 tcg_temp_free_ptr(tmpptr);
7257 } else {
7258 tmp64 = tcg_temp_new_i64();
7259 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7261 tmp = tcg_temp_new_i32();
7262 tcg_gen_extrl_i64_i32(tmp, tmp64);
7263 store_reg(s, rt, tmp);
7264 tcg_gen_shri_i64(tmp64, tmp64, 32);
7265 tmp = tcg_temp_new_i32();
7266 tcg_gen_extrl_i64_i32(tmp, tmp64);
7267 tcg_temp_free_i64(tmp64);
7268 store_reg(s, rt2, tmp);
7269 } else {
7270 TCGv_i32 tmp;
7271 if (ri->type & ARM_CP_CONST) {
7272 tmp = tcg_const_i32(ri->resetvalue);
7273 } else if (ri->readfn) {
7274 TCGv_ptr tmpptr;
7275 tmp = tcg_temp_new_i32();
7276 tmpptr = tcg_const_ptr(ri);
7277 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7278 tcg_temp_free_ptr(tmpptr);
7279 } else {
7280 tmp = load_cpu_offset(ri->fieldoffset);
7282 if (rt == 15) {
7283 /* A destination register of r15 for a 32-bit load sets
7284 * the condition codes from the high 4 bits of the value
7286 gen_set_nzcv(tmp);
7287 tcg_temp_free_i32(tmp);
7288 } else {
7289 store_reg(s, rt, tmp);
7292 } else {
7293 /* Write */
7294 if (ri->type & ARM_CP_CONST) {
7295 /* If not forbidden by access permissions, treat as WI */
7296 return 0;
7299 if (is64) {
7300 TCGv_i32 tmplo, tmphi;
7301 TCGv_i64 tmp64 = tcg_temp_new_i64();
7302 tmplo = load_reg(s, rt);
7303 tmphi = load_reg(s, rt2);
7304 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7305 tcg_temp_free_i32(tmplo);
7306 tcg_temp_free_i32(tmphi);
7307 if (ri->writefn) {
7308 TCGv_ptr tmpptr = tcg_const_ptr(ri);
7309 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7310 tcg_temp_free_ptr(tmpptr);
7311 } else {
7312 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7314 tcg_temp_free_i64(tmp64);
7315 } else {
7316 if (ri->writefn) {
7317 TCGv_i32 tmp;
7318 TCGv_ptr tmpptr;
7319 tmp = load_reg(s, rt);
7320 tmpptr = tcg_const_ptr(ri);
7321 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7322 tcg_temp_free_ptr(tmpptr);
7323 tcg_temp_free_i32(tmp);
7324 } else {
7325 TCGv_i32 tmp = load_reg(s, rt);
7326 store_cpu_offset(tmp, ri->fieldoffset);
7331 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7332 /* I/O operations must end the TB here (whether read or write) */
7333 gen_io_end();
7334 gen_lookup_tb(s);
7335 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
7336 /* We default to ending the TB on a coprocessor register write,
7337 * but allow this to be suppressed by the register definition
7338 * (usually only necessary to work around guest bugs).
7340 gen_lookup_tb(s);
7343 return 0;
7346 /* Unknown register; this might be a guest error or a QEMU
7347 * unimplemented feature.
7349 if (is64) {
7350 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7351 "64 bit system register cp:%d opc1: %d crm:%d "
7352 "(%s)\n",
7353 isread ? "read" : "write", cpnum, opc1, crm,
7354 s->ns ? "non-secure" : "secure");
7355 } else {
7356 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7357 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7358 "(%s)\n",
7359 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7360 s->ns ? "non-secure" : "secure");
7363 return 1;
7367 /* Store a 64-bit value to a register pair. Clobbers val. */
7368 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
7370 TCGv_i32 tmp;
7371 tmp = tcg_temp_new_i32();
7372 tcg_gen_extrl_i64_i32(tmp, val);
7373 store_reg(s, rlow, tmp);
7374 tmp = tcg_temp_new_i32();
7375 tcg_gen_shri_i64(val, val, 32);
7376 tcg_gen_extrl_i64_i32(tmp, val);
7377 store_reg(s, rhigh, tmp);
7380 /* Load a 32-bit value from a register and perform a 64-bit accumulate. */
7381 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
7383 TCGv_i64 tmp;
7384 TCGv_i32 tmp2;
7386 /* Load value and extend to 64 bits. */
7387 tmp = tcg_temp_new_i64();
7388 tmp2 = load_reg(s, rlow);
7389 tcg_gen_extu_i32_i64(tmp, tmp2);
7390 tcg_temp_free_i32(tmp2);
7391 tcg_gen_add_i64(val, val, tmp);
7392 tcg_temp_free_i64(tmp);
7395 /* Load and add a 64-bit value from a register pair. */
7396 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
7398 TCGv_i64 tmp;
7399 TCGv_i32 tmpl;
7400 TCGv_i32 tmph;
7402 /* Load 64-bit value rd:rn. */
7403 tmpl = load_reg(s, rlow);
7404 tmph = load_reg(s, rhigh);
7405 tmp = tcg_temp_new_i64();
7406 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7407 tcg_temp_free_i32(tmpl);
7408 tcg_temp_free_i32(tmph);
7409 tcg_gen_add_i64(val, val, tmp);
7410 tcg_temp_free_i64(tmp);
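/* These two helpers are typically used as a pair; the SMLALxy path in
 * disas_arm_insn() below does, in sketch form:
 *
 *     tmp64 = ...;                       // 64-bit product
 *     gen_addq(s, tmp64, rn, rd);        // tmp64 += {rd:rn}
 *     gen_storeq_reg(s, rn, rd, tmp64);  // {rd:rn} = tmp64
 */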
7413 /* Set N and Z flags from hi|lo. */
7414 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
7416 tcg_gen_mov_i32(cpu_NF, hi);
7417 tcg_gen_or_i32(cpu_ZF, lo, hi);
7420 /* Load/Store exclusive instructions are implemented by remembering
7421 the value/address loaded, and seeing if these are the same
7422 when the store is performed. This should be sufficient to implement
7423 the architecturally mandated semantics, and avoids having to monitor
7424 regular stores.
7426 In system emulation mode only one CPU will be running at once, so
7427 this sequence is effectively atomic. In user emulation mode we
7428 throw an exception and handle the atomic operation elsewhere. */
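/* A minimal C sketch of those semantics (illustrative only; the names
 * and the 32-bit-only access are assumptions, not part of this file):
 *
 *     uint64_t exclusive_addr, exclusive_val;    // monitor state
 *
 *     uint32_t ldrex(uint32_t addr) {
 *         exclusive_addr = addr;
 *         exclusive_val = mem[addr];
 *         return exclusive_val;
 *     }
 *     int strex(uint32_t addr, uint32_t val) {
 *         if (exclusive_addr == addr && exclusive_val == mem[addr]) {
 *             mem[addr] = val;
 *             return 0;                          // store succeeded
 *         }
 *         return 1;                              // failed; guest retries
 *     }
 */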
7429 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
7430 TCGv_i32 addr, int size)
7432 TCGv_i32 tmp = tcg_temp_new_i32();
7434 s->is_ldex = true;
7436 switch (size) {
7437 case 0:
7438 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
7439 break;
7440 case 1:
7441 gen_aa32_ld16ua(tmp, addr, get_mem_index(s));
7442 break;
7443 case 2:
7444 case 3:
7445 gen_aa32_ld32ua(tmp, addr, get_mem_index(s));
7446 break;
7447 default:
7448 abort();
7451 if (size == 3) {
7452 TCGv_i32 tmp2 = tcg_temp_new_i32();
7453 TCGv_i32 tmp3 = tcg_temp_new_i32();
7455 tcg_gen_addi_i32(tmp2, addr, 4);
7456 gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
7457 tcg_temp_free_i32(tmp2);
7458 tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
7459 store_reg(s, rt2, tmp3);
7460 } else {
7461 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
7464 store_reg(s, rt, tmp);
7465 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
7468 static void gen_clrex(DisasContext *s)
7470 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
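/* -1 cannot match any zero-extended 32-bit address, so it serves as
 * the "no open monitor" sentinel tested in gen_store_exclusive(). */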
7473 #ifdef CONFIG_USER_ONLY
7474 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7475 TCGv_i32 addr, int size)
7477 tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
7478 tcg_gen_movi_i32(cpu_exclusive_info,
7479 size | (rd << 4) | (rt << 8) | (rt2 << 12));
7480 gen_exception_internal_insn(s, 4, EXCP_STREX);
7482 #else
7483 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7484 TCGv_i32 addr, int size)
7486 TCGv_i32 tmp;
7487 TCGv_i64 val64, extaddr;
7488 TCGLabel *done_label;
7489 TCGLabel *fail_label;
7491 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7492 [addr] = {Rt};
7493 {Rd} = 0;
7494 } else {
7495 {Rd} = 1;
7496 } */
7497 fail_label = gen_new_label();
7498 done_label = gen_new_label();
7499 extaddr = tcg_temp_new_i64();
7500 tcg_gen_extu_i32_i64(extaddr, addr);
7501 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7502 tcg_temp_free_i64(extaddr);
7504 tmp = tcg_temp_new_i32();
7505 switch (size) {
7506 case 0:
7507 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
7508 break;
7509 case 1:
7510 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
7511 break;
7512 case 2:
7513 case 3:
7514 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7515 break;
7516 default:
7517 abort();
7520 val64 = tcg_temp_new_i64();
7521 if (size == 3) {
7522 TCGv_i32 tmp2 = tcg_temp_new_i32();
7523 TCGv_i32 tmp3 = tcg_temp_new_i32();
7524 tcg_gen_addi_i32(tmp2, addr, 4);
7525 gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
7526 tcg_temp_free_i32(tmp2);
7527 tcg_gen_concat_i32_i64(val64, tmp, tmp3);
7528 tcg_temp_free_i32(tmp3);
7529 } else {
7530 tcg_gen_extu_i32_i64(val64, tmp);
7532 tcg_temp_free_i32(tmp);
7534 tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
7535 tcg_temp_free_i64(val64);
7537 tmp = load_reg(s, rt);
7538 switch (size) {
7539 case 0:
7540 gen_aa32_st8(tmp, addr, get_mem_index(s));
7541 break;
7542 case 1:
7543 gen_aa32_st16(tmp, addr, get_mem_index(s));
7544 break;
7545 case 2:
7546 case 3:
7547 gen_aa32_st32(tmp, addr, get_mem_index(s));
7548 break;
7549 default:
7550 abort();
7552 tcg_temp_free_i32(tmp);
7553 if (size == 3) {
7554 tcg_gen_addi_i32(addr, addr, 4);
7555 tmp = load_reg(s, rt2);
7556 gen_aa32_st32(tmp, addr, get_mem_index(s));
7557 tcg_temp_free_i32(tmp);
7559 tcg_gen_movi_i32(cpu_R[rd], 0);
7560 tcg_gen_br(done_label);
7561 gen_set_label(fail_label);
7562 tcg_gen_movi_i32(cpu_R[rd], 1);
7563 gen_set_label(done_label);
7564 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7566 #endif
7568 /* gen_srs:
7569 * @env: CPUARMState
7570 * @s: DisasContext
7571 * @mode: mode field from insn (which stack to store to)
7572 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7573 * @writeback: true if writeback bit set
7575 * Generate code for the SRS (Store Return State) insn.
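/* The resulting addresses, tabulated from the two switches below
 * (base is the banked SP selected by mode):
 *
 *   amode  name  LR stored at  SPSR stored at  write-back value
 *     0     DA      SP - 4         SP              SP - 8
 *     1     IA      SP             SP + 4          SP + 8
 *     2     DB      SP - 8         SP - 4          SP - 8
 *     3     IB      SP + 4         SP + 8          SP + 8
 */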
7577 static void gen_srs(DisasContext *s,
7578 uint32_t mode, uint32_t amode, bool writeback)
7580 int32_t offset;
7581 TCGv_i32 addr = tcg_temp_new_i32();
7582 TCGv_i32 tmp = tcg_const_i32(mode);
7583 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7584 tcg_temp_free_i32(tmp);
7585 switch (amode) {
7586 case 0: /* DA */
7587 offset = -4;
7588 break;
7589 case 1: /* IA */
7590 offset = 0;
7591 break;
7592 case 2: /* DB */
7593 offset = -8;
7594 break;
7595 case 3: /* IB */
7596 offset = 4;
7597 break;
7598 default:
7599 abort();
7601 tcg_gen_addi_i32(addr, addr, offset);
7602 tmp = load_reg(s, 14);
7603 gen_aa32_st32(tmp, addr, get_mem_index(s));
7604 tcg_temp_free_i32(tmp);
7605 tmp = load_cpu_field(spsr);
7606 tcg_gen_addi_i32(addr, addr, 4);
7607 gen_aa32_st32(tmp, addr, get_mem_index(s));
7608 tcg_temp_free_i32(tmp);
7609 if (writeback) {
7610 switch (amode) {
7611 case 0:
7612 offset = -8;
7613 break;
7614 case 1:
7615 offset = 4;
7616 break;
7617 case 2:
7618 offset = -4;
7619 break;
7620 case 3:
7621 offset = 0;
7622 break;
7623 default:
7624 abort();
7626 tcg_gen_addi_i32(addr, addr, offset);
7627 tmp = tcg_const_i32(mode);
7628 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7629 tcg_temp_free_i32(tmp);
7631 tcg_temp_free_i32(addr);
7634 static void disas_arm_insn(DisasContext *s, unsigned int insn)
7636 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
7637 TCGv_i32 tmp;
7638 TCGv_i32 tmp2;
7639 TCGv_i32 tmp3;
7640 TCGv_i32 addr;
7641 TCGv_i64 tmp64;
7643 /* M variants do not implement ARM mode. */
7644 if (arm_dc_feature(s, ARM_FEATURE_M)) {
7645 goto illegal_op;
7647 cond = insn >> 28;
7648 if (cond == 0xf){
7649 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7650 * choose to UNDEF. In ARMv5 and above the space is used
7651 * for miscellaneous unconditional instructions.
7653 ARCH(5);
7655 /* Unconditional instructions. */
7656 if (((insn >> 25) & 7) == 1) {
7657 /* NEON Data processing. */
7658 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7659 goto illegal_op;
7662 if (disas_neon_data_insn(s, insn)) {
7663 goto illegal_op;
7665 return;
7667 if ((insn & 0x0f100000) == 0x04000000) {
7668 /* NEON load/store. */
7669 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7670 goto illegal_op;
7673 if (disas_neon_ls_insn(s, insn)) {
7674 goto illegal_op;
7676 return;
7678 if ((insn & 0x0f000e10) == 0x0e000a00) {
7679 /* VFP. */
7680 if (disas_vfp_insn(s, insn)) {
7681 goto illegal_op;
7683 return;
7685 if (((insn & 0x0f30f000) == 0x0510f000) ||
7686 ((insn & 0x0f30f010) == 0x0710f000)) {
7687 if ((insn & (1 << 22)) == 0) {
7688 /* PLDW; v7MP */
7689 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7690 goto illegal_op;
7693 /* Otherwise PLD; v5TE+ */
7694 ARCH(5TE);
7695 return;
7697 if (((insn & 0x0f70f000) == 0x0450f000) ||
7698 ((insn & 0x0f70f010) == 0x0650f000)) {
7699 ARCH(7);
7700 return; /* PLI; V7 */
7702 if (((insn & 0x0f700000) == 0x04100000) ||
7703 ((insn & 0x0f700010) == 0x06100000)) {
7704 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7705 goto illegal_op;
7707 return; /* v7MP: Unallocated memory hint: must NOP */
7710 if ((insn & 0x0ffffdff) == 0x01010000) {
7711 ARCH(6);
7712 /* setend */
7713 if (((insn >> 9) & 1) != s->bswap_code) {
7714 /* Dynamic endianness switching not implemented. */
7715 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
7716 goto illegal_op;
7718 return;
7719 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7720 switch ((insn >> 4) & 0xf) {
7721 case 1: /* clrex */
7722 ARCH(6K);
7723 gen_clrex(s);
7724 return;
7725 case 4: /* dsb */
7726 case 5: /* dmb */
7727 ARCH(7);
7728 /* We don't emulate caches, so these are a no-op. */
7729 return;
7730 case 6: /* isb */
7731 /* We need to break the TB after this insn to execute
7732 * self-modifying code correctly and also to take
7733 * any pending interrupts immediately.
7735 gen_lookup_tb(s);
7736 return;
7737 default:
7738 goto illegal_op;
7740 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7741 /* srs */
7742 if (IS_USER(s)) {
7743 goto illegal_op;
7745 ARCH(6);
7746 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
7747 return;
7748 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
7749 /* rfe */
7750 int32_t offset;
7751 if (IS_USER(s))
7752 goto illegal_op;
7753 ARCH(6);
7754 rn = (insn >> 16) & 0xf;
7755 addr = load_reg(s, rn);
7756 i = (insn >> 23) & 3;
7757 switch (i) {
7758 case 0: offset = -4; break; /* DA */
7759 case 1: offset = 0; break; /* IA */
7760 case 2: offset = -8; break; /* DB */
7761 case 3: offset = 4; break; /* IB */
7762 default: abort();
7764 if (offset)
7765 tcg_gen_addi_i32(addr, addr, offset);
7766 /* Load PC into tmp and CPSR into tmp2. */
7767 tmp = tcg_temp_new_i32();
7768 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7769 tcg_gen_addi_i32(addr, addr, 4);
7770 tmp2 = tcg_temp_new_i32();
7771 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
7772 if (insn & (1 << 21)) {
7773 /* Base writeback. */
7774 switch (i) {
7775 case 0: offset = -8; break;
7776 case 1: offset = 4; break;
7777 case 2: offset = -4; break;
7778 case 3: offset = 0; break;
7779 default: abort();
7781 if (offset)
7782 tcg_gen_addi_i32(addr, addr, offset);
7783 store_reg(s, rn, addr);
7784 } else {
7785 tcg_temp_free_i32(addr);
7787 gen_rfe(s, tmp, tmp2);
7788 return;
7789 } else if ((insn & 0x0e000000) == 0x0a000000) {
7790 /* branch link and change to thumb (blx <offset>) */
7791 int32_t offset;
7793 val = (uint32_t)s->pc;
7794 tmp = tcg_temp_new_i32();
7795 tcg_gen_movi_i32(tmp, val);
7796 store_reg(s, 14, tmp);
7797 /* Sign-extend the 24-bit offset */
7798 offset = (((int32_t)insn) << 8) >> 8;
7799 /* offset * 4 + bit24 * 2 + (thumb bit) */
7800 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7801 /* pipeline offset */
7802 val += 4;
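/* Worked example (illustrative): a BLX <offset> at guest address
 * 0x8000 with imm24 = 0x000010 and H (bit 24) clear branches to
 *     0x8000 + 8 + (0x10 << 2) = 0x8048
 * with the low bit of val set so that gen_bx_im switches to Thumb. */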
7803 /* protected by ARCH(5) above, near the start of the unconditional block */
7804 gen_bx_im(s, val);
7805 return;
7806 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7807 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7808 /* iWMMXt register transfer. */
7809 if (extract32(s->c15_cpar, 1, 1)) {
7810 if (!disas_iwmmxt_insn(s, insn)) {
7811 return;
7815 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7816 /* Coprocessor double register transfer. */
7817 ARCH(5TE);
7818 } else if ((insn & 0x0f000010) == 0x0e000010) {
7819 /* Additional coprocessor register transfer. */
7820 } else if ((insn & 0x0ff10020) == 0x01000000) {
7821 uint32_t mask;
7822 uint32_t val;
7823 /* cps (privileged) */
7824 if (IS_USER(s))
7825 return;
7826 mask = val = 0;
7827 if (insn & (1 << 19)) {
7828 if (insn & (1 << 8))
7829 mask |= CPSR_A;
7830 if (insn & (1 << 7))
7831 mask |= CPSR_I;
7832 if (insn & (1 << 6))
7833 mask |= CPSR_F;
7834 if (insn & (1 << 18))
7835 val |= mask;
7837 if (insn & (1 << 17)) {
7838 mask |= CPSR_M;
7839 val |= (insn & 0x1f);
7841 if (mask) {
7842 gen_set_psr_im(s, mask, 0, val);
7844 return;
7846 goto illegal_op;
7848 if (cond != 0xe) {
7849 /* If the condition is not "always", generate a conditional jump
7850 to the next instruction */
7851 s->condlabel = gen_new_label();
7852 arm_gen_test_cc(cond ^ 1, s->condlabel);
7853 s->condjmp = 1;
7855 if ((insn & 0x0f900000) == 0x03000000) {
7856 if ((insn & (1 << 21)) == 0) {
7857 ARCH(6T2);
7858 rd = (insn >> 12) & 0xf;
7859 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7860 if ((insn & (1 << 22)) == 0) {
7861 /* MOVW */
7862 tmp = tcg_temp_new_i32();
7863 tcg_gen_movi_i32(tmp, val);
7864 } else {
7865 /* MOVT */
7866 tmp = load_reg(s, rd);
7867 tcg_gen_ext16u_i32(tmp, tmp);
7868 tcg_gen_ori_i32(tmp, tmp, val << 16);
7870 store_reg(s, rd, tmp);
7871 } else {
7872 if (((insn >> 12) & 0xf) != 0xf)
7873 goto illegal_op;
7874 if (((insn >> 16) & 0xf) == 0) {
7875 gen_nop_hint(s, insn & 0xff);
7876 } else {
7877 /* CPSR = immediate */
7878 val = insn & 0xff;
7879 shift = ((insn >> 8) & 0xf) * 2;
7880 if (shift)
7881 val = (val >> shift) | (val << (32 - shift));
7882 i = ((insn & (1 << 22)) != 0);
7883 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
7884 i, val)) {
7885 goto illegal_op;
7889 } else if ((insn & 0x0f900000) == 0x01000000
7890 && (insn & 0x00000090) != 0x00000090) {
7891 /* miscellaneous instructions */
7892 op1 = (insn >> 21) & 3;
7893 sh = (insn >> 4) & 0xf;
7894 rm = insn & 0xf;
7895 switch (sh) {
7896 case 0x0: /* move program status register */
7897 if (op1 & 1) {
7898 /* PSR = reg */
7899 tmp = load_reg(s, rm);
7900 i = ((op1 & 2) != 0);
7901 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
7902 goto illegal_op;
7903 } else {
7904 /* reg = PSR */
7905 rd = (insn >> 12) & 0xf;
7906 if (op1 & 2) {
7907 if (IS_USER(s))
7908 goto illegal_op;
7909 tmp = load_cpu_field(spsr);
7910 } else {
7911 tmp = tcg_temp_new_i32();
7912 gen_helper_cpsr_read(tmp, cpu_env);
7914 store_reg(s, rd, tmp);
7916 break;
7917 case 0x1:
7918 if (op1 == 1) {
7919 /* branch/exchange thumb (bx). */
7920 ARCH(4T);
7921 tmp = load_reg(s, rm);
7922 gen_bx(s, tmp);
7923 } else if (op1 == 3) {
7924 /* clz */
7925 ARCH(5);
7926 rd = (insn >> 12) & 0xf;
7927 tmp = load_reg(s, rm);
7928 gen_helper_clz(tmp, tmp);
7929 store_reg(s, rd, tmp);
7930 } else {
7931 goto illegal_op;
7933 break;
7934 case 0x2:
7935 if (op1 == 1) {
7936 ARCH(5J); /* bxj */
7937 /* Trivial implementation equivalent to bx. */
7938 tmp = load_reg(s, rm);
7939 gen_bx(s, tmp);
7940 } else {
7941 goto illegal_op;
7943 break;
7944 case 0x3:
7945 if (op1 != 1)
7946 goto illegal_op;
7948 ARCH(5);
7949 /* branch link/exchange thumb (blx) */
7950 tmp = load_reg(s, rm);
7951 tmp2 = tcg_temp_new_i32();
7952 tcg_gen_movi_i32(tmp2, s->pc);
7953 store_reg(s, 14, tmp2);
7954 gen_bx(s, tmp);
7955 break;
7956 case 0x4:
7958 /* crc32/crc32c */
7959 uint32_t c = extract32(insn, 8, 4);
7961 /* Check that this CPU supports ARMv8 CRC instructions.
7962 * op1 == 3 is UNPREDICTABLE; we handle it as UNDEFINED.
7963 * Bits 8, 10 and 11 should be zero.
7965 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
7966 (c & 0xd) != 0) {
7967 goto illegal_op;
7970 rn = extract32(insn, 16, 4);
7971 rd = extract32(insn, 12, 4);
7973 tmp = load_reg(s, rn);
7974 tmp2 = load_reg(s, rm);
7975 if (op1 == 0) {
7976 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
7977 } else if (op1 == 1) {
7978 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
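/* 1 << op1 is the operand width in bytes, matching the masking
 * above: 1 = CRC32(C)B, 2 = CRC32(C)H, 4 = CRC32(C)W. */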
7980 tmp3 = tcg_const_i32(1 << op1);
7981 if (c & 0x2) {
7982 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
7983 } else {
7984 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
7986 tcg_temp_free_i32(tmp2);
7987 tcg_temp_free_i32(tmp3);
7988 store_reg(s, rd, tmp);
7989 break;
7991 case 0x5: /* saturating add/subtract */
7992 ARCH(5TE);
7993 rd = (insn >> 12) & 0xf;
7994 rn = (insn >> 16) & 0xf;
7995 tmp = load_reg(s, rm);
7996 tmp2 = load_reg(s, rn);
7997 if (op1 & 2)
7998 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
7999 if (op1 & 1)
8000 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
8001 else
8002 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8003 tcg_temp_free_i32(tmp2);
8004 store_reg(s, rd, tmp);
8005 break;
8006 case 7:
8008 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
8009 switch (op1) {
8010 case 1:
8011 /* bkpt */
8012 ARCH(5);
8013 gen_exception_insn(s, 4, EXCP_BKPT,
8014 syn_aa32_bkpt(imm16, false),
8015 default_exception_el(s));
8016 break;
8017 case 2:
8018 /* Hypervisor call (v7) */
8019 ARCH(7);
8020 if (IS_USER(s)) {
8021 goto illegal_op;
8023 gen_hvc(s, imm16);
8024 break;
8025 case 3:
8026 /* Secure monitor call (v6+) */
8027 ARCH(6K);
8028 if (IS_USER(s)) {
8029 goto illegal_op;
8031 gen_smc(s);
8032 break;
8033 default:
8034 goto illegal_op;
8036 break;
8038 case 0x8: /* signed multiply */
8039 case 0xa:
8040 case 0xc:
8041 case 0xe:
8042 ARCH(5TE);
8043 rs = (insn >> 8) & 0xf;
8044 rn = (insn >> 12) & 0xf;
8045 rd = (insn >> 16) & 0xf;
8046 if (op1 == 1) {
8047 /* (32 * 16) >> 16 */
8048 tmp = load_reg(s, rm);
8049 tmp2 = load_reg(s, rs);
8050 if (sh & 4)
8051 tcg_gen_sari_i32(tmp2, tmp2, 16);
8052 else
8053 gen_sxth(tmp2);
8054 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8055 tcg_gen_shri_i64(tmp64, tmp64, 16);
8056 tmp = tcg_temp_new_i32();
8057 tcg_gen_extrl_i64_i32(tmp, tmp64);
8058 tcg_temp_free_i64(tmp64);
8059 if ((sh & 2) == 0) {
8060 tmp2 = load_reg(s, rn);
8061 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8062 tcg_temp_free_i32(tmp2);
8064 store_reg(s, rd, tmp);
8065 } else {
8066 /* 16 * 16 */
8067 tmp = load_reg(s, rm);
8068 tmp2 = load_reg(s, rs);
8069 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
8070 tcg_temp_free_i32(tmp2);
8071 if (op1 == 2) {
8072 tmp64 = tcg_temp_new_i64();
8073 tcg_gen_ext_i32_i64(tmp64, tmp);
8074 tcg_temp_free_i32(tmp);
8075 gen_addq(s, tmp64, rn, rd);
8076 gen_storeq_reg(s, rn, rd, tmp64);
8077 tcg_temp_free_i64(tmp64);
8078 } else {
8079 if (op1 == 0) {
8080 tmp2 = load_reg(s, rn);
8081 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8082 tcg_temp_free_i32(tmp2);
8084 store_reg(s, rd, tmp);
8087 break;
8088 default:
8089 goto illegal_op;
8091 } else if (((insn & 0x0e000000) == 0 &&
8092 (insn & 0x00000090) != 0x90) ||
8093 ((insn & 0x0e000000) == (1 << 25))) {
8094 int set_cc, logic_cc, shiftop;
8096 op1 = (insn >> 21) & 0xf;
8097 set_cc = (insn >> 20) & 1;
8098 logic_cc = table_logic_cc[op1] & set_cc;
8100 /* data processing instruction */
8101 if (insn & (1 << 25)) {
8102 /* immediate operand */
8103 val = insn & 0xff;
8104 shift = ((insn >> 8) & 0xf) * 2;
8105 if (shift) {
8106 val = (val >> shift) | (val << (32 - shift));
8108 tmp2 = tcg_temp_new_i32();
8109 tcg_gen_movi_i32(tmp2, val);
8110 if (logic_cc && shift) {
8111 gen_set_CF_bit31(tmp2);
8113 } else {
8114 /* register */
8115 rm = (insn) & 0xf;
8116 tmp2 = load_reg(s, rm);
8117 shiftop = (insn >> 5) & 3;
8118 if (!(insn & (1 << 4))) {
8119 shift = (insn >> 7) & 0x1f;
8120 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8121 } else {
8122 rs = (insn >> 8) & 0xf;
8123 tmp = load_reg(s, rs);
8124 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
8127 if (op1 != 0x0f && op1 != 0x0d) {
8128 rn = (insn >> 16) & 0xf;
8129 tmp = load_reg(s, rn);
8130 } else {
8131 TCGV_UNUSED_I32(tmp);
8133 rd = (insn >> 12) & 0xf;
8134 switch(op1) {
8135 case 0x00:
8136 tcg_gen_and_i32(tmp, tmp, tmp2);
8137 if (logic_cc) {
8138 gen_logic_CC(tmp);
8140 store_reg_bx(s, rd, tmp);
8141 break;
8142 case 0x01:
8143 tcg_gen_xor_i32(tmp, tmp, tmp2);
8144 if (logic_cc) {
8145 gen_logic_CC(tmp);
8147 store_reg_bx(s, rd, tmp);
8148 break;
8149 case 0x02:
8150 if (set_cc && rd == 15) {
8151 /* SUBS r15, ... is used for exception return. */
8152 if (IS_USER(s)) {
8153 goto illegal_op;
8155 gen_sub_CC(tmp, tmp, tmp2);
8156 gen_exception_return(s, tmp);
8157 } else {
8158 if (set_cc) {
8159 gen_sub_CC(tmp, tmp, tmp2);
8160 } else {
8161 tcg_gen_sub_i32(tmp, tmp, tmp2);
8163 store_reg_bx(s, rd, tmp);
8165 break;
8166 case 0x03:
8167 if (set_cc) {
8168 gen_sub_CC(tmp, tmp2, tmp);
8169 } else {
8170 tcg_gen_sub_i32(tmp, tmp2, tmp);
8172 store_reg_bx(s, rd, tmp);
8173 break;
8174 case 0x04:
8175 if (set_cc) {
8176 gen_add_CC(tmp, tmp, tmp2);
8177 } else {
8178 tcg_gen_add_i32(tmp, tmp, tmp2);
8180 store_reg_bx(s, rd, tmp);
8181 break;
8182 case 0x05:
8183 if (set_cc) {
8184 gen_adc_CC(tmp, tmp, tmp2);
8185 } else {
8186 gen_add_carry(tmp, tmp, tmp2);
8188 store_reg_bx(s, rd, tmp);
8189 break;
8190 case 0x06:
8191 if (set_cc) {
8192 gen_sbc_CC(tmp, tmp, tmp2);
8193 } else {
8194 gen_sub_carry(tmp, tmp, tmp2);
8196 store_reg_bx(s, rd, tmp);
8197 break;
8198 case 0x07:
8199 if (set_cc) {
8200 gen_sbc_CC(tmp, tmp2, tmp);
8201 } else {
8202 gen_sub_carry(tmp, tmp2, tmp);
8204 store_reg_bx(s, rd, tmp);
8205 break;
8206 case 0x08:
8207 if (set_cc) {
8208 tcg_gen_and_i32(tmp, tmp, tmp2);
8209 gen_logic_CC(tmp);
8211 tcg_temp_free_i32(tmp);
8212 break;
8213 case 0x09:
8214 if (set_cc) {
8215 tcg_gen_xor_i32(tmp, tmp, tmp2);
8216 gen_logic_CC(tmp);
8218 tcg_temp_free_i32(tmp);
8219 break;
8220 case 0x0a:
8221 if (set_cc) {
8222 gen_sub_CC(tmp, tmp, tmp2);
8224 tcg_temp_free_i32(tmp);
8225 break;
8226 case 0x0b:
8227 if (set_cc) {
8228 gen_add_CC(tmp, tmp, tmp2);
8230 tcg_temp_free_i32(tmp);
8231 break;
8232 case 0x0c:
8233 tcg_gen_or_i32(tmp, tmp, tmp2);
8234 if (logic_cc) {
8235 gen_logic_CC(tmp);
8237 store_reg_bx(s, rd, tmp);
8238 break;
8239 case 0x0d:
8240 if (logic_cc && rd == 15) {
8241 /* MOVS r15, ... is used for exception return. */
8242 if (IS_USER(s)) {
8243 goto illegal_op;
8245 gen_exception_return(s, tmp2);
8246 } else {
8247 if (logic_cc) {
8248 gen_logic_CC(tmp2);
8250 store_reg_bx(s, rd, tmp2);
8252 break;
8253 case 0x0e:
8254 tcg_gen_andc_i32(tmp, tmp, tmp2);
8255 if (logic_cc) {
8256 gen_logic_CC(tmp);
8258 store_reg_bx(s, rd, tmp);
8259 break;
8260 default:
8261 case 0x0f:
8262 tcg_gen_not_i32(tmp2, tmp2);
8263 if (logic_cc) {
8264 gen_logic_CC(tmp2);
8266 store_reg_bx(s, rd, tmp2);
8267 break;
8269 if (op1 != 0x0f && op1 != 0x0d) {
8270 tcg_temp_free_i32(tmp2);
8272 } else {
8273 /* other instructions */
8274 op1 = (insn >> 24) & 0xf;
8275 switch(op1) {
8276 case 0x0:
8277 case 0x1:
8278 /* multiplies, extra load/stores */
8279 sh = (insn >> 5) & 3;
8280 if (sh == 0) {
8281 if (op1 == 0x0) {
8282 rd = (insn >> 16) & 0xf;
8283 rn = (insn >> 12) & 0xf;
8284 rs = (insn >> 8) & 0xf;
8285 rm = (insn) & 0xf;
8286 op1 = (insn >> 20) & 0xf;
8287 switch (op1) {
8288 case 0: case 1: case 2: case 3: case 6:
8289 /* 32 bit mul */
8290 tmp = load_reg(s, rs);
8291 tmp2 = load_reg(s, rm);
8292 tcg_gen_mul_i32(tmp, tmp, tmp2);
8293 tcg_temp_free_i32(tmp2);
8294 if (insn & (1 << 22)) {
8295 /* Subtract (mls) */
8296 ARCH(6T2);
8297 tmp2 = load_reg(s, rn);
8298 tcg_gen_sub_i32(tmp, tmp2, tmp);
8299 tcg_temp_free_i32(tmp2);
8300 } else if (insn & (1 << 21)) {
8301 /* Add */
8302 tmp2 = load_reg(s, rn);
8303 tcg_gen_add_i32(tmp, tmp, tmp2);
8304 tcg_temp_free_i32(tmp2);
8306 if (insn & (1 << 20))
8307 gen_logic_CC(tmp);
8308 store_reg(s, rd, tmp);
8309 break;
8310 case 4:
8311 /* 64 bit mul double accumulate (UMAAL) */
8312 ARCH(6);
8313 tmp = load_reg(s, rs);
8314 tmp2 = load_reg(s, rm);
8315 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8316 gen_addq_lo(s, tmp64, rn);
8317 gen_addq_lo(s, tmp64, rd);
8318 gen_storeq_reg(s, rn, rd, tmp64);
8319 tcg_temp_free_i64(tmp64);
8320 break;
8321 case 8: case 9: case 10: case 11:
8322 case 12: case 13: case 14: case 15:
8323 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8324 tmp = load_reg(s, rs);
8325 tmp2 = load_reg(s, rm);
8326 if (insn & (1 << 22)) {
8327 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8328 } else {
8329 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8331 if (insn & (1 << 21)) { /* mult accumulate */
8332 TCGv_i32 al = load_reg(s, rn);
8333 TCGv_i32 ah = load_reg(s, rd);
8334 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
8335 tcg_temp_free_i32(al);
8336 tcg_temp_free_i32(ah);
8338 if (insn & (1 << 20)) {
8339 gen_logicq_cc(tmp, tmp2);
8341 store_reg(s, rn, tmp);
8342 store_reg(s, rd, tmp2);
8343 break;
8344 default:
8345 goto illegal_op;
8347 } else {
8348 rn = (insn >> 16) & 0xf;
8349 rd = (insn >> 12) & 0xf;
8350 if (insn & (1 << 23)) {
8351 /* load/store exclusive */
8352 int op2 = (insn >> 8) & 3;
8353 op1 = (insn >> 21) & 0x3;
8355 switch (op2) {
8356 case 0: /* lda/stl */
8357 if (op1 == 1) {
8358 goto illegal_op;
8360 ARCH(8);
8361 break;
8362 case 1: /* reserved */
8363 goto illegal_op;
8364 case 2: /* ldaex/stlex */
8365 ARCH(8);
8366 break;
8367 case 3: /* ldrex/strex */
8368 if (op1) {
8369 ARCH(6K);
8370 } else {
8371 ARCH(6);
8373 break;
8376 addr = tcg_temp_local_new_i32();
8377 load_reg_var(s, addr, rn);
8379 /* Since the emulation does not have barriers,
8380 the acquire/release semantics need no special
8381 handling */
8382 if (op2 == 0) {
8383 if (insn & (1 << 20)) {
8384 tmp = tcg_temp_new_i32();
8385 switch (op1) {
8386 case 0: /* lda */
8387 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8388 break;
8389 case 2: /* ldab */
8390 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
8391 break;
8392 case 3: /* ldah */
8393 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8394 break;
8395 default:
8396 abort();
8398 store_reg(s, rd, tmp);
8399 } else {
8400 rm = insn & 0xf;
8401 tmp = load_reg(s, rm);
8402 switch (op1) {
8403 case 0: /* stl */
8404 gen_aa32_st32(tmp, addr, get_mem_index(s));
8405 break;
8406 case 2: /* stlb */
8407 gen_aa32_st8(tmp, addr, get_mem_index(s));
8408 break;
8409 case 3: /* stlh */
8410 gen_aa32_st16(tmp, addr, get_mem_index(s));
8411 break;
8412 default:
8413 abort();
8415 tcg_temp_free_i32(tmp);
8417 } else if (insn & (1 << 20)) {
8418 switch (op1) {
8419 case 0: /* ldrex */
8420 gen_load_exclusive(s, rd, 15, addr, 2);
8421 break;
8422 case 1: /* ldrexd */
8423 gen_load_exclusive(s, rd, rd + 1, addr, 3);
8424 break;
8425 case 2: /* ldrexb */
8426 gen_load_exclusive(s, rd, 15, addr, 0);
8427 break;
8428 case 3: /* ldrexh */
8429 gen_load_exclusive(s, rd, 15, addr, 1);
8430 break;
8431 default:
8432 abort();
8434 } else {
8435 rm = insn & 0xf;
8436 switch (op1) {
8437 case 0: /* strex */
8438 gen_store_exclusive(s, rd, rm, 15, addr, 2);
8439 break;
8440 case 1: /* strexd */
8441 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
8442 break;
8443 case 2: /* strexb */
8444 gen_store_exclusive(s, rd, rm, 15, addr, 0);
8445 break;
8446 case 3: /* strexh */
8447 gen_store_exclusive(s, rd, rm, 15, addr, 1);
8448 break;
8449 default:
8450 abort();
8453 tcg_temp_free_i32(addr);
8454 } else {
8455 /* SWP instruction */
8456 rm = (insn) & 0xf;
8458 /* ??? This is not really atomic. However, we know
8459 we never have multiple CPUs running in parallel,
8460 so it is good enough. */
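/* In C terms the generated sequence below amounts to (sketch):
 *     tmp2 = mem[addr]; mem[addr] = reg[rm]; reg[rd] = tmp2;
 * i.e. a load/store pair rather than a true atomic swap. */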
8461 addr = load_reg(s, rn);
8462 tmp = load_reg(s, rm);
8463 tmp2 = tcg_temp_new_i32();
8464 if (insn & (1 << 22)) {
8465 gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
8466 gen_aa32_st8(tmp, addr, get_mem_index(s));
8467 } else {
8468 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
8469 gen_aa32_st32(tmp, addr, get_mem_index(s));
8471 tcg_temp_free_i32(tmp);
8472 tcg_temp_free_i32(addr);
8473 store_reg(s, rd, tmp2);
8476 } else {
8477 int address_offset;
8478 bool load = insn & (1 << 20);
8479 bool doubleword = false;
8480 /* Misc load/store */
8481 rn = (insn >> 16) & 0xf;
8482 rd = (insn >> 12) & 0xf;
8484 if (!load && (sh & 2)) {
8485 /* doubleword */
8486 ARCH(5TE);
8487 if (rd & 1) {
8488 /* UNPREDICTABLE; we choose to UNDEF */
8489 goto illegal_op;
8491 load = (sh & 1) == 0;
8492 doubleword = true;
8495 addr = load_reg(s, rn);
8496 if (insn & (1 << 24))
8497 gen_add_datah_offset(s, insn, 0, addr);
8498 address_offset = 0;
8500 if (doubleword) {
8501 if (!load) {
8502 /* store */
8503 tmp = load_reg(s, rd);
8504 gen_aa32_st32(tmp, addr, get_mem_index(s));
8505 tcg_temp_free_i32(tmp);
8506 tcg_gen_addi_i32(addr, addr, 4);
8507 tmp = load_reg(s, rd + 1);
8508 gen_aa32_st32(tmp, addr, get_mem_index(s));
8509 tcg_temp_free_i32(tmp);
8510 } else {
8511 /* load */
8512 tmp = tcg_temp_new_i32();
8513 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8514 store_reg(s, rd, tmp);
8515 tcg_gen_addi_i32(addr, addr, 4);
8516 tmp = tcg_temp_new_i32();
8517 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8518 rd++;
8520 address_offset = -4;
8521 } else if (load) {
8522 /* load */
8523 tmp = tcg_temp_new_i32();
8524 switch (sh) {
8525 case 1:
8526 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8527 break;
8528 case 2:
8529 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
8530 break;
8531 default:
8532 case 3:
8533 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
8534 break;
8536 } else {
8537 /* store */
8538 tmp = load_reg(s, rd);
8539 gen_aa32_st16(tmp, addr, get_mem_index(s));
8540 tcg_temp_free_i32(tmp);
8542 /* Perform base writeback before storing the loaded value, to
8543 ensure correct behavior with overlapping index registers.
8544 ldrd with base writeback is undefined if the
8545 destination and index registers overlap. */
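/* Example (illustrative): for "ldrh r0, [r1], r0" the write-back
 * r1 += r0 must see the original r0, which is why it is generated
 * before the loaded value is stored into r0. */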
8546 if (!(insn & (1 << 24))) {
8547 gen_add_datah_offset(s, insn, address_offset, addr);
8548 store_reg(s, rn, addr);
8549 } else if (insn & (1 << 21)) {
8550 if (address_offset)
8551 tcg_gen_addi_i32(addr, addr, address_offset);
8552 store_reg(s, rn, addr);
8553 } else {
8554 tcg_temp_free_i32(addr);
8556 if (load) {
8557 /* Complete the load. */
8558 store_reg(s, rd, tmp);
8561 break;
8562 case 0x4:
8563 case 0x5:
8564 goto do_ldst;
8565 case 0x6:
8566 case 0x7:
8567 if (insn & (1 << 4)) {
8568 ARCH(6);
8569 /* ARMv6 Media instructions. */
8570 rm = insn & 0xf;
8571 rn = (insn >> 16) & 0xf;
8572 rd = (insn >> 12) & 0xf;
8573 rs = (insn >> 8) & 0xf;
8574 switch ((insn >> 23) & 3) {
8575 case 0: /* Parallel add/subtract. */
8576 op1 = (insn >> 20) & 7;
8577 tmp = load_reg(s, rn);
8578 tmp2 = load_reg(s, rm);
8579 sh = (insn >> 5) & 7;
8580 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8581 goto illegal_op;
8582 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
8583 tcg_temp_free_i32(tmp2);
8584 store_reg(s, rd, tmp);
8585 break;
8586 case 1:
8587 if ((insn & 0x00700020) == 0) {
8588 /* Halfword pack. */
8589 tmp = load_reg(s, rn);
8590 tmp2 = load_reg(s, rm);
8591 shift = (insn >> 7) & 0x1f;
8592 if (insn & (1 << 6)) {
8593 /* pkhtb */
8594 if (shift == 0)
8595 shift = 31;
8596 tcg_gen_sari_i32(tmp2, tmp2, shift);
8597 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8598 tcg_gen_ext16u_i32(tmp2, tmp2);
8599 } else {
8600 /* pkhbt */
8601 if (shift)
8602 tcg_gen_shli_i32(tmp2, tmp2, shift);
8603 tcg_gen_ext16u_i32(tmp, tmp);
8604 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8606 tcg_gen_or_i32(tmp, tmp, tmp2);
8607 tcg_temp_free_i32(tmp2);
8608 store_reg(s, rd, tmp);
8609 } else if ((insn & 0x00200020) == 0x00200000) {
8610 /* [us]sat */
8611 tmp = load_reg(s, rm);
8612 shift = (insn >> 7) & 0x1f;
8613 if (insn & (1 << 6)) {
8614 if (shift == 0)
8615 shift = 31;
8616 tcg_gen_sari_i32(tmp, tmp, shift);
8617 } else {
8618 tcg_gen_shli_i32(tmp, tmp, shift);
8620 sh = (insn >> 16) & 0x1f;
8621 tmp2 = tcg_const_i32(sh);
8622 if (insn & (1 << 22))
8623 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
8624 else
8625 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
8626 tcg_temp_free_i32(tmp2);
8627 store_reg(s, rd, tmp);
8628 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8629 /* [us]sat16 */
8630 tmp = load_reg(s, rm);
8631 sh = (insn >> 16) & 0x1f;
8632 tmp2 = tcg_const_i32(sh);
8633 if (insn & (1 << 22))
8634 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
8635 else
8636 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
8637 tcg_temp_free_i32(tmp2);
8638 store_reg(s, rd, tmp);
8639 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8640 /* Select bytes. */
8641 tmp = load_reg(s, rn);
8642 tmp2 = load_reg(s, rm);
8643 tmp3 = tcg_temp_new_i32();
8644 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8645 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8646 tcg_temp_free_i32(tmp3);
8647 tcg_temp_free_i32(tmp2);
8648 store_reg(s, rd, tmp);
8649 } else if ((insn & 0x000003e0) == 0x00000060) {
8650 tmp = load_reg(s, rm);
8651 shift = (insn >> 10) & 3;
8652 /* ??? In many cases it's not necessary to do a
8653 rotate, a shift is sufficient. */
8654 if (shift != 0)
8655 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8656 op1 = (insn >> 20) & 7;
8657 switch (op1) {
8658 case 0: gen_sxtb16(tmp); break;
8659 case 2: gen_sxtb(tmp); break;
8660 case 3: gen_sxth(tmp); break;
8661 case 4: gen_uxtb16(tmp); break;
8662 case 6: gen_uxtb(tmp); break;
8663 case 7: gen_uxth(tmp); break;
8664 default: goto illegal_op;
8665 }
8666 if (rn != 15) {
8667 tmp2 = load_reg(s, rn);
8668 if ((op1 & 3) == 0) {
8669 gen_add16(tmp, tmp2);
8670 } else {
8671 tcg_gen_add_i32(tmp, tmp, tmp2);
8672 tcg_temp_free_i32(tmp2);
8673 }
8674 }
8675 store_reg(s, rd, tmp);
8676 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8677 /* rev */
8678 tmp = load_reg(s, rm);
8679 if (insn & (1 << 22)) {
8680 if (insn & (1 << 7)) {
8681 gen_revsh(tmp);
8682 } else {
8683 ARCH(6T2);
8684 gen_helper_rbit(tmp, tmp);
8685 }
8686 } else {
8687 if (insn & (1 << 7))
8688 gen_rev16(tmp);
8689 else
8690 tcg_gen_bswap32_i32(tmp, tmp);
8691 }
8692 store_reg(s, rd, tmp);
8693 } else {
8694 goto illegal_op;
8695 }
8696 break;
8697 case 2: /* Multiplies (Type 3). */
8698 switch ((insn >> 20) & 0x7) {
8699 case 5:
8700 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8701 /* op2 not 00x or 11x : UNDEF */
8702 goto illegal_op;
8703 }
8704 /* Signed multiply most significant [accumulate].
8705 (SMMUL, SMMLA, SMMLS) */
8706 tmp = load_reg(s, rm);
8707 tmp2 = load_reg(s, rs);
8708 tmp64 = gen_muls_i64_i32(tmp, tmp2);
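/* Only the most significant 32 bits of the 64-bit product are kept:
 * bit 6 selects subtraction from the accumulator (SMMLS) rather than
 * addition (SMMLA), and bit 5 is the R (round) bit handled below by
 * adding 0x80000000 before the truncating shift.
 */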
8710 if (rd != 15) {
8711 tmp = load_reg(s, rd);
8712 if (insn & (1 << 6)) {
8713 tmp64 = gen_subq_msw(tmp64, tmp);
8714 } else {
8715 tmp64 = gen_addq_msw(tmp64, tmp);
8716 }
8717 }
8718 if (insn & (1 << 5)) {
8719 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8720 }
8721 tcg_gen_shri_i64(tmp64, tmp64, 32);
8722 tmp = tcg_temp_new_i32();
8723 tcg_gen_extrl_i64_i32(tmp, tmp64);
8724 tcg_temp_free_i64(tmp64);
8725 store_reg(s, rn, tmp);
8726 break;
8727 case 0:
8728 case 4:
8729 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8730 if (insn & (1 << 7)) {
8731 goto illegal_op;
8732 }
8733 tmp = load_reg(s, rm);
8734 tmp2 = load_reg(s, rs);
8735 if (insn & (1 << 5))
8736 gen_swap_half(tmp2);
8737 gen_smul_dual(tmp, tmp2);
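/* gen_smul_dual leaves the two signed 16x16 products in tmp and tmp2.
 * Bit 5 (M) swapped the halfwords of Rs above, and bit 6 selects the
 * difference of the products (the SMUSD/SMLSD/SMLSLD forms) instead
 * of their sum.
 */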
8738 if (insn & (1 << 22)) {
8739 /* smlald, smlsld */
8740 TCGv_i64 tmp64_2;
8742 tmp64 = tcg_temp_new_i64();
8743 tmp64_2 = tcg_temp_new_i64();
8744 tcg_gen_ext_i32_i64(tmp64, tmp);
8745 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
8746 tcg_temp_free_i32(tmp);
8747 tcg_temp_free_i32(tmp2);
8748 if (insn & (1 << 6)) {
8749 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8750 } else {
8751 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8752 }
8753 tcg_temp_free_i64(tmp64_2);
8754 gen_addq(s, tmp64, rd, rn);
8755 gen_storeq_reg(s, rd, rn, tmp64);
8756 tcg_temp_free_i64(tmp64);
8757 } else {
8758 /* smuad, smusd, smlad, smlsd */
8759 if (insn & (1 << 6)) {
8760 /* This subtraction cannot overflow. */
8761 tcg_gen_sub_i32(tmp, tmp, tmp2);
8762 } else {
8763 /* This addition cannot overflow 32 bits;
8764 * however it may overflow considered as a
8765 * signed operation, in which case we must set
8766 * the Q flag.
8767 */
8768 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8769 }
8770 tcg_temp_free_i32(tmp2);
8771 if (rd != 15)
8772 {
8773 tmp2 = load_reg(s, rd);
8774 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8775 tcg_temp_free_i32(tmp2);
8776 }
8777 store_reg(s, rn, tmp);
8778 }
8779 break;
8780 case 1:
8781 case 3:
8782 /* SDIV, UDIV */
8783 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
8784 goto illegal_op;
8785 }
8786 if (((insn >> 5) & 7) || (rd != 15)) {
8787 goto illegal_op;
8788 }
8789 tmp = load_reg(s, rm);
8790 tmp2 = load_reg(s, rs);
8791 if (insn & (1 << 21)) {
8792 gen_helper_udiv(tmp, tmp, tmp2);
8793 } else {
8794 gen_helper_sdiv(tmp, tmp, tmp2);
8795 }
8796 tcg_temp_free_i32(tmp2);
8797 store_reg(s, rn, tmp);
8798 break;
8799 default:
8800 goto illegal_op;
8801 }
8802 break;
8803 case 3:
8804 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8805 switch (op1) {
8806 case 0: /* Unsigned sum of absolute differences. */
8807 ARCH(6);
8808 tmp = load_reg(s, rm);
8809 tmp2 = load_reg(s, rs);
8810 gen_helper_usad8(tmp, tmp, tmp2);
8811 tcg_temp_free_i32(tmp2);
8812 if (rd != 15) {
8813 tmp2 = load_reg(s, rd);
8814 tcg_gen_add_i32(tmp, tmp, tmp2);
8815 tcg_temp_free_i32(tmp2);
8816 }
8817 store_reg(s, rn, tmp);
8818 break;
8819 case 0x20: case 0x24: case 0x28: case 0x2c:
8820 /* Bitfield insert/clear. */
8821 ARCH(6T2);
8822 shift = (insn >> 7) & 0x1f;
8823 i = (insn >> 16) & 0x1f;
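/* bfi/bfc: insn[20:16] is the msb and shift the lsb of the field, so
 * an msb below the lsb is UNPREDICTABLE; the field width becomes
 * i = msb - lsb + 1 below, and rm == 15 encodes bfc (insert zeros).
 */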
8824 if (i < shift) {
8825 /* UNPREDICTABLE; we choose to UNDEF */
8826 goto illegal_op;
8827 }
8828 i = i + 1 - shift;
8829 if (rm == 15) {
8830 tmp = tcg_temp_new_i32();
8831 tcg_gen_movi_i32(tmp, 0);
8832 } else {
8833 tmp = load_reg(s, rm);
8834 }
8835 if (i != 32) {
8836 tmp2 = load_reg(s, rd);
8837 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
8838 tcg_temp_free_i32(tmp2);
8839 }
8840 store_reg(s, rd, tmp);
8841 break;
8842 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8843 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
8844 ARCH(6T2);
8845 tmp = load_reg(s, rm);
8846 shift = (insn >> 7) & 0x1f;
8847 i = ((insn >> 16) & 0x1f) + 1;
8848 if (shift + i > 32)
8849 goto illegal_op;
8850 if (i < 32) {
8851 if (op1 & 0x20) {
8852 gen_ubfx(tmp, shift, (1u << i) - 1);
8853 } else {
8854 gen_sbfx(tmp, shift, i);
8855 }
8856 }
8857 store_reg(s, rd, tmp);
8858 break;
8859 default:
8860 goto illegal_op;
8861 }
8862 break;
8863 }
8864 break;
8865 }
8866 do_ldst:
8867 /* Check for undefined extension instructions
8868 * per the ARM Bible IE:
8869 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8870 */
8871 sh = (0xf << 20) | (0xf << 4);
8872 if (op1 == 0x7 && ((insn & sh) == sh))
8873 {
8874 goto illegal_op;
8875 }
8876 /* load/store byte/word */
8877 rn = (insn >> 16) & 0xf;
8878 rd = (insn >> 12) & 0xf;
8879 tmp2 = load_reg(s, rn);
8880 if ((insn & 0x01200000) == 0x00200000) {
8881 /* ldrt/strt */
8882 i = get_a32_user_mem_index(s);
8883 } else {
8884 i = get_mem_index(s);
8885 }
8886 if (insn & (1 << 24))
8887 gen_add_data_offset(s, insn, tmp2);
8888 if (insn & (1 << 20)) {
8889 /* load */
8890 tmp = tcg_temp_new_i32();
8891 if (insn & (1 << 22)) {
8892 gen_aa32_ld8u(tmp, tmp2, i);
8893 } else {
8894 gen_aa32_ld32u(tmp, tmp2, i);
8895 }
8896 } else {
8897 /* store */
8898 tmp = load_reg(s, rd);
8899 if (insn & (1 << 22)) {
8900 gen_aa32_st8(tmp, tmp2, i);
8901 } else {
8902 gen_aa32_st32(tmp, tmp2, i);
8903 }
8904 tcg_temp_free_i32(tmp);
8905 }
8906 if (!(insn & (1 << 24))) {
8907 gen_add_data_offset(s, insn, tmp2);
8908 store_reg(s, rn, tmp2);
8909 } else if (insn & (1 << 21)) {
8910 store_reg(s, rn, tmp2);
8911 } else {
8912 tcg_temp_free_i32(tmp2);
8913 }
8914 if (insn & (1 << 20)) {
8915 /* Complete the load. */
8916 store_reg_from_load(s, rd, tmp);
8917 }
8918 break;
8919 case 0x08:
8920 case 0x09:
8921 {
8922 int j, n, loaded_base;
8923 bool exc_return = false;
8924 bool is_load = extract32(insn, 20, 1);
8925 bool user = false;
8926 TCGv_i32 loaded_var;
8927 /* load/store multiple words */
8928 /* XXX: store correct base if write back */
8929 if (insn & (1 << 22)) {
8930 /* LDM (user), LDM (exception return) and STM (user) */
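/* Bit 22 is the S bit: an LDM with r15 in the register list is an
 * exception return that also restores CPSR from SPSR, while the other
 * forms transfer the user-mode banked registers instead.
 */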
8931 if (IS_USER(s))
8932 goto illegal_op; /* only usable in supervisor mode */
8934 if (is_load && extract32(insn, 15, 1)) {
8935 exc_return = true;
8936 } else {
8937 user = true;
8938 }
8939 }
8940 rn = (insn >> 16) & 0xf;
8941 addr = load_reg(s, rn);
8943 /* compute total size */
8944 loaded_base = 0;
8945 TCGV_UNUSED_I32(loaded_var);
8946 n = 0;
8947 for(i=0;i<16;i++) {
8948 if (insn & (1 << i))
8949 n++;
8950 }
8951 /* XXX: test invalid n == 0 case ? */
8952 if (insn & (1 << 23)) {
8953 if (insn & (1 << 24)) {
8954 /* pre increment */
8955 tcg_gen_addi_i32(addr, addr, 4);
8956 } else {
8957 /* post increment */
8958 }
8959 } else {
8960 if (insn & (1 << 24)) {
8961 /* pre decrement */
8962 tcg_gen_addi_i32(addr, addr, -(n * 4));
8963 } else {
8964 /* post decrement */
8965 if (n != 1)
8966 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
8967 }
8968 }
8969 j = 0;
8970 for(i=0;i<16;i++) {
8971 if (insn & (1 << i)) {
8972 if (is_load) {
8973 /* load */
8974 tmp = tcg_temp_new_i32();
8975 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8976 if (user) {
8977 tmp2 = tcg_const_i32(i);
8978 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
8979 tcg_temp_free_i32(tmp2);
8980 tcg_temp_free_i32(tmp);
8981 } else if (i == rn) {
8982 loaded_var = tmp;
8983 loaded_base = 1;
8984 } else {
8985 store_reg_from_load(s, i, tmp);
8986 }
8987 } else {
8988 /* store */
8989 if (i == 15) {
8990 /* special case: r15 = PC + 8 */
8991 val = (long)s->pc + 4;
8992 tmp = tcg_temp_new_i32();
8993 tcg_gen_movi_i32(tmp, val);
8994 } else if (user) {
8995 tmp = tcg_temp_new_i32();
8996 tmp2 = tcg_const_i32(i);
8997 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
8998 tcg_temp_free_i32(tmp2);
8999 } else {
9000 tmp = load_reg(s, i);
9001 }
9002 gen_aa32_st32(tmp, addr, get_mem_index(s));
9003 tcg_temp_free_i32(tmp);
9004 }
9005 j++;
9006 /* no need to add after the last transfer */
9007 if (j != n)
9008 tcg_gen_addi_i32(addr, addr, 4);
9009 }
9010 }
9011 if (insn & (1 << 21)) {
9012 /* write back */
9013 if (insn & (1 << 23)) {
9014 if (insn & (1 << 24)) {
9015 /* pre increment */
9016 } else {
9017 /* post increment */
9018 tcg_gen_addi_i32(addr, addr, 4);
9019 }
9020 } else {
9021 if (insn & (1 << 24)) {
9022 /* pre decrement */
9023 if (n != 1)
9024 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9025 } else {
9026 /* post decrement */
9027 tcg_gen_addi_i32(addr, addr, -(n * 4));
9028 }
9029 }
9030 store_reg(s, rn, addr);
9031 } else {
9032 tcg_temp_free_i32(addr);
9033 }
9034 if (loaded_base) {
9035 store_reg(s, rn, loaded_var);
9036 }
9037 if (exc_return) {
9038 /* Restore CPSR from SPSR. */
9039 tmp = load_cpu_field(spsr);
9040 gen_set_cpsr(tmp, CPSR_ERET_MASK);
9041 tcg_temp_free_i32(tmp);
9042 s->is_jmp = DISAS_JUMP;
9043 }
9044 }
9045 break;
9046 case 0xa:
9047 case 0xb:
9048 {
9049 int32_t offset;
9051 /* branch (and link) */
9052 val = (int32_t)s->pc;
9053 if (insn & (1 << 24)) {
9054 tmp = tcg_temp_new_i32();
9055 tcg_gen_movi_i32(tmp, val);
9056 store_reg(s, 14, tmp);
9057 }
9058 offset = sextract32(insn << 2, 0, 26);
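/* The 24-bit immediate is a word offset: shift left two and
 * sign-extend to 26 bits. The extra +4 below models the ARM PC
 * reading as the instruction address plus 8 (s->pc is already
 * the instruction address plus 4).
 */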
9059 val += offset + 4;
9060 gen_jmp(s, val);
9061 }
9062 break;
9063 case 0xc:
9064 case 0xd:
9065 case 0xe:
9066 if (((insn >> 8) & 0xe) == 10) {
9067 /* VFP. */
9068 if (disas_vfp_insn(s, insn)) {
9069 goto illegal_op;
9070 }
9071 } else if (disas_coproc_insn(s, insn)) {
9072 /* Coprocessor. */
9073 goto illegal_op;
9074 }
9075 break;
9076 case 0xf:
9077 /* swi */
9078 gen_set_pc_im(s, s->pc);
9079 s->svc_imm = extract32(insn, 0, 24);
9080 s->is_jmp = DISAS_SWI;
9081 break;
9082 default:
9083 illegal_op:
9084 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9085 default_exception_el(s));
9086 break;
9087 }
9088 }
9091 /* Return true if this is a Thumb-2 logical op. */
9092 static int
9093 thumb2_logic_op(int op)
9094 {
9095 return (op < 8);
9096 }
9098 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9099 then set condition code flags based on the result of the operation.
9100 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9101 to the high bit of T1.
9102 Returns zero if the opcode is valid. */
9104 static int
9105 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9106 TCGv_i32 t0, TCGv_i32 t1)
9107 {
9108 int logic_cc;
9110 logic_cc = 0;
9111 switch (op) {
9112 case 0: /* and */
9113 tcg_gen_and_i32(t0, t0, t1);
9114 logic_cc = conds;
9115 break;
9116 case 1: /* bic */
9117 tcg_gen_andc_i32(t0, t0, t1);
9118 logic_cc = conds;
9119 break;
9120 case 2: /* orr */
9121 tcg_gen_or_i32(t0, t0, t1);
9122 logic_cc = conds;
9123 break;
9124 case 3: /* orn */
9125 tcg_gen_orc_i32(t0, t0, t1);
9126 logic_cc = conds;
9127 break;
9128 case 4: /* eor */
9129 tcg_gen_xor_i32(t0, t0, t1);
9130 logic_cc = conds;
9131 break;
9132 case 8: /* add */
9133 if (conds)
9134 gen_add_CC(t0, t0, t1);
9135 else
9136 tcg_gen_add_i32(t0, t0, t1);
9137 break;
9138 case 10: /* adc */
9139 if (conds)
9140 gen_adc_CC(t0, t0, t1);
9141 else
9142 gen_adc(t0, t1);
9143 break;
9144 case 11: /* sbc */
9145 if (conds) {
9146 gen_sbc_CC(t0, t0, t1);
9147 } else {
9148 gen_sub_carry(t0, t0, t1);
9149 }
9150 break;
9151 case 13: /* sub */
9152 if (conds)
9153 gen_sub_CC(t0, t0, t1);
9154 else
9155 tcg_gen_sub_i32(t0, t0, t1);
9156 break;
9157 case 14: /* rsb */
9158 if (conds)
9159 gen_sub_CC(t0, t1, t0);
9160 else
9161 tcg_gen_sub_i32(t0, t1, t0);
9162 break;
9163 default: /* 5, 6, 7, 9, 12, 15. */
9164 return 1;
9165 }
9166 if (logic_cc) {
9167 gen_logic_CC(t0);
9168 if (shifter_out)
9169 gen_set_CF_bit31(t1);
9170 }
9171 return 0;
9172 }
9174 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9175 is not legal. */
9176 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9177 {
9178 uint32_t insn, imm, shift, offset;
9179 uint32_t rd, rn, rm, rs;
9180 TCGv_i32 tmp;
9181 TCGv_i32 tmp2;
9182 TCGv_i32 tmp3;
9183 TCGv_i32 addr;
9184 TCGv_i64 tmp64;
9185 int op;
9186 int shiftop;
9187 int conds;
9188 int logic_cc;
9190 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9191 || arm_dc_feature(s, ARM_FEATURE_M))) {
9192 /* Thumb-1 cores may need to treat bl and blx as a pair of
9193 16-bit instructions to get correct prefetch abort behavior. */
9194 insn = insn_hw1;
9195 if ((insn & (1 << 12)) == 0) {
9196 ARCH(5);
9197 /* Second half of blx. */
9198 offset = ((insn & 0x7ff) << 1);
9199 tmp = load_reg(s, 14);
9200 tcg_gen_addi_i32(tmp, tmp, offset);
9201 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
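/* The first half of the split blx left PC plus the upper offset in
 * r14; this adds the low bits (in halfwords) and the andi above
 * forces word alignment, since blx targets ARM state.
 */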
9203 tmp2 = tcg_temp_new_i32();
9204 tcg_gen_movi_i32(tmp2, s->pc | 1);
9205 store_reg(s, 14, tmp2);
9206 gen_bx(s, tmp);
9207 return 0;
9208 }
9209 if (insn & (1 << 11)) {
9210 /* Second half of bl. */
9211 offset = ((insn & 0x7ff) << 1) | 1;
9212 tmp = load_reg(s, 14);
9213 tcg_gen_addi_i32(tmp, tmp, offset);
9215 tmp2 = tcg_temp_new_i32();
9216 tcg_gen_movi_i32(tmp2, s->pc | 1);
9217 store_reg(s, 14, tmp2);
9218 gen_bx(s, tmp);
9219 return 0;
9220 }
9221 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9222 /* Instruction spans a page boundary. Implement it as two
9223 16-bit instructions in case the second half causes a
9224 prefetch abort. */
9225 offset = ((int32_t)insn << 21) >> 9;
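/* First half of a split bl/blx: the movi below stashes the Thumb PC
 * (s->pc + 2, i.e. the instruction address plus 4) plus the
 * sign-extended upper offset bits in r14; the second-half cases above
 * finish the addition.
 */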
9226 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9227 return 0;
9228 }
9229 /* Fall through to 32-bit decode. */
9230 }
9232 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9233 s->pc += 2;
9234 insn |= (uint32_t)insn_hw1 << 16;
9236 if ((insn & 0xf800e800) != 0xf000e800) {
9237 ARCH(6T2);
9238 }
9240 rn = (insn >> 16) & 0xf;
9241 rs = (insn >> 12) & 0xf;
9242 rd = (insn >> 8) & 0xf;
9243 rm = insn & 0xf;
9244 switch ((insn >> 25) & 0xf) {
9245 case 0: case 1: case 2: case 3:
9246 /* 16-bit instructions. Should never happen. */
9247 abort();
9248 case 4:
9249 if (insn & (1 << 22)) {
9250 /* Other load/store, table branch. */
9251 if (insn & 0x01200000) {
9252 /* Load/store doubleword. */
9253 if (rn == 15) {
9254 addr = tcg_temp_new_i32();
9255 tcg_gen_movi_i32(addr, s->pc & ~3);
9256 } else {
9257 addr = load_reg(s, rn);
9258 }
9259 offset = (insn & 0xff) * 4;
9260 if ((insn & (1 << 23)) == 0)
9261 offset = -offset;
9262 if (insn & (1 << 24)) {
9263 tcg_gen_addi_i32(addr, addr, offset);
9264 offset = 0;
9265 }
9266 if (insn & (1 << 20)) {
9267 /* ldrd */
9268 tmp = tcg_temp_new_i32();
9269 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9270 store_reg(s, rs, tmp);
9271 tcg_gen_addi_i32(addr, addr, 4);
9272 tmp = tcg_temp_new_i32();
9273 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9274 store_reg(s, rd, tmp);
9275 } else {
9276 /* strd */
9277 tmp = load_reg(s, rs);
9278 gen_aa32_st32(tmp, addr, get_mem_index(s));
9279 tcg_temp_free_i32(tmp);
9280 tcg_gen_addi_i32(addr, addr, 4);
9281 tmp = load_reg(s, rd);
9282 gen_aa32_st32(tmp, addr, get_mem_index(s));
9283 tcg_temp_free_i32(tmp);
9284 }
9285 if (insn & (1 << 21)) {
9286 /* Base writeback. */
9287 if (rn == 15)
9288 goto illegal_op;
9289 tcg_gen_addi_i32(addr, addr, offset - 4);
9290 store_reg(s, rn, addr);
9291 } else {
9292 tcg_temp_free_i32(addr);
9293 }
9294 } else if ((insn & (1 << 23)) == 0) {
9295 /* Load/store exclusive word. */
9296 addr = tcg_temp_local_new_i32();
9297 load_reg_var(s, addr, rn);
9298 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
9299 if (insn & (1 << 20)) {
9300 gen_load_exclusive(s, rs, 15, addr, 2);
9301 } else {
9302 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9303 }
9304 tcg_temp_free_i32(addr);
9305 } else if ((insn & (7 << 5)) == 0) {
9306 /* Table Branch. */
9307 if (rn == 15) {
9308 addr = tcg_temp_new_i32();
9309 tcg_gen_movi_i32(addr, s->pc);
9310 } else {
9311 addr = load_reg(s, rn);
9312 }
9313 tmp = load_reg(s, rm);
9314 tcg_gen_add_i32(addr, addr, tmp);
9315 if (insn & (1 << 4)) {
9316 /* tbh */
9317 tcg_gen_add_i32(addr, addr, tmp);
9318 tcg_temp_free_i32(tmp);
9319 tmp = tcg_temp_new_i32();
9320 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9321 } else { /* tbb */
9322 tcg_temp_free_i32(tmp);
9323 tmp = tcg_temp_new_i32();
9324 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9325 }
9326 tcg_temp_free_i32(addr);
9327 tcg_gen_shli_i32(tmp, tmp, 1);
9328 tcg_gen_addi_i32(tmp, tmp, s->pc);
9329 store_reg(s, 15, tmp);
9330 } else {
9331 int op2 = (insn >> 6) & 0x3;
9332 op = (insn >> 4) & 0x3;
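/* op2 (insn[7:6]) picks the family decoded below: 1 is load/store
 * exclusive byte/halfword/doubleword, 2 is load-acquire/store-release,
 * 3 is the exclusive acquire/release forms; op (insn[5:4]) gives the
 * size, with 3 meaning doubleword where it is allowed.
 */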
9333 switch (op2) {
9334 case 0:
9335 goto illegal_op;
9336 case 1:
9337 /* Load/store exclusive byte/halfword/doubleword */
9338 if (op == 2) {
9339 goto illegal_op;
9340 }
9341 ARCH(7);
9342 break;
9343 case 2:
9344 /* Load-acquire/store-release */
9345 if (op == 3) {
9346 goto illegal_op;
9347 }
9348 /* Fall through */
9349 case 3:
9350 /* Load-acquire/store-release exclusive */
9351 ARCH(8);
9352 break;
9353 }
9354 addr = tcg_temp_local_new_i32();
9355 load_reg_var(s, addr, rn);
9356 if (!(op2 & 1)) {
9357 if (insn & (1 << 20)) {
9358 tmp = tcg_temp_new_i32();
9359 switch (op) {
9360 case 0: /* ldab */
9361 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9362 break;
9363 case 1: /* ldah */
9364 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9365 break;
9366 case 2: /* lda */
9367 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9368 break;
9369 default:
9370 abort();
9371 }
9372 store_reg(s, rs, tmp);
9373 } else {
9374 tmp = load_reg(s, rs);
9375 switch (op) {
9376 case 0: /* stlb */
9377 gen_aa32_st8(tmp, addr, get_mem_index(s));
9378 break;
9379 case 1: /* stlh */
9380 gen_aa32_st16(tmp, addr, get_mem_index(s));
9381 break;
9382 case 2: /* stl */
9383 gen_aa32_st32(tmp, addr, get_mem_index(s));
9384 break;
9385 default:
9386 abort();
9387 }
9388 tcg_temp_free_i32(tmp);
9389 }
9390 } else if (insn & (1 << 20)) {
9391 gen_load_exclusive(s, rs, rd, addr, op);
9392 } else {
9393 gen_store_exclusive(s, rm, rs, rd, addr, op);
9394 }
9395 tcg_temp_free_i32(addr);
9396 }
9397 } else {
9398 /* Load/store multiple, RFE, SRS. */
9399 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
9400 /* RFE, SRS: not available in user mode or on M profile */
9401 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9402 goto illegal_op;
9403 }
9404 if (insn & (1 << 20)) {
9405 /* rfe */
9406 addr = load_reg(s, rn);
9407 if ((insn & (1 << 24)) == 0)
9408 tcg_gen_addi_i32(addr, addr, -8);
9409 /* Load PC into tmp and CPSR into tmp2. */
9410 tmp = tcg_temp_new_i32();
9411 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9412 tcg_gen_addi_i32(addr, addr, 4);
9413 tmp2 = tcg_temp_new_i32();
9414 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9415 if (insn & (1 << 21)) {
9416 /* Base writeback. */
9417 if (insn & (1 << 24)) {
9418 tcg_gen_addi_i32(addr, addr, 4);
9419 } else {
9420 tcg_gen_addi_i32(addr, addr, -4);
9421 }
9422 store_reg(s, rn, addr);
9423 } else {
9424 tcg_temp_free_i32(addr);
9425 }
9426 gen_rfe(s, tmp, tmp2);
9427 } else {
9428 /* srs */
9429 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9430 insn & (1 << 21));
9431 }
9432 } else {
9433 int i, loaded_base = 0;
9434 TCGv_i32 loaded_var;
9435 /* Load/store multiple. */
9436 addr = load_reg(s, rn);
9437 offset = 0;
9438 for (i = 0; i < 16; i++) {
9439 if (insn & (1 << i))
9440 offset += 4;
9441 }
9442 if (insn & (1 << 24)) {
9443 tcg_gen_addi_i32(addr, addr, -offset);
9444 }
9446 TCGV_UNUSED_I32(loaded_var);
9447 for (i = 0; i < 16; i++) {
9448 if ((insn & (1 << i)) == 0)
9449 continue;
9450 if (insn & (1 << 20)) {
9451 /* Load. */
9452 tmp = tcg_temp_new_i32();
9453 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9454 if (i == 15) {
9455 gen_bx(s, tmp);
9456 } else if (i == rn) {
9457 loaded_var = tmp;
9458 loaded_base = 1;
9459 } else {
9460 store_reg(s, i, tmp);
9461 }
9462 } else {
9463 /* Store. */
9464 tmp = load_reg(s, i);
9465 gen_aa32_st32(tmp, addr, get_mem_index(s));
9466 tcg_temp_free_i32(tmp);
9467 }
9468 tcg_gen_addi_i32(addr, addr, 4);
9469 }
9470 if (loaded_base) {
9471 store_reg(s, rn, loaded_var);
9472 }
9473 if (insn & (1 << 21)) {
9474 /* Base register writeback. */
9475 if (insn & (1 << 24)) {
9476 tcg_gen_addi_i32(addr, addr, -offset);
9477 }
9478 /* Fault if writeback register is in register list. */
9479 if (insn & (1 << rn))
9480 goto illegal_op;
9481 store_reg(s, rn, addr);
9482 } else {
9483 tcg_temp_free_i32(addr);
9484 }
9485 }
9486 }
9487 break;
9488 case 5:
9490 op = (insn >> 21) & 0xf;
9491 if (op == 6) {
9492 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9493 goto illegal_op;
9494 }
9495 /* Halfword pack. */
9496 tmp = load_reg(s, rn);
9497 tmp2 = load_reg(s, rm);
9498 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9499 if (insn & (1 << 5)) {
9500 /* pkhtb */
9501 if (shift == 0)
9502 shift = 31;
9503 tcg_gen_sari_i32(tmp2, tmp2, shift);
9504 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9505 tcg_gen_ext16u_i32(tmp2, tmp2);
9506 } else {
9507 /* pkhbt */
9508 if (shift)
9509 tcg_gen_shli_i32(tmp2, tmp2, shift);
9510 tcg_gen_ext16u_i32(tmp, tmp);
9511 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9512 }
9513 tcg_gen_or_i32(tmp, tmp, tmp2);
9514 tcg_temp_free_i32(tmp2);
9515 store_reg(s, rd, tmp);
9516 } else {
9517 /* Data processing register constant shift. */
9518 if (rn == 15) {
9519 tmp = tcg_temp_new_i32();
9520 tcg_gen_movi_i32(tmp, 0);
9521 } else {
9522 tmp = load_reg(s, rn);
9523 }
9524 tmp2 = load_reg(s, rm);
9526 shiftop = (insn >> 4) & 3;
9527 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9528 conds = (insn & (1 << 20)) != 0;
9529 logic_cc = (conds && thumb2_logic_op(op));
9530 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9531 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9532 goto illegal_op;
9533 tcg_temp_free_i32(tmp2);
9534 if (rd != 15) {
9535 store_reg(s, rd, tmp);
9536 } else {
9537 tcg_temp_free_i32(tmp);
9538 }
9539 }
9540 break;
9541 case 13: /* Misc data processing. */
9542 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9543 if (op < 4 && (insn & 0xf000) != 0xf000)
9544 goto illegal_op;
9545 switch (op) {
9546 case 0: /* Register controlled shift. */
9547 tmp = load_reg(s, rn);
9548 tmp2 = load_reg(s, rm);
9549 if ((insn & 0x70) != 0)
9550 goto illegal_op;
9551 op = (insn >> 21) & 3;
9552 logic_cc = (insn & (1 << 20)) != 0;
9553 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9554 if (logic_cc)
9555 gen_logic_CC(tmp);
9556 store_reg_bx(s, rd, tmp);
9557 break;
9558 case 1: /* Sign/zero extend. */
9559 op = (insn >> 20) & 7;
9560 switch (op) {
9561 case 0: /* SXTAH, SXTH */
9562 case 1: /* UXTAH, UXTH */
9563 case 4: /* SXTAB, SXTB */
9564 case 5: /* UXTAB, UXTB */
9565 break;
9566 case 2: /* SXTAB16, SXTB16 */
9567 case 3: /* UXTAB16, UXTB16 */
9568 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9569 goto illegal_op;
9570 }
9571 break;
9572 default:
9573 goto illegal_op;
9574 }
9575 if (rn != 15) {
9576 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9577 goto illegal_op;
9578 }
9579 }
9580 tmp = load_reg(s, rm);
9581 shift = (insn >> 4) & 3;
9582 /* ??? In many cases it's not necessary to do a
9583 rotate, a shift is sufficient. */
9584 if (shift != 0)
9585 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9586 op = (insn >> 20) & 7;
9587 switch (op) {
9588 case 0: gen_sxth(tmp); break;
9589 case 1: gen_uxth(tmp); break;
9590 case 2: gen_sxtb16(tmp); break;
9591 case 3: gen_uxtb16(tmp); break;
9592 case 4: gen_sxtb(tmp); break;
9593 case 5: gen_uxtb(tmp); break;
9594 default:
9595 g_assert_not_reached();
9596 }
9597 if (rn != 15) {
9598 tmp2 = load_reg(s, rn);
9599 if ((op >> 1) == 1) {
9600 gen_add16(tmp, tmp2);
9601 } else {
9602 tcg_gen_add_i32(tmp, tmp, tmp2);
9603 tcg_temp_free_i32(tmp2);
9604 }
9605 }
9606 store_reg(s, rd, tmp);
9607 break;
9608 case 2: /* SIMD add/subtract. */
9609 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9610 goto illegal_op;
9611 }
9612 op = (insn >> 20) & 7;
9613 shift = (insn >> 4) & 7;
9614 if ((op & 3) == 3 || (shift & 3) == 3)
9615 goto illegal_op;
9616 tmp = load_reg(s, rn);
9617 tmp2 = load_reg(s, rm);
9618 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
9619 tcg_temp_free_i32(tmp2);
9620 store_reg(s, rd, tmp);
9621 break;
9622 case 3: /* Other data processing. */
9623 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9624 if (op < 4) {
9625 /* Saturating add/subtract. */
9626 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9627 goto illegal_op;
9628 }
9629 tmp = load_reg(s, rn);
9630 tmp2 = load_reg(s, rm);
9631 if (op & 1)
9632 gen_helper_double_saturate(tmp, cpu_env, tmp);
9633 if (op & 2)
9634 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9635 else
9636 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
9637 tcg_temp_free_i32(tmp2);
9638 } else {
9639 switch (op) {
9640 case 0x0a: /* rbit */
9641 case 0x08: /* rev */
9642 case 0x09: /* rev16 */
9643 case 0x0b: /* revsh */
9644 case 0x18: /* clz */
9645 break;
9646 case 0x10: /* sel */
9647 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9648 goto illegal_op;
9649 }
9650 break;
9651 case 0x20: /* crc32/crc32c */
9652 case 0x21:
9653 case 0x22:
9654 case 0x28:
9655 case 0x29:
9656 case 0x2a:
9657 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
9658 goto illegal_op;
9659 }
9660 break;
9661 default:
9662 goto illegal_op;
9663 }
9664 tmp = load_reg(s, rn);
9665 switch (op) {
9666 case 0x0a: /* rbit */
9667 gen_helper_rbit(tmp, tmp);
9668 break;
9669 case 0x08: /* rev */
9670 tcg_gen_bswap32_i32(tmp, tmp);
9671 break;
9672 case 0x09: /* rev16 */
9673 gen_rev16(tmp);
9674 break;
9675 case 0x0b: /* revsh */
9676 gen_revsh(tmp);
9677 break;
9678 case 0x10: /* sel */
9679 tmp2 = load_reg(s, rm);
9680 tmp3 = tcg_temp_new_i32();
9681 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
9682 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
9683 tcg_temp_free_i32(tmp3);
9684 tcg_temp_free_i32(tmp2);
9685 break;
9686 case 0x18: /* clz */
9687 gen_helper_clz(tmp, tmp);
9688 break;
9689 case 0x20:
9690 case 0x21:
9691 case 0x22:
9692 case 0x28:
9693 case 0x29:
9694 case 0x2a:
9695 {
9696 /* crc32/crc32c */
9697 uint32_t sz = op & 0x3;
9698 uint32_t c = op & 0x8;
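/* sz is log2 of the operand width used to mask Rm below, and c picks
 * crc32c (the Castagnoli polynomial) over crc32; the helpers take the
 * byte count 1 << sz as their third argument.
 */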
9700 tmp2 = load_reg(s, rm);
9701 if (sz == 0) {
9702 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9703 } else if (sz == 1) {
9704 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9705 }
9706 tmp3 = tcg_const_i32(1 << sz);
9707 if (c) {
9708 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9709 } else {
9710 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9711 }
9712 tcg_temp_free_i32(tmp2);
9713 tcg_temp_free_i32(tmp3);
9714 break;
9715 }
9716 default:
9717 g_assert_not_reached();
9718 }
9719 }
9720 store_reg(s, rd, tmp);
9721 break;
9722 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
9723 switch ((insn >> 20) & 7) {
9724 case 0: /* 32 x 32 -> 32 */
9725 case 7: /* Unsigned sum of absolute differences. */
9726 break;
9727 case 1: /* 16 x 16 -> 32 */
9728 case 2: /* Dual multiply add. */
9729 case 3: /* 32 * 16 -> 32msb */
9730 case 4: /* Dual multiply subtract. */
9731 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9732 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9733 goto illegal_op;
9734 }
9735 break;
9736 }
9737 op = (insn >> 4) & 0xf;
9738 tmp = load_reg(s, rn);
9739 tmp2 = load_reg(s, rm);
9740 switch ((insn >> 20) & 7) {
9741 case 0: /* 32 x 32 -> 32 */
9742 tcg_gen_mul_i32(tmp, tmp, tmp2);
9743 tcg_temp_free_i32(tmp2);
9744 if (rs != 15) {
9745 tmp2 = load_reg(s, rs);
9746 if (op)
9747 tcg_gen_sub_i32(tmp, tmp2, tmp);
9748 else
9749 tcg_gen_add_i32(tmp, tmp, tmp2);
9750 tcg_temp_free_i32(tmp2);
9751 }
9752 break;
9753 case 1: /* 16 x 16 -> 32 */
9754 gen_mulxy(tmp, tmp2, op & 2, op & 1);
9755 tcg_temp_free_i32(tmp2);
9756 if (rs != 15) {
9757 tmp2 = load_reg(s, rs);
9758 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9759 tcg_temp_free_i32(tmp2);
9760 }
9761 break;
9762 case 2: /* Dual multiply add. */
9763 case 4: /* Dual multiply subtract. */
9764 if (op)
9765 gen_swap_half(tmp2);
9766 gen_smul_dual(tmp, tmp2);
9767 if (insn & (1 << 22)) {
9768 /* This subtraction cannot overflow. */
9769 tcg_gen_sub_i32(tmp, tmp, tmp2);
9770 } else {
9771 /* This addition cannot overflow 32 bits;
9772 * however it may overflow considered as a signed
9773 * operation, in which case we must set the Q flag.
9774 */
9775 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9776 }
9777 tcg_temp_free_i32(tmp2);
9778 if (rs != 15)
9779 {
9780 tmp2 = load_reg(s, rs);
9781 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9782 tcg_temp_free_i32(tmp2);
9783 }
9784 break;
9785 case 3: /* 32 * 16 -> 32msb */
9786 if (op)
9787 tcg_gen_sari_i32(tmp2, tmp2, 16);
9788 else
9789 gen_sxth(tmp2);
9790 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9791 tcg_gen_shri_i64(tmp64, tmp64, 16);
9792 tmp = tcg_temp_new_i32();
9793 tcg_gen_extrl_i64_i32(tmp, tmp64);
9794 tcg_temp_free_i64(tmp64);
9795 if (rs != 15)
9796 {
9797 tmp2 = load_reg(s, rs);
9798 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9799 tcg_temp_free_i32(tmp2);
9800 }
9801 break;
9802 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9803 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9804 if (rs != 15) {
9805 tmp = load_reg(s, rs);
9806 if (insn & (1 << 20)) {
9807 tmp64 = gen_addq_msw(tmp64, tmp);
9808 } else {
9809 tmp64 = gen_subq_msw(tmp64, tmp);
9810 }
9811 }
9812 if (insn & (1 << 4)) {
9813 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9814 }
9815 tcg_gen_shri_i64(tmp64, tmp64, 32);
9816 tmp = tcg_temp_new_i32();
9817 tcg_gen_extrl_i64_i32(tmp, tmp64);
9818 tcg_temp_free_i64(tmp64);
9819 break;
9820 case 7: /* Unsigned sum of absolute differences. */
9821 gen_helper_usad8(tmp, tmp, tmp2);
9822 tcg_temp_free_i32(tmp2);
9823 if (rs != 15) {
9824 tmp2 = load_reg(s, rs);
9825 tcg_gen_add_i32(tmp, tmp, tmp2);
9826 tcg_temp_free_i32(tmp2);
9827 }
9828 break;
9829 }
9830 store_reg(s, rd, tmp);
9831 break;
9832 case 6: case 7: /* 64-bit multiply, Divide. */
9833 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
9834 tmp = load_reg(s, rn);
9835 tmp2 = load_reg(s, rm);
9836 if ((op & 0x50) == 0x10) {
9837 /* sdiv, udiv */
9838 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9839 goto illegal_op;
9840 }
9841 if (op & 0x20)
9842 gen_helper_udiv(tmp, tmp, tmp2);
9843 else
9844 gen_helper_sdiv(tmp, tmp, tmp2);
9845 tcg_temp_free_i32(tmp2);
9846 store_reg(s, rd, tmp);
9847 } else if ((op & 0xe) == 0xc) {
9848 /* Dual multiply accumulate long. */
9849 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9850 tcg_temp_free_i32(tmp);
9851 tcg_temp_free_i32(tmp2);
9852 goto illegal_op;
9853 }
9854 if (op & 1)
9855 gen_swap_half(tmp2);
9856 gen_smul_dual(tmp, tmp2);
9857 if (op & 0x10) {
9858 tcg_gen_sub_i32(tmp, tmp, tmp2);
9859 } else {
9860 tcg_gen_add_i32(tmp, tmp, tmp2);
9861 }
9862 tcg_temp_free_i32(tmp2);
9863 /* BUGFIX */
9864 tmp64 = tcg_temp_new_i64();
9865 tcg_gen_ext_i32_i64(tmp64, tmp);
9866 tcg_temp_free_i32(tmp);
9867 gen_addq(s, tmp64, rs, rd);
9868 gen_storeq_reg(s, rs, rd, tmp64);
9869 tcg_temp_free_i64(tmp64);
9870 } else {
9871 if (op & 0x20) {
9872 /* Unsigned 64-bit multiply */
9873 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9874 } else {
9875 if (op & 8) {
9876 /* smlalxy */
9877 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9878 tcg_temp_free_i32(tmp2);
9879 tcg_temp_free_i32(tmp);
9880 goto illegal_op;
9881 }
9882 gen_mulxy(tmp, tmp2, op & 2, op & 1);
9883 tcg_temp_free_i32(tmp2);
9884 tmp64 = tcg_temp_new_i64();
9885 tcg_gen_ext_i32_i64(tmp64, tmp);
9886 tcg_temp_free_i32(tmp);
9887 } else {
9888 /* Signed 64-bit multiply */
9889 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9890 }
9891 }
9892 if (op & 4) {
9893 /* umaal */
9894 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9895 tcg_temp_free_i64(tmp64);
9896 goto illegal_op;
9897 }
9898 gen_addq_lo(s, tmp64, rs);
9899 gen_addq_lo(s, tmp64, rd);
9900 } else if (op & 0x40) {
9901 /* 64-bit accumulate. */
9902 gen_addq(s, tmp64, rs, rd);
9903 }
9904 gen_storeq_reg(s, rs, rd, tmp64);
9905 tcg_temp_free_i64(tmp64);
9906 }
9907 break;
9908 }
9909 break;
9910 case 6: case 7: case 14: case 15:
9911 /* Coprocessor. */
9912 if (((insn >> 24) & 3) == 3) {
9913 /* Translate into the equivalent ARM encoding. */
9914 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9915 if (disas_neon_data_insn(s, insn)) {
9916 goto illegal_op;
9917 }
9918 } else if (((insn >> 8) & 0xe) == 10) {
9919 if (disas_vfp_insn(s, insn)) {
9920 goto illegal_op;
9921 }
9922 } else {
9923 if (insn & (1 << 28))
9924 goto illegal_op;
9925 if (disas_coproc_insn(s, insn)) {
9926 goto illegal_op;
9927 }
9928 }
9929 break;
9930 case 8: case 9: case 10: case 11:
9931 if (insn & (1 << 15)) {
9932 /* Branches, misc control. */
9933 if (insn & 0x5000) {
9934 /* Unconditional branch. */
9935 /* signextend(hw1[10:0]) -> offset[:12]. */
9936 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
9937 /* hw1[10:0] -> offset[11:1]. */
9938 offset |= (insn & 0x7ff) << 1;
9939 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9940 offset[24:22] already have the same value because of the
9941 sign extension above. */
9942 offset ^= ((~insn) & (1 << 13)) << 10;
9943 offset ^= ((~insn) & (1 << 11)) << 11;
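/* hw2 bits 13 and 11 are J1/J2 of the T4 encoding: I1 = NOT(J1 EOR S)
 * and I2 = NOT(J2 EOR S) give offset bits 23 and 22, which is what
 * the two XORs with the inverted insn above implement.
 */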
9945 if (insn & (1 << 14)) {
9946 /* Branch and link. */
9947 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
9948 }
9950 offset += s->pc;
9951 if (insn & (1 << 12)) {
9952 /* b/bl */
9953 gen_jmp(s, offset);
9954 } else {
9955 /* blx */
9956 offset &= ~(uint32_t)2;
9957 /* thumb2 bx, no need to check */
9958 gen_bx_im(s, offset);
9959 }
9960 } else if (((insn >> 23) & 7) == 7) {
9961 /* Misc control */
9962 if (insn & (1 << 13))
9963 goto illegal_op;
9965 if (insn & (1 << 26)) {
9966 if (!(insn & (1 << 20))) {
9967 /* Hypervisor call (v7) */
9968 int imm16 = extract32(insn, 16, 4) << 12
9969 | extract32(insn, 0, 12);
9970 ARCH(7);
9971 if (IS_USER(s)) {
9972 goto illegal_op;
9973 }
9974 gen_hvc(s, imm16);
9975 } else {
9976 /* Secure monitor call (v6+) */
9977 ARCH(6K);
9978 if (IS_USER(s)) {
9979 goto illegal_op;
9980 }
9981 gen_smc(s);
9982 }
9983 } else {
9984 op = (insn >> 20) & 7;
9985 switch (op) {
9986 case 0: /* msr cpsr. */
9987 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9988 tmp = load_reg(s, rn);
9989 addr = tcg_const_i32(insn & 0xff);
9990 gen_helper_v7m_msr(cpu_env, addr, tmp);
9991 tcg_temp_free_i32(addr);
9992 tcg_temp_free_i32(tmp);
9993 gen_lookup_tb(s);
9994 break;
9995 }
9996 /* fall through */
9997 case 1: /* msr spsr. */
9998 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9999 goto illegal_op;
10000 }
10001 tmp = load_reg(s, rn);
10002 if (gen_set_psr(s,
10003 msr_mask(s, (insn >> 8) & 0xf, op == 1),
10004 op == 1, tmp))
10005 goto illegal_op;
10006 break;
10007 case 2: /* cps, nop-hint. */
10008 if (((insn >> 8) & 7) == 0) {
10009 gen_nop_hint(s, insn & 0xff);
10010 }
10011 /* Implemented as NOP in user mode. */
10012 if (IS_USER(s))
10013 break;
10014 offset = 0;
10015 imm = 0;
10016 if (insn & (1 << 10)) {
10017 if (insn & (1 << 7))
10018 offset |= CPSR_A;
10019 if (insn & (1 << 6))
10020 offset |= CPSR_I;
10021 if (insn & (1 << 5))
10022 offset |= CPSR_F;
10023 if (insn & (1 << 9))
10024 imm = CPSR_A | CPSR_I | CPSR_F;
10025 }
10026 if (insn & (1 << 8)) {
10027 offset |= 0x1f;
10028 imm |= (insn & 0x1f);
10029 }
10030 if (offset) {
10031 gen_set_psr_im(s, offset, 0, imm);
10032 }
10033 break;
10034 case 3: /* Special control operations. */
10035 ARCH(7);
10036 op = (insn >> 4) & 0xf;
10037 switch (op) {
10038 case 2: /* clrex */
10039 gen_clrex(s);
10040 break;
10041 case 4: /* dsb */
10042 case 5: /* dmb */
10043 /* These execute as NOPs. */
10044 break;
10045 case 6: /* isb */
10046 /* We need to break the TB after this insn
10047 * to execute self-modifying code correctly
10048 * and also to take any pending interrupts
10049 * immediately.
10050 */
10051 gen_lookup_tb(s);
10052 break;
10053 default:
10054 goto illegal_op;
10055 }
10056 break;
10057 case 4: /* bxj */
10058 /* Trivial implementation equivalent to bx. */
10059 tmp = load_reg(s, rn);
10060 gen_bx(s, tmp);
10061 break;
10062 case 5: /* Exception return. */
10063 if (IS_USER(s)) {
10064 goto illegal_op;
10065 }
10066 if (rn != 14 || rd != 15) {
10067 goto illegal_op;
10068 }
10069 tmp = load_reg(s, rn);
10070 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10071 gen_exception_return(s, tmp);
10072 break;
10073 case 6: /* mrs cpsr. */
10074 tmp = tcg_temp_new_i32();
10075 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10076 addr = tcg_const_i32(insn & 0xff);
10077 gen_helper_v7m_mrs(tmp, cpu_env, addr);
10078 tcg_temp_free_i32(addr);
10079 } else {
10080 gen_helper_cpsr_read(tmp, cpu_env);
10081 }
10082 store_reg(s, rd, tmp);
10083 break;
10084 case 7: /* mrs spsr. */
10085 /* Not accessible in user mode. */
10086 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
10087 goto illegal_op;
10088 }
10089 tmp = load_cpu_field(spsr);
10090 store_reg(s, rd, tmp);
10091 break;
10092 }
10093 }
10094 } else {
10095 /* Conditional branch. */
10096 op = (insn >> 22) & 0xf;
10097 /* Generate a conditional jump to next instruction. */
10098 s->condlabel = gen_new_label();
10099 arm_gen_test_cc(op ^ 1, s->condlabel);
10100 s->condjmp = 1;
10102 /* offset[11:1] = insn[10:0] */
10103 offset = (insn & 0x7ff) << 1;
10104 /* offset[17:12] = insn[21:16]. */
10105 offset |= (insn & 0x003f0000) >> 4;
10106 /* offset[31:20] = insn[26]. */
10107 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10108 /* offset[18] = insn[13]. */
10109 offset |= (insn & (1 << 13)) << 5;
10110 /* offset[19] = insn[11]. */
10111 offset |= (insn & (1 << 11)) << 8;
10113 /* jump to the offset */
10114 gen_jmp(s, s->pc + offset);
10115 }
10116 } else {
10117 /* Data processing immediate. */
10118 if (insn & (1 << 25)) {
10119 if (insn & (1 << 24)) {
10120 if (insn & (1 << 20))
10121 goto illegal_op;
10122 /* Bitfield/Saturate. */
10123 op = (insn >> 21) & 7;
10124 imm = insn & 0x1f;
10125 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10126 if (rn == 15) {
10127 tmp = tcg_temp_new_i32();
10128 tcg_gen_movi_i32(tmp, 0);
10129 } else {
10130 tmp = load_reg(s, rn);
10131 }
10132 switch (op) {
10133 case 2: /* Signed bitfield extract. */
10134 imm++;
10135 if (shift + imm > 32)
10136 goto illegal_op;
10137 if (imm < 32)
10138 gen_sbfx(tmp, shift, imm);
10139 break;
10140 case 6: /* Unsigned bitfield extract. */
10141 imm++;
10142 if (shift + imm > 32)
10143 goto illegal_op;
10144 if (imm < 32)
10145 gen_ubfx(tmp, shift, (1u << imm) - 1);
10146 break;
10147 case 3: /* Bitfield insert/clear. */
10148 if (imm < shift)
10149 goto illegal_op;
10150 imm = imm + 1 - shift;
10151 if (imm != 32) {
10152 tmp2 = load_reg(s, rd);
10153 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
10154 tcg_temp_free_i32(tmp2);
10155 }
10156 break;
10157 case 7:
10158 goto illegal_op;
10159 default: /* Saturate. */
10160 if (shift) {
10161 if (op & 1)
10162 tcg_gen_sari_i32(tmp, tmp, shift);
10163 else
10164 tcg_gen_shli_i32(tmp, tmp, shift);
10165 }
10166 tmp2 = tcg_const_i32(imm);
10167 if (op & 4) {
10168 /* Unsigned. */
10169 if ((op & 1) && shift == 0) {
10170 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10171 tcg_temp_free_i32(tmp);
10172 tcg_temp_free_i32(tmp2);
10173 goto illegal_op;
10174 }
10175 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
10176 } else {
10177 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
10178 }
10179 } else {
10180 /* Signed. */
10181 if ((op & 1) && shift == 0) {
10182 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10183 tcg_temp_free_i32(tmp);
10184 tcg_temp_free_i32(tmp2);
10185 goto illegal_op;
10186 }
10187 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
10188 } else {
10189 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
10190 }
10191 }
10192 tcg_temp_free_i32(tmp2);
10193 break;
10194 }
10195 store_reg(s, rd, tmp);
10196 } else {
10197 imm = ((insn & 0x04000000) >> 15)
10198 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10199 if (insn & (1 << 22)) {
10200 /* 16-bit immediate. */
10201 imm |= (insn >> 4) & 0xf000;
10202 if (insn & (1 << 23)) {
10203 /* movt */
10204 tmp = load_reg(s, rd);
10205 tcg_gen_ext16u_i32(tmp, tmp);
10206 tcg_gen_ori_i32(tmp, tmp, imm << 16);
10207 } else {
10208 /* movw */
10209 tmp = tcg_temp_new_i32();
10210 tcg_gen_movi_i32(tmp, imm);
10211 }
10212 } else {
10213 /* Add/sub 12-bit immediate. */
10214 if (rn == 15) {
10215 offset = s->pc & ~(uint32_t)3;
10216 if (insn & (1 << 23))
10217 offset -= imm;
10218 else
10219 offset += imm;
10220 tmp = tcg_temp_new_i32();
10221 tcg_gen_movi_i32(tmp, offset);
10222 } else {
10223 tmp = load_reg(s, rn);
10224 if (insn & (1 << 23))
10225 tcg_gen_subi_i32(tmp, tmp, imm);
10226 else
10227 tcg_gen_addi_i32(tmp, tmp, imm);
10228 }
10229 }
10230 store_reg(s, rd, tmp);
10231 }
10232 } else {
10233 int shifter_out = 0;
10234 /* modified 12-bit immediate. */
10235 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10236 imm = (insn & 0xff);
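/* Thumb-2 modified immediates: an 8-bit value either replicated into
 * one of the patterns below or, for shift >= 4, rotated right with
 * bit 7 forced to one; the rotation count is i:imm3:a, and
 * shifter_out makes the rotated form update the carry flag like a
 * logical shifter operand.
 */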
10237 switch (shift) {
10238 case 0: /* XY */
10239 /* Nothing to do. */
10240 break;
10241 case 1: /* 00XY00XY */
10242 imm |= imm << 16;
10243 break;
10244 case 2: /* XY00XY00 */
10245 imm |= imm << 16;
10246 imm <<= 8;
10247 break;
10248 case 3: /* XYXYXYXY */
10249 imm |= imm << 16;
10250 imm |= imm << 8;
10251 break;
10252 default: /* Rotated constant. */
10253 shift = (shift << 1) | (imm >> 7);
10254 imm |= 0x80;
10255 imm = imm << (32 - shift);
10256 shifter_out = 1;
10257 break;
10258 }
10259 tmp2 = tcg_temp_new_i32();
10260 tcg_gen_movi_i32(tmp2, imm);
10261 rn = (insn >> 16) & 0xf;
10262 if (rn == 15) {
10263 tmp = tcg_temp_new_i32();
10264 tcg_gen_movi_i32(tmp, 0);
10265 } else {
10266 tmp = load_reg(s, rn);
10267 }
10268 op = (insn >> 21) & 0xf;
10269 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
10270 shifter_out, tmp, tmp2))
10271 goto illegal_op;
10272 tcg_temp_free_i32(tmp2);
10273 rd = (insn >> 8) & 0xf;
10274 if (rd != 15) {
10275 store_reg(s, rd, tmp);
10276 } else {
10277 tcg_temp_free_i32(tmp);
10278 }
10279 }
10280 }
10281 break;
10282 case 12: /* Load/store single data item. */
10283 {
10284 int postinc = 0;
10285 int writeback = 0;
10286 int memidx;
10287 if ((insn & 0x01100000) == 0x01000000) {
10288 if (disas_neon_ls_insn(s, insn)) {
10289 goto illegal_op;
10290 }
10291 break;
10292 }
10293 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10294 if (rs == 15) {
10295 if (!(insn & (1 << 20))) {
10296 goto illegal_op;
10297 }
10298 if (op != 2) {
10299 /* Byte or halfword load space with dest == r15 : memory hints.
10300 * Catch them early so we don't emit pointless addressing code.
10301 * This space is a mix of:
10302 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10303 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10304 * cores)
10305 * unallocated hints, which must be treated as NOPs
10306 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10307 * which is easiest for the decoding logic
10308 * Some space which must UNDEF
10309 */
10310 int op1 = (insn >> 23) & 3;
10311 int op2 = (insn >> 6) & 0x3f;
10312 if (op & 2) {
10313 goto illegal_op;
10314 }
10315 if (rn == 15) {
10316 /* UNPREDICTABLE, unallocated hint or
10317 * PLD/PLDW/PLI (literal)
10318 */
10319 return 0;
10320 }
10321 if (op1 & 1) {
10322 return 0; /* PLD/PLDW/PLI or unallocated hint */
10323 }
10324 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
10325 return 0; /* PLD/PLDW/PLI or unallocated hint */
10326 }
10327 /* UNDEF space, or an UNPREDICTABLE */
10328 return 1;
10329 }
10331 memidx = get_mem_index(s);
10332 if (rn == 15) {
10333 addr = tcg_temp_new_i32();
10334 /* PC relative. */
10335 /* s->pc has already been incremented by 4. */
10336 imm = s->pc & 0xfffffffc;
10337 if (insn & (1 << 23))
10338 imm += insn & 0xfff;
10339 else
10340 imm -= insn & 0xfff;
10341 tcg_gen_movi_i32(addr, imm);
10342 } else {
10343 addr = load_reg(s, rn);
10344 if (insn & (1 << 23)) {
10345 /* Positive offset. */
10346 imm = insn & 0xfff;
10347 tcg_gen_addi_i32(addr, addr, imm);
10348 } else {
10349 imm = insn & 0xff;
10350 switch ((insn >> 8) & 0xf) {
10351 case 0x0: /* Shifted Register. */
10352 shift = (insn >> 4) & 0xf;
10353 if (shift > 3) {
10354 tcg_temp_free_i32(addr);
10355 goto illegal_op;
10356 }
10357 tmp = load_reg(s, rm);
10358 if (shift)
10359 tcg_gen_shli_i32(tmp, tmp, shift);
10360 tcg_gen_add_i32(addr, addr, tmp);
10361 tcg_temp_free_i32(tmp);
10362 break;
10363 case 0xc: /* Negative offset. */
10364 tcg_gen_addi_i32(addr, addr, -imm);
10365 break;
10366 case 0xe: /* User privilege. */
10367 tcg_gen_addi_i32(addr, addr, imm);
10368 memidx = get_a32_user_mem_index(s);
10369 break;
10370 case 0x9: /* Post-decrement. */
10371 imm = -imm;
10372 /* Fall through. */
10373 case 0xb: /* Post-increment. */
10374 postinc = 1;
10375 writeback = 1;
10376 break;
10377 case 0xd: /* Pre-decrement. */
10378 imm = -imm;
10379 /* Fall through. */
10380 case 0xf: /* Pre-increment. */
10381 tcg_gen_addi_i32(addr, addr, imm);
10382 writeback = 1;
10383 break;
10384 default:
10385 tcg_temp_free_i32(addr);
10386 goto illegal_op;
10387 }
10388 }
10389 }
10390 if (insn & (1 << 20)) {
10391 /* Load. */
10392 tmp = tcg_temp_new_i32();
10393 switch (op) {
10394 case 0:
10395 gen_aa32_ld8u(tmp, addr, memidx);
10396 break;
10397 case 4:
10398 gen_aa32_ld8s(tmp, addr, memidx);
10399 break;
10400 case 1:
10401 gen_aa32_ld16u(tmp, addr, memidx);
10402 break;
10403 case 5:
10404 gen_aa32_ld16s(tmp, addr, memidx);
10405 break;
10406 case 2:
10407 gen_aa32_ld32u(tmp, addr, memidx);
10408 break;
10409 default:
10410 tcg_temp_free_i32(tmp);
10411 tcg_temp_free_i32(addr);
10412 goto illegal_op;
10413 }
10414 if (rs == 15) {
10415 gen_bx(s, tmp);
10416 } else {
10417 store_reg(s, rs, tmp);
10418 }
10419 } else {
10420 /* Store. */
10421 tmp = load_reg(s, rs);
10422 switch (op) {
10423 case 0:
10424 gen_aa32_st8(tmp, addr, memidx);
10425 break;
10426 case 1:
10427 gen_aa32_st16(tmp, addr, memidx);
10428 break;
10429 case 2:
10430 gen_aa32_st32(tmp, addr, memidx);
10431 break;
10432 default:
10433 tcg_temp_free_i32(tmp);
10434 tcg_temp_free_i32(addr);
10435 goto illegal_op;
10436 }
10437 tcg_temp_free_i32(tmp);
10438 }
10439 if (postinc)
10440 tcg_gen_addi_i32(addr, addr, imm);
10441 if (writeback) {
10442 store_reg(s, rn, addr);
10443 } else {
10444 tcg_temp_free_i32(addr);
10445 }
10446 }
10447 break;
10448 default:
10449 goto illegal_op;
10450 }
10451 return 0;
10452 illegal_op:
10453 return 1;
10454 }
10456 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
10457 {
10458 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10459 int32_t offset;
10460 int i;
10461 TCGv_i32 tmp;
10462 TCGv_i32 tmp2;
10463 TCGv_i32 addr;
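/* Inside an IT block, condexec_cond/condexec_mask carry the block
 * state: unless the current slot's condition is AL, the instruction
 * is guarded by a conditional branch to condlabel that skips it.
 */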
10465 if (s->condexec_mask) {
10466 cond = s->condexec_cond;
10467 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10468 s->condlabel = gen_new_label();
10469 arm_gen_test_cc(cond ^ 1, s->condlabel);
10470 s->condjmp = 1;
10471 }
10472 }
10474 insn = arm_lduw_code(env, s->pc, s->bswap_code);
10475 s->pc += 2;
10477 switch (insn >> 12) {
10478 case 0: case 1:
10480 rd = insn & 7;
10481 op = (insn >> 11) & 3;
10482 if (op == 3) {
10483 /* add/subtract */
10484 rn = (insn >> 3) & 7;
10485 tmp = load_reg(s, rn);
10486 if (insn & (1 << 10)) {
10487 /* immediate */
10488 tmp2 = tcg_temp_new_i32();
10489 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
10490 } else {
10491 /* reg */
10492 rm = (insn >> 6) & 7;
10493 tmp2 = load_reg(s, rm);
10494 }
10495 if (insn & (1 << 9)) {
10496 if (s->condexec_mask)
10497 tcg_gen_sub_i32(tmp, tmp, tmp2);
10498 else
10499 gen_sub_CC(tmp, tmp, tmp2);
10500 } else {
10501 if (s->condexec_mask)
10502 tcg_gen_add_i32(tmp, tmp, tmp2);
10503 else
10504 gen_add_CC(tmp, tmp, tmp2);
10505 }
10506 tcg_temp_free_i32(tmp2);
10507 store_reg(s, rd, tmp);
10508 } else {
10509 /* shift immediate */
10510 rm = (insn >> 3) & 7;
10511 shift = (insn >> 6) & 0x1f;
10512 tmp = load_reg(s, rm);
10513 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10514 if (!s->condexec_mask)
10515 gen_logic_CC(tmp);
10516 store_reg(s, rd, tmp);
10517 }
10518 break;
10519 case 2: case 3:
10520 /* arithmetic large immediate */
10521 op = (insn >> 11) & 3;
10522 rd = (insn >> 8) & 0x7;
10523 if (op == 0) { /* mov */
10524 tmp = tcg_temp_new_i32();
10525 tcg_gen_movi_i32(tmp, insn & 0xff);
10526 if (!s->condexec_mask)
10527 gen_logic_CC(tmp);
10528 store_reg(s, rd, tmp);
10529 } else {
10530 tmp = load_reg(s, rd);
10531 tmp2 = tcg_temp_new_i32();
10532 tcg_gen_movi_i32(tmp2, insn & 0xff);
10533 switch (op) {
10534 case 1: /* cmp */
10535 gen_sub_CC(tmp, tmp, tmp2);
10536 tcg_temp_free_i32(tmp);
10537 tcg_temp_free_i32(tmp2);
10538 break;
10539 case 2: /* add */
10540 if (s->condexec_mask)
10541 tcg_gen_add_i32(tmp, tmp, tmp2);
10542 else
10543 gen_add_CC(tmp, tmp, tmp2);
10544 tcg_temp_free_i32(tmp2);
10545 store_reg(s, rd, tmp);
10546 break;
10547 case 3: /* sub */
10548 if (s->condexec_mask)
10549 tcg_gen_sub_i32(tmp, tmp, tmp2);
10550 else
10551 gen_sub_CC(tmp, tmp, tmp2);
10552 tcg_temp_free_i32(tmp2);
10553 store_reg(s, rd, tmp);
10554 break;
10555 }
10556 }
10557 break;
10558 case 4:
10559 if (insn & (1 << 11)) {
10560 rd = (insn >> 8) & 7;
10561 /* load pc-relative. Bit 1 of PC is ignored. */
10562 val = s->pc + 2 + ((insn & 0xff) * 4);
10563 val &= ~(uint32_t)2;
10564 addr = tcg_temp_new_i32();
10565 tcg_gen_movi_i32(addr, val);
10566 tmp = tcg_temp_new_i32();
10567 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10568 tcg_temp_free_i32(addr);
10569 store_reg(s, rd, tmp);
10570 break;
10571 }
10572 if (insn & (1 << 10)) {
10573 /* data processing extended or blx */
10574 rd = (insn & 7) | ((insn >> 4) & 8);
10575 rm = (insn >> 3) & 0xf;
10576 op = (insn >> 8) & 3;
10577 switch (op) {
10578 case 0: /* add */
10579 tmp = load_reg(s, rd);
10580 tmp2 = load_reg(s, rm);
10581 tcg_gen_add_i32(tmp, tmp, tmp2);
10582 tcg_temp_free_i32(tmp2);
10583 store_reg(s, rd, tmp);
10584 break;
10585 case 1: /* cmp */
10586 tmp = load_reg(s, rd);
10587 tmp2 = load_reg(s, rm);
10588 gen_sub_CC(tmp, tmp, tmp2);
10589 tcg_temp_free_i32(tmp2);
10590 tcg_temp_free_i32(tmp);
10591 break;
10592 case 2: /* mov/cpy */
10593 tmp = load_reg(s, rm);
10594 store_reg(s, rd, tmp);
10595 break;
10596 case 3:/* branch [and link] exchange thumb register */
10597 tmp = load_reg(s, rm);
10598 if (insn & (1 << 7)) {
10599 ARCH(5);
10600 val = (uint32_t)s->pc | 1;
10601 tmp2 = tcg_temp_new_i32();
10602 tcg_gen_movi_i32(tmp2, val);
10603 store_reg(s, 14, tmp2);
10604 }
10605 /* already thumb, no need to check */
10606 gen_bx(s, tmp);
10607 break;
10608 }
10609 break;
10610 }
10612 /* data processing register */
10613 rd = insn & 7;
10614 rm = (insn >> 3) & 7;
10615 op = (insn >> 6) & 0xf;
10616 if (op == 2 || op == 3 || op == 4 || op == 7) {
10617 /* the shift/rotate ops want the operands backwards */
10618 val = rm;
10619 rm = rd;
10620 rd = val;
10621 val = 1;
10622 } else {
10623 val = 0;
10624 }
10626 if (op == 9) { /* neg */
10627 tmp = tcg_temp_new_i32();
10628 tcg_gen_movi_i32(tmp, 0);
10629 } else if (op != 0xf) { /* mvn doesn't read its first operand */
10630 tmp = load_reg(s, rd);
10631 } else {
10632 TCGV_UNUSED_I32(tmp);
10633 }
10635 tmp2 = load_reg(s, rm);
10636 switch (op) {
10637 case 0x0: /* and */
10638 tcg_gen_and_i32(tmp, tmp, tmp2);
10639 if (!s->condexec_mask)
10640 gen_logic_CC(tmp);
10641 break;
10642 case 0x1: /* eor */
10643 tcg_gen_xor_i32(tmp, tmp, tmp2);
10644 if (!s->condexec_mask)
10645 gen_logic_CC(tmp);
10646 break;
10647 case 0x2: /* lsl */
10648 if (s->condexec_mask) {
10649 gen_shl(tmp2, tmp2, tmp);
10650 } else {
10651 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
10652 gen_logic_CC(tmp2);
10653 }
10654 break;
10655 case 0x3: /* lsr */
10656 if (s->condexec_mask) {
10657 gen_shr(tmp2, tmp2, tmp);
10658 } else {
10659 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
10660 gen_logic_CC(tmp2);
10661 }
10662 break;
10663 case 0x4: /* asr */
10664 if (s->condexec_mask) {
10665 gen_sar(tmp2, tmp2, tmp);
10666 } else {
10667 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
10668 gen_logic_CC(tmp2);
10669 }
10670 break;
10671 case 0x5: /* adc */
10672 if (s->condexec_mask) {
10673 gen_adc(tmp, tmp2);
10674 } else {
10675 gen_adc_CC(tmp, tmp, tmp2);
10676 }
10677 break;
10678 case 0x6: /* sbc */
10679 if (s->condexec_mask) {
10680 gen_sub_carry(tmp, tmp, tmp2);
10681 } else {
10682 gen_sbc_CC(tmp, tmp, tmp2);
10683 }
10684 break;
10685 case 0x7: /* ror */
10686 if (s->condexec_mask) {
10687 tcg_gen_andi_i32(tmp, tmp, 0x1f);
10688 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
10689 } else {
10690 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
10691 gen_logic_CC(tmp2);
10692 }
10693 break;
10694 case 0x8: /* tst */
10695 tcg_gen_and_i32(tmp, tmp, tmp2);
10696 gen_logic_CC(tmp);
10697 rd = 16;
10698 break;
10699 case 0x9: /* neg */
10700 if (s->condexec_mask)
10701 tcg_gen_neg_i32(tmp, tmp2);
10702 else
10703 gen_sub_CC(tmp, tmp, tmp2);
10704 break;
10705 case 0xa: /* cmp */
10706 gen_sub_CC(tmp, tmp, tmp2);
10707 rd = 16;
10708 break;
10709 case 0xb: /* cmn */
10710 gen_add_CC(tmp, tmp, tmp2);
10711 rd = 16;
10712 break;
10713 case 0xc: /* orr */
10714 tcg_gen_or_i32(tmp, tmp, tmp2);
10715 if (!s->condexec_mask)
10716 gen_logic_CC(tmp);
10717 break;
10718 case 0xd: /* mul */
10719 tcg_gen_mul_i32(tmp, tmp, tmp2);
10720 if (!s->condexec_mask)
10721 gen_logic_CC(tmp);
10722 break;
10723 case 0xe: /* bic */
10724 tcg_gen_andc_i32(tmp, tmp, tmp2);
10725 if (!s->condexec_mask)
10726 gen_logic_CC(tmp);
10727 break;
10728 case 0xf: /* mvn */
10729 tcg_gen_not_i32(tmp2, tmp2);
10730 if (!s->condexec_mask)
10731 gen_logic_CC(tmp2);
10732 val = 1;
10733 rm = rd;
10734 break;
10735 }
10736 if (rd != 16) {
10737 if (val) {
10738 store_reg(s, rm, tmp2);
10739 if (op != 0xf)
10740 tcg_temp_free_i32(tmp);
10741 } else {
10742 store_reg(s, rd, tmp);
10743 tcg_temp_free_i32(tmp2);
10744 }
10745 } else {
10746 tcg_temp_free_i32(tmp);
10747 tcg_temp_free_i32(tmp2);
10748 }
10749 break;
10751 case 5:
10752 /* load/store register offset. */
10753 rd = insn & 7;
10754 rn = (insn >> 3) & 7;
10755 rm = (insn >> 6) & 7;
10756 op = (insn >> 9) & 7;
10757 addr = load_reg(s, rn);
10758 tmp = load_reg(s, rm);
10759 tcg_gen_add_i32(addr, addr, tmp);
10760 tcg_temp_free_i32(tmp);
10762 if (op < 3) { /* store */
10763 tmp = load_reg(s, rd);
10764 } else {
10765 tmp = tcg_temp_new_i32();
10766 }
10768 switch (op) {
10769 case 0: /* str */
10770 gen_aa32_st32(tmp, addr, get_mem_index(s));
10771 break;
10772 case 1: /* strh */
10773 gen_aa32_st16(tmp, addr, get_mem_index(s));
10774 break;
10775 case 2: /* strb */
10776 gen_aa32_st8(tmp, addr, get_mem_index(s));
10777 break;
10778 case 3: /* ldrsb */
10779 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
10780 break;
10781 case 4: /* ldr */
10782 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10783 break;
10784 case 5: /* ldrh */
10785 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
10786 break;
10787 case 6: /* ldrb */
10788 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
10789 break;
10790 case 7: /* ldrsh */
10791 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
10792 break;
10793 }
10794 if (op >= 3) { /* load */
10795 store_reg(s, rd, tmp);
10796 } else {
10797 tcg_temp_free_i32(tmp);
10798 }
10799 tcg_temp_free_i32(addr);
10800 break;
10802 case 6:
10803 /* load/store word immediate offset */
10804 rd = insn & 7;
10805 rn = (insn >> 3) & 7;
10806 addr = load_reg(s, rn);
10807 val = (insn >> 4) & 0x7c;
10808 tcg_gen_addi_i32(addr, addr, val);
10810 if (insn & (1 << 11)) {
10811 /* load */
10812 tmp = tcg_temp_new_i32();
10813 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10814 store_reg(s, rd, tmp);
10815 } else {
10816 /* store */
10817 tmp = load_reg(s, rd);
10818 gen_aa32_st32(tmp, addr, get_mem_index(s));
10819 tcg_temp_free_i32(tmp);
10820 }
10821 tcg_temp_free_i32(addr);
10822 break;
10824 case 7:
10825 /* load/store byte immediate offset */
10826 rd = insn & 7;
10827 rn = (insn >> 3) & 7;
10828 addr = load_reg(s, rn);
10829 val = (insn >> 6) & 0x1f;
10830 tcg_gen_addi_i32(addr, addr, val);
10832 if (insn & (1 << 11)) {
10833 /* load */
10834 tmp = tcg_temp_new_i32();
10835 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
10836 store_reg(s, rd, tmp);
10837 } else {
10838 /* store */
10839 tmp = load_reg(s, rd);
10840 gen_aa32_st8(tmp, addr, get_mem_index(s));
10841 tcg_temp_free_i32(tmp);
10842 }
10843 tcg_temp_free_i32(addr);
10844 break;
10846 case 8:
10847 /* load/store halfword immediate offset */
10848 rd = insn & 7;
10849 rn = (insn >> 3) & 7;
10850 addr = load_reg(s, rn);
10851 val = (insn >> 5) & 0x3e;
10852 tcg_gen_addi_i32(addr, addr, val);
10854 if (insn & (1 << 11)) {
10855 /* load */
10856 tmp = tcg_temp_new_i32();
10857 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
10858 store_reg(s, rd, tmp);
10859 } else {
10860 /* store */
10861 tmp = load_reg(s, rd);
10862 gen_aa32_st16(tmp, addr, get_mem_index(s));
10863 tcg_temp_free_i32(tmp);
10864 }
10865 tcg_temp_free_i32(addr);
10866 break;
10868 case 9:
10869 /* load/store from stack */
10870 rd = (insn >> 8) & 7;
10871 addr = load_reg(s, 13);
10872 val = (insn & 0xff) * 4;
10873 tcg_gen_addi_i32(addr, addr, val);
10875 if (insn & (1 << 11)) {
10876 /* load */
10877 tmp = tcg_temp_new_i32();
10878 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10879 store_reg(s, rd, tmp);
10880 } else {
10881 /* store */
10882 tmp = load_reg(s, rd);
10883 gen_aa32_st32(tmp, addr, get_mem_index(s));
10884 tcg_temp_free_i32(tmp);
10885 }
10886 tcg_temp_free_i32(addr);
10887 break;
10889 case 10:
10890 /* add to high reg */
10891 rd = (insn >> 8) & 7;
10892 if (insn & (1 << 11)) {
10893 /* SP */
10894 tmp = load_reg(s, 13);
10895 } else {
10896 /* PC. bit 1 is ignored. */
10897 tmp = tcg_temp_new_i32();
10898 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
10899 }
10900 val = (insn & 0xff) * 4;
10901 tcg_gen_addi_i32(tmp, tmp, val);
10902 store_reg(s, rd, tmp);
10903 break;
10905 case 11:
10906 /* misc */
10907 op = (insn >> 8) & 0xf;
10908 switch (op) {
10909 case 0:
10910 /* adjust stack pointer */
10911 tmp = load_reg(s, 13);
10912 val = (insn & 0x7f) * 4;
10913 if (insn & (1 << 7))
10914 val = -(int32_t)val;
10915 tcg_gen_addi_i32(tmp, tmp, val);
10916 store_reg(s, 13, tmp);
10917 break;
10919 case 2: /* sign/zero extend. */
10920 ARCH(6);
10921 rd = insn & 7;
10922 rm = (insn >> 3) & 7;
10923 tmp = load_reg(s, rm);
10924 switch ((insn >> 6) & 3) {
10925 case 0: gen_sxth(tmp); break;
10926 case 1: gen_sxtb(tmp); break;
10927 case 2: gen_uxth(tmp); break;
10928 case 3: gen_uxtb(tmp); break;
10930 store_reg(s, rd, tmp);
10931 break;
10932 case 4: case 5: case 0xc: case 0xd:
10933 /* push/pop */
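            /* Done in two passes: first total up the transfer size so a
             * push on the full-descending stack can pre-decrement the
             * base, then walk the register list doing the actual
             * loads/stores.
             */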
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    /* advance to the next address. */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED_I32(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
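            /* 0x0900 checks bit 11 (this is a pop) together with bit 8
             * (PC in the register list): only in that combination was tmp
             * loaded with the new PC above.
             */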
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(s, 15, tmp);
            }
            break;

        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
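            /* CB{N}Z branches forward by i:imm5:'0': (insn & 0xf8) >> 2
             * is imm5 already shifted left once, and (insn & 0x200) >> 3
             * moves the i bit into position 6.  The target computed below
             * is then PC + 4 + offset.
             */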
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;

        case 15: /* IT, nop-hint. */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then. */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
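            /* Only the top three bits of firstcond are kept (the & 0xe):
             * the low bit of each step's condition is supplied by the
             * mask bits as they shift out, which is how the T/E pattern
             * of the IT block is applied per instruction (see the IT
             * advance logic in gen_intermediate_code).
             */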
            /* No actual code generated for this insn, just setup state. */
            break;

        case 0xe: /* bkpt */
        {
            int imm8 = extract32(insn, 0, 8);
            ARCH(5);
            gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
                               default_exception_el(s));
            break;
        }

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;

        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != s->bswap_code) {
                    /* Dynamic endianness switching not implemented. */
                    qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
                    goto illegal_op;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    break;
                }
                if (arm_dc_feature(s, ARM_FEATURE_M)) {
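                    /* On M profile, CPS is implemented as MSR writes to
                     * the special registers: SYSm 19 is FAULTMASK and
                     * SYSm 16 is PRIMASK in the v7m_msr helper's
                     * numbering.
                     */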
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
    {
        /* load/store multiple */
        TCGv_i32 loaded_var;
        TCGV_UNUSED_I32(loaded_var);
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    if (i == rn) {
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 8);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        arm_gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
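        /* The shift pair sign-extends imm8; it is scaled by 2 below, so
         * e.g. imm8 = 0xfe gives offset = -2 and a target of PC + 4 - 4,
         * i.e. a branch back to this insn.
         */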
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
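        /* imm11 (insn bits [10:0]) is sign-extended the same way and
         * scaled by 2; the + 2 below accounts for the PC reading as
         * this insn's address + 4.
         */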
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
    /* Return true if the insn at dc->pc might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     */
    uint16_t insn;

    if ((s->pc & 3) == 0) {
        /* At a 4-aligned address we can't be crossing a page */
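        /* (A Thumb insn is at most 4 bytes, so starting 4-aligned it
         * cannot straddle the page end.)
         */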
        return false;
    }

    /* This must be a Thumb insn */
    insn = arm_lduw_code(env, s->pc, s->bswap_code);

    if ((insn >> 11) >= 0x1d) {
        /* Top five bits 0b11101 / 0b11110 / 0b11111: this is the
         * first half of a 32-bit Thumb insn. Thumb-1 cores might
         * end up actually treating this as two 16-bit insns (see the
         * code at the start of disas_thumb2_insn()) but we don't bother
         * to check for that as it is unlikely, and false positives here
         * are harmless.
         */
        return true;
    }
    /* Definitely a 16-bit insn, can't be crossing a page. */
    return false;
}

/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;
    bool end_of_page;

    /* generate intermediate code */

    /* The A64 decoder has its own top level loop, because it doesn't need
     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
     */
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        gen_intermediate_code_a64(cpu, tb);
        return;
    }

    pc_start = tb->pc;

    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
    dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
        tcg_gen_insn_start(dc->pc,
                           (dc->condexec_cond << 4) | (dc->condexec_mask >> 1));
        num_insns++;

#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page. */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block. */
            gen_exception_internal(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block. */
            gen_exception_internal(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            CPUBreakpoint *bp;
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (bp->flags & BP_CPU) {
                        gen_set_condexec(dc);
                        gen_set_pc_im(dc, dc->pc);
                        gen_helper_check_breakpoints(cpu_env);
                        /* End the TB early; it's likely not going to be executed */
                        dc->is_jmp = DISAS_UPDATE;
                    } else {
                        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
                        /* The address covered by the breakpoint must be
                           included in [tb->pc, tb->pc + tb->size) in order
                           for it to be properly cleared -- thus we
                           increment the PC here so that the logic setting
                           tb->size below does the right thing. */
                        /* TODO: Advance PC by correct instruction length to
                         * avoid disassembler error messages */
                        dc->pc += 2;
                        goto done_generating;
                    }
                    break;
                }
            }
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (dc->ss_active && !dc->pstate_ss) {
            /* Singlestep state is Active-pending.
             * If we're in this state at the start of a TB then either
             *  a) we just took an exception to an EL which is being debugged
             *     and this is the first insn in the exception handler
             *  b) debug exceptions were masked and we just unmasked them
             *     without changing EL (eg by clearing PSTATE.D)
             * In either case we're going to take a swstep exception in the
             * "did not step an insn" case, and so the syndrome ISV and EX
             * bits should be zero.
             */
            assert(num_insns == 1);
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                          default_exception_el(dc));
            goto done_generating;
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
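            /* If we are in an IT block, advance its state: the next T/E
             * bit shifts out of the mask into the low bit of the
             * condition; once only the trailing '1' marker has shifted
             * out, the mask becomes 0 and the block is over.
             */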
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                    | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            unsigned int insn = arm_ldl_code(env, dc->pc, dc->bswap_code);
            dc->pc += 4;
            disas_arm_insn(dc, insn);
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached. This
         * ensures prefetch aborts occur at the right place. */

        /* We want to stop the TB if the next insn starts in a new page,
         * or if it spans between this page and the next. This means that
         * if we're looking at the last halfword in the page we need to
         * see if it's a 16-bit Thumb insn (which will fit in this TB)
         * or a 32-bit Thumb insn (which won't).
         * This is to avoid generating a silly TB with a single 16-bit insn
         * in it at the end of this page (which would execute correctly
         * but isn't very efficient).
         */
        end_of_page = (dc->pc >= next_page_start) ||
            ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));

    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             !dc->ss_active &&
             !end_of_page &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code. */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
        /* Unconditional and "condition passed" instruction codepath. */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                /* FIXME: Single stepping a WFI insn will not halt
                   the CPU. */
                gen_exception_internal(EXCP_DEBUG);
            }
        }
        if (dc->condjmp) {
            /* "Condition failed" instruction codepath. */
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_set_pc_im(dc, dc->pc);
            if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                gen_exception_internal(EXCP_DEBUG);
            }
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        case DISAS_JUMP:
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}

static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
    "???", "???", "hyp", "und", "???", "???", "???", "sys"
};

void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;
    uint32_t psr;
    const char *ns_status;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
        return;
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);

    if (arm_feature(env, ARM_FEATURE_EL3) &&
        (psr & CPSR_M) != ARM_CPU_MODE_MON) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }

    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                ns_status,
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
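    /* The trailing %d prints 32 or 26: CPSR bit 4 (M[4]) is set for all
     * 32-bit modes and would only be clear on legacy 26-bit ARM, shown
     * here for completeness.
     */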
    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = float64_val(env->vfp.regs[i]);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}

void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];