qemu/ar7.git: target-arm/translate.c
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
27 #include "cpu.h"
28 #include "internals.h"
29 #include "disas/disas.h"
30 #include "tcg-op.h"
31 #include "qemu/log.h"
32 #include "qemu/bitops.h"
33 #include "arm_ldst.h"
35 #include "exec/helper-proto.h"
36 #include "exec/helper-gen.h"
38 #include "trace-tcg.h"
41 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
43 /* currently all emulated v5 cores are also v5TE, so don't bother */
44 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
45 #define ENABLE_ARCH_5J 0
46 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
52 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
54 #include "translate.h"
56 #if defined(CONFIG_USER_ONLY)
57 #define IS_USER(s) 1
58 #else
59 #define IS_USER(s) (s->user)
60 #endif
62 TCGv_ptr cpu_env;
63 /* We reuse the same 64-bit temporaries for efficiency. */
64 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
65 static TCGv_i32 cpu_R[16];
66 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
67 TCGv_i64 cpu_exclusive_addr;
68 TCGv_i64 cpu_exclusive_val;
69 #ifdef CONFIG_USER_ONLY
70 TCGv_i64 cpu_exclusive_test;
71 TCGv_i32 cpu_exclusive_info;
72 #endif
74 /* FIXME: These should be removed. */
75 static TCGv_i32 cpu_F0s, cpu_F1s;
76 static TCGv_i64 cpu_F0d, cpu_F1d;
78 #include "exec/gen-icount.h"
80 static const char *regnames[] =
81 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
82 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
84 /* initialize TCG globals. */
85 void arm_translate_init(void)
87 int i;
89 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
91 for (i = 0; i < 16; i++) {
92 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
93 offsetof(CPUARMState, regs[i]),
94 regnames[i]);
96 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
97 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
98 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
99 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
101 cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
102 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
103 cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
104 offsetof(CPUARMState, exclusive_val), "exclusive_val");
105 #ifdef CONFIG_USER_ONLY
106 cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
107 offsetof(CPUARMState, exclusive_test), "exclusive_test");
108 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
109 offsetof(CPUARMState, exclusive_info), "exclusive_info");
110 #endif
112 a64_translate_init();
115 static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
117 /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
118 * insns:
119 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
120 * otherwise, access as if at PL0.
122 switch (s->mmu_idx) {
123 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
124 case ARMMMUIdx_S12NSE0:
125 case ARMMMUIdx_S12NSE1:
126 return ARMMMUIdx_S12NSE0;
127 case ARMMMUIdx_S1E3:
128 case ARMMMUIdx_S1SE0:
129 case ARMMMUIdx_S1SE1:
130 return ARMMMUIdx_S1SE0;
131 case ARMMMUIdx_S2NS:
132 default:
133 g_assert_not_reached();
137 static inline TCGv_i32 load_cpu_offset(int offset)
139 TCGv_i32 tmp = tcg_temp_new_i32();
140 tcg_gen_ld_i32(tmp, cpu_env, offset);
141 return tmp;
144 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
146 static inline void store_cpu_offset(TCGv_i32 var, int offset)
148 tcg_gen_st_i32(var, cpu_env, offset);
149 tcg_temp_free_i32(var);
152 #define store_cpu_field(var, name) \
153 store_cpu_offset(var, offsetof(CPUARMState, name))
155 /* Set a variable to the value of a CPU register. */
156 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
158 if (reg == 15) {
159 uint32_t addr;
160 /* normally, since we updated PC, we need only to add one insn */
161 if (s->thumb)
162 addr = (long)s->pc + 2;
163 else
164 addr = (long)s->pc + 4;
165 tcg_gen_movi_i32(var, addr);
166 } else {
167 tcg_gen_mov_i32(var, cpu_R[reg]);
171 /* Create a new temporary and set it to the value of a CPU register. */
172 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
174 TCGv_i32 tmp = tcg_temp_new_i32();
175 load_reg_var(s, tmp, reg);
176 return tmp;
179 /* Set a CPU register. The source must be a temporary and will be
180 marked as dead. */
181 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
183 if (reg == 15) {
184 tcg_gen_andi_i32(var, var, ~1);
185 s->is_jmp = DISAS_JUMP;
187 tcg_gen_mov_i32(cpu_R[reg], var);
188 tcg_temp_free_i32(var);
191 /* Value extensions. */
192 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
193 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
194 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
195 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
197 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
198 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
201 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
203 TCGv_i32 tmp_mask = tcg_const_i32(mask);
204 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
205 tcg_temp_free_i32(tmp_mask);
207 /* Set NZCV flags from the high 4 bits of var. */
208 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
210 static void gen_exception_internal(int excp)
212 TCGv_i32 tcg_excp = tcg_const_i32(excp);
214 assert(excp_is_internal(excp));
215 gen_helper_exception_internal(cpu_env, tcg_excp);
216 tcg_temp_free_i32(tcg_excp);
219 static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
221 TCGv_i32 tcg_excp = tcg_const_i32(excp);
222 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
223 TCGv_i32 tcg_el = tcg_const_i32(target_el);
225 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
226 tcg_syn, tcg_el);
228 tcg_temp_free_i32(tcg_el);
229 tcg_temp_free_i32(tcg_syn);
230 tcg_temp_free_i32(tcg_excp);
233 static void gen_ss_advance(DisasContext *s)
235 /* If the singlestep state is Active-not-pending, advance to
236 * Active-pending.
238 if (s->ss_active) {
239 s->pstate_ss = 0;
240 gen_helper_clear_pstate_ss(cpu_env);
244 static void gen_step_complete_exception(DisasContext *s)
246 /* We just completed step of an insn. Move from Active-not-pending
247 * to Active-pending, and then also take the swstep exception.
248 * This corresponds to making the (IMPDEF) choice to prioritize
249 * swstep exceptions over asynchronous exceptions taken to an exception
250 * level where debug is disabled. This choice has the advantage that
251 * we do not need to maintain internal state corresponding to the
252 * ISV/EX syndrome bits between completion of the step and generation
253 * of the exception, and our syndrome information is always correct.
255 gen_ss_advance(s);
256 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
257 default_exception_el(s));
258 s->is_jmp = DISAS_EXC;
261 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
263 TCGv_i32 tmp1 = tcg_temp_new_i32();
264 TCGv_i32 tmp2 = tcg_temp_new_i32();
265 tcg_gen_ext16s_i32(tmp1, a);
266 tcg_gen_ext16s_i32(tmp2, b);
267 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
268 tcg_temp_free_i32(tmp2);
269 tcg_gen_sari_i32(a, a, 16);
270 tcg_gen_sari_i32(b, b, 16);
271 tcg_gen_mul_i32(b, b, a);
272 tcg_gen_mov_i32(a, tmp1);
273 tcg_temp_free_i32(tmp1);
276 /* Byteswap each halfword. */
277 static void gen_rev16(TCGv_i32 var)
279 TCGv_i32 tmp = tcg_temp_new_i32();
280 tcg_gen_shri_i32(tmp, var, 8);
281 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
282 tcg_gen_shli_i32(var, var, 8);
283 tcg_gen_andi_i32(var, var, 0xff00ff00);
284 tcg_gen_or_i32(var, var, tmp);
285 tcg_temp_free_i32(tmp);
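/* Illustrative sketch, not part of the original file: the TCG sequence in
 * gen_rev16() above computes the plain C expression below, swapping the two
 * bytes inside each halfword independently (the helper name is hypothetical):
 *
 *   static inline uint32_t example_rev16(uint32_t x)
 *   {
 *       return ((x >> 8) & 0x00ff00ffu) | ((x << 8) & 0xff00ff00u);
 *   }
 *
 * e.g. 0x11223344 -> 0x22114433.
 */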
288 /* Byteswap low halfword and sign extend. */
289 static void gen_revsh(TCGv_i32 var)
291 tcg_gen_ext16u_i32(var, var);
292 tcg_gen_bswap16_i32(var, var);
293 tcg_gen_ext16s_i32(var, var);
296 /* Unsigned bitfield extract. */
297 static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
299 if (shift)
300 tcg_gen_shri_i32(var, var, shift);
301 tcg_gen_andi_i32(var, var, mask);
304 /* Signed bitfield extract. */
305 static void gen_sbfx(TCGv_i32 var, int shift, int width)
307 uint32_t signbit;
309 if (shift)
310 tcg_gen_sari_i32(var, var, shift);
311 if (shift + width < 32) {
312 signbit = 1u << (width - 1);
313 tcg_gen_andi_i32(var, var, (1u << width) - 1);
314 tcg_gen_xori_i32(var, var, signbit);
315 tcg_gen_subi_i32(var, var, signbit);
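/* Illustrative sketch, not part of the original file: the xori/subi pair in
 * gen_sbfx() above is the usual branch-free sign extension of a 'width'-bit
 * field.  In plain C terms:
 *
 *   field  = (x >> shift) & ((1u << width) - 1);
 *   sign   = 1u << (width - 1);
 *   result = (field ^ sign) - sign;
 *
 * e.g. with width = 4 and field = 0xe: (0xe ^ 0x8) - 0x8 = 0x6 - 0x8 = -2,
 * which is the signed value of the 4-bit pattern 0b1110.
 */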
319 /* Return (b << 32) + a. Mark inputs as dead */
320 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
322 TCGv_i64 tmp64 = tcg_temp_new_i64();
324 tcg_gen_extu_i32_i64(tmp64, b);
325 tcg_temp_free_i32(b);
326 tcg_gen_shli_i64(tmp64, tmp64, 32);
327 tcg_gen_add_i64(a, tmp64, a);
329 tcg_temp_free_i64(tmp64);
330 return a;
333 /* Return (b << 32) - a. Mark inputs as dead. */
334 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
336 TCGv_i64 tmp64 = tcg_temp_new_i64();
338 tcg_gen_extu_i32_i64(tmp64, b);
339 tcg_temp_free_i32(b);
340 tcg_gen_shli_i64(tmp64, tmp64, 32);
341 tcg_gen_sub_i64(a, tmp64, a);
343 tcg_temp_free_i64(tmp64);
344 return a;
347 /* 32x32->64 multiply. Marks inputs as dead. */
348 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
350 TCGv_i32 lo = tcg_temp_new_i32();
351 TCGv_i32 hi = tcg_temp_new_i32();
352 TCGv_i64 ret;
354 tcg_gen_mulu2_i32(lo, hi, a, b);
355 tcg_temp_free_i32(a);
356 tcg_temp_free_i32(b);
358 ret = tcg_temp_new_i64();
359 tcg_gen_concat_i32_i64(ret, lo, hi);
360 tcg_temp_free_i32(lo);
361 tcg_temp_free_i32(hi);
363 return ret;
366 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
368 TCGv_i32 lo = tcg_temp_new_i32();
369 TCGv_i32 hi = tcg_temp_new_i32();
370 TCGv_i64 ret;
372 tcg_gen_muls2_i32(lo, hi, a, b);
373 tcg_temp_free_i32(a);
374 tcg_temp_free_i32(b);
376 ret = tcg_temp_new_i64();
377 tcg_gen_concat_i32_i64(ret, lo, hi);
378 tcg_temp_free_i32(lo);
379 tcg_temp_free_i32(hi);
381 return ret;
384 /* Swap low and high halfwords. */
385 static void gen_swap_half(TCGv_i32 var)
387 TCGv_i32 tmp = tcg_temp_new_i32();
388 tcg_gen_shri_i32(tmp, var, 16);
389 tcg_gen_shli_i32(var, var, 16);
390 tcg_gen_or_i32(var, var, tmp);
391 tcg_temp_free_i32(tmp);
394 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
395 tmp = (t0 ^ t1) & 0x8000;
396 t0 &= ~0x8000;
397 t1 &= ~0x8000;
398 t0 = (t0 + t1) ^ tmp;
401 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
403 TCGv_i32 tmp = tcg_temp_new_i32();
404 tcg_gen_xor_i32(tmp, t0, t1);
405 tcg_gen_andi_i32(tmp, tmp, 0x8000);
406 tcg_gen_andi_i32(t0, t0, ~0x8000);
407 tcg_gen_andi_i32(t1, t1, ~0x8000);
408 tcg_gen_add_i32(t0, t0, t1);
409 tcg_gen_xor_i32(t0, t0, tmp);
410 tcg_temp_free_i32(tmp);
411 tcg_temp_free_i32(t1);
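/* Worked example (not part of the original file) of the carry-suppression
 * trick used by gen_add16() above: adding t0 = t1 = 0x00018001,
 *
 *   tmp = (t0 ^ t1) & 0x8000             = 0
 *   t0 &= ~0x8000, t1 &= ~0x8000         -> 0x00010001 each
 *   t0  = (t0 + t1) ^ tmp                = 0x00020002
 *
 * Each halfword is added modulo 2^16 (0x8001 + 0x8001 = 0x0002), whereas a
 * naive 32-bit add would let the low-halfword carry corrupt the upper one
 * (0x00018001 + 0x00018001 = 0x00030002).
 */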
414 /* Set CF to the top bit of var. */
415 static void gen_set_CF_bit31(TCGv_i32 var)
417 tcg_gen_shri_i32(cpu_CF, var, 31);
420 /* Set N and Z flags from var. */
421 static inline void gen_logic_CC(TCGv_i32 var)
423 tcg_gen_mov_i32(cpu_NF, var);
424 tcg_gen_mov_i32(cpu_ZF, var);
427 /* T0 += T1 + CF. */
428 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
430 tcg_gen_add_i32(t0, t0, t1);
431 tcg_gen_add_i32(t0, t0, cpu_CF);
434 /* dest = T0 + T1 + CF. */
435 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
437 tcg_gen_add_i32(dest, t0, t1);
438 tcg_gen_add_i32(dest, dest, cpu_CF);
441 /* dest = T0 - T1 + CF - 1. */
442 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
444 tcg_gen_sub_i32(dest, t0, t1);
445 tcg_gen_add_i32(dest, dest, cpu_CF);
446 tcg_gen_subi_i32(dest, dest, 1);
449 /* dest = T0 + T1. Compute C, N, V and Z flags */
450 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
452 TCGv_i32 tmp = tcg_temp_new_i32();
453 tcg_gen_movi_i32(tmp, 0);
454 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
455 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
456 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
457 tcg_gen_xor_i32(tmp, t0, t1);
458 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
459 tcg_temp_free_i32(tmp);
460 tcg_gen_mov_i32(dest, cpu_NF);
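/* Illustrative sketch, not part of the original file: in QEMU's flag
 * representation (N = bit 31 of NF, Z set iff ZF == 0, CF holds 0 or 1,
 * V = bit 31 of VF), gen_add_CC() above corresponds to this plain C model
 * (the helper name is hypothetical):
 *
 *   static inline uint32_t example_add_cc(uint32_t a, uint32_t b,
 *                                         uint32_t *nf, uint32_t *zf,
 *                                         uint32_t *cf, uint32_t *vf)
 *   {
 *       uint32_t res = a + b;
 *       *nf = res;
 *       *zf = res;
 *       *cf = res < a;                  // unsigned carry out
 *       *vf = (res ^ a) & ~(a ^ b);     // signed overflow in bit 31
 *       return res;
 *   }
 */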
463 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
464 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
466 TCGv_i32 tmp = tcg_temp_new_i32();
467 if (TCG_TARGET_HAS_add2_i32) {
468 tcg_gen_movi_i32(tmp, 0);
469 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
470 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
471 } else {
472 TCGv_i64 q0 = tcg_temp_new_i64();
473 TCGv_i64 q1 = tcg_temp_new_i64();
474 tcg_gen_extu_i32_i64(q0, t0);
475 tcg_gen_extu_i32_i64(q1, t1);
476 tcg_gen_add_i64(q0, q0, q1);
477 tcg_gen_extu_i32_i64(q1, cpu_CF);
478 tcg_gen_add_i64(q0, q0, q1);
479 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
480 tcg_temp_free_i64(q0);
481 tcg_temp_free_i64(q1);
483 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
484 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
485 tcg_gen_xor_i32(tmp, t0, t1);
486 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
487 tcg_temp_free_i32(tmp);
488 tcg_gen_mov_i32(dest, cpu_NF);
491 /* dest = T0 - T1. Compute C, N, V and Z flags */
492 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
494 TCGv_i32 tmp;
495 tcg_gen_sub_i32(cpu_NF, t0, t1);
496 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
497 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
498 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
499 tmp = tcg_temp_new_i32();
500 tcg_gen_xor_i32(tmp, t0, t1);
501 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
502 tcg_temp_free_i32(tmp);
503 tcg_gen_mov_i32(dest, cpu_NF);
506 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
507 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
509 TCGv_i32 tmp = tcg_temp_new_i32();
510 tcg_gen_not_i32(tmp, t1);
511 gen_adc_CC(dest, t0, tmp);
512 tcg_temp_free_i32(tmp);
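/* Note (not part of the original file): gen_sbc_CC() relies on the usual
 * A32 identity that subtract-with-carry can be expressed as an add:
 *
 *   t0 - t1 - (1 - C)  ==  t0 + ~t1 + C   (mod 2^32)
 *
 * because ~t1 == -t1 - 1 in two's complement, so the borrow handling and
 * the C/V flag computation are inherited from gen_adc_CC() unchanged.
 */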
515 #define GEN_SHIFT(name) \
516 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
518 TCGv_i32 tmp1, tmp2, tmp3; \
519 tmp1 = tcg_temp_new_i32(); \
520 tcg_gen_andi_i32(tmp1, t1, 0xff); \
521 tmp2 = tcg_const_i32(0); \
522 tmp3 = tcg_const_i32(0x1f); \
523 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
524 tcg_temp_free_i32(tmp3); \
525 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
526 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
527 tcg_temp_free_i32(tmp2); \
528 tcg_temp_free_i32(tmp1); \
530 GEN_SHIFT(shl)
531 GEN_SHIFT(shr)
532 #undef GEN_SHIFT
534 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
536 TCGv_i32 tmp1, tmp2;
537 tmp1 = tcg_temp_new_i32();
538 tcg_gen_andi_i32(tmp1, t1, 0xff);
539 tmp2 = tcg_const_i32(0x1f);
540 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
541 tcg_temp_free_i32(tmp2);
542 tcg_gen_sar_i32(dest, t0, tmp1);
543 tcg_temp_free_i32(tmp1);
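/* Illustrative sketch, not part of the original file: the movcond sequences
 * in GEN_SHIFT and gen_sar() above implement register-specified shifts,
 * where the amount is the low byte of t1 and amounts above 31 need special
 * handling.  In plain C terms:
 *
 *   amt = t1 & 0xff;
 *   LSL/LSR: result = (amt > 31) ? 0 : (value << amt)  or  (value >> amt);
 *   ASR:     result = (int32_t)value >> ((amt > 31) ? 31 : amt);
 */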
546 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
548 TCGv_i32 c0 = tcg_const_i32(0);
549 TCGv_i32 tmp = tcg_temp_new_i32();
550 tcg_gen_neg_i32(tmp, src);
551 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
552 tcg_temp_free_i32(c0);
553 tcg_temp_free_i32(tmp);
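/* Note (not part of the original file): tcg_gen_abs_i32() above is simply
 *
 *   dest = (src > 0) ? src : -src;
 *
 * computed branch-free with movcond; INT32_MIN wraps to itself, since its
 * negation is not representable in 32 bits.
 */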
556 static void shifter_out_im(TCGv_i32 var, int shift)
558 if (shift == 0) {
559 tcg_gen_andi_i32(cpu_CF, var, 1);
560 } else {
561 tcg_gen_shri_i32(cpu_CF, var, shift);
562 if (shift != 31) {
563 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
568 /* Shift by immediate. Includes special handling for shift == 0. */
569 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
570 int shift, int flags)
572 switch (shiftop) {
573 case 0: /* LSL */
574 if (shift != 0) {
575 if (flags)
576 shifter_out_im(var, 32 - shift);
577 tcg_gen_shli_i32(var, var, shift);
579 break;
580 case 1: /* LSR */
581 if (shift == 0) {
582 if (flags) {
583 tcg_gen_shri_i32(cpu_CF, var, 31);
585 tcg_gen_movi_i32(var, 0);
586 } else {
587 if (flags)
588 shifter_out_im(var, shift - 1);
589 tcg_gen_shri_i32(var, var, shift);
591 break;
592 case 2: /* ASR */
593 if (shift == 0)
594 shift = 32;
595 if (flags)
596 shifter_out_im(var, shift - 1);
597 if (shift == 32)
598 shift = 31;
599 tcg_gen_sari_i32(var, var, shift);
600 break;
601 case 3: /* ROR/RRX */
602 if (shift != 0) {
603 if (flags)
604 shifter_out_im(var, shift - 1);
605 tcg_gen_rotri_i32(var, var, shift); break;
606 } else {
607 TCGv_i32 tmp = tcg_temp_new_i32();
608 tcg_gen_shli_i32(tmp, cpu_CF, 31);
609 if (flags)
610 shifter_out_im(var, 0);
611 tcg_gen_shri_i32(var, var, 1);
612 tcg_gen_or_i32(var, var, tmp);
613 tcg_temp_free_i32(tmp);
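/* Illustrative sketch, not part of the original file: gen_arm_shift_im()
 * above also covers the A32 immediate-shift encodings where an encoded
 * amount of 0 is reused for something else:
 *
 *   LSR #0, ASR #0  encode a shift by 32:  LSR gives 0 (C = old bit 31),
 *                                          ASR gives 0 or 0xffffffff.
 *   ROR #0          encodes RRX:  result = (C << 31) | (value >> 1),
 *                                 new C  = old bit 0.
 */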
618 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
619 TCGv_i32 shift, int flags)
621 if (flags) {
622 switch (shiftop) {
623 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
624 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
625 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
626 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
628 } else {
629 switch (shiftop) {
630 case 0:
631 gen_shl(var, var, shift);
632 break;
633 case 1:
634 gen_shr(var, var, shift);
635 break;
636 case 2:
637 gen_sar(var, var, shift);
638 break;
639 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
640 tcg_gen_rotr_i32(var, var, shift); break;
643 tcg_temp_free_i32(shift);
646 #define PAS_OP(pfx) \
647 switch (op2) { \
648 case 0: gen_pas_helper(glue(pfx,add16)); break; \
649 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
650 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
651 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
652 case 4: gen_pas_helper(glue(pfx,add8)); break; \
653 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
655 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
657 TCGv_ptr tmp;
659 switch (op1) {
660 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
661 case 1:
662 tmp = tcg_temp_new_ptr();
663 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
664 PAS_OP(s)
665 tcg_temp_free_ptr(tmp);
666 break;
667 case 5:
668 tmp = tcg_temp_new_ptr();
669 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
670 PAS_OP(u)
671 tcg_temp_free_ptr(tmp);
672 break;
673 #undef gen_pas_helper
674 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
675 case 2:
676 PAS_OP(q);
677 break;
678 case 3:
679 PAS_OP(sh);
680 break;
681 case 6:
682 PAS_OP(uq);
683 break;
684 case 7:
685 PAS_OP(uh);
686 break;
687 #undef gen_pas_helper
690 #undef PAS_OP
692 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
693 #define PAS_OP(pfx) \
694 switch (op1) { \
695 case 0: gen_pas_helper(glue(pfx,add8)); break; \
696 case 1: gen_pas_helper(glue(pfx,add16)); break; \
697 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
698 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
699 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
700 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
702 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
704 TCGv_ptr tmp;
706 switch (op2) {
707 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
708 case 0:
709 tmp = tcg_temp_new_ptr();
710 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
711 PAS_OP(s)
712 tcg_temp_free_ptr(tmp);
713 break;
714 case 4:
715 tmp = tcg_temp_new_ptr();
716 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
717 PAS_OP(u)
718 tcg_temp_free_ptr(tmp);
719 break;
720 #undef gen_pas_helper
721 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
722 case 1:
723 PAS_OP(q);
724 break;
725 case 2:
726 PAS_OP(sh);
727 break;
728 case 5:
729 PAS_OP(uq);
730 break;
731 case 6:
732 PAS_OP(uh);
733 break;
734 #undef gen_pas_helper
737 #undef PAS_OP
740 * Generate a conditional based on ARM condition code cc.
741  * This is common between ARM and AArch64 targets.
743 void arm_test_cc(DisasCompare *cmp, int cc)
745 TCGv_i32 value;
746 TCGCond cond;
747 bool global = true;
749 switch (cc) {
750 case 0: /* eq: Z */
751 case 1: /* ne: !Z */
752 cond = TCG_COND_EQ;
753 value = cpu_ZF;
754 break;
756 case 2: /* cs: C */
757 case 3: /* cc: !C */
758 cond = TCG_COND_NE;
759 value = cpu_CF;
760 break;
762 case 4: /* mi: N */
763 case 5: /* pl: !N */
764 cond = TCG_COND_LT;
765 value = cpu_NF;
766 break;
768 case 6: /* vs: V */
769 case 7: /* vc: !V */
770 cond = TCG_COND_LT;
771 value = cpu_VF;
772 break;
774 case 8: /* hi: C && !Z */
775 case 9: /* ls: !C || Z -> !(C && !Z) */
776 cond = TCG_COND_NE;
777 value = tcg_temp_new_i32();
778 global = false;
779 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
780 ZF is non-zero for !Z; so AND the two subexpressions. */
781 tcg_gen_neg_i32(value, cpu_CF);
782 tcg_gen_and_i32(value, value, cpu_ZF);
783 break;
785 case 10: /* ge: N == V -> N ^ V == 0 */
786 case 11: /* lt: N != V -> N ^ V != 0 */
787 /* Since we're only interested in the sign bit, == 0 is >= 0. */
788 cond = TCG_COND_GE;
789 value = tcg_temp_new_i32();
790 global = false;
791 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
792 break;
794 case 12: /* gt: !Z && N == V */
795 case 13: /* le: Z || N != V */
796 cond = TCG_COND_NE;
797 value = tcg_temp_new_i32();
798 global = false;
799 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
800 * the sign bit then AND with ZF to yield the result. */
801 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
802 tcg_gen_sari_i32(value, value, 31);
803 tcg_gen_andc_i32(value, cpu_ZF, value);
804 break;
806 case 14: /* always */
807 case 15: /* always */
808 /* Use the ALWAYS condition, which will fold early.
809 * It doesn't matter what we use for the value. */
810 cond = TCG_COND_ALWAYS;
811 value = cpu_ZF;
812 goto no_invert;
814 default:
815 fprintf(stderr, "Bad condition code 0x%x\n", cc);
816 abort();
819 if (cc & 1) {
820 cond = tcg_invert_cond(cond);
823 no_invert:
824 cmp->cond = cond;
825 cmp->value = value;
826 cmp->value_global = global;
829 void arm_free_cc(DisasCompare *cmp)
831 if (!cmp->value_global) {
832 tcg_temp_free_i32(cmp->value);
836 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
838 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
841 void arm_gen_test_cc(int cc, TCGLabel *label)
843 DisasCompare cmp;
844 arm_test_cc(&cmp, cc);
845 arm_jump_cc(&cmp, label);
846 arm_free_cc(&cmp);
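/* Illustrative usage sketch, not part of the original file: a typical
 * pattern for conditionally executed instructions is to branch over the
 * generated body when the condition fails, by testing the inverted code:
 *
 *   TCGLabel *skip = gen_new_label();
 *   arm_gen_test_cc(cond ^ 1, skip);    // taken if 'cond' does NOT hold
 *   ... generate the conditional body ...
 *   gen_set_label(skip);
 */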
849 static const uint8_t table_logic_cc[16] = {
850 1, /* and */
851 1, /* xor */
852 0, /* sub */
853 0, /* rsb */
854 0, /* add */
855 0, /* adc */
856 0, /* sbc */
857 0, /* rsc */
858 1, /* andl */
859 1, /* xorl */
860 0, /* cmp */
861 0, /* cmn */
862 1, /* orr */
863 1, /* mov */
864 1, /* bic */
865 1, /* mvn */
868 /* Set PC and Thumb state from an immediate address. */
869 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
871 TCGv_i32 tmp;
873 s->is_jmp = DISAS_UPDATE;
874 if (s->thumb != (addr & 1)) {
875 tmp = tcg_temp_new_i32();
876 tcg_gen_movi_i32(tmp, addr & 1);
877 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
878 tcg_temp_free_i32(tmp);
880 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
883 /* Set PC and Thumb state from var. var is marked as dead. */
884 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
886 s->is_jmp = DISAS_UPDATE;
887 tcg_gen_andi_i32(cpu_R[15], var, ~1);
888 tcg_gen_andi_i32(var, var, 1);
889 store_cpu_field(var, thumb);
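/* Worked example (not part of the original file): gen_bx() implements the
 * interworking-branch behaviour where bit 0 of the target selects the
 * instruction set, e.g. branching to 0x00008001 sets PC = 0x00008000 and
 * enters Thumb state, while 0x00008000 stays in (or returns to) ARM state.
 */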
892 /* Variant of store_reg which uses branch&exchange logic when storing
893 to r15 in ARM architecture v7 and above. The source must be a temporary
894 and will be marked as dead. */
895 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
897 if (reg == 15 && ENABLE_ARCH_7) {
898 gen_bx(s, var);
899 } else {
900 store_reg(s, reg, var);
904 /* Variant of store_reg which uses branch&exchange logic when storing
905 * to r15 in ARM architecture v5T and above. This is used for storing
906 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
907 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
908 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
910 if (reg == 15 && ENABLE_ARCH_5) {
911 gen_bx(s, var);
912 } else {
913 store_reg(s, reg, var);
917 /* Abstractions of "generate code to do a guest load/store for
918 * AArch32", where a vaddr is always 32 bits (and is zero
919 * extended if we're a 64 bit core) and data is also
920 * 32 bits unless specifically doing a 64 bit access.
921 * These functions work like tcg_gen_qemu_{ld,st}* except
922 * that the address argument is TCGv_i32 rather than TCGv.
924 #if TARGET_LONG_BITS == 32
926 #define DO_GEN_LD(SUFF, OPC) \
927 static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
929 tcg_gen_qemu_ld_i32(val, addr, index, OPC); \
932 #define DO_GEN_ST(SUFF, OPC) \
933 static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
935 tcg_gen_qemu_st_i32(val, addr, index, OPC); \
938 static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
940 tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
943 static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
945 tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
948 #else
950 #define DO_GEN_LD(SUFF, OPC) \
951 static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
953 TCGv addr64 = tcg_temp_new(); \
954 tcg_gen_extu_i32_i64(addr64, addr); \
955 tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
956 tcg_temp_free(addr64); \
959 #define DO_GEN_ST(SUFF, OPC) \
960 static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
962 TCGv addr64 = tcg_temp_new(); \
963 tcg_gen_extu_i32_i64(addr64, addr); \
964 tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
965 tcg_temp_free(addr64); \
968 static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
970 TCGv addr64 = tcg_temp_new();
971 tcg_gen_extu_i32_i64(addr64, addr);
972 tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
973 tcg_temp_free(addr64);
976 static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
978 TCGv addr64 = tcg_temp_new();
979 tcg_gen_extu_i32_i64(addr64, addr);
980 tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
981 tcg_temp_free(addr64);
984 #endif
986 DO_GEN_LD(8s, MO_SB)
987 DO_GEN_LD(8u, MO_UB)
988 DO_GEN_LD(16s, MO_TESW)
989 DO_GEN_LD(16u, MO_TEUW)
990 DO_GEN_LD(32u, MO_TEUL)
991 DO_GEN_ST(8, MO_UB)
992 DO_GEN_ST(16, MO_TEUW)
993 DO_GEN_ST(32, MO_TEUL)
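/* Illustrative sketch, not part of the original file: on a 32-bit target
 * the instantiations above expand to thin wrappers such as
 *
 *   static inline void gen_aa32_ld16u(TCGv_i32 val, TCGv_i32 addr, int index)
 *   {
 *       tcg_gen_qemu_ld_i32(val, addr, index, MO_TEUW);
 *   }
 *
 * while the 64-bit-target variants first zero-extend the 32-bit guest
 * address into a full-width TCGv temporary before issuing the access.
 */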
995 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
997 tcg_gen_movi_i32(cpu_R[15], val);
1000 static inline void gen_hvc(DisasContext *s, int imm16)
1002 /* The pre HVC helper handles cases when HVC gets trapped
1003 * as an undefined insn by runtime configuration (ie before
1004 * the insn really executes).
1006 gen_set_pc_im(s, s->pc - 4);
1007 gen_helper_pre_hvc(cpu_env);
1008 /* Otherwise we will treat this as a real exception which
1009 * happens after execution of the insn. (The distinction matters
1010 * for the PC value reported to the exception handler and also
1011 * for single stepping.)
1013 s->svc_imm = imm16;
1014 gen_set_pc_im(s, s->pc);
1015 s->is_jmp = DISAS_HVC;
1018 static inline void gen_smc(DisasContext *s)
1020 /* As with HVC, we may take an exception either before or after
1021 * the insn executes.
1023 TCGv_i32 tmp;
1025 gen_set_pc_im(s, s->pc - 4);
1026 tmp = tcg_const_i32(syn_aa32_smc());
1027 gen_helper_pre_smc(cpu_env, tmp);
1028 tcg_temp_free_i32(tmp);
1029 gen_set_pc_im(s, s->pc);
1030 s->is_jmp = DISAS_SMC;
1033 static inline void
1034 gen_set_condexec (DisasContext *s)
1036 if (s->condexec_mask) {
1037 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
1038 TCGv_i32 tmp = tcg_temp_new_i32();
1039 tcg_gen_movi_i32(tmp, val);
1040 store_cpu_field(tmp, condexec_bits);
1044 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1046 gen_set_condexec(s);
1047 gen_set_pc_im(s, s->pc - offset);
1048 gen_exception_internal(excp);
1049 s->is_jmp = DISAS_JUMP;
1052 static void gen_exception_insn(DisasContext *s, int offset, int excp,
1053 int syn, uint32_t target_el)
1055 gen_set_condexec(s);
1056 gen_set_pc_im(s, s->pc - offset);
1057 gen_exception(excp, syn, target_el);
1058 s->is_jmp = DISAS_JUMP;
1061 /* Force a TB lookup after an instruction that changes the CPU state. */
1062 static inline void gen_lookup_tb(DisasContext *s)
1064 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
1065 s->is_jmp = DISAS_UPDATE;
1068 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
1069 TCGv_i32 var)
1071 int val, rm, shift, shiftop;
1072 TCGv_i32 offset;
1074 if (!(insn & (1 << 25))) {
1075 /* immediate */
1076 val = insn & 0xfff;
1077 if (!(insn & (1 << 23)))
1078 val = -val;
1079 if (val != 0)
1080 tcg_gen_addi_i32(var, var, val);
1081 } else {
1082 /* shift/register */
1083 rm = (insn) & 0xf;
1084 shift = (insn >> 7) & 0x1f;
1085 shiftop = (insn >> 5) & 3;
1086 offset = load_reg(s, rm);
1087 gen_arm_shift_im(offset, shiftop, shift, 0);
1088 if (!(insn & (1 << 23)))
1089 tcg_gen_sub_i32(var, var, offset);
1090 else
1091 tcg_gen_add_i32(var, var, offset);
1092 tcg_temp_free_i32(offset);
1096 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
1097 int extra, TCGv_i32 var)
1099 int val, rm;
1100 TCGv_i32 offset;
1102 if (insn & (1 << 22)) {
1103 /* immediate */
1104 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1105 if (!(insn & (1 << 23)))
1106 val = -val;
1107 val += extra;
1108 if (val != 0)
1109 tcg_gen_addi_i32(var, var, val);
1110 } else {
1111 /* register */
1112 if (extra)
1113 tcg_gen_addi_i32(var, var, extra);
1114 rm = (insn) & 0xf;
1115 offset = load_reg(s, rm);
1116 if (!(insn & (1 << 23)))
1117 tcg_gen_sub_i32(var, var, offset);
1118 else
1119 tcg_gen_add_i32(var, var, offset);
1120 tcg_temp_free_i32(offset);
1124 static TCGv_ptr get_fpstatus_ptr(int neon)
1126 TCGv_ptr statusptr = tcg_temp_new_ptr();
1127 int offset;
1128 if (neon) {
1129 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1130 } else {
1131 offset = offsetof(CPUARMState, vfp.fp_status);
1133 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1134 return statusptr;
1137 #define VFP_OP2(name) \
1138 static inline void gen_vfp_##name(int dp) \
1140 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1141 if (dp) { \
1142 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1143 } else { \
1144 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1146 tcg_temp_free_ptr(fpst); \
1149 VFP_OP2(add)
1150 VFP_OP2(sub)
1151 VFP_OP2(mul)
1152 VFP_OP2(div)
1154 #undef VFP_OP2
1156 static inline void gen_vfp_F1_mul(int dp)
1158 /* Like gen_vfp_mul() but put result in F1 */
1159 TCGv_ptr fpst = get_fpstatus_ptr(0);
1160 if (dp) {
1161 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1162 } else {
1163 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1165 tcg_temp_free_ptr(fpst);
1168 static inline void gen_vfp_F1_neg(int dp)
1170 /* Like gen_vfp_neg() but put result in F1 */
1171 if (dp) {
1172 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1173 } else {
1174 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1178 static inline void gen_vfp_abs(int dp)
1180 if (dp)
1181 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1182 else
1183 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1186 static inline void gen_vfp_neg(int dp)
1188 if (dp)
1189 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1190 else
1191 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1194 static inline void gen_vfp_sqrt(int dp)
1196 if (dp)
1197 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1198 else
1199 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1202 static inline void gen_vfp_cmp(int dp)
1204 if (dp)
1205 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1206 else
1207 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1210 static inline void gen_vfp_cmpe(int dp)
1212 if (dp)
1213 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1214 else
1215 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1218 static inline void gen_vfp_F1_ld0(int dp)
1220 if (dp)
1221 tcg_gen_movi_i64(cpu_F1d, 0);
1222 else
1223 tcg_gen_movi_i32(cpu_F1s, 0);
1226 #define VFP_GEN_ITOF(name) \
1227 static inline void gen_vfp_##name(int dp, int neon) \
1229 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1230 if (dp) { \
1231 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1232 } else { \
1233 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1235 tcg_temp_free_ptr(statusptr); \
1238 VFP_GEN_ITOF(uito)
1239 VFP_GEN_ITOF(sito)
1240 #undef VFP_GEN_ITOF
1242 #define VFP_GEN_FTOI(name) \
1243 static inline void gen_vfp_##name(int dp, int neon) \
1245 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1246 if (dp) { \
1247 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1248 } else { \
1249 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1251 tcg_temp_free_ptr(statusptr); \
1254 VFP_GEN_FTOI(toui)
1255 VFP_GEN_FTOI(touiz)
1256 VFP_GEN_FTOI(tosi)
1257 VFP_GEN_FTOI(tosiz)
1258 #undef VFP_GEN_FTOI
1260 #define VFP_GEN_FIX(name, round) \
1261 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1263 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1264 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1265 if (dp) { \
1266 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1267 statusptr); \
1268 } else { \
1269 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1270 statusptr); \
1272 tcg_temp_free_i32(tmp_shift); \
1273 tcg_temp_free_ptr(statusptr); \
1275 VFP_GEN_FIX(tosh, _round_to_zero)
1276 VFP_GEN_FIX(tosl, _round_to_zero)
1277 VFP_GEN_FIX(touh, _round_to_zero)
1278 VFP_GEN_FIX(toul, _round_to_zero)
1279 VFP_GEN_FIX(shto, )
1280 VFP_GEN_FIX(slto, )
1281 VFP_GEN_FIX(uhto, )
1282 VFP_GEN_FIX(ulto, )
1283 #undef VFP_GEN_FIX
1285 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1287 if (dp) {
1288 gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
1289 } else {
1290 gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
1294 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1296 if (dp) {
1297 gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
1298 } else {
1299 gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
1303 static inline long
1304 vfp_reg_offset (int dp, int reg)
1306 if (dp)
1307 return offsetof(CPUARMState, vfp.regs[reg]);
1308 else if (reg & 1) {
1309 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1310 + offsetof(CPU_DoubleU, l.upper);
1311 } else {
1312 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1313 + offsetof(CPU_DoubleU, l.lower);
1317 /* Return the offset of a 32-bit piece of a NEON register.
1318 zero is the least significant end of the register. */
1319 static inline long
1320 neon_reg_offset (int reg, int n)
1322 int sreg;
1323 sreg = reg * 2 + n;
1324 return vfp_reg_offset(0, sreg);
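/* Illustrative sketch, not part of the original file: neon_reg_offset()
 * maps a 32-bit slice of a NEON register onto the shared vfp.regs[] array,
 * with pass 0 the least significant word:
 *
 *   neon_reg_offset(0, 0)  ->  low  word of d0
 *   neon_reg_offset(0, 1)  ->  high word of d0
 *   neon_reg_offset(2, 1)  ->  high word of d2  (d2 is the low half of q1)
 */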
1327 static TCGv_i32 neon_load_reg(int reg, int pass)
1329 TCGv_i32 tmp = tcg_temp_new_i32();
1330 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1331 return tmp;
1334 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1336 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1337 tcg_temp_free_i32(var);
1340 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1342 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1345 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1347 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1350 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1351 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1352 #define tcg_gen_st_f32 tcg_gen_st_i32
1353 #define tcg_gen_st_f64 tcg_gen_st_i64
1355 static inline void gen_mov_F0_vreg(int dp, int reg)
1357 if (dp)
1358 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1359 else
1360 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1363 static inline void gen_mov_F1_vreg(int dp, int reg)
1365 if (dp)
1366 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1367 else
1368 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1371 static inline void gen_mov_vreg_F0(int dp, int reg)
1373 if (dp)
1374 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1375 else
1376 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1379 #define ARM_CP_RW_BIT (1 << 20)
1381 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1383 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1386 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1388 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1391 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1393 TCGv_i32 var = tcg_temp_new_i32();
1394 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1395 return var;
1398 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1400 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1401 tcg_temp_free_i32(var);
1404 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1406 iwmmxt_store_reg(cpu_M0, rn);
1409 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1411 iwmmxt_load_reg(cpu_M0, rn);
1414 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1416 iwmmxt_load_reg(cpu_V1, rn);
1417 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1420 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1422 iwmmxt_load_reg(cpu_V1, rn);
1423 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1426 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1428 iwmmxt_load_reg(cpu_V1, rn);
1429 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1432 #define IWMMXT_OP(name) \
1433 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1435 iwmmxt_load_reg(cpu_V1, rn); \
1436 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1439 #define IWMMXT_OP_ENV(name) \
1440 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1442 iwmmxt_load_reg(cpu_V1, rn); \
1443 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1446 #define IWMMXT_OP_ENV_SIZE(name) \
1447 IWMMXT_OP_ENV(name##b) \
1448 IWMMXT_OP_ENV(name##w) \
1449 IWMMXT_OP_ENV(name##l)
1451 #define IWMMXT_OP_ENV1(name) \
1452 static inline void gen_op_iwmmxt_##name##_M0(void) \
1454 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1457 IWMMXT_OP(maddsq)
1458 IWMMXT_OP(madduq)
1459 IWMMXT_OP(sadb)
1460 IWMMXT_OP(sadw)
1461 IWMMXT_OP(mulslw)
1462 IWMMXT_OP(mulshw)
1463 IWMMXT_OP(mululw)
1464 IWMMXT_OP(muluhw)
1465 IWMMXT_OP(macsw)
1466 IWMMXT_OP(macuw)
1468 IWMMXT_OP_ENV_SIZE(unpackl)
1469 IWMMXT_OP_ENV_SIZE(unpackh)
1471 IWMMXT_OP_ENV1(unpacklub)
1472 IWMMXT_OP_ENV1(unpackluw)
1473 IWMMXT_OP_ENV1(unpacklul)
1474 IWMMXT_OP_ENV1(unpackhub)
1475 IWMMXT_OP_ENV1(unpackhuw)
1476 IWMMXT_OP_ENV1(unpackhul)
1477 IWMMXT_OP_ENV1(unpacklsb)
1478 IWMMXT_OP_ENV1(unpacklsw)
1479 IWMMXT_OP_ENV1(unpacklsl)
1480 IWMMXT_OP_ENV1(unpackhsb)
1481 IWMMXT_OP_ENV1(unpackhsw)
1482 IWMMXT_OP_ENV1(unpackhsl)
1484 IWMMXT_OP_ENV_SIZE(cmpeq)
1485 IWMMXT_OP_ENV_SIZE(cmpgtu)
1486 IWMMXT_OP_ENV_SIZE(cmpgts)
1488 IWMMXT_OP_ENV_SIZE(mins)
1489 IWMMXT_OP_ENV_SIZE(minu)
1490 IWMMXT_OP_ENV_SIZE(maxs)
1491 IWMMXT_OP_ENV_SIZE(maxu)
1493 IWMMXT_OP_ENV_SIZE(subn)
1494 IWMMXT_OP_ENV_SIZE(addn)
1495 IWMMXT_OP_ENV_SIZE(subu)
1496 IWMMXT_OP_ENV_SIZE(addu)
1497 IWMMXT_OP_ENV_SIZE(subs)
1498 IWMMXT_OP_ENV_SIZE(adds)
1500 IWMMXT_OP_ENV(avgb0)
1501 IWMMXT_OP_ENV(avgb1)
1502 IWMMXT_OP_ENV(avgw0)
1503 IWMMXT_OP_ENV(avgw1)
1505 IWMMXT_OP_ENV(packuw)
1506 IWMMXT_OP_ENV(packul)
1507 IWMMXT_OP_ENV(packuq)
1508 IWMMXT_OP_ENV(packsw)
1509 IWMMXT_OP_ENV(packsl)
1510 IWMMXT_OP_ENV(packsq)
1512 static void gen_op_iwmmxt_set_mup(void)
1514 TCGv_i32 tmp;
1515 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1516 tcg_gen_ori_i32(tmp, tmp, 2);
1517 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1520 static void gen_op_iwmmxt_set_cup(void)
1522 TCGv_i32 tmp;
1523 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1524 tcg_gen_ori_i32(tmp, tmp, 1);
1525 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1528 static void gen_op_iwmmxt_setpsr_nz(void)
1530 TCGv_i32 tmp = tcg_temp_new_i32();
1531 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1532 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1535 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1537 iwmmxt_load_reg(cpu_V1, rn);
1538 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1539 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1542 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1543 TCGv_i32 dest)
1545 int rd;
1546 uint32_t offset;
1547 TCGv_i32 tmp;
1549 rd = (insn >> 16) & 0xf;
1550 tmp = load_reg(s, rd);
1552 offset = (insn & 0xff) << ((insn >> 7) & 2);
1553 if (insn & (1 << 24)) {
1554 /* Pre indexed */
1555 if (insn & (1 << 23))
1556 tcg_gen_addi_i32(tmp, tmp, offset);
1557 else
1558 tcg_gen_addi_i32(tmp, tmp, -offset);
1559 tcg_gen_mov_i32(dest, tmp);
1560 if (insn & (1 << 21))
1561 store_reg(s, rd, tmp);
1562 else
1563 tcg_temp_free_i32(tmp);
1564 } else if (insn & (1 << 21)) {
1565 /* Post indexed */
1566 tcg_gen_mov_i32(dest, tmp);
1567 if (insn & (1 << 23))
1568 tcg_gen_addi_i32(tmp, tmp, offset);
1569 else
1570 tcg_gen_addi_i32(tmp, tmp, -offset);
1571 store_reg(s, rd, tmp);
1572 } else if (!(insn & (1 << 23)))
1573 return 1;
1574 return 0;
1577 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1579 int rd = (insn >> 0) & 0xf;
1580 TCGv_i32 tmp;
1582 if (insn & (1 << 8)) {
1583 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1584 return 1;
1585 } else {
1586 tmp = iwmmxt_load_creg(rd);
1588 } else {
1589 tmp = tcg_temp_new_i32();
1590 iwmmxt_load_reg(cpu_V0, rd);
1591 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1593 tcg_gen_andi_i32(tmp, tmp, mask);
1594 tcg_gen_mov_i32(dest, tmp);
1595 tcg_temp_free_i32(tmp);
1596 return 0;
1599 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1600 (ie. an undefined instruction). */
1601 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1603 int rd, wrd;
1604 int rdhi, rdlo, rd0, rd1, i;
1605 TCGv_i32 addr;
1606 TCGv_i32 tmp, tmp2, tmp3;
1608 if ((insn & 0x0e000e00) == 0x0c000000) {
1609 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1610 wrd = insn & 0xf;
1611 rdlo = (insn >> 12) & 0xf;
1612 rdhi = (insn >> 16) & 0xf;
1613 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1614 iwmmxt_load_reg(cpu_V0, wrd);
1615 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1616 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1617 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1618 } else { /* TMCRR */
1619 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1620 iwmmxt_store_reg(cpu_V0, wrd);
1621 gen_op_iwmmxt_set_mup();
1623 return 0;
1626 wrd = (insn >> 12) & 0xf;
1627 addr = tcg_temp_new_i32();
1628 if (gen_iwmmxt_address(s, insn, addr)) {
1629 tcg_temp_free_i32(addr);
1630 return 1;
1632 if (insn & ARM_CP_RW_BIT) {
1633 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1634 tmp = tcg_temp_new_i32();
1635 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
1636 iwmmxt_store_creg(wrd, tmp);
1637 } else {
1638 i = 1;
1639 if (insn & (1 << 8)) {
1640 if (insn & (1 << 22)) { /* WLDRD */
1641 gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
1642 i = 0;
1643 } else { /* WLDRW wRd */
1644 tmp = tcg_temp_new_i32();
1645 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
1647 } else {
1648 tmp = tcg_temp_new_i32();
1649 if (insn & (1 << 22)) { /* WLDRH */
1650 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
1651 } else { /* WLDRB */
1652 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
1655 if (i) {
1656 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1657 tcg_temp_free_i32(tmp);
1659 gen_op_iwmmxt_movq_wRn_M0(wrd);
1661 } else {
1662 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1663 tmp = iwmmxt_load_creg(wrd);
1664 gen_aa32_st32(tmp, addr, get_mem_index(s));
1665 } else {
1666 gen_op_iwmmxt_movq_M0_wRn(wrd);
1667 tmp = tcg_temp_new_i32();
1668 if (insn & (1 << 8)) {
1669 if (insn & (1 << 22)) { /* WSTRD */
1670 gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
1671 } else { /* WSTRW wRd */
1672 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1673 gen_aa32_st32(tmp, addr, get_mem_index(s));
1675 } else {
1676 if (insn & (1 << 22)) { /* WSTRH */
1677 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1678 gen_aa32_st16(tmp, addr, get_mem_index(s));
1679 } else { /* WSTRB */
1680 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1681 gen_aa32_st8(tmp, addr, get_mem_index(s));
1685 tcg_temp_free_i32(tmp);
1687 tcg_temp_free_i32(addr);
1688 return 0;
1691 if ((insn & 0x0f000000) != 0x0e000000)
1692 return 1;
1694 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1695 case 0x000: /* WOR */
1696 wrd = (insn >> 12) & 0xf;
1697 rd0 = (insn >> 0) & 0xf;
1698 rd1 = (insn >> 16) & 0xf;
1699 gen_op_iwmmxt_movq_M0_wRn(rd0);
1700 gen_op_iwmmxt_orq_M0_wRn(rd1);
1701 gen_op_iwmmxt_setpsr_nz();
1702 gen_op_iwmmxt_movq_wRn_M0(wrd);
1703 gen_op_iwmmxt_set_mup();
1704 gen_op_iwmmxt_set_cup();
1705 break;
1706 case 0x011: /* TMCR */
1707 if (insn & 0xf)
1708 return 1;
1709 rd = (insn >> 12) & 0xf;
1710 wrd = (insn >> 16) & 0xf;
1711 switch (wrd) {
1712 case ARM_IWMMXT_wCID:
1713 case ARM_IWMMXT_wCASF:
1714 break;
1715 case ARM_IWMMXT_wCon:
1716 gen_op_iwmmxt_set_cup();
1717 /* Fall through. */
1718 case ARM_IWMMXT_wCSSF:
1719 tmp = iwmmxt_load_creg(wrd);
1720 tmp2 = load_reg(s, rd);
1721 tcg_gen_andc_i32(tmp, tmp, tmp2);
1722 tcg_temp_free_i32(tmp2);
1723 iwmmxt_store_creg(wrd, tmp);
1724 break;
1725 case ARM_IWMMXT_wCGR0:
1726 case ARM_IWMMXT_wCGR1:
1727 case ARM_IWMMXT_wCGR2:
1728 case ARM_IWMMXT_wCGR3:
1729 gen_op_iwmmxt_set_cup();
1730 tmp = load_reg(s, rd);
1731 iwmmxt_store_creg(wrd, tmp);
1732 break;
1733 default:
1734 return 1;
1736 break;
1737 case 0x100: /* WXOR */
1738 wrd = (insn >> 12) & 0xf;
1739 rd0 = (insn >> 0) & 0xf;
1740 rd1 = (insn >> 16) & 0xf;
1741 gen_op_iwmmxt_movq_M0_wRn(rd0);
1742 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1743 gen_op_iwmmxt_setpsr_nz();
1744 gen_op_iwmmxt_movq_wRn_M0(wrd);
1745 gen_op_iwmmxt_set_mup();
1746 gen_op_iwmmxt_set_cup();
1747 break;
1748 case 0x111: /* TMRC */
1749 if (insn & 0xf)
1750 return 1;
1751 rd = (insn >> 12) & 0xf;
1752 wrd = (insn >> 16) & 0xf;
1753 tmp = iwmmxt_load_creg(wrd);
1754 store_reg(s, rd, tmp);
1755 break;
1756 case 0x300: /* WANDN */
1757 wrd = (insn >> 12) & 0xf;
1758 rd0 = (insn >> 0) & 0xf;
1759 rd1 = (insn >> 16) & 0xf;
1760 gen_op_iwmmxt_movq_M0_wRn(rd0);
1761 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1762 gen_op_iwmmxt_andq_M0_wRn(rd1);
1763 gen_op_iwmmxt_setpsr_nz();
1764 gen_op_iwmmxt_movq_wRn_M0(wrd);
1765 gen_op_iwmmxt_set_mup();
1766 gen_op_iwmmxt_set_cup();
1767 break;
1768 case 0x200: /* WAND */
1769 wrd = (insn >> 12) & 0xf;
1770 rd0 = (insn >> 0) & 0xf;
1771 rd1 = (insn >> 16) & 0xf;
1772 gen_op_iwmmxt_movq_M0_wRn(rd0);
1773 gen_op_iwmmxt_andq_M0_wRn(rd1);
1774 gen_op_iwmmxt_setpsr_nz();
1775 gen_op_iwmmxt_movq_wRn_M0(wrd);
1776 gen_op_iwmmxt_set_mup();
1777 gen_op_iwmmxt_set_cup();
1778 break;
1779 case 0x810: case 0xa10: /* WMADD */
1780 wrd = (insn >> 12) & 0xf;
1781 rd0 = (insn >> 0) & 0xf;
1782 rd1 = (insn >> 16) & 0xf;
1783 gen_op_iwmmxt_movq_M0_wRn(rd0);
1784 if (insn & (1 << 21))
1785 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1786 else
1787 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1788 gen_op_iwmmxt_movq_wRn_M0(wrd);
1789 gen_op_iwmmxt_set_mup();
1790 break;
1791 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1792 wrd = (insn >> 12) & 0xf;
1793 rd0 = (insn >> 16) & 0xf;
1794 rd1 = (insn >> 0) & 0xf;
1795 gen_op_iwmmxt_movq_M0_wRn(rd0);
1796 switch ((insn >> 22) & 3) {
1797 case 0:
1798 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1799 break;
1800 case 1:
1801 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1802 break;
1803 case 2:
1804 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1805 break;
1806 case 3:
1807 return 1;
1809 gen_op_iwmmxt_movq_wRn_M0(wrd);
1810 gen_op_iwmmxt_set_mup();
1811 gen_op_iwmmxt_set_cup();
1812 break;
1813 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1814 wrd = (insn >> 12) & 0xf;
1815 rd0 = (insn >> 16) & 0xf;
1816 rd1 = (insn >> 0) & 0xf;
1817 gen_op_iwmmxt_movq_M0_wRn(rd0);
1818 switch ((insn >> 22) & 3) {
1819 case 0:
1820 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1821 break;
1822 case 1:
1823 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1824 break;
1825 case 2:
1826 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1827 break;
1828 case 3:
1829 return 1;
1831 gen_op_iwmmxt_movq_wRn_M0(wrd);
1832 gen_op_iwmmxt_set_mup();
1833 gen_op_iwmmxt_set_cup();
1834 break;
1835 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1836 wrd = (insn >> 12) & 0xf;
1837 rd0 = (insn >> 16) & 0xf;
1838 rd1 = (insn >> 0) & 0xf;
1839 gen_op_iwmmxt_movq_M0_wRn(rd0);
1840 if (insn & (1 << 22))
1841 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1842 else
1843 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1844 if (!(insn & (1 << 20)))
1845 gen_op_iwmmxt_addl_M0_wRn(wrd);
1846 gen_op_iwmmxt_movq_wRn_M0(wrd);
1847 gen_op_iwmmxt_set_mup();
1848 break;
1849 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1850 wrd = (insn >> 12) & 0xf;
1851 rd0 = (insn >> 16) & 0xf;
1852 rd1 = (insn >> 0) & 0xf;
1853 gen_op_iwmmxt_movq_M0_wRn(rd0);
1854 if (insn & (1 << 21)) {
1855 if (insn & (1 << 20))
1856 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1857 else
1858 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1859 } else {
1860 if (insn & (1 << 20))
1861 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1862 else
1863 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1865 gen_op_iwmmxt_movq_wRn_M0(wrd);
1866 gen_op_iwmmxt_set_mup();
1867 break;
1868 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1869 wrd = (insn >> 12) & 0xf;
1870 rd0 = (insn >> 16) & 0xf;
1871 rd1 = (insn >> 0) & 0xf;
1872 gen_op_iwmmxt_movq_M0_wRn(rd0);
1873 if (insn & (1 << 21))
1874 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1875 else
1876 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1877 if (!(insn & (1 << 20))) {
1878 iwmmxt_load_reg(cpu_V1, wrd);
1879 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1881 gen_op_iwmmxt_movq_wRn_M0(wrd);
1882 gen_op_iwmmxt_set_mup();
1883 break;
1884 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1885 wrd = (insn >> 12) & 0xf;
1886 rd0 = (insn >> 16) & 0xf;
1887 rd1 = (insn >> 0) & 0xf;
1888 gen_op_iwmmxt_movq_M0_wRn(rd0);
1889 switch ((insn >> 22) & 3) {
1890 case 0:
1891 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1892 break;
1893 case 1:
1894 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1895 break;
1896 case 2:
1897 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1898 break;
1899 case 3:
1900 return 1;
1902 gen_op_iwmmxt_movq_wRn_M0(wrd);
1903 gen_op_iwmmxt_set_mup();
1904 gen_op_iwmmxt_set_cup();
1905 break;
1906 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1907 wrd = (insn >> 12) & 0xf;
1908 rd0 = (insn >> 16) & 0xf;
1909 rd1 = (insn >> 0) & 0xf;
1910 gen_op_iwmmxt_movq_M0_wRn(rd0);
1911 if (insn & (1 << 22)) {
1912 if (insn & (1 << 20))
1913 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1914 else
1915 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1916 } else {
1917 if (insn & (1 << 20))
1918 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1919 else
1920 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1922 gen_op_iwmmxt_movq_wRn_M0(wrd);
1923 gen_op_iwmmxt_set_mup();
1924 gen_op_iwmmxt_set_cup();
1925 break;
1926 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1927 wrd = (insn >> 12) & 0xf;
1928 rd0 = (insn >> 16) & 0xf;
1929 rd1 = (insn >> 0) & 0xf;
1930 gen_op_iwmmxt_movq_M0_wRn(rd0);
1931 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1932 tcg_gen_andi_i32(tmp, tmp, 7);
1933 iwmmxt_load_reg(cpu_V1, rd1);
1934 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1935 tcg_temp_free_i32(tmp);
1936 gen_op_iwmmxt_movq_wRn_M0(wrd);
1937 gen_op_iwmmxt_set_mup();
1938 break;
1939 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1940 if (((insn >> 6) & 3) == 3)
1941 return 1;
1942 rd = (insn >> 12) & 0xf;
1943 wrd = (insn >> 16) & 0xf;
1944 tmp = load_reg(s, rd);
1945 gen_op_iwmmxt_movq_M0_wRn(wrd);
1946 switch ((insn >> 6) & 3) {
1947 case 0:
1948 tmp2 = tcg_const_i32(0xff);
1949 tmp3 = tcg_const_i32((insn & 7) << 3);
1950 break;
1951 case 1:
1952 tmp2 = tcg_const_i32(0xffff);
1953 tmp3 = tcg_const_i32((insn & 3) << 4);
1954 break;
1955 case 2:
1956 tmp2 = tcg_const_i32(0xffffffff);
1957 tmp3 = tcg_const_i32((insn & 1) << 5);
1958 break;
1959 default:
1960 TCGV_UNUSED_I32(tmp2);
1961 TCGV_UNUSED_I32(tmp3);
1963 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1964 tcg_temp_free_i32(tmp3);
1965 tcg_temp_free_i32(tmp2);
1966 tcg_temp_free_i32(tmp);
1967 gen_op_iwmmxt_movq_wRn_M0(wrd);
1968 gen_op_iwmmxt_set_mup();
1969 break;
1970 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1971 rd = (insn >> 12) & 0xf;
1972 wrd = (insn >> 16) & 0xf;
1973 if (rd == 15 || ((insn >> 22) & 3) == 3)
1974 return 1;
1975 gen_op_iwmmxt_movq_M0_wRn(wrd);
1976 tmp = tcg_temp_new_i32();
1977 switch ((insn >> 22) & 3) {
1978 case 0:
1979 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1980 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1981 if (insn & 8) {
1982 tcg_gen_ext8s_i32(tmp, tmp);
1983 } else {
1984 tcg_gen_andi_i32(tmp, tmp, 0xff);
1986 break;
1987 case 1:
1988 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1989 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1990 if (insn & 8) {
1991 tcg_gen_ext16s_i32(tmp, tmp);
1992 } else {
1993 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1995 break;
1996 case 2:
1997 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1998 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1999 break;
2001 store_reg(s, rd, tmp);
2002 break;
2003 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2004 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2005 return 1;
2006 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2007 switch ((insn >> 22) & 3) {
2008 case 0:
2009 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2010 break;
2011 case 1:
2012 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2013 break;
2014 case 2:
2015 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2016 break;
2018 tcg_gen_shli_i32(tmp, tmp, 28);
2019 gen_set_nzcv(tmp);
2020 tcg_temp_free_i32(tmp);
2021 break;
2022 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2023 if (((insn >> 6) & 3) == 3)
2024 return 1;
2025 rd = (insn >> 12) & 0xf;
2026 wrd = (insn >> 16) & 0xf;
2027 tmp = load_reg(s, rd);
2028 switch ((insn >> 6) & 3) {
2029 case 0:
2030 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2031 break;
2032 case 1:
2033 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2034 break;
2035 case 2:
2036 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2037 break;
2039 tcg_temp_free_i32(tmp);
2040 gen_op_iwmmxt_movq_wRn_M0(wrd);
2041 gen_op_iwmmxt_set_mup();
2042 break;
2043 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2044 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2045 return 1;
2046 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2047 tmp2 = tcg_temp_new_i32();
2048 tcg_gen_mov_i32(tmp2, tmp);
2049 switch ((insn >> 22) & 3) {
2050 case 0:
2051 for (i = 0; i < 7; i ++) {
2052 tcg_gen_shli_i32(tmp2, tmp2, 4);
2053 tcg_gen_and_i32(tmp, tmp, tmp2);
2055 break;
2056 case 1:
2057 for (i = 0; i < 3; i ++) {
2058 tcg_gen_shli_i32(tmp2, tmp2, 8);
2059 tcg_gen_and_i32(tmp, tmp, tmp2);
2061 break;
2062 case 2:
2063 tcg_gen_shli_i32(tmp2, tmp2, 16);
2064 tcg_gen_and_i32(tmp, tmp, tmp2);
2065 break;
2067 gen_set_nzcv(tmp);
2068 tcg_temp_free_i32(tmp2);
2069 tcg_temp_free_i32(tmp);
2070 break;
2071 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2072 wrd = (insn >> 12) & 0xf;
2073 rd0 = (insn >> 16) & 0xf;
2074 gen_op_iwmmxt_movq_M0_wRn(rd0);
2075 switch ((insn >> 22) & 3) {
2076 case 0:
2077 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2078 break;
2079 case 1:
2080 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2081 break;
2082 case 2:
2083 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2084 break;
2085 case 3:
2086 return 1;
2088 gen_op_iwmmxt_movq_wRn_M0(wrd);
2089 gen_op_iwmmxt_set_mup();
2090 break;
2091 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2092 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2093 return 1;
2094 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2095 tmp2 = tcg_temp_new_i32();
2096 tcg_gen_mov_i32(tmp2, tmp);
2097 switch ((insn >> 22) & 3) {
2098 case 0:
2099 for (i = 0; i < 7; i ++) {
2100 tcg_gen_shli_i32(tmp2, tmp2, 4);
2101 tcg_gen_or_i32(tmp, tmp, tmp2);
2103 break;
2104 case 1:
2105 for (i = 0; i < 3; i ++) {
2106 tcg_gen_shli_i32(tmp2, tmp2, 8);
2107 tcg_gen_or_i32(tmp, tmp, tmp2);
2109 break;
2110 case 2:
2111 tcg_gen_shli_i32(tmp2, tmp2, 16);
2112 tcg_gen_or_i32(tmp, tmp, tmp2);
2113 break;
2115 gen_set_nzcv(tmp);
2116 tcg_temp_free_i32(tmp2);
2117 tcg_temp_free_i32(tmp);
2118 break;
2119 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2120 rd = (insn >> 12) & 0xf;
2121 rd0 = (insn >> 16) & 0xf;
2122 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2123 return 1;
2124 gen_op_iwmmxt_movq_M0_wRn(rd0);
2125 tmp = tcg_temp_new_i32();
2126 switch ((insn >> 22) & 3) {
2127 case 0:
2128 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2129 break;
2130 case 1:
2131 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2132 break;
2133 case 2:
2134 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2135 break;
2137 store_reg(s, rd, tmp);
2138 break;
2139 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2140 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2141 wrd = (insn >> 12) & 0xf;
2142 rd0 = (insn >> 16) & 0xf;
2143 rd1 = (insn >> 0) & 0xf;
2144 gen_op_iwmmxt_movq_M0_wRn(rd0);
2145 switch ((insn >> 22) & 3) {
2146 case 0:
2147 if (insn & (1 << 21))
2148 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2149 else
2150 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2151 break;
2152 case 1:
2153 if (insn & (1 << 21))
2154 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2155 else
2156 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2157 break;
2158 case 2:
2159 if (insn & (1 << 21))
2160 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2161 else
2162 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2163 break;
2164 case 3:
2165 return 1;
2167 gen_op_iwmmxt_movq_wRn_M0(wrd);
2168 gen_op_iwmmxt_set_mup();
2169 gen_op_iwmmxt_set_cup();
2170 break;
2171 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2172 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2173 wrd = (insn >> 12) & 0xf;
2174 rd0 = (insn >> 16) & 0xf;
2175 gen_op_iwmmxt_movq_M0_wRn(rd0);
2176 switch ((insn >> 22) & 3) {
2177 case 0:
2178 if (insn & (1 << 21))
2179 gen_op_iwmmxt_unpacklsb_M0();
2180 else
2181 gen_op_iwmmxt_unpacklub_M0();
2182 break;
2183 case 1:
2184 if (insn & (1 << 21))
2185 gen_op_iwmmxt_unpacklsw_M0();
2186 else
2187 gen_op_iwmmxt_unpackluw_M0();
2188 break;
2189 case 2:
2190 if (insn & (1 << 21))
2191 gen_op_iwmmxt_unpacklsl_M0();
2192 else
2193 gen_op_iwmmxt_unpacklul_M0();
2194 break;
2195 case 3:
2196 return 1;
2198 gen_op_iwmmxt_movq_wRn_M0(wrd);
2199 gen_op_iwmmxt_set_mup();
2200 gen_op_iwmmxt_set_cup();
2201 break;
2202 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2203 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2204 wrd = (insn >> 12) & 0xf;
2205 rd0 = (insn >> 16) & 0xf;
2206 gen_op_iwmmxt_movq_M0_wRn(rd0);
2207 switch ((insn >> 22) & 3) {
2208 case 0:
2209 if (insn & (1 << 21))
2210 gen_op_iwmmxt_unpackhsb_M0();
2211 else
2212 gen_op_iwmmxt_unpackhub_M0();
2213 break;
2214 case 1:
2215 if (insn & (1 << 21))
2216 gen_op_iwmmxt_unpackhsw_M0();
2217 else
2218 gen_op_iwmmxt_unpackhuw_M0();
2219 break;
2220 case 2:
2221 if (insn & (1 << 21))
2222 gen_op_iwmmxt_unpackhsl_M0();
2223 else
2224 gen_op_iwmmxt_unpackhul_M0();
2225 break;
2226 case 3:
2227 return 1;
2229 gen_op_iwmmxt_movq_wRn_M0(wrd);
2230 gen_op_iwmmxt_set_mup();
2231 gen_op_iwmmxt_set_cup();
2232 break;
2233 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2234 case 0x214: case 0x614: case 0xa14: case 0xe14:
2235 if (((insn >> 22) & 3) == 0)
2236 return 1;
2237 wrd = (insn >> 12) & 0xf;
2238 rd0 = (insn >> 16) & 0xf;
2239 gen_op_iwmmxt_movq_M0_wRn(rd0);
2240 tmp = tcg_temp_new_i32();
2241 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2242 tcg_temp_free_i32(tmp);
2243 return 1;
2245 switch ((insn >> 22) & 3) {
2246 case 1:
2247 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2248 break;
2249 case 2:
2250 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2251 break;
2252 case 3:
2253 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2254 break;
2256 tcg_temp_free_i32(tmp);
2257 gen_op_iwmmxt_movq_wRn_M0(wrd);
2258 gen_op_iwmmxt_set_mup();
2259 gen_op_iwmmxt_set_cup();
2260 break;
2261 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2262 case 0x014: case 0x414: case 0x814: case 0xc14:
2263 if (((insn >> 22) & 3) == 0)
2264 return 1;
2265 wrd = (insn >> 12) & 0xf;
2266 rd0 = (insn >> 16) & 0xf;
2267 gen_op_iwmmxt_movq_M0_wRn(rd0);
2268 tmp = tcg_temp_new_i32();
2269 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2270 tcg_temp_free_i32(tmp);
2271 return 1;
2273 switch ((insn >> 22) & 3) {
2274 case 1:
2275 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2276 break;
2277 case 2:
2278 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2279 break;
2280 case 3:
2281 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2282 break;
2284 tcg_temp_free_i32(tmp);
2285 gen_op_iwmmxt_movq_wRn_M0(wrd);
2286 gen_op_iwmmxt_set_mup();
2287 gen_op_iwmmxt_set_cup();
2288 break;
2289 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2290 case 0x114: case 0x514: case 0x914: case 0xd14:
2291 if (((insn >> 22) & 3) == 0)
2292 return 1;
2293 wrd = (insn >> 12) & 0xf;
2294 rd0 = (insn >> 16) & 0xf;
2295 gen_op_iwmmxt_movq_M0_wRn(rd0);
2296 tmp = tcg_temp_new_i32();
2297 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2298 tcg_temp_free_i32(tmp);
2299 return 1;
2301 switch ((insn >> 22) & 3) {
2302 case 1:
2303 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2304 break;
2305 case 2:
2306 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2307 break;
2308 case 3:
2309 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2310 break;
2312 tcg_temp_free_i32(tmp);
2313 gen_op_iwmmxt_movq_wRn_M0(wrd);
2314 gen_op_iwmmxt_set_mup();
2315 gen_op_iwmmxt_set_cup();
2316 break;
2317 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2318 case 0x314: case 0x714: case 0xb14: case 0xf14:
2319 if (((insn >> 22) & 3) == 0)
2320 return 1;
2321 wrd = (insn >> 12) & 0xf;
2322 rd0 = (insn >> 16) & 0xf;
2323 gen_op_iwmmxt_movq_M0_wRn(rd0);
2324 tmp = tcg_temp_new_i32();
2325 switch ((insn >> 22) & 3) {
2326 case 1:
2327 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2328 tcg_temp_free_i32(tmp);
2329 return 1;
2331 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2332 break;
2333 case 2:
2334 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2335 tcg_temp_free_i32(tmp);
2336 return 1;
2338 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2339 break;
2340 case 3:
2341 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2342 tcg_temp_free_i32(tmp);
2343 return 1;
2345 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2346 break;
2348 tcg_temp_free_i32(tmp);
2349 gen_op_iwmmxt_movq_wRn_M0(wrd);
2350 gen_op_iwmmxt_set_mup();
2351 gen_op_iwmmxt_set_cup();
2352 break;
2353 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2354 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2355 wrd = (insn >> 12) & 0xf;
2356 rd0 = (insn >> 16) & 0xf;
2357 rd1 = (insn >> 0) & 0xf;
2358 gen_op_iwmmxt_movq_M0_wRn(rd0);
2359 switch ((insn >> 22) & 3) {
2360 case 0:
2361 if (insn & (1 << 21))
2362 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2363 else
2364 gen_op_iwmmxt_minub_M0_wRn(rd1);
2365 break;
2366 case 1:
2367 if (insn & (1 << 21))
2368 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2369 else
2370 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2371 break;
2372 case 2:
2373 if (insn & (1 << 21))
2374 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2375 else
2376 gen_op_iwmmxt_minul_M0_wRn(rd1);
2377 break;
2378 case 3:
2379 return 1;
2381 gen_op_iwmmxt_movq_wRn_M0(wrd);
2382 gen_op_iwmmxt_set_mup();
2383 break;
2384 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2385 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2386 wrd = (insn >> 12) & 0xf;
2387 rd0 = (insn >> 16) & 0xf;
2388 rd1 = (insn >> 0) & 0xf;
2389 gen_op_iwmmxt_movq_M0_wRn(rd0);
2390 switch ((insn >> 22) & 3) {
2391 case 0:
2392 if (insn & (1 << 21))
2393 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2394 else
2395 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2396 break;
2397 case 1:
2398 if (insn & (1 << 21))
2399 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2400 else
2401 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2402 break;
2403 case 2:
2404 if (insn & (1 << 21))
2405 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2406 else
2407 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2408 break;
2409 case 3:
2410 return 1;
2412 gen_op_iwmmxt_movq_wRn_M0(wrd);
2413 gen_op_iwmmxt_set_mup();
2414 break;
2415 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2416 case 0x402: case 0x502: case 0x602: case 0x702:
2417 wrd = (insn >> 12) & 0xf;
2418 rd0 = (insn >> 16) & 0xf;
2419 rd1 = (insn >> 0) & 0xf;
2420 gen_op_iwmmxt_movq_M0_wRn(rd0);
2421 tmp = tcg_const_i32((insn >> 20) & 3);
2422 iwmmxt_load_reg(cpu_V1, rd1);
2423 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2424 tcg_temp_free_i32(tmp);
2425 gen_op_iwmmxt_movq_wRn_M0(wrd);
2426 gen_op_iwmmxt_set_mup();
2427 break;
2428 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2429 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2430 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2431 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2432 wrd = (insn >> 12) & 0xf;
2433 rd0 = (insn >> 16) & 0xf;
2434 rd1 = (insn >> 0) & 0xf;
2435 gen_op_iwmmxt_movq_M0_wRn(rd0);
2436 switch ((insn >> 20) & 0xf) {
2437 case 0x0:
2438 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2439 break;
2440 case 0x1:
2441 gen_op_iwmmxt_subub_M0_wRn(rd1);
2442 break;
2443 case 0x3:
2444 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2445 break;
2446 case 0x4:
2447 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2448 break;
2449 case 0x5:
2450 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2451 break;
2452 case 0x7:
2453 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2454 break;
2455 case 0x8:
2456 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2457 break;
2458 case 0x9:
2459 gen_op_iwmmxt_subul_M0_wRn(rd1);
2460 break;
2461 case 0xb:
2462 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2463 break;
2464 default:
2465 return 1;
2467 gen_op_iwmmxt_movq_wRn_M0(wrd);
2468 gen_op_iwmmxt_set_mup();
2469 gen_op_iwmmxt_set_cup();
2470 break;
2471 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2472 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2473 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2474 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2475 wrd = (insn >> 12) & 0xf;
2476 rd0 = (insn >> 16) & 0xf;
2477 gen_op_iwmmxt_movq_M0_wRn(rd0);
2478 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2479 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2480 tcg_temp_free_i32(tmp);
2481 gen_op_iwmmxt_movq_wRn_M0(wrd);
2482 gen_op_iwmmxt_set_mup();
2483 gen_op_iwmmxt_set_cup();
2484 break;
2485 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2486 case 0x418: case 0x518: case 0x618: case 0x718:
2487 case 0x818: case 0x918: case 0xa18: case 0xb18:
2488 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2489 wrd = (insn >> 12) & 0xf;
2490 rd0 = (insn >> 16) & 0xf;
2491 rd1 = (insn >> 0) & 0xf;
2492 gen_op_iwmmxt_movq_M0_wRn(rd0);
2493 switch ((insn >> 20) & 0xf) {
2494 case 0x0:
2495 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2496 break;
2497 case 0x1:
2498 gen_op_iwmmxt_addub_M0_wRn(rd1);
2499 break;
2500 case 0x3:
2501 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2502 break;
2503 case 0x4:
2504 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2505 break;
2506 case 0x5:
2507 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2508 break;
2509 case 0x7:
2510 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2511 break;
2512 case 0x8:
2513 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2514 break;
2515 case 0x9:
2516 gen_op_iwmmxt_addul_M0_wRn(rd1);
2517 break;
2518 case 0xb:
2519 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2520 break;
2521 default:
2522 return 1;
2524 gen_op_iwmmxt_movq_wRn_M0(wrd);
2525 gen_op_iwmmxt_set_mup();
2526 gen_op_iwmmxt_set_cup();
2527 break;
2528 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2529 case 0x408: case 0x508: case 0x608: case 0x708:
2530 case 0x808: case 0x908: case 0xa08: case 0xb08:
2531 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2532 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2533 return 1;
2534 wrd = (insn >> 12) & 0xf;
2535 rd0 = (insn >> 16) & 0xf;
2536 rd1 = (insn >> 0) & 0xf;
2537 gen_op_iwmmxt_movq_M0_wRn(rd0);
2538 switch ((insn >> 22) & 3) {
2539 case 1:
2540 if (insn & (1 << 21))
2541 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2542 else
2543 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2544 break;
2545 case 2:
2546 if (insn & (1 << 21))
2547 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2548 else
2549 gen_op_iwmmxt_packul_M0_wRn(rd1);
2550 break;
2551 case 3:
2552 if (insn & (1 << 21))
2553 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2554 else
2555 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2556 break;
2558 gen_op_iwmmxt_movq_wRn_M0(wrd);
2559 gen_op_iwmmxt_set_mup();
2560 gen_op_iwmmxt_set_cup();
2561 break;
2562 case 0x201: case 0x203: case 0x205: case 0x207:
2563 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2564 case 0x211: case 0x213: case 0x215: case 0x217:
2565 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2566 wrd = (insn >> 5) & 0xf;
2567 rd0 = (insn >> 12) & 0xf;
2568 rd1 = (insn >> 0) & 0xf;
2569 if (rd0 == 0xf || rd1 == 0xf)
2570 return 1;
2571 gen_op_iwmmxt_movq_M0_wRn(wrd);
2572 tmp = load_reg(s, rd0);
2573 tmp2 = load_reg(s, rd1);
2574 switch ((insn >> 16) & 0xf) {
2575 case 0x0: /* TMIA */
2576 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2577 break;
2578 case 0x8: /* TMIAPH */
2579 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2580 break;
2581 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2582 if (insn & (1 << 16))
2583 tcg_gen_shri_i32(tmp, tmp, 16);
2584 if (insn & (1 << 17))
2585 tcg_gen_shri_i32(tmp2, tmp2, 16);
2586 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2587 break;
2588 default:
2589 tcg_temp_free_i32(tmp2);
2590 tcg_temp_free_i32(tmp);
2591 return 1;
2593 tcg_temp_free_i32(tmp2);
2594 tcg_temp_free_i32(tmp);
2595 gen_op_iwmmxt_movq_wRn_M0(wrd);
2596 gen_op_iwmmxt_set_mup();
2597 break;
2598 default:
2599 return 1;
2602 return 0;
2605 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2606 (i.e. an undefined instruction). */
2607 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2609 int acc, rd0, rd1, rdhi, rdlo;
2610 TCGv_i32 tmp, tmp2;
2612 if ((insn & 0x0ff00f10) == 0x0e200010) {
2613 /* Multiply with Internal Accumulate Format */
2614 rd0 = (insn >> 12) & 0xf;
2615 rd1 = insn & 0xf;
2616 acc = (insn >> 5) & 7;
2618 if (acc != 0)
2619 return 1;
2621 tmp = load_reg(s, rd0);
2622 tmp2 = load_reg(s, rd1);
2623 switch ((insn >> 16) & 0xf) {
2624 case 0x0: /* MIA */
2625 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2626 break;
2627 case 0x8: /* MIAPH */
2628 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2629 break;
2630 case 0xc: /* MIABB */
2631 case 0xd: /* MIABT */
2632 case 0xe: /* MIATB */
2633 case 0xf: /* MIATT */
2634 if (insn & (1 << 16))
2635 tcg_gen_shri_i32(tmp, tmp, 16);
2636 if (insn & (1 << 17))
2637 tcg_gen_shri_i32(tmp2, tmp2, 16);
2638 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2639 break;
2640 default:
2641 return 1;
2643 tcg_temp_free_i32(tmp2);
2644 tcg_temp_free_i32(tmp);
2646 gen_op_iwmmxt_movq_wRn_M0(acc);
2647 return 0;
2650 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2651 /* Internal Accumulator Access Format */
2652 rdhi = (insn >> 16) & 0xf;
2653 rdlo = (insn >> 12) & 0xf;
2654 acc = insn & 7;
2656 if (acc != 0)
2657 return 1;
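/* Only acc0 exists; MRA moves the 40-bit accumulator into rdlo/rdhi
 * (masking the high word down to bits 39:32), while MAR packs
 * rdlo/rdhi back into the accumulator.
 */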
2659 if (insn & ARM_CP_RW_BIT) { /* MRA */
2660 iwmmxt_load_reg(cpu_V0, acc);
2661 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2662 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2663 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
2664 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2665 } else { /* MAR */
2666 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2667 iwmmxt_store_reg(cpu_V0, acc);
2669 return 0;
2672 return 1;
2675 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2676 #define VFP_SREG(insn, bigbit, smallbit) \
2677 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2678 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2679 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2680 reg = (((insn) >> (bigbit)) & 0x0f) \
2681 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2682 } else { \
2683 if (insn & (1 << (smallbit))) \
2684 return 1; \
2685 reg = ((insn) >> (bigbit)) & 0x0f; \
2686 }} while (0)
2688 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2689 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2690 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2691 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2692 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2693 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
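/* Expanding the macros above: VFP_SREG_D(insn) yields
 * (insn[15:12] << 1) | insn[22], i.e. the "D" bit becomes the low bit
 * of a single-precision register number, while VFP_DREG_D(reg, insn)
 * yields insn[15:12] | (insn[22] << 4) on VFP3 and later; pre-VFP3
 * cores only have 16 double registers, so a set "D" bit UNDEFs.
 */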
2695 /* Move between integer and VFP cores. */
2696 static TCGv_i32 gen_vfp_mrs(void)
2698 TCGv_i32 tmp = tcg_temp_new_i32();
2699 tcg_gen_mov_i32(tmp, cpu_F0s);
2700 return tmp;
2703 static void gen_vfp_msr(TCGv_i32 tmp)
2705 tcg_gen_mov_i32(cpu_F0s, tmp);
2706 tcg_temp_free_i32(tmp);
2707 }
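/* The next three helpers replicate a single element across a 32-bit
 * value: gen_neon_dup_u8 turns 0x000000ab into 0xabababab (after
 * optionally shifting the chosen lane down), gen_neon_dup_low16 turns
 * 0x0000abcd into 0xabcdabcd and gen_neon_dup_high16 turns 0xabcd0000
 * into 0xabcdabcd.  They implement VDUP and the "load and replicate"
 * forms of the element load/store instructions.
 */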
2709 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2711 TCGv_i32 tmp = tcg_temp_new_i32();
2712 if (shift)
2713 tcg_gen_shri_i32(var, var, shift);
2714 tcg_gen_ext8u_i32(var, var);
2715 tcg_gen_shli_i32(tmp, var, 8);
2716 tcg_gen_or_i32(var, var, tmp);
2717 tcg_gen_shli_i32(tmp, var, 16);
2718 tcg_gen_or_i32(var, var, tmp);
2719 tcg_temp_free_i32(tmp);
2722 static void gen_neon_dup_low16(TCGv_i32 var)
2724 TCGv_i32 tmp = tcg_temp_new_i32();
2725 tcg_gen_ext16u_i32(var, var);
2726 tcg_gen_shli_i32(tmp, var, 16);
2727 tcg_gen_or_i32(var, var, tmp);
2728 tcg_temp_free_i32(tmp);
2731 static void gen_neon_dup_high16(TCGv_i32 var)
2733 TCGv_i32 tmp = tcg_temp_new_i32();
2734 tcg_gen_andi_i32(var, var, 0xffff0000);
2735 tcg_gen_shri_i32(tmp, var, 16);
2736 tcg_gen_or_i32(var, var, tmp);
2737 tcg_temp_free_i32(tmp);
2740 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2742 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2743 TCGv_i32 tmp = tcg_temp_new_i32();
2744 switch (size) {
2745 case 0:
2746 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2747 gen_neon_dup_u8(tmp, 0);
2748 break;
2749 case 1:
2750 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2751 gen_neon_dup_low16(tmp);
2752 break;
2753 case 2:
2754 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2755 break;
2756 default: /* Avoid compiler warnings. */
2757 abort();
2759 return tmp;
2762 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2763 uint32_t dp)
2765 uint32_t cc = extract32(insn, 20, 2);
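/* VSEL only encodes four conditions in insn[21:20]: 0 -> EQ, 1 -> VS,
 * 2 -> GE, 3 -> GT.  The movcond sequences below test the flag
 * globals directly (cpu_ZF is zero exactly when Z is set, N and V
 * live in the sign bits of cpu_NF/cpu_VF) and select frn when the
 * condition holds, frm otherwise.
 */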
2767 if (dp) {
2768 TCGv_i64 frn, frm, dest;
2769 TCGv_i64 tmp, zero, zf, nf, vf;
2771 zero = tcg_const_i64(0);
2773 frn = tcg_temp_new_i64();
2774 frm = tcg_temp_new_i64();
2775 dest = tcg_temp_new_i64();
2777 zf = tcg_temp_new_i64();
2778 nf = tcg_temp_new_i64();
2779 vf = tcg_temp_new_i64();
2781 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2782 tcg_gen_ext_i32_i64(nf, cpu_NF);
2783 tcg_gen_ext_i32_i64(vf, cpu_VF);
2785 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2786 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2787 switch (cc) {
2788 case 0: /* eq: Z */
2789 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2790 frn, frm);
2791 break;
2792 case 1: /* vs: V */
2793 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
2794 frn, frm);
2795 break;
2796 case 2: /* ge: N == V -> N ^ V == 0 */
2797 tmp = tcg_temp_new_i64();
2798 tcg_gen_xor_i64(tmp, vf, nf);
2799 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2800 frn, frm);
2801 tcg_temp_free_i64(tmp);
2802 break;
2803 case 3: /* gt: !Z && N == V */
2804 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
2805 frn, frm);
2806 tmp = tcg_temp_new_i64();
2807 tcg_gen_xor_i64(tmp, vf, nf);
2808 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2809 dest, frm);
2810 tcg_temp_free_i64(tmp);
2811 break;
2813 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2814 tcg_temp_free_i64(frn);
2815 tcg_temp_free_i64(frm);
2816 tcg_temp_free_i64(dest);
2818 tcg_temp_free_i64(zf);
2819 tcg_temp_free_i64(nf);
2820 tcg_temp_free_i64(vf);
2822 tcg_temp_free_i64(zero);
2823 } else {
2824 TCGv_i32 frn, frm, dest;
2825 TCGv_i32 tmp, zero;
2827 zero = tcg_const_i32(0);
2829 frn = tcg_temp_new_i32();
2830 frm = tcg_temp_new_i32();
2831 dest = tcg_temp_new_i32();
2832 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2833 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2834 switch (cc) {
2835 case 0: /* eq: Z */
2836 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
2837 frn, frm);
2838 break;
2839 case 1: /* vs: V */
2840 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
2841 frn, frm);
2842 break;
2843 case 2: /* ge: N == V -> N ^ V == 0 */
2844 tmp = tcg_temp_new_i32();
2845 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2846 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2847 frn, frm);
2848 tcg_temp_free_i32(tmp);
2849 break;
2850 case 3: /* gt: !Z && N == V */
2851 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
2852 frn, frm);
2853 tmp = tcg_temp_new_i32();
2854 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2855 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2856 dest, frm);
2857 tcg_temp_free_i32(tmp);
2858 break;
2860 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2861 tcg_temp_free_i32(frn);
2862 tcg_temp_free_i32(frm);
2863 tcg_temp_free_i32(dest);
2865 tcg_temp_free_i32(zero);
2868 return 0;
2871 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2872 uint32_t rm, uint32_t dp)
2874 uint32_t vmin = extract32(insn, 6, 1);
2875 TCGv_ptr fpst = get_fpstatus_ptr(0);
2877 if (dp) {
2878 TCGv_i64 frn, frm, dest;
2880 frn = tcg_temp_new_i64();
2881 frm = tcg_temp_new_i64();
2882 dest = tcg_temp_new_i64();
2884 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2885 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2886 if (vmin) {
2887 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
2888 } else {
2889 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
2891 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2892 tcg_temp_free_i64(frn);
2893 tcg_temp_free_i64(frm);
2894 tcg_temp_free_i64(dest);
2895 } else {
2896 TCGv_i32 frn, frm, dest;
2898 frn = tcg_temp_new_i32();
2899 frm = tcg_temp_new_i32();
2900 dest = tcg_temp_new_i32();
2902 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2903 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2904 if (vmin) {
2905 gen_helper_vfp_minnums(dest, frn, frm, fpst);
2906 } else {
2907 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
2909 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2910 tcg_temp_free_i32(frn);
2911 tcg_temp_free_i32(frm);
2912 tcg_temp_free_i32(dest);
2915 tcg_temp_free_ptr(fpst);
2916 return 0;
2919 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2920 int rounding)
2922 TCGv_ptr fpst = get_fpstatus_ptr(0);
2923 TCGv_i32 tcg_rmode;
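/* The set_rmode calls below swap the FP rounding mode with the value
 * in tcg_rmode and hand back the previous mode, so the second call at
 * the end of the function restores whatever mode was active before
 * the VRINT.
 */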
2925 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2926 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2928 if (dp) {
2929 TCGv_i64 tcg_op;
2930 TCGv_i64 tcg_res;
2931 tcg_op = tcg_temp_new_i64();
2932 tcg_res = tcg_temp_new_i64();
2933 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2934 gen_helper_rintd(tcg_res, tcg_op, fpst);
2935 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2936 tcg_temp_free_i64(tcg_op);
2937 tcg_temp_free_i64(tcg_res);
2938 } else {
2939 TCGv_i32 tcg_op;
2940 TCGv_i32 tcg_res;
2941 tcg_op = tcg_temp_new_i32();
2942 tcg_res = tcg_temp_new_i32();
2943 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2944 gen_helper_rints(tcg_res, tcg_op, fpst);
2945 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2946 tcg_temp_free_i32(tcg_op);
2947 tcg_temp_free_i32(tcg_res);
2950 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2951 tcg_temp_free_i32(tcg_rmode);
2953 tcg_temp_free_ptr(fpst);
2954 return 0;
2957 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2958 int rounding)
2960 bool is_signed = extract32(insn, 7, 1);
2961 TCGv_ptr fpst = get_fpstatus_ptr(0);
2962 TCGv_i32 tcg_rmode, tcg_shift;
2964 tcg_shift = tcg_const_i32(0);
2966 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2967 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2969 if (dp) {
2970 TCGv_i64 tcg_double, tcg_res;
2971 TCGv_i32 tcg_tmp;
2972 /* Rd is encoded as a single precision register even when the source
2973 * is double precision.
2974 */
2975 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
2976 tcg_double = tcg_temp_new_i64();
2977 tcg_res = tcg_temp_new_i64();
2978 tcg_tmp = tcg_temp_new_i32();
2979 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
2980 if (is_signed) {
2981 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
2982 } else {
2983 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
2985 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
2986 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
2987 tcg_temp_free_i32(tcg_tmp);
2988 tcg_temp_free_i64(tcg_res);
2989 tcg_temp_free_i64(tcg_double);
2990 } else {
2991 TCGv_i32 tcg_single, tcg_res;
2992 tcg_single = tcg_temp_new_i32();
2993 tcg_res = tcg_temp_new_i32();
2994 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
2995 if (is_signed) {
2996 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
2997 } else {
2998 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3000 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3001 tcg_temp_free_i32(tcg_res);
3002 tcg_temp_free_i32(tcg_single);
3005 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3006 tcg_temp_free_i32(tcg_rmode);
3008 tcg_temp_free_i32(tcg_shift);
3010 tcg_temp_free_ptr(fpst);
3012 return 0;
3015 /* Table for converting the most common AArch32 encoding of
3016 * rounding mode to arm_fprounding order (which matches the
3017 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3018 */
3019 static const uint8_t fp_decode_rm[] = {
3020 FPROUNDING_TIEAWAY,
3021 FPROUNDING_TIEEVEN,
3022 FPROUNDING_POSINF,
3023 FPROUNDING_NEGINF,
3024 };
3026 static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
3028 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3030 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3031 return 1;
3034 if (dp) {
3035 VFP_DREG_D(rd, insn);
3036 VFP_DREG_N(rn, insn);
3037 VFP_DREG_M(rm, insn);
3038 } else {
3039 rd = VFP_SREG_D(insn);
3040 rn = VFP_SREG_N(insn);
3041 rm = VFP_SREG_M(insn);
3044 if ((insn & 0x0f800e50) == 0x0e000a00) {
3045 return handle_vsel(insn, rd, rn, rm, dp);
3046 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3047 return handle_vminmaxnm(insn, rd, rn, rm, dp);
3048 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3049 /* VRINTA, VRINTN, VRINTP, VRINTM */
3050 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3051 return handle_vrint(insn, rd, rm, dp, rounding);
3052 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3053 /* VCVTA, VCVTN, VCVTP, VCVTM */
3054 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3055 return handle_vcvt(insn, rd, rm, dp, rounding);
3057 return 1;
3060 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3061 (i.e. an undefined instruction). */
3062 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3064 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3065 int dp, veclen;
3066 TCGv_i32 addr;
3067 TCGv_i32 tmp;
3068 TCGv_i32 tmp2;
3070 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
3071 return 1;
3074 /* FIXME: this access check should not take precedence over UNDEF
3075 * for invalid encodings; we will generate incorrect syndrome information
3076 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3078 if (s->fp_excp_el) {
3079 gen_exception_insn(s, 4, EXCP_UDEF,
3080 syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
3081 return 0;
3084 if (!s->vfp_enabled) {
3085 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3086 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3087 return 1;
3088 rn = (insn >> 16) & 0xf;
3089 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3090 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
3091 return 1;
3095 if (extract32(insn, 28, 4) == 0xf) {
3096 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3097 * only used in v8 and above.
3099 return disas_vfp_v8_insn(s, insn);
3102 dp = ((insn & 0xf00) == 0xb00);
3103 switch ((insn >> 24) & 0xf) {
3104 case 0xe:
3105 if (insn & (1 << 4)) {
3106 /* single register transfer */
3107 rd = (insn >> 12) & 0xf;
3108 if (dp) {
3109 int size;
3110 int pass;
3112 VFP_DREG_N(rn, insn);
3113 if (insn & 0xf)
3114 return 1;
3115 if (insn & 0x00c00060
3116 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
3117 return 1;
3120 pass = (insn >> 21) & 1;
3121 if (insn & (1 << 22)) {
3122 size = 0;
3123 offset = ((insn >> 5) & 3) * 8;
3124 } else if (insn & (1 << 5)) {
3125 size = 1;
3126 offset = (insn & (1 << 6)) ? 16 : 0;
3127 } else {
3128 size = 2;
3129 offset = 0;
3131 if (insn & ARM_CP_RW_BIT) {
3132 /* vfp->arm */
3133 tmp = neon_load_reg(rn, pass);
3134 switch (size) {
3135 case 0:
3136 if (offset)
3137 tcg_gen_shri_i32(tmp, tmp, offset);
3138 if (insn & (1 << 23))
3139 gen_uxtb(tmp);
3140 else
3141 gen_sxtb(tmp);
3142 break;
3143 case 1:
3144 if (insn & (1 << 23)) {
3145 if (offset) {
3146 tcg_gen_shri_i32(tmp, tmp, 16);
3147 } else {
3148 gen_uxth(tmp);
3150 } else {
3151 if (offset) {
3152 tcg_gen_sari_i32(tmp, tmp, 16);
3153 } else {
3154 gen_sxth(tmp);
3157 break;
3158 case 2:
3159 break;
3161 store_reg(s, rd, tmp);
3162 } else {
3163 /* arm->vfp */
3164 tmp = load_reg(s, rd);
3165 if (insn & (1 << 23)) {
3166 /* VDUP */
3167 if (size == 0) {
3168 gen_neon_dup_u8(tmp, 0);
3169 } else if (size == 1) {
3170 gen_neon_dup_low16(tmp);
3172 for (n = 0; n <= pass * 2; n++) {
3173 tmp2 = tcg_temp_new_i32();
3174 tcg_gen_mov_i32(tmp2, tmp);
3175 neon_store_reg(rn, n, tmp2);
3177 neon_store_reg(rn, n, tmp);
3178 } else {
3179 /* VMOV */
3180 switch (size) {
3181 case 0:
3182 tmp2 = neon_load_reg(rn, pass);
3183 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
3184 tcg_temp_free_i32(tmp2);
3185 break;
3186 case 1:
3187 tmp2 = neon_load_reg(rn, pass);
3188 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
3189 tcg_temp_free_i32(tmp2);
3190 break;
3191 case 2:
3192 break;
3194 neon_store_reg(rn, pass, tmp);
3197 } else { /* !dp */
3198 if ((insn & 0x6f) != 0x00)
3199 return 1;
3200 rn = VFP_SREG_N(insn);
3201 if (insn & ARM_CP_RW_BIT) {
3202 /* vfp->arm */
3203 if (insn & (1 << 21)) {
3204 /* system register */
3205 rn >>= 1;
3207 switch (rn) {
3208 case ARM_VFP_FPSID:
3209 /* VFP2 allows access to FSID from userspace.
3210 VFP3 restricts all id registers to privileged
3211 accesses. */
3212 if (IS_USER(s)
3213 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3214 return 1;
3216 tmp = load_cpu_field(vfp.xregs[rn]);
3217 break;
3218 case ARM_VFP_FPEXC:
3219 if (IS_USER(s))
3220 return 1;
3221 tmp = load_cpu_field(vfp.xregs[rn]);
3222 break;
3223 case ARM_VFP_FPINST:
3224 case ARM_VFP_FPINST2:
3225 /* Not present in VFP3. */
3226 if (IS_USER(s)
3227 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3228 return 1;
3230 tmp = load_cpu_field(vfp.xregs[rn]);
3231 break;
3232 case ARM_VFP_FPSCR:
3233 if (rd == 15) {
3234 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3235 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3236 } else {
3237 tmp = tcg_temp_new_i32();
3238 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3240 break;
3241 case ARM_VFP_MVFR2:
3242 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3243 return 1;
3245 /* fall through */
3246 case ARM_VFP_MVFR0:
3247 case ARM_VFP_MVFR1:
3248 if (IS_USER(s)
3249 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
3250 return 1;
3252 tmp = load_cpu_field(vfp.xregs[rn]);
3253 break;
3254 default:
3255 return 1;
3257 } else {
3258 gen_mov_F0_vreg(0, rn);
3259 tmp = gen_vfp_mrs();
3261 if (rd == 15) {
3262 /* Set the 4 flag bits in the CPSR. */
3263 gen_set_nzcv(tmp);
3264 tcg_temp_free_i32(tmp);
3265 } else {
3266 store_reg(s, rd, tmp);
3268 } else {
3269 /* arm->vfp */
3270 if (insn & (1 << 21)) {
3271 rn >>= 1;
3272 /* system register */
3273 switch (rn) {
3274 case ARM_VFP_FPSID:
3275 case ARM_VFP_MVFR0:
3276 case ARM_VFP_MVFR1:
3277 /* Writes are ignored. */
3278 break;
3279 case ARM_VFP_FPSCR:
3280 tmp = load_reg(s, rd);
3281 gen_helper_vfp_set_fpscr(cpu_env, tmp);
3282 tcg_temp_free_i32(tmp);
3283 gen_lookup_tb(s);
3284 break;
3285 case ARM_VFP_FPEXC:
3286 if (IS_USER(s))
3287 return 1;
3288 /* TODO: VFP subarchitecture support.
3289 * For now, keep the EN bit only */
3290 tmp = load_reg(s, rd);
3291 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
3292 store_cpu_field(tmp, vfp.xregs[rn]);
3293 gen_lookup_tb(s);
3294 break;
3295 case ARM_VFP_FPINST:
3296 case ARM_VFP_FPINST2:
3297 if (IS_USER(s)) {
3298 return 1;
3300 tmp = load_reg(s, rd);
3301 store_cpu_field(tmp, vfp.xregs[rn]);
3302 break;
3303 default:
3304 return 1;
3306 } else {
3307 tmp = load_reg(s, rd);
3308 gen_vfp_msr(tmp);
3309 gen_mov_vreg_F0(0, rn);
3313 } else {
3314 /* data processing */
3315 /* The opcode is in bits 23, 21, 20 and 6. */
3316 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3317 if (dp) {
3318 if (op == 15) {
3319 /* rn is opcode */
3320 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3321 } else {
3322 /* rn is register number */
3323 VFP_DREG_N(rn, insn);
3326 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3327 ((rn & 0x1e) == 0x6))) {
3328 /* Integer or single/half precision destination. */
3329 rd = VFP_SREG_D(insn);
3330 } else {
3331 VFP_DREG_D(rd, insn);
3333 if (op == 15 &&
3334 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3335 ((rn & 0x1e) == 0x4))) {
3336 /* VCVT from int or half precision is always from S reg
3337 * regardless of dp bit. VCVT with immediate frac_bits
3338 * has same format as SREG_M.
3340 rm = VFP_SREG_M(insn);
3341 } else {
3342 VFP_DREG_M(rm, insn);
3344 } else {
3345 rn = VFP_SREG_N(insn);
3346 if (op == 15 && rn == 15) {
3347 /* Double precision destination. */
3348 VFP_DREG_D(rd, insn);
3349 } else {
3350 rd = VFP_SREG_D(insn);
3352 /* NB that we implicitly rely on the encoding for the frac_bits
3353 * in VCVT of fixed to float being the same as that of an SREG_M
3355 rm = VFP_SREG_M(insn);
3358 veclen = s->vec_len;
3359 if (op == 15 && rn > 3)
3360 veclen = 0;
3362 /* Shut up compiler warnings. */
3363 delta_m = 0;
3364 delta_d = 0;
3365 bank_mask = 0;
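/* Legacy VFP short-vector handling: vec_len/vec_stride come from the
 * FPSCR LEN/STRIDE fields.  If the destination register is in the
 * first bank (bank_mask bits clear) the operation stays scalar;
 * otherwise it is repeated veclen times, stepping rd/rn/rm by
 * delta_d/delta_m and wrapping within a bank of 4 double or 8 single
 * registers.
 */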
3367 if (veclen > 0) {
3368 if (dp)
3369 bank_mask = 0xc;
3370 else
3371 bank_mask = 0x18;
3373 /* Figure out what type of vector operation this is. */
3374 if ((rd & bank_mask) == 0) {
3375 /* scalar */
3376 veclen = 0;
3377 } else {
3378 if (dp)
3379 delta_d = (s->vec_stride >> 1) + 1;
3380 else
3381 delta_d = s->vec_stride + 1;
3383 if ((rm & bank_mask) == 0) {
3384 /* mixed scalar/vector */
3385 delta_m = 0;
3386 } else {
3387 /* vector */
3388 delta_m = delta_d;
3393 /* Load the initial operands. */
3394 if (op == 15) {
3395 switch (rn) {
3396 case 16:
3397 case 17:
3398 /* Integer source */
3399 gen_mov_F0_vreg(0, rm);
3400 break;
3401 case 8:
3402 case 9:
3403 /* Compare */
3404 gen_mov_F0_vreg(dp, rd);
3405 gen_mov_F1_vreg(dp, rm);
3406 break;
3407 case 10:
3408 case 11:
3409 /* Compare with zero */
3410 gen_mov_F0_vreg(dp, rd);
3411 gen_vfp_F1_ld0(dp);
3412 break;
3413 case 20:
3414 case 21:
3415 case 22:
3416 case 23:
3417 case 28:
3418 case 29:
3419 case 30:
3420 case 31:
3421 /* Source and destination the same. */
3422 gen_mov_F0_vreg(dp, rd);
3423 break;
3424 case 4:
3425 case 5:
3426 case 6:
3427 case 7:
3428 /* VCVTB, VCVTT: only present with the halfprec extension
3429 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3430 * (we choose to UNDEF)
3432 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3433 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
3434 return 1;
3436 if (!extract32(rn, 1, 1)) {
3437 /* Half precision source. */
3438 gen_mov_F0_vreg(0, rm);
3439 break;
3441 /* Otherwise fall through */
3442 default:
3443 /* One source operand. */
3444 gen_mov_F0_vreg(dp, rm);
3445 break;
3447 } else {
3448 /* Two source operands. */
3449 gen_mov_F0_vreg(dp, rn);
3450 gen_mov_F1_vreg(dp, rm);
3453 for (;;) {
3454 /* Perform the calculation. */
3455 switch (op) {
3456 case 0: /* VMLA: fd + (fn * fm) */
3457 /* Note that order of inputs to the add matters for NaNs */
3458 gen_vfp_F1_mul(dp);
3459 gen_mov_F0_vreg(dp, rd);
3460 gen_vfp_add(dp);
3461 break;
3462 case 1: /* VMLS: fd + -(fn * fm) */
3463 gen_vfp_mul(dp);
3464 gen_vfp_F1_neg(dp);
3465 gen_mov_F0_vreg(dp, rd);
3466 gen_vfp_add(dp);
3467 break;
3468 case 2: /* VNMLS: -fd + (fn * fm) */
3469 /* Note that it isn't valid to replace (-A + B) with (B - A)
3470 * or similar plausible looking simplifications
3471 * because this will give wrong results for NaNs.
3473 gen_vfp_F1_mul(dp);
3474 gen_mov_F0_vreg(dp, rd);
3475 gen_vfp_neg(dp);
3476 gen_vfp_add(dp);
3477 break;
3478 case 3: /* VNMLA: -fd + -(fn * fm) */
3479 gen_vfp_mul(dp);
3480 gen_vfp_F1_neg(dp);
3481 gen_mov_F0_vreg(dp, rd);
3482 gen_vfp_neg(dp);
3483 gen_vfp_add(dp);
3484 break;
3485 case 4: /* mul: fn * fm */
3486 gen_vfp_mul(dp);
3487 break;
3488 case 5: /* nmul: -(fn * fm) */
3489 gen_vfp_mul(dp);
3490 gen_vfp_neg(dp);
3491 break;
3492 case 6: /* add: fn + fm */
3493 gen_vfp_add(dp);
3494 break;
3495 case 7: /* sub: fn - fm */
3496 gen_vfp_sub(dp);
3497 break;
3498 case 8: /* div: fn / fm */
3499 gen_vfp_div(dp);
3500 break;
3501 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3502 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3503 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3504 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3505 /* These are fused multiply-add, and must be done as one
3506 * floating point operation with no rounding between the
3507 * multiplication and addition steps.
3508 * NB that doing the negations here as separate steps is
3509 * correct : an input NaN should come out with its sign bit
3510 * flipped if it is a negated-input.
3512 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
3513 return 1;
3515 if (dp) {
3516 TCGv_ptr fpst;
3517 TCGv_i64 frd;
3518 if (op & 1) {
3519 /* VFNMS, VFMS */
3520 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3522 frd = tcg_temp_new_i64();
3523 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3524 if (op & 2) {
3525 /* VFNMA, VFNMS */
3526 gen_helper_vfp_negd(frd, frd);
3528 fpst = get_fpstatus_ptr(0);
3529 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3530 cpu_F1d, frd, fpst);
3531 tcg_temp_free_ptr(fpst);
3532 tcg_temp_free_i64(frd);
3533 } else {
3534 TCGv_ptr fpst;
3535 TCGv_i32 frd;
3536 if (op & 1) {
3537 /* VFNMS, VFMS */
3538 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3540 frd = tcg_temp_new_i32();
3541 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3542 if (op & 2) {
3543 gen_helper_vfp_negs(frd, frd);
3545 fpst = get_fpstatus_ptr(0);
3546 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3547 cpu_F1s, frd, fpst);
3548 tcg_temp_free_ptr(fpst);
3549 tcg_temp_free_i32(frd);
3551 break;
3552 case 14: /* fconst */
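/* VMOV immediate: expand the 8-bit encoded constant as in the
 * VFPExpandImm() pseudocode: the top bit of the encoding becomes the
 * sign, bit 6 selects the exponent range and the remaining bits
 * become the most significant fraction bits.
 */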
3553 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3554 return 1;
3557 n = (insn << 12) & 0x80000000;
3558 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3559 if (dp) {
3560 if (i & 0x40)
3561 i |= 0x3f80;
3562 else
3563 i |= 0x4000;
3564 n |= i << 16;
3565 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3566 } else {
3567 if (i & 0x40)
3568 i |= 0x780;
3569 else
3570 i |= 0x800;
3571 n |= i << 19;
3572 tcg_gen_movi_i32(cpu_F0s, n);
3574 break;
3575 case 15: /* extension space */
3576 switch (rn) {
3577 case 0: /* cpy */
3578 /* no-op */
3579 break;
3580 case 1: /* abs */
3581 gen_vfp_abs(dp);
3582 break;
3583 case 2: /* neg */
3584 gen_vfp_neg(dp);
3585 break;
3586 case 3: /* sqrt */
3587 gen_vfp_sqrt(dp);
3588 break;
3589 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3590 tmp = gen_vfp_mrs();
3591 tcg_gen_ext16u_i32(tmp, tmp);
3592 if (dp) {
3593 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3594 cpu_env);
3595 } else {
3596 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3597 cpu_env);
3599 tcg_temp_free_i32(tmp);
3600 break;
3601 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3602 tmp = gen_vfp_mrs();
3603 tcg_gen_shri_i32(tmp, tmp, 16);
3604 if (dp) {
3605 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3606 cpu_env);
3607 } else {
3608 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3609 cpu_env);
3611 tcg_temp_free_i32(tmp);
3612 break;
3613 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3614 tmp = tcg_temp_new_i32();
3615 if (dp) {
3616 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3617 cpu_env);
3618 } else {
3619 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3620 cpu_env);
3622 gen_mov_F0_vreg(0, rd);
3623 tmp2 = gen_vfp_mrs();
3624 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3625 tcg_gen_or_i32(tmp, tmp, tmp2);
3626 tcg_temp_free_i32(tmp2);
3627 gen_vfp_msr(tmp);
3628 break;
3629 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3630 tmp = tcg_temp_new_i32();
3631 if (dp) {
3632 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3633 cpu_env);
3634 } else {
3635 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3636 cpu_env);
3638 tcg_gen_shli_i32(tmp, tmp, 16);
3639 gen_mov_F0_vreg(0, rd);
3640 tmp2 = gen_vfp_mrs();
3641 tcg_gen_ext16u_i32(tmp2, tmp2);
3642 tcg_gen_or_i32(tmp, tmp, tmp2);
3643 tcg_temp_free_i32(tmp2);
3644 gen_vfp_msr(tmp);
3645 break;
3646 case 8: /* cmp */
3647 gen_vfp_cmp(dp);
3648 break;
3649 case 9: /* cmpe */
3650 gen_vfp_cmpe(dp);
3651 break;
3652 case 10: /* cmpz */
3653 gen_vfp_cmp(dp);
3654 break;
3655 case 11: /* cmpez */
3656 gen_vfp_F1_ld0(dp);
3657 gen_vfp_cmpe(dp);
3658 break;
3659 case 12: /* vrintr */
3661 TCGv_ptr fpst = get_fpstatus_ptr(0);
3662 if (dp) {
3663 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3664 } else {
3665 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3667 tcg_temp_free_ptr(fpst);
3668 break;
3670 case 13: /* vrintz */
3672 TCGv_ptr fpst = get_fpstatus_ptr(0);
3673 TCGv_i32 tcg_rmode;
3674 tcg_rmode = tcg_const_i32(float_round_to_zero);
3675 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3676 if (dp) {
3677 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3678 } else {
3679 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3681 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3682 tcg_temp_free_i32(tcg_rmode);
3683 tcg_temp_free_ptr(fpst);
3684 break;
3686 case 14: /* vrintx */
3688 TCGv_ptr fpst = get_fpstatus_ptr(0);
3689 if (dp) {
3690 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3691 } else {
3692 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3694 tcg_temp_free_ptr(fpst);
3695 break;
3697 case 15: /* single<->double conversion */
3698 if (dp)
3699 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3700 else
3701 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3702 break;
3703 case 16: /* fuito */
3704 gen_vfp_uito(dp, 0);
3705 break;
3706 case 17: /* fsito */
3707 gen_vfp_sito(dp, 0);
3708 break;
3709 case 20: /* fshto */
3710 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3711 return 1;
3713 gen_vfp_shto(dp, 16 - rm, 0);
3714 break;
3715 case 21: /* fslto */
3716 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3717 return 1;
3719 gen_vfp_slto(dp, 32 - rm, 0);
3720 break;
3721 case 22: /* fuhto */
3722 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3723 return 1;
3725 gen_vfp_uhto(dp, 16 - rm, 0);
3726 break;
3727 case 23: /* fulto */
3728 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3729 return 1;
3731 gen_vfp_ulto(dp, 32 - rm, 0);
3732 break;
3733 case 24: /* ftoui */
3734 gen_vfp_toui(dp, 0);
3735 break;
3736 case 25: /* ftouiz */
3737 gen_vfp_touiz(dp, 0);
3738 break;
3739 case 26: /* ftosi */
3740 gen_vfp_tosi(dp, 0);
3741 break;
3742 case 27: /* ftosiz */
3743 gen_vfp_tosiz(dp, 0);
3744 break;
3745 case 28: /* ftosh */
3746 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3747 return 1;
3749 gen_vfp_tosh(dp, 16 - rm, 0);
3750 break;
3751 case 29: /* ftosl */
3752 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3753 return 1;
3755 gen_vfp_tosl(dp, 32 - rm, 0);
3756 break;
3757 case 30: /* ftouh */
3758 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3759 return 1;
3761 gen_vfp_touh(dp, 16 - rm, 0);
3762 break;
3763 case 31: /* ftoul */
3764 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3765 return 1;
3767 gen_vfp_toul(dp, 32 - rm, 0);
3768 break;
3769 default: /* undefined */
3770 return 1;
3772 break;
3773 default: /* undefined */
3774 return 1;
3777 /* Write back the result. */
3778 if (op == 15 && (rn >= 8 && rn <= 11)) {
3779 /* Comparison, do nothing. */
3780 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3781 (rn & 0x1e) == 0x6)) {
3782 /* VCVT double to int: always integer result.
3783 * VCVT double to half precision is always a single
3784 * precision result.
3786 gen_mov_vreg_F0(0, rd);
3787 } else if (op == 15 && rn == 15) {
3788 /* conversion */
3789 gen_mov_vreg_F0(!dp, rd);
3790 } else {
3791 gen_mov_vreg_F0(dp, rd);
3794 /* break out of the loop if we have finished */
3795 if (veclen == 0)
3796 break;
3798 if (op == 15 && delta_m == 0) {
3799 /* single source one-many */
3800 while (veclen--) {
3801 rd = ((rd + delta_d) & (bank_mask - 1))
3802 | (rd & bank_mask);
3803 gen_mov_vreg_F0(dp, rd);
3805 break;
3807 /* Setup the next operands. */
3808 veclen--;
3809 rd = ((rd + delta_d) & (bank_mask - 1))
3810 | (rd & bank_mask);
3812 if (op == 15) {
3813 /* One source operand. */
3814 rm = ((rm + delta_m) & (bank_mask - 1))
3815 | (rm & bank_mask);
3816 gen_mov_F0_vreg(dp, rm);
3817 } else {
3818 /* Two source operands. */
3819 rn = ((rn + delta_d) & (bank_mask - 1))
3820 | (rn & bank_mask);
3821 gen_mov_F0_vreg(dp, rn);
3822 if (delta_m) {
3823 rm = ((rm + delta_m) & (bank_mask - 1))
3824 | (rm & bank_mask);
3825 gen_mov_F1_vreg(dp, rm);
3830 break;
3831 case 0xc:
3832 case 0xd:
3833 if ((insn & 0x03e00000) == 0x00400000) {
3834 /* two-register transfer */
3835 rn = (insn >> 16) & 0xf;
3836 rd = (insn >> 12) & 0xf;
3837 if (dp) {
3838 VFP_DREG_M(rm, insn);
3839 } else {
3840 rm = VFP_SREG_M(insn);
3843 if (insn & ARM_CP_RW_BIT) {
3844 /* vfp->arm */
3845 if (dp) {
3846 gen_mov_F0_vreg(0, rm * 2);
3847 tmp = gen_vfp_mrs();
3848 store_reg(s, rd, tmp);
3849 gen_mov_F0_vreg(0, rm * 2 + 1);
3850 tmp = gen_vfp_mrs();
3851 store_reg(s, rn, tmp);
3852 } else {
3853 gen_mov_F0_vreg(0, rm);
3854 tmp = gen_vfp_mrs();
3855 store_reg(s, rd, tmp);
3856 gen_mov_F0_vreg(0, rm + 1);
3857 tmp = gen_vfp_mrs();
3858 store_reg(s, rn, tmp);
3860 } else {
3861 /* arm->vfp */
3862 if (dp) {
3863 tmp = load_reg(s, rd);
3864 gen_vfp_msr(tmp);
3865 gen_mov_vreg_F0(0, rm * 2);
3866 tmp = load_reg(s, rn);
3867 gen_vfp_msr(tmp);
3868 gen_mov_vreg_F0(0, rm * 2 + 1);
3869 } else {
3870 tmp = load_reg(s, rd);
3871 gen_vfp_msr(tmp);
3872 gen_mov_vreg_F0(0, rm);
3873 tmp = load_reg(s, rn);
3874 gen_vfp_msr(tmp);
3875 gen_mov_vreg_F0(0, rm + 1);
3878 } else {
3879 /* Load/store */
3880 rn = (insn >> 16) & 0xf;
3881 if (dp)
3882 VFP_DREG_D(rd, insn);
3883 else
3884 rd = VFP_SREG_D(insn);
3885 if ((insn & 0x01200000) == 0x01000000) {
3886 /* Single load/store */
3887 offset = (insn & 0xff) << 2;
3888 if ((insn & (1 << 23)) == 0)
3889 offset = -offset;
3890 if (s->thumb && rn == 15) {
3891 /* This is actually UNPREDICTABLE */
3892 addr = tcg_temp_new_i32();
3893 tcg_gen_movi_i32(addr, s->pc & ~2);
3894 } else {
3895 addr = load_reg(s, rn);
3897 tcg_gen_addi_i32(addr, addr, offset);
3898 if (insn & (1 << 20)) {
3899 gen_vfp_ld(s, dp, addr);
3900 gen_mov_vreg_F0(dp, rd);
3901 } else {
3902 gen_mov_F0_vreg(dp, rd);
3903 gen_vfp_st(s, dp, addr);
3905 tcg_temp_free_i32(addr);
3906 } else {
3907 /* load/store multiple */
3908 int w = insn & (1 << 21);
3909 if (dp)
3910 n = (insn >> 1) & 0x7f;
3911 else
3912 n = insn & 0xff;
3914 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3915 /* P == U , W == 1 => UNDEF */
3916 return 1;
3918 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3919 /* UNPREDICTABLE cases for bad immediates: we choose to
3920 * UNDEF to avoid generating huge numbers of TCG ops
3922 return 1;
3924 if (rn == 15 && w) {
3925 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3926 return 1;
3929 if (s->thumb && rn == 15) {
3930 /* This is actually UNPREDICTABLE */
3931 addr = tcg_temp_new_i32();
3932 tcg_gen_movi_i32(addr, s->pc & ~2);
3933 } else {
3934 addr = load_reg(s, rn);
3936 if (insn & (1 << 24)) /* pre-decrement */
3937 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3939 if (dp)
3940 offset = 8;
3941 else
3942 offset = 4;
3943 for (i = 0; i < n; i++) {
3944 if (insn & ARM_CP_RW_BIT) {
3945 /* load */
3946 gen_vfp_ld(s, dp, addr);
3947 gen_mov_vreg_F0(dp, rd + i);
3948 } else {
3949 /* store */
3950 gen_mov_F0_vreg(dp, rd + i);
3951 gen_vfp_st(s, dp, addr);
3953 tcg_gen_addi_i32(addr, addr, offset);
3955 if (w) {
3956 /* writeback */
3957 if (insn & (1 << 24))
3958 offset = -offset * n;
3959 else if (dp && (insn & 1))
3960 offset = 4;
3961 else
3962 offset = 0;
3964 if (offset != 0)
3965 tcg_gen_addi_i32(addr, addr, offset);
3966 store_reg(s, rn, addr);
3967 } else {
3968 tcg_temp_free_i32(addr);
3972 break;
3973 default:
3974 /* Should never happen. */
3975 return 1;
3977 return 0;
3980 static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
3982 TranslationBlock *tb;
3984 tb = s->tb;
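/* Direct block chaining is only safe when the destination lies in the
 * same guest page as this TB (otherwise the mapping could change
 * underneath us), so only then do we emit a patchable goto_tb; in the
 * other case just update the PC and return to the main loop.
 */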
3985 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3986 tcg_gen_goto_tb(n);
3987 gen_set_pc_im(s, dest);
3988 tcg_gen_exit_tb((uintptr_t)tb + n);
3989 } else {
3990 gen_set_pc_im(s, dest);
3991 tcg_gen_exit_tb(0);
3995 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3997 if (unlikely(s->singlestep_enabled || s->ss_active)) {
3998 /* An indirect jump so that we still trigger the debug exception. */
3999 if (s->thumb)
4000 dest |= 1;
4001 gen_bx_im(s, dest);
4002 } else {
4003 gen_goto_tb(s, 0, dest);
4004 s->is_jmp = DISAS_TB_JUMP;
4008 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
4010 if (x)
4011 tcg_gen_sari_i32(t0, t0, 16);
4012 else
4013 gen_sxth(t0);
4014 if (y)
4015 tcg_gen_sari_i32(t1, t1, 16);
4016 else
4017 gen_sxth(t1);
4018 tcg_gen_mul_i32(t0, t0, t1);
4021 /* Return the mask of PSR bits set by a MSR instruction. */
4022 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4024 uint32_t mask;
4026 mask = 0;
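/* The four 'flags' bits mirror the MSR field mask: bit 0 selects the
 * control field (PSR[7:0]), bit 1 the extension field (PSR[15:8]),
 * bit 2 the status field (PSR[23:16]) and bit 3 the flags field
 * (PSR[31:24]).
 */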
4027 if (flags & (1 << 0))
4028 mask |= 0xff;
4029 if (flags & (1 << 1))
4030 mask |= 0xff00;
4031 if (flags & (1 << 2))
4032 mask |= 0xff0000;
4033 if (flags & (1 << 3))
4034 mask |= 0xff000000;
4036 /* Mask out undefined bits. */
4037 mask &= ~CPSR_RESERVED;
4038 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
4039 mask &= ~CPSR_T;
4041 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
4042 mask &= ~CPSR_Q; /* V5TE in reality */
4044 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
4045 mask &= ~(CPSR_E | CPSR_GE);
4047 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
4048 mask &= ~CPSR_IT;
4050 /* Mask out execution state and reserved bits. */
4051 if (!spsr) {
4052 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4054 /* Mask out privileged bits. */
4055 if (IS_USER(s))
4056 mask &= CPSR_USER;
4057 return mask;
4060 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
4061 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
4063 TCGv_i32 tmp;
4064 if (spsr) {
4065 /* ??? This is also undefined in system mode. */
4066 if (IS_USER(s))
4067 return 1;
4069 tmp = load_cpu_field(spsr);
4070 tcg_gen_andi_i32(tmp, tmp, ~mask);
4071 tcg_gen_andi_i32(t0, t0, mask);
4072 tcg_gen_or_i32(tmp, tmp, t0);
4073 store_cpu_field(tmp, spsr);
4074 } else {
4075 gen_set_cpsr(t0, mask);
4077 tcg_temp_free_i32(t0);
4078 gen_lookup_tb(s);
4079 return 0;
4082 /* Returns nonzero if access to the PSR is not permitted. */
4083 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4085 TCGv_i32 tmp;
4086 tmp = tcg_temp_new_i32();
4087 tcg_gen_movi_i32(tmp, val);
4088 return gen_set_psr(s, mask, spsr, tmp);
4091 /* Generate an old-style exception return. Marks pc as dead. */
4092 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4094 TCGv_i32 tmp;
4095 store_reg(s, 15, pc);
4096 tmp = load_cpu_field(spsr);
4097 gen_set_cpsr(tmp, CPSR_ERET_MASK);
4098 tcg_temp_free_i32(tmp);
4099 s->is_jmp = DISAS_UPDATE;
4102 /* Generate a v6 exception return. Marks both values as dead. */
4103 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
4105 gen_set_cpsr(cpsr, CPSR_ERET_MASK);
4106 tcg_temp_free_i32(cpsr);
4107 store_reg(s, 15, pc);
4108 s->is_jmp = DISAS_UPDATE;
4111 static void gen_nop_hint(DisasContext *s, int val)
4113 switch (val) {
4114 case 1: /* yield */
4115 gen_set_pc_im(s, s->pc);
4116 s->is_jmp = DISAS_YIELD;
4117 break;
4118 case 3: /* wfi */
4119 gen_set_pc_im(s, s->pc);
4120 s->is_jmp = DISAS_WFI;
4121 break;
4122 case 2: /* wfe */
4123 gen_set_pc_im(s, s->pc);
4124 s->is_jmp = DISAS_WFE;
4125 break;
4126 case 4: /* sev */
4127 case 5: /* sevl */
4128 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
4129 default: /* nop */
4130 break;
4134 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
4136 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
4138 switch (size) {
4139 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4140 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4141 case 2: tcg_gen_add_i32(t0, t0, t1); break;
4142 default: abort();
4146 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
4148 switch (size) {
4149 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4150 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4151 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
4152 default: return;
4156 /* 32-bit pairwise ops end up the same as the elementwise versions. */
4157 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4158 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4159 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4160 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
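/* The GEN_NEON_INTEGER_OP* macros below dispatch on ((size << 1) | u):
 * size 0/1/2 selects 8/16/32-bit elements and u selects the unsigned
 * variant, so cases 0..5 are s8, u8, s16, u16, s32, u32; anything
 * else makes the caller return 1 (UNDEF).
 */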
4162 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
4163 switch ((size << 1) | u) { \
4164 case 0: \
4165 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
4166 break; \
4167 case 1: \
4168 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
4169 break; \
4170 case 2: \
4171 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
4172 break; \
4173 case 3: \
4174 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
4175 break; \
4176 case 4: \
4177 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
4178 break; \
4179 case 5: \
4180 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
4181 break; \
4182 default: return 1; \
4183 }} while (0)
4185 #define GEN_NEON_INTEGER_OP(name) do { \
4186 switch ((size << 1) | u) { \
4187 case 0: \
4188 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
4189 break; \
4190 case 1: \
4191 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
4192 break; \
4193 case 2: \
4194 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
4195 break; \
4196 case 3: \
4197 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
4198 break; \
4199 case 4: \
4200 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
4201 break; \
4202 case 5: \
4203 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
4204 break; \
4205 default: return 1; \
4206 }} while (0)
4208 static TCGv_i32 neon_load_scratch(int scratch)
4210 TCGv_i32 tmp = tcg_temp_new_i32();
4211 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4212 return tmp;
4215 static void neon_store_scratch(int scratch, TCGv_i32 var)
4217 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4218 tcg_temp_free_i32(var);
4221 static inline TCGv_i32 neon_get_scalar(int size, int reg)
4223 TCGv_i32 tmp;
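/* 'reg' encodes both the D register and the lane: for 16-bit scalars
 * bits [2:0] pick the register and bit 3 the half-word within the
 * 32-bit element; for 32-bit scalars bits [3:0] pick the register.
 * reg >> 4 selects which 32-bit half of the D register to load.
 */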
4224 if (size == 1) {
4225 tmp = neon_load_reg(reg & 7, reg >> 4);
4226 if (reg & 8) {
4227 gen_neon_dup_high16(tmp);
4228 } else {
4229 gen_neon_dup_low16(tmp);
4231 } else {
4232 tmp = neon_load_reg(reg & 15, reg >> 4);
4234 return tmp;
4237 static int gen_neon_unzip(int rd, int rm, int size, int q)
4239 TCGv_i32 tmp, tmp2;
4240 if (!q && size == 2) {
4241 return 1;
4243 tmp = tcg_const_i32(rd);
4244 tmp2 = tcg_const_i32(rm);
4245 if (q) {
4246 switch (size) {
4247 case 0:
4248 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
4249 break;
4250 case 1:
4251 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
4252 break;
4253 case 2:
4254 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
4255 break;
4256 default:
4257 abort();
4259 } else {
4260 switch (size) {
4261 case 0:
4262 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
4263 break;
4264 case 1:
4265 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
4266 break;
4267 default:
4268 abort();
4271 tcg_temp_free_i32(tmp);
4272 tcg_temp_free_i32(tmp2);
4273 return 0;
4276 static int gen_neon_zip(int rd, int rm, int size, int q)
4278 TCGv_i32 tmp, tmp2;
4279 if (!q && size == 2) {
4280 return 1;
4282 tmp = tcg_const_i32(rd);
4283 tmp2 = tcg_const_i32(rm);
4284 if (q) {
4285 switch (size) {
4286 case 0:
4287 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
4288 break;
4289 case 1:
4290 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
4291 break;
4292 case 2:
4293 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
4294 break;
4295 default:
4296 abort();
4298 } else {
4299 switch (size) {
4300 case 0:
4301 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
4302 break;
4303 case 1:
4304 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
4305 break;
4306 default:
4307 abort();
4310 tcg_temp_free_i32(tmp);
4311 tcg_temp_free_i32(tmp2);
4312 return 0;
4313 }
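/* VTRN helpers: gather the even-numbered elements of the two inputs
 * into t0 and the odd-numbered elements into t1.  For example, with
 * byte vectors t0 = {a3,a2,a1,a0} and t1 = {b3,b2,b1,b0} the results
 * are t0 = {a2,b2,a0,b0} and t1 = {a3,b3,a1,b1}.
 */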
4315 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
4317 TCGv_i32 rd, tmp;
4319 rd = tcg_temp_new_i32();
4320 tmp = tcg_temp_new_i32();
4322 tcg_gen_shli_i32(rd, t0, 8);
4323 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4324 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4325 tcg_gen_or_i32(rd, rd, tmp);
4327 tcg_gen_shri_i32(t1, t1, 8);
4328 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4329 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4330 tcg_gen_or_i32(t1, t1, tmp);
4331 tcg_gen_mov_i32(t0, rd);
4333 tcg_temp_free_i32(tmp);
4334 tcg_temp_free_i32(rd);
4337 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
4339 TCGv_i32 rd, tmp;
4341 rd = tcg_temp_new_i32();
4342 tmp = tcg_temp_new_i32();
4344 tcg_gen_shli_i32(rd, t0, 16);
4345 tcg_gen_andi_i32(tmp, t1, 0xffff);
4346 tcg_gen_or_i32(rd, rd, tmp);
4347 tcg_gen_shri_i32(t1, t1, 16);
4348 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4349 tcg_gen_or_i32(t1, t1, tmp);
4350 tcg_gen_mov_i32(t0, rd);
4352 tcg_temp_free_i32(tmp);
4353 tcg_temp_free_i32(rd);
4357 static struct {
4358 int nregs;
4359 int interleave;
4360 int spacing;
4361 } neon_ls_element_type[11] = {
4362 {4, 4, 1},
4363 {4, 4, 2},
4364 {4, 1, 1},
4365 {4, 2, 1},
4366 {3, 3, 1},
4367 {3, 3, 2},
4368 {3, 1, 1},
4369 {1, 1, 1},
4370 {2, 2, 1},
4371 {2, 2, 2},
4372 {2, 1, 1}
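/* Illustrative reading of the table above, based on the decode in
 * disas_neon_ls_insn() below: it is indexed by insn bits [11:8] (the
 * 'type' field of a VLDn/VSTn "multiple structures" access); e.g. entry
 * 7 = {1, 1, 1} describes a single-register transfer with no interleaving
 * and a register spacing of one.
 */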
4375 /* Translate a NEON load/store element instruction. Return nonzero if the
4376 instruction is invalid. */
4377 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
4379 int rd, rn, rm;
4380 int op;
4381 int nregs;
4382 int interleave;
4383 int spacing;
4384 int stride;
4385 int size;
4386 int reg;
4387 int pass;
4388 int load;
4389 int shift;
4390 int n;
4391 TCGv_i32 addr;
4392 TCGv_i32 tmp;
4393 TCGv_i32 tmp2;
4394 TCGv_i64 tmp64;
4396 /* FIXME: this access check should not take precedence over UNDEF
4397 * for invalid encodings; we will generate incorrect syndrome information
4398 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4400 if (s->fp_excp_el) {
4401 gen_exception_insn(s, 4, EXCP_UDEF,
4402 syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
4403 return 0;
4406 if (!s->vfp_enabled)
4407 return 1;
4408 VFP_DREG_D(rd, insn);
4409 rn = (insn >> 16) & 0xf;
4410 rm = insn & 0xf;
4411 load = (insn & (1 << 21)) != 0;
4412 if ((insn & (1 << 23)) == 0) {
4413 /* Load store all elements. */
4414 op = (insn >> 8) & 0xf;
4415 size = (insn >> 6) & 3;
4416 if (op > 10)
4417 return 1;
4418 /* Catch UNDEF cases for bad values of align field */
4419 switch (op & 0xc) {
4420 case 4:
4421 if (((insn >> 5) & 1) == 1) {
4422 return 1;
4424 break;
4425 case 8:
4426 if (((insn >> 4) & 3) == 3) {
4427 return 1;
4429 break;
4430 default:
4431 break;
4433 nregs = neon_ls_element_type[op].nregs;
4434 interleave = neon_ls_element_type[op].interleave;
4435 spacing = neon_ls_element_type[op].spacing;
4436 if (size == 3 && (interleave | spacing) != 1)
4437 return 1;
4438 addr = tcg_temp_new_i32();
4439 load_reg_var(s, addr, rn);
4440 stride = (1 << size) * interleave;
4441 for (reg = 0; reg < nregs; reg++) {
4442 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
4443 load_reg_var(s, addr, rn);
4444 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
4445 } else if (interleave == 2 && nregs == 4 && reg == 2) {
4446 load_reg_var(s, addr, rn);
4447 tcg_gen_addi_i32(addr, addr, 1 << size);
4449 if (size == 3) {
4450 tmp64 = tcg_temp_new_i64();
4451 if (load) {
4452 gen_aa32_ld64(tmp64, addr, get_mem_index(s));
4453 neon_store_reg64(tmp64, rd);
4454 } else {
4455 neon_load_reg64(tmp64, rd);
4456 gen_aa32_st64(tmp64, addr, get_mem_index(s));
4458 tcg_temp_free_i64(tmp64);
4459 tcg_gen_addi_i32(addr, addr, stride);
4460 } else {
4461 for (pass = 0; pass < 2; pass++) {
4462 if (size == 2) {
4463 if (load) {
4464 tmp = tcg_temp_new_i32();
4465 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
4466 neon_store_reg(rd, pass, tmp);
4467 } else {
4468 tmp = neon_load_reg(rd, pass);
4469 gen_aa32_st32(tmp, addr, get_mem_index(s));
4470 tcg_temp_free_i32(tmp);
4472 tcg_gen_addi_i32(addr, addr, stride);
4473 } else if (size == 1) {
4474 if (load) {
4475 tmp = tcg_temp_new_i32();
4476 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
4477 tcg_gen_addi_i32(addr, addr, stride);
4478 tmp2 = tcg_temp_new_i32();
4479 gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
4480 tcg_gen_addi_i32(addr, addr, stride);
4481 tcg_gen_shli_i32(tmp2, tmp2, 16);
4482 tcg_gen_or_i32(tmp, tmp, tmp2);
4483 tcg_temp_free_i32(tmp2);
4484 neon_store_reg(rd, pass, tmp);
4485 } else {
4486 tmp = neon_load_reg(rd, pass);
4487 tmp2 = tcg_temp_new_i32();
4488 tcg_gen_shri_i32(tmp2, tmp, 16);
4489 gen_aa32_st16(tmp, addr, get_mem_index(s));
4490 tcg_temp_free_i32(tmp);
4491 tcg_gen_addi_i32(addr, addr, stride);
4492 gen_aa32_st16(tmp2, addr, get_mem_index(s));
4493 tcg_temp_free_i32(tmp2);
4494 tcg_gen_addi_i32(addr, addr, stride);
4496 } else /* size == 0 */ {
4497 if (load) {
4498 TCGV_UNUSED_I32(tmp2);
4499 for (n = 0; n < 4; n++) {
4500 tmp = tcg_temp_new_i32();
4501 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
4502 tcg_gen_addi_i32(addr, addr, stride);
4503 if (n == 0) {
4504 tmp2 = tmp;
4505 } else {
4506 tcg_gen_shli_i32(tmp, tmp, n * 8);
4507 tcg_gen_or_i32(tmp2, tmp2, tmp);
4508 tcg_temp_free_i32(tmp);
4511 neon_store_reg(rd, pass, tmp2);
4512 } else {
4513 tmp2 = neon_load_reg(rd, pass);
4514 for (n = 0; n < 4; n++) {
4515 tmp = tcg_temp_new_i32();
4516 if (n == 0) {
4517 tcg_gen_mov_i32(tmp, tmp2);
4518 } else {
4519 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4521 gen_aa32_st8(tmp, addr, get_mem_index(s));
4522 tcg_temp_free_i32(tmp);
4523 tcg_gen_addi_i32(addr, addr, stride);
4525 tcg_temp_free_i32(tmp2);
4530 rd += spacing;
4532 tcg_temp_free_i32(addr);
4533 stride = nregs * 8;
4534 } else {
4535 size = (insn >> 10) & 3;
4536 if (size == 3) {
4537 /* Load single element to all lanes. */
4538 int a = (insn >> 4) & 1;
4539 if (!load) {
4540 return 1;
4542 size = (insn >> 6) & 3;
4543 nregs = ((insn >> 8) & 3) + 1;
4545 if (size == 3) {
4546 if (nregs != 4 || a == 0) {
4547 return 1;
4549 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4550 size = 2;
4552 if (nregs == 1 && a == 1 && size == 0) {
4553 return 1;
4555 if (nregs == 3 && a == 1) {
4556 return 1;
4558 addr = tcg_temp_new_i32();
4559 load_reg_var(s, addr, rn);
4560 if (nregs == 1) {
4561 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4562 tmp = gen_load_and_replicate(s, addr, size);
4563 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4564 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4565 if (insn & (1 << 5)) {
4566 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4567 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4569 tcg_temp_free_i32(tmp);
4570 } else {
4571 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4572 stride = (insn & (1 << 5)) ? 2 : 1;
4573 for (reg = 0; reg < nregs; reg++) {
4574 tmp = gen_load_and_replicate(s, addr, size);
4575 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4576 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4577 tcg_temp_free_i32(tmp);
4578 tcg_gen_addi_i32(addr, addr, 1 << size);
4579 rd += stride;
4582 tcg_temp_free_i32(addr);
4583 stride = (1 << size) * nregs;
4584 } else {
4585 /* Single element. */
4586 int idx = (insn >> 4) & 0xf;
4587 pass = (insn >> 7) & 1;
4588 switch (size) {
4589 case 0:
4590 shift = ((insn >> 5) & 3) * 8;
4591 stride = 1;
4592 break;
4593 case 1:
4594 shift = ((insn >> 6) & 1) * 16;
4595 stride = (insn & (1 << 5)) ? 2 : 1;
4596 break;
4597 case 2:
4598 shift = 0;
4599 stride = (insn & (1 << 6)) ? 2 : 1;
4600 break;
4601 default:
4602 abort();
4604 nregs = ((insn >> 8) & 3) + 1;
4605 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4606 switch (nregs) {
4607 case 1:
4608 if (((idx & (1 << size)) != 0) ||
4609 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4610 return 1;
4612 break;
4613 case 3:
4614 if ((idx & 1) != 0) {
4615 return 1;
4617 /* fall through */
4618 case 2:
4619 if (size == 2 && (idx & 2) != 0) {
4620 return 1;
4622 break;
4623 case 4:
4624 if ((size == 2) && ((idx & 3) == 3)) {
4625 return 1;
4627 break;
4628 default:
4629 abort();
4631 if ((rd + stride * (nregs - 1)) > 31) {
4632 /* Attempts to write off the end of the register file
4633 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4634 * the neon_load_reg() would write off the end of the array.
4636 return 1;
4638 addr = tcg_temp_new_i32();
4639 load_reg_var(s, addr, rn);
4640 for (reg = 0; reg < nregs; reg++) {
4641 if (load) {
4642 tmp = tcg_temp_new_i32();
4643 switch (size) {
4644 case 0:
4645 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
4646 break;
4647 case 1:
4648 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
4649 break;
4650 case 2:
4651 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
4652 break;
4653 default: /* Avoid compiler warnings. */
4654 abort();
4656 if (size != 2) {
4657 tmp2 = neon_load_reg(rd, pass);
4658 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4659 shift, size ? 16 : 8);
4660 tcg_temp_free_i32(tmp2);
4662 neon_store_reg(rd, pass, tmp);
4663 } else { /* Store */
4664 tmp = neon_load_reg(rd, pass);
4665 if (shift)
4666 tcg_gen_shri_i32(tmp, tmp, shift);
4667 switch (size) {
4668 case 0:
4669 gen_aa32_st8(tmp, addr, get_mem_index(s));
4670 break;
4671 case 1:
4672 gen_aa32_st16(tmp, addr, get_mem_index(s));
4673 break;
4674 case 2:
4675 gen_aa32_st32(tmp, addr, get_mem_index(s));
4676 break;
4678 tcg_temp_free_i32(tmp);
4680 rd += stride;
4681 tcg_gen_addi_i32(addr, addr, 1 << size);
4683 tcg_temp_free_i32(addr);
4684 stride = nregs * (1 << size);
4687 if (rm != 15) {
4688 TCGv_i32 base;
4690 base = load_reg(s, rn);
4691 if (rm == 13) {
4692 tcg_gen_addi_i32(base, base, stride);
4693 } else {
4694 TCGv_i32 index;
4695 index = load_reg(s, rm);
4696 tcg_gen_add_i32(base, base, index);
4697 tcg_temp_free_i32(index);
4699 store_reg(s, rn, base);
4701 return 0;
4704 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4705 static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
4707 tcg_gen_and_i32(t, t, c);
4708 tcg_gen_andc_i32(f, f, c);
4709 tcg_gen_or_i32(dest, t, f);
4712 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
4714 switch (size) {
4715 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4716 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4717 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
4718 default: abort();
4722 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
4724 switch (size) {
4725 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4726 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4727 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4728 default: abort();
4732 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
4734 switch (size) {
4735 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4736 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4737 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4738 default: abort();
4742 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
4744 switch (size) {
4745 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4746 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4747 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4748 default: abort();
4752 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
4753 int q, int u)
4755 if (q) {
4756 if (u) {
4757 switch (size) {
4758 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4759 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4760 default: abort();
4762 } else {
4763 switch (size) {
4764 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4765 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4766 default: abort();
4769 } else {
4770 if (u) {
4771 switch (size) {
4772 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4773 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4774 default: abort();
4776 } else {
4777 switch (size) {
4778 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4779 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4780 default: abort();
4786 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
4788 if (u) {
4789 switch (size) {
4790 case 0: gen_helper_neon_widen_u8(dest, src); break;
4791 case 1: gen_helper_neon_widen_u16(dest, src); break;
4792 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4793 default: abort();
4795 } else {
4796 switch (size) {
4797 case 0: gen_helper_neon_widen_s8(dest, src); break;
4798 case 1: gen_helper_neon_widen_s16(dest, src); break;
4799 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4800 default: abort();
4803 tcg_temp_free_i32(src);
4806 static inline void gen_neon_addl(int size)
4808 switch (size) {
4809 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4810 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4811 case 2: tcg_gen_add_i64(CPU_V001); break;
4812 default: abort();
4816 static inline void gen_neon_subl(int size)
4818 switch (size) {
4819 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4820 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4821 case 2: tcg_gen_sub_i64(CPU_V001); break;
4822 default: abort();
4826 static inline void gen_neon_negl(TCGv_i64 var, int size)
4828 switch (size) {
4829 case 0: gen_helper_neon_negl_u16(var, var); break;
4830 case 1: gen_helper_neon_negl_u32(var, var); break;
4831 case 2:
4832 tcg_gen_neg_i64(var, var);
4833 break;
4834 default: abort();
4838 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4840 switch (size) {
4841 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4842 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4843 default: abort();
4847 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4848 int size, int u)
4850 TCGv_i64 tmp;
4852 switch ((size << 1) | u) {
4853 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4854 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4855 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4856 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4857 case 4:
4858 tmp = gen_muls_i64_i32(a, b);
4859 tcg_gen_mov_i64(dest, tmp);
4860 tcg_temp_free_i64(tmp);
4861 break;
4862 case 5:
4863 tmp = gen_mulu_i64_i32(a, b);
4864 tcg_gen_mov_i64(dest, tmp);
4865 tcg_temp_free_i64(tmp);
4866 break;
4867 default: abort();
4870 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4871 Don't forget to clean them now. */
4872 if (size < 2) {
4873 tcg_temp_free_i32(a);
4874 tcg_temp_free_i32(b);
4878 static void gen_neon_narrow_op(int op, int u, int size,
4879 TCGv_i32 dest, TCGv_i64 src)
4881 if (op) {
4882 if (u) {
4883 gen_neon_unarrow_sats(size, dest, src);
4884 } else {
4885 gen_neon_narrow(size, dest, src);
4887 } else {
4888 if (u) {
4889 gen_neon_narrow_satu(size, dest, src);
4890 } else {
4891 gen_neon_narrow_sats(size, dest, src);
4896 /* Symbolic constants for op fields for Neon 3-register same-length.
4897 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4898 * table A7-9.
4900 #define NEON_3R_VHADD 0
4901 #define NEON_3R_VQADD 1
4902 #define NEON_3R_VRHADD 2
4903 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4904 #define NEON_3R_VHSUB 4
4905 #define NEON_3R_VQSUB 5
4906 #define NEON_3R_VCGT 6
4907 #define NEON_3R_VCGE 7
4908 #define NEON_3R_VSHL 8
4909 #define NEON_3R_VQSHL 9
4910 #define NEON_3R_VRSHL 10
4911 #define NEON_3R_VQRSHL 11
4912 #define NEON_3R_VMAX 12
4913 #define NEON_3R_VMIN 13
4914 #define NEON_3R_VABD 14
4915 #define NEON_3R_VABA 15
4916 #define NEON_3R_VADD_VSUB 16
4917 #define NEON_3R_VTST_VCEQ 17
4918 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4919 #define NEON_3R_VMUL 19
4920 #define NEON_3R_VPMAX 20
4921 #define NEON_3R_VPMIN 21
4922 #define NEON_3R_VQDMULH_VQRDMULH 22
4923 #define NEON_3R_VPADD 23
4924 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
4925 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4926 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4927 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4928 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4929 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4930 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4931 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
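/* Illustrative example of the encoding described above: the decoder below
 * forms op as ((insn >> 7) & 0x1e) | ((insn >> 4) & 1), i.e.
 * op = (bits[11:8] << 1) | bit[4]; so bits[11:8] == 0b0011 with bit[4] == 0
 * gives op == 6 == NEON_3R_VCGT, and with bit[4] == 1 gives NEON_3R_VCGE.
 */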
4933 static const uint8_t neon_3r_sizes[] = {
4934 [NEON_3R_VHADD] = 0x7,
4935 [NEON_3R_VQADD] = 0xf,
4936 [NEON_3R_VRHADD] = 0x7,
4937 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4938 [NEON_3R_VHSUB] = 0x7,
4939 [NEON_3R_VQSUB] = 0xf,
4940 [NEON_3R_VCGT] = 0x7,
4941 [NEON_3R_VCGE] = 0x7,
4942 [NEON_3R_VSHL] = 0xf,
4943 [NEON_3R_VQSHL] = 0xf,
4944 [NEON_3R_VRSHL] = 0xf,
4945 [NEON_3R_VQRSHL] = 0xf,
4946 [NEON_3R_VMAX] = 0x7,
4947 [NEON_3R_VMIN] = 0x7,
4948 [NEON_3R_VABD] = 0x7,
4949 [NEON_3R_VABA] = 0x7,
4950 [NEON_3R_VADD_VSUB] = 0xf,
4951 [NEON_3R_VTST_VCEQ] = 0x7,
4952 [NEON_3R_VML] = 0x7,
4953 [NEON_3R_VMUL] = 0x7,
4954 [NEON_3R_VPMAX] = 0x7,
4955 [NEON_3R_VPMIN] = 0x7,
4956 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4957 [NEON_3R_VPADD] = 0x7,
4958 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
4959 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
4960 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4961 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4962 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4963 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4964 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4965 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
4968 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4969 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4970 * table A7-13.
4972 #define NEON_2RM_VREV64 0
4973 #define NEON_2RM_VREV32 1
4974 #define NEON_2RM_VREV16 2
4975 #define NEON_2RM_VPADDL 4
4976 #define NEON_2RM_VPADDL_U 5
4977 #define NEON_2RM_AESE 6 /* Includes AESD */
4978 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
4979 #define NEON_2RM_VCLS 8
4980 #define NEON_2RM_VCLZ 9
4981 #define NEON_2RM_VCNT 10
4982 #define NEON_2RM_VMVN 11
4983 #define NEON_2RM_VPADAL 12
4984 #define NEON_2RM_VPADAL_U 13
4985 #define NEON_2RM_VQABS 14
4986 #define NEON_2RM_VQNEG 15
4987 #define NEON_2RM_VCGT0 16
4988 #define NEON_2RM_VCGE0 17
4989 #define NEON_2RM_VCEQ0 18
4990 #define NEON_2RM_VCLE0 19
4991 #define NEON_2RM_VCLT0 20
4992 #define NEON_2RM_SHA1H 21
4993 #define NEON_2RM_VABS 22
4994 #define NEON_2RM_VNEG 23
4995 #define NEON_2RM_VCGT0_F 24
4996 #define NEON_2RM_VCGE0_F 25
4997 #define NEON_2RM_VCEQ0_F 26
4998 #define NEON_2RM_VCLE0_F 27
4999 #define NEON_2RM_VCLT0_F 28
5000 #define NEON_2RM_VABS_F 30
5001 #define NEON_2RM_VNEG_F 31
5002 #define NEON_2RM_VSWP 32
5003 #define NEON_2RM_VTRN 33
5004 #define NEON_2RM_VUZP 34
5005 #define NEON_2RM_VZIP 35
5006 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
5007 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
5008 #define NEON_2RM_VSHLL 38
5009 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
5010 #define NEON_2RM_VRINTN 40
5011 #define NEON_2RM_VRINTX 41
5012 #define NEON_2RM_VRINTA 42
5013 #define NEON_2RM_VRINTZ 43
5014 #define NEON_2RM_VCVT_F16_F32 44
5015 #define NEON_2RM_VRINTM 45
5016 #define NEON_2RM_VCVT_F32_F16 46
5017 #define NEON_2RM_VRINTP 47
5018 #define NEON_2RM_VCVTAU 48
5019 #define NEON_2RM_VCVTAS 49
5020 #define NEON_2RM_VCVTNU 50
5021 #define NEON_2RM_VCVTNS 51
5022 #define NEON_2RM_VCVTPU 52
5023 #define NEON_2RM_VCVTPS 53
5024 #define NEON_2RM_VCVTMU 54
5025 #define NEON_2RM_VCVTMS 55
5026 #define NEON_2RM_VRECPE 56
5027 #define NEON_2RM_VRSQRTE 57
5028 #define NEON_2RM_VRECPE_F 58
5029 #define NEON_2RM_VRSQRTE_F 59
5030 #define NEON_2RM_VCVT_FS 60
5031 #define NEON_2RM_VCVT_FU 61
5032 #define NEON_2RM_VCVT_SF 62
5033 #define NEON_2RM_VCVT_UF 63
5035 static int neon_2rm_is_float_op(int op)
5037 /* Return true if this neon 2reg-misc op is float-to-float */
5038 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
5039 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
5040 op == NEON_2RM_VRINTM ||
5041 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
5042 op >= NEON_2RM_VRECPE_F);
5045 /* Each entry in this array has bit n set if the insn allows
5046 * size value n (otherwise it will UNDEF). Since unallocated
5047 * op values will have no bits set they always UNDEF.
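/* For example (illustrative): [NEON_2RM_VREV16] = 0x1 permits only
 * size == 0, [NEON_2RM_VPADDL] = 0x7 permits sizes 0, 1 and 2, and an
 * unallocated op value (no initializer) reads as 0 and so always fails
 * the (neon_2rm_sizes[op] & (1 << size)) check in the decoder below.
 */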
5049 static const uint8_t neon_2rm_sizes[] = {
5050 [NEON_2RM_VREV64] = 0x7,
5051 [NEON_2RM_VREV32] = 0x3,
5052 [NEON_2RM_VREV16] = 0x1,
5053 [NEON_2RM_VPADDL] = 0x7,
5054 [NEON_2RM_VPADDL_U] = 0x7,
5055 [NEON_2RM_AESE] = 0x1,
5056 [NEON_2RM_AESMC] = 0x1,
5057 [NEON_2RM_VCLS] = 0x7,
5058 [NEON_2RM_VCLZ] = 0x7,
5059 [NEON_2RM_VCNT] = 0x1,
5060 [NEON_2RM_VMVN] = 0x1,
5061 [NEON_2RM_VPADAL] = 0x7,
5062 [NEON_2RM_VPADAL_U] = 0x7,
5063 [NEON_2RM_VQABS] = 0x7,
5064 [NEON_2RM_VQNEG] = 0x7,
5065 [NEON_2RM_VCGT0] = 0x7,
5066 [NEON_2RM_VCGE0] = 0x7,
5067 [NEON_2RM_VCEQ0] = 0x7,
5068 [NEON_2RM_VCLE0] = 0x7,
5069 [NEON_2RM_VCLT0] = 0x7,
5070 [NEON_2RM_SHA1H] = 0x4,
5071 [NEON_2RM_VABS] = 0x7,
5072 [NEON_2RM_VNEG] = 0x7,
5073 [NEON_2RM_VCGT0_F] = 0x4,
5074 [NEON_2RM_VCGE0_F] = 0x4,
5075 [NEON_2RM_VCEQ0_F] = 0x4,
5076 [NEON_2RM_VCLE0_F] = 0x4,
5077 [NEON_2RM_VCLT0_F] = 0x4,
5078 [NEON_2RM_VABS_F] = 0x4,
5079 [NEON_2RM_VNEG_F] = 0x4,
5080 [NEON_2RM_VSWP] = 0x1,
5081 [NEON_2RM_VTRN] = 0x7,
5082 [NEON_2RM_VUZP] = 0x7,
5083 [NEON_2RM_VZIP] = 0x7,
5084 [NEON_2RM_VMOVN] = 0x7,
5085 [NEON_2RM_VQMOVN] = 0x7,
5086 [NEON_2RM_VSHLL] = 0x7,
5087 [NEON_2RM_SHA1SU1] = 0x4,
5088 [NEON_2RM_VRINTN] = 0x4,
5089 [NEON_2RM_VRINTX] = 0x4,
5090 [NEON_2RM_VRINTA] = 0x4,
5091 [NEON_2RM_VRINTZ] = 0x4,
5092 [NEON_2RM_VCVT_F16_F32] = 0x2,
5093 [NEON_2RM_VRINTM] = 0x4,
5094 [NEON_2RM_VCVT_F32_F16] = 0x2,
5095 [NEON_2RM_VRINTP] = 0x4,
5096 [NEON_2RM_VCVTAU] = 0x4,
5097 [NEON_2RM_VCVTAS] = 0x4,
5098 [NEON_2RM_VCVTNU] = 0x4,
5099 [NEON_2RM_VCVTNS] = 0x4,
5100 [NEON_2RM_VCVTPU] = 0x4,
5101 [NEON_2RM_VCVTPS] = 0x4,
5102 [NEON_2RM_VCVTMU] = 0x4,
5103 [NEON_2RM_VCVTMS] = 0x4,
5104 [NEON_2RM_VRECPE] = 0x4,
5105 [NEON_2RM_VRSQRTE] = 0x4,
5106 [NEON_2RM_VRECPE_F] = 0x4,
5107 [NEON_2RM_VRSQRTE_F] = 0x4,
5108 [NEON_2RM_VCVT_FS] = 0x4,
5109 [NEON_2RM_VCVT_FU] = 0x4,
5110 [NEON_2RM_VCVT_SF] = 0x4,
5111 [NEON_2RM_VCVT_UF] = 0x4,
5114 /* Translate a NEON data processing instruction. Return nonzero if the
5115 instruction is invalid.
5116 We process data in a mixture of 32-bit and 64-bit chunks.
5117 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
5119 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
5121 int op;
5122 int q;
5123 int rd, rn, rm;
5124 int size;
5125 int shift;
5126 int pass;
5127 int count;
5128 int pairwise;
5129 int u;
5130 uint32_t imm, mask;
5131 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
5132 TCGv_i64 tmp64;
5134 /* FIXME: this access check should not take precedence over UNDEF
5135 * for invalid encodings; we will generate incorrect syndrome information
5136 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5138 if (s->fp_excp_el) {
5139 gen_exception_insn(s, 4, EXCP_UDEF,
5140 syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
5141 return 0;
5144 if (!s->vfp_enabled)
5145 return 1;
5146 q = (insn & (1 << 6)) != 0;
5147 u = (insn >> 24) & 1;
5148 VFP_DREG_D(rd, insn);
5149 VFP_DREG_N(rn, insn);
5150 VFP_DREG_M(rm, insn);
5151 size = (insn >> 20) & 3;
5152 if ((insn & (1 << 23)) == 0) {
5153 /* Three register same length. */
5154 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
5155 /* Catch invalid op and bad size combinations: UNDEF */
5156 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5157 return 1;
5159 /* All insns of this form UNDEF for either this condition or the
5160 * superset of cases "Q==1"; we catch the latter later.
5162 if (q && ((rd | rn | rm) & 1)) {
5163 return 1;
5166 * The SHA-1/SHA-256 3-register instructions require special treatment
5167 * here, as their size field is overloaded as an op type selector, and
5168 * they all consume their input in a single pass.
5170 if (op == NEON_3R_SHA) {
5171 if (!q) {
5172 return 1;
5174 if (!u) { /* SHA-1 */
5175 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
5176 return 1;
5178 tmp = tcg_const_i32(rd);
5179 tmp2 = tcg_const_i32(rn);
5180 tmp3 = tcg_const_i32(rm);
5181 tmp4 = tcg_const_i32(size);
5182 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5183 tcg_temp_free_i32(tmp4);
5184 } else { /* SHA-256 */
5185 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
5186 return 1;
5188 tmp = tcg_const_i32(rd);
5189 tmp2 = tcg_const_i32(rn);
5190 tmp3 = tcg_const_i32(rm);
5191 switch (size) {
5192 case 0:
5193 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5194 break;
5195 case 1:
5196 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5197 break;
5198 case 2:
5199 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5200 break;
5203 tcg_temp_free_i32(tmp);
5204 tcg_temp_free_i32(tmp2);
5205 tcg_temp_free_i32(tmp3);
5206 return 0;
5208 if (size == 3 && op != NEON_3R_LOGIC) {
5209 /* 64-bit element instructions. */
5210 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5211 neon_load_reg64(cpu_V0, rn + pass);
5212 neon_load_reg64(cpu_V1, rm + pass);
5213 switch (op) {
5214 case NEON_3R_VQADD:
5215 if (u) {
5216 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5217 cpu_V0, cpu_V1);
5218 } else {
5219 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5220 cpu_V0, cpu_V1);
5222 break;
5223 case NEON_3R_VQSUB:
5224 if (u) {
5225 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5226 cpu_V0, cpu_V1);
5227 } else {
5228 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5229 cpu_V0, cpu_V1);
5231 break;
5232 case NEON_3R_VSHL:
5233 if (u) {
5234 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5235 } else {
5236 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5238 break;
5239 case NEON_3R_VQSHL:
5240 if (u) {
5241 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5242 cpu_V1, cpu_V0);
5243 } else {
5244 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5245 cpu_V1, cpu_V0);
5247 break;
5248 case NEON_3R_VRSHL:
5249 if (u) {
5250 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
5251 } else {
5252 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5254 break;
5255 case NEON_3R_VQRSHL:
5256 if (u) {
5257 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5258 cpu_V1, cpu_V0);
5259 } else {
5260 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5261 cpu_V1, cpu_V0);
5263 break;
5264 case NEON_3R_VADD_VSUB:
5265 if (u) {
5266 tcg_gen_sub_i64(CPU_V001);
5267 } else {
5268 tcg_gen_add_i64(CPU_V001);
5270 break;
5271 default:
5272 abort();
5274 neon_store_reg64(cpu_V0, rd + pass);
5276 return 0;
5278 pairwise = 0;
5279 switch (op) {
5280 case NEON_3R_VSHL:
5281 case NEON_3R_VQSHL:
5282 case NEON_3R_VRSHL:
5283 case NEON_3R_VQRSHL:
5285 int rtmp;
5286 /* Shift instruction operands are reversed. */
5287 rtmp = rn;
5288 rn = rm;
5289 rm = rtmp;
5291 break;
5292 case NEON_3R_VPADD:
5293 if (u) {
5294 return 1;
5296 /* Fall through */
5297 case NEON_3R_VPMAX:
5298 case NEON_3R_VPMIN:
5299 pairwise = 1;
5300 break;
5301 case NEON_3R_FLOAT_ARITH:
5302 pairwise = (u && size < 2); /* if VPADD (float) */
5303 break;
5304 case NEON_3R_FLOAT_MINMAX:
5305 pairwise = u; /* if VPMIN/VPMAX (float) */
5306 break;
5307 case NEON_3R_FLOAT_CMP:
5308 if (!u && size) {
5309 /* no encoding for U=0 C=1x */
5310 return 1;
5312 break;
5313 case NEON_3R_FLOAT_ACMP:
5314 if (!u) {
5315 return 1;
5317 break;
5318 case NEON_3R_FLOAT_MISC:
5319 /* VMAXNM/VMINNM in ARMv8 */
5320 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
5321 return 1;
5323 break;
5324 case NEON_3R_VMUL:
5325 if (u && (size != 0)) {
5326 /* UNDEF on invalid size for polynomial subcase */
5327 return 1;
5329 break;
5330 case NEON_3R_VFM:
5331 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
5332 return 1;
5334 break;
5335 default:
5336 break;
5339 if (pairwise && q) {
5340 /* All the pairwise insns UNDEF if Q is set */
5341 return 1;
5344 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5346 if (pairwise) {
5347 /* Pairwise. */
5348 if (pass < 1) {
5349 tmp = neon_load_reg(rn, 0);
5350 tmp2 = neon_load_reg(rn, 1);
5351 } else {
5352 tmp = neon_load_reg(rm, 0);
5353 tmp2 = neon_load_reg(rm, 1);
5355 } else {
5356 /* Elementwise. */
5357 tmp = neon_load_reg(rn, pass);
5358 tmp2 = neon_load_reg(rm, pass);
5360 switch (op) {
5361 case NEON_3R_VHADD:
5362 GEN_NEON_INTEGER_OP(hadd);
5363 break;
5364 case NEON_3R_VQADD:
5365 GEN_NEON_INTEGER_OP_ENV(qadd);
5366 break;
5367 case NEON_3R_VRHADD:
5368 GEN_NEON_INTEGER_OP(rhadd);
5369 break;
5370 case NEON_3R_LOGIC: /* Logic ops. */
5371 switch ((u << 2) | size) {
5372 case 0: /* VAND */
5373 tcg_gen_and_i32(tmp, tmp, tmp2);
5374 break;
5375 case 1: /* BIC */
5376 tcg_gen_andc_i32(tmp, tmp, tmp2);
5377 break;
5378 case 2: /* VORR */
5379 tcg_gen_or_i32(tmp, tmp, tmp2);
5380 break;
5381 case 3: /* VORN */
5382 tcg_gen_orc_i32(tmp, tmp, tmp2);
5383 break;
5384 case 4: /* VEOR */
5385 tcg_gen_xor_i32(tmp, tmp, tmp2);
5386 break;
5387 case 5: /* VBSL */
5388 tmp3 = neon_load_reg(rd, pass);
5389 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
5390 tcg_temp_free_i32(tmp3);
5391 break;
5392 case 6: /* VBIT */
5393 tmp3 = neon_load_reg(rd, pass);
5394 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
5395 tcg_temp_free_i32(tmp3);
5396 break;
5397 case 7: /* VBIF */
5398 tmp3 = neon_load_reg(rd, pass);
5399 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
5400 tcg_temp_free_i32(tmp3);
5401 break;
5403 break;
5404 case NEON_3R_VHSUB:
5405 GEN_NEON_INTEGER_OP(hsub);
5406 break;
5407 case NEON_3R_VQSUB:
5408 GEN_NEON_INTEGER_OP_ENV(qsub);
5409 break;
5410 case NEON_3R_VCGT:
5411 GEN_NEON_INTEGER_OP(cgt);
5412 break;
5413 case NEON_3R_VCGE:
5414 GEN_NEON_INTEGER_OP(cge);
5415 break;
5416 case NEON_3R_VSHL:
5417 GEN_NEON_INTEGER_OP(shl);
5418 break;
5419 case NEON_3R_VQSHL:
5420 GEN_NEON_INTEGER_OP_ENV(qshl);
5421 break;
5422 case NEON_3R_VRSHL:
5423 GEN_NEON_INTEGER_OP(rshl);
5424 break;
5425 case NEON_3R_VQRSHL:
5426 GEN_NEON_INTEGER_OP_ENV(qrshl);
5427 break;
5428 case NEON_3R_VMAX:
5429 GEN_NEON_INTEGER_OP(max);
5430 break;
5431 case NEON_3R_VMIN:
5432 GEN_NEON_INTEGER_OP(min);
5433 break;
5434 case NEON_3R_VABD:
5435 GEN_NEON_INTEGER_OP(abd);
5436 break;
5437 case NEON_3R_VABA:
5438 GEN_NEON_INTEGER_OP(abd);
5439 tcg_temp_free_i32(tmp2);
5440 tmp2 = neon_load_reg(rd, pass);
5441 gen_neon_add(size, tmp, tmp2);
5442 break;
5443 case NEON_3R_VADD_VSUB:
5444 if (!u) { /* VADD */
5445 gen_neon_add(size, tmp, tmp2);
5446 } else { /* VSUB */
5447 switch (size) {
5448 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5449 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5450 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
5451 default: abort();
5454 break;
5455 case NEON_3R_VTST_VCEQ:
5456 if (!u) { /* VTST */
5457 switch (size) {
5458 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5459 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5460 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
5461 default: abort();
5463 } else { /* VCEQ */
5464 switch (size) {
5465 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5466 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5467 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5468 default: abort();
5471 break;
5472 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
5473 switch (size) {
5474 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5475 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5476 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5477 default: abort();
5479 tcg_temp_free_i32(tmp2);
5480 tmp2 = neon_load_reg(rd, pass);
5481 if (u) { /* VMLS */
5482 gen_neon_rsb(size, tmp, tmp2);
5483 } else { /* VMLA */
5484 gen_neon_add(size, tmp, tmp2);
5486 break;
5487 case NEON_3R_VMUL:
5488 if (u) { /* polynomial */
5489 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
5490 } else { /* Integer */
5491 switch (size) {
5492 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5493 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5494 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5495 default: abort();
5498 break;
5499 case NEON_3R_VPMAX:
5500 GEN_NEON_INTEGER_OP(pmax);
5501 break;
5502 case NEON_3R_VPMIN:
5503 GEN_NEON_INTEGER_OP(pmin);
5504 break;
5505 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
5506 if (!u) { /* VQDMULH */
5507 switch (size) {
5508 case 1:
5509 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5510 break;
5511 case 2:
5512 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5513 break;
5514 default: abort();
5516 } else { /* VQRDMULH */
5517 switch (size) {
5518 case 1:
5519 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5520 break;
5521 case 2:
5522 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5523 break;
5524 default: abort();
5527 break;
5528 case NEON_3R_VPADD:
5529 switch (size) {
5530 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5531 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5532 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
5533 default: abort();
5535 break;
5536 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
5538 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5539 switch ((u << 2) | size) {
5540 case 0: /* VADD */
5541 case 4: /* VPADD */
5542 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5543 break;
5544 case 2: /* VSUB */
5545 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
5546 break;
5547 case 6: /* VABD */
5548 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5549 break;
5550 default:
5551 abort();
5553 tcg_temp_free_ptr(fpstatus);
5554 break;
5556 case NEON_3R_FLOAT_MULTIPLY:
5558 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5559 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5560 if (!u) {
5561 tcg_temp_free_i32(tmp2);
5562 tmp2 = neon_load_reg(rd, pass);
5563 if (size == 0) {
5564 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5565 } else {
5566 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5569 tcg_temp_free_ptr(fpstatus);
5570 break;
5572 case NEON_3R_FLOAT_CMP:
5574 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5575 if (!u) {
5576 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
5577 } else {
5578 if (size == 0) {
5579 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5580 } else {
5581 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5584 tcg_temp_free_ptr(fpstatus);
5585 break;
5587 case NEON_3R_FLOAT_ACMP:
5589 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5590 if (size == 0) {
5591 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5592 } else {
5593 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5595 tcg_temp_free_ptr(fpstatus);
5596 break;
5598 case NEON_3R_FLOAT_MINMAX:
5600 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5601 if (size == 0) {
5602 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
5603 } else {
5604 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
5606 tcg_temp_free_ptr(fpstatus);
5607 break;
5609 case NEON_3R_FLOAT_MISC:
5610 if (u) {
5611 /* VMAXNM/VMINNM */
5612 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5613 if (size == 0) {
5614 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
5615 } else {
5616 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
5618 tcg_temp_free_ptr(fpstatus);
5619 } else {
5620 if (size == 0) {
5621 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5622 } else {
5623 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5626 break;
5627 case NEON_3R_VFM:
5629 /* VFMA, VFMS: fused multiply-add */
5630 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5631 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5632 if (size) {
5633 /* VFMS */
5634 gen_helper_vfp_negs(tmp, tmp);
5636 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5637 tcg_temp_free_i32(tmp3);
5638 tcg_temp_free_ptr(fpstatus);
5639 break;
5641 default:
5642 abort();
5644 tcg_temp_free_i32(tmp2);
5646 /* Save the result. For elementwise operations we can put it
5647 straight into the destination register. For pairwise operations
5648 we have to be careful to avoid clobbering the source operands. */
5649 if (pairwise && rd == rm) {
5650 neon_store_scratch(pass, tmp);
5651 } else {
5652 neon_store_reg(rd, pass, tmp);
5655 } /* for pass */
5656 if (pairwise && rd == rm) {
5657 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5658 tmp = neon_load_scratch(pass);
5659 neon_store_reg(rd, pass, tmp);
5662 /* End of 3 register same size operations. */
5663 } else if (insn & (1 << 4)) {
5664 if ((insn & 0x00380080) != 0) {
5665 /* Two registers and shift. */
5666 op = (insn >> 8) & 0xf;
5667 if (insn & (1 << 7)) {
5668 /* 64-bit shift. */
5669 if (op > 7) {
5670 return 1;
5672 size = 3;
5673 } else {
5674 size = 2;
5675 while ((insn & (1 << (size + 19))) == 0)
5676 size--;
5678 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5679 /* To avoid excessive duplication of ops we implement shift
5680 by immediate using the variable shift operations. */
5681 if (op < 8) {
5682 /* Shift by immediate:
5683 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5684 if (q && ((rd | rm) & 1)) {
5685 return 1;
5687 if (!u && (op == 4 || op == 6)) {
5688 return 1;
5690 /* Right shifts are encoded as N - shift, where N is the
5691 element size in bits. */
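/* Worked example (illustrative): for size == 0 the element width is 8,
 * so an immediate field of 5 encodes VSHR #3; after the subtraction
 * below, shift == -3, and the variable-shift helpers treat a negative
 * count as a right shift.
 */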
5692 if (op <= 4)
5693 shift = shift - (1 << (size + 3));
5694 if (size == 3) {
5695 count = q + 1;
5696 } else {
5697 count = q ? 4: 2;
5699 switch (size) {
5700 case 0:
5701 imm = (uint8_t) shift;
5702 imm |= imm << 8;
5703 imm |= imm << 16;
5704 break;
5705 case 1:
5706 imm = (uint16_t) shift;
5707 imm |= imm << 16;
5708 break;
5709 case 2:
5710 case 3:
5711 imm = shift;
5712 break;
5713 default:
5714 abort();
5717 for (pass = 0; pass < count; pass++) {
5718 if (size == 3) {
5719 neon_load_reg64(cpu_V0, rm + pass);
5720 tcg_gen_movi_i64(cpu_V1, imm);
5721 switch (op) {
5722 case 0: /* VSHR */
5723 case 1: /* VSRA */
5724 if (u)
5725 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5726 else
5727 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
5728 break;
5729 case 2: /* VRSHR */
5730 case 3: /* VRSRA */
5731 if (u)
5732 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5733 else
5734 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5735 break;
5736 case 4: /* VSRI */
5737 case 5: /* VSHL, VSLI */
5738 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5739 break;
5740 case 6: /* VQSHLU */
5741 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5742 cpu_V0, cpu_V1);
5743 break;
5744 case 7: /* VQSHL */
5745 if (u) {
5746 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5747 cpu_V0, cpu_V1);
5748 } else {
5749 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5750 cpu_V0, cpu_V1);
5752 break;
5754 if (op == 1 || op == 3) {
5755 /* Accumulate. */
5756 neon_load_reg64(cpu_V1, rd + pass);
5757 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5758 } else if (op == 4 || (op == 5 && u)) {
5759 /* Insert */
5760 neon_load_reg64(cpu_V1, rd + pass);
5761 uint64_t mask;
5762 if (shift < -63 || shift > 63) {
5763 mask = 0;
5764 } else {
5765 if (op == 4) {
5766 mask = 0xffffffffffffffffull >> -shift;
5767 } else {
5768 mask = 0xffffffffffffffffull << shift;
5771 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5772 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5774 neon_store_reg64(cpu_V0, rd + pass);
5775 } else { /* size < 3 */
5776 /* Operands in T0 and T1. */
5777 tmp = neon_load_reg(rm, pass);
5778 tmp2 = tcg_temp_new_i32();
5779 tcg_gen_movi_i32(tmp2, imm);
5780 switch (op) {
5781 case 0: /* VSHR */
5782 case 1: /* VSRA */
5783 GEN_NEON_INTEGER_OP(shl);
5784 break;
5785 case 2: /* VRSHR */
5786 case 3: /* VRSRA */
5787 GEN_NEON_INTEGER_OP(rshl);
5788 break;
5789 case 4: /* VSRI */
5790 case 5: /* VSHL, VSLI */
5791 switch (size) {
5792 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5793 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5794 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5795 default: abort();
5797 break;
5798 case 6: /* VQSHLU */
5799 switch (size) {
5800 case 0:
5801 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5802 tmp, tmp2);
5803 break;
5804 case 1:
5805 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5806 tmp, tmp2);
5807 break;
5808 case 2:
5809 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5810 tmp, tmp2);
5811 break;
5812 default:
5813 abort();
5815 break;
5816 case 7: /* VQSHL */
5817 GEN_NEON_INTEGER_OP_ENV(qshl);
5818 break;
5820 tcg_temp_free_i32(tmp2);
5822 if (op == 1 || op == 3) {
5823 /* Accumulate. */
5824 tmp2 = neon_load_reg(rd, pass);
5825 gen_neon_add(size, tmp, tmp2);
5826 tcg_temp_free_i32(tmp2);
5827 } else if (op == 4 || (op == 5 && u)) {
5828 /* Insert */
5829 switch (size) {
5830 case 0:
5831 if (op == 4)
5832 mask = 0xff >> -shift;
5833 else
5834 mask = (uint8_t)(0xff << shift);
5835 mask |= mask << 8;
5836 mask |= mask << 16;
5837 break;
5838 case 1:
5839 if (op == 4)
5840 mask = 0xffff >> -shift;
5841 else
5842 mask = (uint16_t)(0xffff << shift);
5843 mask |= mask << 16;
5844 break;
5845 case 2:
5846 if (shift < -31 || shift > 31) {
5847 mask = 0;
5848 } else {
5849 if (op == 4)
5850 mask = 0xffffffffu >> -shift;
5851 else
5852 mask = 0xffffffffu << shift;
5854 break;
5855 default:
5856 abort();
5858 tmp2 = neon_load_reg(rd, pass);
5859 tcg_gen_andi_i32(tmp, tmp, mask);
5860 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5861 tcg_gen_or_i32(tmp, tmp, tmp2);
5862 tcg_temp_free_i32(tmp2);
5864 neon_store_reg(rd, pass, tmp);
5866 } /* for pass */
5867 } else if (op < 10) {
5868 /* Shift by immediate and narrow:
5869 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5870 int input_unsigned = (op == 8) ? !u : u;
5871 if (rm & 1) {
5872 return 1;
5874 shift = shift - (1 << (size + 3));
5875 size++;
5876 if (size == 3) {
5877 tmp64 = tcg_const_i64(shift);
5878 neon_load_reg64(cpu_V0, rm);
5879 neon_load_reg64(cpu_V1, rm + 1);
5880 for (pass = 0; pass < 2; pass++) {
5881 TCGv_i64 in;
5882 if (pass == 0) {
5883 in = cpu_V0;
5884 } else {
5885 in = cpu_V1;
5887 if (q) {
5888 if (input_unsigned) {
5889 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5890 } else {
5891 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5893 } else {
5894 if (input_unsigned) {
5895 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5896 } else {
5897 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5900 tmp = tcg_temp_new_i32();
5901 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5902 neon_store_reg(rd, pass, tmp);
5903 } /* for pass */
5904 tcg_temp_free_i64(tmp64);
5905 } else {
5906 if (size == 1) {
5907 imm = (uint16_t)shift;
5908 imm |= imm << 16;
5909 } else {
5910 /* size == 2 */
5911 imm = (uint32_t)shift;
5913 tmp2 = tcg_const_i32(imm);
5914 tmp4 = neon_load_reg(rm + 1, 0);
5915 tmp5 = neon_load_reg(rm + 1, 1);
5916 for (pass = 0; pass < 2; pass++) {
5917 if (pass == 0) {
5918 tmp = neon_load_reg(rm, 0);
5919 } else {
5920 tmp = tmp4;
5922 gen_neon_shift_narrow(size, tmp, tmp2, q,
5923 input_unsigned);
5924 if (pass == 0) {
5925 tmp3 = neon_load_reg(rm, 1);
5926 } else {
5927 tmp3 = tmp5;
5929 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5930 input_unsigned);
5931 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5932 tcg_temp_free_i32(tmp);
5933 tcg_temp_free_i32(tmp3);
5934 tmp = tcg_temp_new_i32();
5935 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5936 neon_store_reg(rd, pass, tmp);
5937 } /* for pass */
5938 tcg_temp_free_i32(tmp2);
5940 } else if (op == 10) {
5941 /* VSHLL, VMOVL */
5942 if (q || (rd & 1)) {
5943 return 1;
5945 tmp = neon_load_reg(rm, 0);
5946 tmp2 = neon_load_reg(rm, 1);
5947 for (pass = 0; pass < 2; pass++) {
5948 if (pass == 1)
5949 tmp = tmp2;
5951 gen_neon_widen(cpu_V0, tmp, size, u);
5953 if (shift != 0) {
5954 /* The shift is less than the width of the source
5955 type, so we can just shift the whole register. */
5956 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5957 /* Widen the result of shift: we need to clear
5958 * the potential overflow bits resulting from
5959 * left bits of the narrow input appearing as
5960 * right bits of the left neighbour narrow
5961 * input. */
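/* Worked example (illustrative): for size == 0 and shift == 3,
 * imm64 == 0x0007000700070007, so the andi below clears the low
 * three bits of each widened 16-bit lane, where bits spilling
 * across from the lane below would otherwise land.
 */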
5962 if (size < 2 || !u) {
5963 uint64_t imm64;
5964 if (size == 0) {
5965 imm = (0xffu >> (8 - shift));
5966 imm |= imm << 16;
5967 } else if (size == 1) {
5968 imm = 0xffff >> (16 - shift);
5969 } else {
5970 /* size == 2 */
5971 imm = 0xffffffff >> (32 - shift);
5973 if (size < 2) {
5974 imm64 = imm | (((uint64_t)imm) << 32);
5975 } else {
5976 imm64 = imm;
5978 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5981 neon_store_reg64(cpu_V0, rd + pass);
5983 } else if (op >= 14) {
5984 /* VCVT fixed-point. */
5985 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5986 return 1;
5988 /* We have already masked out the must-be-1 top bit of imm6,
5989 * hence this 32-shift where the ARM ARM has 64-imm6.
5991 shift = 32 - shift;
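/* Worked example (illustrative): imm6 == 0b111000 (56) encodes
 * 64 - 56 = 8 fraction bits; with its must-be-1 top bit masked off the
 * value reaches here as 24, and 32 - 24 gives the same 8.
 */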
5992 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5993 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5994 if (!(op & 1)) {
5995 if (u)
5996 gen_vfp_ulto(0, shift, 1);
5997 else
5998 gen_vfp_slto(0, shift, 1);
5999 } else {
6000 if (u)
6001 gen_vfp_toul(0, shift, 1);
6002 else
6003 gen_vfp_tosl(0, shift, 1);
6005 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
6007 } else {
6008 return 1;
6010 } else { /* (insn & 0x00380080) == 0 */
6011 int invert;
6012 if (q && (rd & 1)) {
6013 return 1;
6016 op = (insn >> 8) & 0xf;
6017 /* One register and immediate. */
6018 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6019 invert = (insn & (1 << 5)) != 0;
6020 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6021 * We choose to not special-case this and will behave as if a
6022 * valid constant encoding of 0 had been given.
6024 switch (op) {
6025 case 0: case 1:
6026 /* no-op */
6027 break;
6028 case 2: case 3:
6029 imm <<= 8;
6030 break;
6031 case 4: case 5:
6032 imm <<= 16;
6033 break;
6034 case 6: case 7:
6035 imm <<= 24;
6036 break;
6037 case 8: case 9:
6038 imm |= imm << 16;
6039 break;
6040 case 10: case 11:
6041 imm = (imm << 8) | (imm << 24);
6042 break;
6043 case 12:
6044 imm = (imm << 8) | 0xff;
6045 break;
6046 case 13:
6047 imm = (imm << 16) | 0xffff;
6048 break;
6049 case 14:
6050 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6051 if (invert)
6052 imm = ~imm;
6053 break;
6054 case 15:
6055 if (invert) {
6056 return 1;
6058 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6059 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6060 break;
6062 if (invert)
6063 imm = ~imm;
6065 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6066 if (op & 1 && op < 12) {
6067 tmp = neon_load_reg(rd, pass);
6068 if (invert) {
6069 /* The immediate value has already been inverted, so
6070 BIC becomes AND. */
6071 tcg_gen_andi_i32(tmp, tmp, imm);
6072 } else {
6073 tcg_gen_ori_i32(tmp, tmp, imm);
6075 } else {
6076 /* VMOV, VMVN. */
6077 tmp = tcg_temp_new_i32();
6078 if (op == 14 && invert) {
6079 int n;
6080 uint32_t val;
6081 val = 0;
6082 for (n = 0; n < 4; n++) {
6083 if (imm & (1 << (n + (pass & 1) * 4)))
6084 val |= 0xff << (n * 8);
6086 tcg_gen_movi_i32(tmp, val);
6087 } else {
6088 tcg_gen_movi_i32(tmp, imm);
6091 neon_store_reg(rd, pass, tmp);
6094 } else { /* (insn & 0x00800010 == 0x00800000) */
6095 if (size != 3) {
6096 op = (insn >> 8) & 0xf;
6097 if ((insn & (1 << 6)) == 0) {
6098 /* Three registers of different lengths. */
6099 int src1_wide;
6100 int src2_wide;
6101 int prewiden;
6102 /* undefreq: bit 0 : UNDEF if size == 0
6103 * bit 1 : UNDEF if size == 1
6104 * bit 2 : UNDEF if size == 2
6105 * bit 3 : UNDEF if U == 1
6106 * Note that [2:0] set implies 'always UNDEF'
6108 int undefreq;
6109 /* prewiden, src1_wide, src2_wide, undefreq */
6110 static const int neon_3reg_wide[16][4] = {
6111 {1, 0, 0, 0}, /* VADDL */
6112 {1, 1, 0, 0}, /* VADDW */
6113 {1, 0, 0, 0}, /* VSUBL */
6114 {1, 1, 0, 0}, /* VSUBW */
6115 {0, 1, 1, 0}, /* VADDHN */
6116 {0, 0, 0, 0}, /* VABAL */
6117 {0, 1, 1, 0}, /* VSUBHN */
6118 {0, 0, 0, 0}, /* VABDL */
6119 {0, 0, 0, 0}, /* VMLAL */
6120 {0, 0, 0, 9}, /* VQDMLAL */
6121 {0, 0, 0, 0}, /* VMLSL */
6122 {0, 0, 0, 9}, /* VQDMLSL */
6123 {0, 0, 0, 0}, /* Integer VMULL */
6124 {0, 0, 0, 1}, /* VQDMULL */
6125 {0, 0, 0, 0xa}, /* Polynomial VMULL */
6126 {0, 0, 0, 7}, /* Reserved: always UNDEF */
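/* For example (illustrative): VQDMLAL has undefreq == 9 (bits 0 and 3
 * set), so the check below rejects it when size == 0 or U == 1.
 */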
6129 prewiden = neon_3reg_wide[op][0];
6130 src1_wide = neon_3reg_wide[op][1];
6131 src2_wide = neon_3reg_wide[op][2];
6132 undefreq = neon_3reg_wide[op][3];
6134 if ((undefreq & (1 << size)) ||
6135 ((undefreq & 8) && u)) {
6136 return 1;
6138 if ((src1_wide && (rn & 1)) ||
6139 (src2_wide && (rm & 1)) ||
6140 (!src2_wide && (rd & 1))) {
6141 return 1;
6144 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6145 * outside the loop below as it only performs a single pass.
6147 if (op == 14 && size == 2) {
6148 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6150 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
6151 return 1;
6153 tcg_rn = tcg_temp_new_i64();
6154 tcg_rm = tcg_temp_new_i64();
6155 tcg_rd = tcg_temp_new_i64();
6156 neon_load_reg64(tcg_rn, rn);
6157 neon_load_reg64(tcg_rm, rm);
6158 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6159 neon_store_reg64(tcg_rd, rd);
6160 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6161 neon_store_reg64(tcg_rd, rd + 1);
6162 tcg_temp_free_i64(tcg_rn);
6163 tcg_temp_free_i64(tcg_rm);
6164 tcg_temp_free_i64(tcg_rd);
6165 return 0;
6168 /* Avoid overlapping operands. Wide source operands are
6169 always aligned so will never overlap with wide
6170 destinations in problematic ways. */
6171 if (rd == rm && !src2_wide) {
6172 tmp = neon_load_reg(rm, 1);
6173 neon_store_scratch(2, tmp);
6174 } else if (rd == rn && !src1_wide) {
6175 tmp = neon_load_reg(rn, 1);
6176 neon_store_scratch(2, tmp);
6178 TCGV_UNUSED_I32(tmp3);
6179 for (pass = 0; pass < 2; pass++) {
6180 if (src1_wide) {
6181 neon_load_reg64(cpu_V0, rn + pass);
6182 TCGV_UNUSED_I32(tmp);
6183 } else {
6184 if (pass == 1 && rd == rn) {
6185 tmp = neon_load_scratch(2);
6186 } else {
6187 tmp = neon_load_reg(rn, pass);
6189 if (prewiden) {
6190 gen_neon_widen(cpu_V0, tmp, size, u);
6193 if (src2_wide) {
6194 neon_load_reg64(cpu_V1, rm + pass);
6195 TCGV_UNUSED_I32(tmp2);
6196 } else {
6197 if (pass == 1 && rd == rm) {
6198 tmp2 = neon_load_scratch(2);
6199 } else {
6200 tmp2 = neon_load_reg(rm, pass);
6202 if (prewiden) {
6203 gen_neon_widen(cpu_V1, tmp2, size, u);
6206 switch (op) {
6207 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6208 gen_neon_addl(size);
6209 break;
6210 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6211 gen_neon_subl(size);
6212 break;
6213 case 5: case 7: /* VABAL, VABDL */
6214 switch ((size << 1) | u) {
6215 case 0:
6216 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6217 break;
6218 case 1:
6219 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6220 break;
6221 case 2:
6222 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6223 break;
6224 case 3:
6225 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6226 break;
6227 case 4:
6228 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6229 break;
6230 case 5:
6231 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6232 break;
6233 default: abort();
6235 tcg_temp_free_i32(tmp2);
6236 tcg_temp_free_i32(tmp);
6237 break;
6238 case 8: case 9: case 10: case 11: case 12: case 13:
6239 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6240 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6241 break;
6242 case 14: /* Polynomial VMULL */
6243 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
6244 tcg_temp_free_i32(tmp2);
6245 tcg_temp_free_i32(tmp);
6246 break;
6247 default: /* 15 is RESERVED: caught earlier */
6248 abort();
6250 if (op == 13) {
6251 /* VQDMULL */
6252 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6253 neon_store_reg64(cpu_V0, rd + pass);
6254 } else if (op == 5 || (op >= 8 && op <= 11)) {
6255 /* Accumulate. */
6256 neon_load_reg64(cpu_V1, rd + pass);
6257 switch (op) {
6258 case 10: /* VMLSL */
6259 gen_neon_negl(cpu_V0, size);
6260 /* Fall through */
6261 case 5: case 8: /* VABAL, VMLAL */
6262 gen_neon_addl(size);
6263 break;
6264 case 9: case 11: /* VQDMLAL, VQDMLSL */
6265 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6266 if (op == 11) {
6267 gen_neon_negl(cpu_V0, size);
6269 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6270 break;
6271 default:
6272 abort();
6274 neon_store_reg64(cpu_V0, rd + pass);
6275 } else if (op == 4 || op == 6) {
6276 /* Narrowing operation. */
6277 tmp = tcg_temp_new_i32();
6278 if (!u) {
6279 switch (size) {
6280 case 0:
6281 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6282 break;
6283 case 1:
6284 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6285 break;
6286 case 2:
6287 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6288 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6289 break;
6290 default: abort();
6292 } else {
6293 switch (size) {
6294 case 0:
6295 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6296 break;
6297 case 1:
6298 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6299 break;
6300 case 2:
6301 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6302 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6303 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6304 break;
6305 default: abort();
6308 if (pass == 0) {
6309 tmp3 = tmp;
6310 } else {
6311 neon_store_reg(rd, 0, tmp3);
6312 neon_store_reg(rd, 1, tmp);
6314 } else {
6315 /* Write back the result. */
6316 neon_store_reg64(cpu_V0, rd + pass);
6319 } else {
6320 /* Two registers and a scalar. NB that for ops of this form
6321 * the ARM ARM labels bit 24 as Q, but it is in our variable
6322 * 'u', not 'q'.
6324 if (size == 0) {
6325 return 1;
6327 switch (op) {
6328 case 1: /* Float VMLA scalar */
6329 case 5: /* Floating point VMLS scalar */
6330 case 9: /* Floating point VMUL scalar */
6331 if (size == 1) {
6332 return 1;
6334 /* fall through */
6335 case 0: /* Integer VMLA scalar */
6336 case 4: /* Integer VMLS scalar */
6337 case 8: /* Integer VMUL scalar */
6338 case 12: /* VQDMULH scalar */
6339 case 13: /* VQRDMULH scalar */
6340 if (u && ((rd | rn) & 1)) {
6341 return 1;
6343 tmp = neon_get_scalar(size, rm);
6344 neon_store_scratch(0, tmp);
6345 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6346 tmp = neon_load_scratch(0);
6347 tmp2 = neon_load_reg(rn, pass);
6348 if (op == 12) {
6349 if (size == 1) {
6350 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6351 } else {
6352 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6354 } else if (op == 13) {
6355 if (size == 1) {
6356 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6357 } else {
6358 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6360 } else if (op & 1) {
6361 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6362 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6363 tcg_temp_free_ptr(fpstatus);
6364 } else {
6365 switch (size) {
6366 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6367 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6368 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6369 default: abort();
6372 tcg_temp_free_i32(tmp2);
6373 if (op < 8) {
6374 /* Accumulate. */
6375 tmp2 = neon_load_reg(rd, pass);
6376 switch (op) {
6377 case 0:
6378 gen_neon_add(size, tmp, tmp2);
6379 break;
6380 case 1:
6382 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6383 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6384 tcg_temp_free_ptr(fpstatus);
6385 break;
6387 case 4:
6388 gen_neon_rsb(size, tmp, tmp2);
6389 break;
6390 case 5:
6392 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6393 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6394 tcg_temp_free_ptr(fpstatus);
6395 break;
6397 default:
6398 abort();
6400 tcg_temp_free_i32(tmp2);
6402 neon_store_reg(rd, pass, tmp);
6404 break;
6405 case 3: /* VQDMLAL scalar */
6406 case 7: /* VQDMLSL scalar */
6407 case 11: /* VQDMULL scalar */
6408 if (u == 1) {
6409 return 1;
6411 /* fall through */
6412 case 2: /* VMLAL scalar */
6413 case 6: /* VMLSL scalar */
6414 case 10: /* VMULL scalar */
6415 if (rd & 1) {
6416 return 1;
6418 tmp2 = neon_get_scalar(size, rm);
6419 /* We need a copy of tmp2 because gen_neon_mull
6420 * deletes it during pass 0. */
6421 tmp4 = tcg_temp_new_i32();
6422 tcg_gen_mov_i32(tmp4, tmp2);
6423 tmp3 = neon_load_reg(rn, 1);
6425 for (pass = 0; pass < 2; pass++) {
6426 if (pass == 0) {
6427 tmp = neon_load_reg(rn, 0);
6428 } else {
6429 tmp = tmp3;
6430 tmp2 = tmp4;
6432 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6433 if (op != 11) {
6434 neon_load_reg64(cpu_V1, rd + pass);
6436 switch (op) {
6437 case 6:
6438 gen_neon_negl(cpu_V0, size);
6439 /* Fall through */
6440 case 2:
6441 gen_neon_addl(size);
6442 break;
6443 case 3: case 7:
6444 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6445 if (op == 7) {
6446 gen_neon_negl(cpu_V0, size);
6448 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6449 break;
6450 case 10:
6451 /* no-op */
6452 break;
6453 case 11:
6454 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6455 break;
6456 default:
6457 abort();
6459 neon_store_reg64(cpu_V0, rd + pass);
6463 break;
6464 default: /* 14 and 15 are RESERVED */
6465 return 1;
6468 } else { /* size == 3 */
6469 if (!u) {
6470 /* Extract. */
6471 imm = (insn >> 8) & 0xf;
6473 if (imm > 7 && !q)
6474 return 1;
6476 if (q && ((rd | rn | rm) & 1)) {
6477 return 1;
6480 if (imm == 0) {
6481 neon_load_reg64(cpu_V0, rn);
6482 if (q) {
6483 neon_load_reg64(cpu_V1, rn + 1);
6485 } else if (imm == 8) {
6486 neon_load_reg64(cpu_V0, rn + 1);
6487 if (q) {
6488 neon_load_reg64(cpu_V1, rm);
6490 } else if (q) {
6491 tmp64 = tcg_temp_new_i64();
6492 if (imm < 8) {
6493 neon_load_reg64(cpu_V0, rn);
6494 neon_load_reg64(tmp64, rn + 1);
6495 } else {
6496 neon_load_reg64(cpu_V0, rn + 1);
6497 neon_load_reg64(tmp64, rm);
6499 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
6500 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
6501 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6502 if (imm < 8) {
6503 neon_load_reg64(cpu_V1, rm);
6504 } else {
6505 neon_load_reg64(cpu_V1, rm + 1);
6506 imm -= 8;
6508 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6509 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6510 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
6511 tcg_temp_free_i64(tmp64);
6512 } else {
6513 /* BUGFIX */
6514 neon_load_reg64(cpu_V0, rn);
6515 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
6516 neon_load_reg64(cpu_V1, rm);
6517 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6518 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6520 neon_store_reg64(cpu_V0, rd);
6521 if (q) {
6522 neon_store_reg64(cpu_V1, rd + 1);
6524 } else if ((insn & (1 << 11)) == 0) {
6525 /* Two register misc. */
6526 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6527 size = (insn >> 18) & 3;
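/* The two-register-misc opcode is assembled from insn bits [17:16]
 * (group) and [10:7] (op within the group); neon_2rm_sizes[] records
 * which element sizes are valid for each op.
 */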
6528 /* UNDEF for unknown op values and bad op-size combinations */
6529 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6530 return 1;
6532 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6533 q && ((rm | rd) & 1)) {
6534 return 1;
6536 switch (op) {
6537 case NEON_2RM_VREV64:
6538 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6539 tmp = neon_load_reg(rm, pass * 2);
6540 tmp2 = neon_load_reg(rm, pass * 2 + 1);
6541 switch (size) {
6542 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6543 case 1: gen_swap_half(tmp); break;
6544 case 2: /* no-op */ break;
6545 default: abort();
6547 neon_store_reg(rd, pass * 2 + 1, tmp);
6548 if (size == 2) {
6549 neon_store_reg(rd, pass * 2, tmp2);
6550 } else {
6551 switch (size) {
6552 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6553 case 1: gen_swap_half(tmp2); break;
6554 default: abort();
6556 neon_store_reg(rd, pass * 2, tmp2);
6559 break;
6560 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6561 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
6562 for (pass = 0; pass < q + 1; pass++) {
6563 tmp = neon_load_reg(rm, pass * 2);
6564 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6565 tmp = neon_load_reg(rm, pass * 2 + 1);
6566 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6567 switch (size) {
6568 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6569 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6570 case 2: tcg_gen_add_i64(CPU_V001); break;
6571 default: abort();
6573 if (op >= NEON_2RM_VPADAL) {
6574 /* Accumulate. */
6575 neon_load_reg64(cpu_V1, rd + pass);
6576 gen_neon_addl(size);
6578 neon_store_reg64(cpu_V0, rd + pass);
6580 break;
6581 case NEON_2RM_VTRN:
6582 if (size == 2) {
6583 int n;
6584 for (n = 0; n < (q ? 4 : 2); n += 2) {
6585 tmp = neon_load_reg(rm, n);
6586 tmp2 = neon_load_reg(rd, n + 1);
6587 neon_store_reg(rm, n, tmp2);
6588 neon_store_reg(rd, n + 1, tmp);
6590 } else {
6591 goto elementwise;
6593 break;
6594 case NEON_2RM_VUZP:
6595 if (gen_neon_unzip(rd, rm, size, q)) {
6596 return 1;
6598 break;
6599 case NEON_2RM_VZIP:
6600 if (gen_neon_zip(rd, rm, size, q)) {
6601 return 1;
6603 break;
6604 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6605 /* also VQMOVUN; op field and mnemonics don't line up */
6606 if (rm & 1) {
6607 return 1;
6609 TCGV_UNUSED_I32(tmp2);
6610 for (pass = 0; pass < 2; pass++) {
6611 neon_load_reg64(cpu_V0, rm + pass);
6612 tmp = tcg_temp_new_i32();
6613 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6614 tmp, cpu_V0);
6615 if (pass == 0) {
6616 tmp2 = tmp;
6617 } else {
6618 neon_store_reg(rd, 0, tmp2);
6619 neon_store_reg(rd, 1, tmp);
6622 break;
6623 case NEON_2RM_VSHLL:
6624 if (q || (rd & 1)) {
6625 return 1;
6627 tmp = neon_load_reg(rm, 0);
6628 tmp2 = neon_load_reg(rm, 1);
6629 for (pass = 0; pass < 2; pass++) {
6630 if (pass == 1)
6631 tmp = tmp2;
6632 gen_neon_widen(cpu_V0, tmp, size, 1);
6633 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
6634 neon_store_reg64(cpu_V0, rd + pass);
6636 break;
6637 case NEON_2RM_VCVT_F16_F32:
6638 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
6639 q || (rm & 1)) {
6640 return 1;
6642 tmp = tcg_temp_new_i32();
6643 tmp2 = tcg_temp_new_i32();
6644 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
6645 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6646 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
6647 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6648 tcg_gen_shli_i32(tmp2, tmp2, 16);
6649 tcg_gen_or_i32(tmp2, tmp2, tmp);
6650 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
6651 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6652 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6653 neon_store_reg(rd, 0, tmp2);
6654 tmp2 = tcg_temp_new_i32();
6655 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6656 tcg_gen_shli_i32(tmp2, tmp2, 16);
6657 tcg_gen_or_i32(tmp2, tmp2, tmp);
6658 neon_store_reg(rd, 1, tmp2);
6659 tcg_temp_free_i32(tmp);
6660 break;
6661 case NEON_2RM_VCVT_F32_F16:
6662 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
6663 q || (rd & 1)) {
6664 return 1;
6666 tmp3 = tcg_temp_new_i32();
6667 tmp = neon_load_reg(rm, 0);
6668 tmp2 = neon_load_reg(rm, 1);
6669 tcg_gen_ext16u_i32(tmp3, tmp);
6670 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6671 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6672 tcg_gen_shri_i32(tmp3, tmp, 16);
6673 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6674 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
6675 tcg_temp_free_i32(tmp);
6676 tcg_gen_ext16u_i32(tmp3, tmp2);
6677 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6678 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6679 tcg_gen_shri_i32(tmp3, tmp2, 16);
6680 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6681 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
6682 tcg_temp_free_i32(tmp2);
6683 tcg_temp_free_i32(tmp3);
6684 break;
6685 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6686 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
6687 || ((rm | rd) & 1)) {
6688 return 1;
6690 tmp = tcg_const_i32(rd);
6691 tmp2 = tcg_const_i32(rm);
6693 /* Bit 6 is the lowest opcode bit; it distinguishes between
6694 * encryption (AESE/AESMC) and decryption (AESD/AESIMC) */
6696 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6698 if (op == NEON_2RM_AESE) {
6699 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6700 } else {
6701 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6703 tcg_temp_free_i32(tmp);
6704 tcg_temp_free_i32(tmp2);
6705 tcg_temp_free_i32(tmp3);
6706 break;
6707 case NEON_2RM_SHA1H:
6708 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
6709 || ((rm | rd) & 1)) {
6710 return 1;
6712 tmp = tcg_const_i32(rd);
6713 tmp2 = tcg_const_i32(rm);
6715 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
6717 tcg_temp_free_i32(tmp);
6718 tcg_temp_free_i32(tmp2);
6719 break;
6720 case NEON_2RM_SHA1SU1:
6721 if ((rm | rd) & 1) {
6722 return 1;
6724 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6725 if (q) {
6726 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
6727 return 1;
6729 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
6730 return 1;
6732 tmp = tcg_const_i32(rd);
6733 tmp2 = tcg_const_i32(rm);
6734 if (q) {
6735 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
6736 } else {
6737 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
6739 tcg_temp_free_i32(tmp);
6740 tcg_temp_free_i32(tmp2);
6741 break;
6742 default:
6743 elementwise:
6744 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6745 if (neon_2rm_is_float_op(op)) {
6746 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6747 neon_reg_offset(rm, pass));
6748 TCGV_UNUSED_I32(tmp);
6749 } else {
6750 tmp = neon_load_reg(rm, pass);
6752 switch (op) {
6753 case NEON_2RM_VREV32:
6754 switch (size) {
6755 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6756 case 1: gen_swap_half(tmp); break;
6757 default: abort();
6759 break;
6760 case NEON_2RM_VREV16:
6761 gen_rev16(tmp);
6762 break;
6763 case NEON_2RM_VCLS:
6764 switch (size) {
6765 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6766 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6767 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
6768 default: abort();
6770 break;
6771 case NEON_2RM_VCLZ:
6772 switch (size) {
6773 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6774 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6775 case 2: gen_helper_clz(tmp, tmp); break;
6776 default: abort();
6778 break;
6779 case NEON_2RM_VCNT:
6780 gen_helper_neon_cnt_u8(tmp, tmp);
6781 break;
6782 case NEON_2RM_VMVN:
6783 tcg_gen_not_i32(tmp, tmp);
6784 break;
6785 case NEON_2RM_VQABS:
6786 switch (size) {
6787 case 0:
6788 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6789 break;
6790 case 1:
6791 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6792 break;
6793 case 2:
6794 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6795 break;
6796 default: abort();
6798 break;
6799 case NEON_2RM_VQNEG:
6800 switch (size) {
6801 case 0:
6802 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6803 break;
6804 case 1:
6805 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6806 break;
6807 case 2:
6808 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6809 break;
6810 default: abort();
6812 break;
6813 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6814 tmp2 = tcg_const_i32(0);
6815 switch(size) {
6816 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6817 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6818 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6819 default: abort();
6821 tcg_temp_free_i32(tmp2);
6822 if (op == NEON_2RM_VCLE0) {
6823 tcg_gen_not_i32(tmp, tmp);
6825 break;
6826 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6827 tmp2 = tcg_const_i32(0);
6828 switch(size) {
6829 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6830 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6831 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6832 default: abort();
6834 tcg_temp_free_i32(tmp2);
6835 if (op == NEON_2RM_VCLT0) {
6836 tcg_gen_not_i32(tmp, tmp);
6838 break;
6839 case NEON_2RM_VCEQ0:
6840 tmp2 = tcg_const_i32(0);
6841 switch(size) {
6842 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6843 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6844 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6845 default: abort();
6847 tcg_temp_free_i32(tmp2);
6848 break;
6849 case NEON_2RM_VABS:
6850 switch(size) {
6851 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6852 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6853 case 2: tcg_gen_abs_i32(tmp, tmp); break;
6854 default: abort();
6856 break;
6857 case NEON_2RM_VNEG:
6858 tmp2 = tcg_const_i32(0);
6859 gen_neon_rsb(size, tmp, tmp2);
6860 tcg_temp_free_i32(tmp2);
6861 break;
6862 case NEON_2RM_VCGT0_F:
6864 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6865 tmp2 = tcg_const_i32(0);
6866 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6867 tcg_temp_free_i32(tmp2);
6868 tcg_temp_free_ptr(fpstatus);
6869 break;
6871 case NEON_2RM_VCGE0_F:
6873 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6874 tmp2 = tcg_const_i32(0);
6875 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6876 tcg_temp_free_i32(tmp2);
6877 tcg_temp_free_ptr(fpstatus);
6878 break;
6880 case NEON_2RM_VCEQ0_F:
6882 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6883 tmp2 = tcg_const_i32(0);
6884 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6885 tcg_temp_free_i32(tmp2);
6886 tcg_temp_free_ptr(fpstatus);
6887 break;
6889 case NEON_2RM_VCLE0_F:
6891 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6892 tmp2 = tcg_const_i32(0);
6893 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6894 tcg_temp_free_i32(tmp2);
6895 tcg_temp_free_ptr(fpstatus);
6896 break;
6898 case NEON_2RM_VCLT0_F:
6900 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6901 tmp2 = tcg_const_i32(0);
6902 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6903 tcg_temp_free_i32(tmp2);
6904 tcg_temp_free_ptr(fpstatus);
6905 break;
6907 case NEON_2RM_VABS_F:
6908 gen_vfp_abs(0);
6909 break;
6910 case NEON_2RM_VNEG_F:
6911 gen_vfp_neg(0);
6912 break;
6913 case NEON_2RM_VSWP:
6914 tmp2 = neon_load_reg(rd, pass);
6915 neon_store_reg(rm, pass, tmp2);
6916 break;
6917 case NEON_2RM_VTRN:
6918 tmp2 = neon_load_reg(rd, pass);
6919 switch (size) {
6920 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6921 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6922 default: abort();
6924 neon_store_reg(rm, pass, tmp2);
6925 break;
6926 case NEON_2RM_VRINTN:
6927 case NEON_2RM_VRINTA:
6928 case NEON_2RM_VRINTM:
6929 case NEON_2RM_VRINTP:
6930 case NEON_2RM_VRINTZ:
6932 TCGv_i32 tcg_rmode;
6933 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6934 int rmode;
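/* VRINTZ always truncates towards zero; the other VRINT variants
 * take their rounding mode from bits [2:1] of the op value via
 * fp_decode_rm[].  The chosen mode is installed before the rint
 * helper and the previous mode restored afterwards.
 */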
6936 if (op == NEON_2RM_VRINTZ) {
6937 rmode = FPROUNDING_ZERO;
6938 } else {
6939 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6942 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6943 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6944 cpu_env);
6945 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
6946 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6947 cpu_env);
6948 tcg_temp_free_ptr(fpstatus);
6949 tcg_temp_free_i32(tcg_rmode);
6950 break;
6952 case NEON_2RM_VRINTX:
6954 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6955 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
6956 tcg_temp_free_ptr(fpstatus);
6957 break;
6959 case NEON_2RM_VCVTAU:
6960 case NEON_2RM_VCVTAS:
6961 case NEON_2RM_VCVTNU:
6962 case NEON_2RM_VCVTNS:
6963 case NEON_2RM_VCVTPU:
6964 case NEON_2RM_VCVTPS:
6965 case NEON_2RM_VCVTMU:
6966 case NEON_2RM_VCVTMS:
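/* Float-to-integer conversions with an explicit rounding mode:
 * insn bit 7 set selects the unsigned form, bits [9:8] pick the
 * rounding mode via fp_decode_rm[], installed around the
 * conversion helper below.
 */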
6968 bool is_signed = !extract32(insn, 7, 1);
6969 TCGv_ptr fpst = get_fpstatus_ptr(1);
6970 TCGv_i32 tcg_rmode, tcg_shift;
6971 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6973 tcg_shift = tcg_const_i32(0);
6974 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6975 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6976 cpu_env);
6978 if (is_signed) {
6979 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
6980 tcg_shift, fpst);
6981 } else {
6982 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
6983 tcg_shift, fpst);
6986 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6987 cpu_env);
6988 tcg_temp_free_i32(tcg_rmode);
6989 tcg_temp_free_i32(tcg_shift);
6990 tcg_temp_free_ptr(fpst);
6991 break;
6993 case NEON_2RM_VRECPE:
6995 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6996 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6997 tcg_temp_free_ptr(fpstatus);
6998 break;
7000 case NEON_2RM_VRSQRTE:
7002 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7003 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7004 tcg_temp_free_ptr(fpstatus);
7005 break;
7007 case NEON_2RM_VRECPE_F:
7009 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7010 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7011 tcg_temp_free_ptr(fpstatus);
7012 break;
7014 case NEON_2RM_VRSQRTE_F:
7016 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7017 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7018 tcg_temp_free_ptr(fpstatus);
7019 break;
7021 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
7022 gen_vfp_sito(0, 1);
7023 break;
7024 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
7025 gen_vfp_uito(0, 1);
7026 break;
7027 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
7028 gen_vfp_tosiz(0, 1);
7029 break;
7030 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
7031 gen_vfp_touiz(0, 1);
7032 break;
7033 default:
7034 /* Reserved op values were caught by the
7035 * neon_2rm_sizes[] check earlier. */
7037 abort();
7039 if (neon_2rm_is_float_op(op)) {
7040 tcg_gen_st_f32(cpu_F0s, cpu_env,
7041 neon_reg_offset(rd, pass));
7042 } else {
7043 neon_store_reg(rd, pass, tmp);
7046 break;
7048 } else if ((insn & (1 << 10)) == 0) {
7049 /* VTBL, VTBX. */
7050 int n = ((insn >> 8) & 3) + 1;
7051 if ((rn + n) > 32) {
7052 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7053 * helper function running off the end of the register file. */
7055 return 1;
7057 n <<= 3;
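/* n is now the table size in bytes (8 per D register).  Insn bit 6
 * distinguishes VTBX (out-of-range indices leave the destination
 * byte unchanged, hence the load of rd) from VTBL (they yield 0).
 */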
7058 if (insn & (1 << 6)) {
7059 tmp = neon_load_reg(rd, 0);
7060 } else {
7061 tmp = tcg_temp_new_i32();
7062 tcg_gen_movi_i32(tmp, 0);
7064 tmp2 = neon_load_reg(rm, 0);
7065 tmp4 = tcg_const_i32(rn);
7066 tmp5 = tcg_const_i32(n);
7067 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7068 tcg_temp_free_i32(tmp);
7069 if (insn & (1 << 6)) {
7070 tmp = neon_load_reg(rd, 1);
7071 } else {
7072 tmp = tcg_temp_new_i32();
7073 tcg_gen_movi_i32(tmp, 0);
7075 tmp3 = neon_load_reg(rm, 1);
7076 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
7077 tcg_temp_free_i32(tmp5);
7078 tcg_temp_free_i32(tmp4);
7079 neon_store_reg(rd, 0, tmp2);
7080 neon_store_reg(rd, 1, tmp3);
7081 tcg_temp_free_i32(tmp);
7082 } else if ((insn & 0x380) == 0) {
7083 /* VDUP */
7084 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7085 return 1;
7087 if (insn & (1 << 19)) {
7088 tmp = neon_load_reg(rm, 1);
7089 } else {
7090 tmp = neon_load_reg(rm, 0);
7092 if (insn & (1 << 16)) {
7093 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
7094 } else if (insn & (1 << 17)) {
7095 if ((insn >> 18) & 1)
7096 gen_neon_dup_high16(tmp);
7097 else
7098 gen_neon_dup_low16(tmp);
7100 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7101 tmp2 = tcg_temp_new_i32();
7102 tcg_gen_mov_i32(tmp2, tmp);
7103 neon_store_reg(rd, pass, tmp2);
7105 tcg_temp_free_i32(tmp);
7106 } else {
7107 return 1;
7111 return 0;
7114 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
7116 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7117 const ARMCPRegInfo *ri;
7119 cpnum = (insn >> 8) & 0xf;
7121 /* First check for coprocessor space used for XScale/iwMMXt insns */
7122 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
7123 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7124 return 1;
7126 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7127 return disas_iwmmxt_insn(s, insn);
7128 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7129 return disas_dsp_insn(s, insn);
7131 return 1;
7134 /* Otherwise treat as a generic register access */
7135 is64 = (insn & (1 << 25)) == 0;
7136 if (!is64 && ((insn & (1 << 4)) == 0)) {
7137 /* cdp */
7138 return 1;
7141 crm = insn & 0xf;
7142 if (is64) {
7143 crn = 0;
7144 opc1 = (insn >> 4) & 0xf;
7145 opc2 = 0;
7146 rt2 = (insn >> 16) & 0xf;
7147 } else {
7148 crn = (insn >> 16) & 0xf;
7149 opc1 = (insn >> 21) & 7;
7150 opc2 = (insn >> 5) & 7;
7151 rt2 = 0;
7153 isread = (insn >> 20) & 1;
7154 rt = (insn >> 12) & 0xf;
7156 ri = get_arm_cp_reginfo(s->cp_regs,
7157 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
7158 if (ri) {
7159 /* Check access permissions */
7160 if (!cp_access_ok(s->current_el, ri, isread)) {
7161 return 1;
7164 if (ri->accessfn ||
7165 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
7166 /* Emit code to perform further access permissions checks at
7167 * runtime; this may result in an exception.
7168 * Note that on XScale all cp0..c13 registers do an access check
7169 * call in order to handle c15_cpar. */
7171 TCGv_ptr tmpptr;
7172 TCGv_i32 tcg_syn;
7173 uint32_t syndrome;
7175 /* Note that since we are an implementation which takes an
7176 * exception on a trapped conditional instruction only if the
7177 * instruction passes its condition code check, we can take
7178 * advantage of the clause in the ARM ARM that allows us to set
7179 * the COND field in the instruction to 0xE in all cases.
7180 * We could fish the actual condition out of the insn (ARM)
7181 * or the condexec bits (Thumb) but it isn't necessary. */
7183 switch (cpnum) {
7184 case 14:
7185 if (is64) {
7186 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7187 isread, s->thumb);
7188 } else {
7189 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7190 rt, isread, s->thumb);
7192 break;
7193 case 15:
7194 if (is64) {
7195 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7196 isread, s->thumb);
7197 } else {
7198 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7199 rt, isread, s->thumb);
7201 break;
7202 default:
7203 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7204 * so this can only happen if this is an ARMv7 or earlier CPU,
7205 * in which case the syndrome information won't actually be
7206 * guest visible. */
7208 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
7209 syndrome = syn_uncategorized();
7210 break;
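/* s->pc has already been advanced past this insn, so pc - 4 is
 * the address of the coprocessor access itself; set it so a
 * trapping access check reports the right PC.
 */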
7213 gen_set_pc_im(s, s->pc - 4);
7214 tmpptr = tcg_const_ptr(ri);
7215 tcg_syn = tcg_const_i32(syndrome);
7216 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
7217 tcg_temp_free_ptr(tmpptr);
7218 tcg_temp_free_i32(tcg_syn);
7221 /* Handle special cases first */
7222 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7223 case ARM_CP_NOP:
7224 return 0;
7225 case ARM_CP_WFI:
7226 if (isread) {
7227 return 1;
7229 gen_set_pc_im(s, s->pc);
7230 s->is_jmp = DISAS_WFI;
7231 return 0;
7232 default:
7233 break;
7236 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7237 gen_io_start();
7240 if (isread) {
7241 /* Read */
7242 if (is64) {
7243 TCGv_i64 tmp64;
7244 TCGv_i32 tmp;
7245 if (ri->type & ARM_CP_CONST) {
7246 tmp64 = tcg_const_i64(ri->resetvalue);
7247 } else if (ri->readfn) {
7248 TCGv_ptr tmpptr;
7249 tmp64 = tcg_temp_new_i64();
7250 tmpptr = tcg_const_ptr(ri);
7251 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7252 tcg_temp_free_ptr(tmpptr);
7253 } else {
7254 tmp64 = tcg_temp_new_i64();
7255 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7257 tmp = tcg_temp_new_i32();
7258 tcg_gen_extrl_i64_i32(tmp, tmp64);
7259 store_reg(s, rt, tmp);
7260 tcg_gen_shri_i64(tmp64, tmp64, 32);
7261 tmp = tcg_temp_new_i32();
7262 tcg_gen_extrl_i64_i32(tmp, tmp64);
7263 tcg_temp_free_i64(tmp64);
7264 store_reg(s, rt2, tmp);
7265 } else {
7266 TCGv_i32 tmp;
7267 if (ri->type & ARM_CP_CONST) {
7268 tmp = tcg_const_i32(ri->resetvalue);
7269 } else if (ri->readfn) {
7270 TCGv_ptr tmpptr;
7271 tmp = tcg_temp_new_i32();
7272 tmpptr = tcg_const_ptr(ri);
7273 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7274 tcg_temp_free_ptr(tmpptr);
7275 } else {
7276 tmp = load_cpu_offset(ri->fieldoffset);
7278 if (rt == 15) {
7279 /* Destination register of r15 for 32 bit loads sets
7280 * the condition codes from the high 4 bits of the value. */
7282 gen_set_nzcv(tmp);
7283 tcg_temp_free_i32(tmp);
7284 } else {
7285 store_reg(s, rt, tmp);
7288 } else {
7289 /* Write */
7290 if (ri->type & ARM_CP_CONST) {
7291 /* If not forbidden by access permissions, treat as WI */
7292 return 0;
7295 if (is64) {
7296 TCGv_i32 tmplo, tmphi;
7297 TCGv_i64 tmp64 = tcg_temp_new_i64();
7298 tmplo = load_reg(s, rt);
7299 tmphi = load_reg(s, rt2);
7300 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7301 tcg_temp_free_i32(tmplo);
7302 tcg_temp_free_i32(tmphi);
7303 if (ri->writefn) {
7304 TCGv_ptr tmpptr = tcg_const_ptr(ri);
7305 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7306 tcg_temp_free_ptr(tmpptr);
7307 } else {
7308 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7310 tcg_temp_free_i64(tmp64);
7311 } else {
7312 if (ri->writefn) {
7313 TCGv_i32 tmp;
7314 TCGv_ptr tmpptr;
7315 tmp = load_reg(s, rt);
7316 tmpptr = tcg_const_ptr(ri);
7317 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7318 tcg_temp_free_ptr(tmpptr);
7319 tcg_temp_free_i32(tmp);
7320 } else {
7321 TCGv_i32 tmp = load_reg(s, rt);
7322 store_cpu_offset(tmp, ri->fieldoffset);
7327 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7328 /* I/O operations must end the TB here (whether read or write) */
7329 gen_io_end();
7330 gen_lookup_tb(s);
7331 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
7332 /* We default to ending the TB on a coprocessor register write,
7333 * but allow this to be suppressed by the register definition
7334 * (usually only necessary to work around guest bugs). */
7336 gen_lookup_tb(s);
7339 return 0;
7342 /* Unknown register; this might be a guest error or a QEMU
7343 * unimplemented feature. */
7345 if (is64) {
7346 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7347 "64 bit system register cp:%d opc1: %d crm:%d "
7348 "(%s)\n",
7349 isread ? "read" : "write", cpnum, opc1, crm,
7350 s->ns ? "non-secure" : "secure");
7351 } else {
7352 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7353 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7354 "(%s)\n",
7355 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7356 s->ns ? "non-secure" : "secure");
7359 return 1;
7363 /* Store a 64-bit value to a register pair. Clobbers val. */
7364 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
7366 TCGv_i32 tmp;
7367 tmp = tcg_temp_new_i32();
7368 tcg_gen_extrl_i64_i32(tmp, val);
7369 store_reg(s, rlow, tmp);
7370 tmp = tcg_temp_new_i32();
7371 tcg_gen_shri_i64(val, val, 32);
7372 tcg_gen_extrl_i64_i32(tmp, val);
7373 store_reg(s, rhigh, tmp);
7376 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
7377 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
7379 TCGv_i64 tmp;
7380 TCGv_i32 tmp2;
7382 /* Load value and extend to 64 bits. */
7383 tmp = tcg_temp_new_i64();
7384 tmp2 = load_reg(s, rlow);
7385 tcg_gen_extu_i32_i64(tmp, tmp2);
7386 tcg_temp_free_i32(tmp2);
7387 tcg_gen_add_i64(val, val, tmp);
7388 tcg_temp_free_i64(tmp);
7391 /* load and add a 64-bit value from a register pair. */
7392 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
7394 TCGv_i64 tmp;
7395 TCGv_i32 tmpl;
7396 TCGv_i32 tmph;
7398 /* Load 64-bit value rd:rn. */
7399 tmpl = load_reg(s, rlow);
7400 tmph = load_reg(s, rhigh);
7401 tmp = tcg_temp_new_i64();
7402 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7403 tcg_temp_free_i32(tmpl);
7404 tcg_temp_free_i32(tmph);
7405 tcg_gen_add_i64(val, val, tmp);
7406 tcg_temp_free_i64(tmp);
7409 /* Set N and Z flags from hi|lo. */
7410 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
7412 tcg_gen_mov_i32(cpu_NF, hi);
7413 tcg_gen_or_i32(cpu_ZF, lo, hi);
7416 /* Load/Store exclusive instructions are implemented by remembering
7417 the value/address loaded, and seeing if these are the same
7418 when the store is performed. This should be sufficient to implement
7419 the architecturally mandated semantics, and avoids having to monitor
7420 regular stores.
7422 In system emulation mode only one CPU will be running at once, so
7423 this sequence is effectively atomic. In user emulation mode we
7424 throw an exception and handle the atomic operation elsewhere. */
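/* For the doubleword forms (LDREXD/STREXD) the two 32-bit accesses
 * are tracked together as a single 64-bit exclusive value.
 */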
7425 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
7426 TCGv_i32 addr, int size)
7428 TCGv_i32 tmp = tcg_temp_new_i32();
7430 s->is_ldex = true;
7432 switch (size) {
7433 case 0:
7434 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
7435 break;
7436 case 1:
7437 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
7438 break;
7439 case 2:
7440 case 3:
7441 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7442 break;
7443 default:
7444 abort();
7447 if (size == 3) {
7448 TCGv_i32 tmp2 = tcg_temp_new_i32();
7449 TCGv_i32 tmp3 = tcg_temp_new_i32();
7451 tcg_gen_addi_i32(tmp2, addr, 4);
7452 gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
7453 tcg_temp_free_i32(tmp2);
7454 tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
7455 store_reg(s, rt2, tmp3);
7456 } else {
7457 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
7460 store_reg(s, rt, tmp);
7461 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
7464 static void gen_clrex(DisasContext *s)
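/* Addresses are zero-extended from 32 bits, so -1 can never match
 * and effectively clears any outstanding exclusive reservation.
 */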
7466 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7469 #ifdef CONFIG_USER_ONLY
7470 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7471 TCGv_i32 addr, int size)
7473 tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
7474 tcg_gen_movi_i32(cpu_exclusive_info,
7475 size | (rd << 4) | (rt << 8) | (rt2 << 12));
7476 gen_exception_internal_insn(s, 4, EXCP_STREX);
7478 #else
7479 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7480 TCGv_i32 addr, int size)
7482 TCGv_i32 tmp;
7483 TCGv_i64 val64, extaddr;
7484 TCGLabel *done_label;
7485 TCGLabel *fail_label;
7487 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7488 [addr] = {Rt};
7489 {Rd} = 0;
7490 } else {
7491 {Rd} = 1;
7492 } */
7493 fail_label = gen_new_label();
7494 done_label = gen_new_label();
7495 extaddr = tcg_temp_new_i64();
7496 tcg_gen_extu_i32_i64(extaddr, addr);
7497 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7498 tcg_temp_free_i64(extaddr);
7500 tmp = tcg_temp_new_i32();
7501 switch (size) {
7502 case 0:
7503 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
7504 break;
7505 case 1:
7506 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
7507 break;
7508 case 2:
7509 case 3:
7510 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7511 break;
7512 default:
7513 abort();
7516 val64 = tcg_temp_new_i64();
7517 if (size == 3) {
7518 TCGv_i32 tmp2 = tcg_temp_new_i32();
7519 TCGv_i32 tmp3 = tcg_temp_new_i32();
7520 tcg_gen_addi_i32(tmp2, addr, 4);
7521 gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
7522 tcg_temp_free_i32(tmp2);
7523 tcg_gen_concat_i32_i64(val64, tmp, tmp3);
7524 tcg_temp_free_i32(tmp3);
7525 } else {
7526 tcg_gen_extu_i32_i64(val64, tmp);
7528 tcg_temp_free_i32(tmp);
7530 tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
7531 tcg_temp_free_i64(val64);
7533 tmp = load_reg(s, rt);
7534 switch (size) {
7535 case 0:
7536 gen_aa32_st8(tmp, addr, get_mem_index(s));
7537 break;
7538 case 1:
7539 gen_aa32_st16(tmp, addr, get_mem_index(s));
7540 break;
7541 case 2:
7542 case 3:
7543 gen_aa32_st32(tmp, addr, get_mem_index(s));
7544 break;
7545 default:
7546 abort();
7548 tcg_temp_free_i32(tmp);
7549 if (size == 3) {
7550 tcg_gen_addi_i32(addr, addr, 4);
7551 tmp = load_reg(s, rt2);
7552 gen_aa32_st32(tmp, addr, get_mem_index(s));
7553 tcg_temp_free_i32(tmp);
7555 tcg_gen_movi_i32(cpu_R[rd], 0);
7556 tcg_gen_br(done_label);
7557 gen_set_label(fail_label);
7558 tcg_gen_movi_i32(cpu_R[rd], 1);
7559 gen_set_label(done_label);
7560 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7562 #endif
7564 /* gen_srs:
7565 * @env: CPUARMState
7566 * @s: DisasContext
7567 * @mode: mode field from insn (which stack to store to)
7568 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7569 * @writeback: true if writeback bit set
7571 * Generate code for the SRS (Store Return State) insn. */
7573 static void gen_srs(DisasContext *s,
7574 uint32_t mode, uint32_t amode, bool writeback)
7576 int32_t offset;
7577 TCGv_i32 addr = tcg_temp_new_i32();
7578 TCGv_i32 tmp = tcg_const_i32(mode);
7579 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7580 tcg_temp_free_i32(tmp);
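/* SRS stores two words: LR, then SPSR.  The initial offset places
 * addr at the lower of the two words for the given addressing mode;
 * the writeback offsets further down leave the banked SP adjusted
 * accordingly.
 */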
7581 switch (amode) {
7582 case 0: /* DA */
7583 offset = -4;
7584 break;
7585 case 1: /* IA */
7586 offset = 0;
7587 break;
7588 case 2: /* DB */
7589 offset = -8;
7590 break;
7591 case 3: /* IB */
7592 offset = 4;
7593 break;
7594 default:
7595 abort();
7597 tcg_gen_addi_i32(addr, addr, offset);
7598 tmp = load_reg(s, 14);
7599 gen_aa32_st32(tmp, addr, get_mem_index(s));
7600 tcg_temp_free_i32(tmp);
7601 tmp = load_cpu_field(spsr);
7602 tcg_gen_addi_i32(addr, addr, 4);
7603 gen_aa32_st32(tmp, addr, get_mem_index(s));
7604 tcg_temp_free_i32(tmp);
7605 if (writeback) {
7606 switch (amode) {
7607 case 0:
7608 offset = -8;
7609 break;
7610 case 1:
7611 offset = 4;
7612 break;
7613 case 2:
7614 offset = -4;
7615 break;
7616 case 3:
7617 offset = 0;
7618 break;
7619 default:
7620 abort();
7622 tcg_gen_addi_i32(addr, addr, offset);
7623 tmp = tcg_const_i32(mode);
7624 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7625 tcg_temp_free_i32(tmp);
7627 tcg_temp_free_i32(addr);
7630 static void disas_arm_insn(DisasContext *s, unsigned int insn)
7632 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
7633 TCGv_i32 tmp;
7634 TCGv_i32 tmp2;
7635 TCGv_i32 tmp3;
7636 TCGv_i32 addr;
7637 TCGv_i64 tmp64;
7639 /* M variants do not implement ARM mode. */
7640 if (arm_dc_feature(s, ARM_FEATURE_M)) {
7641 goto illegal_op;
7643 cond = insn >> 28;
7644 if (cond == 0xf){
7645 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7646 * choose to UNDEF. In ARMv5 and above the space is used
7647 * for miscellaneous unconditional instructions. */
7649 ARCH(5);
7651 /* Unconditional instructions. */
7652 if (((insn >> 25) & 7) == 1) {
7653 /* NEON Data processing. */
7654 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7655 goto illegal_op;
7658 if (disas_neon_data_insn(s, insn)) {
7659 goto illegal_op;
7661 return;
7663 if ((insn & 0x0f100000) == 0x04000000) {
7664 /* NEON load/store. */
7665 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7666 goto illegal_op;
7669 if (disas_neon_ls_insn(s, insn)) {
7670 goto illegal_op;
7672 return;
7674 if ((insn & 0x0f000e10) == 0x0e000a00) {
7675 /* VFP. */
7676 if (disas_vfp_insn(s, insn)) {
7677 goto illegal_op;
7679 return;
7681 if (((insn & 0x0f30f000) == 0x0510f000) ||
7682 ((insn & 0x0f30f010) == 0x0710f000)) {
7683 if ((insn & (1 << 22)) == 0) {
7684 /* PLDW; v7MP */
7685 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7686 goto illegal_op;
7689 /* Otherwise PLD; v5TE+ */
7690 ARCH(5TE);
7691 return;
7693 if (((insn & 0x0f70f000) == 0x0450f000) ||
7694 ((insn & 0x0f70f010) == 0x0650f000)) {
7695 ARCH(7);
7696 return; /* PLI; V7 */
7698 if (((insn & 0x0f700000) == 0x04100000) ||
7699 ((insn & 0x0f700010) == 0x06100000)) {
7700 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7701 goto illegal_op;
7703 return; /* v7MP: Unallocated memory hint: must NOP */
7706 if ((insn & 0x0ffffdff) == 0x01010000) {
7707 ARCH(6);
7708 /* setend */
7709 if (((insn >> 9) & 1) != s->bswap_code) {
7710 /* Dynamic endianness switching not implemented. */
7711 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
7712 goto illegal_op;
7714 return;
7715 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7716 switch ((insn >> 4) & 0xf) {
7717 case 1: /* clrex */
7718 ARCH(6K);
7719 gen_clrex(s);
7720 return;
7721 case 4: /* dsb */
7722 case 5: /* dmb */
7723 case 6: /* isb */
7724 ARCH(7);
7725 /* We don't emulate caches so these are a no-op. */
7726 return;
7727 default:
7728 goto illegal_op;
7730 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7731 /* srs */
7732 if (IS_USER(s)) {
7733 goto illegal_op;
7735 ARCH(6);
7736 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
7737 return;
7738 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
7739 /* rfe */
7740 int32_t offset;
7741 if (IS_USER(s))
7742 goto illegal_op;
7743 ARCH(6);
7744 rn = (insn >> 16) & 0xf;
7745 addr = load_reg(s, rn);
7746 i = (insn >> 23) & 3;
7747 switch (i) {
7748 case 0: offset = -4; break; /* DA */
7749 case 1: offset = 0; break; /* IA */
7750 case 2: offset = -8; break; /* DB */
7751 case 3: offset = 4; break; /* IB */
7752 default: abort();
7754 if (offset)
7755 tcg_gen_addi_i32(addr, addr, offset);
7756 /* Load PC into tmp and CPSR into tmp2. */
7757 tmp = tcg_temp_new_i32();
7758 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7759 tcg_gen_addi_i32(addr, addr, 4);
7760 tmp2 = tcg_temp_new_i32();
7761 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
7762 if (insn & (1 << 21)) {
7763 /* Base writeback. */
7764 switch (i) {
7765 case 0: offset = -8; break;
7766 case 1: offset = 4; break;
7767 case 2: offset = -4; break;
7768 case 3: offset = 0; break;
7769 default: abort();
7771 if (offset)
7772 tcg_gen_addi_i32(addr, addr, offset);
7773 store_reg(s, rn, addr);
7774 } else {
7775 tcg_temp_free_i32(addr);
7777 gen_rfe(s, tmp, tmp2);
7778 return;
7779 } else if ((insn & 0x0e000000) == 0x0a000000) {
7780 /* branch link and change to thumb (blx <offset>) */
7781 int32_t offset;
7783 val = (uint32_t)s->pc;
7784 tmp = tcg_temp_new_i32();
7785 tcg_gen_movi_i32(tmp, val);
7786 store_reg(s, 14, tmp);
7787 /* Sign-extend the 24-bit offset */
7788 offset = (((int32_t)insn) << 8) >> 8;
7789 /* offset * 4 + bit24 * 2 + (thumb bit) */
7790 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7791 /* pipeline offset */
7792 val += 4;
7793 /* protected by ARCH(5); above, near the start of uncond block */
7794 gen_bx_im(s, val);
7795 return;
7796 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7797 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7798 /* iWMMXt register transfer. */
7799 if (extract32(s->c15_cpar, 1, 1)) {
7800 if (!disas_iwmmxt_insn(s, insn)) {
7801 return;
7805 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7806 /* Coprocessor double register transfer. */
7807 ARCH(5TE);
7808 } else if ((insn & 0x0f000010) == 0x0e000010) {
7809 /* Additional coprocessor register transfer. */
7810 } else if ((insn & 0x0ff10020) == 0x01000000) {
7811 uint32_t mask;
7812 uint32_t val;
7813 /* cps (privileged) */
7814 if (IS_USER(s))
7815 return;
7816 mask = val = 0;
7817 if (insn & (1 << 19)) {
7818 if (insn & (1 << 8))
7819 mask |= CPSR_A;
7820 if (insn & (1 << 7))
7821 mask |= CPSR_I;
7822 if (insn & (1 << 6))
7823 mask |= CPSR_F;
7824 if (insn & (1 << 18))
7825 val |= mask;
7827 if (insn & (1 << 17)) {
7828 mask |= CPSR_M;
7829 val |= (insn & 0x1f);
7831 if (mask) {
7832 gen_set_psr_im(s, mask, 0, val);
7834 return;
7836 goto illegal_op;
7838 if (cond != 0xe) {
7839 /* If the condition is not "always", generate a conditional jump
7840 to the next instruction. */
7841 s->condlabel = gen_new_label();
7842 arm_gen_test_cc(cond ^ 1, s->condlabel);
7843 s->condjmp = 1;
7845 if ((insn & 0x0f900000) == 0x03000000) {
7846 if ((insn & (1 << 21)) == 0) {
7847 ARCH(6T2);
7848 rd = (insn >> 12) & 0xf;
7849 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7850 if ((insn & (1 << 22)) == 0) {
7851 /* MOVW */
7852 tmp = tcg_temp_new_i32();
7853 tcg_gen_movi_i32(tmp, val);
7854 } else {
7855 /* MOVT */
7856 tmp = load_reg(s, rd);
7857 tcg_gen_ext16u_i32(tmp, tmp);
7858 tcg_gen_ori_i32(tmp, tmp, val << 16);
7860 store_reg(s, rd, tmp);
7861 } else {
7862 if (((insn >> 12) & 0xf) != 0xf)
7863 goto illegal_op;
7864 if (((insn >> 16) & 0xf) == 0) {
7865 gen_nop_hint(s, insn & 0xff);
7866 } else {
7867 /* CPSR = immediate */
7868 val = insn & 0xff;
7869 shift = ((insn >> 8) & 0xf) * 2;
7870 if (shift)
7871 val = (val >> shift) | (val << (32 - shift));
7872 i = ((insn & (1 << 22)) != 0);
7873 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
7874 i, val)) {
7875 goto illegal_op;
7879 } else if ((insn & 0x0f900000) == 0x01000000
7880 && (insn & 0x00000090) != 0x00000090) {
7881 /* miscellaneous instructions */
7882 op1 = (insn >> 21) & 3;
7883 sh = (insn >> 4) & 0xf;
7884 rm = insn & 0xf;
7885 switch (sh) {
7886 case 0x0: /* move program status register */
7887 if (op1 & 1) {
7888 /* PSR = reg */
7889 tmp = load_reg(s, rm);
7890 i = ((op1 & 2) != 0);
7891 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
7892 goto illegal_op;
7893 } else {
7894 /* reg = PSR */
7895 rd = (insn >> 12) & 0xf;
7896 if (op1 & 2) {
7897 if (IS_USER(s))
7898 goto illegal_op;
7899 tmp = load_cpu_field(spsr);
7900 } else {
7901 tmp = tcg_temp_new_i32();
7902 gen_helper_cpsr_read(tmp, cpu_env);
7904 store_reg(s, rd, tmp);
7906 break;
7907 case 0x1:
7908 if (op1 == 1) {
7909 /* branch/exchange thumb (bx). */
7910 ARCH(4T);
7911 tmp = load_reg(s, rm);
7912 gen_bx(s, tmp);
7913 } else if (op1 == 3) {
7914 /* clz */
7915 ARCH(5);
7916 rd = (insn >> 12) & 0xf;
7917 tmp = load_reg(s, rm);
7918 gen_helper_clz(tmp, tmp);
7919 store_reg(s, rd, tmp);
7920 } else {
7921 goto illegal_op;
7923 break;
7924 case 0x2:
7925 if (op1 == 1) {
7926 ARCH(5J); /* bxj */
7927 /* Trivial implementation equivalent to bx. */
7928 tmp = load_reg(s, rm);
7929 gen_bx(s, tmp);
7930 } else {
7931 goto illegal_op;
7933 break;
7934 case 0x3:
7935 if (op1 != 1)
7936 goto illegal_op;
7938 ARCH(5);
7939 /* branch link/exchange thumb (blx) */
7940 tmp = load_reg(s, rm);
7941 tmp2 = tcg_temp_new_i32();
7942 tcg_gen_movi_i32(tmp2, s->pc);
7943 store_reg(s, 14, tmp2);
7944 gen_bx(s, tmp);
7945 break;
7946 case 0x4:
7948 /* crc32/crc32c */
7949 uint32_t c = extract32(insn, 8, 4);
7951 /* Check this CPU supports ARMv8 CRC instructions.
7952 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
7953 * Bits 8, 10 and 11 should be zero. */
7955 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
7956 (c & 0xd) != 0) {
7957 goto illegal_op;
7960 rn = extract32(insn, 16, 4);
7961 rd = extract32(insn, 12, 4);
7963 tmp = load_reg(s, rn);
7964 tmp2 = load_reg(s, rm);
7965 if (op1 == 0) {
7966 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
7967 } else if (op1 == 1) {
7968 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
7970 tmp3 = tcg_const_i32(1 << op1);
7971 if (c & 0x2) {
7972 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
7973 } else {
7974 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
7976 tcg_temp_free_i32(tmp2);
7977 tcg_temp_free_i32(tmp3);
7978 store_reg(s, rd, tmp);
7979 break;
7981 case 0x5: /* saturating add/subtract */
7982 ARCH(5TE);
7983 rd = (insn >> 12) & 0xf;
7984 rn = (insn >> 16) & 0xf;
7985 tmp = load_reg(s, rm);
7986 tmp2 = load_reg(s, rn);
7987 if (op1 & 2)
7988 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
7989 if (op1 & 1)
7990 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
7991 else
7992 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7993 tcg_temp_free_i32(tmp2);
7994 store_reg(s, rd, tmp);
7995 break;
7996 case 7:
7998 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
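/* The 16-bit immediate used by BKPT (and passed to HVC) is split
 * across insn bits [3:0] and [19:8].
 */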
7999 switch (op1) {
8000 case 1:
8001 /* bkpt */
8002 ARCH(5);
8003 gen_exception_insn(s, 4, EXCP_BKPT,
8004 syn_aa32_bkpt(imm16, false),
8005 default_exception_el(s));
8006 break;
8007 case 2:
8008 /* Hypervisor call (v7) */
8009 ARCH(7);
8010 if (IS_USER(s)) {
8011 goto illegal_op;
8013 gen_hvc(s, imm16);
8014 break;
8015 case 3:
8016 /* Secure monitor call (v6+) */
8017 ARCH(6K);
8018 if (IS_USER(s)) {
8019 goto illegal_op;
8021 gen_smc(s);
8022 break;
8023 default:
8024 goto illegal_op;
8026 break;
8028 case 0x8: /* signed multiply */
8029 case 0xa:
8030 case 0xc:
8031 case 0xe:
8032 ARCH(5TE);
8033 rs = (insn >> 8) & 0xf;
8034 rn = (insn >> 12) & 0xf;
8035 rd = (insn >> 16) & 0xf;
8036 if (op1 == 1) {
8037 /* (32 * 16) >> 16 */
8038 tmp = load_reg(s, rm);
8039 tmp2 = load_reg(s, rs);
8040 if (sh & 4)
8041 tcg_gen_sari_i32(tmp2, tmp2, 16);
8042 else
8043 gen_sxth(tmp2);
8044 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8045 tcg_gen_shri_i64(tmp64, tmp64, 16);
8046 tmp = tcg_temp_new_i32();
8047 tcg_gen_extrl_i64_i32(tmp, tmp64);
8048 tcg_temp_free_i64(tmp64);
8049 if ((sh & 2) == 0) {
8050 tmp2 = load_reg(s, rn);
8051 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8052 tcg_temp_free_i32(tmp2);
8054 store_reg(s, rd, tmp);
8055 } else {
8056 /* 16 * 16 */
8057 tmp = load_reg(s, rm);
8058 tmp2 = load_reg(s, rs);
8059 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
8060 tcg_temp_free_i32(tmp2);
8061 if (op1 == 2) {
8062 tmp64 = tcg_temp_new_i64();
8063 tcg_gen_ext_i32_i64(tmp64, tmp);
8064 tcg_temp_free_i32(tmp);
8065 gen_addq(s, tmp64, rn, rd);
8066 gen_storeq_reg(s, rn, rd, tmp64);
8067 tcg_temp_free_i64(tmp64);
8068 } else {
8069 if (op1 == 0) {
8070 tmp2 = load_reg(s, rn);
8071 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8072 tcg_temp_free_i32(tmp2);
8074 store_reg(s, rd, tmp);
8077 break;
8078 default:
8079 goto illegal_op;
8081 } else if (((insn & 0x0e000000) == 0 &&
8082 (insn & 0x00000090) != 0x90) ||
8083 ((insn & 0x0e000000) == (1 << 25))) {
8084 int set_cc, logic_cc, shiftop;
8086 op1 = (insn >> 21) & 0xf;
8087 set_cc = (insn >> 20) & 1;
8088 logic_cc = table_logic_cc[op1] & set_cc;
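/* table_logic_cc[] marks the logical opcodes (AND, EOR, TST, TEQ,
 * ORR, MOV, BIC, MVN), whose S forms set N and Z from the result
 * and C from the shifter carry-out rather than from an arithmetic
 * add/subtract.
 */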
8090 /* data processing instruction */
8091 if (insn & (1 << 25)) {
8092 /* immediate operand */
8093 val = insn & 0xff;
8094 shift = ((insn >> 8) & 0xf) * 2;
8095 if (shift) {
8096 val = (val >> shift) | (val << (32 - shift));
8098 tmp2 = tcg_temp_new_i32();
8099 tcg_gen_movi_i32(tmp2, val);
8100 if (logic_cc && shift) {
8101 gen_set_CF_bit31(tmp2);
8103 } else {
8104 /* register */
8105 rm = (insn) & 0xf;
8106 tmp2 = load_reg(s, rm);
8107 shiftop = (insn >> 5) & 3;
8108 if (!(insn & (1 << 4))) {
8109 shift = (insn >> 7) & 0x1f;
8110 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8111 } else {
8112 rs = (insn >> 8) & 0xf;
8113 tmp = load_reg(s, rs);
8114 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
8117 if (op1 != 0x0f && op1 != 0x0d) {
8118 rn = (insn >> 16) & 0xf;
8119 tmp = load_reg(s, rn);
8120 } else {
8121 TCGV_UNUSED_I32(tmp);
8123 rd = (insn >> 12) & 0xf;
8124 switch(op1) {
8125 case 0x00:
8126 tcg_gen_and_i32(tmp, tmp, tmp2);
8127 if (logic_cc) {
8128 gen_logic_CC(tmp);
8130 store_reg_bx(s, rd, tmp);
8131 break;
8132 case 0x01:
8133 tcg_gen_xor_i32(tmp, tmp, tmp2);
8134 if (logic_cc) {
8135 gen_logic_CC(tmp);
8137 store_reg_bx(s, rd, tmp);
8138 break;
8139 case 0x02:
8140 if (set_cc && rd == 15) {
8141 /* SUBS r15, ... is used for exception return. */
8142 if (IS_USER(s)) {
8143 goto illegal_op;
8145 gen_sub_CC(tmp, tmp, tmp2);
8146 gen_exception_return(s, tmp);
8147 } else {
8148 if (set_cc) {
8149 gen_sub_CC(tmp, tmp, tmp2);
8150 } else {
8151 tcg_gen_sub_i32(tmp, tmp, tmp2);
8153 store_reg_bx(s, rd, tmp);
8155 break;
8156 case 0x03:
8157 if (set_cc) {
8158 gen_sub_CC(tmp, tmp2, tmp);
8159 } else {
8160 tcg_gen_sub_i32(tmp, tmp2, tmp);
8162 store_reg_bx(s, rd, tmp);
8163 break;
8164 case 0x04:
8165 if (set_cc) {
8166 gen_add_CC(tmp, tmp, tmp2);
8167 } else {
8168 tcg_gen_add_i32(tmp, tmp, tmp2);
8170 store_reg_bx(s, rd, tmp);
8171 break;
8172 case 0x05:
8173 if (set_cc) {
8174 gen_adc_CC(tmp, tmp, tmp2);
8175 } else {
8176 gen_add_carry(tmp, tmp, tmp2);
8178 store_reg_bx(s, rd, tmp);
8179 break;
8180 case 0x06:
8181 if (set_cc) {
8182 gen_sbc_CC(tmp, tmp, tmp2);
8183 } else {
8184 gen_sub_carry(tmp, tmp, tmp2);
8186 store_reg_bx(s, rd, tmp);
8187 break;
8188 case 0x07:
8189 if (set_cc) {
8190 gen_sbc_CC(tmp, tmp2, tmp);
8191 } else {
8192 gen_sub_carry(tmp, tmp2, tmp);
8194 store_reg_bx(s, rd, tmp);
8195 break;
8196 case 0x08:
8197 if (set_cc) {
8198 tcg_gen_and_i32(tmp, tmp, tmp2);
8199 gen_logic_CC(tmp);
8201 tcg_temp_free_i32(tmp);
8202 break;
8203 case 0x09:
8204 if (set_cc) {
8205 tcg_gen_xor_i32(tmp, tmp, tmp2);
8206 gen_logic_CC(tmp);
8208 tcg_temp_free_i32(tmp);
8209 break;
8210 case 0x0a:
8211 if (set_cc) {
8212 gen_sub_CC(tmp, tmp, tmp2);
8214 tcg_temp_free_i32(tmp);
8215 break;
8216 case 0x0b:
8217 if (set_cc) {
8218 gen_add_CC(tmp, tmp, tmp2);
8220 tcg_temp_free_i32(tmp);
8221 break;
8222 case 0x0c:
8223 tcg_gen_or_i32(tmp, tmp, tmp2);
8224 if (logic_cc) {
8225 gen_logic_CC(tmp);
8227 store_reg_bx(s, rd, tmp);
8228 break;
8229 case 0x0d:
8230 if (logic_cc && rd == 15) {
8231 /* MOVS r15, ... is used for exception return. */
8232 if (IS_USER(s)) {
8233 goto illegal_op;
8235 gen_exception_return(s, tmp2);
8236 } else {
8237 if (logic_cc) {
8238 gen_logic_CC(tmp2);
8240 store_reg_bx(s, rd, tmp2);
8242 break;
8243 case 0x0e:
8244 tcg_gen_andc_i32(tmp, tmp, tmp2);
8245 if (logic_cc) {
8246 gen_logic_CC(tmp);
8248 store_reg_bx(s, rd, tmp);
8249 break;
8250 default:
8251 case 0x0f:
8252 tcg_gen_not_i32(tmp2, tmp2);
8253 if (logic_cc) {
8254 gen_logic_CC(tmp2);
8256 store_reg_bx(s, rd, tmp2);
8257 break;
8259 if (op1 != 0x0f && op1 != 0x0d) {
8260 tcg_temp_free_i32(tmp2);
8262 } else {
8263 /* other instructions */
8264 op1 = (insn >> 24) & 0xf;
8265 switch(op1) {
8266 case 0x0:
8267 case 0x1:
8268 /* multiplies, extra load/stores */
8269 sh = (insn >> 5) & 3;
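/* Bits [6:5]: zero selects multiplies, SWP and the load/store
 * exclusive group; non-zero selects the misc halfword/doubleword
 * loads and stores.
 */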
8270 if (sh == 0) {
8271 if (op1 == 0x0) {
8272 rd = (insn >> 16) & 0xf;
8273 rn = (insn >> 12) & 0xf;
8274 rs = (insn >> 8) & 0xf;
8275 rm = (insn) & 0xf;
8276 op1 = (insn >> 20) & 0xf;
8277 switch (op1) {
8278 case 0: case 1: case 2: case 3: case 6:
8279 /* 32 bit mul */
8280 tmp = load_reg(s, rs);
8281 tmp2 = load_reg(s, rm);
8282 tcg_gen_mul_i32(tmp, tmp, tmp2);
8283 tcg_temp_free_i32(tmp2);
8284 if (insn & (1 << 22)) {
8285 /* Subtract (mls) */
8286 ARCH(6T2);
8287 tmp2 = load_reg(s, rn);
8288 tcg_gen_sub_i32(tmp, tmp2, tmp);
8289 tcg_temp_free_i32(tmp2);
8290 } else if (insn & (1 << 21)) {
8291 /* Add */
8292 tmp2 = load_reg(s, rn);
8293 tcg_gen_add_i32(tmp, tmp, tmp2);
8294 tcg_temp_free_i32(tmp2);
8296 if (insn & (1 << 20))
8297 gen_logic_CC(tmp);
8298 store_reg(s, rd, tmp);
8299 break;
8300 case 4:
8301 /* 64 bit mul double accumulate (UMAAL) */
8302 ARCH(6);
8303 tmp = load_reg(s, rs);
8304 tmp2 = load_reg(s, rm);
8305 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8306 gen_addq_lo(s, tmp64, rn);
8307 gen_addq_lo(s, tmp64, rd);
8308 gen_storeq_reg(s, rn, rd, tmp64);
8309 tcg_temp_free_i64(tmp64);
8310 break;
8311 case 8: case 9: case 10: case 11:
8312 case 12: case 13: case 14: case 15:
8313 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8314 tmp = load_reg(s, rs);
8315 tmp2 = load_reg(s, rm);
8316 if (insn & (1 << 22)) {
8317 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8318 } else {
8319 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8321 if (insn & (1 << 21)) { /* mult accumulate */
8322 TCGv_i32 al = load_reg(s, rn);
8323 TCGv_i32 ah = load_reg(s, rd);
8324 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
8325 tcg_temp_free_i32(al);
8326 tcg_temp_free_i32(ah);
8328 if (insn & (1 << 20)) {
8329 gen_logicq_cc(tmp, tmp2);
8331 store_reg(s, rn, tmp);
8332 store_reg(s, rd, tmp2);
8333 break;
8334 default:
8335 goto illegal_op;
8337 } else {
8338 rn = (insn >> 16) & 0xf;
8339 rd = (insn >> 12) & 0xf;
8340 if (insn & (1 << 23)) {
8341 /* load/store exclusive */
8342 int op2 = (insn >> 8) & 3;
8343 op1 = (insn >> 21) & 0x3;
8345 switch (op2) {
8346 case 0: /* lda/stl */
8347 if (op1 == 1) {
8348 goto illegal_op;
8350 ARCH(8);
8351 break;
8352 case 1: /* reserved */
8353 goto illegal_op;
8354 case 2: /* ldaex/stlex */
8355 ARCH(8);
8356 break;
8357 case 3: /* ldrex/strex */
8358 if (op1) {
8359 ARCH(6K);
8360 } else {
8361 ARCH(6);
8363 break;
8366 addr = tcg_temp_local_new_i32();
8367 load_reg_var(s, addr, rn);
8369 /* Since the emulation does not have barriers,
8370 the acquire/release semantics need no special
8371 handling */
8372 if (op2 == 0) {
8373 if (insn & (1 << 20)) {
8374 tmp = tcg_temp_new_i32();
8375 switch (op1) {
8376 case 0: /* lda */
8377 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8378 break;
8379 case 2: /* ldab */
8380 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
8381 break;
8382 case 3: /* ldah */
8383 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8384 break;
8385 default:
8386 abort();
8388 store_reg(s, rd, tmp);
8389 } else {
8390 rm = insn & 0xf;
8391 tmp = load_reg(s, rm);
8392 switch (op1) {
8393 case 0: /* stl */
8394 gen_aa32_st32(tmp, addr, get_mem_index(s));
8395 break;
8396 case 2: /* stlb */
8397 gen_aa32_st8(tmp, addr, get_mem_index(s));
8398 break;
8399 case 3: /* stlh */
8400 gen_aa32_st16(tmp, addr, get_mem_index(s));
8401 break;
8402 default:
8403 abort();
8405 tcg_temp_free_i32(tmp);
8407 } else if (insn & (1 << 20)) {
8408 switch (op1) {
8409 case 0: /* ldrex */
8410 gen_load_exclusive(s, rd, 15, addr, 2);
8411 break;
8412 case 1: /* ldrexd */
8413 gen_load_exclusive(s, rd, rd + 1, addr, 3);
8414 break;
8415 case 2: /* ldrexb */
8416 gen_load_exclusive(s, rd, 15, addr, 0);
8417 break;
8418 case 3: /* ldrexh */
8419 gen_load_exclusive(s, rd, 15, addr, 1);
8420 break;
8421 default:
8422 abort();
8424 } else {
8425 rm = insn & 0xf;
8426 switch (op1) {
8427 case 0: /* strex */
8428 gen_store_exclusive(s, rd, rm, 15, addr, 2);
8429 break;
8430 case 1: /* strexd */
8431 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
8432 break;
8433 case 2: /* strexb */
8434 gen_store_exclusive(s, rd, rm, 15, addr, 0);
8435 break;
8436 case 3: /* strexh */
8437 gen_store_exclusive(s, rd, rm, 15, addr, 1);
8438 break;
8439 default:
8440 abort();
8443 tcg_temp_free_i32(addr);
8444 } else {
8445 /* SWP instruction */
8446 rm = (insn) & 0xf;
8448 /* ??? This is not really atomic. However we know
8449 we never have multiple CPUs running in parallel,
8450 so it is good enough. */
8451 addr = load_reg(s, rn);
8452 tmp = load_reg(s, rm);
8453 tmp2 = tcg_temp_new_i32();
8454 if (insn & (1 << 22)) {
8455 gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
8456 gen_aa32_st8(tmp, addr, get_mem_index(s));
8457 } else {
8458 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
8459 gen_aa32_st32(tmp, addr, get_mem_index(s));
8461 tcg_temp_free_i32(tmp);
8462 tcg_temp_free_i32(addr);
8463 store_reg(s, rd, tmp2);
8466 } else {
8467 int address_offset;
8468 bool load = insn & (1 << 20);
8469 bool doubleword = false;
8470 /* Misc load/store */
8471 rn = (insn >> 16) & 0xf;
8472 rd = (insn >> 12) & 0xf;
8474 if (!load && (sh & 2)) {
8475 /* doubleword */
8476 ARCH(5TE);
8477 if (rd & 1) {
8478 /* UNPREDICTABLE; we choose to UNDEF */
8479 goto illegal_op;
8481 load = (sh & 1) == 0;
8482 doubleword = true;
8485 addr = load_reg(s, rn);
8486 if (insn & (1 << 24))
8487 gen_add_datah_offset(s, insn, 0, addr);
8488 address_offset = 0;
8490 if (doubleword) {
8491 if (!load) {
8492 /* store */
8493 tmp = load_reg(s, rd);
8494 gen_aa32_st32(tmp, addr, get_mem_index(s));
8495 tcg_temp_free_i32(tmp);
8496 tcg_gen_addi_i32(addr, addr, 4);
8497 tmp = load_reg(s, rd + 1);
8498 gen_aa32_st32(tmp, addr, get_mem_index(s));
8499 tcg_temp_free_i32(tmp);
8500 } else {
8501 /* load */
8502 tmp = tcg_temp_new_i32();
8503 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8504 store_reg(s, rd, tmp);
8505 tcg_gen_addi_i32(addr, addr, 4);
8506 tmp = tcg_temp_new_i32();
8507 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8508 rd++;
8510 address_offset = -4;
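/* addr was advanced by 4 to reach the second word, so compensate
 * here before any post-indexed base writeback below.
 */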
8511 } else if (load) {
8512 /* load */
8513 tmp = tcg_temp_new_i32();
8514 switch (sh) {
8515 case 1:
8516 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8517 break;
8518 case 2:
8519 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
8520 break;
8521 default:
8522 case 3:
8523 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
8524 break;
8526 } else {
8527 /* store */
8528 tmp = load_reg(s, rd);
8529 gen_aa32_st16(tmp, addr, get_mem_index(s));
8530 tcg_temp_free_i32(tmp);
8532 /* Perform base writeback before the loaded value to
8533 ensure correct behavior with overlapping index registers.
8534 ldrd with base writeback is undefined if the
8535 destination and index registers overlap. */
8536 if (!(insn & (1 << 24))) {
8537 gen_add_datah_offset(s, insn, address_offset, addr);
8538 store_reg(s, rn, addr);
8539 } else if (insn & (1 << 21)) {
8540 if (address_offset)
8541 tcg_gen_addi_i32(addr, addr, address_offset);
8542 store_reg(s, rn, addr);
8543 } else {
8544 tcg_temp_free_i32(addr);
8546 if (load) {
8547 /* Complete the load. */
8548 store_reg(s, rd, tmp);
8551 break;
8552 case 0x4:
8553 case 0x5:
8554 goto do_ldst;
8555 case 0x6:
8556 case 0x7:
8557 if (insn & (1 << 4)) {
8558 ARCH(6);
8559 /* ARMv6 Media instructions. */
8560 rm = insn & 0xf;
8561 rn = (insn >> 16) & 0xf;
8562 rd = (insn >> 12) & 0xf;
8563 rs = (insn >> 8) & 0xf;
8564 switch ((insn >> 23) & 3) {
8565 case 0: /* Parallel add/subtract. */
8566 op1 = (insn >> 20) & 7;
8567 tmp = load_reg(s, rn);
8568 tmp2 = load_reg(s, rm);
8569 sh = (insn >> 5) & 7;
8570 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8571 goto illegal_op;
8572 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
8573 tcg_temp_free_i32(tmp2);
8574 store_reg(s, rd, tmp);
8575 break;
8576 case 1:
8577 if ((insn & 0x00700020) == 0) {
8578 /* Halfword pack. */
8579 tmp = load_reg(s, rn);
8580 tmp2 = load_reg(s, rm);
8581 shift = (insn >> 7) & 0x1f;
8582 if (insn & (1 << 6)) {
8583 /* pkhtb */
8584 if (shift == 0)
8585 shift = 31;
8586 tcg_gen_sari_i32(tmp2, tmp2, shift);
8587 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8588 tcg_gen_ext16u_i32(tmp2, tmp2);
8589 } else {
8590 /* pkhbt */
8591 if (shift)
8592 tcg_gen_shli_i32(tmp2, tmp2, shift);
8593 tcg_gen_ext16u_i32(tmp, tmp);
8594 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8596 tcg_gen_or_i32(tmp, tmp, tmp2);
8597 tcg_temp_free_i32(tmp2);
8598 store_reg(s, rd, tmp);
8599 } else if ((insn & 0x00200020) == 0x00200000) {
8600 /* [us]sat */
8601 tmp = load_reg(s, rm);
8602 shift = (insn >> 7) & 0x1f;
8603 if (insn & (1 << 6)) {
8604 if (shift == 0)
8605 shift = 31;
8606 tcg_gen_sari_i32(tmp, tmp, shift);
8607 } else {
8608 tcg_gen_shli_i32(tmp, tmp, shift);
8610 sh = (insn >> 16) & 0x1f;
8611 tmp2 = tcg_const_i32(sh);
8612 if (insn & (1 << 22))
8613 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
8614 else
8615 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
8616 tcg_temp_free_i32(tmp2);
8617 store_reg(s, rd, tmp);
8618 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8619 /* [us]sat16 */
8620 tmp = load_reg(s, rm);
8621 sh = (insn >> 16) & 0x1f;
8622 tmp2 = tcg_const_i32(sh);
8623 if (insn & (1 << 22))
8624 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
8625 else
8626 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
8627 tcg_temp_free_i32(tmp2);
8628 store_reg(s, rd, tmp);
8629 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8630 /* Select bytes. */
8631 tmp = load_reg(s, rn);
8632 tmp2 = load_reg(s, rm);
8633 tmp3 = tcg_temp_new_i32();
8634 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8635 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8636 tcg_temp_free_i32(tmp3);
8637 tcg_temp_free_i32(tmp2);
8638 store_reg(s, rd, tmp);
8639 } else if ((insn & 0x000003e0) == 0x00000060) {
8640 tmp = load_reg(s, rm);
8641 shift = (insn >> 10) & 3;
8642 /* ??? In many cases it's not necessary to do a
8643 rotate; a shift is sufficient. */
8644 if (shift != 0)
8645 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8646 op1 = (insn >> 20) & 7;
8647 switch (op1) {
8648 case 0: gen_sxtb16(tmp); break;
8649 case 2: gen_sxtb(tmp); break;
8650 case 3: gen_sxth(tmp); break;
8651 case 4: gen_uxtb16(tmp); break;
8652 case 6: gen_uxtb(tmp); break;
8653 case 7: gen_uxth(tmp); break;
8654 default: goto illegal_op;
8656 if (rn != 15) {
8657 tmp2 = load_reg(s, rn);
8658 if ((op1 & 3) == 0) {
8659 gen_add16(tmp, tmp2);
8660 } else {
8661 tcg_gen_add_i32(tmp, tmp, tmp2);
8662 tcg_temp_free_i32(tmp2);
8665 store_reg(s, rd, tmp);
8666 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8667 /* rev */
8668 tmp = load_reg(s, rm);
8669 if (insn & (1 << 22)) {
8670 if (insn & (1 << 7)) {
8671 gen_revsh(tmp);
8672 } else {
8673 ARCH(6T2);
8674 gen_helper_rbit(tmp, tmp);
8676 } else {
8677 if (insn & (1 << 7))
8678 gen_rev16(tmp);
8679 else
8680 tcg_gen_bswap32_i32(tmp, tmp);
8682 store_reg(s, rd, tmp);
8683 } else {
8684 goto illegal_op;
8686 break;
8687 case 2: /* Multiplies (Type 3). */
8688 switch ((insn >> 20) & 0x7) {
8689 case 5:
8690 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8691 /* op2 not 00x or 11x : UNDEF */
8692 goto illegal_op;
8694 /* Signed multiply most significant [accumulate].
8695 (SMMUL, SMMLA, SMMLS) */
8696 tmp = load_reg(s, rm);
8697 tmp2 = load_reg(s, rs);
8698 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8700 if (rd != 15) {
8701 tmp = load_reg(s, rd);
8702 if (insn & (1 << 6)) {
8703 tmp64 = gen_subq_msw(tmp64, tmp);
8704 } else {
8705 tmp64 = gen_addq_msw(tmp64, tmp);
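/* Bit 5 selects the rounding (R) forms, SMMULR/SMMLAR/SMMLSR: adding
 * 0x80000000 before taking the high 32 bits rounds the result to
 * nearest instead of truncating it. */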
8708 if (insn & (1 << 5)) {
8709 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8711 tcg_gen_shri_i64(tmp64, tmp64, 32);
8712 tmp = tcg_temp_new_i32();
8713 tcg_gen_extrl_i64_i32(tmp, tmp64);
8714 tcg_temp_free_i64(tmp64);
8715 store_reg(s, rn, tmp);
8716 break;
8717 case 0:
8718 case 4:
8719 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8720 if (insn & (1 << 7)) {
8721 goto illegal_op;
8723 tmp = load_reg(s, rm);
8724 tmp2 = load_reg(s, rs);
8725 if (insn & (1 << 5))
8726 gen_swap_half(tmp2);
8727 gen_smul_dual(tmp, tmp2);
8728 if (insn & (1 << 22)) {
8729 /* smlald, smlsld */
8730 TCGv_i64 tmp64_2;
8732 tmp64 = tcg_temp_new_i64();
8733 tmp64_2 = tcg_temp_new_i64();
8734 tcg_gen_ext_i32_i64(tmp64, tmp);
8735 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
8736 tcg_temp_free_i32(tmp);
8737 tcg_temp_free_i32(tmp2);
8738 if (insn & (1 << 6)) {
8739 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8740 } else {
8741 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8743 tcg_temp_free_i64(tmp64_2);
8744 gen_addq(s, tmp64, rd, rn);
8745 gen_storeq_reg(s, rd, rn, tmp64);
8746 tcg_temp_free_i64(tmp64);
8747 } else {
8748 /* smuad, smusd, smlad, smlsd */
8749 if (insn & (1 << 6)) {
8750 /* This subtraction cannot overflow. */
8751 tcg_gen_sub_i32(tmp, tmp, tmp2);
8752 } else {
8753 /* This addition cannot overflow 32 bits;
8754 * however it may overflow considered as a
8755 * signed operation, in which case we must set
8756 * the Q flag.
8758 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8760 tcg_temp_free_i32(tmp2);
8761 if (rd != 15)
8763 tmp2 = load_reg(s, rd);
8764 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8765 tcg_temp_free_i32(tmp2);
8767 store_reg(s, rn, tmp);
8769 break;
8770 case 1:
8771 case 3:
8772 /* SDIV, UDIV */
8773 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
8774 goto illegal_op;
8776 if (((insn >> 5) & 7) || (rd != 15)) {
8777 goto illegal_op;
8779 tmp = load_reg(s, rm);
8780 tmp2 = load_reg(s, rs);
8781 if (insn & (1 << 21)) {
8782 gen_helper_udiv(tmp, tmp, tmp2);
8783 } else {
8784 gen_helper_sdiv(tmp, tmp, tmp2);
8786 tcg_temp_free_i32(tmp2);
8787 store_reg(s, rn, tmp);
8788 break;
8789 default:
8790 goto illegal_op;
8792 break;
8793 case 3:
8794 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8795 switch (op1) {
8796 case 0: /* Unsigned sum of absolute differences. */
8797 ARCH(6);
8798 tmp = load_reg(s, rm);
8799 tmp2 = load_reg(s, rs);
8800 gen_helper_usad8(tmp, tmp, tmp2);
8801 tcg_temp_free_i32(tmp2);
8802 if (rd != 15) {
8803 tmp2 = load_reg(s, rd);
8804 tcg_gen_add_i32(tmp, tmp, tmp2);
8805 tcg_temp_free_i32(tmp2);
8807 store_reg(s, rn, tmp);
8808 break;
8809 case 0x20: case 0x24: case 0x28: case 0x2c:
8810 /* Bitfield insert/clear. */
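/* BFI deposits the low (msb - lsb + 1) bits of Rm into Rd starting at
 * bit lsb ('shift' below); BFC is the rm == 15 form, which deposits
 * zeroes.  Encodings with msb < lsb are rejected below. */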
8811 ARCH(6T2);
8812 shift = (insn >> 7) & 0x1f;
8813 i = (insn >> 16) & 0x1f;
8814 if (i < shift) {
8815 /* UNPREDICTABLE; we choose to UNDEF */
8816 goto illegal_op;
8818 i = i + 1 - shift;
8819 if (rm == 15) {
8820 tmp = tcg_temp_new_i32();
8821 tcg_gen_movi_i32(tmp, 0);
8822 } else {
8823 tmp = load_reg(s, rm);
8825 if (i != 32) {
8826 tmp2 = load_reg(s, rd);
8827 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
8828 tcg_temp_free_i32(tmp2);
8830 store_reg(s, rd, tmp);
8831 break;
8832 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8833 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
8834 ARCH(6T2);
8835 tmp = load_reg(s, rm);
8836 shift = (insn >> 7) & 0x1f;
8837 i = ((insn >> 16) & 0x1f) + 1;
8838 if (shift + i > 32)
8839 goto illegal_op;
8840 if (i < 32) {
8841 if (op1 & 0x20) {
8842 gen_ubfx(tmp, shift, (1u << i) - 1);
8843 } else {
8844 gen_sbfx(tmp, shift, i);
8847 store_reg(s, rd, tmp);
8848 break;
8849 default:
8850 goto illegal_op;
8852 break;
8854 break;
8856 do_ldst:
8857 /* Check for undefined extension instructions
8858 * per the ARM Bible, i.e.:
8859 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8861 sh = (0xf << 20) | (0xf << 4);
8862 if (op1 == 0x7 && ((insn & sh) == sh))
8864 goto illegal_op;
8866 /* load/store byte/word */
8867 rn = (insn >> 16) & 0xf;
8868 rd = (insn >> 12) & 0xf;
8869 tmp2 = load_reg(s, rn);
8870 if ((insn & 0x01200000) == 0x00200000) {
8871 /* ldrt/strt */
8872 i = get_a32_user_mem_index(s);
8873 } else {
8874 i = get_mem_index(s);
8876 if (insn & (1 << 24))
8877 gen_add_data_offset(s, insn, tmp2);
8878 if (insn & (1 << 20)) {
8879 /* load */
8880 tmp = tcg_temp_new_i32();
8881 if (insn & (1 << 22)) {
8882 gen_aa32_ld8u(tmp, tmp2, i);
8883 } else {
8884 gen_aa32_ld32u(tmp, tmp2, i);
8886 } else {
8887 /* store */
8888 tmp = load_reg(s, rd);
8889 if (insn & (1 << 22)) {
8890 gen_aa32_st8(tmp, tmp2, i);
8891 } else {
8892 gen_aa32_st32(tmp, tmp2, i);
8894 tcg_temp_free_i32(tmp);
8896 if (!(insn & (1 << 24))) {
8897 gen_add_data_offset(s, insn, tmp2);
8898 store_reg(s, rn, tmp2);
8899 } else if (insn & (1 << 21)) {
8900 store_reg(s, rn, tmp2);
8901 } else {
8902 tcg_temp_free_i32(tmp2);
8904 if (insn & (1 << 20)) {
8905 /* Complete the load. */
8906 store_reg_from_load(s, rd, tmp);
8908 break;
8909 case 0x08:
8910 case 0x09:
8912 int j, n, loaded_base;
8913 bool exc_return = false;
8914 bool is_load = extract32(insn, 20, 1);
8915 bool user = false;
8916 TCGv_i32 loaded_var;
8917 /* load/store multiple words */
8918 /* XXX: store correct base if write back */
8919 if (insn & (1 << 22)) {
8920 /* LDM (user), LDM (exception return) and STM (user) */
8921 if (IS_USER(s))
8922 goto illegal_op; /* only usable in supervisor mode */
8924 if (is_load && extract32(insn, 15, 1)) {
8925 exc_return = true;
8926 } else {
8927 user = true;
8930 rn = (insn >> 16) & 0xf;
8931 addr = load_reg(s, rn);
8933 /* compute total size */
8934 loaded_base = 0;
8935 TCGV_UNUSED_I32(loaded_var);
8936 n = 0;
8937 for(i=0;i<16;i++) {
8938 if (insn & (1 << i))
8939 n++;
8941 /* XXX: test invalid n == 0 case ? */
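/* Normalise the four addressing modes (IA/IB/DA/DB) so that the
 * transfer loop below always walks upwards from addr in 4-byte steps:
 * IB pre-adjusts by +4, DB by -(n * 4), DA by -((n - 1) * 4), and IA
 * needs no adjustment. */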
8942 if (insn & (1 << 23)) {
8943 if (insn & (1 << 24)) {
8944 /* pre increment */
8945 tcg_gen_addi_i32(addr, addr, 4);
8946 } else {
8947 /* post increment */
8949 } else {
8950 if (insn & (1 << 24)) {
8951 /* pre decrement */
8952 tcg_gen_addi_i32(addr, addr, -(n * 4));
8953 } else {
8954 /* post decrement */
8955 if (n != 1)
8956 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
8959 j = 0;
8960 for(i=0;i<16;i++) {
8961 if (insn & (1 << i)) {
8962 if (is_load) {
8963 /* load */
8964 tmp = tcg_temp_new_i32();
8965 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8966 if (user) {
8967 tmp2 = tcg_const_i32(i);
8968 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
8969 tcg_temp_free_i32(tmp2);
8970 tcg_temp_free_i32(tmp);
8971 } else if (i == rn) {
8972 loaded_var = tmp;
8973 loaded_base = 1;
8974 } else {
8975 store_reg_from_load(s, i, tmp);
8977 } else {
8978 /* store */
8979 if (i == 15) {
8980 /* special case: r15 = PC + 8 (s->pc already points 4 past this insn) */
8981 val = (long)s->pc + 4;
8982 tmp = tcg_temp_new_i32();
8983 tcg_gen_movi_i32(tmp, val);
8984 } else if (user) {
8985 tmp = tcg_temp_new_i32();
8986 tmp2 = tcg_const_i32(i);
8987 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
8988 tcg_temp_free_i32(tmp2);
8989 } else {
8990 tmp = load_reg(s, i);
8992 gen_aa32_st32(tmp, addr, get_mem_index(s));
8993 tcg_temp_free_i32(tmp);
8995 j++;
8996 /* no need to add after the last transfer */
8997 if (j != n)
8998 tcg_gen_addi_i32(addr, addr, 4);
9001 if (insn & (1 << 21)) {
9002 /* write back */
9003 if (insn & (1 << 23)) {
9004 if (insn & (1 << 24)) {
9005 /* pre increment */
9006 } else {
9007 /* post increment */
9008 tcg_gen_addi_i32(addr, addr, 4);
9010 } else {
9011 if (insn & (1 << 24)) {
9012 /* pre decrement */
9013 if (n != 1)
9014 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9015 } else {
9016 /* post decrement */
9017 tcg_gen_addi_i32(addr, addr, -(n * 4));
9020 store_reg(s, rn, addr);
9021 } else {
9022 tcg_temp_free_i32(addr);
9024 if (loaded_base) {
9025 store_reg(s, rn, loaded_var);
9027 if (exc_return) {
9028 /* Restore CPSR from SPSR. */
9029 tmp = load_cpu_field(spsr);
9030 gen_set_cpsr(tmp, CPSR_ERET_MASK);
9031 tcg_temp_free_i32(tmp);
9032 s->is_jmp = DISAS_UPDATE;
9035 break;
9036 case 0xa:
9037 case 0xb:
9039 int32_t offset;
9041 /* branch (and link) */
9042 val = (int32_t)s->pc;
9043 if (insn & (1 << 24)) {
9044 tmp = tcg_temp_new_i32();
9045 tcg_gen_movi_i32(tmp, val);
9046 store_reg(s, 14, tmp);
9048 offset = sextract32(insn << 2, 0, 26);
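/* The architectural branch target is this insn's address + 8 plus the
 * sign-extended imm24 * 4; s->pc already points 4 past the insn, hence
 * the extra + 4 below. */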
9049 val += offset + 4;
9050 gen_jmp(s, val);
9052 break;
9053 case 0xc:
9054 case 0xd:
9055 case 0xe:
9056 if (((insn >> 8) & 0xe) == 10) {
9057 /* VFP. */
9058 if (disas_vfp_insn(s, insn)) {
9059 goto illegal_op;
9061 } else if (disas_coproc_insn(s, insn)) {
9062 /* Coprocessor. */
9063 goto illegal_op;
9065 break;
9066 case 0xf:
9067 /* swi */
9068 gen_set_pc_im(s, s->pc);
9069 s->svc_imm = extract32(insn, 0, 24);
9070 s->is_jmp = DISAS_SWI;
9071 break;
9072 default:
9073 illegal_op:
9074 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9075 default_exception_el(s));
9076 break;
9081 /* Return true if this is a Thumb-2 logical op. */
9082 static int
9083 thumb2_logic_op(int op)
9084 {
9085 return (op < 8);
9086 }
9088 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9089 then set condition code flags based on the result of the operation.
9090 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9091 to the high bit of T1.
9092 Returns zero if the opcode is valid. */
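/* Illustrative call (assumed caller, not taken verbatim from this file):
 * for a Thumb-2 "ANDS r0, r0, r1" the decoder would use
 * gen_thumb2_data_op(s, 0, 1, 0, t0, t1); op 0 is a logical op, so with
 * conds set the result left in t0 also updates the NZ flags through
 * gen_logic_CC(). */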
9094 static int
9095 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9096 TCGv_i32 t0, TCGv_i32 t1)
9098 int logic_cc;
9100 logic_cc = 0;
9101 switch (op) {
9102 case 0: /* and */
9103 tcg_gen_and_i32(t0, t0, t1);
9104 logic_cc = conds;
9105 break;
9106 case 1: /* bic */
9107 tcg_gen_andc_i32(t0, t0, t1);
9108 logic_cc = conds;
9109 break;
9110 case 2: /* orr */
9111 tcg_gen_or_i32(t0, t0, t1);
9112 logic_cc = conds;
9113 break;
9114 case 3: /* orn */
9115 tcg_gen_orc_i32(t0, t0, t1);
9116 logic_cc = conds;
9117 break;
9118 case 4: /* eor */
9119 tcg_gen_xor_i32(t0, t0, t1);
9120 logic_cc = conds;
9121 break;
9122 case 8: /* add */
9123 if (conds)
9124 gen_add_CC(t0, t0, t1);
9125 else
9126 tcg_gen_add_i32(t0, t0, t1);
9127 break;
9128 case 10: /* adc */
9129 if (conds)
9130 gen_adc_CC(t0, t0, t1);
9131 else
9132 gen_adc(t0, t1);
9133 break;
9134 case 11: /* sbc */
9135 if (conds) {
9136 gen_sbc_CC(t0, t0, t1);
9137 } else {
9138 gen_sub_carry(t0, t0, t1);
9140 break;
9141 case 13: /* sub */
9142 if (conds)
9143 gen_sub_CC(t0, t0, t1);
9144 else
9145 tcg_gen_sub_i32(t0, t0, t1);
9146 break;
9147 case 14: /* rsb */
9148 if (conds)
9149 gen_sub_CC(t0, t1, t0);
9150 else
9151 tcg_gen_sub_i32(t0, t1, t0);
9152 break;
9153 default: /* 5, 6, 7, 9, 12, 15. */
9154 return 1;
9156 if (logic_cc) {
9157 gen_logic_CC(t0);
9158 if (shifter_out)
9159 gen_set_CF_bit31(t1);
9161 return 0;
9164 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9165 is not legal. */
9166 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9168 uint32_t insn, imm, shift, offset;
9169 uint32_t rd, rn, rm, rs;
9170 TCGv_i32 tmp;
9171 TCGv_i32 tmp2;
9172 TCGv_i32 tmp3;
9173 TCGv_i32 addr;
9174 TCGv_i64 tmp64;
9175 int op;
9176 int shiftop;
9177 int conds;
9178 int logic_cc;
9180 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9181 || arm_dc_feature(s, ARM_FEATURE_M))) {
9182 /* Thumb-1 cores may need to treat bl and blx as a pair of
9183 16-bit instructions to get correct prefetch abort behavior. */
9184 insn = insn_hw1;
9185 if ((insn & (1 << 12)) == 0) {
9186 ARCH(5);
9187 /* Second half of blx. */
9188 offset = ((insn & 0x7ff) << 1);
9189 tmp = load_reg(s, 14);
9190 tcg_gen_addi_i32(tmp, tmp, offset);
9191 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9193 tmp2 = tcg_temp_new_i32();
9194 tcg_gen_movi_i32(tmp2, s->pc | 1);
9195 store_reg(s, 14, tmp2);
9196 gen_bx(s, tmp);
9197 return 0;
9199 if (insn & (1 << 11)) {
9200 /* Second half of bl. */
9201 offset = ((insn & 0x7ff) << 1) | 1;
9202 tmp = load_reg(s, 14);
9203 tcg_gen_addi_i32(tmp, tmp, offset);
9205 tmp2 = tcg_temp_new_i32();
9206 tcg_gen_movi_i32(tmp2, s->pc | 1);
9207 store_reg(s, 14, tmp2);
9208 gen_bx(s, tmp);
9209 return 0;
9211 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9212 /* Instruction spans a page boundary. Implement it as two
9213 16-bit instructions in case the second half causes a
9214 prefetch abort. */
9215 offset = ((int32_t)insn << 21) >> 9;
9216 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9217 return 0;
9219 /* Fall through to 32-bit decode. */
9222 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9223 s->pc += 2;
9224 insn |= (uint32_t)insn_hw1 << 16;
9226 if ((insn & 0xf800e800) != 0xf000e800) {
9227 ARCH(6T2);
9230 rn = (insn >> 16) & 0xf;
9231 rs = (insn >> 12) & 0xf;
9232 rd = (insn >> 8) & 0xf;
9233 rm = insn & 0xf;
9234 switch ((insn >> 25) & 0xf) {
9235 case 0: case 1: case 2: case 3:
9236 /* 16-bit instructions. Should never happen. */
9237 abort();
9238 case 4:
9239 if (insn & (1 << 22)) {
9240 /* Other load/store, table branch. */
9241 if (insn & 0x01200000) {
9242 /* Load/store doubleword. */
9243 if (rn == 15) {
9244 addr = tcg_temp_new_i32();
9245 tcg_gen_movi_i32(addr, s->pc & ~3);
9246 } else {
9247 addr = load_reg(s, rn);
9249 offset = (insn & 0xff) * 4;
9250 if ((insn & (1 << 23)) == 0)
9251 offset = -offset;
9252 if (insn & (1 << 24)) {
9253 tcg_gen_addi_i32(addr, addr, offset);
9254 offset = 0;
9256 if (insn & (1 << 20)) {
9257 /* ldrd */
9258 tmp = tcg_temp_new_i32();
9259 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9260 store_reg(s, rs, tmp);
9261 tcg_gen_addi_i32(addr, addr, 4);
9262 tmp = tcg_temp_new_i32();
9263 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9264 store_reg(s, rd, tmp);
9265 } else {
9266 /* strd */
9267 tmp = load_reg(s, rs);
9268 gen_aa32_st32(tmp, addr, get_mem_index(s));
9269 tcg_temp_free_i32(tmp);
9270 tcg_gen_addi_i32(addr, addr, 4);
9271 tmp = load_reg(s, rd);
9272 gen_aa32_st32(tmp, addr, get_mem_index(s));
9273 tcg_temp_free_i32(tmp);
9275 if (insn & (1 << 21)) {
9276 /* Base writeback. */
9277 if (rn == 15)
9278 goto illegal_op;
9279 tcg_gen_addi_i32(addr, addr, offset - 4);
9280 store_reg(s, rn, addr);
9281 } else {
9282 tcg_temp_free_i32(addr);
9284 } else if ((insn & (1 << 23)) == 0) {
9285 /* Load/store exclusive word. */
9286 addr = tcg_temp_local_new_i32();
9287 load_reg_var(s, addr, rn);
9288 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
9289 if (insn & (1 << 20)) {
9290 gen_load_exclusive(s, rs, 15, addr, 2);
9291 } else {
9292 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9294 tcg_temp_free_i32(addr);
9295 } else if ((insn & (7 << 5)) == 0) {
9296 /* Table Branch. */
9297 if (rn == 15) {
9298 addr = tcg_temp_new_i32();
9299 tcg_gen_movi_i32(addr, s->pc);
9300 } else {
9301 addr = load_reg(s, rn);
9303 tmp = load_reg(s, rm);
9304 tcg_gen_add_i32(addr, addr, tmp);
9305 if (insn & (1 << 4)) {
9306 /* tbh */
9307 tcg_gen_add_i32(addr, addr, tmp);
9308 tcg_temp_free_i32(tmp);
9309 tmp = tcg_temp_new_i32();
9310 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9311 } else { /* tbb */
9312 tcg_temp_free_i32(tmp);
9313 tmp = tcg_temp_new_i32();
9314 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9316 tcg_temp_free_i32(addr);
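/* The loaded table entry (byte for TBB, halfword for TBH) is a count of
 * halfwords: it is doubled and added to the address of the instruction
 * following the table branch to form the new PC. */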
9317 tcg_gen_shli_i32(tmp, tmp, 1);
9318 tcg_gen_addi_i32(tmp, tmp, s->pc);
9319 store_reg(s, 15, tmp);
9320 } else {
9321 int op2 = (insn >> 6) & 0x3;
9322 op = (insn >> 4) & 0x3;
9323 switch (op2) {
9324 case 0:
9325 goto illegal_op;
9326 case 1:
9327 /* Load/store exclusive byte/halfword/doubleword */
9328 if (op == 2) {
9329 goto illegal_op;
9331 ARCH(7);
9332 break;
9333 case 2:
9334 /* Load-acquire/store-release */
9335 if (op == 3) {
9336 goto illegal_op;
9338 /* Fall through */
9339 case 3:
9340 /* Load-acquire/store-release exclusive */
9341 ARCH(8);
9342 break;
9344 addr = tcg_temp_local_new_i32();
9345 load_reg_var(s, addr, rn);
9346 if (!(op2 & 1)) {
9347 if (insn & (1 << 20)) {
9348 tmp = tcg_temp_new_i32();
9349 switch (op) {
9350 case 0: /* ldab */
9351 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9352 break;
9353 case 1: /* ldah */
9354 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9355 break;
9356 case 2: /* lda */
9357 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9358 break;
9359 default:
9360 abort();
9362 store_reg(s, rs, tmp);
9363 } else {
9364 tmp = load_reg(s, rs);
9365 switch (op) {
9366 case 0: /* stlb */
9367 gen_aa32_st8(tmp, addr, get_mem_index(s));
9368 break;
9369 case 1: /* stlh */
9370 gen_aa32_st16(tmp, addr, get_mem_index(s));
9371 break;
9372 case 2: /* stl */
9373 gen_aa32_st32(tmp, addr, get_mem_index(s));
9374 break;
9375 default:
9376 abort();
9378 tcg_temp_free_i32(tmp);
9380 } else if (insn & (1 << 20)) {
9381 gen_load_exclusive(s, rs, rd, addr, op);
9382 } else {
9383 gen_store_exclusive(s, rm, rs, rd, addr, op);
9385 tcg_temp_free_i32(addr);
9387 } else {
9388 /* Load/store multiple, RFE, SRS. */
9389 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
9390 /* RFE, SRS: not available in user mode or on M profile */
9391 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9392 goto illegal_op;
9394 if (insn & (1 << 20)) {
9395 /* rfe */
9396 addr = load_reg(s, rn);
9397 if ((insn & (1 << 24)) == 0)
9398 tcg_gen_addi_i32(addr, addr, -8);
9399 /* Load PC into tmp and CPSR into tmp2. */
9400 tmp = tcg_temp_new_i32();
9401 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9402 tcg_gen_addi_i32(addr, addr, 4);
9403 tmp2 = tcg_temp_new_i32();
9404 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9405 if (insn & (1 << 21)) {
9406 /* Base writeback. */
9407 if (insn & (1 << 24)) {
9408 tcg_gen_addi_i32(addr, addr, 4);
9409 } else {
9410 tcg_gen_addi_i32(addr, addr, -4);
9412 store_reg(s, rn, addr);
9413 } else {
9414 tcg_temp_free_i32(addr);
9416 gen_rfe(s, tmp, tmp2);
9417 } else {
9418 /* srs */
9419 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9420 insn & (1 << 21));
9422 } else {
9423 int i, loaded_base = 0;
9424 TCGv_i32 loaded_var;
9425 /* Load/store multiple. */
9426 addr = load_reg(s, rn);
9427 offset = 0;
9428 for (i = 0; i < 16; i++) {
9429 if (insn & (1 << i))
9430 offset += 4;
9432 if (insn & (1 << 24)) {
9433 tcg_gen_addi_i32(addr, addr, -offset);
9436 TCGV_UNUSED_I32(loaded_var);
9437 for (i = 0; i < 16; i++) {
9438 if ((insn & (1 << i)) == 0)
9439 continue;
9440 if (insn & (1 << 20)) {
9441 /* Load. */
9442 tmp = tcg_temp_new_i32();
9443 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9444 if (i == 15) {
9445 gen_bx(s, tmp);
9446 } else if (i == rn) {
9447 loaded_var = tmp;
9448 loaded_base = 1;
9449 } else {
9450 store_reg(s, i, tmp);
9452 } else {
9453 /* Store. */
9454 tmp = load_reg(s, i);
9455 gen_aa32_st32(tmp, addr, get_mem_index(s));
9456 tcg_temp_free_i32(tmp);
9458 tcg_gen_addi_i32(addr, addr, 4);
9460 if (loaded_base) {
9461 store_reg(s, rn, loaded_var);
9463 if (insn & (1 << 21)) {
9464 /* Base register writeback. */
9465 if (insn & (1 << 24)) {
9466 tcg_gen_addi_i32(addr, addr, -offset);
9468 /* Fault if writeback register is in register list. */
9469 if (insn & (1 << rn))
9470 goto illegal_op;
9471 store_reg(s, rn, addr);
9472 } else {
9473 tcg_temp_free_i32(addr);
9477 break;
9478 case 5:
9480 op = (insn >> 21) & 0xf;
9481 if (op == 6) {
9482 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9483 goto illegal_op;
9485 /* Halfword pack. */
9486 tmp = load_reg(s, rn);
9487 tmp2 = load_reg(s, rm);
9488 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9489 if (insn & (1 << 5)) {
9490 /* pkhtb */
9491 if (shift == 0)
9492 shift = 31;
9493 tcg_gen_sari_i32(tmp2, tmp2, shift);
9494 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9495 tcg_gen_ext16u_i32(tmp2, tmp2);
9496 } else {
9497 /* pkhbt */
9498 if (shift)
9499 tcg_gen_shli_i32(tmp2, tmp2, shift);
9500 tcg_gen_ext16u_i32(tmp, tmp);
9501 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9503 tcg_gen_or_i32(tmp, tmp, tmp2);
9504 tcg_temp_free_i32(tmp2);
9505 store_reg(s, rd, tmp);
9506 } else {
9507 /* Data processing register constant shift. */
9508 if (rn == 15) {
9509 tmp = tcg_temp_new_i32();
9510 tcg_gen_movi_i32(tmp, 0);
9511 } else {
9512 tmp = load_reg(s, rn);
9514 tmp2 = load_reg(s, rm);
9516 shiftop = (insn >> 4) & 3;
9517 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9518 conds = (insn & (1 << 20)) != 0;
9519 logic_cc = (conds && thumb2_logic_op(op));
9520 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9521 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9522 goto illegal_op;
9523 tcg_temp_free_i32(tmp2);
9524 if (rd != 15) {
9525 store_reg(s, rd, tmp);
9526 } else {
9527 tcg_temp_free_i32(tmp);
9530 break;
9531 case 13: /* Misc data processing. */
9532 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9533 if (op < 4 && (insn & 0xf000) != 0xf000)
9534 goto illegal_op;
9535 switch (op) {
9536 case 0: /* Register controlled shift. */
9537 tmp = load_reg(s, rn);
9538 tmp2 = load_reg(s, rm);
9539 if ((insn & 0x70) != 0)
9540 goto illegal_op;
9541 op = (insn >> 21) & 3;
9542 logic_cc = (insn & (1 << 20)) != 0;
9543 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9544 if (logic_cc)
9545 gen_logic_CC(tmp);
9546 store_reg_bx(s, rd, tmp);
9547 break;
9548 case 1: /* Sign/zero extend. */
9549 op = (insn >> 20) & 7;
9550 switch (op) {
9551 case 0: /* SXTAH, SXTH */
9552 case 1: /* UXTAH, UXTH */
9553 case 4: /* SXTAB, SXTB */
9554 case 5: /* UXTAB, UXTB */
9555 break;
9556 case 2: /* SXTAB16, SXTB16 */
9557 case 3: /* UXTAB16, UXTB16 */
9558 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9559 goto illegal_op;
9561 break;
9562 default:
9563 goto illegal_op;
9565 if (rn != 15) {
9566 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9567 goto illegal_op;
9570 tmp = load_reg(s, rm);
9571 shift = (insn >> 4) & 3;
9572 /* ??? In many cases it's not necessary to do a
9573 rotate; a shift is sufficient. */
9574 if (shift != 0)
9575 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9576 op = (insn >> 20) & 7;
9577 switch (op) {
9578 case 0: gen_sxth(tmp); break;
9579 case 1: gen_uxth(tmp); break;
9580 case 2: gen_sxtb16(tmp); break;
9581 case 3: gen_uxtb16(tmp); break;
9582 case 4: gen_sxtb(tmp); break;
9583 case 5: gen_uxtb(tmp); break;
9584 default:
9585 g_assert_not_reached();
9587 if (rn != 15) {
9588 tmp2 = load_reg(s, rn);
9589 if ((op >> 1) == 1) {
9590 gen_add16(tmp, tmp2);
9591 } else {
9592 tcg_gen_add_i32(tmp, tmp, tmp2);
9593 tcg_temp_free_i32(tmp2);
9596 store_reg(s, rd, tmp);
9597 break;
9598 case 2: /* SIMD add/subtract. */
9599 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9600 goto illegal_op;
9602 op = (insn >> 20) & 7;
9603 shift = (insn >> 4) & 7;
9604 if ((op & 3) == 3 || (shift & 3) == 3)
9605 goto illegal_op;
9606 tmp = load_reg(s, rn);
9607 tmp2 = load_reg(s, rm);
9608 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
9609 tcg_temp_free_i32(tmp2);
9610 store_reg(s, rd, tmp);
9611 break;
9612 case 3: /* Other data processing. */
9613 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9614 if (op < 4) {
9615 /* Saturating add/subtract. */
9616 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9617 goto illegal_op;
9619 tmp = load_reg(s, rn);
9620 tmp2 = load_reg(s, rm);
9621 if (op & 1)
9622 gen_helper_double_saturate(tmp, cpu_env, tmp);
9623 if (op & 2)
9624 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9625 else
9626 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
9627 tcg_temp_free_i32(tmp2);
9628 } else {
9629 switch (op) {
9630 case 0x0a: /* rbit */
9631 case 0x08: /* rev */
9632 case 0x09: /* rev16 */
9633 case 0x0b: /* revsh */
9634 case 0x18: /* clz */
9635 break;
9636 case 0x10: /* sel */
9637 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9638 goto illegal_op;
9640 break;
9641 case 0x20: /* crc32/crc32c */
9642 case 0x21:
9643 case 0x22:
9644 case 0x28:
9645 case 0x29:
9646 case 0x2a:
9647 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
9648 goto illegal_op;
9650 break;
9651 default:
9652 goto illegal_op;
9654 tmp = load_reg(s, rn);
9655 switch (op) {
9656 case 0x0a: /* rbit */
9657 gen_helper_rbit(tmp, tmp);
9658 break;
9659 case 0x08: /* rev */
9660 tcg_gen_bswap32_i32(tmp, tmp);
9661 break;
9662 case 0x09: /* rev16 */
9663 gen_rev16(tmp);
9664 break;
9665 case 0x0b: /* revsh */
9666 gen_revsh(tmp);
9667 break;
9668 case 0x10: /* sel */
9669 tmp2 = load_reg(s, rm);
9670 tmp3 = tcg_temp_new_i32();
9671 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
9672 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
9673 tcg_temp_free_i32(tmp3);
9674 tcg_temp_free_i32(tmp2);
9675 break;
9676 case 0x18: /* clz */
9677 gen_helper_clz(tmp, tmp);
9678 break;
9679 case 0x20:
9680 case 0x21:
9681 case 0x22:
9682 case 0x28:
9683 case 0x29:
9684 case 0x2a:
9686 /* crc32/crc32c */
9687 uint32_t sz = op & 0x3;
9688 uint32_t c = op & 0x8;
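/* sz selects the operand width (0 = byte, 1 = halfword, 2 = word) and
 * c selects the CRC-32C (Castagnoli) polynomial rather than plain
 * CRC-32; narrow operands are masked down before calling the helper. */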
9690 tmp2 = load_reg(s, rm);
9691 if (sz == 0) {
9692 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9693 } else if (sz == 1) {
9694 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9696 tmp3 = tcg_const_i32(1 << sz);
9697 if (c) {
9698 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9699 } else {
9700 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9702 tcg_temp_free_i32(tmp2);
9703 tcg_temp_free_i32(tmp3);
9704 break;
9706 default:
9707 g_assert_not_reached();
9710 store_reg(s, rd, tmp);
9711 break;
9712 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
9713 switch ((insn >> 20) & 7) {
9714 case 0: /* 32 x 32 -> 32 */
9715 case 7: /* Unsigned sum of absolute differences. */
9716 break;
9717 case 1: /* 16 x 16 -> 32 */
9718 case 2: /* Dual multiply add. */
9719 case 3: /* 32 * 16 -> 32msb */
9720 case 4: /* Dual multiply subtract. */
9721 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9722 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9723 goto illegal_op;
9725 break;
9727 op = (insn >> 4) & 0xf;
9728 tmp = load_reg(s, rn);
9729 tmp2 = load_reg(s, rm);
9730 switch ((insn >> 20) & 7) {
9731 case 0: /* 32 x 32 -> 32 */
9732 tcg_gen_mul_i32(tmp, tmp, tmp2);
9733 tcg_temp_free_i32(tmp2);
9734 if (rs != 15) {
9735 tmp2 = load_reg(s, rs);
9736 if (op)
9737 tcg_gen_sub_i32(tmp, tmp2, tmp);
9738 else
9739 tcg_gen_add_i32(tmp, tmp, tmp2);
9740 tcg_temp_free_i32(tmp2);
9742 break;
9743 case 1: /* 16 x 16 -> 32 */
9744 gen_mulxy(tmp, tmp2, op & 2, op & 1);
9745 tcg_temp_free_i32(tmp2);
9746 if (rs != 15) {
9747 tmp2 = load_reg(s, rs);
9748 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9749 tcg_temp_free_i32(tmp2);
9751 break;
9752 case 2: /* Dual multiply add. */
9753 case 4: /* Dual multiply subtract. */
9754 if (op)
9755 gen_swap_half(tmp2);
9756 gen_smul_dual(tmp, tmp2);
9757 if (insn & (1 << 22)) {
9758 /* This subtraction cannot overflow. */
9759 tcg_gen_sub_i32(tmp, tmp, tmp2);
9760 } else {
9761 /* This addition cannot overflow 32 bits;
9762 * however it may overflow considered as a signed
9763 * operation, in which case we must set the Q flag.
9765 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9767 tcg_temp_free_i32(tmp2);
9768 if (rs != 15)
9770 tmp2 = load_reg(s, rs);
9771 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9772 tcg_temp_free_i32(tmp2);
9774 break;
9775 case 3: /* 32 * 16 -> 32msb */
9776 if (op)
9777 tcg_gen_sari_i32(tmp2, tmp2, 16);
9778 else
9779 gen_sxth(tmp2);
9780 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9781 tcg_gen_shri_i64(tmp64, tmp64, 16);
9782 tmp = tcg_temp_new_i32();
9783 tcg_gen_extrl_i64_i32(tmp, tmp64);
9784 tcg_temp_free_i64(tmp64);
9785 if (rs != 15)
9787 tmp2 = load_reg(s, rs);
9788 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9789 tcg_temp_free_i32(tmp2);
9791 break;
9792 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9793 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9794 if (rs != 15) {
9795 tmp = load_reg(s, rs);
9796 if (insn & (1 << 20)) {
9797 tmp64 = gen_addq_msw(tmp64, tmp);
9798 } else {
9799 tmp64 = gen_subq_msw(tmp64, tmp);
9802 if (insn & (1 << 4)) {
9803 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9805 tcg_gen_shri_i64(tmp64, tmp64, 32);
9806 tmp = tcg_temp_new_i32();
9807 tcg_gen_extrl_i64_i32(tmp, tmp64);
9808 tcg_temp_free_i64(tmp64);
9809 break;
9810 case 7: /* Unsigned sum of absolute differences. */
9811 gen_helper_usad8(tmp, tmp, tmp2);
9812 tcg_temp_free_i32(tmp2);
9813 if (rs != 15) {
9814 tmp2 = load_reg(s, rs);
9815 tcg_gen_add_i32(tmp, tmp, tmp2);
9816 tcg_temp_free_i32(tmp2);
9818 break;
9820 store_reg(s, rd, tmp);
9821 break;
9822 case 6: case 7: /* 64-bit multiply, Divide. */
9823 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
9824 tmp = load_reg(s, rn);
9825 tmp2 = load_reg(s, rm);
9826 if ((op & 0x50) == 0x10) {
9827 /* sdiv, udiv */
9828 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9829 goto illegal_op;
9831 if (op & 0x20)
9832 gen_helper_udiv(tmp, tmp, tmp2);
9833 else
9834 gen_helper_sdiv(tmp, tmp, tmp2);
9835 tcg_temp_free_i32(tmp2);
9836 store_reg(s, rd, tmp);
9837 } else if ((op & 0xe) == 0xc) {
9838 /* Dual multiply accumulate long. */
9839 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9840 tcg_temp_free_i32(tmp);
9841 tcg_temp_free_i32(tmp2);
9842 goto illegal_op;
9844 if (op & 1)
9845 gen_swap_half(tmp2);
9846 gen_smul_dual(tmp, tmp2);
9847 if (op & 0x10) {
9848 tcg_gen_sub_i32(tmp, tmp, tmp2);
9849 } else {
9850 tcg_gen_add_i32(tmp, tmp, tmp2);
9852 tcg_temp_free_i32(tmp2);
9853 /* BUGFIX */
9854 tmp64 = tcg_temp_new_i64();
9855 tcg_gen_ext_i32_i64(tmp64, tmp);
9856 tcg_temp_free_i32(tmp);
9857 gen_addq(s, tmp64, rs, rd);
9858 gen_storeq_reg(s, rs, rd, tmp64);
9859 tcg_temp_free_i64(tmp64);
9860 } else {
9861 if (op & 0x20) {
9862 /* Unsigned 64-bit multiply */
9863 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9864 } else {
9865 if (op & 8) {
9866 /* smlalxy */
9867 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9868 tcg_temp_free_i32(tmp2);
9869 tcg_temp_free_i32(tmp);
9870 goto illegal_op;
9872 gen_mulxy(tmp, tmp2, op & 2, op & 1);
9873 tcg_temp_free_i32(tmp2);
9874 tmp64 = tcg_temp_new_i64();
9875 tcg_gen_ext_i32_i64(tmp64, tmp);
9876 tcg_temp_free_i32(tmp);
9877 } else {
9878 /* Signed 64-bit multiply */
9879 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9882 if (op & 4) {
9883 /* umaal */
9884 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9885 tcg_temp_free_i64(tmp64);
9886 goto illegal_op;
9888 gen_addq_lo(s, tmp64, rs);
9889 gen_addq_lo(s, tmp64, rd);
9890 } else if (op & 0x40) {
9891 /* 64-bit accumulate. */
9892 gen_addq(s, tmp64, rs, rd);
9894 gen_storeq_reg(s, rs, rd, tmp64);
9895 tcg_temp_free_i64(tmp64);
9897 break;
9899 break;
9900 case 6: case 7: case 14: case 15:
9901 /* Coprocessor. */
9902 if (((insn >> 24) & 3) == 3) {
9903 /* Translate into the equivalent ARM encoding. */
9904 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9905 if (disas_neon_data_insn(s, insn)) {
9906 goto illegal_op;
9908 } else if (((insn >> 8) & 0xe) == 10) {
9909 if (disas_vfp_insn(s, insn)) {
9910 goto illegal_op;
9912 } else {
9913 if (insn & (1 << 28))
9914 goto illegal_op;
9915 if (disas_coproc_insn(s, insn)) {
9916 goto illegal_op;
9919 break;
9920 case 8: case 9: case 10: case 11:
9921 if (insn & (1 << 15)) {
9922 /* Branches, misc control. */
9923 if (insn & 0x5000) {
9924 /* Unconditional branch. */
9925 /* signextend(hw1[10:0]) -> offset[:12]. */
9926 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
9927 /* hw1[10:0] -> offset[11:1]. */
9928 offset |= (insn & 0x7ff) << 1;
9929 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9930 offset[24:22] already have the same value because of the
9931 sign extension above. */
9932 offset ^= ((~insn) & (1 << 13)) << 10;
9933 offset ^= ((~insn) & (1 << 11)) << 11;
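/* hw2 bits 13 and 11 are the J1/J2 bits of the T4 encoding; the two
 * XORs above compute I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S) into
 * offset bits 23 and 22, which currently hold S from the sign
 * extension. */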
9935 if (insn & (1 << 14)) {
9936 /* Branch and link. */
9937 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
9940 offset += s->pc;
9941 if (insn & (1 << 12)) {
9942 /* b/bl */
9943 gen_jmp(s, offset);
9944 } else {
9945 /* blx */
9946 offset &= ~(uint32_t)2;
9947 /* thumb2 bx, no need to check */
9948 gen_bx_im(s, offset);
9950 } else if (((insn >> 23) & 7) == 7) {
9951 /* Misc control */
9952 if (insn & (1 << 13))
9953 goto illegal_op;
9955 if (insn & (1 << 26)) {
9956 if (!(insn & (1 << 20))) {
9957 /* Hypervisor call (v7) */
9958 int imm16 = extract32(insn, 16, 4) << 12
9959 | extract32(insn, 0, 12);
9960 ARCH(7);
9961 if (IS_USER(s)) {
9962 goto illegal_op;
9964 gen_hvc(s, imm16);
9965 } else {
9966 /* Secure monitor call (v6+) */
9967 ARCH(6K);
9968 if (IS_USER(s)) {
9969 goto illegal_op;
9971 gen_smc(s);
9973 } else {
9974 op = (insn >> 20) & 7;
9975 switch (op) {
9976 case 0: /* msr cpsr. */
9977 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9978 tmp = load_reg(s, rn);
9979 addr = tcg_const_i32(insn & 0xff);
9980 gen_helper_v7m_msr(cpu_env, addr, tmp);
9981 tcg_temp_free_i32(addr);
9982 tcg_temp_free_i32(tmp);
9983 gen_lookup_tb(s);
9984 break;
9986 /* fall through */
9987 case 1: /* msr spsr. */
9988 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9989 goto illegal_op;
9991 tmp = load_reg(s, rn);
9992 if (gen_set_psr(s,
9993 msr_mask(s, (insn >> 8) & 0xf, op == 1),
9994 op == 1, tmp))
9995 goto illegal_op;
9996 break;
9997 case 2: /* cps, nop-hint. */
9998 if (((insn >> 8) & 7) == 0) {
9999 gen_nop_hint(s, insn & 0xff);
10001 /* Implemented as NOP in user mode. */
10002 if (IS_USER(s))
10003 break;
10004 offset = 0;
10005 imm = 0;
10006 if (insn & (1 << 10)) {
10007 if (insn & (1 << 7))
10008 offset |= CPSR_A;
10009 if (insn & (1 << 6))
10010 offset |= CPSR_I;
10011 if (insn & (1 << 5))
10012 offset |= CPSR_F;
10013 if (insn & (1 << 9))
10014 imm = CPSR_A | CPSR_I | CPSR_F;
10016 if (insn & (1 << 8)) {
10017 offset |= 0x1f;
10018 imm |= (insn & 0x1f);
10020 if (offset) {
10021 gen_set_psr_im(s, offset, 0, imm);
10023 break;
10024 case 3: /* Special control operations. */
10025 ARCH(7);
10026 op = (insn >> 4) & 0xf;
10027 switch (op) {
10028 case 2: /* clrex */
10029 gen_clrex(s);
10030 break;
10031 case 4: /* dsb */
10032 case 5: /* dmb */
10033 case 6: /* isb */
10034 /* These execute as NOPs. */
10035 break;
10036 default:
10037 goto illegal_op;
10039 break;
10040 case 4: /* bxj */
10041 /* Trivial implementation equivalent to bx. */
10042 tmp = load_reg(s, rn);
10043 gen_bx(s, tmp);
10044 break;
10045 case 5: /* Exception return. */
10046 if (IS_USER(s)) {
10047 goto illegal_op;
10049 if (rn != 14 || rd != 15) {
10050 goto illegal_op;
10052 tmp = load_reg(s, rn);
10053 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10054 gen_exception_return(s, tmp);
10055 break;
10056 case 6: /* mrs cpsr. */
10057 tmp = tcg_temp_new_i32();
10058 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10059 addr = tcg_const_i32(insn & 0xff);
10060 gen_helper_v7m_mrs(tmp, cpu_env, addr);
10061 tcg_temp_free_i32(addr);
10062 } else {
10063 gen_helper_cpsr_read(tmp, cpu_env);
10065 store_reg(s, rd, tmp);
10066 break;
10067 case 7: /* mrs spsr. */
10068 /* Not accessible in user mode. */
10069 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
10070 goto illegal_op;
10072 tmp = load_cpu_field(spsr);
10073 store_reg(s, rd, tmp);
10074 break;
10077 } else {
10078 /* Conditional branch. */
10079 op = (insn >> 22) & 0xf;
10080 /* Generate a conditional jump to next instruction. */
10081 s->condlabel = gen_new_label();
10082 arm_gen_test_cc(op ^ 1, s->condlabel);
10083 s->condjmp = 1;
10085 /* offset[11:1] = insn[10:0] */
10086 offset = (insn & 0x7ff) << 1;
10087 /* offset[17:12] = insn[21:16]. */
10088 offset |= (insn & 0x003f0000) >> 4;
10089 /* offset[31:20] = insn[26]. */
10090 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10091 /* offset[18] = insn[13]. */
10092 offset |= (insn & (1 << 13)) << 5;
10093 /* offset[19] = insn[11]. */
10094 offset |= (insn & (1 << 11)) << 8;
10096 /* jump to the offset */
10097 gen_jmp(s, s->pc + offset);
10099 } else {
10100 /* Data processing immediate. */
10101 if (insn & (1 << 25)) {
10102 if (insn & (1 << 24)) {
10103 if (insn & (1 << 20))
10104 goto illegal_op;
10105 /* Bitfield/Saturate. */
10106 op = (insn >> 21) & 7;
10107 imm = insn & 0x1f;
10108 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10109 if (rn == 15) {
10110 tmp = tcg_temp_new_i32();
10111 tcg_gen_movi_i32(tmp, 0);
10112 } else {
10113 tmp = load_reg(s, rn);
10115 switch (op) {
10116 case 2: /* Signed bitfield extract. */
10117 imm++;
10118 if (shift + imm > 32)
10119 goto illegal_op;
10120 if (imm < 32)
10121 gen_sbfx(tmp, shift, imm);
10122 break;
10123 case 6: /* Unsigned bitfield extract. */
10124 imm++;
10125 if (shift + imm > 32)
10126 goto illegal_op;
10127 if (imm < 32)
10128 gen_ubfx(tmp, shift, (1u << imm) - 1);
10129 break;
10130 case 3: /* Bitfield insert/clear. */
10131 if (imm < shift)
10132 goto illegal_op;
10133 imm = imm + 1 - shift;
10134 if (imm != 32) {
10135 tmp2 = load_reg(s, rd);
10136 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
10137 tcg_temp_free_i32(tmp2);
10139 break;
10140 case 7:
10141 goto illegal_op;
10142 default: /* Saturate. */
10143 if (shift) {
10144 if (op & 1)
10145 tcg_gen_sari_i32(tmp, tmp, shift);
10146 else
10147 tcg_gen_shli_i32(tmp, tmp, shift);
10149 tmp2 = tcg_const_i32(imm);
10150 if (op & 4) {
10151 /* Unsigned. */
10152 if ((op & 1) && shift == 0) {
10153 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10154 tcg_temp_free_i32(tmp);
10155 tcg_temp_free_i32(tmp2);
10156 goto illegal_op;
10158 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
10159 } else {
10160 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
10162 } else {
10163 /* Signed. */
10164 if ((op & 1) && shift == 0) {
10165 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10166 tcg_temp_free_i32(tmp);
10167 tcg_temp_free_i32(tmp2);
10168 goto illegal_op;
10170 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
10171 } else {
10172 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
10175 tcg_temp_free_i32(tmp2);
10176 break;
10178 store_reg(s, rd, tmp);
10179 } else {
10180 imm = ((insn & 0x04000000) >> 15)
10181 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10182 if (insn & (1 << 22)) {
10183 /* 16-bit immediate. */
10184 imm |= (insn >> 4) & 0xf000;
10185 if (insn & (1 << 23)) {
10186 /* movt */
10187 tmp = load_reg(s, rd);
10188 tcg_gen_ext16u_i32(tmp, tmp);
10189 tcg_gen_ori_i32(tmp, tmp, imm << 16);
10190 } else {
10191 /* movw */
10192 tmp = tcg_temp_new_i32();
10193 tcg_gen_movi_i32(tmp, imm);
10195 } else {
10196 /* Add/sub 12-bit immediate. */
10197 if (rn == 15) {
10198 offset = s->pc & ~(uint32_t)3;
10199 if (insn & (1 << 23))
10200 offset -= imm;
10201 else
10202 offset += imm;
10203 tmp = tcg_temp_new_i32();
10204 tcg_gen_movi_i32(tmp, offset);
10205 } else {
10206 tmp = load_reg(s, rn);
10207 if (insn & (1 << 23))
10208 tcg_gen_subi_i32(tmp, tmp, imm);
10209 else
10210 tcg_gen_addi_i32(tmp, tmp, imm);
10213 store_reg(s, rd, tmp);
10215 } else {
10216 int shifter_out = 0;
10217 /* modified 12-bit immediate. */
10218 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10219 imm = (insn & 0xff);
10220 switch (shift) {
10221 case 0: /* XY */
10222 /* Nothing to do. */
10223 break;
10224 case 1: /* 00XY00XY */
10225 imm |= imm << 16;
10226 break;
10227 case 2: /* XY00XY00 */
10228 imm |= imm << 16;
10229 imm <<= 8;
10230 break;
10231 case 3: /* XYXYXYXY */
10232 imm |= imm << 16;
10233 imm |= imm << 8;
10234 break;
10235 default: /* Rotated constant. */
10236 shift = (shift << 1) | (imm >> 7);
10237 imm |= 0x80;
10238 imm = imm << (32 - shift);
10239 shifter_out = 1;
10240 break;
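/* Worked example (values chosen for illustration): with the 4-bit shift
 * field 0b0101 and imm8 0x25 we take the rotated-constant path: bit 7 is
 * forced (imm becomes 0xa5), the 5-bit rotation is 0b01010 = 10, and the
 * resulting constant is 0xa5 rotated right by 10, i.e. 0x29400000. */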
10242 tmp2 = tcg_temp_new_i32();
10243 tcg_gen_movi_i32(tmp2, imm);
10244 rn = (insn >> 16) & 0xf;
10245 if (rn == 15) {
10246 tmp = tcg_temp_new_i32();
10247 tcg_gen_movi_i32(tmp, 0);
10248 } else {
10249 tmp = load_reg(s, rn);
10251 op = (insn >> 21) & 0xf;
10252 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
10253 shifter_out, tmp, tmp2))
10254 goto illegal_op;
10255 tcg_temp_free_i32(tmp2);
10256 rd = (insn >> 8) & 0xf;
10257 if (rd != 15) {
10258 store_reg(s, rd, tmp);
10259 } else {
10260 tcg_temp_free_i32(tmp);
10264 break;
10265 case 12: /* Load/store single data item. */
10267 int postinc = 0;
10268 int writeback = 0;
10269 int memidx;
10270 if ((insn & 0x01100000) == 0x01000000) {
10271 if (disas_neon_ls_insn(s, insn)) {
10272 goto illegal_op;
10274 break;
10276 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10277 if (rs == 15) {
10278 if (!(insn & (1 << 20))) {
10279 goto illegal_op;
10281 if (op != 2) {
10282 /* Byte or halfword load space with dest == r15 : memory hints.
10283 * Catch them early so we don't emit pointless addressing code.
10284 * This space is a mix of:
10285 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10286 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10287 * cores)
10288 * unallocated hints, which must be treated as NOPs
10289 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10290 * which is easiest for the decoding logic
10291 * Some space which must UNDEF
10293 int op1 = (insn >> 23) & 3;
10294 int op2 = (insn >> 6) & 0x3f;
10295 if (op & 2) {
10296 goto illegal_op;
10298 if (rn == 15) {
10299 /* UNPREDICTABLE, unallocated hint or
10300 * PLD/PLDW/PLI (literal)
10302 return 0;
10304 if (op1 & 1) {
10305 return 0; /* PLD/PLDW/PLI or unallocated hint */
10307 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
10308 return 0; /* PLD/PLDW/PLI or unallocated hint */
10310 /* UNDEF space, or an UNPREDICTABLE */
10311 return 1;
10314 memidx = get_mem_index(s);
10315 if (rn == 15) {
10316 addr = tcg_temp_new_i32();
10317 /* PC relative. */
10318 /* s->pc has already been incremented by 4. */
10319 imm = s->pc & 0xfffffffc;
10320 if (insn & (1 << 23))
10321 imm += insn & 0xfff;
10322 else
10323 imm -= insn & 0xfff;
10324 tcg_gen_movi_i32(addr, imm);
10325 } else {
10326 addr = load_reg(s, rn);
10327 if (insn & (1 << 23)) {
10328 /* Positive offset. */
10329 imm = insn & 0xfff;
10330 tcg_gen_addi_i32(addr, addr, imm);
10331 } else {
10332 imm = insn & 0xff;
10333 switch ((insn >> 8) & 0xf) {
10334 case 0x0: /* Shifted Register. */
10335 shift = (insn >> 4) & 0xf;
10336 if (shift > 3) {
10337 tcg_temp_free_i32(addr);
10338 goto illegal_op;
10340 tmp = load_reg(s, rm);
10341 if (shift)
10342 tcg_gen_shli_i32(tmp, tmp, shift);
10343 tcg_gen_add_i32(addr, addr, tmp);
10344 tcg_temp_free_i32(tmp);
10345 break;
10346 case 0xc: /* Negative offset. */
10347 tcg_gen_addi_i32(addr, addr, -imm);
10348 break;
10349 case 0xe: /* User privilege. */
10350 tcg_gen_addi_i32(addr, addr, imm);
10351 memidx = get_a32_user_mem_index(s);
10352 break;
10353 case 0x9: /* Post-decrement. */
10354 imm = -imm;
10355 /* Fall through. */
10356 case 0xb: /* Post-increment. */
10357 postinc = 1;
10358 writeback = 1;
10359 break;
10360 case 0xd: /* Pre-decrement. */
10361 imm = -imm;
10362 /* Fall through. */
10363 case 0xf: /* Pre-increment. */
10364 tcg_gen_addi_i32(addr, addr, imm);
10365 writeback = 1;
10366 break;
10367 default:
10368 tcg_temp_free_i32(addr);
10369 goto illegal_op;
10373 if (insn & (1 << 20)) {
10374 /* Load. */
10375 tmp = tcg_temp_new_i32();
10376 switch (op) {
10377 case 0:
10378 gen_aa32_ld8u(tmp, addr, memidx);
10379 break;
10380 case 4:
10381 gen_aa32_ld8s(tmp, addr, memidx);
10382 break;
10383 case 1:
10384 gen_aa32_ld16u(tmp, addr, memidx);
10385 break;
10386 case 5:
10387 gen_aa32_ld16s(tmp, addr, memidx);
10388 break;
10389 case 2:
10390 gen_aa32_ld32u(tmp, addr, memidx);
10391 break;
10392 default:
10393 tcg_temp_free_i32(tmp);
10394 tcg_temp_free_i32(addr);
10395 goto illegal_op;
10397 if (rs == 15) {
10398 gen_bx(s, tmp);
10399 } else {
10400 store_reg(s, rs, tmp);
10402 } else {
10403 /* Store. */
10404 tmp = load_reg(s, rs);
10405 switch (op) {
10406 case 0:
10407 gen_aa32_st8(tmp, addr, memidx);
10408 break;
10409 case 1:
10410 gen_aa32_st16(tmp, addr, memidx);
10411 break;
10412 case 2:
10413 gen_aa32_st32(tmp, addr, memidx);
10414 break;
10415 default:
10416 tcg_temp_free_i32(tmp);
10417 tcg_temp_free_i32(addr);
10418 goto illegal_op;
10420 tcg_temp_free_i32(tmp);
10422 if (postinc)
10423 tcg_gen_addi_i32(addr, addr, imm);
10424 if (writeback) {
10425 store_reg(s, rn, addr);
10426 } else {
10427 tcg_temp_free_i32(addr);
10430 break;
10431 default:
10432 goto illegal_op;
10434 return 0;
10435 illegal_op:
10436 return 1;
10439 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
10441 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10442 int32_t offset;
10443 int i;
10444 TCGv_i32 tmp;
10445 TCGv_i32 tmp2;
10446 TCGv_i32 addr;
10448 if (s->condexec_mask) {
10449 cond = s->condexec_cond;
10450 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10451 s->condlabel = gen_new_label();
10452 arm_gen_test_cc(cond ^ 1, s->condlabel);
10453 s->condjmp = 1;
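/* Within an IT block each instruction is translated normally but is
 * guarded by a conditional branch to condlabel that skips it when the
 * condition fails; the label is emitted after the instruction by the
 * surrounding translation loop. */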
10457 insn = arm_lduw_code(env, s->pc, s->bswap_code);
10458 s->pc += 2;
10460 switch (insn >> 12) {
10461 case 0: case 1:
10463 rd = insn & 7;
10464 op = (insn >> 11) & 3;
10465 if (op == 3) {
10466 /* add/subtract */
10467 rn = (insn >> 3) & 7;
10468 tmp = load_reg(s, rn);
10469 if (insn & (1 << 10)) {
10470 /* immediate */
10471 tmp2 = tcg_temp_new_i32();
10472 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
10473 } else {
10474 /* reg */
10475 rm = (insn >> 6) & 7;
10476 tmp2 = load_reg(s, rm);
10478 if (insn & (1 << 9)) {
10479 if (s->condexec_mask)
10480 tcg_gen_sub_i32(tmp, tmp, tmp2);
10481 else
10482 gen_sub_CC(tmp, tmp, tmp2);
10483 } else {
10484 if (s->condexec_mask)
10485 tcg_gen_add_i32(tmp, tmp, tmp2);
10486 else
10487 gen_add_CC(tmp, tmp, tmp2);
10489 tcg_temp_free_i32(tmp2);
10490 store_reg(s, rd, tmp);
10491 } else {
10492 /* shift immediate */
10493 rm = (insn >> 3) & 7;
10494 shift = (insn >> 6) & 0x1f;
10495 tmp = load_reg(s, rm);
10496 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10497 if (!s->condexec_mask)
10498 gen_logic_CC(tmp);
10499 store_reg(s, rd, tmp);
10501 break;
10502 case 2: case 3:
10503 /* arithmetic large immediate */
10504 op = (insn >> 11) & 3;
10505 rd = (insn >> 8) & 0x7;
10506 if (op == 0) { /* mov */
10507 tmp = tcg_temp_new_i32();
10508 tcg_gen_movi_i32(tmp, insn & 0xff);
10509 if (!s->condexec_mask)
10510 gen_logic_CC(tmp);
10511 store_reg(s, rd, tmp);
10512 } else {
10513 tmp = load_reg(s, rd);
10514 tmp2 = tcg_temp_new_i32();
10515 tcg_gen_movi_i32(tmp2, insn & 0xff);
10516 switch (op) {
10517 case 1: /* cmp */
10518 gen_sub_CC(tmp, tmp, tmp2);
10519 tcg_temp_free_i32(tmp);
10520 tcg_temp_free_i32(tmp2);
10521 break;
10522 case 2: /* add */
10523 if (s->condexec_mask)
10524 tcg_gen_add_i32(tmp, tmp, tmp2);
10525 else
10526 gen_add_CC(tmp, tmp, tmp2);
10527 tcg_temp_free_i32(tmp2);
10528 store_reg(s, rd, tmp);
10529 break;
10530 case 3: /* sub */
10531 if (s->condexec_mask)
10532 tcg_gen_sub_i32(tmp, tmp, tmp2);
10533 else
10534 gen_sub_CC(tmp, tmp, tmp2);
10535 tcg_temp_free_i32(tmp2);
10536 store_reg(s, rd, tmp);
10537 break;
10540 break;
10541 case 4:
10542 if (insn & (1 << 11)) {
10543 rd = (insn >> 8) & 7;
10544 /* load pc-relative. Bit 1 of PC is ignored. */
10545 val = s->pc + 2 + ((insn & 0xff) * 4);
10546 val &= ~(uint32_t)2;
10547 addr = tcg_temp_new_i32();
10548 tcg_gen_movi_i32(addr, val);
10549 tmp = tcg_temp_new_i32();
10550 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10551 tcg_temp_free_i32(addr);
10552 store_reg(s, rd, tmp);
10553 break;
10555 if (insn & (1 << 10)) {
10556 /* data processing extended or blx */
10557 rd = (insn & 7) | ((insn >> 4) & 8);
10558 rm = (insn >> 3) & 0xf;
10559 op = (insn >> 8) & 3;
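/* rd takes its high bit from the H1/DN flag (insn bit 7) and rm is a
 * full 4-bit field, so unlike most Thumb-1 encodings these forms can
 * address the high registers r8-r15. */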
10560 switch (op) {
10561 case 0: /* add */
10562 tmp = load_reg(s, rd);
10563 tmp2 = load_reg(s, rm);
10564 tcg_gen_add_i32(tmp, tmp, tmp2);
10565 tcg_temp_free_i32(tmp2);
10566 store_reg(s, rd, tmp);
10567 break;
10568 case 1: /* cmp */
10569 tmp = load_reg(s, rd);
10570 tmp2 = load_reg(s, rm);
10571 gen_sub_CC(tmp, tmp, tmp2);
10572 tcg_temp_free_i32(tmp2);
10573 tcg_temp_free_i32(tmp);
10574 break;
10575 case 2: /* mov/cpy */
10576 tmp = load_reg(s, rm);
10577 store_reg(s, rd, tmp);
10578 break;
10579 case 3:/* branch [and link] exchange thumb register */
10580 tmp = load_reg(s, rm);
10581 if (insn & (1 << 7)) {
10582 ARCH(5);
10583 val = (uint32_t)s->pc | 1;
10584 tmp2 = tcg_temp_new_i32();
10585 tcg_gen_movi_i32(tmp2, val);
10586 store_reg(s, 14, tmp2);
10588 /* already thumb, no need to check */
10589 gen_bx(s, tmp);
10590 break;
10592 break;
10595 /* data processing register */
10596 rd = insn & 7;
10597 rm = (insn >> 3) & 7;
10598 op = (insn >> 6) & 0xf;
10599 if (op == 2 || op == 3 || op == 4 || op == 7) {
10600 /* the shift/rotate ops want the operands backwards */
10601 val = rm;
10602 rm = rd;
10603 rd = val;
10604 val = 1;
10605 } else {
10606 val = 0;
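/* After the swap, tmp2 (loaded from rm) holds the value to shift and
 * tmp (loaded from rd) the shift amount, matching e.g. "LSLS Rdn, Rm"
 * which shifts Rdn by the amount in Rm; val == 1 records that the
 * result ends up in tmp2 and must be stored back to rm. */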
10609 if (op == 9) { /* neg */
10610 tmp = tcg_temp_new_i32();
10611 tcg_gen_movi_i32(tmp, 0);
10612 } else if (op != 0xf) { /* mvn doesn't read its first operand */
10613 tmp = load_reg(s, rd);
10614 } else {
10615 TCGV_UNUSED_I32(tmp);
10618 tmp2 = load_reg(s, rm);
10619 switch (op) {
10620 case 0x0: /* and */
10621 tcg_gen_and_i32(tmp, tmp, tmp2);
10622 if (!s->condexec_mask)
10623 gen_logic_CC(tmp);
10624 break;
10625 case 0x1: /* eor */
10626 tcg_gen_xor_i32(tmp, tmp, tmp2);
10627 if (!s->condexec_mask)
10628 gen_logic_CC(tmp);
10629 break;
10630 case 0x2: /* lsl */
10631 if (s->condexec_mask) {
10632 gen_shl(tmp2, tmp2, tmp);
10633 } else {
10634 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
10635 gen_logic_CC(tmp2);
10637 break;
10638 case 0x3: /* lsr */
10639 if (s->condexec_mask) {
10640 gen_shr(tmp2, tmp2, tmp);
10641 } else {
10642 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
10643 gen_logic_CC(tmp2);
10645 break;
10646 case 0x4: /* asr */
10647 if (s->condexec_mask) {
10648 gen_sar(tmp2, tmp2, tmp);
10649 } else {
10650 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
10651 gen_logic_CC(tmp2);
10653 break;
10654 case 0x5: /* adc */
10655 if (s->condexec_mask) {
10656 gen_adc(tmp, tmp2);
10657 } else {
10658 gen_adc_CC(tmp, tmp, tmp2);
10660 break;
10661 case 0x6: /* sbc */
10662 if (s->condexec_mask) {
10663 gen_sub_carry(tmp, tmp, tmp2);
10664 } else {
10665 gen_sbc_CC(tmp, tmp, tmp2);
10667 break;
10668 case 0x7: /* ror */
10669 if (s->condexec_mask) {
10670 tcg_gen_andi_i32(tmp, tmp, 0x1f);
10671 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
10672 } else {
10673 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
10674 gen_logic_CC(tmp2);
10676 break;
10677 case 0x8: /* tst */
10678 tcg_gen_and_i32(tmp, tmp, tmp2);
10679 gen_logic_CC(tmp);
10680 rd = 16;
10681 break;
10682 case 0x9: /* neg */
10683 if (s->condexec_mask)
10684 tcg_gen_neg_i32(tmp, tmp2);
10685 else
10686 gen_sub_CC(tmp, tmp, tmp2);
10687 break;
10688 case 0xa: /* cmp */
10689 gen_sub_CC(tmp, tmp, tmp2);
10690 rd = 16;
10691 break;
10692 case 0xb: /* cmn */
10693 gen_add_CC(tmp, tmp, tmp2);
10694 rd = 16;
10695 break;
10696 case 0xc: /* orr */
10697 tcg_gen_or_i32(tmp, tmp, tmp2);
10698 if (!s->condexec_mask)
10699 gen_logic_CC(tmp);
10700 break;
10701 case 0xd: /* mul */
10702 tcg_gen_mul_i32(tmp, tmp, tmp2);
10703 if (!s->condexec_mask)
10704 gen_logic_CC(tmp);
10705 break;
10706 case 0xe: /* bic */
10707 tcg_gen_andc_i32(tmp, tmp, tmp2);
10708 if (!s->condexec_mask)
10709 gen_logic_CC(tmp);
10710 break;
10711 case 0xf: /* mvn */
10712 tcg_gen_not_i32(tmp2, tmp2);
10713 if (!s->condexec_mask)
10714 gen_logic_CC(tmp2);
10715 val = 1;
10716 rm = rd;
10717 break;
10719 if (rd != 16) {
10720 if (val) {
10721 store_reg(s, rm, tmp2);
10722 if (op != 0xf)
10723 tcg_temp_free_i32(tmp);
10724 } else {
10725 store_reg(s, rd, tmp);
10726 tcg_temp_free_i32(tmp2);
10728 } else {
10729 tcg_temp_free_i32(tmp);
10730 tcg_temp_free_i32(tmp2);
10732 break;
10734 case 5:
10735 /* load/store register offset. */
10736 rd = insn & 7;
10737 rn = (insn >> 3) & 7;
10738 rm = (insn >> 6) & 7;
10739 op = (insn >> 9) & 7;
10740 addr = load_reg(s, rn);
10741 tmp = load_reg(s, rm);
10742 tcg_gen_add_i32(addr, addr, tmp);
10743 tcg_temp_free_i32(tmp);
10745 if (op < 3) { /* store */
10746 tmp = load_reg(s, rd);
10747 } else {
10748 tmp = tcg_temp_new_i32();
10751 switch (op) {
10752 case 0: /* str */
10753 gen_aa32_st32(tmp, addr, get_mem_index(s));
10754 break;
10755 case 1: /* strh */
10756 gen_aa32_st16(tmp, addr, get_mem_index(s));
10757 break;
10758 case 2: /* strb */
10759 gen_aa32_st8(tmp, addr, get_mem_index(s));
10760 break;
10761 case 3: /* ldrsb */
10762 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
10763 break;
10764 case 4: /* ldr */
10765 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10766 break;
10767 case 5: /* ldrh */
10768 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
10769 break;
10770 case 6: /* ldrb */
10771 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
10772 break;
10773 case 7: /* ldrsh */
10774 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
10775 break;
10777 if (op >= 3) { /* load */
10778 store_reg(s, rd, tmp);
10779 } else {
10780 tcg_temp_free_i32(tmp);
10782 tcg_temp_free_i32(addr);
10783 break;
10785 case 6:
10786 /* load/store word immediate offset */
10787 rd = insn & 7;
10788 rn = (insn >> 3) & 7;
10789 addr = load_reg(s, rn);
10790 val = (insn >> 4) & 0x7c;
10791 tcg_gen_addi_i32(addr, addr, val);
10793 if (insn & (1 << 11)) {
10794 /* load */
10795 tmp = tcg_temp_new_i32();
10796 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10797 store_reg(s, rd, tmp);
10798 } else {
10799 /* store */
10800 tmp = load_reg(s, rd);
10801 gen_aa32_st32(tmp, addr, get_mem_index(s));
10802 tcg_temp_free_i32(tmp);
10804 tcg_temp_free_i32(addr);
10805 break;
10807 case 7:
10808 /* load/store byte immediate offset */
10809 rd = insn & 7;
10810 rn = (insn >> 3) & 7;
10811 addr = load_reg(s, rn);
10812 val = (insn >> 6) & 0x1f;
10813 tcg_gen_addi_i32(addr, addr, val);
10815 if (insn & (1 << 11)) {
10816 /* load */
10817 tmp = tcg_temp_new_i32();
10818 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
10819 store_reg(s, rd, tmp);
10820 } else {
10821 /* store */
10822 tmp = load_reg(s, rd);
10823 gen_aa32_st8(tmp, addr, get_mem_index(s));
10824 tcg_temp_free_i32(tmp);
10826 tcg_temp_free_i32(addr);
10827 break;
10829 case 8:
10830 /* load/store halfword immediate offset */
10831 rd = insn & 7;
10832 rn = (insn >> 3) & 7;
10833 addr = load_reg(s, rn);
10834 val = (insn >> 5) & 0x3e;
10835 tcg_gen_addi_i32(addr, addr, val);
10837 if (insn & (1 << 11)) {
10838 /* load */
10839 tmp = tcg_temp_new_i32();
10840 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
10841 store_reg(s, rd, tmp);
10842 } else {
10843 /* store */
10844 tmp = load_reg(s, rd);
10845 gen_aa32_st16(tmp, addr, get_mem_index(s));
10846 tcg_temp_free_i32(tmp);
10848 tcg_temp_free_i32(addr);
10849 break;
10851 case 9:
10852 /* load/store from stack */
10853 rd = (insn >> 8) & 7;
10854 addr = load_reg(s, 13);
10855 val = (insn & 0xff) * 4;
10856 tcg_gen_addi_i32(addr, addr, val);
10858 if (insn & (1 << 11)) {
10859 /* load */
10860 tmp = tcg_temp_new_i32();
10861 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10862 store_reg(s, rd, tmp);
10863 } else {
10864 /* store */
10865 tmp = load_reg(s, rd);
10866 gen_aa32_st32(tmp, addr, get_mem_index(s));
10867 tcg_temp_free_i32(tmp);
10869 tcg_temp_free_i32(addr);
10870 break;
10872 case 10:
10873 /* add to high reg */
10874 rd = (insn >> 8) & 7;
10875 if (insn & (1 << 11)) {
10876 /* SP */
10877 tmp = load_reg(s, 13);
10878 } else {
10879 /* PC. bit 1 is ignored. */
10880 tmp = tcg_temp_new_i32();
10881 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
10883 val = (insn & 0xff) * 4;
10884 tcg_gen_addi_i32(tmp, tmp, val);
10885 store_reg(s, rd, tmp);
10886 break;
10888 case 11:
10889 /* misc */
10890 op = (insn >> 8) & 0xf;
10891 switch (op) {
10892 case 0:
10893 /* adjust stack pointer */
10894 tmp = load_reg(s, 13);
10895 val = (insn & 0x7f) * 4;
10896 if (insn & (1 << 7))
10897 val = -(int32_t)val;
10898 tcg_gen_addi_i32(tmp, tmp, val);
10899 store_reg(s, 13, tmp);
10900 break;
10902 case 2: /* sign/zero extend. */
10903 ARCH(6);
10904 rd = insn & 7;
10905 rm = (insn >> 3) & 7;
10906 tmp = load_reg(s, rm);
10907 switch ((insn >> 6) & 3) {
10908 case 0: gen_sxth(tmp); break;
10909 case 1: gen_sxtb(tmp); break;
10910 case 2: gen_uxth(tmp); break;
10911 case 3: gen_uxtb(tmp); break;
10912 }
10913 store_reg(s, rd, tmp);
10914 break;
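/* The four sub-cases differ only in width and signedness; for a source
 * register holding 0x00008080 (illustrative value) they yield:
 *
 *     sxth -> 0xffff8080    sxtb -> 0xffffff80
 *     uxth -> 0x00008080    uxtb -> 0x00000080
 */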
10915 case 4: case 5: case 0xc: case 0xd:
10916 /* push/pop */
10917 addr = load_reg(s, 13);
10918 if (insn & (1 << 8))
10919 offset = 4;
10920 else
10921 offset = 0;
10922 for (i = 0; i < 8; i++) {
10923 if (insn & (1 << i))
10924 offset += 4;
10925 }
10926 if ((insn & (1 << 11)) == 0) {
10927 tcg_gen_addi_i32(addr, addr, -offset);
10928 }
10929 for (i = 0; i < 8; i++) {
10930 if (insn & (1 << i)) {
10931 if (insn & (1 << 11)) {
10932 /* pop */
10933 tmp = tcg_temp_new_i32();
10934 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10935 store_reg(s, i, tmp);
10936 } else {
10937 /* push */
10938 tmp = load_reg(s, i);
10939 gen_aa32_st32(tmp, addr, get_mem_index(s));
10940 tcg_temp_free_i32(tmp);
10941 }
10942 /* advance to the next address. */
10943 tcg_gen_addi_i32(addr, addr, 4);
10944 }
10945 }
10946 TCGV_UNUSED_I32(tmp);
10947 if (insn & (1 << 8)) {
10948 if (insn & (1 << 11)) {
10949 /* pop pc */
10950 tmp = tcg_temp_new_i32();
10951 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10952 /* don't set the pc until the rest of the instruction
10953 has completed */
10954 } else {
10955 /* push lr */
10956 tmp = load_reg(s, 14);
10957 gen_aa32_st32(tmp, addr, get_mem_index(s));
10958 tcg_temp_free_i32(tmp);
10959 }
10960 tcg_gen_addi_i32(addr, addr, 4);
10961 }
10962 if ((insn & (1 << 11)) == 0) {
10963 tcg_gen_addi_i32(addr, addr, -offset);
10964 }
10965 /* write back the new stack pointer */
10966 store_reg(s, 13, addr);
10967 /* set the new PC value */
10968 if ((insn & 0x0900) == 0x0900) {
10969 store_reg_from_load(s, 15, tmp);
10970 }
10971 break;
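/* The first loop in this case merely counts the set bits of the 8-bit
 * register list to pre-compute the stack adjustment.  A sketch of the same
 * calculation using the ctpop32() helper from qemu/host-utils.h (not what
 * this code does, just an equivalent formulation):
 *
 *     offset = ctpop32(insn & 0xff) * 4;
 *     if (insn & (1 << 8)) {      // PC (pop) or LR (push) also in the list
 *         offset += 4;
 *     }
 */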
10973 case 1: case 3: case 9: case 11: /* cbz/cbnz */
10974 rm = insn & 7;
10975 tmp = load_reg(s, rm);
10976 s->condlabel = gen_new_label();
10977 s->condjmp = 1;
10978 if (insn & (1 << 11))
10979 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
10980 else
10981 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
10982 tcg_temp_free_i32(tmp);
10983 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
10984 val = (uint32_t)s->pc + 2;
10985 val += offset;
10986 gen_jmp(s, val);
10987 break;
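/* The offset expression above packs the CB{N}Z immediate i:imm5:'0'.  With
 * illustrative values, a CBZ at address 0x1000 with imm5 = 3 and i = 0:
 *
 *     offset = ((insn & 0xf8) >> 2) | ((insn & 0x200) >> 3);  // 3 << 1 = 6
 *     target = 0x1000 + 4 + offset;                           // 0x100a
 */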
10989 case 15: /* IT, nop-hint. */
10990 if ((insn & 0xf) == 0) {
10991 gen_nop_hint(s, (insn >> 4) & 0xf);
10992 break;
10993 }
10994 /* If Then. */
10995 s->condexec_cond = (insn >> 4) & 0xe;
10996 s->condexec_mask = insn & 0x1f;
10997 /* No actual code generated for this insn, just setup state. */
10998 break;
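/* Example of the IT state packing above, assuming the instruction's low byte
 * is 0x18 (firstcond = 1/NE, mask = 0x8): the top three condition bits go
 * into condexec_cond and the condition's low bit rides in bit 4 of the
 * 5-bit mask:
 *
 *     s->condexec_cond = (0x18 >> 4) & 0xe;   // 0x0
 *     s->condexec_mask = 0x18 & 0x1f;         // 0x18
 *
 * The per-insn update in gen_intermediate_code() then rebuilds the current
 * condition as (condexec_cond & 0xe) | ((condexec_mask >> 4) & 1), i.e. NE.
 */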
11000 case 0xe: /* bkpt */
11001 {
11002 int imm8 = extract32(insn, 0, 8);
11003 ARCH(5);
11004 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11005 default_exception_el(s));
11006 break;
11007 }
11009 case 0xa: /* rev */
11010 ARCH(6);
11011 rn = (insn >> 3) & 0x7;
11012 rd = insn & 0x7;
11013 tmp = load_reg(s, rn);
11014 switch ((insn >> 6) & 3) {
11015 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
11016 case 1: gen_rev16(tmp); break;
11017 case 3: gen_revsh(tmp); break;
11018 default: goto illegal_op;
11019 }
11020 store_reg(s, rd, tmp);
11021 break;
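/* For a source register holding 0x12345678 (illustrative value) the three
 * byte-reverse flavours above produce:
 *
 *     rev   -> 0x78563412
 *     rev16 -> 0x34127856
 *     revsh -> 0x00007856   (swap the low halfword's bytes, then sign-extend)
 */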
11023 case 6:
11024 switch ((insn >> 5) & 7) {
11025 case 2:
11026 /* setend */
11027 ARCH(6);
11028 if (((insn >> 3) & 1) != s->bswap_code) {
11029 /* Dynamic endianness switching not implemented. */
11030 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
11031 goto illegal_op;
11032 }
11033 break;
11034 case 3:
11035 /* cps */
11036 ARCH(6);
11037 if (IS_USER(s)) {
11038 break;
11039 }
11040 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11041 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11042 /* FAULTMASK */
11043 if (insn & 1) {
11044 addr = tcg_const_i32(19);
11045 gen_helper_v7m_msr(cpu_env, addr, tmp);
11046 tcg_temp_free_i32(addr);
11047 }
11048 /* PRIMASK */
11049 if (insn & 2) {
11050 addr = tcg_const_i32(16);
11051 gen_helper_v7m_msr(cpu_env, addr, tmp);
11052 tcg_temp_free_i32(addr);
11053 }
11054 tcg_temp_free_i32(tmp);
11055 gen_lookup_tb(s);
11056 } else {
11057 if (insn & (1 << 4)) {
11058 shift = CPSR_A | CPSR_I | CPSR_F;
11059 } else {
11060 shift = 0;
11061 }
11062 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
11063 }
11064 break;
11065 default:
11066 goto undef;
11067 }
11068 break;
11070 default:
11071 goto undef;
11072 }
11073 break;
11075 case 12:
11076 {
11077 /* load/store multiple */
11078 TCGv_i32 loaded_var;
11079 TCGV_UNUSED_I32(loaded_var);
11080 rn = (insn >> 8) & 0x7;
11081 addr = load_reg(s, rn);
11082 for (i = 0; i < 8; i++) {
11083 if (insn & (1 << i)) {
11084 if (insn & (1 << 11)) {
11085 /* load */
11086 tmp = tcg_temp_new_i32();
11087 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
11088 if (i == rn) {
11089 loaded_var = tmp;
11090 } else {
11091 store_reg(s, i, tmp);
11092 }
11093 } else {
11094 /* store */
11095 tmp = load_reg(s, i);
11096 gen_aa32_st32(tmp, addr, get_mem_index(s));
11097 tcg_temp_free_i32(tmp);
11098 }
11099 /* advance to the next address */
11100 tcg_gen_addi_i32(addr, addr, 4);
11101 }
11102 }
11103 if ((insn & (1 << rn)) == 0) {
11104 /* base reg not in list: base register writeback */
11105 store_reg(s, rn, addr);
11106 } else {
11107 /* base reg in list: if load, complete it now */
11108 if (insn & (1 << 11)) {
11109 store_reg(s, rn, loaded_var);
11110 }
11111 tcg_temp_free_i32(addr);
11112 }
11113 break;
11114 }
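/* Worked example of the writeback rule above, with illustrative values: for
 * an LDMIA with rn = 2 and a register list of {r0, r2}, bit 2 of the list is
 * set, so (insn & (1 << rn)) is non-zero, the base register is not written
 * back, and the value destined for r2 waits in loaded_var until every access
 * has been issued.  With a list of {r0, r1} the bit is clear and the
 * incremented address is stored back to r2.
 */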
11115 case 13:
11116 /* conditional branch or swi */
11117 cond = (insn >> 8) & 0xf;
11118 if (cond == 0xe)
11119 goto undef;
11121 if (cond == 0xf) {
11122 /* swi */
11123 gen_set_pc_im(s, s->pc);
11124 s->svc_imm = extract32(insn, 0, 8);
11125 s->is_jmp = DISAS_SWI;
11126 break;
11127 }
11128 /* generate a conditional jump to next instruction */
11129 s->condlabel = gen_new_label();
11130 arm_gen_test_cc(cond ^ 1, s->condlabel);
11131 s->condjmp = 1;
11133 /* jump to the offset */
11134 val = (uint32_t)s->pc + 2;
11135 offset = ((int32_t)insn << 24) >> 24;
11136 val += offset << 1;
11137 gen_jmp(s, val);
11138 break;
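/* Branch target arithmetic with illustrative values: for "beq ." at address
 * 0x2000 the immediate byte is 0xfe, which sign-extends to -2 and is scaled
 * by 2, while s->pc already points past the 2-byte instruction:
 *
 *     offset = ((int32_t)0xfe << 24) >> 24;        // -2
 *     target = (0x2000 + 2) + 2 + (offset << 1);   // 0x2000
 */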
11140 case 14:
11141 if (insn & (1 << 11)) {
11142 if (disas_thumb2_insn(env, s, insn))
11143 goto undef32;
11144 break;
11145 }
11146 /* unconditional branch */
11147 val = (uint32_t)s->pc;
11148 offset = ((int32_t)insn << 21) >> 21;
11149 val += (offset << 1) + 2;
11150 gen_jmp(s, val);
11151 break;
11153 case 15:
11154 if (disas_thumb2_insn(env, s, insn))
11155 goto undef32;
11156 break;
11158 return;
11159 undef32:
11160 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11161 default_exception_el(s));
11162 return;
11163 illegal_op:
11164 undef:
11165 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11166 default_exception_el(s));
11167 }
11169 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
11170 basic block 'tb'. */
11171 void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
11172 {
11173 ARMCPU *cpu = arm_env_get_cpu(env);
11174 CPUState *cs = CPU(cpu);
11175 DisasContext dc1, *dc = &dc1;
11176 target_ulong pc_start;
11177 target_ulong next_page_start;
11178 int num_insns;
11179 int max_insns;
11181 /* generate intermediate code */
11183 /* The A64 decoder has its own top level loop, because it doesn't need
11184 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
11185 */
11186 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
11187 gen_intermediate_code_a64(cpu, tb);
11188 return;
11189 }
11191 pc_start = tb->pc;
11193 dc->tb = tb;
11195 dc->is_jmp = DISAS_NEXT;
11196 dc->pc = pc_start;
11197 dc->singlestep_enabled = cs->singlestep_enabled;
11198 dc->condjmp = 0;
11200 dc->aarch64 = 0;
11201 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11202 * there is no secure EL1, so we route exceptions to EL3.
11203 */
11204 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11205 !arm_el_is_aa64(env, 3);
11206 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
11207 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
11208 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
11209 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
11210 dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
11211 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
11212 #if !defined(CONFIG_USER_ONLY)
11213 dc->user = (dc->current_el == 0);
11214 #endif
11215 dc->ns = ARM_TBFLAG_NS(tb->flags);
11216 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
11217 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
11218 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
11219 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
11220 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
11221 dc->cp_regs = cpu->cp_regs;
11222 dc->features = env->features;
11224 /* Single step state. The code-generation logic here is:
11225 * SS_ACTIVE == 0:
11226 * generate code with no special handling for single-stepping (except
11227 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11228 * this happens anyway because those changes are all system register or
11229 * PSTATE writes).
11230 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11231 * emit code for one insn
11232 * emit code to clear PSTATE.SS
11233 * emit code to generate software step exception for completed step
11234 * end TB (as usual for having generated an exception)
11235 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11236 * emit code to generate a software step exception
11237 * end the TB
11238 */
11239 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
11240 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
11241 dc->is_ldex = false;
11242 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
11244 cpu_F0s = tcg_temp_new_i32();
11245 cpu_F1s = tcg_temp_new_i32();
11246 cpu_F0d = tcg_temp_new_i64();
11247 cpu_F1d = tcg_temp_new_i64();
11248 cpu_V0 = cpu_F0d;
11249 cpu_V1 = cpu_F1d;
11250 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
11251 cpu_M0 = tcg_temp_new_i64();
11252 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
11253 num_insns = 0;
11254 max_insns = tb->cflags & CF_COUNT_MASK;
11255 if (max_insns == 0) {
11256 max_insns = CF_COUNT_MASK;
11257 }
11258 if (max_insns > TCG_MAX_INSNS) {
11259 max_insns = TCG_MAX_INSNS;
11260 }
11262 gen_tb_start(tb);
11264 tcg_clear_temp_count();
11266 /* A note on handling of the condexec (IT) bits:
11268 * We want to avoid the overhead of having to write the updated condexec
11269 * bits back to the CPUARMState for every instruction in an IT block. So:
11270 * (1) if the condexec bits are not already zero then we write
11271 * zero back into the CPUARMState now. This avoids complications trying
11272 * to do it at the end of the block. (For example if we don't do this
11273 * it's hard to identify whether we can safely skip writing condexec
11274 * at the end of the TB, which we definitely want to do for the case
11275 * where a TB doesn't do anything with the IT state at all.)
11276 * (2) if we are going to leave the TB then we call gen_set_condexec()
11277 * which will write the correct value into CPUARMState if zero is wrong.
11278 * This is done both for leaving the TB at the end, and for leaving
11279 * it because of an exception we know will happen, which is done in
11280 * gen_exception_insn(). The latter is necessary because we need to
11281 * leave the TB with the PC/IT state just prior to execution of the
11282 * instruction which caused the exception.
11283 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
11284 * then the CPUARMState will be wrong and we need to reset it.
11285 * This is handled in the same way as restoration of the
11286 * PC in these situations; we save the value of the condexec bits
11287 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
11288 * then uses this to restore them after an exception.
11290 * Note that there are no instructions which can read the condexec
11291 * bits, and none which can write non-static values to them, so
11292 * we don't need to care about whether CPUARMState is correct in the
11293 * middle of a TB.
11294 */
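/* Concretely, the value recorded per-insn by tcg_gen_insn_start() below is
 * (condexec_cond << 4) | (condexec_mask >> 1); restore_state_to_opc() at the
 * bottom of this file simply writes that word back on an exception restart:
 *
 *     env->condexec_bits = data[1];
 */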
11296 /* Reset the conditional execution bits immediately. This avoids
11297 complications trying to do it at the end of the block. */
11298 if (dc->condexec_mask || dc->condexec_cond)
11299 {
11300 TCGv_i32 tmp = tcg_temp_new_i32();
11301 tcg_gen_movi_i32(tmp, 0);
11302 store_cpu_field(tmp, condexec_bits);
11303 }
11304 do {
11305 tcg_gen_insn_start(dc->pc,
11306 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1));
11307 num_insns++;
11309 #ifdef CONFIG_USER_ONLY
11310 /* Intercept jump to the magic kernel page. */
11311 if (dc->pc >= 0xffff0000) {
11312 /* We always get here via a jump, so know we are not in a
11313 conditional execution block. */
11314 gen_exception_internal(EXCP_KERNEL_TRAP);
11315 dc->is_jmp = DISAS_UPDATE;
11316 break;
11317 }
11318 #else
11319 if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
11320 /* We always get here via a jump, so know we are not in a
11321 conditional execution block. */
11322 gen_exception_internal(EXCP_EXCEPTION_EXIT);
11323 dc->is_jmp = DISAS_UPDATE;
11324 break;
11325 }
11326 #endif
11328 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
11329 CPUBreakpoint *bp;
11330 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
11331 if (bp->pc == dc->pc) {
11332 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
11333 /* Advance PC so that clearing the breakpoint will
11334 invalidate this TB. */
11335 dc->pc += 2;
11336 goto done_generating;
11337 }
11338 }
11339 }
11341 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
11342 gen_io_start();
11343 }
11345 if (dc->ss_active && !dc->pstate_ss) {
11346 /* Singlestep state is Active-pending.
11347 * If we're in this state at the start of a TB then either
11348 * a) we just took an exception to an EL which is being debugged
11349 * and this is the first insn in the exception handler
11350 * b) debug exceptions were masked and we just unmasked them
11351 * without changing EL (eg by clearing PSTATE.D)
11352 * In either case we're going to take a swstep exception in the
11353 * "did not step an insn" case, and so the syndrome ISV and EX
11354 * bits should be zero.
11355 */
11356 assert(num_insns == 1);
11357 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
11358 default_exception_el(dc));
11359 goto done_generating;
11360 }
11362 if (dc->thumb) {
11363 disas_thumb_insn(env, dc);
11364 if (dc->condexec_mask) {
11365 dc->condexec_cond = (dc->condexec_cond & 0xe)
11366 | ((dc->condexec_mask >> 4) & 1);
11367 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
11368 if (dc->condexec_mask == 0) {
11369 dc->condexec_cond = 0;
11370 }
11371 }
11372 } else {
11373 unsigned int insn = arm_ldl_code(env, dc->pc, dc->bswap_code);
11374 dc->pc += 4;
11375 disas_arm_insn(dc, insn);
11376 }
11378 if (dc->condjmp && !dc->is_jmp) {
11379 gen_set_label(dc->condlabel);
11380 dc->condjmp = 0;
11381 }
11383 if (tcg_check_temp_count()) {
11384 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
11385 dc->pc);
11386 }
11388 /* Translation stops when a conditional branch is encountered.
11389 * Otherwise the subsequent code could get translated several times.
11390 * Also stop translation when a page boundary is reached. This
11391 * ensures prefetch aborts occur at the right place. */
11392 } while (!dc->is_jmp && !tcg_op_buf_full() &&
11393 !cs->singlestep_enabled &&
11394 !singlestep &&
11395 !dc->ss_active &&
11396 dc->pc < next_page_start &&
11397 num_insns < max_insns);
11399 if (tb->cflags & CF_LAST_IO) {
11400 if (dc->condjmp) {
11401 /* FIXME: This can theoretically happen with self-modifying
11402 code. */
11403 cpu_abort(cs, "IO on conditional branch instruction");
11404 }
11405 gen_io_end();
11406 }
11408 /* At this stage dc->condjmp will only be set when the skipped
11409 instruction was a conditional branch or trap, and the PC has
11410 already been written. */
11411 if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
11412 /* Make sure the pc is updated, and raise a debug exception. */
11413 if (dc->condjmp) {
11414 gen_set_condexec(dc);
11415 if (dc->is_jmp == DISAS_SWI) {
11416 gen_ss_advance(dc);
11417 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11418 default_exception_el(dc));
11419 } else if (dc->is_jmp == DISAS_HVC) {
11420 gen_ss_advance(dc);
11421 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
11422 } else if (dc->is_jmp == DISAS_SMC) {
11423 gen_ss_advance(dc);
11424 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
11425 } else if (dc->ss_active) {
11426 gen_step_complete_exception(dc);
11427 } else {
11428 gen_exception_internal(EXCP_DEBUG);
11429 }
11430 gen_set_label(dc->condlabel);
11431 }
11432 if (dc->condjmp || !dc->is_jmp) {
11433 gen_set_pc_im(dc, dc->pc);
11434 dc->condjmp = 0;
11435 }
11436 gen_set_condexec(dc);
11437 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
11438 gen_ss_advance(dc);
11439 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11440 default_exception_el(dc));
11441 } else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) {
11442 gen_ss_advance(dc);
11443 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
11444 } else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) {
11445 gen_ss_advance(dc);
11446 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
11447 } else if (dc->ss_active) {
11448 gen_step_complete_exception(dc);
11449 } else {
11450 /* FIXME: Single stepping a WFI insn will not halt
11451 the CPU. */
11452 gen_exception_internal(EXCP_DEBUG);
11453 }
11454 } else {
11455 /* While branches must always occur at the end of an IT block,
11456 there are a few other things that can cause us to terminate
11457 the TB in the middle of an IT block:
11458 - Exception generating instructions (bkpt, swi, undefined).
11459 - Page boundaries.
11460 - Hardware watchpoints.
11461 Hardware breakpoints have already been handled and skip this code.
11462 */
11463 gen_set_condexec(dc);
11464 switch (dc->is_jmp) {
11465 case DISAS_NEXT:
11466 gen_goto_tb(dc, 1, dc->pc);
11467 break;
11468 default:
11469 case DISAS_JUMP:
11470 case DISAS_UPDATE:
11471 /* indicate that the hash table must be used to find the next TB */
11472 tcg_gen_exit_tb(0);
11473 break;
11474 case DISAS_TB_JUMP:
11475 /* nothing more to generate */
11476 break;
11477 case DISAS_WFI:
11478 gen_helper_wfi(cpu_env);
11479 /* The helper doesn't necessarily throw an exception, but we
11480 * must go back to the main loop to check for interrupts anyway.
11481 */
11482 tcg_gen_exit_tb(0);
11483 break;
11484 case DISAS_WFE:
11485 gen_helper_wfe(cpu_env);
11486 break;
11487 case DISAS_YIELD:
11488 gen_helper_yield(cpu_env);
11489 break;
11490 case DISAS_SWI:
11491 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11492 default_exception_el(dc));
11493 break;
11494 case DISAS_HVC:
11495 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
11496 break;
11497 case DISAS_SMC:
11498 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
11499 break;
11500 }
11501 if (dc->condjmp) {
11502 gen_set_label(dc->condlabel);
11503 gen_set_condexec(dc);
11504 gen_goto_tb(dc, 1, dc->pc);
11505 dc->condjmp = 0;
11506 }
11507 }
11509 done_generating:
11510 gen_tb_end(tb, num_insns);
11512 #ifdef DEBUG_DISAS
11513 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
11514 qemu_log("----------------\n");
11515 qemu_log("IN: %s\n", lookup_symbol(pc_start));
11516 log_target_disas(cs, pc_start, dc->pc - pc_start,
11517 dc->thumb | (dc->bswap_code << 1));
11518 qemu_log("\n");
11519 }
11520 #endif
11521 tb->size = dc->pc - pc_start;
11522 tb->icount = num_insns;
11523 }
11525 static const char *cpu_mode_names[16] = {
11526 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
11527 "???", "???", "hyp", "und", "???", "???", "???", "sys"
11530 void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
11531 int flags)
11532 {
11533 ARMCPU *cpu = ARM_CPU(cs);
11534 CPUARMState *env = &cpu->env;
11535 int i;
11536 uint32_t psr;
11538 if (is_a64(env)) {
11539 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
11540 return;
11541 }
11543 for (i = 0; i < 16; i++) {
11544 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
11545 if ((i % 4) == 3)
11546 cpu_fprintf(f, "\n");
11547 else
11548 cpu_fprintf(f, " ");
11550 psr = cpsr_read(env);
11551 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
11552 psr,
11553 psr & (1 << 31) ? 'N' : '-',
11554 psr & (1 << 30) ? 'Z' : '-',
11555 psr & (1 << 29) ? 'C' : '-',
11556 psr & (1 << 28) ? 'V' : '-',
11557 psr & CPSR_T ? 'T' : 'A',
11558 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
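/* Sample of the line emitted above for psr = 0x600001d3 (illustrative
 * value): "PSR=600001d3 -ZC- A svc32", i.e. Z and C set, ARM (not Thumb)
 * state, svc mode, 32-bit PSR layout.
 */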
11560 if (flags & CPU_DUMP_FPU) {
11561 int numvfpregs = 0;
11562 if (arm_feature(env, ARM_FEATURE_VFP)) {
11563 numvfpregs += 16;
11564 }
11565 if (arm_feature(env, ARM_FEATURE_VFP3)) {
11566 numvfpregs += 16;
11567 }
11568 for (i = 0; i < numvfpregs; i++) {
11569 uint64_t v = float64_val(env->vfp.regs[i]);
11570 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
11571 i * 2, (uint32_t)v,
11572 i * 2 + 1, (uint32_t)(v >> 32),
11573 i, v);
11574 }
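/* Each 64-bit d register aliases two s registers, low word first; if d00
 * held 1.0 (0x3ff0000000000000, illustrative) the row printed above would
 * read:
 *
 *     s00=00000000 s01=3ff00000 d00=3ff0000000000000
 */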
11575 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
11579 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
11580 target_ulong *data)
11581 {
11582 if (is_a64(env)) {
11583 env->pc = data[0];
11584 env->condexec_bits = 0;
11585 } else {
11586 env->regs[15] = data[0];
11587 env->condexec_bits = data[1];