target/arm: Use vector infrastructure for aa64 mov/not/neg
target/arm/translate-a64.c
blob 11310f1a7a8d9390548cf46033b8d2802ee30396
1 /*
2 * AArch64 translation
4 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "exec/exec-all.h"
23 #include "tcg-op.h"
24 #include "tcg-op-gvec.h"
25 #include "qemu/log.h"
26 #include "arm_ldst.h"
27 #include "translate.h"
28 #include "internals.h"
29 #include "qemu/host-utils.h"
31 #include "exec/semihost.h"
32 #include "exec/gen-icount.h"
34 #include "exec/helper-proto.h"
35 #include "exec/helper-gen.h"
36 #include "exec/log.h"
38 #include "trace-tcg.h"
40 static TCGv_i64 cpu_X[32];
41 static TCGv_i64 cpu_pc;
43 /* Load/store exclusive handling */
44 static TCGv_i64 cpu_exclusive_high;
45 static TCGv_i64 cpu_reg(DisasContext *s, int reg);
47 static const char *regnames[] = {
48 "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
49 "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
50 "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
51 "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
54 enum a64_shift_type {
55 A64_SHIFT_TYPE_LSL = 0,
56 A64_SHIFT_TYPE_LSR = 1,
57 A64_SHIFT_TYPE_ASR = 2,
58 A64_SHIFT_TYPE_ROR = 3
61 /* Table based decoder typedefs - used when the relevant bits for decode
62 * are too awkwardly scattered across the instruction (eg SIMD).
64 typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);
66 typedef struct AArch64DecodeTable {
67 uint32_t pattern;
68 uint32_t mask;
69 AArch64DecodeFn *disas_fn;
70 } AArch64DecodeTable;
72 /* Function prototype for gen_ functions for calling Neon helpers */
73 typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
74 typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
75 typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
76 typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
77 typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
78 typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
79 typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
80 typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
81 typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
82 typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
83 typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
84 typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
85 typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
86 typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
88 /* Note that the gvec expanders operate on offsets + sizes. */
89 typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
90 typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
91 uint32_t, uint32_t, uint32_t);
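/* A minimal usage sketch (illustrative only): the generic gvec expanders
 * declared in tcg-op-gvec.h, e.g. tcg_gen_gvec_not() for two operands or
 * tcg_gen_gvec_add() for three, match these signatures, so a decoder can
 * pass them around as plain function pointers:
 *
 *     GVecGen2Fn *fn2 = tcg_gen_gvec_not;
 *     fn2(vece, dofs, aofs, oprsz, maxsz);
 *
 * where dofs/aofs are CPUARMState offsets of the destination and source
 * vectors, oprsz is the number of bytes actually operated on (8 or 16
 * here) and maxsz is the full register size to be cleared beyond oprsz.
 */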
93 /* initialize TCG globals. */
94 void a64_translate_init(void)
96 int i;
98 cpu_pc = tcg_global_mem_new_i64(cpu_env,
99 offsetof(CPUARMState, pc),
100 "pc");
101 for (i = 0; i < 32; i++) {
102 cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
103 offsetof(CPUARMState, xregs[i]),
104 regnames[i]);
107 cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
108 offsetof(CPUARMState, exclusive_high), "exclusive_high");
111 static inline int get_a64_user_mem_index(DisasContext *s)
113 /* Return the core mmu_idx to use for A64 "unprivileged load/store" insns:
114 * if EL1, access as if EL0; otherwise access at current EL
116 ARMMMUIdx useridx;
118 switch (s->mmu_idx) {
119 case ARMMMUIdx_S12NSE1:
120 useridx = ARMMMUIdx_S12NSE0;
121 break;
122 case ARMMMUIdx_S1SE1:
123 useridx = ARMMMUIdx_S1SE0;
124 break;
125 case ARMMMUIdx_S2NS:
126 g_assert_not_reached();
127 default:
128 useridx = s->mmu_idx;
129 break;
131 return arm_to_core_mmu_idx(useridx);
134 void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
135 fprintf_function cpu_fprintf, int flags)
137 ARMCPU *cpu = ARM_CPU(cs);
138 CPUARMState *env = &cpu->env;
139 uint32_t psr = pstate_read(env);
140 int i;
141 int el = arm_current_el(env);
142 const char *ns_status;
144 cpu_fprintf(f, "PC=%016"PRIx64" SP=%016"PRIx64"\n",
145 env->pc, env->xregs[31]);
146 for (i = 0; i < 31; i++) {
147 cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]);
148 if ((i % 4) == 3) {
149 cpu_fprintf(f, "\n");
150 } else {
151 cpu_fprintf(f, " ");
155 if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
156 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
157 } else {
158 ns_status = "";
161 cpu_fprintf(f, "\nPSTATE=%08x %c%c%c%c %sEL%d%c\n",
162 psr,
163 psr & PSTATE_N ? 'N' : '-',
164 psr & PSTATE_Z ? 'Z' : '-',
165 psr & PSTATE_C ? 'C' : '-',
166 psr & PSTATE_V ? 'V' : '-',
167 ns_status,
169 psr & PSTATE_SP ? 'h' : 't');
171 if (flags & CPU_DUMP_FPU) {
172 int numvfpregs = 32;
173 for (i = 0; i < numvfpregs; i++) {
174 uint64_t *q = aa64_vfp_qreg(env, i);
175 uint64_t vlo = q[0];
176 uint64_t vhi = q[1];
177 cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 "%c",
178 i, vhi, vlo, (i & 1 ? '\n' : ' '));
180 cpu_fprintf(f, "FPCR: %08x FPSR: %08x\n",
181 vfp_get_fpcr(env), vfp_get_fpsr(env));
185 void gen_a64_set_pc_im(uint64_t val)
187 tcg_gen_movi_i64(cpu_pc, val);
190 /* Load the PC from a generic TCG variable.
192 * If address tagging is enabled via the TCR TBI bits, then loading
193 * an address into the PC will clear out any tag in it:
194 * + for EL2 and EL3 there is only one TBI bit, and if it is set
195 * then the address is zero-extended, clearing bits [63:56]
196 * + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
197 * and TBI1 controls addresses with bit 55 == 1.
198 * If the appropriate TBI bit is set for the address then
199 * the address is sign-extended from bit 55 into bits [63:56]
201 * We can avoid doing this for relative-branches, because the
202 * PC + offset can never overflow into the tag bits (assuming
203 * that virtual addresses are less than 56 bits wide, as they
204 * are currently), but we must handle it for branch-to-register.
206 static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
209 if (s->current_el <= 1) {
210 /* Test if NEITHER or BOTH TBI values are set. If so, no need to
211 * examine bit 55 of the address; we can just generate the code directly.
212 * If mixed, then test via generated code
214 if (s->tbi0 && s->tbi1) {
215 TCGv_i64 tmp_reg = tcg_temp_new_i64();
216 /* Both bits set, sign extension from bit 55 into [63:56] will
217 * cover both cases
219 tcg_gen_shli_i64(tmp_reg, src, 8);
220 tcg_gen_sari_i64(cpu_pc, tmp_reg, 8);
221 tcg_temp_free_i64(tmp_reg);
222 } else if (!s->tbi0 && !s->tbi1) {
223 /* Neither bit set, just load it as-is */
224 tcg_gen_mov_i64(cpu_pc, src);
225 } else {
226 TCGv_i64 tcg_tmpval = tcg_temp_new_i64();
227 TCGv_i64 tcg_bit55 = tcg_temp_new_i64();
228 TCGv_i64 tcg_zero = tcg_const_i64(0);
230 tcg_gen_andi_i64(tcg_bit55, src, (1ull << 55));
232 if (s->tbi0) {
233 /* tbi0==1, tbi1==0, so 0-fill upper byte if bit 55 = 0 */
234 tcg_gen_andi_i64(tcg_tmpval, src,
235 0x00FFFFFFFFFFFFFFull);
236 tcg_gen_movcond_i64(TCG_COND_EQ, cpu_pc, tcg_bit55, tcg_zero,
237 tcg_tmpval, src);
238 } else {
239 /* tbi0==0, tbi1==1, so 1-fill upper byte if bit 55 = 1 */
240 tcg_gen_ori_i64(tcg_tmpval, src,
241 0xFF00000000000000ull);
242 tcg_gen_movcond_i64(TCG_COND_NE, cpu_pc, tcg_bit55, tcg_zero,
243 tcg_tmpval, src);
245 tcg_temp_free_i64(tcg_zero);
246 tcg_temp_free_i64(tcg_bit55);
247 tcg_temp_free_i64(tcg_tmpval);
249 } else { /* EL > 1 */
250 if (s->tbi0) {
251 /* Force tag byte to all zero */
252 tcg_gen_andi_i64(cpu_pc, src, 0x00FFFFFFFFFFFFFFull);
253 } else {
254 /* Load unmodified address */
255 tcg_gen_mov_i64(cpu_pc, src);
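/* Worked example (illustrative, not from the original source): with both
 * TBI0 and TBI1 set, a tagged address such as 0x5A80123456789ABC
 * (bit 55 == 1, tag byte 0x5A) becomes 0xFF80123456789ABC after the
 * shl-8/sar-8 sequence above; had bit 55 been 0, the same sequence would
 * have cleared the tag byte to 0x00 instead.
 */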
260 typedef struct DisasCompare64 {
261 TCGCond cond;
262 TCGv_i64 value;
263 } DisasCompare64;
265 static void a64_test_cc(DisasCompare64 *c64, int cc)
267 DisasCompare c32;
269 arm_test_cc(&c32, cc);
271 /* Sign-extend the 32-bit value so that the GE/LT comparisons work
272 * properly. The NE/EQ comparisons are also fine with this choice. */
273 c64->cond = c32.cond;
274 c64->value = tcg_temp_new_i64();
275 tcg_gen_ext_i32_i64(c64->value, c32.value);
277 arm_free_cc(&c32);
280 static void a64_free_cc(DisasCompare64 *c64)
282 tcg_temp_free_i64(c64->value);
285 static void gen_exception_internal(int excp)
287 TCGv_i32 tcg_excp = tcg_const_i32(excp);
289 assert(excp_is_internal(excp));
290 gen_helper_exception_internal(cpu_env, tcg_excp);
291 tcg_temp_free_i32(tcg_excp);
294 static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
296 TCGv_i32 tcg_excp = tcg_const_i32(excp);
297 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
298 TCGv_i32 tcg_el = tcg_const_i32(target_el);
300 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
301 tcg_syn, tcg_el);
302 tcg_temp_free_i32(tcg_el);
303 tcg_temp_free_i32(tcg_syn);
304 tcg_temp_free_i32(tcg_excp);
307 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
309 gen_a64_set_pc_im(s->pc - offset);
310 gen_exception_internal(excp);
311 s->base.is_jmp = DISAS_NORETURN;
314 static void gen_exception_insn(DisasContext *s, int offset, int excp,
315 uint32_t syndrome, uint32_t target_el)
317 gen_a64_set_pc_im(s->pc - offset);
318 gen_exception(excp, syndrome, target_el);
319 s->base.is_jmp = DISAS_NORETURN;
322 static void gen_ss_advance(DisasContext *s)
324 /* If the singlestep state is Active-not-pending, advance to
325 * Active-pending.
327 if (s->ss_active) {
328 s->pstate_ss = 0;
329 gen_helper_clear_pstate_ss(cpu_env);
333 static void gen_step_complete_exception(DisasContext *s)
335 /* We just completed a step of an insn. Move from Active-not-pending
336 * to Active-pending, and then also take the swstep exception.
337 * This corresponds to making the (IMPDEF) choice to prioritize
338 * swstep exceptions over asynchronous exceptions taken to an exception
339 * level where debug is disabled. This choice has the advantage that
340 * we do not need to maintain internal state corresponding to the
341 * ISV/EX syndrome bits between completion of the step and generation
342 * of the exception, and our syndrome information is always correct.
344 gen_ss_advance(s);
345 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
346 default_exception_el(s));
347 s->base.is_jmp = DISAS_NORETURN;
350 static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
352 /* No direct tb linking with singlestep (either QEMU's or the ARM
353 * debug architecture kind) or deterministic io
355 if (s->base.singlestep_enabled || s->ss_active ||
356 (tb_cflags(s->base.tb) & CF_LAST_IO)) {
357 return false;
360 #ifndef CONFIG_USER_ONLY
361 /* Only link tbs from inside the same guest page */
362 if ((s->base.tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
363 return false;
365 #endif
367 return true;
370 static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
372 TranslationBlock *tb;
374 tb = s->base.tb;
375 if (use_goto_tb(s, n, dest)) {
376 tcg_gen_goto_tb(n);
377 gen_a64_set_pc_im(dest);
378 tcg_gen_exit_tb((intptr_t)tb + n);
379 s->base.is_jmp = DISAS_NORETURN;
380 } else {
381 gen_a64_set_pc_im(dest);
382 if (s->ss_active) {
383 gen_step_complete_exception(s);
384 } else if (s->base.singlestep_enabled) {
385 gen_exception_internal(EXCP_DEBUG);
386 } else {
387 tcg_gen_lookup_and_goto_ptr();
388 s->base.is_jmp = DISAS_NORETURN;
393 static void unallocated_encoding(DisasContext *s)
395 /* Unallocated and reserved encodings are uncategorized */
396 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
397 default_exception_el(s));
400 #define unsupported_encoding(s, insn) \
401 do { \
402 qemu_log_mask(LOG_UNIMP, \
403 "%s:%d: unsupported instruction encoding 0x%08x " \
404 "at pc=%016" PRIx64 "\n", \
405 __FILE__, __LINE__, insn, s->pc - 4); \
406 unallocated_encoding(s); \
407 } while (0)
409 static void init_tmp_a64_array(DisasContext *s)
411 #ifdef CONFIG_DEBUG_TCG
412 memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
413 #endif
414 s->tmp_a64_count = 0;
417 static void free_tmp_a64(DisasContext *s)
419 int i;
420 for (i = 0; i < s->tmp_a64_count; i++) {
421 tcg_temp_free_i64(s->tmp_a64[i]);
423 init_tmp_a64_array(s);
426 static TCGv_i64 new_tmp_a64(DisasContext *s)
428 assert(s->tmp_a64_count < TMP_A64_MAX);
429 return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
432 static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
434 TCGv_i64 t = new_tmp_a64(s);
435 tcg_gen_movi_i64(t, 0);
436 return t;
440 * Register access functions
442 * These functions are used for directly accessing a register in cases where
443 * changes to the final register value are likely to be made. If you
444 * need to use a register for temporary calculation (e.g. index type
445 * operations) use the read_* form.
447 * B1.2.1 Register mappings
449 * In instruction register encoding 31 can refer to ZR (zero register) or
450 * the SP (stack pointer) depending on context. In QEMU's case we map SP
451 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
452 * This is the point of the _sp forms.
454 static TCGv_i64 cpu_reg(DisasContext *s, int reg)
456 if (reg == 31) {
457 return new_tmp_a64_zero(s);
458 } else {
459 return cpu_X[reg];
463 /* register access for when 31 == SP */
464 static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
466 return cpu_X[reg];
469 /* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
470 * representing the register contents. This TCGv is an auto-freed
471 * temporary so it need not be explicitly freed, and may be modified.
473 static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
475 TCGv_i64 v = new_tmp_a64(s);
476 if (reg != 31) {
477 if (sf) {
478 tcg_gen_mov_i64(v, cpu_X[reg]);
479 } else {
480 tcg_gen_ext32u_i64(v, cpu_X[reg]);
482 } else {
483 tcg_gen_movi_i64(v, 0);
485 return v;
488 static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
490 TCGv_i64 v = new_tmp_a64(s);
491 if (sf) {
492 tcg_gen_mov_i64(v, cpu_X[reg]);
493 } else {
494 tcg_gen_ext32u_i64(v, cpu_X[reg]);
496 return v;
499 /* We should have at some point before trying to access an FP register
500 * done the necessary access check, so assert that
501 * (a) we did the check and
502 * (b) we didn't then just plough ahead anyway if it failed.
503 * Print the instruction pattern in the abort message so we can figure
504 * out what we need to fix if a user encounters this problem in the wild.
506 static inline void assert_fp_access_checked(DisasContext *s)
508 #ifdef CONFIG_DEBUG_TCG
509 if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
510 fprintf(stderr, "target-arm: FP access check missing for "
511 "instruction 0x%08x\n", s->insn);
512 abort();
514 #endif
517 /* Return the offset into CPUARMState of an element of specified
518 * size, 'element' places in from the least significant end of
519 * the FP/vector register Qn.
521 static inline int vec_reg_offset(DisasContext *s, int regno,
522 int element, TCGMemOp size)
524 int offs = 0;
525 #ifdef HOST_WORDS_BIGENDIAN
526 /* This is complicated slightly because vfp.regs[2n] is
527 * still the low half and vfp.regs[2n+1] the high half
528 * of the 128 bit vector, even on big endian systems.
529 * Calculate the offset assuming a fully bigendian 128 bits,
530 * then XOR to account for the order of the two 64 bit halves.
532 offs += (16 - ((element + 1) * (1 << size)));
533 offs ^= 8;
534 #else
535 offs += element * (1 << size);
536 #endif
537 offs += offsetof(CPUARMState, vfp.regs[regno * 2]);
538 assert_fp_access_checked(s);
539 return offs;
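/* Worked example (illustrative): on a big-endian host, element 0 of Qn
 * viewed as bytes (size == MO_8) gives offs = 16 - 1 = 15, then
 * 15 ^ 8 = 7: the least significant byte lives at byte 7 of the first
 * (low) uint64_t, as expected for a big-endian 64-bit word. On a
 * little-endian host the same element is simply at offset 0.
 */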
542 /* Return the offset into CPUARMState of the "whole" vector register Qn. */
543 static inline int vec_full_reg_offset(DisasContext *s, int regno)
545 assert_fp_access_checked(s);
546 return offsetof(CPUARMState, vfp.regs[regno * 2]);
549 /* Return a newly allocated pointer to the vector register. */
550 static TCGv_ptr vec_full_reg_ptr(DisasContext *s, int regno)
552 TCGv_ptr ret = tcg_temp_new_ptr();
553 tcg_gen_addi_ptr(ret, cpu_env, vec_full_reg_offset(s, regno));
554 return ret;
557 /* Return the byte size of the "whole" vector register, VL / 8. */
558 static inline int vec_full_reg_size(DisasContext *s)
560 /* FIXME SVE: We should put the composite ZCR_EL* value into tb->flags.
561 In the meantime this is just the AdvSIMD length of 128. */
562 return 128 / 8;
565 /* Return the offset into CPUARMState of a slice (from
566 * the least significant end) of FP register Qn (ie
567 * Dn, Sn, Hn or Bn).
568 * (Note that this is not the same mapping as for A32; see cpu.h)
570 static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size)
572 return vec_reg_offset(s, regno, 0, size);
575 /* Offset of the high half of the 128 bit vector Qn */
576 static inline int fp_reg_hi_offset(DisasContext *s, int regno)
578 return vec_reg_offset(s, regno, 1, MO_64);
581 /* Convenience accessors for reading and writing single and double
582 * FP registers. Writing clears the upper parts of the associated
583 * 128 bit vector register, as required by the architecture.
584 * Note that unlike the GP register accessors, the values returned
585 * by the read functions must be manually freed.
587 static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
589 TCGv_i64 v = tcg_temp_new_i64();
591 tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
592 return v;
595 static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
597 TCGv_i32 v = tcg_temp_new_i32();
599 tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
600 return v;
603 static void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
605 TCGv_i64 tcg_zero = tcg_const_i64(0);
607 tcg_gen_st_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
608 tcg_gen_st_i64(tcg_zero, cpu_env, fp_reg_hi_offset(s, reg));
609 tcg_temp_free_i64(tcg_zero);
612 static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
614 TCGv_i64 tmp = tcg_temp_new_i64();
616 tcg_gen_extu_i32_i64(tmp, v);
617 write_fp_dreg(s, reg, tmp);
618 tcg_temp_free_i64(tmp);
621 static TCGv_ptr get_fpstatus_ptr(void)
623 TCGv_ptr statusptr = tcg_temp_new_ptr();
624 int offset;
626 /* In A64 all instructions (both FP and Neon) use the FPCR;
627 * there is no equivalent of the A32 Neon "standard FPSCR value"
628 * and all operations use vfp.fp_status.
630 offset = offsetof(CPUARMState, vfp.fp_status);
631 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
632 return statusptr;
635 /* Expand a 2-operand AdvSIMD vector operation using an expander function. */
636 static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
637 GVecGen2Fn *gvec_fn, int vece)
639 gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
640 is_q ? 16 : 8, vec_full_reg_size(s));
643 /* Expand a 3-operand AdvSIMD vector operation using an expander function. */
644 static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
645 GVecGen3Fn *gvec_fn, int vece)
647 gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
648 vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
651 /* Expand a 3-operand AdvSIMD vector operation using an op descriptor. */
652 static void gen_gvec_op3(DisasContext *s, bool is_q, int rd,
653 int rn, int rm, const GVecGen3 *gvec_op)
655 tcg_gen_gvec_3(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
656 vec_full_reg_offset(s, rm), is_q ? 16 : 8,
657 vec_full_reg_size(s), gvec_op);
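/* Sketch of how these wrappers are meant to be used (per the patch
 * subject); the exact call sites live elsewhere in this file and may
 * differ in detail:
 *
 *     gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);     NOT (vector)
 *     gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);  NEG (vector)
 *
 * i.e. whole-vector mov/not/neg are expanded by the gvec infrastructure
 * instead of looping over 64-bit elements by hand.
 */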
660 /* Set ZF and NF based on a 64 bit result. This is alas fiddlier
661 * than the 32 bit equivalent.
663 static inline void gen_set_NZ64(TCGv_i64 result)
665 tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
666 tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
669 /* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
670 static inline void gen_logic_CC(int sf, TCGv_i64 result)
672 if (sf) {
673 gen_set_NZ64(result);
674 } else {
675 tcg_gen_extrl_i64_i32(cpu_ZF, result);
676 tcg_gen_mov_i32(cpu_NF, cpu_ZF);
678 tcg_gen_movi_i32(cpu_CF, 0);
679 tcg_gen_movi_i32(cpu_VF, 0);
682 /* dest = T0 + T1; compute C, N, V and Z flags */
683 static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
685 if (sf) {
686 TCGv_i64 result, flag, tmp;
687 result = tcg_temp_new_i64();
688 flag = tcg_temp_new_i64();
689 tmp = tcg_temp_new_i64();
691 tcg_gen_movi_i64(tmp, 0);
692 tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);
694 tcg_gen_extrl_i64_i32(cpu_CF, flag);
696 gen_set_NZ64(result);
698 tcg_gen_xor_i64(flag, result, t0);
699 tcg_gen_xor_i64(tmp, t0, t1);
700 tcg_gen_andc_i64(flag, flag, tmp);
701 tcg_temp_free_i64(tmp);
702 tcg_gen_extrh_i64_i32(cpu_VF, flag);
704 tcg_gen_mov_i64(dest, result);
705 tcg_temp_free_i64(result);
706 tcg_temp_free_i64(flag);
707 } else {
708 /* 32 bit arithmetic */
709 TCGv_i32 t0_32 = tcg_temp_new_i32();
710 TCGv_i32 t1_32 = tcg_temp_new_i32();
711 TCGv_i32 tmp = tcg_temp_new_i32();
713 tcg_gen_movi_i32(tmp, 0);
714 tcg_gen_extrl_i64_i32(t0_32, t0);
715 tcg_gen_extrl_i64_i32(t1_32, t1);
716 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
717 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
718 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
719 tcg_gen_xor_i32(tmp, t0_32, t1_32);
720 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
721 tcg_gen_extu_i32_i64(dest, cpu_NF);
723 tcg_temp_free_i32(tmp);
724 tcg_temp_free_i32(t0_32);
725 tcg_temp_free_i32(t1_32);
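/* The overflow computation above uses the usual identity (sketch): for
 * r = t0 + t1, signed overflow occurred iff the top bit of
 * (r ^ t0) & ~(t0 ^ t1) is set, i.e. the operands had the same sign and
 * the result's sign differs from them.
 */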
729 /* dest = T0 - T1; compute C, N, V and Z flags */
730 static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
732 if (sf) {
733 /* 64 bit arithmetic */
734 TCGv_i64 result, flag, tmp;
736 result = tcg_temp_new_i64();
737 flag = tcg_temp_new_i64();
738 tcg_gen_sub_i64(result, t0, t1);
740 gen_set_NZ64(result);
742 tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
743 tcg_gen_extrl_i64_i32(cpu_CF, flag);
745 tcg_gen_xor_i64(flag, result, t0);
746 tmp = tcg_temp_new_i64();
747 tcg_gen_xor_i64(tmp, t0, t1);
748 tcg_gen_and_i64(flag, flag, tmp);
749 tcg_temp_free_i64(tmp);
750 tcg_gen_extrh_i64_i32(cpu_VF, flag);
751 tcg_gen_mov_i64(dest, result);
752 tcg_temp_free_i64(flag);
753 tcg_temp_free_i64(result);
754 } else {
755 /* 32 bit arithmetic */
756 TCGv_i32 t0_32 = tcg_temp_new_i32();
757 TCGv_i32 t1_32 = tcg_temp_new_i32();
758 TCGv_i32 tmp;
760 tcg_gen_extrl_i64_i32(t0_32, t0);
761 tcg_gen_extrl_i64_i32(t1_32, t1);
762 tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
763 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
764 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
765 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
766 tmp = tcg_temp_new_i32();
767 tcg_gen_xor_i32(tmp, t0_32, t1_32);
768 tcg_temp_free_i32(t0_32);
769 tcg_temp_free_i32(t1_32);
770 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
771 tcg_temp_free_i32(tmp);
772 tcg_gen_extu_i32_i64(dest, cpu_NF);
776 /* dest = T0 + T1 + CF; do not compute flags. */
777 static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
779 TCGv_i64 flag = tcg_temp_new_i64();
780 tcg_gen_extu_i32_i64(flag, cpu_CF);
781 tcg_gen_add_i64(dest, t0, t1);
782 tcg_gen_add_i64(dest, dest, flag);
783 tcg_temp_free_i64(flag);
785 if (!sf) {
786 tcg_gen_ext32u_i64(dest, dest);
790 /* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
791 static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
793 if (sf) {
794 TCGv_i64 result, cf_64, vf_64, tmp;
795 result = tcg_temp_new_i64();
796 cf_64 = tcg_temp_new_i64();
797 vf_64 = tcg_temp_new_i64();
798 tmp = tcg_const_i64(0);
800 tcg_gen_extu_i32_i64(cf_64, cpu_CF);
801 tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
802 tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
803 tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
804 gen_set_NZ64(result);
806 tcg_gen_xor_i64(vf_64, result, t0);
807 tcg_gen_xor_i64(tmp, t0, t1);
808 tcg_gen_andc_i64(vf_64, vf_64, tmp);
809 tcg_gen_extrh_i64_i32(cpu_VF, vf_64);
811 tcg_gen_mov_i64(dest, result);
813 tcg_temp_free_i64(tmp);
814 tcg_temp_free_i64(vf_64);
815 tcg_temp_free_i64(cf_64);
816 tcg_temp_free_i64(result);
817 } else {
818 TCGv_i32 t0_32, t1_32, tmp;
819 t0_32 = tcg_temp_new_i32();
820 t1_32 = tcg_temp_new_i32();
821 tmp = tcg_const_i32(0);
823 tcg_gen_extrl_i64_i32(t0_32, t0);
824 tcg_gen_extrl_i64_i32(t1_32, t1);
825 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
826 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);
828 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
829 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
830 tcg_gen_xor_i32(tmp, t0_32, t1_32);
831 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
832 tcg_gen_extu_i32_i64(dest, cpu_NF);
834 tcg_temp_free_i32(tmp);
835 tcg_temp_free_i32(t1_32);
836 tcg_temp_free_i32(t0_32);
841 * Load/Store generators
845 * Store from GPR register to memory.
847 static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
848 TCGv_i64 tcg_addr, int size, int memidx,
849 bool iss_valid,
850 unsigned int iss_srt,
851 bool iss_sf, bool iss_ar)
853 g_assert(size <= 3);
854 tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);
856 if (iss_valid) {
857 uint32_t syn;
859 syn = syn_data_abort_with_iss(0,
860 size,
861 false,
862 iss_srt,
863 iss_sf,
864 iss_ar,
865 0, 0, 0, 0, 0, false);
866 disas_set_insn_syndrome(s, syn);
870 static void do_gpr_st(DisasContext *s, TCGv_i64 source,
871 TCGv_i64 tcg_addr, int size,
872 bool iss_valid,
873 unsigned int iss_srt,
874 bool iss_sf, bool iss_ar)
876 do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
877 iss_valid, iss_srt, iss_sf, iss_ar);
881 * Load from memory to GPR register
883 static void do_gpr_ld_memidx(DisasContext *s,
884 TCGv_i64 dest, TCGv_i64 tcg_addr,
885 int size, bool is_signed,
886 bool extend, int memidx,
887 bool iss_valid, unsigned int iss_srt,
888 bool iss_sf, bool iss_ar)
890 TCGMemOp memop = s->be_data + size;
892 g_assert(size <= 3);
894 if (is_signed) {
895 memop += MO_SIGN;
898 tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);
900 if (extend && is_signed) {
901 g_assert(size < 3);
902 tcg_gen_ext32u_i64(dest, dest);
905 if (iss_valid) {
906 uint32_t syn;
908 syn = syn_data_abort_with_iss(0,
909 size,
910 is_signed,
911 iss_srt,
912 iss_sf,
913 iss_ar,
914 0, 0, 0, 0, 0, false);
915 disas_set_insn_syndrome(s, syn);
919 static void do_gpr_ld(DisasContext *s,
920 TCGv_i64 dest, TCGv_i64 tcg_addr,
921 int size, bool is_signed, bool extend,
922 bool iss_valid, unsigned int iss_srt,
923 bool iss_sf, bool iss_ar)
925 do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
926 get_mem_index(s),
927 iss_valid, iss_srt, iss_sf, iss_ar);
931 * Store from FP register to memory
933 static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
935 /* This writes the bottom N bits of a 128 bit wide vector to memory */
936 TCGv_i64 tmp = tcg_temp_new_i64();
937 tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
938 if (size < 4) {
939 tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
940 s->be_data + size);
941 } else {
942 bool be = s->be_data == MO_BE;
943 TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
945 tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
946 tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
947 s->be_data | MO_Q);
948 tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
949 tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
950 s->be_data | MO_Q);
951 tcg_temp_free_i64(tcg_hiaddr);
954 tcg_temp_free_i64(tmp);
958 * Load from memory to FP register
960 static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
962 /* This always zero-extends and writes to a full 128 bit wide vector */
963 TCGv_i64 tmplo = tcg_temp_new_i64();
964 TCGv_i64 tmphi;
966 if (size < 4) {
967 TCGMemOp memop = s->be_data + size;
968 tmphi = tcg_const_i64(0);
969 tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
970 } else {
971 bool be = s->be_data == MO_BE;
972 TCGv_i64 tcg_hiaddr;
974 tmphi = tcg_temp_new_i64();
975 tcg_hiaddr = tcg_temp_new_i64();
977 tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
978 tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
979 s->be_data | MO_Q);
980 tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
981 s->be_data | MO_Q);
982 tcg_temp_free_i64(tcg_hiaddr);
985 tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
986 tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
988 tcg_temp_free_i64(tmplo);
989 tcg_temp_free_i64(tmphi);
993 * Vector load/store helpers.
995 * The principal difference between this and a FP load is that we don't
996 * zero extend as we are filling a partial chunk of the vector register.
997 * These functions don't support 128 bit loads/stores, which would be
998 * normal load/store operations.
1000 * The _i32 versions are useful when operating on 32 bit quantities
1001 * (eg for floating point single or using Neon helper functions).
1004 /* Get value of an element within a vector register */
1005 static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
1006 int element, TCGMemOp memop)
1008 int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
1009 switch (memop) {
1010 case MO_8:
1011 tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
1012 break;
1013 case MO_16:
1014 tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
1015 break;
1016 case MO_32:
1017 tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
1018 break;
1019 case MO_8|MO_SIGN:
1020 tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
1021 break;
1022 case MO_16|MO_SIGN:
1023 tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
1024 break;
1025 case MO_32|MO_SIGN:
1026 tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
1027 break;
1028 case MO_64:
1029 case MO_64|MO_SIGN:
1030 tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
1031 break;
1032 default:
1033 g_assert_not_reached();
1037 static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
1038 int element, TCGMemOp memop)
1040 int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
1041 switch (memop) {
1042 case MO_8:
1043 tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
1044 break;
1045 case MO_16:
1046 tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
1047 break;
1048 case MO_8|MO_SIGN:
1049 tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
1050 break;
1051 case MO_16|MO_SIGN:
1052 tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
1053 break;
1054 case MO_32:
1055 case MO_32|MO_SIGN:
1056 tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
1057 break;
1058 default:
1059 g_assert_not_reached();
1063 /* Set value of an element within a vector register */
1064 static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
1065 int element, TCGMemOp memop)
1067 int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
1068 switch (memop) {
1069 case MO_8:
1070 tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
1071 break;
1072 case MO_16:
1073 tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
1074 break;
1075 case MO_32:
1076 tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
1077 break;
1078 case MO_64:
1079 tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
1080 break;
1081 default:
1082 g_assert_not_reached();
1086 static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
1087 int destidx, int element, TCGMemOp memop)
1089 int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
1090 switch (memop) {
1091 case MO_8:
1092 tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
1093 break;
1094 case MO_16:
1095 tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
1096 break;
1097 case MO_32:
1098 tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
1099 break;
1100 default:
1101 g_assert_not_reached();
1105 /* Clear the high 64 bits of a 128 bit vector (in general non-quad
1106 * vector ops all need to do this).
1108 static void clear_vec_high(DisasContext *s, int rd)
1110 TCGv_i64 tcg_zero = tcg_const_i64(0);
1112 write_vec_element(s, tcg_zero, rd, 1, MO_64);
1113 tcg_temp_free_i64(tcg_zero);
1116 /* Store from vector register to memory */
1117 static void do_vec_st(DisasContext *s, int srcidx, int element,
1118 TCGv_i64 tcg_addr, int size)
1120 TCGMemOp memop = s->be_data + size;
1121 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
1123 read_vec_element(s, tcg_tmp, srcidx, element, size);
1124 tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
1126 tcg_temp_free_i64(tcg_tmp);
1129 /* Load from memory to vector register */
1130 static void do_vec_ld(DisasContext *s, int destidx, int element,
1131 TCGv_i64 tcg_addr, int size)
1133 TCGMemOp memop = s->be_data + size;
1134 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
1136 tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
1137 write_vec_element(s, tcg_tmp, destidx, element, size);
1139 tcg_temp_free_i64(tcg_tmp);
1142 /* Check that FP/Neon access is enabled. If it is, return
1143 * true. If not, emit code to generate an appropriate exception,
1144 * and return false; the caller should not emit any code for
1145 * the instruction. Note that this check must happen after all
1146 * unallocated-encoding checks (otherwise the syndrome information
1147 * for the resulting exception will be incorrect).
1149 static inline bool fp_access_check(DisasContext *s)
1151 assert(!s->fp_access_checked);
1152 s->fp_access_checked = true;
1154 if (!s->fp_excp_el) {
1155 return true;
1158 gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
1159 s->fp_excp_el);
1160 return false;
1164 * This utility function is for doing register extension with an
1165 * optional shift. You will likely want to pass a temporary for the
1166 * destination register. See DecodeRegExtend() in the ARM ARM.
1168 static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
1169 int option, unsigned int shift)
1171 int extsize = extract32(option, 0, 2);
1172 bool is_signed = extract32(option, 2, 1);
1174 if (is_signed) {
1175 switch (extsize) {
1176 case 0:
1177 tcg_gen_ext8s_i64(tcg_out, tcg_in);
1178 break;
1179 case 1:
1180 tcg_gen_ext16s_i64(tcg_out, tcg_in);
1181 break;
1182 case 2:
1183 tcg_gen_ext32s_i64(tcg_out, tcg_in);
1184 break;
1185 case 3:
1186 tcg_gen_mov_i64(tcg_out, tcg_in);
1187 break;
1189 } else {
1190 switch (extsize) {
1191 case 0:
1192 tcg_gen_ext8u_i64(tcg_out, tcg_in);
1193 break;
1194 case 1:
1195 tcg_gen_ext16u_i64(tcg_out, tcg_in);
1196 break;
1197 case 2:
1198 tcg_gen_ext32u_i64(tcg_out, tcg_in);
1199 break;
1200 case 3:
1201 tcg_gen_mov_i64(tcg_out, tcg_in);
1202 break;
1206 if (shift) {
1207 tcg_gen_shli_i64(tcg_out, tcg_out, shift);
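/* Worked example (illustrative): for LDR Xt, [Xn, Wm, UXTW #3] the
 * decoder passes option == 2 (UXTW) and shift == 3, so the code above
 * zero-extends the low 32 bits of Wm and then shifts left by 3 to form
 * the byte offset.
 */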
1211 static inline void gen_check_sp_alignment(DisasContext *s)
1213 /* The AArch64 architecture mandates that (if enabled via PSTATE
1214 * or SCTLR bits) there is a check that SP is 16-aligned on every
1215 * SP-relative load or store (with an exception generated if it is not).
1216 * In line with general QEMU practice regarding misaligned accesses,
1217 * we omit these checks for the sake of guest program performance.
1218 * This function is provided as a hook so we can more easily add these
1219 * checks in future (possibly as a "favour catching guest program bugs
1220 * over speed" user selectable option).
1225 * This provides a simple table-based lookup decoder. It is
1226 * intended to be used when the relevant bits for decode are too
1227 * awkwardly placed and switch/if based logic would be confusing and
1228 * deeply nested. Since it's a linear search through the table, tables
1229 * should be kept small.
1231 * It returns the first handler where insn & mask == pattern, or
1232 * NULL if there is no match.
1233 * The table is terminated by an empty mask (i.e. 0)
1235 static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
1236 uint32_t insn)
1238 const AArch64DecodeTable *tptr = table;
1240 while (tptr->mask) {
1241 if ((insn & tptr->mask) == tptr->pattern) {
1242 return tptr->disas_fn;
1244 tptr++;
1246 return NULL;
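/* Usage sketch (the handler name and table values are illustrative, not
 * the real tables in this file):
 *
 *     static const AArch64DecodeTable example_table[] = {
 *         { 0x0e200400, 0x9f200400, disas_example_three_same },
 *         { 0x00000000, 0x00000000, NULL }
 *     };
 *     AArch64DecodeFn *fn = lookup_disas_fn(example_table, insn);
 *     if (fn) {
 *         fn(s, insn);
 *     } else {
 *         unallocated_encoding(s);
 *     }
 */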
1250 * The instruction disassembly implemented here matches
1251 * the instruction encoding classifications in chapter C4
1252 * of the ARM Architecture Reference Manual (DDI0487B_a);
1253 * classification names and decode diagrams here should generally
1254 * match up with those in the manual.
1257 /* Unconditional branch (immediate)
1258 * 31 30 26 25 0
1259 * +----+-----------+-------------------------------------+
1260 * | op | 0 0 1 0 1 | imm26 |
1261 * +----+-----------+-------------------------------------+
1263 static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
1265 uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;
1267 if (insn & (1U << 31)) {
1268 /* BL Branch with link */
1269 tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
1272 /* B Branch / BL Branch with link */
1273 gen_goto_tb(s, 0, addr);
1276 /* Compare and branch (immediate)
1277 * 31 30 25 24 23 5 4 0
1278 * +----+-------------+----+---------------------+--------+
1279 * | sf | 0 1 1 0 1 0 | op | imm19 | Rt |
1280 * +----+-------------+----+---------------------+--------+
1282 static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
1284 unsigned int sf, op, rt;
1285 uint64_t addr;
1286 TCGLabel *label_match;
1287 TCGv_i64 tcg_cmp;
1289 sf = extract32(insn, 31, 1);
1290 op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
1291 rt = extract32(insn, 0, 5);
1292 addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
1294 tcg_cmp = read_cpu_reg(s, rt, sf);
1295 label_match = gen_new_label();
1297 tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
1298 tcg_cmp, 0, label_match);
1300 gen_goto_tb(s, 0, s->pc);
1301 gen_set_label(label_match);
1302 gen_goto_tb(s, 1, addr);
1305 /* Test and branch (immediate)
1306 * 31 30 25 24 23 19 18 5 4 0
1307 * +----+-------------+----+-------+-------------+------+
1308 * | b5 | 0 1 1 0 1 1 | op | b40 | imm14 | Rt |
1309 * +----+-------------+----+-------+-------------+------+
1311 static void disas_test_b_imm(DisasContext *s, uint32_t insn)
1313 unsigned int bit_pos, op, rt;
1314 uint64_t addr;
1315 TCGLabel *label_match;
1316 TCGv_i64 tcg_cmp;
1318 bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
1319 op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
1320 addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
1321 rt = extract32(insn, 0, 5);
1323 tcg_cmp = tcg_temp_new_i64();
1324 tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
1325 label_match = gen_new_label();
1326 tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
1327 tcg_cmp, 0, label_match);
1328 tcg_temp_free_i64(tcg_cmp);
1329 gen_goto_tb(s, 0, s->pc);
1330 gen_set_label(label_match);
1331 gen_goto_tb(s, 1, addr);
1334 /* Conditional branch (immediate)
1335 * 31 25 24 23 5 4 3 0
1336 * +---------------+----+---------------------+----+------+
1337 * | 0 1 0 1 0 1 0 | o1 | imm19 | o0 | cond |
1338 * +---------------+----+---------------------+----+------+
1340 static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
1342 unsigned int cond;
1343 uint64_t addr;
1345 if ((insn & (1 << 4)) || (insn & (1 << 24))) {
1346 unallocated_encoding(s);
1347 return;
1349 addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
1350 cond = extract32(insn, 0, 4);
1352 if (cond < 0x0e) {
1353 /* genuinely conditional branches */
1354 TCGLabel *label_match = gen_new_label();
1355 arm_gen_test_cc(cond, label_match);
1356 gen_goto_tb(s, 0, s->pc);
1357 gen_set_label(label_match);
1358 gen_goto_tb(s, 1, addr);
1359 } else {
1360 /* 0xe and 0xf are both "always" conditions */
1361 gen_goto_tb(s, 0, addr);
1365 /* HINT instruction group, including various allocated HINTs */
1366 static void handle_hint(DisasContext *s, uint32_t insn,
1367 unsigned int op1, unsigned int op2, unsigned int crm)
1369 unsigned int selector = crm << 3 | op2;
1371 if (op1 != 3) {
1372 unallocated_encoding(s);
1373 return;
1376 switch (selector) {
1377 case 0: /* NOP */
1378 return;
1379 case 3: /* WFI */
1380 s->base.is_jmp = DISAS_WFI;
1381 return;
1382 /* When running in MTTCG we don't generate jumps to the yield and
1383 * WFE helpers as it won't affect the scheduling of other vCPUs.
1384 * If we wanted to more completely model WFE/SEV so we don't busy
1385 * spin unnecessarily we would need to do something more involved.
1387 case 1: /* YIELD */
1388 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
1389 s->base.is_jmp = DISAS_YIELD;
1391 return;
1392 case 2: /* WFE */
1393 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
1394 s->base.is_jmp = DISAS_WFE;
1396 return;
1397 case 4: /* SEV */
1398 case 5: /* SEVL */
1399 /* we treat all as NOP at least for now */
1400 return;
1401 default:
1402 /* default specified as NOP equivalent */
1403 return;
1407 static void gen_clrex(DisasContext *s, uint32_t insn)
1409 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
1412 /* CLREX, DSB, DMB, ISB */
1413 static void handle_sync(DisasContext *s, uint32_t insn,
1414 unsigned int op1, unsigned int op2, unsigned int crm)
1416 TCGBar bar;
1418 if (op1 != 3) {
1419 unallocated_encoding(s);
1420 return;
1423 switch (op2) {
1424 case 2: /* CLREX */
1425 gen_clrex(s, insn);
1426 return;
1427 case 4: /* DSB */
1428 case 5: /* DMB */
1429 switch (crm & 3) {
1430 case 1: /* MBReqTypes_Reads */
1431 bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
1432 break;
1433 case 2: /* MBReqTypes_Writes */
1434 bar = TCG_BAR_SC | TCG_MO_ST_ST;
1435 break;
1436 default: /* MBReqTypes_All */
1437 bar = TCG_BAR_SC | TCG_MO_ALL;
1438 break;
1440 tcg_gen_mb(bar);
1441 return;
1442 case 6: /* ISB */
1443 /* We need to break the TB after this insn to execute
1444 * self-modifying code correctly and also to take
1445 * any pending interrupts immediately.
1447 gen_goto_tb(s, 0, s->pc);
1448 return;
1449 default:
1450 unallocated_encoding(s);
1451 return;
1455 /* MSR (immediate) - move immediate to processor state field */
1456 static void handle_msr_i(DisasContext *s, uint32_t insn,
1457 unsigned int op1, unsigned int op2, unsigned int crm)
1459 int op = op1 << 3 | op2;
1460 switch (op) {
1461 case 0x05: /* SPSel */
1462 if (s->current_el == 0) {
1463 unallocated_encoding(s);
1464 return;
1466 /* fall through */
1467 case 0x1e: /* DAIFSet */
1468 case 0x1f: /* DAIFClear */
1470 TCGv_i32 tcg_imm = tcg_const_i32(crm);
1471 TCGv_i32 tcg_op = tcg_const_i32(op);
1472 gen_a64_set_pc_im(s->pc - 4);
1473 gen_helper_msr_i_pstate(cpu_env, tcg_op, tcg_imm);
1474 tcg_temp_free_i32(tcg_imm);
1475 tcg_temp_free_i32(tcg_op);
1476 /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs. */
1477 gen_a64_set_pc_im(s->pc);
1478 s->base.is_jmp = (op == 0x1f ? DISAS_EXIT : DISAS_JUMP);
1479 break;
1481 default:
1482 unallocated_encoding(s);
1483 return;
1487 static void gen_get_nzcv(TCGv_i64 tcg_rt)
1489 TCGv_i32 tmp = tcg_temp_new_i32();
1490 TCGv_i32 nzcv = tcg_temp_new_i32();
1492 /* build bit 31, N */
1493 tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
1494 /* build bit 30, Z */
1495 tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
1496 tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
1497 /* build bit 29, C */
1498 tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
1499 /* build bit 28, V */
1500 tcg_gen_shri_i32(tmp, cpu_VF, 31);
1501 tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
1502 /* generate result */
1503 tcg_gen_extu_i32_i64(tcg_rt, nzcv);
1505 tcg_temp_free_i32(nzcv);
1506 tcg_temp_free_i32(tmp);
1509 static void gen_set_nzcv(TCGv_i64 tcg_rt)
1512 TCGv_i32 nzcv = tcg_temp_new_i32();
1514 /* take NZCV from R[t] */
1515 tcg_gen_extrl_i64_i32(nzcv, tcg_rt);
1517 /* bit 31, N */
1518 tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
1519 /* bit 30, Z */
1520 tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
1521 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
1522 /* bit 29, C */
1523 tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
1524 tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
1525 /* bit 28, V */
1526 tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
1527 tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
1528 tcg_temp_free_i32(nzcv);
1531 /* MRS - move from system register
1532 * MSR (register) - move to system register
1533 * SYS
1534 * SYSL
1535 * These are all essentially the same insn in 'read' and 'write'
1536 * versions, with varying op0 fields.
1538 static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
1539 unsigned int op0, unsigned int op1, unsigned int op2,
1540 unsigned int crn, unsigned int crm, unsigned int rt)
1542 const ARMCPRegInfo *ri;
1543 TCGv_i64 tcg_rt;
1545 ri = get_arm_cp_reginfo(s->cp_regs,
1546 ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
1547 crn, crm, op0, op1, op2));
1549 if (!ri) {
1550 /* Unknown register; this might be a guest error or a QEMU
1551 * unimplemented feature.
1553 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
1554 "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
1555 isread ? "read" : "write", op0, op1, crn, crm, op2);
1556 unallocated_encoding(s);
1557 return;
1560 /* Check access permissions */
1561 if (!cp_access_ok(s->current_el, ri, isread)) {
1562 unallocated_encoding(s);
1563 return;
1566 if (ri->accessfn) {
1567 /* Emit code to perform further access permissions checks at
1568 * runtime; this may result in an exception.
1570 TCGv_ptr tmpptr;
1571 TCGv_i32 tcg_syn, tcg_isread;
1572 uint32_t syndrome;
1574 gen_a64_set_pc_im(s->pc - 4);
1575 tmpptr = tcg_const_ptr(ri);
1576 syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
1577 tcg_syn = tcg_const_i32(syndrome);
1578 tcg_isread = tcg_const_i32(isread);
1579 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
1580 tcg_temp_free_ptr(tmpptr);
1581 tcg_temp_free_i32(tcg_syn);
1582 tcg_temp_free_i32(tcg_isread);
1585 /* Handle special cases first */
1586 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
1587 case ARM_CP_NOP:
1588 return;
1589 case ARM_CP_NZCV:
1590 tcg_rt = cpu_reg(s, rt);
1591 if (isread) {
1592 gen_get_nzcv(tcg_rt);
1593 } else {
1594 gen_set_nzcv(tcg_rt);
1596 return;
1597 case ARM_CP_CURRENTEL:
1598 /* Reads as current EL value from pstate, which is
1599 * guaranteed to be constant by the tb flags.
1601 tcg_rt = cpu_reg(s, rt);
1602 tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
1603 return;
1604 case ARM_CP_DC_ZVA:
1605 /* Writes clear the aligned block of memory which rt points into. */
1606 tcg_rt = cpu_reg(s, rt);
1607 gen_helper_dc_zva(cpu_env, tcg_rt);
1608 return;
1609 default:
1610 break;
1613 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
1614 gen_io_start();
1617 tcg_rt = cpu_reg(s, rt);
1619 if (isread) {
1620 if (ri->type & ARM_CP_CONST) {
1621 tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
1622 } else if (ri->readfn) {
1623 TCGv_ptr tmpptr;
1624 tmpptr = tcg_const_ptr(ri);
1625 gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
1626 tcg_temp_free_ptr(tmpptr);
1627 } else {
1628 tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
1630 } else {
1631 if (ri->type & ARM_CP_CONST) {
1632 /* If not forbidden by access permissions, treat as WI */
1633 return;
1634 } else if (ri->writefn) {
1635 TCGv_ptr tmpptr;
1636 tmpptr = tcg_const_ptr(ri);
1637 gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
1638 tcg_temp_free_ptr(tmpptr);
1639 } else {
1640 tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
1644 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
1645 /* I/O operations must end the TB here (whether read or write) */
1646 gen_io_end();
1647 s->base.is_jmp = DISAS_UPDATE;
1648 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
1649 /* We default to ending the TB on a coprocessor register write,
1650 * but allow this to be suppressed by the register definition
1651 * (usually only necessary to work around guest bugs).
1653 s->base.is_jmp = DISAS_UPDATE;
1657 /* System
1658 * 31 22 21 20 19 18 16 15 12 11 8 7 5 4 0
1659 * +---------------------+---+-----+-----+-------+-------+-----+------+
1660 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 | CRn | CRm | op2 | Rt |
1661 * +---------------------+---+-----+-----+-------+-------+-----+------+
1663 static void disas_system(DisasContext *s, uint32_t insn)
1665 unsigned int l, op0, op1, crn, crm, op2, rt;
1666 l = extract32(insn, 21, 1);
1667 op0 = extract32(insn, 19, 2);
1668 op1 = extract32(insn, 16, 3);
1669 crn = extract32(insn, 12, 4);
1670 crm = extract32(insn, 8, 4);
1671 op2 = extract32(insn, 5, 3);
1672 rt = extract32(insn, 0, 5);
1674 if (op0 == 0) {
1675 if (l || rt != 31) {
1676 unallocated_encoding(s);
1677 return;
1679 switch (crn) {
1680 case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
1681 handle_hint(s, insn, op1, op2, crm);
1682 break;
1683 case 3: /* CLREX, DSB, DMB, ISB */
1684 handle_sync(s, insn, op1, op2, crm);
1685 break;
1686 case 4: /* MSR (immediate) */
1687 handle_msr_i(s, insn, op1, op2, crm);
1688 break;
1689 default:
1690 unallocated_encoding(s);
1691 break;
1693 return;
1695 handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
1698 /* Exception generation
1700 * 31 24 23 21 20 5 4 2 1 0
1701 * +-----------------+-----+------------------------+-----+----+
1702 * | 1 1 0 1 0 1 0 0 | opc | imm16 | op2 | LL |
1703 * +-----------------------+------------------------+----------+
1705 static void disas_exc(DisasContext *s, uint32_t insn)
1707 int opc = extract32(insn, 21, 3);
1708 int op2_ll = extract32(insn, 0, 5);
1709 int imm16 = extract32(insn, 5, 16);
1710 TCGv_i32 tmp;
1712 switch (opc) {
1713 case 0:
1714 /* For SVC, HVC and SMC we advance the single-step state
1715 * machine before taking the exception. This is architecturally
1716 * mandated, to ensure that single-stepping a system call
1717 * instruction works properly.
1719 switch (op2_ll) {
1720 case 1: /* SVC */
1721 gen_ss_advance(s);
1722 gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
1723 default_exception_el(s));
1724 break;
1725 case 2: /* HVC */
1726 if (s->current_el == 0) {
1727 unallocated_encoding(s);
1728 break;
1730 /* The pre HVC helper handles cases when HVC gets trapped
1731 * as an undefined insn by runtime configuration.
1733 gen_a64_set_pc_im(s->pc - 4);
1734 gen_helper_pre_hvc(cpu_env);
1735 gen_ss_advance(s);
1736 gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
1737 break;
1738 case 3: /* SMC */
1739 if (s->current_el == 0) {
1740 unallocated_encoding(s);
1741 break;
1743 gen_a64_set_pc_im(s->pc - 4);
1744 tmp = tcg_const_i32(syn_aa64_smc(imm16));
1745 gen_helper_pre_smc(cpu_env, tmp);
1746 tcg_temp_free_i32(tmp);
1747 gen_ss_advance(s);
1748 gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
1749 break;
1750 default:
1751 unallocated_encoding(s);
1752 break;
1754 break;
1755 case 1:
1756 if (op2_ll != 0) {
1757 unallocated_encoding(s);
1758 break;
1760 /* BRK */
1761 gen_exception_insn(s, 4, EXCP_BKPT, syn_aa64_bkpt(imm16),
1762 default_exception_el(s));
1763 break;
1764 case 2:
1765 if (op2_ll != 0) {
1766 unallocated_encoding(s);
1767 break;
1769 /* HLT. This has two purposes.
1770 * Architecturally, it is an external halting debug instruction.
1771 * Since QEMU doesn't implement external debug, we treat this as
1772 * the architecture requires when halting debug is disabled: it will UNDEF.
1773 * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
1775 if (semihosting_enabled() && imm16 == 0xf000) {
1776 #ifndef CONFIG_USER_ONLY
1777 /* In system mode, don't allow userspace access to semihosting,
1778 * to provide some semblance of security (and for consistency
1779 * with our 32-bit semihosting).
1781 if (s->current_el == 0) {
1782 unsupported_encoding(s, insn);
1783 break;
1785 #endif
1786 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1787 } else {
1788 unsupported_encoding(s, insn);
1790 break;
1791 case 5:
1792 if (op2_ll < 1 || op2_ll > 3) {
1793 unallocated_encoding(s);
1794 break;
1796 /* DCPS1, DCPS2, DCPS3 */
1797 unsupported_encoding(s, insn);
1798 break;
1799 default:
1800 unallocated_encoding(s);
1801 break;
1805 /* Unconditional branch (register)
1806 * 31 25 24 21 20 16 15 10 9 5 4 0
1807 * +---------------+-------+-------+-------+------+-------+
1808 * | 1 1 0 1 0 1 1 | opc | op2 | op3 | Rn | op4 |
1809 * +---------------+-------+-------+-------+------+-------+
1811 static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
1813 unsigned int opc, op2, op3, rn, op4;
1815 opc = extract32(insn, 21, 4);
1816 op2 = extract32(insn, 16, 5);
1817 op3 = extract32(insn, 10, 6);
1818 rn = extract32(insn, 5, 5);
1819 op4 = extract32(insn, 0, 5);
1821 if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
1822 unallocated_encoding(s);
1823 return;
1826 switch (opc) {
1827 case 0: /* BR */
1828 case 1: /* BLR */
1829 case 2: /* RET */
1830 gen_a64_set_pc(s, cpu_reg(s, rn));
1831 /* BLR also needs to load return address */
1832 if (opc == 1) {
1833 tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
1835 break;
1836 case 4: /* ERET */
1837 if (s->current_el == 0) {
1838 unallocated_encoding(s);
1839 return;
1841 gen_helper_exception_return(cpu_env);
1842 /* Must exit loop to check un-masked IRQs */
1843 s->base.is_jmp = DISAS_EXIT;
1844 return;
1845 case 5: /* DRPS */
1846 if (rn != 0x1f) {
1847 unallocated_encoding(s);
1848 } else {
1849 unsupported_encoding(s, insn);
1851 return;
1852 default:
1853 unallocated_encoding(s);
1854 return;
1857 s->base.is_jmp = DISAS_JUMP;
1860 /* Branches, exception generating and system instructions */
1861 static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
1863 switch (extract32(insn, 25, 7)) {
1864 case 0x0a: case 0x0b:
1865 case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
1866 disas_uncond_b_imm(s, insn);
1867 break;
1868 case 0x1a: case 0x5a: /* Compare & branch (immediate) */
1869 disas_comp_b_imm(s, insn);
1870 break;
1871 case 0x1b: case 0x5b: /* Test & branch (immediate) */
1872 disas_test_b_imm(s, insn);
1873 break;
1874 case 0x2a: /* Conditional branch (immediate) */
1875 disas_cond_b_imm(s, insn);
1876 break;
1877 case 0x6a: /* Exception generation / System */
1878 if (insn & (1 << 24)) {
1879 disas_system(s, insn);
1880 } else {
1881 disas_exc(s, insn);
1883 break;
1884 case 0x6b: /* Unconditional branch (register) */
1885 disas_uncond_b_reg(s, insn);
1886 break;
1887 default:
1888 unallocated_encoding(s);
1889 break;
1894 * Load/Store exclusive instructions are implemented by remembering
1895 * the value/address loaded, and seeing if these are the same
1896 * when the store is performed. This is not actually the architecturally
1897 * mandated semantics, but it works for typical guest code sequences
1898 * and avoids having to monitor regular stores.
1900 * The store exclusive uses the atomic cmpxchg primitives to avoid
1901 * races in multi-threaded linux-user and when MTTCG softmmu is
1902 * enabled.
1904 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
1905 TCGv_i64 addr, int size, bool is_pair)
1907 int idx = get_mem_index(s);
1908 TCGMemOp memop = s->be_data;
1910 g_assert(size <= 3);
1911 if (is_pair) {
1912 g_assert(size >= 2);
1913 if (size == 2) {
1914 /* The pair must be single-copy atomic for the doubleword. */
1915 memop |= MO_64 | MO_ALIGN;
1916 tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
1917 if (s->be_data == MO_LE) {
1918 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
1919 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
1920 } else {
1921 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
1922 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
1924 } else {
1925 /* The pair must be single-copy atomic for *each* doubleword, not
1926 the entire quadword; however, it must be quadword aligned. */
1927 memop |= MO_64;
1928 tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
1929 memop | MO_ALIGN_16);
1931 TCGv_i64 addr2 = tcg_temp_new_i64();
1932 tcg_gen_addi_i64(addr2, addr, 8);
1933 tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
1934 tcg_temp_free_i64(addr2);
1936 tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
1937 tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
1939 } else {
1940 memop |= size | MO_ALIGN;
1941 tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
1942 tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
1944 tcg_gen_mov_i64(cpu_exclusive_addr, addr);
1947 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
1948 TCGv_i64 addr, int size, int is_pair)
1950 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
1951 * && (!is_pair || env->exclusive_high == [addr + datasize])) {
1952 * [addr] = {Rt};
1953 * if (is_pair) {
1954 * [addr + datasize] = {Rt2};
1956 * {Rd} = 0;
1957 * } else {
1958 * {Rd} = 1;
1960 * env->exclusive_addr = -1;
1962 TCGLabel *fail_label = gen_new_label();
1963 TCGLabel *done_label = gen_new_label();
1964 TCGv_i64 tmp;
1966 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
1968 tmp = tcg_temp_new_i64();
1969 if (is_pair) {
1970 if (size == 2) {
1971 if (s->be_data == MO_LE) {
1972 tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
1973 } else {
1974 tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
1976 tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
1977 cpu_exclusive_val, tmp,
1978 get_mem_index(s),
1979 MO_64 | MO_ALIGN | s->be_data);
1980 tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
1981 } else if (s->be_data == MO_LE) {
1982 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
1983 gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
1984 cpu_exclusive_addr,
1985 cpu_reg(s, rt),
1986 cpu_reg(s, rt2));
1987 } else {
1988 gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
1989 cpu_reg(s, rt), cpu_reg(s, rt2));
1991 } else {
1992 if (tb_cflags(s->base.tb) & CF_PARALLEL) {
1993 gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
1994 cpu_exclusive_addr,
1995 cpu_reg(s, rt),
1996 cpu_reg(s, rt2));
1997 } else {
1998 gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
1999 cpu_reg(s, rt), cpu_reg(s, rt2));
2002 } else {
2003 tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
2004 cpu_reg(s, rt), get_mem_index(s),
2005 size | MO_ALIGN | s->be_data);
2006 tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
2008 tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
2009 tcg_temp_free_i64(tmp);
2010 tcg_gen_br(done_label);
2012 gen_set_label(fail_label);
2013 tcg_gen_movi_i64(cpu_reg(s, rd), 1);
2014 gen_set_label(done_label);
2015 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
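/*
 * Illustrative sketch only (hypothetical helper, not used by the translator):
 * the store-exclusive check described above amounts to "store only if the
 * remembered address and value still match", i.e. a compare-and-swap against
 * cpu_exclusive_addr/cpu_exclusive_val, with the monitor cleared either way.
 */
static inline bool model_store_exclusive(uint64_t *mem, uint64_t *excl_addr,
                                         uint64_t *excl_val,
                                         uint64_t addr, uint64_t new_val)
{
    bool success = false;

    if (addr == *excl_addr && mem[addr / 8] == *excl_val) {
        mem[addr / 8] = new_val;    /* the store takes effect */
        success = true;
    }
    *excl_addr = -1;                /* the monitor is always cleared */
    return success;                 /* Rd is set to 0 when this returns true, 1 otherwise */
}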
2018 /* Compute the Sixty-Four bit (SF) register size. This logic is derived
2019 * from the ARMv8 specs for LDR (Shared decode for all encodings).
2021 static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
2023 int opc0 = extract32(opc, 0, 1);
2024 int regsize;
2026 if (is_signed) {
2027 regsize = opc0 ? 32 : 64;
2028 } else {
2029 regsize = size == 3 ? 64 : 32;
2031 return regsize == 64;
2034 /* Load/store exclusive
2036 * 31 30 29 24 23 22 21 20 16 15 14 10 9 5 4 0
2037 * +-----+-------------+----+---+----+------+----+-------+------+------+
2038 * | sz | 0 0 1 0 0 0 | o2 | L | o1 | Rs | o0 | Rt2 | Rn | Rt |
2039 * +-----+-------------+----+---+----+------+----+-------+------+------+
2041 * sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
2042 * L: 0 -> store, 1 -> load
2043 * o2: 0 -> exclusive, 1 -> not
2044 * o1: 0 -> single register, 1 -> register pair
2045 * o0: 1 -> load-acquire/store-release, 0 -> not
2047 static void disas_ldst_excl(DisasContext *s, uint32_t insn)
2049 int rt = extract32(insn, 0, 5);
2050 int rn = extract32(insn, 5, 5);
2051 int rt2 = extract32(insn, 10, 5);
2052 int is_lasr = extract32(insn, 15, 1);
2053 int rs = extract32(insn, 16, 5);
2054 int is_pair = extract32(insn, 21, 1);
2055 int is_store = !extract32(insn, 22, 1);
2056 int is_excl = !extract32(insn, 23, 1);
2057 int size = extract32(insn, 30, 2);
2058 TCGv_i64 tcg_addr;
2060 if ((!is_excl && !is_pair && !is_lasr) ||
2061 (!is_excl && is_pair) ||
2062 (is_pair && size < 2)) {
2063 unallocated_encoding(s);
2064 return;
2067 if (rn == 31) {
2068 gen_check_sp_alignment(s);
2070 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2072 /* Note that since TCG is single threaded load-acquire/store-release
2073 * semantics require no extra if (is_lasr) { ... } handling.
2076 if (is_excl) {
2077 if (!is_store) {
2078 s->is_ldex = true;
2079 gen_load_exclusive(s, rt, rt2, tcg_addr, size, is_pair);
2080 if (is_lasr) {
2081 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2083 } else {
2084 if (is_lasr) {
2085 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2087 gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, is_pair);
2089 } else {
2090 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2091 bool iss_sf = disas_ldst_compute_iss_sf(size, false, 0);
2093 /* Generate ISS for non-exclusive accesses including LASR. */
2094 if (is_store) {
2095 if (is_lasr) {
2096 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2098 do_gpr_st(s, tcg_rt, tcg_addr, size,
2099 true, rt, iss_sf, is_lasr);
2100 } else {
2101 do_gpr_ld(s, tcg_rt, tcg_addr, size, false, false,
2102 true, rt, iss_sf, is_lasr);
2103 if (is_lasr) {
2104 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2111 * Load register (literal)
2113 * 31 30 29 27 26 25 24 23 5 4 0
2114 * +-----+-------+---+-----+-------------------+-------+
2115 * | opc | 0 1 1 | V | 0 0 | imm19 | Rt |
2116 * +-----+-------+---+-----+-------------------+-------+
2118 * V: 1 -> vector (simd/fp)
2119 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
2120  *                     10 -> 32 bit signed, 11 -> prefetch
2121 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
2123 static void disas_ld_lit(DisasContext *s, uint32_t insn)
2125 int rt = extract32(insn, 0, 5);
2126 int64_t imm = sextract32(insn, 5, 19) << 2;
2127 bool is_vector = extract32(insn, 26, 1);
2128 int opc = extract32(insn, 30, 2);
2129 bool is_signed = false;
2130 int size = 2;
2131 TCGv_i64 tcg_rt, tcg_addr;
2133 if (is_vector) {
2134 if (opc == 3) {
2135 unallocated_encoding(s);
2136 return;
2138 size = 2 + opc;
2139 if (!fp_access_check(s)) {
2140 return;
2142 } else {
2143 if (opc == 3) {
2144 /* PRFM (literal) : prefetch */
2145 return;
2147 size = 2 + extract32(opc, 0, 1);
2148 is_signed = extract32(opc, 1, 1);
2151 tcg_rt = cpu_reg(s, rt);
2153 tcg_addr = tcg_const_i64((s->pc - 4) + imm);
2154 if (is_vector) {
2155 do_fp_ld(s, rt, tcg_addr, size);
2156 } else {
2157 /* Only unsigned 32bit loads target 32bit registers. */
2158 bool iss_sf = opc != 0;
2160 do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
2161 true, rt, iss_sf, false);
2163 tcg_temp_free_i64(tcg_addr);
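/*
 * Minimal sketch (hypothetical helper; insn_addr is assumed to be the address
 * of the LDR-literal instruction itself).  It shows why the code above uses
 * "s->pc - 4": s->pc already points past the 4-byte instruction, while the
 * imm19 literal offset is relative to the instruction's own address.
 */
static inline uint64_t ldr_literal_target(uint64_t insn_addr, uint32_t insn)
{
    int64_t imm = sextract32(insn, 5, 19) << 2;     /* imm19 scaled by 4 */

    return insn_addr + imm;
}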
2167 * LDNP (Load Pair - non-temporal hint)
2168 * LDP (Load Pair - non vector)
2169 * LDPSW (Load Pair Signed Word - non vector)
2170 * STNP (Store Pair - non-temporal hint)
2171 * STP (Store Pair - non vector)
2172 * LDNP (Load Pair of SIMD&FP - non-temporal hint)
2173 * LDP (Load Pair of SIMD&FP)
2174 * STNP (Store Pair of SIMD&FP - non-temporal hint)
2175 * STP (Store Pair of SIMD&FP)
2177 * 31 30 29 27 26 25 24 23 22 21 15 14 10 9 5 4 0
2178 * +-----+-------+---+---+-------+---+-----------------------------+
2179 * | opc | 1 0 1 | V | 0 | index | L | imm7 | Rt2 | Rn | Rt |
2180 * +-----+-------+---+---+-------+---+-------+-------+------+------+
2182 * opc: LDP/STP/LDNP/STNP 00 -> 32 bit, 10 -> 64 bit
2183 * LDPSW 01
2184 * LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
2185 * V: 0 -> GPR, 1 -> Vector
2186 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
2187 * 10 -> signed offset, 11 -> pre-index
2188 * L: 0 -> Store 1 -> Load
2190  * Rt, Rt2 = GPR or SIMD registers to be transferred
2191 * Rn = general purpose register containing address
2192  * imm7 = signed offset, scaled by the access size (4, 8 or 16 bytes)
2194 static void disas_ldst_pair(DisasContext *s, uint32_t insn)
2196 int rt = extract32(insn, 0, 5);
2197 int rn = extract32(insn, 5, 5);
2198 int rt2 = extract32(insn, 10, 5);
2199 uint64_t offset = sextract64(insn, 15, 7);
2200 int index = extract32(insn, 23, 2);
2201 bool is_vector = extract32(insn, 26, 1);
2202 bool is_load = extract32(insn, 22, 1);
2203 int opc = extract32(insn, 30, 2);
2205 bool is_signed = false;
2206 bool postindex = false;
2207 bool wback = false;
2209 TCGv_i64 tcg_addr; /* calculated address */
2210 int size;
2212 if (opc == 3) {
2213 unallocated_encoding(s);
2214 return;
2217 if (is_vector) {
2218 size = 2 + opc;
2219 } else {
2220 size = 2 + extract32(opc, 1, 1);
2221 is_signed = extract32(opc, 0, 1);
2222 if (!is_load && is_signed) {
2223 unallocated_encoding(s);
2224 return;
2228 switch (index) {
2229 case 1: /* post-index */
2230 postindex = true;
2231 wback = true;
2232 break;
2233 case 0:
2234 /* signed offset with "non-temporal" hint. Since we don't emulate
2235 * caches we don't care about hints to the cache system about
2236 * data access patterns, and handle this identically to plain
2237 * signed offset.
2239 if (is_signed) {
2240 /* There is no non-temporal-hint version of LDPSW */
2241 unallocated_encoding(s);
2242 return;
2244 postindex = false;
2245 break;
2246 case 2: /* signed offset, rn not updated */
2247 postindex = false;
2248 break;
2249 case 3: /* pre-index */
2250 postindex = false;
2251 wback = true;
2252 break;
2255 if (is_vector && !fp_access_check(s)) {
2256 return;
2259 offset <<= size;
2261 if (rn == 31) {
2262 gen_check_sp_alignment(s);
2265 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2267 if (!postindex) {
2268 tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
2271 if (is_vector) {
2272 if (is_load) {
2273 do_fp_ld(s, rt, tcg_addr, size);
2274 } else {
2275 do_fp_st(s, rt, tcg_addr, size);
2277 tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
2278 if (is_load) {
2279 do_fp_ld(s, rt2, tcg_addr, size);
2280 } else {
2281 do_fp_st(s, rt2, tcg_addr, size);
2283 } else {
2284 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2285 TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
2287 if (is_load) {
2288 TCGv_i64 tmp = tcg_temp_new_i64();
2290 /* Do not modify tcg_rt before recognizing any exception
2291 * from the second load.
2293 do_gpr_ld(s, tmp, tcg_addr, size, is_signed, false,
2294 false, 0, false, false);
2295 tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
2296 do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false,
2297 false, 0, false, false);
2299 tcg_gen_mov_i64(tcg_rt, tmp);
2300 tcg_temp_free_i64(tmp);
2301 } else {
2302 do_gpr_st(s, tcg_rt, tcg_addr, size,
2303 false, 0, false, false);
2304 tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
2305 do_gpr_st(s, tcg_rt2, tcg_addr, size,
2306 false, 0, false, false);
2310 if (wback) {
2311 if (postindex) {
2312 tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
2313 } else {
2314 tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
2316 tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
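/*
 * Illustrative note (hypothetical helper): whichever writeback form is used,
 * the value written back above is simply base + offset.  The
 * "offset - (1 << size)" and "- (1 << size)" corrections only undo the
 * 1 << size by which tcg_addr was advanced to reach the second register of
 * the pair.
 */
static inline uint64_t ldst_pair_writeback(uint64_t base, int64_t offset)
{
    return base + offset;   /* same result for pre- and post-indexed forms */
}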
2321 * Load/store (immediate post-indexed)
2322 * Load/store (immediate pre-indexed)
2323 * Load/store (unscaled immediate)
2325 * 31 30 29 27 26 25 24 23 22 21 20 12 11 10 9 5 4 0
2326 * +----+-------+---+-----+-----+---+--------+-----+------+------+
2327 * |size| 1 1 1 | V | 0 0 | opc | 0 | imm9 | idx | Rn | Rt |
2328 * +----+-------+---+-----+-----+---+--------+-----+------+------+
2330  * idx = 01 -> post-indexed, 11 -> pre-indexed, 00 -> unscaled imm. (no writeback)
2331  *       10 -> unprivileged
2332 * V = 0 -> non-vector
2333 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
2334 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
2336 static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
2337 int opc,
2338 int size,
2339 int rt,
2340 bool is_vector)
2342 int rn = extract32(insn, 5, 5);
2343 int imm9 = sextract32(insn, 12, 9);
2344 int idx = extract32(insn, 10, 2);
2345 bool is_signed = false;
2346 bool is_store = false;
2347 bool is_extended = false;
2348 bool is_unpriv = (idx == 2);
2349 bool iss_valid = !is_vector;
2350 bool post_index;
2351 bool writeback;
2353 TCGv_i64 tcg_addr;
2355 if (is_vector) {
2356 size |= (opc & 2) << 1;
2357 if (size > 4 || is_unpriv) {
2358 unallocated_encoding(s);
2359 return;
2361 is_store = ((opc & 1) == 0);
2362 if (!fp_access_check(s)) {
2363 return;
2365 } else {
2366 if (size == 3 && opc == 2) {
2367 /* PRFM - prefetch */
2368 if (is_unpriv) {
2369 unallocated_encoding(s);
2370 return;
2372 return;
2374 if (opc == 3 && size > 1) {
2375 unallocated_encoding(s);
2376 return;
2378 is_store = (opc == 0);
2379 is_signed = extract32(opc, 1, 1);
2380 is_extended = (size < 3) && extract32(opc, 0, 1);
2383 switch (idx) {
2384 case 0:
2385 case 2:
2386 post_index = false;
2387 writeback = false;
2388 break;
2389 case 1:
2390 post_index = true;
2391 writeback = true;
2392 break;
2393 case 3:
2394 post_index = false;
2395 writeback = true;
2396 break;
2397 default:
2398 g_assert_not_reached();
2401 if (rn == 31) {
2402 gen_check_sp_alignment(s);
2404 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2406 if (!post_index) {
2407 tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
2410 if (is_vector) {
2411 if (is_store) {
2412 do_fp_st(s, rt, tcg_addr, size);
2413 } else {
2414 do_fp_ld(s, rt, tcg_addr, size);
2416 } else {
2417 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2418 int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
2419 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2421 if (is_store) {
2422 do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx,
2423 iss_valid, rt, iss_sf, false);
2424 } else {
2425 do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size,
2426 is_signed, is_extended, memidx,
2427 iss_valid, rt, iss_sf, false);
2431 if (writeback) {
2432 TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
2433 if (post_index) {
2434 tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
2436 tcg_gen_mov_i64(tcg_rn, tcg_addr);
2441 * Load/store (register offset)
2443 * 31 30 29 27 26 25 24 23 22 21 20 16 15 13 12 11 10 9 5 4 0
2444 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
2445 * |size| 1 1 1 | V | 0 0 | opc | 1 | Rm | opt | S| 1 0 | Rn | Rt |
2446 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
2448 * For non-vector:
2449 * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
2450 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
2451 * For vector:
2452 * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
2453 * opc<0>: 0 -> store, 1 -> load
2454 * V: 1 -> vector/simd
2455 * opt: extend encoding (see DecodeRegExtend)
2456 * S: if S=1 then scale (essentially index by sizeof(size))
2457 * Rt: register to transfer into/out of
2458 * Rn: address register or SP for base
2459 * Rm: offset register or ZR for offset
2461 static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
2462 int opc,
2463 int size,
2464 int rt,
2465 bool is_vector)
2467 int rn = extract32(insn, 5, 5);
2468 int shift = extract32(insn, 12, 1);
2469 int rm = extract32(insn, 16, 5);
2470 int opt = extract32(insn, 13, 3);
2471 bool is_signed = false;
2472 bool is_store = false;
2473 bool is_extended = false;
2475 TCGv_i64 tcg_rm;
2476 TCGv_i64 tcg_addr;
2478 if (extract32(opt, 1, 1) == 0) {
2479 unallocated_encoding(s);
2480 return;
2483 if (is_vector) {
2484 size |= (opc & 2) << 1;
2485 if (size > 4) {
2486 unallocated_encoding(s);
2487 return;
2489 is_store = !extract32(opc, 0, 1);
2490 if (!fp_access_check(s)) {
2491 return;
2493 } else {
2494 if (size == 3 && opc == 2) {
2495 /* PRFM - prefetch */
2496 return;
2498 if (opc == 3 && size > 1) {
2499 unallocated_encoding(s);
2500 return;
2502 is_store = (opc == 0);
2503 is_signed = extract32(opc, 1, 1);
2504 is_extended = (size < 3) && extract32(opc, 0, 1);
2507 if (rn == 31) {
2508 gen_check_sp_alignment(s);
2510 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2512 tcg_rm = read_cpu_reg(s, rm, 1);
2513 ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
2515 tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);
2517 if (is_vector) {
2518 if (is_store) {
2519 do_fp_st(s, rt, tcg_addr, size);
2520 } else {
2521 do_fp_ld(s, rt, tcg_addr, size);
2523 } else {
2524 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2525 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2526 if (is_store) {
2527 do_gpr_st(s, tcg_rt, tcg_addr, size,
2528 true, rt, iss_sf, false);
2529 } else {
2530 do_gpr_ld(s, tcg_rt, tcg_addr, size,
2531 is_signed, is_extended,
2532 true, rt, iss_sf, false);
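/*
 * Sketch of the register-offset addressing computed above, for the UXTW
 * extend option only (hypothetical helper): the offset register is
 * zero-extended and optionally scaled by the access size when S == 1.
 */
static inline uint64_t roffset_addr_uxtw(uint64_t rn, uint32_t rm_w,
                                         bool scaled, int size)
{
    return rn + ((uint64_t)rm_w << (scaled ? size : 0));
}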
2538 * Load/store (unsigned immediate)
2540 * 31 30 29 27 26 25 24 23 22 21 10 9 5
2541 * +----+-------+---+-----+-----+------------+-------+------+
2542 * |size| 1 1 1 | V | 0 1 | opc | imm12 | Rn | Rt |
2543 * +----+-------+---+-----+-----+------------+-------+------+
2545 * For non-vector:
2546 * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
2547 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
2548 * For vector:
2549 * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
2550 * opc<0>: 0 -> store, 1 -> load
2551 * Rn: base address register (inc SP)
2552 * Rt: target register
2554 static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
2555 int opc,
2556 int size,
2557 int rt,
2558 bool is_vector)
2560 int rn = extract32(insn, 5, 5);
2561 unsigned int imm12 = extract32(insn, 10, 12);
2562 unsigned int offset;
2564 TCGv_i64 tcg_addr;
2566 bool is_store;
2567 bool is_signed = false;
2568 bool is_extended = false;
2570 if (is_vector) {
2571 size |= (opc & 2) << 1;
2572 if (size > 4) {
2573 unallocated_encoding(s);
2574 return;
2576 is_store = !extract32(opc, 0, 1);
2577 if (!fp_access_check(s)) {
2578 return;
2580 } else {
2581 if (size == 3 && opc == 2) {
2582 /* PRFM - prefetch */
2583 return;
2585 if (opc == 3 && size > 1) {
2586 unallocated_encoding(s);
2587 return;
2589 is_store = (opc == 0);
2590 is_signed = extract32(opc, 1, 1);
2591 is_extended = (size < 3) && extract32(opc, 0, 1);
2594 if (rn == 31) {
2595 gen_check_sp_alignment(s);
2597 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2598 offset = imm12 << size;
2599 tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
2601 if (is_vector) {
2602 if (is_store) {
2603 do_fp_st(s, rt, tcg_addr, size);
2604 } else {
2605 do_fp_ld(s, rt, tcg_addr, size);
2607 } else {
2608 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2609 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2610 if (is_store) {
2611 do_gpr_st(s, tcg_rt, tcg_addr, size,
2612 true, rt, iss_sf, false);
2613 } else {
2614 do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended,
2615 true, rt, iss_sf, false);
2620 /* Load/store register (all forms) */
2621 static void disas_ldst_reg(DisasContext *s, uint32_t insn)
2623 int rt = extract32(insn, 0, 5);
2624 int opc = extract32(insn, 22, 2);
2625 bool is_vector = extract32(insn, 26, 1);
2626 int size = extract32(insn, 30, 2);
2628 switch (extract32(insn, 24, 2)) {
2629 case 0:
2630 if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) {
2631 disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
2632 } else {
2633 /* Load/store register (unscaled immediate)
2634 * Load/store immediate pre/post-indexed
2635 * Load/store register unprivileged
2637 disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
2639 break;
2640 case 1:
2641 disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
2642 break;
2643 default:
2644 unallocated_encoding(s);
2645 break;
2649 /* AdvSIMD load/store multiple structures
2651 * 31 30 29 23 22 21 16 15 12 11 10 9 5 4 0
2652 * +---+---+---------------+---+-------------+--------+------+------+------+
2653 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size | Rn | Rt |
2654 * +---+---+---------------+---+-------------+--------+------+------+------+
2656 * AdvSIMD load/store multiple structures (post-indexed)
2658 * 31 30 29 23 22 21 20 16 15 12 11 10 9 5 4 0
2659 * +---+---+---------------+---+---+---------+--------+------+------+------+
2660 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 | Rm | opcode | size | Rn | Rt |
2661 * +---+---+---------------+---+---+---------+--------+------+------+------+
2663 * Rt: first (or only) SIMD&FP register to be transferred
2664 * Rn: base address or SP
2665 * Rm (post-index only): post-index register (when !31) or size dependent #imm
2667 static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
2669 int rt = extract32(insn, 0, 5);
2670 int rn = extract32(insn, 5, 5);
2671 int size = extract32(insn, 10, 2);
2672 int opcode = extract32(insn, 12, 4);
2673 bool is_store = !extract32(insn, 22, 1);
2674 bool is_postidx = extract32(insn, 23, 1);
2675 bool is_q = extract32(insn, 30, 1);
2676 TCGv_i64 tcg_addr, tcg_rn;
2678 int ebytes = 1 << size;
2679 int elements = (is_q ? 128 : 64) / (8 << size);
2680 int rpt; /* num iterations */
2681 int selem; /* structure elements */
2682 int r;
2684 if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
2685 unallocated_encoding(s);
2686 return;
2689 /* From the shared decode logic */
2690 switch (opcode) {
2691 case 0x0:
2692 rpt = 1;
2693 selem = 4;
2694 break;
2695 case 0x2:
2696 rpt = 4;
2697 selem = 1;
2698 break;
2699 case 0x4:
2700 rpt = 1;
2701 selem = 3;
2702 break;
2703 case 0x6:
2704 rpt = 3;
2705 selem = 1;
2706 break;
2707 case 0x7:
2708 rpt = 1;
2709 selem = 1;
2710 break;
2711 case 0x8:
2712 rpt = 1;
2713 selem = 2;
2714 break;
2715 case 0xa:
2716 rpt = 2;
2717 selem = 1;
2718 break;
2719 default:
2720 unallocated_encoding(s);
2721 return;
2724 if (size == 3 && !is_q && selem != 1) {
2725 /* reserved */
2726 unallocated_encoding(s);
2727 return;
2730 if (!fp_access_check(s)) {
2731 return;
2734 if (rn == 31) {
2735 gen_check_sp_alignment(s);
2738 tcg_rn = cpu_reg_sp(s, rn);
2739 tcg_addr = tcg_temp_new_i64();
2740 tcg_gen_mov_i64(tcg_addr, tcg_rn);
2742 for (r = 0; r < rpt; r++) {
2743 int e;
2744 for (e = 0; e < elements; e++) {
2745 int tt = (rt + r) % 32;
2746 int xs;
2747 for (xs = 0; xs < selem; xs++) {
2748 if (is_store) {
2749 do_vec_st(s, tt, e, tcg_addr, size);
2750 } else {
2751 do_vec_ld(s, tt, e, tcg_addr, size);
2753 /* For non-quad operations, setting a slice of the low
2754 * 64 bits of the register clears the high 64 bits (in
2755 * the ARM ARM pseudocode this is implicit in the fact
2756 * that 'rval' is a 64 bit wide variable). We optimize
2757 * by noticing that we only need to do this the first
2758 * time we touch a register.
2760 if (!is_q && e == 0 && (r == 0 || xs == selem - 1)) {
2761 clear_vec_high(s, tt);
2764 tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
2765 tt = (tt + 1) % 32;
2770 if (is_postidx) {
2771 int rm = extract32(insn, 16, 5);
2772 if (rm == 31) {
2773 tcg_gen_mov_i64(tcg_rn, tcg_addr);
2774 } else {
2775 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
2778 tcg_temp_free_i64(tcg_addr);
2781 /* AdvSIMD load/store single structure
2783 * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
2784 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
2785 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size | Rn | Rt |
2786 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
2788 * AdvSIMD load/store single structure (post-indexed)
2790 * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
2791 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
2792 * | 0 | Q | 0 0 1 1 0 1 1 | L R | Rm | opc | S | size | Rn | Rt |
2793 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
2795 * Rt: first (or only) SIMD&FP register to be transferred
2796 * Rn: base address or SP
2797 * Rm (post-index only): post-index register (when !31) or size dependent #imm
2798 * index = encoded in Q:S:size dependent on size
2800 * lane_size = encoded in R, opc
2801 * transfer width = encoded in opc, S, size
2803 static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
2805 int rt = extract32(insn, 0, 5);
2806 int rn = extract32(insn, 5, 5);
2807 int size = extract32(insn, 10, 2);
2808 int S = extract32(insn, 12, 1);
2809 int opc = extract32(insn, 13, 3);
2810 int R = extract32(insn, 21, 1);
2811 int is_load = extract32(insn, 22, 1);
2812 int is_postidx = extract32(insn, 23, 1);
2813 int is_q = extract32(insn, 30, 1);
2815 int scale = extract32(opc, 1, 2);
2816 int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
2817 bool replicate = false;
2818 int index = is_q << 3 | S << 2 | size;
2819 int ebytes, xs;
2820 TCGv_i64 tcg_addr, tcg_rn;
2822 switch (scale) {
2823 case 3:
2824 if (!is_load || S) {
2825 unallocated_encoding(s);
2826 return;
2828 scale = size;
2829 replicate = true;
2830 break;
2831 case 0:
2832 break;
2833 case 1:
2834 if (extract32(size, 0, 1)) {
2835 unallocated_encoding(s);
2836 return;
2838 index >>= 1;
2839 break;
2840 case 2:
2841 if (extract32(size, 1, 1)) {
2842 unallocated_encoding(s);
2843 return;
2845 if (!extract32(size, 0, 1)) {
2846 index >>= 2;
2847 } else {
2848 if (S) {
2849 unallocated_encoding(s);
2850 return;
2852 index >>= 3;
2853 scale = 3;
2855 break;
2856 default:
2857 g_assert_not_reached();
2860 if (!fp_access_check(s)) {
2861 return;
2864 ebytes = 1 << scale;
2866 if (rn == 31) {
2867 gen_check_sp_alignment(s);
2870 tcg_rn = cpu_reg_sp(s, rn);
2871 tcg_addr = tcg_temp_new_i64();
2872 tcg_gen_mov_i64(tcg_addr, tcg_rn);
2874 for (xs = 0; xs < selem; xs++) {
2875 if (replicate) {
2876 /* Load and replicate to all elements */
2877 uint64_t mulconst;
2878 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
2880 tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
2881 get_mem_index(s), s->be_data + scale);
2882 switch (scale) {
2883 case 0:
2884 mulconst = 0x0101010101010101ULL;
2885 break;
2886 case 1:
2887 mulconst = 0x0001000100010001ULL;
2888 break;
2889 case 2:
2890 mulconst = 0x0000000100000001ULL;
2891 break;
2892 case 3:
2893 mulconst = 0;
2894 break;
2895 default:
2896 g_assert_not_reached();
2898 if (mulconst) {
2899 tcg_gen_muli_i64(tcg_tmp, tcg_tmp, mulconst);
2901 write_vec_element(s, tcg_tmp, rt, 0, MO_64);
2902 if (is_q) {
2903 write_vec_element(s, tcg_tmp, rt, 1, MO_64);
2904 } else {
2905 clear_vec_high(s, rt);
2907 tcg_temp_free_i64(tcg_tmp);
2908 } else {
2909 /* Load/store one element per register */
2910 if (is_load) {
2911 do_vec_ld(s, rt, index, tcg_addr, scale);
2912 } else {
2913 do_vec_st(s, rt, index, tcg_addr, scale);
2916 tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
2917 rt = (rt + 1) % 32;
2920 if (is_postidx) {
2921 int rm = extract32(insn, 16, 5);
2922 if (rm == 31) {
2923 tcg_gen_mov_i64(tcg_rn, tcg_addr);
2924 } else {
2925 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
2928 tcg_temp_free_i64(tcg_addr);
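/*
 * Standalone sketch of the "mulconst" trick used above for load-and-replicate:
 * multiplying a narrow value by a constant that has a 1 in the lowest bit of
 * every lane broadcasts it across the 64-bit doubleword.  Helper name is
 * illustrative only; shown for the byte case.
 */
static inline uint64_t broadcast_u8(uint8_t val)
{
    return (uint64_t)val * 0x0101010101010101ULL;   /* val in all 8 bytes */
}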
2931 /* Loads and stores */
2932 static void disas_ldst(DisasContext *s, uint32_t insn)
2934 switch (extract32(insn, 24, 6)) {
2935 case 0x08: /* Load/store exclusive */
2936 disas_ldst_excl(s, insn);
2937 break;
2938 case 0x18: case 0x1c: /* Load register (literal) */
2939 disas_ld_lit(s, insn);
2940 break;
2941 case 0x28: case 0x29:
2942 case 0x2c: case 0x2d: /* Load/store pair (all forms) */
2943 disas_ldst_pair(s, insn);
2944 break;
2945 case 0x38: case 0x39:
2946 case 0x3c: case 0x3d: /* Load/store register (all forms) */
2947 disas_ldst_reg(s, insn);
2948 break;
2949 case 0x0c: /* AdvSIMD load/store multiple structures */
2950 disas_ldst_multiple_struct(s, insn);
2951 break;
2952 case 0x0d: /* AdvSIMD load/store single structure */
2953 disas_ldst_single_struct(s, insn);
2954 break;
2955 default:
2956 unallocated_encoding(s);
2957 break;
2961 /* PC-rel. addressing
2962 * 31 30 29 28 24 23 5 4 0
2963 * +----+-------+-----------+-------------------+------+
2964 * | op | immlo | 1 0 0 0 0 | immhi | Rd |
2965 * +----+-------+-----------+-------------------+------+
2967 static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
2969 unsigned int page, rd;
2970 uint64_t base;
2971 uint64_t offset;
2973 page = extract32(insn, 31, 1);
2974 /* SignExtend(immhi:immlo) -> offset */
2975 offset = sextract64(insn, 5, 19);
2976 offset = offset << 2 | extract32(insn, 29, 2);
2977 rd = extract32(insn, 0, 5);
2978 base = s->pc - 4;
2980 if (page) {
2981 /* ADRP (page based) */
2982 base &= ~0xfff;
2983 offset <<= 12;
2986 tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
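/*
 * Minimal sketch of the address computed above (hypothetical helper;
 * insn_addr is assumed to be the address of the ADR/ADRP instruction).
 * ADR adds the signed 21-bit offset to the instruction address; ADRP scales
 * the offset by 4KiB and works relative to the instruction's 4KiB page.
 */
static inline uint64_t adr_target(uint64_t insn_addr, int64_t offset21,
                                  bool is_adrp)
{
    if (is_adrp) {
        return (insn_addr & ~0xfffull) + ((uint64_t)offset21 << 12);
    }
    return insn_addr + offset21;
}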
2990 * Add/subtract (immediate)
2992 * 31 30 29 28 24 23 22 21 10 9 5 4 0
2993 * +--+--+--+-----------+-----+-------------+-----+-----+
2994 * |sf|op| S| 1 0 0 0 1 |shift| imm12 | Rn | Rd |
2995 * +--+--+--+-----------+-----+-------------+-----+-----+
2997 * sf: 0 -> 32bit, 1 -> 64bit
2998 * op: 0 -> add , 1 -> sub
2999 * S: 1 -> set flags
3000 * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12
3002 static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
3004 int rd = extract32(insn, 0, 5);
3005 int rn = extract32(insn, 5, 5);
3006 uint64_t imm = extract32(insn, 10, 12);
3007 int shift = extract32(insn, 22, 2);
3008 bool setflags = extract32(insn, 29, 1);
3009 bool sub_op = extract32(insn, 30, 1);
3010 bool is_64bit = extract32(insn, 31, 1);
3012 TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
3013 TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
3014 TCGv_i64 tcg_result;
3016 switch (shift) {
3017 case 0x0:
3018 break;
3019 case 0x1:
3020 imm <<= 12;
3021 break;
3022 default:
3023 unallocated_encoding(s);
3024 return;
3027 tcg_result = tcg_temp_new_i64();
3028 if (!setflags) {
3029 if (sub_op) {
3030 tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
3031 } else {
3032 tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
3034 } else {
3035 TCGv_i64 tcg_imm = tcg_const_i64(imm);
3036 if (sub_op) {
3037 gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
3038 } else {
3039 gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
3041 tcg_temp_free_i64(tcg_imm);
3044 if (is_64bit) {
3045 tcg_gen_mov_i64(tcg_rd, tcg_result);
3046 } else {
3047 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
3050 tcg_temp_free_i64(tcg_result);
3053 /* The input should be a value in the bottom e bits (with higher
3054 * bits zero); returns that value replicated into every element
3055 * of size e in a 64 bit integer.
3057 static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
3059 assert(e != 0);
3060 while (e < 64) {
3061 mask |= mask << e;
3062 e *= 2;
3064 return mask;
3067 /* Return a value with the bottom len bits set (where 0 < len <= 64) */
3068 static inline uint64_t bitmask64(unsigned int length)
3070 assert(length > 0 && length <= 64);
3071 return ~0ULL >> (64 - length);
3074 /* Simplified variant of pseudocode DecodeBitMasks() for the case where we
3075 * only require the wmask. Returns false if the imms/immr/immn are a reserved
3076 * value (ie should cause a guest UNDEF exception), and true if they are
3077 * valid, in which case the decoded bit pattern is written to result.
3079 static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
3080 unsigned int imms, unsigned int immr)
3082 uint64_t mask;
3083 unsigned e, levels, s, r;
3084 int len;
3086 assert(immn < 2 && imms < 64 && immr < 64);
3088 /* The bit patterns we create here are 64 bit patterns which
3089 * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
3090 * 64 bits each. Each element contains the same value: a run
3091 * of between 1 and e-1 non-zero bits, rotated within the
3092 * element by between 0 and e-1 bits.
3094 * The element size and run length are encoded into immn (1 bit)
3095 * and imms (6 bits) as follows:
3096 * 64 bit elements: immn = 1, imms = <length of run - 1>
3097 * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
3098 * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
3099 * 8 bit elements: immn = 0, imms = 110 : <length of run - 1>
3100 * 4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
3101 * 2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
3102 * Notice that immn = 0, imms = 11111x is the only combination
3103 * not covered by one of the above options; this is reserved.
3104 * Further, <length of run - 1> all-ones is a reserved pattern.
3106 * In all cases the rotation is by immr % e (and immr is 6 bits).
3109 /* First determine the element size */
3110 len = 31 - clz32((immn << 6) | (~imms & 0x3f));
3111 if (len < 1) {
3112         /* This is the immn == 0, imms == 0b11111x case */
3113 return false;
3115 e = 1 << len;
3117 levels = e - 1;
3118 s = imms & levels;
3119 r = immr & levels;
3121 if (s == levels) {
3122 /* <length of run - 1> mustn't be all-ones. */
3123 return false;
3126 /* Create the value of one element: s+1 set bits rotated
3127 * by r within the element (which is e bits wide)...
3129 mask = bitmask64(s + 1);
3130 if (r) {
3131 mask = (mask >> r) | (mask << (e - r));
3132 mask &= bitmask64(e);
3134 /* ...then replicate the element over the whole 64 bit value */
3135 mask = bitfield_replicate(mask, e);
3136 *result = mask;
3137 return true;
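/*
 * Worked example (illustrative only, never called): immn=0, imms=0b100111,
 * immr=8 selects 16-bit elements, each holding a run of 8 ones rotated right
 * by 8, i.e. 0xff00 per element, so the decoded wmask is 0xff00ff00ff00ff00.
 */
static inline void logic_imm_decode_example(void)
{
    uint64_t wmask = 0;
    bool ok = logic_imm_decode_wmask(&wmask, 0, 0x27, 8);

    g_assert(ok);
    g_assert(wmask == 0xff00ff00ff00ff00ULL);
}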
3140 /* Logical (immediate)
3141 * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
3142 * +----+-----+-------------+---+------+------+------+------+
3143 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd |
3144 * +----+-----+-------------+---+------+------+------+------+
3146 static void disas_logic_imm(DisasContext *s, uint32_t insn)
3148 unsigned int sf, opc, is_n, immr, imms, rn, rd;
3149 TCGv_i64 tcg_rd, tcg_rn;
3150 uint64_t wmask;
3151 bool is_and = false;
3153 sf = extract32(insn, 31, 1);
3154 opc = extract32(insn, 29, 2);
3155 is_n = extract32(insn, 22, 1);
3156 immr = extract32(insn, 16, 6);
3157 imms = extract32(insn, 10, 6);
3158 rn = extract32(insn, 5, 5);
3159 rd = extract32(insn, 0, 5);
3161 if (!sf && is_n) {
3162 unallocated_encoding(s);
3163 return;
3166 if (opc == 0x3) { /* ANDS */
3167 tcg_rd = cpu_reg(s, rd);
3168 } else {
3169 tcg_rd = cpu_reg_sp(s, rd);
3171 tcg_rn = cpu_reg(s, rn);
3173 if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
3174 /* some immediate field values are reserved */
3175 unallocated_encoding(s);
3176 return;
3179 if (!sf) {
3180 wmask &= 0xffffffff;
3183 switch (opc) {
3184 case 0x3: /* ANDS */
3185 case 0x0: /* AND */
3186 tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
3187 is_and = true;
3188 break;
3189 case 0x1: /* ORR */
3190 tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
3191 break;
3192 case 0x2: /* EOR */
3193 tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
3194 break;
3195 default:
3196 assert(FALSE); /* must handle all above */
3197 break;
3200 if (!sf && !is_and) {
3201 /* zero extend final result; we know we can skip this for AND
3202 * since the immediate had the high 32 bits clear.
3204 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3207 if (opc == 3) { /* ANDS */
3208 gen_logic_CC(sf, tcg_rd);
3213 * Move wide (immediate)
3215 * 31 30 29 28 23 22 21 20 5 4 0
3216 * +--+-----+-------------+-----+----------------+------+
3217 * |sf| opc | 1 0 0 1 0 1 | hw | imm16 | Rd |
3218 * +--+-----+-------------+-----+----------------+------+
3220 * sf: 0 -> 32 bit, 1 -> 64 bit
3221 * opc: 00 -> N, 10 -> Z, 11 -> K
3222 * hw: shift/16 (0,16, and sf only 32, 48)
3224 static void disas_movw_imm(DisasContext *s, uint32_t insn)
3226 int rd = extract32(insn, 0, 5);
3227 uint64_t imm = extract32(insn, 5, 16);
3228 int sf = extract32(insn, 31, 1);
3229 int opc = extract32(insn, 29, 2);
3230 int pos = extract32(insn, 21, 2) << 4;
3231 TCGv_i64 tcg_rd = cpu_reg(s, rd);
3232 TCGv_i64 tcg_imm;
3234 if (!sf && (pos >= 32)) {
3235 unallocated_encoding(s);
3236 return;
3239 switch (opc) {
3240 case 0: /* MOVN */
3241 case 2: /* MOVZ */
3242 imm <<= pos;
3243 if (opc == 0) {
3244 imm = ~imm;
3246 if (!sf) {
3247 imm &= 0xffffffffu;
3249 tcg_gen_movi_i64(tcg_rd, imm);
3250 break;
3251 case 3: /* MOVK */
3252 tcg_imm = tcg_const_i64(imm);
3253 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
3254 tcg_temp_free_i64(tcg_imm);
3255 if (!sf) {
3256 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3258 break;
3259 default:
3260 unallocated_encoding(s);
3261 break;
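/*
 * Illustrative sketch of how a MOVZ/MOVK sequence composes a 64-bit constant,
 * mirroring the movi/deposit above (hypothetical helper): each MOVK replaces
 * one 16-bit chunk of the current value at position hw * 16, e.g.
 * MOVZ #0xdef0 followed by MOVK #0x9abc, LSL #16 yields 0x9abcdef0.
 */
static inline uint64_t movk_step(uint64_t cur, uint16_t imm16, int hw)
{
    int pos = hw * 16;

    return (cur & ~(0xffffULL << pos)) | ((uint64_t)imm16 << pos);
}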
3265 /* Bitfield
3266 * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
3267 * +----+-----+-------------+---+------+------+------+------+
3268 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd |
3269 * +----+-----+-------------+---+------+------+------+------+
3271 static void disas_bitfield(DisasContext *s, uint32_t insn)
3273 unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
3274 TCGv_i64 tcg_rd, tcg_tmp;
3276 sf = extract32(insn, 31, 1);
3277 opc = extract32(insn, 29, 2);
3278 n = extract32(insn, 22, 1);
3279 ri = extract32(insn, 16, 6);
3280 si = extract32(insn, 10, 6);
3281 rn = extract32(insn, 5, 5);
3282 rd = extract32(insn, 0, 5);
3283 bitsize = sf ? 64 : 32;
3285 if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
3286 unallocated_encoding(s);
3287 return;
3290 tcg_rd = cpu_reg(s, rd);
3292 /* Suppress the zero-extend for !sf. Since RI and SI are constrained
3293 to be smaller than bitsize, we'll never reference data outside the
3294 low 32-bits anyway. */
3295 tcg_tmp = read_cpu_reg(s, rn, 1);
3297 /* Recognize simple(r) extractions. */
3298 if (si >= ri) {
3299 /* Wd<s-r:0> = Wn<s:r> */
3300 len = (si - ri) + 1;
3301 if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */
3302 tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
3303 goto done;
3304 } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */
3305 tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
3306 return;
3308         /* opc == 1, BFXIL: fall through to deposit */
3309 tcg_gen_extract_i64(tcg_tmp, tcg_tmp, ri, len);
3310 pos = 0;
3311 } else {
3312 /* Handle the ri > si case with a deposit
3313 * Wd<32+s-r,32-r> = Wn<s:0>
3315 len = si + 1;
3316 pos = (bitsize - ri) & (bitsize - 1);
3319 if (opc == 0 && len < ri) {
3320 /* SBFM: sign extend the destination field from len to fill
3321 the balance of the word. Let the deposit below insert all
3322 of those sign bits. */
3323 tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
3324 len = ri;
3327     if (opc == 1) { /* BFM, BFXIL */
3328 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
3329 } else {
3330 /* SBFM or UBFM: We start with zero, and we haven't modified
3331 any bits outside bitsize, therefore the zero-extension
3332 below is unneeded. */
3333 tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
3334 return;
3337 done:
3338 if (!sf) { /* zero extend final result */
3339 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
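/*
 * Minimal sketch of the simple extraction case recognized above (si >= ri):
 * UBFM (UBFX/LSR/UXTB/UXTH) is a plain bitfield extract and SBFM is the same
 * extract sign-extended.  Hypothetical helper, unsigned variant only.
 */
static inline uint64_t ubfx_example(uint64_t rn, unsigned ri, unsigned si)
{
    return extract64(rn, ri, si - ri + 1);    /* Wd<s-r:0> = Wn<s:r> */
}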
3343 /* Extract
3344 * 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0
3345 * +----+------+-------------+---+----+------+--------+------+------+
3346 * | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd |
3347 * +----+------+-------------+---+----+------+--------+------+------+
3349 static void disas_extract(DisasContext *s, uint32_t insn)
3351 unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;
3353 sf = extract32(insn, 31, 1);
3354 n = extract32(insn, 22, 1);
3355 rm = extract32(insn, 16, 5);
3356 imm = extract32(insn, 10, 6);
3357 rn = extract32(insn, 5, 5);
3358 rd = extract32(insn, 0, 5);
3359 op21 = extract32(insn, 29, 2);
3360 op0 = extract32(insn, 21, 1);
3361 bitsize = sf ? 64 : 32;
3363 if (sf != n || op21 || op0 || imm >= bitsize) {
3364 unallocated_encoding(s);
3365 } else {
3366 TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
3368 tcg_rd = cpu_reg(s, rd);
3370 if (unlikely(imm == 0)) {
3371 /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
3372 * so an extract from bit 0 is a special case.
3374 if (sf) {
3375 tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
3376 } else {
3377 tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
3379 } else if (rm == rn) { /* ROR */
3380 tcg_rm = cpu_reg(s, rm);
3381 if (sf) {
3382 tcg_gen_rotri_i64(tcg_rd, tcg_rm, imm);
3383 } else {
3384 TCGv_i32 tmp = tcg_temp_new_i32();
3385 tcg_gen_extrl_i64_i32(tmp, tcg_rm);
3386 tcg_gen_rotri_i32(tmp, tmp, imm);
3387 tcg_gen_extu_i32_i64(tcg_rd, tmp);
3388 tcg_temp_free_i32(tmp);
3390 } else {
3391 tcg_rm = read_cpu_reg(s, rm, sf);
3392 tcg_rn = read_cpu_reg(s, rn, sf);
3393 tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
3394 tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
3395 tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
3396 if (!sf) {
3397 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
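/*
 * Standalone sketch (hypothetical helper) of the general EXTR computation
 * above: the result is 64 bits taken from the Rn:Rm concatenation starting
 * at bit imm, which degenerates into ROR when Rn == Rm.  imm == 0 is handled
 * separately above because a shift by 64 would be undefined.
 */
static inline uint64_t extr64_example(uint64_t rn_hi, uint64_t rm_lo,
                                      unsigned imm)
{
    return (rm_lo >> imm) | (rn_hi << (64 - imm));   /* requires 0 < imm < 64 */
}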
3403 /* Data processing - immediate */
3404 static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
3406 switch (extract32(insn, 23, 6)) {
3407 case 0x20: case 0x21: /* PC-rel. addressing */
3408 disas_pc_rel_adr(s, insn);
3409 break;
3410 case 0x22: case 0x23: /* Add/subtract (immediate) */
3411 disas_add_sub_imm(s, insn);
3412 break;
3413 case 0x24: /* Logical (immediate) */
3414 disas_logic_imm(s, insn);
3415 break;
3416 case 0x25: /* Move wide (immediate) */
3417 disas_movw_imm(s, insn);
3418 break;
3419 case 0x26: /* Bitfield */
3420 disas_bitfield(s, insn);
3421 break;
3422 case 0x27: /* Extract */
3423 disas_extract(s, insn);
3424 break;
3425 default:
3426 unallocated_encoding(s);
3427 break;
3431 /* Shift a TCGv src by TCGv shift_amount, put result in dst.
3432 * Note that it is the caller's responsibility to ensure that the
3433 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
3434 * mandated semantics for out of range shifts.
3436 static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
3437 enum a64_shift_type shift_type, TCGv_i64 shift_amount)
3439 switch (shift_type) {
3440 case A64_SHIFT_TYPE_LSL:
3441 tcg_gen_shl_i64(dst, src, shift_amount);
3442 break;
3443 case A64_SHIFT_TYPE_LSR:
3444 tcg_gen_shr_i64(dst, src, shift_amount);
3445 break;
3446 case A64_SHIFT_TYPE_ASR:
3447 if (!sf) {
3448 tcg_gen_ext32s_i64(dst, src);
3450 tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
3451 break;
3452 case A64_SHIFT_TYPE_ROR:
3453 if (sf) {
3454 tcg_gen_rotr_i64(dst, src, shift_amount);
3455 } else {
3456 TCGv_i32 t0, t1;
3457 t0 = tcg_temp_new_i32();
3458 t1 = tcg_temp_new_i32();
3459 tcg_gen_extrl_i64_i32(t0, src);
3460 tcg_gen_extrl_i64_i32(t1, shift_amount);
3461 tcg_gen_rotr_i32(t0, t0, t1);
3462 tcg_gen_extu_i32_i64(dst, t0);
3463 tcg_temp_free_i32(t0);
3464 tcg_temp_free_i32(t1);
3466 break;
3467 default:
3468 assert(FALSE); /* all shift types should be handled */
3469 break;
3472 if (!sf) { /* zero extend final result */
3473 tcg_gen_ext32u_i64(dst, dst);
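/*
 * Sketch of what callers do to satisfy the range requirement above: the
 * AArch64 variable shifts (LSLV/LSRV/ASRV/RORV) take the shift amount modulo
 * the data size, so the amount is masked before reaching shift_reg().
 * Hypothetical helper showing the 64-bit LSLV case only.
 */
static inline uint64_t lslv64_example(uint64_t rn, uint64_t rm)
{
    return rn << (rm & 63);    /* shift amount is Rm modulo the data size */
}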
3477 /* Shift a TCGv src by immediate, put result in dst.
3478 * The shift amount must be in range (this should always be true as the
3479 * relevant instructions will UNDEF on bad shift immediates).
3481 static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
3482 enum a64_shift_type shift_type, unsigned int shift_i)
3484 assert(shift_i < (sf ? 64 : 32));
3486 if (shift_i == 0) {
3487 tcg_gen_mov_i64(dst, src);
3488 } else {
3489 TCGv_i64 shift_const;
3491 shift_const = tcg_const_i64(shift_i);
3492 shift_reg(dst, src, sf, shift_type, shift_const);
3493 tcg_temp_free_i64(shift_const);
3497 /* Logical (shifted register)
3498 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
3499 * +----+-----+-----------+-------+---+------+--------+------+------+
3500 * | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd |
3501 * +----+-----+-----------+-------+---+------+--------+------+------+
3503 static void disas_logic_reg(DisasContext *s, uint32_t insn)
3505 TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
3506 unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
3508 sf = extract32(insn, 31, 1);
3509 opc = extract32(insn, 29, 2);
3510 shift_type = extract32(insn, 22, 2);
3511 invert = extract32(insn, 21, 1);
3512 rm = extract32(insn, 16, 5);
3513 shift_amount = extract32(insn, 10, 6);
3514 rn = extract32(insn, 5, 5);
3515 rd = extract32(insn, 0, 5);
3517 if (!sf && (shift_amount & (1 << 5))) {
3518 unallocated_encoding(s);
3519 return;
3522 tcg_rd = cpu_reg(s, rd);
3524 if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
3525 /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
3526 * register-register MOV and MVN, so it is worth special casing.
3528 tcg_rm = cpu_reg(s, rm);
3529 if (invert) {
3530 tcg_gen_not_i64(tcg_rd, tcg_rm);
3531 if (!sf) {
3532 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3534 } else {
3535 if (sf) {
3536 tcg_gen_mov_i64(tcg_rd, tcg_rm);
3537 } else {
3538 tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
3541 return;
3544 tcg_rm = read_cpu_reg(s, rm, sf);
3546 if (shift_amount) {
3547 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
3550 tcg_rn = cpu_reg(s, rn);
3552 switch (opc | (invert << 2)) {
3553 case 0: /* AND */
3554 case 3: /* ANDS */
3555 tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
3556 break;
3557 case 1: /* ORR */
3558 tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
3559 break;
3560 case 2: /* EOR */
3561 tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
3562 break;
3563 case 4: /* BIC */
3564 case 7: /* BICS */
3565 tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
3566 break;
3567 case 5: /* ORN */
3568 tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
3569 break;
3570 case 6: /* EON */
3571 tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
3572 break;
3573 default:
3574 assert(FALSE);
3575 break;
3578 if (!sf) {
3579 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3582 if (opc == 3) {
3583 gen_logic_CC(sf, tcg_rd);
3588 * Add/subtract (extended register)
3590 * 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0|
3591 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
3592 * |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd |
3593 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
3595 * sf: 0 -> 32bit, 1 -> 64bit
3596 * op: 0 -> add , 1 -> sub
3597 * S: 1 -> set flags
3598 * opt: 00
3599 * option: extension type (see DecodeRegExtend)
3600 * imm3: optional shift to Rm
3602 * Rd = Rn + LSL(extend(Rm), amount)
3604 static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
3606 int rd = extract32(insn, 0, 5);
3607 int rn = extract32(insn, 5, 5);
3608 int imm3 = extract32(insn, 10, 3);
3609 int option = extract32(insn, 13, 3);
3610 int rm = extract32(insn, 16, 5);
3611 bool setflags = extract32(insn, 29, 1);
3612 bool sub_op = extract32(insn, 30, 1);
3613 bool sf = extract32(insn, 31, 1);
3615 TCGv_i64 tcg_rm, tcg_rn; /* temps */
3616 TCGv_i64 tcg_rd;
3617 TCGv_i64 tcg_result;
3619 if (imm3 > 4) {
3620 unallocated_encoding(s);
3621 return;
3624 /* non-flag setting ops may use SP */
3625 if (!setflags) {
3626 tcg_rd = cpu_reg_sp(s, rd);
3627 } else {
3628 tcg_rd = cpu_reg(s, rd);
3630 tcg_rn = read_cpu_reg_sp(s, rn, sf);
3632 tcg_rm = read_cpu_reg(s, rm, sf);
3633 ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
3635 tcg_result = tcg_temp_new_i64();
3637 if (!setflags) {
3638 if (sub_op) {
3639 tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
3640 } else {
3641 tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
3643 } else {
3644 if (sub_op) {
3645 gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
3646 } else {
3647 gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
3651 if (sf) {
3652 tcg_gen_mov_i64(tcg_rd, tcg_result);
3653 } else {
3654 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
3657 tcg_temp_free_i64(tcg_result);
3661 * Add/subtract (shifted register)
3663 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
3664 * +--+--+--+-----------+-----+--+-------+---------+------+------+
3665 * |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd |
3666 * +--+--+--+-----------+-----+--+-------+---------+------+------+
3668 * sf: 0 -> 32bit, 1 -> 64bit
3669 * op: 0 -> add , 1 -> sub
3670 * S: 1 -> set flags
3671 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
3672 * imm6: Shift amount to apply to Rm before the add/sub
3674 static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
3676 int rd = extract32(insn, 0, 5);
3677 int rn = extract32(insn, 5, 5);
3678 int imm6 = extract32(insn, 10, 6);
3679 int rm = extract32(insn, 16, 5);
3680 int shift_type = extract32(insn, 22, 2);
3681 bool setflags = extract32(insn, 29, 1);
3682 bool sub_op = extract32(insn, 30, 1);
3683 bool sf = extract32(insn, 31, 1);
3685 TCGv_i64 tcg_rd = cpu_reg(s, rd);
3686 TCGv_i64 tcg_rn, tcg_rm;
3687 TCGv_i64 tcg_result;
3689 if ((shift_type == 3) || (!sf && (imm6 > 31))) {
3690 unallocated_encoding(s);
3691 return;
3694 tcg_rn = read_cpu_reg(s, rn, sf);
3695 tcg_rm = read_cpu_reg(s, rm, sf);
3697 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
3699 tcg_result = tcg_temp_new_i64();
3701 if (!setflags) {
3702 if (sub_op) {
3703 tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
3704 } else {
3705 tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
3707 } else {
3708 if (sub_op) {
3709 gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
3710 } else {
3711 gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
3715 if (sf) {
3716 tcg_gen_mov_i64(tcg_rd, tcg_result);
3717 } else {
3718 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
3721 tcg_temp_free_i64(tcg_result);
3724 /* Data-processing (3 source)
3726 * 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0
3727 * +--+------+-----------+------+------+----+------+------+------+
3728 * |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd |
3729 * +--+------+-----------+------+------+----+------+------+------+
3731 static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
3733 int rd = extract32(insn, 0, 5);
3734 int rn = extract32(insn, 5, 5);
3735 int ra = extract32(insn, 10, 5);
3736 int rm = extract32(insn, 16, 5);
3737 int op_id = (extract32(insn, 29, 3) << 4) |
3738 (extract32(insn, 21, 3) << 1) |
3739 extract32(insn, 15, 1);
3740 bool sf = extract32(insn, 31, 1);
3741 bool is_sub = extract32(op_id, 0, 1);
3742 bool is_high = extract32(op_id, 2, 1);
3743 bool is_signed = false;
3744 TCGv_i64 tcg_op1;
3745 TCGv_i64 tcg_op2;
3746 TCGv_i64 tcg_tmp;
3748 /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
3749 switch (op_id) {
3750 case 0x42: /* SMADDL */
3751 case 0x43: /* SMSUBL */
3752 case 0x44: /* SMULH */
3753 is_signed = true;
3754 break;
3755 case 0x0: /* MADD (32bit) */
3756 case 0x1: /* MSUB (32bit) */
3757 case 0x40: /* MADD (64bit) */
3758 case 0x41: /* MSUB (64bit) */
3759 case 0x4a: /* UMADDL */
3760 case 0x4b: /* UMSUBL */
3761 case 0x4c: /* UMULH */
3762 break;
3763 default:
3764 unallocated_encoding(s);
3765 return;
3768 if (is_high) {
3769 TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
3770 TCGv_i64 tcg_rd = cpu_reg(s, rd);
3771 TCGv_i64 tcg_rn = cpu_reg(s, rn);
3772 TCGv_i64 tcg_rm = cpu_reg(s, rm);
3774 if (is_signed) {
3775 tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
3776 } else {
3777 tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
3780 tcg_temp_free_i64(low_bits);
3781 return;
3784 tcg_op1 = tcg_temp_new_i64();
3785 tcg_op2 = tcg_temp_new_i64();
3786 tcg_tmp = tcg_temp_new_i64();
3788 if (op_id < 0x42) {
3789 tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
3790 tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
3791 } else {
3792 if (is_signed) {
3793 tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
3794 tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
3795 } else {
3796 tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
3797 tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
3801 if (ra == 31 && !is_sub) {
3802 /* Special-case MADD with rA == XZR; it is the standard MUL alias */
3803 tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
3804 } else {
3805 tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
3806 if (is_sub) {
3807 tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
3808 } else {
3809 tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
3813 if (!sf) {
3814 tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
3817 tcg_temp_free_i64(tcg_op1);
3818 tcg_temp_free_i64(tcg_op2);
3819 tcg_temp_free_i64(tcg_tmp);
3822 /* Add/subtract (with carry)
3823 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0
3824 * +--+--+--+------------------------+------+---------+------+-----+
3825 * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | opcode2 | Rn | Rd |
3826 * +--+--+--+------------------------+------+---------+------+-----+
3827 * [000000]
3830 static void disas_adc_sbc(DisasContext *s, uint32_t insn)
3832 unsigned int sf, op, setflags, rm, rn, rd;
3833 TCGv_i64 tcg_y, tcg_rn, tcg_rd;
3835 if (extract32(insn, 10, 6) != 0) {
3836 unallocated_encoding(s);
3837 return;
3840 sf = extract32(insn, 31, 1);
3841 op = extract32(insn, 30, 1);
3842 setflags = extract32(insn, 29, 1);
3843 rm = extract32(insn, 16, 5);
3844 rn = extract32(insn, 5, 5);
3845 rd = extract32(insn, 0, 5);
3847 tcg_rd = cpu_reg(s, rd);
3848 tcg_rn = cpu_reg(s, rn);
3850 if (op) {
3851 tcg_y = new_tmp_a64(s);
3852 tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
3853 } else {
3854 tcg_y = cpu_reg(s, rm);
3857 if (setflags) {
3858 gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
3859 } else {
3860 gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
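/*
 * Arithmetic identity relied on above (illustrative only): subtract-with-
 * carry is an add-with-carry of the bitwise complement, because in two's
 * complement rn - rm - (1 - C) == rn + ~rm + C.
 */
static inline uint64_t sbc64_example(uint64_t rn, uint64_t rm,
                                     unsigned carry_in)
{
    return rn + ~rm + carry_in;    /* carry_in is the AArch64 C flag */
}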
3864 /* Conditional compare (immediate / register)
3865 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
3866 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
3867 * |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv |
3868 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
3869 * [1] y [0] [0]
3871 static void disas_cc(DisasContext *s, uint32_t insn)
3873 unsigned int sf, op, y, cond, rn, nzcv, is_imm;
3874 TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
3875 TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
3876 DisasCompare c;
3878 if (!extract32(insn, 29, 1)) {
3879 unallocated_encoding(s);
3880 return;
3882 if (insn & (1 << 10 | 1 << 4)) {
3883 unallocated_encoding(s);
3884 return;
3886 sf = extract32(insn, 31, 1);
3887 op = extract32(insn, 30, 1);
3888 is_imm = extract32(insn, 11, 1);
3889 y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
3890 cond = extract32(insn, 12, 4);
3891 rn = extract32(insn, 5, 5);
3892 nzcv = extract32(insn, 0, 4);
3894 /* Set T0 = !COND. */
3895 tcg_t0 = tcg_temp_new_i32();
3896 arm_test_cc(&c, cond);
3897 tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
3898 arm_free_cc(&c);
3900 /* Load the arguments for the new comparison. */
3901 if (is_imm) {
3902 tcg_y = new_tmp_a64(s);
3903 tcg_gen_movi_i64(tcg_y, y);
3904 } else {
3905 tcg_y = cpu_reg(s, y);
3907 tcg_rn = cpu_reg(s, rn);
3909 /* Set the flags for the new comparison. */
3910 tcg_tmp = tcg_temp_new_i64();
3911 if (op) {
3912 gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
3913 } else {
3914 gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
3916 tcg_temp_free_i64(tcg_tmp);
3918 /* If COND was false, force the flags to #nzcv. Compute two masks
3919 * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
3920 * For tcg hosts that support ANDC, we can make do with just T1.
3921 * In either case, allow the tcg optimizer to delete any unused mask.
3923 tcg_t1 = tcg_temp_new_i32();
3924 tcg_t2 = tcg_temp_new_i32();
3925 tcg_gen_neg_i32(tcg_t1, tcg_t0);
3926 tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
3928 if (nzcv & 8) { /* N */
3929 tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
3930 } else {
3931 if (TCG_TARGET_HAS_andc_i32) {
3932 tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
3933 } else {
3934 tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
3937 if (nzcv & 4) { /* Z */
3938 if (TCG_TARGET_HAS_andc_i32) {
3939 tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
3940 } else {
3941 tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
3943 } else {
3944 tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
3946 if (nzcv & 2) { /* C */
3947 tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
3948 } else {
3949 if (TCG_TARGET_HAS_andc_i32) {
3950 tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
3951 } else {
3952 tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
3955 if (nzcv & 1) { /* V */
3956 tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
3957 } else {
3958 if (TCG_TARGET_HAS_andc_i32) {
3959 tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
3960 } else {
3961 tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
3964 tcg_temp_free_i32(tcg_t0);
3965 tcg_temp_free_i32(tcg_t1);
3966 tcg_temp_free_i32(tcg_t2);
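/*
 * Standalone sketch of the masking trick used above, for a single flag word
 * (helper names are illustrative only).  t0 = !COND, so t1 = -t0 is all-ones
 * exactly when the condition failed and t2 = t0 - 1 is all-ones exactly when
 * it held: OR-ing with t1 forces the flag on failure, AND-ing with t2 (or
 * ANDC with t1) clears it, and either way the flag is untouched when the
 * condition held.
 */
static inline uint32_t force_flag_example(uint32_t flag, bool cond_held,
                                          bool wanted_value)
{
    uint32_t t0 = !cond_held;
    uint32_t t1 = -t0;          /* all-ones iff the condition failed */
    uint32_t t2 = t0 - 1;       /* all-ones iff the condition held */

    return wanted_value ? (flag | t1) : (flag & t2);
}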
3969 /* Conditional select
3970 * 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0
3971 * +----+----+---+-----------------+------+------+-----+------+------+
3972 * | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd |
3973 * +----+----+---+-----------------+------+------+-----+------+------+
3975 static void disas_cond_select(DisasContext *s, uint32_t insn)
3977 unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
3978 TCGv_i64 tcg_rd, zero;
3979 DisasCompare64 c;
3981 if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
3982 /* S == 1 or op2<1> == 1 */
3983 unallocated_encoding(s);
3984 return;
3986 sf = extract32(insn, 31, 1);
3987 else_inv = extract32(insn, 30, 1);
3988 rm = extract32(insn, 16, 5);
3989 cond = extract32(insn, 12, 4);
3990 else_inc = extract32(insn, 10, 1);
3991 rn = extract32(insn, 5, 5);
3992 rd = extract32(insn, 0, 5);
3994 tcg_rd = cpu_reg(s, rd);
3996 a64_test_cc(&c, cond);
3997 zero = tcg_const_i64(0);
3999 if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
4000 /* CSET & CSETM. */
4001 tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
4002 if (else_inv) {
4003 tcg_gen_neg_i64(tcg_rd, tcg_rd);
4005 } else {
4006 TCGv_i64 t_true = cpu_reg(s, rn);
4007 TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
4008 if (else_inv && else_inc) {
4009 tcg_gen_neg_i64(t_false, t_false);
4010 } else if (else_inv) {
4011 tcg_gen_not_i64(t_false, t_false);
4012 } else if (else_inc) {
4013 tcg_gen_addi_i64(t_false, t_false, 1);
4015 tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
4018 tcg_temp_free_i64(zero);
4019 a64_free_cc(&c);
4021 if (!sf) {
4022 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4026 static void handle_clz(DisasContext *s, unsigned int sf,
4027 unsigned int rn, unsigned int rd)
4029 TCGv_i64 tcg_rd, tcg_rn;
4030 tcg_rd = cpu_reg(s, rd);
4031 tcg_rn = cpu_reg(s, rn);
4033 if (sf) {
4034 tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
4035 } else {
4036 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4037 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4038 tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
4039 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4040 tcg_temp_free_i32(tcg_tmp32);
4044 static void handle_cls(DisasContext *s, unsigned int sf,
4045 unsigned int rn, unsigned int rd)
4047 TCGv_i64 tcg_rd, tcg_rn;
4048 tcg_rd = cpu_reg(s, rd);
4049 tcg_rn = cpu_reg(s, rn);
4051 if (sf) {
4052 tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
4053 } else {
4054 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4055 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4056 tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
4057 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4058 tcg_temp_free_i32(tcg_tmp32);
4062 static void handle_rbit(DisasContext *s, unsigned int sf,
4063 unsigned int rn, unsigned int rd)
4065 TCGv_i64 tcg_rd, tcg_rn;
4066 tcg_rd = cpu_reg(s, rd);
4067 tcg_rn = cpu_reg(s, rn);
4069 if (sf) {
4070 gen_helper_rbit64(tcg_rd, tcg_rn);
4071 } else {
4072 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4073 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4074 gen_helper_rbit(tcg_tmp32, tcg_tmp32);
4075 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4076 tcg_temp_free_i32(tcg_tmp32);
4080 /* REV with sf==1, opcode==3 ("REV64") */
4081 static void handle_rev64(DisasContext *s, unsigned int sf,
4082 unsigned int rn, unsigned int rd)
4084 if (!sf) {
4085 unallocated_encoding(s);
4086 return;
4088 tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
4091 /* REV with sf==0, opcode==2
4092 * REV32 (sf==1, opcode==2)
4094 static void handle_rev32(DisasContext *s, unsigned int sf,
4095 unsigned int rn, unsigned int rd)
4097 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4099 if (sf) {
4100 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
4101 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4103 /* bswap32_i64 requires zero high word */
4104 tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
4105 tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
4106 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
4107 tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
4108 tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);
4110 tcg_temp_free_i64(tcg_tmp);
4111 } else {
4112 tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
4113 tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
4117 /* REV16 (opcode==1) */
4118 static void handle_rev16(DisasContext *s, unsigned int sf,
4119 unsigned int rn, unsigned int rd)
4121 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4122 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
4123 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4124 TCGv_i64 mask = tcg_const_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);
4126 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
4127 tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
4128 tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
4129 tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
4130 tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
4132 tcg_temp_free_i64(mask);
4133 tcg_temp_free_i64(tcg_tmp);
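/* A worked example of the shift/mask/or sequence above (illustrative
 * values only): with sf == 1 and Rn == 0x1122334455667788, the bytes
 * of each 16-bit lane are swapped, giving 0x2211443366558877.
 */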
4136 /* Data-processing (1 source)
4137 * 31 30 29 28 21 20 16 15 10 9 5 4 0
4138 * +----+---+---+-----------------+---------+--------+------+------+
4139 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd |
4140 * +----+---+---+-----------------+---------+--------+------+------+
4142 static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
4144 unsigned int sf, opcode, rn, rd;
4146 if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
4147 unallocated_encoding(s);
4148 return;
4151 sf = extract32(insn, 31, 1);
4152 opcode = extract32(insn, 10, 6);
4153 rn = extract32(insn, 5, 5);
4154 rd = extract32(insn, 0, 5);
4156 switch (opcode) {
4157 case 0: /* RBIT */
4158 handle_rbit(s, sf, rn, rd);
4159 break;
4160 case 1: /* REV16 */
4161 handle_rev16(s, sf, rn, rd);
4162 break;
4163 case 2: /* REV32 */
4164 handle_rev32(s, sf, rn, rd);
4165 break;
4166 case 3: /* REV64 */
4167 handle_rev64(s, sf, rn, rd);
4168 break;
4169 case 4: /* CLZ */
4170 handle_clz(s, sf, rn, rd);
4171 break;
4172 case 5: /* CLS */
4173 handle_cls(s, sf, rn, rd);
4174 break;
4178 static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
4179 unsigned int rm, unsigned int rn, unsigned int rd)
4181 TCGv_i64 tcg_n, tcg_m, tcg_rd;
4182 tcg_rd = cpu_reg(s, rd);
4184 if (!sf && is_signed) {
4185 tcg_n = new_tmp_a64(s);
4186 tcg_m = new_tmp_a64(s);
4187 tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
4188 tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
4189 } else {
4190 tcg_n = read_cpu_reg(s, rn, sf);
4191 tcg_m = read_cpu_reg(s, rm, sf);
4194 if (is_signed) {
4195 gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
4196 } else {
4197 gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
4200 if (!sf) { /* zero extend final result */
4201 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4205 /* LSLV, LSRV, ASRV, RORV */
4206 static void handle_shift_reg(DisasContext *s,
4207 enum a64_shift_type shift_type, unsigned int sf,
4208 unsigned int rm, unsigned int rn, unsigned int rd)
4210 TCGv_i64 tcg_shift = tcg_temp_new_i64();
4211 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4212 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4214 tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
4215 shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
4216 tcg_temp_free_i64(tcg_shift);
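/* Note that, as the AND above implements, the shift amount is taken
 * modulo the register width: e.g. (illustrative) a 64-bit LSLV with
 * Rm == 70 shifts by 70 & 63 == 6.
 */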
4219 /* CRC32[BHWX], CRC32C[BHWX] */
4220 static void handle_crc32(DisasContext *s,
4221 unsigned int sf, unsigned int sz, bool crc32c,
4222 unsigned int rm, unsigned int rn, unsigned int rd)
4224 TCGv_i64 tcg_acc, tcg_val;
4225 TCGv_i32 tcg_bytes;
4227 if (!arm_dc_feature(s, ARM_FEATURE_CRC)
4228 || (sf == 1 && sz != 3)
4229 || (sf == 0 && sz == 3)) {
4230 unallocated_encoding(s);
4231 return;
4234 if (sz == 3) {
4235 tcg_val = cpu_reg(s, rm);
4236 } else {
4237 uint64_t mask;
4238 switch (sz) {
4239 case 0:
4240 mask = 0xFF;
4241 break;
4242 case 1:
4243 mask = 0xFFFF;
4244 break;
4245 case 2:
4246 mask = 0xFFFFFFFF;
4247 break;
4248 default:
4249 g_assert_not_reached();
4251 tcg_val = new_tmp_a64(s);
4252 tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
4255 tcg_acc = cpu_reg(s, rn);
4256 tcg_bytes = tcg_const_i32(1 << sz);
4258 if (crc32c) {
4259 gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
4260 } else {
4261 gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
4264 tcg_temp_free_i32(tcg_bytes);
4267 /* Data-processing (2 source)
4268 * 31 30 29 28 21 20 16 15 10 9 5 4 0
4269 * +----+---+---+-----------------+------+--------+------+------+
4270 * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd |
4271 * +----+---+---+-----------------+------+--------+------+------+
4273 static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
4275 unsigned int sf, rm, opcode, rn, rd;
4276 sf = extract32(insn, 31, 1);
4277 rm = extract32(insn, 16, 5);
4278 opcode = extract32(insn, 10, 6);
4279 rn = extract32(insn, 5, 5);
4280 rd = extract32(insn, 0, 5);
4282 if (extract32(insn, 29, 1)) {
4283 unallocated_encoding(s);
4284 return;
4287 switch (opcode) {
4288 case 2: /* UDIV */
4289 handle_div(s, false, sf, rm, rn, rd);
4290 break;
4291 case 3: /* SDIV */
4292 handle_div(s, true, sf, rm, rn, rd);
4293 break;
4294 case 8: /* LSLV */
4295 handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
4296 break;
4297 case 9: /* LSRV */
4298 handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
4299 break;
4300 case 10: /* ASRV */
4301 handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
4302 break;
4303 case 11: /* RORV */
4304 handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
4305 break;
4306 case 16:
4307 case 17:
4308 case 18:
4309 case 19:
4310 case 20:
4311 case 21:
4312 case 22:
4313 case 23: /* CRC32 */
4315 int sz = extract32(opcode, 0, 2);
4316 bool crc32c = extract32(opcode, 2, 1);
4317 handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
4318 break;
4320 default:
4321 unallocated_encoding(s);
4322 break;
4326 /* Data processing - register */
4327 static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
4329 switch (extract32(insn, 24, 5)) {
4330 case 0x0a: /* Logical (shifted register) */
4331 disas_logic_reg(s, insn);
4332 break;
4333 case 0x0b: /* Add/subtract */
4334 if (insn & (1 << 21)) { /* (extended register) */
4335 disas_add_sub_ext_reg(s, insn);
4336 } else {
4337 disas_add_sub_reg(s, insn);
4339 break;
4340 case 0x1b: /* Data-processing (3 source) */
4341 disas_data_proc_3src(s, insn);
4342 break;
4343 case 0x1a:
4344 switch (extract32(insn, 21, 3)) {
4345 case 0x0: /* Add/subtract (with carry) */
4346 disas_adc_sbc(s, insn);
4347 break;
4348 case 0x2: /* Conditional compare */
4349 disas_cc(s, insn); /* both imm and reg forms */
4350 break;
4351 case 0x4: /* Conditional select */
4352 disas_cond_select(s, insn);
4353 break;
4354 case 0x6: /* Data-processing */
4355 if (insn & (1 << 30)) { /* (1 source) */
4356 disas_data_proc_1src(s, insn);
4357 } else { /* (2 source) */
4358 disas_data_proc_2src(s, insn);
4360 break;
4361 default:
4362 unallocated_encoding(s);
4363 break;
4365 break;
4366 default:
4367 unallocated_encoding(s);
4368 break;
4372 static void handle_fp_compare(DisasContext *s, bool is_double,
4373 unsigned int rn, unsigned int rm,
4374 bool cmp_with_zero, bool signal_all_nans)
4376 TCGv_i64 tcg_flags = tcg_temp_new_i64();
4377 TCGv_ptr fpst = get_fpstatus_ptr();
4379 if (is_double) {
4380 TCGv_i64 tcg_vn, tcg_vm;
4382 tcg_vn = read_fp_dreg(s, rn);
4383 if (cmp_with_zero) {
4384 tcg_vm = tcg_const_i64(0);
4385 } else {
4386 tcg_vm = read_fp_dreg(s, rm);
4388 if (signal_all_nans) {
4389 gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4390 } else {
4391 gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4393 tcg_temp_free_i64(tcg_vn);
4394 tcg_temp_free_i64(tcg_vm);
4395 } else {
4396 TCGv_i32 tcg_vn, tcg_vm;
4398 tcg_vn = read_fp_sreg(s, rn);
4399 if (cmp_with_zero) {
4400 tcg_vm = tcg_const_i32(0);
4401 } else {
4402 tcg_vm = read_fp_sreg(s, rm);
4404 if (signal_all_nans) {
4405 gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4406 } else {
4407 gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4409 tcg_temp_free_i32(tcg_vn);
4410 tcg_temp_free_i32(tcg_vm);
4413 tcg_temp_free_ptr(fpst);
4415 gen_set_nzcv(tcg_flags);
4417 tcg_temp_free_i64(tcg_flags);
4420 /* Floating point compare
4421 * 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0
4422 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
4423 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 |
4424 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
4426 static void disas_fp_compare(DisasContext *s, uint32_t insn)
4428 unsigned int mos, type, rm, op, rn, opc, op2r;
4430 mos = extract32(insn, 29, 3);
4431 type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
4432 rm = extract32(insn, 16, 5);
4433 op = extract32(insn, 14, 2);
4434 rn = extract32(insn, 5, 5);
4435 opc = extract32(insn, 3, 2);
4436 op2r = extract32(insn, 0, 3);
4438 if (mos || op || op2r || type > 1) {
4439 unallocated_encoding(s);
4440 return;
4443 if (!fp_access_check(s)) {
4444 return;
4447 handle_fp_compare(s, type, rn, rm, opc & 1, opc & 2);
4450 /* Floating point conditional compare
4451 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
4452 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
4453 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv |
4454 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
4456 static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
4458 unsigned int mos, type, rm, cond, rn, op, nzcv;
4459 TCGv_i64 tcg_flags;
4460 TCGLabel *label_continue = NULL;
4462 mos = extract32(insn, 29, 3);
4463 type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
4464 rm = extract32(insn, 16, 5);
4465 cond = extract32(insn, 12, 4);
4466 rn = extract32(insn, 5, 5);
4467 op = extract32(insn, 4, 1);
4468 nzcv = extract32(insn, 0, 4);
4470 if (mos || type > 1) {
4471 unallocated_encoding(s);
4472 return;
4475 if (!fp_access_check(s)) {
4476 return;
4479 if (cond < 0x0e) { /* not always */
4480 TCGLabel *label_match = gen_new_label();
4481 label_continue = gen_new_label();
4482 arm_gen_test_cc(cond, label_match);
4483 /* nomatch: */
4484 tcg_flags = tcg_const_i64(nzcv << 28);
4485 gen_set_nzcv(tcg_flags);
4486 tcg_temp_free_i64(tcg_flags);
4487 tcg_gen_br(label_continue);
4488 gen_set_label(label_match);
4491 handle_fp_compare(s, type, rn, rm, false, op);
4493 if (cond < 0x0e) {
4494 gen_set_label(label_continue);
4498 /* Floating point conditional select
4499 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
4500 * +---+---+---+-----------+------+---+------+------+-----+------+------+
4501 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 1 1 | Rn | Rd |
4502 * +---+---+---+-----------+------+---+------+------+-----+------+------+
4504 static void disas_fp_csel(DisasContext *s, uint32_t insn)
4506 unsigned int mos, type, rm, cond, rn, rd;
4507 TCGv_i64 t_true, t_false, t_zero;
4508 DisasCompare64 c;
4510 mos = extract32(insn, 29, 3);
4511 type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
4512 rm = extract32(insn, 16, 5);
4513 cond = extract32(insn, 12, 4);
4514 rn = extract32(insn, 5, 5);
4515 rd = extract32(insn, 0, 5);
4517 if (mos || type > 1) {
4518 unallocated_encoding(s);
4519 return;
4522 if (!fp_access_check(s)) {
4523 return;
4526 /* Zero extend sreg inputs to 64 bits now. */
4527 t_true = tcg_temp_new_i64();
4528 t_false = tcg_temp_new_i64();
4529 read_vec_element(s, t_true, rn, 0, type ? MO_64 : MO_32);
4530 read_vec_element(s, t_false, rm, 0, type ? MO_64 : MO_32);
4532 a64_test_cc(&c, cond);
4533 t_zero = tcg_const_i64(0);
4534 tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false);
4535 tcg_temp_free_i64(t_zero);
4536 tcg_temp_free_i64(t_false);
4537 a64_free_cc(&c);
4539 /* Note that sregs write back zeros to the high bits,
4540 and we've already done the zero-extension. */
4541 write_fp_dreg(s, rd, t_true);
4542 tcg_temp_free_i64(t_true);
4545 /* Floating-point data-processing (1 source) - single precision */
4546 static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
4548 TCGv_ptr fpst;
4549 TCGv_i32 tcg_op;
4550 TCGv_i32 tcg_res;
4552 fpst = get_fpstatus_ptr();
4553 tcg_op = read_fp_sreg(s, rn);
4554 tcg_res = tcg_temp_new_i32();
4556 switch (opcode) {
4557 case 0x0: /* FMOV */
4558 tcg_gen_mov_i32(tcg_res, tcg_op);
4559 break;
4560 case 0x1: /* FABS */
4561 gen_helper_vfp_abss(tcg_res, tcg_op);
4562 break;
4563 case 0x2: /* FNEG */
4564 gen_helper_vfp_negs(tcg_res, tcg_op);
4565 break;
4566 case 0x3: /* FSQRT */
4567 gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
4568 break;
4569 case 0x8: /* FRINTN */
4570 case 0x9: /* FRINTP */
4571 case 0xa: /* FRINTM */
4572 case 0xb: /* FRINTZ */
4573 case 0xc: /* FRINTA */
4575 TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
4577 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
4578 gen_helper_rints(tcg_res, tcg_op, fpst);
4580 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
4581 tcg_temp_free_i32(tcg_rmode);
4582 break;
4584 case 0xe: /* FRINTX */
4585 gen_helper_rints_exact(tcg_res, tcg_op, fpst);
4586 break;
4587 case 0xf: /* FRINTI */
4588 gen_helper_rints(tcg_res, tcg_op, fpst);
4589 break;
4590 default:
4591 abort();
4594 write_fp_sreg(s, rd, tcg_res);
4596 tcg_temp_free_ptr(fpst);
4597 tcg_temp_free_i32(tcg_op);
4598 tcg_temp_free_i32(tcg_res);
4601 /* Floating-point data-processing (1 source) - double precision */
4602 static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
4604 TCGv_ptr fpst;
4605 TCGv_i64 tcg_op;
4606 TCGv_i64 tcg_res;
4608 switch (opcode) {
4609 case 0x0: /* FMOV */
4610 gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
4611 return;
4614 fpst = get_fpstatus_ptr();
4615 tcg_op = read_fp_dreg(s, rn);
4616 tcg_res = tcg_temp_new_i64();
4618 switch (opcode) {
4619 case 0x1: /* FABS */
4620 gen_helper_vfp_absd(tcg_res, tcg_op);
4621 break;
4622 case 0x2: /* FNEG */
4623 gen_helper_vfp_negd(tcg_res, tcg_op);
4624 break;
4625 case 0x3: /* FSQRT */
4626 gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
4627 break;
4628 case 0x8: /* FRINTN */
4629 case 0x9: /* FRINTP */
4630 case 0xa: /* FRINTM */
4631 case 0xb: /* FRINTZ */
4632 case 0xc: /* FRINTA */
4634 TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
4636 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
4637 gen_helper_rintd(tcg_res, tcg_op, fpst);
4639 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
4640 tcg_temp_free_i32(tcg_rmode);
4641 break;
4643 case 0xe: /* FRINTX */
4644 gen_helper_rintd_exact(tcg_res, tcg_op, fpst);
4645 break;
4646 case 0xf: /* FRINTI */
4647 gen_helper_rintd(tcg_res, tcg_op, fpst);
4648 break;
4649 default:
4650 abort();
4653 write_fp_dreg(s, rd, tcg_res);
4655 tcg_temp_free_ptr(fpst);
4656 tcg_temp_free_i64(tcg_op);
4657 tcg_temp_free_i64(tcg_res);
4660 static void handle_fp_fcvt(DisasContext *s, int opcode,
4661 int rd, int rn, int dtype, int ntype)
4663 switch (ntype) {
4664 case 0x0:
4666 TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
4667 if (dtype == 1) {
4668 /* Single to double */
4669 TCGv_i64 tcg_rd = tcg_temp_new_i64();
4670 gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
4671 write_fp_dreg(s, rd, tcg_rd);
4672 tcg_temp_free_i64(tcg_rd);
4673 } else {
4674 /* Single to half */
4675 TCGv_i32 tcg_rd = tcg_temp_new_i32();
4676 gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, cpu_env);
4677 /* write_fp_sreg is OK here because top half of tcg_rd is zero */
4678 write_fp_sreg(s, rd, tcg_rd);
4679 tcg_temp_free_i32(tcg_rd);
4681 tcg_temp_free_i32(tcg_rn);
4682 break;
4684 case 0x1:
4686 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
4687 TCGv_i32 tcg_rd = tcg_temp_new_i32();
4688 if (dtype == 0) {
4689 /* Double to single */
4690 gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
4691 } else {
4692 /* Double to half */
4693 gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, cpu_env);
4694 /* write_fp_sreg is OK here because top half of tcg_rd is zero */
4696 write_fp_sreg(s, rd, tcg_rd);
4697 tcg_temp_free_i32(tcg_rd);
4698 tcg_temp_free_i64(tcg_rn);
4699 break;
4701 case 0x3:
4703 TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
4704 tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
4705 if (dtype == 0) {
4706 /* Half to single */
4707 TCGv_i32 tcg_rd = tcg_temp_new_i32();
4708 gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, cpu_env);
4709 write_fp_sreg(s, rd, tcg_rd);
4710 tcg_temp_free_i32(tcg_rd);
4711 } else {
4712 /* Half to double */
4713 TCGv_i64 tcg_rd = tcg_temp_new_i64();
4714 gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, cpu_env);
4715 write_fp_dreg(s, rd, tcg_rd);
4716 tcg_temp_free_i64(tcg_rd);
4718 tcg_temp_free_i32(tcg_rn);
4719 break;
4721 default:
4722 abort();
4726 /* Floating point data-processing (1 source)
4727 * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0
4728 * +---+---+---+-----------+------+---+--------+-----------+------+------+
4729 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd |
4730 * +---+---+---+-----------+------+---+--------+-----------+------+------+
4732 static void disas_fp_1src(DisasContext *s, uint32_t insn)
4734 int type = extract32(insn, 22, 2);
4735 int opcode = extract32(insn, 15, 6);
4736 int rn = extract32(insn, 5, 5);
4737 int rd = extract32(insn, 0, 5);
4739 switch (opcode) {
4740 case 0x4: case 0x5: case 0x7:
4742 /* FCVT between half, single and double precision */
4743 int dtype = extract32(opcode, 0, 2);
4744 if (type == 2 || dtype == type) {
4745 unallocated_encoding(s);
4746 return;
4748 if (!fp_access_check(s)) {
4749 return;
4752 handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
4753 break;
4755 case 0x0 ... 0x3:
4756 case 0x8 ... 0xc:
4757 case 0xe ... 0xf:
4758 /* 32-to-32 and 64-to-64 ops */
4759 switch (type) {
4760 case 0:
4761 if (!fp_access_check(s)) {
4762 return;
4765 handle_fp_1src_single(s, opcode, rd, rn);
4766 break;
4767 case 1:
4768 if (!fp_access_check(s)) {
4769 return;
4772 handle_fp_1src_double(s, opcode, rd, rn);
4773 break;
4774 default:
4775 unallocated_encoding(s);
4777 break;
4778 default:
4779 unallocated_encoding(s);
4780 break;
4784 /* Floating-point data-processing (2 source) - single precision */
4785 static void handle_fp_2src_single(DisasContext *s, int opcode,
4786 int rd, int rn, int rm)
4788 TCGv_i32 tcg_op1;
4789 TCGv_i32 tcg_op2;
4790 TCGv_i32 tcg_res;
4791 TCGv_ptr fpst;
4793 tcg_res = tcg_temp_new_i32();
4794 fpst = get_fpstatus_ptr();
4795 tcg_op1 = read_fp_sreg(s, rn);
4796 tcg_op2 = read_fp_sreg(s, rm);
4798 switch (opcode) {
4799 case 0x0: /* FMUL */
4800 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
4801 break;
4802 case 0x1: /* FDIV */
4803 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
4804 break;
4805 case 0x2: /* FADD */
4806 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
4807 break;
4808 case 0x3: /* FSUB */
4809 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
4810 break;
4811 case 0x4: /* FMAX */
4812 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
4813 break;
4814 case 0x5: /* FMIN */
4815 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
4816 break;
4817 case 0x6: /* FMAXNM */
4818 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
4819 break;
4820 case 0x7: /* FMINNM */
4821 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
4822 break;
4823 case 0x8: /* FNMUL */
4824 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
4825 gen_helper_vfp_negs(tcg_res, tcg_res);
4826 break;
4829 write_fp_sreg(s, rd, tcg_res);
4831 tcg_temp_free_ptr(fpst);
4832 tcg_temp_free_i32(tcg_op1);
4833 tcg_temp_free_i32(tcg_op2);
4834 tcg_temp_free_i32(tcg_res);
4837 /* Floating-point data-processing (2 source) - double precision */
4838 static void handle_fp_2src_double(DisasContext *s, int opcode,
4839 int rd, int rn, int rm)
4841 TCGv_i64 tcg_op1;
4842 TCGv_i64 tcg_op2;
4843 TCGv_i64 tcg_res;
4844 TCGv_ptr fpst;
4846 tcg_res = tcg_temp_new_i64();
4847 fpst = get_fpstatus_ptr();
4848 tcg_op1 = read_fp_dreg(s, rn);
4849 tcg_op2 = read_fp_dreg(s, rm);
4851 switch (opcode) {
4852 case 0x0: /* FMUL */
4853 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
4854 break;
4855 case 0x1: /* FDIV */
4856 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
4857 break;
4858 case 0x2: /* FADD */
4859 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
4860 break;
4861 case 0x3: /* FSUB */
4862 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
4863 break;
4864 case 0x4: /* FMAX */
4865 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
4866 break;
4867 case 0x5: /* FMIN */
4868 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
4869 break;
4870 case 0x6: /* FMAXNM */
4871 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
4872 break;
4873 case 0x7: /* FMINNM */
4874 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
4875 break;
4876 case 0x8: /* FNMUL */
4877 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
4878 gen_helper_vfp_negd(tcg_res, tcg_res);
4879 break;
4882 write_fp_dreg(s, rd, tcg_res);
4884 tcg_temp_free_ptr(fpst);
4885 tcg_temp_free_i64(tcg_op1);
4886 tcg_temp_free_i64(tcg_op2);
4887 tcg_temp_free_i64(tcg_res);
4890 /* Floating point data-processing (2 source)
4891 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
4892 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
4893 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | opcode | 1 0 | Rn | Rd |
4894 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
4896 static void disas_fp_2src(DisasContext *s, uint32_t insn)
4898 int type = extract32(insn, 22, 2);
4899 int rd = extract32(insn, 0, 5);
4900 int rn = extract32(insn, 5, 5);
4901 int rm = extract32(insn, 16, 5);
4902 int opcode = extract32(insn, 12, 4);
4904 if (opcode > 8) {
4905 unallocated_encoding(s);
4906 return;
4909 switch (type) {
4910 case 0:
4911 if (!fp_access_check(s)) {
4912 return;
4914 handle_fp_2src_single(s, opcode, rd, rn, rm);
4915 break;
4916 case 1:
4917 if (!fp_access_check(s)) {
4918 return;
4920 handle_fp_2src_double(s, opcode, rd, rn, rm);
4921 break;
4922 default:
4923 unallocated_encoding(s);
4927 /* Floating-point data-processing (3 source) - single precision */
4928 static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
4929 int rd, int rn, int rm, int ra)
4931 TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
4932 TCGv_i32 tcg_res = tcg_temp_new_i32();
4933 TCGv_ptr fpst = get_fpstatus_ptr();
4935 tcg_op1 = read_fp_sreg(s, rn);
4936 tcg_op2 = read_fp_sreg(s, rm);
4937 tcg_op3 = read_fp_sreg(s, ra);
4939 /* These are fused multiply-add, and must be done as one
4940 * floating point operation with no rounding between the
4941 * multiplication and addition steps.
4942 * NB that doing the negations here as separate steps is
4943 * correct: an input NaN should come out with its sign bit
4944 * flipped if it is a negated input.
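 *
 * As a rough map of the encodings handled here (assuming the usual
 * o1:o0 assignment for this group): o1=0,o0=0 is FMADD (Ra + Rn*Rm),
 * o1=0,o0=1 is FMSUB (Ra - Rn*Rm), o1=1,o0=0 is FNMADD (-Ra - Rn*Rm)
 * and o1=1,o0=1 is FNMSUB (-Ra + Rn*Rm); the two conditional
 * negations below produce exactly these combinations.
 */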
4946 if (o1 == true) {
4947 gen_helper_vfp_negs(tcg_op3, tcg_op3);
4950 if (o0 != o1) {
4951 gen_helper_vfp_negs(tcg_op1, tcg_op1);
4954 gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
4956 write_fp_sreg(s, rd, tcg_res);
4958 tcg_temp_free_ptr(fpst);
4959 tcg_temp_free_i32(tcg_op1);
4960 tcg_temp_free_i32(tcg_op2);
4961 tcg_temp_free_i32(tcg_op3);
4962 tcg_temp_free_i32(tcg_res);
4965 /* Floating-point data-processing (3 source) - double precision */
4966 static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
4967 int rd, int rn, int rm, int ra)
4969 TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
4970 TCGv_i64 tcg_res = tcg_temp_new_i64();
4971 TCGv_ptr fpst = get_fpstatus_ptr();
4973 tcg_op1 = read_fp_dreg(s, rn);
4974 tcg_op2 = read_fp_dreg(s, rm);
4975 tcg_op3 = read_fp_dreg(s, ra);
4977 /* These are fused multiply-add, and must be done as one
4978 * floating point operation with no rounding between the
4979 * multiplication and addition steps.
4980 * NB that doing the negations here as separate steps is
4981 * correct: an input NaN should come out with its sign bit
4982 * flipped if it is a negated input.
4984 if (o1 == true) {
4985 gen_helper_vfp_negd(tcg_op3, tcg_op3);
4988 if (o0 != o1) {
4989 gen_helper_vfp_negd(tcg_op1, tcg_op1);
4992 gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
4994 write_fp_dreg(s, rd, tcg_res);
4996 tcg_temp_free_ptr(fpst);
4997 tcg_temp_free_i64(tcg_op1);
4998 tcg_temp_free_i64(tcg_op2);
4999 tcg_temp_free_i64(tcg_op3);
5000 tcg_temp_free_i64(tcg_res);
5003 /* Floating point data-processing (3 source)
5004 * 31 30 29 28 24 23 22 21 20 16 15 14 10 9 5 4 0
5005 * +---+---+---+-----------+------+----+------+----+------+------+------+
5006 * | M | 0 | S | 1 1 1 1 1 | type | o1 | Rm | o0 | Ra | Rn | Rd |
5007 * +---+---+---+-----------+------+----+------+----+------+------+------+
5009 static void disas_fp_3src(DisasContext *s, uint32_t insn)
5011 int type = extract32(insn, 22, 2);
5012 int rd = extract32(insn, 0, 5);
5013 int rn = extract32(insn, 5, 5);
5014 int ra = extract32(insn, 10, 5);
5015 int rm = extract32(insn, 16, 5);
5016 bool o0 = extract32(insn, 15, 1);
5017 bool o1 = extract32(insn, 21, 1);
5019 switch (type) {
5020 case 0:
5021 if (!fp_access_check(s)) {
5022 return;
5024 handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
5025 break;
5026 case 1:
5027 if (!fp_access_check(s)) {
5028 return;
5030 handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
5031 break;
5032 default:
5033 unallocated_encoding(s);
5037 /* The imm8 encodes the sign bit, enough bits to represent an exponent in
5038 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
5039 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
5041 static uint64_t vfp_expand_imm(int size, uint8_t imm8)
5043 uint64_t imm;
5045 switch (size) {
5046 case MO_64:
5047 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
5048 (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
5049 extract32(imm8, 0, 6);
5050 imm <<= 48;
5051 break;
5052 case MO_32:
5053 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
5054 (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
5055 (extract32(imm8, 0, 6) << 3);
5056 imm <<= 16;
5057 break;
5058 case MO_16:
5059 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
5060 (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
5061 (extract32(imm8, 0, 6) << 6);
5062 break;
5063 default:
5064 g_assert_not_reached();
5066 return imm;
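/* As a quick sanity check of the expansion above (illustrative):
 * imm8 == 0x70 (sign 0, bit 6 set, cdefgh == 0b110000) expands to
 * 0x3f800000 for MO_32 and 0x3ff0000000000000 for MO_64, i.e. 1.0
 * in both formats.
 */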
5069 /* Floating point immediate
5070 * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
5071 * +---+---+---+-----------+------+---+------------+-------+------+------+
5072 * | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd |
5073 * +---+---+---+-----------+------+---+------------+-------+------+------+
5075 static void disas_fp_imm(DisasContext *s, uint32_t insn)
5077 int rd = extract32(insn, 0, 5);
5078 int imm8 = extract32(insn, 13, 8);
5079 int is_double = extract32(insn, 22, 2);
5080 uint64_t imm;
5081 TCGv_i64 tcg_res;
5083 if (is_double > 1) {
5084 unallocated_encoding(s);
5085 return;
5088 if (!fp_access_check(s)) {
5089 return;
5092 imm = vfp_expand_imm(MO_32 + is_double, imm8);
5094 tcg_res = tcg_const_i64(imm);
5095 write_fp_dreg(s, rd, tcg_res);
5096 tcg_temp_free_i64(tcg_res);
5099 /* Handle floating point <=> fixed point conversions. Note that we can
5100 * also deal with fp <=> integer conversions as a special case (scale == 64)
5101 * OPTME: consider handling that special case specially or at least skipping
5102 * the call to scalbn in the helpers for zero shifts.
5104 static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
5105 bool itof, int rmode, int scale, int sf, int type)
5107 bool is_signed = !(opcode & 1);
5108 bool is_double = type;
5109 TCGv_ptr tcg_fpstatus;
5110 TCGv_i32 tcg_shift;
5112 tcg_fpstatus = get_fpstatus_ptr();
5114 tcg_shift = tcg_const_i32(64 - scale);
5116 if (itof) {
5117 TCGv_i64 tcg_int = cpu_reg(s, rn);
5118 if (!sf) {
5119 TCGv_i64 tcg_extend = new_tmp_a64(s);
5121 if (is_signed) {
5122 tcg_gen_ext32s_i64(tcg_extend, tcg_int);
5123 } else {
5124 tcg_gen_ext32u_i64(tcg_extend, tcg_int);
5127 tcg_int = tcg_extend;
5130 if (is_double) {
5131 TCGv_i64 tcg_double = tcg_temp_new_i64();
5132 if (is_signed) {
5133 gen_helper_vfp_sqtod(tcg_double, tcg_int,
5134 tcg_shift, tcg_fpstatus);
5135 } else {
5136 gen_helper_vfp_uqtod(tcg_double, tcg_int,
5137 tcg_shift, tcg_fpstatus);
5139 write_fp_dreg(s, rd, tcg_double);
5140 tcg_temp_free_i64(tcg_double);
5141 } else {
5142 TCGv_i32 tcg_single = tcg_temp_new_i32();
5143 if (is_signed) {
5144 gen_helper_vfp_sqtos(tcg_single, tcg_int,
5145 tcg_shift, tcg_fpstatus);
5146 } else {
5147 gen_helper_vfp_uqtos(tcg_single, tcg_int,
5148 tcg_shift, tcg_fpstatus);
5150 write_fp_sreg(s, rd, tcg_single);
5151 tcg_temp_free_i32(tcg_single);
5153 } else {
5154 TCGv_i64 tcg_int = cpu_reg(s, rd);
5155 TCGv_i32 tcg_rmode;
5157 if (extract32(opcode, 2, 1)) {
5158 /* There are too many rounding modes to all fit into rmode,
5159 * so FCVTA[US] is a special case.
5161 rmode = FPROUNDING_TIEAWAY;
5164 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
5166 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
5168 if (is_double) {
5169 TCGv_i64 tcg_double = read_fp_dreg(s, rn);
5170 if (is_signed) {
5171 if (!sf) {
5172 gen_helper_vfp_tosld(tcg_int, tcg_double,
5173 tcg_shift, tcg_fpstatus);
5174 } else {
5175 gen_helper_vfp_tosqd(tcg_int, tcg_double,
5176 tcg_shift, tcg_fpstatus);
5178 } else {
5179 if (!sf) {
5180 gen_helper_vfp_tould(tcg_int, tcg_double,
5181 tcg_shift, tcg_fpstatus);
5182 } else {
5183 gen_helper_vfp_touqd(tcg_int, tcg_double,
5184 tcg_shift, tcg_fpstatus);
5187 tcg_temp_free_i64(tcg_double);
5188 } else {
5189 TCGv_i32 tcg_single = read_fp_sreg(s, rn);
5190 if (sf) {
5191 if (is_signed) {
5192 gen_helper_vfp_tosqs(tcg_int, tcg_single,
5193 tcg_shift, tcg_fpstatus);
5194 } else {
5195 gen_helper_vfp_touqs(tcg_int, tcg_single,
5196 tcg_shift, tcg_fpstatus);
5198 } else {
5199 TCGv_i32 tcg_dest = tcg_temp_new_i32();
5200 if (is_signed) {
5201 gen_helper_vfp_tosls(tcg_dest, tcg_single,
5202 tcg_shift, tcg_fpstatus);
5203 } else {
5204 gen_helper_vfp_touls(tcg_dest, tcg_single,
5205 tcg_shift, tcg_fpstatus);
5207 tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
5208 tcg_temp_free_i32(tcg_dest);
5210 tcg_temp_free_i32(tcg_single);
5213 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
5214 tcg_temp_free_i32(tcg_rmode);
5216 if (!sf) {
5217 tcg_gen_ext32u_i64(tcg_int, tcg_int);
5221 tcg_temp_free_ptr(tcg_fpstatus);
5222 tcg_temp_free_i32(tcg_shift);
5225 /* Floating point <-> fixed point conversions
5226 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
5227 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
5228 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd |
5229 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
5231 static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
5233 int rd = extract32(insn, 0, 5);
5234 int rn = extract32(insn, 5, 5);
5235 int scale = extract32(insn, 10, 6);
5236 int opcode = extract32(insn, 16, 3);
5237 int rmode = extract32(insn, 19, 2);
5238 int type = extract32(insn, 22, 2);
5239 bool sbit = extract32(insn, 29, 1);
5240 bool sf = extract32(insn, 31, 1);
5241 bool itof;
5243 if (sbit || (type > 1)
5244 || (!sf && scale < 32)) {
5245 unallocated_encoding(s);
5246 return;
5249 switch ((rmode << 3) | opcode) {
5250 case 0x2: /* SCVTF */
5251 case 0x3: /* UCVTF */
5252 itof = true;
5253 break;
5254 case 0x18: /* FCVTZS */
5255 case 0x19: /* FCVTZU */
5256 itof = false;
5257 break;
5258 default:
5259 unallocated_encoding(s);
5260 return;
5263 if (!fp_access_check(s)) {
5264 return;
5267 handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
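/* The scale field holds 64 - fbits and handle_fpfpcvt() converts it
 * back, so the shift handed to the helpers is the number of fractional
 * bits: e.g. (illustrative) scale == 60 means 4 fractional bits, and
 * SCVTF of the integer 5 then yields 5 / 16 == 0.3125.
 */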
5270 static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
5272 /* FMOV: gpr to or from float, double, or top half of quad fp reg,
5273 * without conversion.
5276 if (itof) {
5277 TCGv_i64 tcg_rn = cpu_reg(s, rn);
5279 switch (type) {
5280 case 0:
5282 /* 32 bit */
5283 TCGv_i64 tmp = tcg_temp_new_i64();
5284 tcg_gen_ext32u_i64(tmp, tcg_rn);
5285 tcg_gen_st_i64(tmp, cpu_env, fp_reg_offset(s, rd, MO_64));
5286 tcg_gen_movi_i64(tmp, 0);
5287 tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(s, rd));
5288 tcg_temp_free_i64(tmp);
5289 break;
5291 case 1:
5293 /* 64 bit */
5294 TCGv_i64 tmp = tcg_const_i64(0);
5295 tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_offset(s, rd, MO_64));
5296 tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(s, rd));
5297 tcg_temp_free_i64(tmp);
5298 break;
5300 case 2:
5301 /* 64 bit to top half. */
5302 tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
5303 break;
5305 } else {
5306 TCGv_i64 tcg_rd = cpu_reg(s, rd);
5308 switch (type) {
5309 case 0:
5310 /* 32 bit */
5311 tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
5312 break;
5313 case 1:
5314 /* 64 bit */
5315 tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
5316 break;
5317 case 2:
5318 /* 64 bits from top half */
5319 tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
5320 break;
5325 /* Floating point <-> integer conversions
5326 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
5327 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
5328 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
5329 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
5331 static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
5333 int rd = extract32(insn, 0, 5);
5334 int rn = extract32(insn, 5, 5);
5335 int opcode = extract32(insn, 16, 3);
5336 int rmode = extract32(insn, 19, 2);
5337 int type = extract32(insn, 22, 2);
5338 bool sbit = extract32(insn, 29, 1);
5339 bool sf = extract32(insn, 31, 1);
5341 if (sbit) {
5342 unallocated_encoding(s);
5343 return;
5346 if (opcode > 5) {
5347 /* FMOV */
5348 bool itof = opcode & 1;
5350 if (rmode >= 2) {
5351 unallocated_encoding(s);
5352 return;
5355 switch (sf << 3 | type << 1 | rmode) {
5356 case 0x0: /* 32 bit */
5357 case 0xa: /* 64 bit */
5358 case 0xd: /* 64 bit to top half of quad */
5359 break;
5360 default:
5361 /* all other sf/type/rmode combinations are invalid */
5362 unallocated_encoding(s);
5363 break;
5366 if (!fp_access_check(s)) {
5367 return;
5369 handle_fmov(s, rd, rn, type, itof);
5370 } else {
5371 /* actual FP conversions */
5372 bool itof = extract32(opcode, 1, 1);
5374 if (type > 1 || (rmode != 0 && opcode > 1)) {
5375 unallocated_encoding(s);
5376 return;
5379 if (!fp_access_check(s)) {
5380 return;
5382 handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
5386 /* FP-specific subcases of table C3-6 (SIMD and FP data processing)
5387 * 31 30 29 28 25 24 0
5388 * +---+---+---+---------+-----------------------------+
5389 * | | 0 | | 1 1 1 1 | |
5390 * +---+---+---+---------+-----------------------------+
5392 static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
5394 if (extract32(insn, 24, 1)) {
5395 /* Floating point data-processing (3 source) */
5396 disas_fp_3src(s, insn);
5397 } else if (extract32(insn, 21, 1) == 0) {
5398 /* Floating point to fixed point conversions */
5399 disas_fp_fixed_conv(s, insn);
5400 } else {
5401 switch (extract32(insn, 10, 2)) {
5402 case 1:
5403 /* Floating point conditional compare */
5404 disas_fp_ccomp(s, insn);
5405 break;
5406 case 2:
5407 /* Floating point data-processing (2 source) */
5408 disas_fp_2src(s, insn);
5409 break;
5410 case 3:
5411 /* Floating point conditional select */
5412 disas_fp_csel(s, insn);
5413 break;
5414 case 0:
5415 switch (ctz32(extract32(insn, 12, 4))) {
5416 case 0: /* [15:12] == xxx1 */
5417 /* Floating point immediate */
5418 disas_fp_imm(s, insn);
5419 break;
5420 case 1: /* [15:12] == xx10 */
5421 /* Floating point compare */
5422 disas_fp_compare(s, insn);
5423 break;
5424 case 2: /* [15:12] == x100 */
5425 /* Floating point data-processing (1 source) */
5426 disas_fp_1src(s, insn);
5427 break;
5428 case 3: /* [15:12] == 1000 */
5429 unallocated_encoding(s);
5430 break;
5431 default: /* [15:12] == 0000 */
5432 /* Floating point <-> integer conversions */
5433 disas_fp_int_conv(s, insn);
5434 break;
5436 break;
5441 static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
5442 int pos)
5444 /* Extract 64 bits from the middle of two concatenated 64 bit
5445 * vector register slices left:right. The extracted bits start
5446 * at 'pos' bits into the right (least significant) side.
5447 * We return the result in tcg_right, and guarantee not to
5448 * trash tcg_left.
5450 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
5451 assert(pos > 0 && pos < 64);
5453 tcg_gen_shri_i64(tcg_right, tcg_right, pos);
5454 tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
5455 tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
5457 tcg_temp_free_i64(tcg_tmp);
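/* For instance (illustrative): with pos == 24 this computes
 * right = (right >> 24) | (left << 40), so the low five bytes of the
 * result come from the top five bytes of right and the high three
 * bytes come from the low three bytes of left.
 */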
5460 /* EXT
5461 * 31 30 29 24 23 22 21 20 16 15 14 11 10 9 5 4 0
5462 * +---+---+-------------+-----+---+------+---+------+---+------+------+
5463 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 | Rm | 0 | imm4 | 0 | Rn | Rd |
5464 * +---+---+-------------+-----+---+------+---+------+---+------+------+
5466 static void disas_simd_ext(DisasContext *s, uint32_t insn)
5468 int is_q = extract32(insn, 30, 1);
5469 int op2 = extract32(insn, 22, 2);
5470 int imm4 = extract32(insn, 11, 4);
5471 int rm = extract32(insn, 16, 5);
5472 int rn = extract32(insn, 5, 5);
5473 int rd = extract32(insn, 0, 5);
5474 int pos = imm4 << 3;
5475 TCGv_i64 tcg_resl, tcg_resh;
5477 if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
5478 unallocated_encoding(s);
5479 return;
5482 if (!fp_access_check(s)) {
5483 return;
5486 tcg_resh = tcg_temp_new_i64();
5487 tcg_resl = tcg_temp_new_i64();
5489 /* Vd gets bits starting at pos bits into Vm:Vn. This is
5490 * either extracting 128 bits from a 128:128 concatenation, or
5491 * extracting 64 bits from a 64:64 concatenation.
5493 if (!is_q) {
5494 read_vec_element(s, tcg_resl, rn, 0, MO_64);
5495 if (pos != 0) {
5496 read_vec_element(s, tcg_resh, rm, 0, MO_64);
5497 do_ext64(s, tcg_resh, tcg_resl, pos);
5499 tcg_gen_movi_i64(tcg_resh, 0);
5500 } else {
5501 TCGv_i64 tcg_hh;
5502 typedef struct {
5503 int reg;
5504 int elt;
5505 } EltPosns;
5506 EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
5507 EltPosns *elt = eltposns;
5509 if (pos >= 64) {
5510 elt++;
5511 pos -= 64;
5514 read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
5515 elt++;
5516 read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
5517 elt++;
5518 if (pos != 0) {
5519 do_ext64(s, tcg_resh, tcg_resl, pos);
5520 tcg_hh = tcg_temp_new_i64();
5521 read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
5522 do_ext64(s, tcg_hh, tcg_resh, pos);
5523 tcg_temp_free_i64(tcg_hh);
5527 write_vec_element(s, tcg_resl, rd, 0, MO_64);
5528 tcg_temp_free_i64(tcg_resl);
5529 write_vec_element(s, tcg_resh, rd, 1, MO_64);
5530 tcg_temp_free_i64(tcg_resh);
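/* Byte-level sketch of the 128-bit case (illustrative): with imm4 == 3,
 * Vd bytes [12:0] are Vn bytes [15:3] and Vd bytes [15:13] are Vm
 * bytes [2:0], i.e. a 3-byte right shift of the Vm:Vn concatenation.
 */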
5533 /* TBL/TBX
5534 * 31 30 29 24 23 22 21 20 16 15 14 13 12 11 10 9 5 4 0
5535 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
5536 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 | Rm | 0 | len | op | 0 0 | Rn | Rd |
5537 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
5539 static void disas_simd_tb(DisasContext *s, uint32_t insn)
5541 int op2 = extract32(insn, 22, 2);
5542 int is_q = extract32(insn, 30, 1);
5543 int rm = extract32(insn, 16, 5);
5544 int rn = extract32(insn, 5, 5);
5545 int rd = extract32(insn, 0, 5);
5546 int is_tblx = extract32(insn, 12, 1);
5547 int len = extract32(insn, 13, 2);
5548 TCGv_i64 tcg_resl, tcg_resh, tcg_idx;
5549 TCGv_i32 tcg_regno, tcg_numregs;
5551 if (op2 != 0) {
5552 unallocated_encoding(s);
5553 return;
5556 if (!fp_access_check(s)) {
5557 return;
5560 /* This does a table lookup: for every byte element in the input
5561 * we index into a table formed from up to four vector registers,
5562 * and then the output is the result of the lookups. Our helper
5563 * function does the lookup operation for a single 64 bit part of
5564 * the input.
5566 tcg_resl = tcg_temp_new_i64();
5567 tcg_resh = tcg_temp_new_i64();
5569 if (is_tblx) {
5570 read_vec_element(s, tcg_resl, rd, 0, MO_64);
5571 } else {
5572 tcg_gen_movi_i64(tcg_resl, 0);
5574 if (is_tblx && is_q) {
5575 read_vec_element(s, tcg_resh, rd, 1, MO_64);
5576 } else {
5577 tcg_gen_movi_i64(tcg_resh, 0);
5580 tcg_idx = tcg_temp_new_i64();
5581 tcg_regno = tcg_const_i32(rn);
5582 tcg_numregs = tcg_const_i32(len + 1);
5583 read_vec_element(s, tcg_idx, rm, 0, MO_64);
5584 gen_helper_simd_tbl(tcg_resl, cpu_env, tcg_resl, tcg_idx,
5585 tcg_regno, tcg_numregs);
5586 if (is_q) {
5587 read_vec_element(s, tcg_idx, rm, 1, MO_64);
5588 gen_helper_simd_tbl(tcg_resh, cpu_env, tcg_resh, tcg_idx,
5589 tcg_regno, tcg_numregs);
5591 tcg_temp_free_i64(tcg_idx);
5592 tcg_temp_free_i32(tcg_regno);
5593 tcg_temp_free_i32(tcg_numregs);
5595 write_vec_element(s, tcg_resl, rd, 0, MO_64);
5596 tcg_temp_free_i64(tcg_resl);
5597 write_vec_element(s, tcg_resh, rd, 1, MO_64);
5598 tcg_temp_free_i64(tcg_resh);
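/* Behavioural sketch: each index byte of Vm selects a byte from the
 * table formed by the len+1 consecutive registers starting at Vn;
 * an out-of-range index yields 0 for TBL but leaves the destination
 * byte unchanged for TBX, which is why the result temporaries are
 * pre-loaded from Vd only in the TBX case above.
 */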
5601 /* ZIP/UZP/TRN
5602 * 31 30 29 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
5603 * +---+---+-------------+------+---+------+---+------------------+------+
5604 * | 0 | Q | 0 0 1 1 1 0 | size | 0 | Rm | 0 | opc | 1 0 | Rn | Rd |
5605 * +---+---+-------------+------+---+------+---+------------------+------+
5607 static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
5609 int rd = extract32(insn, 0, 5);
5610 int rn = extract32(insn, 5, 5);
5611 int rm = extract32(insn, 16, 5);
5612 int size = extract32(insn, 22, 2);
5613 /* opc field bits [1:0] indicate ZIP/UZP/TRN;
5614 * bit 2 indicates 1 vs 2 variant of the insn.
5616 int opcode = extract32(insn, 12, 2);
5617 bool part = extract32(insn, 14, 1);
5618 bool is_q = extract32(insn, 30, 1);
5619 int esize = 8 << size;
5620 int i, ofs;
5621 int datasize = is_q ? 128 : 64;
5622 int elements = datasize / esize;
5623 TCGv_i64 tcg_res, tcg_resl, tcg_resh;
5625 if (opcode == 0 || (size == 3 && !is_q)) {
5626 unallocated_encoding(s);
5627 return;
5630 if (!fp_access_check(s)) {
5631 return;
5634 tcg_resl = tcg_const_i64(0);
5635 tcg_resh = tcg_const_i64(0);
5636 tcg_res = tcg_temp_new_i64();
5638 for (i = 0; i < elements; i++) {
5639 switch (opcode) {
5640 case 1: /* UZP1/2 */
5642 int midpoint = elements / 2;
5643 if (i < midpoint) {
5644 read_vec_element(s, tcg_res, rn, 2 * i + part, size);
5645 } else {
5646 read_vec_element(s, tcg_res, rm,
5647 2 * (i - midpoint) + part, size);
5649 break;
5651 case 2: /* TRN1/2 */
5652 if (i & 1) {
5653 read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
5654 } else {
5655 read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
5657 break;
5658 case 3: /* ZIP1/2 */
5660 int base = part * elements / 2;
5661 if (i & 1) {
5662 read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
5663 } else {
5664 read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
5666 break;
5668 default:
5669 g_assert_not_reached();
5672 ofs = i * esize;
5673 if (ofs < 64) {
5674 tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
5675 tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
5676 } else {
5677 tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
5678 tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
5682 tcg_temp_free_i64(tcg_res);
5684 write_vec_element(s, tcg_resl, rd, 0, MO_64);
5685 tcg_temp_free_i64(tcg_resl);
5686 write_vec_element(s, tcg_resh, rd, 1, MO_64);
5687 tcg_temp_free_i64(tcg_resh);
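/* Element-level sketch for the 64-bit byte case (elements == 8),
 * writing n0..n7 / m0..m7 for the elements of Vn / Vm, lowest first:
 *   ZIP1: d = { n0, m0, n1, m1, n2, m2, n3, m3 }
 *   UZP1: d = { n0, n2, n4, n6, m0, m2, m4, m6 }
 *   TRN1: d = { n0, m0, n2, m2, n4, m4, n6, m6 }
 * The "2" variants (part == 1) use the upper halves or odd-numbered
 * source elements instead.
 */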
5690 static void do_minmaxop(DisasContext *s, TCGv_i32 tcg_elt1, TCGv_i32 tcg_elt2,
5691 int opc, bool is_min, TCGv_ptr fpst)
5693 /* Helper function for disas_simd_across_lanes: do a single precision
5694 * min/max operation on the specified two inputs,
5695 * and return the result in tcg_elt1.
5697 if (opc == 0xc) {
5698 if (is_min) {
5699 gen_helper_vfp_minnums(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
5700 } else {
5701 gen_helper_vfp_maxnums(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
5703 } else {
5704 assert(opc == 0xf);
5705 if (is_min) {
5706 gen_helper_vfp_mins(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
5707 } else {
5708 gen_helper_vfp_maxs(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
5713 /* AdvSIMD across lanes
5714 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
5715 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
5716 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
5717 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
5719 static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
5721 int rd = extract32(insn, 0, 5);
5722 int rn = extract32(insn, 5, 5);
5723 int size = extract32(insn, 22, 2);
5724 int opcode = extract32(insn, 12, 5);
5725 bool is_q = extract32(insn, 30, 1);
5726 bool is_u = extract32(insn, 29, 1);
5727 bool is_fp = false;
5728 bool is_min = false;
5729 int esize;
5730 int elements;
5731 int i;
5732 TCGv_i64 tcg_res, tcg_elt;
5734 switch (opcode) {
5735 case 0x1b: /* ADDV */
5736 if (is_u) {
5737 unallocated_encoding(s);
5738 return;
5740 /* fall through */
5741 case 0x3: /* SADDLV, UADDLV */
5742 case 0xa: /* SMAXV, UMAXV */
5743 case 0x1a: /* SMINV, UMINV */
5744 if (size == 3 || (size == 2 && !is_q)) {
5745 unallocated_encoding(s);
5746 return;
5748 break;
5749 case 0xc: /* FMAXNMV, FMINNMV */
5750 case 0xf: /* FMAXV, FMINV */
5751 if (!is_u || !is_q || extract32(size, 0, 1)) {
5752 unallocated_encoding(s);
5753 return;
5755 /* Bit 1 of size field encodes min vs max, and actual size is always
5756 * 32 bits: adjust the size variable so following code can rely on it
5758 is_min = extract32(size, 1, 1);
5759 is_fp = true;
5760 size = 2;
5761 break;
5762 default:
5763 unallocated_encoding(s);
5764 return;
5767 if (!fp_access_check(s)) {
5768 return;
5771 esize = 8 << size;
5772 elements = (is_q ? 128 : 64) / esize;
5774 tcg_res = tcg_temp_new_i64();
5775 tcg_elt = tcg_temp_new_i64();
5777 /* These instructions operate across all lanes of a vector
5778 * to produce a single result. We can guarantee that a 64
5779 * bit intermediate is sufficient:
5780 * + for [US]ADDLV the maximum element size is 32 bits, and
5781 * the result type is 64 bits
5782 * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
5783 * same as the element size, which is 32 bits at most
5784 * For the integer operations we can choose to work at 64
5785 * or 32 bits and truncate at the end; for simplicity
5786 * we use 64 bits always. The floating point
5787 * ops do require 32 bit intermediates, though.
5789 if (!is_fp) {
5790 read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));
5792 for (i = 1; i < elements; i++) {
5793 read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));
5795 switch (opcode) {
5796 case 0x03: /* SADDLV / UADDLV */
5797 case 0x1b: /* ADDV */
5798 tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
5799 break;
5800 case 0x0a: /* SMAXV / UMAXV */
5801 tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
5802 tcg_res,
5803 tcg_res, tcg_elt, tcg_res, tcg_elt);
5804 break;
5805 case 0x1a: /* SMINV / UMINV */
5806 tcg_gen_movcond_i64(is_u ? TCG_COND_LEU : TCG_COND_LE,
5807 tcg_res,
5808 tcg_res, tcg_elt, tcg_res, tcg_elt);
5809 break;
5811 default:
5812 g_assert_not_reached();
5816 } else {
5817 /* Floating point ops which work on 32 bit (single) intermediates.
5818 * Note that correct NaN propagation requires that we do these
5819 * operations in exactly the order specified by the pseudocode.
5821 TCGv_i32 tcg_elt1 = tcg_temp_new_i32();
5822 TCGv_i32 tcg_elt2 = tcg_temp_new_i32();
5823 TCGv_i32 tcg_elt3 = tcg_temp_new_i32();
5824 TCGv_ptr fpst = get_fpstatus_ptr();
5826 assert(esize == 32);
5827 assert(elements == 4);
5829 read_vec_element(s, tcg_elt, rn, 0, MO_32);
5830 tcg_gen_extrl_i64_i32(tcg_elt1, tcg_elt);
5831 read_vec_element(s, tcg_elt, rn, 1, MO_32);
5832 tcg_gen_extrl_i64_i32(tcg_elt2, tcg_elt);
5834 do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);
5836 read_vec_element(s, tcg_elt, rn, 2, MO_32);
5837 tcg_gen_extrl_i64_i32(tcg_elt2, tcg_elt);
5838 read_vec_element(s, tcg_elt, rn, 3, MO_32);
5839 tcg_gen_extrl_i64_i32(tcg_elt3, tcg_elt);
5841 do_minmaxop(s, tcg_elt2, tcg_elt3, opcode, is_min, fpst);
5843 do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);
5845 tcg_gen_extu_i32_i64(tcg_res, tcg_elt1);
5846 tcg_temp_free_i32(tcg_elt1);
5847 tcg_temp_free_i32(tcg_elt2);
5848 tcg_temp_free_i32(tcg_elt3);
5849 tcg_temp_free_ptr(fpst);
5852 tcg_temp_free_i64(tcg_elt);
5854 /* Now truncate the result to the width required for the final output */
5855 if (opcode == 0x03) {
5856 /* SADDLV, UADDLV: result is 2*esize */
5857 size++;
5860 switch (size) {
5861 case 0:
5862 tcg_gen_ext8u_i64(tcg_res, tcg_res);
5863 break;
5864 case 1:
5865 tcg_gen_ext16u_i64(tcg_res, tcg_res);
5866 break;
5867 case 2:
5868 tcg_gen_ext32u_i64(tcg_res, tcg_res);
5869 break;
5870 case 3:
5871 break;
5872 default:
5873 g_assert_not_reached();
5876 write_fp_dreg(s, rd, tcg_res);
5877 tcg_temp_free_i64(tcg_res);
5880 /* DUP (Element, Vector)
5882 * 31 30 29 21 20 16 15 10 9 5 4 0
5883 * +---+---+-------------------+--------+-------------+------+------+
5884 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
5885 * +---+---+-------------------+--------+-------------+------+------+
5887 * size: encoded in imm5 (see ARM ARM LowestSetBit())
5889 static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
5890 int imm5)
5892 int size = ctz32(imm5);
5893 int esize = 8 << size;
5894 int elements = (is_q ? 128 : 64) / esize;
5895 int index, i;
5896 TCGv_i64 tmp;
5898 if (size > 3 || (size == 3 && !is_q)) {
5899 unallocated_encoding(s);
5900 return;
5903 if (!fp_access_check(s)) {
5904 return;
5907 index = imm5 >> (size + 1);
5909 tmp = tcg_temp_new_i64();
5910 read_vec_element(s, tmp, rn, index, size);
5912 for (i = 0; i < elements; i++) {
5913 write_vec_element(s, tmp, rd, i, size);
5916 if (!is_q) {
5917 clear_vec_high(s, rd);
5920 tcg_temp_free_i64(tmp);
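/* Worked example of the imm5 decode used here and in the handlers
 * below (illustrative): imm5 == 0b10010 gives size == ctz32(imm5) == 1
 * (16-bit elements) and index == imm5 >> (size + 1) == 4, so element 4
 * of Vn is replicated.
 */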
5923 /* DUP (element, scalar)
5924 * 31 21 20 16 15 10 9 5 4 0
5925 * +-----------------------+--------+-------------+------+------+
5926 * | 0 1 0 1 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
5927 * +-----------------------+--------+-------------+------+------+
5929 static void handle_simd_dupes(DisasContext *s, int rd, int rn,
5930 int imm5)
5932 int size = ctz32(imm5);
5933 int index;
5934 TCGv_i64 tmp;
5936 if (size > 3) {
5937 unallocated_encoding(s);
5938 return;
5941 if (!fp_access_check(s)) {
5942 return;
5945 index = imm5 >> (size + 1);
5947 /* This instruction just extracts the specified element and
5948 * zero-extends it into the bottom of the destination register.
5950 tmp = tcg_temp_new_i64();
5951 read_vec_element(s, tmp, rn, index, size);
5952 write_fp_dreg(s, rd, tmp);
5953 tcg_temp_free_i64(tmp);
5956 /* DUP (General)
5958 * 31 30 29 21 20 16 15 10 9 5 4 0
5959 * +---+---+-------------------+--------+-------------+------+------+
5960 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 1 1 | Rn | Rd |
5961 * +---+---+-------------------+--------+-------------+------+------+
5963 * size: encoded in imm5 (see ARM ARM LowestSetBit())
5965 static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
5966 int imm5)
5968 int size = ctz32(imm5);
5969 int esize = 8 << size;
5970 int elements = (is_q ? 128 : 64)/esize;
5971 int i = 0;
5973 if (size > 3 || ((size == 3) && !is_q)) {
5974 unallocated_encoding(s);
5975 return;
5978 if (!fp_access_check(s)) {
5979 return;
5982 for (i = 0; i < elements; i++) {
5983 write_vec_element(s, cpu_reg(s, rn), rd, i, size);
5985 if (!is_q) {
5986 clear_vec_high(s, rd);
5990 /* INS (Element)
5992 * 31 21 20 16 15 14 11 10 9 5 4 0
5993 * +-----------------------+--------+------------+---+------+------+
5994 * | 0 1 1 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
5995 * +-----------------------+--------+------------+---+------+------+
5997 * size: encoded in imm5 (see ARM ARM LowestSetBit())
5998 * index: encoded in imm5<4:size+1>
6000 static void handle_simd_inse(DisasContext *s, int rd, int rn,
6001 int imm4, int imm5)
6003 int size = ctz32(imm5);
6004 int src_index, dst_index;
6005 TCGv_i64 tmp;
6007 if (size > 3) {
6008 unallocated_encoding(s);
6009 return;
6012 if (!fp_access_check(s)) {
6013 return;
6016 dst_index = extract32(imm5, 1+size, 5);
6017 src_index = extract32(imm4, size, 4);
6019 tmp = tcg_temp_new_i64();
6021 read_vec_element(s, tmp, rn, src_index, size);
6022 write_vec_element(s, tmp, rd, dst_index, size);
6024 tcg_temp_free_i64(tmp);
6028 /* INS (General)
6030 * 31 21 20 16 15 10 9 5 4 0
6031 * +-----------------------+--------+-------------+------+------+
6032 * | 0 1 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 1 1 1 | Rn | Rd |
6033 * +-----------------------+--------+-------------+------+------+
6035 * size: encoded in imm5 (see ARM ARM LowestSetBit())
6036 * index: encoded in imm5<4:size+1>
6038 static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
6040 int size = ctz32(imm5);
6041 int idx;
6043 if (size > 3) {
6044 unallocated_encoding(s);
6045 return;
6048 if (!fp_access_check(s)) {
6049 return;
6052 idx = extract32(imm5, 1 + size, 4 - size);
6053 write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
6057 * UMOV (General)
6058 * SMOV (General)
6060 * 31 30 29 21 20 16 15 12 10 9 5 4 0
6061 * +---+---+-------------------+--------+-------------+------+------+
6062 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 1 U 1 1 | Rn | Rd |
6063 * +---+---+-------------------+--------+-------------+------+------+
6065 * U: unsigned when set
6066 * size: encoded in imm5 (see ARM ARM LowestSetBit())
6068 static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
6069 int rn, int rd, int imm5)
6071 int size = ctz32(imm5);
6072 int element;
6073 TCGv_i64 tcg_rd;
6075 /* Check for UnallocatedEncodings */
6076 if (is_signed) {
6077 if (size > 2 || (size == 2 && !is_q)) {
6078 unallocated_encoding(s);
6079 return;
6081 } else {
6082 if (size > 3
6083 || (size < 3 && is_q)
6084 || (size == 3 && !is_q)) {
6085 unallocated_encoding(s);
6086 return;
6090 if (!fp_access_check(s)) {
6091 return;
6094 element = extract32(imm5, 1+size, 4);
6096 tcg_rd = cpu_reg(s, rd);
6097 read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
6098 if (is_signed && !is_q) {
6099 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
6103 /* AdvSIMD copy
6104 * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
6105 * +---+---+----+-----------------+------+---+------+---+------+------+
6106 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
6107 * +---+---+----+-----------------+------+---+------+---+------+------+
6109 static void disas_simd_copy(DisasContext *s, uint32_t insn)
6111 int rd = extract32(insn, 0, 5);
6112 int rn = extract32(insn, 5, 5);
6113 int imm4 = extract32(insn, 11, 4);
6114 int op = extract32(insn, 29, 1);
6115 int is_q = extract32(insn, 30, 1);
6116 int imm5 = extract32(insn, 16, 5);
6118 if (op) {
6119 if (is_q) {
6120 /* INS (element) */
6121 handle_simd_inse(s, rd, rn, imm4, imm5);
6122 } else {
6123 unallocated_encoding(s);
6125 } else {
6126 switch (imm4) {
6127 case 0:
6128 /* DUP (element - vector) */
6129 handle_simd_dupe(s, is_q, rd, rn, imm5);
6130 break;
6131 case 1:
6132 /* DUP (general) */
6133 handle_simd_dupg(s, is_q, rd, rn, imm5);
6134 break;
6135 case 3:
6136 if (is_q) {
6137 /* INS (general) */
6138 handle_simd_insg(s, rd, rn, imm5);
6139 } else {
6140 unallocated_encoding(s);
6142 break;
6143 case 5:
6144 case 7:
6145 /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
6146 handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
6147 break;
6148 default:
6149 unallocated_encoding(s);
6150 break;
6155 /* AdvSIMD modified immediate
6156 * 31 30 29 28 19 18 16 15 12 11 10 9 5 4 0
6157 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
6158 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh | Rd |
6159 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
6161 * There are a number of operations that can be carried out here:
6162 * MOVI - move (shifted) imm into register
6163 * MVNI - move inverted (shifted) imm into register
6164 * ORR - bitwise OR of (shifted) imm with register
6165 * BIC - bitwise clear of (shifted) imm with register
6167 static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
6169 int rd = extract32(insn, 0, 5);
6170 int cmode = extract32(insn, 12, 4);
6171 int cmode_3_1 = extract32(cmode, 1, 3);
6172 int cmode_0 = extract32(cmode, 0, 1);
6173 int o2 = extract32(insn, 11, 1);
6174 uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
6175 bool is_neg = extract32(insn, 29, 1);
6176 bool is_q = extract32(insn, 30, 1);
6177 uint64_t imm = 0;
6178 TCGv_i64 tcg_rd, tcg_imm;
6179 int i;
6181 if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
6182 unallocated_encoding(s);
6183 return;
6186 if (!fp_access_check(s)) {
6187 return;
6190 /* See AdvSIMDExpandImm() in ARM ARM */
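/* cmode<3:1> selects the expansion class: 0-3 place imm8 in one byte of
 * each 32-bit chunk, 4-5 in one byte of each 16-bit chunk, 6 gives the
 * "shifting ones" forms, and 7 covers the byte replicate, the
 * bit-to-byte mask and the FMOV (immediate) floating-point patterns.
 */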
6191 switch (cmode_3_1) {
6192 case 0: /* Replicate(Zeros(24):imm8, 2) */
6193 case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
6194 case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
6195 case 3: /* Replicate(imm8:Zeros(24), 2) */
6197 int shift = cmode_3_1 * 8;
6198 imm = bitfield_replicate(abcdefgh << shift, 32);
6199 break;
6201 case 4: /* Replicate(Zeros(8):imm8, 4) */
6202 case 5: /* Replicate(imm8:Zeros(8), 4) */
6204 int shift = (cmode_3_1 & 0x1) * 8;
6205 imm = bitfield_replicate(abcdefgh << shift, 16);
6206 break;
6208 case 6:
6209 if (cmode_0) {
6210 /* Replicate(Zeros(8):imm8:Ones(16), 2) */
6211 imm = (abcdefgh << 16) | 0xffff;
6212 } else {
6213 /* Replicate(Zeros(16):imm8:Ones(8), 2) */
6214 imm = (abcdefgh << 8) | 0xff;
6216 imm = bitfield_replicate(imm, 32);
6217 break;
6218 case 7:
6219 if (!cmode_0 && !is_neg) {
6220 imm = bitfield_replicate(abcdefgh, 8);
6221 } else if (!cmode_0 && is_neg) {
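/* Each set bit of imm8 selects a whole byte of ones in the 64-bit
 * immediate, e.g. imm8 == 0x81 expands to 0xff000000000000ff.
 */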
6222 int i;
6223 imm = 0;
6224 for (i = 0; i < 8; i++) {
6225 if ((abcdefgh) & (1 << i)) {
6226 imm |= 0xffULL << (i * 8);
6229 } else if (cmode_0) {
6230 if (is_neg) {
6231 imm = (abcdefgh & 0x3f) << 48;
6232 if (abcdefgh & 0x80) {
6233 imm |= 0x8000000000000000ULL;
6235 if (abcdefgh & 0x40) {
6236 imm |= 0x3fc0000000000000ULL;
6237 } else {
6238 imm |= 0x4000000000000000ULL;
6240 } else {
6241 imm = (abcdefgh & 0x3f) << 19;
6242 if (abcdefgh & 0x80) {
6243 imm |= 0x80000000;
6245 if (abcdefgh & 0x40) {
6246 imm |= 0x3e000000;
6247 } else {
6248 imm |= 0x40000000;
6250 imm |= (imm << 32);
6253 break;
6256 if (cmode_3_1 != 7 && is_neg) {
6257 imm = ~imm;
6260 tcg_imm = tcg_const_i64(imm);
6261 tcg_rd = new_tmp_a64(s);
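/* Operate on the low and high 64-bit halves of the register in turn;
 * for non-quad forms the second pass simply zeroes the high half.
 */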
6263 for (i = 0; i < 2; i++) {
6264 int foffs = i ? fp_reg_hi_offset(s, rd) : fp_reg_offset(s, rd, MO_64);
6266 if (i == 1 && !is_q) {
6267 /* non-quad ops clear high half of vector */
6268 tcg_gen_movi_i64(tcg_rd, 0);
6269 } else if ((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9) {
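/* ORR/BIC forms: cmode 0xx1 (32-bit) or 10x1 (16-bit) combine the
 * expanded immediate with the existing register contents.
 */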
6270 tcg_gen_ld_i64(tcg_rd, cpu_env, foffs);
6271 if (is_neg) {
6272 /* AND (BIC) */
6273 tcg_gen_and_i64(tcg_rd, tcg_rd, tcg_imm);
6274 } else {
6275 /* ORR */
6276 tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_imm);
6278 } else {
6279 /* MOVI */
6280 tcg_gen_mov_i64(tcg_rd, tcg_imm);
6282 tcg_gen_st_i64(tcg_rd, cpu_env, foffs);
6285 tcg_temp_free_i64(tcg_imm);
6288 /* AdvSIMD scalar copy
6289 * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
6290 * +-----+----+-----------------+------+---+------+---+------+------+
6291 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
6292 * +-----+----+-----------------+------+---+------+---+------+------+
6294 static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
6296 int rd = extract32(insn, 0, 5);
6297 int rn = extract32(insn, 5, 5);
6298 int imm4 = extract32(insn, 11, 4);
6299 int imm5 = extract32(insn, 16, 5);
6300 int op = extract32(insn, 29, 1);
6302 if (op != 0 || imm4 != 0) {
6303 unallocated_encoding(s);
6304 return;
6307 /* DUP (element, scalar) */
6308 handle_simd_dupes(s, rd, rn, imm5);
6311 /* AdvSIMD scalar pairwise
6312 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
6313 * +-----+---+-----------+------+-----------+--------+-----+------+------+
6314 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
6315 * +-----+---+-----------+------+-----------+--------+-----+------+------+
6317 static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
6319 int u = extract32(insn, 29, 1);
6320 int size = extract32(insn, 22, 2);
6321 int opcode = extract32(insn, 12, 5);
6322 int rn = extract32(insn, 5, 5);
6323 int rd = extract32(insn, 0, 5);
6324 TCGv_ptr fpst;
6326 /* For some ops (the FP ones), size[1] is part of the encoding.
6327 * For ADDP strictly it is not but size[1] is always 1 for valid
6328 * encodings.
6330 opcode |= (extract32(size, 1, 1) << 5);
6332 switch (opcode) {
6333 case 0x3b: /* ADDP */
6334 if (u || size != 3) {
6335 unallocated_encoding(s);
6336 return;
6338 if (!fp_access_check(s)) {
6339 return;
6342 fpst = NULL;
6343 break;
6344 case 0xc: /* FMAXNMP */
6345 case 0xd: /* FADDP */
6346 case 0xf: /* FMAXP */
6347 case 0x2c: /* FMINNMP */
6348 case 0x2f: /* FMINP */
6349 /* FP op, size[0] is 32 or 64 bit */
6350 if (!u) {
6351 unallocated_encoding(s);
6352 return;
6354 if (!fp_access_check(s)) {
6355 return;
6358 size = extract32(size, 0, 1) ? 3 : 2;
6359 fpst = get_fpstatus_ptr();
6360 break;
6361 default:
6362 unallocated_encoding(s);
6363 return;
6366 if (size == 3) {
6367 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
6368 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
6369 TCGv_i64 tcg_res = tcg_temp_new_i64();
6371 read_vec_element(s, tcg_op1, rn, 0, MO_64);
6372 read_vec_element(s, tcg_op2, rn, 1, MO_64);
6374 switch (opcode) {
6375 case 0x3b: /* ADDP */
6376 tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
6377 break;
6378 case 0xc: /* FMAXNMP */
6379 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6380 break;
6381 case 0xd: /* FADDP */
6382 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
6383 break;
6384 case 0xf: /* FMAXP */
6385 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
6386 break;
6387 case 0x2c: /* FMINNMP */
6388 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6389 break;
6390 case 0x2f: /* FMINP */
6391 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
6392 break;
6393 default:
6394 g_assert_not_reached();
6397 write_fp_dreg(s, rd, tcg_res);
6399 tcg_temp_free_i64(tcg_op1);
6400 tcg_temp_free_i64(tcg_op2);
6401 tcg_temp_free_i64(tcg_res);
6402 } else {
6403 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
6404 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
6405 TCGv_i32 tcg_res = tcg_temp_new_i32();
6407 read_vec_element_i32(s, tcg_op1, rn, 0, MO_32);
6408 read_vec_element_i32(s, tcg_op2, rn, 1, MO_32);
6410 switch (opcode) {
6411 case 0xc: /* FMAXNMP */
6412 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
6413 break;
6414 case 0xd: /* FADDP */
6415 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
6416 break;
6417 case 0xf: /* FMAXP */
6418 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
6419 break;
6420 case 0x2c: /* FMINNMP */
6421 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
6422 break;
6423 case 0x2f: /* FMINP */
6424 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
6425 break;
6426 default:
6427 g_assert_not_reached();
6430 write_fp_sreg(s, rd, tcg_res);
6432 tcg_temp_free_i32(tcg_op1);
6433 tcg_temp_free_i32(tcg_op2);
6434 tcg_temp_free_i32(tcg_res);
6437 if (fpst) {
6438 tcg_temp_free_ptr(fpst);
6443 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
6445 * This code handles the common shifting logic and is used by both
6446 * the vector and scalar code.
6448 static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
6449 TCGv_i64 tcg_rnd, bool accumulate,
6450 bool is_u, int size, int shift)
6452 bool extended_result = false;
6453 bool round = tcg_rnd != NULL;
6454 int ext_lshift = 0;
6455 TCGv_i64 tcg_src_hi;
6457 if (round && size == 3) {
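/* Adding the rounding constant to a 64-bit value can carry into a
 * 65th bit, so keep an extended high word in tcg_src_hi and shift
 * it back into place by ext_lshift = 64 - shift afterwards.
 */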
6458 extended_result = true;
6459 ext_lshift = 64 - shift;
6460 tcg_src_hi = tcg_temp_new_i64();
6461 } else if (shift == 64) {
6462 if (!accumulate && is_u) {
6463 /* result is zero */
6464 tcg_gen_movi_i64(tcg_res, 0);
6465 return;
6469 /* Deal with the rounding step */
6470 if (round) {
6471 if (extended_result) {
6472 TCGv_i64 tcg_zero = tcg_const_i64(0);
6473 if (!is_u) {
6474 /* take care of sign extending tcg_res */
6475 tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
6476 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
6477 tcg_src, tcg_src_hi,
6478 tcg_rnd, tcg_zero);
6479 } else {
6480 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
6481 tcg_src, tcg_zero,
6482 tcg_rnd, tcg_zero);
6484 tcg_temp_free_i64(tcg_zero);
6485 } else {
6486 tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
6490 /* Now do the shift right */
6491 if (round && extended_result) {
6492 /* extended case, >64 bit precision required */
6493 if (ext_lshift == 0) {
6494 /* special case, only high bits matter */
6495 tcg_gen_mov_i64(tcg_src, tcg_src_hi);
6496 } else {
6497 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
6498 tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
6499 tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
6501 } else {
6502 if (is_u) {
6503 if (shift == 64) {
6504 /* essentially shifting in 64 zeros */
6505 tcg_gen_movi_i64(tcg_src, 0);
6506 } else {
6507 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
6509 } else {
6510 if (shift == 64) {
6511 /* effectively extending the sign-bit */
6512 tcg_gen_sari_i64(tcg_src, tcg_src, 63);
6513 } else {
6514 tcg_gen_sari_i64(tcg_src, tcg_src, shift);
6519 if (accumulate) {
6520 tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
6521 } else {
6522 tcg_gen_mov_i64(tcg_res, tcg_src);
6525 if (extended_result) {
6526 tcg_temp_free_i64(tcg_src_hi);
6530 /* Common SHL/SLI - Shift left with an optional insert */
6531 static void handle_shli_with_ins(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
6532 bool insert, int shift)
6534 if (insert) { /* SLI */
6535 tcg_gen_deposit_i64(tcg_res, tcg_res, tcg_src, shift, 64 - shift);
6536 } else { /* SHL */
6537 tcg_gen_shli_i64(tcg_res, tcg_src, shift);
6541 /* SRI: shift right with insert */
6542 static void handle_shri_with_ins(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
6543 int size, int shift)
6545 int esize = 8 << size;
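/* Only the low (esize - shift) bits of the result element are
 * written; the top 'shift' bits of the existing destination are
 * preserved, as SRI requires.
 */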
6547 /* shift count same as element size is valid but does nothing;
6548 * special case to avoid potential shift by 64.
6550 if (shift != esize) {
6551 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
6552 tcg_gen_deposit_i64(tcg_res, tcg_res, tcg_src, 0, esize - shift);
6556 /* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
6557 static void handle_scalar_simd_shri(DisasContext *s,
6558 bool is_u, int immh, int immb,
6559 int opcode, int rn, int rd)
6561 const int size = 3;
6562 int immhb = immh << 3 | immb;
6563 int shift = 2 * (8 << size) - immhb;
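/* Right shifts are encoded as (2 * esize) - immh:immb, giving a
 * shift count in the range 1..esize (always 1..64 here).
 */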
6564 bool accumulate = false;
6565 bool round = false;
6566 bool insert = false;
6567 TCGv_i64 tcg_rn;
6568 TCGv_i64 tcg_rd;
6569 TCGv_i64 tcg_round;
6571 if (!extract32(immh, 3, 1)) {
6572 unallocated_encoding(s);
6573 return;
6576 if (!fp_access_check(s)) {
6577 return;
6580 switch (opcode) {
6581 case 0x02: /* SSRA / USRA (accumulate) */
6582 accumulate = true;
6583 break;
6584 case 0x04: /* SRSHR / URSHR (rounding) */
6585 round = true;
6586 break;
6587 case 0x06: /* SRSRA / URSRA (accum + rounding) */
6588 accumulate = round = true;
6589 break;
6590 case 0x08: /* SRI */
6591 insert = true;
6592 break;
6595 if (round) {
6596 uint64_t round_const = 1ULL << (shift - 1);
6597 tcg_round = tcg_const_i64(round_const);
6598 } else {
6599 tcg_round = NULL;
6602 tcg_rn = read_fp_dreg(s, rn);
6603 tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
6605 if (insert) {
6606 handle_shri_with_ins(tcg_rd, tcg_rn, size, shift);
6607 } else {
6608 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
6609 accumulate, is_u, size, shift);
6612 write_fp_dreg(s, rd, tcg_rd);
6614 tcg_temp_free_i64(tcg_rn);
6615 tcg_temp_free_i64(tcg_rd);
6616 if (round) {
6617 tcg_temp_free_i64(tcg_round);
6621 /* SHL/SLI - Scalar shift left */
6622 static void handle_scalar_simd_shli(DisasContext *s, bool insert,
6623 int immh, int immb, int opcode,
6624 int rn, int rd)
6626 int size = 32 - clz32(immh) - 1;
6627 int immhb = immh << 3 | immb;
6628 int shift = immhb - (8 << size);
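/* Left shifts are encoded as immh:immb - esize, giving a shift
 * count in the range 0..esize-1.
 */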
6629 TCGv_i64 tcg_rn;
6630 TCGv_i64 tcg_rd;
6632 if (!extract32(immh, 3, 1)) {
6633 unallocated_encoding(s);
6634 return;
6637 if (!fp_access_check(s)) {
6638 return;
6641 tcg_rn = read_fp_dreg(s, rn);
6642 tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
6644 handle_shli_with_ins(tcg_rd, tcg_rn, insert, shift);
6646 write_fp_dreg(s, rd, tcg_rd);
6648 tcg_temp_free_i64(tcg_rn);
6649 tcg_temp_free_i64(tcg_rd);
6652 /* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
6653 * (signed/unsigned) narrowing */
6654 static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
6655 bool is_u_shift, bool is_u_narrow,
6656 int immh, int immb, int opcode,
6657 int rn, int rd)
6659 int immhb = immh << 3 | immb;
6660 int size = 32 - clz32(immh) - 1;
6661 int esize = 8 << size;
6662 int shift = (2 * esize) - immhb;
6663 int elements = is_scalar ? 1 : (64 / esize);
6664 bool round = extract32(opcode, 0, 1);
6665 TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
6666 TCGv_i64 tcg_rn, tcg_rd, tcg_round;
6667 TCGv_i32 tcg_rd_narrowed;
6668 TCGv_i64 tcg_final;
6670 static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
6671 { gen_helper_neon_narrow_sat_s8,
6672 gen_helper_neon_unarrow_sat8 },
6673 { gen_helper_neon_narrow_sat_s16,
6674 gen_helper_neon_unarrow_sat16 },
6675 { gen_helper_neon_narrow_sat_s32,
6676 gen_helper_neon_unarrow_sat32 },
6677 { NULL, NULL },
6679 static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
6680 gen_helper_neon_narrow_sat_u8,
6681 gen_helper_neon_narrow_sat_u16,
6682 gen_helper_neon_narrow_sat_u32,
6683 NULL
6685 NeonGenNarrowEnvFn *narrowfn;
6687 int i;
6689 assert(size < 4);
6691 if (extract32(immh, 3, 1)) {
6692 unallocated_encoding(s);
6693 return;
6696 if (!fp_access_check(s)) {
6697 return;
6700 if (is_u_shift) {
6701 narrowfn = unsigned_narrow_fns[size];
6702 } else {
6703 narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
6706 tcg_rn = tcg_temp_new_i64();
6707 tcg_rd = tcg_temp_new_i64();
6708 tcg_rd_narrowed = tcg_temp_new_i32();
6709 tcg_final = tcg_const_i64(0);
6711 if (round) {
6712 uint64_t round_const = 1ULL << (shift - 1);
6713 tcg_round = tcg_const_i64(round_const);
6714 } else {
6715 tcg_round = NULL;
6718 for (i = 0; i < elements; i++) {
6719 read_vec_element(s, tcg_rn, rn, i, ldop);
6720 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
6721 false, is_u_shift, size+1, shift);
6722 narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
6723 tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
6724 tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
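/* The non-"2" forms replace the low 64 bits of Vd and clear the high
 * half; the "2" (second part) forms write only the high 64 bits and
 * leave the low half untouched.
 */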
6727 if (!is_q) {
6728 clear_vec_high(s, rd);
6729 write_vec_element(s, tcg_final, rd, 0, MO_64);
6730 } else {
6731 write_vec_element(s, tcg_final, rd, 1, MO_64);
6734 if (round) {
6735 tcg_temp_free_i64(tcg_round);
6737 tcg_temp_free_i64(tcg_rn);
6738 tcg_temp_free_i64(tcg_rd);
6739 tcg_temp_free_i32(tcg_rd_narrowed);
6740 tcg_temp_free_i64(tcg_final);
6741 return;
6744 /* SQSHLU, UQSHL, SQSHL: saturating left shifts */
6745 static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
6746 bool src_unsigned, bool dst_unsigned,
6747 int immh, int immb, int rn, int rd)
6749 int immhb = immh << 3 | immb;
6750 int size = 32 - clz32(immh) - 1;
6751 int shift = immhb - (8 << size);
6752 int pass;
6754 assert(immh != 0);
6755 assert(!(scalar && is_q));
6757 if (!scalar) {
6758 if (!is_q && extract32(immh, 3, 1)) {
6759 unallocated_encoding(s);
6760 return;
6763 /* Since we use the variable-shift helpers we must
6764 * replicate the shift count into each element of
6765 * the tcg_shift value.
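* E.g. for byte elements a shift of 3 becomes 0x03030303, so every
* byte lane of the 32-bit helper input sees the same shift count.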
6767 switch (size) {
6768 case 0:
6769 shift |= shift << 8;
6770 /* fall through */
6771 case 1:
6772 shift |= shift << 16;
6773 break;
6774 case 2:
6775 case 3:
6776 break;
6777 default:
6778 g_assert_not_reached();
6782 if (!fp_access_check(s)) {
6783 return;
6786 if (size == 3) {
6787 TCGv_i64 tcg_shift = tcg_const_i64(shift);
6788 static NeonGenTwo64OpEnvFn * const fns[2][2] = {
6789 { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
6790 { NULL, gen_helper_neon_qshl_u64 },
6792 NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
6793 int maxpass = is_q ? 2 : 1;
6795 for (pass = 0; pass < maxpass; pass++) {
6796 TCGv_i64 tcg_op = tcg_temp_new_i64();
6798 read_vec_element(s, tcg_op, rn, pass, MO_64);
6799 genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
6800 write_vec_element(s, tcg_op, rd, pass, MO_64);
6802 tcg_temp_free_i64(tcg_op);
6804 tcg_temp_free_i64(tcg_shift);
6806 if (!is_q) {
6807 clear_vec_high(s, rd);
6809 } else {
6810 TCGv_i32 tcg_shift = tcg_const_i32(shift);
6811 static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
6813 { gen_helper_neon_qshl_s8,
6814 gen_helper_neon_qshl_s16,
6815 gen_helper_neon_qshl_s32 },
6816 { gen_helper_neon_qshlu_s8,
6817 gen_helper_neon_qshlu_s16,
6818 gen_helper_neon_qshlu_s32 }
6819 }, {
6820 { NULL, NULL, NULL },
6821 { gen_helper_neon_qshl_u8,
6822 gen_helper_neon_qshl_u16,
6823 gen_helper_neon_qshl_u32 }
6826 NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
6827 TCGMemOp memop = scalar ? size : MO_32;
6828 int maxpass = scalar ? 1 : is_q ? 4 : 2;
6830 for (pass = 0; pass < maxpass; pass++) {
6831 TCGv_i32 tcg_op = tcg_temp_new_i32();
6833 read_vec_element_i32(s, tcg_op, rn, pass, memop);
6834 genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
6835 if (scalar) {
6836 switch (size) {
6837 case 0:
6838 tcg_gen_ext8u_i32(tcg_op, tcg_op);
6839 break;
6840 case 1:
6841 tcg_gen_ext16u_i32(tcg_op, tcg_op);
6842 break;
6843 case 2:
6844 break;
6845 default:
6846 g_assert_not_reached();
6848 write_fp_sreg(s, rd, tcg_op);
6849 } else {
6850 write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
6853 tcg_temp_free_i32(tcg_op);
6855 tcg_temp_free_i32(tcg_shift);
6857 if (!is_q && !scalar) {
6858 clear_vec_high(s, rd);
6863 /* Common vector code for handling integer to FP conversion */
6864 static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
6865 int elements, int is_signed,
6866 int fracbits, int size)
6868 bool is_double = (size == 3);
6869 TCGv_ptr tcg_fpst = get_fpstatus_ptr();
6870 TCGv_i32 tcg_shift = tcg_const_i32(fracbits);
6871 TCGv_i64 tcg_int = tcg_temp_new_i64();
6872 TCGMemOp mop = size | (is_signed ? MO_SIGN : 0);
6873 int pass;
6875 for (pass = 0; pass < elements; pass++) {
6876 read_vec_element(s, tcg_int, rn, pass, mop);
6878 if (is_double) {
6879 TCGv_i64 tcg_double = tcg_temp_new_i64();
6880 if (is_signed) {
6881 gen_helper_vfp_sqtod(tcg_double, tcg_int,
6882 tcg_shift, tcg_fpst);
6883 } else {
6884 gen_helper_vfp_uqtod(tcg_double, tcg_int,
6885 tcg_shift, tcg_fpst);
6887 if (elements == 1) {
6888 write_fp_dreg(s, rd, tcg_double);
6889 } else {
6890 write_vec_element(s, tcg_double, rd, pass, MO_64);
6892 tcg_temp_free_i64(tcg_double);
6893 } else {
6894 TCGv_i32 tcg_single = tcg_temp_new_i32();
6895 if (is_signed) {
6896 gen_helper_vfp_sqtos(tcg_single, tcg_int,
6897 tcg_shift, tcg_fpst);
6898 } else {
6899 gen_helper_vfp_uqtos(tcg_single, tcg_int,
6900 tcg_shift, tcg_fpst);
6902 if (elements == 1) {
6903 write_fp_sreg(s, rd, tcg_single);
6904 } else {
6905 write_vec_element_i32(s, tcg_single, rd, pass, MO_32);
6907 tcg_temp_free_i32(tcg_single);
6911 if (!is_double && elements == 2) {
6912 clear_vec_high(s, rd);
6915 tcg_temp_free_i64(tcg_int);
6916 tcg_temp_free_ptr(tcg_fpst);
6917 tcg_temp_free_i32(tcg_shift);
6920 /* UCVTF/SCVTF - Integer to FP conversion */
6921 static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
6922 bool is_q, bool is_u,
6923 int immh, int immb, int opcode,
6924 int rn, int rd)
6926 bool is_double = extract32(immh, 3, 1);
6927 int size = is_double ? MO_64 : MO_32;
6928 int elements;
6929 int immhb = immh << 3 | immb;
6930 int fracbits = (is_double ? 128 : 64) - immhb;
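/* The fraction width is encoded as (2 * esize) - immh:immb, so
 * fracbits is in the range 1..esize.
 */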
6932 if (!extract32(immh, 2, 2)) {
6933 unallocated_encoding(s);
6934 return;
6937 if (is_scalar) {
6938 elements = 1;
6939 } else {
6940 elements = is_double ? 2 : is_q ? 4 : 2;
6941 if (is_double && !is_q) {
6942 unallocated_encoding(s);
6943 return;
6947 if (!fp_access_check(s)) {
6948 return;
6951 /* immh == 0 would be a failure of the decode logic */
6952 g_assert(immh);
6954 handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
6957 /* FCVTZS, FCVTZU - FP to fixed-point conversion */
6958 static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
6959 bool is_q, bool is_u,
6960 int immh, int immb, int rn, int rd)
6962 bool is_double = extract32(immh, 3, 1);
6963 int immhb = immh << 3 | immb;
6964 int fracbits = (is_double ? 128 : 64) - immhb;
6965 int pass;
6966 TCGv_ptr tcg_fpstatus;
6967 TCGv_i32 tcg_rmode, tcg_shift;
6969 if (!extract32(immh, 2, 2)) {
6970 unallocated_encoding(s);
6971 return;
6974 if (!is_scalar && !is_q && is_double) {
6975 unallocated_encoding(s);
6976 return;
6979 if (!fp_access_check(s)) {
6980 return;
6983 assert(!(is_scalar && is_q));
6985 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
6986 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
6987 tcg_fpstatus = get_fpstatus_ptr();
6988 tcg_shift = tcg_const_i32(fracbits);
6990 if (is_double) {
6991 int maxpass = is_scalar ? 1 : 2;
6993 for (pass = 0; pass < maxpass; pass++) {
6994 TCGv_i64 tcg_op = tcg_temp_new_i64();
6996 read_vec_element(s, tcg_op, rn, pass, MO_64);
6997 if (is_u) {
6998 gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
6999 } else {
7000 gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
7002 write_vec_element(s, tcg_op, rd, pass, MO_64);
7003 tcg_temp_free_i64(tcg_op);
7005 if (!is_q) {
7006 clear_vec_high(s, rd);
7008 } else {
7009 int maxpass = is_scalar ? 1 : is_q ? 4 : 2;
7010 for (pass = 0; pass < maxpass; pass++) {
7011 TCGv_i32 tcg_op = tcg_temp_new_i32();
7013 read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
7014 if (is_u) {
7015 gen_helper_vfp_touls(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
7016 } else {
7017 gen_helper_vfp_tosls(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
7019 if (is_scalar) {
7020 write_fp_sreg(s, rd, tcg_op);
7021 } else {
7022 write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
7024 tcg_temp_free_i32(tcg_op);
7026 if (!is_q && !is_scalar) {
7027 clear_vec_high(s, rd);
7031 tcg_temp_free_ptr(tcg_fpstatus);
7032 tcg_temp_free_i32(tcg_shift);
7033 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
7034 tcg_temp_free_i32(tcg_rmode);
7037 /* AdvSIMD scalar shift by immediate
7038 * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
7039 * +-----+---+-------------+------+------+--------+---+------+------+
7040 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
7041 * +-----+---+-------------+------+------+--------+---+------+------+
7043 * This is the scalar version, so it works on fixed-size registers.
7045 static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
7047 int rd = extract32(insn, 0, 5);
7048 int rn = extract32(insn, 5, 5);
7049 int opcode = extract32(insn, 11, 5);
7050 int immb = extract32(insn, 16, 3);
7051 int immh = extract32(insn, 19, 4);
7052 bool is_u = extract32(insn, 29, 1);
7054 if (immh == 0) {
7055 unallocated_encoding(s);
7056 return;
7059 switch (opcode) {
7060 case 0x08: /* SRI */
7061 if (!is_u) {
7062 unallocated_encoding(s);
7063 return;
7065 /* fall through */
7066 case 0x00: /* SSHR / USHR */
7067 case 0x02: /* SSRA / USRA */
7068 case 0x04: /* SRSHR / URSHR */
7069 case 0x06: /* SRSRA / URSRA */
7070 handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
7071 break;
7072 case 0x0a: /* SHL / SLI */
7073 handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
7074 break;
7075 case 0x1c: /* SCVTF, UCVTF */
7076 handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
7077 opcode, rn, rd);
7078 break;
7079 case 0x10: /* SQSHRUN, SQSHRUN2 */
7080 case 0x11: /* SQRSHRUN, SQRSHRUN2 */
7081 if (!is_u) {
7082 unallocated_encoding(s);
7083 return;
7085 handle_vec_simd_sqshrn(s, true, false, false, true,
7086 immh, immb, opcode, rn, rd);
7087 break;
7088 case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
7089 case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
7090 handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
7091 immh, immb, opcode, rn, rd);
7092 break;
7093 case 0xc: /* SQSHLU */
7094 if (!is_u) {
7095 unallocated_encoding(s);
7096 return;
7098 handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
7099 break;
7100 case 0xe: /* SQSHL, UQSHL */
7101 handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
7102 break;
7103 case 0x1f: /* FCVTZS, FCVTZU */
7104 handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
7105 break;
7106 default:
7107 unallocated_encoding(s);
7108 break;
7112 /* AdvSIMD scalar three different
7113 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
7114 * +-----+---+-----------+------+---+------+--------+-----+------+------+
7115 * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
7116 * +-----+---+-----------+------+---+------+--------+-----+------+------+
7118 static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
7120 bool is_u = extract32(insn, 29, 1);
7121 int size = extract32(insn, 22, 2);
7122 int opcode = extract32(insn, 12, 4);
7123 int rm = extract32(insn, 16, 5);
7124 int rn = extract32(insn, 5, 5);
7125 int rd = extract32(insn, 0, 5);
7127 if (is_u) {
7128 unallocated_encoding(s);
7129 return;
7132 switch (opcode) {
7133 case 0x9: /* SQDMLAL, SQDMLAL2 */
7134 case 0xb: /* SQDMLSL, SQDMLSL2 */
7135 case 0xd: /* SQDMULL, SQDMULL2 */
7136 if (size == 0 || size == 3) {
7137 unallocated_encoding(s);
7138 return;
7140 break;
7141 default:
7142 unallocated_encoding(s);
7143 return;
7146 if (!fp_access_check(s)) {
7147 return;
7150 if (size == 2) {
7151 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
7152 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
7153 TCGv_i64 tcg_res = tcg_temp_new_i64();
7155 read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
7156 read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);
7158 tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
7159 gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);
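/* The doubling of the product is done by saturating-adding the
 * result to itself, which also sets QC on overflow.
 */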
7161 switch (opcode) {
7162 case 0xd: /* SQDMULL, SQDMULL2 */
7163 break;
7164 case 0xb: /* SQDMLSL, SQDMLSL2 */
7165 tcg_gen_neg_i64(tcg_res, tcg_res);
7166 /* fall through */
7167 case 0x9: /* SQDMLAL, SQDMLAL2 */
7168 read_vec_element(s, tcg_op1, rd, 0, MO_64);
7169 gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
7170 tcg_res, tcg_op1);
7171 break;
7172 default:
7173 g_assert_not_reached();
7176 write_fp_dreg(s, rd, tcg_res);
7178 tcg_temp_free_i64(tcg_op1);
7179 tcg_temp_free_i64(tcg_op2);
7180 tcg_temp_free_i64(tcg_res);
7181 } else {
7182 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
7183 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
7184 TCGv_i64 tcg_res = tcg_temp_new_i64();
7186 read_vec_element_i32(s, tcg_op1, rn, 0, MO_16);
7187 read_vec_element_i32(s, tcg_op2, rm, 0, MO_16);
7189 gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
7190 gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);
7192 switch (opcode) {
7193 case 0xd: /* SQDMULL, SQDMULL2 */
7194 break;
7195 case 0xb: /* SQDMLSL, SQDMLSL2 */
7196 gen_helper_neon_negl_u32(tcg_res, tcg_res);
7197 /* fall through */
7198 case 0x9: /* SQDMLAL, SQDMLAL2 */
7200 TCGv_i64 tcg_op3 = tcg_temp_new_i64();
7201 read_vec_element(s, tcg_op3, rd, 0, MO_32);
7202 gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
7203 tcg_res, tcg_op3);
7204 tcg_temp_free_i64(tcg_op3);
7205 break;
7207 default:
7208 g_assert_not_reached();
7211 tcg_gen_ext32u_i64(tcg_res, tcg_res);
7212 write_fp_dreg(s, rd, tcg_res);
7214 tcg_temp_free_i32(tcg_op1);
7215 tcg_temp_free_i32(tcg_op2);
7216 tcg_temp_free_i64(tcg_res);
7220 static void handle_3same_64(DisasContext *s, int opcode, bool u,
7221 TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
7223 /* Handle 64x64->64 opcodes which are shared between the scalar
7224 * and vector 3-same groups. We cover every opcode where size == 3
7225 * is valid in either the three-reg-same (integer, not pairwise)
7226 * or scalar-three-reg-same groups. (Some opcodes are not yet
7227 * implemented.)
7229 TCGCond cond;
7231 switch (opcode) {
7232 case 0x1: /* SQADD */
7233 if (u) {
7234 gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7235 } else {
7236 gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7238 break;
7239 case 0x5: /* SQSUB */
7240 if (u) {
7241 gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7242 } else {
7243 gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7245 break;
7246 case 0x6: /* CMGT, CMHI */
7247 /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
7248 * We implement this using setcond (test) and then negating.
7250 cond = u ? TCG_COND_GTU : TCG_COND_GT;
7251 do_cmop:
7252 tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
7253 tcg_gen_neg_i64(tcg_rd, tcg_rd);
7254 break;
7255 case 0x7: /* CMGE, CMHS */
7256 cond = u ? TCG_COND_GEU : TCG_COND_GE;
7257 goto do_cmop;
7258 case 0x11: /* CMTST, CMEQ */
7259 if (u) {
7260 cond = TCG_COND_EQ;
7261 goto do_cmop;
7263 /* CMTST : test is "if ((X & Y) != 0)". */
7264 tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
7265 tcg_gen_setcondi_i64(TCG_COND_NE, tcg_rd, tcg_rd, 0);
7266 tcg_gen_neg_i64(tcg_rd, tcg_rd);
7267 break;
7268 case 0x8: /* SSHL, USHL */
7269 if (u) {
7270 gen_helper_neon_shl_u64(tcg_rd, tcg_rn, tcg_rm);
7271 } else {
7272 gen_helper_neon_shl_s64(tcg_rd, tcg_rn, tcg_rm);
7274 break;
7275 case 0x9: /* SQSHL, UQSHL */
7276 if (u) {
7277 gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7278 } else {
7279 gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7281 break;
7282 case 0xa: /* SRSHL, URSHL */
7283 if (u) {
7284 gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
7285 } else {
7286 gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
7288 break;
7289 case 0xb: /* SQRSHL, UQRSHL */
7290 if (u) {
7291 gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7292 } else {
7293 gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7295 break;
7296 case 0x10: /* ADD, SUB */
7297 if (u) {
7298 tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
7299 } else {
7300 tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
7302 break;
7303 default:
7304 g_assert_not_reached();
7308 /* Handle the 3-same-operands float operations; shared by the scalar
7309 * and vector encodings. The caller must filter out any encodings
7310 * not allocated for the encoding it is dealing with.
7312 static void handle_3same_float(DisasContext *s, int size, int elements,
7313 int fpopcode, int rd, int rn, int rm)
7315 int pass;
7316 TCGv_ptr fpst = get_fpstatus_ptr();
7318 for (pass = 0; pass < elements; pass++) {
7319 if (size) {
7320 /* Double */
7321 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
7322 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
7323 TCGv_i64 tcg_res = tcg_temp_new_i64();
7325 read_vec_element(s, tcg_op1, rn, pass, MO_64);
7326 read_vec_element(s, tcg_op2, rm, pass, MO_64);
7328 switch (fpopcode) {
7329 case 0x39: /* FMLS */
7330 /* As usual for ARM, separate negation for fused multiply-add */
7331 gen_helper_vfp_negd(tcg_op1, tcg_op1);
7332 /* fall through */
7333 case 0x19: /* FMLA */
7334 read_vec_element(s, tcg_res, rd, pass, MO_64);
7335 gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
7336 tcg_res, fpst);
7337 break;
7338 case 0x18: /* FMAXNM */
7339 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7340 break;
7341 case 0x1a: /* FADD */
7342 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
7343 break;
7344 case 0x1b: /* FMULX */
7345 gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
7346 break;
7347 case 0x1c: /* FCMEQ */
7348 gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7349 break;
7350 case 0x1e: /* FMAX */
7351 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
7352 break;
7353 case 0x1f: /* FRECPS */
7354 gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7355 break;
7356 case 0x38: /* FMINNM */
7357 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7358 break;
7359 case 0x3a: /* FSUB */
7360 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
7361 break;
7362 case 0x3e: /* FMIN */
7363 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
7364 break;
7365 case 0x3f: /* FRSQRTS */
7366 gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7367 break;
7368 case 0x5b: /* FMUL */
7369 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
7370 break;
7371 case 0x5c: /* FCMGE */
7372 gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7373 break;
7374 case 0x5d: /* FACGE */
7375 gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7376 break;
7377 case 0x5f: /* FDIV */
7378 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
7379 break;
7380 case 0x7a: /* FABD */
7381 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
7382 gen_helper_vfp_absd(tcg_res, tcg_res);
7383 break;
7384 case 0x7c: /* FCMGT */
7385 gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7386 break;
7387 case 0x7d: /* FACGT */
7388 gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7389 break;
7390 default:
7391 g_assert_not_reached();
7394 write_vec_element(s, tcg_res, rd, pass, MO_64);
7396 tcg_temp_free_i64(tcg_res);
7397 tcg_temp_free_i64(tcg_op1);
7398 tcg_temp_free_i64(tcg_op2);
7399 } else {
7400 /* Single */
7401 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
7402 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
7403 TCGv_i32 tcg_res = tcg_temp_new_i32();
7405 read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
7406 read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
7408 switch (fpopcode) {
7409 case 0x39: /* FMLS */
7410 /* As usual for ARM, separate negation for fused multiply-add */
7411 gen_helper_vfp_negs(tcg_op1, tcg_op1);
7412 /* fall through */
7413 case 0x19: /* FMLA */
7414 read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
7415 gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
7416 tcg_res, fpst);
7417 break;
7418 case 0x1a: /* FADD */
7419 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
7420 break;
7421 case 0x1b: /* FMULX */
7422 gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
7423 break;
7424 case 0x1c: /* FCMEQ */
7425 gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7426 break;
7427 case 0x1e: /* FMAX */
7428 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
7429 break;
7430 case 0x1f: /* FRECPS */
7431 gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7432 break;
7433 case 0x18: /* FMAXNM */
7434 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
7435 break;
7436 case 0x38: /* FMINNM */
7437 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
7438 break;
7439 case 0x3a: /* FSUB */
7440 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
7441 break;
7442 case 0x3e: /* FMIN */
7443 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
7444 break;
7445 case 0x3f: /* FRSQRTS */
7446 gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7447 break;
7448 case 0x5b: /* FMUL */
7449 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
7450 break;
7451 case 0x5c: /* FCMGE */
7452 gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7453 break;
7454 case 0x5d: /* FACGE */
7455 gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7456 break;
7457 case 0x5f: /* FDIV */
7458 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
7459 break;
7460 case 0x7a: /* FABD */
7461 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
7462 gen_helper_vfp_abss(tcg_res, tcg_res);
7463 break;
7464 case 0x7c: /* FCMGT */
7465 gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7466 break;
7467 case 0x7d: /* FACGT */
7468 gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7469 break;
7470 default:
7471 g_assert_not_reached();
7474 if (elements == 1) {
7475 /* scalar single so clear high part */
7476 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
7478 tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
7479 write_vec_element(s, tcg_tmp, rd, pass, MO_64);
7480 tcg_temp_free_i64(tcg_tmp);
7481 } else {
7482 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
7485 tcg_temp_free_i32(tcg_res);
7486 tcg_temp_free_i32(tcg_op1);
7487 tcg_temp_free_i32(tcg_op2);
7491 tcg_temp_free_ptr(fpst);
7493 if ((elements << size) < 4) {
7494 /* scalar, or non-quad vector op */
7495 clear_vec_high(s, rd);
7499 /* AdvSIMD scalar three same
7500 * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
7501 * +-----+---+-----------+------+---+------+--------+---+------+------+
7502 * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
7503 * +-----+---+-----------+------+---+------+--------+---+------+------+
7505 static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
7507 int rd = extract32(insn, 0, 5);
7508 int rn = extract32(insn, 5, 5);
7509 int opcode = extract32(insn, 11, 5);
7510 int rm = extract32(insn, 16, 5);
7511 int size = extract32(insn, 22, 2);
7512 bool u = extract32(insn, 29, 1);
7513 TCGv_i64 tcg_rd;
7515 if (opcode >= 0x18) {
7516 /* Floating point: U, size[1] and opcode indicate operation */
7517 int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
7518 switch (fpopcode) {
7519 case 0x1b: /* FMULX */
7520 case 0x1f: /* FRECPS */
7521 case 0x3f: /* FRSQRTS */
7522 case 0x5d: /* FACGE */
7523 case 0x7d: /* FACGT */
7524 case 0x1c: /* FCMEQ */
7525 case 0x5c: /* FCMGE */
7526 case 0x7c: /* FCMGT */
7527 case 0x7a: /* FABD */
7528 break;
7529 default:
7530 unallocated_encoding(s);
7531 return;
7534 if (!fp_access_check(s)) {
7535 return;
7538 handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
7539 return;
7542 switch (opcode) {
7543 case 0x1: /* SQADD, UQADD */
7544 case 0x5: /* SQSUB, UQSUB */
7545 case 0x9: /* SQSHL, UQSHL */
7546 case 0xb: /* SQRSHL, UQRSHL */
7547 break;
7548 case 0x8: /* SSHL, USHL */
7549 case 0xa: /* SRSHL, URSHL */
7550 case 0x6: /* CMGT, CMHI */
7551 case 0x7: /* CMGE, CMHS */
7552 case 0x11: /* CMTST, CMEQ */
7553 case 0x10: /* ADD, SUB (vector) */
7554 if (size != 3) {
7555 unallocated_encoding(s);
7556 return;
7558 break;
7559 case 0x16: /* SQDMULH, SQRDMULH (vector) */
7560 if (size != 1 && size != 2) {
7561 unallocated_encoding(s);
7562 return;
7564 break;
7565 default:
7566 unallocated_encoding(s);
7567 return;
7570 if (!fp_access_check(s)) {
7571 return;
7574 tcg_rd = tcg_temp_new_i64();
7576 if (size == 3) {
7577 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
7578 TCGv_i64 tcg_rm = read_fp_dreg(s, rm);
7580 handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
7581 tcg_temp_free_i64(tcg_rn);
7582 tcg_temp_free_i64(tcg_rm);
7583 } else {
7584 /* Do a single operation on the lowest element in the vector.
7585 * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
7586 * no side effects for all these operations.
7587 * OPTME: special-purpose helpers would avoid doing some
7588 * unnecessary work in the helper for the 8 and 16 bit cases.
7590 NeonGenTwoOpEnvFn *genenvfn;
7591 TCGv_i32 tcg_rn = tcg_temp_new_i32();
7592 TCGv_i32 tcg_rm = tcg_temp_new_i32();
7593 TCGv_i32 tcg_rd32 = tcg_temp_new_i32();
7595 read_vec_element_i32(s, tcg_rn, rn, 0, size);
7596 read_vec_element_i32(s, tcg_rm, rm, 0, size);
7598 switch (opcode) {
7599 case 0x1: /* SQADD, UQADD */
7601 static NeonGenTwoOpEnvFn * const fns[3][2] = {
7602 { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
7603 { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
7604 { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
7606 genenvfn = fns[size][u];
7607 break;
7609 case 0x5: /* SQSUB, UQSUB */
7611 static NeonGenTwoOpEnvFn * const fns[3][2] = {
7612 { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
7613 { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
7614 { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
7616 genenvfn = fns[size][u];
7617 break;
7619 case 0x9: /* SQSHL, UQSHL */
7621 static NeonGenTwoOpEnvFn * const fns[3][2] = {
7622 { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
7623 { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
7624 { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
7626 genenvfn = fns[size][u];
7627 break;
7629 case 0xb: /* SQRSHL, UQRSHL */
7631 static NeonGenTwoOpEnvFn * const fns[3][2] = {
7632 { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
7633 { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
7634 { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
7636 genenvfn = fns[size][u];
7637 break;
7639 case 0x16: /* SQDMULH, SQRDMULH */
7641 static NeonGenTwoOpEnvFn * const fns[2][2] = {
7642 { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
7643 { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
7645 assert(size == 1 || size == 2);
7646 genenvfn = fns[size - 1][u];
7647 break;
7649 default:
7650 g_assert_not_reached();
7653 genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
7654 tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
7655 tcg_temp_free_i32(tcg_rd32);
7656 tcg_temp_free_i32(tcg_rn);
7657 tcg_temp_free_i32(tcg_rm);
7660 write_fp_dreg(s, rd, tcg_rd);
7662 tcg_temp_free_i64(tcg_rd);
7665 static void handle_2misc_64(DisasContext *s, int opcode, bool u,
7666 TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
7667 TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
7669 /* Handle 64->64 opcodes which are shared between the scalar and
7670 * vector 2-reg-misc groups. We cover every integer opcode where size == 3
7671 * is valid in either group and also the double-precision fp ops.
7672 * The caller need only provide tcg_rmode and tcg_fpstatus if the op
7673 * requires them.
7675 TCGCond cond;
7677 switch (opcode) {
7678 case 0x4: /* CLS, CLZ */
7679 if (u) {
7680 tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
7681 } else {
7682 tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
7684 break;
7685 case 0x5: /* NOT */
7686 /* This opcode is shared with CNT and RBIT but we have earlier
7687 * enforced that size == 3 if and only if this is the NOT insn.
7689 tcg_gen_not_i64(tcg_rd, tcg_rn);
7690 break;
7691 case 0x7: /* SQABS, SQNEG */
7692 if (u) {
7693 gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
7694 } else {
7695 gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
7697 break;
7698 case 0xa: /* CMLT */
7699 /* 64 bit integer comparison against zero, result is
7700 * test ? (2^64 - 1) : 0. We implement this using setcond (test)
7701 * and then negating.
7703 cond = TCG_COND_LT;
7704 do_cmop:
7705 tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
7706 tcg_gen_neg_i64(tcg_rd, tcg_rd);
7707 break;
7708 case 0x8: /* CMGT, CMGE */
7709 cond = u ? TCG_COND_GE : TCG_COND_GT;
7710 goto do_cmop;
7711 case 0x9: /* CMEQ, CMLE */
7712 cond = u ? TCG_COND_LE : TCG_COND_EQ;
7713 goto do_cmop;
7714 case 0xb: /* ABS, NEG */
7715 if (u) {
7716 tcg_gen_neg_i64(tcg_rd, tcg_rn);
7717 } else {
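/* ABS: negate, then use movcond to pick the original value when
 * it was already positive, i.e. rd = (rn > 0) ? rn : -rn.
 */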
7718 TCGv_i64 tcg_zero = tcg_const_i64(0);
7719 tcg_gen_neg_i64(tcg_rd, tcg_rn);
7720 tcg_gen_movcond_i64(TCG_COND_GT, tcg_rd, tcg_rn, tcg_zero,
7721 tcg_rn, tcg_rd);
7722 tcg_temp_free_i64(tcg_zero);
7724 break;
7725 case 0x2f: /* FABS */
7726 gen_helper_vfp_absd(tcg_rd, tcg_rn);
7727 break;
7728 case 0x6f: /* FNEG */
7729 gen_helper_vfp_negd(tcg_rd, tcg_rn);
7730 break;
7731 case 0x7f: /* FSQRT */
7732 gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
7733 break;
7734 case 0x1a: /* FCVTNS */
7735 case 0x1b: /* FCVTMS */
7736 case 0x1c: /* FCVTAS */
7737 case 0x3a: /* FCVTPS */
7738 case 0x3b: /* FCVTZS */
7740 TCGv_i32 tcg_shift = tcg_const_i32(0);
7741 gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
7742 tcg_temp_free_i32(tcg_shift);
7743 break;
7745 case 0x5a: /* FCVTNU */
7746 case 0x5b: /* FCVTMU */
7747 case 0x5c: /* FCVTAU */
7748 case 0x7a: /* FCVTPU */
7749 case 0x7b: /* FCVTZU */
7751 TCGv_i32 tcg_shift = tcg_const_i32(0);
7752 gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
7753 tcg_temp_free_i32(tcg_shift);
7754 break;
7756 case 0x18: /* FRINTN */
7757 case 0x19: /* FRINTM */
7758 case 0x38: /* FRINTP */
7759 case 0x39: /* FRINTZ */
7760 case 0x58: /* FRINTA */
7761 case 0x79: /* FRINTI */
7762 gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
7763 break;
7764 case 0x59: /* FRINTX */
7765 gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
7766 break;
7767 default:
7768 g_assert_not_reached();
7772 static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
7773 bool is_scalar, bool is_u, bool is_q,
7774 int size, int rn, int rd)
7776 bool is_double = (size == 3);
7777 TCGv_ptr fpst;
7779 if (!fp_access_check(s)) {
7780 return;
7783 fpst = get_fpstatus_ptr();
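/* FCMLT and FCMLE have no dedicated helpers; we swap the operands
 * and reuse the FCMGT/FCMGE comparisons against zero instead.
 */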
7785 if (is_double) {
7786 TCGv_i64 tcg_op = tcg_temp_new_i64();
7787 TCGv_i64 tcg_zero = tcg_const_i64(0);
7788 TCGv_i64 tcg_res = tcg_temp_new_i64();
7789 NeonGenTwoDoubleOPFn *genfn;
7790 bool swap = false;
7791 int pass;
7793 switch (opcode) {
7794 case 0x2e: /* FCMLT (zero) */
7795 swap = true;
7796 /* fallthrough */
7797 case 0x2c: /* FCMGT (zero) */
7798 genfn = gen_helper_neon_cgt_f64;
7799 break;
7800 case 0x2d: /* FCMEQ (zero) */
7801 genfn = gen_helper_neon_ceq_f64;
7802 break;
7803 case 0x6d: /* FCMLE (zero) */
7804 swap = true;
7805 /* fall through */
7806 case 0x6c: /* FCMGE (zero) */
7807 genfn = gen_helper_neon_cge_f64;
7808 break;
7809 default:
7810 g_assert_not_reached();
7813 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
7814 read_vec_element(s, tcg_op, rn, pass, MO_64);
7815 if (swap) {
7816 genfn(tcg_res, tcg_zero, tcg_op, fpst);
7817 } else {
7818 genfn(tcg_res, tcg_op, tcg_zero, fpst);
7820 write_vec_element(s, tcg_res, rd, pass, MO_64);
7822 if (is_scalar) {
7823 clear_vec_high(s, rd);
7826 tcg_temp_free_i64(tcg_res);
7827 tcg_temp_free_i64(tcg_zero);
7828 tcg_temp_free_i64(tcg_op);
7829 } else {
7830 TCGv_i32 tcg_op = tcg_temp_new_i32();
7831 TCGv_i32 tcg_zero = tcg_const_i32(0);
7832 TCGv_i32 tcg_res = tcg_temp_new_i32();
7833 NeonGenTwoSingleOPFn *genfn;
7834 bool swap = false;
7835 int pass, maxpasses;
7837 switch (opcode) {
7838 case 0x2e: /* FCMLT (zero) */
7839 swap = true;
7840 /* fall through */
7841 case 0x2c: /* FCMGT (zero) */
7842 genfn = gen_helper_neon_cgt_f32;
7843 break;
7844 case 0x2d: /* FCMEQ (zero) */
7845 genfn = gen_helper_neon_ceq_f32;
7846 break;
7847 case 0x6d: /* FCMLE (zero) */
7848 swap = true;
7849 /* fall through */
7850 case 0x6c: /* FCMGE (zero) */
7851 genfn = gen_helper_neon_cge_f32;
7852 break;
7853 default:
7854 g_assert_not_reached();
7857 if (is_scalar) {
7858 maxpasses = 1;
7859 } else {
7860 maxpasses = is_q ? 4 : 2;
7863 for (pass = 0; pass < maxpasses; pass++) {
7864 read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
7865 if (swap) {
7866 genfn(tcg_res, tcg_zero, tcg_op, fpst);
7867 } else {
7868 genfn(tcg_res, tcg_op, tcg_zero, fpst);
7870 if (is_scalar) {
7871 write_fp_sreg(s, rd, tcg_res);
7872 } else {
7873 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
7876 tcg_temp_free_i32(tcg_res);
7877 tcg_temp_free_i32(tcg_zero);
7878 tcg_temp_free_i32(tcg_op);
7879 if (!is_q && !is_scalar) {
7880 clear_vec_high(s, rd);
7884 tcg_temp_free_ptr(fpst);
7887 static void handle_2misc_reciprocal(DisasContext *s, int opcode,
7888 bool is_scalar, bool is_u, bool is_q,
7889 int size, int rn, int rd)
7891 bool is_double = (size == 3);
7892 TCGv_ptr fpst = get_fpstatus_ptr();
7894 if (is_double) {
7895 TCGv_i64 tcg_op = tcg_temp_new_i64();
7896 TCGv_i64 tcg_res = tcg_temp_new_i64();
7897 int pass;
7899 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
7900 read_vec_element(s, tcg_op, rn, pass, MO_64);
7901 switch (opcode) {
7902 case 0x3d: /* FRECPE */
7903 gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
7904 break;
7905 case 0x3f: /* FRECPX */
7906 gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
7907 break;
7908 case 0x7d: /* FRSQRTE */
7909 gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
7910 break;
7911 default:
7912 g_assert_not_reached();
7914 write_vec_element(s, tcg_res, rd, pass, MO_64);
7916 if (is_scalar) {
7917 clear_vec_high(s, rd);
7920 tcg_temp_free_i64(tcg_res);
7921 tcg_temp_free_i64(tcg_op);
7922 } else {
7923 TCGv_i32 tcg_op = tcg_temp_new_i32();
7924 TCGv_i32 tcg_res = tcg_temp_new_i32();
7925 int pass, maxpasses;
7927 if (is_scalar) {
7928 maxpasses = 1;
7929 } else {
7930 maxpasses = is_q ? 4 : 2;
7933 for (pass = 0; pass < maxpasses; pass++) {
7934 read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
7936 switch (opcode) {
7937 case 0x3c: /* URECPE */
7938 gen_helper_recpe_u32(tcg_res, tcg_op, fpst);
7939 break;
7940 case 0x3d: /* FRECPE */
7941 gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
7942 break;
7943 case 0x3f: /* FRECPX */
7944 gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
7945 break;
7946 case 0x7d: /* FRSQRTE */
7947 gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
7948 break;
7949 default:
7950 g_assert_not_reached();
7953 if (is_scalar) {
7954 write_fp_sreg(s, rd, tcg_res);
7955 } else {
7956 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
7959 tcg_temp_free_i32(tcg_res);
7960 tcg_temp_free_i32(tcg_op);
7961 if (!is_q && !is_scalar) {
7962 clear_vec_high(s, rd);
7965 tcg_temp_free_ptr(fpst);
7968 static void handle_2misc_narrow(DisasContext *s, bool scalar,
7969 int opcode, bool u, bool is_q,
7970 int size, int rn, int rd)
7972 /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
7973 * in the source becomes a size element in the destination).
7975 int pass;
7976 TCGv_i32 tcg_res[2];
7977 int destelt = is_q ? 2 : 0;
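/* For the "2" (second part) forms, selected by is_q, the narrowed
 * results land in the upper half of Vd (32-bit elements 2 and 3).
 */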
7978 int passes = scalar ? 1 : 2;
7980 if (scalar) {
7981 tcg_res[1] = tcg_const_i32(0);
7984 for (pass = 0; pass < passes; pass++) {
7985 TCGv_i64 tcg_op = tcg_temp_new_i64();
7986 NeonGenNarrowFn *genfn = NULL;
7987 NeonGenNarrowEnvFn *genenvfn = NULL;
7989 if (scalar) {
7990 read_vec_element(s, tcg_op, rn, pass, size + 1);
7991 } else {
7992 read_vec_element(s, tcg_op, rn, pass, MO_64);
7994 tcg_res[pass] = tcg_temp_new_i32();
7996 switch (opcode) {
7997 case 0x12: /* XTN, SQXTUN */
7999 static NeonGenNarrowFn * const xtnfns[3] = {
8000 gen_helper_neon_narrow_u8,
8001 gen_helper_neon_narrow_u16,
8002 tcg_gen_extrl_i64_i32,
8004 static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
8005 gen_helper_neon_unarrow_sat8,
8006 gen_helper_neon_unarrow_sat16,
8007 gen_helper_neon_unarrow_sat32,
8009 if (u) {
8010 genenvfn = sqxtunfns[size];
8011 } else {
8012 genfn = xtnfns[size];
8014 break;
8016 case 0x14: /* SQXTN, UQXTN */
8018 static NeonGenNarrowEnvFn * const fns[3][2] = {
8019 { gen_helper_neon_narrow_sat_s8,
8020 gen_helper_neon_narrow_sat_u8 },
8021 { gen_helper_neon_narrow_sat_s16,
8022 gen_helper_neon_narrow_sat_u16 },
8023 { gen_helper_neon_narrow_sat_s32,
8024 gen_helper_neon_narrow_sat_u32 },
8026 genenvfn = fns[size][u];
8027 break;
8029 case 0x16: /* FCVTN, FCVTN2 */
8030 /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
8031 if (size == 2) {
8032 gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
8033 } else {
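/* Single to half precision: split the two f32 values out of the
 * 64-bit element, convert each to f16 and pack the two results
 * back into one 32-bit lane.
 */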
8034 TCGv_i32 tcg_lo = tcg_temp_new_i32();
8035 TCGv_i32 tcg_hi = tcg_temp_new_i32();
8036 tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
8037 gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, cpu_env);
8038 gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, cpu_env);
8039 tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
8040 tcg_temp_free_i32(tcg_lo);
8041 tcg_temp_free_i32(tcg_hi);
8043 break;
8044 case 0x56: /* FCVTXN, FCVTXN2 */
8045 /* 64 bit to 32 bit float conversion
8046 * with von Neumann rounding (round to odd)
8048 assert(size == 2);
8049 gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
8050 break;
8051 default:
8052 g_assert_not_reached();
8055 if (genfn) {
8056 genfn(tcg_res[pass], tcg_op);
8057 } else if (genenvfn) {
8058 genenvfn(tcg_res[pass], cpu_env, tcg_op);
8061 tcg_temp_free_i64(tcg_op);
8064 for (pass = 0; pass < 2; pass++) {
8065 write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
8066 tcg_temp_free_i32(tcg_res[pass]);
8068 if (!is_q) {
8069 clear_vec_high(s, rd);
8073 /* Remaining saturating accumulating ops */
8074 static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
8075 bool is_q, int size, int rn, int rd)
8077 bool is_double = (size == 3);
8079 if (is_double) {
8080 TCGv_i64 tcg_rn = tcg_temp_new_i64();
8081 TCGv_i64 tcg_rd = tcg_temp_new_i64();
8082 int pass;
8084 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
8085 read_vec_element(s, tcg_rn, rn, pass, MO_64);
8086 read_vec_element(s, tcg_rd, rd, pass, MO_64);
8088 if (is_u) { /* USQADD */
8089 gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8090 } else { /* SUQADD */
8091 gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8093 write_vec_element(s, tcg_rd, rd, pass, MO_64);
8095 if (is_scalar) {
8096 clear_vec_high(s, rd);
8099 tcg_temp_free_i64(tcg_rd);
8100 tcg_temp_free_i64(tcg_rn);
8101 } else {
8102 TCGv_i32 tcg_rn = tcg_temp_new_i32();
8103 TCGv_i32 tcg_rd = tcg_temp_new_i32();
8104 int pass, maxpasses;
8106 if (is_scalar) {
8107 maxpasses = 1;
8108 } else {
8109 maxpasses = is_q ? 4 : 2;
8112 for (pass = 0; pass < maxpasses; pass++) {
8113 if (is_scalar) {
8114 read_vec_element_i32(s, tcg_rn, rn, pass, size);
8115 read_vec_element_i32(s, tcg_rd, rd, pass, size);
8116 } else {
8117 read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
8118 read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
8121 if (is_u) { /* USQADD */
8122 switch (size) {
8123 case 0:
8124 gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8125 break;
8126 case 1:
8127 gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8128 break;
8129 case 2:
8130 gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8131 break;
8132 default:
8133 g_assert_not_reached();
8135 } else { /* SUQADD */
8136 switch (size) {
8137 case 0:
8138 gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8139 break;
8140 case 1:
8141 gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8142 break;
8143 case 2:
8144 gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8145 break;
8146 default:
8147 g_assert_not_reached();
8151 if (is_scalar) {
8152 TCGv_i64 tcg_zero = tcg_const_i64(0);
8153 write_vec_element(s, tcg_zero, rd, 0, MO_64);
8154 tcg_temp_free_i64(tcg_zero);
8156 write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
8159 if (!is_q) {
8160 clear_vec_high(s, rd);
8163 tcg_temp_free_i32(tcg_rd);
8164 tcg_temp_free_i32(tcg_rn);
8168 /* AdvSIMD scalar two reg misc
8169 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
8170 * +-----+---+-----------+------+-----------+--------+-----+------+------+
8171 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
8172 * +-----+---+-----------+------+-----------+--------+-----+------+------+
8174 static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
8176 int rd = extract32(insn, 0, 5);
8177 int rn = extract32(insn, 5, 5);
8178 int opcode = extract32(insn, 12, 5);
8179 int size = extract32(insn, 22, 2);
8180 bool u = extract32(insn, 29, 1);
8181 bool is_fcvt = false;
8182 int rmode;
8183 TCGv_i32 tcg_rmode;
8184 TCGv_ptr tcg_fpstatus;
8186 switch (opcode) {
8187 case 0x3: /* USQADD / SUQADD */
8188 if (!fp_access_check(s)) {
8189 return;
8191 handle_2misc_satacc(s, true, u, false, size, rn, rd);
8192 return;
8193 case 0x7: /* SQABS / SQNEG */
8194 break;
8195 case 0xa: /* CMLT */
8196 if (u) {
8197 unallocated_encoding(s);
8198 return;
8200 /* fall through */
8201 case 0x8: /* CMGT, CMGE */
8202 case 0x9: /* CMEQ, CMLE */
8203 case 0xb: /* ABS, NEG */
8204 if (size != 3) {
8205 unallocated_encoding(s);
8206 return;
8208 break;
8209 case 0x12: /* SQXTUN */
8210 if (!u) {
8211 unallocated_encoding(s);
8212 return;
8214 /* fall through */
8215 case 0x14: /* SQXTN, UQXTN */
8216 if (size == 3) {
8217 unallocated_encoding(s);
8218 return;
8220 if (!fp_access_check(s)) {
8221 return;
8223 handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
8224 return;
8225 case 0xc ... 0xf:
8226 case 0x16 ... 0x1d:
8227 case 0x1f:
8228 /* Floating point: U, size[1] and opcode indicate operation;
8229 * size[0] indicates single or double precision.
8231 opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
8232 size = extract32(size, 0, 1) ? 3 : 2;
8233 switch (opcode) {
8234 case 0x2c: /* FCMGT (zero) */
8235 case 0x2d: /* FCMEQ (zero) */
8236 case 0x2e: /* FCMLT (zero) */
8237 case 0x6c: /* FCMGE (zero) */
8238 case 0x6d: /* FCMLE (zero) */
8239 handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
8240 return;
8241 case 0x1d: /* SCVTF */
8242 case 0x5d: /* UCVTF */
8244 bool is_signed = (opcode == 0x1d);
8245 if (!fp_access_check(s)) {
8246 return;
8248 handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
8249 return;
8251 case 0x3d: /* FRECPE */
8252 case 0x3f: /* FRECPX */
8253 case 0x7d: /* FRSQRTE */
8254 if (!fp_access_check(s)) {
8255 return;
8257 handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
8258 return;
8259 case 0x1a: /* FCVTNS */
8260 case 0x1b: /* FCVTMS */
8261 case 0x3a: /* FCVTPS */
8262 case 0x3b: /* FCVTZS */
8263 case 0x5a: /* FCVTNU */
8264 case 0x5b: /* FCVTMU */
8265 case 0x7a: /* FCVTPU */
8266 case 0x7b: /* FCVTZU */
8267 is_fcvt = true;
8268 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
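/* opcode<5> and opcode<0> encode the FCVT rounding direction: FCVTN*,
 * FCVTM*, FCVTP* and FCVTZ* map to TIEEVEN, NEGINF, POSINF and ZERO,
 * assuming the usual 0..3 ordering of the FPROUNDING_* enumeration.
 */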
8269 break;
8270 case 0x1c: /* FCVTAS */
8271 case 0x5c: /* FCVTAU */
8272 /* TIEAWAY doesn't fit in the usual rounding mode encoding */
8273 is_fcvt = true;
8274 rmode = FPROUNDING_TIEAWAY;
8275 break;
8276 case 0x56: /* FCVTXN, FCVTXN2 */
8277 if (size == 2) {
8278 unallocated_encoding(s);
8279 return;
8281 if (!fp_access_check(s)) {
8282 return;
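/* As with the vector forms, FCVTXN encodes the source element size, so the
 * destination size passed to handle_2misc_narrow is size - 1.
 */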
8284 handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
8285 return;
8286 default:
8287 unallocated_encoding(s);
8288 return;
8290 break;
8291 default:
8292 unallocated_encoding(s);
8293 return;
8296 if (!fp_access_check(s)) {
8297 return;
8300 if (is_fcvt) {
8301 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
8302 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
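/* gen_helper_set_rmode hands back the previous rounding mode, so the same
 * call at the end of the function restores the original FPCR setting.
 */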
8303 tcg_fpstatus = get_fpstatus_ptr();
8304 } else {
8305 tcg_rmode = NULL;
8306 tcg_fpstatus = NULL;
8309 if (size == 3) {
8310 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
8311 TCGv_i64 tcg_rd = tcg_temp_new_i64();
8313 handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
8314 write_fp_dreg(s, rd, tcg_rd);
8315 tcg_temp_free_i64(tcg_rd);
8316 tcg_temp_free_i64(tcg_rn);
8317 } else {
8318 TCGv_i32 tcg_rn = tcg_temp_new_i32();
8319 TCGv_i32 tcg_rd = tcg_temp_new_i32();
8321 read_vec_element_i32(s, tcg_rn, rn, 0, size);
8323 switch (opcode) {
8324 case 0x7: /* SQABS, SQNEG */
8326 NeonGenOneOpEnvFn *genfn;
8327 static NeonGenOneOpEnvFn * const fns[3][2] = {
8328 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
8329 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
8330 { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
8332 genfn = fns[size][u];
8333 genfn(tcg_rd, cpu_env, tcg_rn);
8334 break;
8336 case 0x1a: /* FCVTNS */
8337 case 0x1b: /* FCVTMS */
8338 case 0x1c: /* FCVTAS */
8339 case 0x3a: /* FCVTPS */
8340 case 0x3b: /* FCVTZS */
8342 TCGv_i32 tcg_shift = tcg_const_i32(0);
8343 gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
8344 tcg_temp_free_i32(tcg_shift);
8345 break;
8347 case 0x5a: /* FCVTNU */
8348 case 0x5b: /* FCVTMU */
8349 case 0x5c: /* FCVTAU */
8350 case 0x7a: /* FCVTPU */
8351 case 0x7b: /* FCVTZU */
8353 TCGv_i32 tcg_shift = tcg_const_i32(0);
8354 gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
8355 tcg_temp_free_i32(tcg_shift);
8356 break;
8358 default:
8359 g_assert_not_reached();
8362 write_fp_sreg(s, rd, tcg_rd);
8363 tcg_temp_free_i32(tcg_rd);
8364 tcg_temp_free_i32(tcg_rn);
8367 if (is_fcvt) {
8368 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
8369 tcg_temp_free_i32(tcg_rmode);
8370 tcg_temp_free_ptr(tcg_fpstatus);
8374 /* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
8375 static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
8376 int immh, int immb, int opcode, int rn, int rd)
8378 int size = 32 - clz32(immh) - 1;
8379 int immhb = immh << 3 | immb;
8380 int shift = 2 * (8 << size) - immhb;
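/* immh:immb encodes (2 * esize) - shift, so for byte elements (size == 0)
 * immhb runs from 8 to 15 and the right-shift amount covers 8 down to 1;
 * in general the encodable range is 1..esize.
 */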
8381 bool accumulate = false;
8382 bool round = false;
8383 bool insert = false;
8384 int dsize = is_q ? 128 : 64;
8385 int esize = 8 << size;
8386 int elements = dsize/esize;
8387 TCGMemOp memop = size | (is_u ? 0 : MO_SIGN);
8388 TCGv_i64 tcg_rn = new_tmp_a64(s);
8389 TCGv_i64 tcg_rd = new_tmp_a64(s);
8390 TCGv_i64 tcg_round;
8391 int i;
8393 if (extract32(immh, 3, 1) && !is_q) {
8394 unallocated_encoding(s);
8395 return;
8398 if (size > 3 && !is_q) {
8399 unallocated_encoding(s);
8400 return;
8403 if (!fp_access_check(s)) {
8404 return;
8407 switch (opcode) {
8408 case 0x02: /* SSRA / USRA (accumulate) */
8409 accumulate = true;
8410 break;
8411 case 0x04: /* SRSHR / URSHR (rounding) */
8412 round = true;
8413 break;
8414 case 0x06: /* SRSRA / URSRA (accum + rounding) */
8415 accumulate = round = true;
8416 break;
8417 case 0x08: /* SRI */
8418 insert = true;
8419 break;
8422 if (round) {
8423 uint64_t round_const = 1ULL << (shift - 1);
8424 tcg_round = tcg_const_i64(round_const);
8425 } else {
8426 tcg_round = NULL;
8429 for (i = 0; i < elements; i++) {
8430 read_vec_element(s, tcg_rn, rn, i, memop);
8431 if (accumulate || insert) {
8432 read_vec_element(s, tcg_rd, rd, i, memop);
8435 if (insert) {
8436 handle_shri_with_ins(tcg_rd, tcg_rn, size, shift);
8437 } else {
8438 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
8439 accumulate, is_u, size, shift);
8442 write_vec_element(s, tcg_rd, rd, i, size);
8445 if (!is_q) {
8446 clear_vec_high(s, rd);
8449 if (round) {
8450 tcg_temp_free_i64(tcg_round);
8454 /* SHL/SLI - Vector shift left */
8455 static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
8456 int immh, int immb, int opcode, int rn, int rd)
8458 int size = 32 - clz32(immh) - 1;
8459 int immhb = immh << 3 | immb;
8460 int shift = immhb - (8 << size);
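/* For left shifts immh:immb encodes esize + shift, so byte elements
 * (size == 0) have immhb in 8..15 and shift amounts 0..7, i.e. 0..esize-1.
 */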
8461 int dsize = is_q ? 128 : 64;
8462 int esize = 8 << size;
8463 int elements = dsize/esize;
8464 TCGv_i64 tcg_rn = new_tmp_a64(s);
8465 TCGv_i64 tcg_rd = new_tmp_a64(s);
8466 int i;
8468 if (extract32(immh, 3, 1) && !is_q) {
8469 unallocated_encoding(s);
8470 return;
8473 if (size > 3 && !is_q) {
8474 unallocated_encoding(s);
8475 return;
8478 if (!fp_access_check(s)) {
8479 return;
8482 for (i = 0; i < elements; i++) {
8483 read_vec_element(s, tcg_rn, rn, i, size);
8484 if (insert) {
8485 read_vec_element(s, tcg_rd, rd, i, size);
8488 handle_shli_with_ins(tcg_rd, tcg_rn, insert, shift);
8490 write_vec_element(s, tcg_rd, rd, i, size);
8493 if (!is_q) {
8494 clear_vec_high(s, rd);
 8498 /* SSHLL/USHLL - Vector shift left with widening */
8499 static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
8500 int immh, int immb, int opcode, int rn, int rd)
8502 int size = 32 - clz32(immh) - 1;
8503 int immhb = immh << 3 | immb;
8504 int shift = immhb - (8 << size);
8505 int dsize = 64;
8506 int esize = 8 << size;
8507 int elements = dsize/esize;
8508 TCGv_i64 tcg_rn = new_tmp_a64(s);
8509 TCGv_i64 tcg_rd = new_tmp_a64(s);
8510 int i;
8512 if (size >= 3) {
8513 unallocated_encoding(s);
8514 return;
8517 if (!fp_access_check(s)) {
8518 return;
8521 /* For the LL variants the store is larger than the load,
8522 * so if rd == rn we would overwrite parts of our input.
8523 * So load everything right now and use shifts in the main loop.
8525 read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);
8527 for (i = 0; i < elements; i++) {
8528 tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
8529 ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
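/* ext_and_shift_reg takes the usual AArch64 extend option encoding, where
 * bit 2 selects a signed extend, so size | (!is_u << 2) widens each element
 * with the right signedness before the left shift below.
 */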
8530 tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
8531 write_vec_element(s, tcg_rd, rd, i, size + 1);
8535 /* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
8536 static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
8537 int immh, int immb, int opcode, int rn, int rd)
8539 int immhb = immh << 3 | immb;
8540 int size = 32 - clz32(immh) - 1;
8541 int dsize = 64;
8542 int esize = 8 << size;
8543 int elements = dsize/esize;
8544 int shift = (2 * esize) - immhb;
8545 bool round = extract32(opcode, 0, 1);
8546 TCGv_i64 tcg_rn, tcg_rd, tcg_final;
8547 TCGv_i64 tcg_round;
8548 int i;
8550 if (extract32(immh, 3, 1)) {
8551 unallocated_encoding(s);
8552 return;
8555 if (!fp_access_check(s)) {
8556 return;
8559 tcg_rn = tcg_temp_new_i64();
8560 tcg_rd = tcg_temp_new_i64();
8561 tcg_final = tcg_temp_new_i64();
8562 read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);
8564 if (round) {
8565 uint64_t round_const = 1ULL << (shift - 1);
8566 tcg_round = tcg_const_i64(round_const);
8567 } else {
8568 tcg_round = NULL;
8571 for (i = 0; i < elements; i++) {
8572 read_vec_element(s, tcg_rn, rn, i, size+1);
8573 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
8574 false, true, size+1, shift);
8576 tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
8579 if (!is_q) {
8580 clear_vec_high(s, rd);
8581 write_vec_element(s, tcg_final, rd, 0, MO_64);
8582 } else {
8583 write_vec_element(s, tcg_final, rd, 1, MO_64);
8586 if (round) {
8587 tcg_temp_free_i64(tcg_round);
8589 tcg_temp_free_i64(tcg_rn);
8590 tcg_temp_free_i64(tcg_rd);
8591 tcg_temp_free_i64(tcg_final);
8592 return;
8596 /* AdvSIMD shift by immediate
8597 * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
8598 * +---+---+---+-------------+------+------+--------+---+------+------+
8599 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
8600 * +---+---+---+-------------+------+------+--------+---+------+------+
8602 static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
8604 int rd = extract32(insn, 0, 5);
8605 int rn = extract32(insn, 5, 5);
8606 int opcode = extract32(insn, 11, 5);
8607 int immb = extract32(insn, 16, 3);
8608 int immh = extract32(insn, 19, 4);
8609 bool is_u = extract32(insn, 29, 1);
8610 bool is_q = extract32(insn, 30, 1);
8612 switch (opcode) {
8613 case 0x08: /* SRI */
8614 if (!is_u) {
8615 unallocated_encoding(s);
8616 return;
8618 /* fall through */
8619 case 0x00: /* SSHR / USHR */
8620 case 0x02: /* SSRA / USRA (accumulate) */
8621 case 0x04: /* SRSHR / URSHR (rounding) */
8622 case 0x06: /* SRSRA / URSRA (accum + rounding) */
8623 handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
8624 break;
8625 case 0x0a: /* SHL / SLI */
8626 handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
8627 break;
8628 case 0x10: /* SHRN */
8629 case 0x11: /* RSHRN / SQRSHRUN */
8630 if (is_u) {
8631 handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
8632 opcode, rn, rd);
8633 } else {
8634 handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
8636 break;
8637 case 0x12: /* SQSHRN / UQSHRN */
8638 case 0x13: /* SQRSHRN / UQRSHRN */
8639 handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
8640 opcode, rn, rd);
8641 break;
8642 case 0x14: /* SSHLL / USHLL */
8643 handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
8644 break;
8645 case 0x1c: /* SCVTF / UCVTF */
8646 handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
8647 opcode, rn, rd);
8648 break;
8649 case 0xc: /* SQSHLU */
8650 if (!is_u) {
8651 unallocated_encoding(s);
8652 return;
8654 handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
8655 break;
8656 case 0xe: /* SQSHL, UQSHL */
8657 handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
8658 break;
 8659 case 0x1f: /* FCVTZS / FCVTZU */
8660 handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
8661 return;
8662 default:
8663 unallocated_encoding(s);
8664 return;
 8668 /* Generate code to do a "long" addition or subtraction, i.e. one done in
8669 * TCGv_i64 on vector lanes twice the width specified by size.
8671 static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
8672 TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
8674 static NeonGenTwo64OpFn * const fns[3][2] = {
8675 { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
8676 { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
8677 { tcg_gen_add_i64, tcg_gen_sub_i64 },
8679 NeonGenTwo64OpFn *genfn;
8680 assert(size < 3);
8682 genfn = fns[size][is_sub];
8683 genfn(tcg_res, tcg_op1, tcg_op2);
8686 static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
8687 int opcode, int rd, int rn, int rm)
8689 /* 3-reg-different widening insns: 64 x 64 -> 128 */
8690 TCGv_i64 tcg_res[2];
8691 int pass, accop;
8693 tcg_res[0] = tcg_temp_new_i64();
8694 tcg_res[1] = tcg_temp_new_i64();
8696 /* Does this op do an adding accumulate, a subtracting accumulate,
8697 * or no accumulate at all?
8699 switch (opcode) {
8700 case 5:
8701 case 8:
8702 case 9:
8703 accop = 1;
8704 break;
8705 case 10:
8706 case 11:
8707 accop = -1;
8708 break;
8709 default:
8710 accop = 0;
8711 break;
8714 if (accop != 0) {
8715 read_vec_element(s, tcg_res[0], rd, 0, MO_64);
8716 read_vec_element(s, tcg_res[1], rd, 1, MO_64);
8719 /* size == 2 means two 32x32->64 operations; this is worth special
8720 * casing because we can generally handle it inline.
8722 if (size == 2) {
8723 for (pass = 0; pass < 2; pass++) {
8724 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8725 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
8726 TCGv_i64 tcg_passres;
8727 TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
8729 int elt = pass + is_q * 2;
8731 read_vec_element(s, tcg_op1, rn, elt, memop);
8732 read_vec_element(s, tcg_op2, rm, elt, memop);
8734 if (accop == 0) {
8735 tcg_passres = tcg_res[pass];
8736 } else {
8737 tcg_passres = tcg_temp_new_i64();
8740 switch (opcode) {
8741 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
8742 tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
8743 break;
8744 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
8745 tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
8746 break;
8747 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
8748 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
8750 TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
8751 TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();
8753 tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
8754 tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
8755 tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
8756 tcg_passres,
8757 tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
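/* Branchless absolute difference: compute both rn - rm and rm - rn and use
 * movcond to keep whichever is non-negative.
 */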
8758 tcg_temp_free_i64(tcg_tmp1);
8759 tcg_temp_free_i64(tcg_tmp2);
8760 break;
8762 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
8763 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
8764 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
8765 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
8766 break;
8767 case 9: /* SQDMLAL, SQDMLAL2 */
8768 case 11: /* SQDMLSL, SQDMLSL2 */
8769 case 13: /* SQDMULL, SQDMULL2 */
8770 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
8771 gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
8772 tcg_passres, tcg_passres);
8773 break;
8774 default:
8775 g_assert_not_reached();
8778 if (opcode == 9 || opcode == 11) {
8779 /* saturating accumulate ops */
8780 if (accop < 0) {
8781 tcg_gen_neg_i64(tcg_passres, tcg_passres);
8783 gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
8784 tcg_res[pass], tcg_passres);
8785 } else if (accop > 0) {
8786 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
8787 } else if (accop < 0) {
8788 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
8791 if (accop != 0) {
8792 tcg_temp_free_i64(tcg_passres);
8795 tcg_temp_free_i64(tcg_op1);
8796 tcg_temp_free_i64(tcg_op2);
8798 } else {
8799 /* size 0 or 1, generally helper functions */
8800 for (pass = 0; pass < 2; pass++) {
8801 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
8802 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
8803 TCGv_i64 tcg_passres;
8804 int elt = pass + is_q * 2;
8806 read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
8807 read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);
8809 if (accop == 0) {
8810 tcg_passres = tcg_res[pass];
8811 } else {
8812 tcg_passres = tcg_temp_new_i64();
8815 switch (opcode) {
8816 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
8817 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
8819 TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
8820 static NeonGenWidenFn * const widenfns[2][2] = {
8821 { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
8822 { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
8824 NeonGenWidenFn *widenfn = widenfns[size][is_u];
8826 widenfn(tcg_op2_64, tcg_op2);
8827 widenfn(tcg_passres, tcg_op1);
8828 gen_neon_addl(size, (opcode == 2), tcg_passres,
8829 tcg_passres, tcg_op2_64);
8830 tcg_temp_free_i64(tcg_op2_64);
8831 break;
8833 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
8834 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
8835 if (size == 0) {
8836 if (is_u) {
8837 gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
8838 } else {
8839 gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
8841 } else {
8842 if (is_u) {
8843 gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
8844 } else {
8845 gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
8848 break;
8849 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
8850 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
8851 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
8852 if (size == 0) {
8853 if (is_u) {
8854 gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
8855 } else {
8856 gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
8858 } else {
8859 if (is_u) {
8860 gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
8861 } else {
8862 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
8865 break;
8866 case 9: /* SQDMLAL, SQDMLAL2 */
8867 case 11: /* SQDMLSL, SQDMLSL2 */
8868 case 13: /* SQDMULL, SQDMULL2 */
8869 assert(size == 1);
8870 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
8871 gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
8872 tcg_passres, tcg_passres);
8873 break;
8874 case 14: /* PMULL */
8875 assert(size == 0);
8876 gen_helper_neon_mull_p8(tcg_passres, tcg_op1, tcg_op2);
8877 break;
8878 default:
8879 g_assert_not_reached();
8881 tcg_temp_free_i32(tcg_op1);
8882 tcg_temp_free_i32(tcg_op2);
8884 if (accop != 0) {
8885 if (opcode == 9 || opcode == 11) {
8886 /* saturating accumulate ops */
8887 if (accop < 0) {
8888 gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
8890 gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
8891 tcg_res[pass],
8892 tcg_passres);
8893 } else {
8894 gen_neon_addl(size, (accop < 0), tcg_res[pass],
8895 tcg_res[pass], tcg_passres);
8897 tcg_temp_free_i64(tcg_passres);
8902 write_vec_element(s, tcg_res[0], rd, 0, MO_64);
8903 write_vec_element(s, tcg_res[1], rd, 1, MO_64);
8904 tcg_temp_free_i64(tcg_res[0]);
8905 tcg_temp_free_i64(tcg_res[1]);
8908 static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
8909 int opcode, int rd, int rn, int rm)
8911 TCGv_i64 tcg_res[2];
8912 int part = is_q ? 2 : 0;
8913 int pass;
8915 for (pass = 0; pass < 2; pass++) {
8916 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8917 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
8918 TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
8919 static NeonGenWidenFn * const widenfns[3][2] = {
8920 { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
8921 { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
8922 { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
8924 NeonGenWidenFn *widenfn = widenfns[size][is_u];
8926 read_vec_element(s, tcg_op1, rn, pass, MO_64);
8927 read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
8928 widenfn(tcg_op2_wide, tcg_op2);
8929 tcg_temp_free_i32(tcg_op2);
8930 tcg_res[pass] = tcg_temp_new_i64();
8931 gen_neon_addl(size, (opcode == 3),
8932 tcg_res[pass], tcg_op1, tcg_op2_wide);
8933 tcg_temp_free_i64(tcg_op1);
8934 tcg_temp_free_i64(tcg_op2_wide);
8937 for (pass = 0; pass < 2; pass++) {
8938 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
8939 tcg_temp_free_i64(tcg_res[pass]);
8943 static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
8945 tcg_gen_addi_i64(in, in, 1U << 31);
8946 tcg_gen_extrh_i64_i32(res, in);
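/* Adding 1 << 31 before taking the high 32 bits rounds the narrowing result
 * to nearest; this is the "round" variant used at size 2, mirroring the
 * neon_narrow_round_high helpers used for the smaller element sizes.
 */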
8949 static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
8950 int opcode, int rd, int rn, int rm)
8952 TCGv_i32 tcg_res[2];
8953 int part = is_q ? 2 : 0;
8954 int pass;
8956 for (pass = 0; pass < 2; pass++) {
8957 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8958 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
8959 TCGv_i64 tcg_wideres = tcg_temp_new_i64();
8960 static NeonGenNarrowFn * const narrowfns[3][2] = {
8961 { gen_helper_neon_narrow_high_u8,
8962 gen_helper_neon_narrow_round_high_u8 },
8963 { gen_helper_neon_narrow_high_u16,
8964 gen_helper_neon_narrow_round_high_u16 },
8965 { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
8967 NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];
8969 read_vec_element(s, tcg_op1, rn, pass, MO_64);
8970 read_vec_element(s, tcg_op2, rm, pass, MO_64);
8972 gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);
8974 tcg_temp_free_i64(tcg_op1);
8975 tcg_temp_free_i64(tcg_op2);
8977 tcg_res[pass] = tcg_temp_new_i32();
8978 gennarrow(tcg_res[pass], tcg_wideres);
8979 tcg_temp_free_i64(tcg_wideres);
8982 for (pass = 0; pass < 2; pass++) {
8983 write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
8984 tcg_temp_free_i32(tcg_res[pass]);
8986 if (!is_q) {
8987 clear_vec_high(s, rd);
8991 static void handle_pmull_64(DisasContext *s, int is_q, int rd, int rn, int rm)
8993 /* PMULL of 64 x 64 -> 128 is an odd special case because it
8994 * is the only three-reg-diff instruction which produces a
8995 * 128-bit wide result from a single operation. However since
8996 * it's possible to calculate the two halves more or less
8997 * separately we just use two helper calls.
8999 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9000 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
9001 TCGv_i64 tcg_res = tcg_temp_new_i64();
9003 read_vec_element(s, tcg_op1, rn, is_q, MO_64);
9004 read_vec_element(s, tcg_op2, rm, is_q, MO_64);
9005 gen_helper_neon_pmull_64_lo(tcg_res, tcg_op1, tcg_op2);
9006 write_vec_element(s, tcg_res, rd, 0, MO_64);
9007 gen_helper_neon_pmull_64_hi(tcg_res, tcg_op1, tcg_op2);
9008 write_vec_element(s, tcg_res, rd, 1, MO_64);
9010 tcg_temp_free_i64(tcg_op1);
9011 tcg_temp_free_i64(tcg_op2);
9012 tcg_temp_free_i64(tcg_res);
9015 /* AdvSIMD three different
9016 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
9017 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
9018 * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
9019 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
9021 static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
9023 /* Instructions in this group fall into three basic classes
9024 * (in each case with the operation working on each element in
9025 * the input vectors):
9026 * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
9027 * 128 bit input)
9028 * (2) wide 64 x 128 -> 128
9029 * (3) narrowing 128 x 128 -> 64
9030 * Here we do initial decode, catch unallocated cases and
9031 * dispatch to separate functions for each class.
9033 int is_q = extract32(insn, 30, 1);
9034 int is_u = extract32(insn, 29, 1);
9035 int size = extract32(insn, 22, 2);
9036 int opcode = extract32(insn, 12, 4);
9037 int rm = extract32(insn, 16, 5);
9038 int rn = extract32(insn, 5, 5);
9039 int rd = extract32(insn, 0, 5);
9041 switch (opcode) {
9042 case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
9043 case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
9044 /* 64 x 128 -> 128 */
9045 if (size == 3) {
9046 unallocated_encoding(s);
9047 return;
9049 if (!fp_access_check(s)) {
9050 return;
9052 handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
9053 break;
9054 case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
9055 case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
9056 /* 128 x 128 -> 64 */
9057 if (size == 3) {
9058 unallocated_encoding(s);
9059 return;
9061 if (!fp_access_check(s)) {
9062 return;
9064 handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
9065 break;
9066 case 14: /* PMULL, PMULL2 */
9067 if (is_u || size == 1 || size == 2) {
9068 unallocated_encoding(s);
9069 return;
9071 if (size == 3) {
9072 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
9073 unallocated_encoding(s);
9074 return;
9076 if (!fp_access_check(s)) {
9077 return;
9079 handle_pmull_64(s, is_q, rd, rn, rm);
9080 return;
9082 goto is_widening;
9083 case 9: /* SQDMLAL, SQDMLAL2 */
9084 case 11: /* SQDMLSL, SQDMLSL2 */
9085 case 13: /* SQDMULL, SQDMULL2 */
9086 if (is_u || size == 0) {
9087 unallocated_encoding(s);
9088 return;
9090 /* fall through */
9091 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
9092 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
9093 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
9094 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
9095 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
9096 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
9097 case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
9098 /* 64 x 64 -> 128 */
9099 if (size == 3) {
9100 unallocated_encoding(s);
9101 return;
9103 is_widening:
9104 if (!fp_access_check(s)) {
9105 return;
9108 handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
9109 break;
9110 default:
9111 /* opcode 15 not allocated */
9112 unallocated_encoding(s);
9113 break;
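/* The bitwise-select expansions below all use the same xor/and/xor pattern,
 * which needs no extra temporary (rn is used as scratch):
 *   BSL: rd = rm ^ ((rn ^ rm) & rd)   -- rd bits choose rn (1) or rm (0)
 *   BIT: rd = rd ^ ((rn ^ rd) & rm)   -- copy rn into rd where rm is set
 *   BIF: rd = rd ^ ((rn ^ rd) & ~rm)  -- copy rn into rd where rm is clear
 */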
9117 static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
9119 tcg_gen_xor_i64(rn, rn, rm);
9120 tcg_gen_and_i64(rn, rn, rd);
9121 tcg_gen_xor_i64(rd, rm, rn);
9124 static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
9126 tcg_gen_xor_i64(rn, rn, rd);
9127 tcg_gen_and_i64(rn, rn, rm);
9128 tcg_gen_xor_i64(rd, rd, rn);
9131 static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
9133 tcg_gen_xor_i64(rn, rn, rd);
9134 tcg_gen_andc_i64(rn, rn, rm);
9135 tcg_gen_xor_i64(rd, rd, rn);
9138 static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
9140 tcg_gen_xor_vec(vece, rn, rn, rm);
9141 tcg_gen_and_vec(vece, rn, rn, rd);
9142 tcg_gen_xor_vec(vece, rd, rm, rn);
9145 static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
9147 tcg_gen_xor_vec(vece, rn, rn, rd);
9148 tcg_gen_and_vec(vece, rn, rn, rm);
9149 tcg_gen_xor_vec(vece, rd, rd, rn);
9152 static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
9154 tcg_gen_xor_vec(vece, rn, rn, rd);
9155 tcg_gen_andc_vec(vece, rn, rn, rm);
9156 tcg_gen_xor_vec(vece, rd, rd, rn);
9159 /* Logic op (opcode == 3) subgroup of C3.6.16. */
9160 static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
9162 static const GVecGen3 bsl_op = {
9163 .fni8 = gen_bsl_i64,
9164 .fniv = gen_bsl_vec,
9165 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9166 .load_dest = true
9168 static const GVecGen3 bit_op = {
9169 .fni8 = gen_bit_i64,
9170 .fniv = gen_bit_vec,
9171 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9172 .load_dest = true
9174 static const GVecGen3 bif_op = {
9175 .fni8 = gen_bif_i64,
9176 .fniv = gen_bif_vec,
9177 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9178 .load_dest = true
9181 int rd = extract32(insn, 0, 5);
9182 int rn = extract32(insn, 5, 5);
9183 int rm = extract32(insn, 16, 5);
9184 int size = extract32(insn, 22, 2);
9185 bool is_u = extract32(insn, 29, 1);
9186 bool is_q = extract32(insn, 30, 1);
9188 if (!fp_access_check(s)) {
9189 return;
9192 switch (size + 4 * is_u) {
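/* size:U together select one of the eight bitwise ops. ORR with Rn == Rm is
 * the MOV (register) alias and is expanded as a plain whole-vector move.
 */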
9193 case 0: /* AND */
9194 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
9195 return;
9196 case 1: /* BIC */
9197 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
9198 return;
9199 case 2: /* ORR */
9200 if (rn == rm) { /* MOV */
9201 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_mov, 0);
9202 } else {
9203 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
9205 return;
9206 case 3: /* ORN */
9207 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
9208 return;
9209 case 4: /* EOR */
9210 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
9211 return;
9213 case 5: /* BSL bitwise select */
9214 gen_gvec_op3(s, is_q, rd, rn, rm, &bsl_op);
9215 return;
9216 case 6: /* BIT, bitwise insert if true */
9217 gen_gvec_op3(s, is_q, rd, rn, rm, &bit_op);
9218 return;
9219 case 7: /* BIF, bitwise insert if false */
9220 gen_gvec_op3(s, is_q, rd, rn, rm, &bif_op);
9221 return;
9223 default:
9224 g_assert_not_reached();
9228 /* Helper functions for 32 bit comparisons */
9229 static void gen_max_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
9231 tcg_gen_movcond_i32(TCG_COND_GE, res, op1, op2, op1, op2);
9234 static void gen_max_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
9236 tcg_gen_movcond_i32(TCG_COND_GEU, res, op1, op2, op1, op2);
9239 static void gen_min_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
9241 tcg_gen_movcond_i32(TCG_COND_LE, res, op1, op2, op1, op2);
9244 static void gen_min_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
9246 tcg_gen_movcond_i32(TCG_COND_LEU, res, op1, op2, op1, op2);
9249 /* Pairwise op subgroup of C3.6.16.
 9251 * This is called directly for integer ops, or from disas_simd_3same_float
 9252 * for float pairwise ops, where the opcode and size are decoded differently.
9254 static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
9255 int size, int rn, int rm, int rd)
9257 TCGv_ptr fpst;
9258 int pass;
9260 /* Floating point operations need fpst */
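/* When called from disas_simd_3same_float the opcode argument is the
 * remapped 7-bit fpopcode (U:size[1]:opcode), so all the FP pairwise ops
 * land at 0x58 and above.
 */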
9261 if (opcode >= 0x58) {
9262 fpst = get_fpstatus_ptr();
9263 } else {
9264 fpst = NULL;
9267 if (!fp_access_check(s)) {
9268 return;
9271 /* These operations work on the concatenated rm:rn, with each pair of
9272 * adjacent elements being operated on to produce an element in the result.
9274 if (size == 3) {
9275 TCGv_i64 tcg_res[2];
9277 for (pass = 0; pass < 2; pass++) {
9278 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9279 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
9280 int passreg = (pass == 0) ? rn : rm;
9282 read_vec_element(s, tcg_op1, passreg, 0, MO_64);
9283 read_vec_element(s, tcg_op2, passreg, 1, MO_64);
9284 tcg_res[pass] = tcg_temp_new_i64();
9286 switch (opcode) {
9287 case 0x17: /* ADDP */
9288 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
9289 break;
9290 case 0x58: /* FMAXNMP */
9291 gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9292 break;
9293 case 0x5a: /* FADDP */
9294 gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9295 break;
9296 case 0x5e: /* FMAXP */
9297 gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9298 break;
9299 case 0x78: /* FMINNMP */
9300 gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9301 break;
9302 case 0x7e: /* FMINP */
9303 gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9304 break;
9305 default:
9306 g_assert_not_reached();
9309 tcg_temp_free_i64(tcg_op1);
9310 tcg_temp_free_i64(tcg_op2);
9313 for (pass = 0; pass < 2; pass++) {
9314 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
9315 tcg_temp_free_i64(tcg_res[pass]);
9317 } else {
9318 int maxpass = is_q ? 4 : 2;
9319 TCGv_i32 tcg_res[4];
9321 for (pass = 0; pass < maxpass; pass++) {
9322 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
9323 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
9324 NeonGenTwoOpFn *genfn = NULL;
9325 int passreg = pass < (maxpass / 2) ? rn : rm;
9326 int passelt = (is_q && (pass & 1)) ? 2 : 0;
9328 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
9329 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
9330 tcg_res[pass] = tcg_temp_new_i32();
9332 switch (opcode) {
9333 case 0x17: /* ADDP */
9335 static NeonGenTwoOpFn * const fns[3] = {
9336 gen_helper_neon_padd_u8,
9337 gen_helper_neon_padd_u16,
9338 tcg_gen_add_i32,
9340 genfn = fns[size];
9341 break;
9343 case 0x14: /* SMAXP, UMAXP */
9345 static NeonGenTwoOpFn * const fns[3][2] = {
9346 { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
9347 { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
9348 { gen_max_s32, gen_max_u32 },
9350 genfn = fns[size][u];
9351 break;
9353 case 0x15: /* SMINP, UMINP */
9355 static NeonGenTwoOpFn * const fns[3][2] = {
9356 { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
9357 { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
9358 { gen_min_s32, gen_min_u32 },
9360 genfn = fns[size][u];
9361 break;
9363 /* The FP operations are all on single floats (32 bit) */
9364 case 0x58: /* FMAXNMP */
9365 gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9366 break;
9367 case 0x5a: /* FADDP */
9368 gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9369 break;
9370 case 0x5e: /* FMAXP */
9371 gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9372 break;
9373 case 0x78: /* FMINNMP */
9374 gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9375 break;
9376 case 0x7e: /* FMINP */
9377 gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9378 break;
9379 default:
9380 g_assert_not_reached();
 9383 /* The FP ops above were emitted directly; call the selected integer genfn now */
9384 if (genfn) {
9385 genfn(tcg_res[pass], tcg_op1, tcg_op2);
9388 tcg_temp_free_i32(tcg_op1);
9389 tcg_temp_free_i32(tcg_op2);
9392 for (pass = 0; pass < maxpass; pass++) {
9393 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
9394 tcg_temp_free_i32(tcg_res[pass]);
9396 if (!is_q) {
9397 clear_vec_high(s, rd);
9401 if (fpst) {
9402 tcg_temp_free_ptr(fpst);
9406 /* Floating point op subgroup of C3.6.16. */
9407 static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
9409 /* For floating point ops, the U, size[1] and opcode bits
9410 * together indicate the operation. size[0] indicates single
9411 * or double.
9413 int fpopcode = extract32(insn, 11, 5)
9414 | (extract32(insn, 23, 1) << 5)
9415 | (extract32(insn, 29, 1) << 6);
9416 int is_q = extract32(insn, 30, 1);
9417 int size = extract32(insn, 22, 1);
9418 int rm = extract32(insn, 16, 5);
9419 int rn = extract32(insn, 5, 5);
9420 int rd = extract32(insn, 0, 5);
9422 int datasize = is_q ? 128 : 64;
9423 int esize = 32 << size;
9424 int elements = datasize / esize;
9426 if (size == 1 && !is_q) {
9427 unallocated_encoding(s);
9428 return;
9431 switch (fpopcode) {
9432 case 0x58: /* FMAXNMP */
9433 case 0x5a: /* FADDP */
9434 case 0x5e: /* FMAXP */
9435 case 0x78: /* FMINNMP */
9436 case 0x7e: /* FMINP */
9437 if (size && !is_q) {
9438 unallocated_encoding(s);
9439 return;
9441 handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
9442 rn, rm, rd);
9443 return;
9444 case 0x1b: /* FMULX */
9445 case 0x1f: /* FRECPS */
9446 case 0x3f: /* FRSQRTS */
9447 case 0x5d: /* FACGE */
9448 case 0x7d: /* FACGT */
9449 case 0x19: /* FMLA */
9450 case 0x39: /* FMLS */
9451 case 0x18: /* FMAXNM */
9452 case 0x1a: /* FADD */
9453 case 0x1c: /* FCMEQ */
9454 case 0x1e: /* FMAX */
9455 case 0x38: /* FMINNM */
9456 case 0x3a: /* FSUB */
9457 case 0x3e: /* FMIN */
9458 case 0x5b: /* FMUL */
9459 case 0x5c: /* FCMGE */
9460 case 0x5f: /* FDIV */
9461 case 0x7a: /* FABD */
9462 case 0x7c: /* FCMGT */
9463 if (!fp_access_check(s)) {
9464 return;
9467 handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
9468 return;
9469 default:
9470 unallocated_encoding(s);
9471 return;
9475 /* Integer op subgroup of C3.6.16. */
9476 static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
9478 int is_q = extract32(insn, 30, 1);
9479 int u = extract32(insn, 29, 1);
9480 int size = extract32(insn, 22, 2);
9481 int opcode = extract32(insn, 11, 5);
9482 int rm = extract32(insn, 16, 5);
9483 int rn = extract32(insn, 5, 5);
9484 int rd = extract32(insn, 0, 5);
9485 int pass;
9487 switch (opcode) {
9488 case 0x13: /* MUL, PMUL */
9489 if (u && size != 0) {
9490 unallocated_encoding(s);
9491 return;
9493 /* fall through */
9494 case 0x0: /* SHADD, UHADD */
9495 case 0x2: /* SRHADD, URHADD */
9496 case 0x4: /* SHSUB, UHSUB */
9497 case 0xc: /* SMAX, UMAX */
9498 case 0xd: /* SMIN, UMIN */
9499 case 0xe: /* SABD, UABD */
9500 case 0xf: /* SABA, UABA */
9501 case 0x12: /* MLA, MLS */
9502 if (size == 3) {
9503 unallocated_encoding(s);
9504 return;
9506 break;
9507 case 0x16: /* SQDMULH, SQRDMULH */
9508 if (size == 0 || size == 3) {
9509 unallocated_encoding(s);
9510 return;
9512 break;
9513 default:
9514 if (size == 3 && !is_q) {
9515 unallocated_encoding(s);
9516 return;
9518 break;
9521 if (!fp_access_check(s)) {
9522 return;
9525 switch (opcode) {
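/* ADD and SUB are expanded as whole-register gvec ops and return early;
 * the remaining opcodes fall through to the element-by-element code below.
 */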
9526 case 0x10: /* ADD, SUB */
9527 if (u) {
9528 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
9529 } else {
9530 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
9532 return;
9535 if (size == 3) {
9536 assert(is_q);
9537 for (pass = 0; pass < 2; pass++) {
9538 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9539 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
9540 TCGv_i64 tcg_res = tcg_temp_new_i64();
9542 read_vec_element(s, tcg_op1, rn, pass, MO_64);
9543 read_vec_element(s, tcg_op2, rm, pass, MO_64);
9545 handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
9547 write_vec_element(s, tcg_res, rd, pass, MO_64);
9549 tcg_temp_free_i64(tcg_res);
9550 tcg_temp_free_i64(tcg_op1);
9551 tcg_temp_free_i64(tcg_op2);
9553 } else {
9554 for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
9555 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
9556 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
9557 TCGv_i32 tcg_res = tcg_temp_new_i32();
9558 NeonGenTwoOpFn *genfn = NULL;
9559 NeonGenTwoOpEnvFn *genenvfn = NULL;
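/* Ops that can saturate go through the _env helpers so they can update the
 * cumulative saturation (QC) flag; everything else uses a plain two-operand
 * function.
 */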
9561 read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
9562 read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
9564 switch (opcode) {
9565 case 0x0: /* SHADD, UHADD */
9567 static NeonGenTwoOpFn * const fns[3][2] = {
9568 { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
9569 { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
9570 { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
9572 genfn = fns[size][u];
9573 break;
9575 case 0x1: /* SQADD, UQADD */
9577 static NeonGenTwoOpEnvFn * const fns[3][2] = {
9578 { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
9579 { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
9580 { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
9582 genenvfn = fns[size][u];
9583 break;
9585 case 0x2: /* SRHADD, URHADD */
9587 static NeonGenTwoOpFn * const fns[3][2] = {
9588 { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
9589 { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
9590 { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
9592 genfn = fns[size][u];
9593 break;
9595 case 0x4: /* SHSUB, UHSUB */
9597 static NeonGenTwoOpFn * const fns[3][2] = {
9598 { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
9599 { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
9600 { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
9602 genfn = fns[size][u];
9603 break;
9605 case 0x5: /* SQSUB, UQSUB */
9607 static NeonGenTwoOpEnvFn * const fns[3][2] = {
9608 { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
9609 { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
9610 { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
9612 genenvfn = fns[size][u];
9613 break;
9615 case 0x6: /* CMGT, CMHI */
9617 static NeonGenTwoOpFn * const fns[3][2] = {
9618 { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_u8 },
9619 { gen_helper_neon_cgt_s16, gen_helper_neon_cgt_u16 },
9620 { gen_helper_neon_cgt_s32, gen_helper_neon_cgt_u32 },
9622 genfn = fns[size][u];
9623 break;
9625 case 0x7: /* CMGE, CMHS */
9627 static NeonGenTwoOpFn * const fns[3][2] = {
9628 { gen_helper_neon_cge_s8, gen_helper_neon_cge_u8 },
9629 { gen_helper_neon_cge_s16, gen_helper_neon_cge_u16 },
9630 { gen_helper_neon_cge_s32, gen_helper_neon_cge_u32 },
9632 genfn = fns[size][u];
9633 break;
9635 case 0x8: /* SSHL, USHL */
9637 static NeonGenTwoOpFn * const fns[3][2] = {
9638 { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 },
9639 { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 },
9640 { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 },
9642 genfn = fns[size][u];
9643 break;
9645 case 0x9: /* SQSHL, UQSHL */
9647 static NeonGenTwoOpEnvFn * const fns[3][2] = {
9648 { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
9649 { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
9650 { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
9652 genenvfn = fns[size][u];
9653 break;
9655 case 0xa: /* SRSHL, URSHL */
9657 static NeonGenTwoOpFn * const fns[3][2] = {
9658 { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
9659 { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
9660 { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
9662 genfn = fns[size][u];
9663 break;
9665 case 0xb: /* SQRSHL, UQRSHL */
9667 static NeonGenTwoOpEnvFn * const fns[3][2] = {
9668 { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
9669 { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
9670 { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
9672 genenvfn = fns[size][u];
9673 break;
9675 case 0xc: /* SMAX, UMAX */
9677 static NeonGenTwoOpFn * const fns[3][2] = {
9678 { gen_helper_neon_max_s8, gen_helper_neon_max_u8 },
9679 { gen_helper_neon_max_s16, gen_helper_neon_max_u16 },
9680 { gen_max_s32, gen_max_u32 },
9682 genfn = fns[size][u];
9683 break;
9686 case 0xd: /* SMIN, UMIN */
9688 static NeonGenTwoOpFn * const fns[3][2] = {
9689 { gen_helper_neon_min_s8, gen_helper_neon_min_u8 },
9690 { gen_helper_neon_min_s16, gen_helper_neon_min_u16 },
9691 { gen_min_s32, gen_min_u32 },
9693 genfn = fns[size][u];
9694 break;
9696 case 0xe: /* SABD, UABD */
9697 case 0xf: /* SABA, UABA */
9699 static NeonGenTwoOpFn * const fns[3][2] = {
9700 { gen_helper_neon_abd_s8, gen_helper_neon_abd_u8 },
9701 { gen_helper_neon_abd_s16, gen_helper_neon_abd_u16 },
9702 { gen_helper_neon_abd_s32, gen_helper_neon_abd_u32 },
9704 genfn = fns[size][u];
9705 break;
9707 case 0x11: /* CMTST, CMEQ */
9709 static NeonGenTwoOpFn * const fns[3][2] = {
9710 { gen_helper_neon_tst_u8, gen_helper_neon_ceq_u8 },
9711 { gen_helper_neon_tst_u16, gen_helper_neon_ceq_u16 },
9712 { gen_helper_neon_tst_u32, gen_helper_neon_ceq_u32 },
9714 genfn = fns[size][u];
9715 break;
9717 case 0x13: /* MUL, PMUL */
9718 if (u) {
9719 /* PMUL */
9720 assert(size == 0);
9721 genfn = gen_helper_neon_mul_p8;
9722 break;
9724 /* fall through : MUL */
9725 case 0x12: /* MLA, MLS */
9727 static NeonGenTwoOpFn * const fns[3] = {
9728 gen_helper_neon_mul_u8,
9729 gen_helper_neon_mul_u16,
9730 tcg_gen_mul_i32,
9732 genfn = fns[size];
9733 break;
9735 case 0x16: /* SQDMULH, SQRDMULH */
9737 static NeonGenTwoOpEnvFn * const fns[2][2] = {
9738 { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
9739 { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
9741 assert(size == 1 || size == 2);
9742 genenvfn = fns[size - 1][u];
9743 break;
9745 default:
9746 g_assert_not_reached();
9749 if (genenvfn) {
9750 genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
9751 } else {
9752 genfn(tcg_res, tcg_op1, tcg_op2);
9755 if (opcode == 0xf || opcode == 0x12) {
9756 /* SABA, UABA, MLA, MLS: accumulating ops */
9757 static NeonGenTwoOpFn * const fns[3][2] = {
9758 { gen_helper_neon_add_u8, gen_helper_neon_sub_u8 },
9759 { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
9760 { tcg_gen_add_i32, tcg_gen_sub_i32 },
9762 bool is_sub = (opcode == 0x12 && u); /* MLS */
9764 genfn = fns[size][is_sub];
9765 read_vec_element_i32(s, tcg_op1, rd, pass, MO_32);
9766 genfn(tcg_res, tcg_op1, tcg_res);
9769 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
9771 tcg_temp_free_i32(tcg_res);
9772 tcg_temp_free_i32(tcg_op1);
9773 tcg_temp_free_i32(tcg_op2);
9777 if (!is_q) {
9778 clear_vec_high(s, rd);
9782 /* AdvSIMD three same
9783 * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
9784 * +---+---+---+-----------+------+---+------+--------+---+------+------+
9785 * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
9786 * +---+---+---+-----------+------+---+------+--------+---+------+------+
9788 static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
9790 int opcode = extract32(insn, 11, 5);
9792 switch (opcode) {
9793 case 0x3: /* logic ops */
9794 disas_simd_3same_logic(s, insn);
9795 break;
9796 case 0x17: /* ADDP */
9797 case 0x14: /* SMAXP, UMAXP */
9798 case 0x15: /* SMINP, UMINP */
9800 /* Pairwise operations */
9801 int is_q = extract32(insn, 30, 1);
9802 int u = extract32(insn, 29, 1);
9803 int size = extract32(insn, 22, 2);
9804 int rm = extract32(insn, 16, 5);
9805 int rn = extract32(insn, 5, 5);
9806 int rd = extract32(insn, 0, 5);
9807 if (opcode == 0x17) {
9808 if (u || (size == 3 && !is_q)) {
9809 unallocated_encoding(s);
9810 return;
9812 } else {
9813 if (size == 3) {
9814 unallocated_encoding(s);
9815 return;
9818 handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
9819 break;
9821 case 0x18 ... 0x31:
9822 /* floating point ops, sz[1] and U are part of opcode */
9823 disas_simd_3same_float(s, insn);
9824 break;
9825 default:
9826 disas_simd_3same_int(s, insn);
9827 break;
9831 static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
9832 int size, int rn, int rd)
9834 /* Handle 2-reg-misc ops which are widening (so each size element
 9835 * in the source becomes a 2*size element in the destination).
9836 * The only instruction like this is FCVTL.
9838 int pass;
9840 if (size == 3) {
9841 /* 32 -> 64 bit fp conversion */
9842 TCGv_i64 tcg_res[2];
9843 int srcelt = is_q ? 2 : 0;
9845 for (pass = 0; pass < 2; pass++) {
9846 TCGv_i32 tcg_op = tcg_temp_new_i32();
9847 tcg_res[pass] = tcg_temp_new_i64();
9849 read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
9850 gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
9851 tcg_temp_free_i32(tcg_op);
9853 for (pass = 0; pass < 2; pass++) {
9854 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
9855 tcg_temp_free_i64(tcg_res[pass]);
9857 } else {
9858 /* 16 -> 32 bit fp conversion */
9859 int srcelt = is_q ? 4 : 0;
9860 TCGv_i32 tcg_res[4];
9862 for (pass = 0; pass < 4; pass++) {
9863 tcg_res[pass] = tcg_temp_new_i32();
9865 read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
9866 gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
9867 cpu_env);
9869 for (pass = 0; pass < 4; pass++) {
9870 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
9871 tcg_temp_free_i32(tcg_res[pass]);
9876 static void handle_rev(DisasContext *s, int opcode, bool u,
9877 bool is_q, int size, int rn, int rd)
9879 int op = (opcode << 1) | u;
9880 int opsz = op + size;
9881 int grp_size = 3 - opsz;
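/* grp_size is log2 of the number of elements per reversal group: the group
 * is 64 >> op bits wide (REV64, REV32, REV16 for op = 0, 1, 2) and each
 * element is 8 << size bits, so e.g. REV32 on halfwords gives grp_size 1
 * and swaps adjacent 16-bit elements.
 */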
9882 int dsize = is_q ? 128 : 64;
9883 int i;
9885 if (opsz >= 3) {
9886 unallocated_encoding(s);
9887 return;
9890 if (!fp_access_check(s)) {
9891 return;
9894 if (size == 0) {
9895 /* Special case bytes, use bswap op on each group of elements */
9896 int groups = dsize / (8 << grp_size);
9898 for (i = 0; i < groups; i++) {
9899 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
9901 read_vec_element(s, tcg_tmp, rn, i, grp_size);
9902 switch (grp_size) {
9903 case MO_16:
9904 tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
9905 break;
9906 case MO_32:
9907 tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
9908 break;
9909 case MO_64:
9910 tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
9911 break;
9912 default:
9913 g_assert_not_reached();
9915 write_vec_element(s, tcg_tmp, rd, i, grp_size);
9916 tcg_temp_free_i64(tcg_tmp);
9918 if (!is_q) {
9919 clear_vec_high(s, rd);
9921 } else {
9922 int revmask = (1 << grp_size) - 1;
9923 int esize = 8 << size;
9924 int elements = dsize / esize;
9925 TCGv_i64 tcg_rn = tcg_temp_new_i64();
9926 TCGv_i64 tcg_rd = tcg_const_i64(0);
9927 TCGv_i64 tcg_rd_hi = tcg_const_i64(0);
9929 for (i = 0; i < elements; i++) {
9930 int e_rev = (i & 0xf) ^ revmask;
9931 int off = e_rev * esize;
9932 read_vec_element(s, tcg_rn, rn, i, size);
9933 if (off >= 64) {
9934 tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
9935 tcg_rn, off - 64, esize);
9936 } else {
9937 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
9940 write_vec_element(s, tcg_rd, rd, 0, MO_64);
9941 write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);
9943 tcg_temp_free_i64(tcg_rd_hi);
9944 tcg_temp_free_i64(tcg_rd);
9945 tcg_temp_free_i64(tcg_rn);
9949 static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
9950 bool is_q, int size, int rn, int rd)
9952 /* Implement the pairwise operations from 2-misc:
9953 * SADDLP, UADDLP, SADALP, UADALP.
9954 * These all add pairs of elements in the input to produce a
9955 * double-width result element in the output (possibly accumulating).
9957 bool accum = (opcode == 0x6);
9958 int maxpass = is_q ? 2 : 1;
9959 int pass;
9960 TCGv_i64 tcg_res[2];
9962 if (size == 2) {
9963 /* 32 + 32 -> 64 op */
9964 TCGMemOp memop = size + (u ? 0 : MO_SIGN);
9966 for (pass = 0; pass < maxpass; pass++) {
9967 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9968 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
9970 tcg_res[pass] = tcg_temp_new_i64();
9972 read_vec_element(s, tcg_op1, rn, pass * 2, memop);
9973 read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
9974 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
9975 if (accum) {
9976 read_vec_element(s, tcg_op1, rd, pass, MO_64);
9977 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
9980 tcg_temp_free_i64(tcg_op1);
9981 tcg_temp_free_i64(tcg_op2);
9983 } else {
9984 for (pass = 0; pass < maxpass; pass++) {
9985 TCGv_i64 tcg_op = tcg_temp_new_i64();
9986 NeonGenOneOpFn *genfn;
9987 static NeonGenOneOpFn * const fns[2][2] = {
9988 { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
9989 { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
9992 genfn = fns[size][u];
9994 tcg_res[pass] = tcg_temp_new_i64();
9996 read_vec_element(s, tcg_op, rn, pass, MO_64);
9997 genfn(tcg_res[pass], tcg_op);
9999 if (accum) {
10000 read_vec_element(s, tcg_op, rd, pass, MO_64);
10001 if (size == 0) {
10002 gen_helper_neon_addl_u16(tcg_res[pass],
10003 tcg_res[pass], tcg_op);
10004 } else {
10005 gen_helper_neon_addl_u32(tcg_res[pass],
10006 tcg_res[pass], tcg_op);
10009 tcg_temp_free_i64(tcg_op);
10012 if (!is_q) {
10013 tcg_res[1] = tcg_const_i64(0);
10015 for (pass = 0; pass < 2; pass++) {
10016 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10017 tcg_temp_free_i64(tcg_res[pass]);
10021 static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
10023 /* Implement SHLL and SHLL2 */
10024 int pass;
10025 int part = is_q ? 2 : 0;
10026 TCGv_i64 tcg_res[2];
10028 for (pass = 0; pass < 2; pass++) {
10029 static NeonGenWidenFn * const widenfns[3] = {
10030 gen_helper_neon_widen_u8,
10031 gen_helper_neon_widen_u16,
10032 tcg_gen_extu_i32_i64,
10034 NeonGenWidenFn *widenfn = widenfns[size];
10035 TCGv_i32 tcg_op = tcg_temp_new_i32();
10037 read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
10038 tcg_res[pass] = tcg_temp_new_i64();
10039 widenfn(tcg_res[pass], tcg_op);
10040 tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
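/* SHLL always shifts by the source element width, hence the fixed
 * 8 << size shift count rather than an immediate from the instruction.
 */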
10042 tcg_temp_free_i32(tcg_op);
10045 for (pass = 0; pass < 2; pass++) {
10046 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10047 tcg_temp_free_i64(tcg_res[pass]);
10051 /* AdvSIMD two reg misc
10052 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
10053 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
10054 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
10055 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
10057 static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
10059 int size = extract32(insn, 22, 2);
10060 int opcode = extract32(insn, 12, 5);
10061 bool u = extract32(insn, 29, 1);
10062 bool is_q = extract32(insn, 30, 1);
10063 int rn = extract32(insn, 5, 5);
10064 int rd = extract32(insn, 0, 5);
10065 bool need_fpstatus = false;
10066 bool need_rmode = false;
10067 int rmode = -1;
10068 TCGv_i32 tcg_rmode;
10069 TCGv_ptr tcg_fpstatus;
10071 switch (opcode) {
10072 case 0x0: /* REV64, REV32 */
10073 case 0x1: /* REV16 */
10074 handle_rev(s, opcode, u, is_q, size, rn, rd);
10075 return;
10076 case 0x5: /* CNT, NOT, RBIT */
10077 if (u && size == 0) {
10078 /* NOT */
10079 break;
10080 } else if (u && size == 1) {
10081 /* RBIT */
10082 break;
10083 } else if (!u && size == 0) {
10084 /* CNT */
10085 break;
10087 unallocated_encoding(s);
10088 return;
10089 case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
10090 case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
10091 if (size == 3) {
10092 unallocated_encoding(s);
10093 return;
10095 if (!fp_access_check(s)) {
10096 return;
10099 handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
10100 return;
10101 case 0x4: /* CLS, CLZ */
10102 if (size == 3) {
10103 unallocated_encoding(s);
10104 return;
10106 break;
10107 case 0x2: /* SADDLP, UADDLP */
10108 case 0x6: /* SADALP, UADALP */
10109 if (size == 3) {
10110 unallocated_encoding(s);
10111 return;
10113 if (!fp_access_check(s)) {
10114 return;
10116 handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
10117 return;
10118 case 0x13: /* SHLL, SHLL2 */
10119 if (u == 0 || size == 3) {
10120 unallocated_encoding(s);
10121 return;
10123 if (!fp_access_check(s)) {
10124 return;
10126 handle_shll(s, is_q, size, rn, rd);
10127 return;
10128 case 0xa: /* CMLT */
10129 if (u == 1) {
10130 unallocated_encoding(s);
10131 return;
10133 /* fall through */
10134 case 0x8: /* CMGT, CMGE */
10135 case 0x9: /* CMEQ, CMLE */
10136 case 0xb: /* ABS, NEG */
10137 if (size == 3 && !is_q) {
10138 unallocated_encoding(s);
10139 return;
10141 break;
10142 case 0x3: /* SUQADD, USQADD */
10143 if (size == 3 && !is_q) {
10144 unallocated_encoding(s);
10145 return;
10147 if (!fp_access_check(s)) {
10148 return;
10150 handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
10151 return;
10152 case 0x7: /* SQABS, SQNEG */
10153 if (size == 3 && !is_q) {
10154 unallocated_encoding(s);
10155 return;
10157 break;
10158 case 0xc ... 0xf:
10159 case 0x16 ... 0x1d:
10160 case 0x1f:
10162 /* Floating point: U, size[1] and opcode indicate operation;
10163 * size[0] indicates single or double precision.
10165 int is_double = extract32(size, 0, 1);
10166 opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
10167 size = is_double ? 3 : 2;
10168 switch (opcode) {
10169 case 0x2f: /* FABS */
10170 case 0x6f: /* FNEG */
10171 if (size == 3 && !is_q) {
10172 unallocated_encoding(s);
10173 return;
10175 break;
10176 case 0x1d: /* SCVTF */
10177 case 0x5d: /* UCVTF */
 10179 bool is_signed = (opcode == 0x1d);
10180 int elements = is_double ? 2 : is_q ? 4 : 2;
10181 if (is_double && !is_q) {
10182 unallocated_encoding(s);
10183 return;
10185 if (!fp_access_check(s)) {
10186 return;
10188 handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
10189 return;
10191 case 0x2c: /* FCMGT (zero) */
10192 case 0x2d: /* FCMEQ (zero) */
10193 case 0x2e: /* FCMLT (zero) */
10194 case 0x6c: /* FCMGE (zero) */
10195 case 0x6d: /* FCMLE (zero) */
10196 if (size == 3 && !is_q) {
10197 unallocated_encoding(s);
10198 return;
10200 handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
10201 return;
10202 case 0x7f: /* FSQRT */
10203 if (size == 3 && !is_q) {
10204 unallocated_encoding(s);
10205 return;
10207 break;
10208 case 0x1a: /* FCVTNS */
10209 case 0x1b: /* FCVTMS */
10210 case 0x3a: /* FCVTPS */
10211 case 0x3b: /* FCVTZS */
10212 case 0x5a: /* FCVTNU */
10213 case 0x5b: /* FCVTMU */
10214 case 0x7a: /* FCVTPU */
10215 case 0x7b: /* FCVTZU */
10216 need_fpstatus = true;
10217 need_rmode = true;
10218 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
10219 if (size == 3 && !is_q) {
10220 unallocated_encoding(s);
10221 return;
10223 break;
10224 case 0x5c: /* FCVTAU */
10225 case 0x1c: /* FCVTAS */
10226 need_fpstatus = true;
10227 need_rmode = true;
10228 rmode = FPROUNDING_TIEAWAY;
10229 if (size == 3 && !is_q) {
10230 unallocated_encoding(s);
10231 return;
10233 break;
10234 case 0x3c: /* URECPE */
10235 if (size == 3) {
10236 unallocated_encoding(s);
10237 return;
10239 /* fall through */
10240 case 0x3d: /* FRECPE */
10241 case 0x7d: /* FRSQRTE */
10242 if (size == 3 && !is_q) {
10243 unallocated_encoding(s);
10244 return;
10246 if (!fp_access_check(s)) {
10247 return;
10249 handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
10250 return;
10251 case 0x56: /* FCVTXN, FCVTXN2 */
10252 if (size == 2) {
10253 unallocated_encoding(s);
10254 return;
10256 /* fall through */
10257 case 0x16: /* FCVTN, FCVTN2 */
10258 /* handle_2misc_narrow does a 2*size -> size operation, but these
10259 * instructions encode the source size rather than dest size.
10261 if (!fp_access_check(s)) {
10262 return;
10264 handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
10265 return;
10266 case 0x17: /* FCVTL, FCVTL2 */
10267 if (!fp_access_check(s)) {
10268 return;
10270 handle_2misc_widening(s, opcode, is_q, size, rn, rd);
10271 return;
10272 case 0x18: /* FRINTN */
10273 case 0x19: /* FRINTM */
10274 case 0x38: /* FRINTP */
10275 case 0x39: /* FRINTZ */
10276 need_rmode = true;
10277 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
10278 /* fall through */
10279 case 0x59: /* FRINTX */
10280 case 0x79: /* FRINTI */
10281 need_fpstatus = true;
10282 if (size == 3 && !is_q) {
10283 unallocated_encoding(s);
10284 return;
10286 break;
10287 case 0x58: /* FRINTA */
10288 need_rmode = true;
10289 rmode = FPROUNDING_TIEAWAY;
10290 need_fpstatus = true;
10291 if (size == 3 && !is_q) {
10292 unallocated_encoding(s);
10293 return;
10295 break;
10296 case 0x7c: /* URSQRTE */
10297 if (size == 3) {
10298 unallocated_encoding(s);
10299 return;
10301 need_fpstatus = true;
10302 break;
10303 default:
10304 unallocated_encoding(s);
10305 return;
10307 break;
10309 default:
10310 unallocated_encoding(s);
10311 return;
10314 if (!fp_access_check(s)) {
10315 return;
10318 if (need_fpstatus) {
10319 tcg_fpstatus = get_fpstatus_ptr();
10320 } else {
10321 tcg_fpstatus = NULL;
10323 if (need_rmode) {
10324 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
10325 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
10326 } else {
10327 tcg_rmode = NULL;
10330 switch (opcode) {
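/* NOT and NEG can be expanded with the generic vector (gvec) ops on the
 * whole register, so they return early here instead of going through the
 * per-element loops below.
 */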
10331 case 0x5:
10332 if (u && size == 0) { /* NOT */
10333 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
10334 return;
10336 break;
10337 case 0xb:
10338 if (u) { /* NEG */
10339 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
10340 return;
10342 break;
10345 if (size == 3) {
10346 /* All 64-bit element operations can be shared with scalar 2misc */
10347 int pass;
10349 for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
10350 TCGv_i64 tcg_op = tcg_temp_new_i64();
10351 TCGv_i64 tcg_res = tcg_temp_new_i64();
10353 read_vec_element(s, tcg_op, rn, pass, MO_64);
10355 handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
10356 tcg_rmode, tcg_fpstatus);
10358 write_vec_element(s, tcg_res, rd, pass, MO_64);
10360 tcg_temp_free_i64(tcg_res);
10361 tcg_temp_free_i64(tcg_op);
10363 } else {
10364 int pass;
10366 for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
10367 TCGv_i32 tcg_op = tcg_temp_new_i32();
10368 TCGv_i32 tcg_res = tcg_temp_new_i32();
10369 TCGCond cond;
10371 read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
10373 if (size == 2) {
10374 /* Special cases for 32 bit elements */
10375 switch (opcode) {
10376 case 0xa: /* CMLT */
10377 /* 32 bit integer comparison against zero, result is
10378 * test ? (2^32 - 1) : 0. We implement via setcond(test)
10379 * and then negating the 0/1 result to get all-ones or 0.
10381 cond = TCG_COND_LT;
10382 do_cmop:
10383 tcg_gen_setcondi_i32(cond, tcg_res, tcg_op, 0);
10384 tcg_gen_neg_i32(tcg_res, tcg_res);
10385 break;
10386 case 0x8: /* CMGT, CMGE */
10387 cond = u ? TCG_COND_GE : TCG_COND_GT;
10388 goto do_cmop;
10389 case 0x9: /* CMEQ, CMLE */
10390 cond = u ? TCG_COND_LE : TCG_COND_EQ;
10391 goto do_cmop;
10392 case 0x4: /* CLS, CLZ */
10393 if (u) {
10394 tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
10395 } else {
10396 tcg_gen_clrsb_i32(tcg_res, tcg_op);
10398 break;
10399 case 0x7: /* SQABS, SQNEG */
10400 if (u) {
10401 gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
10402 } else {
10403 gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
10405 break;
10406 case 0xb: /* ABS, NEG */
10407 if (u) {
10408 tcg_gen_neg_i32(tcg_res, tcg_op);
10409 } else {
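/* ABS: negate unconditionally, then use movcond to keep the original
 * value when it was already positive: res = (op > 0) ? op : -op.
 */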
10410 TCGv_i32 tcg_zero = tcg_const_i32(0);
10411 tcg_gen_neg_i32(tcg_res, tcg_op);
10412 tcg_gen_movcond_i32(TCG_COND_GT, tcg_res, tcg_op,
10413 tcg_zero, tcg_op, tcg_res);
10414 tcg_temp_free_i32(tcg_zero);
10416 break;
10417 case 0x2f: /* FABS */
10418 gen_helper_vfp_abss(tcg_res, tcg_op);
10419 break;
10420 case 0x6f: /* FNEG */
10421 gen_helper_vfp_negs(tcg_res, tcg_op);
10422 break;
10423 case 0x7f: /* FSQRT */
10424 gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
10425 break;
10426 case 0x1a: /* FCVTNS */
10427 case 0x1b: /* FCVTMS */
10428 case 0x1c: /* FCVTAS */
10429 case 0x3a: /* FCVTPS */
10430 case 0x3b: /* FCVTZS */
10432 TCGv_i32 tcg_shift = tcg_const_i32(0);
10433 gen_helper_vfp_tosls(tcg_res, tcg_op,
10434 tcg_shift, tcg_fpstatus);
10435 tcg_temp_free_i32(tcg_shift);
10436 break;
10438 case 0x5a: /* FCVTNU */
10439 case 0x5b: /* FCVTMU */
10440 case 0x5c: /* FCVTAU */
10441 case 0x7a: /* FCVTPU */
10442 case 0x7b: /* FCVTZU */
10444 TCGv_i32 tcg_shift = tcg_const_i32(0);
10445 gen_helper_vfp_touls(tcg_res, tcg_op,
10446 tcg_shift, tcg_fpstatus);
10447 tcg_temp_free_i32(tcg_shift);
10448 break;
10450 case 0x18: /* FRINTN */
10451 case 0x19: /* FRINTM */
10452 case 0x38: /* FRINTP */
10453 case 0x39: /* FRINTZ */
10454 case 0x58: /* FRINTA */
10455 case 0x79: /* FRINTI */
10456 gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
10457 break;
10458 case 0x59: /* FRINTX */
10459 gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
10460 break;
10461 case 0x7c: /* URSQRTE */
10462 gen_helper_rsqrte_u32(tcg_res, tcg_op, tcg_fpstatus);
10463 break;
10464 default:
10465 g_assert_not_reached();
10467 } else {
10468 /* Use helpers for 8 and 16 bit elements */
10469 switch (opcode) {
10470 case 0x5: /* CNT, RBIT */
10471 /* For these two insns size is part of the opcode specifier
10472 * (handled earlier); they always operate on byte elements.
10474 if (u) {
10475 gen_helper_neon_rbit_u8(tcg_res, tcg_op);
10476 } else {
10477 gen_helper_neon_cnt_u8(tcg_res, tcg_op);
10479 break;
10480 case 0x7: /* SQABS, SQNEG */
10482 NeonGenOneOpEnvFn *genfn;
10483 static NeonGenOneOpEnvFn * const fns[2][2] = {
10484 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
10485 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
10487 genfn = fns[size][u];
10488 genfn(tcg_res, cpu_env, tcg_op);
10489 break;
10491 case 0x8: /* CMGT, CMGE */
10492 case 0x9: /* CMEQ, CMLE */
10493 case 0xa: /* CMLT */
10495 static NeonGenTwoOpFn * const fns[3][2] = {
10496 { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_s16 },
10497 { gen_helper_neon_cge_s8, gen_helper_neon_cge_s16 },
10498 { gen_helper_neon_ceq_u8, gen_helper_neon_ceq_u16 },
10500 NeonGenTwoOpFn *genfn;
10501 int comp;
10502 bool reverse;
10503 TCGv_i32 tcg_zero = tcg_const_i32(0);
10505 /* comp = index into [CMGT, CMGE, CMEQ, CMLE, CMLT] */
10506 comp = (opcode - 0x8) * 2 + u;
10507 /* ...but LE, LT are implemented as reverse GE, GT */
10508 reverse = (comp > 2);
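/* For example CMLE (opcode 0x9, u set) gives comp = 3, which is reversed
 * to comp = 1 (CMGE) with the operands swapped, since x <= 0 is the same
 * test as 0 >= x.
 */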
10509 if (reverse) {
10510 comp = 4 - comp;
10512 genfn = fns[comp][size];
10513 if (reverse) {
10514 genfn(tcg_res, tcg_zero, tcg_op);
10515 } else {
10516 genfn(tcg_res, tcg_op, tcg_zero);
10518 tcg_temp_free_i32(tcg_zero);
10519 break;
10521 case 0xb: /* ABS, NEG */
10522 if (u) {
10523 TCGv_i32 tcg_zero = tcg_const_i32(0);
10524 if (size) {
10525 gen_helper_neon_sub_u16(tcg_res, tcg_zero, tcg_op);
10526 } else {
10527 gen_helper_neon_sub_u8(tcg_res, tcg_zero, tcg_op);
10529 tcg_temp_free_i32(tcg_zero);
10530 } else {
10531 if (size) {
10532 gen_helper_neon_abs_s16(tcg_res, tcg_op);
10533 } else {
10534 gen_helper_neon_abs_s8(tcg_res, tcg_op);
10537 break;
10538 case 0x4: /* CLS, CLZ */
10539 if (u) {
10540 if (size == 0) {
10541 gen_helper_neon_clz_u8(tcg_res, tcg_op);
10542 } else {
10543 gen_helper_neon_clz_u16(tcg_res, tcg_op);
10545 } else {
10546 if (size == 0) {
10547 gen_helper_neon_cls_s8(tcg_res, tcg_op);
10548 } else {
10549 gen_helper_neon_cls_s16(tcg_res, tcg_op);
10552 break;
10553 default:
10554 g_assert_not_reached();
10558 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
10560 tcg_temp_free_i32(tcg_res);
10561 tcg_temp_free_i32(tcg_op);
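/* A non-Q (64-bit) operation must leave the high 64 bits of the
 * destination register zeroed, which is what clear_vec_high() does here.
 */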
10564 if (!is_q) {
10565 clear_vec_high(s, rd);
10568 if (need_rmode) {
10569 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
10570 tcg_temp_free_i32(tcg_rmode);
10572 if (need_fpstatus) {
10573 tcg_temp_free_ptr(tcg_fpstatus);
10577 /* AdvSIMD scalar x indexed element
10578 * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
10579 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
10580 * | 0 1 | U | 1 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
10581 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
10582 * AdvSIMD vector x indexed element
10583 * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
10584 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
10585 * | 0 | Q | U | 0 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
10586 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
10588 static void disas_simd_indexed(DisasContext *s, uint32_t insn)
10590 /* This encoding has two kinds of instruction:
10591 * normal, where we perform elt x idxelt => elt for each
10592 * element in the vector
10593 * long, where we perform elt x idxelt and generate a result of
10594 * double the width of the input element
10595 * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
10597 bool is_scalar = extract32(insn, 28, 1);
10598 bool is_q = extract32(insn, 30, 1);
10599 bool u = extract32(insn, 29, 1);
10600 int size = extract32(insn, 22, 2);
10601 int l = extract32(insn, 21, 1);
10602 int m = extract32(insn, 20, 1);
10603 /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
10604 int rm = extract32(insn, 16, 4);
10605 int opcode = extract32(insn, 12, 4);
10606 int h = extract32(insn, 11, 1);
10607 int rn = extract32(insn, 5, 5);
10608 int rd = extract32(insn, 0, 5);
10609 bool is_long = false;
10610 bool is_fp = false;
10611 int index;
10612 TCGv_ptr fpst;
10614 switch (opcode) {
10615 case 0x0: /* MLA */
10616 case 0x4: /* MLS */
10617 if (!u || is_scalar) {
10618 unallocated_encoding(s);
10619 return;
10621 break;
10622 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10623 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10624 case 0xa: /* SMULL, SMULL2, UMULL, UMULL2 */
10625 if (is_scalar) {
10626 unallocated_encoding(s);
10627 return;
10629 is_long = true;
10630 break;
10631 case 0x3: /* SQDMLAL, SQDMLAL2 */
10632 case 0x7: /* SQDMLSL, SQDMLSL2 */
10633 case 0xb: /* SQDMULL, SQDMULL2 */
10634 is_long = true;
10635 /* fall through */
10636 case 0xc: /* SQDMULH */
10637 case 0xd: /* SQRDMULH */
10638 if (u) {
10639 unallocated_encoding(s);
10640 return;
10642 break;
10643 case 0x8: /* MUL */
10644 if (u || is_scalar) {
10645 unallocated_encoding(s);
10646 return;
10648 break;
10649 case 0x1: /* FMLA */
10650 case 0x5: /* FMLS */
10651 if (u) {
10652 unallocated_encoding(s);
10653 return;
10655 /* fall through */
10656 case 0x9: /* FMUL, FMULX */
10657 if (!extract32(size, 1, 1)) {
10658 unallocated_encoding(s);
10659 return;
10661 is_fp = true;
10662 break;
10663 default:
10664 unallocated_encoding(s);
10665 return;
10668 if (is_fp) {
10669 /* low bit of size indicates single/double */
10670 size = extract32(size, 0, 1) ? 3 : 2;
10671 if (size == 2) {
10672 index = h << 1 | l;
10673 } else {
10674 if (l || !is_q) {
10675 unallocated_encoding(s);
10676 return;
10678 index = h;
10680 rm |= (m << 4);
10681 } else {
10682 switch (size) {
10683 case 1:
10684 index = h << 2 | l << 1 | m;
10685 break;
10686 case 2:
10687 index = h << 1 | l;
10688 rm |= (m << 4);
10689 break;
10690 default:
10691 unallocated_encoding(s);
10692 return;
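/* At this point 'index' holds the source element index assembled from the
 * H, L and M bits (H:L:M for 16-bit elements, H:L for 32-bit, H alone for
 * 64-bit); for the wider element sizes M has instead been folded into Rm
 * as its fifth bit.
 */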
10696 if (!fp_access_check(s)) {
10697 return;
10700 if (is_fp) {
10701 fpst = get_fpstatus_ptr();
10702 } else {
10703 fpst = NULL;
10706 if (size == 3) {
10707 TCGv_i64 tcg_idx = tcg_temp_new_i64();
10708 int pass;
10710 assert(is_fp && is_q && !is_long);
10712 read_vec_element(s, tcg_idx, rm, index, MO_64);
10714 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
10715 TCGv_i64 tcg_op = tcg_temp_new_i64();
10716 TCGv_i64 tcg_res = tcg_temp_new_i64();
10718 read_vec_element(s, tcg_op, rn, pass, MO_64);
10720 switch (opcode) {
10721 case 0x5: /* FMLS */
10722 /* As usual for ARM, separate negation for fused multiply-add */
10723 gen_helper_vfp_negd(tcg_op, tcg_op);
10724 /* fall through */
10725 case 0x1: /* FMLA */
10726 read_vec_element(s, tcg_res, rd, pass, MO_64);
10727 gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
10728 break;
10729 case 0x9: /* FMUL, FMULX */
10730 if (u) {
10731 gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
10732 } else {
10733 gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
10735 break;
10736 default:
10737 g_assert_not_reached();
10740 write_vec_element(s, tcg_res, rd, pass, MO_64);
10741 tcg_temp_free_i64(tcg_op);
10742 tcg_temp_free_i64(tcg_res);
10745 if (is_scalar) {
10746 clear_vec_high(s, rd);
10749 tcg_temp_free_i64(tcg_idx);
10750 } else if (!is_long) {
10751 /* 32 bit floating point, or 16 or 32 bit integer.
10752 * For the 16 bit scalar case we use the usual Neon helpers and
10753 * rely on the fact that 0 op 0 == 0 with no side effects.
10755 TCGv_i32 tcg_idx = tcg_temp_new_i32();
10756 int pass, maxpasses;
10758 if (is_scalar) {
10759 maxpasses = 1;
10760 } else {
10761 maxpasses = is_q ? 4 : 2;
10764 read_vec_element_i32(s, tcg_idx, rm, index, size);
10766 if (size == 1 && !is_scalar) {
10767 /* The simplest way to handle the 16x16 indexed ops is to duplicate
10768 * the index into both halves of the 32 bit tcg_idx and then use
10769 * the usual Neon helpers.
10771 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
10774 for (pass = 0; pass < maxpasses; pass++) {
10775 TCGv_i32 tcg_op = tcg_temp_new_i32();
10776 TCGv_i32 tcg_res = tcg_temp_new_i32();
10778 read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);
10780 switch (opcode) {
10781 case 0x0: /* MLA */
10782 case 0x4: /* MLS */
10783 case 0x8: /* MUL */
10785 static NeonGenTwoOpFn * const fns[2][2] = {
10786 { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
10787 { tcg_gen_add_i32, tcg_gen_sub_i32 },
10789 NeonGenTwoOpFn *genfn;
10790 bool is_sub = opcode == 0x4;
10792 if (size == 1) {
10793 gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
10794 } else {
10795 tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
10797 if (opcode == 0x8) {
10798 break;
10800 read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
10801 genfn = fns[size - 1][is_sub];
10802 genfn(tcg_res, tcg_op, tcg_res);
10803 break;
10805 case 0x5: /* FMLS */
10806 /* As usual for ARM, separate negation for fused multiply-add */
10807 gen_helper_vfp_negs(tcg_op, tcg_op);
10808 /* fall through */
10809 case 0x1: /* FMLA */
10810 read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
10811 gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
10812 break;
10813 case 0x9: /* FMUL, FMULX */
10814 if (u) {
10815 gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
10816 } else {
10817 gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
10819 break;
10820 case 0xc: /* SQDMULH */
10821 if (size == 1) {
10822 gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
10823 tcg_op, tcg_idx);
10824 } else {
10825 gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
10826 tcg_op, tcg_idx);
10828 break;
10829 case 0xd: /* SQRDMULH */
10830 if (size == 1) {
10831 gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
10832 tcg_op, tcg_idx);
10833 } else {
10834 gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
10835 tcg_op, tcg_idx);
10837 break;
10838 default:
10839 g_assert_not_reached();
10842 if (is_scalar) {
10843 write_fp_sreg(s, rd, tcg_res);
10844 } else {
10845 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
10848 tcg_temp_free_i32(tcg_op);
10849 tcg_temp_free_i32(tcg_res);
10852 tcg_temp_free_i32(tcg_idx);
10854 if (!is_q) {
10855 clear_vec_high(s, rd);
10857 } else {
10858 /* long ops: 16x16->32 or 32x32->64 */
10859 TCGv_i64 tcg_res[2];
10860 int pass;
10861 bool satop = extract32(opcode, 0, 1);
10862 TCGMemOp memop = MO_32;
10864 if (satop || !u) {
10865 memop |= MO_SIGN;
10868 if (size == 2) {
10869 TCGv_i64 tcg_idx = tcg_temp_new_i64();
10871 read_vec_element(s, tcg_idx, rm, index, memop);
10873 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
10874 TCGv_i64 tcg_op = tcg_temp_new_i64();
10875 TCGv_i64 tcg_passres;
10876 int passelt;
10878 if (is_scalar) {
10879 passelt = 0;
10880 } else {
10881 passelt = pass + (is_q * 2);
10884 read_vec_element(s, tcg_op, rn, passelt, memop);
10886 tcg_res[pass] = tcg_temp_new_i64();
10888 if (opcode == 0xa || opcode == 0xb) {
10889 /* Non-accumulating ops */
10890 tcg_passres = tcg_res[pass];
10891 } else {
10892 tcg_passres = tcg_temp_new_i64();
10895 tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
10896 tcg_temp_free_i64(tcg_op);
10898 if (satop) {
10899 /* saturating, doubling */
10900 gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
10901 tcg_passres, tcg_passres);
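/* Adding the product to itself through the saturating helper performs the
 * doubling required by the SQDMULL/SQDMLAL/SQDMLSL family, saturating if
 * the doubled product overflows.
 */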
10904 if (opcode == 0xa || opcode == 0xb) {
10905 continue;
10908 /* Accumulating op: handle accumulate step */
10909 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10911 switch (opcode) {
10912 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10913 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10914 break;
10915 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10916 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10917 break;
10918 case 0x7: /* SQDMLSL, SQDMLSL2 */
10919 tcg_gen_neg_i64(tcg_passres, tcg_passres);
10920 /* fall through */
10921 case 0x3: /* SQDMLAL, SQDMLAL2 */
10922 gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
10923 tcg_res[pass],
10924 tcg_passres);
10925 break;
10926 default:
10927 g_assert_not_reached();
10929 tcg_temp_free_i64(tcg_passres);
10931 tcg_temp_free_i64(tcg_idx);
10933 if (is_scalar) {
10934 clear_vec_high(s, rd);
10936 } else {
10937 TCGv_i32 tcg_idx = tcg_temp_new_i32();
10939 assert(size == 1);
10940 read_vec_element_i32(s, tcg_idx, rm, index, size);
10942 if (!is_scalar) {
10943 /* The simplest way to handle the 16x16 indexed ops is to
10944 * duplicate the index into both halves of the 32 bit tcg_idx
10945 * and then use the usual Neon helpers.
10947 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
10950 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
10951 TCGv_i32 tcg_op = tcg_temp_new_i32();
10952 TCGv_i64 tcg_passres;
10954 if (is_scalar) {
10955 read_vec_element_i32(s, tcg_op, rn, pass, size);
10956 } else {
10957 read_vec_element_i32(s, tcg_op, rn,
10958 pass + (is_q * 2), MO_32);
10961 tcg_res[pass] = tcg_temp_new_i64();
10963 if (opcode == 0xa || opcode == 0xb) {
10964 /* Non-accumulating ops */
10965 tcg_passres = tcg_res[pass];
10966 } else {
10967 tcg_passres = tcg_temp_new_i64();
10970 if (memop & MO_SIGN) {
10971 gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
10972 } else {
10973 gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
10975 if (satop) {
10976 gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
10977 tcg_passres, tcg_passres);
10979 tcg_temp_free_i32(tcg_op);
10981 if (opcode == 0xa || opcode == 0xb) {
10982 continue;
10985 /* Accumulating op: handle accumulate step */
10986 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10988 switch (opcode) {
10989 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10990 gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
10991 tcg_passres);
10992 break;
10993 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10994 gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
10995 tcg_passres);
10996 break;
10997 case 0x7: /* SQDMLSL, SQDMLSL2 */
10998 gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
10999 /* fall through */
11000 case 0x3: /* SQDMLAL, SQDMLAL2 */
11001 gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
11002 tcg_res[pass],
11003 tcg_passres);
11004 break;
11005 default:
11006 g_assert_not_reached();
11008 tcg_temp_free_i64(tcg_passres);
11010 tcg_temp_free_i32(tcg_idx);
11012 if (is_scalar) {
11013 tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
11017 if (is_scalar) {
11018 tcg_res[1] = tcg_const_i64(0);
11021 for (pass = 0; pass < 2; pass++) {
11022 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11023 tcg_temp_free_i64(tcg_res[pass]);
11027 if (fpst) {
11028 tcg_temp_free_ptr(fpst);
11032 /* Crypto AES
11033 * 31 24 23 22 21 17 16 12 11 10 9 5 4 0
11034 * +-----------------+------+-----------+--------+-----+------+------+
11035 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd |
11036 * +-----------------+------+-----------+--------+-----+------+------+
11038 static void disas_crypto_aes(DisasContext *s, uint32_t insn)
11040 int size = extract32(insn, 22, 2);
11041 int opcode = extract32(insn, 12, 5);
11042 int rn = extract32(insn, 5, 5);
11043 int rd = extract32(insn, 0, 5);
11044 int decrypt;
11045 TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
11046 TCGv_i32 tcg_decrypt;
11047 CryptoThreeOpIntFn *genfn;
11049 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
11050 || size != 0) {
11051 unallocated_encoding(s);
11052 return;
11055 switch (opcode) {
11056 case 0x4: /* AESE */
11057 decrypt = 0;
11058 genfn = gen_helper_crypto_aese;
11059 break;
11060 case 0x6: /* AESMC */
11061 decrypt = 0;
11062 genfn = gen_helper_crypto_aesmc;
11063 break;
11064 case 0x5: /* AESD */
11065 decrypt = 1;
11066 genfn = gen_helper_crypto_aese;
11067 break;
11068 case 0x7: /* AESIMC */
11069 decrypt = 1;
11070 genfn = gen_helper_crypto_aesmc;
11071 break;
11072 default:
11073 unallocated_encoding(s);
11074 return;
11077 if (!fp_access_check(s)) {
11078 return;
11081 tcg_rd_ptr = vec_full_reg_ptr(s, rd);
11082 tcg_rn_ptr = vec_full_reg_ptr(s, rn);
11083 tcg_decrypt = tcg_const_i32(decrypt);
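/* AESD and AESIMC reuse the AESE/AESMC helpers; the decrypt flag passed
 * here selects the inverse (decryption) transformation inside the helper.
 */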
11085 genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_decrypt);
11087 tcg_temp_free_ptr(tcg_rd_ptr);
11088 tcg_temp_free_ptr(tcg_rn_ptr);
11089 tcg_temp_free_i32(tcg_decrypt);
11092 /* Crypto three-reg SHA
11093 * 31 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
11094 * +-----------------+------+---+------+---+--------+-----+------+------+
11095 * | 0 1 0 1 1 1 1 0 | size | 0 | Rm | 0 | opcode | 0 0 | Rn | Rd |
11096 * +-----------------+------+---+------+---+--------+-----+------+------+
11098 static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
11100 int size = extract32(insn, 22, 2);
11101 int opcode = extract32(insn, 12, 3);
11102 int rm = extract32(insn, 16, 5);
11103 int rn = extract32(insn, 5, 5);
11104 int rd = extract32(insn, 0, 5);
11105 CryptoThreeOpFn *genfn;
11106 TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
11107 int feature = ARM_FEATURE_V8_SHA256;
11109 if (size != 0) {
11110 unallocated_encoding(s);
11111 return;
11114 switch (opcode) {
11115 case 0: /* SHA1C */
11116 case 1: /* SHA1P */
11117 case 2: /* SHA1M */
11118 case 3: /* SHA1SU0 */
11119 genfn = NULL;
11120 feature = ARM_FEATURE_V8_SHA1;
11121 break;
11122 case 4: /* SHA256H */
11123 genfn = gen_helper_crypto_sha256h;
11124 break;
11125 case 5: /* SHA256H2 */
11126 genfn = gen_helper_crypto_sha256h2;
11127 break;
11128 case 6: /* SHA256SU1 */
11129 genfn = gen_helper_crypto_sha256su1;
11130 break;
11131 default:
11132 unallocated_encoding(s);
11133 return;
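/* genfn == NULL marks the SHA1 three-register ops: they all funnel through
 * gen_helper_crypto_sha1_3reg below, which takes the opcode as an extra
 * argument to select C/P/M/SU0.
 */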
11136 if (!arm_dc_feature(s, feature)) {
11137 unallocated_encoding(s);
11138 return;
11141 if (!fp_access_check(s)) {
11142 return;
11145 tcg_rd_ptr = vec_full_reg_ptr(s, rd);
11146 tcg_rn_ptr = vec_full_reg_ptr(s, rn);
11147 tcg_rm_ptr = vec_full_reg_ptr(s, rm);
11149 if (genfn) {
11150 genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);
11151 } else {
11152 TCGv_i32 tcg_opcode = tcg_const_i32(opcode);
11154 gen_helper_crypto_sha1_3reg(tcg_rd_ptr, tcg_rn_ptr,
11155 tcg_rm_ptr, tcg_opcode);
11156 tcg_temp_free_i32(tcg_opcode);
11159 tcg_temp_free_ptr(tcg_rd_ptr);
11160 tcg_temp_free_ptr(tcg_rn_ptr);
11161 tcg_temp_free_ptr(tcg_rm_ptr);
11164 /* Crypto two-reg SHA
11165 * 31 24 23 22 21 17 16 12 11 10 9 5 4 0
11166 * +-----------------+------+-----------+--------+-----+------+------+
11167 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd |
11168 * +-----------------+------+-----------+--------+-----+------+------+
11170 static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
11172 int size = extract32(insn, 22, 2);
11173 int opcode = extract32(insn, 12, 5);
11174 int rn = extract32(insn, 5, 5);
11175 int rd = extract32(insn, 0, 5);
11176 CryptoTwoOpFn *genfn;
11177 int feature;
11178 TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
11180 if (size != 0) {
11181 unallocated_encoding(s);
11182 return;
11185 switch (opcode) {
11186 case 0: /* SHA1H */
11187 feature = ARM_FEATURE_V8_SHA1;
11188 genfn = gen_helper_crypto_sha1h;
11189 break;
11190 case 1: /* SHA1SU1 */
11191 feature = ARM_FEATURE_V8_SHA1;
11192 genfn = gen_helper_crypto_sha1su1;
11193 break;
11194 case 2: /* SHA256SU0 */
11195 feature = ARM_FEATURE_V8_SHA256;
11196 genfn = gen_helper_crypto_sha256su0;
11197 break;
11198 default:
11199 unallocated_encoding(s);
11200 return;
11203 if (!arm_dc_feature(s, feature)) {
11204 unallocated_encoding(s);
11205 return;
11208 if (!fp_access_check(s)) {
11209 return;
11212 tcg_rd_ptr = vec_full_reg_ptr(s, rd);
11213 tcg_rn_ptr = vec_full_reg_ptr(s, rn);
11215 genfn(tcg_rd_ptr, tcg_rn_ptr);
11217 tcg_temp_free_ptr(tcg_rd_ptr);
11218 tcg_temp_free_ptr(tcg_rn_ptr);
11221 /* C3.6 Data processing - SIMD, inc Crypto
11223 * As the decode gets a little complex we are using a table based
11224 * approach for this part of the decode.
11226 static const AArch64DecodeTable data_proc_simd[] = {
11227 /* pattern , mask , fn */
11228 { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
11229 { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
11230 { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
11231 { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
11232 { 0x0e000400, 0x9fe08400, disas_simd_copy },
11233 { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
11234 /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
11235 { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
11236 { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
11237 { 0x0e000000, 0xbf208c00, disas_simd_tb },
11238 { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
11239 { 0x2e000000, 0xbf208400, disas_simd_ext },
11240 { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
11241 { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
11242 { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
11243 { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
11244 { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
11245 { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
11246 { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
11247 { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
11248 { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
11249 { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
11250 { 0x00000000, 0x00000000, NULL }
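/* lookup_disas_fn() walks this table in order and returns the first entry
 * for which (insn & mask) == pattern, so more specific encodings (such as
 * simd_mod_imm above) must be listed before broader ones they overlap.
 */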
11253 static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
11255 /* Note that this is called with all non-FP cases from
11256 * table C3-6 so it must UNDEF for entries not specifically
11257 * allocated to instructions in that table.
11259 AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
11260 if (fn) {
11261 fn(s, insn);
11262 } else {
11263 unallocated_encoding(s);
11267 /* C3.6 Data processing - SIMD and floating point */
11268 static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
11270 if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
11271 disas_data_proc_fp(s, insn);
11272 } else {
11273 /* SIMD, including crypto */
11274 disas_data_proc_simd(s, insn);
11278 /* C3.1 A64 instruction index by encoding */
11279 static void disas_a64_insn(CPUARMState *env, DisasContext *s)
11281 uint32_t insn;
11283 insn = arm_ldl_code(env, s->pc, s->sctlr_b);
11284 s->insn = insn;
11285 s->pc += 4;
11287 s->fp_access_checked = false;
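/* Top-level decode is driven by insn[28:25] (the extract32 below); each
 * case corresponds to one of the major groups of the A64 encoding table.
 */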
11289 switch (extract32(insn, 25, 4)) {
11290 case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
11291 unallocated_encoding(s);
11292 break;
11293 case 0x8: case 0x9: /* Data processing - immediate */
11294 disas_data_proc_imm(s, insn);
11295 break;
11296 case 0xa: case 0xb: /* Branch, exception generation and system insns */
11297 disas_b_exc_sys(s, insn);
11298 break;
11299 case 0x4:
11300 case 0x6:
11301 case 0xc:
11302 case 0xe: /* Loads and stores */
11303 disas_ldst(s, insn);
11304 break;
11305 case 0x5:
11306 case 0xd: /* Data processing - register */
11307 disas_data_proc_reg(s, insn);
11308 break;
11309 case 0x7:
11310 case 0xf: /* Data processing - SIMD and floating point */
11311 disas_data_proc_simd_fp(s, insn);
11312 break;
11313 default:
11314 assert(FALSE); /* all 16 cases should be handled above */
11315 break;
11318 /* if we allocated any temporaries, free them here */
11319 free_tmp_a64(s);
11322 static int aarch64_tr_init_disas_context(DisasContextBase *dcbase,
11323 CPUState *cpu, int max_insns)
11325 DisasContext *dc = container_of(dcbase, DisasContext, base);
11326 CPUARMState *env = cpu->env_ptr;
11327 ARMCPU *arm_cpu = arm_env_get_cpu(env);
11328 int bound;
11330 dc->pc = dc->base.pc_first;
11331 dc->condjmp = 0;
11333 dc->aarch64 = 1;
11334 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11335 * there is no secure EL1, so we route exceptions to EL3.
11337 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11338 !arm_el_is_aa64(env, 3);
11339 dc->thumb = 0;
11340 dc->sctlr_b = 0;
11341 dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
11342 dc->condexec_mask = 0;
11343 dc->condexec_cond = 0;
11344 dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
11345 dc->tbi0 = ARM_TBFLAG_TBI0(dc->base.tb->flags);
11346 dc->tbi1 = ARM_TBFLAG_TBI1(dc->base.tb->flags);
11347 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
11348 #if !defined(CONFIG_USER_ONLY)
11349 dc->user = (dc->current_el == 0);
11350 #endif
11351 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
11352 dc->vec_len = 0;
11353 dc->vec_stride = 0;
11354 dc->cp_regs = arm_cpu->cp_regs;
11355 dc->features = env->features;
11357 /* Single step state. The code-generation logic here is:
11358 * SS_ACTIVE == 0:
11359 * generate code with no special handling for single-stepping (except
11360 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11361 * this happens anyway because those changes are all system register or
11362 * PSTATE writes).
11363 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11364 * emit code for one insn
11365 * emit code to clear PSTATE.SS
11366 * emit code to generate software step exception for completed step
11367 * end TB (as usual for having generated an exception)
11368 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11369 * emit code to generate a software step exception
11370 * end the TB
11372 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
11373 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
11374 dc->is_ldex = false;
11375 dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);
11377 /* Bound the number of insns to execute to those left on the page. */
11378 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
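/* TARGET_PAGE_MASK has all the high bits set, so pc_first | TARGET_PAGE_MASK
 * is the (negative) distance to the end of the page; negating it gives the
 * bytes left on the page, and dividing by 4 the number of A64 insns.
 */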
11380 /* If architectural single step active, limit to 1. */
11381 if (dc->ss_active) {
11382 bound = 1;
11384 max_insns = MIN(max_insns, bound);
11386 init_tmp_a64_array(dc);
11388 return max_insns;
11391 static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
11393 tcg_clear_temp_count();
11396 static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
11398 DisasContext *dc = container_of(dcbase, DisasContext, base);
11400 tcg_gen_insn_start(dc->pc, 0, 0);
11401 dc->insn_start = tcg_last_op();
11404 static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
11405 const CPUBreakpoint *bp)
11407 DisasContext *dc = container_of(dcbase, DisasContext, base);
11409 if (bp->flags & BP_CPU) {
11410 gen_a64_set_pc_im(dc->pc);
11411 gen_helper_check_breakpoints(cpu_env);
11412 /* End the TB early; it likely won't be executed */
11413 dc->base.is_jmp = DISAS_TOO_MANY;
11414 } else {
11415 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
11416 /* The address covered by the breakpoint must be
11417 included in [tb->pc, tb->pc + tb->size) in order
11418 for it to be properly cleared -- thus we
11419 increment the PC here so that the logic setting
11420 tb->size below does the right thing. */
11421 dc->pc += 4;
11422 dc->base.is_jmp = DISAS_NORETURN;
11425 return true;
11428 static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
11430 DisasContext *dc = container_of(dcbase, DisasContext, base);
11431 CPUARMState *env = cpu->env_ptr;
11433 if (dc->ss_active && !dc->pstate_ss) {
11434 /* Singlestep state is Active-pending.
11435 * If we're in this state at the start of a TB then either
11436 * a) we just took an exception to an EL which is being debugged
11437 * and this is the first insn in the exception handler
11438 * b) debug exceptions were masked and we just unmasked them
11439 * without changing EL (eg by clearing PSTATE.D)
11440 * In either case we're going to take a swstep exception in the
11441 * "did not step an insn" case, and so the syndrome ISV and EX
11442 * bits should be zero.
11444 assert(dc->base.num_insns == 1);
11445 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
11446 default_exception_el(dc));
11447 dc->base.is_jmp = DISAS_NORETURN;
11448 } else {
11449 disas_a64_insn(env, dc);
11452 dc->base.pc_next = dc->pc;
11453 translator_loop_temp_check(&dc->base);
11456 static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
11458 DisasContext *dc = container_of(dcbase, DisasContext, base);
11460 if (unlikely(dc->base.singlestep_enabled || dc->ss_active)) {
11461 /* Note that this means single stepping WFI doesn't halt the CPU.
11462 * For conditional branch insns this is harmless unreachable code as
11463 * gen_goto_tb() has already handled emitting the debug exception
11464 * (and thus a tb-jump is not possible when singlestepping).
11466 switch (dc->base.is_jmp) {
11467 default:
11468 gen_a64_set_pc_im(dc->pc);
11469 /* fall through */
11470 case DISAS_EXIT:
11471 case DISAS_JUMP:
11472 if (dc->base.singlestep_enabled) {
11473 gen_exception_internal(EXCP_DEBUG);
11474 } else {
11475 gen_step_complete_exception(dc);
11477 break;
11478 case DISAS_NORETURN:
11479 break;
11481 } else {
11482 switch (dc->base.is_jmp) {
11483 case DISAS_NEXT:
11484 case DISAS_TOO_MANY:
11485 gen_goto_tb(dc, 1, dc->pc);
11486 break;
11487 default:
11488 case DISAS_UPDATE:
11489 gen_a64_set_pc_im(dc->pc);
11490 /* fall through */
11491 case DISAS_JUMP:
11492 tcg_gen_lookup_and_goto_ptr();
11493 break;
11494 case DISAS_EXIT:
11495 tcg_gen_exit_tb(0);
11496 break;
11497 case DISAS_NORETURN:
11498 case DISAS_SWI:
11499 break;
11500 case DISAS_WFE:
11501 gen_a64_set_pc_im(dc->pc);
11502 gen_helper_wfe(cpu_env);
11503 break;
11504 case DISAS_YIELD:
11505 gen_a64_set_pc_im(dc->pc);
11506 gen_helper_yield(cpu_env);
11507 break;
11508 case DISAS_WFI:
11510 /* This is a special case because we don't want to just halt the CPU
11511 * if trying to debug across a WFI.
11513 TCGv_i32 tmp = tcg_const_i32(4);
11515 gen_a64_set_pc_im(dc->pc);
11516 gen_helper_wfi(cpu_env, tmp);
11517 tcg_temp_free_i32(tmp);
11518 /* The helper doesn't necessarily throw an exception, but we
11519 * must go back to the main loop to check for interrupts anyway.
11521 tcg_gen_exit_tb(0);
11522 break;
11527 /* Functions above can change dc->pc, so re-align db->pc_next */
11528 dc->base.pc_next = dc->pc;
11531 static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
11532 CPUState *cpu)
11534 DisasContext *dc = container_of(dcbase, DisasContext, base);
11536 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
11537 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
11540 const TranslatorOps aarch64_translator_ops = {
11541 .init_disas_context = aarch64_tr_init_disas_context,
11542 .tb_start = aarch64_tr_tb_start,
11543 .insn_start = aarch64_tr_insn_start,
11544 .breakpoint_check = aarch64_tr_breakpoint_check,
11545 .translate_insn = aarch64_tr_translate_insn,
11546 .tb_stop = aarch64_tr_tb_stop,
11547 .disas_log = aarch64_tr_disas_log,