target/arm: Trap non-streaming usage when Streaming SVE is active
target/arm/translate-a64.c (qemu/ar7.git)
1 /*
2 * AArch64 translation
4 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "exec/exec-all.h"
23 #include "tcg/tcg-op.h"
24 #include "tcg/tcg-op-gvec.h"
25 #include "qemu/log.h"
26 #include "arm_ldst.h"
27 #include "translate.h"
28 #include "internals.h"
29 #include "qemu/host-utils.h"
30 #include "semihosting/semihost.h"
31 #include "exec/gen-icount.h"
32 #include "exec/helper-proto.h"
33 #include "exec/helper-gen.h"
34 #include "exec/log.h"
35 #include "cpregs.h"
36 #include "translate-a64.h"
37 #include "qemu/atomic128.h"
39 static TCGv_i64 cpu_X[32];
40 static TCGv_i64 cpu_pc;
42 /* Load/store exclusive handling */
43 static TCGv_i64 cpu_exclusive_high;
45 static const char *regnames[] = {
46 "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
47 "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
48 "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
49 "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
52 enum a64_shift_type {
53 A64_SHIFT_TYPE_LSL = 0,
54 A64_SHIFT_TYPE_LSR = 1,
55 A64_SHIFT_TYPE_ASR = 2,
56 A64_SHIFT_TYPE_ROR = 3
59 /* Table based decoder typedefs - used when the relevant bits for decode
60 * are too awkwardly scattered across the instruction (eg SIMD).
62 typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);
64 typedef struct AArch64DecodeTable {
65 uint32_t pattern;
66 uint32_t mask;
67 AArch64DecodeFn *disas_fn;
68 } AArch64DecodeTable;
70 /* initialize TCG globals. */
71 void a64_translate_init(void)
73 int i;
75 cpu_pc = tcg_global_mem_new_i64(cpu_env,
76 offsetof(CPUARMState, pc),
77 "pc");
78 for (i = 0; i < 32; i++) {
79 cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
80 offsetof(CPUARMState, xregs[i]),
81 regnames[i]);
84 cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
85 offsetof(CPUARMState, exclusive_high), "exclusive_high");
89 * Return the core mmu_idx to use for A64 "unprivileged load/store" insns
91 static int get_a64_user_mem_index(DisasContext *s)
94 * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
95 * which is the usual mmu_idx for this cpu state.
97 ARMMMUIdx useridx = s->mmu_idx;
99 if (s->unpriv) {
101 * We have pre-computed the condition for AccType_UNPRIV.
102 * Therefore we should never get here with a mmu_idx for
103 * which we do not know the corresponding user mmu_idx.
105 switch (useridx) {
106 case ARMMMUIdx_E10_1:
107 case ARMMMUIdx_E10_1_PAN:
108 useridx = ARMMMUIdx_E10_0;
109 break;
110 case ARMMMUIdx_E20_2:
111 case ARMMMUIdx_E20_2_PAN:
112 useridx = ARMMMUIdx_E20_0;
113 break;
114 case ARMMMUIdx_SE10_1:
115 case ARMMMUIdx_SE10_1_PAN:
116 useridx = ARMMMUIdx_SE10_0;
117 break;
118 case ARMMMUIdx_SE20_2:
119 case ARMMMUIdx_SE20_2_PAN:
120 useridx = ARMMMUIdx_SE20_0;
121 break;
122 default:
123 g_assert_not_reached();
126 return arm_to_core_mmu_idx(useridx);
129 static void set_btype_raw(int val)
131 tcg_gen_st_i32(tcg_constant_i32(val), cpu_env,
132 offsetof(CPUARMState, btype));
135 static void set_btype(DisasContext *s, int val)
137 /* BTYPE is a 2-bit field, and 0 should be done with reset_btype. */
138 tcg_debug_assert(val >= 1 && val <= 3);
139 set_btype_raw(val);
140 s->btype = -1;
143 static void reset_btype(DisasContext *s)
145 if (s->btype != 0) {
146 set_btype_raw(0);
147 s->btype = 0;
151 void gen_a64_set_pc_im(uint64_t val)
153 tcg_gen_movi_i64(cpu_pc, val);
157 * Handle Top Byte Ignore (TBI) bits.
159 * If address tagging is enabled via the TCR TBI bits:
160 * + for EL2 and EL3 there is only one TBI bit, and if it is set
161 * then the address is zero-extended, clearing bits [63:56]
162 * + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
163 * and TBI1 controls addresses with bit 55 == 1.
164 * If the appropriate TBI bit is set for the address then
165 * the address is sign-extended from bit 55 into bits [63:56]
167 * Here we have concatenated TBI{1,0} into tbi.
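 *
 * For example, with tbi == 1 (TBI0 set, TBI1 clear): a pointer whose
 * bit 55 is 0 has bits [63:56] replaced by the sign-extension of bit 55
 * (i.e. cleared), while a pointer whose bit 55 is 1 passes through
 * unmodified.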
169 static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
170 TCGv_i64 src, int tbi)
172 if (tbi == 0) {
173 /* Load unmodified address */
174 tcg_gen_mov_i64(dst, src);
175 } else if (!regime_has_2_ranges(s->mmu_idx)) {
176 /* Force tag byte to all zero */
177 tcg_gen_extract_i64(dst, src, 0, 56);
178 } else {
179 /* Sign-extend from bit 55. */
180 tcg_gen_sextract_i64(dst, src, 0, 56);
182 switch (tbi) {
183 case 1:
184 /* tbi0 but !tbi1: only use the extension if positive */
185 tcg_gen_and_i64(dst, dst, src);
186 break;
187 case 2:
188 /* !tbi0 but tbi1: only use the extension if negative */
189 tcg_gen_or_i64(dst, dst, src);
190 break;
191 case 3:
192 /* tbi0 and tbi1: always use the extension */
193 break;
194 default:
195 g_assert_not_reached();
200 static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
203 * If address tagging is enabled for instructions via the TCR TBI bits,
204 * then loading an address into the PC will clear out any tag.
206 gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
210 * Handle MTE and/or TBI.
212 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
213 * for the tag to be present in the FAR_ELx register. But for user-only
214 * mode we do not have a TLB with which to implement this, so we must
215 * remove the top byte now.
217 * Always return a fresh temporary that we can increment independently
218 * of the write-back address.
221 TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
223 TCGv_i64 clean = new_tmp_a64(s);
224 #ifdef CONFIG_USER_ONLY
225 gen_top_byte_ignore(s, clean, addr, s->tbid);
226 #else
227 tcg_gen_mov_i64(clean, addr);
228 #endif
229 return clean;
232 /* Insert a zero tag into src, with the result at dst. */
233 static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src)
235 tcg_gen_andi_i64(dst, src, ~MAKE_64BIT_MASK(56, 4));
238 static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
239 MMUAccessType acc, int log2_size)
241 gen_helper_probe_access(cpu_env, ptr,
242 tcg_constant_i32(acc),
243 tcg_constant_i32(get_mem_index(s)),
244 tcg_constant_i32(1 << log2_size));
248 * For MTE, check a single logical or atomic access. This probes a single
249 * address, the exact one specified. The size and alignment of the access
250 * is not relevant to MTE, per se, but watchpoints do require the size,
251 * and we want to recognize those before making any other changes to state.
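 *
 * The parameters of the check are passed to the helper packed into an
 * MTEDESC descriptor word: the mmu index, the TBI and TCMA state, whether
 * the access is a write, and the access size minus one.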
253 static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
254 bool is_write, bool tag_checked,
255 int log2_size, bool is_unpriv,
256 int core_idx)
258 if (tag_checked && s->mte_active[is_unpriv]) {
259 TCGv_i64 ret;
260 int desc = 0;
262 desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
263 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
264 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
265 desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
266 desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1);
268 ret = new_tmp_a64(s);
269 gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);
271 return ret;
273 return clean_data_tbi(s, addr);
276 TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
277 bool tag_checked, int log2_size)
279 return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, log2_size,
280 false, get_mem_index(s));
284 * For MTE, check multiple logical sequential accesses.
286 TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
287 bool tag_checked, int size)
289 if (tag_checked && s->mte_active[0]) {
290 TCGv_i64 ret;
291 int desc = 0;
293 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
294 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
295 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
296 desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
297 desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
299 ret = new_tmp_a64(s);
300 gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);
302 return ret;
304 return clean_data_tbi(s, addr);
307 typedef struct DisasCompare64 {
308 TCGCond cond;
309 TCGv_i64 value;
310 } DisasCompare64;
312 static void a64_test_cc(DisasCompare64 *c64, int cc)
314 DisasCompare c32;
316 arm_test_cc(&c32, cc);
318 /* Sign-extend the 32-bit value so that the GE/LT comparisons work
319 * properly. The NE/EQ comparisons are also fine with this choice. */
320 c64->cond = c32.cond;
321 c64->value = tcg_temp_new_i64();
322 tcg_gen_ext_i32_i64(c64->value, c32.value);
324 arm_free_cc(&c32);
327 static void a64_free_cc(DisasCompare64 *c64)
329 tcg_temp_free_i64(c64->value);
332 static void gen_rebuild_hflags(DisasContext *s)
334 gen_helper_rebuild_hflags_a64(cpu_env, tcg_constant_i32(s->current_el));
337 static void gen_exception_internal(int excp)
339 assert(excp_is_internal(excp));
340 gen_helper_exception_internal(cpu_env, tcg_constant_i32(excp));
343 static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp)
345 gen_a64_set_pc_im(pc);
346 gen_exception_internal(excp);
347 s->base.is_jmp = DISAS_NORETURN;
350 static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
352 gen_a64_set_pc_im(s->pc_curr);
353 gen_helper_exception_bkpt_insn(cpu_env, tcg_constant_i32(syndrome));
354 s->base.is_jmp = DISAS_NORETURN;
357 static void gen_step_complete_exception(DisasContext *s)
359 /* We just completed step of an insn. Move from Active-not-pending
360 * to Active-pending, and then also take the swstep exception.
361 * This corresponds to making the (IMPDEF) choice to prioritize
362 * swstep exceptions over asynchronous exceptions taken to an exception
363 * level where debug is disabled. This choice has the advantage that
364 * we do not need to maintain internal state corresponding to the
365 * ISV/EX syndrome bits between completion of the step and generation
366 * of the exception, and our syndrome information is always correct.
368 gen_ss_advance(s);
369 gen_swstep_exception(s, 1, s->is_ldex);
370 s->base.is_jmp = DISAS_NORETURN;
373 static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
375 if (s->ss_active) {
376 return false;
378 return translator_use_goto_tb(&s->base, dest);
381 static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
383 if (use_goto_tb(s, dest)) {
384 tcg_gen_goto_tb(n);
385 gen_a64_set_pc_im(dest);
386 tcg_gen_exit_tb(s->base.tb, n);
387 s->base.is_jmp = DISAS_NORETURN;
388 } else {
389 gen_a64_set_pc_im(dest);
390 if (s->ss_active) {
391 gen_step_complete_exception(s);
392 } else {
393 tcg_gen_lookup_and_goto_ptr();
394 s->base.is_jmp = DISAS_NORETURN;
399 static void init_tmp_a64_array(DisasContext *s)
401 #ifdef CONFIG_DEBUG_TCG
402 memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
403 #endif
404 s->tmp_a64_count = 0;
407 static void free_tmp_a64(DisasContext *s)
409 int i;
410 for (i = 0; i < s->tmp_a64_count; i++) {
411 tcg_temp_free_i64(s->tmp_a64[i]);
413 init_tmp_a64_array(s);
416 TCGv_i64 new_tmp_a64(DisasContext *s)
418 assert(s->tmp_a64_count < TMP_A64_MAX);
419 return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
422 TCGv_i64 new_tmp_a64_local(DisasContext *s)
424 assert(s->tmp_a64_count < TMP_A64_MAX);
425 return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_local_new_i64();
428 TCGv_i64 new_tmp_a64_zero(DisasContext *s)
430 TCGv_i64 t = new_tmp_a64(s);
431 tcg_gen_movi_i64(t, 0);
432 return t;
436 * Register access functions
438 * These functions are used for directly accessing a register where
439 * changes to the final register value are likely to be made. If you
440 * need to use a register for temporary calculation (e.g. index type
441 * operations) use the read_* form.
443 * B1.2.1 Register mappings
445 * In instruction register encoding 31 can refer to ZR (zero register) or
446 * the SP (stack pointer) depending on context. In QEMU's case we map SP
447 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
448 * This is the point of the _sp forms.
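 *
 * For example, an instruction that encodes register 31 as the zero
 * register should read it via cpu_reg() (and get a discardable zero
 * temporary), whereas one that treats register 31 as SP (e.g. an
 * SP-relative load/store base) uses the _sp accessors.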
450 TCGv_i64 cpu_reg(DisasContext *s, int reg)
452 if (reg == 31) {
453 return new_tmp_a64_zero(s);
454 } else {
455 return cpu_X[reg];
459 /* register access for when 31 == SP */
460 TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
462 return cpu_X[reg];
465 /* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
466 * representing the register contents. This TCGv is an auto-freed
467 * temporary so it need not be explicitly freed, and may be modified.
469 TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
471 TCGv_i64 v = new_tmp_a64(s);
472 if (reg != 31) {
473 if (sf) {
474 tcg_gen_mov_i64(v, cpu_X[reg]);
475 } else {
476 tcg_gen_ext32u_i64(v, cpu_X[reg]);
478 } else {
479 tcg_gen_movi_i64(v, 0);
481 return v;
484 TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
486 TCGv_i64 v = new_tmp_a64(s);
487 if (sf) {
488 tcg_gen_mov_i64(v, cpu_X[reg]);
489 } else {
490 tcg_gen_ext32u_i64(v, cpu_X[reg]);
492 return v;
495 /* Return the offset into CPUARMState of a slice (from
496 * the least significant end) of FP register Qn (ie
497 * Dn, Sn, Hn or Bn).
498 * (Note that this is not the same mapping as for A32; see cpu.h)
500 static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
502 return vec_reg_offset(s, regno, 0, size);
505 /* Offset of the high half of the 128 bit vector Qn */
506 static inline int fp_reg_hi_offset(DisasContext *s, int regno)
508 return vec_reg_offset(s, regno, 1, MO_64);
511 /* Convenience accessors for reading and writing single and double
512 * FP registers. Writing clears the upper parts of the associated
513 * 128 bit vector register, as required by the architecture.
514 * Note that unlike the GP register accessors, the values returned
515 * by the read functions must be manually freed.
517 static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
519 TCGv_i64 v = tcg_temp_new_i64();
521 tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
522 return v;
525 static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
527 TCGv_i32 v = tcg_temp_new_i32();
529 tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
530 return v;
533 static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
535 TCGv_i32 v = tcg_temp_new_i32();
537 tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
538 return v;
541 /* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
542 * If SVE is not enabled, then there are only 128 bits in the vector.
544 static void clear_vec_high(DisasContext *s, bool is_q, int rd)
546 unsigned ofs = fp_reg_offset(s, rd, MO_64);
547 unsigned vsz = vec_full_reg_size(s);
549 /* Nop move, with side effect of clearing the tail. */
550 tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
553 void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
555 unsigned ofs = fp_reg_offset(s, reg, MO_64);
557 tcg_gen_st_i64(v, cpu_env, ofs);
558 clear_vec_high(s, false, reg);
561 static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
563 TCGv_i64 tmp = tcg_temp_new_i64();
565 tcg_gen_extu_i32_i64(tmp, v);
566 write_fp_dreg(s, reg, tmp);
567 tcg_temp_free_i64(tmp);
570 /* Expand a 2-operand AdvSIMD vector operation using an expander function. */
571 static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
572 GVecGen2Fn *gvec_fn, int vece)
574 gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
575 is_q ? 16 : 8, vec_full_reg_size(s));
578 /* Expand a 2-operand + immediate AdvSIMD vector operation using
579 * an expander function.
581 static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
582 int64_t imm, GVecGen2iFn *gvec_fn, int vece)
584 gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
585 imm, is_q ? 16 : 8, vec_full_reg_size(s));
588 /* Expand a 3-operand AdvSIMD vector operation using an expander function. */
589 static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
590 GVecGen3Fn *gvec_fn, int vece)
592 gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
593 vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
596 /* Expand a 4-operand AdvSIMD vector operation using an expander function. */
597 static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
598 int rx, GVecGen4Fn *gvec_fn, int vece)
600 gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
601 vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
602 is_q ? 16 : 8, vec_full_reg_size(s));
605 /* Expand a 2-operand operation using an out-of-line helper. */
606 static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
607 int rn, int data, gen_helper_gvec_2 *fn)
609 tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
610 vec_full_reg_offset(s, rn),
611 is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
614 /* Expand a 3-operand operation using an out-of-line helper. */
615 static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
616 int rn, int rm, int data, gen_helper_gvec_3 *fn)
618 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
619 vec_full_reg_offset(s, rn),
620 vec_full_reg_offset(s, rm),
621 is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
624 /* Expand a 3-operand + fpstatus pointer + simd data value operation using
625 * an out-of-line helper.
627 static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
628 int rm, bool is_fp16, int data,
629 gen_helper_gvec_3_ptr *fn)
631 TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
632 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
633 vec_full_reg_offset(s, rn),
634 vec_full_reg_offset(s, rm), fpst,
635 is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
636 tcg_temp_free_ptr(fpst);
639 /* Expand a 3-operand + qc + operation using an out-of-line helper. */
640 static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn,
641 int rm, gen_helper_gvec_3_ptr *fn)
643 TCGv_ptr qc_ptr = tcg_temp_new_ptr();
645 tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
646 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
647 vec_full_reg_offset(s, rn),
648 vec_full_reg_offset(s, rm), qc_ptr,
649 is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
650 tcg_temp_free_ptr(qc_ptr);
653 /* Expand a 4-operand operation using an out-of-line helper. */
654 static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
655 int rm, int ra, int data, gen_helper_gvec_4 *fn)
657 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
658 vec_full_reg_offset(s, rn),
659 vec_full_reg_offset(s, rm),
660 vec_full_reg_offset(s, ra),
661 is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
665 * Expand a 4-operand + fpstatus pointer + simd data value operation using
666 * an out-of-line helper.
668 static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
669 int rm, int ra, bool is_fp16, int data,
670 gen_helper_gvec_4_ptr *fn)
672 TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
673 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
674 vec_full_reg_offset(s, rn),
675 vec_full_reg_offset(s, rm),
676 vec_full_reg_offset(s, ra), fpst,
677 is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
678 tcg_temp_free_ptr(fpst);
681 /* Set ZF and NF based on a 64 bit result. This is alas fiddlier
682 * than the 32 bit equivalent.
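 *
 * NF takes the high 32 bits (so bit 31 of NF is the sign of the result),
 * and ZF is the OR of the low and high halves, which is zero exactly
 * when the whole 64 bit result is zero.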
684 static inline void gen_set_NZ64(TCGv_i64 result)
686 tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
687 tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
690 /* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
691 static inline void gen_logic_CC(int sf, TCGv_i64 result)
693 if (sf) {
694 gen_set_NZ64(result);
695 } else {
696 tcg_gen_extrl_i64_i32(cpu_ZF, result);
697 tcg_gen_mov_i32(cpu_NF, cpu_ZF);
699 tcg_gen_movi_i32(cpu_CF, 0);
700 tcg_gen_movi_i32(cpu_VF, 0);
703 /* dest = T0 + T1; compute C, N, V and Z flags */
704 static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
706 if (sf) {
707 TCGv_i64 result, flag, tmp;
708 result = tcg_temp_new_i64();
709 flag = tcg_temp_new_i64();
710 tmp = tcg_temp_new_i64();
712 tcg_gen_movi_i64(tmp, 0);
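/* Compute the 128-bit sum of t0 and t1 (both with a zero high word); the high half ('flag') is the carry out of the 64-bit addition. */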
713 tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);
715 tcg_gen_extrl_i64_i32(cpu_CF, flag);
717 gen_set_NZ64(result);
719 tcg_gen_xor_i64(flag, result, t0);
720 tcg_gen_xor_i64(tmp, t0, t1);
721 tcg_gen_andc_i64(flag, flag, tmp);
722 tcg_temp_free_i64(tmp);
723 tcg_gen_extrh_i64_i32(cpu_VF, flag);
725 tcg_gen_mov_i64(dest, result);
726 tcg_temp_free_i64(result);
727 tcg_temp_free_i64(flag);
728 } else {
729 /* 32 bit arithmetic */
730 TCGv_i32 t0_32 = tcg_temp_new_i32();
731 TCGv_i32 t1_32 = tcg_temp_new_i32();
732 TCGv_i32 tmp = tcg_temp_new_i32();
734 tcg_gen_movi_i32(tmp, 0);
735 tcg_gen_extrl_i64_i32(t0_32, t0);
736 tcg_gen_extrl_i64_i32(t1_32, t1);
737 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
738 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
739 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
740 tcg_gen_xor_i32(tmp, t0_32, t1_32);
741 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
742 tcg_gen_extu_i32_i64(dest, cpu_NF);
744 tcg_temp_free_i32(tmp);
745 tcg_temp_free_i32(t0_32);
746 tcg_temp_free_i32(t1_32);
750 /* dest = T0 - T1; compute C, N, V and Z flags */
751 static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
753 if (sf) {
754 /* 64 bit arithmetic */
755 TCGv_i64 result, flag, tmp;
757 result = tcg_temp_new_i64();
758 flag = tcg_temp_new_i64();
759 tcg_gen_sub_i64(result, t0, t1);
761 gen_set_NZ64(result);
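/* For subtraction the ARM carry flag means "no borrow", i.e. t0 >= t1 unsigned. */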
763 tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
764 tcg_gen_extrl_i64_i32(cpu_CF, flag);
766 tcg_gen_xor_i64(flag, result, t0);
767 tmp = tcg_temp_new_i64();
768 tcg_gen_xor_i64(tmp, t0, t1);
769 tcg_gen_and_i64(flag, flag, tmp);
770 tcg_temp_free_i64(tmp);
771 tcg_gen_extrh_i64_i32(cpu_VF, flag);
772 tcg_gen_mov_i64(dest, result);
773 tcg_temp_free_i64(flag);
774 tcg_temp_free_i64(result);
775 } else {
776 /* 32 bit arithmetic */
777 TCGv_i32 t0_32 = tcg_temp_new_i32();
778 TCGv_i32 t1_32 = tcg_temp_new_i32();
779 TCGv_i32 tmp;
781 tcg_gen_extrl_i64_i32(t0_32, t0);
782 tcg_gen_extrl_i64_i32(t1_32, t1);
783 tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
784 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
785 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
786 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
787 tmp = tcg_temp_new_i32();
788 tcg_gen_xor_i32(tmp, t0_32, t1_32);
789 tcg_temp_free_i32(t0_32);
790 tcg_temp_free_i32(t1_32);
791 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
792 tcg_temp_free_i32(tmp);
793 tcg_gen_extu_i32_i64(dest, cpu_NF);
797 /* dest = T0 + T1 + CF; do not compute flags. */
798 static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
800 TCGv_i64 flag = tcg_temp_new_i64();
801 tcg_gen_extu_i32_i64(flag, cpu_CF);
802 tcg_gen_add_i64(dest, t0, t1);
803 tcg_gen_add_i64(dest, dest, flag);
804 tcg_temp_free_i64(flag);
806 if (!sf) {
807 tcg_gen_ext32u_i64(dest, dest);
811 /* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
812 static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
814 if (sf) {
815 TCGv_i64 result = tcg_temp_new_i64();
816 TCGv_i64 cf_64 = tcg_temp_new_i64();
817 TCGv_i64 vf_64 = tcg_temp_new_i64();
818 TCGv_i64 tmp = tcg_temp_new_i64();
819 TCGv_i64 zero = tcg_constant_i64(0);
821 tcg_gen_extu_i32_i64(cf_64, cpu_CF);
822 tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero);
823 tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero);
824 tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
825 gen_set_NZ64(result);
827 tcg_gen_xor_i64(vf_64, result, t0);
828 tcg_gen_xor_i64(tmp, t0, t1);
829 tcg_gen_andc_i64(vf_64, vf_64, tmp);
830 tcg_gen_extrh_i64_i32(cpu_VF, vf_64);
832 tcg_gen_mov_i64(dest, result);
834 tcg_temp_free_i64(tmp);
835 tcg_temp_free_i64(vf_64);
836 tcg_temp_free_i64(cf_64);
837 tcg_temp_free_i64(result);
838 } else {
839 TCGv_i32 t0_32 = tcg_temp_new_i32();
840 TCGv_i32 t1_32 = tcg_temp_new_i32();
841 TCGv_i32 tmp = tcg_temp_new_i32();
842 TCGv_i32 zero = tcg_constant_i32(0);
844 tcg_gen_extrl_i64_i32(t0_32, t0);
845 tcg_gen_extrl_i64_i32(t1_32, t1);
846 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero);
847 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero);
849 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
850 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
851 tcg_gen_xor_i32(tmp, t0_32, t1_32);
852 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
853 tcg_gen_extu_i32_i64(dest, cpu_NF);
855 tcg_temp_free_i32(tmp);
856 tcg_temp_free_i32(t1_32);
857 tcg_temp_free_i32(t0_32);
862 * Load/Store generators
866 * Store from GPR register to memory.
868 static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
869 TCGv_i64 tcg_addr, MemOp memop, int memidx,
870 bool iss_valid,
871 unsigned int iss_srt,
872 bool iss_sf, bool iss_ar)
874 memop = finalize_memop(s, memop);
875 tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);
877 if (iss_valid) {
878 uint32_t syn;
880 syn = syn_data_abort_with_iss(0,
881 (memop & MO_SIZE),
882 false,
883 iss_srt,
884 iss_sf,
885 iss_ar,
886 0, 0, 0, 0, 0, false);
887 disas_set_insn_syndrome(s, syn);
891 static void do_gpr_st(DisasContext *s, TCGv_i64 source,
892 TCGv_i64 tcg_addr, MemOp memop,
893 bool iss_valid,
894 unsigned int iss_srt,
895 bool iss_sf, bool iss_ar)
897 do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
898 iss_valid, iss_srt, iss_sf, iss_ar);
902 * Load from memory to GPR register
904 static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
905 MemOp memop, bool extend, int memidx,
906 bool iss_valid, unsigned int iss_srt,
907 bool iss_sf, bool iss_ar)
909 memop = finalize_memop(s, memop);
910 tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);
912 if (extend && (memop & MO_SIGN)) {
913 g_assert((memop & MO_SIZE) <= MO_32);
914 tcg_gen_ext32u_i64(dest, dest);
917 if (iss_valid) {
918 uint32_t syn;
920 syn = syn_data_abort_with_iss(0,
921 (memop & MO_SIZE),
922 (memop & MO_SIGN) != 0,
923 iss_srt,
924 iss_sf,
925 iss_ar,
926 0, 0, 0, 0, 0, false);
927 disas_set_insn_syndrome(s, syn);
931 static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
932 MemOp memop, bool extend,
933 bool iss_valid, unsigned int iss_srt,
934 bool iss_sf, bool iss_ar)
936 do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
937 iss_valid, iss_srt, iss_sf, iss_ar);
941 * Store from FP register to memory
943 static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
945 /* This writes the bottom N bits of a 128 bit wide vector to memory */
946 TCGv_i64 tmplo = tcg_temp_new_i64();
947 MemOp mop;
949 tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));
951 if (size < 4) {
952 mop = finalize_memop(s, size);
953 tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
954 } else {
955 bool be = s->be_data == MO_BE;
956 TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
957 TCGv_i64 tmphi = tcg_temp_new_i64();
959 tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));
961 mop = s->be_data | MO_UQ;
962 tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
963 mop | (s->align_mem ? MO_ALIGN_16 : 0));
964 tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
965 tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr,
966 get_mem_index(s), mop);
968 tcg_temp_free_i64(tcg_hiaddr);
969 tcg_temp_free_i64(tmphi);
972 tcg_temp_free_i64(tmplo);
976 * Load from memory to FP register
978 static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
980 /* This always zero-extends and writes to a full 128 bit wide vector */
981 TCGv_i64 tmplo = tcg_temp_new_i64();
982 TCGv_i64 tmphi = NULL;
983 MemOp mop;
985 if (size < 4) {
986 mop = finalize_memop(s, size);
987 tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
988 } else {
989 bool be = s->be_data == MO_BE;
990 TCGv_i64 tcg_hiaddr;
992 tmphi = tcg_temp_new_i64();
993 tcg_hiaddr = tcg_temp_new_i64();
995 mop = s->be_data | MO_UQ;
996 tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
997 mop | (s->align_mem ? MO_ALIGN_16 : 0));
998 tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
999 tcg_gen_qemu_ld_i64(be ? tmplo : tmphi, tcg_hiaddr,
1000 get_mem_index(s), mop);
1001 tcg_temp_free_i64(tcg_hiaddr);
1004 tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
1005 tcg_temp_free_i64(tmplo);
1007 if (tmphi) {
1008 tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
1009 tcg_temp_free_i64(tmphi);
1011 clear_vec_high(s, tmphi != NULL, destidx);
1015 * Vector load/store helpers.
1017 * The principal difference between this and a FP load is that we don't
1018 * zero extend as we are filling a partial chunk of the vector register.
1019 * These functions don't support 128 bit loads/stores, which would be
1020 * normal load/store operations.
1022 * The _i32 versions are useful when operating on 32 bit quantities
1023 * (eg for floating point single or using Neon helper functions).
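 *
 * For example, read_vec_element(s, tmp, rn, 1, MO_32) loads the second
 * 32-bit element of vector register n into a 64 bit temporary,
 * zero-extended because MO_SIGN is not set.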
1026 /* Get value of an element within a vector register */
1027 static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
1028 int element, MemOp memop)
1030 int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
1031 switch ((unsigned)memop) {
1032 case MO_8:
1033 tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
1034 break;
1035 case MO_16:
1036 tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
1037 break;
1038 case MO_32:
1039 tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
1040 break;
1041 case MO_8|MO_SIGN:
1042 tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
1043 break;
1044 case MO_16|MO_SIGN:
1045 tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
1046 break;
1047 case MO_32|MO_SIGN:
1048 tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
1049 break;
1050 case MO_64:
1051 case MO_64|MO_SIGN:
1052 tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
1053 break;
1054 default:
1055 g_assert_not_reached();
1059 static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
1060 int element, MemOp memop)
1062 int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
1063 switch (memop) {
1064 case MO_8:
1065 tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
1066 break;
1067 case MO_16:
1068 tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
1069 break;
1070 case MO_8|MO_SIGN:
1071 tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
1072 break;
1073 case MO_16|MO_SIGN:
1074 tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
1075 break;
1076 case MO_32:
1077 case MO_32|MO_SIGN:
1078 tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
1079 break;
1080 default:
1081 g_assert_not_reached();
1085 /* Set value of an element within a vector register */
1086 static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
1087 int element, MemOp memop)
1089 int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
1090 switch (memop) {
1091 case MO_8:
1092 tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
1093 break;
1094 case MO_16:
1095 tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
1096 break;
1097 case MO_32:
1098 tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
1099 break;
1100 case MO_64:
1101 tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
1102 break;
1103 default:
1104 g_assert_not_reached();
1108 static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
1109 int destidx, int element, MemOp memop)
1111 int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
1112 switch (memop) {
1113 case MO_8:
1114 tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
1115 break;
1116 case MO_16:
1117 tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
1118 break;
1119 case MO_32:
1120 tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
1121 break;
1122 default:
1123 g_assert_not_reached();
1127 /* Store from vector register to memory */
1128 static void do_vec_st(DisasContext *s, int srcidx, int element,
1129 TCGv_i64 tcg_addr, MemOp mop)
1131 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
1133 read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
1134 tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
1136 tcg_temp_free_i64(tcg_tmp);
1139 /* Load from memory to vector register */
1140 static void do_vec_ld(DisasContext *s, int destidx, int element,
1141 TCGv_i64 tcg_addr, MemOp mop)
1143 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
1145 tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
1146 write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);
1148 tcg_temp_free_i64(tcg_tmp);
1151 /* Check that FP/Neon access is enabled. If it is, return
1152 * true. If not, emit code to generate an appropriate exception,
1153 * and return false; the caller should not emit any code for
1154 * the instruction. Note that this check must happen after all
1155 * unallocated-encoding checks (otherwise the syndrome information
1156 * for the resulting exception will be incorrect).
1158 static bool fp_access_check_only(DisasContext *s)
1160 if (s->fp_excp_el) {
1161 assert(!s->fp_access_checked);
1162 s->fp_access_checked = true;
1164 gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
1165 syn_fp_access_trap(1, 0xe, false, 0),
1166 s->fp_excp_el);
1167 return false;
1169 s->fp_access_checked = true;
1170 return true;
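/*
 * Check FP/Neon access as above, and additionally trap instructions that
 * are illegal when FEAT_SME Streaming SVE mode is active and the insn is
 * one marked non-streaming (s->is_nonstreaming).
 */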
1173 static bool fp_access_check(DisasContext *s)
1175 if (!fp_access_check_only(s)) {
1176 return false;
1178 if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
1179 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
1180 syn_smetrap(SME_ET_Streaming, false));
1181 return false;
1183 return true;
1186 /* Check that SVE access is enabled. If it is, return true.
1187 * If not, emit code to generate an appropriate exception and return false.
1189 bool sve_access_check(DisasContext *s)
1191 if (s->sve_excp_el) {
1192 assert(!s->sve_access_checked);
1193 s->sve_access_checked = true;
1195 gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
1196 syn_sve_access_trap(), s->sve_excp_el);
1197 return false;
1199 s->sve_access_checked = true;
1200 return fp_access_check(s);
1204 * Check that SME access is enabled, raise an exception if not.
1205 * Note that this function corresponds to CheckSMEAccess and is
1206 * only used directly for cpregs.
1208 static bool sme_access_check(DisasContext *s)
1210 if (s->sme_excp_el) {
1211 gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
1212 syn_smetrap(SME_ET_AccessTrap, false),
1213 s->sme_excp_el);
1214 return false;
1216 return true;
1220 * This utility function is for doing register extension with an
1221 * optional shift. You will likely want to pass a temporary for the
1222 * destination register. See DecodeRegExtend() in the ARM ARM.
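 *
 * For example, option == 0b010 selects UXTW (zero-extend the low 32 bits)
 * and option == 0b111 selects SXTX (no extension); the extended value is
 * then shifted left by 'shift'.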
1224 static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
1225 int option, unsigned int shift)
1227 int extsize = extract32(option, 0, 2);
1228 bool is_signed = extract32(option, 2, 1);
1230 if (is_signed) {
1231 switch (extsize) {
1232 case 0:
1233 tcg_gen_ext8s_i64(tcg_out, tcg_in);
1234 break;
1235 case 1:
1236 tcg_gen_ext16s_i64(tcg_out, tcg_in);
1237 break;
1238 case 2:
1239 tcg_gen_ext32s_i64(tcg_out, tcg_in);
1240 break;
1241 case 3:
1242 tcg_gen_mov_i64(tcg_out, tcg_in);
1243 break;
1245 } else {
1246 switch (extsize) {
1247 case 0:
1248 tcg_gen_ext8u_i64(tcg_out, tcg_in);
1249 break;
1250 case 1:
1251 tcg_gen_ext16u_i64(tcg_out, tcg_in);
1252 break;
1253 case 2:
1254 tcg_gen_ext32u_i64(tcg_out, tcg_in);
1255 break;
1256 case 3:
1257 tcg_gen_mov_i64(tcg_out, tcg_in);
1258 break;
1262 if (shift) {
1263 tcg_gen_shli_i64(tcg_out, tcg_out, shift);
1267 static inline void gen_check_sp_alignment(DisasContext *s)
1269 /* The AArch64 architecture mandates that (if enabled via PSTATE
1270 * or SCTLR bits) there is a check that SP is 16-aligned on every
1271 * SP-relative load or store (with an exception generated if it is not).
1272 * In line with general QEMU practice regarding misaligned accesses,
1273 * we omit these checks for the sake of guest program performance.
1274 * This function is provided as a hook so we can more easily add these
1275 * checks in future (possibly as a "favour catching guest program bugs
1276 * over speed" user selectable option).
1281 * This provides a simple table-based lookup decoder. It is
1282 * intended to be used when the relevant bits for decode are too
1283 * awkwardly placed and switch/if based logic would be confusing and
1284 * deeply nested. Since it's a linear search through the table, tables
1285 * should be kept small.
1287 * It returns the first handler where insn & mask == pattern, or
1288 * NULL if there is no match.
1289 * The table is terminated by an empty mask (i.e. 0)
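 *
 * As an illustration (hypothetical entry, not taken from the tables
 * below): { 0x0e200400, 0x9f200400, disas_example_group } would dispatch
 * to disas_example_group() for any insn where
 * (insn & 0x9f200400) == 0x0e200400.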
1291 static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
1292 uint32_t insn)
1294 const AArch64DecodeTable *tptr = table;
1296 while (tptr->mask) {
1297 if ((insn & tptr->mask) == tptr->pattern) {
1298 return tptr->disas_fn;
1300 tptr++;
1302 return NULL;
1306 * The instruction disassembly implemented here matches
1307 * the instruction encoding classifications in chapter C4
1308 * of the ARM Architecture Reference Manual (DDI0487B_a);
1309 * classification names and decode diagrams here should generally
1310 * match up with those in the manual.
1313 /* Unconditional branch (immediate)
1314 * 31 30 26 25 0
1315 * +----+-----------+-------------------------------------+
1316 * | op | 0 0 1 0 1 | imm26 |
1317 * +----+-----------+-------------------------------------+
1319 static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
1321 uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4;
1323 if (insn & (1U << 31)) {
1324 /* BL Branch with link */
1325 tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
1328 /* B Branch / BL Branch with link */
1329 reset_btype(s);
1330 gen_goto_tb(s, 0, addr);
1333 /* Compare and branch (immediate)
1334 * 31 30 25 24 23 5 4 0
1335 * +----+-------------+----+---------------------+--------+
1336 * | sf | 0 1 1 0 1 0 | op | imm19 | Rt |
1337 * +----+-------------+----+---------------------+--------+
1339 static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
1341 unsigned int sf, op, rt;
1342 uint64_t addr;
1343 TCGLabel *label_match;
1344 TCGv_i64 tcg_cmp;
1346 sf = extract32(insn, 31, 1);
1347 op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
1348 rt = extract32(insn, 0, 5);
1349 addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
1351 tcg_cmp = read_cpu_reg(s, rt, sf);
1352 label_match = gen_new_label();
1354 reset_btype(s);
1355 tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
1356 tcg_cmp, 0, label_match);
1358 gen_goto_tb(s, 0, s->base.pc_next);
1359 gen_set_label(label_match);
1360 gen_goto_tb(s, 1, addr);
1363 /* Test and branch (immediate)
1364 * 31 30 25 24 23 19 18 5 4 0
1365 * +----+-------------+----+-------+-------------+------+
1366 * | b5 | 0 1 1 0 1 1 | op | b40 | imm14 | Rt |
1367 * +----+-------------+----+-------+-------------+------+
1369 static void disas_test_b_imm(DisasContext *s, uint32_t insn)
1371 unsigned int bit_pos, op, rt;
1372 uint64_t addr;
1373 TCGLabel *label_match;
1374 TCGv_i64 tcg_cmp;
1376 bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
1377 op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
1378 addr = s->pc_curr + sextract32(insn, 5, 14) * 4;
1379 rt = extract32(insn, 0, 5);
1381 tcg_cmp = tcg_temp_new_i64();
1382 tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
1383 label_match = gen_new_label();
1385 reset_btype(s);
1386 tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
1387 tcg_cmp, 0, label_match);
1388 tcg_temp_free_i64(tcg_cmp);
1389 gen_goto_tb(s, 0, s->base.pc_next);
1390 gen_set_label(label_match);
1391 gen_goto_tb(s, 1, addr);
1394 /* Conditional branch (immediate)
1395 * 31 25 24 23 5 4 3 0
1396 * +---------------+----+---------------------+----+------+
1397 * | 0 1 0 1 0 1 0 | o1 | imm19 | o0 | cond |
1398 * +---------------+----+---------------------+----+------+
1400 static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
1402 unsigned int cond;
1403 uint64_t addr;
1405 if ((insn & (1 << 4)) || (insn & (1 << 24))) {
1406 unallocated_encoding(s);
1407 return;
1409 addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
1410 cond = extract32(insn, 0, 4);
1412 reset_btype(s);
1413 if (cond < 0x0e) {
1414 /* genuinely conditional branches */
1415 TCGLabel *label_match = gen_new_label();
1416 arm_gen_test_cc(cond, label_match);
1417 gen_goto_tb(s, 0, s->base.pc_next);
1418 gen_set_label(label_match);
1419 gen_goto_tb(s, 1, addr);
1420 } else {
1421 /* 0xe and 0xf are both "always" conditions */
1422 gen_goto_tb(s, 0, addr);
1426 /* HINT instruction group, including various allocated HINTs */
1427 static void handle_hint(DisasContext *s, uint32_t insn,
1428 unsigned int op1, unsigned int op2, unsigned int crm)
1430 unsigned int selector = crm << 3 | op2;
1432 if (op1 != 3) {
1433 unallocated_encoding(s);
1434 return;
1437 switch (selector) {
1438 case 0b00000: /* NOP */
1439 break;
1440 case 0b00011: /* WFI */
1441 s->base.is_jmp = DISAS_WFI;
1442 break;
1443 case 0b00001: /* YIELD */
1444 /* When running in MTTCG we don't generate jumps to the yield and
1445 * WFE helpers as it won't affect the scheduling of other vCPUs.
1446 * If we wanted to more completely model WFE/SEV so we don't busy
1447 * spin unnecessarily we would need to do something more involved.
1449 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
1450 s->base.is_jmp = DISAS_YIELD;
1452 break;
1453 case 0b00010: /* WFE */
1454 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
1455 s->base.is_jmp = DISAS_WFE;
1457 break;
1458 case 0b00100: /* SEV */
1459 case 0b00101: /* SEVL */
1460 case 0b00110: /* DGH */
1461 /* we treat all as NOP at least for now */
1462 break;
1463 case 0b00111: /* XPACLRI */
1464 if (s->pauth_active) {
1465 gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
1467 break;
1468 case 0b01000: /* PACIA1716 */
1469 if (s->pauth_active) {
1470 gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1472 break;
1473 case 0b01010: /* PACIB1716 */
1474 if (s->pauth_active) {
1475 gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1477 break;
1478 case 0b01100: /* AUTIA1716 */
1479 if (s->pauth_active) {
1480 gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1482 break;
1483 case 0b01110: /* AUTIB1716 */
1484 if (s->pauth_active) {
1485 gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1487 break;
1488 case 0b10000: /* ESB */
1489 /* Without RAS, we must implement this as NOP. */
1490 if (dc_isar_feature(aa64_ras, s)) {
1492 * QEMU does not have a source of physical SErrors,
1493 * so we are only concerned with virtual SErrors.
1494 * The pseudocode in the ARM for this case is
1495 * if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
1496 * AArch64.vESBOperation();
1497 * Most of the condition can be evaluated at translation time.
1498 * Test for EL2 present, and defer test for SEL2 to runtime.
1500 if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
1501 gen_helper_vesb(cpu_env);
1504 break;
1505 case 0b11000: /* PACIAZ */
1506 if (s->pauth_active) {
1507 gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30],
1508 new_tmp_a64_zero(s));
1510 break;
1511 case 0b11001: /* PACIASP */
1512 if (s->pauth_active) {
1513 gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1515 break;
1516 case 0b11010: /* PACIBZ */
1517 if (s->pauth_active) {
1518 gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30],
1519 new_tmp_a64_zero(s));
1521 break;
1522 case 0b11011: /* PACIBSP */
1523 if (s->pauth_active) {
1524 gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1526 break;
1527 case 0b11100: /* AUTIAZ */
1528 if (s->pauth_active) {
1529 gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30],
1530 new_tmp_a64_zero(s));
1532 break;
1533 case 0b11101: /* AUTIASP */
1534 if (s->pauth_active) {
1535 gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1537 break;
1538 case 0b11110: /* AUTIBZ */
1539 if (s->pauth_active) {
1540 gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30],
1541 new_tmp_a64_zero(s));
1543 break;
1544 case 0b11111: /* AUTIBSP */
1545 if (s->pauth_active) {
1546 gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1548 break;
1549 default:
1550 /* default specified as NOP equivalent */
1551 break;
1555 static void gen_clrex(DisasContext *s, uint32_t insn)
1557 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
1560 /* CLREX, DSB, DMB, ISB */
1561 static void handle_sync(DisasContext *s, uint32_t insn,
1562 unsigned int op1, unsigned int op2, unsigned int crm)
1564 TCGBar bar;
1566 if (op1 != 3) {
1567 unallocated_encoding(s);
1568 return;
1571 switch (op2) {
1572 case 2: /* CLREX */
1573 gen_clrex(s, insn);
1574 return;
1575 case 4: /* DSB */
1576 case 5: /* DMB */
1577 switch (crm & 3) {
1578 case 1: /* MBReqTypes_Reads */
1579 bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
1580 break;
1581 case 2: /* MBReqTypes_Writes */
1582 bar = TCG_BAR_SC | TCG_MO_ST_ST;
1583 break;
1584 default: /* MBReqTypes_All */
1585 bar = TCG_BAR_SC | TCG_MO_ALL;
1586 break;
1588 tcg_gen_mb(bar);
1589 return;
1590 case 6: /* ISB */
1591 /* We need to break the TB after this insn to execute
1592 * self-modifying code correctly and also to take
1593 * any pending interrupts immediately.
1595 reset_btype(s);
1596 gen_goto_tb(s, 0, s->base.pc_next);
1597 return;
1599 case 7: /* SB */
1600 if (crm != 0 || !dc_isar_feature(aa64_sb, s)) {
1601 goto do_unallocated;
1604 * TODO: There is no speculation barrier opcode for TCG;
1605 * MB and end the TB instead.
1607 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1608 gen_goto_tb(s, 0, s->base.pc_next);
1609 return;
1611 default:
1612 do_unallocated:
1613 unallocated_encoding(s);
1614 return;
1618 static void gen_xaflag(void)
1620 TCGv_i32 z = tcg_temp_new_i32();
1622 tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);
1625 * (!C & !Z) << 31
1626 * (!(C | Z)) << 31
1627 * ~((C | Z) << 31)
1628 * ~-(C | Z)
1629 * (C | Z) - 1
1631 tcg_gen_or_i32(cpu_NF, cpu_CF, z);
1632 tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);
1634 /* !(Z & C) */
1635 tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
1636 tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);
1638 /* (!C & Z) << 31 -> -(Z & ~C) */
1639 tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
1640 tcg_gen_neg_i32(cpu_VF, cpu_VF);
1642 /* C | Z */
1643 tcg_gen_or_i32(cpu_CF, cpu_CF, z);
1645 tcg_temp_free_i32(z);
1648 static void gen_axflag(void)
1650 tcg_gen_sari_i32(cpu_VF, cpu_VF, 31); /* V ? -1 : 0 */
1651 tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF); /* C & !V */
1653 /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
1654 tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);
1656 tcg_gen_movi_i32(cpu_NF, 0);
1657 tcg_gen_movi_i32(cpu_VF, 0);
1660 /* MSR (immediate) - move immediate to processor state field */
1661 static void handle_msr_i(DisasContext *s, uint32_t insn,
1662 unsigned int op1, unsigned int op2, unsigned int crm)
1664 int op = op1 << 3 | op2;
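/* E.g. op1 == 0, op2 == 5 gives 0x05 (SPSel); op1 == 3, op2 == 6 gives 0x1e (DAIFSet). */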
1666 /* End the TB by default, chaining is ok. */
1667 s->base.is_jmp = DISAS_TOO_MANY;
1669 switch (op) {
1670 case 0x00: /* CFINV */
1671 if (crm != 0 || !dc_isar_feature(aa64_condm_4, s)) {
1672 goto do_unallocated;
1674 tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
1675 s->base.is_jmp = DISAS_NEXT;
1676 break;
1678 case 0x01: /* XAFlag */
1679 if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
1680 goto do_unallocated;
1682 gen_xaflag();
1683 s->base.is_jmp = DISAS_NEXT;
1684 break;
1686 case 0x02: /* AXFlag */
1687 if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
1688 goto do_unallocated;
1690 gen_axflag();
1691 s->base.is_jmp = DISAS_NEXT;
1692 break;
1694 case 0x03: /* UAO */
1695 if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
1696 goto do_unallocated;
1698 if (crm & 1) {
1699 set_pstate_bits(PSTATE_UAO);
1700 } else {
1701 clear_pstate_bits(PSTATE_UAO);
1703 gen_rebuild_hflags(s);
1704 break;
1706 case 0x04: /* PAN */
1707 if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
1708 goto do_unallocated;
1710 if (crm & 1) {
1711 set_pstate_bits(PSTATE_PAN);
1712 } else {
1713 clear_pstate_bits(PSTATE_PAN);
1715 gen_rebuild_hflags(s);
1716 break;
1718 case 0x05: /* SPSel */
1719 if (s->current_el == 0) {
1720 goto do_unallocated;
1722 gen_helper_msr_i_spsel(cpu_env, tcg_constant_i32(crm & PSTATE_SP));
1723 break;
1725 case 0x19: /* SSBS */
1726 if (!dc_isar_feature(aa64_ssbs, s)) {
1727 goto do_unallocated;
1729 if (crm & 1) {
1730 set_pstate_bits(PSTATE_SSBS);
1731 } else {
1732 clear_pstate_bits(PSTATE_SSBS);
1734 /* Don't need to rebuild hflags since SSBS is a nop */
1735 break;
1737 case 0x1a: /* DIT */
1738 if (!dc_isar_feature(aa64_dit, s)) {
1739 goto do_unallocated;
1741 if (crm & 1) {
1742 set_pstate_bits(PSTATE_DIT);
1743 } else {
1744 clear_pstate_bits(PSTATE_DIT);
1746 /* There's no need to rebuild hflags because DIT is a nop */
1747 break;
1749 case 0x1e: /* DAIFSet */
1750 gen_helper_msr_i_daifset(cpu_env, tcg_constant_i32(crm));
1751 break;
1753 case 0x1f: /* DAIFClear */
1754 gen_helper_msr_i_daifclear(cpu_env, tcg_constant_i32(crm));
1755 /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs. */
1756 s->base.is_jmp = DISAS_UPDATE_EXIT;
1757 break;
1759 case 0x1c: /* TCO */
1760 if (dc_isar_feature(aa64_mte, s)) {
1761 /* Full MTE is enabled -- set the TCO bit as directed. */
1762 if (crm & 1) {
1763 set_pstate_bits(PSTATE_TCO);
1764 } else {
1765 clear_pstate_bits(PSTATE_TCO);
1767 gen_rebuild_hflags(s);
1768 /* Many factors, including TCO, go into MTE_ACTIVE. */
1769 s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
1770 } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
1771 /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI. */
1772 s->base.is_jmp = DISAS_NEXT;
1773 } else {
1774 goto do_unallocated;
1776 break;
1778 case 0x1b: /* SVCR* */
1779 if (!dc_isar_feature(aa64_sme, s) || crm < 2 || crm > 7) {
1780 goto do_unallocated;
1782 if (sme_access_check(s)) {
1783 bool i = crm & 1;
1784 bool changed = false;
1786 if ((crm & 2) && i != s->pstate_sm) {
1787 gen_helper_set_pstate_sm(cpu_env, tcg_constant_i32(i));
1788 changed = true;
1790 if ((crm & 4) && i != s->pstate_za) {
1791 gen_helper_set_pstate_za(cpu_env, tcg_constant_i32(i));
1792 changed = true;
1794 if (changed) {
1795 gen_rebuild_hflags(s);
1796 } else {
1797 s->base.is_jmp = DISAS_NEXT;
1800 break;
1802 default:
1803 do_unallocated:
1804 unallocated_encoding(s);
1805 return;
1809 static void gen_get_nzcv(TCGv_i64 tcg_rt)
1811 TCGv_i32 tmp = tcg_temp_new_i32();
1812 TCGv_i32 nzcv = tcg_temp_new_i32();
1814 /* build bit 31, N */
1815 tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
1816 /* build bit 30, Z */
1817 tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
1818 tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
1819 /* build bit 29, C */
1820 tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
1821 /* build bit 28, V */
1822 tcg_gen_shri_i32(tmp, cpu_VF, 31);
1823 tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
1824 /* generate result */
1825 tcg_gen_extu_i32_i64(tcg_rt, nzcv);
1827 tcg_temp_free_i32(nzcv);
1828 tcg_temp_free_i32(tmp);
1831 static void gen_set_nzcv(TCGv_i64 tcg_rt)
1833 TCGv_i32 nzcv = tcg_temp_new_i32();
1835 /* take NZCV from R[t] */
1836 tcg_gen_extrl_i64_i32(nzcv, tcg_rt);
1838 /* bit 31, N */
1839 tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
1840 /* bit 30, Z */
1841 tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
1842 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
1843 /* bit 29, C */
1844 tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
1845 tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
1846 /* bit 28, V */
1847 tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
1848 tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
1849 tcg_temp_free_i32(nzcv);
1852 static void gen_sysreg_undef(DisasContext *s, bool isread,
1853 uint8_t op0, uint8_t op1, uint8_t op2,
1854 uint8_t crn, uint8_t crm, uint8_t rt)
1857 * Generate code to emit an UNDEF with correct syndrome
1858 * information for a failed system register access.
1859 * This is EC_UNCATEGORIZED (ie a standard UNDEF) in most cases,
1860 * but if FEAT_IDST is implemented then read accesses to registers
1861 * in the feature ID space are reported with the EC_SYSTEMREGISTERTRAP
1862 * syndrome.
1864 uint32_t syndrome;
1866 if (isread && dc_isar_feature(aa64_ids, s) &&
1867 arm_cpreg_encoding_in_idspace(op0, op1, op2, crn, crm)) {
1868 syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
1869 } else {
1870 syndrome = syn_uncategorized();
1872 gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syndrome);
1875 /* MRS - move from system register
1876 * MSR (register) - move to system register
1877 * SYS
1878 * SYSL
1879 * These are all essentially the same insn in 'read' and 'write'
1880 * versions, with varying op0 fields.
1882 static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
1883 unsigned int op0, unsigned int op1, unsigned int op2,
1884 unsigned int crn, unsigned int crm, unsigned int rt)
1886 const ARMCPRegInfo *ri;
1887 TCGv_i64 tcg_rt;
1889 ri = get_arm_cp_reginfo(s->cp_regs,
1890 ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
1891 crn, crm, op0, op1, op2));
1893 if (!ri) {
1894 /* Unknown register; this might be a guest error or a QEMU
1895 * unimplemented feature.
1897 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
1898 "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
1899 isread ? "read" : "write", op0, op1, crn, crm, op2);
1900 gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
1901 return;
1904 /* Check access permissions */
1905 if (!cp_access_ok(s->current_el, ri, isread)) {
1906 gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
1907 return;
1910 if (ri->accessfn) {
1911 /* Emit code to perform further access permissions checks at
1912 * runtime; this may result in an exception.
1914 uint32_t syndrome;
1916 syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
1917 gen_a64_set_pc_im(s->pc_curr);
1918 gen_helper_access_check_cp_reg(cpu_env,
1919 tcg_constant_ptr(ri),
1920 tcg_constant_i32(syndrome),
1921 tcg_constant_i32(isread));
1922 } else if (ri->type & ARM_CP_RAISES_EXC) {
1924 * The readfn or writefn might raise an exception;
1925 * synchronize the CPU state in case it does.
1927 gen_a64_set_pc_im(s->pc_curr);
1930 /* Handle special cases first */
1931 switch (ri->type & ARM_CP_SPECIAL_MASK) {
1932 case 0:
1933 break;
1934 case ARM_CP_NOP:
1935 return;
1936 case ARM_CP_NZCV:
1937 tcg_rt = cpu_reg(s, rt);
1938 if (isread) {
1939 gen_get_nzcv(tcg_rt);
1940 } else {
1941 gen_set_nzcv(tcg_rt);
1943 return;
1944 case ARM_CP_CURRENTEL:
1945 /* Reads as current EL value from pstate, which is
1946 * guaranteed to be constant by the tb flags.
1948 tcg_rt = cpu_reg(s, rt);
1949 tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
1950 return;
1951 case ARM_CP_DC_ZVA:
1952 /* Writes clear the aligned block of memory which rt points into. */
1953 if (s->mte_active[0]) {
1954 int desc = 0;
1956 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
1957 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
1958 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
1960 tcg_rt = new_tmp_a64(s);
1961 gen_helper_mte_check_zva(tcg_rt, cpu_env,
1962 tcg_constant_i32(desc), cpu_reg(s, rt));
1963 } else {
1964 tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
1966 gen_helper_dc_zva(cpu_env, tcg_rt);
1967 return;
1968 case ARM_CP_DC_GVA:
1970 TCGv_i64 clean_addr, tag;
1973 * DC_GVA, like DC_ZVA, requires that we supply the original
1974 * pointer for an invalid page. Probe that address first.
1976 tcg_rt = cpu_reg(s, rt);
1977 clean_addr = clean_data_tbi(s, tcg_rt);
1978 gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8);
1980 if (s->ata) {
1981 /* Extract the tag from the register to match STZGM. */
1982 tag = tcg_temp_new_i64();
1983 tcg_gen_shri_i64(tag, tcg_rt, 56);
1984 gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
1985 tcg_temp_free_i64(tag);
1988 return;
1989 case ARM_CP_DC_GZVA:
1991 TCGv_i64 clean_addr, tag;
1993 /* For DC_GZVA, we can rely on DC_ZVA for the proper fault. */
1994 tcg_rt = cpu_reg(s, rt);
1995 clean_addr = clean_data_tbi(s, tcg_rt);
1996 gen_helper_dc_zva(cpu_env, clean_addr);
1998 if (s->ata) {
1999 /* Extract the tag from the register to match STZGM. */
2000 tag = tcg_temp_new_i64();
2001 tcg_gen_shri_i64(tag, tcg_rt, 56);
2002 gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
2003 tcg_temp_free_i64(tag);
2006 return;
2007 default:
2008 g_assert_not_reached();
2010 if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
2011 return;
2012 } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
2013 return;
2014 } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) {
2015 return;
2018 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2019 gen_io_start();
2022 tcg_rt = cpu_reg(s, rt);
2024 if (isread) {
2025 if (ri->type & ARM_CP_CONST) {
2026 tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
2027 } else if (ri->readfn) {
2028 gen_helper_get_cp_reg64(tcg_rt, cpu_env, tcg_constant_ptr(ri));
2029 } else {
2030 tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
2032 } else {
2033 if (ri->type & ARM_CP_CONST) {
2034 /* If not forbidden by access permissions, treat as WI */
2035 return;
2036 } else if (ri->writefn) {
2037 gen_helper_set_cp_reg64(cpu_env, tcg_constant_ptr(ri), tcg_rt);
2038 } else {
2039 tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
2043 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2044 /* I/O operations must end the TB here (whether read or write) */
2045 s->base.is_jmp = DISAS_UPDATE_EXIT;
2047 if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
2049      * A write to any coprocessor register that ends a TB
2050 * must rebuild the hflags for the next TB.
2052 gen_rebuild_hflags(s);
2054 * We default to ending the TB on a coprocessor register write,
2055 * but allow this to be suppressed by the register definition
2056 * (usually only necessary to work around guest bugs).
2058 s->base.is_jmp = DISAS_UPDATE_EXIT;
2062 /* System
2063 * 31 22 21 20 19 18 16 15 12 11 8 7 5 4 0
2064 * +---------------------+---+-----+-----+-------+-------+-----+------+
2065 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 | CRn | CRm | op2 | Rt |
2066 * +---------------------+---+-----+-----+-------+-------+-----+------+
2068 static void disas_system(DisasContext *s, uint32_t insn)
2070 unsigned int l, op0, op1, crn, crm, op2, rt;
2071 l = extract32(insn, 21, 1);
2072 op0 = extract32(insn, 19, 2);
2073 op1 = extract32(insn, 16, 3);
2074 crn = extract32(insn, 12, 4);
2075 crm = extract32(insn, 8, 4);
2076 op2 = extract32(insn, 5, 3);
2077 rt = extract32(insn, 0, 5);
2079 if (op0 == 0) {
2080 if (l || rt != 31) {
2081 unallocated_encoding(s);
2082 return;
2084 switch (crn) {
2085 case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
2086 handle_hint(s, insn, op1, op2, crm);
2087 break;
2088 case 3: /* CLREX, DSB, DMB, ISB */
2089 handle_sync(s, insn, op1, op2, crm);
2090 break;
2091 case 4: /* MSR (immediate) */
2092 handle_msr_i(s, insn, op1, op2, crm);
2093 break;
2094 default:
2095 unallocated_encoding(s);
2096 break;
2098 return;
2100 handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
2103 /* Exception generation
2105 * 31 24 23 21 20 5 4 2 1 0
2106 * +-----------------+-----+------------------------+-----+----+
2107 * | 1 1 0 1 0 1 0 0 | opc | imm16 | op2 | LL |
2108  * +-----------------+-----+------------------------+-----+----+
2110 static void disas_exc(DisasContext *s, uint32_t insn)
2112 int opc = extract32(insn, 21, 3);
2113 int op2_ll = extract32(insn, 0, 5);
2114 int imm16 = extract32(insn, 5, 16);
2116 switch (opc) {
2117 case 0:
2118 /* For SVC, HVC and SMC we advance the single-step state
2119 * machine before taking the exception. This is architecturally
2120 * mandated, to ensure that single-stepping a system call
2121 * instruction works properly.
2123 switch (op2_ll) {
2124 case 1: /* SVC */
2125 gen_ss_advance(s);
2126 gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
2127 syn_aa64_svc(imm16));
2128 break;
2129 case 2: /* HVC */
2130 if (s->current_el == 0) {
2131 unallocated_encoding(s);
2132 break;
2134 /* The pre HVC helper handles cases when HVC gets trapped
2135 * as an undefined insn by runtime configuration.
2137 gen_a64_set_pc_im(s->pc_curr);
2138 gen_helper_pre_hvc(cpu_env);
2139 gen_ss_advance(s);
2140 gen_exception_insn_el(s, s->base.pc_next, EXCP_HVC,
2141 syn_aa64_hvc(imm16), 2);
2142 break;
2143 case 3: /* SMC */
2144 if (s->current_el == 0) {
2145 unallocated_encoding(s);
2146 break;
2148 gen_a64_set_pc_im(s->pc_curr);
2149 gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa64_smc(imm16)));
2150 gen_ss_advance(s);
2151 gen_exception_insn_el(s, s->base.pc_next, EXCP_SMC,
2152 syn_aa64_smc(imm16), 3);
2153 break;
2154 default:
2155 unallocated_encoding(s);
2156 break;
2158 break;
2159 case 1:
2160 if (op2_ll != 0) {
2161 unallocated_encoding(s);
2162 break;
2164 /* BRK */
2165 gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
2166 break;
2167 case 2:
2168 if (op2_ll != 0) {
2169 unallocated_encoding(s);
2170 break;
2172 /* HLT. This has two purposes.
2173 * Architecturally, it is an external halting debug instruction.
2174 * Since QEMU doesn't implement external debug, we treat this as
2175      * required when halting debug is disabled: it will UNDEF.
2176 * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
2178 if (semihosting_enabled() && imm16 == 0xf000) {
2179 #ifndef CONFIG_USER_ONLY
2180 /* In system mode, don't allow userspace access to semihosting,
2181 * to provide some semblance of security (and for consistency
2182 * with our 32-bit semihosting).
2184 if (s->current_el == 0) {
2185 unallocated_encoding(s);
2186 break;
2188 #endif
2189 gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
2190 } else {
2191 unallocated_encoding(s);
2193 break;
2194 case 5:
2195 if (op2_ll < 1 || op2_ll > 3) {
2196 unallocated_encoding(s);
2197 break;
2199 /* DCPS1, DCPS2, DCPS3 */
2200 unallocated_encoding(s);
2201 break;
2202 default:
2203 unallocated_encoding(s);
2204 break;
2208 /* Unconditional branch (register)
2209 * 31 25 24 21 20 16 15 10 9 5 4 0
2210 * +---------------+-------+-------+-------+------+-------+
2211 * | 1 1 0 1 0 1 1 | opc | op2 | op3 | Rn | op4 |
2212 * +---------------+-------+-------+-------+------+-------+
2214 static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
2216 unsigned int opc, op2, op3, rn, op4;
2217 unsigned btype_mod = 2; /* 0: BR, 1: BLR, 2: other */
2218 TCGv_i64 dst;
2219 TCGv_i64 modifier;
2221 opc = extract32(insn, 21, 4);
2222 op2 = extract32(insn, 16, 5);
2223 op3 = extract32(insn, 10, 6);
2224 rn = extract32(insn, 5, 5);
2225 op4 = extract32(insn, 0, 5);
2227 if (op2 != 0x1f) {
2228 goto do_unallocated;
2231 switch (opc) {
2232 case 0: /* BR */
2233 case 1: /* BLR */
2234 case 2: /* RET */
2235 btype_mod = opc;
2236 switch (op3) {
2237 case 0:
2238 /* BR, BLR, RET */
2239 if (op4 != 0) {
2240 goto do_unallocated;
2242 dst = cpu_reg(s, rn);
2243 break;
2245 case 2:
2246 case 3:
2247 if (!dc_isar_feature(aa64_pauth, s)) {
2248 goto do_unallocated;
2250 if (opc == 2) {
2251 /* RETAA, RETAB */
2252 if (rn != 0x1f || op4 != 0x1f) {
2253 goto do_unallocated;
2255 rn = 30;
2256 modifier = cpu_X[31];
2257 } else {
2258 /* BRAAZ, BRABZ, BLRAAZ, BLRABZ */
2259 if (op4 != 0x1f) {
2260 goto do_unallocated;
2262 modifier = new_tmp_a64_zero(s);
2264 if (s->pauth_active) {
2265 dst = new_tmp_a64(s);
2266 if (op3 == 2) {
2267 gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
2268 } else {
2269 gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
2271 } else {
2272 dst = cpu_reg(s, rn);
2274 break;
2276 default:
2277 goto do_unallocated;
2279 gen_a64_set_pc(s, dst);
2280 /* BLR also needs to load return address */
2281 if (opc == 1) {
2282 tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
2284 break;
2286 case 8: /* BRAA */
2287 case 9: /* BLRAA */
2288 if (!dc_isar_feature(aa64_pauth, s)) {
2289 goto do_unallocated;
2291 if ((op3 & ~1) != 2) {
2292 goto do_unallocated;
2294 btype_mod = opc & 1;
2295 if (s->pauth_active) {
2296 dst = new_tmp_a64(s);
2297 modifier = cpu_reg_sp(s, op4);
2298 if (op3 == 2) {
2299 gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
2300 } else {
2301 gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
2303 } else {
2304 dst = cpu_reg(s, rn);
2306 gen_a64_set_pc(s, dst);
2307 /* BLRAA also needs to load return address */
2308 if (opc == 9) {
2309 tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
2311 break;
2313 case 4: /* ERET */
2314 if (s->current_el == 0) {
2315 goto do_unallocated;
2317 switch (op3) {
2318 case 0: /* ERET */
2319 if (op4 != 0) {
2320 goto do_unallocated;
2322 dst = tcg_temp_new_i64();
2323 tcg_gen_ld_i64(dst, cpu_env,
2324 offsetof(CPUARMState, elr_el[s->current_el]));
2325 break;
2327 case 2: /* ERETAA */
2328 case 3: /* ERETAB */
2329 if (!dc_isar_feature(aa64_pauth, s)) {
2330 goto do_unallocated;
2332 if (rn != 0x1f || op4 != 0x1f) {
2333 goto do_unallocated;
2335 dst = tcg_temp_new_i64();
2336 tcg_gen_ld_i64(dst, cpu_env,
2337 offsetof(CPUARMState, elr_el[s->current_el]));
2338 if (s->pauth_active) {
2339 modifier = cpu_X[31];
2340 if (op3 == 2) {
2341 gen_helper_autia(dst, cpu_env, dst, modifier);
2342 } else {
2343 gen_helper_autib(dst, cpu_env, dst, modifier);
2346 break;
2348 default:
2349 goto do_unallocated;
2351 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
2352 gen_io_start();
2355 gen_helper_exception_return(cpu_env, dst);
2356 tcg_temp_free_i64(dst);
2357 /* Must exit loop to check un-masked IRQs */
2358 s->base.is_jmp = DISAS_EXIT;
2359 return;
2361 case 5: /* DRPS */
2362 if (op3 != 0 || op4 != 0 || rn != 0x1f) {
2363 goto do_unallocated;
2364 } else {
2365 unallocated_encoding(s);
2367 return;
2369 default:
2370 do_unallocated:
2371 unallocated_encoding(s);
2372 return;
2375 switch (btype_mod) {
2376 case 0: /* BR */
2377 if (dc_isar_feature(aa64_bti, s)) {
2378 /* BR to {x16,x17} or !guard -> 1, else 3. */
2379 set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
2381 break;
2383 case 1: /* BLR */
2384 if (dc_isar_feature(aa64_bti, s)) {
2385 /* BLR sets BTYPE to 2, regardless of source guarded page. */
2386 set_btype(s, 2);
2388 break;
2390 default: /* RET or none of the above. */
2391 /* BTYPE will be set to 0 by normal end-of-insn processing. */
2392 break;
2395 s->base.is_jmp = DISAS_JUMP;
2398 /* Branches, exception generating and system instructions */
2399 static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
2401 switch (extract32(insn, 25, 7)) {
2402 case 0x0a: case 0x0b:
2403 case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
2404 disas_uncond_b_imm(s, insn);
2405 break;
2406 case 0x1a: case 0x5a: /* Compare & branch (immediate) */
2407 disas_comp_b_imm(s, insn);
2408 break;
2409 case 0x1b: case 0x5b: /* Test & branch (immediate) */
2410 disas_test_b_imm(s, insn);
2411 break;
2412 case 0x2a: /* Conditional branch (immediate) */
2413 disas_cond_b_imm(s, insn);
2414 break;
2415 case 0x6a: /* Exception generation / System */
2416 if (insn & (1 << 24)) {
2417 if (extract32(insn, 22, 2) == 0) {
2418 disas_system(s, insn);
2419 } else {
2420 unallocated_encoding(s);
2422 } else {
2423 disas_exc(s, insn);
2425 break;
2426 case 0x6b: /* Unconditional branch (register) */
2427 disas_uncond_b_reg(s, insn);
2428 break;
2429 default:
2430 unallocated_encoding(s);
2431 break;
2436 * Load/Store exclusive instructions are implemented by remembering
2437 * the value/address loaded, and seeing if these are the same
2438 * when the store is performed. This is not actually the architecturally
2439 * mandated semantics, but it works for typical guest code sequences
2440 * and avoids having to monitor regular stores.
2442 * The store exclusive uses the atomic cmpxchg primitives to avoid
2443 * races in multi-threaded linux-user and when MTTCG softmmu is
2444 * enabled.
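/*
 * Editor's note: the sketch below is an illustrative, self-contained C model
 * of the store-exclusive strategy described above (remember the address and
 * value at the load, then use a host compare-and-swap at the store).  It is
 * not part of the translator; the names (excl_state, model_ldxr, model_stxr)
 * are hypothetical, and the code is kept under "#if 0" so it is never built.
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

typedef struct {
    uint64_t *exclusive_addr;   /* address marked by the last load-exclusive */
    uint64_t exclusive_val;     /* value observed by the last load-exclusive */
} excl_state;

static uint64_t model_ldxr(excl_state *st, uint64_t *addr)
{
    st->exclusive_addr = addr;
    st->exclusive_val = *addr;      /* remember what we saw */
    return st->exclusive_val;
}

/* Returns 0 on success, 1 on failure, like the AArch64 status result. */
static int model_stxr(excl_state *st, uint64_t *addr, uint64_t newval)
{
    bool ok = false;

    if (st->exclusive_addr == addr) {
        uint64_t expected = st->exclusive_val;
        /* Succeed only if memory still holds the remembered value. */
        ok = __atomic_compare_exchange_n(addr, &expected, newval, false,
                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }
    st->exclusive_addr = NULL;      /* clear the monitor either way */
    return ok ? 0 : 1;
}
#endif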
2446 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
2447 TCGv_i64 addr, int size, bool is_pair)
2449 int idx = get_mem_index(s);
2450 MemOp memop = s->be_data;
2452 g_assert(size <= 3);
2453 if (is_pair) {
2454 g_assert(size >= 2);
2455 if (size == 2) {
2456 /* The pair must be single-copy atomic for the doubleword. */
2457 memop |= MO_64 | MO_ALIGN;
2458 tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
2459 if (s->be_data == MO_LE) {
2460 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
2461 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
2462 } else {
2463 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
2464 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
2466 } else {
2467 /* The pair must be single-copy atomic for *each* doubleword, not
2468                the entire quadword; it must, however, be quadword aligned.  */
2469 memop |= MO_64;
2470 tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
2471 memop | MO_ALIGN_16);
2473 TCGv_i64 addr2 = tcg_temp_new_i64();
2474 tcg_gen_addi_i64(addr2, addr, 8);
2475 tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
2476 tcg_temp_free_i64(addr2);
2478 tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
2479 tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
2481 } else {
2482 memop |= size | MO_ALIGN;
2483 tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
2484 tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
2486 tcg_gen_mov_i64(cpu_exclusive_addr, addr);
2489 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
2490 TCGv_i64 addr, int size, int is_pair)
2492 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
2493 * && (!is_pair || env->exclusive_high == [addr + datasize])) {
2494 * [addr] = {Rt};
2495 * if (is_pair) {
2496 * [addr + datasize] = {Rt2};
2498 * {Rd} = 0;
2499 * } else {
2500 * {Rd} = 1;
2502 * env->exclusive_addr = -1;
2504 TCGLabel *fail_label = gen_new_label();
2505 TCGLabel *done_label = gen_new_label();
2506 TCGv_i64 tmp;
2508 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
2510 tmp = tcg_temp_new_i64();
2511 if (is_pair) {
2512 if (size == 2) {
2513 if (s->be_data == MO_LE) {
2514 tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
2515 } else {
2516 tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
2518 tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
2519 cpu_exclusive_val, tmp,
2520 get_mem_index(s),
2521 MO_64 | MO_ALIGN | s->be_data);
2522 tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
2523 } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2524 if (!HAVE_CMPXCHG128) {
2525 gen_helper_exit_atomic(cpu_env);
2527 * Produce a result so we have a well-formed opcode
2528 * stream when the following (dead) code uses 'tmp'.
2529 * TCG will remove the dead ops for us.
2531 tcg_gen_movi_i64(tmp, 0);
2532 } else if (s->be_data == MO_LE) {
2533 gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
2534 cpu_exclusive_addr,
2535 cpu_reg(s, rt),
2536 cpu_reg(s, rt2));
2537 } else {
2538 gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
2539 cpu_exclusive_addr,
2540 cpu_reg(s, rt),
2541 cpu_reg(s, rt2));
2543 } else if (s->be_data == MO_LE) {
2544 gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
2545 cpu_reg(s, rt), cpu_reg(s, rt2));
2546 } else {
2547 gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
2548 cpu_reg(s, rt), cpu_reg(s, rt2));
2550 } else {
2551 tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
2552 cpu_reg(s, rt), get_mem_index(s),
2553 size | MO_ALIGN | s->be_data);
2554 tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
2556 tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
2557 tcg_temp_free_i64(tmp);
2558 tcg_gen_br(done_label);
2560 gen_set_label(fail_label);
2561 tcg_gen_movi_i64(cpu_reg(s, rd), 1);
2562 gen_set_label(done_label);
2563 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
2566 static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
2567 int rn, int size)
2569 TCGv_i64 tcg_rs = cpu_reg(s, rs);
2570 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2571 int memidx = get_mem_index(s);
2572 TCGv_i64 clean_addr;
2574 if (rn == 31) {
2575 gen_check_sp_alignment(s);
2577 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size);
2578 tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx,
2579 size | MO_ALIGN | s->be_data);
2582 static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
2583 int rn, int size)
2585 TCGv_i64 s1 = cpu_reg(s, rs);
2586 TCGv_i64 s2 = cpu_reg(s, rs + 1);
2587 TCGv_i64 t1 = cpu_reg(s, rt);
2588 TCGv_i64 t2 = cpu_reg(s, rt + 1);
2589 TCGv_i64 clean_addr;
2590 int memidx = get_mem_index(s);
2592 if (rn == 31) {
2593 gen_check_sp_alignment(s);
2596 /* This is a single atomic access, despite the "pair". */
2597 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size + 1);
2599 if (size == 2) {
2600 TCGv_i64 cmp = tcg_temp_new_i64();
2601 TCGv_i64 val = tcg_temp_new_i64();
2603 if (s->be_data == MO_LE) {
2604 tcg_gen_concat32_i64(val, t1, t2);
2605 tcg_gen_concat32_i64(cmp, s1, s2);
2606 } else {
2607 tcg_gen_concat32_i64(val, t2, t1);
2608 tcg_gen_concat32_i64(cmp, s2, s1);
2611 tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx,
2612 MO_64 | MO_ALIGN | s->be_data);
2613 tcg_temp_free_i64(val);
2615 if (s->be_data == MO_LE) {
2616 tcg_gen_extr32_i64(s1, s2, cmp);
2617 } else {
2618 tcg_gen_extr32_i64(s2, s1, cmp);
2620 tcg_temp_free_i64(cmp);
2621 } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2622 if (HAVE_CMPXCHG128) {
2623 TCGv_i32 tcg_rs = tcg_constant_i32(rs);
2624 if (s->be_data == MO_LE) {
2625 gen_helper_casp_le_parallel(cpu_env, tcg_rs,
2626 clean_addr, t1, t2);
2627 } else {
2628 gen_helper_casp_be_parallel(cpu_env, tcg_rs,
2629 clean_addr, t1, t2);
2631 } else {
2632 gen_helper_exit_atomic(cpu_env);
2633 s->base.is_jmp = DISAS_NORETURN;
2635 } else {
2636 TCGv_i64 d1 = tcg_temp_new_i64();
2637 TCGv_i64 d2 = tcg_temp_new_i64();
2638 TCGv_i64 a2 = tcg_temp_new_i64();
2639 TCGv_i64 c1 = tcg_temp_new_i64();
2640 TCGv_i64 c2 = tcg_temp_new_i64();
2641 TCGv_i64 zero = tcg_constant_i64(0);
2643 /* Load the two words, in memory order. */
2644 tcg_gen_qemu_ld_i64(d1, clean_addr, memidx,
2645 MO_64 | MO_ALIGN_16 | s->be_data);
2646 tcg_gen_addi_i64(a2, clean_addr, 8);
2647 tcg_gen_qemu_ld_i64(d2, a2, memidx, MO_64 | s->be_data);
2649 /* Compare the two words, also in memory order. */
2650 tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
2651 tcg_gen_setcond_i64(TCG_COND_EQ, c2, d2, s2);
2652 tcg_gen_and_i64(c2, c2, c1);
2654 /* If compare equal, write back new data, else write back old data. */
2655 tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
2656 tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
2657 tcg_gen_qemu_st_i64(c1, clean_addr, memidx, MO_64 | s->be_data);
2658 tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
2659 tcg_temp_free_i64(a2);
2660 tcg_temp_free_i64(c1);
2661 tcg_temp_free_i64(c2);
2663 /* Write back the data from memory to Rs. */
2664 tcg_gen_mov_i64(s1, d1);
2665 tcg_gen_mov_i64(s2, d2);
2666 tcg_temp_free_i64(d1);
2667 tcg_temp_free_i64(d2);
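/*
 * Editor's note: illustrative sketch (hypothetical names, kept under "#if 0")
 * of the non-parallel CASP fallback above: load both words, compare them with
 * the Rs pair, write back either the new data (on match) or the old data (so
 * the stores are unconditional), and return the old data in Rs.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static void model_casp_fallback(uint64_t mem[2],
                                uint64_t rs[2],        /* compare in, old data out */
                                const uint64_t rt[2])  /* new data */
{
    uint64_t d0 = mem[0], d1 = mem[1];
    bool match = (d0 == rs[0]) && (d1 == rs[1]);

    /* Stores always happen; on a mismatch they just rewrite the old values. */
    mem[0] = match ? rt[0] : d0;
    mem[1] = match ? rt[1] : d1;

    /* The old data is returned in the Rs pair, as the architecture requires. */
    rs[0] = d0;
    rs[1] = d1;
}
#endif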
2671 /* Compute the Sixty-Four bit (SF) register size. This logic is derived
2672 * from the ARMv8 specs for LDR (Shared decode for all encodings).
2674 static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
2676 int opc0 = extract32(opc, 0, 1);
2677 int regsize;
2679 if (is_signed) {
2680 regsize = opc0 ? 32 : 64;
2681 } else {
2682 regsize = size == 3 ? 64 : 32;
2684 return regsize == 64;
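/*
 * Editor's note: a few worked examples of the regsize rule above, as a
 * minimal sketch with hypothetical names, kept under "#if 0" so it is never
 * built.  For unsigned loads the register size follows the access size (only
 * a 64-bit access targets an Xt register); for signed loads opc<0> selects
 * the 32-bit (1) or 64-bit (0) destination.
 */
#if 0
#include <assert.h>

static int example_regsize(int size, int is_signed, int opc)
{
    if (is_signed) {
        return (opc & 1) ? 32 : 64;
    }
    return size == 3 ? 64 : 32;
}

static void iss_sf_examples(void)
{
    assert(example_regsize(0, 0, 1) == 32);  /* LDRB  Wt -> SF = 0 */
    assert(example_regsize(3, 0, 1) == 64);  /* LDR   Xt -> SF = 1 */
    assert(example_regsize(0, 1, 2) == 64);  /* LDRSB Xt -> SF = 1 */
    assert(example_regsize(0, 1, 3) == 32);  /* LDRSB Wt -> SF = 0 */
}
#endif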
2687 /* Load/store exclusive
2689 * 31 30 29 24 23 22 21 20 16 15 14 10 9 5 4 0
2690 * +-----+-------------+----+---+----+------+----+-------+------+------+
2691 * | sz | 0 0 1 0 0 0 | o2 | L | o1 | Rs | o0 | Rt2 | Rn | Rt |
2692 * +-----+-------------+----+---+----+------+----+-------+------+------+
2694 * sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
2695 * L: 0 -> store, 1 -> load
2696 * o2: 0 -> exclusive, 1 -> not
2697 * o1: 0 -> single register, 1 -> register pair
2698 * o0: 1 -> load-acquire/store-release, 0 -> not
2700 static void disas_ldst_excl(DisasContext *s, uint32_t insn)
2702 int rt = extract32(insn, 0, 5);
2703 int rn = extract32(insn, 5, 5);
2704 int rt2 = extract32(insn, 10, 5);
2705 int rs = extract32(insn, 16, 5);
2706 int is_lasr = extract32(insn, 15, 1);
2707 int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
2708 int size = extract32(insn, 30, 2);
2709 TCGv_i64 clean_addr;
2711 switch (o2_L_o1_o0) {
2712 case 0x0: /* STXR */
2713 case 0x1: /* STLXR */
2714 if (rn == 31) {
2715 gen_check_sp_alignment(s);
2717 if (is_lasr) {
2718 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2720 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2721 true, rn != 31, size);
2722 gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
2723 return;
2725 case 0x4: /* LDXR */
2726 case 0x5: /* LDAXR */
2727 if (rn == 31) {
2728 gen_check_sp_alignment(s);
2730 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2731 false, rn != 31, size);
2732 s->is_ldex = true;
2733 gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
2734 if (is_lasr) {
2735 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2737 return;
2739 case 0x8: /* STLLR */
2740 if (!dc_isar_feature(aa64_lor, s)) {
2741 break;
2743 /* StoreLORelease is the same as Store-Release for QEMU. */
2744 /* fall through */
2745 case 0x9: /* STLR */
2746 /* Generate ISS for non-exclusive accesses including LASR. */
2747 if (rn == 31) {
2748 gen_check_sp_alignment(s);
2750 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2751 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2752 true, rn != 31, size);
2753 /* TODO: ARMv8.4-LSE SCTLR.nAA */
2754 do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt,
2755 disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
2756 return;
2758 case 0xc: /* LDLAR */
2759 if (!dc_isar_feature(aa64_lor, s)) {
2760 break;
2762 /* LoadLOAcquire is the same as Load-Acquire for QEMU. */
2763 /* fall through */
2764 case 0xd: /* LDAR */
2765 /* Generate ISS for non-exclusive accesses including LASR. */
2766 if (rn == 31) {
2767 gen_check_sp_alignment(s);
2769 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2770 false, rn != 31, size);
2771 /* TODO: ARMv8.4-LSE SCTLR.nAA */
2772 do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true,
2773 rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
2774 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2775 return;
2777 case 0x2: case 0x3: /* CASP / STXP */
2778 if (size & 2) { /* STXP / STLXP */
2779 if (rn == 31) {
2780 gen_check_sp_alignment(s);
2782 if (is_lasr) {
2783 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2785 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2786 true, rn != 31, size);
2787 gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
2788 return;
2790 if (rt2 == 31
2791 && ((rt | rs) & 1) == 0
2792 && dc_isar_feature(aa64_atomics, s)) {
2793 /* CASP / CASPL */
2794 gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
2795 return;
2797 break;
2799 case 0x6: case 0x7: /* CASPA / LDXP */
2800 if (size & 2) { /* LDXP / LDAXP */
2801 if (rn == 31) {
2802 gen_check_sp_alignment(s);
2804 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2805 false, rn != 31, size);
2806 s->is_ldex = true;
2807 gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
2808 if (is_lasr) {
2809 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2811 return;
2813 if (rt2 == 31
2814 && ((rt | rs) & 1) == 0
2815 && dc_isar_feature(aa64_atomics, s)) {
2816 /* CASPA / CASPAL */
2817 gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
2818 return;
2820 break;
2822 case 0xa: /* CAS */
2823 case 0xb: /* CASL */
2824 case 0xe: /* CASA */
2825 case 0xf: /* CASAL */
2826 if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
2827 gen_compare_and_swap(s, rs, rt, rn, size);
2828 return;
2830 break;
2832 unallocated_encoding(s);
2836 * Load register (literal)
2838 * 31 30 29 27 26 25 24 23 5 4 0
2839 * +-----+-------+---+-----+-------------------+-------+
2840 * | opc | 0 1 1 | V | 0 0 | imm19 | Rt |
2841 * +-----+-------+---+-----+-------------------+-------+
2843 * V: 1 -> vector (simd/fp)
2844 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
2845 * 10-> 32 bit signed, 11 -> prefetch
2846 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
2848 static void disas_ld_lit(DisasContext *s, uint32_t insn)
2850 int rt = extract32(insn, 0, 5);
2851 int64_t imm = sextract32(insn, 5, 19) << 2;
2852 bool is_vector = extract32(insn, 26, 1);
2853 int opc = extract32(insn, 30, 2);
2854 bool is_signed = false;
2855 int size = 2;
2856 TCGv_i64 tcg_rt, clean_addr;
2858 if (is_vector) {
2859 if (opc == 3) {
2860 unallocated_encoding(s);
2861 return;
2863 size = 2 + opc;
2864 if (!fp_access_check(s)) {
2865 return;
2867 } else {
2868 if (opc == 3) {
2869 /* PRFM (literal) : prefetch */
2870 return;
2872 size = 2 + extract32(opc, 0, 1);
2873 is_signed = extract32(opc, 1, 1);
2876 tcg_rt = cpu_reg(s, rt);
2878 clean_addr = tcg_constant_i64(s->pc_curr + imm);
2879 if (is_vector) {
2880 do_fp_ld(s, rt, clean_addr, size);
2881 } else {
2882 /* Only unsigned 32bit loads target 32bit registers. */
2883 bool iss_sf = opc != 0;
2885 do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
2886 false, true, rt, iss_sf, false);
2891 * LDNP (Load Pair - non-temporal hint)
2892 * LDP (Load Pair - non vector)
2893 * LDPSW (Load Pair Signed Word - non vector)
2894 * STNP (Store Pair - non-temporal hint)
2895 * STP (Store Pair - non vector)
2896 * LDNP (Load Pair of SIMD&FP - non-temporal hint)
2897 * LDP (Load Pair of SIMD&FP)
2898 * STNP (Store Pair of SIMD&FP - non-temporal hint)
2899 * STP (Store Pair of SIMD&FP)
2901 * 31 30 29 27 26 25 24 23 22 21 15 14 10 9 5 4 0
2902 * +-----+-------+---+---+-------+---+-----------------------------+
2903 * | opc | 1 0 1 | V | 0 | index | L | imm7 | Rt2 | Rn | Rt |
2904 * +-----+-------+---+---+-------+---+-------+-------+------+------+
2906 * opc: LDP/STP/LDNP/STNP 00 -> 32 bit, 10 -> 64 bit
2907 * LDPSW/STGP 01
2908 * LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
2909 * V: 0 -> GPR, 1 -> Vector
2910 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
2911 * 10 -> signed offset, 11 -> pre-index
2912 * L: 0 -> Store 1 -> Load
2914 * Rt, Rt2 = GPR or SIMD registers to be stored
2915 * Rn = general purpose register containing address
2916 * imm7 = signed offset (multiple of 4 or 8 depending on size)
2918 static void disas_ldst_pair(DisasContext *s, uint32_t insn)
2920 int rt = extract32(insn, 0, 5);
2921 int rn = extract32(insn, 5, 5);
2922 int rt2 = extract32(insn, 10, 5);
2923 uint64_t offset = sextract64(insn, 15, 7);
2924 int index = extract32(insn, 23, 2);
2925 bool is_vector = extract32(insn, 26, 1);
2926 bool is_load = extract32(insn, 22, 1);
2927 int opc = extract32(insn, 30, 2);
2929 bool is_signed = false;
2930 bool postindex = false;
2931 bool wback = false;
2932 bool set_tag = false;
2934 TCGv_i64 clean_addr, dirty_addr;
2936 int size;
2938 if (opc == 3) {
2939 unallocated_encoding(s);
2940 return;
2943 if (is_vector) {
2944 size = 2 + opc;
2945 } else if (opc == 1 && !is_load) {
2946 /* STGP */
2947 if (!dc_isar_feature(aa64_mte_insn_reg, s) || index == 0) {
2948 unallocated_encoding(s);
2949 return;
2951 size = 3;
2952 set_tag = true;
2953 } else {
2954 size = 2 + extract32(opc, 1, 1);
2955 is_signed = extract32(opc, 0, 1);
2956 if (!is_load && is_signed) {
2957 unallocated_encoding(s);
2958 return;
2962 switch (index) {
2963 case 1: /* post-index */
2964 postindex = true;
2965 wback = true;
2966 break;
2967 case 0:
2968         /* signed offset with "non-temporal" hint. Since we don't emulate
2969          * caches, we don't care about hints to the cache system about
2970          * data access patterns, so handle this identically to a plain
2971          * signed offset.
2973 if (is_signed) {
2974 /* There is no non-temporal-hint version of LDPSW */
2975 unallocated_encoding(s);
2976 return;
2978 postindex = false;
2979 break;
2980 case 2: /* signed offset, rn not updated */
2981 postindex = false;
2982 break;
2983 case 3: /* pre-index */
2984 postindex = false;
2985 wback = true;
2986 break;
2989 if (is_vector && !fp_access_check(s)) {
2990 return;
2993 offset <<= (set_tag ? LOG2_TAG_GRANULE : size);
2995 if (rn == 31) {
2996 gen_check_sp_alignment(s);
2999 dirty_addr = read_cpu_reg_sp(s, rn, 1);
3000 if (!postindex) {
3001 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3004 if (set_tag) {
3005 if (!s->ata) {
3007 * TODO: We could rely on the stores below, at least for
3008 * system mode, if we arrange to add MO_ALIGN_16.
3010 gen_helper_stg_stub(cpu_env, dirty_addr);
3011 } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3012 gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
3013 } else {
3014 gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
3018 clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
3019 (wback || rn != 31) && !set_tag, 2 << size);
3021 if (is_vector) {
3022 if (is_load) {
3023 do_fp_ld(s, rt, clean_addr, size);
3024 } else {
3025 do_fp_st(s, rt, clean_addr, size);
3027 tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
3028 if (is_load) {
3029 do_fp_ld(s, rt2, clean_addr, size);
3030 } else {
3031 do_fp_st(s, rt2, clean_addr, size);
3033 } else {
3034 TCGv_i64 tcg_rt = cpu_reg(s, rt);
3035 TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
3037 if (is_load) {
3038 TCGv_i64 tmp = tcg_temp_new_i64();
3040 /* Do not modify tcg_rt before recognizing any exception
3041 * from the second load.
3043 do_gpr_ld(s, tmp, clean_addr, size + is_signed * MO_SIGN,
3044 false, false, 0, false, false);
3045 tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
3046 do_gpr_ld(s, tcg_rt2, clean_addr, size + is_signed * MO_SIGN,
3047 false, false, 0, false, false);
3049 tcg_gen_mov_i64(tcg_rt, tmp);
3050 tcg_temp_free_i64(tmp);
3051 } else {
3052 do_gpr_st(s, tcg_rt, clean_addr, size,
3053 false, 0, false, false);
3054 tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
3055 do_gpr_st(s, tcg_rt2, clean_addr, size,
3056 false, 0, false, false);
3060 if (wback) {
3061 if (postindex) {
3062 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3064 tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
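/*
 * Editor's note: a small sketch (hypothetical names, kept under "#if 0") of
 * how the imm7 offset above is scaled and applied for the three addressing
 * modes: signed offset, pre-index (base updated before the access) and
 * post-index (base updated after the access).  The LOG2_TAG_GRANULE scaling
 * used for STGP is ignored here.
 */
#if 0
#include <stdint.h>

static uint64_t example_pair_address(uint64_t *rn, int64_t imm7, int size,
                                     int postindex, int wback)
{
    int64_t offset = imm7 << size;     /* scale by the access size */
    uint64_t addr = *rn;

    if (!postindex) {
        addr += offset;                /* signed offset / pre-index */
    }
    if (wback) {
        *rn = postindex ? *rn + offset : addr;
    }
    return addr;                       /* address actually accessed */
}
#endif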
3069 * Load/store (immediate post-indexed)
3070 * Load/store (immediate pre-indexed)
3071 * Load/store (unscaled immediate)
3073 * 31 30 29 27 26 25 24 23 22 21 20 12 11 10 9 5 4 0
3074 * +----+-------+---+-----+-----+---+--------+-----+------+------+
3075 * |size| 1 1 1 | V | 0 0 | opc | 0 | imm9 | idx | Rn | Rt |
3076 * +----+-------+---+-----+-----+---+--------+-----+------+------+
3078  * idx = 01 -> post-indexed, 11 -> pre-indexed, 00 -> unscaled imm. (no writeback),
3079  *       10 -> unprivileged
3080 * V = 0 -> non-vector
3081 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
3082 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
3084 static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
3085 int opc,
3086 int size,
3087 int rt,
3088 bool is_vector)
3090 int rn = extract32(insn, 5, 5);
3091 int imm9 = sextract32(insn, 12, 9);
3092 int idx = extract32(insn, 10, 2);
3093 bool is_signed = false;
3094 bool is_store = false;
3095 bool is_extended = false;
3096 bool is_unpriv = (idx == 2);
3097 bool iss_valid = !is_vector;
3098 bool post_index;
3099 bool writeback;
3100 int memidx;
3102 TCGv_i64 clean_addr, dirty_addr;
3104 if (is_vector) {
3105 size |= (opc & 2) << 1;
3106 if (size > 4 || is_unpriv) {
3107 unallocated_encoding(s);
3108 return;
3110 is_store = ((opc & 1) == 0);
3111 if (!fp_access_check(s)) {
3112 return;
3114 } else {
3115 if (size == 3 && opc == 2) {
3116 /* PRFM - prefetch */
3117 if (idx != 0) {
3118 unallocated_encoding(s);
3119 return;
3121 return;
3123 if (opc == 3 && size > 1) {
3124 unallocated_encoding(s);
3125 return;
3127 is_store = (opc == 0);
3128 is_signed = extract32(opc, 1, 1);
3129 is_extended = (size < 3) && extract32(opc, 0, 1);
3132 switch (idx) {
3133 case 0:
3134 case 2:
3135 post_index = false;
3136 writeback = false;
3137 break;
3138 case 1:
3139 post_index = true;
3140 writeback = true;
3141 break;
3142 case 3:
3143 post_index = false;
3144 writeback = true;
3145 break;
3146 default:
3147 g_assert_not_reached();
3150 if (rn == 31) {
3151 gen_check_sp_alignment(s);
3154 dirty_addr = read_cpu_reg_sp(s, rn, 1);
3155 if (!post_index) {
3156 tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
3159 memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
3160 clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
3161 writeback || rn != 31,
3162 size, is_unpriv, memidx);
3164 if (is_vector) {
3165 if (is_store) {
3166 do_fp_st(s, rt, clean_addr, size);
3167 } else {
3168 do_fp_ld(s, rt, clean_addr, size);
3170 } else {
3171 TCGv_i64 tcg_rt = cpu_reg(s, rt);
3172 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
3174 if (is_store) {
3175 do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
3176 iss_valid, rt, iss_sf, false);
3177 } else {
3178 do_gpr_ld_memidx(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
3179 is_extended, memidx,
3180 iss_valid, rt, iss_sf, false);
3184 if (writeback) {
3185 TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
3186 if (post_index) {
3187 tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
3189 tcg_gen_mov_i64(tcg_rn, dirty_addr);
3194 * Load/store (register offset)
3196 * 31 30 29 27 26 25 24 23 22 21 20 16 15 13 12 11 10 9 5 4 0
3197 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
3198 * |size| 1 1 1 | V | 0 0 | opc | 1 | Rm | opt | S| 1 0 | Rn | Rt |
3199 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
3201 * For non-vector:
3202 * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
3203 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
3204 * For vector:
3205 * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
3206 * opc<0>: 0 -> store, 1 -> load
3207 * V: 1 -> vector/simd
3208 * opt: extend encoding (see DecodeRegExtend)
3209 * S: if S=1 then scale (essentially index by sizeof(size))
3210 * Rt: register to transfer into/out of
3211 * Rn: address register or SP for base
3212 * Rm: offset register or ZR for offset
3214 static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
3215 int opc,
3216 int size,
3217 int rt,
3218 bool is_vector)
3220 int rn = extract32(insn, 5, 5);
3221 int shift = extract32(insn, 12, 1);
3222 int rm = extract32(insn, 16, 5);
3223 int opt = extract32(insn, 13, 3);
3224 bool is_signed = false;
3225 bool is_store = false;
3226 bool is_extended = false;
3228 TCGv_i64 tcg_rm, clean_addr, dirty_addr;
3230 if (extract32(opt, 1, 1) == 0) {
3231 unallocated_encoding(s);
3232 return;
3235 if (is_vector) {
3236 size |= (opc & 2) << 1;
3237 if (size > 4) {
3238 unallocated_encoding(s);
3239 return;
3241 is_store = !extract32(opc, 0, 1);
3242 if (!fp_access_check(s)) {
3243 return;
3245 } else {
3246 if (size == 3 && opc == 2) {
3247 /* PRFM - prefetch */
3248 return;
3250 if (opc == 3 && size > 1) {
3251 unallocated_encoding(s);
3252 return;
3254 is_store = (opc == 0);
3255 is_signed = extract32(opc, 1, 1);
3256 is_extended = (size < 3) && extract32(opc, 0, 1);
3259 if (rn == 31) {
3260 gen_check_sp_alignment(s);
3262 dirty_addr = read_cpu_reg_sp(s, rn, 1);
3264 tcg_rm = read_cpu_reg(s, rm, 1);
3265 ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
3267 tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
3268 clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, size);
3270 if (is_vector) {
3271 if (is_store) {
3272 do_fp_st(s, rt, clean_addr, size);
3273 } else {
3274 do_fp_ld(s, rt, clean_addr, size);
3276 } else {
3277 TCGv_i64 tcg_rt = cpu_reg(s, rt);
3278 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
3279 if (is_store) {
3280 do_gpr_st(s, tcg_rt, clean_addr, size,
3281 true, rt, iss_sf, false);
3282 } else {
3283 do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
3284 is_extended, true, rt, iss_sf, false);
3290 * Load/store (unsigned immediate)
3292 * 31 30 29 27 26 25 24 23 22 21 10 9 5
3293 * +----+-------+---+-----+-----+------------+-------+------+
3294 * |size| 1 1 1 | V | 0 1 | opc | imm12 | Rn | Rt |
3295 * +----+-------+---+-----+-----+------------+-------+------+
3297 * For non-vector:
3298 * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
3299 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
3300 * For vector:
3301 * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
3302 * opc<0>: 0 -> store, 1 -> load
3303 * Rn: base address register (inc SP)
3304 * Rt: target register
3306 static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
3307 int opc,
3308 int size,
3309 int rt,
3310 bool is_vector)
3312 int rn = extract32(insn, 5, 5);
3313 unsigned int imm12 = extract32(insn, 10, 12);
3314 unsigned int offset;
3316 TCGv_i64 clean_addr, dirty_addr;
3318 bool is_store;
3319 bool is_signed = false;
3320 bool is_extended = false;
3322 if (is_vector) {
3323 size |= (opc & 2) << 1;
3324 if (size > 4) {
3325 unallocated_encoding(s);
3326 return;
3328 is_store = !extract32(opc, 0, 1);
3329 if (!fp_access_check(s)) {
3330 return;
3332 } else {
3333 if (size == 3 && opc == 2) {
3334 /* PRFM - prefetch */
3335 return;
3337 if (opc == 3 && size > 1) {
3338 unallocated_encoding(s);
3339 return;
3341 is_store = (opc == 0);
3342 is_signed = extract32(opc, 1, 1);
3343 is_extended = (size < 3) && extract32(opc, 0, 1);
3346 if (rn == 31) {
3347 gen_check_sp_alignment(s);
3349 dirty_addr = read_cpu_reg_sp(s, rn, 1);
3350 offset = imm12 << size;
3351 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3352 clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, size);
3354 if (is_vector) {
3355 if (is_store) {
3356 do_fp_st(s, rt, clean_addr, size);
3357 } else {
3358 do_fp_ld(s, rt, clean_addr, size);
3360 } else {
3361 TCGv_i64 tcg_rt = cpu_reg(s, rt);
3362 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
3363 if (is_store) {
3364 do_gpr_st(s, tcg_rt, clean_addr, size,
3365 true, rt, iss_sf, false);
3366 } else {
3367 do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
3368 is_extended, true, rt, iss_sf, false);
3373 /* Atomic memory operations
3375 * 31 30 27 26 24 22 21 16 15 12 10 5 0
3376 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
3377 * | size | 1 1 1 | V | 0 0 | A R | 1 | Rs | o3 | opc | 0 0 | Rn | Rt |
3378 * +------+-------+---+-----+-----+--------+----+-----+-----+----+-----+
3380 * Rt: the result register
3381 * Rn: base address or SP
3382 * Rs: the source register for the operation
3383 * V: vector flag (always 0 as of v8.3)
3384 * A: acquire flag
3385 * R: release flag
3387 static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
3388 int size, int rt, bool is_vector)
3390 int rs = extract32(insn, 16, 5);
3391 int rn = extract32(insn, 5, 5);
3392 int o3_opc = extract32(insn, 12, 4);
3393 bool r = extract32(insn, 22, 1);
3394 bool a = extract32(insn, 23, 1);
3395 TCGv_i64 tcg_rs, tcg_rt, clean_addr;
3396 AtomicThreeOpFn *fn = NULL;
3397 MemOp mop = s->be_data | size | MO_ALIGN;
3399 if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
3400 unallocated_encoding(s);
3401 return;
3403 switch (o3_opc) {
3404 case 000: /* LDADD */
3405 fn = tcg_gen_atomic_fetch_add_i64;
3406 break;
3407 case 001: /* LDCLR */
3408 fn = tcg_gen_atomic_fetch_and_i64;
3409 break;
3410 case 002: /* LDEOR */
3411 fn = tcg_gen_atomic_fetch_xor_i64;
3412 break;
3413 case 003: /* LDSET */
3414 fn = tcg_gen_atomic_fetch_or_i64;
3415 break;
3416 case 004: /* LDSMAX */
3417 fn = tcg_gen_atomic_fetch_smax_i64;
3418 mop |= MO_SIGN;
3419 break;
3420 case 005: /* LDSMIN */
3421 fn = tcg_gen_atomic_fetch_smin_i64;
3422 mop |= MO_SIGN;
3423 break;
3424 case 006: /* LDUMAX */
3425 fn = tcg_gen_atomic_fetch_umax_i64;
3426 break;
3427 case 007: /* LDUMIN */
3428 fn = tcg_gen_atomic_fetch_umin_i64;
3429 break;
3430 case 010: /* SWP */
3431 fn = tcg_gen_atomic_xchg_i64;
3432 break;
3433 case 014: /* LDAPR, LDAPRH, LDAPRB */
3434 if (!dc_isar_feature(aa64_rcpc_8_3, s) ||
3435 rs != 31 || a != 1 || r != 0) {
3436 unallocated_encoding(s);
3437 return;
3439 break;
3440 default:
3441 unallocated_encoding(s);
3442 return;
3445 if (rn == 31) {
3446 gen_check_sp_alignment(s);
3448 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size);
3450 if (o3_opc == 014) {
3452 * LDAPR* are a special case because they are a simple load, not a
3453 * fetch-and-do-something op.
3454 * The architectural consistency requirements here are weaker than
3455 * full load-acquire (we only need "load-acquire processor consistent"),
3456 * but we choose to implement them as full LDAQ.
3458 do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false,
3459 true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
3460 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
3461 return;
3464 tcg_rs = read_cpu_reg(s, rs, true);
3465 tcg_rt = cpu_reg(s, rt);
3467 if (o3_opc == 1) { /* LDCLR */
3468 tcg_gen_not_i64(tcg_rs, tcg_rs);
3471 /* The tcg atomic primitives are all full barriers. Therefore we
3472 * can ignore the Acquire and Release bits of this instruction.
3474 fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);
3476 if ((mop & MO_SIGN) && size != MO_64) {
3477 tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
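/*
 * Editor's note: illustrative sketch (hypothetical name, kept under "#if 0")
 * of why LDCLR is emitted above as an atomic fetch-and on the complement of
 * Rs: the instruction clears the bits that are set in Rs, i.e. it computes
 * mem = old & ~Rs, and returns the old value.
 */
#if 0
#include <stdint.h>

static uint64_t model_ldclr(uint64_t *addr, uint64_t rs)
{
    /* fetch-and with ~rs == atomically clear the bits set in rs */
    return __atomic_fetch_and(addr, ~rs, __ATOMIC_SEQ_CST);
}
#endif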
3482 * PAC memory operations
3484 * 31 30 27 26 24 22 21 12 11 10 5 0
3485 * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
3486 * | size | 1 1 1 | V | 0 0 | M S | 1 | imm9 | W | 1 | Rn | Rt |
3487 * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
3489 * Rt: the result register
3490 * Rn: base address or SP
3491 * V: vector flag (always 0 as of v8.3)
3492 * M: clear for key DA, set for key DB
3493 * W: pre-indexing flag
3494 * S: sign for imm9.
3496 static void disas_ldst_pac(DisasContext *s, uint32_t insn,
3497 int size, int rt, bool is_vector)
3499 int rn = extract32(insn, 5, 5);
3500 bool is_wback = extract32(insn, 11, 1);
3501 bool use_key_a = !extract32(insn, 23, 1);
3502 int offset;
3503 TCGv_i64 clean_addr, dirty_addr, tcg_rt;
3505 if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
3506 unallocated_encoding(s);
3507 return;
3510 if (rn == 31) {
3511 gen_check_sp_alignment(s);
3513 dirty_addr = read_cpu_reg_sp(s, rn, 1);
3515 if (s->pauth_active) {
3516 if (use_key_a) {
3517 gen_helper_autda(dirty_addr, cpu_env, dirty_addr,
3518 new_tmp_a64_zero(s));
3519 } else {
3520 gen_helper_autdb(dirty_addr, cpu_env, dirty_addr,
3521 new_tmp_a64_zero(s));
3525 /* Form the 10-bit signed, scaled offset. */
3526 offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9);
3527 offset = sextract32(offset << size, 0, 10 + size);
3528 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3530 /* Note that "clean" and "dirty" here refer to TBI not PAC. */
3531 clean_addr = gen_mte_check1(s, dirty_addr, false,
3532 is_wback || rn != 31, size);
3534 tcg_rt = cpu_reg(s, rt);
3535 do_gpr_ld(s, tcg_rt, clean_addr, size,
3536 /* extend */ false, /* iss_valid */ !is_wback,
3537 /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);
3539 if (is_wback) {
3540 tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
3545 * LDAPR/STLR (unscaled immediate)
3547 * 31 30 24 22 21 12 10 5 0
3548 * +------+-------------+-----+---+--------+-----+----+-----+
3549 * | size | 0 1 1 0 0 1 | opc | 0 | imm9 | 0 0 | Rn | Rt |
3550 * +------+-------------+-----+---+--------+-----+----+-----+
3552 * Rt: source or destination register
3553 * Rn: base register
3554 * imm9: unscaled immediate offset
3555 * opc: 00: STLUR*, 01/10/11: various LDAPUR*
3556 * size: size of load/store
3558 static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
3560 int rt = extract32(insn, 0, 5);
3561 int rn = extract32(insn, 5, 5);
3562 int offset = sextract32(insn, 12, 9);
3563 int opc = extract32(insn, 22, 2);
3564 int size = extract32(insn, 30, 2);
3565 TCGv_i64 clean_addr, dirty_addr;
3566 bool is_store = false;
3567 bool extend = false;
3568 bool iss_sf;
3569 MemOp mop;
3571 if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
3572 unallocated_encoding(s);
3573 return;
3576 /* TODO: ARMv8.4-LSE SCTLR.nAA */
3577 mop = size | MO_ALIGN;
3579 switch (opc) {
3580 case 0: /* STLURB */
3581 is_store = true;
3582 break;
3583 case 1: /* LDAPUR* */
3584 break;
3585 case 2: /* LDAPURS* 64-bit variant */
3586 if (size == 3) {
3587 unallocated_encoding(s);
3588 return;
3590 mop |= MO_SIGN;
3591 break;
3592 case 3: /* LDAPURS* 32-bit variant */
3593 if (size > 1) {
3594 unallocated_encoding(s);
3595 return;
3597 mop |= MO_SIGN;
3598 extend = true; /* zero-extend 32->64 after signed load */
3599 break;
3600 default:
3601 g_assert_not_reached();
3604 iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc);
3606 if (rn == 31) {
3607 gen_check_sp_alignment(s);
3610 dirty_addr = read_cpu_reg_sp(s, rn, 1);
3611 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3612 clean_addr = clean_data_tbi(s, dirty_addr);
3614 if (is_store) {
3615 /* Store-Release semantics */
3616 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
3617 do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true);
3618 } else {
3620 * Load-AcquirePC semantics; we implement as the slightly more
3621 * restrictive Load-Acquire.
3623 do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop,
3624 extend, true, rt, iss_sf, true);
3625 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
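/*
 * Editor's note: minimal sketch (hypothetical name, kept under "#if 0") of
 * the "sign-extend within 32 bits, then zero-extend to 64" behaviour selected
 * by the 'extend' flag above for the 32-bit LDAPURS* variants, e.g.
 * LDAPURSB Wt.
 */
#if 0
#include <stdint.h>

static uint64_t example_ldapursb_w(int8_t byte_from_memory)
{
    /* Sign-extend the byte into the 32-bit Wt view... */
    uint32_t wt = (int32_t)byte_from_memory;
    /* ...then the upper half of Xt reads as zero. */
    return (uint64_t)wt;
}
#endif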
3629 /* Load/store register (all forms) */
3630 static void disas_ldst_reg(DisasContext *s, uint32_t insn)
3632 int rt = extract32(insn, 0, 5);
3633 int opc = extract32(insn, 22, 2);
3634 bool is_vector = extract32(insn, 26, 1);
3635 int size = extract32(insn, 30, 2);
3637 switch (extract32(insn, 24, 2)) {
3638 case 0:
3639 if (extract32(insn, 21, 1) == 0) {
3640 /* Load/store register (unscaled immediate)
3641 * Load/store immediate pre/post-indexed
3642 * Load/store register unprivileged
3644 disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
3645 return;
3647 switch (extract32(insn, 10, 2)) {
3648 case 0:
3649 disas_ldst_atomic(s, insn, size, rt, is_vector);
3650 return;
3651 case 2:
3652 disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
3653 return;
3654 default:
3655 disas_ldst_pac(s, insn, size, rt, is_vector);
3656 return;
3658 break;
3659 case 1:
3660 disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
3661 return;
3663 unallocated_encoding(s);
3666 /* AdvSIMD load/store multiple structures
3668 * 31 30 29 23 22 21 16 15 12 11 10 9 5 4 0
3669 * +---+---+---------------+---+-------------+--------+------+------+------+
3670 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size | Rn | Rt |
3671 * +---+---+---------------+---+-------------+--------+------+------+------+
3673 * AdvSIMD load/store multiple structures (post-indexed)
3675 * 31 30 29 23 22 21 20 16 15 12 11 10 9 5 4 0
3676 * +---+---+---------------+---+---+---------+--------+------+------+------+
3677 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 | Rm | opcode | size | Rn | Rt |
3678 * +---+---+---------------+---+---+---------+--------+------+------+------+
3680 * Rt: first (or only) SIMD&FP register to be transferred
3681 * Rn: base address or SP
3682 * Rm (post-index only): post-index register (when !31) or size dependent #imm
3684 static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
3686 int rt = extract32(insn, 0, 5);
3687 int rn = extract32(insn, 5, 5);
3688 int rm = extract32(insn, 16, 5);
3689 int size = extract32(insn, 10, 2);
3690 int opcode = extract32(insn, 12, 4);
3691 bool is_store = !extract32(insn, 22, 1);
3692 bool is_postidx = extract32(insn, 23, 1);
3693 bool is_q = extract32(insn, 30, 1);
3694 TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
3695 MemOp endian, align, mop;
3697 int total; /* total bytes */
3698 int elements; /* elements per vector */
3699 int rpt; /* num iterations */
3700 int selem; /* structure elements */
3701 int r;
3703 if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
3704 unallocated_encoding(s);
3705 return;
3708 if (!is_postidx && rm != 0) {
3709 unallocated_encoding(s);
3710 return;
3713 /* From the shared decode logic */
3714 switch (opcode) {
3715 case 0x0:
3716 rpt = 1;
3717 selem = 4;
3718 break;
3719 case 0x2:
3720 rpt = 4;
3721 selem = 1;
3722 break;
3723 case 0x4:
3724 rpt = 1;
3725 selem = 3;
3726 break;
3727 case 0x6:
3728 rpt = 3;
3729 selem = 1;
3730 break;
3731 case 0x7:
3732 rpt = 1;
3733 selem = 1;
3734 break;
3735 case 0x8:
3736 rpt = 1;
3737 selem = 2;
3738 break;
3739 case 0xa:
3740 rpt = 2;
3741 selem = 1;
3742 break;
3743 default:
3744 unallocated_encoding(s);
3745 return;
3748 if (size == 3 && !is_q && selem != 1) {
3749 /* reserved */
3750 unallocated_encoding(s);
3751 return;
3754 if (!fp_access_check(s)) {
3755 return;
3758 if (rn == 31) {
3759 gen_check_sp_alignment(s);
3762 /* For our purposes, bytes are always little-endian. */
3763 endian = s->be_data;
3764 if (size == 0) {
3765 endian = MO_LE;
3768 total = rpt * selem * (is_q ? 16 : 8);
3769 tcg_rn = cpu_reg_sp(s, rn);
3772 * Issue the MTE check vs the logical repeat count, before we
3773 * promote consecutive little-endian elements below.
3775 clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
3776 total);
3779 * Consecutive little-endian elements from a single register
3780 * can be promoted to a larger little-endian operation.
3782 align = MO_ALIGN;
3783 if (selem == 1 && endian == MO_LE) {
3784 align = pow2_align(size);
3785 size = 3;
3787 if (!s->align_mem) {
3788 align = 0;
3790 mop = endian | size | align;
3792 elements = (is_q ? 16 : 8) >> size;
3793 tcg_ebytes = tcg_constant_i64(1 << size);
3794 for (r = 0; r < rpt; r++) {
3795 int e;
3796 for (e = 0; e < elements; e++) {
3797 int xs;
3798 for (xs = 0; xs < selem; xs++) {
3799 int tt = (rt + r + xs) % 32;
3800 if (is_store) {
3801 do_vec_st(s, tt, e, clean_addr, mop);
3802 } else {
3803 do_vec_ld(s, tt, e, clean_addr, mop);
3805 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
3810 if (!is_store) {
3811 /* For non-quad operations, setting a slice of the low
3812 * 64 bits of the register clears the high 64 bits (in
3813 * the ARM ARM pseudocode this is implicit in the fact
3814 * that 'rval' is a 64 bit wide variable).
3815 * For quad operations, we might still need to zero the
3816 * high bits of SVE.
3818 for (r = 0; r < rpt * selem; r++) {
3819 int tt = (rt + r) % 32;
3820 clear_vec_high(s, is_q, tt);
3824 if (is_postidx) {
3825 if (rm == 31) {
3826 tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
3827 } else {
3828 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
3833 /* AdvSIMD load/store single structure
3835 * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
3836 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3837 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size | Rn | Rt |
3838 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3840 * AdvSIMD load/store single structure (post-indexed)
3842 * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
3843 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3844 * | 0 | Q | 0 0 1 1 0 1 1 | L R | Rm | opc | S | size | Rn | Rt |
3845 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3847 * Rt: first (or only) SIMD&FP register to be transferred
3848 * Rn: base address or SP
3849 * Rm (post-index only): post-index register (when !31) or size dependent #imm
3850 * index = encoded in Q:S:size dependent on size
3852 * lane_size = encoded in R, opc
3853 * transfer width = encoded in opc, S, size
3855 static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
3857 int rt = extract32(insn, 0, 5);
3858 int rn = extract32(insn, 5, 5);
3859 int rm = extract32(insn, 16, 5);
3860 int size = extract32(insn, 10, 2);
3861 int S = extract32(insn, 12, 1);
3862 int opc = extract32(insn, 13, 3);
3863 int R = extract32(insn, 21, 1);
3864 int is_load = extract32(insn, 22, 1);
3865 int is_postidx = extract32(insn, 23, 1);
3866 int is_q = extract32(insn, 30, 1);
3868 int scale = extract32(opc, 1, 2);
3869 int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
3870 bool replicate = false;
3871 int index = is_q << 3 | S << 2 | size;
3872 int xs, total;
3873 TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
3874 MemOp mop;
3876 if (extract32(insn, 31, 1)) {
3877 unallocated_encoding(s);
3878 return;
3880 if (!is_postidx && rm != 0) {
3881 unallocated_encoding(s);
3882 return;
3885 switch (scale) {
3886 case 3:
3887 if (!is_load || S) {
3888 unallocated_encoding(s);
3889 return;
3891 scale = size;
3892 replicate = true;
3893 break;
3894 case 0:
3895 break;
3896 case 1:
3897 if (extract32(size, 0, 1)) {
3898 unallocated_encoding(s);
3899 return;
3901 index >>= 1;
3902 break;
3903 case 2:
3904 if (extract32(size, 1, 1)) {
3905 unallocated_encoding(s);
3906 return;
3908 if (!extract32(size, 0, 1)) {
3909 index >>= 2;
3910 } else {
3911 if (S) {
3912 unallocated_encoding(s);
3913 return;
3915 index >>= 3;
3916 scale = 3;
3918 break;
3919 default:
3920 g_assert_not_reached();
3923 if (!fp_access_check(s)) {
3924 return;
3927 if (rn == 31) {
3928 gen_check_sp_alignment(s);
3931 total = selem << scale;
3932 tcg_rn = cpu_reg_sp(s, rn);
3934 clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
3935 total);
3936 mop = finalize_memop(s, scale);
3938 tcg_ebytes = tcg_constant_i64(1 << scale);
3939 for (xs = 0; xs < selem; xs++) {
3940 if (replicate) {
3941 /* Load and replicate to all elements */
3942 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
3944 tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
3945 tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
3946 (is_q + 1) * 8, vec_full_reg_size(s),
3947 tcg_tmp);
3948 tcg_temp_free_i64(tcg_tmp);
3949 } else {
3950 /* Load/store one element per register */
3951 if (is_load) {
3952 do_vec_ld(s, rt, index, clean_addr, mop);
3953 } else {
3954 do_vec_st(s, rt, index, clean_addr, mop);
3957 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
3958 rt = (rt + 1) % 32;
3961 if (is_postidx) {
3962 if (rm == 31) {
3963 tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
3964 } else {
3965 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
3971 * Load/Store memory tags
3973 * 31 30 29 24 22 21 12 10 5 0
3974 * +-----+-------------+-----+---+------+-----+------+------+
3975 * | 1 1 | 0 1 1 0 0 1 | op1 | 1 | imm9 | op2 | Rn | Rt |
3976 * +-----+-------------+-----+---+------+-----+------+------+
3978 static void disas_ldst_tag(DisasContext *s, uint32_t insn)
3980 int rt = extract32(insn, 0, 5);
3981 int rn = extract32(insn, 5, 5);
3982 uint64_t offset = sextract64(insn, 12, 9) << LOG2_TAG_GRANULE;
3983 int op2 = extract32(insn, 10, 2);
3984 int op1 = extract32(insn, 22, 2);
3985 bool is_load = false, is_pair = false, is_zero = false, is_mult = false;
3986 int index = 0;
3987 TCGv_i64 addr, clean_addr, tcg_rt;
3989 /* We checked insn bits [29:24,21] in the caller. */
3990 if (extract32(insn, 30, 2) != 3) {
3991 goto do_unallocated;
3995 * @index is a tri-state variable which has 3 states:
3996 * < 0 : post-index, writeback
3997 * = 0 : signed offset
3998 * > 0 : pre-index, writeback
4000 switch (op1) {
4001 case 0:
4002 if (op2 != 0) {
4003 /* STG */
4004 index = op2 - 2;
4005 } else {
4006 /* STZGM */
4007 if (s->current_el == 0 || offset != 0) {
4008 goto do_unallocated;
4010 is_mult = is_zero = true;
4012 break;
4013 case 1:
4014 if (op2 != 0) {
4015 /* STZG */
4016 is_zero = true;
4017 index = op2 - 2;
4018 } else {
4019 /* LDG */
4020 is_load = true;
4022 break;
4023 case 2:
4024 if (op2 != 0) {
4025 /* ST2G */
4026 is_pair = true;
4027 index = op2 - 2;
4028 } else {
4029 /* STGM */
4030 if (s->current_el == 0 || offset != 0) {
4031 goto do_unallocated;
4033 is_mult = true;
4035 break;
4036 case 3:
4037 if (op2 != 0) {
4038 /* STZ2G */
4039 is_pair = is_zero = true;
4040 index = op2 - 2;
4041 } else {
4042 /* LDGM */
4043 if (s->current_el == 0 || offset != 0) {
4044 goto do_unallocated;
4046 is_mult = is_load = true;
4048 break;
4050 default:
4051 do_unallocated:
4052 unallocated_encoding(s);
4053 return;
4056 if (is_mult
4057 ? !dc_isar_feature(aa64_mte, s)
4058 : !dc_isar_feature(aa64_mte_insn_reg, s)) {
4059 goto do_unallocated;
4062 if (rn == 31) {
4063 gen_check_sp_alignment(s);
4066 addr = read_cpu_reg_sp(s, rn, true);
4067 if (index >= 0) {
4068 /* pre-index or signed offset */
4069 tcg_gen_addi_i64(addr, addr, offset);
4072 if (is_mult) {
4073 tcg_rt = cpu_reg(s, rt);
4075 if (is_zero) {
4076 int size = 4 << s->dcz_blocksize;
4078 if (s->ata) {
4079 gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
4082 * The non-tags portion of STZGM is mostly like DC_ZVA,
4083 * except the alignment happens before the access.
4085 clean_addr = clean_data_tbi(s, addr);
4086 tcg_gen_andi_i64(clean_addr, clean_addr, -size);
4087 gen_helper_dc_zva(cpu_env, clean_addr);
4088 } else if (s->ata) {
4089 if (is_load) {
4090 gen_helper_ldgm(tcg_rt, cpu_env, addr);
4091 } else {
4092 gen_helper_stgm(cpu_env, addr, tcg_rt);
4094 } else {
4095 MMUAccessType acc = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE;
4096 int size = 4 << GMID_EL1_BS;
4098 clean_addr = clean_data_tbi(s, addr);
4099 tcg_gen_andi_i64(clean_addr, clean_addr, -size);
4100 gen_probe_access(s, clean_addr, acc, size);
4102 if (is_load) {
4103 /* The result tags are zeros. */
4104 tcg_gen_movi_i64(tcg_rt, 0);
4107 return;
4110 if (is_load) {
4111 tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
4112 tcg_rt = cpu_reg(s, rt);
4113 if (s->ata) {
4114 gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
4115 } else {
4116 clean_addr = clean_data_tbi(s, addr);
4117 gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
4118 gen_address_with_allocation_tag0(tcg_rt, addr);
4120 } else {
4121 tcg_rt = cpu_reg_sp(s, rt);
4122 if (!s->ata) {
4124 * For STG and ST2G, we need to check alignment and probe memory.
4125 * TODO: For STZG and STZ2G, we could rely on the stores below,
4126 * at least for system mode; user-only won't enforce alignment.
4128 if (is_pair) {
4129 gen_helper_st2g_stub(cpu_env, addr);
4130 } else {
4131 gen_helper_stg_stub(cpu_env, addr);
4133 } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
4134 if (is_pair) {
4135 gen_helper_st2g_parallel(cpu_env, addr, tcg_rt);
4136 } else {
4137 gen_helper_stg_parallel(cpu_env, addr, tcg_rt);
4139 } else {
4140 if (is_pair) {
4141 gen_helper_st2g(cpu_env, addr, tcg_rt);
4142 } else {
4143 gen_helper_stg(cpu_env, addr, tcg_rt);
4148 if (is_zero) {
4149 TCGv_i64 clean_addr = clean_data_tbi(s, addr);
4150 TCGv_i64 tcg_zero = tcg_constant_i64(0);
4151 int mem_index = get_mem_index(s);
4152 int i, n = (1 + is_pair) << LOG2_TAG_GRANULE;
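/* Zero (1 + is_pair) tag granules, i.e. 16 or 32 bytes, in 8-byte
 * stores; only the first store carries the 16-byte alignment check. */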
4154 tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index,
4155 MO_UQ | MO_ALIGN_16);
4156 for (i = 8; i < n; i += 8) {
4157 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4158 tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, MO_UQ);
4162 if (index != 0) {
4163 /* pre-index or post-index */
4164 if (index < 0) {
4165 /* post-index */
4166 tcg_gen_addi_i64(addr, addr, offset);
4168 tcg_gen_mov_i64(cpu_reg_sp(s, rn), addr);
4172 /* Loads and stores */
4173 static void disas_ldst(DisasContext *s, uint32_t insn)
4175 switch (extract32(insn, 24, 6)) {
4176 case 0x08: /* Load/store exclusive */
4177 disas_ldst_excl(s, insn);
4178 break;
4179 case 0x18: case 0x1c: /* Load register (literal) */
4180 disas_ld_lit(s, insn);
4181 break;
4182 case 0x28: case 0x29:
4183 case 0x2c: case 0x2d: /* Load/store pair (all forms) */
4184 disas_ldst_pair(s, insn);
4185 break;
4186 case 0x38: case 0x39:
4187 case 0x3c: case 0x3d: /* Load/store register (all forms) */
4188 disas_ldst_reg(s, insn);
4189 break;
4190 case 0x0c: /* AdvSIMD load/store multiple structures */
4191 disas_ldst_multiple_struct(s, insn);
4192 break;
4193 case 0x0d: /* AdvSIMD load/store single structure */
4194 disas_ldst_single_struct(s, insn);
4195 break;
4196 case 0x19:
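/* Either load/store memory tags (bit 21 set) or LDAPR/STLR
 * with an unscaled immediate. */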
4197 if (extract32(insn, 21, 1) != 0) {
4198 disas_ldst_tag(s, insn);
4199 } else if (extract32(insn, 10, 2) == 0) {
4200 disas_ldst_ldapr_stlr(s, insn);
4201 } else {
4202 unallocated_encoding(s);
4204 break;
4205 default:
4206 unallocated_encoding(s);
4207 break;
4211 /* PC-rel. addressing
4212 * 31 30 29 28 24 23 5 4 0
4213 * +----+-------+-----------+-------------------+------+
4214 * | op | immlo | 1 0 0 0 0 | immhi | Rd |
4215 * +----+-------+-----------+-------------------+------+
4217 static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
4219 unsigned int page, rd;
4220 uint64_t base;
4221 uint64_t offset;
4223 page = extract32(insn, 31, 1);
4224 /* SignExtend(immhi:immlo) -> offset */
4225 offset = sextract64(insn, 5, 19);
4226 offset = offset << 2 | extract32(insn, 29, 2);
4227 rd = extract32(insn, 0, 5);
4228 base = s->pc_curr;
4230 if (page) {
4231 /* ADRP (page based) */
4232 base &= ~0xfff;
4233 offset <<= 12;
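/* ADRP addresses are page-granular: offset is now a byte count of
 * whole 4KiB pages from the page containing this instruction. */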
4236 tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
4240 * Add/subtract (immediate)
4242 * 31 30 29 28 23 22 21 10 9 5 4 0
4243 * +--+--+--+-------------+--+-------------+-----+-----+
4244 * |sf|op| S| 1 0 0 0 1 0 |sh| imm12 | Rn | Rd |
4245 * +--+--+--+-------------+--+-------------+-----+-----+
4247 * sf: 0 -> 32bit, 1 -> 64bit
4248 * op: 0 -> add , 1 -> sub
4249 * S: 1 -> set flags
4250 * sh: 1 -> LSL imm by 12
4252 static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
4254 int rd = extract32(insn, 0, 5);
4255 int rn = extract32(insn, 5, 5);
4256 uint64_t imm = extract32(insn, 10, 12);
4257 bool shift = extract32(insn, 22, 1);
4258 bool setflags = extract32(insn, 29, 1);
4259 bool sub_op = extract32(insn, 30, 1);
4260 bool is_64bit = extract32(insn, 31, 1);
4262 TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
4263 TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
4264 TCGv_i64 tcg_result;
4266 if (shift) {
4267 imm <<= 12;
4270 tcg_result = tcg_temp_new_i64();
4271 if (!setflags) {
4272 if (sub_op) {
4273 tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
4274 } else {
4275 tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
4277 } else {
4278 TCGv_i64 tcg_imm = tcg_constant_i64(imm);
4279 if (sub_op) {
4280 gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
4281 } else {
4282 gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
4286 if (is_64bit) {
4287 tcg_gen_mov_i64(tcg_rd, tcg_result);
4288 } else {
4289 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
4292 tcg_temp_free_i64(tcg_result);
4296 * Add/subtract (immediate, with tags)
4298 * 31 30 29 28 23 22 21 16 14 10 9 5 4 0
4299 * +--+--+--+-------------+--+---------+--+-------+-----+-----+
4300 * |sf|op| S| 1 0 0 0 1 1 |o2| uimm6 |o3| uimm4 | Rn | Rd |
4301 * +--+--+--+-------------+--+---------+--+-------+-----+-----+
4303 * op: 0 -> add, 1 -> sub
4305 static void disas_add_sub_imm_with_tags(DisasContext *s, uint32_t insn)
4307 int rd = extract32(insn, 0, 5);
4308 int rn = extract32(insn, 5, 5);
4309 int uimm4 = extract32(insn, 10, 4);
4310 int uimm6 = extract32(insn, 16, 6);
4311 bool sub_op = extract32(insn, 30, 1);
4312 TCGv_i64 tcg_rn, tcg_rd;
4313 int imm;
4315 /* Test all of sf=1, S=0, o2=0, o3=0. */
4316 if ((insn & 0xa040c000u) != 0x80000000u ||
4317 !dc_isar_feature(aa64_mte_insn_reg, s)) {
4318 unallocated_encoding(s);
4319 return;
4322 imm = uimm6 << LOG2_TAG_GRANULE;
4323 if (sub_op) {
4324 imm = -imm;
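/*
 * uimm6 was scaled by the 16-byte tag granule to form the address
 * offset; uimm4 is a separate offset applied to the allocation tag
 * by the helper when tag insertion (ATA) is enabled.
 */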
4327 tcg_rn = cpu_reg_sp(s, rn);
4328 tcg_rd = cpu_reg_sp(s, rd);
4330 if (s->ata) {
4331 gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn,
4332 tcg_constant_i32(imm),
4333 tcg_constant_i32(uimm4));
4334 } else {
4335 tcg_gen_addi_i64(tcg_rd, tcg_rn, imm);
4336 gen_address_with_allocation_tag0(tcg_rd, tcg_rd);
4340 /* The input should be a value in the bottom e bits (with higher
4341 * bits zero); returns that value replicated into every element
4342 * of size e in a 64 bit integer.
4344 static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
4346 assert(e != 0);
4347 while (e < 64) {
4348 mask |= mask << e;
4349 e *= 2;
4351 return mask;
4354 /* Return a value with the bottom len bits set (where 0 < len <= 64) */
4355 static inline uint64_t bitmask64(unsigned int length)
4357 assert(length > 0 && length <= 64);
4358 return ~0ULL >> (64 - length);
4361 /* Simplified variant of pseudocode DecodeBitMasks() for the case where we
4362 * only require the wmask. Returns false if the imms/immr/immn are a reserved
4363 * value (ie should cause a guest UNDEF exception), and true if they are
4364 * valid, in which case the decoded bit pattern is written to result.
4366 bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
4367 unsigned int imms, unsigned int immr)
4369 uint64_t mask;
4370 unsigned e, levels, s, r;
4371 int len;
4373 assert(immn < 2 && imms < 64 && immr < 64);
4375 /* The bit patterns we create here are 64 bit patterns which
4376 * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
4377 * 64 bits each. Each element contains the same value: a run
4378 * of between 1 and e-1 non-zero bits, rotated within the
4379 * element by between 0 and e-1 bits.
4381 * The element size and run length are encoded into immn (1 bit)
4382 * and imms (6 bits) as follows:
4383 * 64 bit elements: immn = 1, imms = <length of run - 1>
4384 * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
4385 * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
4386 * 8 bit elements: immn = 0, imms = 110 : <length of run - 1>
4387 * 4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
4388 * 2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
4389 * Notice that immn = 0, imms = 11111x is the only combination
4390 * not covered by one of the above options; this is reserved.
4391 * Further, <length of run - 1> all-ones is a reserved pattern.
4393 * In all cases the rotation is by immr % e (and immr is 6 bits).
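 *
 * For example, immn = 0, imms = 111100, immr = 0 selects 2-bit
 * elements with a one-bit run and no rotation, giving
 * 0x5555555555555555; immn = 1, imms = 000011, immr = 2 gives a
 * 64-bit element of four set bits rotated right by two,
 * i.e. 0xc000000000000003.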
4396 /* First determine the element size */
4397 len = 31 - clz32((immn << 6) | (~imms & 0x3f));
4398 if (len < 1) {
4399 /* This is the immn == 0, imms == 11111x case */
4400 return false;
4402 e = 1 << len;
4404 levels = e - 1;
4405 s = imms & levels;
4406 r = immr & levels;
4408 if (s == levels) {
4409 /* <length of run - 1> mustn't be all-ones. */
4410 return false;
4413 /* Create the value of one element: s+1 set bits rotated
4414 * by r within the element (which is e bits wide)...
4416 mask = bitmask64(s + 1);
4417 if (r) {
4418 mask = (mask >> r) | (mask << (e - r));
4419 mask &= bitmask64(e);
4421 /* ...then replicate the element over the whole 64 bit value */
4422 mask = bitfield_replicate(mask, e);
4423 *result = mask;
4424 return true;
4427 /* Logical (immediate)
4428 * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
4429 * +----+-----+-------------+---+------+------+------+------+
4430 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd |
4431 * +----+-----+-------------+---+------+------+------+------+
4433 static void disas_logic_imm(DisasContext *s, uint32_t insn)
4435 unsigned int sf, opc, is_n, immr, imms, rn, rd;
4436 TCGv_i64 tcg_rd, tcg_rn;
4437 uint64_t wmask;
4438 bool is_and = false;
4440 sf = extract32(insn, 31, 1);
4441 opc = extract32(insn, 29, 2);
4442 is_n = extract32(insn, 22, 1);
4443 immr = extract32(insn, 16, 6);
4444 imms = extract32(insn, 10, 6);
4445 rn = extract32(insn, 5, 5);
4446 rd = extract32(insn, 0, 5);
4448 if (!sf && is_n) {
4449 unallocated_encoding(s);
4450 return;
4453 if (opc == 0x3) { /* ANDS */
4454 tcg_rd = cpu_reg(s, rd);
4455 } else {
4456 tcg_rd = cpu_reg_sp(s, rd);
4458 tcg_rn = cpu_reg(s, rn);
4460 if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
4461 /* some immediate field values are reserved */
4462 unallocated_encoding(s);
4463 return;
4466 if (!sf) {
4467 wmask &= 0xffffffff;
4470 switch (opc) {
4471 case 0x3: /* ANDS */
4472 case 0x0: /* AND */
4473 tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
4474 is_and = true;
4475 break;
4476 case 0x1: /* ORR */
4477 tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
4478 break;
4479 case 0x2: /* EOR */
4480 tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
4481 break;
4482 default:
4483 g_assert_not_reached(); /* must handle all above */
4484 break;
4487 if (!sf && !is_and) {
4488 /* zero extend final result; we know we can skip this for AND
4489 * since the immediate had the high 32 bits clear.
4491 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4494 if (opc == 3) { /* ANDS */
4495 gen_logic_CC(sf, tcg_rd);
4500 * Move wide (immediate)
4502 * 31 30 29 28 23 22 21 20 5 4 0
4503 * +--+-----+-------------+-----+----------------+------+
4504 * |sf| opc | 1 0 0 1 0 1 | hw | imm16 | Rd |
4505 * +--+-----+-------------+-----+----------------+------+
4507 * sf: 0 -> 32 bit, 1 -> 64 bit
4508 * opc: 00 -> N, 10 -> Z, 11 -> K
4509 * hw: shift/16 (0 or 16; 32 and 48 only when sf=1)
4511 static void disas_movw_imm(DisasContext *s, uint32_t insn)
4513 int rd = extract32(insn, 0, 5);
4514 uint64_t imm = extract32(insn, 5, 16);
4515 int sf = extract32(insn, 31, 1);
4516 int opc = extract32(insn, 29, 2);
4517 int pos = extract32(insn, 21, 2) << 4;
4518 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4520 if (!sf && (pos >= 32)) {
4521 unallocated_encoding(s);
4522 return;
4525 switch (opc) {
4526 case 0: /* MOVN */
4527 case 2: /* MOVZ */
4528 imm <<= pos;
4529 if (opc == 0) {
4530 imm = ~imm;
4532 if (!sf) {
4533 imm &= 0xffffffffu;
4535 tcg_gen_movi_i64(tcg_rd, imm);
4536 break;
4537 case 3: /* MOVK */
4538 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_constant_i64(imm), pos, 16);
4539 if (!sf) {
4540 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4542 break;
4543 default:
4544 unallocated_encoding(s);
4545 break;
4549 /* Bitfield
4550 * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
4551 * +----+-----+-------------+---+------+------+------+------+
4552 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd |
4553 * +----+-----+-------------+---+------+------+------+------+
4555 static void disas_bitfield(DisasContext *s, uint32_t insn)
4557 unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
4558 TCGv_i64 tcg_rd, tcg_tmp;
4560 sf = extract32(insn, 31, 1);
4561 opc = extract32(insn, 29, 2);
4562 n = extract32(insn, 22, 1);
4563 ri = extract32(insn, 16, 6);
4564 si = extract32(insn, 10, 6);
4565 rn = extract32(insn, 5, 5);
4566 rd = extract32(insn, 0, 5);
4567 bitsize = sf ? 64 : 32;
4569 if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
4570 unallocated_encoding(s);
4571 return;
4574 tcg_rd = cpu_reg(s, rd);
4576 /* Suppress the zero-extend for !sf. Since RI and SI are constrained
4577 to be smaller than bitsize, we'll never reference data outside the
4578 low 32-bits anyway. */
4579 tcg_tmp = read_cpu_reg(s, rn, 1);
4581 /* Recognize simple(r) extractions. */
4582 if (si >= ri) {
4583 /* Wd<s-r:0> = Wn<s:r> */
4584 len = (si - ri) + 1;
4585 if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */
4586 tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
4587 goto done;
4588 } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */
4589 tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
4590 return;
4592 /* opc == 1, BFXIL fall through to deposit */
4593 tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
4594 pos = 0;
4595 } else {
4596 /* Handle the ri > si case with a deposit
4597 * Wd<32+s-r,32-r> = Wn<s:0>
4599 len = si + 1;
4600 pos = (bitsize - ri) & (bitsize - 1);
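/*
 * For example (64-bit case), LSL Xd, Xn, #sh is UBFM with
 * immr = (64 - sh) & 63 and imms = 63 - sh, which arrives here with
 * len = 64 - sh and pos = sh: the deposit below is the left shift.
 */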
4603 if (opc == 0 && len < ri) {
4604 /* SBFM: sign extend the destination field from len to fill
4605 the balance of the word. Let the deposit below insert all
4606 of those sign bits. */
4607 tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
4608 len = ri;
4611 if (opc == 1) { /* BFM, BFXIL */
4612 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
4613 } else {
4614 /* SBFM or UBFM: We start with zero, and we haven't modified
4615 any bits outside bitsize, therefore the zero-extension
4616 below is unneeded. */
4617 tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
4618 return;
4621 done:
4622 if (!sf) { /* zero extend final result */
4623 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4627 /* Extract
4628 * 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0
4629 * +----+------+-------------+---+----+------+--------+------+------+
4630 * | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd |
4631 * +----+------+-------------+---+----+------+--------+------+------+
4633 static void disas_extract(DisasContext *s, uint32_t insn)
4635 unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;
4637 sf = extract32(insn, 31, 1);
4638 n = extract32(insn, 22, 1);
4639 rm = extract32(insn, 16, 5);
4640 imm = extract32(insn, 10, 6);
4641 rn = extract32(insn, 5, 5);
4642 rd = extract32(insn, 0, 5);
4643 op21 = extract32(insn, 29, 2);
4644 op0 = extract32(insn, 21, 1);
4645 bitsize = sf ? 64 : 32;
4647 if (sf != n || op21 || op0 || imm >= bitsize) {
4648 unallocated_encoding(s);
4649 } else {
4650 TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
4652 tcg_rd = cpu_reg(s, rd);
4654 if (unlikely(imm == 0)) {
4655 /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
4656 * so an extract from bit 0 is a special case.
4658 if (sf) {
4659 tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
4660 } else {
4661 tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
4663 } else {
4664 tcg_rm = cpu_reg(s, rm);
4665 tcg_rn = cpu_reg(s, rn);
4667 if (sf) {
4668 /* Specialization to ROR happens in EXTRACT2. */
4669 tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, imm);
4670 } else {
4671 TCGv_i32 t0 = tcg_temp_new_i32();
4673 tcg_gen_extrl_i64_i32(t0, tcg_rm);
4674 if (rm == rn) {
4675 tcg_gen_rotri_i32(t0, t0, imm);
4676 } else {
4677 TCGv_i32 t1 = tcg_temp_new_i32();
4678 tcg_gen_extrl_i64_i32(t1, tcg_rn);
4679 tcg_gen_extract2_i32(t0, t0, t1, imm);
4680 tcg_temp_free_i32(t1);
4682 tcg_gen_extu_i32_i64(tcg_rd, t0);
4683 tcg_temp_free_i32(t0);
4689 /* Data processing - immediate */
4690 static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
4692 switch (extract32(insn, 23, 6)) {
4693 case 0x20: case 0x21: /* PC-rel. addressing */
4694 disas_pc_rel_adr(s, insn);
4695 break;
4696 case 0x22: /* Add/subtract (immediate) */
4697 disas_add_sub_imm(s, insn);
4698 break;
4699 case 0x23: /* Add/subtract (immediate, with tags) */
4700 disas_add_sub_imm_with_tags(s, insn);
4701 break;
4702 case 0x24: /* Logical (immediate) */
4703 disas_logic_imm(s, insn);
4704 break;
4705 case 0x25: /* Move wide (immediate) */
4706 disas_movw_imm(s, insn);
4707 break;
4708 case 0x26: /* Bitfield */
4709 disas_bitfield(s, insn);
4710 break;
4711 case 0x27: /* Extract */
4712 disas_extract(s, insn);
4713 break;
4714 default:
4715 unallocated_encoding(s);
4716 break;
4720 /* Shift a TCGv src by TCGv shift_amount, put result in dst.
4721 * Note that it is the caller's responsibility to ensure that the
4722 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
4723 * mandated semantics for out of range shifts.
4725 static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
4726 enum a64_shift_type shift_type, TCGv_i64 shift_amount)
4728 switch (shift_type) {
4729 case A64_SHIFT_TYPE_LSL:
4730 tcg_gen_shl_i64(dst, src, shift_amount);
4731 break;
4732 case A64_SHIFT_TYPE_LSR:
4733 tcg_gen_shr_i64(dst, src, shift_amount);
4734 break;
4735 case A64_SHIFT_TYPE_ASR:
4736 if (!sf) {
4737 tcg_gen_ext32s_i64(dst, src);
4739 tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
4740 break;
4741 case A64_SHIFT_TYPE_ROR:
4742 if (sf) {
4743 tcg_gen_rotr_i64(dst, src, shift_amount);
4744 } else {
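/*
 * A 32-bit rotate must wrap within the low word; rotating the
 * zero-extended 64-bit value would pull zeros in at bit 31 instead,
 * so do the rotate at 32 bits and widen the result.
 */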
4745 TCGv_i32 t0, t1;
4746 t0 = tcg_temp_new_i32();
4747 t1 = tcg_temp_new_i32();
4748 tcg_gen_extrl_i64_i32(t0, src);
4749 tcg_gen_extrl_i64_i32(t1, shift_amount);
4750 tcg_gen_rotr_i32(t0, t0, t1);
4751 tcg_gen_extu_i32_i64(dst, t0);
4752 tcg_temp_free_i32(t0);
4753 tcg_temp_free_i32(t1);
4755 break;
4756 default:
4757 g_assert_not_reached(); /* all shift types should be handled */
4758 break;
4761 if (!sf) { /* zero extend final result */
4762 tcg_gen_ext32u_i64(dst, dst);
4766 /* Shift a TCGv src by immediate, put result in dst.
4767 * The shift amount must be in range (this should always be true as the
4768 * relevant instructions will UNDEF on bad shift immediates).
4770 static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
4771 enum a64_shift_type shift_type, unsigned int shift_i)
4773 assert(shift_i < (sf ? 64 : 32));
4775 if (shift_i == 0) {
4776 tcg_gen_mov_i64(dst, src);
4777 } else {
4778 shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i));
4782 /* Logical (shifted register)
4783 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
4784 * +----+-----+-----------+-------+---+------+--------+------+------+
4785 * | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd |
4786 * +----+-----+-----------+-------+---+------+--------+------+------+
4788 static void disas_logic_reg(DisasContext *s, uint32_t insn)
4790 TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
4791 unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
4793 sf = extract32(insn, 31, 1);
4794 opc = extract32(insn, 29, 2);
4795 shift_type = extract32(insn, 22, 2);
4796 invert = extract32(insn, 21, 1);
4797 rm = extract32(insn, 16, 5);
4798 shift_amount = extract32(insn, 10, 6);
4799 rn = extract32(insn, 5, 5);
4800 rd = extract32(insn, 0, 5);
4802 if (!sf && (shift_amount & (1 << 5))) {
4803 unallocated_encoding(s);
4804 return;
4807 tcg_rd = cpu_reg(s, rd);
4809 if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
4810 /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
4811 * register-register MOV and MVN, so it is worth special casing.
4813 tcg_rm = cpu_reg(s, rm);
4814 if (invert) {
4815 tcg_gen_not_i64(tcg_rd, tcg_rm);
4816 if (!sf) {
4817 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4819 } else {
4820 if (sf) {
4821 tcg_gen_mov_i64(tcg_rd, tcg_rm);
4822 } else {
4823 tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
4826 return;
4829 tcg_rm = read_cpu_reg(s, rm, sf);
4831 if (shift_amount) {
4832 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
4835 tcg_rn = cpu_reg(s, rn);
4837 switch (opc | (invert << 2)) {
4838 case 0: /* AND */
4839 case 3: /* ANDS */
4840 tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
4841 break;
4842 case 1: /* ORR */
4843 tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
4844 break;
4845 case 2: /* EOR */
4846 tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
4847 break;
4848 case 4: /* BIC */
4849 case 7: /* BICS */
4850 tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
4851 break;
4852 case 5: /* ORN */
4853 tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
4854 break;
4855 case 6: /* EON */
4856 tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
4857 break;
4858 default:
4859 g_assert_not_reached();
4860 break;
4863 if (!sf) {
4864 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4867 if (opc == 3) {
4868 gen_logic_CC(sf, tcg_rd);
4873 * Add/subtract (extended register)
4875 * 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0|
4876 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
4877 * |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd |
4878 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
4880 * sf: 0 -> 32bit, 1 -> 64bit
4881 * op: 0 -> add , 1 -> sub
4882 * S: 1 -> set flags
4883 * opt: 00
4884 * option: extension type (see DecodeRegExtend)
4885 * imm3: optional shift to Rm
4887 * Rd = Rn + LSL(extend(Rm), amount)
4889 static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
4891 int rd = extract32(insn, 0, 5);
4892 int rn = extract32(insn, 5, 5);
4893 int imm3 = extract32(insn, 10, 3);
4894 int option = extract32(insn, 13, 3);
4895 int rm = extract32(insn, 16, 5);
4896 int opt = extract32(insn, 22, 2);
4897 bool setflags = extract32(insn, 29, 1);
4898 bool sub_op = extract32(insn, 30, 1);
4899 bool sf = extract32(insn, 31, 1);
4901 TCGv_i64 tcg_rm, tcg_rn; /* temps */
4902 TCGv_i64 tcg_rd;
4903 TCGv_i64 tcg_result;
4905 if (imm3 > 4 || opt != 0) {
4906 unallocated_encoding(s);
4907 return;
4910 /* non-flag setting ops may use SP */
4911 if (!setflags) {
4912 tcg_rd = cpu_reg_sp(s, rd);
4913 } else {
4914 tcg_rd = cpu_reg(s, rd);
4916 tcg_rn = read_cpu_reg_sp(s, rn, sf);
4918 tcg_rm = read_cpu_reg(s, rm, sf);
4919 ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
4921 tcg_result = tcg_temp_new_i64();
4923 if (!setflags) {
4924 if (sub_op) {
4925 tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
4926 } else {
4927 tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
4929 } else {
4930 if (sub_op) {
4931 gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
4932 } else {
4933 gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
4937 if (sf) {
4938 tcg_gen_mov_i64(tcg_rd, tcg_result);
4939 } else {
4940 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
4943 tcg_temp_free_i64(tcg_result);
4947 * Add/subtract (shifted register)
4949 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
4950 * +--+--+--+-----------+-----+--+-------+---------+------+------+
4951 * |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd |
4952 * +--+--+--+-----------+-----+--+-------+---------+------+------+
4954 * sf: 0 -> 32bit, 1 -> 64bit
4955 * op: 0 -> add , 1 -> sub
4956 * S: 1 -> set flags
4957 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
4958 * imm6: Shift amount to apply to Rm before the add/sub
4960 static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
4962 int rd = extract32(insn, 0, 5);
4963 int rn = extract32(insn, 5, 5);
4964 int imm6 = extract32(insn, 10, 6);
4965 int rm = extract32(insn, 16, 5);
4966 int shift_type = extract32(insn, 22, 2);
4967 bool setflags = extract32(insn, 29, 1);
4968 bool sub_op = extract32(insn, 30, 1);
4969 bool sf = extract32(insn, 31, 1);
4971 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4972 TCGv_i64 tcg_rn, tcg_rm;
4973 TCGv_i64 tcg_result;
4975 if ((shift_type == 3) || (!sf && (imm6 > 31))) {
4976 unallocated_encoding(s);
4977 return;
4980 tcg_rn = read_cpu_reg(s, rn, sf);
4981 tcg_rm = read_cpu_reg(s, rm, sf);
4983 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
4985 tcg_result = tcg_temp_new_i64();
4987 if (!setflags) {
4988 if (sub_op) {
4989 tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
4990 } else {
4991 tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
4993 } else {
4994 if (sub_op) {
4995 gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
4996 } else {
4997 gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
5001 if (sf) {
5002 tcg_gen_mov_i64(tcg_rd, tcg_result);
5003 } else {
5004 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
5007 tcg_temp_free_i64(tcg_result);
5010 /* Data-processing (3 source)
5012 * 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0
5013 * +--+------+-----------+------+------+----+------+------+------+
5014 * |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd |
5015 * +--+------+-----------+------+------+----+------+------+------+
5017 static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
5019 int rd = extract32(insn, 0, 5);
5020 int rn = extract32(insn, 5, 5);
5021 int ra = extract32(insn, 10, 5);
5022 int rm = extract32(insn, 16, 5);
5023 int op_id = (extract32(insn, 29, 3) << 4) |
5024 (extract32(insn, 21, 3) << 1) |
5025 extract32(insn, 15, 1);
5026 bool sf = extract32(insn, 31, 1);
5027 bool is_sub = extract32(op_id, 0, 1);
5028 bool is_high = extract32(op_id, 2, 1);
5029 bool is_signed = false;
5030 TCGv_i64 tcg_op1;
5031 TCGv_i64 tcg_op2;
5032 TCGv_i64 tcg_tmp;
5034 /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
5035 switch (op_id) {
5036 case 0x42: /* SMADDL */
5037 case 0x43: /* SMSUBL */
5038 case 0x44: /* SMULH */
5039 is_signed = true;
5040 break;
5041 case 0x0: /* MADD (32bit) */
5042 case 0x1: /* MSUB (32bit) */
5043 case 0x40: /* MADD (64bit) */
5044 case 0x41: /* MSUB (64bit) */
5045 case 0x4a: /* UMADDL */
5046 case 0x4b: /* UMSUBL */
5047 case 0x4c: /* UMULH */
5048 break;
5049 default:
5050 unallocated_encoding(s);
5051 return;
5054 if (is_high) {
5055 TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
5056 TCGv_i64 tcg_rd = cpu_reg(s, rd);
5057 TCGv_i64 tcg_rn = cpu_reg(s, rn);
5058 TCGv_i64 tcg_rm = cpu_reg(s, rm);
5060 if (is_signed) {
5061 tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
5062 } else {
5063 tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
5066 tcg_temp_free_i64(low_bits);
5067 return;
5070 tcg_op1 = tcg_temp_new_i64();
5071 tcg_op2 = tcg_temp_new_i64();
5072 tcg_tmp = tcg_temp_new_i64();
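/*
 * op_id values below 0x42 are MADD/MSUB at native width; 0x42 and up
 * are the widening SMADDL/SMSUBL/UMADDL/UMSUBL forms, whose 32-bit
 * sources are sign- or zero-extended to 64 bits first.
 */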
5074 if (op_id < 0x42) {
5075 tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
5076 tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
5077 } else {
5078 if (is_signed) {
5079 tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
5080 tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
5081 } else {
5082 tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
5083 tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
5087 if (ra == 31 && !is_sub) {
5088 /* Special-case MADD with rA == XZR; it is the standard MUL alias */
5089 tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
5090 } else {
5091 tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
5092 if (is_sub) {
5093 tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
5094 } else {
5095 tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
5099 if (!sf) {
5100 tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
5103 tcg_temp_free_i64(tcg_op1);
5104 tcg_temp_free_i64(tcg_op2);
5105 tcg_temp_free_i64(tcg_tmp);
5108 /* Add/subtract (with carry)
5109 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0
5110 * +--+--+--+------------------------+------+-------------+------+-----+
5111 * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | 0 0 0 0 0 0 | Rn | Rd |
5112 * +--+--+--+------------------------+------+-------------+------+-----+
5115 static void disas_adc_sbc(DisasContext *s, uint32_t insn)
5117 unsigned int sf, op, setflags, rm, rn, rd;
5118 TCGv_i64 tcg_y, tcg_rn, tcg_rd;
5120 sf = extract32(insn, 31, 1);
5121 op = extract32(insn, 30, 1);
5122 setflags = extract32(insn, 29, 1);
5123 rm = extract32(insn, 16, 5);
5124 rn = extract32(insn, 5, 5);
5125 rd = extract32(insn, 0, 5);
5127 tcg_rd = cpu_reg(s, rd);
5128 tcg_rn = cpu_reg(s, rn);
5130 if (op) {
5131 tcg_y = new_tmp_a64(s);
5132 tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
5133 } else {
5134 tcg_y = cpu_reg(s, rm);
5137 if (setflags) {
5138 gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
5139 } else {
5140 gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
5145 * Rotate right into flags
5146 * 31 30 29 21 15 10 5 4 0
5147 * +--+--+--+-----------------+--------+-----------+------+--+------+
5148 * |sf|op| S| 1 1 0 1 0 0 0 0 | imm6 | 0 0 0 0 1 | Rn |o2| mask |
5149 * +--+--+--+-----------------+--------+-----------+------+--+------+
5151 static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
5153 int mask = extract32(insn, 0, 4);
5154 int o2 = extract32(insn, 4, 1);
5155 int rn = extract32(insn, 5, 5);
5156 int imm6 = extract32(insn, 15, 6);
5157 int sf_op_s = extract32(insn, 29, 3);
5158 TCGv_i64 tcg_rn;
5159 TCGv_i32 nzcv;
5161 if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
5162 unallocated_encoding(s);
5163 return;
5166 tcg_rn = read_cpu_reg(s, rn, 1);
5167 tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);
5169 nzcv = tcg_temp_new_i32();
5170 tcg_gen_extrl_i64_i32(nzcv, tcg_rn);
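/*
 * Bits [3:0] of the rotated value supply N:Z:C:V.  QEMU keeps N in
 * bit 31 of cpu_NF, Z as "set iff cpu_ZF == 0", C as a 0/1 value in
 * cpu_CF and V in bit 31 of cpu_VF, hence the shifts below and the
 * inverted handling of Z.
 */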
5172 if (mask & 8) { /* N */
5173 tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
5175 if (mask & 4) { /* Z */
5176 tcg_gen_not_i32(cpu_ZF, nzcv);
5177 tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
5179 if (mask & 2) { /* C */
5180 tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
5182 if (mask & 1) { /* V */
5183 tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
5186 tcg_temp_free_i32(nzcv);
5190 * Evaluate into flags
5191 * 31 30 29 21 15 14 10 5 4 0
5192 * +--+--+--+-----------------+---------+----+---------+------+--+------+
5193 * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 | Rn |o3| mask |
5194 * +--+--+--+-----------------+---------+----+---------+------+--+------+
5196 static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
5198 int o3_mask = extract32(insn, 0, 5);
5199 int rn = extract32(insn, 5, 5);
5200 int o2 = extract32(insn, 15, 6);
5201 int sz = extract32(insn, 14, 1);
5202 int sf_op_s = extract32(insn, 29, 3);
5203 TCGv_i32 tmp;
5204 int shift;
5206 if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
5207 !dc_isar_feature(aa64_condm_4, s)) {
5208 unallocated_encoding(s);
5209 return;
5211 shift = sz ? 16 : 24; /* SETF16 or SETF8 */
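/*
 * SETF8/SETF16 set N from bit 7/15, Z from whether the low 8/16 bits
 * are zero, and V from bit 8/16 XOR bit 7/15; C is unchanged.
 * Shifting the value left by 24/16 puts the relevant bits at bit 31
 * for cpu_NF/cpu_ZF, and the XOR below forms V.
 */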
5213 tmp = tcg_temp_new_i32();
5214 tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
5215 tcg_gen_shli_i32(cpu_NF, tmp, shift);
5216 tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
5217 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
5218 tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
5219 tcg_temp_free_i32(tmp);
5222 /* Conditional compare (immediate / register)
5223 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
5224 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
5225 * |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv |
5226 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
5227 * Fixed values: S == 1, o2 == 0, o3 == 0; imm5/rm is referred to as y.
5229 static void disas_cc(DisasContext *s, uint32_t insn)
5231 unsigned int sf, op, y, cond, rn, nzcv, is_imm;
5232 TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
5233 TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
5234 DisasCompare c;
5236 if (!extract32(insn, 29, 1)) {
5237 unallocated_encoding(s);
5238 return;
5240 if (insn & (1 << 10 | 1 << 4)) {
5241 unallocated_encoding(s);
5242 return;
5244 sf = extract32(insn, 31, 1);
5245 op = extract32(insn, 30, 1);
5246 is_imm = extract32(insn, 11, 1);
5247 y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
5248 cond = extract32(insn, 12, 4);
5249 rn = extract32(insn, 5, 5);
5250 nzcv = extract32(insn, 0, 4);
5252 /* Set T0 = !COND. */
5253 tcg_t0 = tcg_temp_new_i32();
5254 arm_test_cc(&c, cond);
5255 tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
5256 arm_free_cc(&c);
5258 /* Load the arguments for the new comparison. */
5259 if (is_imm) {
5260 tcg_y = new_tmp_a64(s);
5261 tcg_gen_movi_i64(tcg_y, y);
5262 } else {
5263 tcg_y = cpu_reg(s, y);
5265 tcg_rn = cpu_reg(s, rn);
5267 /* Set the flags for the new comparison. */
5268 tcg_tmp = tcg_temp_new_i64();
5269 if (op) {
5270 gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
5271 } else {
5272 gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
5274 tcg_temp_free_i64(tcg_tmp);
5276 /* If COND was false, force the flags to #nzcv. Compute two masks
5277 * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
5278 * For tcg hosts that support ANDC, we can make do with just T1.
5279 * In either case, allow the tcg optimizer to delete any unused mask.
5281 tcg_t1 = tcg_temp_new_i32();
5282 tcg_t2 = tcg_temp_new_i32();
5283 tcg_gen_neg_i32(tcg_t1, tcg_t0);
5284 tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
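/* T0 = (COND ? 0 : 1): OR with T0/T1 forces a flag on only when COND
 * failed, while AND with T2 (or ANDC with T1) clears it only in that
 * case, leaving the freshly computed flags intact when COND held. */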
5286 if (nzcv & 8) { /* N */
5287 tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
5288 } else {
5289 if (TCG_TARGET_HAS_andc_i32) {
5290 tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
5291 } else {
5292 tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
5295 if (nzcv & 4) { /* Z */
5296 if (TCG_TARGET_HAS_andc_i32) {
5297 tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
5298 } else {
5299 tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
5301 } else {
5302 tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
5304 if (nzcv & 2) { /* C */
5305 tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
5306 } else {
5307 if (TCG_TARGET_HAS_andc_i32) {
5308 tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
5309 } else {
5310 tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
5313 if (nzcv & 1) { /* V */
5314 tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
5315 } else {
5316 if (TCG_TARGET_HAS_andc_i32) {
5317 tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
5318 } else {
5319 tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
5322 tcg_temp_free_i32(tcg_t0);
5323 tcg_temp_free_i32(tcg_t1);
5324 tcg_temp_free_i32(tcg_t2);
5327 /* Conditional select
5328 * 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0
5329 * +----+----+---+-----------------+------+------+-----+------+------+
5330 * | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd |
5331 * +----+----+---+-----------------+------+------+-----+------+------+
5333 static void disas_cond_select(DisasContext *s, uint32_t insn)
5335 unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
5336 TCGv_i64 tcg_rd, zero;
5337 DisasCompare64 c;
5339 if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
5340 /* S == 1 or op2<1> == 1 */
5341 unallocated_encoding(s);
5342 return;
5344 sf = extract32(insn, 31, 1);
5345 else_inv = extract32(insn, 30, 1);
5346 rm = extract32(insn, 16, 5);
5347 cond = extract32(insn, 12, 4);
5348 else_inc = extract32(insn, 10, 1);
5349 rn = extract32(insn, 5, 5);
5350 rd = extract32(insn, 0, 5);
5352 tcg_rd = cpu_reg(s, rd);
5354 a64_test_cc(&c, cond);
5355 zero = tcg_constant_i64(0);
5357 if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
5358 /* CSET & CSETM. */
5359 tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
5360 if (else_inv) {
5361 tcg_gen_neg_i64(tcg_rd, tcg_rd);
5363 } else {
5364 TCGv_i64 t_true = cpu_reg(s, rn);
5365 TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
5366 if (else_inv && else_inc) {
5367 tcg_gen_neg_i64(t_false, t_false);
5368 } else if (else_inv) {
5369 tcg_gen_not_i64(t_false, t_false);
5370 } else if (else_inc) {
5371 tcg_gen_addi_i64(t_false, t_false, 1);
5373 tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
5376 a64_free_cc(&c);
5378 if (!sf) {
5379 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
5383 static void handle_clz(DisasContext *s, unsigned int sf,
5384 unsigned int rn, unsigned int rd)
5386 TCGv_i64 tcg_rd, tcg_rn;
5387 tcg_rd = cpu_reg(s, rd);
5388 tcg_rn = cpu_reg(s, rn);
5390 if (sf) {
5391 tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
5392 } else {
5393 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5394 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5395 tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
5396 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5397 tcg_temp_free_i32(tcg_tmp32);
5401 static void handle_cls(DisasContext *s, unsigned int sf,
5402 unsigned int rn, unsigned int rd)
5404 TCGv_i64 tcg_rd, tcg_rn;
5405 tcg_rd = cpu_reg(s, rd);
5406 tcg_rn = cpu_reg(s, rn);
5408 if (sf) {
5409 tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
5410 } else {
5411 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5412 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5413 tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
5414 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5415 tcg_temp_free_i32(tcg_tmp32);
5419 static void handle_rbit(DisasContext *s, unsigned int sf,
5420 unsigned int rn, unsigned int rd)
5422 TCGv_i64 tcg_rd, tcg_rn;
5423 tcg_rd = cpu_reg(s, rd);
5424 tcg_rn = cpu_reg(s, rn);
5426 if (sf) {
5427 gen_helper_rbit64(tcg_rd, tcg_rn);
5428 } else {
5429 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5430 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5431 gen_helper_rbit(tcg_tmp32, tcg_tmp32);
5432 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5433 tcg_temp_free_i32(tcg_tmp32);
5437 /* REV with sf==1, opcode==3 ("REV64") */
5438 static void handle_rev64(DisasContext *s, unsigned int sf,
5439 unsigned int rn, unsigned int rd)
5441 if (!sf) {
5442 unallocated_encoding(s);
5443 return;
5445 tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
5448 /* REV with sf==0, opcode==2
5449 * REV32 (sf==1, opcode==2)
5451 static void handle_rev32(DisasContext *s, unsigned int sf,
5452 unsigned int rn, unsigned int rd)
5454 TCGv_i64 tcg_rd = cpu_reg(s, rd);
5455 TCGv_i64 tcg_rn = cpu_reg(s, rn);
5457 if (sf) {
5458 tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
5459 tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
5460 } else {
5461 tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
5465 /* REV16 (opcode==1) */
5466 static void handle_rev16(DisasContext *s, unsigned int sf,
5467 unsigned int rn, unsigned int rd)
5469 TCGv_i64 tcg_rd = cpu_reg(s, rd);
5470 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
5471 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
5472 TCGv_i64 mask = tcg_constant_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);
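/*
 * REV16 swaps the two bytes within each 16-bit lane: the mask keeps
 * the even bytes, odd bytes are shifted down into them (tcg_tmp),
 * even bytes are shifted up (tcg_rd), and the OR recombines them.
 */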
5474 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
5475 tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
5476 tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
5477 tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
5478 tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
5480 tcg_temp_free_i64(tcg_tmp);
5483 /* Data-processing (1 source)
5484 * 31 30 29 28 21 20 16 15 10 9 5 4 0
5485 * +----+---+---+-----------------+---------+--------+------+------+
5486 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd |
5487 * +----+---+---+-----------------+---------+--------+------+------+
5489 static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
5491 unsigned int sf, opcode, opcode2, rn, rd;
5492 TCGv_i64 tcg_rd;
5494 if (extract32(insn, 29, 1)) {
5495 unallocated_encoding(s);
5496 return;
5499 sf = extract32(insn, 31, 1);
5500 opcode = extract32(insn, 10, 6);
5501 opcode2 = extract32(insn, 16, 5);
5502 rn = extract32(insn, 5, 5);
5503 rd = extract32(insn, 0, 5);
5505 #define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))
5507 switch (MAP(sf, opcode2, opcode)) {
5508 case MAP(0, 0x00, 0x00): /* RBIT */
5509 case MAP(1, 0x00, 0x00):
5510 handle_rbit(s, sf, rn, rd);
5511 break;
5512 case MAP(0, 0x00, 0x01): /* REV16 */
5513 case MAP(1, 0x00, 0x01):
5514 handle_rev16(s, sf, rn, rd);
5515 break;
5516 case MAP(0, 0x00, 0x02): /* REV/REV32 */
5517 case MAP(1, 0x00, 0x02):
5518 handle_rev32(s, sf, rn, rd);
5519 break;
5520 case MAP(1, 0x00, 0x03): /* REV64 */
5521 handle_rev64(s, sf, rn, rd);
5522 break;
5523 case MAP(0, 0x00, 0x04): /* CLZ */
5524 case MAP(1, 0x00, 0x04):
5525 handle_clz(s, sf, rn, rd);
5526 break;
5527 case MAP(0, 0x00, 0x05): /* CLS */
5528 case MAP(1, 0x00, 0x05):
5529 handle_cls(s, sf, rn, rd);
5530 break;
5531 case MAP(1, 0x01, 0x00): /* PACIA */
5532 if (s->pauth_active) {
5533 tcg_rd = cpu_reg(s, rd);
5534 gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5535 } else if (!dc_isar_feature(aa64_pauth, s)) {
5536 goto do_unallocated;
5538 break;
5539 case MAP(1, 0x01, 0x01): /* PACIB */
5540 if (s->pauth_active) {
5541 tcg_rd = cpu_reg(s, rd);
5542 gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5543 } else if (!dc_isar_feature(aa64_pauth, s)) {
5544 goto do_unallocated;
5546 break;
5547 case MAP(1, 0x01, 0x02): /* PACDA */
5548 if (s->pauth_active) {
5549 tcg_rd = cpu_reg(s, rd);
5550 gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5551 } else if (!dc_isar_feature(aa64_pauth, s)) {
5552 goto do_unallocated;
5554 break;
5555 case MAP(1, 0x01, 0x03): /* PACDB */
5556 if (s->pauth_active) {
5557 tcg_rd = cpu_reg(s, rd);
5558 gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5559 } else if (!dc_isar_feature(aa64_pauth, s)) {
5560 goto do_unallocated;
5562 break;
5563 case MAP(1, 0x01, 0x04): /* AUTIA */
5564 if (s->pauth_active) {
5565 tcg_rd = cpu_reg(s, rd);
5566 gen_helper_autia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5567 } else if (!dc_isar_feature(aa64_pauth, s)) {
5568 goto do_unallocated;
5570 break;
5571 case MAP(1, 0x01, 0x05): /* AUTIB */
5572 if (s->pauth_active) {
5573 tcg_rd = cpu_reg(s, rd);
5574 gen_helper_autib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5575 } else if (!dc_isar_feature(aa64_pauth, s)) {
5576 goto do_unallocated;
5578 break;
5579 case MAP(1, 0x01, 0x06): /* AUTDA */
5580 if (s->pauth_active) {
5581 tcg_rd = cpu_reg(s, rd);
5582 gen_helper_autda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5583 } else if (!dc_isar_feature(aa64_pauth, s)) {
5584 goto do_unallocated;
5586 break;
5587 case MAP(1, 0x01, 0x07): /* AUTDB */
5588 if (s->pauth_active) {
5589 tcg_rd = cpu_reg(s, rd);
5590 gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5591 } else if (!dc_isar_feature(aa64_pauth, s)) {
5592 goto do_unallocated;
5594 break;
5595 case MAP(1, 0x01, 0x08): /* PACIZA */
5596 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5597 goto do_unallocated;
5598 } else if (s->pauth_active) {
5599 tcg_rd = cpu_reg(s, rd);
5600 gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5602 break;
5603 case MAP(1, 0x01, 0x09): /* PACIZB */
5604 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5605 goto do_unallocated;
5606 } else if (s->pauth_active) {
5607 tcg_rd = cpu_reg(s, rd);
5608 gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5610 break;
5611 case MAP(1, 0x01, 0x0a): /* PACDZA */
5612 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5613 goto do_unallocated;
5614 } else if (s->pauth_active) {
5615 tcg_rd = cpu_reg(s, rd);
5616 gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5618 break;
5619 case MAP(1, 0x01, 0x0b): /* PACDZB */
5620 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5621 goto do_unallocated;
5622 } else if (s->pauth_active) {
5623 tcg_rd = cpu_reg(s, rd);
5624 gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5626 break;
5627 case MAP(1, 0x01, 0x0c): /* AUTIZA */
5628 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5629 goto do_unallocated;
5630 } else if (s->pauth_active) {
5631 tcg_rd = cpu_reg(s, rd);
5632 gen_helper_autia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5634 break;
5635 case MAP(1, 0x01, 0x0d): /* AUTIZB */
5636 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5637 goto do_unallocated;
5638 } else if (s->pauth_active) {
5639 tcg_rd = cpu_reg(s, rd);
5640 gen_helper_autib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5642 break;
5643 case MAP(1, 0x01, 0x0e): /* AUTDZA */
5644 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5645 goto do_unallocated;
5646 } else if (s->pauth_active) {
5647 tcg_rd = cpu_reg(s, rd);
5648 gen_helper_autda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5650 break;
5651 case MAP(1, 0x01, 0x0f): /* AUTDZB */
5652 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5653 goto do_unallocated;
5654 } else if (s->pauth_active) {
5655 tcg_rd = cpu_reg(s, rd);
5656 gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
5658 break;
5659 case MAP(1, 0x01, 0x10): /* XPACI */
5660 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5661 goto do_unallocated;
5662 } else if (s->pauth_active) {
5663 tcg_rd = cpu_reg(s, rd);
5664 gen_helper_xpaci(tcg_rd, cpu_env, tcg_rd);
5666 break;
5667 case MAP(1, 0x01, 0x11): /* XPACD */
5668 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5669 goto do_unallocated;
5670 } else if (s->pauth_active) {
5671 tcg_rd = cpu_reg(s, rd);
5672 gen_helper_xpacd(tcg_rd, cpu_env, tcg_rd);
5674 break;
5675 default:
5676 do_unallocated:
5677 unallocated_encoding(s);
5678 break;
5681 #undef MAP
5684 static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
5685 unsigned int rm, unsigned int rn, unsigned int rd)
5687 TCGv_i64 tcg_n, tcg_m, tcg_rd;
5688 tcg_rd = cpu_reg(s, rd);
5690 if (!sf && is_signed) {
5691 tcg_n = new_tmp_a64(s);
5692 tcg_m = new_tmp_a64(s);
5693 tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
5694 tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
5695 } else {
5696 tcg_n = read_cpu_reg(s, rn, sf);
5697 tcg_m = read_cpu_reg(s, rm, sf);
5700 if (is_signed) {
5701 gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
5702 } else {
5703 gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
5706 if (!sf) { /* zero extend final result */
5707 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
5711 /* LSLV, LSRV, ASRV, RORV */
5712 static void handle_shift_reg(DisasContext *s,
5713 enum a64_shift_type shift_type, unsigned int sf,
5714 unsigned int rm, unsigned int rn, unsigned int rd)
5716 TCGv_i64 tcg_shift = tcg_temp_new_i64();
5717 TCGv_i64 tcg_rd = cpu_reg(s, rd);
5718 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
5720 tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
5721 shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
5722 tcg_temp_free_i64(tcg_shift);
5725 /* CRC32[BHWX], CRC32C[BHWX] */
5726 static void handle_crc32(DisasContext *s,
5727 unsigned int sf, unsigned int sz, bool crc32c,
5728 unsigned int rm, unsigned int rn, unsigned int rd)
5730 TCGv_i64 tcg_acc, tcg_val;
5731 TCGv_i32 tcg_bytes;
5733 if (!dc_isar_feature(aa64_crc32, s)
5734 || (sf == 1 && sz != 3)
5735 || (sf == 0 && sz == 3)) {
5736 unallocated_encoding(s);
5737 return;
5740 if (sz == 3) {
5741 tcg_val = cpu_reg(s, rm);
5742 } else {
5743 uint64_t mask;
5744 switch (sz) {
5745 case 0:
5746 mask = 0xFF;
5747 break;
5748 case 1:
5749 mask = 0xFFFF;
5750 break;
5751 case 2:
5752 mask = 0xFFFFFFFF;
5753 break;
5754 default:
5755 g_assert_not_reached();
5757 tcg_val = new_tmp_a64(s);
5758 tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
5761 tcg_acc = cpu_reg(s, rn);
5762 tcg_bytes = tcg_constant_i32(1 << sz);
5764 if (crc32c) {
5765 gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
5766 } else {
5767 gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
5771 /* Data-processing (2 source)
5772 * 31 30 29 28 21 20 16 15 10 9 5 4 0
5773 * +----+---+---+-----------------+------+--------+------+------+
5774 * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd |
5775 * +----+---+---+-----------------+------+--------+------+------+
5777 static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
5779 unsigned int sf, rm, opcode, rn, rd, setflag;
5780 sf = extract32(insn, 31, 1);
5781 setflag = extract32(insn, 29, 1);
5782 rm = extract32(insn, 16, 5);
5783 opcode = extract32(insn, 10, 6);
5784 rn = extract32(insn, 5, 5);
5785 rd = extract32(insn, 0, 5);
5787 if (setflag && opcode != 0) {
5788 unallocated_encoding(s);
5789 return;
5792 switch (opcode) {
5793 case 0: /* SUBP(S) */
5794 if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
5795 goto do_unallocated;
5796 } else {
5797 TCGv_i64 tcg_n, tcg_m, tcg_d;
5799 tcg_n = read_cpu_reg_sp(s, rn, true);
5800 tcg_m = read_cpu_reg_sp(s, rm, true);
5801 tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56);
5802 tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56);
5803 tcg_d = cpu_reg(s, rd);
5805 if (setflag) {
5806 gen_sub_CC(true, tcg_d, tcg_n, tcg_m);
5807 } else {
5808 tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m);
5811 break;
5812 case 2: /* UDIV */
5813 handle_div(s, false, sf, rm, rn, rd);
5814 break;
5815 case 3: /* SDIV */
5816 handle_div(s, true, sf, rm, rn, rd);
5817 break;
5818 case 4: /* IRG */
5819 if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
5820 goto do_unallocated;
5822 if (s->ata) {
5823 gen_helper_irg(cpu_reg_sp(s, rd), cpu_env,
5824 cpu_reg_sp(s, rn), cpu_reg(s, rm));
5825 } else {
5826 gen_address_with_allocation_tag0(cpu_reg_sp(s, rd),
5827 cpu_reg_sp(s, rn));
5829 break;
5830 case 5: /* GMI */
5831 if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
5832 goto do_unallocated;
5833 } else {
5834 TCGv_i64 t = tcg_temp_new_i64();
5836 tcg_gen_extract_i64(t, cpu_reg_sp(s, rn), 56, 4);
5837 tcg_gen_shl_i64(t, tcg_constant_i64(1), t);
5838 tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t);
5840 tcg_temp_free_i64(t);
5842 break;
5843 case 8: /* LSLV */
5844 handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
5845 break;
5846 case 9: /* LSRV */
5847 handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
5848 break;
5849 case 10: /* ASRV */
5850 handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
5851 break;
5852 case 11: /* RORV */
5853 handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
5854 break;
5855 case 12: /* PACGA */
5856 if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
5857 goto do_unallocated;
5859 gen_helper_pacga(cpu_reg(s, rd), cpu_env,
5860 cpu_reg(s, rn), cpu_reg_sp(s, rm));
5861 break;
5862 case 16:
5863 case 17:
5864 case 18:
5865 case 19:
5866 case 20:
5867 case 21:
5868 case 22:
5869 case 23: /* CRC32 */
5871 int sz = extract32(opcode, 0, 2);
5872 bool crc32c = extract32(opcode, 2, 1);
5873 handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
5874 break;
5876 default:
5877 do_unallocated:
5878 unallocated_encoding(s);
5879 break;
5884 * Data processing - register
5885 * 31 30 29 28 25 21 20 16 10 0
5886 * +--+---+--+---+-------+-----+-------+-------+---------+
5887 * | |op0| |op1| 1 0 1 | op2 | | op3 | |
5888 * +--+---+--+---+-------+-----+-------+-------+---------+
5890 static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
5892 int op0 = extract32(insn, 30, 1);
5893 int op1 = extract32(insn, 28, 1);
5894 int op2 = extract32(insn, 21, 4);
5895 int op3 = extract32(insn, 10, 6);
5897 if (!op1) {
5898 if (op2 & 8) {
5899 if (op2 & 1) {
5900 /* Add/sub (extended register) */
5901 disas_add_sub_ext_reg(s, insn);
5902 } else {
5903 /* Add/sub (shifted register) */
5904 disas_add_sub_reg(s, insn);
5906 } else {
5907 /* Logical (shifted register) */
5908 disas_logic_reg(s, insn);
5910 return;
5913 switch (op2) {
5914 case 0x0:
5915 switch (op3) {
5916 case 0x00: /* Add/subtract (with carry) */
5917 disas_adc_sbc(s, insn);
5918 break;
5920 case 0x01: /* Rotate right into flags */
5921 case 0x21:
5922 disas_rotate_right_into_flags(s, insn);
5923 break;
5925 case 0x02: /* Evaluate into flags */
5926 case 0x12:
5927 case 0x22:
5928 case 0x32:
5929 disas_evaluate_into_flags(s, insn);
5930 break;
5932 default:
5933 goto do_unallocated;
5935 break;
5937 case 0x2: /* Conditional compare */
5938 disas_cc(s, insn); /* both imm and reg forms */
5939 break;
5941 case 0x4: /* Conditional select */
5942 disas_cond_select(s, insn);
5943 break;
5945 case 0x6: /* Data-processing */
5946 if (op0) { /* (1 source) */
5947 disas_data_proc_1src(s, insn);
5948 } else { /* (2 source) */
5949 disas_data_proc_2src(s, insn);
5951 break;
5952 case 0x8 ... 0xf: /* (3 source) */
5953 disas_data_proc_3src(s, insn);
5954 break;
5956 default:
5957 do_unallocated:
5958 unallocated_encoding(s);
5959 break;
5963 static void handle_fp_compare(DisasContext *s, int size,
5964 unsigned int rn, unsigned int rm,
5965 bool cmp_with_zero, bool signal_all_nans)
5967 TCGv_i64 tcg_flags = tcg_temp_new_i64();
5968 TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
5970 if (size == MO_64) {
5971 TCGv_i64 tcg_vn, tcg_vm;
5973 tcg_vn = read_fp_dreg(s, rn);
5974 if (cmp_with_zero) {
5975 tcg_vm = tcg_constant_i64(0);
5976 } else {
5977 tcg_vm = read_fp_dreg(s, rm);
5979 if (signal_all_nans) {
5980 gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5981 } else {
5982 gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5984 tcg_temp_free_i64(tcg_vn);
5985 tcg_temp_free_i64(tcg_vm);
5986 } else {
5987 TCGv_i32 tcg_vn = tcg_temp_new_i32();
5988 TCGv_i32 tcg_vm = tcg_temp_new_i32();
5990 read_vec_element_i32(s, tcg_vn, rn, 0, size);
5991 if (cmp_with_zero) {
5992 tcg_gen_movi_i32(tcg_vm, 0);
5993 } else {
5994 read_vec_element_i32(s, tcg_vm, rm, 0, size);
5997 switch (size) {
5998 case MO_32:
5999 if (signal_all_nans) {
6000 gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
6001 } else {
6002 gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
6004 break;
6005 case MO_16:
6006 if (signal_all_nans) {
6007 gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
6008 } else {
6009 gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
6011 break;
6012 default:
6013 g_assert_not_reached();
6016 tcg_temp_free_i32(tcg_vn);
6017 tcg_temp_free_i32(tcg_vm);
6020 tcg_temp_free_ptr(fpst);
6022 gen_set_nzcv(tcg_flags);
6024 tcg_temp_free_i64(tcg_flags);
6027 /* Floating point compare
6028 * 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0
6029 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
6030 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 |
6031 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
6033 static void disas_fp_compare(DisasContext *s, uint32_t insn)
6035 unsigned int mos, type, rm, op, rn, opc, op2r;
6036 int size;
6038 mos = extract32(insn, 29, 3);
6039 type = extract32(insn, 22, 2);
6040 rm = extract32(insn, 16, 5);
6041 op = extract32(insn, 14, 2);
6042 rn = extract32(insn, 5, 5);
6043 opc = extract32(insn, 3, 2);
6044 op2r = extract32(insn, 0, 3);
6046 if (mos || op || op2r) {
6047 unallocated_encoding(s);
6048 return;
6051 switch (type) {
6052 case 0:
6053 size = MO_32;
6054 break;
6055 case 1:
6056 size = MO_64;
6057 break;
6058 case 3:
6059 size = MO_16;
6060 if (dc_isar_feature(aa64_fp16, s)) {
6061 break;
6063 /* fallthru */
6064 default:
6065 unallocated_encoding(s);
6066 return;
6069 if (!fp_access_check(s)) {
6070 return;
6073 handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
6076 /* Floating point conditional compare
6077 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
6078 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
6079 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv |
6080 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
6082 static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
6084 unsigned int mos, type, rm, cond, rn, op, nzcv;
6085 TCGLabel *label_continue = NULL;
6086 int size;
6088 mos = extract32(insn, 29, 3);
6089 type = extract32(insn, 22, 2);
6090 rm = extract32(insn, 16, 5);
6091 cond = extract32(insn, 12, 4);
6092 rn = extract32(insn, 5, 5);
6093 op = extract32(insn, 4, 1);
6094 nzcv = extract32(insn, 0, 4);
6096 if (mos) {
6097 unallocated_encoding(s);
6098 return;
6101 switch (type) {
6102 case 0:
6103 size = MO_32;
6104 break;
6105 case 1:
6106 size = MO_64;
6107 break;
6108 case 3:
6109 size = MO_16;
6110 if (dc_isar_feature(aa64_fp16, s)) {
6111 break;
6113 /* fallthru */
6114 default:
6115 unallocated_encoding(s);
6116 return;
6119 if (!fp_access_check(s)) {
6120 return;
6123 if (cond < 0x0e) { /* not always */
6124 TCGLabel *label_match = gen_new_label();
6125 label_continue = gen_new_label();
6126 arm_gen_test_cc(cond, label_match);
6127 /* nomatch: */
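/* On a failed condition the flags come straight from the immediate;
 * gen_set_nzcv() consumes NZCV from bits [31:28], hence the shift. */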
6128 gen_set_nzcv(tcg_constant_i64(nzcv << 28));
6129 tcg_gen_br(label_continue);
6130 gen_set_label(label_match);
6133 handle_fp_compare(s, size, rn, rm, false, op);
6135 if (cond < 0x0e) {
6136 gen_set_label(label_continue);
6140 /* Floating point conditional select
6141 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
6142 * +---+---+---+-----------+------+---+------+------+-----+------+------+
6143 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 1 1 | Rn | Rd |
6144 * +---+---+---+-----------+------+---+------+------+-----+------+------+
6146 static void disas_fp_csel(DisasContext *s, uint32_t insn)
6148 unsigned int mos, type, rm, cond, rn, rd;
6149 TCGv_i64 t_true, t_false;
6150 DisasCompare64 c;
6151 MemOp sz;
6153 mos = extract32(insn, 29, 3);
6154 type = extract32(insn, 22, 2);
6155 rm = extract32(insn, 16, 5);
6156 cond = extract32(insn, 12, 4);
6157 rn = extract32(insn, 5, 5);
6158 rd = extract32(insn, 0, 5);
6160 if (mos) {
6161 unallocated_encoding(s);
6162 return;
6165 switch (type) {
6166 case 0:
6167 sz = MO_32;
6168 break;
6169 case 1:
6170 sz = MO_64;
6171 break;
6172 case 3:
6173 sz = MO_16;
6174 if (dc_isar_feature(aa64_fp16, s)) {
6175 break;
6177 /* fallthru */
6178 default:
6179 unallocated_encoding(s);
6180 return;
6183 if (!fp_access_check(s)) {
6184 return;
6187 /* Zero extend sreg & hreg inputs to 64 bits now. */
6188 t_true = tcg_temp_new_i64();
6189 t_false = tcg_temp_new_i64();
6190 read_vec_element(s, t_true, rn, 0, sz);
6191 read_vec_element(s, t_false, rm, 0, sz);
6193 a64_test_cc(&c, cond);
6194 tcg_gen_movcond_i64(c.cond, t_true, c.value, tcg_constant_i64(0),
6195 t_true, t_false);
6196 tcg_temp_free_i64(t_false);
6197 a64_free_cc(&c);
6199 /* Note that sregs & hregs write back zeros to the high bits,
6200 and we've already done the zero-extension. */
6201 write_fp_dreg(s, rd, t_true);
6202 tcg_temp_free_i64(t_true);
6205 /* Floating-point data-processing (1 source) - half precision */
6206 static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
6208 TCGv_ptr fpst = NULL;
6209 TCGv_i32 tcg_op = read_fp_hreg(s, rn);
6210 TCGv_i32 tcg_res = tcg_temp_new_i32();
6212 switch (opcode) {
6213 case 0x0: /* FMOV */
6214 tcg_gen_mov_i32(tcg_res, tcg_op);
6215 break;
6216 case 0x1: /* FABS */
6217 tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
6218 break;
6219 case 0x2: /* FNEG */
6220 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
6221 break;
6222 case 0x3: /* FSQRT */
6223 fpst = fpstatus_ptr(FPST_FPCR_F16);
6224 gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
6225 break;
6226 case 0x8: /* FRINTN */
6227 case 0x9: /* FRINTP */
6228 case 0xa: /* FRINTM */
6229 case 0xb: /* FRINTZ */
6230 case 0xc: /* FRINTA */
6232 TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
6233 fpst = fpstatus_ptr(FPST_FPCR_F16);
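/* gen_helper_set_rmode installs the new rounding mode and returns the
 * previous one in tcg_rmode, so the second call below restores the
 * rounding mode that was in force on entry.
 */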
6235 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
6236 gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
6238 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
6239 tcg_temp_free_i32(tcg_rmode);
6240 break;
6242 case 0xe: /* FRINTX */
6243 fpst = fpstatus_ptr(FPST_FPCR_F16);
6244 gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
6245 break;
6246 case 0xf: /* FRINTI */
6247 fpst = fpstatus_ptr(FPST_FPCR_F16);
6248 gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
6249 break;
6250 default:
6251 g_assert_not_reached();
6254 write_fp_sreg(s, rd, tcg_res);
6256 if (fpst) {
6257 tcg_temp_free_ptr(fpst);
6259 tcg_temp_free_i32(tcg_op);
6260 tcg_temp_free_i32(tcg_res);
6263 /* Floating-point data-processing (1 source) - single precision */
6264 static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
6266 void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
6267 TCGv_i32 tcg_op, tcg_res;
6268 TCGv_ptr fpst;
6269 int rmode = -1;
6271 tcg_op = read_fp_sreg(s, rn);
6272 tcg_res = tcg_temp_new_i32();
6274 switch (opcode) {
6275 case 0x0: /* FMOV */
6276 tcg_gen_mov_i32(tcg_res, tcg_op);
6277 goto done;
6278 case 0x1: /* FABS */
6279 gen_helper_vfp_abss(tcg_res, tcg_op);
6280 goto done;
6281 case 0x2: /* FNEG */
6282 gen_helper_vfp_negs(tcg_res, tcg_op);
6283 goto done;
6284 case 0x3: /* FSQRT */
6285 gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
6286 goto done;
6287 case 0x6: /* BFCVT */
6288 gen_fpst = gen_helper_bfcvt;
6289 break;
6290 case 0x8: /* FRINTN */
6291 case 0x9: /* FRINTP */
6292 case 0xa: /* FRINTM */
6293 case 0xb: /* FRINTZ */
6294 case 0xc: /* FRINTA */
6295 rmode = arm_rmode_to_sf(opcode & 7);
6296 gen_fpst = gen_helper_rints;
6297 break;
6298 case 0xe: /* FRINTX */
6299 gen_fpst = gen_helper_rints_exact;
6300 break;
6301 case 0xf: /* FRINTI */
6302 gen_fpst = gen_helper_rints;
6303 break;
6304 case 0x10: /* FRINT32Z */
6305 rmode = float_round_to_zero;
6306 gen_fpst = gen_helper_frint32_s;
6307 break;
6308 case 0x11: /* FRINT32X */
6309 gen_fpst = gen_helper_frint32_s;
6310 break;
6311 case 0x12: /* FRINT64Z */
6312 rmode = float_round_to_zero;
6313 gen_fpst = gen_helper_frint64_s;
6314 break;
6315 case 0x13: /* FRINT64X */
6316 gen_fpst = gen_helper_frint64_s;
6317 break;
6318 default:
6319 g_assert_not_reached();
6322 fpst = fpstatus_ptr(FPST_FPCR);
6323 if (rmode >= 0) {
6324 TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
6325 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
6326 gen_fpst(tcg_res, tcg_op, fpst);
6327 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
6328 tcg_temp_free_i32(tcg_rmode);
6329 } else {
6330 gen_fpst(tcg_res, tcg_op, fpst);
6332 tcg_temp_free_ptr(fpst);
6334 done:
6335 write_fp_sreg(s, rd, tcg_res);
6336 tcg_temp_free_i32(tcg_op);
6337 tcg_temp_free_i32(tcg_res);
6340 /* Floating-point data-processing (1 source) - double precision */
6341 static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
6343 void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
6344 TCGv_i64 tcg_op, tcg_res;
6345 TCGv_ptr fpst;
6346 int rmode = -1;
6348 switch (opcode) {
6349 case 0x0: /* FMOV */
6350 gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
6351 return;
6354 tcg_op = read_fp_dreg(s, rn);
6355 tcg_res = tcg_temp_new_i64();
6357 switch (opcode) {
6358 case 0x1: /* FABS */
6359 gen_helper_vfp_absd(tcg_res, tcg_op);
6360 goto done;
6361 case 0x2: /* FNEG */
6362 gen_helper_vfp_negd(tcg_res, tcg_op);
6363 goto done;
6364 case 0x3: /* FSQRT */
6365 gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
6366 goto done;
6367 case 0x8: /* FRINTN */
6368 case 0x9: /* FRINTP */
6369 case 0xa: /* FRINTM */
6370 case 0xb: /* FRINTZ */
6371 case 0xc: /* FRINTA */
6372 rmode = arm_rmode_to_sf(opcode & 7);
6373 gen_fpst = gen_helper_rintd;
6374 break;
6375 case 0xe: /* FRINTX */
6376 gen_fpst = gen_helper_rintd_exact;
6377 break;
6378 case 0xf: /* FRINTI */
6379 gen_fpst = gen_helper_rintd;
6380 break;
6381 case 0x10: /* FRINT32Z */
6382 rmode = float_round_to_zero;
6383 gen_fpst = gen_helper_frint32_d;
6384 break;
6385 case 0x11: /* FRINT32X */
6386 gen_fpst = gen_helper_frint32_d;
6387 break;
6388 case 0x12: /* FRINT64Z */
6389 rmode = float_round_to_zero;
6390 gen_fpst = gen_helper_frint64_d;
6391 break;
6392 case 0x13: /* FRINT64X */
6393 gen_fpst = gen_helper_frint64_d;
6394 break;
6395 default:
6396 g_assert_not_reached();
6399 fpst = fpstatus_ptr(FPST_FPCR);
6400 if (rmode >= 0) {
6401 TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
6402 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
6403 gen_fpst(tcg_res, tcg_op, fpst);
6404 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
6405 tcg_temp_free_i32(tcg_rmode);
6406 } else {
6407 gen_fpst(tcg_res, tcg_op, fpst);
6409 tcg_temp_free_ptr(fpst);
6411 done:
6412 write_fp_dreg(s, rd, tcg_res);
6413 tcg_temp_free_i64(tcg_op);
6414 tcg_temp_free_i64(tcg_res);
6417 static void handle_fp_fcvt(DisasContext *s, int opcode,
6418 int rd, int rn, int dtype, int ntype)
6420 switch (ntype) {
6421 case 0x0:
6423 TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
6424 if (dtype == 1) {
6425 /* Single to double */
6426 TCGv_i64 tcg_rd = tcg_temp_new_i64();
6427 gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
6428 write_fp_dreg(s, rd, tcg_rd);
6429 tcg_temp_free_i64(tcg_rd);
6430 } else {
6431 /* Single to half */
6432 TCGv_i32 tcg_rd = tcg_temp_new_i32();
6433 TCGv_i32 ahp = get_ahp_flag();
6434 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6436 gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
6437 /* write_fp_sreg is OK here because top half of tcg_rd is zero */
6438 write_fp_sreg(s, rd, tcg_rd);
6439 tcg_temp_free_i32(tcg_rd);
6440 tcg_temp_free_i32(ahp);
6441 tcg_temp_free_ptr(fpst);
6443 tcg_temp_free_i32(tcg_rn);
6444 break;
6446 case 0x1:
6448 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
6449 TCGv_i32 tcg_rd = tcg_temp_new_i32();
6450 if (dtype == 0) {
6451 /* Double to single */
6452 gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
6453 } else {
6454 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6455 TCGv_i32 ahp = get_ahp_flag();
6456 /* Double to half */
6457 gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);
6458 /* write_fp_sreg is OK here because top half of tcg_rd is zero */
6459 tcg_temp_free_ptr(fpst);
6460 tcg_temp_free_i32(ahp);
6462 write_fp_sreg(s, rd, tcg_rd);
6463 tcg_temp_free_i32(tcg_rd);
6464 tcg_temp_free_i64(tcg_rn);
6465 break;
6467 case 0x3:
6469 TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
6470 TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR);
6471 TCGv_i32 tcg_ahp = get_ahp_flag();
6472 tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
6473 if (dtype == 0) {
6474 /* Half to single */
6475 TCGv_i32 tcg_rd = tcg_temp_new_i32();
6476 gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
6477 write_fp_sreg(s, rd, tcg_rd);
6478 tcg_temp_free_i32(tcg_rd);
6479 } else {
6480 /* Half to double */
6481 TCGv_i64 tcg_rd = tcg_temp_new_i64();
6482 gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
6483 write_fp_dreg(s, rd, tcg_rd);
6484 tcg_temp_free_i64(tcg_rd);
6486 tcg_temp_free_i32(tcg_rn);
6487 tcg_temp_free_ptr(tcg_fpst);
6488 tcg_temp_free_i32(tcg_ahp);
6489 break;
6491 default:
6492 g_assert_not_reached();
6496 /* Floating point data-processing (1 source)
6497 * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0
6498 * +---+---+---+-----------+------+---+--------+-----------+------+------+
6499 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd |
6500 * +---+---+---+-----------+------+---+--------+-----------+------+------+
6502 static void disas_fp_1src(DisasContext *s, uint32_t insn)
6504 int mos = extract32(insn, 29, 3);
6505 int type = extract32(insn, 22, 2);
6506 int opcode = extract32(insn, 15, 6);
6507 int rn = extract32(insn, 5, 5);
6508 int rd = extract32(insn, 0, 5);
6510 if (mos) {
6511 goto do_unallocated;
6514 switch (opcode) {
6515 case 0x4: case 0x5: case 0x7:
6517 /* FCVT between half, single and double precision */
6518 int dtype = extract32(opcode, 0, 2);
6519 if (type == 2 || dtype == type) {
6520 goto do_unallocated;
6522 if (!fp_access_check(s)) {
6523 return;
6526 handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
6527 break;
6530 case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
6531 if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
6532 goto do_unallocated;
6534 /* fall through */
6535 case 0x0 ... 0x3:
6536 case 0x8 ... 0xc:
6537 case 0xe ... 0xf:
6538 /* 32-to-32 and 64-to-64 ops */
6539 switch (type) {
6540 case 0:
6541 if (!fp_access_check(s)) {
6542 return;
6544 handle_fp_1src_single(s, opcode, rd, rn);
6545 break;
6546 case 1:
6547 if (!fp_access_check(s)) {
6548 return;
6550 handle_fp_1src_double(s, opcode, rd, rn);
6551 break;
6552 case 3:
6553 if (!dc_isar_feature(aa64_fp16, s)) {
6554 goto do_unallocated;
6557 if (!fp_access_check(s)) {
6558 return;
6560 handle_fp_1src_half(s, opcode, rd, rn);
6561 break;
6562 default:
6563 goto do_unallocated;
6565 break;
6567 case 0x6:
6568 switch (type) {
6569 case 1: /* BFCVT */
6570 if (!dc_isar_feature(aa64_bf16, s)) {
6571 goto do_unallocated;
6573 if (!fp_access_check(s)) {
6574 return;
6576 handle_fp_1src_single(s, opcode, rd, rn);
6577 break;
6578 default:
6579 goto do_unallocated;
6581 break;
6583 default:
6584 do_unallocated:
6585 unallocated_encoding(s);
6586 break;
6590 /* Floating-point data-processing (2 source) - single precision */
6591 static void handle_fp_2src_single(DisasContext *s, int opcode,
6592 int rd, int rn, int rm)
6594 TCGv_i32 tcg_op1;
6595 TCGv_i32 tcg_op2;
6596 TCGv_i32 tcg_res;
6597 TCGv_ptr fpst;
6599 tcg_res = tcg_temp_new_i32();
6600 fpst = fpstatus_ptr(FPST_FPCR);
6601 tcg_op1 = read_fp_sreg(s, rn);
6602 tcg_op2 = read_fp_sreg(s, rm);
6604 switch (opcode) {
6605 case 0x0: /* FMUL */
6606 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6607 break;
6608 case 0x1: /* FDIV */
6609 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
6610 break;
6611 case 0x2: /* FADD */
6612 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
6613 break;
6614 case 0x3: /* FSUB */
6615 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
6616 break;
6617 case 0x4: /* FMAX */
6618 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
6619 break;
6620 case 0x5: /* FMIN */
6621 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
6622 break;
6623 case 0x6: /* FMAXNM */
6624 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
6625 break;
6626 case 0x7: /* FMINNM */
6627 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
6628 break;
6629 case 0x8: /* FNMUL */
6630 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6631 gen_helper_vfp_negs(tcg_res, tcg_res);
6632 break;
6635 write_fp_sreg(s, rd, tcg_res);
6637 tcg_temp_free_ptr(fpst);
6638 tcg_temp_free_i32(tcg_op1);
6639 tcg_temp_free_i32(tcg_op2);
6640 tcg_temp_free_i32(tcg_res);
6643 /* Floating-point data-processing (2 source) - double precision */
6644 static void handle_fp_2src_double(DisasContext *s, int opcode,
6645 int rd, int rn, int rm)
6647 TCGv_i64 tcg_op1;
6648 TCGv_i64 tcg_op2;
6649 TCGv_i64 tcg_res;
6650 TCGv_ptr fpst;
6652 tcg_res = tcg_temp_new_i64();
6653 fpst = fpstatus_ptr(FPST_FPCR);
6654 tcg_op1 = read_fp_dreg(s, rn);
6655 tcg_op2 = read_fp_dreg(s, rm);
6657 switch (opcode) {
6658 case 0x0: /* FMUL */
6659 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6660 break;
6661 case 0x1: /* FDIV */
6662 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
6663 break;
6664 case 0x2: /* FADD */
6665 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
6666 break;
6667 case 0x3: /* FSUB */
6668 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
6669 break;
6670 case 0x4: /* FMAX */
6671 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
6672 break;
6673 case 0x5: /* FMIN */
6674 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
6675 break;
6676 case 0x6: /* FMAXNM */
6677 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6678 break;
6679 case 0x7: /* FMINNM */
6680 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6681 break;
6682 case 0x8: /* FNMUL */
6683 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6684 gen_helper_vfp_negd(tcg_res, tcg_res);
6685 break;
6688 write_fp_dreg(s, rd, tcg_res);
6690 tcg_temp_free_ptr(fpst);
6691 tcg_temp_free_i64(tcg_op1);
6692 tcg_temp_free_i64(tcg_op2);
6693 tcg_temp_free_i64(tcg_res);
6696 /* Floating-point data-processing (2 source) - half precision */
6697 static void handle_fp_2src_half(DisasContext *s, int opcode,
6698 int rd, int rn, int rm)
6700 TCGv_i32 tcg_op1;
6701 TCGv_i32 tcg_op2;
6702 TCGv_i32 tcg_res;
6703 TCGv_ptr fpst;
6705 tcg_res = tcg_temp_new_i32();
6706 fpst = fpstatus_ptr(FPST_FPCR_F16);
6707 tcg_op1 = read_fp_hreg(s, rn);
6708 tcg_op2 = read_fp_hreg(s, rm);
6710 switch (opcode) {
6711 case 0x0: /* FMUL */
6712 gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
6713 break;
6714 case 0x1: /* FDIV */
6715 gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
6716 break;
6717 case 0x2: /* FADD */
6718 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
6719 break;
6720 case 0x3: /* FSUB */
6721 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
6722 break;
6723 case 0x4: /* FMAX */
6724 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
6725 break;
6726 case 0x5: /* FMIN */
6727 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
6728 break;
6729 case 0x6: /* FMAXNM */
6730 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
6731 break;
6732 case 0x7: /* FMINNM */
6733 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
6734 break;
6735 case 0x8: /* FNMUL */
6736 gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
6737 tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
6738 break;
6739 default:
6740 g_assert_not_reached();
6743 write_fp_sreg(s, rd, tcg_res);
6745 tcg_temp_free_ptr(fpst);
6746 tcg_temp_free_i32(tcg_op1);
6747 tcg_temp_free_i32(tcg_op2);
6748 tcg_temp_free_i32(tcg_res);
6751 /* Floating point data-processing (2 source)
6752 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
6753 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
6754 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | opcode | 1 0 | Rn | Rd |
6755 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
6757 static void disas_fp_2src(DisasContext *s, uint32_t insn)
6759 int mos = extract32(insn, 29, 3);
6760 int type = extract32(insn, 22, 2);
6761 int rd = extract32(insn, 0, 5);
6762 int rn = extract32(insn, 5, 5);
6763 int rm = extract32(insn, 16, 5);
6764 int opcode = extract32(insn, 12, 4);
6766 if (opcode > 8 || mos) {
6767 unallocated_encoding(s);
6768 return;
6771 switch (type) {
6772 case 0:
6773 if (!fp_access_check(s)) {
6774 return;
6776 handle_fp_2src_single(s, opcode, rd, rn, rm);
6777 break;
6778 case 1:
6779 if (!fp_access_check(s)) {
6780 return;
6782 handle_fp_2src_double(s, opcode, rd, rn, rm);
6783 break;
6784 case 3:
6785 if (!dc_isar_feature(aa64_fp16, s)) {
6786 unallocated_encoding(s);
6787 return;
6789 if (!fp_access_check(s)) {
6790 return;
6792 handle_fp_2src_half(s, opcode, rd, rn, rm);
6793 break;
6794 default:
6795 unallocated_encoding(s);
6799 /* Floating-point data-processing (3 source) - single precision */
6800 static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
6801 int rd, int rn, int rm, int ra)
6803 TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
6804 TCGv_i32 tcg_res = tcg_temp_new_i32();
6805 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6807 tcg_op1 = read_fp_sreg(s, rn);
6808 tcg_op2 = read_fp_sreg(s, rm);
6809 tcg_op3 = read_fp_sreg(s, ra);
6811 /* These are fused multiply-add, and must be done as one
6812 * floating point operation with no rounding between the
6813 * multiplication and addition steps.
6814 * NB that doing the negations here as separate steps is
6815 * correct: an input NaN should come out with its sign bit
6816 * flipped if it is a negated input.
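 * With the computation below being op1 * op2 + op3, the o1/o0
 * combinations decoded by disas_fp_3src correspond to:
 *   o1=0 o0=0  FMADD:  rd =  rn * rm + ra
 *   o1=0 o0=1  FMSUB:  rd = -rn * rm + ra
 *   o1=1 o0=0  FNMADD: rd = -rn * rm - ra
 *   o1=1 o0=1  FNMSUB: rd =  rn * rm - ra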
6818 if (o1 == true) {
6819 gen_helper_vfp_negs(tcg_op3, tcg_op3);
6822 if (o0 != o1) {
6823 gen_helper_vfp_negs(tcg_op1, tcg_op1);
6826 gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6828 write_fp_sreg(s, rd, tcg_res);
6830 tcg_temp_free_ptr(fpst);
6831 tcg_temp_free_i32(tcg_op1);
6832 tcg_temp_free_i32(tcg_op2);
6833 tcg_temp_free_i32(tcg_op3);
6834 tcg_temp_free_i32(tcg_res);
6837 /* Floating-point data-processing (3 source) - double precision */
6838 static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
6839 int rd, int rn, int rm, int ra)
6841 TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
6842 TCGv_i64 tcg_res = tcg_temp_new_i64();
6843 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6845 tcg_op1 = read_fp_dreg(s, rn);
6846 tcg_op2 = read_fp_dreg(s, rm);
6847 tcg_op3 = read_fp_dreg(s, ra);
6849 /* These are fused multiply-add, and must be done as one
6850 * floating point operation with no rounding between the
6851 * multiplication and addition steps.
6852 * NB that doing the negations here as separate steps is
6853 * correct: an input NaN should come out with its sign bit
6854 * flipped if it is a negated input.
6856 if (o1 == true) {
6857 gen_helper_vfp_negd(tcg_op3, tcg_op3);
6860 if (o0 != o1) {
6861 gen_helper_vfp_negd(tcg_op1, tcg_op1);
6864 gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6866 write_fp_dreg(s, rd, tcg_res);
6868 tcg_temp_free_ptr(fpst);
6869 tcg_temp_free_i64(tcg_op1);
6870 tcg_temp_free_i64(tcg_op2);
6871 tcg_temp_free_i64(tcg_op3);
6872 tcg_temp_free_i64(tcg_res);
6875 /* Floating-point data-processing (3 source) - half precision */
6876 static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
6877 int rd, int rn, int rm, int ra)
6879 TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
6880 TCGv_i32 tcg_res = tcg_temp_new_i32();
6881 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR_F16);
6883 tcg_op1 = read_fp_hreg(s, rn);
6884 tcg_op2 = read_fp_hreg(s, rm);
6885 tcg_op3 = read_fp_hreg(s, ra);
6887 /* These are fused multiply-add, and must be done as one
6888 * floating point operation with no rounding between the
6889 * multiplication and addition steps.
6890 * NB that doing the negations here as separate steps is
6891 * correct: an input NaN should come out with its sign bit
6892 * flipped if it is a negated input.
6894 if (o1 == true) {
6895 tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
6898 if (o0 != o1) {
6899 tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
6902 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6904 write_fp_sreg(s, rd, tcg_res);
6906 tcg_temp_free_ptr(fpst);
6907 tcg_temp_free_i32(tcg_op1);
6908 tcg_temp_free_i32(tcg_op2);
6909 tcg_temp_free_i32(tcg_op3);
6910 tcg_temp_free_i32(tcg_res);
6913 /* Floating point data-processing (3 source)
6914 * 31 30 29 28 24 23 22 21 20 16 15 14 10 9 5 4 0
6915 * +---+---+---+-----------+------+----+------+----+------+------+------+
6916 * | M | 0 | S | 1 1 1 1 1 | type | o1 | Rm | o0 | Ra | Rn | Rd |
6917 * +---+---+---+-----------+------+----+------+----+------+------+------+
6919 static void disas_fp_3src(DisasContext *s, uint32_t insn)
6921 int mos = extract32(insn, 29, 3);
6922 int type = extract32(insn, 22, 2);
6923 int rd = extract32(insn, 0, 5);
6924 int rn = extract32(insn, 5, 5);
6925 int ra = extract32(insn, 10, 5);
6926 int rm = extract32(insn, 16, 5);
6927 bool o0 = extract32(insn, 15, 1);
6928 bool o1 = extract32(insn, 21, 1);
6930 if (mos) {
6931 unallocated_encoding(s);
6932 return;
6935 switch (type) {
6936 case 0:
6937 if (!fp_access_check(s)) {
6938 return;
6940 handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
6941 break;
6942 case 1:
6943 if (!fp_access_check(s)) {
6944 return;
6946 handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
6947 break;
6948 case 3:
6949 if (!dc_isar_feature(aa64_fp16, s)) {
6950 unallocated_encoding(s);
6951 return;
6953 if (!fp_access_check(s)) {
6954 return;
6956 handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
6957 break;
6958 default:
6959 unallocated_encoding(s);
6963 /* Floating point immediate
6964 * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
6965 * +---+---+---+-----------+------+---+------------+-------+------+------+
6966 * | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd |
6967 * +---+---+---+-----------+------+---+------------+-------+------+------+
6969 static void disas_fp_imm(DisasContext *s, uint32_t insn)
6971 int rd = extract32(insn, 0, 5);
6972 int imm5 = extract32(insn, 5, 5);
6973 int imm8 = extract32(insn, 13, 8);
6974 int type = extract32(insn, 22, 2);
6975 int mos = extract32(insn, 29, 3);
6976 uint64_t imm;
6977 MemOp sz;
6979 if (mos || imm5) {
6980 unallocated_encoding(s);
6981 return;
6984 switch (type) {
6985 case 0:
6986 sz = MO_32;
6987 break;
6988 case 1:
6989 sz = MO_64;
6990 break;
6991 case 3:
6992 sz = MO_16;
6993 if (dc_isar_feature(aa64_fp16, s)) {
6994 break;
6996 /* fallthru */
6997 default:
6998 unallocated_encoding(s);
6999 return;
7002 if (!fp_access_check(s)) {
7003 return;
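/* vfp_expand_imm() reconstitutes the value from the abcdefgh pattern:
 * sign = a, exponent = NOT(b):Replicate(b):cd, fraction = efgh:zeros.
 * For example imm8 == 0x70 expands to 1.0 in all three formats.
 */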
7006 imm = vfp_expand_imm(sz, imm8);
7007 write_fp_dreg(s, rd, tcg_constant_i64(imm));
7010 /* Handle floating point <=> fixed point conversions. Note that we can
7011 * also deal with fp <=> integer conversions as a special case (scale == 64).
7012 * OPTME: consider handling that special case separately, or at least skipping
7013 * the call to scalbn in the helpers for zero shifts.
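 * The shift passed to the helpers is the fractional-bit count 64 - scale,
 * so scale == 64 gives a zero shift and reduces to the plain fp <-> int
 * conversion.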
7015 static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
7016 bool itof, int rmode, int scale, int sf, int type)
7018 bool is_signed = !(opcode & 1);
7019 TCGv_ptr tcg_fpstatus;
7020 TCGv_i32 tcg_shift, tcg_single;
7021 TCGv_i64 tcg_double;
7023 tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR);
7025 tcg_shift = tcg_constant_i32(64 - scale);
7027 if (itof) {
7028 TCGv_i64 tcg_int = cpu_reg(s, rn);
7029 if (!sf) {
7030 TCGv_i64 tcg_extend = new_tmp_a64(s);
7032 if (is_signed) {
7033 tcg_gen_ext32s_i64(tcg_extend, tcg_int);
7034 } else {
7035 tcg_gen_ext32u_i64(tcg_extend, tcg_int);
7038 tcg_int = tcg_extend;
7041 switch (type) {
7042 case 1: /* float64 */
7043 tcg_double = tcg_temp_new_i64();
7044 if (is_signed) {
7045 gen_helper_vfp_sqtod(tcg_double, tcg_int,
7046 tcg_shift, tcg_fpstatus);
7047 } else {
7048 gen_helper_vfp_uqtod(tcg_double, tcg_int,
7049 tcg_shift, tcg_fpstatus);
7051 write_fp_dreg(s, rd, tcg_double);
7052 tcg_temp_free_i64(tcg_double);
7053 break;
7055 case 0: /* float32 */
7056 tcg_single = tcg_temp_new_i32();
7057 if (is_signed) {
7058 gen_helper_vfp_sqtos(tcg_single, tcg_int,
7059 tcg_shift, tcg_fpstatus);
7060 } else {
7061 gen_helper_vfp_uqtos(tcg_single, tcg_int,
7062 tcg_shift, tcg_fpstatus);
7064 write_fp_sreg(s, rd, tcg_single);
7065 tcg_temp_free_i32(tcg_single);
7066 break;
7068 case 3: /* float16 */
7069 tcg_single = tcg_temp_new_i32();
7070 if (is_signed) {
7071 gen_helper_vfp_sqtoh(tcg_single, tcg_int,
7072 tcg_shift, tcg_fpstatus);
7073 } else {
7074 gen_helper_vfp_uqtoh(tcg_single, tcg_int,
7075 tcg_shift, tcg_fpstatus);
7077 write_fp_sreg(s, rd, tcg_single);
7078 tcg_temp_free_i32(tcg_single);
7079 break;
7081 default:
7082 g_assert_not_reached();
7084 } else {
7085 TCGv_i64 tcg_int = cpu_reg(s, rd);
7086 TCGv_i32 tcg_rmode;
7088 if (extract32(opcode, 2, 1)) {
7089 /* There are too many rounding modes to all fit into rmode,
7090 * so FCVTA[US] is a special case.
7092 rmode = FPROUNDING_TIEAWAY;
7095 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7097 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
7099 switch (type) {
7100 case 1: /* float64 */
7101 tcg_double = read_fp_dreg(s, rn);
7102 if (is_signed) {
7103 if (!sf) {
7104 gen_helper_vfp_tosld(tcg_int, tcg_double,
7105 tcg_shift, tcg_fpstatus);
7106 } else {
7107 gen_helper_vfp_tosqd(tcg_int, tcg_double,
7108 tcg_shift, tcg_fpstatus);
7110 } else {
7111 if (!sf) {
7112 gen_helper_vfp_tould(tcg_int, tcg_double,
7113 tcg_shift, tcg_fpstatus);
7114 } else {
7115 gen_helper_vfp_touqd(tcg_int, tcg_double,
7116 tcg_shift, tcg_fpstatus);
7119 if (!sf) {
7120 tcg_gen_ext32u_i64(tcg_int, tcg_int);
7122 tcg_temp_free_i64(tcg_double);
7123 break;
7125 case 0: /* float32 */
7126 tcg_single = read_fp_sreg(s, rn);
7127 if (sf) {
7128 if (is_signed) {
7129 gen_helper_vfp_tosqs(tcg_int, tcg_single,
7130 tcg_shift, tcg_fpstatus);
7131 } else {
7132 gen_helper_vfp_touqs(tcg_int, tcg_single,
7133 tcg_shift, tcg_fpstatus);
7135 } else {
7136 TCGv_i32 tcg_dest = tcg_temp_new_i32();
7137 if (is_signed) {
7138 gen_helper_vfp_tosls(tcg_dest, tcg_single,
7139 tcg_shift, tcg_fpstatus);
7140 } else {
7141 gen_helper_vfp_touls(tcg_dest, tcg_single,
7142 tcg_shift, tcg_fpstatus);
7144 tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
7145 tcg_temp_free_i32(tcg_dest);
7147 tcg_temp_free_i32(tcg_single);
7148 break;
7150 case 3: /* float16 */
7151 tcg_single = read_fp_sreg(s, rn);
7152 if (sf) {
7153 if (is_signed) {
7154 gen_helper_vfp_tosqh(tcg_int, tcg_single,
7155 tcg_shift, tcg_fpstatus);
7156 } else {
7157 gen_helper_vfp_touqh(tcg_int, tcg_single,
7158 tcg_shift, tcg_fpstatus);
7160 } else {
7161 TCGv_i32 tcg_dest = tcg_temp_new_i32();
7162 if (is_signed) {
7163 gen_helper_vfp_toslh(tcg_dest, tcg_single,
7164 tcg_shift, tcg_fpstatus);
7165 } else {
7166 gen_helper_vfp_toulh(tcg_dest, tcg_single,
7167 tcg_shift, tcg_fpstatus);
7169 tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
7170 tcg_temp_free_i32(tcg_dest);
7172 tcg_temp_free_i32(tcg_single);
7173 break;
7175 default:
7176 g_assert_not_reached();
7179 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
7180 tcg_temp_free_i32(tcg_rmode);
7183 tcg_temp_free_ptr(tcg_fpstatus);
7186 /* Floating point <-> fixed point conversions
7187 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
7188 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
7189 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd |
7190 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
7192 static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
7194 int rd = extract32(insn, 0, 5);
7195 int rn = extract32(insn, 5, 5);
7196 int scale = extract32(insn, 10, 6);
7197 int opcode = extract32(insn, 16, 3);
7198 int rmode = extract32(insn, 19, 2);
7199 int type = extract32(insn, 22, 2);
7200 bool sbit = extract32(insn, 29, 1);
7201 bool sf = extract32(insn, 31, 1);
7202 bool itof;
7204 if (sbit || (!sf && scale < 32)) {
7205 unallocated_encoding(s);
7206 return;
7209 switch (type) {
7210 case 0: /* float32 */
7211 case 1: /* float64 */
7212 break;
7213 case 3: /* float16 */
7214 if (dc_isar_feature(aa64_fp16, s)) {
7215 break;
7217 /* fallthru */
7218 default:
7219 unallocated_encoding(s);
7220 return;
7223 switch ((rmode << 3) | opcode) {
7224 case 0x2: /* SCVTF */
7225 case 0x3: /* UCVTF */
7226 itof = true;
7227 break;
7228 case 0x18: /* FCVTZS */
7229 case 0x19: /* FCVTZU */
7230 itof = false;
7231 break;
7232 default:
7233 unallocated_encoding(s);
7234 return;
7237 if (!fp_access_check(s)) {
7238 return;
7241 handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
7244 static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
7246 /* FMOV: gpr to or from float, double, or top half of quad fp reg,
7247 * without conversion.
7250 if (itof) {
7251 TCGv_i64 tcg_rn = cpu_reg(s, rn);
7252 TCGv_i64 tmp;
7254 switch (type) {
7255 case 0:
7256 /* 32 bit */
7257 tmp = tcg_temp_new_i64();
7258 tcg_gen_ext32u_i64(tmp, tcg_rn);
7259 write_fp_dreg(s, rd, tmp);
7260 tcg_temp_free_i64(tmp);
7261 break;
7262 case 1:
7263 /* 64 bit */
7264 write_fp_dreg(s, rd, tcg_rn);
7265 break;
7266 case 2:
7267 /* 64 bit to top half. */
7268 tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
7269 clear_vec_high(s, true, rd);
7270 break;
7271 case 3:
7272 /* 16 bit */
7273 tmp = tcg_temp_new_i64();
7274 tcg_gen_ext16u_i64(tmp, tcg_rn);
7275 write_fp_dreg(s, rd, tmp);
7276 tcg_temp_free_i64(tmp);
7277 break;
7278 default:
7279 g_assert_not_reached();
7281 } else {
7282 TCGv_i64 tcg_rd = cpu_reg(s, rd);
7284 switch (type) {
7285 case 0:
7286 /* 32 bit */
7287 tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
7288 break;
7289 case 1:
7290 /* 64 bit */
7291 tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
7292 break;
7293 case 2:
7294 /* 64 bits from top half */
7295 tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
7296 break;
7297 case 3:
7298 /* 16 bit */
7299 tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
7300 break;
7301 default:
7302 g_assert_not_reached();
7307 static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
7309 TCGv_i64 t = read_fp_dreg(s, rn);
7310 TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR);
7312 gen_helper_fjcvtzs(t, t, fpstatus);
7314 tcg_temp_free_ptr(fpstatus);
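/* The helper returns the 32-bit result in the low half of t and the value
 * for cpu_ZF in the high half (QEMU treats Z as set when cpu_ZF == 0).
 * Architecturally FJCVTZS sets Z when the conversion was exact and clears
 * N, C and V, which is what the flag writes below implement.
 */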
7316 tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
7317 tcg_gen_extrh_i64_i32(cpu_ZF, t);
7318 tcg_gen_movi_i32(cpu_CF, 0);
7319 tcg_gen_movi_i32(cpu_NF, 0);
7320 tcg_gen_movi_i32(cpu_VF, 0);
7322 tcg_temp_free_i64(t);
7325 /* Floating point <-> integer conversions
7326 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
7327 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
7328 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
7329 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
7331 static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
7333 int rd = extract32(insn, 0, 5);
7334 int rn = extract32(insn, 5, 5);
7335 int opcode = extract32(insn, 16, 3);
7336 int rmode = extract32(insn, 19, 2);
7337 int type = extract32(insn, 22, 2);
7338 bool sbit = extract32(insn, 29, 1);
7339 bool sf = extract32(insn, 31, 1);
7340 bool itof = false;
7342 if (sbit) {
7343 goto do_unallocated;
7346 switch (opcode) {
7347 case 2: /* SCVTF */
7348 case 3: /* UCVTF */
7349 itof = true;
7350 /* fallthru */
7351 case 4: /* FCVTAS */
7352 case 5: /* FCVTAU */
7353 if (rmode != 0) {
7354 goto do_unallocated;
7356 /* fallthru */
7357 case 0: /* FCVT[NPMZ]S */
7358 case 1: /* FCVT[NPMZ]U */
7359 switch (type) {
7360 case 0: /* float32 */
7361 case 1: /* float64 */
7362 break;
7363 case 3: /* float16 */
7364 if (!dc_isar_feature(aa64_fp16, s)) {
7365 goto do_unallocated;
7367 break;
7368 default:
7369 goto do_unallocated;
7371 if (!fp_access_check(s)) {
7372 return;
7374 handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
7375 break;
7377 default:
7378 switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
7379 case 0b01100110: /* FMOV half <-> 32-bit int */
7380 case 0b01100111:
7381 case 0b11100110: /* FMOV half <-> 64-bit int */
7382 case 0b11100111:
7383 if (!dc_isar_feature(aa64_fp16, s)) {
7384 goto do_unallocated;
7386 /* fallthru */
7387 case 0b00000110: /* FMOV 32-bit */
7388 case 0b00000111:
7389 case 0b10100110: /* FMOV 64-bit */
7390 case 0b10100111:
7391 case 0b11001110: /* FMOV top half of 128-bit */
7392 case 0b11001111:
7393 if (!fp_access_check(s)) {
7394 return;
7396 itof = opcode & 1;
7397 handle_fmov(s, rd, rn, type, itof);
7398 break;
7400 case 0b00111110: /* FJCVTZS */
7401 if (!dc_isar_feature(aa64_jscvt, s)) {
7402 goto do_unallocated;
7403 } else if (fp_access_check(s)) {
7404 handle_fjcvtzs(s, rd, rn);
7406 break;
7408 default:
7409 do_unallocated:
7410 unallocated_encoding(s);
7411 return;
7413 break;
7417 /* FP-specific subcases of table C3-6 (SIMD and FP data processing)
7418 * 31 30 29 28 25 24 0
7419 * +---+---+---+---------+-----------------------------+
7420 * | | 0 | | 1 1 1 1 | |
7421 * +---+---+---+---------+-----------------------------+
7423 static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
7425 if (extract32(insn, 24, 1)) {
7426 /* Floating point data-processing (3 source) */
7427 disas_fp_3src(s, insn);
7428 } else if (extract32(insn, 21, 1) == 0) {
7429 /* Floating point to fixed point conversions */
7430 disas_fp_fixed_conv(s, insn);
7431 } else {
7432 switch (extract32(insn, 10, 2)) {
7433 case 1:
7434 /* Floating point conditional compare */
7435 disas_fp_ccomp(s, insn);
7436 break;
7437 case 2:
7438 /* Floating point data-processing (2 source) */
7439 disas_fp_2src(s, insn);
7440 break;
7441 case 3:
7442 /* Floating point conditional select */
7443 disas_fp_csel(s, insn);
7444 break;
7445 case 0:
7446 switch (ctz32(extract32(insn, 12, 4))) {
7447 case 0: /* [15:12] == xxx1 */
7448 /* Floating point immediate */
7449 disas_fp_imm(s, insn);
7450 break;
7451 case 1: /* [15:12] == xx10 */
7452 /* Floating point compare */
7453 disas_fp_compare(s, insn);
7454 break;
7455 case 2: /* [15:12] == x100 */
7456 /* Floating point data-processing (1 source) */
7457 disas_fp_1src(s, insn);
7458 break;
7459 case 3: /* [15:12] == 1000 */
7460 unallocated_encoding(s);
7461 break;
7462 default: /* [15:12] == 0000 */
7463 /* Floating point <-> integer conversions */
7464 disas_fp_int_conv(s, insn);
7465 break;
7467 break;
7472 static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
7473 int pos)
7475 /* Extract 64 bits from the middle of two concatenated 64 bit
7476 * vector register slices left:right. The extracted bits start
7477 * at 'pos' bits into the right (least significant) side.
7478 * We return the result in tcg_right, and guarantee not to
7479 * trash tcg_left.
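 * For example, with pos == 24 the result is right[63:24] in bits [39:0]
 * and left[23:0] in bits [63:40], i.e. (right >> 24) | (left << 40).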
7481 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
7482 assert(pos > 0 && pos < 64);
7484 tcg_gen_shri_i64(tcg_right, tcg_right, pos);
7485 tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
7486 tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
7488 tcg_temp_free_i64(tcg_tmp);
7491 /* EXT
7492 * 31 30 29 24 23 22 21 20 16 15 14 11 10 9 5 4 0
7493 * +---+---+-------------+-----+---+------+---+------+---+------+------+
7494 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 | Rm | 0 | imm4 | 0 | Rn | Rd |
7495 * +---+---+-------------+-----+---+------+---+------+---+------+------+
7497 static void disas_simd_ext(DisasContext *s, uint32_t insn)
7499 int is_q = extract32(insn, 30, 1);
7500 int op2 = extract32(insn, 22, 2);
7501 int imm4 = extract32(insn, 11, 4);
7502 int rm = extract32(insn, 16, 5);
7503 int rn = extract32(insn, 5, 5);
7504 int rd = extract32(insn, 0, 5);
7505 int pos = imm4 << 3;
7506 TCGv_i64 tcg_resl, tcg_resh;
7508 if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
7509 unallocated_encoding(s);
7510 return;
7513 if (!fp_access_check(s)) {
7514 return;
7517 tcg_resh = tcg_temp_new_i64();
7518 tcg_resl = tcg_temp_new_i64();
7520 /* Vd gets bits starting at pos bits into Vm:Vn. This is
7521 * either extracting 128 bits from a 128:128 concatenation, or
7522 * extracting 64 bits from a 64:64 concatenation.
7524 if (!is_q) {
7525 read_vec_element(s, tcg_resl, rn, 0, MO_64);
7526 if (pos != 0) {
7527 read_vec_element(s, tcg_resh, rm, 0, MO_64);
7528 do_ext64(s, tcg_resh, tcg_resl, pos);
7530 } else {
7531 TCGv_i64 tcg_hh;
7532 typedef struct {
7533 int reg;
7534 int elt;
7535 } EltPosns;
7536 EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
7537 EltPosns *elt = eltposns;
7539 if (pos >= 64) {
7540 elt++;
7541 pos -= 64;
7544 read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
7545 elt++;
7546 read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
7547 elt++;
7548 if (pos != 0) {
7549 do_ext64(s, tcg_resh, tcg_resl, pos);
7550 tcg_hh = tcg_temp_new_i64();
7551 read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
7552 do_ext64(s, tcg_hh, tcg_resh, pos);
7553 tcg_temp_free_i64(tcg_hh);
7557 write_vec_element(s, tcg_resl, rd, 0, MO_64);
7558 tcg_temp_free_i64(tcg_resl);
7559 if (is_q) {
7560 write_vec_element(s, tcg_resh, rd, 1, MO_64);
7562 tcg_temp_free_i64(tcg_resh);
7563 clear_vec_high(s, is_q, rd);
7566 /* TBL/TBX
7567 * 31 30 29 24 23 22 21 20 16 15 14 13 12 11 10 9 5 4 0
7568 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
7569 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 | Rm | 0 | len | op | 0 0 | Rn | Rd |
7570 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
7572 static void disas_simd_tb(DisasContext *s, uint32_t insn)
7574 int op2 = extract32(insn, 22, 2);
7575 int is_q = extract32(insn, 30, 1);
7576 int rm = extract32(insn, 16, 5);
7577 int rn = extract32(insn, 5, 5);
7578 int rd = extract32(insn, 0, 5);
7579 int is_tbx = extract32(insn, 12, 1);
7580 int len = (extract32(insn, 13, 2) + 1) * 16;
7582 if (op2 != 0) {
7583 unallocated_encoding(s);
7584 return;
7587 if (!fp_access_check(s)) {
7588 return;
7591 tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
7592 vec_full_reg_offset(s, rm), cpu_env,
7593 is_q ? 16 : 8, vec_full_reg_size(s),
7594 (len << 6) | (is_tbx << 5) | rn,
7595 gen_helper_simd_tblx);
7598 /* ZIP/UZP/TRN
7599 * 31 30 29 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
7600 * +---+---+-------------+------+---+------+---+------------------+------+
7601 * | 0 | Q | 0 0 1 1 1 0 | size | 0 | Rm | 0 | opc | 1 0 | Rn | Rd |
7602 * +---+---+-------------+------+---+------+---+------------------+------+
7604 static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
7606 int rd = extract32(insn, 0, 5);
7607 int rn = extract32(insn, 5, 5);
7608 int rm = extract32(insn, 16, 5);
7609 int size = extract32(insn, 22, 2);
7610 /* opc field bits [1:0] indicate ZIP/UZP/TRN;
7611 * bit 2 indicates 1 vs 2 variant of the insn.
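 * Concretely: opcode 1 is UZP, 2 is TRN, 3 is ZIP (0 is unallocated),
 * and part == 0/1 selects the "1"/"2" form, e.g. ZIP1 vs ZIP2.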
7613 int opcode = extract32(insn, 12, 2);
7614 bool part = extract32(insn, 14, 1);
7615 bool is_q = extract32(insn, 30, 1);
7616 int esize = 8 << size;
7617 int i, ofs;
7618 int datasize = is_q ? 128 : 64;
7619 int elements = datasize / esize;
7620 TCGv_i64 tcg_res, tcg_resl, tcg_resh;
7622 if (opcode == 0 || (size == 3 && !is_q)) {
7623 unallocated_encoding(s);
7624 return;
7627 if (!fp_access_check(s)) {
7628 return;
7631 tcg_resl = tcg_const_i64(0);
7632 tcg_resh = is_q ? tcg_const_i64(0) : NULL;
7633 tcg_res = tcg_temp_new_i64();
7635 for (i = 0; i < elements; i++) {
7636 switch (opcode) {
7637 case 1: /* UZP1/2 */
7639 int midpoint = elements / 2;
7640 if (i < midpoint) {
7641 read_vec_element(s, tcg_res, rn, 2 * i + part, size);
7642 } else {
7643 read_vec_element(s, tcg_res, rm,
7644 2 * (i - midpoint) + part, size);
7646 break;
7648 case 2: /* TRN1/2 */
7649 if (i & 1) {
7650 read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
7651 } else {
7652 read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
7654 break;
7655 case 3: /* ZIP1/2 */
7657 int base = part * elements / 2;
7658 if (i & 1) {
7659 read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
7660 } else {
7661 read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
7663 break;
7665 default:
7666 g_assert_not_reached();
7669 ofs = i * esize;
7670 if (ofs < 64) {
7671 tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
7672 tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
7673 } else {
7674 tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
7675 tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
7679 tcg_temp_free_i64(tcg_res);
7681 write_vec_element(s, tcg_resl, rd, 0, MO_64);
7682 tcg_temp_free_i64(tcg_resl);
7684 if (is_q) {
7685 write_vec_element(s, tcg_resh, rd, 1, MO_64);
7686 tcg_temp_free_i64(tcg_resh);
7688 clear_vec_high(s, is_q, rd);
7692 * do_reduction_op helper
7694 * This mirrors the Reduce() pseudocode in the ARM ARM. It is
7695 * important for correct NaN propagation that we do these
7696 * operations in exactly the order specified by the pseudocode.
7698 * This is a recursive function; TCG temps should be freed by the
7699 * calling function once it is done with the values.
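 * The vmap argument is a bitmask of the vector elements still covered by
 * this call; each level splits it in half, so e.g. a 4-element FMAXV
 * computes op(op(e0, e1), op(e2, e3)).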
7701 static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
7702 int esize, int size, int vmap, TCGv_ptr fpst)
7704 if (esize == size) {
7705 int element;
7706 MemOp msize = esize == 16 ? MO_16 : MO_32;
7707 TCGv_i32 tcg_elem;
7709 /* We should have one register left here */
7710 assert(ctpop8(vmap) == 1);
7711 element = ctz32(vmap);
7712 assert(element < 8);
7714 tcg_elem = tcg_temp_new_i32();
7715 read_vec_element_i32(s, tcg_elem, rn, element, msize);
7716 return tcg_elem;
7717 } else {
7718 int bits = size / 2;
7719 int shift = ctpop8(vmap) / 2;
7720 int vmap_lo = (vmap >> shift) & vmap;
7721 int vmap_hi = (vmap & ~vmap_lo);
7722 TCGv_i32 tcg_hi, tcg_lo, tcg_res;
7724 tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
7725 tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
7726 tcg_res = tcg_temp_new_i32();
7728 switch (fpopcode) {
7729 case 0x0c: /* fmaxnmv half-precision */
7730 gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
7731 break;
7732 case 0x0f: /* fmaxv half-precision */
7733 gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
7734 break;
7735 case 0x1c: /* fminnmv half-precision */
7736 gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
7737 break;
7738 case 0x1f: /* fminv half-precision */
7739 gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
7740 break;
7741 case 0x2c: /* fmaxnmv */
7742 gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
7743 break;
7744 case 0x2f: /* fmaxv */
7745 gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
7746 break;
7747 case 0x3c: /* fminnmv */
7748 gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
7749 break;
7750 case 0x3f: /* fminv */
7751 gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
7752 break;
7753 default:
7754 g_assert_not_reached();
7757 tcg_temp_free_i32(tcg_hi);
7758 tcg_temp_free_i32(tcg_lo);
7759 return tcg_res;
7763 /* AdvSIMD across lanes
7764 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
7765 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
7766 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
7767 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
7769 static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
7771 int rd = extract32(insn, 0, 5);
7772 int rn = extract32(insn, 5, 5);
7773 int size = extract32(insn, 22, 2);
7774 int opcode = extract32(insn, 12, 5);
7775 bool is_q = extract32(insn, 30, 1);
7776 bool is_u = extract32(insn, 29, 1);
7777 bool is_fp = false;
7778 bool is_min = false;
7779 int esize;
7780 int elements;
7781 int i;
7782 TCGv_i64 tcg_res, tcg_elt;
7784 switch (opcode) {
7785 case 0x1b: /* ADDV */
7786 if (is_u) {
7787 unallocated_encoding(s);
7788 return;
7790 /* fall through */
7791 case 0x3: /* SADDLV, UADDLV */
7792 case 0xa: /* SMAXV, UMAXV */
7793 case 0x1a: /* SMINV, UMINV */
7794 if (size == 3 || (size == 2 && !is_q)) {
7795 unallocated_encoding(s);
7796 return;
7798 break;
7799 case 0xc: /* FMAXNMV, FMINNMV */
7800 case 0xf: /* FMAXV, FMINV */
7801 /* Bit 1 of size field encodes min vs max and the actual size
7802 * depends on the encoding of the U bit. If U is not set (and FP16 is
7803 * enabled) then we do half-precision float instead of single
7804 * precision.
7806 is_min = extract32(size, 1, 1);
7807 is_fp = true;
7808 if (!is_u && dc_isar_feature(aa64_fp16, s)) {
7809 size = 1;
7810 } else if (!is_u || !is_q || extract32(size, 0, 1)) {
7811 unallocated_encoding(s);
7812 return;
7813 } else {
7814 size = 2;
7816 break;
7817 default:
7818 unallocated_encoding(s);
7819 return;
7822 if (!fp_access_check(s)) {
7823 return;
7826 esize = 8 << size;
7827 elements = (is_q ? 128 : 64) / esize;
7829 tcg_res = tcg_temp_new_i64();
7830 tcg_elt = tcg_temp_new_i64();
7832 /* These instructions operate across all lanes of a vector
7833 * to produce a single result. We can guarantee that a 64
7834 * bit intermediate is sufficient:
7835 * + for [US]ADDLV the maximum element size is 32 bits, and
7836 * the result type is 64 bits
7837 * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
7838 * same as the element size, which is 32 bits at most
7839 * For the integer operations we can choose to work at 64
7840 * or 32 bits and truncate at the end; for simplicity
7841 * we use 64 bits always. The floating point
7842 * ops do require 32 bit intermediates, though.
7844 if (!is_fp) {
7845 read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));
7847 for (i = 1; i < elements; i++) {
7848 read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));
7850 switch (opcode) {
7851 case 0x03: /* SADDLV / UADDLV */
7852 case 0x1b: /* ADDV */
7853 tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
7854 break;
7855 case 0x0a: /* SMAXV / UMAXV */
7856 if (is_u) {
7857 tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
7858 } else {
7859 tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
7861 break;
7862 case 0x1a: /* SMINV / UMINV */
7863 if (is_u) {
7864 tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
7865 } else {
7866 tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
7868 break;
7869 default:
7870 g_assert_not_reached();
7874 } else {
7875 /* Floating point vector reduction ops which work across 32
7876 * bit (single) or 16 bit (half-precision) intermediates.
7877 * Note that correct NaN propagation requires that we do these
7878 * operations in exactly the order specified by the pseudocode.
7880 TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
7881 int fpopcode = opcode | is_min << 4 | is_u << 5;
7882 int vmap = (1 << elements) - 1;
7883 TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
7884 (is_q ? 128 : 64), vmap, fpst);
7885 tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
7886 tcg_temp_free_i32(tcg_res32);
7887 tcg_temp_free_ptr(fpst);
7890 tcg_temp_free_i64(tcg_elt);
7892 /* Now truncate the result to the width required for the final output */
7893 if (opcode == 0x03) {
7894 /* SADDLV, UADDLV: result is 2*esize */
7895 size++;
7898 switch (size) {
7899 case 0:
7900 tcg_gen_ext8u_i64(tcg_res, tcg_res);
7901 break;
7902 case 1:
7903 tcg_gen_ext16u_i64(tcg_res, tcg_res);
7904 break;
7905 case 2:
7906 tcg_gen_ext32u_i64(tcg_res, tcg_res);
7907 break;
7908 case 3:
7909 break;
7910 default:
7911 g_assert_not_reached();
7914 write_fp_dreg(s, rd, tcg_res);
7915 tcg_temp_free_i64(tcg_res);
7918 /* DUP (Element, Vector)
7920 * 31 30 29 21 20 16 15 10 9 5 4 0
7921 * +---+---+-------------------+--------+-------------+------+------+
7922 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
7923 * +---+---+-------------------+--------+-------------+------+------+
7925 * size: encoded in imm5 (see ARM ARM LowestSetBit())
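 * For example imm5 == 0b00110 gives size 1 (16-bit elements) and
 * index imm5<4:2> == 1.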
7927 static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
7928 int imm5)
7930 int size = ctz32(imm5);
7931 int index;
7933 if (size > 3 || (size == 3 && !is_q)) {
7934 unallocated_encoding(s);
7935 return;
7938 if (!fp_access_check(s)) {
7939 return;
7942 index = imm5 >> (size + 1);
7943 tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
7944 vec_reg_offset(s, rn, index, size),
7945 is_q ? 16 : 8, vec_full_reg_size(s));
7948 /* DUP (element, scalar)
7949 * 31 21 20 16 15 10 9 5 4 0
7950 * +-----------------------+--------+-------------+------+------+
7951 * | 0 1 0 1 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
7952 * +-----------------------+--------+-------------+------+------+
7954 static void handle_simd_dupes(DisasContext *s, int rd, int rn,
7955 int imm5)
7957 int size = ctz32(imm5);
7958 int index;
7959 TCGv_i64 tmp;
7961 if (size > 3) {
7962 unallocated_encoding(s);
7963 return;
7966 if (!fp_access_check(s)) {
7967 return;
7970 index = imm5 >> (size + 1);
7972 /* This instruction just extracts the specified element and
7973 * zero-extends it into the bottom of the destination register.
7975 tmp = tcg_temp_new_i64();
7976 read_vec_element(s, tmp, rn, index, size);
7977 write_fp_dreg(s, rd, tmp);
7978 tcg_temp_free_i64(tmp);
7981 /* DUP (General)
7983 * 31 30 29 21 20 16 15 10 9 5 4 0
7984 * +---+---+-------------------+--------+-------------+------+------+
7985 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 1 1 | Rn | Rd |
7986 * +---+---+-------------------+--------+-------------+------+------+
7988 * size: encoded in imm5 (see ARM ARM LowestSetBit())
7990 static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
7991 int imm5)
7993 int size = ctz32(imm5);
7994 uint32_t dofs, oprsz, maxsz;
7996 if (size > 3 || ((size == 3) && !is_q)) {
7997 unallocated_encoding(s);
7998 return;
8001 if (!fp_access_check(s)) {
8002 return;
8005 dofs = vec_full_reg_offset(s, rd);
8006 oprsz = is_q ? 16 : 8;
8007 maxsz = vec_full_reg_size(s);
8009 tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
8012 /* INS (Element)
8014 * 31 21 20 16 15 14 11 10 9 5 4 0
8015 * +-----------------------+--------+------------+---+------+------+
8016 * | 0 1 1 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
8017 * +-----------------------+--------+------------+---+------+------+
8019 * size: encoded in imm5 (see ARM ARM LowestSetBit())
8020 * index: encoded in imm5<4:size+1>
8022 static void handle_simd_inse(DisasContext *s, int rd, int rn,
8023 int imm4, int imm5)
8025 int size = ctz32(imm5);
8026 int src_index, dst_index;
8027 TCGv_i64 tmp;
8029 if (size > 3) {
8030 unallocated_encoding(s);
8031 return;
8034 if (!fp_access_check(s)) {
8035 return;
8038 dst_index = extract32(imm5, 1+size, 5);
8039 src_index = extract32(imm4, size, 4);
8041 tmp = tcg_temp_new_i64();
8043 read_vec_element(s, tmp, rn, src_index, size);
8044 write_vec_element(s, tmp, rd, dst_index, size);
8046 tcg_temp_free_i64(tmp);
8048 /* INS is considered a 128-bit write for SVE. */
8049 clear_vec_high(s, true, rd);
8053 /* INS (General)
8055 * 31 21 20 16 15 10 9 5 4 0
8056 * +-----------------------+--------+-------------+------+------+
8057 * | 0 1 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 1 1 1 | Rn | Rd |
8058 * +-----------------------+--------+-------------+------+------+
8060 * size: encoded in imm5 (see ARM ARM LowestSetBit())
8061 * index: encoded in imm5<4:size+1>
8063 static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
8065 int size = ctz32(imm5);
8066 int idx;
8068 if (size > 3) {
8069 unallocated_encoding(s);
8070 return;
8073 if (!fp_access_check(s)) {
8074 return;
8077 idx = extract32(imm5, 1 + size, 4 - size);
8078 write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
8080 /* INS is considered a 128-bit write for SVE. */
8081 clear_vec_high(s, true, rd);
8085 * UMOV (General)
8086 * SMOV (General)
8088 * 31 30 29 21 20 16 15 12 10 9 5 4 0
8089 * +---+---+-------------------+--------+-------------+------+------+
8090 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 1 U 1 1 | Rn | Rd |
8091 * +---+---+-------------------+--------+-------------+------+------+
8093 * U: unsigned when set
8094 * size: encoded in imm5 (see ARM ARM LowestSetBit())
8096 static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
8097 int rn, int rd, int imm5)
8099 int size = ctz32(imm5);
8100 int element;
8101 TCGv_i64 tcg_rd;
8103 /* Check for UnallocatedEncodings */
8104 if (is_signed) {
8105 if (size > 2 || (size == 2 && !is_q)) {
8106 unallocated_encoding(s);
8107 return;
8109 } else {
8110 if (size > 3
8111 || (size < 3 && is_q)
8112 || (size == 3 && !is_q)) {
8113 unallocated_encoding(s);
8114 return;
8118 if (!fp_access_check(s)) {
8119 return;
8122 element = extract32(imm5, 1+size, 4);
8124 tcg_rd = cpu_reg(s, rd);
8125 read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
8126 if (is_signed && !is_q) {
8127 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
8131 /* AdvSIMD copy
8132 * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
8133 * +---+---+----+-----------------+------+---+------+---+------+------+
8134 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
8135 * +---+---+----+-----------------+------+---+------+---+------+------+
8137 static void disas_simd_copy(DisasContext *s, uint32_t insn)
8139 int rd = extract32(insn, 0, 5);
8140 int rn = extract32(insn, 5, 5);
8141 int imm4 = extract32(insn, 11, 4);
8142 int op = extract32(insn, 29, 1);
8143 int is_q = extract32(insn, 30, 1);
8144 int imm5 = extract32(insn, 16, 5);
8146 if (op) {
8147 if (is_q) {
8148 /* INS (element) */
8149 handle_simd_inse(s, rd, rn, imm4, imm5);
8150 } else {
8151 unallocated_encoding(s);
8153 } else {
8154 switch (imm4) {
8155 case 0:
8156 /* DUP (element - vector) */
8157 handle_simd_dupe(s, is_q, rd, rn, imm5);
8158 break;
8159 case 1:
8160 /* DUP (general) */
8161 handle_simd_dupg(s, is_q, rd, rn, imm5);
8162 break;
8163 case 3:
8164 if (is_q) {
8165 /* INS (general) */
8166 handle_simd_insg(s, rd, rn, imm5);
8167 } else {
8168 unallocated_encoding(s);
8170 break;
8171 case 5:
8172 case 7:
8173 /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
8174 handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
8175 break;
8176 default:
8177 unallocated_encoding(s);
8178 break;
8183 /* AdvSIMD modified immediate
8184 * 31 30 29 28 19 18 16 15 12 11 10 9 5 4 0
8185 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
8186 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh | Rd |
8187 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
8189 * There are a number of operations that can be carried out here:
8190 * MOVI - move (shifted) imm into register
8191 * MVNI - move inverted (shifted) imm into register
8192 * ORR - bitwise OR of (shifted) imm with register
8193 * BIC - bitwise clear of (shifted) imm with register
8194 * With ARMv8.2 we also have:
8195 * FMOV half-precision
8197 static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
8199 int rd = extract32(insn, 0, 5);
8200 int cmode = extract32(insn, 12, 4);
8201 int o2 = extract32(insn, 11, 1);
8202 uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
8203 bool is_neg = extract32(insn, 29, 1);
8204 bool is_q = extract32(insn, 30, 1);
8205 uint64_t imm = 0;
8207 if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
8208 /* Check for FMOV (vector, immediate) - half-precision */
8209 if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
8210 unallocated_encoding(s);
8211 return;
8215 if (!fp_access_check(s)) {
8216 return;
8219 if (cmode == 15 && o2 && !is_neg) {
8220 /* FMOV (vector, immediate) - half-precision */
8221 imm = vfp_expand_imm(MO_16, abcdefgh);
8222 /* now duplicate across the lanes */
8223 imm = dup_const(MO_16, imm);
8224 } else {
8225 imm = asimd_imm_const(abcdefgh, cmode, is_neg);
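/* cmode patterns 0xx1 and 10x1 select the ORR/BIC forms below; every other
 * cmode (including the FMOV case above) simply replicates the expanded
 * constant, with any MVNI inversion already folded into imm.
 */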
8228 if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
8229 /* MOVI or MVNI, with MVNI negation handled above. */
8230 tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
8231 vec_full_reg_size(s), imm);
8232 } else {
8233 /* ORR or BIC, with BIC negation to AND handled above. */
8234 if (is_neg) {
8235 gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
8236 } else {
8237 gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
8242 /* AdvSIMD scalar copy
8243 * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
8244 * +-----+----+-----------------+------+---+------+---+------+------+
8245 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
8246 * +-----+----+-----------------+------+---+------+---+------+------+
8248 static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
8250 int rd = extract32(insn, 0, 5);
8251 int rn = extract32(insn, 5, 5);
8252 int imm4 = extract32(insn, 11, 4);
8253 int imm5 = extract32(insn, 16, 5);
8254 int op = extract32(insn, 29, 1);
8256 if (op != 0 || imm4 != 0) {
8257 unallocated_encoding(s);
8258 return;
8261 /* DUP (element, scalar) */
8262 handle_simd_dupes(s, rd, rn, imm5);
8265 /* AdvSIMD scalar pairwise
8266 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
8267 * +-----+---+-----------+------+-----------+--------+-----+------+------+
8268 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
8269 * +-----+---+-----------+------+-----------+--------+-----+------+------+
8271 static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
8273 int u = extract32(insn, 29, 1);
8274 int size = extract32(insn, 22, 2);
8275 int opcode = extract32(insn, 12, 5);
8276 int rn = extract32(insn, 5, 5);
8277 int rd = extract32(insn, 0, 5);
8278 TCGv_ptr fpst;
8280 /* For some ops (the FP ones), size[1] is part of the encoding.
8281 * For ADDP strictly it is not but size[1] is always 1 for valid
8282 * encodings.
8284 opcode |= (extract32(size, 1, 1) << 5);
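/* e.g. ADDP (opcode 0x1b, size 11) becomes 0x3b below, and FMINNMP is
 * distinguished from FMAXNMP only by size[1], giving 0x2c vs 0xc.
 */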
8286 switch (opcode) {
8287 case 0x3b: /* ADDP */
8288 if (u || size != 3) {
8289 unallocated_encoding(s);
8290 return;
8292 if (!fp_access_check(s)) {
8293 return;
8296 fpst = NULL;
8297 break;
8298 case 0xc: /* FMAXNMP */
8299 case 0xd: /* FADDP */
8300 case 0xf: /* FMAXP */
8301 case 0x2c: /* FMINNMP */
8302 case 0x2f: /* FMINP */
8303 /* FP op: for U=1, size[0] selects 32-bit or 64-bit; U=0 is the FP16 form */
8304 if (!u) {
8305 if (!dc_isar_feature(aa64_fp16, s)) {
8306 unallocated_encoding(s);
8307 return;
8308 } else {
8309 size = MO_16;
8311 } else {
8312 size = extract32(size, 0, 1) ? MO_64 : MO_32;
8315 if (!fp_access_check(s)) {
8316 return;
8319 fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
8320 break;
8321 default:
8322 unallocated_encoding(s);
8323 return;
8326 if (size == MO_64) {
8327 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8328 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
8329 TCGv_i64 tcg_res = tcg_temp_new_i64();
8331 read_vec_element(s, tcg_op1, rn, 0, MO_64);
8332 read_vec_element(s, tcg_op2, rn, 1, MO_64);
8334 switch (opcode) {
8335 case 0x3b: /* ADDP */
8336 tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
8337 break;
8338 case 0xc: /* FMAXNMP */
8339 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
8340 break;
8341 case 0xd: /* FADDP */
8342 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
8343 break;
8344 case 0xf: /* FMAXP */
8345 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
8346 break;
8347 case 0x2c: /* FMINNMP */
8348 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
8349 break;
8350 case 0x2f: /* FMINP */
8351 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
8352 break;
8353 default:
8354 g_assert_not_reached();
8357 write_fp_dreg(s, rd, tcg_res);
8359 tcg_temp_free_i64(tcg_op1);
8360 tcg_temp_free_i64(tcg_op2);
8361 tcg_temp_free_i64(tcg_res);
8362 } else {
8363 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
8364 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
8365 TCGv_i32 tcg_res = tcg_temp_new_i32();
8367 read_vec_element_i32(s, tcg_op1, rn, 0, size);
8368 read_vec_element_i32(s, tcg_op2, rn, 1, size);
8370 if (size == MO_16) {
8371 switch (opcode) {
8372 case 0xc: /* FMAXNMP */
8373 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
8374 break;
8375 case 0xd: /* FADDP */
8376 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
8377 break;
8378 case 0xf: /* FMAXP */
8379 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
8380 break;
8381 case 0x2c: /* FMINNMP */
8382 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
8383 break;
8384 case 0x2f: /* FMINP */
8385 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
8386 break;
8387 default:
8388 g_assert_not_reached();
8390 } else {
8391 switch (opcode) {
8392 case 0xc: /* FMAXNMP */
8393 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
8394 break;
8395 case 0xd: /* FADDP */
8396 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
8397 break;
8398 case 0xf: /* FMAXP */
8399 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
8400 break;
8401 case 0x2c: /* FMINNMP */
8402 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
8403 break;
8404 case 0x2f: /* FMINP */
8405 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
8406 break;
8407 default:
8408 g_assert_not_reached();
8412 write_fp_sreg(s, rd, tcg_res);
8414 tcg_temp_free_i32(tcg_op1);
8415 tcg_temp_free_i32(tcg_op2);
8416 tcg_temp_free_i32(tcg_res);
8419 if (fpst) {
8420 tcg_temp_free_ptr(fpst);
8425 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
8427 * This code handles the common shifting logic and is used by both
8428 * the vector and scalar code.
8430 static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
8431 TCGv_i64 tcg_rnd, bool accumulate,
8432 bool is_u, int size, int shift)
8434 bool extended_result = false;
8435 bool round = tcg_rnd != NULL;
8436 int ext_lshift = 0;
8437 TCGv_i64 tcg_src_hi;
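    /*
     * Rounding adds 1 << (shift - 1) to the source before shifting.  For
     * 64-bit elements that addition can carry out of 64 bits, so we build
     * a wider intermediate in the pair tcg_src:tcg_src_hi and fold the
     * high part back in after the shift (the ext_lshift step below).
     */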
8439 if (round && size == 3) {
8440 extended_result = true;
8441 ext_lshift = 64 - shift;
8442 tcg_src_hi = tcg_temp_new_i64();
8443 } else if (shift == 64) {
8444 if (!accumulate && is_u) {
8445 /* result is zero */
8446 tcg_gen_movi_i64(tcg_res, 0);
8447 return;
8451 /* Deal with the rounding step */
8452 if (round) {
8453 if (extended_result) {
8454 TCGv_i64 tcg_zero = tcg_constant_i64(0);
8455 if (!is_u) {
8456 /* take care of sign extending tcg_res */
8457 tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
8458 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
8459 tcg_src, tcg_src_hi,
8460 tcg_rnd, tcg_zero);
8461 } else {
8462 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
8463 tcg_src, tcg_zero,
8464 tcg_rnd, tcg_zero);
8466 } else {
8467 tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
8471 /* Now do the shift right */
8472 if (round && extended_result) {
8473 /* extended case, >64 bit precision required */
8474 if (ext_lshift == 0) {
8475 /* special case, only high bits matter */
8476 tcg_gen_mov_i64(tcg_src, tcg_src_hi);
8477 } else {
8478 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
8479 tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
8480 tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
8482 } else {
8483 if (is_u) {
8484 if (shift == 64) {
8485 /* essentially shifting in 64 zeros */
8486 tcg_gen_movi_i64(tcg_src, 0);
8487 } else {
8488 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
8490 } else {
8491 if (shift == 64) {
8492 /* effectively extending the sign-bit */
8493 tcg_gen_sari_i64(tcg_src, tcg_src, 63);
8494 } else {
8495 tcg_gen_sari_i64(tcg_src, tcg_src, shift);
8500 if (accumulate) {
8501 tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
8502 } else {
8503 tcg_gen_mov_i64(tcg_res, tcg_src);
8506 if (extended_result) {
8507 tcg_temp_free_i64(tcg_src_hi);
8511 /* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
8512 static void handle_scalar_simd_shri(DisasContext *s,
8513 bool is_u, int immh, int immb,
8514 int opcode, int rn, int rd)
8516 const int size = 3;
8517 int immhb = immh << 3 | immb;
8518 int shift = 2 * (8 << size) - immhb;
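    /*
     * Right-shift amounts are encoded as (2 * esize) - immh:immb, so for
     * the 64-bit scalar form immh:immb = 1111:111 encodes a shift of 1 and
     * immh:immb = 1000:000 encodes a shift of 64.
     */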
8519 bool accumulate = false;
8520 bool round = false;
8521 bool insert = false;
8522 TCGv_i64 tcg_rn;
8523 TCGv_i64 tcg_rd;
8524 TCGv_i64 tcg_round;
8526 if (!extract32(immh, 3, 1)) {
8527 unallocated_encoding(s);
8528 return;
8531 if (!fp_access_check(s)) {
8532 return;
8535 switch (opcode) {
8536 case 0x02: /* SSRA / USRA (accumulate) */
8537 accumulate = true;
8538 break;
8539 case 0x04: /* SRSHR / URSHR (rounding) */
8540 round = true;
8541 break;
8542 case 0x06: /* SRSRA / URSRA (accum + rounding) */
8543 accumulate = round = true;
8544 break;
8545 case 0x08: /* SRI */
8546 insert = true;
8547 break;
8550 if (round) {
8551 tcg_round = tcg_constant_i64(1ULL << (shift - 1));
8552 } else {
8553 tcg_round = NULL;
8556 tcg_rn = read_fp_dreg(s, rn);
8557 tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
8559 if (insert) {
8560 /* A shift count equal to the element size is valid but does nothing;
8561 * special-case it to avoid an undefined shift by 64.
8563 int esize = 8 << size;
8564 if (shift != esize) {
8565 tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
8566 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
8568 } else {
8569 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
8570 accumulate, is_u, size, shift);
8573 write_fp_dreg(s, rd, tcg_rd);
8575 tcg_temp_free_i64(tcg_rn);
8576 tcg_temp_free_i64(tcg_rd);
8579 /* SHL/SLI - Scalar shift left */
8580 static void handle_scalar_simd_shli(DisasContext *s, bool insert,
8581 int immh, int immb, int opcode,
8582 int rn, int rd)
8584 int size = 32 - clz32(immh) - 1;
8585 int immhb = immh << 3 | immb;
8586 int shift = immhb - (8 << size);
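    /*
     * Left-shift amounts are encoded as immh:immb - esize, giving a shift
     * in the range 0 .. esize - 1; immh also encodes the element size via
     * the position of its most significant set bit.
     */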
8587 TCGv_i64 tcg_rn;
8588 TCGv_i64 tcg_rd;
8590 if (!extract32(immh, 3, 1)) {
8591 unallocated_encoding(s);
8592 return;
8595 if (!fp_access_check(s)) {
8596 return;
8599 tcg_rn = read_fp_dreg(s, rn);
8600 tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
8602 if (insert) {
8603 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
8604 } else {
8605 tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
8608 write_fp_dreg(s, rd, tcg_rd);
8610 tcg_temp_free_i64(tcg_rn);
8611 tcg_temp_free_i64(tcg_rd);
8614 /* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
8615 * (signed/unsigned) narrowing */
8616 static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
8617 bool is_u_shift, bool is_u_narrow,
8618 int immh, int immb, int opcode,
8619 int rn, int rd)
8621 int immhb = immh << 3 | immb;
8622 int size = 32 - clz32(immh) - 1;
8623 int esize = 8 << size;
8624 int shift = (2 * esize) - immhb;
8625 int elements = is_scalar ? 1 : (64 / esize);
8626 bool round = extract32(opcode, 0, 1);
8627 MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
8628 TCGv_i64 tcg_rn, tcg_rd, tcg_round;
8629 TCGv_i32 tcg_rd_narrowed;
8630 TCGv_i64 tcg_final;
8632 static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
8633 { gen_helper_neon_narrow_sat_s8,
8634 gen_helper_neon_unarrow_sat8 },
8635 { gen_helper_neon_narrow_sat_s16,
8636 gen_helper_neon_unarrow_sat16 },
8637 { gen_helper_neon_narrow_sat_s32,
8638 gen_helper_neon_unarrow_sat32 },
8639 { NULL, NULL },
8641 static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
8642 gen_helper_neon_narrow_sat_u8,
8643 gen_helper_neon_narrow_sat_u16,
8644 gen_helper_neon_narrow_sat_u32,
8645 NULL
8647 NeonGenNarrowEnvFn *narrowfn;
8649 int i;
8651 assert(size < 4);
8653 if (extract32(immh, 3, 1)) {
8654 unallocated_encoding(s);
8655 return;
8658 if (!fp_access_check(s)) {
8659 return;
8662 if (is_u_shift) {
8663 narrowfn = unsigned_narrow_fns[size];
8664 } else {
8665 narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
8668 tcg_rn = tcg_temp_new_i64();
8669 tcg_rd = tcg_temp_new_i64();
8670 tcg_rd_narrowed = tcg_temp_new_i32();
8671 tcg_final = tcg_const_i64(0);
8673 if (round) {
8674 tcg_round = tcg_constant_i64(1ULL << (shift - 1));
8675 } else {
8676 tcg_round = NULL;
8679 for (i = 0; i < elements; i++) {
8680 read_vec_element(s, tcg_rn, rn, i, ldop);
8681 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
8682 false, is_u_shift, size+1, shift);
8683 narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
8684 tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
8685 tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
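    /*
     * The narrowed elements have been assembled in tcg_final: the base
     * forms write it to the low 64 bits of Vd (the upper half is then
     * cleared below), while the "2" variants (is_q) write the high 64
     * bits and leave the existing low half untouched.
     */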
8688 if (!is_q) {
8689 write_vec_element(s, tcg_final, rd, 0, MO_64);
8690 } else {
8691 write_vec_element(s, tcg_final, rd, 1, MO_64);
8694 tcg_temp_free_i64(tcg_rn);
8695 tcg_temp_free_i64(tcg_rd);
8696 tcg_temp_free_i32(tcg_rd_narrowed);
8697 tcg_temp_free_i64(tcg_final);
8699 clear_vec_high(s, is_q, rd);
8702 /* SQSHLU, UQSHL, SQSHL: saturating left shifts */
8703 static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
8704 bool src_unsigned, bool dst_unsigned,
8705 int immh, int immb, int rn, int rd)
8707 int immhb = immh << 3 | immb;
8708 int size = 32 - clz32(immh) - 1;
8709 int shift = immhb - (8 << size);
8710 int pass;
8712 assert(immh != 0);
8713 assert(!(scalar && is_q));
8715 if (!scalar) {
8716 if (!is_q && extract32(immh, 3, 1)) {
8717 unallocated_encoding(s);
8718 return;
8721 /* Since we use the variable-shift helpers we must
8722 * replicate the shift count into each element of
8723 * the tcg_shift value.
8725 switch (size) {
8726 case 0:
8727 shift |= shift << 8;
8728 /* fall through */
8729 case 1:
8730 shift |= shift << 16;
8731 break;
8732 case 2:
8733 case 3:
8734 break;
8735 default:
8736 g_assert_not_reached();
8740 if (!fp_access_check(s)) {
8741 return;
8744 if (size == 3) {
8745 TCGv_i64 tcg_shift = tcg_constant_i64(shift);
8746 static NeonGenTwo64OpEnvFn * const fns[2][2] = {
8747 { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
8748 { NULL, gen_helper_neon_qshl_u64 },
8750 NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
8751 int maxpass = is_q ? 2 : 1;
8753 for (pass = 0; pass < maxpass; pass++) {
8754 TCGv_i64 tcg_op = tcg_temp_new_i64();
8756 read_vec_element(s, tcg_op, rn, pass, MO_64);
8757 genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
8758 write_vec_element(s, tcg_op, rd, pass, MO_64);
8760 tcg_temp_free_i64(tcg_op);
8762 clear_vec_high(s, is_q, rd);
8763 } else {
8764 TCGv_i32 tcg_shift = tcg_constant_i32(shift);
8765 static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
8767 { gen_helper_neon_qshl_s8,
8768 gen_helper_neon_qshl_s16,
8769 gen_helper_neon_qshl_s32 },
8770 { gen_helper_neon_qshlu_s8,
8771 gen_helper_neon_qshlu_s16,
8772 gen_helper_neon_qshlu_s32 }
8773 }, {
8774 { NULL, NULL, NULL },
8775 { gen_helper_neon_qshl_u8,
8776 gen_helper_neon_qshl_u16,
8777 gen_helper_neon_qshl_u32 }
8780 NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
8781 MemOp memop = scalar ? size : MO_32;
8782 int maxpass = scalar ? 1 : is_q ? 4 : 2;
8784 for (pass = 0; pass < maxpass; pass++) {
8785 TCGv_i32 tcg_op = tcg_temp_new_i32();
8787 read_vec_element_i32(s, tcg_op, rn, pass, memop);
8788 genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
8789 if (scalar) {
8790 switch (size) {
8791 case 0:
8792 tcg_gen_ext8u_i32(tcg_op, tcg_op);
8793 break;
8794 case 1:
8795 tcg_gen_ext16u_i32(tcg_op, tcg_op);
8796 break;
8797 case 2:
8798 break;
8799 default:
8800 g_assert_not_reached();
8802 write_fp_sreg(s, rd, tcg_op);
8803 } else {
8804 write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
8807 tcg_temp_free_i32(tcg_op);
8810 if (!scalar) {
8811 clear_vec_high(s, is_q, rd);
8816 /* Common vector code for handling integer to FP conversion */
8817 static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
8818 int elements, int is_signed,
8819 int fracbits, int size)
8821 TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
8822 TCGv_i32 tcg_shift = NULL;
8824 MemOp mop = size | (is_signed ? MO_SIGN : 0);
8825 int pass;
8827 if (fracbits || size == MO_64) {
8828 tcg_shift = tcg_constant_i32(fracbits);
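    /*
     * A non-zero fracbits means a fixed-point source: the helpers that take
     * a shift argument divide by 2^fracbits after the conversion.  The
     * 64-bit helpers always take the shift argument, so tcg_shift is also
     * created (as zero) for plain MO_64 integer conversions.
     */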
8831 if (size == MO_64) {
8832 TCGv_i64 tcg_int64 = tcg_temp_new_i64();
8833 TCGv_i64 tcg_double = tcg_temp_new_i64();
8835 for (pass = 0; pass < elements; pass++) {
8836 read_vec_element(s, tcg_int64, rn, pass, mop);
8838 if (is_signed) {
8839 gen_helper_vfp_sqtod(tcg_double, tcg_int64,
8840 tcg_shift, tcg_fpst);
8841 } else {
8842 gen_helper_vfp_uqtod(tcg_double, tcg_int64,
8843 tcg_shift, tcg_fpst);
8845 if (elements == 1) {
8846 write_fp_dreg(s, rd, tcg_double);
8847 } else {
8848 write_vec_element(s, tcg_double, rd, pass, MO_64);
8852 tcg_temp_free_i64(tcg_int64);
8853 tcg_temp_free_i64(tcg_double);
8855 } else {
8856 TCGv_i32 tcg_int32 = tcg_temp_new_i32();
8857 TCGv_i32 tcg_float = tcg_temp_new_i32();
8859 for (pass = 0; pass < elements; pass++) {
8860 read_vec_element_i32(s, tcg_int32, rn, pass, mop);
8862 switch (size) {
8863 case MO_32:
8864 if (fracbits) {
8865 if (is_signed) {
8866 gen_helper_vfp_sltos(tcg_float, tcg_int32,
8867 tcg_shift, tcg_fpst);
8868 } else {
8869 gen_helper_vfp_ultos(tcg_float, tcg_int32,
8870 tcg_shift, tcg_fpst);
8872 } else {
8873 if (is_signed) {
8874 gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
8875 } else {
8876 gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
8879 break;
8880 case MO_16:
8881 if (fracbits) {
8882 if (is_signed) {
8883 gen_helper_vfp_sltoh(tcg_float, tcg_int32,
8884 tcg_shift, tcg_fpst);
8885 } else {
8886 gen_helper_vfp_ultoh(tcg_float, tcg_int32,
8887 tcg_shift, tcg_fpst);
8889 } else {
8890 if (is_signed) {
8891 gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
8892 } else {
8893 gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
8896 break;
8897 default:
8898 g_assert_not_reached();
8901 if (elements == 1) {
8902 write_fp_sreg(s, rd, tcg_float);
8903 } else {
8904 write_vec_element_i32(s, tcg_float, rd, pass, size);
8908 tcg_temp_free_i32(tcg_int32);
8909 tcg_temp_free_i32(tcg_float);
8912 tcg_temp_free_ptr(tcg_fpst);
8914 clear_vec_high(s, elements << size == 16, rd);
8917 /* UCVTF/SCVTF - Integer to FP conversion */
8918 static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
8919 bool is_q, bool is_u,
8920 int immh, int immb, int opcode,
8921 int rn, int rd)
8923 int size, elements, fracbits;
8924 int immhb = immh << 3 | immb;
8926 if (immh & 8) {
8927 size = MO_64;
8928 if (!is_scalar && !is_q) {
8929 unallocated_encoding(s);
8930 return;
8932 } else if (immh & 4) {
8933 size = MO_32;
8934 } else if (immh & 2) {
8935 size = MO_16;
8936 if (!dc_isar_feature(aa64_fp16, s)) {
8937 unallocated_encoding(s);
8938 return;
8940 } else {
8941 /* immh == 0 would be a failure of the decode logic */
8942 g_assert(immh == 1);
8943 unallocated_encoding(s);
8944 return;
8947 if (is_scalar) {
8948 elements = 1;
8949 } else {
8950 elements = (8 << is_q) >> size;
8952 fracbits = (16 << size) - immhb;
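    /*
     * For the fixed-point SCVTF/UCVTF encodings fracbits is
     * (2 * esize) - immh:immb, in the range 1 .. esize; the plain
     * integer-convert forms reach handle_simd_intfp_conv() with
     * fracbits == 0 via the 2-reg-misc decoders instead.
     */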
8954 if (!fp_access_check(s)) {
8955 return;
8958 handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
8961 /* FCVTZS, FCVTZU - FP to fixed-point conversion */
8962 static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
8963 bool is_q, bool is_u,
8964 int immh, int immb, int rn, int rd)
8966 int immhb = immh << 3 | immb;
8967 int pass, size, fracbits;
8968 TCGv_ptr tcg_fpstatus;
8969 TCGv_i32 tcg_rmode, tcg_shift;
8971 if (immh & 0x8) {
8972 size = MO_64;
8973 if (!is_scalar && !is_q) {
8974 unallocated_encoding(s);
8975 return;
8977 } else if (immh & 0x4) {
8978 size = MO_32;
8979 } else if (immh & 0x2) {
8980 size = MO_16;
8981 if (!dc_isar_feature(aa64_fp16, s)) {
8982 unallocated_encoding(s);
8983 return;
8985 } else {
8986 /* Should have split out AdvSIMD modified immediate earlier. */
8987 assert(immh == 1);
8988 unallocated_encoding(s);
8989 return;
8992 if (!fp_access_check(s)) {
8993 return;
8996 assert(!(is_scalar && is_q));
8998 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
8999 tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
9000 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
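    /*
     * gen_helper_set_rmode() installs FPROUNDING_ZERO and hands back the
     * previous rounding mode in tcg_rmode; the second call at the end of
     * this function restores it.
     */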
9001 fracbits = (16 << size) - immhb;
9002 tcg_shift = tcg_constant_i32(fracbits);
9004 if (size == MO_64) {
9005 int maxpass = is_scalar ? 1 : 2;
9007 for (pass = 0; pass < maxpass; pass++) {
9008 TCGv_i64 tcg_op = tcg_temp_new_i64();
9010 read_vec_element(s, tcg_op, rn, pass, MO_64);
9011 if (is_u) {
9012 gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
9013 } else {
9014 gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
9016 write_vec_element(s, tcg_op, rd, pass, MO_64);
9017 tcg_temp_free_i64(tcg_op);
9019 clear_vec_high(s, is_q, rd);
9020 } else {
9021 void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
9022 int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);
9024 switch (size) {
9025 case MO_16:
9026 if (is_u) {
9027 fn = gen_helper_vfp_touhh;
9028 } else {
9029 fn = gen_helper_vfp_toshh;
9031 break;
9032 case MO_32:
9033 if (is_u) {
9034 fn = gen_helper_vfp_touls;
9035 } else {
9036 fn = gen_helper_vfp_tosls;
9038 break;
9039 default:
9040 g_assert_not_reached();
9043 for (pass = 0; pass < maxpass; pass++) {
9044 TCGv_i32 tcg_op = tcg_temp_new_i32();
9046 read_vec_element_i32(s, tcg_op, rn, pass, size);
9047 fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
9048 if (is_scalar) {
9049 write_fp_sreg(s, rd, tcg_op);
9050 } else {
9051 write_vec_element_i32(s, tcg_op, rd, pass, size);
9053 tcg_temp_free_i32(tcg_op);
9055 if (!is_scalar) {
9056 clear_vec_high(s, is_q, rd);
9060 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
9061 tcg_temp_free_ptr(tcg_fpstatus);
9062 tcg_temp_free_i32(tcg_rmode);
9065 /* AdvSIMD scalar shift by immediate
9066 * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
9067 * +-----+---+-------------+------+------+--------+---+------+------+
9068 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
9069 * +-----+---+-------------+------+------+--------+---+------+------+
9071 * This is the scalar version, so it works on fixed-size registers.
9073 static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
9075 int rd = extract32(insn, 0, 5);
9076 int rn = extract32(insn, 5, 5);
9077 int opcode = extract32(insn, 11, 5);
9078 int immb = extract32(insn, 16, 3);
9079 int immh = extract32(insn, 19, 4);
9080 bool is_u = extract32(insn, 29, 1);
9082 if (immh == 0) {
9083 unallocated_encoding(s);
9084 return;
9087 switch (opcode) {
9088 case 0x08: /* SRI */
9089 if (!is_u) {
9090 unallocated_encoding(s);
9091 return;
9093 /* fall through */
9094 case 0x00: /* SSHR / USHR */
9095 case 0x02: /* SSRA / USRA */
9096 case 0x04: /* SRSHR / URSHR */
9097 case 0x06: /* SRSRA / URSRA */
9098 handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
9099 break;
9100 case 0x0a: /* SHL / SLI */
9101 handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
9102 break;
9103 case 0x1c: /* SCVTF, UCVTF */
9104 handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
9105 opcode, rn, rd);
9106 break;
9107 case 0x10: /* SQSHRUN, SQSHRUN2 */
9108 case 0x11: /* SQRSHRUN, SQRSHRUN2 */
9109 if (!is_u) {
9110 unallocated_encoding(s);
9111 return;
9113 handle_vec_simd_sqshrn(s, true, false, false, true,
9114 immh, immb, opcode, rn, rd);
9115 break;
9116 case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
9117 case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
9118 handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
9119 immh, immb, opcode, rn, rd);
9120 break;
9121 case 0xc: /* SQSHLU */
9122 if (!is_u) {
9123 unallocated_encoding(s);
9124 return;
9126 handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
9127 break;
9128 case 0xe: /* SQSHL, UQSHL */
9129 handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
9130 break;
9131 case 0x1f: /* FCVTZS, FCVTZU */
9132 handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
9133 break;
9134 default:
9135 unallocated_encoding(s);
9136 break;
9140 /* AdvSIMD scalar three different
9141 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
9142 * +-----+---+-----------+------+---+------+--------+-----+------+------+
9143 * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
9144 * +-----+---+-----------+------+---+------+--------+-----+------+------+
9146 static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
9148 bool is_u = extract32(insn, 29, 1);
9149 int size = extract32(insn, 22, 2);
9150 int opcode = extract32(insn, 12, 4);
9151 int rm = extract32(insn, 16, 5);
9152 int rn = extract32(insn, 5, 5);
9153 int rd = extract32(insn, 0, 5);
9155 if (is_u) {
9156 unallocated_encoding(s);
9157 return;
9160 switch (opcode) {
9161 case 0x9: /* SQDMLAL, SQDMLAL2 */
9162 case 0xb: /* SQDMLSL, SQDMLSL2 */
9163 case 0xd: /* SQDMULL, SQDMULL2 */
9164 if (size == 0 || size == 3) {
9165 unallocated_encoding(s);
9166 return;
9168 break;
9169 default:
9170 unallocated_encoding(s);
9171 return;
9174 if (!fp_access_check(s)) {
9175 return;
9178 if (size == 2) {
9179 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9180 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
9181 TCGv_i64 tcg_res = tcg_temp_new_i64();
9183 read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
9184 read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);
9186 tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
9187 gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);
9189 switch (opcode) {
9190 case 0xd: /* SQDMULL, SQDMULL2 */
9191 break;
9192 case 0xb: /* SQDMLSL, SQDMLSL2 */
9193 tcg_gen_neg_i64(tcg_res, tcg_res);
9194 /* fall through */
9195 case 0x9: /* SQDMLAL, SQDMLAL2 */
9196 read_vec_element(s, tcg_op1, rd, 0, MO_64);
9197 gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
9198 tcg_res, tcg_op1);
9199 break;
9200 default:
9201 g_assert_not_reached();
9204 write_fp_dreg(s, rd, tcg_res);
9206 tcg_temp_free_i64(tcg_op1);
9207 tcg_temp_free_i64(tcg_op2);
9208 tcg_temp_free_i64(tcg_res);
9209 } else {
9210 TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
9211 TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
9212 TCGv_i64 tcg_res = tcg_temp_new_i64();
9214 gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
9215 gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);
9217 switch (opcode) {
9218 case 0xd: /* SQDMULL, SQDMULL2 */
9219 break;
9220 case 0xb: /* SQDMLSL, SQDMLSL2 */
9221 gen_helper_neon_negl_u32(tcg_res, tcg_res);
9222 /* fall through */
9223 case 0x9: /* SQDMLAL, SQDMLAL2 */
9225 TCGv_i64 tcg_op3 = tcg_temp_new_i64();
9226 read_vec_element(s, tcg_op3, rd, 0, MO_32);
9227 gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
9228 tcg_res, tcg_op3);
9229 tcg_temp_free_i64(tcg_op3);
9230 break;
9232 default:
9233 g_assert_not_reached();
9236 tcg_gen_ext32u_i64(tcg_res, tcg_res);
9237 write_fp_dreg(s, rd, tcg_res);
9239 tcg_temp_free_i32(tcg_op1);
9240 tcg_temp_free_i32(tcg_op2);
9241 tcg_temp_free_i64(tcg_res);
9245 static void handle_3same_64(DisasContext *s, int opcode, bool u,
9246 TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
9248 /* Handle 64x64->64 opcodes which are shared between the scalar
9249 * and vector 3-same groups. We cover every opcode where size == 3
9250 * is valid in either the three-reg-same (integer, not pairwise)
9251 * or scalar-three-reg-same groups.
9253 TCGCond cond;
9255 switch (opcode) {
9256 case 0x1: /* SQADD */
9257 if (u) {
9258 gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
9259 } else {
9260 gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
9262 break;
9263 case 0x5: /* SQSUB */
9264 if (u) {
9265 gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
9266 } else {
9267 gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
9269 break;
9270 case 0x6: /* CMGT, CMHI */
9271 /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
9272 * We implement this using setcond (test) and then negating.
9274 cond = u ? TCG_COND_GTU : TCG_COND_GT;
9275 do_cmop:
9276 tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
9277 tcg_gen_neg_i64(tcg_rd, tcg_rd);
9278 break;
9279 case 0x7: /* CMGE, CMHS */
9280 cond = u ? TCG_COND_GEU : TCG_COND_GE;
9281 goto do_cmop;
9282 case 0x11: /* CMTST, CMEQ */
9283 if (u) {
9284 cond = TCG_COND_EQ;
9285 goto do_cmop;
9287 gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
9288 break;
9289 case 0x8: /* SSHL, USHL */
9290 if (u) {
9291 gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
9292 } else {
9293 gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
9295 break;
9296 case 0x9: /* SQSHL, UQSHL */
9297 if (u) {
9298 gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
9299 } else {
9300 gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
9302 break;
9303 case 0xa: /* SRSHL, URSHL */
9304 if (u) {
9305 gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
9306 } else {
9307 gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
9309 break;
9310 case 0xb: /* SQRSHL, UQRSHL */
9311 if (u) {
9312 gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
9313 } else {
9314 gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
9316 break;
9317 case 0x10: /* ADD, SUB */
9318 if (u) {
9319 tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
9320 } else {
9321 tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
9323 break;
9324 default:
9325 g_assert_not_reached();
9329 /* Handle the 3-same-operands float operations; shared by the scalar
9330 * and vector encodings. The caller must filter out any opcodes
9331 * not allocated for the encoding group it is dealing with.
9333 static void handle_3same_float(DisasContext *s, int size, int elements,
9334 int fpopcode, int rd, int rn, int rm)
9336 int pass;
9337 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
9339 for (pass = 0; pass < elements; pass++) {
9340 if (size) {
9341 /* Double */
9342 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9343 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
9344 TCGv_i64 tcg_res = tcg_temp_new_i64();
9346 read_vec_element(s, tcg_op1, rn, pass, MO_64);
9347 read_vec_element(s, tcg_op2, rm, pass, MO_64);
9349 switch (fpopcode) {
9350 case 0x39: /* FMLS */
9351 /* As usual for ARM, separate negation for fused multiply-add */
9352 gen_helper_vfp_negd(tcg_op1, tcg_op1);
9353 /* fall through */
9354 case 0x19: /* FMLA */
9355 read_vec_element(s, tcg_res, rd, pass, MO_64);
9356 gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
9357 tcg_res, fpst);
9358 break;
9359 case 0x18: /* FMAXNM */
9360 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
9361 break;
9362 case 0x1a: /* FADD */
9363 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
9364 break;
9365 case 0x1b: /* FMULX */
9366 gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
9367 break;
9368 case 0x1c: /* FCMEQ */
9369 gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
9370 break;
9371 case 0x1e: /* FMAX */
9372 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
9373 break;
9374 case 0x1f: /* FRECPS */
9375 gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
9376 break;
9377 case 0x38: /* FMINNM */
9378 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
9379 break;
9380 case 0x3a: /* FSUB */
9381 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
9382 break;
9383 case 0x3e: /* FMIN */
9384 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
9385 break;
9386 case 0x3f: /* FRSQRTS */
9387 gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
9388 break;
9389 case 0x5b: /* FMUL */
9390 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
9391 break;
9392 case 0x5c: /* FCMGE */
9393 gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
9394 break;
9395 case 0x5d: /* FACGE */
9396 gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
9397 break;
9398 case 0x5f: /* FDIV */
9399 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
9400 break;
9401 case 0x7a: /* FABD */
9402 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
9403 gen_helper_vfp_absd(tcg_res, tcg_res);
9404 break;
9405 case 0x7c: /* FCMGT */
9406 gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
9407 break;
9408 case 0x7d: /* FACGT */
9409 gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
9410 break;
9411 default:
9412 g_assert_not_reached();
9415 write_vec_element(s, tcg_res, rd, pass, MO_64);
9417 tcg_temp_free_i64(tcg_res);
9418 tcg_temp_free_i64(tcg_op1);
9419 tcg_temp_free_i64(tcg_op2);
9420 } else {
9421 /* Single */
9422 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
9423 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
9424 TCGv_i32 tcg_res = tcg_temp_new_i32();
9426 read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
9427 read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
9429 switch (fpopcode) {
9430 case 0x39: /* FMLS */
9431 /* As usual for ARM, separate negation for fused multiply-add */
9432 gen_helper_vfp_negs(tcg_op1, tcg_op1);
9433 /* fall through */
9434 case 0x19: /* FMLA */
9435 read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
9436 gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
9437 tcg_res, fpst);
9438 break;
9439 case 0x1a: /* FADD */
9440 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
9441 break;
9442 case 0x1b: /* FMULX */
9443 gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
9444 break;
9445 case 0x1c: /* FCMEQ */
9446 gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9447 break;
9448 case 0x1e: /* FMAX */
9449 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
9450 break;
9451 case 0x1f: /* FRECPS */
9452 gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9453 break;
9454 case 0x18: /* FMAXNM */
9455 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
9456 break;
9457 case 0x38: /* FMINNM */
9458 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
9459 break;
9460 case 0x3a: /* FSUB */
9461 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
9462 break;
9463 case 0x3e: /* FMIN */
9464 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
9465 break;
9466 case 0x3f: /* FRSQRTS */
9467 gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9468 break;
9469 case 0x5b: /* FMUL */
9470 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
9471 break;
9472 case 0x5c: /* FCMGE */
9473 gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9474 break;
9475 case 0x5d: /* FACGE */
9476 gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9477 break;
9478 case 0x5f: /* FDIV */
9479 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
9480 break;
9481 case 0x7a: /* FABD */
9482 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
9483 gen_helper_vfp_abss(tcg_res, tcg_res);
9484 break;
9485 case 0x7c: /* FCMGT */
9486 gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9487 break;
9488 case 0x7d: /* FACGT */
9489 gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9490 break;
9491 default:
9492 g_assert_not_reached();
9495 if (elements == 1) {
9496 /* scalar single so clear high part */
9497 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
9499 tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
9500 write_vec_element(s, tcg_tmp, rd, pass, MO_64);
9501 tcg_temp_free_i64(tcg_tmp);
9502 } else {
9503 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
9506 tcg_temp_free_i32(tcg_res);
9507 tcg_temp_free_i32(tcg_op1);
9508 tcg_temp_free_i32(tcg_op2);
9512 tcg_temp_free_ptr(fpst);
9514 clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
9517 /* AdvSIMD scalar three same
9518 * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
9519 * +-----+---+-----------+------+---+------+--------+---+------+------+
9520 * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
9521 * +-----+---+-----------+------+---+------+--------+---+------+------+
9523 static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
9525 int rd = extract32(insn, 0, 5);
9526 int rn = extract32(insn, 5, 5);
9527 int opcode = extract32(insn, 11, 5);
9528 int rm = extract32(insn, 16, 5);
9529 int size = extract32(insn, 22, 2);
9530 bool u = extract32(insn, 29, 1);
9531 TCGv_i64 tcg_rd;
9533 if (opcode >= 0x18) {
9534 /* Floating point: U, size[1] and opcode indicate operation */
9535 int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
9536 switch (fpopcode) {
9537 case 0x1b: /* FMULX */
9538 case 0x1f: /* FRECPS */
9539 case 0x3f: /* FRSQRTS */
9540 case 0x5d: /* FACGE */
9541 case 0x7d: /* FACGT */
9542 case 0x1c: /* FCMEQ */
9543 case 0x5c: /* FCMGE */
9544 case 0x7c: /* FCMGT */
9545 case 0x7a: /* FABD */
9546 break;
9547 default:
9548 unallocated_encoding(s);
9549 return;
9552 if (!fp_access_check(s)) {
9553 return;
9556 handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
9557 return;
9560 switch (opcode) {
9561 case 0x1: /* SQADD, UQADD */
9562 case 0x5: /* SQSUB, UQSUB */
9563 case 0x9: /* SQSHL, UQSHL */
9564 case 0xb: /* SQRSHL, UQRSHL */
9565 break;
9566 case 0x8: /* SSHL, USHL */
9567 case 0xa: /* SRSHL, URSHL */
9568 case 0x6: /* CMGT, CMHI */
9569 case 0x7: /* CMGE, CMHS */
9570 case 0x11: /* CMTST, CMEQ */
9571 case 0x10: /* ADD, SUB (vector) */
9572 if (size != 3) {
9573 unallocated_encoding(s);
9574 return;
9576 break;
9577 case 0x16: /* SQDMULH, SQRDMULH (vector) */
9578 if (size != 1 && size != 2) {
9579 unallocated_encoding(s);
9580 return;
9582 break;
9583 default:
9584 unallocated_encoding(s);
9585 return;
9588 if (!fp_access_check(s)) {
9589 return;
9592 tcg_rd = tcg_temp_new_i64();
9594 if (size == 3) {
9595 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
9596 TCGv_i64 tcg_rm = read_fp_dreg(s, rm);
9598 handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
9599 tcg_temp_free_i64(tcg_rn);
9600 tcg_temp_free_i64(tcg_rm);
9601 } else {
9602 /* Do a single operation on the lowest element in the vector.
9603 * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
9604 * no side effects for all these operations.
9605 * OPTME: special-purpose helpers would avoid doing some
9606 * unnecessary work in the helper for the 8 and 16 bit cases.
9608 NeonGenTwoOpEnvFn *genenvfn;
9609 TCGv_i32 tcg_rn = tcg_temp_new_i32();
9610 TCGv_i32 tcg_rm = tcg_temp_new_i32();
9611 TCGv_i32 tcg_rd32 = tcg_temp_new_i32();
9613 read_vec_element_i32(s, tcg_rn, rn, 0, size);
9614 read_vec_element_i32(s, tcg_rm, rm, 0, size);
9616 switch (opcode) {
9617 case 0x1: /* SQADD, UQADD */
9619 static NeonGenTwoOpEnvFn * const fns[3][2] = {
9620 { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
9621 { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
9622 { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
9624 genenvfn = fns[size][u];
9625 break;
9627 case 0x5: /* SQSUB, UQSUB */
9629 static NeonGenTwoOpEnvFn * const fns[3][2] = {
9630 { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
9631 { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
9632 { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
9634 genenvfn = fns[size][u];
9635 break;
9637 case 0x9: /* SQSHL, UQSHL */
9639 static NeonGenTwoOpEnvFn * const fns[3][2] = {
9640 { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
9641 { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
9642 { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
9644 genenvfn = fns[size][u];
9645 break;
9647 case 0xb: /* SQRSHL, UQRSHL */
9649 static NeonGenTwoOpEnvFn * const fns[3][2] = {
9650 { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
9651 { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
9652 { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
9654 genenvfn = fns[size][u];
9655 break;
9657 case 0x16: /* SQDMULH, SQRDMULH */
9659 static NeonGenTwoOpEnvFn * const fns[2][2] = {
9660 { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
9661 { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
9663 assert(size == 1 || size == 2);
9664 genenvfn = fns[size - 1][u];
9665 break;
9667 default:
9668 g_assert_not_reached();
9671 genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
9672 tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
9673 tcg_temp_free_i32(tcg_rd32);
9674 tcg_temp_free_i32(tcg_rn);
9675 tcg_temp_free_i32(tcg_rm);
9678 write_fp_dreg(s, rd, tcg_rd);
9680 tcg_temp_free_i64(tcg_rd);
9683 /* AdvSIMD scalar three same FP16
9684 * 31 30 29 28 24 23 22 21 20 16 15 14 13 11 10 9 5 4 0
9685 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
9686 * | 0 1 | U | 1 1 1 1 0 | a | 1 0 | Rm | 0 0 | opcode | 1 | Rn | Rd |
9687 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
9688 * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400
9689 * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400
9691 static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
9692 uint32_t insn)
9694 int rd = extract32(insn, 0, 5);
9695 int rn = extract32(insn, 5, 5);
9696 int opcode = extract32(insn, 11, 3);
9697 int rm = extract32(insn, 16, 5);
9698 bool u = extract32(insn, 29, 1);
9699 bool a = extract32(insn, 23, 1);
9700 int fpopcode = opcode | (a << 3) | (u << 4);
9701 TCGv_ptr fpst;
9702 TCGv_i32 tcg_op1;
9703 TCGv_i32 tcg_op2;
9704 TCGv_i32 tcg_res;
9706 switch (fpopcode) {
9707 case 0x03: /* FMULX */
9708 case 0x04: /* FCMEQ (reg) */
9709 case 0x07: /* FRECPS */
9710 case 0x0f: /* FRSQRTS */
9711 case 0x14: /* FCMGE (reg) */
9712 case 0x15: /* FACGE */
9713 case 0x1a: /* FABD */
9714 case 0x1c: /* FCMGT (reg) */
9715 case 0x1d: /* FACGT */
9716 break;
9717 default:
9718 unallocated_encoding(s);
9719 return;
9722 if (!dc_isar_feature(aa64_fp16, s)) {
9723 unallocated_encoding(s);
9726 if (!fp_access_check(s)) {
9727 return;
9730 fpst = fpstatus_ptr(FPST_FPCR_F16);
9732 tcg_op1 = read_fp_hreg(s, rn);
9733 tcg_op2 = read_fp_hreg(s, rm);
9734 tcg_res = tcg_temp_new_i32();
9736 switch (fpopcode) {
9737 case 0x03: /* FMULX */
9738 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
9739 break;
9740 case 0x04: /* FCMEQ (reg) */
9741 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9742 break;
9743 case 0x07: /* FRECPS */
9744 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9745 break;
9746 case 0x0f: /* FRSQRTS */
9747 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9748 break;
9749 case 0x14: /* FCMGE (reg) */
9750 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9751 break;
9752 case 0x15: /* FACGE */
9753 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9754 break;
9755 case 0x1a: /* FABD */
9756 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
9757 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
9758 break;
9759 case 0x1c: /* FCMGT (reg) */
9760 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9761 break;
9762 case 0x1d: /* FACGT */
9763 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9764 break;
9765 default:
9766 g_assert_not_reached();
9769 write_fp_sreg(s, rd, tcg_res);
9772 tcg_temp_free_i32(tcg_res);
9773 tcg_temp_free_i32(tcg_op1);
9774 tcg_temp_free_i32(tcg_op2);
9775 tcg_temp_free_ptr(fpst);
9778 /* AdvSIMD scalar three same extra
9779 * 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0
9780 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
9781 * | 0 1 | U | 1 1 1 1 0 | size | 0 | Rm | 1 | opcode | 1 | Rn | Rd |
9782 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
9784 static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
9785 uint32_t insn)
9787 int rd = extract32(insn, 0, 5);
9788 int rn = extract32(insn, 5, 5);
9789 int opcode = extract32(insn, 11, 4);
9790 int rm = extract32(insn, 16, 5);
9791 int size = extract32(insn, 22, 2);
9792 bool u = extract32(insn, 29, 1);
9793 TCGv_i32 ele1, ele2, ele3;
9794 TCGv_i64 res;
9795 bool feature;
9797 switch (u * 16 + opcode) {
9798 case 0x10: /* SQRDMLAH (vector) */
9799 case 0x11: /* SQRDMLSH (vector) */
9800 if (size != 1 && size != 2) {
9801 unallocated_encoding(s);
9802 return;
9804 feature = dc_isar_feature(aa64_rdm, s);
9805 break;
9806 default:
9807 unallocated_encoding(s);
9808 return;
9810 if (!feature) {
9811 unallocated_encoding(s);
9812 return;
9814 if (!fp_access_check(s)) {
9815 return;
9818 /* Do a single operation on the lowest element in the vector.
9819 * We use the standard Neon helpers and rely on 0 OP 0 == 0
9820 * with no side effects for all these operations.
9821 * OPTME: special-purpose helpers would avoid doing some
9822 * unnecessary work in the helper for the 16 bit cases.
9824 ele1 = tcg_temp_new_i32();
9825 ele2 = tcg_temp_new_i32();
9826 ele3 = tcg_temp_new_i32();
9828 read_vec_element_i32(s, ele1, rn, 0, size);
9829 read_vec_element_i32(s, ele2, rm, 0, size);
9830 read_vec_element_i32(s, ele3, rd, 0, size);
9832 switch (opcode) {
9833 case 0x0: /* SQRDMLAH */
9834 if (size == 1) {
9835 gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
9836 } else {
9837 gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
9839 break;
9840 case 0x1: /* SQRDMLSH */
9841 if (size == 1) {
9842 gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
9843 } else {
9844 gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
9846 break;
9847 default:
9848 g_assert_not_reached();
9850 tcg_temp_free_i32(ele1);
9851 tcg_temp_free_i32(ele2);
9853 res = tcg_temp_new_i64();
9854 tcg_gen_extu_i32_i64(res, ele3);
9855 tcg_temp_free_i32(ele3);
9857 write_fp_dreg(s, rd, res);
9858 tcg_temp_free_i64(res);
9861 static void handle_2misc_64(DisasContext *s, int opcode, bool u,
9862 TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
9863 TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
9865 /* Handle 64->64 opcodes which are shared between the scalar and
9866 * vector 2-reg-misc groups. We cover every integer opcode where size == 3
9867 * is valid in either group and also the double-precision fp ops.
9868 * The caller only need provide tcg_rmode and tcg_fpstatus if the op
9869 * requires them.
9871 TCGCond cond;
9873 switch (opcode) {
9874 case 0x4: /* CLS, CLZ */
9875 if (u) {
9876 tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
9877 } else {
9878 tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
9880 break;
9881 case 0x5: /* NOT */
9882 /* This opcode is shared with CNT and RBIT but we have earlier
9883 * enforced that size == 3 if and only if this is the NOT insn.
9885 tcg_gen_not_i64(tcg_rd, tcg_rn);
9886 break;
9887 case 0x7: /* SQABS, SQNEG */
9888 if (u) {
9889 gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
9890 } else {
9891 gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
9893 break;
9894 case 0xa: /* CMLT */
9895 /* 64 bit integer comparison against zero, result is
9896 * test ? (2^64 - 1) : 0. We implement this using setcond(test)
9897 * and then negating the result.
9899 cond = TCG_COND_LT;
9900 do_cmop:
9901 tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
9902 tcg_gen_neg_i64(tcg_rd, tcg_rd);
9903 break;
9904 case 0x8: /* CMGT, CMGE */
9905 cond = u ? TCG_COND_GE : TCG_COND_GT;
9906 goto do_cmop;
9907 case 0x9: /* CMEQ, CMLE */
9908 cond = u ? TCG_COND_LE : TCG_COND_EQ;
9909 goto do_cmop;
9910 case 0xb: /* ABS, NEG */
9911 if (u) {
9912 tcg_gen_neg_i64(tcg_rd, tcg_rn);
9913 } else {
9914 tcg_gen_abs_i64(tcg_rd, tcg_rn);
9916 break;
9917 case 0x2f: /* FABS */
9918 gen_helper_vfp_absd(tcg_rd, tcg_rn);
9919 break;
9920 case 0x6f: /* FNEG */
9921 gen_helper_vfp_negd(tcg_rd, tcg_rn);
9922 break;
9923 case 0x7f: /* FSQRT */
9924 gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
9925 break;
9926 case 0x1a: /* FCVTNS */
9927 case 0x1b: /* FCVTMS */
9928 case 0x1c: /* FCVTAS */
9929 case 0x3a: /* FCVTPS */
9930 case 0x3b: /* FCVTZS */
9931 gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
9932 break;
9933 case 0x5a: /* FCVTNU */
9934 case 0x5b: /* FCVTMU */
9935 case 0x5c: /* FCVTAU */
9936 case 0x7a: /* FCVTPU */
9937 case 0x7b: /* FCVTZU */
9938 gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
9939 break;
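    /*
     * All of the FCVT* variants above share the same to-integer helpers;
     * what distinguishes FCVTN/M/A/P/Z is the rounding mode the caller has
     * already installed in tcg_fpstatus, and the zero constant is the
     * fixed-point "fracbits" argument (i.e. a plain integer conversion).
     */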
9940 case 0x18: /* FRINTN */
9941 case 0x19: /* FRINTM */
9942 case 0x38: /* FRINTP */
9943 case 0x39: /* FRINTZ */
9944 case 0x58: /* FRINTA */
9945 case 0x79: /* FRINTI */
9946 gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
9947 break;
9948 case 0x59: /* FRINTX */
9949 gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
9950 break;
9951 case 0x1e: /* FRINT32Z */
9952 case 0x5e: /* FRINT32X */
9953 gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
9954 break;
9955 case 0x1f: /* FRINT64Z */
9956 case 0x5f: /* FRINT64X */
9957 gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
9958 break;
9959 default:
9960 g_assert_not_reached();
9964 static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
9965 bool is_scalar, bool is_u, bool is_q,
9966 int size, int rn, int rd)
9968 bool is_double = (size == MO_64);
9969 TCGv_ptr fpst;
9971 if (!fp_access_check(s)) {
9972 return;
9975 fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
9977 if (is_double) {
9978 TCGv_i64 tcg_op = tcg_temp_new_i64();
9979 TCGv_i64 tcg_zero = tcg_constant_i64(0);
9980 TCGv_i64 tcg_res = tcg_temp_new_i64();
9981 NeonGenTwoDoubleOpFn *genfn;
9982 bool swap = false;
9983 int pass;
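        /*
         * FCMLT and FCMLE against zero have no helpers of their own: they
         * are implemented by swapping the operands of the FCMGT/FCMGE
         * helpers (swap below), i.e. x < 0 becomes 0 > x.
         */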
9985 switch (opcode) {
9986 case 0x2e: /* FCMLT (zero) */
9987 swap = true;
9988 /* fallthrough */
9989 case 0x2c: /* FCMGT (zero) */
9990 genfn = gen_helper_neon_cgt_f64;
9991 break;
9992 case 0x2d: /* FCMEQ (zero) */
9993 genfn = gen_helper_neon_ceq_f64;
9994 break;
9995 case 0x6d: /* FCMLE (zero) */
9996 swap = true;
9997 /* fall through */
9998 case 0x6c: /* FCMGE (zero) */
9999 genfn = gen_helper_neon_cge_f64;
10000 break;
10001 default:
10002 g_assert_not_reached();
10005 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
10006 read_vec_element(s, tcg_op, rn, pass, MO_64);
10007 if (swap) {
10008 genfn(tcg_res, tcg_zero, tcg_op, fpst);
10009 } else {
10010 genfn(tcg_res, tcg_op, tcg_zero, fpst);
10012 write_vec_element(s, tcg_res, rd, pass, MO_64);
10014 tcg_temp_free_i64(tcg_res);
10015 tcg_temp_free_i64(tcg_op);
10017 clear_vec_high(s, !is_scalar, rd);
10018 } else {
10019 TCGv_i32 tcg_op = tcg_temp_new_i32();
10020 TCGv_i32 tcg_zero = tcg_constant_i32(0);
10021 TCGv_i32 tcg_res = tcg_temp_new_i32();
10022 NeonGenTwoSingleOpFn *genfn;
10023 bool swap = false;
10024 int pass, maxpasses;
10026 if (size == MO_16) {
10027 switch (opcode) {
10028 case 0x2e: /* FCMLT (zero) */
10029 swap = true;
10030 /* fall through */
10031 case 0x2c: /* FCMGT (zero) */
10032 genfn = gen_helper_advsimd_cgt_f16;
10033 break;
10034 case 0x2d: /* FCMEQ (zero) */
10035 genfn = gen_helper_advsimd_ceq_f16;
10036 break;
10037 case 0x6d: /* FCMLE (zero) */
10038 swap = true;
10039 /* fall through */
10040 case 0x6c: /* FCMGE (zero) */
10041 genfn = gen_helper_advsimd_cge_f16;
10042 break;
10043 default:
10044 g_assert_not_reached();
10046 } else {
10047 switch (opcode) {
10048 case 0x2e: /* FCMLT (zero) */
10049 swap = true;
10050 /* fall through */
10051 case 0x2c: /* FCMGT (zero) */
10052 genfn = gen_helper_neon_cgt_f32;
10053 break;
10054 case 0x2d: /* FCMEQ (zero) */
10055 genfn = gen_helper_neon_ceq_f32;
10056 break;
10057 case 0x6d: /* FCMLE (zero) */
10058 swap = true;
10059 /* fall through */
10060 case 0x6c: /* FCMGE (zero) */
10061 genfn = gen_helper_neon_cge_f32;
10062 break;
10063 default:
10064 g_assert_not_reached();
10068 if (is_scalar) {
10069 maxpasses = 1;
10070 } else {
10071 int vector_size = 8 << is_q;
10072 maxpasses = vector_size >> size;
10075 for (pass = 0; pass < maxpasses; pass++) {
10076 read_vec_element_i32(s, tcg_op, rn, pass, size);
10077 if (swap) {
10078 genfn(tcg_res, tcg_zero, tcg_op, fpst);
10079 } else {
10080 genfn(tcg_res, tcg_op, tcg_zero, fpst);
10082 if (is_scalar) {
10083 write_fp_sreg(s, rd, tcg_res);
10084 } else {
10085 write_vec_element_i32(s, tcg_res, rd, pass, size);
10088 tcg_temp_free_i32(tcg_res);
10089 tcg_temp_free_i32(tcg_op);
10090 if (!is_scalar) {
10091 clear_vec_high(s, is_q, rd);
10095 tcg_temp_free_ptr(fpst);
10098 static void handle_2misc_reciprocal(DisasContext *s, int opcode,
10099 bool is_scalar, bool is_u, bool is_q,
10100 int size, int rn, int rd)
10102 bool is_double = (size == 3);
10103 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
10105 if (is_double) {
10106 TCGv_i64 tcg_op = tcg_temp_new_i64();
10107 TCGv_i64 tcg_res = tcg_temp_new_i64();
10108 int pass;
10110 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
10111 read_vec_element(s, tcg_op, rn, pass, MO_64);
10112 switch (opcode) {
10113 case 0x3d: /* FRECPE */
10114 gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
10115 break;
10116 case 0x3f: /* FRECPX */
10117 gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
10118 break;
10119 case 0x7d: /* FRSQRTE */
10120 gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
10121 break;
10122 default:
10123 g_assert_not_reached();
10125 write_vec_element(s, tcg_res, rd, pass, MO_64);
10127 tcg_temp_free_i64(tcg_res);
10128 tcg_temp_free_i64(tcg_op);
10129 clear_vec_high(s, !is_scalar, rd);
10130 } else {
10131 TCGv_i32 tcg_op = tcg_temp_new_i32();
10132 TCGv_i32 tcg_res = tcg_temp_new_i32();
10133 int pass, maxpasses;
10135 if (is_scalar) {
10136 maxpasses = 1;
10137 } else {
10138 maxpasses = is_q ? 4 : 2;
10141 for (pass = 0; pass < maxpasses; pass++) {
10142 read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
10144 switch (opcode) {
10145 case 0x3c: /* URECPE */
10146 gen_helper_recpe_u32(tcg_res, tcg_op);
10147 break;
10148 case 0x3d: /* FRECPE */
10149 gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
10150 break;
10151 case 0x3f: /* FRECPX */
10152 gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
10153 break;
10154 case 0x7d: /* FRSQRTE */
10155 gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
10156 break;
10157 default:
10158 g_assert_not_reached();
10161 if (is_scalar) {
10162 write_fp_sreg(s, rd, tcg_res);
10163 } else {
10164 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
10167 tcg_temp_free_i32(tcg_res);
10168 tcg_temp_free_i32(tcg_op);
10169 if (!is_scalar) {
10170 clear_vec_high(s, is_q, rd);
10173 tcg_temp_free_ptr(fpst);
10176 static void handle_2misc_narrow(DisasContext *s, bool scalar,
10177 int opcode, bool u, bool is_q,
10178 int size, int rn, int rd)
10180 /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
10181 * in the source becomes a size element in the destination).
10183 int pass;
10184 TCGv_i32 tcg_res[2];
10185 int destelt = is_q ? 2 : 0;
10186 int passes = scalar ? 1 : 2;
10188 if (scalar) {
10189 tcg_res[1] = tcg_constant_i32(0);
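    /*
     * The scalar form does a single pass but the write loop below always
     * stores two 32-bit results, so pre-load the second with zero to clear
     * the rest of the 64-bit destination element.
     */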
10192 for (pass = 0; pass < passes; pass++) {
10193 TCGv_i64 tcg_op = tcg_temp_new_i64();
10194 NeonGenNarrowFn *genfn = NULL;
10195 NeonGenNarrowEnvFn *genenvfn = NULL;
10197 if (scalar) {
10198 read_vec_element(s, tcg_op, rn, pass, size + 1);
10199 } else {
10200 read_vec_element(s, tcg_op, rn, pass, MO_64);
10202 tcg_res[pass] = tcg_temp_new_i32();
10204 switch (opcode) {
10205 case 0x12: /* XTN, SQXTUN */
10207 static NeonGenNarrowFn * const xtnfns[3] = {
10208 gen_helper_neon_narrow_u8,
10209 gen_helper_neon_narrow_u16,
10210 tcg_gen_extrl_i64_i32,
10212 static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
10213 gen_helper_neon_unarrow_sat8,
10214 gen_helper_neon_unarrow_sat16,
10215 gen_helper_neon_unarrow_sat32,
10217 if (u) {
10218 genenvfn = sqxtunfns[size];
10219 } else {
10220 genfn = xtnfns[size];
10222 break;
10224 case 0x14: /* SQXTN, UQXTN */
10226 static NeonGenNarrowEnvFn * const fns[3][2] = {
10227 { gen_helper_neon_narrow_sat_s8,
10228 gen_helper_neon_narrow_sat_u8 },
10229 { gen_helper_neon_narrow_sat_s16,
10230 gen_helper_neon_narrow_sat_u16 },
10231 { gen_helper_neon_narrow_sat_s32,
10232 gen_helper_neon_narrow_sat_u32 },
10234 genenvfn = fns[size][u];
10235 break;
10237 case 0x16: /* FCVTN, FCVTN2 */
10238 /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
10239 if (size == 2) {
10240 gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
10241 } else {
10242 TCGv_i32 tcg_lo = tcg_temp_new_i32();
10243 TCGv_i32 tcg_hi = tcg_temp_new_i32();
10244 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
10245 TCGv_i32 ahp = get_ahp_flag();
10247 tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
10248 gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
10249 gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
10250 tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
10251 tcg_temp_free_i32(tcg_lo);
10252 tcg_temp_free_i32(tcg_hi);
10253 tcg_temp_free_ptr(fpst);
10254 tcg_temp_free_i32(ahp);
10256 break;
10257 case 0x36: /* BFCVTN, BFCVTN2 */
10259 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
10260 gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst);
10261 tcg_temp_free_ptr(fpst);
10263 break;
10264 case 0x56: /* FCVTXN, FCVTXN2 */
10265 /* 64 bit to 32 bit float conversion
10266 * with von Neumann rounding (round to odd)
10268 assert(size == 2);
10269 gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
10270 break;
10271 default:
10272 g_assert_not_reached();
10275 if (genfn) {
10276 genfn(tcg_res[pass], tcg_op);
10277 } else if (genenvfn) {
10278 genenvfn(tcg_res[pass], cpu_env, tcg_op);
10281 tcg_temp_free_i64(tcg_op);
10284 for (pass = 0; pass < 2; pass++) {
10285 write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
10286 tcg_temp_free_i32(tcg_res[pass]);
10288 clear_vec_high(s, is_q, rd);
10291 /* Remaining saturating accumulating ops */
10292 static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
10293 bool is_q, int size, int rn, int rd)
10295 bool is_double = (size == 3);
10297 if (is_double) {
10298 TCGv_i64 tcg_rn = tcg_temp_new_i64();
10299 TCGv_i64 tcg_rd = tcg_temp_new_i64();
10300 int pass;
10302 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
10303 read_vec_element(s, tcg_rn, rn, pass, MO_64);
10304 read_vec_element(s, tcg_rd, rd, pass, MO_64);
10306 if (is_u) { /* USQADD */
10307 gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
10308 } else { /* SUQADD */
10309 gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
10311 write_vec_element(s, tcg_rd, rd, pass, MO_64);
10313 tcg_temp_free_i64(tcg_rd);
10314 tcg_temp_free_i64(tcg_rn);
10315 clear_vec_high(s, !is_scalar, rd);
10316 } else {
10317 TCGv_i32 tcg_rn = tcg_temp_new_i32();
10318 TCGv_i32 tcg_rd = tcg_temp_new_i32();
10319 int pass, maxpasses;
10321 if (is_scalar) {
10322 maxpasses = 1;
10323 } else {
10324 maxpasses = is_q ? 4 : 2;
10327 for (pass = 0; pass < maxpasses; pass++) {
10328 if (is_scalar) {
10329 read_vec_element_i32(s, tcg_rn, rn, pass, size);
10330 read_vec_element_i32(s, tcg_rd, rd, pass, size);
10331 } else {
10332 read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
10333 read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
10336 if (is_u) { /* USQADD */
10337 switch (size) {
10338 case 0:
10339 gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
10340 break;
10341 case 1:
10342 gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
10343 break;
10344 case 2:
10345 gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
10346 break;
10347 default:
10348 g_assert_not_reached();
10350 } else { /* SUQADD */
10351 switch (size) {
10352 case 0:
10353 gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
10354 break;
10355 case 1:
10356 gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
10357 break;
10358 case 2:
10359 gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
10360 break;
10361 default:
10362 g_assert_not_reached();
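            /*
             * For the scalar form, zero the whole 64-bit destination element
             * first so that writing the (possibly narrower) result below
             * leaves the unused upper bits clear.
             */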
10366 if (is_scalar) {
10367 write_vec_element(s, tcg_constant_i64(0), rd, 0, MO_64);
10369 write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
10371 tcg_temp_free_i32(tcg_rd);
10372 tcg_temp_free_i32(tcg_rn);
10373 clear_vec_high(s, is_q, rd);
10377 /* AdvSIMD scalar two reg misc
10378 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
10379 * +-----+---+-----------+------+-----------+--------+-----+------+------+
10380 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
10381 * +-----+---+-----------+------+-----------+--------+-----+------+------+
10382 */
10383 static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
10385 int rd = extract32(insn, 0, 5);
10386 int rn = extract32(insn, 5, 5);
10387 int opcode = extract32(insn, 12, 5);
10388 int size = extract32(insn, 22, 2);
10389 bool u = extract32(insn, 29, 1);
10390 bool is_fcvt = false;
10391 int rmode;
10392 TCGv_i32 tcg_rmode;
10393 TCGv_ptr tcg_fpstatus;
10395 switch (opcode) {
10396 case 0x3: /* USQADD / SUQADD */
10397 if (!fp_access_check(s)) {
10398 return;
10400 handle_2misc_satacc(s, true, u, false, size, rn, rd);
10401 return;
10402 case 0x7: /* SQABS / SQNEG */
10403 break;
10404 case 0xa: /* CMLT */
10405 if (u) {
10406 unallocated_encoding(s);
10407 return;
10409 /* fall through */
10410 case 0x8: /* CMGT, CMGE */
10411 case 0x9: /* CMEQ, CMLE */
10412 case 0xb: /* ABS, NEG */
10413 if (size != 3) {
10414 unallocated_encoding(s);
10415 return;
10417 break;
10418 case 0x12: /* SQXTUN */
10419 if (!u) {
10420 unallocated_encoding(s);
10421 return;
10423 /* fall through */
10424 case 0x14: /* SQXTN, UQXTN */
10425 if (size == 3) {
10426 unallocated_encoding(s);
10427 return;
10429 if (!fp_access_check(s)) {
10430 return;
10432 handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
10433 return;
10434 case 0xc ... 0xf:
10435 case 0x16 ... 0x1d:
10436 case 0x1f:
10437 /* Floating point: U, size[1] and opcode indicate operation;
10438 * size[0] indicates single or double precision.
10439 */
10440 opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
10441 size = extract32(size, 0, 1) ? 3 : 2;
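/*
 * For example, a double-precision scalar FCVTZS arrives here with U=0,
 * size=0b11 and opcode=0b11011 (0x1b); the folding above turns that into
 * opcode 0x3b and size 3, matching the FCVTZS case below.
 */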
10442 switch (opcode) {
10443 case 0x2c: /* FCMGT (zero) */
10444 case 0x2d: /* FCMEQ (zero) */
10445 case 0x2e: /* FCMLT (zero) */
10446 case 0x6c: /* FCMGE (zero) */
10447 case 0x6d: /* FCMLE (zero) */
10448 handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
10449 return;
10450 case 0x1d: /* SCVTF */
10451 case 0x5d: /* UCVTF */
10453 bool is_signed = (opcode == 0x1d);
10454 if (!fp_access_check(s)) {
10455 return;
10457 handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
10458 return;
10460 case 0x3d: /* FRECPE */
10461 case 0x3f: /* FRECPX */
10462 case 0x7d: /* FRSQRTE */
10463 if (!fp_access_check(s)) {
10464 return;
10466 handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
10467 return;
10468 case 0x1a: /* FCVTNS */
10469 case 0x1b: /* FCVTMS */
10470 case 0x3a: /* FCVTPS */
10471 case 0x3b: /* FCVTZS */
10472 case 0x5a: /* FCVTNU */
10473 case 0x5b: /* FCVTMU */
10474 case 0x7a: /* FCVTPU */
10475 case 0x7b: /* FCVTZU */
10476 is_fcvt = true;
10477 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
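/*
 * Bits 0 and 5 of the folded opcode select the rounding direction:
 * to-nearest-even for FCVTN*, toward minus infinity for FCVTM*,
 * toward plus infinity for FCVTP* and toward zero for FCVTZ*.
 */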
10478 break;
10479 case 0x1c: /* FCVTAS */
10480 case 0x5c: /* FCVTAU */
10481 /* TIEAWAY doesn't fit in the usual rounding mode encoding */
10482 is_fcvt = true;
10483 rmode = FPROUNDING_TIEAWAY;
10484 break;
10485 case 0x56: /* FCVTXN, FCVTXN2 */
10486 if (size == 2) {
10487 unallocated_encoding(s);
10488 return;
10490 if (!fp_access_check(s)) {
10491 return;
10493 handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
10494 return;
10495 default:
10496 unallocated_encoding(s);
10497 return;
10499 break;
10500 default:
10501 unallocated_encoding(s);
10502 return;
10505 if (!fp_access_check(s)) {
10506 return;
10509 if (is_fcvt) {
10510 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
10511 tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
10512 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
10513 } else {
10514 tcg_rmode = NULL;
10515 tcg_fpstatus = NULL;
10518 if (size == 3) {
10519 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
10520 TCGv_i64 tcg_rd = tcg_temp_new_i64();
10522 handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
10523 write_fp_dreg(s, rd, tcg_rd);
10524 tcg_temp_free_i64(tcg_rd);
10525 tcg_temp_free_i64(tcg_rn);
10526 } else {
10527 TCGv_i32 tcg_rn = tcg_temp_new_i32();
10528 TCGv_i32 tcg_rd = tcg_temp_new_i32();
10530 read_vec_element_i32(s, tcg_rn, rn, 0, size);
10532 switch (opcode) {
10533 case 0x7: /* SQABS, SQNEG */
10535 NeonGenOneOpEnvFn *genfn;
10536 static NeonGenOneOpEnvFn * const fns[3][2] = {
10537 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
10538 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
10539 { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
10541 genfn = fns[size][u];
10542 genfn(tcg_rd, cpu_env, tcg_rn);
10543 break;
10545 case 0x1a: /* FCVTNS */
10546 case 0x1b: /* FCVTMS */
10547 case 0x1c: /* FCVTAS */
10548 case 0x3a: /* FCVTPS */
10549 case 0x3b: /* FCVTZS */
10550 gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_constant_i32(0),
10551 tcg_fpstatus);
10552 break;
10553 case 0x5a: /* FCVTNU */
10554 case 0x5b: /* FCVTMU */
10555 case 0x5c: /* FCVTAU */
10556 case 0x7a: /* FCVTPU */
10557 case 0x7b: /* FCVTZU */
10558 gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_constant_i32(0),
10559 tcg_fpstatus);
10560 break;
10561 default:
10562 g_assert_not_reached();
10565 write_fp_sreg(s, rd, tcg_rd);
10566 tcg_temp_free_i32(tcg_rd);
10567 tcg_temp_free_i32(tcg_rn);
10570 if (is_fcvt) {
10571 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
10572 tcg_temp_free_i32(tcg_rmode);
10573 tcg_temp_free_ptr(tcg_fpstatus);
10577 /* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
10578 static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
10579 int immh, int immb, int opcode, int rn, int rd)
10581 int size = 32 - clz32(immh) - 1;
10582 int immhb = immh << 3 | immb;
10583 int shift = 2 * (8 << size) - immhb;
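/*
 * For right shifts immh:immb encodes (2 * esize) - shift, so the shift
 * count runs from 1 to esize. For example immh=0b0010, immb=0b101 gives
 * size 1 (16-bit elements) and shift 32 - 21 = 11.
 */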
10584 GVecGen2iFn *gvec_fn;
10586 if (extract32(immh, 3, 1) && !is_q) {
10587 unallocated_encoding(s);
10588 return;
10590 tcg_debug_assert(size <= 3);
10592 if (!fp_access_check(s)) {
10593 return;
10596 switch (opcode) {
10597 case 0x02: /* SSRA / USRA (accumulate) */
10598 gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
10599 break;
10601 case 0x08: /* SRI */
10602 gvec_fn = gen_gvec_sri;
10603 break;
10605 case 0x00: /* SSHR / USHR */
10606 if (is_u) {
10607 if (shift == 8 << size) {
10608 /* Shift count the same size as element size produces zero. */
10609 tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
10610 is_q ? 16 : 8, vec_full_reg_size(s), 0);
10611 return;
10613 gvec_fn = tcg_gen_gvec_shri;
10614 } else {
10615 /* Shift count the same size as element size produces all sign. */
10616 if (shift == 8 << size) {
10617 shift -= 1;
10619 gvec_fn = tcg_gen_gvec_sari;
10621 break;
10623 case 0x04: /* SRSHR / URSHR (rounding) */
10624 gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
10625 break;
10627 case 0x06: /* SRSRA / URSRA (accum + rounding) */
10628 gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
10629 break;
10631 default:
10632 g_assert_not_reached();
10635 gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
10638 /* SHL/SLI - Vector shift left */
10639 static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
10640 int immh, int immb, int opcode, int rn, int rd)
10642 int size = 32 - clz32(immh) - 1;
10643 int immhb = immh << 3 | immb;
10644 int shift = immhb - (8 << size);
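/* For left shifts immh:immb encodes esize + shift, so shift runs from 0 to esize - 1. */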
10646 /* Range of size is limited by decode: immh is a non-zero 4 bit field */
10647 assert(size >= 0 && size <= 3);
10649 if (extract32(immh, 3, 1) && !is_q) {
10650 unallocated_encoding(s);
10651 return;
10654 if (!fp_access_check(s)) {
10655 return;
10658 if (insert) {
10659 gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
10660 } else {
10661 gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
10665 /* USHLL/SHLL - Vector shift left with widening */
10666 static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
10667 int immh, int immb, int opcode, int rn, int rd)
10669 int size = 32 - clz32(immh) - 1;
10670 int immhb = immh << 3 | immb;
10671 int shift = immhb - (8 << size);
10672 int dsize = 64;
10673 int esize = 8 << size;
10674 int elements = dsize/esize;
10675 TCGv_i64 tcg_rn = new_tmp_a64(s);
10676 TCGv_i64 tcg_rd = new_tmp_a64(s);
10677 int i;
10679 if (size >= 3) {
10680 unallocated_encoding(s);
10681 return;
10684 if (!fp_access_check(s)) {
10685 return;
10688 /* For the LL variants the store is larger than the load,
10689 * so if rd == rn we would overwrite parts of our input.
10690 * So load everything right now and use shifts in the main loop.
10691 */
10692 read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);
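/*
 * Each iteration extracts one element from the 64-bit source half, widens
 * it via ext_and_shift_reg (option size | (!is_u << 2), i.e. an unsigned
 * or signed extend of the element width), applies the left shift and
 * writes a double-width destination element.
 */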
10694 for (i = 0; i < elements; i++) {
10695 tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
10696 ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
10697 tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
10698 write_vec_element(s, tcg_rd, rd, i, size + 1);
10702 /* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
10703 static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
10704 int immh, int immb, int opcode, int rn, int rd)
10706 int immhb = immh << 3 | immb;
10707 int size = 32 - clz32(immh) - 1;
10708 int dsize = 64;
10709 int esize = 8 << size;
10710 int elements = dsize/esize;
10711 int shift = (2 * esize) - immhb;
10712 bool round = extract32(opcode, 0, 1);
10713 TCGv_i64 tcg_rn, tcg_rd, tcg_final;
10714 TCGv_i64 tcg_round;
10715 int i;
10717 if (extract32(immh, 3, 1)) {
10718 unallocated_encoding(s);
10719 return;
10722 if (!fp_access_check(s)) {
10723 return;
10726 tcg_rn = tcg_temp_new_i64();
10727 tcg_rd = tcg_temp_new_i64();
10728 tcg_final = tcg_temp_new_i64();
10729 read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);
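/*
 * The narrowed results form a single 64-bit value which replaces either
 * the low half of Rd (SHRN/RSHRN) or the high half (the "2" forms); it is
 * assembled in tcg_final and written back as one MO_64 element below.
 */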
10731 if (round) {
10732 tcg_round = tcg_constant_i64(1ULL << (shift - 1));
10733 } else {
10734 tcg_round = NULL;
10737 for (i = 0; i < elements; i++) {
10738 read_vec_element(s, tcg_rn, rn, i, size+1);
10739 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
10740 false, true, size+1, shift);
10742 tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
10745 if (!is_q) {
10746 write_vec_element(s, tcg_final, rd, 0, MO_64);
10747 } else {
10748 write_vec_element(s, tcg_final, rd, 1, MO_64);
10750 tcg_temp_free_i64(tcg_rn);
10751 tcg_temp_free_i64(tcg_rd);
10752 tcg_temp_free_i64(tcg_final);
10754 clear_vec_high(s, is_q, rd);
10758 /* AdvSIMD shift by immediate
10759 * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
10760 * +---+---+---+-------------+------+------+--------+---+------+------+
10761 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
10762 * +---+---+---+-------------+------+------+--------+---+------+------+
10763 */
10764 static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
10766 int rd = extract32(insn, 0, 5);
10767 int rn = extract32(insn, 5, 5);
10768 int opcode = extract32(insn, 11, 5);
10769 int immb = extract32(insn, 16, 3);
10770 int immh = extract32(insn, 19, 4);
10771 bool is_u = extract32(insn, 29, 1);
10772 bool is_q = extract32(insn, 30, 1);
10774 /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */
10775 assert(immh != 0);
10777 switch (opcode) {
10778 case 0x08: /* SRI */
10779 if (!is_u) {
10780 unallocated_encoding(s);
10781 return;
10783 /* fall through */
10784 case 0x00: /* SSHR / USHR */
10785 case 0x02: /* SSRA / USRA (accumulate) */
10786 case 0x04: /* SRSHR / URSHR (rounding) */
10787 case 0x06: /* SRSRA / URSRA (accum + rounding) */
10788 handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
10789 break;
10790 case 0x0a: /* SHL / SLI */
10791 handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
10792 break;
10793 case 0x10: /* SHRN */
10794 case 0x11: /* RSHRN / SQRSHRUN */
10795 if (is_u) {
10796 handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
10797 opcode, rn, rd);
10798 } else {
10799 handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
10801 break;
10802 case 0x12: /* SQSHRN / UQSHRN */
10803 case 0x13: /* SQRSHRN / UQRSHRN */
10804 handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
10805 opcode, rn, rd);
10806 break;
10807 case 0x14: /* SSHLL / USHLL */
10808 handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
10809 break;
10810 case 0x1c: /* SCVTF / UCVTF */
10811 handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
10812 opcode, rn, rd);
10813 break;
10814 case 0xc: /* SQSHLU */
10815 if (!is_u) {
10816 unallocated_encoding(s);
10817 return;
10819 handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
10820 break;
10821 case 0xe: /* SQSHL, UQSHL */
10822 handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
10823 break;
10824 case 0x1f: /* FCVTZS/ FCVTZU */
10825 handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
10826 return;
10827 default:
10828 unallocated_encoding(s);
10829 return;
10833 /* Generate code to do a "long" addition or subtraction, i.e. one done in
10834 * TCGv_i64 on vector lanes twice the width specified by size.
10835 */
10836 static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
10837 TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
10839 static NeonGenTwo64OpFn * const fns[3][2] = {
10840 { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
10841 { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
10842 { tcg_gen_add_i64, tcg_gen_sub_i64 },
10844 NeonGenTwo64OpFn *genfn;
10845 assert(size < 3);
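/*
 * size is the element size of the narrow inputs: with size 2 the widened
 * lanes are already 64 bits, so plain i64 add/sub suffices; size 3 would
 * need 128-bit arithmetic and never reaches this helper.
 */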
10847 genfn = fns[size][is_sub];
10848 genfn(tcg_res, tcg_op1, tcg_op2);
10851 static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
10852 int opcode, int rd, int rn, int rm)
10854 /* 3-reg-different widening insns: 64 x 64 -> 128 */
10855 TCGv_i64 tcg_res[2];
10856 int pass, accop;
10858 tcg_res[0] = tcg_temp_new_i64();
10859 tcg_res[1] = tcg_temp_new_i64();
10861 /* Does this op do an adding accumulate, a subtracting accumulate,
10862 * or no accumulate at all?
10863 */
10864 switch (opcode) {
10865 case 5:
10866 case 8:
10867 case 9:
10868 accop = 1;
10869 break;
10870 case 10:
10871 case 11:
10872 accop = -1;
10873 break;
10874 default:
10875 accop = 0;
10876 break;
10879 if (accop != 0) {
10880 read_vec_element(s, tcg_res[0], rd, 0, MO_64);
10881 read_vec_element(s, tcg_res[1], rd, 1, MO_64);
10884 /* size == 2 means two 32x32->64 operations; this is worth special
10885 * casing because we can generally handle it inline.
10886 */
10887 if (size == 2) {
10888 for (pass = 0; pass < 2; pass++) {
10889 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10890 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10891 TCGv_i64 tcg_passres;
10892 MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
10894 int elt = pass + is_q * 2;
10896 read_vec_element(s, tcg_op1, rn, elt, memop);
10897 read_vec_element(s, tcg_op2, rm, elt, memop);
10899 if (accop == 0) {
10900 tcg_passres = tcg_res[pass];
10901 } else {
10902 tcg_passres = tcg_temp_new_i64();
10905 switch (opcode) {
10906 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10907 tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
10908 break;
10909 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10910 tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
10911 break;
10912 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10913 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
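/*
 * Absolute difference without a helper: compute both orderings of the
 * subtraction and use movcond (signed or unsigned compare as appropriate)
 * to pick the non-negative one.
 */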
10915 TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
10916 TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();
10918 tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
10919 tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
10920 tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
10921 tcg_passres,
10922 tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
10923 tcg_temp_free_i64(tcg_tmp1);
10924 tcg_temp_free_i64(tcg_tmp2);
10925 break;
10927 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10928 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10929 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
10930 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
10931 break;
10932 case 9: /* SQDMLAL, SQDMLAL2 */
10933 case 11: /* SQDMLSL, SQDMLSL2 */
10934 case 13: /* SQDMULL, SQDMULL2 */
10935 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
10936 gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
10937 tcg_passres, tcg_passres);
10938 break;
10939 default:
10940 g_assert_not_reached();
10943 if (opcode == 9 || opcode == 11) {
10944 /* saturating accumulate ops */
10945 if (accop < 0) {
10946 tcg_gen_neg_i64(tcg_passres, tcg_passres);
10948 gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
10949 tcg_res[pass], tcg_passres);
10950 } else if (accop > 0) {
10951 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10952 } else if (accop < 0) {
10953 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10956 if (accop != 0) {
10957 tcg_temp_free_i64(tcg_passres);
10960 tcg_temp_free_i64(tcg_op1);
10961 tcg_temp_free_i64(tcg_op2);
10963 } else {
10964 /* size 0 or 1, generally helper functions */
10965 for (pass = 0; pass < 2; pass++) {
10966 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10967 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10968 TCGv_i64 tcg_passres;
10969 int elt = pass + is_q * 2;
10971 read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
10972 read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);
10974 if (accop == 0) {
10975 tcg_passres = tcg_res[pass];
10976 } else {
10977 tcg_passres = tcg_temp_new_i64();
10980 switch (opcode) {
10981 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10982 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10984 TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
10985 static NeonGenWidenFn * const widenfns[2][2] = {
10986 { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
10987 { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
10989 NeonGenWidenFn *widenfn = widenfns[size][is_u];
10991 widenfn(tcg_op2_64, tcg_op2);
10992 widenfn(tcg_passres, tcg_op1);
10993 gen_neon_addl(size, (opcode == 2), tcg_passres,
10994 tcg_passres, tcg_op2_64);
10995 tcg_temp_free_i64(tcg_op2_64);
10996 break;
10998 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10999 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
11000 if (size == 0) {
11001 if (is_u) {
11002 gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
11003 } else {
11004 gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
11006 } else {
11007 if (is_u) {
11008 gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
11009 } else {
11010 gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
11013 break;
11014 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
11015 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
11016 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
11017 if (size == 0) {
11018 if (is_u) {
11019 gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
11020 } else {
11021 gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
11023 } else {
11024 if (is_u) {
11025 gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
11026 } else {
11027 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
11030 break;
11031 case 9: /* SQDMLAL, SQDMLAL2 */
11032 case 11: /* SQDMLSL, SQDMLSL2 */
11033 case 13: /* SQDMULL, SQDMULL2 */
11034 assert(size == 1);
11035 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
11036 gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
11037 tcg_passres, tcg_passres);
11038 break;
11039 default:
11040 g_assert_not_reached();
11042 tcg_temp_free_i32(tcg_op1);
11043 tcg_temp_free_i32(tcg_op2);
11045 if (accop != 0) {
11046 if (opcode == 9 || opcode == 11) {
11047 /* saturating accumulate ops */
11048 if (accop < 0) {
11049 gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
11051 gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
11052 tcg_res[pass],
11053 tcg_passres);
11054 } else {
11055 gen_neon_addl(size, (accop < 0), tcg_res[pass],
11056 tcg_res[pass], tcg_passres);
11058 tcg_temp_free_i64(tcg_passres);
11063 write_vec_element(s, tcg_res[0], rd, 0, MO_64);
11064 write_vec_element(s, tcg_res[1], rd, 1, MO_64);
11065 tcg_temp_free_i64(tcg_res[0]);
11066 tcg_temp_free_i64(tcg_res[1]);
11069 static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
11070 int opcode, int rd, int rn, int rm)
11072 TCGv_i64 tcg_res[2];
11073 int part = is_q ? 2 : 0;
11074 int pass;
11076 for (pass = 0; pass < 2; pass++) {
11077 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11078 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11079 TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
11080 static NeonGenWidenFn * const widenfns[3][2] = {
11081 { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
11082 { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
11083 { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
11085 NeonGenWidenFn *widenfn = widenfns[size][is_u];
11087 read_vec_element(s, tcg_op1, rn, pass, MO_64);
11088 read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
11089 widenfn(tcg_op2_wide, tcg_op2);
11090 tcg_temp_free_i32(tcg_op2);
11091 tcg_res[pass] = tcg_temp_new_i64();
11092 gen_neon_addl(size, (opcode == 3),
11093 tcg_res[pass], tcg_op1, tcg_op2_wide);
11094 tcg_temp_free_i64(tcg_op1);
11095 tcg_temp_free_i64(tcg_op2_wide);
11098 for (pass = 0; pass < 2; pass++) {
11099 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11100 tcg_temp_free_i64(tcg_res[pass]);
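/*
 * Rounding narrow-high for 32-bit elements: add half the weight of the
 * discarded low 32 bits (1 << 31), then keep the high half.
 */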
11104 static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
11106 tcg_gen_addi_i64(in, in, 1U << 31);
11107 tcg_gen_extrh_i64_i32(res, in);
11110 static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
11111 int opcode, int rd, int rn, int rm)
11113 TCGv_i32 tcg_res[2];
11114 int part = is_q ? 2 : 0;
11115 int pass;
11117 for (pass = 0; pass < 2; pass++) {
11118 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11119 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11120 TCGv_i64 tcg_wideres = tcg_temp_new_i64();
11121 static NeonGenNarrowFn * const narrowfns[3][2] = {
11122 { gen_helper_neon_narrow_high_u8,
11123 gen_helper_neon_narrow_round_high_u8 },
11124 { gen_helper_neon_narrow_high_u16,
11125 gen_helper_neon_narrow_round_high_u16 },
11126 { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
11128 NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];
11130 read_vec_element(s, tcg_op1, rn, pass, MO_64);
11131 read_vec_element(s, tcg_op2, rm, pass, MO_64);
11133 gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);
11135 tcg_temp_free_i64(tcg_op1);
11136 tcg_temp_free_i64(tcg_op2);
11138 tcg_res[pass] = tcg_temp_new_i32();
11139 gennarrow(tcg_res[pass], tcg_wideres);
11140 tcg_temp_free_i64(tcg_wideres);
11143 for (pass = 0; pass < 2; pass++) {
11144 write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
11145 tcg_temp_free_i32(tcg_res[pass]);
11147 clear_vec_high(s, is_q, rd);
11150 /* AdvSIMD three different
11151 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
11152 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
11153 * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
11154 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
11155 */
11156 static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
11158 /* Instructions in this group fall into three basic classes
11159 * (in each case with the operation working on each element in
11160 * the input vectors):
11161 * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
11162 * 128 bit input)
11163 * (2) wide 64 x 128 -> 128
11164 * (3) narrowing 128 x 128 -> 64
11165 * Here we do initial decode, catch unallocated cases and
11166 * dispatch to separate functions for each class.
11167 */
11168 int is_q = extract32(insn, 30, 1);
11169 int is_u = extract32(insn, 29, 1);
11170 int size = extract32(insn, 22, 2);
11171 int opcode = extract32(insn, 12, 4);
11172 int rm = extract32(insn, 16, 5);
11173 int rn = extract32(insn, 5, 5);
11174 int rd = extract32(insn, 0, 5);
11176 switch (opcode) {
11177 case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
11178 case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
11179 /* 64 x 128 -> 128 */
11180 if (size == 3) {
11181 unallocated_encoding(s);
11182 return;
11184 if (!fp_access_check(s)) {
11185 return;
11187 handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
11188 break;
11189 case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
11190 case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
11191 /* 128 x 128 -> 64 */
11192 if (size == 3) {
11193 unallocated_encoding(s);
11194 return;
11196 if (!fp_access_check(s)) {
11197 return;
11199 handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
11200 break;
11201 case 14: /* PMULL, PMULL2 */
11202 if (is_u) {
11203 unallocated_encoding(s);
11204 return;
11206 switch (size) {
11207 case 0: /* PMULL.P8 */
11208 if (!fp_access_check(s)) {
11209 return;
11211 /* The Q field specifies lo/hi half input for this insn. */
11212 gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
11213 gen_helper_neon_pmull_h);
11214 break;
11216 case 3: /* PMULL.P64 */
11217 if (!dc_isar_feature(aa64_pmull, s)) {
11218 unallocated_encoding(s);
11219 return;
11221 if (!fp_access_check(s)) {
11222 return;
11224 /* The Q field specifies lo/hi half input for this insn. */
11225 gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
11226 gen_helper_gvec_pmull_q);
11227 break;
11229 default:
11230 unallocated_encoding(s);
11231 break;
11233 return;
11234 case 9: /* SQDMLAL, SQDMLAL2 */
11235 case 11: /* SQDMLSL, SQDMLSL2 */
11236 case 13: /* SQDMULL, SQDMULL2 */
11237 if (is_u || size == 0) {
11238 unallocated_encoding(s);
11239 return;
11241 /* fall through */
11242 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
11243 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
11244 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
11245 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
11246 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
11247 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
11248 case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
11249 /* 64 x 64 -> 128 */
11250 if (size == 3) {
11251 unallocated_encoding(s);
11252 return;
11254 if (!fp_access_check(s)) {
11255 return;
11258 handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
11259 break;
11260 default:
11261 /* opcode 15 not allocated */
11262 unallocated_encoding(s);
11263 break;
11267 /* Logic op (opcode == 3) subgroup of C3.6.16. */
11268 static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
11270 int rd = extract32(insn, 0, 5);
11271 int rn = extract32(insn, 5, 5);
11272 int rm = extract32(insn, 16, 5);
11273 int size = extract32(insn, 22, 2);
11274 bool is_u = extract32(insn, 29, 1);
11275 bool is_q = extract32(insn, 30, 1);
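/*
 * For the bitwise ops size is not an element size; size and U together
 * (the "size + 4 * is_u" index below) select which logic operation to emit.
 */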
11277 if (!fp_access_check(s)) {
11278 return;
11281 switch (size + 4 * is_u) {
11282 case 0: /* AND */
11283 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
11284 return;
11285 case 1: /* BIC */
11286 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
11287 return;
11288 case 2: /* ORR */
11289 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
11290 return;
11291 case 3: /* ORN */
11292 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
11293 return;
11294 case 4: /* EOR */
11295 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
11296 return;
11298 case 5: /* BSL bitwise select */
11299 gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
11300 return;
11301 case 6: /* BIT, bitwise insert if true */
11302 gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
11303 return;
11304 case 7: /* BIF, bitwise insert if false */
11305 gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
11306 return;
11308 default:
11309 g_assert_not_reached();
11313 /* Pairwise op subgroup of C3.6.16.
11314 *
11315 * Called from disas_simd_three_reg_same directly, or from disas_simd_3same_float
11316 * for the float pairwise operations, where opcode and size are derived differently.
11317 */
11318 static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
11319 int size, int rn, int rm, int rd)
11321 TCGv_ptr fpst;
11322 int pass;
11324 /* Floating point operations need fpst */
11325 if (opcode >= 0x58) {
11326 fpst = fpstatus_ptr(FPST_FPCR);
11327 } else {
11328 fpst = NULL;
11331 if (!fp_access_check(s)) {
11332 return;
11335 /* These operations work on the concatenated rm:rn, with each pair of
11336 * adjacent elements being operated on to produce an element in the result.
11337 */
11338 if (size == 3) {
11339 TCGv_i64 tcg_res[2];
11341 for (pass = 0; pass < 2; pass++) {
11342 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11343 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11344 int passreg = (pass == 0) ? rn : rm;
11346 read_vec_element(s, tcg_op1, passreg, 0, MO_64);
11347 read_vec_element(s, tcg_op2, passreg, 1, MO_64);
11348 tcg_res[pass] = tcg_temp_new_i64();
11350 switch (opcode) {
11351 case 0x17: /* ADDP */
11352 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
11353 break;
11354 case 0x58: /* FMAXNMP */
11355 gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11356 break;
11357 case 0x5a: /* FADDP */
11358 gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11359 break;
11360 case 0x5e: /* FMAXP */
11361 gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11362 break;
11363 case 0x78: /* FMINNMP */
11364 gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11365 break;
11366 case 0x7e: /* FMINP */
11367 gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11368 break;
11369 default:
11370 g_assert_not_reached();
11373 tcg_temp_free_i64(tcg_op1);
11374 tcg_temp_free_i64(tcg_op2);
11377 for (pass = 0; pass < 2; pass++) {
11378 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11379 tcg_temp_free_i64(tcg_res[pass]);
11381 } else {
11382 int maxpass = is_q ? 4 : 2;
11383 TCGv_i32 tcg_res[4];
11385 for (pass = 0; pass < maxpass; pass++) {
11386 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11387 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11388 NeonGenTwoOpFn *genfn = NULL;
11389 int passreg = pass < (maxpass / 2) ? rn : rm;
11390 int passelt = (is_q && (pass & 1)) ? 2 : 0;
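/*
 * The first maxpass/2 passes read adjacent element pairs from Rn, the rest
 * from Rm; e.g. with maxpass == 4, pass 2 reads elements 0 and 1 of Rm.
 */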
11392 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
11393 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
11394 tcg_res[pass] = tcg_temp_new_i32();
11396 switch (opcode) {
11397 case 0x17: /* ADDP */
11399 static NeonGenTwoOpFn * const fns[3] = {
11400 gen_helper_neon_padd_u8,
11401 gen_helper_neon_padd_u16,
11402 tcg_gen_add_i32,
11404 genfn = fns[size];
11405 break;
11407 case 0x14: /* SMAXP, UMAXP */
11409 static NeonGenTwoOpFn * const fns[3][2] = {
11410 { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
11411 { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
11412 { tcg_gen_smax_i32, tcg_gen_umax_i32 },
11414 genfn = fns[size][u];
11415 break;
11417 case 0x15: /* SMINP, UMINP */
11419 static NeonGenTwoOpFn * const fns[3][2] = {
11420 { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
11421 { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
11422 { tcg_gen_smin_i32, tcg_gen_umin_i32 },
11424 genfn = fns[size][u];
11425 break;
11427 /* The FP operations are all on single floats (32 bit) */
11428 case 0x58: /* FMAXNMP */
11429 gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11430 break;
11431 case 0x5a: /* FADDP */
11432 gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11433 break;
11434 case 0x5e: /* FMAXP */
11435 gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11436 break;
11437 case 0x78: /* FMINNMP */
11438 gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11439 break;
11440 case 0x7e: /* FMINP */
11441 gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11442 break;
11443 default:
11444 g_assert_not_reached();
11447 /* FP ops were emitted directly in the switch above; integer ops set genfn and are emitted here. */
11448 if (genfn) {
11449 genfn(tcg_res[pass], tcg_op1, tcg_op2);
11452 tcg_temp_free_i32(tcg_op1);
11453 tcg_temp_free_i32(tcg_op2);
11456 for (pass = 0; pass < maxpass; pass++) {
11457 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
11458 tcg_temp_free_i32(tcg_res[pass]);
11460 clear_vec_high(s, is_q, rd);
11463 if (fpst) {
11464 tcg_temp_free_ptr(fpst);
11468 /* Floating point op subgroup of C3.6.16. */
11469 static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
11471 /* For floating point ops, the U, size[1] and opcode bits
11472 * together indicate the operation. size[0] indicates single
11473 * or double.
11474 */
11475 int fpopcode = extract32(insn, 11, 5)
11476 | (extract32(insn, 23, 1) << 5)
11477 | (extract32(insn, 29, 1) << 6);
11478 int is_q = extract32(insn, 30, 1);
11479 int size = extract32(insn, 22, 1);
11480 int rm = extract32(insn, 16, 5);
11481 int rn = extract32(insn, 5, 5);
11482 int rd = extract32(insn, 0, 5);
11484 int datasize = is_q ? 128 : 64;
11485 int esize = 32 << size;
11486 int elements = datasize / esize;
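/*
 * Note that size here is only bit 22 of the insn (size[0]): 0 means single
 * precision and 1 double, since size[1] was folded into fpopcode above.
 */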
11488 if (size == 1 && !is_q) {
11489 unallocated_encoding(s);
11490 return;
11493 switch (fpopcode) {
11494 case 0x58: /* FMAXNMP */
11495 case 0x5a: /* FADDP */
11496 case 0x5e: /* FMAXP */
11497 case 0x78: /* FMINNMP */
11498 case 0x7e: /* FMINP */
11499 if (size && !is_q) {
11500 unallocated_encoding(s);
11501 return;
11503 handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
11504 rn, rm, rd);
11505 return;
11506 case 0x1b: /* FMULX */
11507 case 0x1f: /* FRECPS */
11508 case 0x3f: /* FRSQRTS */
11509 case 0x5d: /* FACGE */
11510 case 0x7d: /* FACGT */
11511 case 0x19: /* FMLA */
11512 case 0x39: /* FMLS */
11513 case 0x18: /* FMAXNM */
11514 case 0x1a: /* FADD */
11515 case 0x1c: /* FCMEQ */
11516 case 0x1e: /* FMAX */
11517 case 0x38: /* FMINNM */
11518 case 0x3a: /* FSUB */
11519 case 0x3e: /* FMIN */
11520 case 0x5b: /* FMUL */
11521 case 0x5c: /* FCMGE */
11522 case 0x5f: /* FDIV */
11523 case 0x7a: /* FABD */
11524 case 0x7c: /* FCMGT */
11525 if (!fp_access_check(s)) {
11526 return;
11528 handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
11529 return;
11531 case 0x1d: /* FMLAL */
11532 case 0x3d: /* FMLSL */
11533 case 0x59: /* FMLAL2 */
11534 case 0x79: /* FMLSL2 */
11535 if (size & 1 || !dc_isar_feature(aa64_fhm, s)) {
11536 unallocated_encoding(s);
11537 return;
11539 if (fp_access_check(s)) {
11540 int is_s = extract32(insn, 23, 1);
11541 int is_2 = extract32(insn, 29, 1);
11542 int data = (is_2 << 1) | is_s;
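/*
 * data packs the variant selectors for the out-of-line helper: bit 0 is
 * the S bit (FMLSL vs FMLAL) and bit 1 is the "2" bit (use the upper half
 * of the inputs).
 */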
11543 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
11544 vec_full_reg_offset(s, rn),
11545 vec_full_reg_offset(s, rm), cpu_env,
11546 is_q ? 16 : 8, vec_full_reg_size(s),
11547 data, gen_helper_gvec_fmlal_a64);
11549 return;
11551 default:
11552 unallocated_encoding(s);
11553 return;
11557 /* Integer op subgroup of C3.6.16. */
11558 static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
11560 int is_q = extract32(insn, 30, 1);
11561 int u = extract32(insn, 29, 1);
11562 int size = extract32(insn, 22, 2);
11563 int opcode = extract32(insn, 11, 5);
11564 int rm = extract32(insn, 16, 5);
11565 int rn = extract32(insn, 5, 5);
11566 int rd = extract32(insn, 0, 5);
11567 int pass;
11568 TCGCond cond;
11570 switch (opcode) {
11571 case 0x13: /* MUL, PMUL */
11572 if (u && size != 0) {
11573 unallocated_encoding(s);
11574 return;
11576 /* fall through */
11577 case 0x0: /* SHADD, UHADD */
11578 case 0x2: /* SRHADD, URHADD */
11579 case 0x4: /* SHSUB, UHSUB */
11580 case 0xc: /* SMAX, UMAX */
11581 case 0xd: /* SMIN, UMIN */
11582 case 0xe: /* SABD, UABD */
11583 case 0xf: /* SABA, UABA */
11584 case 0x12: /* MLA, MLS */
11585 if (size == 3) {
11586 unallocated_encoding(s);
11587 return;
11589 break;
11590 case 0x16: /* SQDMULH, SQRDMULH */
11591 if (size == 0 || size == 3) {
11592 unallocated_encoding(s);
11593 return;
11595 break;
11596 default:
11597 if (size == 3 && !is_q) {
11598 unallocated_encoding(s);
11599 return;
11601 break;
11604 if (!fp_access_check(s)) {
11605 return;
11608 switch (opcode) {
11609 case 0x01: /* SQADD, UQADD */
11610 if (u) {
11611 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqadd_qc, size);
11612 } else {
11613 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqadd_qc, size);
11615 return;
11616 case 0x05: /* SQSUB, UQSUB */
11617 if (u) {
11618 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqsub_qc, size);
11619 } else {
11620 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqsub_qc, size);
11622 return;
11623 case 0x08: /* SSHL, USHL */
11624 if (u) {
11625 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_ushl, size);
11626 } else {
11627 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sshl, size);
11629 return;
11630 case 0x0c: /* SMAX, UMAX */
11631 if (u) {
11632 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size);
11633 } else {
11634 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smax, size);
11636 return;
11637 case 0x0d: /* SMIN, UMIN */
11638 if (u) {
11639 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umin, size);
11640 } else {
11641 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smin, size);
11643 return;
11644 case 0xe: /* SABD, UABD */
11645 if (u) {
11646 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uabd, size);
11647 } else {
11648 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sabd, size);
11650 return;
11651 case 0xf: /* SABA, UABA */
11652 if (u) {
11653 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uaba, size);
11654 } else {
11655 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_saba, size);
11657 return;
11658 case 0x10: /* ADD, SUB */
11659 if (u) {
11660 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
11661 } else {
11662 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
11664 return;
11665 case 0x13: /* MUL, PMUL */
11666 if (!u) { /* MUL */
11667 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
11668 } else { /* PMUL */
11669 gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b);
11671 return;
11672 case 0x12: /* MLA, MLS */
11673 if (u) {
11674 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mls, size);
11675 } else {
11676 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mla, size);
11678 return;
11679 case 0x16: /* SQDMULH, SQRDMULH */
11681 static gen_helper_gvec_3_ptr * const fns[2][2] = {
11682 { gen_helper_neon_sqdmulh_h, gen_helper_neon_sqrdmulh_h },
11683 { gen_helper_neon_sqdmulh_s, gen_helper_neon_sqrdmulh_s },
11685 gen_gvec_op3_qc(s, is_q, rd, rn, rm, fns[size - 1][u]);
11687 return;
11688 case 0x11:
11689 if (!u) { /* CMTST */
11690 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_cmtst, size);
11691 return;
11693 /* else CMEQ */
11694 cond = TCG_COND_EQ;
11695 goto do_gvec_cmp;
11696 case 0x06: /* CMGT, CMHI */
11697 cond = u ? TCG_COND_GTU : TCG_COND_GT;
11698 goto do_gvec_cmp;
11699 case 0x07: /* CMGE, CMHS */
11700 cond = u ? TCG_COND_GEU : TCG_COND_GE;
11701 do_gvec_cmp:
11702 tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
11703 vec_full_reg_offset(s, rn),
11704 vec_full_reg_offset(s, rm),
11705 is_q ? 16 : 8, vec_full_reg_size(s));
11706 return;
11709 if (size == 3) {
11710 assert(is_q);
11711 for (pass = 0; pass < 2; pass++) {
11712 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11713 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11714 TCGv_i64 tcg_res = tcg_temp_new_i64();
11716 read_vec_element(s, tcg_op1, rn, pass, MO_64);
11717 read_vec_element(s, tcg_op2, rm, pass, MO_64);
11719 handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
11721 write_vec_element(s, tcg_res, rd, pass, MO_64);
11723 tcg_temp_free_i64(tcg_res);
11724 tcg_temp_free_i64(tcg_op1);
11725 tcg_temp_free_i64(tcg_op2);
11727 } else {
11728 for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
11729 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11730 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11731 TCGv_i32 tcg_res = tcg_temp_new_i32();
11732 NeonGenTwoOpFn *genfn = NULL;
11733 NeonGenTwoOpEnvFn *genenvfn = NULL;
11735 read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
11736 read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
11738 switch (opcode) {
11739 case 0x0: /* SHADD, UHADD */
11741 static NeonGenTwoOpFn * const fns[3][2] = {
11742 { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
11743 { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
11744 { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
11746 genfn = fns[size][u];
11747 break;
11749 case 0x2: /* SRHADD, URHADD */
11751 static NeonGenTwoOpFn * const fns[3][2] = {
11752 { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
11753 { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
11754 { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
11756 genfn = fns[size][u];
11757 break;
11759 case 0x4: /* SHSUB, UHSUB */
11761 static NeonGenTwoOpFn * const fns[3][2] = {
11762 { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
11763 { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
11764 { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
11766 genfn = fns[size][u];
11767 break;
11769 case 0x9: /* SQSHL, UQSHL */
11771 static NeonGenTwoOpEnvFn * const fns[3][2] = {
11772 { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
11773 { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
11774 { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
11776 genenvfn = fns[size][u];
11777 break;
11779 case 0xa: /* SRSHL, URSHL */
11781 static NeonGenTwoOpFn * const fns[3][2] = {
11782 { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
11783 { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
11784 { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
11786 genfn = fns[size][u];
11787 break;
11789 case 0xb: /* SQRSHL, UQRSHL */
11791 static NeonGenTwoOpEnvFn * const fns[3][2] = {
11792 { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
11793 { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
11794 { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
11796 genenvfn = fns[size][u];
11797 break;
11799 default:
11800 g_assert_not_reached();
11803 if (genenvfn) {
11804 genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
11805 } else {
11806 genfn(tcg_res, tcg_op1, tcg_op2);
11809 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
11811 tcg_temp_free_i32(tcg_res);
11812 tcg_temp_free_i32(tcg_op1);
11813 tcg_temp_free_i32(tcg_op2);
11816 clear_vec_high(s, is_q, rd);
11819 /* AdvSIMD three same
11820 * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
11821 * +---+---+---+-----------+------+---+------+--------+---+------+------+
11822 * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
11823 * +---+---+---+-----------+------+---+------+--------+---+------+------+
11824 */
11825 static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
11827 int opcode = extract32(insn, 11, 5);
11829 switch (opcode) {
11830 case 0x3: /* logic ops */
11831 disas_simd_3same_logic(s, insn);
11832 break;
11833 case 0x17: /* ADDP */
11834 case 0x14: /* SMAXP, UMAXP */
11835 case 0x15: /* SMINP, UMINP */
11837 /* Pairwise operations */
11838 int is_q = extract32(insn, 30, 1);
11839 int u = extract32(insn, 29, 1);
11840 int size = extract32(insn, 22, 2);
11841 int rm = extract32(insn, 16, 5);
11842 int rn = extract32(insn, 5, 5);
11843 int rd = extract32(insn, 0, 5);
11844 if (opcode == 0x17) {
11845 if (u || (size == 3 && !is_q)) {
11846 unallocated_encoding(s);
11847 return;
11849 } else {
11850 if (size == 3) {
11851 unallocated_encoding(s);
11852 return;
11855 handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
11856 break;
11858 case 0x18 ... 0x31:
11859 /* floating point ops, sz[1] and U are part of opcode */
11860 disas_simd_3same_float(s, insn);
11861 break;
11862 default:
11863 disas_simd_3same_int(s, insn);
11864 break;
11868 /*
11869 * Advanced SIMD three same (ARMv8.2 FP16 variants)
11871 * 31 30 29 28 24 23 22 21 20 16 15 14 13 11 10 9 5 4 0
11872 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
11873 * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 | Rm | 0 0 | opcode | 1 | Rn | Rd |
11874 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
11876 * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE
11877 * (register), FACGE, FABD, FCMGT (register) and FACGT.
11878 *
11879 */
11880 static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
11882 int opcode = extract32(insn, 11, 3);
11883 int u = extract32(insn, 29, 1);
11884 int a = extract32(insn, 23, 1);
11885 int is_q = extract32(insn, 30, 1);
11886 int rm = extract32(insn, 16, 5);
11887 int rn = extract32(insn, 5, 5);
11888 int rd = extract32(insn, 0, 5);
11889 /*
11890 * For these floating point ops, the U, a and opcode bits
11891 * together indicate the operation.
11892 */
11893 int fpopcode = opcode | (a << 3) | (u << 4);
11894 int datasize = is_q ? 128 : 64;
11895 int elements = datasize / 16;
11896 bool pairwise;
11897 TCGv_ptr fpst;
11898 int pass;
11900 switch (fpopcode) {
11901 case 0x0: /* FMAXNM */
11902 case 0x1: /* FMLA */
11903 case 0x2: /* FADD */
11904 case 0x3: /* FMULX */
11905 case 0x4: /* FCMEQ */
11906 case 0x6: /* FMAX */
11907 case 0x7: /* FRECPS */
11908 case 0x8: /* FMINNM */
11909 case 0x9: /* FMLS */
11910 case 0xa: /* FSUB */
11911 case 0xe: /* FMIN */
11912 case 0xf: /* FRSQRTS */
11913 case 0x13: /* FMUL */
11914 case 0x14: /* FCMGE */
11915 case 0x15: /* FACGE */
11916 case 0x17: /* FDIV */
11917 case 0x1a: /* FABD */
11918 case 0x1c: /* FCMGT */
11919 case 0x1d: /* FACGT */
11920 pairwise = false;
11921 break;
11922 case 0x10: /* FMAXNMP */
11923 case 0x12: /* FADDP */
11924 case 0x16: /* FMAXP */
11925 case 0x18: /* FMINNMP */
11926 case 0x1e: /* FMINP */
11927 pairwise = true;
11928 break;
11929 default:
11930 unallocated_encoding(s);
11931 return;
11934 if (!dc_isar_feature(aa64_fp16, s)) {
11935 unallocated_encoding(s);
11936 return;
11939 if (!fp_access_check(s)) {
11940 return;
11943 fpst = fpstatus_ptr(FPST_FPCR_F16);
11945 if (pairwise) {
11946 int maxpass = is_q ? 8 : 4;
11947 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11948 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11949 TCGv_i32 tcg_res[8];
11951 for (pass = 0; pass < maxpass; pass++) {
11952 int passreg = pass < (maxpass / 2) ? rn : rm;
11953 int passelt = (pass << 1) & (maxpass - 1);
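/*
 * Pairs are read from Rn for the first half of the passes and from Rm for
 * the second; e.g. with maxpass == 8, pass 5 reads half-precision elements
 * 2 and 3 of Rm.
 */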
11955 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
11956 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
11957 tcg_res[pass] = tcg_temp_new_i32();
11959 switch (fpopcode) {
11960 case 0x10: /* FMAXNMP */
11961 gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
11962 fpst);
11963 break;
11964 case 0x12: /* FADDP */
11965 gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11966 break;
11967 case 0x16: /* FMAXP */
11968 gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11969 break;
11970 case 0x18: /* FMINNMP */
11971 gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
11972 fpst);
11973 break;
11974 case 0x1e: /* FMINP */
11975 gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11976 break;
11977 default:
11978 g_assert_not_reached();
11982 for (pass = 0; pass < maxpass; pass++) {
11983 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
11984 tcg_temp_free_i32(tcg_res[pass]);
11987 tcg_temp_free_i32(tcg_op1);
11988 tcg_temp_free_i32(tcg_op2);
11990 } else {
11991 for (pass = 0; pass < elements; pass++) {
11992 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11993 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11994 TCGv_i32 tcg_res = tcg_temp_new_i32();
11996 read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
11997 read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);
11999 switch (fpopcode) {
12000 case 0x0: /* FMAXNM */
12001 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
12002 break;
12003 case 0x1: /* FMLA */
12004 read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
12005 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
12006 fpst);
12007 break;
12008 case 0x2: /* FADD */
12009 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
12010 break;
12011 case 0x3: /* FMULX */
12012 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
12013 break;
12014 case 0x4: /* FCMEQ */
12015 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
12016 break;
12017 case 0x6: /* FMAX */
12018 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
12019 break;
12020 case 0x7: /* FRECPS */
12021 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
12022 break;
12023 case 0x8: /* FMINNM */
12024 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
12025 break;
12026 case 0x9: /* FMLS */
12027 /* As usual for ARM, separate negation for fused multiply-add */
12028 tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
12029 read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
12030 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
12031 fpst);
12032 break;
12033 case 0xa: /* FSUB */
12034 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
12035 break;
12036 case 0xe: /* FMIN */
12037 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
12038 break;
12039 case 0xf: /* FRSQRTS */
12040 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
12041 break;
12042 case 0x13: /* FMUL */
12043 gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
12044 break;
12045 case 0x14: /* FCMGE */
12046 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
12047 break;
12048 case 0x15: /* FACGE */
12049 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
12050 break;
12051 case 0x17: /* FDIV */
12052 gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
12053 break;
12054 case 0x1a: /* FABD */
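/* FABD: subtract, then clear the f16 sign bit to take the absolute value. */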
12055 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
12056 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
12057 break;
12058 case 0x1c: /* FCMGT */
12059 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
12060 break;
12061 case 0x1d: /* FACGT */
12062 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
12063 break;
12064 default:
12065 g_assert_not_reached();
12068 write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
12069 tcg_temp_free_i32(tcg_res);
12070 tcg_temp_free_i32(tcg_op1);
12071 tcg_temp_free_i32(tcg_op2);
12075 tcg_temp_free_ptr(fpst);
12077 clear_vec_high(s, is_q, rd);
12080 /* AdvSIMD three same extra
12081 * 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0
12082 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
12083 * | 0 | Q | U | 0 1 1 1 0 | size | 0 | Rm | 1 | opcode | 1 | Rn | Rd |
12084 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
12085 */
12086 static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
12088 int rd = extract32(insn, 0, 5);
12089 int rn = extract32(insn, 5, 5);
12090 int opcode = extract32(insn, 11, 4);
12091 int rm = extract32(insn, 16, 5);
12092 int size = extract32(insn, 22, 2);
12093 bool u = extract32(insn, 29, 1);
12094 bool is_q = extract32(insn, 30, 1);
12095 bool feature;
12096 int rot;
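/*
 * The switch below keys on u * 16 + opcode so that the U bit can
 * distinguish encodings that share the same 4-bit opcode field.
 */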
12098 switch (u * 16 + opcode) {
12099 case 0x10: /* SQRDMLAH (vector) */
12100 case 0x11: /* SQRDMLSH (vector) */
12101 if (size != 1 && size != 2) {
12102 unallocated_encoding(s);
12103 return;
12105 feature = dc_isar_feature(aa64_rdm, s);
12106 break;
12107 case 0x02: /* SDOT (vector) */
12108 case 0x12: /* UDOT (vector) */
12109 if (size != MO_32) {
12110 unallocated_encoding(s);
12111 return;
12113 feature = dc_isar_feature(aa64_dp, s);
12114 break;
12115 case 0x03: /* USDOT */
12116 if (size != MO_32) {
12117 unallocated_encoding(s);
12118 return;
12120 feature = dc_isar_feature(aa64_i8mm, s);
12121 break;
12122 case 0x04: /* SMMLA */
12123 case 0x14: /* UMMLA */
12124 case 0x05: /* USMMLA */
12125 if (!is_q || size != MO_32) {
12126 unallocated_encoding(s);
12127 return;
12129 feature = dc_isar_feature(aa64_i8mm, s);
12130 break;
12131 case 0x18: /* FCMLA, #0 */
12132 case 0x19: /* FCMLA, #90 */
12133 case 0x1a: /* FCMLA, #180 */
12134 case 0x1b: /* FCMLA, #270 */
12135 case 0x1c: /* FCADD, #90 */
12136 case 0x1e: /* FCADD, #270 */
12137 if (size == 0
12138 || (size == 1 && !dc_isar_feature(aa64_fp16, s))
12139 || (size == 3 && !is_q)) {
12140 unallocated_encoding(s);
12141 return;
12143 feature = dc_isar_feature(aa64_fcma, s);
12144 break;
12145 case 0x1d: /* BFMMLA */
12146 if (size != MO_16 || !is_q) {
12147 unallocated_encoding(s);
12148 return;
12150 feature = dc_isar_feature(aa64_bf16, s);
12151 break;
12152 case 0x1f:
12153 switch (size) {
12154 case 1: /* BFDOT */
12155 case 3: /* BFMLAL{B,T} */
12156 feature = dc_isar_feature(aa64_bf16, s);
12157 break;
12158 default:
12159 unallocated_encoding(s);
12160 return;
12162 break;
12163 default:
12164 unallocated_encoding(s);
12165 return;
12167 if (!feature) {
12168 unallocated_encoding(s);
12169 return;
12171 if (!fp_access_check(s)) {
12172 return;
12175 switch (opcode) {
12176 case 0x0: /* SQRDMLAH (vector) */
12177 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlah_qc, size);
12178 return;
12180 case 0x1: /* SQRDMLSH (vector) */
12181 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlsh_qc, size);
12182 return;
12184 case 0x2: /* SDOT / UDOT */
12185 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
12186 u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
12187 return;
12189 case 0x3: /* USDOT */
12190 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_usdot_b);
12191 return;
12193 case 0x04: /* SMMLA, UMMLA */
12194 gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0,
12195 u ? gen_helper_gvec_ummla_b
12196 : gen_helper_gvec_smmla_b);
12197 return;
12198 case 0x05: /* USMMLA */
12199 gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, gen_helper_gvec_usmmla_b);
12200 return;
12202 case 0x8: /* FCMLA, #0 */
12203 case 0x9: /* FCMLA, #90 */
12204 case 0xa: /* FCMLA, #180 */
12205 case 0xb: /* FCMLA, #270 */
12206 rot = extract32(opcode, 0, 2);
12207 switch (size) {
12208 case 1:
12209 gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot,
12210 gen_helper_gvec_fcmlah);
12211 break;
12212 case 2:
12213 gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
12214 gen_helper_gvec_fcmlas);
12215 break;
12216 case 3:
12217 gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
12218 gen_helper_gvec_fcmlad);
12219 break;
12220 default:
12221 g_assert_not_reached();
12223 return;
12225 case 0xc: /* FCADD, #90 */
12226 case 0xe: /* FCADD, #270 */
12227 rot = extract32(opcode, 1, 1);
12228 switch (size) {
12229 case 1:
12230 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
12231 gen_helper_gvec_fcaddh);
12232 break;
12233 case 2:
12234 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
12235 gen_helper_gvec_fcadds);
12236 break;
12237 case 3:
12238 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
12239 gen_helper_gvec_fcaddd);
12240 break;
12241 default:
12242 g_assert_not_reached();
12244 return;
12246 case 0xd: /* BFMMLA */
12247 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla);
12248 return;
12249 case 0xf:
12250 switch (size) {
12251 case 1: /* BFDOT */
12252 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfdot);
12253 break;
12254 case 3: /* BFMLAL{B,T} */
12255 gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, false, is_q,
12256 gen_helper_gvec_bfmlal);
12257 break;
12258 default:
12259 g_assert_not_reached();
12261 return;
12263 default:
12264 g_assert_not_reached();
12268 static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
12269 int size, int rn, int rd)
12271 /* Handle 2-reg-misc ops which are widening (so each size element
12272 * in the source becomes a 2*size element in the destination).
12273 * The only instruction like this is FCVTL.
12274 */
12275 int pass;
12277 if (size == 3) {
12278 /* 32 -> 64 bit fp conversion */
12279 TCGv_i64 tcg_res[2];
12280 int srcelt = is_q ? 2 : 0;
12282 for (pass = 0; pass < 2; pass++) {
12283 TCGv_i32 tcg_op = tcg_temp_new_i32();
12284 tcg_res[pass] = tcg_temp_new_i64();
12286 read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
12287 gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
12288 tcg_temp_free_i32(tcg_op);
12290 for (pass = 0; pass < 2; pass++) {
12291 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12292 tcg_temp_free_i64(tcg_res[pass]);
12294 } else {
12295 /* 16 -> 32 bit fp conversion */
12296 int srcelt = is_q ? 4 : 0;
12297 TCGv_i32 tcg_res[4];
12298 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
12299 TCGv_i32 ahp = get_ahp_flag();
12301 for (pass = 0; pass < 4; pass++) {
12302 tcg_res[pass] = tcg_temp_new_i32();
12304 read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
12305 gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
12306 fpst, ahp);
12308 for (pass = 0; pass < 4; pass++) {
12309 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
12310 tcg_temp_free_i32(tcg_res[pass]);
12313 tcg_temp_free_ptr(fpst);
12314 tcg_temp_free_i32(ahp);
12318 static void handle_rev(DisasContext *s, int opcode, bool u,
12319 bool is_q, int size, int rn, int rd)
12321 int op = (opcode << 1) | u;
12322 int opsz = op + size;
12323 int grp_size = 3 - opsz;
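/*
 * Each REV reverses the element order within groups of 1 << grp_size
 * elements (8 bytes for REV64, 4 for REV32, 2 for REV16). opsz >= 3 would
 * make the group a single element or smaller, hence unallocated.
 */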
12324 int dsize = is_q ? 128 : 64;
12325 int i;
12327 if (opsz >= 3) {
12328 unallocated_encoding(s);
12329 return;
12332 if (!fp_access_check(s)) {
12333 return;
12336 if (size == 0) {
12337 /* Special case bytes, use bswap op on each group of elements */
12338 int groups = dsize / (8 << grp_size);
12340 for (i = 0; i < groups; i++) {
12341 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
12343 read_vec_element(s, tcg_tmp, rn, i, grp_size);
12344 switch (grp_size) {
12345 case MO_16:
12346 tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
12347 break;
12348 case MO_32:
12349 tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
12350 break;
12351 case MO_64:
12352 tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
12353 break;
12354 default:
12355 g_assert_not_reached();
12357 write_vec_element(s, tcg_tmp, rd, i, grp_size);
12358 tcg_temp_free_i64(tcg_tmp);
12360 clear_vec_high(s, is_q, rd);
12361 } else {
12362 int revmask = (1 << grp_size) - 1;
12363 int esize = 8 << size;
12364 int elements = dsize / esize;
12365 TCGv_i64 tcg_rn = tcg_temp_new_i64();
12366 TCGv_i64 tcg_rd = tcg_const_i64(0);
12367 TCGv_i64 tcg_rd_hi = tcg_const_i64(0);
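/*
 * General case: build the reversed vector in a pair of 64-bit
 * accumulators. Source element i is deposited at destination
 * element index (i ^ revmask), which reverses the order of the
 * elements within each group of (1 << grp_size) elements.
 */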
12369 for (i = 0; i < elements; i++) {
12370 int e_rev = (i & 0xf) ^ revmask;
12371 int off = e_rev * esize;
12372 read_vec_element(s, tcg_rn, rn, i, size);
12373 if (off >= 64) {
12374 tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
12375 tcg_rn, off - 64, esize);
12376 } else {
12377 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
12378 }
12379 }
12380 write_vec_element(s, tcg_rd, rd, 0, MO_64);
12381 write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);
12383 tcg_temp_free_i64(tcg_rd_hi);
12384 tcg_temp_free_i64(tcg_rd);
12385 tcg_temp_free_i64(tcg_rn);
12386 }
12387 }
12388
12389 static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
12390 bool is_q, int size, int rn, int rd)
12392 /* Implement the pairwise operations from 2-misc:
12393 * SADDLP, UADDLP, SADALP, UADALP.
12394 * These all add pairs of elements in the input to produce a
12395 * double-width result element in the output (possibly accumulating).
12397 bool accum = (opcode == 0x6);
12398 int maxpass = is_q ? 2 : 1;
12399 int pass;
12400 TCGv_i64 tcg_res[2];
12402 if (size == 2) {
12403 /* 32 + 32 -> 64 op */
12404 MemOp memop = size + (u ? 0 : MO_SIGN);
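/*
 * memop carries the signedness: the signed forms (U == 0) sign-extend
 * each 32-bit source element before the widening add.
 */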
12406 for (pass = 0; pass < maxpass; pass++) {
12407 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
12408 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
12410 tcg_res[pass] = tcg_temp_new_i64();
12412 read_vec_element(s, tcg_op1, rn, pass * 2, memop);
12413 read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
12414 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
12415 if (accum) {
12416 read_vec_element(s, tcg_op1, rd, pass, MO_64);
12417 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
12420 tcg_temp_free_i64(tcg_op1);
12421 tcg_temp_free_i64(tcg_op2);
12423 } else {
12424 for (pass = 0; pass < maxpass; pass++) {
12425 TCGv_i64 tcg_op = tcg_temp_new_i64();
12426 NeonGenOne64OpFn *genfn;
12427 static NeonGenOne64OpFn * const fns[2][2] = {
12428 { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
12429 { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
12432 genfn = fns[size][u];
12434 tcg_res[pass] = tcg_temp_new_i64();
12436 read_vec_element(s, tcg_op, rn, pass, MO_64);
12437 genfn(tcg_res[pass], tcg_op);
12439 if (accum) {
12440 read_vec_element(s, tcg_op, rd, pass, MO_64);
12441 if (size == 0) {
12442 gen_helper_neon_addl_u16(tcg_res[pass],
12443 tcg_res[pass], tcg_op);
12444 } else {
12445 gen_helper_neon_addl_u32(tcg_res[pass],
12446 tcg_res[pass], tcg_op);
12449 tcg_temp_free_i64(tcg_op);
12452 if (!is_q) {
12453 tcg_res[1] = tcg_constant_i64(0);
12455 for (pass = 0; pass < 2; pass++) {
12456 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12457 tcg_temp_free_i64(tcg_res[pass]);
12461 static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
12463 /* Implement SHLL and SHLL2 */
12464 int pass;
12465 int part = is_q ? 2 : 0;
12466 TCGv_i64 tcg_res[2];
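/*
 * Each result element is the source element widened and then shifted
 * left by the element size (8 << size); 'part' selects the upper half
 * of the input for the SHLL2 form.
 */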
12468 for (pass = 0; pass < 2; pass++) {
12469 static NeonGenWidenFn * const widenfns[3] = {
12470 gen_helper_neon_widen_u8,
12471 gen_helper_neon_widen_u16,
12472 tcg_gen_extu_i32_i64,
12474 NeonGenWidenFn *widenfn = widenfns[size];
12475 TCGv_i32 tcg_op = tcg_temp_new_i32();
12477 read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
12478 tcg_res[pass] = tcg_temp_new_i64();
12479 widenfn(tcg_res[pass], tcg_op);
12480 tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
12482 tcg_temp_free_i32(tcg_op);
12485 for (pass = 0; pass < 2; pass++) {
12486 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12487 tcg_temp_free_i64(tcg_res[pass]);
12491 /* AdvSIMD two reg misc
12492 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
12493 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
12494 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
12495 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
12497 static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
12499 int size = extract32(insn, 22, 2);
12500 int opcode = extract32(insn, 12, 5);
12501 bool u = extract32(insn, 29, 1);
12502 bool is_q = extract32(insn, 30, 1);
12503 int rn = extract32(insn, 5, 5);
12504 int rd = extract32(insn, 0, 5);
12505 bool need_fpstatus = false;
12506 bool need_rmode = false;
12507 int rmode = -1;
12508 TCGv_i32 tcg_rmode;
12509 TCGv_ptr tcg_fpstatus;
12511 switch (opcode) {
12512 case 0x0: /* REV64, REV32 */
12513 case 0x1: /* REV16 */
12514 handle_rev(s, opcode, u, is_q, size, rn, rd);
12515 return;
12516 case 0x5: /* CNT, NOT, RBIT */
12517 if (u && size == 0) {
12518 /* NOT */
12519 break;
12520 } else if (u && size == 1) {
12521 /* RBIT */
12522 break;
12523 } else if (!u && size == 0) {
12524 /* CNT */
12525 break;
12527 unallocated_encoding(s);
12528 return;
12529 case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
12530 case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
12531 if (size == 3) {
12532 unallocated_encoding(s);
12533 return;
12535 if (!fp_access_check(s)) {
12536 return;
12539 handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
12540 return;
12541 case 0x4: /* CLS, CLZ */
12542 if (size == 3) {
12543 unallocated_encoding(s);
12544 return;
12546 break;
12547 case 0x2: /* SADDLP, UADDLP */
12548 case 0x6: /* SADALP, UADALP */
12549 if (size == 3) {
12550 unallocated_encoding(s);
12551 return;
12553 if (!fp_access_check(s)) {
12554 return;
12556 handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
12557 return;
12558 case 0x13: /* SHLL, SHLL2 */
12559 if (u == 0 || size == 3) {
12560 unallocated_encoding(s);
12561 return;
12563 if (!fp_access_check(s)) {
12564 return;
12566 handle_shll(s, is_q, size, rn, rd);
12567 return;
12568 case 0xa: /* CMLT */
12569 if (u == 1) {
12570 unallocated_encoding(s);
12571 return;
12573 /* fall through */
12574 case 0x8: /* CMGT, CMGE */
12575 case 0x9: /* CMEQ, CMLE */
12576 case 0xb: /* ABS, NEG */
12577 if (size == 3 && !is_q) {
12578 unallocated_encoding(s);
12579 return;
12581 break;
12582 case 0x3: /* SUQADD, USQADD */
12583 if (size == 3 && !is_q) {
12584 unallocated_encoding(s);
12585 return;
12587 if (!fp_access_check(s)) {
12588 return;
12590 handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
12591 return;
12592 case 0x7: /* SQABS, SQNEG */
12593 if (size == 3 && !is_q) {
12594 unallocated_encoding(s);
12595 return;
12597 break;
12598 case 0xc ... 0xf:
12599 case 0x16 ... 0x1f:
12601 /* Floating point: U, size[1] and opcode indicate operation;
12602 * size[0] indicates single or double precision.
12604 int is_double = extract32(size, 0, 1);
12605 opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
12606 size = is_double ? 3 : 2;
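/*
 * From here on 'opcode' is the combined value U:size[1]:opcode<4:0>,
 * so for example FABS decodes as 0x2f and FNEG (U == 1) as 0x6f.
 */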
12607 switch (opcode) {
12608 case 0x2f: /* FABS */
12609 case 0x6f: /* FNEG */
12610 if (size == 3 && !is_q) {
12611 unallocated_encoding(s);
12612 return;
12614 break;
12615 case 0x1d: /* SCVTF */
12616 case 0x5d: /* UCVTF */
12618 bool is_signed = (opcode == 0x1d) ? true : false;
12619 int elements = is_double ? 2 : is_q ? 4 : 2;
12620 if (is_double && !is_q) {
12621 unallocated_encoding(s);
12622 return;
12624 if (!fp_access_check(s)) {
12625 return;
12627 handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
12628 return;
12630 case 0x2c: /* FCMGT (zero) */
12631 case 0x2d: /* FCMEQ (zero) */
12632 case 0x2e: /* FCMLT (zero) */
12633 case 0x6c: /* FCMGE (zero) */
12634 case 0x6d: /* FCMLE (zero) */
12635 if (size == 3 && !is_q) {
12636 unallocated_encoding(s);
12637 return;
12639 handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
12640 return;
12641 case 0x7f: /* FSQRT */
12642 if (size == 3 && !is_q) {
12643 unallocated_encoding(s);
12644 return;
12646 break;
12647 case 0x1a: /* FCVTNS */
12648 case 0x1b: /* FCVTMS */
12649 case 0x3a: /* FCVTPS */
12650 case 0x3b: /* FCVTZS */
12651 case 0x5a: /* FCVTNU */
12652 case 0x5b: /* FCVTMU */
12653 case 0x7a: /* FCVTPU */
12654 case 0x7b: /* FCVTZU */
12655 need_fpstatus = true;
12656 need_rmode = true;
12657 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
12658 if (size == 3 && !is_q) {
12659 unallocated_encoding(s);
12660 return;
12662 break;
12663 case 0x5c: /* FCVTAU */
12664 case 0x1c: /* FCVTAS */
12665 need_fpstatus = true;
12666 need_rmode = true;
12667 rmode = FPROUNDING_TIEAWAY;
12668 if (size == 3 && !is_q) {
12669 unallocated_encoding(s);
12670 return;
12672 break;
12673 case 0x3c: /* URECPE */
12674 if (size == 3) {
12675 unallocated_encoding(s);
12676 return;
12678 /* fall through */
12679 case 0x3d: /* FRECPE */
12680 case 0x7d: /* FRSQRTE */
12681 if (size == 3 && !is_q) {
12682 unallocated_encoding(s);
12683 return;
12685 if (!fp_access_check(s)) {
12686 return;
12688 handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
12689 return;
12690 case 0x56: /* FCVTXN, FCVTXN2 */
12691 if (size == 2) {
12692 unallocated_encoding(s);
12693 return;
12695 /* fall through */
12696 case 0x16: /* FCVTN, FCVTN2 */
12697 /* handle_2misc_narrow does a 2*size -> size operation, but these
12698 * instructions encode the source size rather than dest size.
12700 if (!fp_access_check(s)) {
12701 return;
12703 handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
12704 return;
12705 case 0x36: /* BFCVTN, BFCVTN2 */
12706 if (!dc_isar_feature(aa64_bf16, s) || size != 2) {
12707 unallocated_encoding(s);
12708 return;
12710 if (!fp_access_check(s)) {
12711 return;
12713 handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
12714 return;
12715 case 0x17: /* FCVTL, FCVTL2 */
12716 if (!fp_access_check(s)) {
12717 return;
12719 handle_2misc_widening(s, opcode, is_q, size, rn, rd);
12720 return;
12721 case 0x18: /* FRINTN */
12722 case 0x19: /* FRINTM */
12723 case 0x38: /* FRINTP */
12724 case 0x39: /* FRINTZ */
12725 need_rmode = true;
12726 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
12727 /* fall through */
12728 case 0x59: /* FRINTX */
12729 case 0x79: /* FRINTI */
12730 need_fpstatus = true;
12731 if (size == 3 && !is_q) {
12732 unallocated_encoding(s);
12733 return;
12735 break;
12736 case 0x58: /* FRINTA */
12737 need_rmode = true;
12738 rmode = FPROUNDING_TIEAWAY;
12739 need_fpstatus = true;
12740 if (size == 3 && !is_q) {
12741 unallocated_encoding(s);
12742 return;
12744 break;
12745 case 0x7c: /* URSQRTE */
12746 if (size == 3) {
12747 unallocated_encoding(s);
12748 return;
12750 break;
12751 case 0x1e: /* FRINT32Z */
12752 case 0x1f: /* FRINT64Z */
12753 need_rmode = true;
12754 rmode = FPROUNDING_ZERO;
12755 /* fall through */
12756 case 0x5e: /* FRINT32X */
12757 case 0x5f: /* FRINT64X */
12758 need_fpstatus = true;
12759 if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) {
12760 unallocated_encoding(s);
12761 return;
12763 break;
12764 default:
12765 unallocated_encoding(s);
12766 return;
12768 break;
12770 default:
12771 unallocated_encoding(s);
12772 return;
12775 if (!fp_access_check(s)) {
12776 return;
12779 if (need_fpstatus || need_rmode) {
12780 tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
12781 } else {
12782 tcg_fpstatus = NULL;
12784 if (need_rmode) {
12785 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
12786 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
12787 } else {
12788 tcg_rmode = NULL;
12791 switch (opcode) {
12792 case 0x5:
12793 if (u && size == 0) { /* NOT */
12794 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
12795 return;
12797 break;
12798 case 0x8: /* CMGT, CMGE */
12799 if (u) {
12800 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
12801 } else {
12802 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
12804 return;
12805 case 0x9: /* CMEQ, CMLE */
12806 if (u) {
12807 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
12808 } else {
12809 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
12811 return;
12812 case 0xa: /* CMLT */
12813 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
12814 return;
12815 case 0xb:
12816 if (u) { /* ABS, NEG */
12817 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
12818 } else {
12819 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
12821 return;
12824 if (size == 3) {
12825 /* All 64-bit element operations can be shared with scalar 2misc */
12826 int pass;
12828 /* Coverity claims (size == 3 && !is_q) has been eliminated
12829 * from all paths leading to here.
12831 tcg_debug_assert(is_q);
12832 for (pass = 0; pass < 2; pass++) {
12833 TCGv_i64 tcg_op = tcg_temp_new_i64();
12834 TCGv_i64 tcg_res = tcg_temp_new_i64();
12836 read_vec_element(s, tcg_op, rn, pass, MO_64);
12838 handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
12839 tcg_rmode, tcg_fpstatus);
12841 write_vec_element(s, tcg_res, rd, pass, MO_64);
12843 tcg_temp_free_i64(tcg_res);
12844 tcg_temp_free_i64(tcg_op);
12846 } else {
12847 int pass;
12849 for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
12850 TCGv_i32 tcg_op = tcg_temp_new_i32();
12851 TCGv_i32 tcg_res = tcg_temp_new_i32();
12853 read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
12855 if (size == 2) {
12856 /* Special cases for 32 bit elements */
12857 switch (opcode) {
12858 case 0x4: /* CLS */
12859 if (u) {
12860 tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
12861 } else {
12862 tcg_gen_clrsb_i32(tcg_res, tcg_op);
12864 break;
12865 case 0x7: /* SQABS, SQNEG */
12866 if (u) {
12867 gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
12868 } else {
12869 gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
12871 break;
12872 case 0x2f: /* FABS */
12873 gen_helper_vfp_abss(tcg_res, tcg_op);
12874 break;
12875 case 0x6f: /* FNEG */
12876 gen_helper_vfp_negs(tcg_res, tcg_op);
12877 break;
12878 case 0x7f: /* FSQRT */
12879 gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
12880 break;
12881 case 0x1a: /* FCVTNS */
12882 case 0x1b: /* FCVTMS */
12883 case 0x1c: /* FCVTAS */
12884 case 0x3a: /* FCVTPS */
12885 case 0x3b: /* FCVTZS */
12886 gen_helper_vfp_tosls(tcg_res, tcg_op,
12887 tcg_constant_i32(0), tcg_fpstatus);
12888 break;
12889 case 0x5a: /* FCVTNU */
12890 case 0x5b: /* FCVTMU */
12891 case 0x5c: /* FCVTAU */
12892 case 0x7a: /* FCVTPU */
12893 case 0x7b: /* FCVTZU */
12894 gen_helper_vfp_touls(tcg_res, tcg_op,
12895 tcg_constant_i32(0), tcg_fpstatus);
12896 break;
12897 case 0x18: /* FRINTN */
12898 case 0x19: /* FRINTM */
12899 case 0x38: /* FRINTP */
12900 case 0x39: /* FRINTZ */
12901 case 0x58: /* FRINTA */
12902 case 0x79: /* FRINTI */
12903 gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
12904 break;
12905 case 0x59: /* FRINTX */
12906 gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
12907 break;
12908 case 0x7c: /* URSQRTE */
12909 gen_helper_rsqrte_u32(tcg_res, tcg_op);
12910 break;
12911 case 0x1e: /* FRINT32Z */
12912 case 0x5e: /* FRINT32X */
12913 gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
12914 break;
12915 case 0x1f: /* FRINT64Z */
12916 case 0x5f: /* FRINT64X */
12917 gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
12918 break;
12919 default:
12920 g_assert_not_reached();
12922 } else {
12923 /* Use helpers for 8 and 16 bit elements */
12924 switch (opcode) {
12925 case 0x5: /* CNT, RBIT */
12926 /* For these two insns size is part of the opcode specifier
12927 * (handled earlier); they always operate on byte elements.
12929 if (u) {
12930 gen_helper_neon_rbit_u8(tcg_res, tcg_op);
12931 } else {
12932 gen_helper_neon_cnt_u8(tcg_res, tcg_op);
12934 break;
12935 case 0x7: /* SQABS, SQNEG */
12937 NeonGenOneOpEnvFn *genfn;
12938 static NeonGenOneOpEnvFn * const fns[2][2] = {
12939 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
12940 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
12942 genfn = fns[size][u];
12943 genfn(tcg_res, cpu_env, tcg_op);
12944 break;
12946 case 0x4: /* CLS, CLZ */
12947 if (u) {
12948 if (size == 0) {
12949 gen_helper_neon_clz_u8(tcg_res, tcg_op);
12950 } else {
12951 gen_helper_neon_clz_u16(tcg_res, tcg_op);
12953 } else {
12954 if (size == 0) {
12955 gen_helper_neon_cls_s8(tcg_res, tcg_op);
12956 } else {
12957 gen_helper_neon_cls_s16(tcg_res, tcg_op);
12960 break;
12961 default:
12962 g_assert_not_reached();
12966 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
12968 tcg_temp_free_i32(tcg_res);
12969 tcg_temp_free_i32(tcg_op);
12972 clear_vec_high(s, is_q, rd);
12974 if (need_rmode) {
12975 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
12976 tcg_temp_free_i32(tcg_rmode);
12978 if (need_fpstatus) {
12979 tcg_temp_free_ptr(tcg_fpstatus);
12983 /* AdvSIMD [scalar] two register miscellaneous (FP16)
12985 * 31 30 29 28 27 24 23 22 21 17 16 12 11 10 9 5 4 0
12986 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
12987 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 | Rn | Rd |
12988 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
12989 * mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
12990 * val: 0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
12992 * This actually covers two groups where scalar access is governed by
12993 * bit 28. A bunch of the instructions (float to integral) only exist
12994 * in the vector form and are un-allocated for the scalar decode. Also
12995 * in the scalar decode Q is always 1.
12997 static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
12999 int fpop, opcode, a, u;
13000 int rn, rd;
13001 bool is_q;
13002 bool is_scalar;
13003 bool only_in_vector = false;
13005 int pass;
13006 TCGv_i32 tcg_rmode = NULL;
13007 TCGv_ptr tcg_fpstatus = NULL;
13008 bool need_rmode = false;
13009 bool need_fpst = true;
13010 int rmode;
13012 if (!dc_isar_feature(aa64_fp16, s)) {
13013 unallocated_encoding(s);
13014 return;
13017 rd = extract32(insn, 0, 5);
13018 rn = extract32(insn, 5, 5);
13020 a = extract32(insn, 23, 1);
13021 u = extract32(insn, 29, 1);
13022 is_scalar = extract32(insn, 28, 1);
13023 is_q = extract32(insn, 30, 1);
13025 opcode = extract32(insn, 12, 5);
13026 fpop = deposit32(opcode, 5, 1, a);
13027 fpop = deposit32(fpop, 6, 1, u);
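/*
 * fpop combines a:u:opcode<4:0> into one value, mirroring the
 * U:size[1]:opcode composition used by the non-FP16 two-reg-misc
 * decode above, so the case values below line up with that decoder.
 */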
13029 switch (fpop) {
13030 case 0x1d: /* SCVTF */
13031 case 0x5d: /* UCVTF */
13033 int elements;
13035 if (is_scalar) {
13036 elements = 1;
13037 } else {
13038 elements = (is_q ? 8 : 4);
13041 if (!fp_access_check(s)) {
13042 return;
13044 handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
13045 return;
13047 break;
13048 case 0x2c: /* FCMGT (zero) */
13049 case 0x2d: /* FCMEQ (zero) */
13050 case 0x2e: /* FCMLT (zero) */
13051 case 0x6c: /* FCMGE (zero) */
13052 case 0x6d: /* FCMLE (zero) */
13053 handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
13054 return;
13055 case 0x3d: /* FRECPE */
13056 case 0x3f: /* FRECPX */
13057 break;
13058 case 0x18: /* FRINTN */
13059 need_rmode = true;
13060 only_in_vector = true;
13061 rmode = FPROUNDING_TIEEVEN;
13062 break;
13063 case 0x19: /* FRINTM */
13064 need_rmode = true;
13065 only_in_vector = true;
13066 rmode = FPROUNDING_NEGINF;
13067 break;
13068 case 0x38: /* FRINTP */
13069 need_rmode = true;
13070 only_in_vector = true;
13071 rmode = FPROUNDING_POSINF;
13072 break;
13073 case 0x39: /* FRINTZ */
13074 need_rmode = true;
13075 only_in_vector = true;
13076 rmode = FPROUNDING_ZERO;
13077 break;
13078 case 0x58: /* FRINTA */
13079 need_rmode = true;
13080 only_in_vector = true;
13081 rmode = FPROUNDING_TIEAWAY;
13082 break;
13083 case 0x59: /* FRINTX */
13084 case 0x79: /* FRINTI */
13085 only_in_vector = true;
13086 /* current rounding mode */
13087 break;
13088 case 0x1a: /* FCVTNS */
13089 need_rmode = true;
13090 rmode = FPROUNDING_TIEEVEN;
13091 break;
13092 case 0x1b: /* FCVTMS */
13093 need_rmode = true;
13094 rmode = FPROUNDING_NEGINF;
13095 break;
13096 case 0x1c: /* FCVTAS */
13097 need_rmode = true;
13098 rmode = FPROUNDING_TIEAWAY;
13099 break;
13100 case 0x3a: /* FCVTPS */
13101 need_rmode = true;
13102 rmode = FPROUNDING_POSINF;
13103 break;
13104 case 0x3b: /* FCVTZS */
13105 need_rmode = true;
13106 rmode = FPROUNDING_ZERO;
13107 break;
13108 case 0x5a: /* FCVTNU */
13109 need_rmode = true;
13110 rmode = FPROUNDING_TIEEVEN;
13111 break;
13112 case 0x5b: /* FCVTMU */
13113 need_rmode = true;
13114 rmode = FPROUNDING_NEGINF;
13115 break;
13116 case 0x5c: /* FCVTAU */
13117 need_rmode = true;
13118 rmode = FPROUNDING_TIEAWAY;
13119 break;
13120 case 0x7a: /* FCVTPU */
13121 need_rmode = true;
13122 rmode = FPROUNDING_POSINF;
13123 break;
13124 case 0x7b: /* FCVTZU */
13125 need_rmode = true;
13126 rmode = FPROUNDING_ZERO;
13127 break;
13128 case 0x2f: /* FABS */
13129 case 0x6f: /* FNEG */
13130 need_fpst = false;
13131 break;
13132 case 0x7d: /* FRSQRTE */
13133 case 0x7f: /* FSQRT (vector) */
13134 break;
13135 default:
13136 unallocated_encoding(s);
13137 return;
13141 /* Check additional constraints for the scalar encoding */
13142 if (is_scalar) {
13143 if (!is_q) {
13144 unallocated_encoding(s);
13145 return;
13147 /* FRINTxx is only in the vector form */
13148 if (only_in_vector) {
13149 unallocated_encoding(s);
13150 return;
13154 if (!fp_access_check(s)) {
13155 return;
13158 if (need_rmode || need_fpst) {
13159 tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16);
13162 if (need_rmode) {
13163 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
13164 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
13167 if (is_scalar) {
13168 TCGv_i32 tcg_op = read_fp_hreg(s, rn);
13169 TCGv_i32 tcg_res = tcg_temp_new_i32();
13171 switch (fpop) {
13172 case 0x1a: /* FCVTNS */
13173 case 0x1b: /* FCVTMS */
13174 case 0x1c: /* FCVTAS */
13175 case 0x3a: /* FCVTPS */
13176 case 0x3b: /* FCVTZS */
13177 gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
13178 break;
13179 case 0x3d: /* FRECPE */
13180 gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
13181 break;
13182 case 0x3f: /* FRECPX */
13183 gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
13184 break;
13185 case 0x5a: /* FCVTNU */
13186 case 0x5b: /* FCVTMU */
13187 case 0x5c: /* FCVTAU */
13188 case 0x7a: /* FCVTPU */
13189 case 0x7b: /* FCVTZU */
13190 gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
13191 break;
13192 case 0x6f: /* FNEG */
13193 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
13194 break;
13195 case 0x7d: /* FRSQRTE */
13196 gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
13197 break;
13198 default:
13199 g_assert_not_reached();
13202 /* limit any sign extension going on */
13203 tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
13204 write_fp_sreg(s, rd, tcg_res);
13206 tcg_temp_free_i32(tcg_res);
13207 tcg_temp_free_i32(tcg_op);
13208 } else {
13209 for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
13210 TCGv_i32 tcg_op = tcg_temp_new_i32();
13211 TCGv_i32 tcg_res = tcg_temp_new_i32();
13213 read_vec_element_i32(s, tcg_op, rn, pass, MO_16);
13215 switch (fpop) {
13216 case 0x1a: /* FCVTNS */
13217 case 0x1b: /* FCVTMS */
13218 case 0x1c: /* FCVTAS */
13219 case 0x3a: /* FCVTPS */
13220 case 0x3b: /* FCVTZS */
13221 gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
13222 break;
13223 case 0x3d: /* FRECPE */
13224 gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
13225 break;
13226 case 0x5a: /* FCVTNU */
13227 case 0x5b: /* FCVTMU */
13228 case 0x5c: /* FCVTAU */
13229 case 0x7a: /* FCVTPU */
13230 case 0x7b: /* FCVTZU */
13231 gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
13232 break;
13233 case 0x18: /* FRINTN */
13234 case 0x19: /* FRINTM */
13235 case 0x38: /* FRINTP */
13236 case 0x39: /* FRINTZ */
13237 case 0x58: /* FRINTA */
13238 case 0x79: /* FRINTI */
13239 gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
13240 break;
13241 case 0x59: /* FRINTX */
13242 gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
13243 break;
13244 case 0x2f: /* FABS */
13245 tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
13246 break;
13247 case 0x6f: /* FNEG */
13248 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
13249 break;
13250 case 0x7d: /* FRSQRTE */
13251 gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
13252 break;
13253 case 0x7f: /* FSQRT */
13254 gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
13255 break;
13256 default:
13257 g_assert_not_reached();
13260 write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
13262 tcg_temp_free_i32(tcg_res);
13263 tcg_temp_free_i32(tcg_op);
13266 clear_vec_high(s, is_q, rd);
13269 if (tcg_rmode) {
13270 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
13271 tcg_temp_free_i32(tcg_rmode);
13274 if (tcg_fpstatus) {
13275 tcg_temp_free_ptr(tcg_fpstatus);
13279 /* AdvSIMD scalar x indexed element
13280 * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
13281 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
13282 * | 0 1 | U | 1 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
13283 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
13284 * AdvSIMD vector x indexed element
13285 * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
13286 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
13287 * | 0 | Q | U | 0 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
13288 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
13290 static void disas_simd_indexed(DisasContext *s, uint32_t insn)
13291 {
13292 /* This encoding has two kinds of instruction:
13293 * normal, where we perform elt x idxelt => elt for each
13294 * element in the vector
13295 * long, where we perform elt x idxelt and generate a result of
13296 * double the width of the input element
13297 * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
13298 */
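/*
 * For example MUL (by element) is a 'normal' op while SMULL, SMULL2
 * and SQDMULL, SQDMULL2 are 'long' ops; the scalar encodings reuse
 * this decoder with is_scalar set.
 */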
13299 bool is_scalar = extract32(insn, 28, 1);
13300 bool is_q = extract32(insn, 30, 1);
13301 bool u = extract32(insn, 29, 1);
13302 int size = extract32(insn, 22, 2);
13303 int l = extract32(insn, 21, 1);
13304 int m = extract32(insn, 20, 1);
13305 /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
13306 int rm = extract32(insn, 16, 4);
13307 int opcode = extract32(insn, 12, 4);
13308 int h = extract32(insn, 11, 1);
13309 int rn = extract32(insn, 5, 5);
13310 int rd = extract32(insn, 0, 5);
13311 bool is_long = false;
13312 int is_fp = 0;
13313 bool is_fp16 = false;
13314 int index;
13315 TCGv_ptr fpst;
13317 switch (16 * u + opcode) {
13318 case 0x08: /* MUL */
13319 case 0x10: /* MLA */
13320 case 0x14: /* MLS */
13321 if (is_scalar) {
13322 unallocated_encoding(s);
13323 return;
13325 break;
13326 case 0x02: /* SMLAL, SMLAL2 */
13327 case 0x12: /* UMLAL, UMLAL2 */
13328 case 0x06: /* SMLSL, SMLSL2 */
13329 case 0x16: /* UMLSL, UMLSL2 */
13330 case 0x0a: /* SMULL, SMULL2 */
13331 case 0x1a: /* UMULL, UMULL2 */
13332 if (is_scalar) {
13333 unallocated_encoding(s);
13334 return;
13336 is_long = true;
13337 break;
13338 case 0x03: /* SQDMLAL, SQDMLAL2 */
13339 case 0x07: /* SQDMLSL, SQDMLSL2 */
13340 case 0x0b: /* SQDMULL, SQDMULL2 */
13341 is_long = true;
13342 break;
13343 case 0x0c: /* SQDMULH */
13344 case 0x0d: /* SQRDMULH */
13345 break;
13346 case 0x01: /* FMLA */
13347 case 0x05: /* FMLS */
13348 case 0x09: /* FMUL */
13349 case 0x19: /* FMULX */
13350 is_fp = 1;
13351 break;
13352 case 0x1d: /* SQRDMLAH */
13353 case 0x1f: /* SQRDMLSH */
13354 if (!dc_isar_feature(aa64_rdm, s)) {
13355 unallocated_encoding(s);
13356 return;
13358 break;
13359 case 0x0e: /* SDOT */
13360 case 0x1e: /* UDOT */
13361 if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
13362 unallocated_encoding(s);
13363 return;
13365 break;
13366 case 0x0f:
13367 switch (size) {
13368 case 0: /* SUDOT */
13369 case 2: /* USDOT */
13370 if (is_scalar || !dc_isar_feature(aa64_i8mm, s)) {
13371 unallocated_encoding(s);
13372 return;
13374 size = MO_32;
13375 break;
13376 case 1: /* BFDOT */
13377 if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
13378 unallocated_encoding(s);
13379 return;
13381 size = MO_32;
13382 break;
13383 case 3: /* BFMLAL{B,T} */
13384 if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
13385 unallocated_encoding(s);
13386 return;
13388 /* can't set is_fp without other incorrect size checks */
13389 size = MO_16;
13390 break;
13391 default:
13392 unallocated_encoding(s);
13393 return;
13395 break;
13396 case 0x11: /* FCMLA #0 */
13397 case 0x13: /* FCMLA #90 */
13398 case 0x15: /* FCMLA #180 */
13399 case 0x17: /* FCMLA #270 */
13400 if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
13401 unallocated_encoding(s);
13402 return;
13404 is_fp = 2;
13405 break;
13406 case 0x00: /* FMLAL */
13407 case 0x04: /* FMLSL */
13408 case 0x18: /* FMLAL2 */
13409 case 0x1c: /* FMLSL2 */
13410 if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) {
13411 unallocated_encoding(s);
13412 return;
13414 size = MO_16;
13415 /* is_fp, but we pass cpu_env not fp_status. */
13416 break;
13417 default:
13418 unallocated_encoding(s);
13419 return;
13422 switch (is_fp) {
13423 case 1: /* normal fp */
13424 /* convert insn encoded size to MemOp size */
13425 switch (size) {
13426 case 0: /* half-precision */
13427 size = MO_16;
13428 is_fp16 = true;
13429 break;
13430 case MO_32: /* single precision */
13431 case MO_64: /* double precision */
13432 break;
13433 default:
13434 unallocated_encoding(s);
13435 return;
13437 break;
13439 case 2: /* complex fp */
13440 /* Each indexable element is a complex pair. */
13441 size += 1;
13442 switch (size) {
13443 case MO_32:
13444 if (h && !is_q) {
13445 unallocated_encoding(s);
13446 return;
13448 is_fp16 = true;
13449 break;
13450 case MO_64:
13451 break;
13452 default:
13453 unallocated_encoding(s);
13454 return;
13456 break;
13458 default: /* integer */
13459 switch (size) {
13460 case MO_8:
13461 case MO_64:
13462 unallocated_encoding(s);
13463 return;
13465 break;
13467 if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
13468 unallocated_encoding(s);
13469 return;
13472 /* Given MemOp size, adjust register and indexing. */
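/*
 * The element index is assembled from the H:L:M bits: all three for
 * 16-bit elements (index 0..7), H:L for 32-bit elements (with M
 * instead extending Rm to 5 bits), and just H for 64-bit elements.
 */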
13473 switch (size) {
13474 case MO_16:
13475 index = h << 2 | l << 1 | m;
13476 break;
13477 case MO_32:
13478 index = h << 1 | l;
13479 rm |= m << 4;
13480 break;
13481 case MO_64:
13482 if (l || !is_q) {
13483 unallocated_encoding(s);
13484 return;
13486 index = h;
13487 rm |= m << 4;
13488 break;
13489 default:
13490 g_assert_not_reached();
13493 if (!fp_access_check(s)) {
13494 return;
13497 if (is_fp) {
13498 fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
13499 } else {
13500 fpst = NULL;
13503 switch (16 * u + opcode) {
13504 case 0x0e: /* SDOT */
13505 case 0x1e: /* UDOT */
13506 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13507 u ? gen_helper_gvec_udot_idx_b
13508 : gen_helper_gvec_sdot_idx_b);
13509 return;
13510 case 0x0f:
13511 switch (extract32(insn, 22, 2)) {
13512 case 0: /* SUDOT */
13513 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13514 gen_helper_gvec_sudot_idx_b);
13515 return;
13516 case 1: /* BFDOT */
13517 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13518 gen_helper_gvec_bfdot_idx);
13519 return;
13520 case 2: /* USDOT */
13521 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13522 gen_helper_gvec_usdot_idx_b);
13523 return;
13524 case 3: /* BFMLAL{B,T} */
13525 gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, 0, (index << 1) | is_q,
13526 gen_helper_gvec_bfmlal_idx);
13527 return;
13529 g_assert_not_reached();
13530 case 0x11: /* FCMLA #0 */
13531 case 0x13: /* FCMLA #90 */
13532 case 0x15: /* FCMLA #180 */
13533 case 0x17: /* FCMLA #270 */
13535 int rot = extract32(insn, 13, 2);
13536 int data = (index << 2) | rot;
13537 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
13538 vec_full_reg_offset(s, rn),
13539 vec_full_reg_offset(s, rm),
13540 vec_full_reg_offset(s, rd), fpst,
13541 is_q ? 16 : 8, vec_full_reg_size(s), data,
13542 size == MO_64
13543 ? gen_helper_gvec_fcmlas_idx
13544 : gen_helper_gvec_fcmlah_idx);
13545 tcg_temp_free_ptr(fpst);
13547 return;
13549 case 0x00: /* FMLAL */
13550 case 0x04: /* FMLSL */
13551 case 0x18: /* FMLAL2 */
13552 case 0x1c: /* FMLSL2 */
13554 int is_s = extract32(opcode, 2, 1);
13555 int is_2 = u;
13556 int data = (index << 2) | (is_2 << 1) | is_s;
13557 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
13558 vec_full_reg_offset(s, rn),
13559 vec_full_reg_offset(s, rm), cpu_env,
13560 is_q ? 16 : 8, vec_full_reg_size(s),
13561 data, gen_helper_gvec_fmlal_idx_a64);
13563 return;
13565 case 0x08: /* MUL */
13566 if (!is_long && !is_scalar) {
13567 static gen_helper_gvec_3 * const fns[3] = {
13568 gen_helper_gvec_mul_idx_h,
13569 gen_helper_gvec_mul_idx_s,
13570 gen_helper_gvec_mul_idx_d,
13572 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
13573 vec_full_reg_offset(s, rn),
13574 vec_full_reg_offset(s, rm),
13575 is_q ? 16 : 8, vec_full_reg_size(s),
13576 index, fns[size - 1]);
13577 return;
13579 break;
13581 case 0x10: /* MLA */
13582 if (!is_long && !is_scalar) {
13583 static gen_helper_gvec_4 * const fns[3] = {
13584 gen_helper_gvec_mla_idx_h,
13585 gen_helper_gvec_mla_idx_s,
13586 gen_helper_gvec_mla_idx_d,
13588 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
13589 vec_full_reg_offset(s, rn),
13590 vec_full_reg_offset(s, rm),
13591 vec_full_reg_offset(s, rd),
13592 is_q ? 16 : 8, vec_full_reg_size(s),
13593 index, fns[size - 1]);
13594 return;
13596 break;
13598 case 0x14: /* MLS */
13599 if (!is_long && !is_scalar) {
13600 static gen_helper_gvec_4 * const fns[3] = {
13601 gen_helper_gvec_mls_idx_h,
13602 gen_helper_gvec_mls_idx_s,
13603 gen_helper_gvec_mls_idx_d,
13605 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
13606 vec_full_reg_offset(s, rn),
13607 vec_full_reg_offset(s, rm),
13608 vec_full_reg_offset(s, rd),
13609 is_q ? 16 : 8, vec_full_reg_size(s),
13610 index, fns[size - 1]);
13611 return;
13613 break;
13616 if (size == 3) {
13617 TCGv_i64 tcg_idx = tcg_temp_new_i64();
13618 int pass;
13620 assert(is_fp && is_q && !is_long);
13622 read_vec_element(s, tcg_idx, rm, index, MO_64);
13624 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13625 TCGv_i64 tcg_op = tcg_temp_new_i64();
13626 TCGv_i64 tcg_res = tcg_temp_new_i64();
13628 read_vec_element(s, tcg_op, rn, pass, MO_64);
13630 switch (16 * u + opcode) {
13631 case 0x05: /* FMLS */
13632 /* As usual for ARM, separate negation for fused multiply-add */
13633 gen_helper_vfp_negd(tcg_op, tcg_op);
13634 /* fall through */
13635 case 0x01: /* FMLA */
13636 read_vec_element(s, tcg_res, rd, pass, MO_64);
13637 gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
13638 break;
13639 case 0x09: /* FMUL */
13640 gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
13641 break;
13642 case 0x19: /* FMULX */
13643 gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
13644 break;
13645 default:
13646 g_assert_not_reached();
13649 write_vec_element(s, tcg_res, rd, pass, MO_64);
13650 tcg_temp_free_i64(tcg_op);
13651 tcg_temp_free_i64(tcg_res);
13654 tcg_temp_free_i64(tcg_idx);
13655 clear_vec_high(s, !is_scalar, rd);
13656 } else if (!is_long) {
13657 /* 32 bit floating point, or 16 or 32 bit integer.
13658 * For the 16 bit scalar case we use the usual Neon helpers and
13659 * rely on the fact that 0 op 0 == 0 with no side effects.
13661 TCGv_i32 tcg_idx = tcg_temp_new_i32();
13662 int pass, maxpasses;
13664 if (is_scalar) {
13665 maxpasses = 1;
13666 } else {
13667 maxpasses = is_q ? 4 : 2;
13670 read_vec_element_i32(s, tcg_idx, rm, index, size);
13672 if (size == 1 && !is_scalar) {
13673 /* The simplest way to handle the 16x16 indexed ops is to duplicate
13674 * the index into both halves of the 32 bit tcg_idx and then use
13675 * the usual Neon helpers.
13677 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13680 for (pass = 0; pass < maxpasses; pass++) {
13681 TCGv_i32 tcg_op = tcg_temp_new_i32();
13682 TCGv_i32 tcg_res = tcg_temp_new_i32();
13684 read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);
13686 switch (16 * u + opcode) {
13687 case 0x08: /* MUL */
13688 case 0x10: /* MLA */
13689 case 0x14: /* MLS */
13691 static NeonGenTwoOpFn * const fns[2][2] = {
13692 { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
13693 { tcg_gen_add_i32, tcg_gen_sub_i32 },
13695 NeonGenTwoOpFn *genfn;
13696 bool is_sub = opcode == 0x4;
13698 if (size == 1) {
13699 gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
13700 } else {
13701 tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
13703 if (opcode == 0x8) {
13704 break;
13706 read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
13707 genfn = fns[size - 1][is_sub];
13708 genfn(tcg_res, tcg_op, tcg_res);
13709 break;
13711 case 0x05: /* FMLS */
13712 case 0x01: /* FMLA */
13713 read_vec_element_i32(s, tcg_res, rd, pass,
13714 is_scalar ? size : MO_32);
13715 switch (size) {
13716 case 1:
13717 if (opcode == 0x5) {
13718 /* As usual for ARM, separate negation for fused
13719 * multiply-add */
13720 tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
13722 if (is_scalar) {
13723 gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
13724 tcg_res, fpst);
13725 } else {
13726 gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
13727 tcg_res, fpst);
13729 break;
13730 case 2:
13731 if (opcode == 0x5) {
13732 /* As usual for ARM, separate negation for
13733 * fused multiply-add */
13734 tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
13736 gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
13737 tcg_res, fpst);
13738 break;
13739 default:
13740 g_assert_not_reached();
13742 break;
13743 case 0x09: /* FMUL */
13744 switch (size) {
13745 case 1:
13746 if (is_scalar) {
13747 gen_helper_advsimd_mulh(tcg_res, tcg_op,
13748 tcg_idx, fpst);
13749 } else {
13750 gen_helper_advsimd_mul2h(tcg_res, tcg_op,
13751 tcg_idx, fpst);
13753 break;
13754 case 2:
13755 gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
13756 break;
13757 default:
13758 g_assert_not_reached();
13760 break;
13761 case 0x19: /* FMULX */
13762 switch (size) {
13763 case 1:
13764 if (is_scalar) {
13765 gen_helper_advsimd_mulxh(tcg_res, tcg_op,
13766 tcg_idx, fpst);
13767 } else {
13768 gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
13769 tcg_idx, fpst);
13771 break;
13772 case 2:
13773 gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
13774 break;
13775 default:
13776 g_assert_not_reached();
13778 break;
13779 case 0x0c: /* SQDMULH */
13780 if (size == 1) {
13781 gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
13782 tcg_op, tcg_idx);
13783 } else {
13784 gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
13785 tcg_op, tcg_idx);
13787 break;
13788 case 0x0d: /* SQRDMULH */
13789 if (size == 1) {
13790 gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
13791 tcg_op, tcg_idx);
13792 } else {
13793 gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
13794 tcg_op, tcg_idx);
13796 break;
13797 case 0x1d: /* SQRDMLAH */
13798 read_vec_element_i32(s, tcg_res, rd, pass,
13799 is_scalar ? size : MO_32);
13800 if (size == 1) {
13801 gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
13802 tcg_op, tcg_idx, tcg_res);
13803 } else {
13804 gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
13805 tcg_op, tcg_idx, tcg_res);
13807 break;
13808 case 0x1f: /* SQRDMLSH */
13809 read_vec_element_i32(s, tcg_res, rd, pass,
13810 is_scalar ? size : MO_32);
13811 if (size == 1) {
13812 gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
13813 tcg_op, tcg_idx, tcg_res);
13814 } else {
13815 gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
13816 tcg_op, tcg_idx, tcg_res);
13818 break;
13819 default:
13820 g_assert_not_reached();
13823 if (is_scalar) {
13824 write_fp_sreg(s, rd, tcg_res);
13825 } else {
13826 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
13829 tcg_temp_free_i32(tcg_op);
13830 tcg_temp_free_i32(tcg_res);
13833 tcg_temp_free_i32(tcg_idx);
13834 clear_vec_high(s, is_q, rd);
13835 } else {
13836 /* long ops: 16x16->32 or 32x32->64 */
13837 TCGv_i64 tcg_res[2];
13838 int pass;
13839 bool satop = extract32(opcode, 0, 1);
13840 MemOp memop = MO_32;
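/*
 * The products are computed signed unless this is an unsigned
 * non-saturating op; for the saturating SQDML* forms the doubling
 * is done below by saturating-adding the product to itself.
 */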
13842 if (satop || !u) {
13843 memop |= MO_SIGN;
13846 if (size == 2) {
13847 TCGv_i64 tcg_idx = tcg_temp_new_i64();
13849 read_vec_element(s, tcg_idx, rm, index, memop);
13851 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13852 TCGv_i64 tcg_op = tcg_temp_new_i64();
13853 TCGv_i64 tcg_passres;
13854 int passelt;
13856 if (is_scalar) {
13857 passelt = 0;
13858 } else {
13859 passelt = pass + (is_q * 2);
13862 read_vec_element(s, tcg_op, rn, passelt, memop);
13864 tcg_res[pass] = tcg_temp_new_i64();
13866 if (opcode == 0xa || opcode == 0xb) {
13867 /* Non-accumulating ops */
13868 tcg_passres = tcg_res[pass];
13869 } else {
13870 tcg_passres = tcg_temp_new_i64();
13873 tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
13874 tcg_temp_free_i64(tcg_op);
13876 if (satop) {
13877 /* saturating, doubling */
13878 gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
13879 tcg_passres, tcg_passres);
13882 if (opcode == 0xa || opcode == 0xb) {
13883 continue;
13886 /* Accumulating op: handle accumulate step */
13887 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13889 switch (opcode) {
13890 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
13891 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13892 break;
13893 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
13894 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13895 break;
13896 case 0x7: /* SQDMLSL, SQDMLSL2 */
13897 tcg_gen_neg_i64(tcg_passres, tcg_passres);
13898 /* fall through */
13899 case 0x3: /* SQDMLAL, SQDMLAL2 */
13900 gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
13901 tcg_res[pass],
13902 tcg_passres);
13903 break;
13904 default:
13905 g_assert_not_reached();
13907 tcg_temp_free_i64(tcg_passres);
13909 tcg_temp_free_i64(tcg_idx);
13911 clear_vec_high(s, !is_scalar, rd);
13912 } else {
13913 TCGv_i32 tcg_idx = tcg_temp_new_i32();
13915 assert(size == 1);
13916 read_vec_element_i32(s, tcg_idx, rm, index, size);
13918 if (!is_scalar) {
13919 /* The simplest way to handle the 16x16 indexed ops is to
13920 * duplicate the index into both halves of the 32 bit tcg_idx
13921 * and then use the usual Neon helpers.
13923 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13926 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13927 TCGv_i32 tcg_op = tcg_temp_new_i32();
13928 TCGv_i64 tcg_passres;
13930 if (is_scalar) {
13931 read_vec_element_i32(s, tcg_op, rn, pass, size);
13932 } else {
13933 read_vec_element_i32(s, tcg_op, rn,
13934 pass + (is_q * 2), MO_32);
13937 tcg_res[pass] = tcg_temp_new_i64();
13939 if (opcode == 0xa || opcode == 0xb) {
13940 /* Non-accumulating ops */
13941 tcg_passres = tcg_res[pass];
13942 } else {
13943 tcg_passres = tcg_temp_new_i64();
13946 if (memop & MO_SIGN) {
13947 gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
13948 } else {
13949 gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
13951 if (satop) {
13952 gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
13953 tcg_passres, tcg_passres);
13955 tcg_temp_free_i32(tcg_op);
13957 if (opcode == 0xa || opcode == 0xb) {
13958 continue;
13961 /* Accumulating op: handle accumulate step */
13962 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13964 switch (opcode) {
13965 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
13966 gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
13967 tcg_passres);
13968 break;
13969 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
13970 gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
13971 tcg_passres);
13972 break;
13973 case 0x7: /* SQDMLSL, SQDMLSL2 */
13974 gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
13975 /* fall through */
13976 case 0x3: /* SQDMLAL, SQDMLAL2 */
13977 gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
13978 tcg_res[pass],
13979 tcg_passres);
13980 break;
13981 default:
13982 g_assert_not_reached();
13984 tcg_temp_free_i64(tcg_passres);
13986 tcg_temp_free_i32(tcg_idx);
13988 if (is_scalar) {
13989 tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
13993 if (is_scalar) {
13994 tcg_res[1] = tcg_constant_i64(0);
13997 for (pass = 0; pass < 2; pass++) {
13998 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13999 tcg_temp_free_i64(tcg_res[pass]);
14003 if (fpst) {
14004 tcg_temp_free_ptr(fpst);
14008 /* Crypto AES
14009 * 31 24 23 22 21 17 16 12 11 10 9 5 4 0
14010 * +-----------------+------+-----------+--------+-----+------+------+
14011 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd |
14012 * +-----------------+------+-----------+--------+-----+------+------+
14014 static void disas_crypto_aes(DisasContext *s, uint32_t insn)
14016 int size = extract32(insn, 22, 2);
14017 int opcode = extract32(insn, 12, 5);
14018 int rn = extract32(insn, 5, 5);
14019 int rd = extract32(insn, 0, 5);
14020 int decrypt;
14021 gen_helper_gvec_2 *genfn2 = NULL;
14022 gen_helper_gvec_3 *genfn3 = NULL;
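/*
 * AESE/AESD share one helper and AESMC/AESIMC another; the 'decrypt'
 * flag, passed as the immediate, selects between the two directions.
 */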
14024 if (!dc_isar_feature(aa64_aes, s) || size != 0) {
14025 unallocated_encoding(s);
14026 return;
14029 switch (opcode) {
14030 case 0x4: /* AESE */
14031 decrypt = 0;
14032 genfn3 = gen_helper_crypto_aese;
14033 break;
14034 case 0x6: /* AESMC */
14035 decrypt = 0;
14036 genfn2 = gen_helper_crypto_aesmc;
14037 break;
14038 case 0x5: /* AESD */
14039 decrypt = 1;
14040 genfn3 = gen_helper_crypto_aese;
14041 break;
14042 case 0x7: /* AESIMC */
14043 decrypt = 1;
14044 genfn2 = gen_helper_crypto_aesmc;
14045 break;
14046 default:
14047 unallocated_encoding(s);
14048 return;
14051 if (!fp_access_check(s)) {
14052 return;
14054 if (genfn2) {
14055 gen_gvec_op2_ool(s, true, rd, rn, decrypt, genfn2);
14056 } else {
14057 gen_gvec_op3_ool(s, true, rd, rd, rn, decrypt, genfn3);
14061 /* Crypto three-reg SHA
14062 * 31 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
14063 * +-----------------+------+---+------+---+--------+-----+------+------+
14064 * | 0 1 0 1 1 1 1 0 | size | 0 | Rm | 0 | opcode | 0 0 | Rn | Rd |
14065 * +-----------------+------+---+------+---+--------+-----+------+------+
14067 static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
14069 int size = extract32(insn, 22, 2);
14070 int opcode = extract32(insn, 12, 3);
14071 int rm = extract32(insn, 16, 5);
14072 int rn = extract32(insn, 5, 5);
14073 int rd = extract32(insn, 0, 5);
14074 gen_helper_gvec_3 *genfn;
14075 bool feature;
14077 if (size != 0) {
14078 unallocated_encoding(s);
14079 return;
14082 switch (opcode) {
14083 case 0: /* SHA1C */
14084 genfn = gen_helper_crypto_sha1c;
14085 feature = dc_isar_feature(aa64_sha1, s);
14086 break;
14087 case 1: /* SHA1P */
14088 genfn = gen_helper_crypto_sha1p;
14089 feature = dc_isar_feature(aa64_sha1, s);
14090 break;
14091 case 2: /* SHA1M */
14092 genfn = gen_helper_crypto_sha1m;
14093 feature = dc_isar_feature(aa64_sha1, s);
14094 break;
14095 case 3: /* SHA1SU0 */
14096 genfn = gen_helper_crypto_sha1su0;
14097 feature = dc_isar_feature(aa64_sha1, s);
14098 break;
14099 case 4: /* SHA256H */
14100 genfn = gen_helper_crypto_sha256h;
14101 feature = dc_isar_feature(aa64_sha256, s);
14102 break;
14103 case 5: /* SHA256H2 */
14104 genfn = gen_helper_crypto_sha256h2;
14105 feature = dc_isar_feature(aa64_sha256, s);
14106 break;
14107 case 6: /* SHA256SU1 */
14108 genfn = gen_helper_crypto_sha256su1;
14109 feature = dc_isar_feature(aa64_sha256, s);
14110 break;
14111 default:
14112 unallocated_encoding(s);
14113 return;
14116 if (!feature) {
14117 unallocated_encoding(s);
14118 return;
14121 if (!fp_access_check(s)) {
14122 return;
14124 gen_gvec_op3_ool(s, true, rd, rn, rm, 0, genfn);
14127 /* Crypto two-reg SHA
14128 * 31 24 23 22 21 17 16 12 11 10 9 5 4 0
14129 * +-----------------+------+-----------+--------+-----+------+------+
14130 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd |
14131 * +-----------------+------+-----------+--------+-----+------+------+
14133 static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
14135 int size = extract32(insn, 22, 2);
14136 int opcode = extract32(insn, 12, 5);
14137 int rn = extract32(insn, 5, 5);
14138 int rd = extract32(insn, 0, 5);
14139 gen_helper_gvec_2 *genfn;
14140 bool feature;
14142 if (size != 0) {
14143 unallocated_encoding(s);
14144 return;
14147 switch (opcode) {
14148 case 0: /* SHA1H */
14149 feature = dc_isar_feature(aa64_sha1, s);
14150 genfn = gen_helper_crypto_sha1h;
14151 break;
14152 case 1: /* SHA1SU1 */
14153 feature = dc_isar_feature(aa64_sha1, s);
14154 genfn = gen_helper_crypto_sha1su1;
14155 break;
14156 case 2: /* SHA256SU0 */
14157 feature = dc_isar_feature(aa64_sha256, s);
14158 genfn = gen_helper_crypto_sha256su0;
14159 break;
14160 default:
14161 unallocated_encoding(s);
14162 return;
14165 if (!feature) {
14166 unallocated_encoding(s);
14167 return;
14170 if (!fp_access_check(s)) {
14171 return;
14173 gen_gvec_op2_ool(s, true, rd, rn, 0, genfn);
14174 }
14175
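/*
 * RAX1 (SHA3): each 64-bit lane of the result is n ^ rol64(m, 1).
 * Expansions are provided for scalar i64, for host vectors with a
 * rotate-left op, and as an out-of-line helper fallback.
 */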
14176 static void gen_rax1_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)
14177 {
14178 tcg_gen_rotli_i64(d, m, 1);
14179 tcg_gen_xor_i64(d, d, n);
14180 }
14181
14182 static void gen_rax1_vec(unsigned vece, TCGv_vec d, TCGv_vec n, TCGv_vec m)
14183 {
14184 tcg_gen_rotli_vec(vece, d, m, 1);
14185 tcg_gen_xor_vec(vece, d, d, n);
14186 }
14187
14188 void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
14189 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
14190 {
14191 static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
14192 static const GVecGen3 op = {
14193 .fni8 = gen_rax1_i64,
14194 .fniv = gen_rax1_vec,
14195 .opt_opc = vecop_list,
14196 .fno = gen_helper_crypto_rax1,
14197 .vece = MO_64,
14198 };
14199 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &op);
14200 }
14201
14202 /* Crypto three-reg SHA512
14203 * 31 21 20 16 15 14 13 12 11 10 9 5 4 0
14204 * +-----------------------+------+---+---+-----+--------+------+------+
14205 * | 1 1 0 0 1 1 1 0 0 1 1 | Rm | 1 | O | 0 0 | opcode | Rn | Rd |
14206 * +-----------------------+------+---+---+-----+--------+------+------+
14208 static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
14210 int opcode = extract32(insn, 10, 2);
14211 int o = extract32(insn, 14, 1);
14212 int rm = extract32(insn, 16, 5);
14213 int rn = extract32(insn, 5, 5);
14214 int rd = extract32(insn, 0, 5);
14215 bool feature;
14216 gen_helper_gvec_3 *oolfn = NULL;
14217 GVecGen3Fn *gvecfn = NULL;
14219 if (o == 0) {
14220 switch (opcode) {
14221 case 0: /* SHA512H */
14222 feature = dc_isar_feature(aa64_sha512, s);
14223 oolfn = gen_helper_crypto_sha512h;
14224 break;
14225 case 1: /* SHA512H2 */
14226 feature = dc_isar_feature(aa64_sha512, s);
14227 oolfn = gen_helper_crypto_sha512h2;
14228 break;
14229 case 2: /* SHA512SU1 */
14230 feature = dc_isar_feature(aa64_sha512, s);
14231 oolfn = gen_helper_crypto_sha512su1;
14232 break;
14233 case 3: /* RAX1 */
14234 feature = dc_isar_feature(aa64_sha3, s);
14235 gvecfn = gen_gvec_rax1;
14236 break;
14237 default:
14238 g_assert_not_reached();
14240 } else {
14241 switch (opcode) {
14242 case 0: /* SM3PARTW1 */
14243 feature = dc_isar_feature(aa64_sm3, s);
14244 oolfn = gen_helper_crypto_sm3partw1;
14245 break;
14246 case 1: /* SM3PARTW2 */
14247 feature = dc_isar_feature(aa64_sm3, s);
14248 oolfn = gen_helper_crypto_sm3partw2;
14249 break;
14250 case 2: /* SM4EKEY */
14251 feature = dc_isar_feature(aa64_sm4, s);
14252 oolfn = gen_helper_crypto_sm4ekey;
14253 break;
14254 default:
14255 unallocated_encoding(s);
14256 return;
14260 if (!feature) {
14261 unallocated_encoding(s);
14262 return;
14265 if (!fp_access_check(s)) {
14266 return;
14269 if (oolfn) {
14270 gen_gvec_op3_ool(s, true, rd, rn, rm, 0, oolfn);
14271 } else {
14272 gen_gvec_fn3(s, true, rd, rn, rm, gvecfn, MO_64);
14276 /* Crypto two-reg SHA512
14277 * 31 12 11 10 9 5 4 0
14278 * +-----------------------------------------+--------+------+------+
14279 * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode | Rn | Rd |
14280 * +-----------------------------------------+--------+------+------+
14282 static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
14284 int opcode = extract32(insn, 10, 2);
14285 int rn = extract32(insn, 5, 5);
14286 int rd = extract32(insn, 0, 5);
14287 bool feature;
14289 switch (opcode) {
14290 case 0: /* SHA512SU0 */
14291 feature = dc_isar_feature(aa64_sha512, s);
14292 break;
14293 case 1: /* SM4E */
14294 feature = dc_isar_feature(aa64_sm4, s);
14295 break;
14296 default:
14297 unallocated_encoding(s);
14298 return;
14301 if (!feature) {
14302 unallocated_encoding(s);
14303 return;
14306 if (!fp_access_check(s)) {
14307 return;
14310 switch (opcode) {
14311 case 0: /* SHA512SU0 */
14312 gen_gvec_op2_ool(s, true, rd, rn, 0, gen_helper_crypto_sha512su0);
14313 break;
14314 case 1: /* SM4E */
14315 gen_gvec_op3_ool(s, true, rd, rd, rn, 0, gen_helper_crypto_sm4e);
14316 break;
14317 default:
14318 g_assert_not_reached();
14322 /* Crypto four-register
14323 * 31 23 22 21 20 16 15 14 10 9 5 4 0
14324 * +-------------------+-----+------+---+------+------+------+
14325 * | 1 1 0 0 1 1 1 0 0 | Op0 | Rm | 0 | Ra | Rn | Rd |
14326 * +-------------------+-----+------+---+------+------+------+
14328 static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
14330 int op0 = extract32(insn, 21, 2);
14331 int rm = extract32(insn, 16, 5);
14332 int ra = extract32(insn, 10, 5);
14333 int rn = extract32(insn, 5, 5);
14334 int rd = extract32(insn, 0, 5);
14335 bool feature;
14337 switch (op0) {
14338 case 0: /* EOR3 */
14339 case 1: /* BCAX */
14340 feature = dc_isar_feature(aa64_sha3, s);
14341 break;
14342 case 2: /* SM3SS1 */
14343 feature = dc_isar_feature(aa64_sm3, s);
14344 break;
14345 default:
14346 unallocated_encoding(s);
14347 return;
14350 if (!feature) {
14351 unallocated_encoding(s);
14352 return;
14355 if (!fp_access_check(s)) {
14356 return;
14359 if (op0 < 2) {
14360 TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
14361 int pass;
14363 tcg_op1 = tcg_temp_new_i64();
14364 tcg_op2 = tcg_temp_new_i64();
14365 tcg_op3 = tcg_temp_new_i64();
14366 tcg_res[0] = tcg_temp_new_i64();
14367 tcg_res[1] = tcg_temp_new_i64();
14369 for (pass = 0; pass < 2; pass++) {
14370 read_vec_element(s, tcg_op1, rn, pass, MO_64);
14371 read_vec_element(s, tcg_op2, rm, pass, MO_64);
14372 read_vec_element(s, tcg_op3, ra, pass, MO_64);
14374 if (op0 == 0) {
14375 /* EOR3 */
14376 tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
14377 } else {
14378 /* BCAX */
14379 tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
14381 tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
14383 write_vec_element(s, tcg_res[0], rd, 0, MO_64);
14384 write_vec_element(s, tcg_res[1], rd, 1, MO_64);
14386 tcg_temp_free_i64(tcg_op1);
14387 tcg_temp_free_i64(tcg_op2);
14388 tcg_temp_free_i64(tcg_op3);
14389 tcg_temp_free_i64(tcg_res[0]);
14390 tcg_temp_free_i64(tcg_res[1]);
14391 } else {
14392 TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;
14394 tcg_op1 = tcg_temp_new_i32();
14395 tcg_op2 = tcg_temp_new_i32();
14396 tcg_op3 = tcg_temp_new_i32();
14397 tcg_res = tcg_temp_new_i32();
14398 tcg_zero = tcg_constant_i32(0);
14400 read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
14401 read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
14402 read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);
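/*
 * The SM3SS1 value computed here is
 * rol32(rol32(Vn[3], 12) + Vm[3] + Va[3], 7), written to element 3
 * of Vd; the other three elements are zeroed.
 */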
14404 tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
14405 tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
14406 tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
14407 tcg_gen_rotri_i32(tcg_res, tcg_res, 25);
14409 write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
14410 write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
14411 write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
14412 write_vec_element_i32(s, tcg_res, rd, 3, MO_32);
14414 tcg_temp_free_i32(tcg_op1);
14415 tcg_temp_free_i32(tcg_op2);
14416 tcg_temp_free_i32(tcg_op3);
14417 tcg_temp_free_i32(tcg_res);
14421 /* Crypto XAR
14422 * 31 21 20 16 15 10 9 5 4 0
14423 * +-----------------------+------+--------+------+------+
14424 * | 1 1 0 0 1 1 1 0 1 0 0 | Rm | imm6 | Rn | Rd |
14425 * +-----------------------+------+--------+------+------+
14427 static void disas_crypto_xar(DisasContext *s, uint32_t insn)
14429 int rm = extract32(insn, 16, 5);
14430 int imm6 = extract32(insn, 10, 6);
14431 int rn = extract32(insn, 5, 5);
14432 int rd = extract32(insn, 0, 5);
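/*
 * XAR: each 64-bit lane of the result is (Vn ^ Vm) rotated right by
 * imm6, produced via the shared gen_gvec_xar expander at MO_64.
 */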
14434 if (!dc_isar_feature(aa64_sha3, s)) {
14435 unallocated_encoding(s);
14436 return;
14439 if (!fp_access_check(s)) {
14440 return;
14443 gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd),
14444 vec_full_reg_offset(s, rn),
14445 vec_full_reg_offset(s, rm), imm6, 16,
14446 vec_full_reg_size(s));
14449 /* Crypto three-reg imm2
14450 * 31 21 20 16 15 14 13 12 11 10 9 5 4 0
14451 * +-----------------------+------+-----+------+--------+------+------+
14452 * | 1 1 0 0 1 1 1 0 0 1 0 | Rm | 1 0 | imm2 | opcode | Rn | Rd |
14453 * +-----------------------+------+-----+------+--------+------+------+
14455 static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
14457 static gen_helper_gvec_3 * const fns[4] = {
14458 gen_helper_crypto_sm3tt1a, gen_helper_crypto_sm3tt1b,
14459 gen_helper_crypto_sm3tt2a, gen_helper_crypto_sm3tt2b,
14461 int opcode = extract32(insn, 10, 2);
14462 int imm2 = extract32(insn, 12, 2);
14463 int rm = extract32(insn, 16, 5);
14464 int rn = extract32(insn, 5, 5);
14465 int rd = extract32(insn, 0, 5);
14467 if (!dc_isar_feature(aa64_sm3, s)) {
14468 unallocated_encoding(s);
14469 return;
14472 if (!fp_access_check(s)) {
14473 return;
14476 gen_gvec_op3_ool(s, true, rd, rn, rm, imm2, fns[opcode]);
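/*
 * opcode picks one of the four SM3TT helpers and imm2 is passed through
 * as the helper's "data" argument; per the architectural SM3TT*
 * definitions it selects which 32-bit element of Vm (Vm.S[imm2])
 * participates in the update of Vd.
 */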
14479 /* C3.6 Data processing - SIMD, inc Crypto
14481 * As the decode gets a little complex we are using a table based
14482 * approach for this part of the decode.
14484 static const AArch64DecodeTable data_proc_simd[] = {
14485 /* pattern , mask , fn */
14486 { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
14487 { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
14488 { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
14489 { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
14490 { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
14491 { 0x0e000400, 0x9fe08400, disas_simd_copy },
14492 { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
14493 /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
14494 { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
14495 { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
14496 { 0x0e000000, 0xbf208c00, disas_simd_tb },
14497 { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
14498 { 0x2e000000, 0xbf208400, disas_simd_ext },
14499 { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
14500 { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
14501 { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
14502 { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
14503 { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
14504 { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
14505 { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
14506 { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
14507 { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
14508 { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
14509 { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
14510 { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
14511 { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
14512 { 0xce000000, 0xff808000, disas_crypto_four_reg },
14513 { 0xce800000, 0xffe00000, disas_crypto_xar },
14514 { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
14515 { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
14516 { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
14517 { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
14518 { 0x00000000, 0x00000000, NULL }
14521 static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
14523 /* Note that this is called with all non-FP cases from
14524 * table C3-6 so it must UNDEF for entries not specifically
14525 * allocated to instructions in that table.
14527 AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
14528 if (fn) {
14529 fn(s, insn);
14530 } else {
14531 unallocated_encoding(s);
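/*
 * lookup_disas_fn (defined earlier in this file) is conceptually just a
 * linear scan of the pattern/mask pairs above: a row matches when the
 * insn agrees with the pattern on every bit selected by the mask, and a
 * NULL return is treated as UNDEF by the caller.  A minimal sketch of
 * that kind of lookup, assuming the NULL-terminated layout used here:
 *
 *   static AArch64DecodeFn *lookup_sketch(const AArch64DecodeTable *tp,
 *                                         uint32_t insn)
 *   {
 *       for (; tp->disas_fn; tp++) {
 *           if ((insn & tp->mask) == tp->pattern) {
 *               return tp->disas_fn;
 *           }
 *       }
 *       return NULL;
 *   }
 *
 * Because the scan stops at the first hit, table order matters where
 * patterns overlap (e.g. simd_mod_imm must precede simd_shift_imm).
 */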
14535 /* C3.6 Data processing - SIMD and floating point */
14536 static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
14538 if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
14539 disas_data_proc_fp(s, insn);
14540 } else {
14541 /* SIMD, including crypto */
14542 disas_data_proc_simd(s, insn);
14547 * Include the generated SME FA64 decoder.
14550 #include "decode-sme-fa64.c.inc"
14552 static bool trans_OK(DisasContext *s, arg_OK *a)
14554 return true;
14557 static bool trans_FAIL(DisasContext *s, arg_OK *a)
14559 s->is_nonstreaming = true;
14560 return true;
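/*
 * The generated decoder included above classifies every A64 encoding as
 * either legal or illegal when Streaming SVE mode is active.  Legal
 * encodings hit trans_OK, a no-op; everything else hits trans_FAIL,
 * which merely records the fact in s->is_nonstreaming.  No exception is
 * raised here: the access-check helpers earlier in this file are the
 * ones that turn (s->sme_trap_nonstreaming && s->is_nonstreaming) into
 * the SME "illegal when streaming" trap.
 */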
14564 * is_guarded_page:
14565 * @env: The cpu environment
14566 * @s: The DisasContext
14568 * Return true if the page is guarded.
14570 static bool is_guarded_page(CPUARMState *env, DisasContext *s)
14572 uint64_t addr = s->base.pc_first;
14573 #ifdef CONFIG_USER_ONLY
14574 return page_get_flags(addr) & PAGE_BTI;
14575 #else
14576 int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
14577 unsigned int index = tlb_index(env, mmu_idx, addr);
14578 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
14581 * We test this immediately after reading an insn, which means
14582 * that any normal page must be in the TLB. The only exception
14583 * would be for executing from flash or device memory, which
14584 * does not retain the TLB entry.
14586 * FIXME: Assume false for those, for now. We could use
14587 * arm_cpu_get_phys_page_attrs_debug to re-read the page
14588 * table entry even for that case.
14590 return (tlb_hit(entry->addr_code, addr) &&
14591 arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].iotlb[index].attrs));
14592 #endif
14596 * btype_destination_ok:
14597 * @insn: The instruction at the branch destination
14598 * @bt: SCTLR_ELx.BT
14599  * @btype: PSTATE.BTYPE, known to be non-zero
14601 * On a guarded page, there are a limited number of insns
14602 * that may be present at the branch target:
14603 * - branch target identifiers,
14604 * - paciasp, pacibsp,
14605 * - BRK insn
14606 * - HLT insn
14607 * Anything else causes a Branch Target Exception.
14609 * Return true if the branch is compatible, false to raise BTITRAP.
14611 static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
14613 if ((insn & 0xfffff01fu) == 0xd503201fu) {
14614 /* HINT space */
14615 switch (extract32(insn, 5, 7)) {
14616 case 0b011001: /* PACIASP */
14617 case 0b011011: /* PACIBSP */
14619 * If SCTLR_ELx.BT, then PACI*SP are not compatible
14620 * with btype == 3. Otherwise all btype are ok.
14622 return !bt || btype != 3;
14623 case 0b100000: /* BTI */
14624 /* Not compatible with any btype. */
14625 return false;
14626 case 0b100010: /* BTI c */
14627 /* Not compatible with btype == 3 */
14628 return btype != 3;
14629 case 0b100100: /* BTI j */
14630 /* Not compatible with btype == 2 */
14631 return btype != 2;
14632 case 0b100110: /* BTI jc */
14633 /* Compatible with any btype. */
14634 return true;
14636 } else {
14637 switch (insn & 0xffe0001fu) {
14638 case 0xd4200000u: /* BRK */
14639 case 0xd4400000u: /* HLT */
14640 /* Give priority to the breakpoint exception. */
14641 return true;
14644 return false;
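/*
 * The cases above reduce to the following compatibility matrix for a
 * guarded page (rows: the insn at the branch target, columns: the
 * non-zero PSTATE.BTYPE value):
 *
 *                        btype=1  btype=2  btype=3
 *   BTI                    no       no       no
 *   BTI c                  yes      yes      no
 *   BTI j                  yes      no       yes
 *   BTI jc                 yes      yes      yes
 *   PACIASP / PACIBSP      yes      yes      only if !SCTLR_ELx.BT
 *   BRK / HLT              yes      yes      yes   (debug excp wins)
 *   anything else          no       no       no
 */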
14647 static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
14648 CPUState *cpu)
14650 DisasContext *dc = container_of(dcbase, DisasContext, base);
14651 CPUARMState *env = cpu->env_ptr;
14652 ARMCPU *arm_cpu = env_archcpu(env);
14653 CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
14654 int bound, core_mmu_idx;
14656 dc->isar = &arm_cpu->isar;
14657 dc->condjmp = 0;
14659 dc->aarch64 = true;
14660 dc->thumb = false;
14661 dc->sctlr_b = 0;
14662 dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
14663 dc->condexec_mask = 0;
14664 dc->condexec_cond = 0;
14665 core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
14666 dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
14667 dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
14668 dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
14669 dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
14670 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
14671 #if !defined(CONFIG_USER_ONLY)
14672 dc->user = (dc->current_el == 0);
14673 #endif
14674 dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
14675 dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
14676 dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
14677 dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
14678 dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL);
14679 dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;
14680 dc->svl = (EX_TBFLAG_A64(tb_flags, SVL) + 1) * 16;
14681 dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
14682 dc->bt = EX_TBFLAG_A64(tb_flags, BT);
14683 dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
14684 dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
14685 dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
14686 dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
14687 dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
14688 dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
14689 dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
14690 dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
14691 dc->vec_len = 0;
14692 dc->vec_stride = 0;
14693 dc->cp_regs = arm_cpu->cp_regs;
14694 dc->features = env->features;
14695 dc->dcz_blocksize = arm_cpu->dcz_blocksize;
14697 #ifdef CONFIG_USER_ONLY
14698 /* In sve_probe_page, we assume TBI is enabled. */
14699 tcg_debug_assert(dc->tbid & 1);
14700 #endif
14702 /* Single step state. The code-generation logic here is:
14703 * SS_ACTIVE == 0:
14704 * generate code with no special handling for single-stepping (except
14705 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
14706 * this happens anyway because those changes are all system register or
14707 * PSTATE writes).
14708 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
14709 * emit code for one insn
14710 * emit code to clear PSTATE.SS
14711 * emit code to generate software step exception for completed step
14712 * end TB (as usual for having generated an exception)
14713 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
14714 * emit code to generate a software step exception
14715 * end the TB
14717 dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
14718 dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
14719 dc->is_ldex = false;
14721 /* Bound the number of insns to execute to those left on the page. */
14722 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
14724 /* If architectural single step active, limit to 1. */
14725 if (dc->ss_active) {
14726 bound = 1;
14728 dc->base.max_insns = MIN(dc->base.max_insns, bound);
14730 init_tmp_a64_array(dc);
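/*
 * A worked example of the page-bound arithmetic above, assuming 4KiB
 * target pages (TARGET_PAGE_MASK == 0xfffffffffffff000 as a 64-bit
 * value) and an arbitrary example start address:
 *
 *   pc_first                    = 0x0000aaaaaaaa0fc0
 *   pc_first | TARGET_PAGE_MASK = 0xffffffffffffffc0
 *   -(pc_first | mask)          = 0x40 bytes to the next page boundary
 *   bound                       = 0x40 / 4 = 16 insns
 *
 * so this TB is capped at 16 insns, or at 1 if single-stepping.
 */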
14733 static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
14737 static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
14739 DisasContext *dc = container_of(dcbase, DisasContext, base);
14741 tcg_gen_insn_start(dc->base.pc_next, 0, 0);
14742 dc->insn_start = tcg_last_op();
14745 static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
14747 DisasContext *s = container_of(dcbase, DisasContext, base);
14748 CPUARMState *env = cpu->env_ptr;
14749 uint64_t pc = s->base.pc_next;
14750 uint32_t insn;
14752 /* Singlestep exceptions have the highest priority. */
14753 if (s->ss_active && !s->pstate_ss) {
14754 /* Singlestep state is Active-pending.
14755 * If we're in this state at the start of a TB then either
14756 * a) we just took an exception to an EL which is being debugged
14757 * and this is the first insn in the exception handler
14758 * b) debug exceptions were masked and we just unmasked them
14759 * without changing EL (eg by clearing PSTATE.D)
14760 * In either case we're going to take a swstep exception in the
14761 * "did not step an insn" case, and so the syndrome ISV and EX
14762 * bits should be zero.
14764 assert(s->base.num_insns == 1);
14765 gen_swstep_exception(s, 0, 0);
14766 s->base.is_jmp = DISAS_NORETURN;
14767 s->base.pc_next = pc + 4;
14768 return;
14771 if (pc & 3) {
14773 * PC alignment fault. This has priority over the instruction abort
14774 * that we would receive from a translation fault via arm_ldl_code.
14775 * This should only be possible after an indirect branch, at the
14776 * start of the TB.
14778 assert(s->base.num_insns == 1);
14779 gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
14780 s->base.is_jmp = DISAS_NORETURN;
14781 s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
14782 return;
14785 s->pc_curr = pc;
14786 insn = arm_ldl_code(env, &s->base, pc, s->sctlr_b);
14787 s->insn = insn;
14788 s->base.pc_next = pc + 4;
14790 s->fp_access_checked = false;
14791 s->sve_access_checked = false;
14793 if (s->pstate_il) {
14795 * Illegal execution state. This has priority over BTI
14796 * exceptions, but comes after instruction abort exceptions.
14798 gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_illegalstate());
14799 return;
14802 if (dc_isar_feature(aa64_bti, s)) {
14803 if (s->base.num_insns == 1) {
14805 * At the first insn of the TB, compute s->guarded_page.
14806 * We delayed computing this until successfully reading
14807 * the first insn of the TB, above. This (mostly) ensures
14808 * that the softmmu tlb entry has been populated, and the
14809 * page table GP bit is available.
14811 * Note that we need to compute this even if btype == 0,
14812 * because this value is used for BR instructions later
14813 * where ENV is not available.
14815 s->guarded_page = is_guarded_page(env, s);
14817 /* First insn can have btype set to non-zero. */
14818 tcg_debug_assert(s->btype >= 0);
14821 * Note that the Branch Target Exception has fairly high
14822 * priority -- below debugging exceptions but above most
14823 * everything else. This allows us to handle this now
14824 * instead of waiting until the insn is otherwise decoded.
14826 if (s->btype != 0
14827 && s->guarded_page
14828 && !btype_destination_ok(insn, s->bt, s->btype)) {
14829 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
14830 syn_btitrap(s->btype));
14831 return;
14833 } else {
14834 /* Not the first insn: btype must be 0. */
14835 tcg_debug_assert(s->btype == 0);
14839 s->is_nonstreaming = false;
14840 if (s->sme_trap_nonstreaming) {
14841 disas_sme_fa64(s, insn);
14844 switch (extract32(insn, 25, 4)) {
14845 case 0x0:
14846 if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) {
14847 unallocated_encoding(s);
14849 break;
14850 case 0x1: case 0x3: /* UNALLOCATED */
14851 unallocated_encoding(s);
14852 break;
14853 case 0x2:
14854 if (!disas_sve(s, insn)) {
14855 unallocated_encoding(s);
14857 break;
14858 case 0x8: case 0x9: /* Data processing - immediate */
14859 disas_data_proc_imm(s, insn);
14860 break;
14861 case 0xa: case 0xb: /* Branch, exception generation and system insns */
14862 disas_b_exc_sys(s, insn);
14863 break;
14864 case 0x4:
14865 case 0x6:
14866 case 0xc:
14867 case 0xe: /* Loads and stores */
14868 disas_ldst(s, insn);
14869 break;
14870 case 0x5:
14871 case 0xd: /* Data processing - register */
14872 disas_data_proc_reg(s, insn);
14873 break;
14874 case 0x7:
14875 case 0xf: /* Data processing - SIMD and floating point */
14876 disas_data_proc_simd_fp(s, insn);
14877 break;
14878 default:
14879         assert(FALSE); /* all 16 cases should be handled above */
14880 break;
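/*
 * A concrete example of the dispatch above: ADD X0, X1, X2 encodes as
 * 0x8b020020, and extract32(0x8b020020, 25, 4) == 0x5, so it is routed
 * to disas_data_proc_reg().  The SME and SVE groups (op0 == 0x0, 0x2)
 * go through generated decodetree decoders instead, which is why those
 * calls return a bool and only fall back to unallocated_encoding here.
 */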
14883 /* if we allocated any temporaries, free them here */
14884 free_tmp_a64(s);
14887 * After execution of most insns, btype is reset to 0.
14888 * Note that we set btype == -1 when the insn sets btype.
14890 if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
14891 reset_btype(s);
14894 translator_loop_temp_check(&s->base);
14897 static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
14899 DisasContext *dc = container_of(dcbase, DisasContext, base);
14901 if (unlikely(dc->ss_active)) {
14902 /* Note that this means single stepping WFI doesn't halt the CPU.
14903 * For conditional branch insns this is harmless unreachable code as
14904 * gen_goto_tb() has already handled emitting the debug exception
14905 * (and thus a tb-jump is not possible when singlestepping).
14907 switch (dc->base.is_jmp) {
14908 default:
14909 gen_a64_set_pc_im(dc->base.pc_next);
14910 /* fall through */
14911 case DISAS_EXIT:
14912 case DISAS_JUMP:
14913 gen_step_complete_exception(dc);
14914 break;
14915 case DISAS_NORETURN:
14916 break;
14918 } else {
14919 switch (dc->base.is_jmp) {
14920 case DISAS_NEXT:
14921 case DISAS_TOO_MANY:
14922 gen_goto_tb(dc, 1, dc->base.pc_next);
14923 break;
14924 default:
14925 case DISAS_UPDATE_EXIT:
14926 gen_a64_set_pc_im(dc->base.pc_next);
14927 /* fall through */
14928 case DISAS_EXIT:
14929 tcg_gen_exit_tb(NULL, 0);
14930 break;
14931 case DISAS_UPDATE_NOCHAIN:
14932 gen_a64_set_pc_im(dc->base.pc_next);
14933 /* fall through */
14934 case DISAS_JUMP:
14935 tcg_gen_lookup_and_goto_ptr();
14936 break;
14937 case DISAS_NORETURN:
14938 case DISAS_SWI:
14939 break;
14940 case DISAS_WFE:
14941 gen_a64_set_pc_im(dc->base.pc_next);
14942 gen_helper_wfe(cpu_env);
14943 break;
14944 case DISAS_YIELD:
14945 gen_a64_set_pc_im(dc->base.pc_next);
14946 gen_helper_yield(cpu_env);
14947 break;
14948 case DISAS_WFI:
14950 * This is a special case because we don't want to just halt
14951 * the CPU if trying to debug across a WFI.
14953 gen_a64_set_pc_im(dc->base.pc_next);
14954 gen_helper_wfi(cpu_env, tcg_constant_i32(4));
14956 * The helper doesn't necessarily throw an exception, but we
14957 * must go back to the main loop to check for interrupts anyway.
14959 tcg_gen_exit_tb(NULL, 0);
14960 break;
14965 static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
14966 CPUState *cpu, FILE *logfile)
14968 DisasContext *dc = container_of(dcbase, DisasContext, base);
14970 fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
14971 target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
14974 const TranslatorOps aarch64_translator_ops = {
14975 .init_disas_context = aarch64_tr_init_disas_context,
14976 .tb_start = aarch64_tr_tb_start,
14977 .insn_start = aarch64_tr_insn_start,
14978 .translate_insn = aarch64_tr_translate_insn,
14979 .tb_stop = aarch64_tr_tb_stop,
14980 .disas_log = aarch64_tr_disas_log,