/*
 * AArch64 translation
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "translate.h"
#include "internals.h"
#include "qemu/host-utils.h"
#include "semihosting/semihost.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "translate-a64.h"
#include "qemu/atomic128.h"
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_high;

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};
/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;
/* initialize TCG globals.  */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}
/*
 * Return the core mmu_idx to use for A64 "unprivileged load/store" insns
 */
static int get_a64_user_mem_index(DisasContext *s)
{
    /*
     * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
     * which is the usual mmu_idx for this cpu state.
     */
    ARMMMUIdx useridx = s->mmu_idx;

    if (s->unpriv) {
        /*
         * We have pre-computed the condition for AccType_UNPRIV.
         * Therefore we should never get here with a mmu_idx for
         * which we do not know the corresponding user mmu_idx.
         */
        switch (useridx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            useridx = ARMMMUIdx_E10_0;
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            useridx = ARMMMUIdx_E20_0;
            break;
        case ARMMMUIdx_SE10_1:
        case ARMMMUIdx_SE10_1_PAN:
            useridx = ARMMMUIdx_SE10_0;
            break;
        case ARMMMUIdx_SE20_2:
        case ARMMMUIdx_SE20_2_PAN:
            useridx = ARMMMUIdx_SE20_0;
            break;
        default:
            g_assert_not_reached();
        }
    }
    return arm_to_core_mmu_idx(useridx);
}
static void set_btype_raw(int val)
{
    tcg_gen_st_i32(tcg_constant_i32(val), cpu_env,
                   offsetof(CPUARMState, btype));
}

static void set_btype(DisasContext *s, int val)
{
    /* BTYPE is a 2-bit field, and 0 should be done with reset_btype.  */
    tcg_debug_assert(val >= 1 && val <= 3);
    set_btype_raw(val);
    s->btype = -1;
}

static void reset_btype(DisasContext *s)
{
    if (s->btype != 0) {
        set_btype_raw(0);
        s->btype = 0;
    }
}
void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}
/*
 * Handle Top Byte Ignore (TBI) bits.
 *
 * If address tagging is enabled via the TCR TBI bits:
 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 *    then the address is zero-extended, clearing bits [63:56]
 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 *    and TBI1 controls addresses with bit 55 == 1.
 *    If the appropriate TBI bit is set for the address then
 *    the address is sign-extended from bit 55 into bits [63:56]
 *
 * Here we have concatenated TBI{1,0} into tbi.
 */
static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
                                TCGv_i64 src, int tbi)
{
    if (tbi == 0) {
        /* Load unmodified address */
        tcg_gen_mov_i64(dst, src);
    } else if (!regime_has_2_ranges(s->mmu_idx)) {
        /* Force tag byte to all zero */
        tcg_gen_extract_i64(dst, src, 0, 56);
    } else {
        /* Sign-extend from bit 55. */
        tcg_gen_sextract_i64(dst, src, 0, 56);

        switch (tbi) {
        case 1:
            /* tbi0 but !tbi1: only use the extension if positive */
            tcg_gen_and_i64(dst, dst, src);
            break;
        case 2:
            /* !tbi0 but tbi1: only use the extension if negative */
            tcg_gen_or_i64(dst, dst, src);
            break;
        case 3:
            /* tbi0 and tbi1: always use the extension */
            break;
        default:
            g_assert_not_reached();
        }
    }
}
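
/*
 * Illustrative note (not part of the original source): with both TBI bits
 * set, a tagged user pointer such as 0x5a00ffff_deadbeef comes out of
 * gen_top_byte_ignore() with its top byte replaced by a sign extension of
 * bit 55, i.e. the tag byte is discarded; with tbi == 0 the value passes
 * through unchanged, and with only one TBI bit set the extension is applied
 * only for addresses on the matching side of bit 55.
 */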
static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    /*
     * If address tagging is enabled for instructions via the TCR TBI bits,
     * then loading an address into the PC will clear out any tag.
     */
    gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
}
/*
 * Handle MTE and/or TBI.
 *
 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register.  But for user-only
 * mode we do not have a TLB with which to implement this, so we must
 * remove the top byte now.
 *
 * Always return a fresh temporary that we can increment independently
 * of the write-back address.
 */
TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
{
    TCGv_i64 clean = new_tmp_a64(s);
#ifdef CONFIG_USER_ONLY
    gen_top_byte_ignore(s, clean, addr, s->tbid);
#else
    tcg_gen_mov_i64(clean, addr);
#endif
    return clean;
}
/* Insert a zero tag into src, with the result at dst. */
static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, ~MAKE_64BIT_MASK(56, 4));
}

static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
                             MMUAccessType acc, int log2_size)
{
    gen_helper_probe_access(cpu_env, ptr,
                            tcg_constant_i32(acc),
                            tcg_constant_i32(get_mem_index(s)),
                            tcg_constant_i32(1 << log2_size));
}
/*
 * For MTE, check a single logical or atomic access.  This probes a single
 * address, the exact one specified.  The size and alignment of the access
 * is not relevant to MTE, per se, but watchpoints do require the size,
 * and we want to recognize those before making any other changes to state.
 */
static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
                                      bool is_write, bool tag_checked,
                                      int log2_size, bool is_unpriv,
                                      int core_idx)
{
    if (tag_checked && s->mte_active[is_unpriv]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1);

        ret = new_tmp_a64(s);
        gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}
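
/*
 * Illustrative note (an assumption, not part of the original source): the
 * descriptor built above is a packed 32-bit word whose MIDX/TBI/TCMA/WRITE/
 * SIZEM1 fields are the MTEDESC FIELD definitions used by the MTE helpers.
 * For example, a tag-checked 8-byte store would encode WRITE = 1 and
 * SIZEM1 = 7 alongside the current core mmu index in MIDX.
 */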
TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int log2_size)
{
    return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, log2_size,
                                 false, get_mem_index(s));
}
/*
 * For MTE, check multiple logical sequential accesses.
 */
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int size)
{
    if (tag_checked && s->mte_active[0]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);

        ret = new_tmp_a64(s);
        gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}
typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /* Sign-extend the 32-bit value so that the GE/LT comparisons work
     * properly. The NE/EQ comparisons are also fine with this choice. */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);
}

static void a64_free_cc(DisasCompare64 *c64)
{
    tcg_temp_free_i64(c64->value);
}
static void gen_rebuild_hflags(DisasContext *s)
{
    gen_helper_rebuild_hflags_a64(cpu_env, tcg_constant_i32(s->current_el));
}

static void gen_exception_internal(int excp)
{
    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_constant_i32(excp));
}

static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp)
{
    gen_a64_set_pc_im(pc);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}
static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
    gen_a64_set_pc_im(s->pc_curr);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_constant_i32(syndrome));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We have just completed a step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}
static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (s->ss_active) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb(s->base.tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
#endif
    s->tmp_a64_count = 0;
}

static void free_tmp_a64(DisasContext *s)
{
    int i;

    for (i = 0; i < s->tmp_a64_count; i++) {
        tcg_temp_free_i64(s->tmp_a64[i]);
    }
    init_tmp_a64_array(s);
}

TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}

TCGv_i64 new_tmp_a64_local(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_local_new_i64();
}

TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}
/*
 * Register access functions
 *
 * These functions are used for directly accessing a register where
 * changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        return new_tmp_a64_zero(s);
    } else {
        return cpu_X[reg];
    }
}
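
/*
 * Example (illustrative, not from the original source): for an encoding with
 * Rd == 31, cpu_reg(s, 31) hands back a scratch zero temporary so that writes
 * to XZR are discarded, whereas cpu_reg_sp(s, 31) below maps the same encoding
 * onto cpu_X[31], i.e. the stack pointer.
 */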
/* register access for when 31 == SP */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}

/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}
/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}

/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}
/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that unlike the GP register accessors, the values returned
 * by the read functions must be manually freed.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    return v;
}

static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
    return v;
}

static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
    return v;
}
/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is not enabled, then there are only 128 bits in the vector.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    /* Nop move, with side effect of clearing the tail. */
    tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
}
void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, cpu_env, ofs);
    clear_vec_high(s, false, reg);
}

static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
    tcg_temp_free_i64(tmp);
}
/* Expand a 2-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an expander function.
 */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 3-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 4-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         int rx, GVecGen4Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
            is_q ? 16 : 8, vec_full_reg_size(s));
}
/* Expand a 2-operand operation using an out-of-line helper. */
static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int data, gen_helper_gvec_2 *fn)
{
    tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand operation using an out-of-line helper. */
static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, int data, gen_helper_gvec_3 *fn)
{
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}
/* Expand a 3-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}

/* Expand a 3-operand + qc + operation using an out-of-line helper. */
static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn,
                            int rm, gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr qc_ptr = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), qc_ptr,
                       is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
    tcg_temp_free_ptr(qc_ptr);
}
/* Expand a 4-operand operation using an out-of-line helper. */
static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
                             int rm, int ra, int data, gen_helper_gvec_4 *fn)
{
    tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/*
 * Expand a 4-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, int ra, bool is_fp16, int data,
                              gen_helper_gvec_4_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}
/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}

/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}
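
/*
 * Worked note (illustrative, not part of the original source): the overflow
 * flag follows the usual two's-complement rule.  For an addition, V is set
 * when both operands have the same sign and the result's sign differs, i.e.
 *     V = (result ^ t0) & ~(t0 ^ t1)
 * taken at the top bit (bit 63 here, bit 31 in the 32-bit path).  The
 * subtraction variant below uses (result ^ t0) & (t0 ^ t1) instead.
 */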
/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}
/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result = tcg_temp_new_i64();
        TCGv_i64 cf_64 = tcg_temp_new_i64();
        TCGv_i64 vf_64 = tcg_temp_new_i64();
        TCGv_i64 tmp = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_constant_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();
        TCGv_i32 zero = tcg_constant_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}
/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, MemOp memop, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    memop = finalize_memop(s, memop);
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, MemOp memop,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
/*
 * Load from memory to GPR register
 */
static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                             MemOp memop, bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    memop = finalize_memop(s, memop);
    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && (memop & MO_SIGN)) {
        g_assert((memop & MO_SIZE) <= MO_32);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      (memop & MO_SIGN) != 0,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                      MemOp memop, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    MemOp mop;

    tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));

    if (size < 4) {
        mop = finalize_memop(s, size);
        tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
        TCGv_i64 tmphi = tcg_temp_new_i64();

        tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));

        mop = s->be_data | MO_UQ;
        tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
                            mop | (s->align_mem ? MO_ALIGN_16 : 0));
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr,
                            get_mem_index(s), mop);

        tcg_temp_free_i64(tcg_hiaddr);
        tcg_temp_free_i64(tmphi);
    }

    tcg_temp_free_i64(tmplo);
}
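
/*
 * Illustrative note (an assumption, not part of the original source): in the
 * 128-bit path above the two 64-bit halves are emitted so that guest memory
 * order follows guest endianness.  For a big-endian guest the high half
 * (tmphi) is stored at the lower address and the low half at addr + 8, which
 * is why the operands of the two stores are swapped on "be".
 */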
/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi = NULL;
    MemOp mop;

    if (size < 4) {
        mop = finalize_memop(s, size);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr;

        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        mop = s->be_data | MO_UQ;
        tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
                            mop | (s->align_mem ? MO_ALIGN_16 : 0));
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(be ? tmplo : tmphi, tcg_hiaddr,
                            get_mem_index(s), mop);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
    tcg_temp_free_i64(tmplo);

    if (tmphi) {
        tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
        tcg_temp_free_i64(tmphi);
    }
    clear_vec_high(s, tmphi != NULL, destidx);
}
/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */

/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch ((unsigned)memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8 | MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16 | MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32 | MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64 | MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8 | MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16 | MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32 | MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);

    tcg_temp_free_i64(tcg_tmp);
}

/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
    write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);

    tcg_temp_free_i64(tcg_tmp);
}
/* Check that FP/Neon access is enabled. If it is, return
 * true. If not, emit code to generate an appropriate exception,
 * and return false; the caller should not emit any code for
 * the instruction. Note that this check must happen after all
 * unallocated-encoding checks (otherwise the syndrome information
 * for the resulting exception will be incorrect).
 */
static bool fp_access_check(DisasContext *s)
{
    if (s->fp_excp_el) {
        assert(!s->fp_access_checked);
        s->fp_access_checked = true;

        gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
                              syn_fp_access_trap(1, 0xe, false, 0),
                              s->fp_excp_el);
        return false;
    }
    s->fp_access_checked = true;
    return true;
}
/* Check that SVE access is enabled. If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 */
bool sve_access_check(DisasContext *s)
{
    if (s->sve_excp_el) {
        assert(!s->sve_access_checked);
        s->sve_access_checked = true;

        gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
                              syn_sve_access_trap(), s->sve_excp_el);
        return false;
    }
    s->sve_access_checked = true;
    return fp_access_check(s);
}
/*
 * Check that SME access is enabled, raise an exception if not.
 * Note that this function corresponds to CheckSMEAccess and is
 * only used directly for cpregs.
 */
static bool sme_access_check(DisasContext *s)
{
    if (s->sme_excp_el) {
        gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
                              syn_smetrap(SME_ET_AccessTrap, false),
                              s->sme_excp_el);
        return false;
    }
    return true;
}
/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}
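
/*
 * Example (illustrative, not from the original source): for an operand such
 * as "Wm, UXTB #2", option is 0b000 (unsigned, byte) and shift is 2, so the
 * sequence above first zero-extends the low 8 bits of the source register
 * and then shifts the result left by 2 before it is consumed as the second
 * operand of the instruction.
 */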
static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}
/*
 * This provides a simple table-based lookup decoder. It is intended
 * to be used when the relevant bits for decode are too awkwardly
 * placed and switch/if based logic would be confusing and deeply
 * nested. Since it's a linear search through the table, tables
 * should be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0)
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}
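
/*
 * Usage sketch (illustrative only; the table contents below are made up and
 * do not appear in this file):
 *
 *     static const AArch64DecodeTable example_table[] = {
 *         { 0x0e200400, 0x9f200400, disas_example_group },
 *         { 0x00000000, 0x00000000, NULL }    <- zero mask terminates
 *     };
 *     AArch64DecodeFn *fn = lookup_disas_fn(&example_table[0], insn);
 *     if (fn) {
 *         fn(s, insn);
 *     } else {
 *         unallocated_encoding(s);
 *     }
 */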
/*
 * The instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter C4
 * of the ARM Architecture Reference Manual (DDI0487B_a);
 * classification names and decode diagrams here should generally
 * match up with those in the manual.
 */
/* Unconditional branch (immediate)
 *   31  30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4;

    if (insn & (1U << 31)) {
        /* BL Branch with link */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
    }

    /* B Branch / BL Branch with link */
    reset_btype(s);
    gen_goto_tb(s, 0, addr);
}
/* Compare and branch (immediate)
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |         imm19       |   Rt   |
 * +----+-------------+----+---------------------+--------+
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    reset_btype(s);
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    gen_goto_tb(s, 0, s->base.pc_next);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
/* Test and branch (immediate)
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc_curr + sextract32(insn, 5, 14) * 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();

    reset_btype(s);
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    gen_goto_tb(s, 0, s->base.pc_next);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
/* Conditional branch (immediate)
 *  31           25  24  23                  5   4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |         imm19       | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
    cond = extract32(insn, 0, 4);

    reset_btype(s);
    if (cond < 0x0e) {
        /* genuinely conditional branches */
        TCGLabel *label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->base.pc_next);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}
/* HINT instruction group, including various allocated HINTs */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0b00000: /* NOP */
        break;
    case 0b00011: /* WFI */
        s->base.is_jmp = DISAS_WFI;
        break;
    case 0b00001: /* YIELD */
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 0b00010: /* WFE */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 0b00100: /* SEV */
    case 0b00101: /* SEVL */
    case 0b00110: /* DGH */
        /* we treat all as NOP at least for now */
        break;
    case 0b00111: /* XPACLRI */
        if (s->pauth_active) {
            gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
        }
        break;
    case 0b01000: /* PACIA1716 */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01010: /* PACIB1716 */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01100: /* AUTIA1716 */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01110: /* AUTIB1716 */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b10000: /* ESB */
        /* Without RAS, we must implement this as NOP. */
        if (dc_isar_feature(aa64_ras, s)) {
            /*
             * QEMU does not have a source of physical SErrors,
             * so we are only concerned with virtual SErrors.
             * The pseudocode in the ARM for this case is
             *   if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
             *     AArch64.vESBOperation();
             * Most of the condition can be evaluated at translation time.
             * Test for EL2 present, and defer test for SEL2 to runtime.
             */
            if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
                gen_helper_vesb(cpu_env);
            }
        }
        break;
    case 0b11000: /* PACIAZ */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11001: /* PACIASP */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11010: /* PACIBZ */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11011: /* PACIBSP */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11100: /* AUTIAZ */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11101: /* AUTIASP */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11110: /* AUTIBZ */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11111: /* AUTIBSP */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    default:
        /* default specified as NOP equivalent */
        break;
    }
}
static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGBar bar;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        switch (crm & 3) {
        case 1: /* MBReqTypes_Reads */
            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
            break;
        case 2: /* MBReqTypes_Writes */
            bar = TCG_BAR_SC | TCG_MO_ST_ST;
            break;
        default: /* MBReqTypes_All */
            bar = TCG_BAR_SC | TCG_MO_ALL;
            break;
        }
        tcg_gen_mb(bar);
        return;
    case 6: /* ISB */
        /* We need to break the TB after this insn to execute
         * self-modifying code correctly and also to take
         * any pending interrupts immediately.
         */
        reset_btype(s);
        gen_goto_tb(s, 0, s->base.pc_next);
        return;

    case 7: /* SB */
        if (crm != 0 || !dc_isar_feature(aa64_sb, s)) {
            goto do_unallocated;
        }
        /*
         * TODO: There is no speculation barrier opcode for TCG;
         * MB and end the TB instead.
         */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        gen_goto_tb(s, 0, s->base.pc_next);
        return;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}
static void gen_xaflag(void)
{
    TCGv_i32 z = tcg_temp_new_i32();

    tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);

    /* (!C & !Z) << 31  ==  (C | Z) - 1 */
    tcg_gen_or_i32(cpu_NF, cpu_CF, z);
    tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);

    /* !(Z & C) */
    tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
    tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);

    /* (!C & Z) << 31 -> -(Z & ~C) */
    tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
    tcg_gen_neg_i32(cpu_VF, cpu_VF);

    /* C | Z */
    tcg_gen_or_i32(cpu_CF, cpu_CF, z);

    tcg_temp_free_i32(z);
}
static void gen_axflag(void)
{
    tcg_gen_sari_i32(cpu_VF, cpu_VF, 31);         /* V ? -1 : 0 */
    tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF);     /* C & !V */

    /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
    tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);

    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
/* MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    int op = op1 << 3 | op2;

    /* End the TB by default, chaining is ok.  */
    s->base.is_jmp = DISAS_TOO_MANY;

    switch (op) {
    case 0x00: /* CFINV */
        if (crm != 0 || !dc_isar_feature(aa64_condm_4, s)) {
            goto do_unallocated;
        }
        tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x01: /* XAFlag */
        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
            goto do_unallocated;
        }
        gen_xaflag();
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x02: /* AXFlag */
        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
            goto do_unallocated;
        }
        gen_axflag();
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x03: /* UAO */
        if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_UAO);
        } else {
            clear_pstate_bits(PSTATE_UAO);
        }
        gen_rebuild_hflags(s);
        break;

    case 0x04: /* PAN */
        if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_PAN);
        } else {
            clear_pstate_bits(PSTATE_PAN);
        }
        gen_rebuild_hflags(s);
        break;

    case 0x05: /* SPSel */
        if (s->current_el == 0) {
            goto do_unallocated;
        }
        gen_helper_msr_i_spsel(cpu_env, tcg_constant_i32(crm & PSTATE_SP));
        break;

    case 0x19: /* SSBS */
        if (!dc_isar_feature(aa64_ssbs, s)) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_SSBS);
        } else {
            clear_pstate_bits(PSTATE_SSBS);
        }
        /* Don't need to rebuild hflags since SSBS is a nop */
        break;

    case 0x1a: /* DIT */
        if (!dc_isar_feature(aa64_dit, s)) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_DIT);
        } else {
            clear_pstate_bits(PSTATE_DIT);
        }
        /* There's no need to rebuild hflags because DIT is a nop */
        break;

    case 0x1e: /* DAIFSet */
        gen_helper_msr_i_daifset(cpu_env, tcg_constant_i32(crm));
        break;

    case 0x1f: /* DAIFClear */
        gen_helper_msr_i_daifclear(cpu_env, tcg_constant_i32(crm));
        /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs. */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
        break;

    case 0x1c: /* TCO */
        if (dc_isar_feature(aa64_mte, s)) {
            /* Full MTE is enabled -- set the TCO bit as directed. */
            if (crm & 1) {
                set_pstate_bits(PSTATE_TCO);
            } else {
                clear_pstate_bits(PSTATE_TCO);
            }
            gen_rebuild_hflags(s);
            /* Many factors, including TCO, go into MTE_ACTIVE. */
            s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
            /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI. */
            s->base.is_jmp = DISAS_NEXT;
        } else {
            goto do_unallocated;
        }
        break;

    case 0x1b: /* SVCR* */
        if (!dc_isar_feature(aa64_sme, s) || crm < 2 || crm > 7) {
            goto do_unallocated;
        }
        if (sme_access_check(s)) {
            bool i = crm & 1;
            bool changed = false;

            if ((crm & 2) && i != s->pstate_sm) {
                gen_helper_set_pstate_sm(cpu_env, tcg_constant_i32(i));
                changed = true;
            }
            if ((crm & 4) && i != s->pstate_za) {
                gen_helper_set_pstate_za(cpu_env, tcg_constant_i32(i));
                changed = true;
            }
            if (changed) {
                gen_rebuild_hflags(s);
            } else {
                s->base.is_jmp = DISAS_NEXT;
            }
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}
static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}
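
/*
 * Illustrative example (not from the original source): for a state with
 * N = 1, Z = 0, C = 1, V = 0 the value written by gen_get_nzcv() is
 * 0xa0000000, i.e. NZCV packed into bits [31:28] with the rest of the
 * destination register zero; gen_set_nzcv() below performs the inverse
 * unpacking.
 */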
static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}
static void gen_sysreg_undef(DisasContext *s, bool isread,
                             uint8_t op0, uint8_t op1, uint8_t op2,
                             uint8_t crn, uint8_t crm, uint8_t rt)
{
    /*
     * Generate code to emit an UNDEF with correct syndrome
     * information for a failed system register access.
     * This is EC_UNCATEGORIZED (ie a standard UNDEF) in most cases,
     * but if FEAT_IDST is implemented then read accesses to registers
     * in the feature ID space are reported with the EC_SYSTEMREGISTERTRAP
     * syndrome.
     */
    uint32_t syndrome;

    if (isread && dc_isar_feature(aa64_ids, s) &&
        arm_cpreg_encoding_in_idspace(op0, op1, op2, crn, crm)) {
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
    } else {
        syndrome = syn_uncategorized();
    }
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syndrome);
}
/* MRS - move from system register
 * MSR (register) - move to system register
 * SYS
 * SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    const ARMCPRegInfo *ri;
    TCGv_i64 tcg_rt;

    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                               crn, crm, op0, op1, op2));

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
        return;
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
        return;
    }

    if (ri->accessfn) {
        /* Emit code to perform further access permissions checks at
         * runtime; this may result in an exception.
         */
        uint32_t syndrome;

        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        gen_a64_set_pc_im(s->pc_curr);
        gen_helper_access_check_cp_reg(cpu_env,
                                       tcg_constant_ptr(ri),
                                       tcg_constant_i32(syndrome),
                                       tcg_constant_i32(isread));
    } else if (ri->type & ARM_CP_RAISES_EXC) {
        /*
         * The readfn or writefn might raise an exception;
         * synchronize the CPU state in case it does.
         */
        gen_a64_set_pc_im(s->pc_curr);
    }

    /* Handle special cases first */
    switch (ri->type & ARM_CP_SPECIAL_MASK) {
    case 0:
        break;
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    case ARM_CP_CURRENTEL:
        /* Reads as current EL value from pstate, which is
         * guaranteed to be constant by the tb flags.
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
        return;
    case ARM_CP_DC_ZVA:
        /* Writes clear the aligned block of memory which rt points into. */
        if (s->mte_active[0]) {
            int desc = 0;

            desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
            desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
            desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);

            tcg_rt = new_tmp_a64(s);
            gen_helper_mte_check_zva(tcg_rt, cpu_env,
                                     tcg_constant_i32(desc), cpu_reg(s, rt));
        } else {
            tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
        }
        gen_helper_dc_zva(cpu_env, tcg_rt);
        return;
    case ARM_CP_DC_GVA:
        {
            TCGv_i64 clean_addr, tag;

            /*
             * DC_GVA, like DC_ZVA, requires that we supply the original
             * pointer for an invalid page. Probe that address first.
             */
            tcg_rt = cpu_reg(s, rt);
            clean_addr = clean_data_tbi(s, tcg_rt);
            gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8);

            if (s->ata) {
                /* Extract the tag from the register to match STZGM. */
                tag = tcg_temp_new_i64();
                tcg_gen_shri_i64(tag, tcg_rt, 56);
                gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
                tcg_temp_free_i64(tag);
            }
        }
        return;
    case ARM_CP_DC_GZVA:
        {
            TCGv_i64 clean_addr, tag;

            /* For DC_GZVA, we can rely on DC_ZVA for the proper fault. */
            tcg_rt = cpu_reg(s, rt);
            clean_addr = clean_data_tbi(s, tcg_rt);
            gen_helper_dc_zva(cpu_env, clean_addr);

            if (s->ata) {
                /* Extract the tag from the register to match STZGM. */
                tag = tcg_temp_new_i64();
                tcg_gen_shri_i64(tag, tcg_rt, 56);
                gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
                tcg_temp_free_i64(tag);
            }
        }
        return;
    default:
        g_assert_not_reached();
    }
    if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
        return;
    } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
        return;
    } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) {
        return;
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        gen_io_start();
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tcg_constant_ptr(ri));
        } else {
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            gen_helper_set_cp_reg64(cpu_env, tcg_constant_ptr(ri), tcg_rt);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        /* I/O operations must end the TB here (whether read or write) */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
    }
    if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /*
         * A write to any coprocessor register that ends a TB
         * must rebuild the hflags for the next TB.
         */
        gen_rebuild_hflags(s);
        /*
         * We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
    }
}
/* System
 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 */
static void disas_system(DisasContext *s, uint32_t insn)
{
    unsigned int l, op0, op1, crn, crm, op2, rt;
    l = extract32(insn, 21, 1);
    op0 = extract32(insn, 19, 2);
    op1 = extract32(insn, 16, 3);
    crn = extract32(insn, 12, 4);
    crm = extract32(insn, 8, 4);
    op2 = extract32(insn, 5, 3);
    rt = extract32(insn, 0, 5);

    if (op0 == 0) {
        if (l || rt != 31) {
            unallocated_encoding(s);
            return;
        }
        switch (crn) {
        case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
            handle_hint(s, insn, op1, op2, crm);
            break;
        case 3: /* CLREX, DSB, DMB, ISB */
            handle_sync(s, insn, op1, op2, crm);
            break;
        case 4: /* MSR (immediate) */
            handle_msr_i(s, insn, op1, op2, crm);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    }
    handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
}
/* Exception generation
 *
 *  31             24 23 21 20                     5 4   2 1  0
 * +-----------------+-----+------------------------+-----+----+
 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
 * +-----------------------+------------------------+----------+
 */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);
    int imm16 = extract32(insn, 5, 16);

    switch (opc) {
    case 0:
        /* For SVC, HVC and SMC we advance the single-step state
         * machine before taking the exception. This is architecturally
         * mandated, to ensure that single-stepping a system call
         * instruction works properly.
         */
        switch (op2_ll) {
        case 1: /* SVC */
            gen_ss_advance(s);
            gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
                               syn_aa64_svc(imm16));
            break;
        case 2: /* HVC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            /* The pre HVC helper handles cases when HVC gets trapped
             * as an undefined insn by runtime configuration.
             */
            gen_a64_set_pc_im(s->pc_curr);
            gen_helper_pre_hvc(cpu_env);
            gen_ss_advance(s);
            gen_exception_insn_el(s, s->base.pc_next, EXCP_HVC,
                                  syn_aa64_hvc(imm16), 2);
            break;
        case 3: /* SMC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            gen_a64_set_pc_im(s->pc_curr);
            gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa64_smc(imm16)));
            gen_ss_advance(s);
            gen_exception_insn_el(s, s->base.pc_next, EXCP_SMC,
                                  syn_aa64_smc(imm16), 3);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    case 1:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK */
        gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
        break;
    case 2:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* HLT. This has two purposes.
         * Architecturally, it is an external halting debug instruction.
         * Since QEMU doesn't implement external debug, we treat this as
         * it is required for halting debug disabled: it will UNDEF.
         * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
         */
        if (semihosting_enabled() && imm16 == 0xf000) {
#ifndef CONFIG_USER_ONLY
            /* In system mode, don't allow userspace access to semihosting,
             * to provide some semblance of security (and for consistency
             * with our 32-bit semihosting).
             */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
#endif
            gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
        } else {
            unallocated_encoding(s);
        }
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1, DCPS2, DCPS3 */
        unallocated_encoding(s);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* Unconditional branch (register)
 *  31           25 24   21 20   16 15   10 9    5 4     0
 * +---------------+-------+-------+-------+------+-------+
 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
 * +---------------+-------+-------+-------+------+-------+
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;
    unsigned btype_mod = 2;   /* 0: BR, 1: BLR, 2: other */
    TCGv_i64 dst;
    TCGv_i64 modifier;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op2 != 0x1f) {
        goto do_unallocated;
    }

    switch (opc) {
    case 0: /* BR */
    case 1: /* BLR */
    case 2: /* RET */
        btype_mod = opc;
        switch (op3) {
        case 0:
            /* BR, BLR, RET */
            if (op4 != 0) {
                goto do_unallocated;
            }
            dst = cpu_reg(s, rn);
            break;

        case 2:
        case 3:
            if (!dc_isar_feature(aa64_pauth, s)) {
                goto do_unallocated;
            }
            if (opc == 2) {
                /* RETAA, RETAB */
                if (rn != 0x1f || op4 != 0x1f) {
                    goto do_unallocated;
                }
                rn = 30;
                modifier = cpu_X[31];
            } else {
                /* BRAAZ, BRABZ, BLRAAZ, BLRABZ */
                if (op4 != 0x1f) {
                    goto do_unallocated;
                }
                modifier = new_tmp_a64_zero(s);
            }
            if (s->pauth_active) {
                dst = new_tmp_a64(s);
                if (op3 == 2) {
                    gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
                } else {
                    gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
                }
            } else {
                dst = cpu_reg(s, rn);
            }
            break;

        default:
            goto do_unallocated;
        }
        gen_a64_set_pc(s, dst);
        /* BLR also needs to load return address */
        if (opc == 1) {
            tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
        }
        break;

    case 8: /* BRAA */
    case 9: /* BLRAA */
        if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        if ((op3 & ~1) != 2) {
            goto do_unallocated;
        }
        btype_mod = opc & 1;
        if (s->pauth_active) {
            dst = new_tmp_a64(s);
            modifier = cpu_reg_sp(s, op4);
            if (op3 == 2) {
                gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
            } else {
                gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
            }
        } else {
            dst = cpu_reg(s, rn);
        }
        gen_a64_set_pc(s, dst);
        /* BLRAA also needs to load return address */
        if (opc == 9) {
            tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
        }
        break;

    case 4: /* ERET */
        if (s->current_el == 0) {
            goto do_unallocated;
        }
        switch (op3) {
        case 0: /* ERET */
            if (op4 != 0) {
                goto do_unallocated;
            }
            dst = tcg_temp_new_i64();
            tcg_gen_ld_i64(dst, cpu_env,
                           offsetof(CPUARMState, elr_el[s->current_el]));
            break;

        case 2: /* ERETAA */
        case 3: /* ERETAB */
            if (!dc_isar_feature(aa64_pauth, s)) {
                goto do_unallocated;
            }
            if (rn != 0x1f || op4 != 0x1f) {
                goto do_unallocated;
            }
            dst = tcg_temp_new_i64();
            tcg_gen_ld_i64(dst, cpu_env,
                           offsetof(CPUARMState, elr_el[s->current_el]));
            if (s->pauth_active) {
                modifier = cpu_X[31];
                if (op3 == 2) {
                    gen_helper_autia(dst, cpu_env, dst, modifier);
                } else {
                    gen_helper_autib(dst, cpu_env, dst, modifier);
                }
            }
            break;

        default:
            goto do_unallocated;
        }
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }

        gen_helper_exception_return(cpu_env, dst);
        tcg_temp_free_i64(dst);
        /* Must exit loop to check un-masked IRQs */
        s->base.is_jmp = DISAS_EXIT;
        return;

    case 5: /* DRPS */
        if (op3 != 0 || op4 != 0 || rn != 0x1f) {
            goto do_unallocated;
        } else {
            unallocated_encoding(s);
        }
        return;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }

    switch (btype_mod) {
    case 0: /* BR */
        if (dc_isar_feature(aa64_bti, s)) {
            /* BR to {x16,x17} or !guard -> 1, else 3.  */
            set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
        }
        break;

    case 1: /* BLR */
        if (dc_isar_feature(aa64_bti, s)) {
            /* BLR sets BTYPE to 2, regardless of source guarded page.  */
            set_btype(s, 2);
        }
        break;

    default: /* RET or none of the above.  */
        /* BTYPE will be set to 0 by normal end-of-insn processing.  */
        break;
    }

    s->base.is_jmp = DISAS_JUMP;
}
/* Branches, exception generating and system instructions */
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 7)) {
    case 0x0a: case 0x0b:
    case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
        disas_uncond_b_imm(s, insn);
        break;
    case 0x1a: case 0x5a: /* Compare & branch (immediate) */
        disas_comp_b_imm(s, insn);
        break;
    case 0x1b: case 0x5b: /* Test & branch (immediate) */
        disas_test_b_imm(s, insn);
        break;
    case 0x2a: /* Conditional branch (immediate) */
        disas_cond_b_imm(s, insn);
        break;
    case 0x6a: /* Exception generation / System */
        if (insn & (1 << 24)) {
            if (extract32(insn, 22, 2) == 0) {
                disas_system(s, insn);
            } else {
                unallocated_encoding(s);
            }
        } else {
            disas_exc(s, insn);
        }
        break;
    case 0x6b: /* Unconditional branch (register) */
        disas_uncond_b_reg(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/*
 * Load/Store exclusive instructions are implemented by remembering
 * the value/address loaded, and seeing if these are the same
 * when the store is performed. This is not actually the architecturally
 * mandated semantics, but it works for typical guest code sequences
 * and avoids having to monitor regular stores.
 *
 * The store exclusive uses the atomic cmpxchg primitives to avoid
 * races in multi-threaded linux-user and when MTTCG softmmu is
 * enabled.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i64 addr, int size, bool is_pair)
{
    int idx = get_mem_index(s);
    MemOp memop = s->be_data;

    g_assert(size <= 3);
    if (is_pair) {
        g_assert(size >= 2);
        if (size == 2) {
            /* The pair must be single-copy atomic for the doubleword.  */
            memop |= MO_64 | MO_ALIGN;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
            if (s->be_data == MO_LE) {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
            } else {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
            }
        } else {
            /* The pair must be single-copy atomic for *each* doubleword, not
               the entire quadword, however it must be quadword aligned.  */
            memop |= MO_64;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
                                memop | MO_ALIGN_16);

            TCGv_i64 addr2 = tcg_temp_new_i64();
            tcg_gen_addi_i64(addr2, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
            tcg_temp_free_i64(addr2);

            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
        }
    } else {
        memop |= size | MO_ALIGN;
        tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
}
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *     [addr] = {Rt};
     *     [addr + datasize] = {Rt2};
     *     {Rd} = 0;
     * } else {
     *     {Rd} = 1;
     * }
     * env->exclusive_addr = -1;
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    TCGv_i64 tmp;

    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);

    tmp = tcg_temp_new_i64();
    if (is_pair) {
        if (size == 2) {
            if (s->be_data == MO_LE) {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
            } else {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
            }
            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
                                       cpu_exclusive_val, tmp,
                                       get_mem_index(s),
                                       MO_64 | MO_ALIGN | s->be_data);
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            if (!HAVE_CMPXCHG128) {
                gen_helper_exit_atomic(cpu_env);
                /*
                 * Produce a result so we have a well-formed opcode
                 * stream when the following (dead) code uses 'tmp'.
                 * TCG will remove the dead ops for us.
                 */
                tcg_gen_movi_i64(tmp, 0);
            } else if (s->be_data == MO_LE) {
                gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            }
        } else if (s->be_data == MO_LE) {
            gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
                                           cpu_reg(s, rt), cpu_reg(s, rt2));
        } else {
            gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
                                           cpu_reg(s, rt), cpu_reg(s, rt2));
        }
    } else {
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s),
                                   size | MO_ALIGN | s->be_data);
        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
    tcg_temp_free_i64(tmp);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
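
/*
 * Illustrative sketch (not part of the translator): the typical guest
 * sequence that the two helpers above emulate is a retry loop such as
 *
 *   retry:
 *     ldaxr   x0, [x1]        ; gen_load_exclusive records addr/value
 *     ...modify x0...
 *     stlxr   w2, x0, [x1]    ; gen_store_exclusive cmpxchgs old -> new
 *     cbnz    w2, retry       ; w2 == 1 means the "exclusive" store failed
 *
 * The cmpxchg succeeds only if memory still holds cpu_exclusive_val, which
 * approximates, but is weaker than, a real architectural exclusive monitor.
 */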
static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
                                 int rn, int size)
{
    TCGv_i64 tcg_rs = cpu_reg(s, rs);
    TCGv_i64 tcg_rt = cpu_reg(s, rt);
    int memidx = get_mem_index(s);
    TCGv_i64 clean_addr;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size);
    tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx,
                               size | MO_ALIGN | s->be_data);
}
static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
                                      int rn, int size)
{
    TCGv_i64 s1 = cpu_reg(s, rs);
    TCGv_i64 s2 = cpu_reg(s, rs + 1);
    TCGv_i64 t1 = cpu_reg(s, rt);
    TCGv_i64 t2 = cpu_reg(s, rt + 1);
    TCGv_i64 clean_addr;
    int memidx = get_mem_index(s);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* This is a single atomic access, despite the "pair". */
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size + 1);

    if (size == 2) {
        TCGv_i64 cmp = tcg_temp_new_i64();
        TCGv_i64 val = tcg_temp_new_i64();

        if (s->be_data == MO_LE) {
            tcg_gen_concat32_i64(val, t1, t2);
            tcg_gen_concat32_i64(cmp, s1, s2);
        } else {
            tcg_gen_concat32_i64(val, t2, t1);
            tcg_gen_concat32_i64(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx,
                                   MO_64 | MO_ALIGN | s->be_data);
        tcg_temp_free_i64(val);

        if (s->be_data == MO_LE) {
            tcg_gen_extr32_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr32_i64(s2, s1, cmp);
        }
        tcg_temp_free_i64(cmp);
    } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        if (HAVE_CMPXCHG128) {
            TCGv_i32 tcg_rs = tcg_constant_i32(rs);
            if (s->be_data == MO_LE) {
                gen_helper_casp_le_parallel(cpu_env, tcg_rs,
                                            clean_addr, t1, t2);
            } else {
                gen_helper_casp_be_parallel(cpu_env, tcg_rs,
                                            clean_addr, t1, t2);
            }
        } else {
            gen_helper_exit_atomic(cpu_env);
            s->base.is_jmp = DISAS_NORETURN;
        }
    } else {
        TCGv_i64 d1 = tcg_temp_new_i64();
        TCGv_i64 d2 = tcg_temp_new_i64();
        TCGv_i64 a2 = tcg_temp_new_i64();
        TCGv_i64 c1 = tcg_temp_new_i64();
        TCGv_i64 c2 = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_constant_i64(0);

        /* Load the two words, in memory order.  */
        tcg_gen_qemu_ld_i64(d1, clean_addr, memidx,
                            MO_64 | MO_ALIGN_16 | s->be_data);
        tcg_gen_addi_i64(a2, clean_addr, 8);
        tcg_gen_qemu_ld_i64(d2, a2, memidx, MO_64 | s->be_data);

        /* Compare the two words, also in memory order.  */
        tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
        tcg_gen_setcond_i64(TCG_COND_EQ, c2, d2, s2);
        tcg_gen_and_i64(c2, c2, c1);

        /* If compare equal, write back new data, else write back old data.  */
        tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
        tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
        tcg_gen_qemu_st_i64(c1, clean_addr, memidx, MO_64 | s->be_data);
        tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
        tcg_temp_free_i64(a2);
        tcg_temp_free_i64(c1);
        tcg_temp_free_i64(c2);

        /* Write back the data from memory to Rs.  */
        tcg_gen_mov_i64(s1, d1);
        tcg_gen_mov_i64(s2, d2);
        tcg_temp_free_i64(d1);
        tcg_temp_free_i64(d2);
    }
}
/* Update the Sixty-Four bit (SF) registersize. This logic is derived
 * from the ARMv8 specs for LDR (Shared decode for all encodings).
 */
static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
{
    int opc0 = extract32(opc, 0, 1);
    int regsize;

    if (is_signed) {
        regsize = opc0 ? 32 : 64;
    } else {
        regsize = size == 3 ? 64 : 32;
    }
    return regsize == 64;
}
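
/*
 * Illustrative note (restating the logic above, not additional decode):
 * unsigned loads report SF=1 only for the 64-bit form (size == 3), while
 * signed loads report SF=1 when the 64-bit destination variant is used
 * (opc<0> == 0), e.g. LDRSB Xt reports SF=1 but LDRSB Wt reports SF=0.
 */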
/* Load/store exclusive
 *
 *  31 30 29         24  23  22   21  20  16  15  14   10 9    5 4    0
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 * | sz  | 0 0 1 0 0 0 | o2 | L | o1 |  Rs  | o0 |  Rt2  |  Rn  |  Rt  |
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 *
 *  sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   L: 0 -> store, 1 -> load
 *  o2: 0 -> exclusive, 1 -> not
 *  o1: 0 -> single register, 1 -> register pair
 *  o0: 1 -> load-acquire/store-release, 0 -> not
 */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int rs = extract32(insn, 16, 5);
    int is_lasr = extract32(insn, 15, 1);
    int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
    int size = extract32(insn, 30, 2);
    TCGv_i64 clean_addr;

    switch (o2_L_o1_o0) {
    case 0x0: /* STXR */
    case 0x1: /* STLXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        if (is_lasr) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    true, rn != 31, size);
        gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
        return;

    case 0x4: /* LDXR */
    case 0x5: /* LDAXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    false, rn != 31, size);
        gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
        if (is_lasr) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        }
        return;

    case 0x8: /* STLLR */
        if (!dc_isar_feature(aa64_lor, s)) {
            break;
        }
        /* StoreLORelease is the same as Store-Release for QEMU.  */
        /* fall through */
    case 0x9: /* STLR */
        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    true, rn != 31, size);
        /* TODO: ARMv8.4-LSE SCTLR.nAA */
        do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt,
                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        return;

    case 0xc: /* LDLAR */
        if (!dc_isar_feature(aa64_lor, s)) {
            break;
        }
        /* LoadLOAcquire is the same as Load-Acquire for QEMU.  */
        /* fall through */
    case 0xd: /* LDAR */
        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    false, rn != 31, size);
        /* TODO: ARMv8.4-LSE SCTLR.nAA */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true,
                  rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;

    case 0x2: case 0x3: /* CASP / STXP */
        if (size & 2) { /* STXP / STLXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
            }
            clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                        true, rn != 31, size);
            gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && dc_isar_feature(aa64_atomics, s)) {
            /* CASP / CASPL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0x6: case 0x7: /* CASPA / LDXP */
        if (size & 2) { /* LDXP / LDAXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                        false, rn != 31, size);
            gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
            }
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && dc_isar_feature(aa64_atomics, s)) {
            /* CASPA / CASPAL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0xa: /* CAS */
    case 0xb: /* CASL */
    case 0xe: /* CASA */
    case 0xf: /* CASAL */
        if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
            gen_compare_and_swap(s, rs, rt, rn, size);
            return;
        }
        break;
    }
    unallocated_encoding(s);
}
/*
 * Load register (literal)
 *
 *  31 30 29   27  26 25 24 23                5 4     0
 * +-----+-------+---+-----+-------------------+-------+
 * | opc | 0 1 1 | V | 0 0 |       imm19       |  Rt   |
 * +-----+-------+---+-----+-------------------+-------+
 *
 * V: 1 -> vector (simd/fp)
 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
 *                   10-> 32 bit signed, 11 -> prefetch
 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
 */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int64_t imm = sextract32(insn, 5, 19) << 2;
    bool is_vector = extract32(insn, 26, 1);
    int opc = extract32(insn, 30, 2);
    bool is_signed = false;
    int size = 2;
    TCGv_i64 tcg_rt, clean_addr;

    if (is_vector) {
        if (opc == 3) {
            unallocated_encoding(s);
            return;
        }
        size = 2 + opc;
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (opc == 3) {
            /* PRFM (literal) : prefetch */
            return;
        }
        size = 2 + extract32(opc, 0, 1);
        is_signed = extract32(opc, 1, 1);
    }

    tcg_rt = cpu_reg(s, rt);

    clean_addr = tcg_constant_i64(s->pc_curr + imm);
    if (is_vector) {
        do_fp_ld(s, rt, clean_addr, size);
    } else {
        /* Only unsigned 32bit loads target 32bit registers.  */
        bool iss_sf = opc != 0;

        do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                  false, true, rt, iss_sf, false);
    }
}
/*
 * LDNP (Load Pair - non-temporal hint)
 * LDP (Load Pair - non vector)
 * LDPSW (Load Pair Signed Word - non vector)
 * STNP (Store Pair - non-temporal hint)
 * STP (Store Pair - non vector)
 * LDNP (Load Pair of SIMD&FP - non-temporal hint)
 * LDP (Load Pair of SIMD&FP)
 * STNP (Store Pair of SIMD&FP - non-temporal hint)
 * STP (Store Pair of SIMD&FP)
 *
 *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
 * +-----+-------+---+---+-------+---+-----------------------------+
 * | opc | 1 0 1 | V | 0 | index | L |  imm7 |  Rt2  |  Rn  | Rt   |
 * +-----+-------+---+---+-------+---+-------+-------+------+------+
 *
 * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
 *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
 *   V: 0 -> GPR, 1 -> Vector
 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
 *      10 -> signed offset, 11 -> pre-index
 *   L: 0 -> Store 1 -> Load
 *
 * Rt, Rt2 = GPR or SIMD registers to be stored
 * Rn = general purpose register containing address
 * imm7 = signed offset (multiple of 4 or 8 depending on size)
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    uint64_t offset = sextract64(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;
    bool set_tag = false;

    TCGv_i64 clean_addr, dirty_addr;

    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else if (opc == 1 && !is_load) {
        /* STGP */
        if (!dc_isar_feature(aa64_mte_insn_reg, s) || index == 0) {
            unallocated_encoding(s);
            return;
        }
        size = 3;
        set_tag = true;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        if (!is_load && is_signed) {
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /* signed offset with "non-temporal" hint. Since we don't emulate
         * caches we don't care about hints to the cache system about
         * data access patterns, and handle this identically to plain
         * signed offset.
         */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, rn not updated */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    if (is_vector && !fp_access_check(s)) {
        return;
    }

    offset <<= (set_tag ? LOG2_TAG_GRANULE : size);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    if (!postindex) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    }

    if (set_tag) {
        if (!s->ata) {
            /*
             * TODO: We could rely on the stores below, at least for
             * system mode, if we arrange to add MO_ALIGN_16.
             */
            gen_helper_stg_stub(cpu_env, dirty_addr);
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
        } else {
            gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
        }
    }

    clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
                                (wback || rn != 31) && !set_tag, 2 << size);

    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, clean_addr, size);
        } else {
            do_fp_st(s, rt, clean_addr, size);
        }
        tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
        if (is_load) {
            do_fp_ld(s, rt2, clean_addr, size);
        } else {
            do_fp_st(s, rt2, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);

        if (is_load) {
            TCGv_i64 tmp = tcg_temp_new_i64();

            /* Do not modify tcg_rt before recognizing any exception
             * from the second load.
             */
            do_gpr_ld(s, tmp, clean_addr, size + is_signed * MO_SIGN,
                      false, false, 0, false, false);
            tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
            do_gpr_ld(s, tcg_rt2, clean_addr, size + is_signed * MO_SIGN,
                      false, false, 0, false, false);

            tcg_gen_mov_i64(tcg_rt, tmp);
            tcg_temp_free_i64(tmp);
        } else {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      false, 0, false, false);
            tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
            do_gpr_st(s, tcg_rt2, clean_addr, size,
                      false, 0, false, false);
        }
    }

    if (wback) {
        if (postindex) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
    }
}
/*
 * Load/store (immediate post-indexed)
 * Load/store (immediate pre-indexed)
 * Load/store (unscaled immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21  20    12 11 10 9    5 4    0
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 * |size| 1 1 1 | V | 0 0 | opc | 0 |  imm9  | idx |  Rn  |  Rt  |
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 *
 * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
 *       10 -> unprivileged
 * V = 0 -> non-vector
 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 */
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
                                int opc, int size, int rt, bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int imm9 = sextract32(insn, 12, 9);
    int idx = extract32(insn, 10, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_unpriv = (idx == 2);
    bool iss_valid = !is_vector;
    bool post_index;
    bool writeback;
    int memidx;

    TCGv_i64 clean_addr, dirty_addr;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4 || is_unpriv) {
            unallocated_encoding(s);
            return;
        }
        is_store = ((opc & 1) == 0);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            if (idx != 0) {
                unallocated_encoding(s);
                return;
            }
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    switch (idx) {
    case 0:
    case 2:
        post_index = false;
        writeback = false;
        break;
    case 1:
        post_index = true;
        writeback = true;
        break;
    case 3:
        post_index = false;
        writeback = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    if (!post_index) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
    }

    memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
    clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
                                       writeback || rn != 31,
                                       size, is_unpriv, memidx);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);

        if (is_store) {
            do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
                             iss_valid, rt, iss_sf, false);
        } else {
            do_gpr_ld_memidx(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                             is_extended, memidx,
                             iss_valid, rt, iss_sf, false);
        }
    }

    if (writeback) {
        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
        if (post_index) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
        }
        tcg_gen_mov_i64(tcg_rn, dirty_addr);
    }
}
/*
 * Load/store (register offset)
 *
 * 31 30 29   27  26 25 24 23 22 21  20  16 15 13 12 11 10 9  5 4  0
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 * |size| 1 1 1 | V | 0 0 | opc | 1 |  Rm  | opt | S| 1 0 | Rn | Rt |
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 *
 * For non-vector:
 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * V: 1 -> vector/simd
 * opt: extend encoding (see DecodeRegExtend)
 * S: if S=1 then scale (essentially index by sizeof(size))
 * Rt: register to transfer into/out of
 * Rn: address register or SP for base
 * Rm: offset register or ZR for offset
 */
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
                                   int opc, int size, int rt, bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int shift = extract32(insn, 12, 1);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 13, 3);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;

    TCGv_i64 tcg_rm, clean_addr, dirty_addr;

    if (extract32(opt, 1, 1) == 0) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);

    tcg_rm = read_cpu_reg(s, rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);

    tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
    clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, size);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);

        if (is_store) {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                      is_extended, true, rt, iss_sf, false);
        }
    }
}
/*
 * Load/store (unsigned immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21        10 9     5
 * +----+-------+---+-----+-----+------------+-------+------+
 * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
 * +----+-------+---+-----+-----+------------+-------+------+
 *
 * For non-vector:
 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * Rn: base address register (inc SP)
 * Rt: target register
 */
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
                                        int opc, int size, int rt,
                                        bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    unsigned int imm12 = extract32(insn, 10, 12);
    unsigned int offset;

    TCGv_i64 clean_addr, dirty_addr;

    bool is_store;
    bool is_signed = false;
    bool is_extended = false;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    offset = imm12 << size;
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, size);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                      is_extended, true, rt, iss_sf, false);
        }
    }
}
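
/*
 * Worked example (illustrative only, mirrors the scaling above): for a
 * 64-bit LDR the immediate is scaled by the access size, so imm12 = 1
 * with size = 3 yields offset = 1 << 3 = 8, i.e. "ldr x0, [x1, #8]".
 * Offsets that are not a multiple of the access size must use the
 * unscaled (LDUR/STUR) encoding handled by disas_ldst_reg_imm9() above.
 */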
/* Atomic memory operations
 *
 *  31  30      27  26    24    22  21   16   15    12    10    5     0
 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
 * | size | 1 1 1 | V | 0 0 | A R | 1 | Rs | o3 | opc | 0 0 | Rn |  Rt |
 * +------+-------+---+-----+-----+--------+----+-----+-----+----+-----+
 *
 * Rt: the result register
 * Rn: base address or SP
 * Rs: the source register for the operation
 * V: vector flag (always 0 as of v8.3)
 * A: acquire flag
 * R: release flag
 */
static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
                              int size, int rt, bool is_vector)
{
    int rs = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int o3_opc = extract32(insn, 12, 4);
    bool r = extract32(insn, 22, 1);
    bool a = extract32(insn, 23, 1);
    TCGv_i64 tcg_rs, tcg_rt, clean_addr;
    AtomicThreeOpFn *fn = NULL;
    MemOp mop = s->be_data | size | MO_ALIGN;

    if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
        unallocated_encoding(s);
        return;
    }
    switch (o3_opc) {
    case 000: /* LDADD */
        fn = tcg_gen_atomic_fetch_add_i64;
        break;
    case 001: /* LDCLR */
        fn = tcg_gen_atomic_fetch_and_i64;
        break;
    case 002: /* LDEOR */
        fn = tcg_gen_atomic_fetch_xor_i64;
        break;
    case 003: /* LDSET */
        fn = tcg_gen_atomic_fetch_or_i64;
        break;
    case 004: /* LDSMAX */
        fn = tcg_gen_atomic_fetch_smax_i64;
        mop |= MO_SIGN;
        break;
    case 005: /* LDSMIN */
        fn = tcg_gen_atomic_fetch_smin_i64;
        mop |= MO_SIGN;
        break;
    case 006: /* LDUMAX */
        fn = tcg_gen_atomic_fetch_umax_i64;
        break;
    case 007: /* LDUMIN */
        fn = tcg_gen_atomic_fetch_umin_i64;
        break;
    case 010: /* SWP */
        fn = tcg_gen_atomic_xchg_i64;
        break;
    case 014: /* LDAPR, LDAPRH, LDAPRB */
        if (!dc_isar_feature(aa64_rcpc_8_3, s) ||
            rs != 31 || a != 1 || r != 0) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size);

    if (o3_opc == 014) {
        /*
         * LDAPR* are a special case because they are a simple load, not a
         * fetch-and-do-something op.
         * The architectural consistency requirements here are weaker than
         * full load-acquire (we only need "load-acquire processor consistent"),
         * but we choose to implement them as full LDAQ.
         */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false,
                  true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;
    }

    tcg_rs = read_cpu_reg(s, rs, true);
    tcg_rt = cpu_reg(s, rt);

    if (o3_opc == 1) { /* LDCLR */
        tcg_gen_not_i64(tcg_rs, tcg_rs);
    }

    /* The tcg atomic primitives are all full barriers.  Therefore we
     * can ignore the Acquire and Release bits of this instruction.
     */
    fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);

    if ((mop & MO_SIGN) && size != MO_64) {
        tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
    }
}
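
/*
 * Illustrative note (restating the mapping above, not extra behaviour):
 * LDCLR Xs, Xt, [Xn] computes mem = mem AND NOT Xs and returns the old
 * memory value in Xt.  TCG only provides fetch-and-AND, so the operand
 * is inverted first (tcg_gen_not_i64) and the operation becomes
 * fetch_and(mem, ~Xs), which is the same bit-clear operation.
 */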
/*
 * PAC memory operations
 *
 *  31  30      27  26    24    22  21       12  11  10    5     0
 * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
 * | size | 1 1 1 | V | 0 0 | M S | 1 |  imm9  | W | 1 | Rn |  Rt |
 * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
 *
 * Rt: the result register
 * Rn: base address or SP
 * V: vector flag (always 0 as of v8.3)
 * M: clear for key DA, set for key DB
 * W: pre-indexing flag
 */
static void disas_ldst_pac(DisasContext *s, uint32_t insn,
                           int size, int rt, bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    bool is_wback = extract32(insn, 11, 1);
    bool use_key_a = !extract32(insn, 23, 1);
    int offset;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;

    if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);

    if (s->pauth_active) {
        if (use_key_a) {
            gen_helper_autda(dirty_addr, cpu_env, dirty_addr,
                             new_tmp_a64_zero(s));
        } else {
            gen_helper_autdb(dirty_addr, cpu_env, dirty_addr,
                             new_tmp_a64_zero(s));
        }
    }

    /* Form the 10-bit signed, scaled offset.  */
    offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9);
    offset = sextract32(offset << size, 0, 10 + size);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);

    /* Note that "clean" and "dirty" here refer to TBI not PAC.  */
    clean_addr = gen_mte_check1(s, dirty_addr, false,
                                is_wback || rn != 31, size);

    tcg_rt = cpu_reg(s, rt);
    do_gpr_ld(s, tcg_rt, clean_addr, size,
              /* extend */ false, /* iss_valid */ !is_wback,
              /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);

    if (is_wback) {
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
    }
}
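
/*
 * Worked example (illustrative, restating the arithmetic above): with
 * the sign bit (insn<22>) set and imm9 = 0x1f0, the 10-bit field is
 * 0x3f0; scaled by size = 3 and sign-extended from 13 bits this gives
 * sextract32(0x3f0 << 3, 0, 13) = -128, so the offset is a multiple of
 * 8 in the range [-4096, 4088].
 */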
/*
 * LDAPR/STLR (unscaled immediate)
 *
 *  31  30            24    22  21       12    10    5     0
 * +------+-------------+-----+---+--------+-----+----+-----+
 * | size | 0 1 1 0 0 1 | opc | 0 |  imm9  | 0 0 | Rn |  Rt |
 * +------+-------------+-----+---+--------+-----+----+-----+
 *
 * Rt: source or destination register
 * Rn: base register
 * imm9: unscaled immediate offset
 * opc: 00: STLUR*, 01/10/11: various LDAPUR*
 * size: size of load/store
 */
static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int offset = sextract32(insn, 12, 9);
    int opc = extract32(insn, 22, 2);
    int size = extract32(insn, 30, 2);
    TCGv_i64 clean_addr, dirty_addr;
    bool is_store = false;
    bool extend = false;
    bool iss_sf;
    MemOp mop;

    if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
        unallocated_encoding(s);
        return;
    }

    /* TODO: ARMv8.4-LSE SCTLR.nAA */
    mop = size | MO_ALIGN;

    switch (opc) {
    case 0: /* STLURB */
        is_store = true;
        break;
    case 1: /* LDAPUR* */
        break;
    case 2: /* LDAPURS* 64-bit variant */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        mop |= MO_SIGN;
        break;
    case 3: /* LDAPURS* 32-bit variant */
        if (size > 1) {
            unallocated_encoding(s);
            return;
        }
        mop |= MO_SIGN;
        extend = true; /* zero-extend 32->64 after signed load */
        break;
    default:
        g_assert_not_reached();
    }

    iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    clean_addr = clean_data_tbi(s, dirty_addr);

    if (is_store) {
        /* Store-Release semantics */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true);
    } else {
        /*
         * Load-AcquirePC semantics; we implement as the slightly more
         * restrictive Load-Acquire.
         */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop,
                  extend, true, rt, iss_sf, true);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
}
/* Load/store register (all forms) */
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int opc = extract32(insn, 22, 2);
    bool is_vector = extract32(insn, 26, 1);
    int size = extract32(insn, 30, 2);

    switch (extract32(insn, 24, 2)) {
    case 0:
        if (extract32(insn, 21, 1) == 0) {
            /* Load/store register (unscaled immediate)
             * Load/store immediate pre/post-indexed
             * Load/store register unprivileged
             */
            disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
            return;
        }
        switch (extract32(insn, 10, 2)) {
        case 0:
            disas_ldst_atomic(s, insn, size, rt, is_vector);
            return;
        case 2:
            disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
            return;
        default:
            disas_ldst_pac(s, insn, size, rt, is_vector);
            return;
        }
        break;
    case 1:
        disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
        return;
    }
    unallocated_encoding(s);
}
/* AdvSIMD load/store multiple structures
 *
 *  31  30  29           23 22  21         16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+-------------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+-------------+--------+------+------+------+
 *
 * AdvSIMD load/store multiple structures (post-indexed)
 *
 *  31  30  29           23 22  21  20     16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+---+---------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 |   Rm    | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+---+---------+--------+------+------+------+
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size dependent #imm
 */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 10, 2);
    int opcode = extract32(insn, 12, 4);
    bool is_store = !extract32(insn, 22, 1);
    bool is_postidx = extract32(insn, 23, 1);
    bool is_q = extract32(insn, 30, 1);
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp endian, align, mop;

    int total;    /* total bytes */
    int elements; /* elements per vector */
    int rpt;      /* num iterations */
    int selem;    /* structure elements */
    int r;

    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!is_postidx && rm != 0) {
        unallocated_encoding(s);
        return;
    }

    /* From the shared decode logic */
    switch (opcode) {
    case 0x0:
        rpt = 1;
        selem = 4;
        break;
    case 0x2:
        rpt = 4;
        selem = 1;
        break;
    case 0x4:
        rpt = 1;
        selem = 3;
        break;
    case 0x6:
        rpt = 3;
        selem = 1;
        break;
    case 0x7:
        rpt = 1;
        selem = 1;
        break;
    case 0x8:
        rpt = 1;
        selem = 2;
        break;
    case 0xa:
        rpt = 2;
        selem = 1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3 && !is_q && selem != 1) {
        /* reserved */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* For our purposes, bytes are always little-endian.  */
    endian = s->be_data;
    if (size == 0) {
        endian = MO_LE;
    }

    total = rpt * selem * (is_q ? 16 : 8);
    tcg_rn = cpu_reg_sp(s, rn);

    /*
     * Issue the MTE check vs the logical repeat count, before we
     * promote consecutive little-endian elements below.
     */
    clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
                                total);

    /*
     * Consecutive little-endian elements from a single register
     * can be promoted to a larger little-endian operation.
     */
    align = MO_ALIGN;
    if (selem == 1 && endian == MO_LE) {
        align = pow2_align(size);
        size = 3;
    }
    if (!s->align_mem) {
        align = 0;
    }
    mop = endian | size | align;

    elements = (is_q ? 16 : 8) >> size;
    tcg_ebytes = tcg_constant_i64(1 << size);
    for (r = 0; r < rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int xs;
            for (xs = 0; xs < selem; xs++) {
                int tt = (rt + r + xs) % 32;
                if (is_store) {
                    do_vec_st(s, tt, e, clean_addr, mop);
                } else {
                    do_vec_ld(s, tt, e, clean_addr, mop);
                }
                tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
            }
        }
    }

    if (!is_store) {
        /* For non-quad operations, setting a slice of the low
         * 64 bits of the register clears the high 64 bits (in
         * the ARM ARM pseudocode this is implicit in the fact
         * that 'rval' is a 64 bit wide variable).
         * For quad operations, we might still need to zero the
         * high bits of SVE.
         */
        for (r = 0; r < rpt * selem; r++) {
            int tt = (rt + r) % 32;
            clear_vec_high(s, is_q, tt);
        }
    }

    if (is_postidx) {
        if (rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
}
/* AdvSIMD load/store single structure
 *
 *  31  30  29           23 22 21 20       16 15 13 12  11  10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * AdvSIMD load/store single structure (post-indexed)
 *
 *  31  30  29           23 22 21 20       16 15 13 12  11  10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 1 | L R |     Rm    | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size dependent #imm
 * index = encoded in Q:S:size dependent on size
 *
 * lane_size = encoded in R, opc
 * transfer width = encoded in opc, S, size
 */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 10, 2);
    int S = extract32(insn, 12, 1);
    int opc = extract32(insn, 13, 3);
    int R = extract32(insn, 21, 1);
    int is_load = extract32(insn, 22, 1);
    int is_postidx = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);

    int scale = extract32(opc, 1, 2);
    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
    bool replicate = false;
    int index = is_q << 3 | S << 2 | size;
    int xs, total;
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp mop;

    if (extract32(insn, 31, 1)) {
        unallocated_encoding(s);
        return;
    }
    if (!is_postidx && rm != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (scale) {
    case 3:
        if (!is_load || S) {
            unallocated_encoding(s);
            return;
        }
        scale = size;
        replicate = true;
        break;
    case 0:
        break;
    case 1:
        if (extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        }
        index >>= 1;
        break;
    case 2:
        if (extract32(size, 1, 1)) {
            unallocated_encoding(s);
            return;
        }
        if (!extract32(size, 0, 1)) {
            index >>= 2;
        } else {
            if (S) {
                unallocated_encoding(s);
                return;
            }
            index >>= 3;
            scale = 3;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    total = selem << scale;
    tcg_rn = cpu_reg_sp(s, rn);

    clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
                                total);
    mop = finalize_memop(s, scale);

    tcg_ebytes = tcg_constant_i64(1 << scale);
    for (xs = 0; xs < selem; xs++) {
        if (replicate) {
            /* Load and replicate to all elements */
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
            tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
                                 (is_q + 1) * 8, vec_full_reg_size(s),
                                 tcg_tmp);
            tcg_temp_free_i64(tcg_tmp);
        } else {
            /* Load/store one element per register */
            if (is_load) {
                do_vec_ld(s, rt, index, clean_addr, mop);
            } else {
                do_vec_st(s, rt, index, clean_addr, mop);
            }
        }
        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
        rt = (rt + 1) % 32;
    }

    if (is_postidx) {
        if (rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
}
/*
 * Load/Store memory tags
 *
 *  31 30 29         24     22  21     12    10      5      0
 * +-----+-------------+-----+---+------+-----+------+------+
 * | 1 1 | 0 1 1 0 0 1 | op1 | 1 | imm9 | op2 |  Rn  |  Rt  |
 * +-----+-------------+-----+---+------+-----+------+------+
 */
static void disas_ldst_tag(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t offset = sextract64(insn, 12, 9) << LOG2_TAG_GRANULE;
    int op2 = extract32(insn, 10, 2);
    int op1 = extract32(insn, 22, 2);
    bool is_load = false, is_pair = false, is_zero = false, is_mult = false;
    int index = 0;
    TCGv_i64 addr, clean_addr, tcg_rt;

    /* We checked insn bits [29:24,21] in the caller.  */
    if (extract32(insn, 30, 2) != 3) {
        goto do_unallocated;
    }

    /*
     * @index is a tri-state variable which has 3 states:
     * < 0 : post-index, writeback
     * = 0 : signed offset
     * > 0 : pre-index, writeback
     */
    index = op2 - 2;

    switch (op1) {
    case 0:
        if (op2 != 0) {
            /* STG */
            break;
        }
        /* STZGM */
        if (s->current_el == 0 || offset != 0) {
            goto do_unallocated;
        }
        is_mult = is_zero = true;
        break;
    case 1:
        if (op2 != 0) {
            /* STZG */
            is_zero = true;
            break;
        }
        /* LDG */
        is_load = true;
        break;
    case 2:
        if (op2 != 0) {
            /* ST2G */
            is_pair = true;
            break;
        }
        /* STGM */
        if (s->current_el == 0 || offset != 0) {
            goto do_unallocated;
        }
        is_mult = true;
        break;
    case 3:
        if (op2 != 0) {
            /* STZ2G */
            is_pair = is_zero = true;
            break;
        }
        /* LDGM */
        if (s->current_el == 0 || offset != 0) {
            goto do_unallocated;
        }
        is_mult = is_load = true;
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }

    if (is_mult
        ? !dc_isar_feature(aa64_mte, s)
        : !dc_isar_feature(aa64_mte_insn_reg, s)) {
        goto do_unallocated;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, rn, true);
    if (index >= 0) {
        /* pre-index or signed offset */
        tcg_gen_addi_i64(addr, addr, offset);
    }

    if (is_mult) {
        tcg_rt = cpu_reg(s, rt);

        if (is_zero) {
            int size = 4 << s->dcz_blocksize;

            if (s->ata) {
                gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
            }
            /*
             * The non-tags portion of STZGM is mostly like DC_ZVA,
             * except the alignment happens before the access.
             */
            clean_addr = clean_data_tbi(s, addr);
            tcg_gen_andi_i64(clean_addr, clean_addr, -size);
            gen_helper_dc_zva(cpu_env, clean_addr);
        } else if (s->ata) {
            if (is_load) {
                gen_helper_ldgm(tcg_rt, cpu_env, addr);
            } else {
                gen_helper_stgm(cpu_env, addr, tcg_rt);
            }
        } else {
            MMUAccessType acc = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE;
            int size = 4 << GMID_EL1_BS;

            clean_addr = clean_data_tbi(s, addr);
            tcg_gen_andi_i64(clean_addr, clean_addr, -size);
            gen_probe_access(s, clean_addr, acc, size);

            if (is_load) {
                /* The result tags are zeros.  */
                tcg_gen_movi_i64(tcg_rt, 0);
            }
        }
        return;
    }

    if (is_load) {
        tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
        tcg_rt = cpu_reg(s, rt);
        if (s->ata) {
            gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
        } else {
            clean_addr = clean_data_tbi(s, addr);
            gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
            gen_address_with_allocation_tag0(tcg_rt, addr);
        }
    } else {
        tcg_rt = cpu_reg_sp(s, rt);
        if (!s->ata) {
            /*
             * For STG and ST2G, we need to check alignment and probe memory.
             * TODO: For STZG and STZ2G, we could rely on the stores below,
             * at least for system mode; user-only won't enforce alignment.
             */
            if (is_pair) {
                gen_helper_st2g_stub(cpu_env, addr);
            } else {
                gen_helper_stg_stub(cpu_env, addr);
            }
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            if (is_pair) {
                gen_helper_st2g_parallel(cpu_env, addr, tcg_rt);
            } else {
                gen_helper_stg_parallel(cpu_env, addr, tcg_rt);
            }
        } else {
            if (is_pair) {
                gen_helper_st2g(cpu_env, addr, tcg_rt);
            } else {
                gen_helper_stg(cpu_env, addr, tcg_rt);
            }
        }
    }

    if (is_zero) {
        TCGv_i64 clean_addr = clean_data_tbi(s, addr);
        TCGv_i64 tcg_zero = tcg_constant_i64(0);
        int mem_index = get_mem_index(s);
        int i, n = (1 + is_pair) << LOG2_TAG_GRANULE;

        tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index,
                            MO_UQ | MO_ALIGN_16);
        for (i = 8; i < n; i += 8) {
            tcg_gen_addi_i64(clean_addr, clean_addr, 8);
            tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, MO_UQ);
        }
    }

    if (index != 0) {
        /* pre-index or post-index */
        if (index < 0) {
            /* post-index */
            tcg_gen_addi_i64(addr, addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), addr);
    }
}
/* Loads and stores */
static void disas_ldst(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 6)) {
    case 0x08: /* Load/store exclusive */
        disas_ldst_excl(s, insn);
        break;
    case 0x18: case 0x1c: /* Load register (literal) */
        disas_ld_lit(s, insn);
        break;
    case 0x28: case 0x29:
    case 0x2c: case 0x2d: /* Load/store pair (all forms) */
        disas_ldst_pair(s, insn);
        break;
    case 0x38: case 0x39:
    case 0x3c: case 0x3d: /* Load/store register (all forms) */
        disas_ldst_reg(s, insn);
        break;
    case 0x0c: /* AdvSIMD load/store multiple structures */
        disas_ldst_multiple_struct(s, insn);
        break;
    case 0x0d: /* AdvSIMD load/store single structure */
        disas_ldst_single_struct(s, insn);
        break;
    case 0x19:
        if (extract32(insn, 21, 1) != 0) {
            disas_ldst_tag(s, insn);
        } else if (extract32(insn, 10, 2) == 0) {
            disas_ldst_ldapr_stlr(s, insn);
        } else {
            unallocated_encoding(s);
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* PC-rel. addressing
 *   31  30   29 28       24 23                5 4    0
 * +----+-------+-----------+-------------------+------+
 * | op | immlo | 1 0 0 0 0 |       immhi       |  Rd  |
 * +----+-------+-----------+-------------------+------+
 */
static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
{
    unsigned int page, rd;
    int64_t offset;
    uint64_t base;

    page = extract32(insn, 31, 1);
    /* SignExtend(immhi:immlo) -> offset */
    offset = sextract64(insn, 5, 19);
    offset = offset << 2 | extract32(insn, 29, 2);
    rd = extract32(insn, 0, 5);
    base = s->pc_curr;

    if (page) {
        /* ADRP (page based) */
        base &= ~0xfff;
        offset <<= 12;
    }

    tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
}
/*
 * Add/subtract (immediate)
 *
 *  31 30 29 28         23 22 21         10 9   5 4   0
 * +--+--+--+-------------+--+-------------+-----+-----+
 * |sf|op| S| 1 0 0 0 1 0 |sh|    imm12    |  Rn | Rd  |
 * +--+--+--+-------------+--+-------------+-----+-----+
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 *    sh: 1 -> LSL imm by 12
 */
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t imm = extract32(insn, 10, 12);
    bool shift = extract32(insn, 22, 1);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool is_64bit = extract32(insn, 31, 1);

    TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
    TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
    TCGv_i64 tcg_result;

    if (shift) {
        imm <<= 12;
    }

    tcg_result = tcg_temp_new_i64();
    if (!setflags) {
        if (sub_op) {
            tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
        } else {
            tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
        }
    } else {
        TCGv_i64 tcg_imm = tcg_constant_i64(imm);
        if (sub_op) {
            gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        } else {
            gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        }
    }

    if (is_64bit) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
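
/*
 * Illustrative example (restating the decode above): "add x0, x1, #1, lsl #12"
 * is encoded with sh = 1 and imm12 = 1, so the immediate used here becomes
 * 1 << 12 = 0x1000; without the shift the same imm12 encodes #1.
 */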
/*
 * Add/subtract (immediate, with tags)
 *
 *  31 30 29 28         23 22 21     16 14      10 9   5 4   0
 * +--+--+--+-------------+--+---------+--+-------+-----+-----+
 * |sf|op| S| 1 0 0 0 1 1 |o2|  uimm6  |o3| uimm4 |  Rn | Rd  |
 * +--+--+--+-------------+--+---------+--+-------+-----+-----+
 *
 * op: 0 -> add, 1 -> sub
 */
static void disas_add_sub_imm_with_tags(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int uimm4 = extract32(insn, 10, 4);
    int uimm6 = extract32(insn, 16, 6);
    bool sub_op = extract32(insn, 30, 1);
    TCGv_i64 tcg_rn, tcg_rd;
    int imm;

    /* Test all of sf=1, S=0, o2=0, o3=0.  */
    if ((insn & 0xa040c000u) != 0x80000000u ||
        !dc_isar_feature(aa64_mte_insn_reg, s)) {
        unallocated_encoding(s);
        return;
    }

    imm = uimm6 << LOG2_TAG_GRANULE;
    if (sub_op) {
        imm = -imm;
    }

    tcg_rn = cpu_reg_sp(s, rn);
    tcg_rd = cpu_reg_sp(s, rd);

    if (s->ata) {
        gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn,
                           tcg_constant_i32(imm),
                           tcg_constant_i32(uimm4));
    } else {
        tcg_gen_addi_i64(tcg_rd, tcg_rn, imm);
        gen_address_with_allocation_tag0(tcg_rd, tcg_rd);
    }
}
/* The input should be a value in the bottom e bits (with higher
 * bits zero); returns that value replicated into every element
 * of size e in a 64 bit integer.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    assert(e != 0);
    while (e < 64) {
        mask |= mask << e;
        e *= 2;
    }
    return mask;
}

/* Return a value with the bottom len bits set (where 0 < len <= 64) */
static inline uint64_t bitmask64(unsigned int length)
{
    assert(length > 0 && length <= 64);
    return ~0ULL >> (64 - length);
}

/* Simplified variant of pseudocode DecodeBitMasks() for the case where we
 * only require the wmask. Returns false if the imms/immr/immn are a reserved
 * value (ie should cause a guest UNDEF exception), and true if they are
 * valid, in which case the decoded bit pattern is written to result.
 */
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                            unsigned int imms, unsigned int immr)
{
    uint64_t mask;
    unsigned e, levels, s, r;
    int len;

    assert(immn < 2 && imms < 64 && immr < 64);

    /* The bit patterns we create here are 64 bit patterns which
     * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
     * 64 bits each. Each element contains the same value: a run
     * of between 1 and e-1 non-zero bits, rotated within the
     * element by between 0 and e-1 bits.
     *
     * The element size and run length are encoded into immn (1 bit)
     * and imms (6 bits) as follows:
     * 64 bit elements: immn = 1, imms = <length of run - 1>
     * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
     * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
     *  8 bit elements: immn = 0, imms = 110 : <length of run - 1>
     *  4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
     *  2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
     * Notice that immn = 0, imms = 11111x is the only combination
     * not covered by one of the above options; this is reserved.
     * Further, <length of run - 1> all-ones is a reserved pattern.
     *
     * In all cases the rotation is by immr % e (and immr is 6 bits).
     */

    /* First determine the element size */
    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
    if (len < 1) {
        /* This is the immn == 0, imms == 0x11111x case */
        return false;
    }
    e = 1 << len;

    levels = e - 1;
    s = imms & levels;
    r = immr & levels;

    if (s == levels) {
        /* <length of run - 1> mustn't be all-ones. */
        return false;
    }

    /* Create the value of one element: s+1 set bits rotated
     * by r within the element (which is e bits wide)...
     */
    mask = bitmask64(s + 1);
    if (r) {
        mask = (mask >> r) | (mask << (e - r));
        mask &= bitmask64(e);
    }
    /* ...then replicate the element over the whole 64 bit value */
    mask = bitfield_replicate(mask, e);
    *result = mask;
    return true;
}
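
/*
 * Worked example (illustrative): immn = 0, imms = 0b111100, immr = 0
 * selects 2-bit elements (prefix 11110) with a run length of 1 and no
 * rotation, so the element is 0b01 and replication gives
 * 0x5555555555555555.  Likewise immn = 0, imms = 0b110011 gives 8-bit
 * elements with a run of 4 bits, i.e. 0x0f0f0f0f0f0f0f0f.
 */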
/* Logical (immediate)
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 * +----+-----+-------------+---+------+------+------+------+
 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms |  Rn  |  Rd  |
 * +----+-----+-------------+---+------+------+------+------+
 */
static void disas_logic_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opc, is_n, immr, imms, rn, rd;
    TCGv_i64 tcg_rd, tcg_rn;
    uint64_t wmask;
    bool is_and = false;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    is_n = extract32(insn, 22, 1);
    immr = extract32(insn, 16, 6);
    imms = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && is_n) {
        unallocated_encoding(s);
        return;
    }

    if (opc == 0x3) { /* ANDS */
        tcg_rd = cpu_reg(s, rd);
    } else {
        tcg_rd = cpu_reg_sp(s, rd);
    }
    tcg_rn = cpu_reg(s, rn);

    if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
        /* some immediate field values are reserved */
        unallocated_encoding(s);
        return;
    }

    if (!sf) {
        wmask &= 0xffffffff;
    }

    switch (opc) {
    case 0x3: /* ANDS */
    case 0x0: /* AND */
        tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
        is_and = true;
        break;
    case 0x1: /* ORR */
        tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
        break;
    case 0x2: /* EOR */
        tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
        break;
    default:
        assert(FALSE); /* must handle all above */
        break;
    }

    if (!sf && !is_and) {
        /* zero extend final result; we know we can skip this for AND
         * since the immediate had the high 32 bits clear.
         */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) { /* ANDS */
        gen_logic_CC(sf, tcg_rd);
    }
}
/*
 * Move wide (immediate)
 *
 *  31 30 29 28         23 22 21 20             5 4    0
 * +--+-----+-------------+-----+----------------+------+
 * |sf| opc | 1 0 0 1 0 1 |  hw |      imm16     |  Rd  |
 * +--+-----+-------------+-----+----------------+------+
 *
 * sf: 0 -> 32 bit, 1 -> 64 bit
 * opc: 00 -> N, 10 -> Z, 11 -> K
 * hw: shift/16 (0,16, and sf only 32, 48)
 */
static void disas_movw_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    uint64_t imm = extract32(insn, 5, 16);
    int sf = extract32(insn, 31, 1);
    int opc = extract32(insn, 29, 2);
    int pos = extract32(insn, 21, 2) << 4;
    TCGv_i64 tcg_rd = cpu_reg(s, rd);

    if (!sf && (pos >= 32)) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* MOVN */
    case 2: /* MOVZ */
        imm <<= pos;
        if (opc == 0) {
            imm = ~imm;
        }
        if (!sf) {
            imm &= 0xffffffffu;
        }
        tcg_gen_movi_i64(tcg_rd, imm);
        break;
    case 3: /* MOVK */
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_constant_i64(imm), pos, 16);
        if (!sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
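
/*
 * Illustrative example (restating the decode above): a constant such as
 * 0x12345678 is typically materialised as
 *     movz x0, #0x5678            ; opc = 10, hw = 0
 *     movk x0, #0x1234, lsl #16   ; opc = 11, hw = 1
 * where each MOVK deposits a 16-bit chunk at pos = hw * 16 without
 * disturbing the rest of the register.
 */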
/* Bitfield
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 * +----+-----+-------------+---+------+------+------+------+
 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms |  Rn  |  Rd  |
 * +----+-----+-------------+---+------+------+------+------+
 */
static void disas_bitfield(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
    TCGv_i64 tcg_rd, tcg_tmp;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    n = extract32(insn, 22, 1);
    ri = extract32(insn, 16, 6);
    si = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    bitsize = sf ? 64 : 32;

    if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    /* Suppress the zero-extend for !sf.  Since RI and SI are constrained
       to be smaller than bitsize, we'll never reference data outside the
       low 32-bits anyway.  */
    tcg_tmp = read_cpu_reg(s, rn, 1);

    /* Recognize simple(r) extractions.  */
    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */
            tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
            goto done;
        } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */
            tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
            return;
        }
        /* opc == 1, BFXIL fall through to deposit */
        tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
        pos = 0;
    } else {
        /* Handle the ri > si case with a deposit
         * Wd<32+s-r,32-r> = Wn<s:0>
         */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
    }

    if (opc == 0 && len < ri) {
        /* SBFM: sign extend the destination field from len to fill
           the balance of the word.  Let the deposit below insert all
           of those sign bits.  */
        tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
        len = ri;
    }

    if (opc == 1) { /* BFM, BFXIL */
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
    } else {
        /* SBFM or UBFM: We start with zero, and we haven't modified
           any bits outside bitsize, therefore the zero-extension
           below is unneeded.  */
        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
        return;
    }

 done:
    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
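/*
 * The common shift/extend aliases all reach this function: per the A64
 * alias definitions, "LSL Xd, Xn, #s" is UBFM with immr = (64 - s) % 64
 * and imms = 63 - s, while "SXTB Wd, Wn" is SBFM with immr=0, imms=7.
 */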
/* Extract
 *   31  30  29 28         23 22   21  20  16 15    10 9    5 4    0
 * +----+------+-------------+---+----+------+--------+------+------+
 * | sf | op21 | 1 0 0 1 1 1 | N | o0 |  Rm  |  imms  |  Rn  |  Rd  |
 * +----+------+-------------+---+----+------+--------+------+------+
 */
static void disas_extract(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;

    sf = extract32(insn, 31, 1);
    n = extract32(insn, 22, 1);
    rm = extract32(insn, 16, 5);
    imm = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    op21 = extract32(insn, 29, 2);
    op0 = extract32(insn, 21, 1);
    bitsize = sf ? 64 : 32;

    if (sf != n || op21 || op0 || imm >= bitsize) {
        unallocated_encoding(s);
    } else {
        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

        tcg_rd = cpu_reg(s, rd);

        if (unlikely(imm == 0)) {
            /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
             * so an extract from bit 0 is a special case.
             */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
            } else {
                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
            }
        } else {
            tcg_rm = cpu_reg(s, rm);
            tcg_rn = cpu_reg(s, rn);

            if (sf) {
                /* Specialization to ROR happens in EXTRACT2.  */
                tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, imm);
            } else {
                TCGv_i32 t0 = tcg_temp_new_i32();

                tcg_gen_extrl_i64_i32(t0, tcg_rm);
                if (rm == rn) {
                    tcg_gen_rotri_i32(t0, t0, imm);
                } else {
                    TCGv_i32 t1 = tcg_temp_new_i32();
                    tcg_gen_extrl_i64_i32(t1, tcg_rn);
                    tcg_gen_extract2_i32(t0, t0, t1, imm);
                    tcg_temp_free_i32(t1);
                }
                tcg_gen_extu_i32_i64(tcg_rd, t0);
                tcg_temp_free_i32(t0);
            }
        }
    }
}
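/*
 * Note that "ROR Wd, Wn, #imm" is the alias spelling of
 * "EXTR Wd, Wn, Wn, #imm", which is why the rm == rn path above can be
 * implemented with a plain 32-bit rotate.
 */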
/* Data processing - immediate */
static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 23, 6)) {
    case 0x20: case 0x21: /* PC-rel. addressing */
        disas_pc_rel_adr(s, insn);
        break;
    case 0x22: /* Add/subtract (immediate) */
        disas_add_sub_imm(s, insn);
        break;
    case 0x23: /* Add/subtract (immediate, with tags) */
        disas_add_sub_imm_with_tags(s, insn);
        break;
    case 0x24: /* Logical (immediate) */
        disas_logic_imm(s, insn);
        break;
    case 0x25: /* Move wide (immediate) */
        disas_movw_imm(s, insn);
        break;
    case 0x26: /* Bitfield */
        disas_bitfield(s, insn);
        break;
    case 0x27: /* Extract */
        disas_extract(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
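/*
 * Example of the dispatch above (worked by hand): for insn 0xd2a24680
 * (MOVZ X0, #0x1234, LSL #16), extract32(insn, 23, 6) is 0x25, so the
 * instruction is routed to disas_movw_imm().
 */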
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
 * Note that it is the caller's responsibility to ensure that the
 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
 * mandated semantics for out of range shifts.
 */
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
{
    switch (shift_type) {
    case A64_SHIFT_TYPE_LSL:
        tcg_gen_shl_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_LSR:
        tcg_gen_shr_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_ASR:
        if (!sf) {
            tcg_gen_ext32s_i64(dst, src);
        }
        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
        break;
    case A64_SHIFT_TYPE_ROR:
        if (sf) {
            tcg_gen_rotr_i64(dst, src, shift_amount);
        } else {
            TCGv_i32 t0, t1;
            t0 = tcg_temp_new_i32();
            t1 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t0, src);
            tcg_gen_extrl_i64_i32(t1, shift_amount);
            tcg_gen_rotr_i32(t0, t0, t1);
            tcg_gen_extu_i32_i64(dst, t0);
            tcg_temp_free_i32(t0);
            tcg_temp_free_i32(t1);
        }
        break;
    default:
        assert(FALSE); /* all shift types should be handled */
        break;
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(dst, dst);
    }
}

/* Shift a TCGv src by immediate, put result in dst.
 * The shift amount must be in range (this should always be true as the
 * relevant instructions will UNDEF on bad shift immediates).
 */
static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
                          enum a64_shift_type shift_type, unsigned int shift_i)
{
    assert(shift_i < (sf ? 64 : 32));

    if (shift_i == 0) {
        tcg_gen_mov_i64(dst, src);
    } else {
        shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i));
    }
}
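/*
 * Callers are expected to mask a variable shift amount first; for
 * example the LSLV/LSRV/ASRV/RORV path in handle_shift_reg() below ANDs
 * the Rm value with 63 (or 31) before calling shift_reg(), which gives
 * the architected modulo behaviour for out-of-range amounts.
 */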
/* Logical (shifted register)
 *   31  30 29 28       24 23   22 21  20  16 15    10 9    5 4    0
 * +----+-----+-----------+-------+---+------+--------+------+------+
 * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
 * +----+-----+-----------+-------+---+------+--------+------+------+
 */
static void disas_logic_reg(DisasContext *s, uint32_t insn)
{
    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    shift_type = extract32(insn, 22, 2);
    invert = extract32(insn, 21, 1);
    rm = extract32(insn, 16, 5);
    shift_amount = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && (shift_amount & (1 << 5))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
         * register-register MOV and MVN, so it is worth special casing.
         */
        tcg_rm = cpu_reg(s, rm);
        if (invert) {
            tcg_gen_not_i64(tcg_rd, tcg_rm);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, tcg_rm);
            } else {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
            }
        }
        return;
    }

    tcg_rm = read_cpu_reg(s, rm, sf);

    if (shift_amount) {
        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
    }

    tcg_rn = cpu_reg(s, rn);

    switch (opc | (invert << 2)) {
    case 0: /* AND */
    case 3: /* ANDS */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 1: /* ORR */
        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 2: /* EOR */
        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 4: /* BIC */
    case 7: /* BICS */
        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 5: /* ORN */
        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 6: /* EON */
        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    default:
        assert(FALSE);
        break;
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) {
        gen_logic_CC(sf, tcg_rd);
    }
}
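/*
 * The special case above covers the canonical register-move spellings:
 * "MOV Xd, Xm" assembles as "ORR Xd, XZR, Xm" (opc=1, rn=31, no shift)
 * and "MVN Xd, Xm" is the same form with N (invert) set.
 */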
4860 * Add/subtract (extended register)
4862 * 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0|
4863 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
4864 * |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd |
4865 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
4867 * sf: 0 -> 32bit, 1 -> 64bit
4868 * op: 0 -> add , 1 -> sub
4871 * option: extension type (see DecodeRegExtend)
4872 * imm3: optional shift to Rm
4874 * Rd = Rn + LSL(extend(Rm), amount)
4876 static void disas_add_sub_ext_reg(DisasContext
*s
, uint32_t insn
)
4878 int rd
= extract32(insn
, 0, 5);
4879 int rn
= extract32(insn
, 5, 5);
4880 int imm3
= extract32(insn
, 10, 3);
4881 int option
= extract32(insn
, 13, 3);
4882 int rm
= extract32(insn
, 16, 5);
4883 int opt
= extract32(insn
, 22, 2);
4884 bool setflags
= extract32(insn
, 29, 1);
4885 bool sub_op
= extract32(insn
, 30, 1);
4886 bool sf
= extract32(insn
, 31, 1);
4888 TCGv_i64 tcg_rm
, tcg_rn
; /* temps */
4890 TCGv_i64 tcg_result
;
4892 if (imm3
> 4 || opt
!= 0) {
4893 unallocated_encoding(s
);
4897 /* non-flag setting ops may use SP */
4899 tcg_rd
= cpu_reg_sp(s
, rd
);
4901 tcg_rd
= cpu_reg(s
, rd
);
4903 tcg_rn
= read_cpu_reg_sp(s
, rn
, sf
);
4905 tcg_rm
= read_cpu_reg(s
, rm
, sf
);
4906 ext_and_shift_reg(tcg_rm
, tcg_rm
, option
, imm3
);
4908 tcg_result
= tcg_temp_new_i64();
4912 tcg_gen_sub_i64(tcg_result
, tcg_rn
, tcg_rm
);
4914 tcg_gen_add_i64(tcg_result
, tcg_rn
, tcg_rm
);
4918 gen_sub_CC(sf
, tcg_result
, tcg_rn
, tcg_rm
);
4920 gen_add_CC(sf
, tcg_result
, tcg_rn
, tcg_rm
);
4925 tcg_gen_mov_i64(tcg_rd
, tcg_result
);
4927 tcg_gen_ext32u_i64(tcg_rd
, tcg_result
);
4930 tcg_temp_free_i64(tcg_result
);
4934 * Add/subtract (shifted register)
4936 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
4937 * +--+--+--+-----------+-----+--+-------+---------+------+------+
4938 * |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd |
4939 * +--+--+--+-----------+-----+--+-------+---------+------+------+
4941 * sf: 0 -> 32bit, 1 -> 64bit
4942 * op: 0 -> add , 1 -> sub
4944 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
4945 * imm6: Shift amount to apply to Rm before the add/sub
4947 static void disas_add_sub_reg(DisasContext
*s
, uint32_t insn
)
4949 int rd
= extract32(insn
, 0, 5);
4950 int rn
= extract32(insn
, 5, 5);
4951 int imm6
= extract32(insn
, 10, 6);
4952 int rm
= extract32(insn
, 16, 5);
4953 int shift_type
= extract32(insn
, 22, 2);
4954 bool setflags
= extract32(insn
, 29, 1);
4955 bool sub_op
= extract32(insn
, 30, 1);
4956 bool sf
= extract32(insn
, 31, 1);
4958 TCGv_i64 tcg_rd
= cpu_reg(s
, rd
);
4959 TCGv_i64 tcg_rn
, tcg_rm
;
4960 TCGv_i64 tcg_result
;
4962 if ((shift_type
== 3) || (!sf
&& (imm6
> 31))) {
4963 unallocated_encoding(s
);
4967 tcg_rn
= read_cpu_reg(s
, rn
, sf
);
4968 tcg_rm
= read_cpu_reg(s
, rm
, sf
);
4970 shift_reg_imm(tcg_rm
, tcg_rm
, sf
, shift_type
, imm6
);
4972 tcg_result
= tcg_temp_new_i64();
4976 tcg_gen_sub_i64(tcg_result
, tcg_rn
, tcg_rm
);
4978 tcg_gen_add_i64(tcg_result
, tcg_rn
, tcg_rm
);
4982 gen_sub_CC(sf
, tcg_result
, tcg_rn
, tcg_rm
);
4984 gen_add_CC(sf
, tcg_result
, tcg_rn
, tcg_rm
);
4989 tcg_gen_mov_i64(tcg_rd
, tcg_result
);
4991 tcg_gen_ext32u_i64(tcg_rd
, tcg_result
);
4994 tcg_temp_free_i64(tcg_result
);
4997 /* Data-processing (3 source)
4999 * 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0
5000 * +--+------+-----------+------+------+----+------+------+------+
5001 * |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd |
5002 * +--+------+-----------+------+------+----+------+------+------+
5004 static void disas_data_proc_3src(DisasContext
*s
, uint32_t insn
)
5006 int rd
= extract32(insn
, 0, 5);
5007 int rn
= extract32(insn
, 5, 5);
5008 int ra
= extract32(insn
, 10, 5);
5009 int rm
= extract32(insn
, 16, 5);
5010 int op_id
= (extract32(insn
, 29, 3) << 4) |
5011 (extract32(insn
, 21, 3) << 1) |
5012 extract32(insn
, 15, 1);
5013 bool sf
= extract32(insn
, 31, 1);
5014 bool is_sub
= extract32(op_id
, 0, 1);
5015 bool is_high
= extract32(op_id
, 2, 1);
5016 bool is_signed
= false;
5021 /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
5023 case 0x42: /* SMADDL */
5024 case 0x43: /* SMSUBL */
5025 case 0x44: /* SMULH */
5028 case 0x0: /* MADD (32bit) */
5029 case 0x1: /* MSUB (32bit) */
5030 case 0x40: /* MADD (64bit) */
5031 case 0x41: /* MSUB (64bit) */
5032 case 0x4a: /* UMADDL */
5033 case 0x4b: /* UMSUBL */
5034 case 0x4c: /* UMULH */
5037 unallocated_encoding(s
);
5042 TCGv_i64 low_bits
= tcg_temp_new_i64(); /* low bits discarded */
5043 TCGv_i64 tcg_rd
= cpu_reg(s
, rd
);
5044 TCGv_i64 tcg_rn
= cpu_reg(s
, rn
);
5045 TCGv_i64 tcg_rm
= cpu_reg(s
, rm
);
5048 tcg_gen_muls2_i64(low_bits
, tcg_rd
, tcg_rn
, tcg_rm
);
5050 tcg_gen_mulu2_i64(low_bits
, tcg_rd
, tcg_rn
, tcg_rm
);
5053 tcg_temp_free_i64(low_bits
);
5057 tcg_op1
= tcg_temp_new_i64();
5058 tcg_op2
= tcg_temp_new_i64();
5059 tcg_tmp
= tcg_temp_new_i64();
5062 tcg_gen_mov_i64(tcg_op1
, cpu_reg(s
, rn
));
5063 tcg_gen_mov_i64(tcg_op2
, cpu_reg(s
, rm
));
5066 tcg_gen_ext32s_i64(tcg_op1
, cpu_reg(s
, rn
));
5067 tcg_gen_ext32s_i64(tcg_op2
, cpu_reg(s
, rm
));
5069 tcg_gen_ext32u_i64(tcg_op1
, cpu_reg(s
, rn
));
5070 tcg_gen_ext32u_i64(tcg_op2
, cpu_reg(s
, rm
));
5074 if (ra
== 31 && !is_sub
) {
5075 /* Special-case MADD with rA == XZR; it is the standard MUL alias */
5076 tcg_gen_mul_i64(cpu_reg(s
, rd
), tcg_op1
, tcg_op2
);
5078 tcg_gen_mul_i64(tcg_tmp
, tcg_op1
, tcg_op2
);
5080 tcg_gen_sub_i64(cpu_reg(s
, rd
), cpu_reg(s
, ra
), tcg_tmp
);
5082 tcg_gen_add_i64(cpu_reg(s
, rd
), cpu_reg(s
, ra
), tcg_tmp
);
5087 tcg_gen_ext32u_i64(cpu_reg(s
, rd
), cpu_reg(s
, rd
));
5090 tcg_temp_free_i64(tcg_op1
);
5091 tcg_temp_free_i64(tcg_op2
);
5092 tcg_temp_free_i64(tcg_tmp
);
/* Add/subtract (with carry)
 *  31 30 29 28 27 26 25 24 23 22 21  20  16  15       10  9  5 4   0
 * +--+--+--+------------------------+------+-------------+------+-----+
 * |sf|op| S| 1  1  0  1  0  0  0  0 |  rm  | 0 0 0 0 0 0 |  Rn  |  Rd |
 * +--+--+--+------------------------+------+-------------+------+-----+
 */

static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, setflags, rm, rn, rd;
    TCGv_i64 tcg_y, tcg_rn, tcg_rd;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    setflags = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (op) {
        tcg_y = new_tmp_a64(s);
        tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
    } else {
        tcg_y = cpu_reg(s, rm);
    }

    if (setflags) {
        gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
    } else {
        gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
    }
}
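/*
 * SBC/SBCS reuse the ADC path by inverting the second operand:
 * Rd = Rn + NOT(Rm) + C is the architected subtract-with-carry result,
 * so only gen_adc/gen_adc_CC are needed here.
 */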
/*
 * Rotate right into flags
 *  31 30 29                21       15          10      5  4      0
 * +--+--+--+-----------------+--------+-----------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 |  imm6  | 0 0 0 0 1 |  Rn  |o2| mask |
 * +--+--+--+-----------------+--------+-----------+------+--+------+
 */
static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
{
    int mask = extract32(insn, 0, 4);
    int o2 = extract32(insn, 4, 1);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 15, 6);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i64 tcg_rn;
    TCGv_i32 nzcv;

    if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, 1);
    tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);

    nzcv = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(nzcv, tcg_rn);

    if (mask & 8) { /* N */
        tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
    }
    if (mask & 4) { /* Z */
        tcg_gen_not_i32(cpu_ZF, nzcv);
        tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
    }
    if (mask & 2) { /* C */
        tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
    }
    if (mask & 1) { /* V */
        tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
    }

    tcg_temp_free_i32(nzcv);
}
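/*
 * This is RMIF: the source register is rotated right by imm6 and bits
 * 3..0 of the result are copied into whichever of N/Z/C/V are selected
 * by the mask; e.g. "RMIF X1, #0, #0b0010" sets C from bit 1 of X1.
 */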
/*
 * Evaluate into flags
 *  31 30 29                21        15   14        10      5  4      0
 * +--+--+--+-----------------+---------+----+---------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 |  Rn  |o3| mask |
 * +--+--+--+-----------------+---------+----+---------+------+--+------+
 */
static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
{
    int o3_mask = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int o2 = extract32(insn, 15, 6);
    int sz = extract32(insn, 14, 1);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i32 tmp;
    int shift;

    if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
        !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }
    shift = sz ? 16 : 24;  /* SETF16 or SETF8 */

    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
    tcg_gen_shli_i32(cpu_NF, tmp, shift);
    tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
    tcg_temp_free_i32(tmp);
}
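/*
 * SETF8/SETF16 set N, Z and V from an 8- or 16-bit view of Wn: N is the
 * top bit of the field, Z is set if the field is zero, and V is
 * bit<size> XOR bit<size-1>; C is left unchanged. The shifts above place
 * exactly those bits where the QEMU flag variables expect them.
 */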
5209 /* Conditional compare (immediate / register)
5210 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
5211 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
5212 * |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv |
5213 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
5216 static void disas_cc(DisasContext
*s
, uint32_t insn
)
5218 unsigned int sf
, op
, y
, cond
, rn
, nzcv
, is_imm
;
5219 TCGv_i32 tcg_t0
, tcg_t1
, tcg_t2
;
5220 TCGv_i64 tcg_tmp
, tcg_y
, tcg_rn
;
5223 if (!extract32(insn
, 29, 1)) {
5224 unallocated_encoding(s
);
5227 if (insn
& (1 << 10 | 1 << 4)) {
5228 unallocated_encoding(s
);
5231 sf
= extract32(insn
, 31, 1);
5232 op
= extract32(insn
, 30, 1);
5233 is_imm
= extract32(insn
, 11, 1);
5234 y
= extract32(insn
, 16, 5); /* y = rm (reg) or imm5 (imm) */
5235 cond
= extract32(insn
, 12, 4);
5236 rn
= extract32(insn
, 5, 5);
5237 nzcv
= extract32(insn
, 0, 4);
5239 /* Set T0 = !COND. */
5240 tcg_t0
= tcg_temp_new_i32();
5241 arm_test_cc(&c
, cond
);
5242 tcg_gen_setcondi_i32(tcg_invert_cond(c
.cond
), tcg_t0
, c
.value
, 0);
5245 /* Load the arguments for the new comparison. */
5247 tcg_y
= new_tmp_a64(s
);
5248 tcg_gen_movi_i64(tcg_y
, y
);
5250 tcg_y
= cpu_reg(s
, y
);
5252 tcg_rn
= cpu_reg(s
, rn
);
5254 /* Set the flags for the new comparison. */
5255 tcg_tmp
= tcg_temp_new_i64();
5257 gen_sub_CC(sf
, tcg_tmp
, tcg_rn
, tcg_y
);
5259 gen_add_CC(sf
, tcg_tmp
, tcg_rn
, tcg_y
);
5261 tcg_temp_free_i64(tcg_tmp
);
5263 /* If COND was false, force the flags to #nzcv. Compute two masks
5264 * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
5265 * For tcg hosts that support ANDC, we can make do with just T1.
5266 * In either case, allow the tcg optimizer to delete any unused mask.
5268 tcg_t1
= tcg_temp_new_i32();
5269 tcg_t2
= tcg_temp_new_i32();
5270 tcg_gen_neg_i32(tcg_t1
, tcg_t0
);
5271 tcg_gen_subi_i32(tcg_t2
, tcg_t0
, 1);
5273 if (nzcv
& 8) { /* N */
5274 tcg_gen_or_i32(cpu_NF
, cpu_NF
, tcg_t1
);
5276 if (TCG_TARGET_HAS_andc_i32
) {
5277 tcg_gen_andc_i32(cpu_NF
, cpu_NF
, tcg_t1
);
5279 tcg_gen_and_i32(cpu_NF
, cpu_NF
, tcg_t2
);
5282 if (nzcv
& 4) { /* Z */
5283 if (TCG_TARGET_HAS_andc_i32
) {
5284 tcg_gen_andc_i32(cpu_ZF
, cpu_ZF
, tcg_t1
);
5286 tcg_gen_and_i32(cpu_ZF
, cpu_ZF
, tcg_t2
);
5289 tcg_gen_or_i32(cpu_ZF
, cpu_ZF
, tcg_t0
);
5291 if (nzcv
& 2) { /* C */
5292 tcg_gen_or_i32(cpu_CF
, cpu_CF
, tcg_t0
);
5294 if (TCG_TARGET_HAS_andc_i32
) {
5295 tcg_gen_andc_i32(cpu_CF
, cpu_CF
, tcg_t1
);
5297 tcg_gen_and_i32(cpu_CF
, cpu_CF
, tcg_t2
);
5300 if (nzcv
& 1) { /* V */
5301 tcg_gen_or_i32(cpu_VF
, cpu_VF
, tcg_t1
);
5303 if (TCG_TARGET_HAS_andc_i32
) {
5304 tcg_gen_andc_i32(cpu_VF
, cpu_VF
, tcg_t1
);
5306 tcg_gen_and_i32(cpu_VF
, cpu_VF
, tcg_t2
);
5309 tcg_temp_free_i32(tcg_t0
);
5310 tcg_temp_free_i32(tcg_t1
);
5311 tcg_temp_free_i32(tcg_t2
);
5314 /* Conditional select
5315 * 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0
5316 * +----+----+---+-----------------+------+------+-----+------+------+
5317 * | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd |
5318 * +----+----+---+-----------------+------+------+-----+------+------+
5320 static void disas_cond_select(DisasContext
*s
, uint32_t insn
)
5322 unsigned int sf
, else_inv
, rm
, cond
, else_inc
, rn
, rd
;
5323 TCGv_i64 tcg_rd
, zero
;
5326 if (extract32(insn
, 29, 1) || extract32(insn
, 11, 1)) {
5327 /* S == 1 or op2<1> == 1 */
5328 unallocated_encoding(s
);
5331 sf
= extract32(insn
, 31, 1);
5332 else_inv
= extract32(insn
, 30, 1);
5333 rm
= extract32(insn
, 16, 5);
5334 cond
= extract32(insn
, 12, 4);
5335 else_inc
= extract32(insn
, 10, 1);
5336 rn
= extract32(insn
, 5, 5);
5337 rd
= extract32(insn
, 0, 5);
5339 tcg_rd
= cpu_reg(s
, rd
);
5341 a64_test_cc(&c
, cond
);
5342 zero
= tcg_constant_i64(0);
5344 if (rn
== 31 && rm
== 31 && (else_inc
^ else_inv
)) {
5346 tcg_gen_setcond_i64(tcg_invert_cond(c
.cond
), tcg_rd
, c
.value
, zero
);
5348 tcg_gen_neg_i64(tcg_rd
, tcg_rd
);
5351 TCGv_i64 t_true
= cpu_reg(s
, rn
);
5352 TCGv_i64 t_false
= read_cpu_reg(s
, rm
, 1);
5353 if (else_inv
&& else_inc
) {
5354 tcg_gen_neg_i64(t_false
, t_false
);
5355 } else if (else_inv
) {
5356 tcg_gen_not_i64(t_false
, t_false
);
5357 } else if (else_inc
) {
5358 tcg_gen_addi_i64(t_false
, t_false
, 1);
5360 tcg_gen_movcond_i64(c
.cond
, tcg_rd
, c
.value
, zero
, t_true
, t_false
);
5366 tcg_gen_ext32u_i64(tcg_rd
, tcg_rd
);
5370 static void handle_clz(DisasContext
*s
, unsigned int sf
,
5371 unsigned int rn
, unsigned int rd
)
5373 TCGv_i64 tcg_rd
, tcg_rn
;
5374 tcg_rd
= cpu_reg(s
, rd
);
5375 tcg_rn
= cpu_reg(s
, rn
);
5378 tcg_gen_clzi_i64(tcg_rd
, tcg_rn
, 64);
5380 TCGv_i32 tcg_tmp32
= tcg_temp_new_i32();
5381 tcg_gen_extrl_i64_i32(tcg_tmp32
, tcg_rn
);
5382 tcg_gen_clzi_i32(tcg_tmp32
, tcg_tmp32
, 32);
5383 tcg_gen_extu_i32_i64(tcg_rd
, tcg_tmp32
);
5384 tcg_temp_free_i32(tcg_tmp32
);
5388 static void handle_cls(DisasContext
*s
, unsigned int sf
,
5389 unsigned int rn
, unsigned int rd
)
5391 TCGv_i64 tcg_rd
, tcg_rn
;
5392 tcg_rd
= cpu_reg(s
, rd
);
5393 tcg_rn
= cpu_reg(s
, rn
);
5396 tcg_gen_clrsb_i64(tcg_rd
, tcg_rn
);
5398 TCGv_i32 tcg_tmp32
= tcg_temp_new_i32();
5399 tcg_gen_extrl_i64_i32(tcg_tmp32
, tcg_rn
);
5400 tcg_gen_clrsb_i32(tcg_tmp32
, tcg_tmp32
);
5401 tcg_gen_extu_i32_i64(tcg_rd
, tcg_tmp32
);
5402 tcg_temp_free_i32(tcg_tmp32
);
5406 static void handle_rbit(DisasContext
*s
, unsigned int sf
,
5407 unsigned int rn
, unsigned int rd
)
5409 TCGv_i64 tcg_rd
, tcg_rn
;
5410 tcg_rd
= cpu_reg(s
, rd
);
5411 tcg_rn
= cpu_reg(s
, rn
);
5414 gen_helper_rbit64(tcg_rd
, tcg_rn
);
5416 TCGv_i32 tcg_tmp32
= tcg_temp_new_i32();
5417 tcg_gen_extrl_i64_i32(tcg_tmp32
, tcg_rn
);
5418 gen_helper_rbit(tcg_tmp32
, tcg_tmp32
);
5419 tcg_gen_extu_i32_i64(tcg_rd
, tcg_tmp32
);
5420 tcg_temp_free_i32(tcg_tmp32
);
5424 /* REV with sf==1, opcode==3 ("REV64") */
5425 static void handle_rev64(DisasContext
*s
, unsigned int sf
,
5426 unsigned int rn
, unsigned int rd
)
5429 unallocated_encoding(s
);
5432 tcg_gen_bswap64_i64(cpu_reg(s
, rd
), cpu_reg(s
, rn
));
5435 /* REV with sf==0, opcode==2
5436 * REV32 (sf==1, opcode==2)
5438 static void handle_rev32(DisasContext
*s
, unsigned int sf
,
5439 unsigned int rn
, unsigned int rd
)
5441 TCGv_i64 tcg_rd
= cpu_reg(s
, rd
);
5442 TCGv_i64 tcg_rn
= cpu_reg(s
, rn
);
5445 tcg_gen_bswap64_i64(tcg_rd
, tcg_rn
);
5446 tcg_gen_rotri_i64(tcg_rd
, tcg_rd
, 32);
5448 tcg_gen_bswap32_i64(tcg_rd
, tcg_rn
, TCG_BSWAP_OZ
);
5452 /* REV16 (opcode==1) */
5453 static void handle_rev16(DisasContext
*s
, unsigned int sf
,
5454 unsigned int rn
, unsigned int rd
)
5456 TCGv_i64 tcg_rd
= cpu_reg(s
, rd
);
5457 TCGv_i64 tcg_tmp
= tcg_temp_new_i64();
5458 TCGv_i64 tcg_rn
= read_cpu_reg(s
, rn
, sf
);
5459 TCGv_i64 mask
= tcg_constant_i64(sf
? 0x00ff00ff00ff00ffull
: 0x00ff00ff);
5461 tcg_gen_shri_i64(tcg_tmp
, tcg_rn
, 8);
5462 tcg_gen_and_i64(tcg_rd
, tcg_rn
, mask
);
5463 tcg_gen_and_i64(tcg_tmp
, tcg_tmp
, mask
);
5464 tcg_gen_shli_i64(tcg_rd
, tcg_rd
, 8);
5465 tcg_gen_or_i64(tcg_rd
, tcg_rd
, tcg_tmp
);
5467 tcg_temp_free_i64(tcg_tmp
);
5470 /* Data-processing (1 source)
5471 * 31 30 29 28 21 20 16 15 10 9 5 4 0
5472 * +----+---+---+-----------------+---------+--------+------+------+
5473 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd |
5474 * +----+---+---+-----------------+---------+--------+------+------+
5476 static void disas_data_proc_1src(DisasContext
*s
, uint32_t insn
)
5478 unsigned int sf
, opcode
, opcode2
, rn
, rd
;
5481 if (extract32(insn
, 29, 1)) {
5482 unallocated_encoding(s
);
5486 sf
= extract32(insn
, 31, 1);
5487 opcode
= extract32(insn
, 10, 6);
5488 opcode2
= extract32(insn
, 16, 5);
5489 rn
= extract32(insn
, 5, 5);
5490 rd
= extract32(insn
, 0, 5);
5492 #define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))
5494 switch (MAP(sf
, opcode2
, opcode
)) {
5495 case MAP(0, 0x00, 0x00): /* RBIT */
5496 case MAP(1, 0x00, 0x00):
5497 handle_rbit(s
, sf
, rn
, rd
);
5499 case MAP(0, 0x00, 0x01): /* REV16 */
5500 case MAP(1, 0x00, 0x01):
5501 handle_rev16(s
, sf
, rn
, rd
);
5503 case MAP(0, 0x00, 0x02): /* REV/REV32 */
5504 case MAP(1, 0x00, 0x02):
5505 handle_rev32(s
, sf
, rn
, rd
);
5507 case MAP(1, 0x00, 0x03): /* REV64 */
5508 handle_rev64(s
, sf
, rn
, rd
);
5510 case MAP(0, 0x00, 0x04): /* CLZ */
5511 case MAP(1, 0x00, 0x04):
5512 handle_clz(s
, sf
, rn
, rd
);
5514 case MAP(0, 0x00, 0x05): /* CLS */
5515 case MAP(1, 0x00, 0x05):
5516 handle_cls(s
, sf
, rn
, rd
);
5518 case MAP(1, 0x01, 0x00): /* PACIA */
5519 if (s
->pauth_active
) {
5520 tcg_rd
= cpu_reg(s
, rd
);
5521 gen_helper_pacia(tcg_rd
, cpu_env
, tcg_rd
, cpu_reg_sp(s
, rn
));
5522 } else if (!dc_isar_feature(aa64_pauth
, s
)) {
5523 goto do_unallocated
;
5526 case MAP(1, 0x01, 0x01): /* PACIB */
5527 if (s
->pauth_active
) {
5528 tcg_rd
= cpu_reg(s
, rd
);
5529 gen_helper_pacib(tcg_rd
, cpu_env
, tcg_rd
, cpu_reg_sp(s
, rn
));
5530 } else if (!dc_isar_feature(aa64_pauth
, s
)) {
5531 goto do_unallocated
;
5534 case MAP(1, 0x01, 0x02): /* PACDA */
5535 if (s
->pauth_active
) {
5536 tcg_rd
= cpu_reg(s
, rd
);
5537 gen_helper_pacda(tcg_rd
, cpu_env
, tcg_rd
, cpu_reg_sp(s
, rn
));
5538 } else if (!dc_isar_feature(aa64_pauth
, s
)) {
5539 goto do_unallocated
;
5542 case MAP(1, 0x01, 0x03): /* PACDB */
5543 if (s
->pauth_active
) {
5544 tcg_rd
= cpu_reg(s
, rd
);
5545 gen_helper_pacdb(tcg_rd
, cpu_env
, tcg_rd
, cpu_reg_sp(s
, rn
));
5546 } else if (!dc_isar_feature(aa64_pauth
, s
)) {
5547 goto do_unallocated
;
5550 case MAP(1, 0x01, 0x04): /* AUTIA */
5551 if (s
->pauth_active
) {
5552 tcg_rd
= cpu_reg(s
, rd
);
5553 gen_helper_autia(tcg_rd
, cpu_env
, tcg_rd
, cpu_reg_sp(s
, rn
));
5554 } else if (!dc_isar_feature(aa64_pauth
, s
)) {
5555 goto do_unallocated
;
5558 case MAP(1, 0x01, 0x05): /* AUTIB */
5559 if (s
->pauth_active
) {
5560 tcg_rd
= cpu_reg(s
, rd
);
5561 gen_helper_autib(tcg_rd
, cpu_env
, tcg_rd
, cpu_reg_sp(s
, rn
));
5562 } else if (!dc_isar_feature(aa64_pauth
, s
)) {
5563 goto do_unallocated
;
5566 case MAP(1, 0x01, 0x06): /* AUTDA */
5567 if (s
->pauth_active
) {
5568 tcg_rd
= cpu_reg(s
, rd
);
5569 gen_helper_autda(tcg_rd
, cpu_env
, tcg_rd
, cpu_reg_sp(s
, rn
));
5570 } else if (!dc_isar_feature(aa64_pauth
, s
)) {
5571 goto do_unallocated
;
5574 case MAP(1, 0x01, 0x07): /* AUTDB */
5575 if (s
->pauth_active
) {
5576 tcg_rd
= cpu_reg(s
, rd
);
5577 gen_helper_autdb(tcg_rd
, cpu_env
, tcg_rd
, cpu_reg_sp(s
, rn
));
5578 } else if (!dc_isar_feature(aa64_pauth
, s
)) {
5579 goto do_unallocated
;
5582 case MAP(1, 0x01, 0x08): /* PACIZA */
5583 if (!dc_isar_feature(aa64_pauth
, s
) || rn
!= 31) {
5584 goto do_unallocated
;
5585 } else if (s
->pauth_active
) {
5586 tcg_rd
= cpu_reg(s
, rd
);
5587 gen_helper_pacia(tcg_rd
, cpu_env
, tcg_rd
, new_tmp_a64_zero(s
));
5590 case MAP(1, 0x01, 0x09): /* PACIZB */
5591 if (!dc_isar_feature(aa64_pauth
, s
) || rn
!= 31) {
5592 goto do_unallocated
;
5593 } else if (s
->pauth_active
) {
5594 tcg_rd
= cpu_reg(s
, rd
);
5595 gen_helper_pacib(tcg_rd
, cpu_env
, tcg_rd
, new_tmp_a64_zero(s
));
5598 case MAP(1, 0x01, 0x0a): /* PACDZA */
5599 if (!dc_isar_feature(aa64_pauth
, s
) || rn
!= 31) {
5600 goto do_unallocated
;
5601 } else if (s
->pauth_active
) {
5602 tcg_rd
= cpu_reg(s
, rd
);
5603 gen_helper_pacda(tcg_rd
, cpu_env
, tcg_rd
, new_tmp_a64_zero(s
));
5606 case MAP(1, 0x01, 0x0b): /* PACDZB */
5607 if (!dc_isar_feature(aa64_pauth
, s
) || rn
!= 31) {
5608 goto do_unallocated
;
5609 } else if (s
->pauth_active
) {
5610 tcg_rd
= cpu_reg(s
, rd
);
5611 gen_helper_pacdb(tcg_rd
, cpu_env
, tcg_rd
, new_tmp_a64_zero(s
));
5614 case MAP(1, 0x01, 0x0c): /* AUTIZA */
5615 if (!dc_isar_feature(aa64_pauth
, s
) || rn
!= 31) {
5616 goto do_unallocated
;
5617 } else if (s
->pauth_active
) {
5618 tcg_rd
= cpu_reg(s
, rd
);
5619 gen_helper_autia(tcg_rd
, cpu_env
, tcg_rd
, new_tmp_a64_zero(s
));
5622 case MAP(1, 0x01, 0x0d): /* AUTIZB */
5623 if (!dc_isar_feature(aa64_pauth
, s
) || rn
!= 31) {
5624 goto do_unallocated
;
5625 } else if (s
->pauth_active
) {
5626 tcg_rd
= cpu_reg(s
, rd
);
5627 gen_helper_autib(tcg_rd
, cpu_env
, tcg_rd
, new_tmp_a64_zero(s
));
5630 case MAP(1, 0x01, 0x0e): /* AUTDZA */
5631 if (!dc_isar_feature(aa64_pauth
, s
) || rn
!= 31) {
5632 goto do_unallocated
;
5633 } else if (s
->pauth_active
) {
5634 tcg_rd
= cpu_reg(s
, rd
);
5635 gen_helper_autda(tcg_rd
, cpu_env
, tcg_rd
, new_tmp_a64_zero(s
));
5638 case MAP(1, 0x01, 0x0f): /* AUTDZB */
5639 if (!dc_isar_feature(aa64_pauth
, s
) || rn
!= 31) {
5640 goto do_unallocated
;
5641 } else if (s
->pauth_active
) {
5642 tcg_rd
= cpu_reg(s
, rd
);
5643 gen_helper_autdb(tcg_rd
, cpu_env
, tcg_rd
, new_tmp_a64_zero(s
));
5646 case MAP(1, 0x01, 0x10): /* XPACI */
5647 if (!dc_isar_feature(aa64_pauth
, s
) || rn
!= 31) {
5648 goto do_unallocated
;
5649 } else if (s
->pauth_active
) {
5650 tcg_rd
= cpu_reg(s
, rd
);
5651 gen_helper_xpaci(tcg_rd
, cpu_env
, tcg_rd
);
5654 case MAP(1, 0x01, 0x11): /* XPACD */
5655 if (!dc_isar_feature(aa64_pauth
, s
) || rn
!= 31) {
5656 goto do_unallocated
;
5657 } else if (s
->pauth_active
) {
5658 tcg_rd
= cpu_reg(s
, rd
);
5659 gen_helper_xpacd(tcg_rd
, cpu_env
, tcg_rd
);
5664 unallocated_encoding(s
);
5671 static void handle_div(DisasContext
*s
, bool is_signed
, unsigned int sf
,
5672 unsigned int rm
, unsigned int rn
, unsigned int rd
)
5674 TCGv_i64 tcg_n
, tcg_m
, tcg_rd
;
5675 tcg_rd
= cpu_reg(s
, rd
);
5677 if (!sf
&& is_signed
) {
5678 tcg_n
= new_tmp_a64(s
);
5679 tcg_m
= new_tmp_a64(s
);
5680 tcg_gen_ext32s_i64(tcg_n
, cpu_reg(s
, rn
));
5681 tcg_gen_ext32s_i64(tcg_m
, cpu_reg(s
, rm
));
5683 tcg_n
= read_cpu_reg(s
, rn
, sf
);
5684 tcg_m
= read_cpu_reg(s
, rm
, sf
);
5688 gen_helper_sdiv64(tcg_rd
, tcg_n
, tcg_m
);
5690 gen_helper_udiv64(tcg_rd
, tcg_n
, tcg_m
);
5693 if (!sf
) { /* zero extend final result */
5694 tcg_gen_ext32u_i64(tcg_rd
, tcg_rd
);
5698 /* LSLV, LSRV, ASRV, RORV */
5699 static void handle_shift_reg(DisasContext
*s
,
5700 enum a64_shift_type shift_type
, unsigned int sf
,
5701 unsigned int rm
, unsigned int rn
, unsigned int rd
)
5703 TCGv_i64 tcg_shift
= tcg_temp_new_i64();
5704 TCGv_i64 tcg_rd
= cpu_reg(s
, rd
);
5705 TCGv_i64 tcg_rn
= read_cpu_reg(s
, rn
, sf
);
5707 tcg_gen_andi_i64(tcg_shift
, cpu_reg(s
, rm
), sf
? 63 : 31);
5708 shift_reg(tcg_rd
, tcg_rn
, sf
, shift_type
, tcg_shift
);
5709 tcg_temp_free_i64(tcg_shift
);
5712 /* CRC32[BHWX], CRC32C[BHWX] */
5713 static void handle_crc32(DisasContext
*s
,
5714 unsigned int sf
, unsigned int sz
, bool crc32c
,
5715 unsigned int rm
, unsigned int rn
, unsigned int rd
)
5717 TCGv_i64 tcg_acc
, tcg_val
;
5720 if (!dc_isar_feature(aa64_crc32
, s
)
5721 || (sf
== 1 && sz
!= 3)
5722 || (sf
== 0 && sz
== 3)) {
5723 unallocated_encoding(s
);
5728 tcg_val
= cpu_reg(s
, rm
);
5742 g_assert_not_reached();
5744 tcg_val
= new_tmp_a64(s
);
5745 tcg_gen_andi_i64(tcg_val
, cpu_reg(s
, rm
), mask
);
5748 tcg_acc
= cpu_reg(s
, rn
);
5749 tcg_bytes
= tcg_constant_i32(1 << sz
);
5752 gen_helper_crc32c_64(cpu_reg(s
, rd
), tcg_acc
, tcg_val
, tcg_bytes
);
5754 gen_helper_crc32_64(cpu_reg(s
, rd
), tcg_acc
, tcg_val
, tcg_bytes
);
5758 /* Data-processing (2 source)
5759 * 31 30 29 28 21 20 16 15 10 9 5 4 0
5760 * +----+---+---+-----------------+------+--------+------+------+
5761 * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd |
5762 * +----+---+---+-----------------+------+--------+------+------+
5764 static void disas_data_proc_2src(DisasContext
*s
, uint32_t insn
)
5766 unsigned int sf
, rm
, opcode
, rn
, rd
, setflag
;
5767 sf
= extract32(insn
, 31, 1);
5768 setflag
= extract32(insn
, 29, 1);
5769 rm
= extract32(insn
, 16, 5);
5770 opcode
= extract32(insn
, 10, 6);
5771 rn
= extract32(insn
, 5, 5);
5772 rd
= extract32(insn
, 0, 5);
5774 if (setflag
&& opcode
!= 0) {
5775 unallocated_encoding(s
);
5780 case 0: /* SUBP(S) */
5781 if (sf
== 0 || !dc_isar_feature(aa64_mte_insn_reg
, s
)) {
5782 goto do_unallocated
;
5784 TCGv_i64 tcg_n
, tcg_m
, tcg_d
;
5786 tcg_n
= read_cpu_reg_sp(s
, rn
, true);
5787 tcg_m
= read_cpu_reg_sp(s
, rm
, true);
5788 tcg_gen_sextract_i64(tcg_n
, tcg_n
, 0, 56);
5789 tcg_gen_sextract_i64(tcg_m
, tcg_m
, 0, 56);
5790 tcg_d
= cpu_reg(s
, rd
);
5793 gen_sub_CC(true, tcg_d
, tcg_n
, tcg_m
);
5795 tcg_gen_sub_i64(tcg_d
, tcg_n
, tcg_m
);
5800 handle_div(s
, false, sf
, rm
, rn
, rd
);
5803 handle_div(s
, true, sf
, rm
, rn
, rd
);
5806 if (sf
== 0 || !dc_isar_feature(aa64_mte_insn_reg
, s
)) {
5807 goto do_unallocated
;
5810 gen_helper_irg(cpu_reg_sp(s
, rd
), cpu_env
,
5811 cpu_reg_sp(s
, rn
), cpu_reg(s
, rm
));
5813 gen_address_with_allocation_tag0(cpu_reg_sp(s
, rd
),
5818 if (sf
== 0 || !dc_isar_feature(aa64_mte_insn_reg
, s
)) {
5819 goto do_unallocated
;
5821 TCGv_i64 t
= tcg_temp_new_i64();
5823 tcg_gen_extract_i64(t
, cpu_reg_sp(s
, rn
), 56, 4);
5824 tcg_gen_shl_i64(t
, tcg_constant_i64(1), t
);
5825 tcg_gen_or_i64(cpu_reg(s
, rd
), cpu_reg(s
, rm
), t
);
5827 tcg_temp_free_i64(t
);
5831 handle_shift_reg(s
, A64_SHIFT_TYPE_LSL
, sf
, rm
, rn
, rd
);
5834 handle_shift_reg(s
, A64_SHIFT_TYPE_LSR
, sf
, rm
, rn
, rd
);
5837 handle_shift_reg(s
, A64_SHIFT_TYPE_ASR
, sf
, rm
, rn
, rd
);
5840 handle_shift_reg(s
, A64_SHIFT_TYPE_ROR
, sf
, rm
, rn
, rd
);
5842 case 12: /* PACGA */
5843 if (sf
== 0 || !dc_isar_feature(aa64_pauth
, s
)) {
5844 goto do_unallocated
;
5846 gen_helper_pacga(cpu_reg(s
, rd
), cpu_env
,
5847 cpu_reg(s
, rn
), cpu_reg_sp(s
, rm
));
5856 case 23: /* CRC32 */
5858 int sz
= extract32(opcode
, 0, 2);
5859 bool crc32c
= extract32(opcode
, 2, 1);
5860 handle_crc32(s
, sf
, sz
, crc32c
, rm
, rn
, rd
);
5865 unallocated_encoding(s
);
5871 * Data processing - register
5872 * 31 30 29 28 25 21 20 16 10 0
5873 * +--+---+--+---+-------+-----+-------+-------+---------+
5874 * | |op0| |op1| 1 0 1 | op2 | | op3 | |
5875 * +--+---+--+---+-------+-----+-------+-------+---------+
5877 static void disas_data_proc_reg(DisasContext
*s
, uint32_t insn
)
5879 int op0
= extract32(insn
, 30, 1);
5880 int op1
= extract32(insn
, 28, 1);
5881 int op2
= extract32(insn
, 21, 4);
5882 int op3
= extract32(insn
, 10, 6);
5887 /* Add/sub (extended register) */
5888 disas_add_sub_ext_reg(s
, insn
);
5890 /* Add/sub (shifted register) */
5891 disas_add_sub_reg(s
, insn
);
5894 /* Logical (shifted register) */
5895 disas_logic_reg(s
, insn
);
5903 case 0x00: /* Add/subtract (with carry) */
5904 disas_adc_sbc(s
, insn
);
5907 case 0x01: /* Rotate right into flags */
5909 disas_rotate_right_into_flags(s
, insn
);
5912 case 0x02: /* Evaluate into flags */
5916 disas_evaluate_into_flags(s
, insn
);
5920 goto do_unallocated
;
5924 case 0x2: /* Conditional compare */
5925 disas_cc(s
, insn
); /* both imm and reg forms */
5928 case 0x4: /* Conditional select */
5929 disas_cond_select(s
, insn
);
5932 case 0x6: /* Data-processing */
5933 if (op0
) { /* (1 source) */
5934 disas_data_proc_1src(s
, insn
);
5935 } else { /* (2 source) */
5936 disas_data_proc_2src(s
, insn
);
5939 case 0x8 ... 0xf: /* (3 source) */
5940 disas_data_proc_3src(s
, insn
);
5945 unallocated_encoding(s
);
5950 static void handle_fp_compare(DisasContext
*s
, int size
,
5951 unsigned int rn
, unsigned int rm
,
5952 bool cmp_with_zero
, bool signal_all_nans
)
5954 TCGv_i64 tcg_flags
= tcg_temp_new_i64();
5955 TCGv_ptr fpst
= fpstatus_ptr(size
== MO_16
? FPST_FPCR_F16
: FPST_FPCR
);
5957 if (size
== MO_64
) {
5958 TCGv_i64 tcg_vn
, tcg_vm
;
5960 tcg_vn
= read_fp_dreg(s
, rn
);
5961 if (cmp_with_zero
) {
5962 tcg_vm
= tcg_constant_i64(0);
5964 tcg_vm
= read_fp_dreg(s
, rm
);
5966 if (signal_all_nans
) {
5967 gen_helper_vfp_cmped_a64(tcg_flags
, tcg_vn
, tcg_vm
, fpst
);
5969 gen_helper_vfp_cmpd_a64(tcg_flags
, tcg_vn
, tcg_vm
, fpst
);
5971 tcg_temp_free_i64(tcg_vn
);
5972 tcg_temp_free_i64(tcg_vm
);
5974 TCGv_i32 tcg_vn
= tcg_temp_new_i32();
5975 TCGv_i32 tcg_vm
= tcg_temp_new_i32();
5977 read_vec_element_i32(s
, tcg_vn
, rn
, 0, size
);
5978 if (cmp_with_zero
) {
5979 tcg_gen_movi_i32(tcg_vm
, 0);
5981 read_vec_element_i32(s
, tcg_vm
, rm
, 0, size
);
5986 if (signal_all_nans
) {
5987 gen_helper_vfp_cmpes_a64(tcg_flags
, tcg_vn
, tcg_vm
, fpst
);
5989 gen_helper_vfp_cmps_a64(tcg_flags
, tcg_vn
, tcg_vm
, fpst
);
5993 if (signal_all_nans
) {
5994 gen_helper_vfp_cmpeh_a64(tcg_flags
, tcg_vn
, tcg_vm
, fpst
);
5996 gen_helper_vfp_cmph_a64(tcg_flags
, tcg_vn
, tcg_vm
, fpst
);
6000 g_assert_not_reached();
6003 tcg_temp_free_i32(tcg_vn
);
6004 tcg_temp_free_i32(tcg_vm
);
6007 tcg_temp_free_ptr(fpst
);
6009 gen_set_nzcv(tcg_flags
);
6011 tcg_temp_free_i64(tcg_flags
);
6014 /* Floating point compare
6015 * 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0
6016 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
6017 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 |
6018 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
6020 static void disas_fp_compare(DisasContext
*s
, uint32_t insn
)
6022 unsigned int mos
, type
, rm
, op
, rn
, opc
, op2r
;
6025 mos
= extract32(insn
, 29, 3);
6026 type
= extract32(insn
, 22, 2);
6027 rm
= extract32(insn
, 16, 5);
6028 op
= extract32(insn
, 14, 2);
6029 rn
= extract32(insn
, 5, 5);
6030 opc
= extract32(insn
, 3, 2);
6031 op2r
= extract32(insn
, 0, 3);
6033 if (mos
|| op
|| op2r
) {
6034 unallocated_encoding(s
);
6047 if (dc_isar_feature(aa64_fp16
, s
)) {
6052 unallocated_encoding(s
);
6056 if (!fp_access_check(s
)) {
6060 handle_fp_compare(s
, size
, rn
, rm
, opc
& 1, opc
& 2);
6063 /* Floating point conditional compare
6064 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
6065 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
6066 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv |
6067 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
6069 static void disas_fp_ccomp(DisasContext
*s
, uint32_t insn
)
6071 unsigned int mos
, type
, rm
, cond
, rn
, op
, nzcv
;
6072 TCGLabel
*label_continue
= NULL
;
6075 mos
= extract32(insn
, 29, 3);
6076 type
= extract32(insn
, 22, 2);
6077 rm
= extract32(insn
, 16, 5);
6078 cond
= extract32(insn
, 12, 4);
6079 rn
= extract32(insn
, 5, 5);
6080 op
= extract32(insn
, 4, 1);
6081 nzcv
= extract32(insn
, 0, 4);
6084 unallocated_encoding(s
);
6097 if (dc_isar_feature(aa64_fp16
, s
)) {
6102 unallocated_encoding(s
);
6106 if (!fp_access_check(s
)) {
6110 if (cond
< 0x0e) { /* not always */
6111 TCGLabel
*label_match
= gen_new_label();
6112 label_continue
= gen_new_label();
6113 arm_gen_test_cc(cond
, label_match
);
6115 gen_set_nzcv(tcg_constant_i64(nzcv
<< 28));
6116 tcg_gen_br(label_continue
);
6117 gen_set_label(label_match
);
6120 handle_fp_compare(s
, size
, rn
, rm
, false, op
);
6123 gen_set_label(label_continue
);
6127 /* Floating point conditional select
6128 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
6129 * +---+---+---+-----------+------+---+------+------+-----+------+------+
6130 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 1 1 | Rn | Rd |
6131 * +---+---+---+-----------+------+---+------+------+-----+------+------+
6133 static void disas_fp_csel(DisasContext
*s
, uint32_t insn
)
6135 unsigned int mos
, type
, rm
, cond
, rn
, rd
;
6136 TCGv_i64 t_true
, t_false
;
6140 mos
= extract32(insn
, 29, 3);
6141 type
= extract32(insn
, 22, 2);
6142 rm
= extract32(insn
, 16, 5);
6143 cond
= extract32(insn
, 12, 4);
6144 rn
= extract32(insn
, 5, 5);
6145 rd
= extract32(insn
, 0, 5);
6148 unallocated_encoding(s
);
6161 if (dc_isar_feature(aa64_fp16
, s
)) {
6166 unallocated_encoding(s
);
6170 if (!fp_access_check(s
)) {
6174 /* Zero extend sreg & hreg inputs to 64 bits now. */
6175 t_true
= tcg_temp_new_i64();
6176 t_false
= tcg_temp_new_i64();
6177 read_vec_element(s
, t_true
, rn
, 0, sz
);
6178 read_vec_element(s
, t_false
, rm
, 0, sz
);
6180 a64_test_cc(&c
, cond
);
6181 tcg_gen_movcond_i64(c
.cond
, t_true
, c
.value
, tcg_constant_i64(0),
6183 tcg_temp_free_i64(t_false
);
6186 /* Note that sregs & hregs write back zeros to the high bits,
6187 and we've already done the zero-extension. */
6188 write_fp_dreg(s
, rd
, t_true
);
6189 tcg_temp_free_i64(t_true
);
6192 /* Floating-point data-processing (1 source) - half precision */
6193 static void handle_fp_1src_half(DisasContext
*s
, int opcode
, int rd
, int rn
)
6195 TCGv_ptr fpst
= NULL
;
6196 TCGv_i32 tcg_op
= read_fp_hreg(s
, rn
);
6197 TCGv_i32 tcg_res
= tcg_temp_new_i32();
6200 case 0x0: /* FMOV */
6201 tcg_gen_mov_i32(tcg_res
, tcg_op
);
6203 case 0x1: /* FABS */
6204 tcg_gen_andi_i32(tcg_res
, tcg_op
, 0x7fff);
6206 case 0x2: /* FNEG */
6207 tcg_gen_xori_i32(tcg_res
, tcg_op
, 0x8000);
6209 case 0x3: /* FSQRT */
6210 fpst
= fpstatus_ptr(FPST_FPCR_F16
);
6211 gen_helper_sqrt_f16(tcg_res
, tcg_op
, fpst
);
6213 case 0x8: /* FRINTN */
6214 case 0x9: /* FRINTP */
6215 case 0xa: /* FRINTM */
6216 case 0xb: /* FRINTZ */
6217 case 0xc: /* FRINTA */
6219 TCGv_i32 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(opcode
& 7));
6220 fpst
= fpstatus_ptr(FPST_FPCR_F16
);
6222 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, fpst
);
6223 gen_helper_advsimd_rinth(tcg_res
, tcg_op
, fpst
);
6225 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, fpst
);
6226 tcg_temp_free_i32(tcg_rmode
);
6229 case 0xe: /* FRINTX */
6230 fpst
= fpstatus_ptr(FPST_FPCR_F16
);
6231 gen_helper_advsimd_rinth_exact(tcg_res
, tcg_op
, fpst
);
6233 case 0xf: /* FRINTI */
6234 fpst
= fpstatus_ptr(FPST_FPCR_F16
);
6235 gen_helper_advsimd_rinth(tcg_res
, tcg_op
, fpst
);
6238 g_assert_not_reached();
6241 write_fp_sreg(s
, rd
, tcg_res
);
6244 tcg_temp_free_ptr(fpst
);
6246 tcg_temp_free_i32(tcg_op
);
6247 tcg_temp_free_i32(tcg_res
);
6250 /* Floating-point data-processing (1 source) - single precision */
6251 static void handle_fp_1src_single(DisasContext
*s
, int opcode
, int rd
, int rn
)
6253 void (*gen_fpst
)(TCGv_i32
, TCGv_i32
, TCGv_ptr
);
6254 TCGv_i32 tcg_op
, tcg_res
;
6258 tcg_op
= read_fp_sreg(s
, rn
);
6259 tcg_res
= tcg_temp_new_i32();
6262 case 0x0: /* FMOV */
6263 tcg_gen_mov_i32(tcg_res
, tcg_op
);
6265 case 0x1: /* FABS */
6266 gen_helper_vfp_abss(tcg_res
, tcg_op
);
6268 case 0x2: /* FNEG */
6269 gen_helper_vfp_negs(tcg_res
, tcg_op
);
6271 case 0x3: /* FSQRT */
6272 gen_helper_vfp_sqrts(tcg_res
, tcg_op
, cpu_env
);
6274 case 0x6: /* BFCVT */
6275 gen_fpst
= gen_helper_bfcvt
;
6277 case 0x8: /* FRINTN */
6278 case 0x9: /* FRINTP */
6279 case 0xa: /* FRINTM */
6280 case 0xb: /* FRINTZ */
6281 case 0xc: /* FRINTA */
6282 rmode
= arm_rmode_to_sf(opcode
& 7);
6283 gen_fpst
= gen_helper_rints
;
6285 case 0xe: /* FRINTX */
6286 gen_fpst
= gen_helper_rints_exact
;
6288 case 0xf: /* FRINTI */
6289 gen_fpst
= gen_helper_rints
;
6291 case 0x10: /* FRINT32Z */
6292 rmode
= float_round_to_zero
;
6293 gen_fpst
= gen_helper_frint32_s
;
6295 case 0x11: /* FRINT32X */
6296 gen_fpst
= gen_helper_frint32_s
;
6298 case 0x12: /* FRINT64Z */
6299 rmode
= float_round_to_zero
;
6300 gen_fpst
= gen_helper_frint64_s
;
6302 case 0x13: /* FRINT64X */
6303 gen_fpst
= gen_helper_frint64_s
;
6306 g_assert_not_reached();
6309 fpst
= fpstatus_ptr(FPST_FPCR
);
6311 TCGv_i32 tcg_rmode
= tcg_const_i32(rmode
);
6312 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, fpst
);
6313 gen_fpst(tcg_res
, tcg_op
, fpst
);
6314 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, fpst
);
6315 tcg_temp_free_i32(tcg_rmode
);
6317 gen_fpst(tcg_res
, tcg_op
, fpst
);
6319 tcg_temp_free_ptr(fpst
);
6322 write_fp_sreg(s
, rd
, tcg_res
);
6323 tcg_temp_free_i32(tcg_op
);
6324 tcg_temp_free_i32(tcg_res
);
6327 /* Floating-point data-processing (1 source) - double precision */
6328 static void handle_fp_1src_double(DisasContext
*s
, int opcode
, int rd
, int rn
)
6330 void (*gen_fpst
)(TCGv_i64
, TCGv_i64
, TCGv_ptr
);
6331 TCGv_i64 tcg_op
, tcg_res
;
6336 case 0x0: /* FMOV */
6337 gen_gvec_fn2(s
, false, rd
, rn
, tcg_gen_gvec_mov
, 0);
6341 tcg_op
= read_fp_dreg(s
, rn
);
6342 tcg_res
= tcg_temp_new_i64();
6345 case 0x1: /* FABS */
6346 gen_helper_vfp_absd(tcg_res
, tcg_op
);
6348 case 0x2: /* FNEG */
6349 gen_helper_vfp_negd(tcg_res
, tcg_op
);
6351 case 0x3: /* FSQRT */
6352 gen_helper_vfp_sqrtd(tcg_res
, tcg_op
, cpu_env
);
6354 case 0x8: /* FRINTN */
6355 case 0x9: /* FRINTP */
6356 case 0xa: /* FRINTM */
6357 case 0xb: /* FRINTZ */
6358 case 0xc: /* FRINTA */
6359 rmode
= arm_rmode_to_sf(opcode
& 7);
6360 gen_fpst
= gen_helper_rintd
;
6362 case 0xe: /* FRINTX */
6363 gen_fpst
= gen_helper_rintd_exact
;
6365 case 0xf: /* FRINTI */
6366 gen_fpst
= gen_helper_rintd
;
6368 case 0x10: /* FRINT32Z */
6369 rmode
= float_round_to_zero
;
6370 gen_fpst
= gen_helper_frint32_d
;
6372 case 0x11: /* FRINT32X */
6373 gen_fpst
= gen_helper_frint32_d
;
6375 case 0x12: /* FRINT64Z */
6376 rmode
= float_round_to_zero
;
6377 gen_fpst
= gen_helper_frint64_d
;
6379 case 0x13: /* FRINT64X */
6380 gen_fpst
= gen_helper_frint64_d
;
6383 g_assert_not_reached();
6386 fpst
= fpstatus_ptr(FPST_FPCR
);
6388 TCGv_i32 tcg_rmode
= tcg_const_i32(rmode
);
6389 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, fpst
);
6390 gen_fpst(tcg_res
, tcg_op
, fpst
);
6391 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, fpst
);
6392 tcg_temp_free_i32(tcg_rmode
);
6394 gen_fpst(tcg_res
, tcg_op
, fpst
);
6396 tcg_temp_free_ptr(fpst
);
6399 write_fp_dreg(s
, rd
, tcg_res
);
6400 tcg_temp_free_i64(tcg_op
);
6401 tcg_temp_free_i64(tcg_res
);
6404 static void handle_fp_fcvt(DisasContext
*s
, int opcode
,
6405 int rd
, int rn
, int dtype
, int ntype
)
6410 TCGv_i32 tcg_rn
= read_fp_sreg(s
, rn
);
6412 /* Single to double */
6413 TCGv_i64 tcg_rd
= tcg_temp_new_i64();
6414 gen_helper_vfp_fcvtds(tcg_rd
, tcg_rn
, cpu_env
);
6415 write_fp_dreg(s
, rd
, tcg_rd
);
6416 tcg_temp_free_i64(tcg_rd
);
6418 /* Single to half */
6419 TCGv_i32 tcg_rd
= tcg_temp_new_i32();
6420 TCGv_i32 ahp
= get_ahp_flag();
6421 TCGv_ptr fpst
= fpstatus_ptr(FPST_FPCR
);
6423 gen_helper_vfp_fcvt_f32_to_f16(tcg_rd
, tcg_rn
, fpst
, ahp
);
6424 /* write_fp_sreg is OK here because top half of tcg_rd is zero */
6425 write_fp_sreg(s
, rd
, tcg_rd
);
6426 tcg_temp_free_i32(tcg_rd
);
6427 tcg_temp_free_i32(ahp
);
6428 tcg_temp_free_ptr(fpst
);
6430 tcg_temp_free_i32(tcg_rn
);
6435 TCGv_i64 tcg_rn
= read_fp_dreg(s
, rn
);
6436 TCGv_i32 tcg_rd
= tcg_temp_new_i32();
6438 /* Double to single */
6439 gen_helper_vfp_fcvtsd(tcg_rd
, tcg_rn
, cpu_env
);
6441 TCGv_ptr fpst
= fpstatus_ptr(FPST_FPCR
);
6442 TCGv_i32 ahp
= get_ahp_flag();
6443 /* Double to half */
6444 gen_helper_vfp_fcvt_f64_to_f16(tcg_rd
, tcg_rn
, fpst
, ahp
);
6445 /* write_fp_sreg is OK here because top half of tcg_rd is zero */
6446 tcg_temp_free_ptr(fpst
);
6447 tcg_temp_free_i32(ahp
);
6449 write_fp_sreg(s
, rd
, tcg_rd
);
6450 tcg_temp_free_i32(tcg_rd
);
6451 tcg_temp_free_i64(tcg_rn
);
6456 TCGv_i32 tcg_rn
= read_fp_sreg(s
, rn
);
6457 TCGv_ptr tcg_fpst
= fpstatus_ptr(FPST_FPCR
);
6458 TCGv_i32 tcg_ahp
= get_ahp_flag();
6459 tcg_gen_ext16u_i32(tcg_rn
, tcg_rn
);
6461 /* Half to single */
6462 TCGv_i32 tcg_rd
= tcg_temp_new_i32();
6463 gen_helper_vfp_fcvt_f16_to_f32(tcg_rd
, tcg_rn
, tcg_fpst
, tcg_ahp
);
6464 write_fp_sreg(s
, rd
, tcg_rd
);
6465 tcg_temp_free_i32(tcg_rd
);
6467 /* Half to double */
6468 TCGv_i64 tcg_rd
= tcg_temp_new_i64();
6469 gen_helper_vfp_fcvt_f16_to_f64(tcg_rd
, tcg_rn
, tcg_fpst
, tcg_ahp
);
6470 write_fp_dreg(s
, rd
, tcg_rd
);
6471 tcg_temp_free_i64(tcg_rd
);
6473 tcg_temp_free_i32(tcg_rn
);
6474 tcg_temp_free_ptr(tcg_fpst
);
6475 tcg_temp_free_i32(tcg_ahp
);
6479 g_assert_not_reached();
6483 /* Floating point data-processing (1 source)
6484 * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0
6485 * +---+---+---+-----------+------+---+--------+-----------+------+------+
6486 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd |
6487 * +---+---+---+-----------+------+---+--------+-----------+------+------+
6489 static void disas_fp_1src(DisasContext
*s
, uint32_t insn
)
6491 int mos
= extract32(insn
, 29, 3);
6492 int type
= extract32(insn
, 22, 2);
6493 int opcode
= extract32(insn
, 15, 6);
6494 int rn
= extract32(insn
, 5, 5);
6495 int rd
= extract32(insn
, 0, 5);
6498 goto do_unallocated
;
6502 case 0x4: case 0x5: case 0x7:
6504 /* FCVT between half, single and double precision */
6505 int dtype
= extract32(opcode
, 0, 2);
6506 if (type
== 2 || dtype
== type
) {
6507 goto do_unallocated
;
6509 if (!fp_access_check(s
)) {
6513 handle_fp_fcvt(s
, opcode
, rd
, rn
, dtype
, type
);
6517 case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
6518 if (type
> 1 || !dc_isar_feature(aa64_frint
, s
)) {
6519 goto do_unallocated
;
6525 /* 32-to-32 and 64-to-64 ops */
6528 if (!fp_access_check(s
)) {
6531 handle_fp_1src_single(s
, opcode
, rd
, rn
);
6534 if (!fp_access_check(s
)) {
6537 handle_fp_1src_double(s
, opcode
, rd
, rn
);
6540 if (!dc_isar_feature(aa64_fp16
, s
)) {
6541 goto do_unallocated
;
6544 if (!fp_access_check(s
)) {
6547 handle_fp_1src_half(s
, opcode
, rd
, rn
);
6550 goto do_unallocated
;
6557 if (!dc_isar_feature(aa64_bf16
, s
)) {
6558 goto do_unallocated
;
6560 if (!fp_access_check(s
)) {
6563 handle_fp_1src_single(s
, opcode
, rd
, rn
);
6566 goto do_unallocated
;
6572 unallocated_encoding(s
);
6577 /* Floating-point data-processing (2 source) - single precision */
6578 static void handle_fp_2src_single(DisasContext
*s
, int opcode
,
6579 int rd
, int rn
, int rm
)
6586 tcg_res
= tcg_temp_new_i32();
6587 fpst
= fpstatus_ptr(FPST_FPCR
);
6588 tcg_op1
= read_fp_sreg(s
, rn
);
6589 tcg_op2
= read_fp_sreg(s
, rm
);
6592 case 0x0: /* FMUL */
6593 gen_helper_vfp_muls(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6595 case 0x1: /* FDIV */
6596 gen_helper_vfp_divs(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6598 case 0x2: /* FADD */
6599 gen_helper_vfp_adds(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6601 case 0x3: /* FSUB */
6602 gen_helper_vfp_subs(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6604 case 0x4: /* FMAX */
6605 gen_helper_vfp_maxs(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6607 case 0x5: /* FMIN */
6608 gen_helper_vfp_mins(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6610 case 0x6: /* FMAXNM */
6611 gen_helper_vfp_maxnums(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6613 case 0x7: /* FMINNM */
6614 gen_helper_vfp_minnums(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6616 case 0x8: /* FNMUL */
6617 gen_helper_vfp_muls(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6618 gen_helper_vfp_negs(tcg_res
, tcg_res
);
6622 write_fp_sreg(s
, rd
, tcg_res
);
6624 tcg_temp_free_ptr(fpst
);
6625 tcg_temp_free_i32(tcg_op1
);
6626 tcg_temp_free_i32(tcg_op2
);
6627 tcg_temp_free_i32(tcg_res
);
6630 /* Floating-point data-processing (2 source) - double precision */
6631 static void handle_fp_2src_double(DisasContext
*s
, int opcode
,
6632 int rd
, int rn
, int rm
)
6639 tcg_res
= tcg_temp_new_i64();
6640 fpst
= fpstatus_ptr(FPST_FPCR
);
6641 tcg_op1
= read_fp_dreg(s
, rn
);
6642 tcg_op2
= read_fp_dreg(s
, rm
);
6645 case 0x0: /* FMUL */
6646 gen_helper_vfp_muld(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6648 case 0x1: /* FDIV */
6649 gen_helper_vfp_divd(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6651 case 0x2: /* FADD */
6652 gen_helper_vfp_addd(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6654 case 0x3: /* FSUB */
6655 gen_helper_vfp_subd(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6657 case 0x4: /* FMAX */
6658 gen_helper_vfp_maxd(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6660 case 0x5: /* FMIN */
6661 gen_helper_vfp_mind(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6663 case 0x6: /* FMAXNM */
6664 gen_helper_vfp_maxnumd(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6666 case 0x7: /* FMINNM */
6667 gen_helper_vfp_minnumd(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6669 case 0x8: /* FNMUL */
6670 gen_helper_vfp_muld(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6671 gen_helper_vfp_negd(tcg_res
, tcg_res
);
6675 write_fp_dreg(s
, rd
, tcg_res
);
6677 tcg_temp_free_ptr(fpst
);
6678 tcg_temp_free_i64(tcg_op1
);
6679 tcg_temp_free_i64(tcg_op2
);
6680 tcg_temp_free_i64(tcg_res
);
6683 /* Floating-point data-processing (2 source) - half precision */
6684 static void handle_fp_2src_half(DisasContext
*s
, int opcode
,
6685 int rd
, int rn
, int rm
)
6692 tcg_res
= tcg_temp_new_i32();
6693 fpst
= fpstatus_ptr(FPST_FPCR_F16
);
6694 tcg_op1
= read_fp_hreg(s
, rn
);
6695 tcg_op2
= read_fp_hreg(s
, rm
);
6698 case 0x0: /* FMUL */
6699 gen_helper_advsimd_mulh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6701 case 0x1: /* FDIV */
6702 gen_helper_advsimd_divh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6704 case 0x2: /* FADD */
6705 gen_helper_advsimd_addh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6707 case 0x3: /* FSUB */
6708 gen_helper_advsimd_subh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6710 case 0x4: /* FMAX */
6711 gen_helper_advsimd_maxh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6713 case 0x5: /* FMIN */
6714 gen_helper_advsimd_minh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6716 case 0x6: /* FMAXNM */
6717 gen_helper_advsimd_maxnumh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6719 case 0x7: /* FMINNM */
6720 gen_helper_advsimd_minnumh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6722 case 0x8: /* FNMUL */
6723 gen_helper_advsimd_mulh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
6724 tcg_gen_xori_i32(tcg_res
, tcg_res
, 0x8000);
6727 g_assert_not_reached();
6730 write_fp_sreg(s
, rd
, tcg_res
);
6732 tcg_temp_free_ptr(fpst
);
6733 tcg_temp_free_i32(tcg_op1
);
6734 tcg_temp_free_i32(tcg_op2
);
6735 tcg_temp_free_i32(tcg_res
);
6738 /* Floating point data-processing (2 source)
6739 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
6740 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
6741 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | opcode | 1 0 | Rn | Rd |
6742 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
6744 static void disas_fp_2src(DisasContext
*s
, uint32_t insn
)
6746 int mos
= extract32(insn
, 29, 3);
6747 int type
= extract32(insn
, 22, 2);
6748 int rd
= extract32(insn
, 0, 5);
6749 int rn
= extract32(insn
, 5, 5);
6750 int rm
= extract32(insn
, 16, 5);
6751 int opcode
= extract32(insn
, 12, 4);
6753 if (opcode
> 8 || mos
) {
6754 unallocated_encoding(s
);
6760 if (!fp_access_check(s
)) {
6763 handle_fp_2src_single(s
, opcode
, rd
, rn
, rm
);
6766 if (!fp_access_check(s
)) {
6769 handle_fp_2src_double(s
, opcode
, rd
, rn
, rm
);
6772 if (!dc_isar_feature(aa64_fp16
, s
)) {
6773 unallocated_encoding(s
);
6776 if (!fp_access_check(s
)) {
6779 handle_fp_2src_half(s
, opcode
, rd
, rn
, rm
);
6782 unallocated_encoding(s
);
6786 /* Floating-point data-processing (3 source) - single precision */
6787 static void handle_fp_3src_single(DisasContext
*s
, bool o0
, bool o1
,
6788 int rd
, int rn
, int rm
, int ra
)
6790 TCGv_i32 tcg_op1
, tcg_op2
, tcg_op3
;
6791 TCGv_i32 tcg_res
= tcg_temp_new_i32();
6792 TCGv_ptr fpst
= fpstatus_ptr(FPST_FPCR
);
6794 tcg_op1
= read_fp_sreg(s
, rn
);
6795 tcg_op2
= read_fp_sreg(s
, rm
);
6796 tcg_op3
= read_fp_sreg(s
, ra
);
6798 /* These are fused multiply-add, and must be done as one
6799 * floating point operation with no rounding between the
6800 * multiplication and addition steps.
6801 * NB that doing the negations here as separate steps is
6802 * correct : an input NaN should come out with its sign bit
6803 * flipped if it is a negated-input.
6806 gen_helper_vfp_negs(tcg_op3
, tcg_op3
);
6810 gen_helper_vfp_negs(tcg_op1
, tcg_op1
);
6813 gen_helper_vfp_muladds(tcg_res
, tcg_op1
, tcg_op2
, tcg_op3
, fpst
);
6815 write_fp_sreg(s
, rd
, tcg_res
);
6817 tcg_temp_free_ptr(fpst
);
6818 tcg_temp_free_i32(tcg_op1
);
6819 tcg_temp_free_i32(tcg_op2
);
6820 tcg_temp_free_i32(tcg_op3
);
6821 tcg_temp_free_i32(tcg_res
);
6824 /* Floating-point data-processing (3 source) - double precision */
6825 static void handle_fp_3src_double(DisasContext
*s
, bool o0
, bool o1
,
6826 int rd
, int rn
, int rm
, int ra
)
6828 TCGv_i64 tcg_op1
, tcg_op2
, tcg_op3
;
6829 TCGv_i64 tcg_res
= tcg_temp_new_i64();
6830 TCGv_ptr fpst
= fpstatus_ptr(FPST_FPCR
);
6832 tcg_op1
= read_fp_dreg(s
, rn
);
6833 tcg_op2
= read_fp_dreg(s
, rm
);
6834 tcg_op3
= read_fp_dreg(s
, ra
);
6836 /* These are fused multiply-add, and must be done as one
6837 * floating point operation with no rounding between the
6838 * multiplication and addition steps.
6839 * NB that doing the negations here as separate steps is
6840 * correct : an input NaN should come out with its sign bit
6841 * flipped if it is a negated-input.
6844 gen_helper_vfp_negd(tcg_op3
, tcg_op3
);
6848 gen_helper_vfp_negd(tcg_op1
, tcg_op1
);
6851 gen_helper_vfp_muladdd(tcg_res
, tcg_op1
, tcg_op2
, tcg_op3
, fpst
);
6853 write_fp_dreg(s
, rd
, tcg_res
);
6855 tcg_temp_free_ptr(fpst
);
6856 tcg_temp_free_i64(tcg_op1
);
6857 tcg_temp_free_i64(tcg_op2
);
6858 tcg_temp_free_i64(tcg_op3
);
6859 tcg_temp_free_i64(tcg_res
);
/* Floating-point data-processing (3 source) - half precision */
static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
                                int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR_F16);

    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);
    tcg_op3 = read_fp_hreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct : an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
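    /*
     * For half precision there is no negation helper; flipping the
     * sign bit (bit 15) directly is equivalent, including for NaN
     * inputs as described above.
     */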
    if (o1 == true) {
        tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
    }

    if (o0 != o1) {
        tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
    }

    gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}
/* Floating point data-processing (3 source)
 *   31  30  29 28       24 23  22  21  20  16  15  14  10 9    5 4    0
 * +---+---+---+-----------+------+----+------+----+------+------+------+
 * | M | 0 | S | 1 1 1 1 1 | type | o1 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 * +---+---+---+-----------+------+----+------+----+------+------+------+
 */
static void disas_fp_3src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    bool o0 = extract32(insn, 15, 1);
    bool o1 = extract32(insn, 21, 1);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
        break;
    case 3:
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
        break;
    default:
        unallocated_encoding(s);
    }
}
/* Floating point immediate
 *   31  30  29 28       24 23  22  21 20        13 12   10 9    5 4    0
 * +---+---+---+-----------+------+---+------------+-------+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |    imm8    | 1 0 0 | imm5 |  Rd  |
 * +---+---+---+-----------+------+---+------------+-------+------+------+
 */
static void disas_fp_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int imm5 = extract32(insn, 5, 5);
    int imm8 = extract32(insn, 13, 8);
    int type = extract32(insn, 22, 2);
    int mos = extract32(insn, 29, 3);
    uint64_t imm;
    MemOp sz;

    if (mos || imm5) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
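    /*
     * imm8 packs a sign bit, a small exponent and four fraction bits;
     * vfp_expand_imm() widens it to a full-width constant of size 'sz'
     * following the Arm ARM VFPExpandImm() pseudocode, so all that is
     * left to do below is move the constant into the destination.
     */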
    imm = vfp_expand_imm(sz, imm8);
    write_fp_dreg(s, rd, tcg_constant_i64(imm));
}
/* Handle floating point <=> fixed point conversions. Note that we can
 * also deal with fp <=> integer conversions as a special case (scale == 64)
 * OPTME: consider handling that special case specially or at least skipping
 * the call to scalbn in the helpers for zero shifts.
 */
7002 static void handle_fpfpcvt(DisasContext
*s
, int rd
, int rn
, int opcode
,
7003 bool itof
, int rmode
, int scale
, int sf
, int type
)
7005 bool is_signed
= !(opcode
& 1);
7006 TCGv_ptr tcg_fpstatus
;
7007 TCGv_i32 tcg_shift
, tcg_single
;
7008 TCGv_i64 tcg_double
;
7010 tcg_fpstatus
= fpstatus_ptr(type
== 3 ? FPST_FPCR_F16
: FPST_FPCR
);
7012 tcg_shift
= tcg_constant_i32(64 - scale
);
7015 TCGv_i64 tcg_int
= cpu_reg(s
, rn
);
7017 TCGv_i64 tcg_extend
= new_tmp_a64(s
);
7020 tcg_gen_ext32s_i64(tcg_extend
, tcg_int
);
7022 tcg_gen_ext32u_i64(tcg_extend
, tcg_int
);
7025 tcg_int
= tcg_extend
;
7029 case 1: /* float64 */
7030 tcg_double
= tcg_temp_new_i64();
7032 gen_helper_vfp_sqtod(tcg_double
, tcg_int
,
7033 tcg_shift
, tcg_fpstatus
);
7035 gen_helper_vfp_uqtod(tcg_double
, tcg_int
,
7036 tcg_shift
, tcg_fpstatus
);
7038 write_fp_dreg(s
, rd
, tcg_double
);
7039 tcg_temp_free_i64(tcg_double
);
7042 case 0: /* float32 */
7043 tcg_single
= tcg_temp_new_i32();
7045 gen_helper_vfp_sqtos(tcg_single
, tcg_int
,
7046 tcg_shift
, tcg_fpstatus
);
7048 gen_helper_vfp_uqtos(tcg_single
, tcg_int
,
7049 tcg_shift
, tcg_fpstatus
);
7051 write_fp_sreg(s
, rd
, tcg_single
);
7052 tcg_temp_free_i32(tcg_single
);
7055 case 3: /* float16 */
7056 tcg_single
= tcg_temp_new_i32();
7058 gen_helper_vfp_sqtoh(tcg_single
, tcg_int
,
7059 tcg_shift
, tcg_fpstatus
);
7061 gen_helper_vfp_uqtoh(tcg_single
, tcg_int
,
7062 tcg_shift
, tcg_fpstatus
);
7064 write_fp_sreg(s
, rd
, tcg_single
);
7065 tcg_temp_free_i32(tcg_single
);
7069 g_assert_not_reached();
7072 TCGv_i64 tcg_int
= cpu_reg(s
, rd
);
7075 if (extract32(opcode
, 2, 1)) {
7076 /* There are too many rounding modes to all fit into rmode,
7077 * so FCVTA[US] is a special case.
7079 rmode
= FPROUNDING_TIEAWAY
;
7082 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
7084 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, tcg_fpstatus
);
7087 case 1: /* float64 */
7088 tcg_double
= read_fp_dreg(s
, rn
);
7091 gen_helper_vfp_tosld(tcg_int
, tcg_double
,
7092 tcg_shift
, tcg_fpstatus
);
7094 gen_helper_vfp_tosqd(tcg_int
, tcg_double
,
7095 tcg_shift
, tcg_fpstatus
);
7099 gen_helper_vfp_tould(tcg_int
, tcg_double
,
7100 tcg_shift
, tcg_fpstatus
);
7102 gen_helper_vfp_touqd(tcg_int
, tcg_double
,
7103 tcg_shift
, tcg_fpstatus
);
7107 tcg_gen_ext32u_i64(tcg_int
, tcg_int
);
7109 tcg_temp_free_i64(tcg_double
);
7112 case 0: /* float32 */
7113 tcg_single
= read_fp_sreg(s
, rn
);
7116 gen_helper_vfp_tosqs(tcg_int
, tcg_single
,
7117 tcg_shift
, tcg_fpstatus
);
7119 gen_helper_vfp_touqs(tcg_int
, tcg_single
,
7120 tcg_shift
, tcg_fpstatus
);
7123 TCGv_i32 tcg_dest
= tcg_temp_new_i32();
7125 gen_helper_vfp_tosls(tcg_dest
, tcg_single
,
7126 tcg_shift
, tcg_fpstatus
);
7128 gen_helper_vfp_touls(tcg_dest
, tcg_single
,
7129 tcg_shift
, tcg_fpstatus
);
7131 tcg_gen_extu_i32_i64(tcg_int
, tcg_dest
);
7132 tcg_temp_free_i32(tcg_dest
);
7134 tcg_temp_free_i32(tcg_single
);
7137 case 3: /* float16 */
7138 tcg_single
= read_fp_sreg(s
, rn
);
7141 gen_helper_vfp_tosqh(tcg_int
, tcg_single
,
7142 tcg_shift
, tcg_fpstatus
);
7144 gen_helper_vfp_touqh(tcg_int
, tcg_single
,
7145 tcg_shift
, tcg_fpstatus
);
7148 TCGv_i32 tcg_dest
= tcg_temp_new_i32();
7150 gen_helper_vfp_toslh(tcg_dest
, tcg_single
,
7151 tcg_shift
, tcg_fpstatus
);
7153 gen_helper_vfp_toulh(tcg_dest
, tcg_single
,
7154 tcg_shift
, tcg_fpstatus
);
7156 tcg_gen_extu_i32_i64(tcg_int
, tcg_dest
);
7157 tcg_temp_free_i32(tcg_dest
);
7159 tcg_temp_free_i32(tcg_single
);
7163 g_assert_not_reached();
7166 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, tcg_fpstatus
);
7167 tcg_temp_free_i32(tcg_rmode
);
7170 tcg_temp_free_ptr(tcg_fpstatus
);
/* Floating point <-> fixed point conversions
 *   31   30  29 28       24 23  22  21 20   19 18    16 15   10 9    5 4    0
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale |  Rn  |  Rd  |
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 */
static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int scale = extract32(insn, 10, 6);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof;

    if (sbit || (!sf && scale < 32)) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0: /* float32 */
    case 1: /* float64 */
        break;
    case 3: /* float16 */
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    switch ((rmode << 3) | opcode) {
    case 0x2: /* SCVTF */
    case 0x3: /* UCVTF */
        itof = true;
        break;
    case 0x18: /* FCVTZS */
    case 0x19: /* FCVTZU */
        itof = false;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
}
static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    /* FMOV: gpr to or from float, double, or top half of quad fp reg,
     * without conversion.
     */
    if (itof) {
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tmp;

        switch (type) {
        case 0:
            /* 32 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        case 1:
            /* 64 bit */
            write_fp_dreg(s, rd, tcg_rn);
            break;
        case 2:
            /* 64 bit to top half. */
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
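            /*
             * Only bits [127:64] of Vd are written here, but any SVE
             * bits above 128 still need to be zeroed, which is what
             * clear_vec_high() does when called with is_q == true.
             */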
            clear_vec_high(s, true, rd);
            break;
        case 3:
            /* 16 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext16u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
            break;
        case 2:
            /* 64 bits from top half */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
            break;
        case 3:
            /* 16 bit */
            tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
{
    TCGv_i64 t = read_fp_dreg(s, rn);
    TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR);

    gen_helper_fjcvtzs(t, t, fpstatus);

    tcg_temp_free_ptr(fpstatus);
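    /*
     * The helper packs the 32-bit result into the low half of t and a
     * value for cpu_ZF into the high half (QEMU's Z flag is set when
     * cpu_ZF == 0). FJCVTZS leaves NZCV as 0b0Z00, so C, N and V are
     * simply cleared.
     */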
    tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
    tcg_gen_extrh_i64_i32(cpu_ZF, t);
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);

    tcg_temp_free_i64(t);
}
/* Floating point <-> integer conversions
 *   31   30  29 28       24 23  22  21 20   19 18  16 15         10 9  5 4  0
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 */
7318 static void disas_fp_int_conv(DisasContext
*s
, uint32_t insn
)
7320 int rd
= extract32(insn
, 0, 5);
7321 int rn
= extract32(insn
, 5, 5);
7322 int opcode
= extract32(insn
, 16, 3);
7323 int rmode
= extract32(insn
, 19, 2);
7324 int type
= extract32(insn
, 22, 2);
7325 bool sbit
= extract32(insn
, 29, 1);
7326 bool sf
= extract32(insn
, 31, 1);
7330 goto do_unallocated
;
7338 case 4: /* FCVTAS */
7339 case 5: /* FCVTAU */
7341 goto do_unallocated
;
7344 case 0: /* FCVT[NPMZ]S */
7345 case 1: /* FCVT[NPMZ]U */
7347 case 0: /* float32 */
7348 case 1: /* float64 */
7350 case 3: /* float16 */
7351 if (!dc_isar_feature(aa64_fp16
, s
)) {
7352 goto do_unallocated
;
7356 goto do_unallocated
;
7358 if (!fp_access_check(s
)) {
7361 handle_fpfpcvt(s
, rd
, rn
, opcode
, itof
, rmode
, 64, sf
, type
);
7365 switch (sf
<< 7 | type
<< 5 | rmode
<< 3 | opcode
) {
7366 case 0b01100110: /* FMOV half <-> 32-bit int */
7368 case 0b11100110: /* FMOV half <-> 64-bit int */
7370 if (!dc_isar_feature(aa64_fp16
, s
)) {
7371 goto do_unallocated
;
7374 case 0b00000110: /* FMOV 32-bit */
7376 case 0b10100110: /* FMOV 64-bit */
7378 case 0b11001110: /* FMOV top half of 128-bit */
7380 if (!fp_access_check(s
)) {
7384 handle_fmov(s
, rd
, rn
, type
, itof
);
7387 case 0b00111110: /* FJCVTZS */
7388 if (!dc_isar_feature(aa64_jscvt
, s
)) {
7389 goto do_unallocated
;
7390 } else if (fp_access_check(s
)) {
7391 handle_fjcvtzs(s
, rd
, rn
);
7397 unallocated_encoding(s
);
/* FP-specific subcases of table C3-6 (SIMD and FP data processing)
 *   31  30  29 28     25 24                          0
 * +---+---+---+---------+-----------------------------+
 * |   | 0 |   | 1 1 1 1 |                             |
 * +---+---+---+---------+-----------------------------+
 */
static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 24, 1)) {
        /* Floating point data-processing (3 source) */
        disas_fp_3src(s, insn);
    } else if (extract32(insn, 21, 1) == 0) {
        /* Floating point to fixed point conversions */
        disas_fp_fixed_conv(s, insn);
    } else {
        switch (extract32(insn, 10, 2)) {
        case 1:
            /* Floating point conditional compare */
            disas_fp_ccomp(s, insn);
            break;
        case 2:
            /* Floating point data-processing (2 source) */
            disas_fp_2src(s, insn);
            break;
        case 3:
            /* Floating point conditional select */
            disas_fp_csel(s, insn);
            break;
        case 0:
            switch (ctz32(extract32(insn, 12, 4))) {
            case 0: /* [15:12] == xxx1 */
                /* Floating point immediate */
                disas_fp_imm(s, insn);
                break;
            case 1: /* [15:12] == xx10 */
                /* Floating point compare */
                disas_fp_compare(s, insn);
                break;
            case 2: /* [15:12] == x100 */
                /* Floating point data-processing (1 source) */
                disas_fp_1src(s, insn);
                break;
            case 3: /* [15:12] == 1000 */
                unallocated_encoding(s);
                break;
            default: /* [15:12] == 0000 */
                /* Floating point <-> integer conversions */
                disas_fp_int_conv(s, insn);
                break;
            }
            break;
        }
    }
}
static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
                     int pos)
{
    /* Extract 64 bits from the middle of two concatenated 64 bit
     * vector register slices left:right. The extracted bits start
     * at 'pos' bits into the right (least significant) side.
     * We return the result in tcg_right, and guarantee not to
     * trash tcg_left.
     */
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    assert(pos > 0 && pos < 64);

    tcg_gen_shri_i64(tcg_right, tcg_right, pos);
    tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
    tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);

    tcg_temp_free_i64(tcg_tmp);
}
7479 * 31 30 29 24 23 22 21 20 16 15 14 11 10 9 5 4 0
7480 * +---+---+-------------+-----+---+------+---+------+---+------+------+
7481 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 | Rm | 0 | imm4 | 0 | Rn | Rd |
7482 * +---+---+-------------+-----+---+------+---+------+---+------+------+
7484 static void disas_simd_ext(DisasContext
*s
, uint32_t insn
)
7486 int is_q
= extract32(insn
, 30, 1);
7487 int op2
= extract32(insn
, 22, 2);
7488 int imm4
= extract32(insn
, 11, 4);
7489 int rm
= extract32(insn
, 16, 5);
7490 int rn
= extract32(insn
, 5, 5);
7491 int rd
= extract32(insn
, 0, 5);
7492 int pos
= imm4
<< 3;
7493 TCGv_i64 tcg_resl
, tcg_resh
;
7495 if (op2
!= 0 || (!is_q
&& extract32(imm4
, 3, 1))) {
7496 unallocated_encoding(s
);
7500 if (!fp_access_check(s
)) {
7504 tcg_resh
= tcg_temp_new_i64();
7505 tcg_resl
= tcg_temp_new_i64();
7507 /* Vd gets bits starting at pos bits into Vm:Vn. This is
7508 * either extracting 128 bits from a 128:128 concatenation, or
7509 * extracting 64 bits from a 64:64 concatenation.
7512 read_vec_element(s
, tcg_resl
, rn
, 0, MO_64
);
7514 read_vec_element(s
, tcg_resh
, rm
, 0, MO_64
);
7515 do_ext64(s
, tcg_resh
, tcg_resl
, pos
);
7523 EltPosns eltposns
[] = { {rn
, 0}, {rn
, 1}, {rm
, 0}, {rm
, 1} };
7524 EltPosns
*elt
= eltposns
;
7531 read_vec_element(s
, tcg_resl
, elt
->reg
, elt
->elt
, MO_64
);
7533 read_vec_element(s
, tcg_resh
, elt
->reg
, elt
->elt
, MO_64
);
7536 do_ext64(s
, tcg_resh
, tcg_resl
, pos
);
7537 tcg_hh
= tcg_temp_new_i64();
7538 read_vec_element(s
, tcg_hh
, elt
->reg
, elt
->elt
, MO_64
);
7539 do_ext64(s
, tcg_hh
, tcg_resh
, pos
);
7540 tcg_temp_free_i64(tcg_hh
);
7544 write_vec_element(s
, tcg_resl
, rd
, 0, MO_64
);
7545 tcg_temp_free_i64(tcg_resl
);
7547 write_vec_element(s
, tcg_resh
, rd
, 1, MO_64
);
7549 tcg_temp_free_i64(tcg_resh
);
7550 clear_vec_high(s
, is_q
, rd
);
/* TBL/TBX
 *   31  30 29         24 23 22  21 20  16 15 14 13  12  11 10 9    5 4    0
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | len | op | 0 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 */
static void disas_simd_tb(DisasContext *s, uint32_t insn)
{
    int op2 = extract32(insn, 22, 2);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int is_tbx = extract32(insn, 12, 1);
    int len = (extract32(insn, 13, 2) + 1) * 16;

    if (op2 != 0) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
, rd
),
7579 vec_full_reg_offset(s
, rm
), cpu_env
,
7580 is_q
? 16 : 8, vec_full_reg_size(s
),
7581 (len
<< 6) | (is_tbx
<< 5) | rn
,
7582 gen_helper_simd_tblx
);
7586 * 31 30 29 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
7587 * +---+---+-------------+------+---+------+---+------------------+------+
7588 * | 0 | Q | 0 0 1 1 1 0 | size | 0 | Rm | 0 | opc | 1 0 | Rn | Rd |
7589 * +---+---+-------------+------+---+------+---+------------------+------+
7591 static void disas_simd_zip_trn(DisasContext
*s
, uint32_t insn
)
7593 int rd
= extract32(insn
, 0, 5);
7594 int rn
= extract32(insn
, 5, 5);
7595 int rm
= extract32(insn
, 16, 5);
7596 int size
= extract32(insn
, 22, 2);
7597 /* opc field bits [1:0] indicate ZIP/UZP/TRN;
7598 * bit 2 indicates 1 vs 2 variant of the insn.
7600 int opcode
= extract32(insn
, 12, 2);
7601 bool part
= extract32(insn
, 14, 1);
7602 bool is_q
= extract32(insn
, 30, 1);
7603 int esize
= 8 << size
;
7605 int datasize
= is_q
? 128 : 64;
7606 int elements
= datasize
/ esize
;
7607 TCGv_i64 tcg_res
, tcg_resl
, tcg_resh
;
7609 if (opcode
== 0 || (size
== 3 && !is_q
)) {
7610 unallocated_encoding(s
);
7614 if (!fp_access_check(s
)) {
7618 tcg_resl
= tcg_const_i64(0);
7619 tcg_resh
= is_q
? tcg_const_i64(0) : NULL
;
7620 tcg_res
= tcg_temp_new_i64();
7622 for (i
= 0; i
< elements
; i
++) {
7624 case 1: /* UZP1/2 */
7626 int midpoint
= elements
/ 2;
7628 read_vec_element(s
, tcg_res
, rn
, 2 * i
+ part
, size
);
7630 read_vec_element(s
, tcg_res
, rm
,
7631 2 * (i
- midpoint
) + part
, size
);
7635 case 2: /* TRN1/2 */
7637 read_vec_element(s
, tcg_res
, rm
, (i
& ~1) + part
, size
);
7639 read_vec_element(s
, tcg_res
, rn
, (i
& ~1) + part
, size
);
7642 case 3: /* ZIP1/2 */
7644 int base
= part
* elements
/ 2;
7646 read_vec_element(s
, tcg_res
, rm
, base
+ (i
>> 1), size
);
7648 read_vec_element(s
, tcg_res
, rn
, base
+ (i
>> 1), size
);
7653 g_assert_not_reached();
7658 tcg_gen_shli_i64(tcg_res
, tcg_res
, ofs
);
7659 tcg_gen_or_i64(tcg_resl
, tcg_resl
, tcg_res
);
7661 tcg_gen_shli_i64(tcg_res
, tcg_res
, ofs
- 64);
7662 tcg_gen_or_i64(tcg_resh
, tcg_resh
, tcg_res
);
7666 tcg_temp_free_i64(tcg_res
);
7668 write_vec_element(s
, tcg_resl
, rd
, 0, MO_64
);
7669 tcg_temp_free_i64(tcg_resl
);
7672 write_vec_element(s
, tcg_resh
, rd
, 1, MO_64
);
7673 tcg_temp_free_i64(tcg_resh
);
7675 clear_vec_high(s
, is_q
, rd
);
/*
 * do_reduction_op helper
 *
 * This mirrors the Reduce() pseudocode in the ARM ARM. It is
 * important for correct NaN propagation that we do these
 * operations in exactly the order specified by the pseudocode.
 *
 * This is a recursive function, TCG temps should be freed by the
 * calling function once it is done with the values.
 */
7688 static TCGv_i32
do_reduction_op(DisasContext
*s
, int fpopcode
, int rn
,
7689 int esize
, int size
, int vmap
, TCGv_ptr fpst
)
7691 if (esize
== size
) {
7693 MemOp msize
= esize
== 16 ? MO_16
: MO_32
;
7696 /* We should have one register left here */
7697 assert(ctpop8(vmap
) == 1);
7698 element
= ctz32(vmap
);
7699 assert(element
< 8);
7701 tcg_elem
= tcg_temp_new_i32();
7702 read_vec_element_i32(s
, tcg_elem
, rn
, element
, msize
);
7705 int bits
= size
/ 2;
7706 int shift
= ctpop8(vmap
) / 2;
7707 int vmap_lo
= (vmap
>> shift
) & vmap
;
7708 int vmap_hi
= (vmap
& ~vmap_lo
);
7709 TCGv_i32 tcg_hi
, tcg_lo
, tcg_res
;
7711 tcg_hi
= do_reduction_op(s
, fpopcode
, rn
, esize
, bits
, vmap_hi
, fpst
);
7712 tcg_lo
= do_reduction_op(s
, fpopcode
, rn
, esize
, bits
, vmap_lo
, fpst
);
7713 tcg_res
= tcg_temp_new_i32();
7716 case 0x0c: /* fmaxnmv half-precision */
7717 gen_helper_advsimd_maxnumh(tcg_res
, tcg_lo
, tcg_hi
, fpst
);
7719 case 0x0f: /* fmaxv half-precision */
7720 gen_helper_advsimd_maxh(tcg_res
, tcg_lo
, tcg_hi
, fpst
);
7722 case 0x1c: /* fminnmv half-precision */
7723 gen_helper_advsimd_minnumh(tcg_res
, tcg_lo
, tcg_hi
, fpst
);
7725 case 0x1f: /* fminv half-precision */
7726 gen_helper_advsimd_minh(tcg_res
, tcg_lo
, tcg_hi
, fpst
);
7728 case 0x2c: /* fmaxnmv */
7729 gen_helper_vfp_maxnums(tcg_res
, tcg_lo
, tcg_hi
, fpst
);
7731 case 0x2f: /* fmaxv */
7732 gen_helper_vfp_maxs(tcg_res
, tcg_lo
, tcg_hi
, fpst
);
7734 case 0x3c: /* fminnmv */
7735 gen_helper_vfp_minnums(tcg_res
, tcg_lo
, tcg_hi
, fpst
);
7737 case 0x3f: /* fminv */
7738 gen_helper_vfp_mins(tcg_res
, tcg_lo
, tcg_hi
, fpst
);
7741 g_assert_not_reached();
7744 tcg_temp_free_i32(tcg_hi
);
7745 tcg_temp_free_i32(tcg_lo
);
/* AdvSIMD across lanes
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
7756 static void disas_simd_across_lanes(DisasContext
*s
, uint32_t insn
)
7758 int rd
= extract32(insn
, 0, 5);
7759 int rn
= extract32(insn
, 5, 5);
7760 int size
= extract32(insn
, 22, 2);
7761 int opcode
= extract32(insn
, 12, 5);
7762 bool is_q
= extract32(insn
, 30, 1);
7763 bool is_u
= extract32(insn
, 29, 1);
7765 bool is_min
= false;
7769 TCGv_i64 tcg_res
, tcg_elt
;
7772 case 0x1b: /* ADDV */
7774 unallocated_encoding(s
);
7778 case 0x3: /* SADDLV, UADDLV */
7779 case 0xa: /* SMAXV, UMAXV */
7780 case 0x1a: /* SMINV, UMINV */
7781 if (size
== 3 || (size
== 2 && !is_q
)) {
7782 unallocated_encoding(s
);
7786 case 0xc: /* FMAXNMV, FMINNMV */
7787 case 0xf: /* FMAXV, FMINV */
7788 /* Bit 1 of size field encodes min vs max and the actual size
7789 * depends on the encoding of the U bit. If not set (and FP16
7790 * enabled) then we do half-precision float instead of single
7793 is_min
= extract32(size
, 1, 1);
7795 if (!is_u
&& dc_isar_feature(aa64_fp16
, s
)) {
7797 } else if (!is_u
|| !is_q
|| extract32(size
, 0, 1)) {
7798 unallocated_encoding(s
);
7805 unallocated_encoding(s
);
7809 if (!fp_access_check(s
)) {
7814 elements
= (is_q
? 128 : 64) / esize
;
7816 tcg_res
= tcg_temp_new_i64();
7817 tcg_elt
= tcg_temp_new_i64();
7819 /* These instructions operate across all lanes of a vector
7820 * to produce a single result. We can guarantee that a 64
7821 * bit intermediate is sufficient:
7822 * + for [US]ADDLV the maximum element size is 32 bits, and
7823 * the result type is 64 bits
7824 * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
7825 * same as the element size, which is 32 bits at most
7826 * For the integer operations we can choose to work at 64
7827 * or 32 bits and truncate at the end; for simplicity
7828 * we use 64 bits always. The floating point
7829 * ops do require 32 bit intermediates, though.
7832 read_vec_element(s
, tcg_res
, rn
, 0, size
| (is_u
? 0 : MO_SIGN
));
7834 for (i
= 1; i
< elements
; i
++) {
7835 read_vec_element(s
, tcg_elt
, rn
, i
, size
| (is_u
? 0 : MO_SIGN
));
7838 case 0x03: /* SADDLV / UADDLV */
7839 case 0x1b: /* ADDV */
7840 tcg_gen_add_i64(tcg_res
, tcg_res
, tcg_elt
);
7842 case 0x0a: /* SMAXV / UMAXV */
7844 tcg_gen_umax_i64(tcg_res
, tcg_res
, tcg_elt
);
7846 tcg_gen_smax_i64(tcg_res
, tcg_res
, tcg_elt
);
7849 case 0x1a: /* SMINV / UMINV */
7851 tcg_gen_umin_i64(tcg_res
, tcg_res
, tcg_elt
);
7853 tcg_gen_smin_i64(tcg_res
, tcg_res
, tcg_elt
);
7857 g_assert_not_reached();
7862 /* Floating point vector reduction ops which work across 32
7863 * bit (single) or 16 bit (half-precision) intermediates.
7864 * Note that correct NaN propagation requires that we do these
7865 * operations in exactly the order specified by the pseudocode.
7867 TCGv_ptr fpst
= fpstatus_ptr(size
== MO_16
? FPST_FPCR_F16
: FPST_FPCR
);
7868 int fpopcode
= opcode
| is_min
<< 4 | is_u
<< 5;
7869 int vmap
= (1 << elements
) - 1;
7870 TCGv_i32 tcg_res32
= do_reduction_op(s
, fpopcode
, rn
, esize
,
7871 (is_q
? 128 : 64), vmap
, fpst
);
7872 tcg_gen_extu_i32_i64(tcg_res
, tcg_res32
);
7873 tcg_temp_free_i32(tcg_res32
);
7874 tcg_temp_free_ptr(fpst
);
7877 tcg_temp_free_i64(tcg_elt
);
7879 /* Now truncate the result to the width required for the final output */
7880 if (opcode
== 0x03) {
7881 /* SADDLV, UADDLV: result is 2*esize */
7887 tcg_gen_ext8u_i64(tcg_res
, tcg_res
);
7890 tcg_gen_ext16u_i64(tcg_res
, tcg_res
);
7893 tcg_gen_ext32u_i64(tcg_res
, tcg_res
);
7898 g_assert_not_reached();
7901 write_fp_dreg(s
, rd
, tcg_res
);
7902 tcg_temp_free_i64(tcg_res
);
/* DUP (Element, Vector)
 *
 *   31  30   29              21 20    16 15        10 9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    int index;

    if (size > 3 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    index = imm5 >> (size + 1);
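    /* Replicate the selected source element across every lane of Vd. */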
    tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
                         vec_reg_offset(s, rn, index, size),
                         is_q ? 16 : 8, vec_full_reg_size(s));
}
/* DUP (element, scalar)
 *  31                     21 20    16 15        10 9    5 4    0
 * +-----------------------+--------+-------------+------+------+
 * | 0 1 0 1 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +-----------------------+--------+-------------+------+------+
 */
7941 static void handle_simd_dupes(DisasContext
*s
, int rd
, int rn
,
7944 int size
= ctz32(imm5
);
7949 unallocated_encoding(s
);
7953 if (!fp_access_check(s
)) {
7957 index
= imm5
>> (size
+ 1);
7959 /* This instruction just extracts the specified element and
7960 * zero-extends it into the bottom of the destination register.
7962 tmp
= tcg_temp_new_i64();
7963 read_vec_element(s
, tmp
, rn
, index
, size
);
7964 write_fp_dreg(s
, rd
, tmp
);
7965 tcg_temp_free_i64(tmp
);
7970 * 31 30 29 21 20 16 15 10 9 5 4 0
7971 * +---+---+-------------------+--------+-------------+------+------+
7972 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 1 1 | Rn | Rd |
7973 * +---+---+-------------------+--------+-------------+------+------+
7975 * size: encoded in imm5 (see ARM ARM LowestSetBit())
7977 static void handle_simd_dupg(DisasContext
*s
, int is_q
, int rd
, int rn
,
7980 int size
= ctz32(imm5
);
7981 uint32_t dofs
, oprsz
, maxsz
;
7983 if (size
> 3 || ((size
== 3) && !is_q
)) {
7984 unallocated_encoding(s
);
7988 if (!fp_access_check(s
)) {
7992 dofs
= vec_full_reg_offset(s
, rd
);
7993 oprsz
= is_q
? 16 : 8;
7994 maxsz
= vec_full_reg_size(s
);
7996 tcg_gen_gvec_dup_i64(size
, dofs
, oprsz
, maxsz
, cpu_reg(s
, rn
));
8001 * 31 21 20 16 15 14 11 10 9 5 4 0
8002 * +-----------------------+--------+------------+---+------+------+
8003 * | 0 1 1 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
8004 * +-----------------------+--------+------------+---+------+------+
8006 * size: encoded in imm5 (see ARM ARM LowestSetBit())
8007 * index: encoded in imm5<4:size+1>
8009 static void handle_simd_inse(DisasContext
*s
, int rd
, int rn
,
8012 int size
= ctz32(imm5
);
8013 int src_index
, dst_index
;
8017 unallocated_encoding(s
);
8021 if (!fp_access_check(s
)) {
8025 dst_index
= extract32(imm5
, 1+size
, 5);
8026 src_index
= extract32(imm4
, size
, 4);
8028 tmp
= tcg_temp_new_i64();
8030 read_vec_element(s
, tmp
, rn
, src_index
, size
);
8031 write_vec_element(s
, tmp
, rd
, dst_index
, size
);
8033 tcg_temp_free_i64(tmp
);
8035 /* INS is considered a 128-bit write for SVE. */
8036 clear_vec_high(s
, true, rd
);
8042 * 31 21 20 16 15 10 9 5 4 0
8043 * +-----------------------+--------+-------------+------+------+
8044 * | 0 1 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 1 1 1 | Rn | Rd |
8045 * +-----------------------+--------+-------------+------+------+
8047 * size: encoded in imm5 (see ARM ARM LowestSetBit())
8048 * index: encoded in imm5<4:size+1>
8050 static void handle_simd_insg(DisasContext
*s
, int rd
, int rn
, int imm5
)
8052 int size
= ctz32(imm5
);
8056 unallocated_encoding(s
);
8060 if (!fp_access_check(s
)) {
8064 idx
= extract32(imm5
, 1 + size
, 4 - size
);
8065 write_vec_element(s
, cpu_reg(s
, rn
), rd
, idx
, size
);
8067 /* INS is considered a 128-bit write for SVE. */
8068 clear_vec_high(s
, true, rd
);
8075 * 31 30 29 21 20 16 15 12 10 9 5 4 0
8076 * +---+---+-------------------+--------+-------------+------+------+
8077 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 1 U 1 1 | Rn | Rd |
8078 * +---+---+-------------------+--------+-------------+------+------+
8080 * U: unsigned when set
8081 * size: encoded in imm5 (see ARM ARM LowestSetBit())
8083 static void handle_simd_umov_smov(DisasContext
*s
, int is_q
, int is_signed
,
8084 int rn
, int rd
, int imm5
)
8086 int size
= ctz32(imm5
);
8090 /* Check for UnallocatedEncodings */
8092 if (size
> 2 || (size
== 2 && !is_q
)) {
8093 unallocated_encoding(s
);
8098 || (size
< 3 && is_q
)
8099 || (size
== 3 && !is_q
)) {
8100 unallocated_encoding(s
);
8105 if (!fp_access_check(s
)) {
8109 element
= extract32(imm5
, 1+size
, 4);
8111 tcg_rd
= cpu_reg(s
, rd
);
8112 read_vec_element(s
, tcg_rd
, rn
, element
, size
| (is_signed
? MO_SIGN
: 0));
8113 if (is_signed
&& !is_q
) {
8114 tcg_gen_ext32u_i64(tcg_rd
, tcg_rd
);
8119 * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
8120 * +---+---+----+-----------------+------+---+------+---+------+------+
8121 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
8122 * +---+---+----+-----------------+------+---+------+---+------+------+
8124 static void disas_simd_copy(DisasContext
*s
, uint32_t insn
)
8126 int rd
= extract32(insn
, 0, 5);
8127 int rn
= extract32(insn
, 5, 5);
8128 int imm4
= extract32(insn
, 11, 4);
8129 int op
= extract32(insn
, 29, 1);
8130 int is_q
= extract32(insn
, 30, 1);
8131 int imm5
= extract32(insn
, 16, 5);
8136 handle_simd_inse(s
, rd
, rn
, imm4
, imm5
);
8138 unallocated_encoding(s
);
8143 /* DUP (element - vector) */
8144 handle_simd_dupe(s
, is_q
, rd
, rn
, imm5
);
8148 handle_simd_dupg(s
, is_q
, rd
, rn
, imm5
);
8153 handle_simd_insg(s
, rd
, rn
, imm5
);
8155 unallocated_encoding(s
);
8160 /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
8161 handle_simd_umov_smov(s
, is_q
, (imm4
== 5), rn
, rd
, imm5
);
8164 unallocated_encoding(s
);
/* AdvSIMD modified immediate
 *   31  30   29  28                 19 18  16 15   12  11  10  9     5 4    0
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 *
 * There are a number of operations that can be carried out here:
 *   MOVI - move (shifted) imm into register
 *   MVNI - move inverted (shifted) imm into register
 *   ORR  - bitwise OR of (shifted) imm with register
 *   BIC  - bitwise clear of (shifted) imm with register
 * With ARMv8.2 we also have:
 *   FMOV half-precision
 */
8184 static void disas_simd_mod_imm(DisasContext
*s
, uint32_t insn
)
8186 int rd
= extract32(insn
, 0, 5);
8187 int cmode
= extract32(insn
, 12, 4);
8188 int o2
= extract32(insn
, 11, 1);
8189 uint64_t abcdefgh
= extract32(insn
, 5, 5) | (extract32(insn
, 16, 3) << 5);
8190 bool is_neg
= extract32(insn
, 29, 1);
8191 bool is_q
= extract32(insn
, 30, 1);
8194 if (o2
!= 0 || ((cmode
== 0xf) && is_neg
&& !is_q
)) {
8195 /* Check for FMOV (vector, immediate) - half-precision */
8196 if (!(dc_isar_feature(aa64_fp16
, s
) && o2
&& cmode
== 0xf)) {
8197 unallocated_encoding(s
);
8202 if (!fp_access_check(s
)) {
8206 if (cmode
== 15 && o2
&& !is_neg
) {
8207 /* FMOV (vector, immediate) - half-precision */
8208 imm
= vfp_expand_imm(MO_16
, abcdefgh
);
8209 /* now duplicate across the lanes */
8210 imm
= dup_const(MO_16
, imm
);
8212 imm
= asimd_imm_const(abcdefgh
, cmode
, is_neg
);
8215 if (!((cmode
& 0x9) == 0x1 || (cmode
& 0xd) == 0x9)) {
8216 /* MOVI or MVNI, with MVNI negation handled above. */
8217 tcg_gen_gvec_dup_imm(MO_64
, vec_full_reg_offset(s
, rd
), is_q
? 16 : 8,
8218 vec_full_reg_size(s
), imm
);
8220 /* ORR or BIC, with BIC negation to AND handled above. */
8222 gen_gvec_fn2i(s
, is_q
, rd
, rd
, imm
, tcg_gen_gvec_andi
, MO_64
);
8224 gen_gvec_fn2i(s
, is_q
, rd
, rd
, imm
, tcg_gen_gvec_ori
, MO_64
);
/* AdvSIMD scalar copy
 *  31 30  29  28             21 20  16 15  14  11 10  9    5 4    0
 * +-----+----+-----------------+------+---+------+---+------+------+
 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
 * +-----+----+-----------------+------+---+------+---+------+------+
 */
8235 static void disas_simd_scalar_copy(DisasContext
*s
, uint32_t insn
)
8237 int rd
= extract32(insn
, 0, 5);
8238 int rn
= extract32(insn
, 5, 5);
8239 int imm4
= extract32(insn
, 11, 4);
8240 int imm5
= extract32(insn
, 16, 5);
8241 int op
= extract32(insn
, 29, 1);
8243 if (op
!= 0 || imm4
!= 0) {
8244 unallocated_encoding(s
);
8248 /* DUP (element, scalar) */
8249 handle_simd_dupes(s
, rd
, rn
, imm5
);
/* AdvSIMD scalar pairwise
 *  31 30  29  28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
8258 static void disas_simd_scalar_pairwise(DisasContext
*s
, uint32_t insn
)
8260 int u
= extract32(insn
, 29, 1);
8261 int size
= extract32(insn
, 22, 2);
8262 int opcode
= extract32(insn
, 12, 5);
8263 int rn
= extract32(insn
, 5, 5);
8264 int rd
= extract32(insn
, 0, 5);
8267 /* For some ops (the FP ones), size[1] is part of the encoding.
8268 * For ADDP strictly it is not but size[1] is always 1 for valid
8271 opcode
|= (extract32(size
, 1, 1) << 5);
8274 case 0x3b: /* ADDP */
8275 if (u
|| size
!= 3) {
8276 unallocated_encoding(s
);
8279 if (!fp_access_check(s
)) {
8285 case 0xc: /* FMAXNMP */
8286 case 0xd: /* FADDP */
8287 case 0xf: /* FMAXP */
8288 case 0x2c: /* FMINNMP */
8289 case 0x2f: /* FMINP */
8290 /* FP op, size[0] is 32 or 64 bit*/
8292 if (!dc_isar_feature(aa64_fp16
, s
)) {
8293 unallocated_encoding(s
);
8299 size
= extract32(size
, 0, 1) ? MO_64
: MO_32
;
8302 if (!fp_access_check(s
)) {
8306 fpst
= fpstatus_ptr(size
== MO_16
? FPST_FPCR_F16
: FPST_FPCR
);
8309 unallocated_encoding(s
);
8313 if (size
== MO_64
) {
8314 TCGv_i64 tcg_op1
= tcg_temp_new_i64();
8315 TCGv_i64 tcg_op2
= tcg_temp_new_i64();
8316 TCGv_i64 tcg_res
= tcg_temp_new_i64();
8318 read_vec_element(s
, tcg_op1
, rn
, 0, MO_64
);
8319 read_vec_element(s
, tcg_op2
, rn
, 1, MO_64
);
8322 case 0x3b: /* ADDP */
8323 tcg_gen_add_i64(tcg_res
, tcg_op1
, tcg_op2
);
8325 case 0xc: /* FMAXNMP */
8326 gen_helper_vfp_maxnumd(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
8328 case 0xd: /* FADDP */
8329 gen_helper_vfp_addd(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
8331 case 0xf: /* FMAXP */
8332 gen_helper_vfp_maxd(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
8334 case 0x2c: /* FMINNMP */
8335 gen_helper_vfp_minnumd(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
8337 case 0x2f: /* FMINP */
8338 gen_helper_vfp_mind(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
8341 g_assert_not_reached();
8344 write_fp_dreg(s
, rd
, tcg_res
);
8346 tcg_temp_free_i64(tcg_op1
);
8347 tcg_temp_free_i64(tcg_op2
);
8348 tcg_temp_free_i64(tcg_res
);
8350 TCGv_i32 tcg_op1
= tcg_temp_new_i32();
8351 TCGv_i32 tcg_op2
= tcg_temp_new_i32();
8352 TCGv_i32 tcg_res
= tcg_temp_new_i32();
8354 read_vec_element_i32(s
, tcg_op1
, rn
, 0, size
);
8355 read_vec_element_i32(s
, tcg_op2
, rn
, 1, size
);
8357 if (size
== MO_16
) {
8359 case 0xc: /* FMAXNMP */
8360 gen_helper_advsimd_maxnumh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
8362 case 0xd: /* FADDP */
8363 gen_helper_advsimd_addh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
8365 case 0xf: /* FMAXP */
8366 gen_helper_advsimd_maxh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
8368 case 0x2c: /* FMINNMP */
8369 gen_helper_advsimd_minnumh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
8371 case 0x2f: /* FMINP */
8372 gen_helper_advsimd_minh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
8375 g_assert_not_reached();
8379 case 0xc: /* FMAXNMP */
8380 gen_helper_vfp_maxnums(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
8382 case 0xd: /* FADDP */
8383 gen_helper_vfp_adds(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
8385 case 0xf: /* FMAXP */
8386 gen_helper_vfp_maxs(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
8388 case 0x2c: /* FMINNMP */
8389 gen_helper_vfp_minnums(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
8391 case 0x2f: /* FMINP */
8392 gen_helper_vfp_mins(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
8395 g_assert_not_reached();
8399 write_fp_sreg(s
, rd
, tcg_res
);
8401 tcg_temp_free_i32(tcg_op1
);
8402 tcg_temp_free_i32(tcg_op2
);
8403 tcg_temp_free_i32(tcg_res
);
8407 tcg_temp_free_ptr(fpst
);
/*
 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
 *
 * This code handles the common shifting code and is used by both
 * the vector and scalar code.
 */
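/*
 * The rounding increment is 1 << (shift - 1), added in before the right
 * shift; for 64-bit elements that addition can overflow into a 65th bit,
 * which is why the extended_result path below carries a separate high word.
 */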
8417 static void handle_shri_with_rndacc(TCGv_i64 tcg_res
, TCGv_i64 tcg_src
,
8418 TCGv_i64 tcg_rnd
, bool accumulate
,
8419 bool is_u
, int size
, int shift
)
8421 bool extended_result
= false;
8422 bool round
= tcg_rnd
!= NULL
;
8424 TCGv_i64 tcg_src_hi
;
8426 if (round
&& size
== 3) {
8427 extended_result
= true;
8428 ext_lshift
= 64 - shift
;
8429 tcg_src_hi
= tcg_temp_new_i64();
8430 } else if (shift
== 64) {
8431 if (!accumulate
&& is_u
) {
8432 /* result is zero */
8433 tcg_gen_movi_i64(tcg_res
, 0);
8438 /* Deal with the rounding step */
8440 if (extended_result
) {
8441 TCGv_i64 tcg_zero
= tcg_constant_i64(0);
8443 /* take care of sign extending tcg_res */
8444 tcg_gen_sari_i64(tcg_src_hi
, tcg_src
, 63);
8445 tcg_gen_add2_i64(tcg_src
, tcg_src_hi
,
8446 tcg_src
, tcg_src_hi
,
8449 tcg_gen_add2_i64(tcg_src
, tcg_src_hi
,
8454 tcg_gen_add_i64(tcg_src
, tcg_src
, tcg_rnd
);
8458 /* Now do the shift right */
8459 if (round
&& extended_result
) {
8460 /* extended case, >64 bit precision required */
8461 if (ext_lshift
== 0) {
8462 /* special case, only high bits matter */
8463 tcg_gen_mov_i64(tcg_src
, tcg_src_hi
);
8465 tcg_gen_shri_i64(tcg_src
, tcg_src
, shift
);
8466 tcg_gen_shli_i64(tcg_src_hi
, tcg_src_hi
, ext_lshift
);
8467 tcg_gen_or_i64(tcg_src
, tcg_src
, tcg_src_hi
);
8472 /* essentially shifting in 64 zeros */
8473 tcg_gen_movi_i64(tcg_src
, 0);
8475 tcg_gen_shri_i64(tcg_src
, tcg_src
, shift
);
8479 /* effectively extending the sign-bit */
8480 tcg_gen_sari_i64(tcg_src
, tcg_src
, 63);
8482 tcg_gen_sari_i64(tcg_src
, tcg_src
, shift
);
8488 tcg_gen_add_i64(tcg_res
, tcg_res
, tcg_src
);
8490 tcg_gen_mov_i64(tcg_res
, tcg_src
);
8493 if (extended_result
) {
8494 tcg_temp_free_i64(tcg_src_hi
);
8498 /* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
8499 static void handle_scalar_simd_shri(DisasContext
*s
,
8500 bool is_u
, int immh
, int immb
,
8501 int opcode
, int rn
, int rd
)
8504 int immhb
= immh
<< 3 | immb
;
8505 int shift
= 2 * (8 << size
) - immhb
;
8506 bool accumulate
= false;
8508 bool insert
= false;
8513 if (!extract32(immh
, 3, 1)) {
8514 unallocated_encoding(s
);
8518 if (!fp_access_check(s
)) {
8523 case 0x02: /* SSRA / USRA (accumulate) */
8526 case 0x04: /* SRSHR / URSHR (rounding) */
8529 case 0x06: /* SRSRA / URSRA (accum + rounding) */
8530 accumulate
= round
= true;
8532 case 0x08: /* SRI */
8538 tcg_round
= tcg_constant_i64(1ULL << (shift
- 1));
8543 tcg_rn
= read_fp_dreg(s
, rn
);
8544 tcg_rd
= (accumulate
|| insert
) ? read_fp_dreg(s
, rd
) : tcg_temp_new_i64();
8547 /* shift count same as element size is valid but does nothing;
8548 * special case to avoid potential shift by 64.
8550 int esize
= 8 << size
;
8551 if (shift
!= esize
) {
8552 tcg_gen_shri_i64(tcg_rn
, tcg_rn
, shift
);
8553 tcg_gen_deposit_i64(tcg_rd
, tcg_rd
, tcg_rn
, 0, esize
- shift
);
8556 handle_shri_with_rndacc(tcg_rd
, tcg_rn
, tcg_round
,
8557 accumulate
, is_u
, size
, shift
);
8560 write_fp_dreg(s
, rd
, tcg_rd
);
8562 tcg_temp_free_i64(tcg_rn
);
8563 tcg_temp_free_i64(tcg_rd
);
8566 /* SHL/SLI - Scalar shift left */
8567 static void handle_scalar_simd_shli(DisasContext
*s
, bool insert
,
8568 int immh
, int immb
, int opcode
,
8571 int size
= 32 - clz32(immh
) - 1;
8572 int immhb
= immh
<< 3 | immb
;
8573 int shift
= immhb
- (8 << size
);
8577 if (!extract32(immh
, 3, 1)) {
8578 unallocated_encoding(s
);
8582 if (!fp_access_check(s
)) {
8586 tcg_rn
= read_fp_dreg(s
, rn
);
8587 tcg_rd
= insert
? read_fp_dreg(s
, rd
) : tcg_temp_new_i64();
8590 tcg_gen_deposit_i64(tcg_rd
, tcg_rd
, tcg_rn
, shift
, 64 - shift
);
8592 tcg_gen_shli_i64(tcg_rd
, tcg_rn
, shift
);
8595 write_fp_dreg(s
, rd
, tcg_rd
);
8597 tcg_temp_free_i64(tcg_rn
);
8598 tcg_temp_free_i64(tcg_rd
);
8601 /* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
8602 * (signed/unsigned) narrowing */
8603 static void handle_vec_simd_sqshrn(DisasContext
*s
, bool is_scalar
, bool is_q
,
8604 bool is_u_shift
, bool is_u_narrow
,
8605 int immh
, int immb
, int opcode
,
8608 int immhb
= immh
<< 3 | immb
;
8609 int size
= 32 - clz32(immh
) - 1;
8610 int esize
= 8 << size
;
8611 int shift
= (2 * esize
) - immhb
;
8612 int elements
= is_scalar
? 1 : (64 / esize
);
8613 bool round
= extract32(opcode
, 0, 1);
8614 MemOp ldop
= (size
+ 1) | (is_u_shift
? 0 : MO_SIGN
);
8615 TCGv_i64 tcg_rn
, tcg_rd
, tcg_round
;
8616 TCGv_i32 tcg_rd_narrowed
;
8619 static NeonGenNarrowEnvFn
* const signed_narrow_fns
[4][2] = {
8620 { gen_helper_neon_narrow_sat_s8
,
8621 gen_helper_neon_unarrow_sat8
},
8622 { gen_helper_neon_narrow_sat_s16
,
8623 gen_helper_neon_unarrow_sat16
},
8624 { gen_helper_neon_narrow_sat_s32
,
8625 gen_helper_neon_unarrow_sat32
},
8628 static NeonGenNarrowEnvFn
* const unsigned_narrow_fns
[4] = {
8629 gen_helper_neon_narrow_sat_u8
,
8630 gen_helper_neon_narrow_sat_u16
,
8631 gen_helper_neon_narrow_sat_u32
,
8634 NeonGenNarrowEnvFn
*narrowfn
;
8640 if (extract32(immh
, 3, 1)) {
8641 unallocated_encoding(s
);
8645 if (!fp_access_check(s
)) {
8650 narrowfn
= unsigned_narrow_fns
[size
];
8652 narrowfn
= signed_narrow_fns
[size
][is_u_narrow
? 1 : 0];
8655 tcg_rn
= tcg_temp_new_i64();
8656 tcg_rd
= tcg_temp_new_i64();
8657 tcg_rd_narrowed
= tcg_temp_new_i32();
8658 tcg_final
= tcg_const_i64(0);
8661 tcg_round
= tcg_constant_i64(1ULL << (shift
- 1));
8666 for (i
= 0; i
< elements
; i
++) {
8667 read_vec_element(s
, tcg_rn
, rn
, i
, ldop
);
8668 handle_shri_with_rndacc(tcg_rd
, tcg_rn
, tcg_round
,
8669 false, is_u_shift
, size
+1, shift
);
8670 narrowfn(tcg_rd_narrowed
, cpu_env
, tcg_rd
);
8671 tcg_gen_extu_i32_i64(tcg_rd
, tcg_rd_narrowed
);
8672 tcg_gen_deposit_i64(tcg_final
, tcg_final
, tcg_rd
, esize
* i
, esize
);
8676 write_vec_element(s
, tcg_final
, rd
, 0, MO_64
);
8678 write_vec_element(s
, tcg_final
, rd
, 1, MO_64
);
8681 tcg_temp_free_i64(tcg_rn
);
8682 tcg_temp_free_i64(tcg_rd
);
8683 tcg_temp_free_i32(tcg_rd_narrowed
);
8684 tcg_temp_free_i64(tcg_final
);
8686 clear_vec_high(s
, is_q
, rd
);
8689 /* SQSHLU, UQSHL, SQSHL: saturating left shifts */
8690 static void handle_simd_qshl(DisasContext
*s
, bool scalar
, bool is_q
,
8691 bool src_unsigned
, bool dst_unsigned
,
8692 int immh
, int immb
, int rn
, int rd
)
8694 int immhb
= immh
<< 3 | immb
;
8695 int size
= 32 - clz32(immh
) - 1;
8696 int shift
= immhb
- (8 << size
);
8700 assert(!(scalar
&& is_q
));
8703 if (!is_q
&& extract32(immh
, 3, 1)) {
8704 unallocated_encoding(s
);
8708 /* Since we use the variable-shift helpers we must
8709 * replicate the shift count into each element of
8710 * the tcg_shift value.
8714 shift
|= shift
<< 8;
8717 shift
|= shift
<< 16;
8723 g_assert_not_reached();
8727 if (!fp_access_check(s
)) {
8732 TCGv_i64 tcg_shift
= tcg_constant_i64(shift
);
8733 static NeonGenTwo64OpEnvFn
* const fns
[2][2] = {
8734 { gen_helper_neon_qshl_s64
, gen_helper_neon_qshlu_s64
},
8735 { NULL
, gen_helper_neon_qshl_u64
},
8737 NeonGenTwo64OpEnvFn
*genfn
= fns
[src_unsigned
][dst_unsigned
];
8738 int maxpass
= is_q
? 2 : 1;
8740 for (pass
= 0; pass
< maxpass
; pass
++) {
8741 TCGv_i64 tcg_op
= tcg_temp_new_i64();
8743 read_vec_element(s
, tcg_op
, rn
, pass
, MO_64
);
8744 genfn(tcg_op
, cpu_env
, tcg_op
, tcg_shift
);
8745 write_vec_element(s
, tcg_op
, rd
, pass
, MO_64
);
8747 tcg_temp_free_i64(tcg_op
);
8749 clear_vec_high(s
, is_q
, rd
);
8751 TCGv_i32 tcg_shift
= tcg_constant_i32(shift
);
8752 static NeonGenTwoOpEnvFn
* const fns
[2][2][3] = {
8754 { gen_helper_neon_qshl_s8
,
8755 gen_helper_neon_qshl_s16
,
8756 gen_helper_neon_qshl_s32
},
8757 { gen_helper_neon_qshlu_s8
,
8758 gen_helper_neon_qshlu_s16
,
8759 gen_helper_neon_qshlu_s32
}
8761 { NULL
, NULL
, NULL
},
8762 { gen_helper_neon_qshl_u8
,
8763 gen_helper_neon_qshl_u16
,
8764 gen_helper_neon_qshl_u32
}
8767 NeonGenTwoOpEnvFn
*genfn
= fns
[src_unsigned
][dst_unsigned
][size
];
8768 MemOp memop
= scalar
? size
: MO_32
;
8769 int maxpass
= scalar
? 1 : is_q
? 4 : 2;
8771 for (pass
= 0; pass
< maxpass
; pass
++) {
8772 TCGv_i32 tcg_op
= tcg_temp_new_i32();
8774 read_vec_element_i32(s
, tcg_op
, rn
, pass
, memop
);
8775 genfn(tcg_op
, cpu_env
, tcg_op
, tcg_shift
);
8779 tcg_gen_ext8u_i32(tcg_op
, tcg_op
);
8782 tcg_gen_ext16u_i32(tcg_op
, tcg_op
);
8787 g_assert_not_reached();
8789 write_fp_sreg(s
, rd
, tcg_op
);
8791 write_vec_element_i32(s
, tcg_op
, rd
, pass
, MO_32
);
8794 tcg_temp_free_i32(tcg_op
);
8798 clear_vec_high(s
, is_q
, rd
);
8803 /* Common vector code for handling integer to FP conversion */
8804 static void handle_simd_intfp_conv(DisasContext
*s
, int rd
, int rn
,
8805 int elements
, int is_signed
,
8806 int fracbits
, int size
)
8808 TCGv_ptr tcg_fpst
= fpstatus_ptr(size
== MO_16
? FPST_FPCR_F16
: FPST_FPCR
);
8809 TCGv_i32 tcg_shift
= NULL
;
8811 MemOp mop
= size
| (is_signed
? MO_SIGN
: 0);
8814 if (fracbits
|| size
== MO_64
) {
8815 tcg_shift
= tcg_constant_i32(fracbits
);
8818 if (size
== MO_64
) {
8819 TCGv_i64 tcg_int64
= tcg_temp_new_i64();
8820 TCGv_i64 tcg_double
= tcg_temp_new_i64();
8822 for (pass
= 0; pass
< elements
; pass
++) {
8823 read_vec_element(s
, tcg_int64
, rn
, pass
, mop
);
8826 gen_helper_vfp_sqtod(tcg_double
, tcg_int64
,
8827 tcg_shift
, tcg_fpst
);
8829 gen_helper_vfp_uqtod(tcg_double
, tcg_int64
,
8830 tcg_shift
, tcg_fpst
);
8832 if (elements
== 1) {
8833 write_fp_dreg(s
, rd
, tcg_double
);
8835 write_vec_element(s
, tcg_double
, rd
, pass
, MO_64
);
8839 tcg_temp_free_i64(tcg_int64
);
8840 tcg_temp_free_i64(tcg_double
);
8843 TCGv_i32 tcg_int32
= tcg_temp_new_i32();
8844 TCGv_i32 tcg_float
= tcg_temp_new_i32();
8846 for (pass
= 0; pass
< elements
; pass
++) {
8847 read_vec_element_i32(s
, tcg_int32
, rn
, pass
, mop
);
8853 gen_helper_vfp_sltos(tcg_float
, tcg_int32
,
8854 tcg_shift
, tcg_fpst
);
8856 gen_helper_vfp_ultos(tcg_float
, tcg_int32
,
8857 tcg_shift
, tcg_fpst
);
8861 gen_helper_vfp_sitos(tcg_float
, tcg_int32
, tcg_fpst
);
8863 gen_helper_vfp_uitos(tcg_float
, tcg_int32
, tcg_fpst
);
8870 gen_helper_vfp_sltoh(tcg_float
, tcg_int32
,
8871 tcg_shift
, tcg_fpst
);
8873 gen_helper_vfp_ultoh(tcg_float
, tcg_int32
,
8874 tcg_shift
, tcg_fpst
);
8878 gen_helper_vfp_sitoh(tcg_float
, tcg_int32
, tcg_fpst
);
8880 gen_helper_vfp_uitoh(tcg_float
, tcg_int32
, tcg_fpst
);
8885 g_assert_not_reached();
8888 if (elements
== 1) {
8889 write_fp_sreg(s
, rd
, tcg_float
);
8891 write_vec_element_i32(s
, tcg_float
, rd
, pass
, size
);
8895 tcg_temp_free_i32(tcg_int32
);
8896 tcg_temp_free_i32(tcg_float
);
8899 tcg_temp_free_ptr(tcg_fpst
);
8901 clear_vec_high(s
, elements
<< size
== 16, rd
);
8904 /* UCVTF/SCVTF - Integer to FP conversion */
8905 static void handle_simd_shift_intfp_conv(DisasContext
*s
, bool is_scalar
,
8906 bool is_q
, bool is_u
,
8907 int immh
, int immb
, int opcode
,
8910 int size
, elements
, fracbits
;
8911 int immhb
= immh
<< 3 | immb
;
8915 if (!is_scalar
&& !is_q
) {
8916 unallocated_encoding(s
);
8919 } else if (immh
& 4) {
8921 } else if (immh
& 2) {
8923 if (!dc_isar_feature(aa64_fp16
, s
)) {
8924 unallocated_encoding(s
);
8928 /* immh == 0 would be a failure of the decode logic */
8929 g_assert(immh
== 1);
8930 unallocated_encoding(s
);
8937 elements
= (8 << is_q
) >> size
;
8939 fracbits
= (16 << size
) - immhb
;
8941 if (!fp_access_check(s
)) {
8945 handle_simd_intfp_conv(s
, rd
, rn
, elements
, !is_u
, fracbits
, size
);
8948 /* FCVTZS, FVCVTZU - FP to fixedpoint conversion */
8949 static void handle_simd_shift_fpint_conv(DisasContext
*s
, bool is_scalar
,
8950 bool is_q
, bool is_u
,
8951 int immh
, int immb
, int rn
, int rd
)
8953 int immhb
= immh
<< 3 | immb
;
8954 int pass
, size
, fracbits
;
8955 TCGv_ptr tcg_fpstatus
;
8956 TCGv_i32 tcg_rmode
, tcg_shift
;
8960 if (!is_scalar
&& !is_q
) {
8961 unallocated_encoding(s
);
8964 } else if (immh
& 0x4) {
8966 } else if (immh
& 0x2) {
8968 if (!dc_isar_feature(aa64_fp16
, s
)) {
8969 unallocated_encoding(s
);
8973 /* Should have split out AdvSIMD modified immediate earlier. */
8975 unallocated_encoding(s
);
8979 if (!fp_access_check(s
)) {
8983 assert(!(is_scalar
&& is_q
));
8985 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO
));
8986 tcg_fpstatus
= fpstatus_ptr(size
== MO_16
? FPST_FPCR_F16
: FPST_FPCR
);
8987 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, tcg_fpstatus
);
8988 fracbits
= (16 << size
) - immhb
;
8989 tcg_shift
= tcg_constant_i32(fracbits
);
8991 if (size
== MO_64
) {
8992 int maxpass
= is_scalar
? 1 : 2;
8994 for (pass
= 0; pass
< maxpass
; pass
++) {
8995 TCGv_i64 tcg_op
= tcg_temp_new_i64();
8997 read_vec_element(s
, tcg_op
, rn
, pass
, MO_64
);
8999 gen_helper_vfp_touqd(tcg_op
, tcg_op
, tcg_shift
, tcg_fpstatus
);
9001 gen_helper_vfp_tosqd(tcg_op
, tcg_op
, tcg_shift
, tcg_fpstatus
);
9003 write_vec_element(s
, tcg_op
, rd
, pass
, MO_64
);
9004 tcg_temp_free_i64(tcg_op
);
9006 clear_vec_high(s
, is_q
, rd
);
9008 void (*fn
)(TCGv_i32
, TCGv_i32
, TCGv_i32
, TCGv_ptr
);
9009 int maxpass
= is_scalar
? 1 : ((8 << is_q
) >> size
);
9014 fn
= gen_helper_vfp_touhh
;
9016 fn
= gen_helper_vfp_toshh
;
9021 fn
= gen_helper_vfp_touls
;
9023 fn
= gen_helper_vfp_tosls
;
9027 g_assert_not_reached();
9030 for (pass
= 0; pass
< maxpass
; pass
++) {
9031 TCGv_i32 tcg_op
= tcg_temp_new_i32();
9033 read_vec_element_i32(s
, tcg_op
, rn
, pass
, size
);
9034 fn(tcg_op
, tcg_op
, tcg_shift
, tcg_fpstatus
);
9036 write_fp_sreg(s
, rd
, tcg_op
);
9038 write_vec_element_i32(s
, tcg_op
, rd
, pass
, size
);
9040 tcg_temp_free_i32(tcg_op
);
9043 clear_vec_high(s
, is_q
, rd
);
9047 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, tcg_fpstatus
);
9048 tcg_temp_free_ptr(tcg_fpstatus
);
9049 tcg_temp_free_i32(tcg_rmode
);
/* AdvSIMD scalar shift by immediate
 *  31 30  29  28         23 22  19 18  16 15    11 10 9    5 4    0
 * +-----+---+-------------+------+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-------------+------+------+--------+---+------+------+
 *
 * This is the scalar version, so it works on fixed-size registers.
 */
9060 static void disas_simd_scalar_shift_imm(DisasContext
*s
, uint32_t insn
)
9062 int rd
= extract32(insn
, 0, 5);
9063 int rn
= extract32(insn
, 5, 5);
9064 int opcode
= extract32(insn
, 11, 5);
9065 int immb
= extract32(insn
, 16, 3);
9066 int immh
= extract32(insn
, 19, 4);
9067 bool is_u
= extract32(insn
, 29, 1);
9070 unallocated_encoding(s
);
9075 case 0x08: /* SRI */
9077 unallocated_encoding(s
);
9081 case 0x00: /* SSHR / USHR */
9082 case 0x02: /* SSRA / USRA */
9083 case 0x04: /* SRSHR / URSHR */
9084 case 0x06: /* SRSRA / URSRA */
9085 handle_scalar_simd_shri(s
, is_u
, immh
, immb
, opcode
, rn
, rd
);
9087 case 0x0a: /* SHL / SLI */
9088 handle_scalar_simd_shli(s
, is_u
, immh
, immb
, opcode
, rn
, rd
);
9090 case 0x1c: /* SCVTF, UCVTF */
9091 handle_simd_shift_intfp_conv(s
, true, false, is_u
, immh
, immb
,
9094 case 0x10: /* SQSHRUN, SQSHRUN2 */
9095 case 0x11: /* SQRSHRUN, SQRSHRUN2 */
9097 unallocated_encoding(s
);
9100 handle_vec_simd_sqshrn(s
, true, false, false, true,
9101 immh
, immb
, opcode
, rn
, rd
);
9103 case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
9104 case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
9105 handle_vec_simd_sqshrn(s
, true, false, is_u
, is_u
,
9106 immh
, immb
, opcode
, rn
, rd
);
9108 case 0xc: /* SQSHLU */
9110 unallocated_encoding(s
);
9113 handle_simd_qshl(s
, true, false, false, true, immh
, immb
, rn
, rd
);
9115 case 0xe: /* SQSHL, UQSHL */
9116 handle_simd_qshl(s
, true, false, is_u
, is_u
, immh
, immb
, rn
, rd
);
9118 case 0x1f: /* FCVTZS, FCVTZU */
9119 handle_simd_shift_fpint_conv(s
, true, false, is_u
, immh
, immb
, rn
, rd
);
9122 unallocated_encoding(s
);
/* AdvSIMD scalar three different
 *  31 30  29  28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 */
9133 static void disas_simd_scalar_three_reg_diff(DisasContext
*s
, uint32_t insn
)
9135 bool is_u
= extract32(insn
, 29, 1);
9136 int size
= extract32(insn
, 22, 2);
9137 int opcode
= extract32(insn
, 12, 4);
9138 int rm
= extract32(insn
, 16, 5);
9139 int rn
= extract32(insn
, 5, 5);
9140 int rd
= extract32(insn
, 0, 5);
9143 unallocated_encoding(s
);
9148 case 0x9: /* SQDMLAL, SQDMLAL2 */
9149 case 0xb: /* SQDMLSL, SQDMLSL2 */
9150 case 0xd: /* SQDMULL, SQDMULL2 */
9151 if (size
== 0 || size
== 3) {
9152 unallocated_encoding(s
);
9157 unallocated_encoding(s
);
9161 if (!fp_access_check(s
)) {
9166 TCGv_i64 tcg_op1
= tcg_temp_new_i64();
9167 TCGv_i64 tcg_op2
= tcg_temp_new_i64();
9168 TCGv_i64 tcg_res
= tcg_temp_new_i64();
9170 read_vec_element(s
, tcg_op1
, rn
, 0, MO_32
| MO_SIGN
);
9171 read_vec_element(s
, tcg_op2
, rm
, 0, MO_32
| MO_SIGN
);
9173 tcg_gen_mul_i64(tcg_res
, tcg_op1
, tcg_op2
);
9174 gen_helper_neon_addl_saturate_s64(tcg_res
, cpu_env
, tcg_res
, tcg_res
);
9177 case 0xd: /* SQDMULL, SQDMULL2 */
9179 case 0xb: /* SQDMLSL, SQDMLSL2 */
9180 tcg_gen_neg_i64(tcg_res
, tcg_res
);
9182 case 0x9: /* SQDMLAL, SQDMLAL2 */
9183 read_vec_element(s
, tcg_op1
, rd
, 0, MO_64
);
9184 gen_helper_neon_addl_saturate_s64(tcg_res
, cpu_env
,
9188 g_assert_not_reached();
9191 write_fp_dreg(s
, rd
, tcg_res
);
9193 tcg_temp_free_i64(tcg_op1
);
9194 tcg_temp_free_i64(tcg_op2
);
9195 tcg_temp_free_i64(tcg_res
);
9197 TCGv_i32 tcg_op1
= read_fp_hreg(s
, rn
);
9198 TCGv_i32 tcg_op2
= read_fp_hreg(s
, rm
);
9199 TCGv_i64 tcg_res
= tcg_temp_new_i64();
9201 gen_helper_neon_mull_s16(tcg_res
, tcg_op1
, tcg_op2
);
9202 gen_helper_neon_addl_saturate_s32(tcg_res
, cpu_env
, tcg_res
, tcg_res
);
9205 case 0xd: /* SQDMULL, SQDMULL2 */
9207 case 0xb: /* SQDMLSL, SQDMLSL2 */
9208 gen_helper_neon_negl_u32(tcg_res
, tcg_res
);
9210 case 0x9: /* SQDMLAL, SQDMLAL2 */
9212 TCGv_i64 tcg_op3
= tcg_temp_new_i64();
9213 read_vec_element(s
, tcg_op3
, rd
, 0, MO_32
);
9214 gen_helper_neon_addl_saturate_s32(tcg_res
, cpu_env
,
9216 tcg_temp_free_i64(tcg_op3
);
9220 g_assert_not_reached();
9223 tcg_gen_ext32u_i64(tcg_res
, tcg_res
);
9224 write_fp_dreg(s
, rd
, tcg_res
);
9226 tcg_temp_free_i32(tcg_op1
);
9227 tcg_temp_free_i32(tcg_op2
);
9228 tcg_temp_free_i64(tcg_res
);
static void handle_3same_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
    /* Handle 64x64->64 opcodes which are shared between the scalar
     * and vector 3-same groups. We cover every opcode where size == 3
     * is valid in either the three-reg-same (integer, not pairwise)
     * or scalar-three-reg-same groups.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x1: /* SQADD */
        if (u) {
            gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x5: /* SQSUB */
        if (u) {
            gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x6: /* CMGT, CMHI */
        /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
         * We implement this using setcond (test) and then negating.
         */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
    do_cmop:
        tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x7: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
        goto do_cmop;
    case 0x11: /* CMTST, CMEQ */
        if (u) { /* CMEQ */
            cond = TCG_COND_EQ;
            goto do_cmop;
        }
        gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 0x8: /* SSHL, USHL */
        if (u) {
            gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0x9: /* SQSHL, UQSHL */
        if (u) {
            gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0xa: /* SRSHL, URSHL */
        if (u) {
            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0xb: /* SQRSHL, UQRSHL */
        if (u) {
            gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x10: /* ADD, SUB */
        if (u) {
            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
/* Handle the 3-same-operands float operations; shared by the scalar
 * and vector encodings. The caller must filter out any encodings
 * not allocated for the encoding it is dealing with.
 */
static void handle_3same_float(DisasContext *s, int size, int elements,
                               int fpopcode, int rd, int rn, int rm)
{
9324 TCGv_ptr fpst
= fpstatus_ptr(FPST_FPCR
);
9326 for (pass
= 0; pass
< elements
; pass
++) {
9329 TCGv_i64 tcg_op1
= tcg_temp_new_i64();
9330 TCGv_i64 tcg_op2
= tcg_temp_new_i64();
9331 TCGv_i64 tcg_res
= tcg_temp_new_i64();
9333 read_vec_element(s
, tcg_op1
, rn
, pass
, MO_64
);
9334 read_vec_element(s
, tcg_op2
, rm
, pass
, MO_64
);
9337 case 0x39: /* FMLS */
9338 /* As usual for ARM, separate negation for fused multiply-add */
9339 gen_helper_vfp_negd(tcg_op1
, tcg_op1
);
9341 case 0x19: /* FMLA */
9342 read_vec_element(s
, tcg_res
, rd
, pass
, MO_64
);
9343 gen_helper_vfp_muladdd(tcg_res
, tcg_op1
, tcg_op2
,
9346 case 0x18: /* FMAXNM */
9347 gen_helper_vfp_maxnumd(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9349 case 0x1a: /* FADD */
9350 gen_helper_vfp_addd(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9352 case 0x1b: /* FMULX */
9353 gen_helper_vfp_mulxd(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9355 case 0x1c: /* FCMEQ */
9356 gen_helper_neon_ceq_f64(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9358 case 0x1e: /* FMAX */
9359 gen_helper_vfp_maxd(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9361 case 0x1f: /* FRECPS */
9362 gen_helper_recpsf_f64(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9364 case 0x38: /* FMINNM */
9365 gen_helper_vfp_minnumd(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9367 case 0x3a: /* FSUB */
9368 gen_helper_vfp_subd(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9370 case 0x3e: /* FMIN */
9371 gen_helper_vfp_mind(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9373 case 0x3f: /* FRSQRTS */
9374 gen_helper_rsqrtsf_f64(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9376 case 0x5b: /* FMUL */
9377 gen_helper_vfp_muld(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9379 case 0x5c: /* FCMGE */
9380 gen_helper_neon_cge_f64(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9382 case 0x5d: /* FACGE */
9383 gen_helper_neon_acge_f64(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9385 case 0x5f: /* FDIV */
9386 gen_helper_vfp_divd(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9388 case 0x7a: /* FABD */
9389 gen_helper_vfp_subd(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9390 gen_helper_vfp_absd(tcg_res
, tcg_res
);
9392 case 0x7c: /* FCMGT */
9393 gen_helper_neon_cgt_f64(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9395 case 0x7d: /* FACGT */
9396 gen_helper_neon_acgt_f64(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9399 g_assert_not_reached();
9402 write_vec_element(s
, tcg_res
, rd
, pass
, MO_64
);
9404 tcg_temp_free_i64(tcg_res
);
9405 tcg_temp_free_i64(tcg_op1
);
9406 tcg_temp_free_i64(tcg_op2
);
9409 TCGv_i32 tcg_op1
= tcg_temp_new_i32();
9410 TCGv_i32 tcg_op2
= tcg_temp_new_i32();
9411 TCGv_i32 tcg_res
= tcg_temp_new_i32();
9413 read_vec_element_i32(s
, tcg_op1
, rn
, pass
, MO_32
);
9414 read_vec_element_i32(s
, tcg_op2
, rm
, pass
, MO_32
);
9417 case 0x39: /* FMLS */
9418 /* As usual for ARM, separate negation for fused multiply-add */
9419 gen_helper_vfp_negs(tcg_op1
, tcg_op1
);
9421 case 0x19: /* FMLA */
9422 read_vec_element_i32(s
, tcg_res
, rd
, pass
, MO_32
);
9423 gen_helper_vfp_muladds(tcg_res
, tcg_op1
, tcg_op2
,
9426 case 0x1a: /* FADD */
9427 gen_helper_vfp_adds(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9429 case 0x1b: /* FMULX */
9430 gen_helper_vfp_mulxs(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9432 case 0x1c: /* FCMEQ */
9433 gen_helper_neon_ceq_f32(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9435 case 0x1e: /* FMAX */
9436 gen_helper_vfp_maxs(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9438 case 0x1f: /* FRECPS */
9439 gen_helper_recpsf_f32(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9441 case 0x18: /* FMAXNM */
9442 gen_helper_vfp_maxnums(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9444 case 0x38: /* FMINNM */
9445 gen_helper_vfp_minnums(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9447 case 0x3a: /* FSUB */
9448 gen_helper_vfp_subs(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9450 case 0x3e: /* FMIN */
9451 gen_helper_vfp_mins(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9453 case 0x3f: /* FRSQRTS */
9454 gen_helper_rsqrtsf_f32(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9456 case 0x5b: /* FMUL */
9457 gen_helper_vfp_muls(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9459 case 0x5c: /* FCMGE */
9460 gen_helper_neon_cge_f32(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9462 case 0x5d: /* FACGE */
9463 gen_helper_neon_acge_f32(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9465 case 0x5f: /* FDIV */
9466 gen_helper_vfp_divs(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9468 case 0x7a: /* FABD */
9469 gen_helper_vfp_subs(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9470 gen_helper_vfp_abss(tcg_res
, tcg_res
);
9472 case 0x7c: /* FCMGT */
9473 gen_helper_neon_cgt_f32(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9475 case 0x7d: /* FACGT */
9476 gen_helper_neon_acgt_f32(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9479 g_assert_not_reached();
9482 if (elements
== 1) {
9483 /* scalar single so clear high part */
9484 TCGv_i64 tcg_tmp
= tcg_temp_new_i64();
9486 tcg_gen_extu_i32_i64(tcg_tmp
, tcg_res
);
9487 write_vec_element(s
, tcg_tmp
, rd
, pass
, MO_64
);
9488 tcg_temp_free_i64(tcg_tmp
);
9490 write_vec_element_i32(s
, tcg_res
, rd
, pass
, MO_32
);
9493 tcg_temp_free_i32(tcg_res
);
9494 tcg_temp_free_i32(tcg_op1
);
9495 tcg_temp_free_i32(tcg_op2
);
9499 tcg_temp_free_ptr(fpst
);
9501 clear_vec_high(s
, elements
* (size
? 8 : 4) > 8, rd
);
/* AdvSIMD scalar three same
 *  31 30  29 28       24 23  22  21 20  16 15    11  10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+---+------+------+
 */
static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
{
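    /* Informal decode note: for the floating-point subset handled below
     * (opcode >= 0x18), the combined fpopcode is opcode | size[1] << 5 | U << 6,
     * so e.g. FABD (fpopcode 0x7a) is U = 1, size[1] = 1, opcode = 0x1a.
     */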
9512 int rd
= extract32(insn
, 0, 5);
9513 int rn
= extract32(insn
, 5, 5);
9514 int opcode
= extract32(insn
, 11, 5);
9515 int rm
= extract32(insn
, 16, 5);
9516 int size
= extract32(insn
, 22, 2);
9517 bool u
= extract32(insn
, 29, 1);
9520 if (opcode
>= 0x18) {
9521 /* Floating point: U, size[1] and opcode indicate operation */
9522 int fpopcode
= opcode
| (extract32(size
, 1, 1) << 5) | (u
<< 6);
9524 case 0x1b: /* FMULX */
9525 case 0x1f: /* FRECPS */
9526 case 0x3f: /* FRSQRTS */
9527 case 0x5d: /* FACGE */
9528 case 0x7d: /* FACGT */
9529 case 0x1c: /* FCMEQ */
9530 case 0x5c: /* FCMGE */
9531 case 0x7c: /* FCMGT */
9532 case 0x7a: /* FABD */
9535 unallocated_encoding(s
);
9539 if (!fp_access_check(s
)) {
9543 handle_3same_float(s
, extract32(size
, 0, 1), 1, fpopcode
, rd
, rn
, rm
);
9548 case 0x1: /* SQADD, UQADD */
9549 case 0x5: /* SQSUB, UQSUB */
9550 case 0x9: /* SQSHL, UQSHL */
9551 case 0xb: /* SQRSHL, UQRSHL */
9553 case 0x8: /* SSHL, USHL */
9554 case 0xa: /* SRSHL, URSHL */
9555 case 0x6: /* CMGT, CMHI */
9556 case 0x7: /* CMGE, CMHS */
9557 case 0x11: /* CMTST, CMEQ */
9558 case 0x10: /* ADD, SUB (vector) */
9560 unallocated_encoding(s
);
9564 case 0x16: /* SQDMULH, SQRDMULH (vector) */
9565 if (size
!= 1 && size
!= 2) {
9566 unallocated_encoding(s
);
9571 unallocated_encoding(s
);
9575 if (!fp_access_check(s
)) {
9579 tcg_rd
= tcg_temp_new_i64();
9582 TCGv_i64 tcg_rn
= read_fp_dreg(s
, rn
);
9583 TCGv_i64 tcg_rm
= read_fp_dreg(s
, rm
);
9585 handle_3same_64(s
, opcode
, u
, tcg_rd
, tcg_rn
, tcg_rm
);
9586 tcg_temp_free_i64(tcg_rn
);
9587 tcg_temp_free_i64(tcg_rm
);
9589 /* Do a single operation on the lowest element in the vector.
9590 * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
9591 * no side effects for all these operations.
9592 * OPTME: special-purpose helpers would avoid doing some
9593 * unnecessary work in the helper for the 8 and 16 bit cases.
9595 NeonGenTwoOpEnvFn
*genenvfn
;
9596 TCGv_i32 tcg_rn
= tcg_temp_new_i32();
9597 TCGv_i32 tcg_rm
= tcg_temp_new_i32();
9598 TCGv_i32 tcg_rd32
= tcg_temp_new_i32();
9600 read_vec_element_i32(s
, tcg_rn
, rn
, 0, size
);
9601 read_vec_element_i32(s
, tcg_rm
, rm
, 0, size
);
9604 case 0x1: /* SQADD, UQADD */
9606 static NeonGenTwoOpEnvFn
* const fns
[3][2] = {
9607 { gen_helper_neon_qadd_s8
, gen_helper_neon_qadd_u8
},
9608 { gen_helper_neon_qadd_s16
, gen_helper_neon_qadd_u16
},
9609 { gen_helper_neon_qadd_s32
, gen_helper_neon_qadd_u32
},
9611 genenvfn
= fns
[size
][u
];
9614 case 0x5: /* SQSUB, UQSUB */
9616 static NeonGenTwoOpEnvFn
* const fns
[3][2] = {
9617 { gen_helper_neon_qsub_s8
, gen_helper_neon_qsub_u8
},
9618 { gen_helper_neon_qsub_s16
, gen_helper_neon_qsub_u16
},
9619 { gen_helper_neon_qsub_s32
, gen_helper_neon_qsub_u32
},
9621 genenvfn
= fns
[size
][u
];
9624 case 0x9: /* SQSHL, UQSHL */
9626 static NeonGenTwoOpEnvFn
* const fns
[3][2] = {
9627 { gen_helper_neon_qshl_s8
, gen_helper_neon_qshl_u8
},
9628 { gen_helper_neon_qshl_s16
, gen_helper_neon_qshl_u16
},
9629 { gen_helper_neon_qshl_s32
, gen_helper_neon_qshl_u32
},
9631 genenvfn
= fns
[size
][u
];
9634 case 0xb: /* SQRSHL, UQRSHL */
9636 static NeonGenTwoOpEnvFn
* const fns
[3][2] = {
9637 { gen_helper_neon_qrshl_s8
, gen_helper_neon_qrshl_u8
},
9638 { gen_helper_neon_qrshl_s16
, gen_helper_neon_qrshl_u16
},
9639 { gen_helper_neon_qrshl_s32
, gen_helper_neon_qrshl_u32
},
9641 genenvfn
= fns
[size
][u
];
9644 case 0x16: /* SQDMULH, SQRDMULH */
9646 static NeonGenTwoOpEnvFn
* const fns
[2][2] = {
9647 { gen_helper_neon_qdmulh_s16
, gen_helper_neon_qrdmulh_s16
},
9648 { gen_helper_neon_qdmulh_s32
, gen_helper_neon_qrdmulh_s32
},
9650 assert(size
== 1 || size
== 2);
9651 genenvfn
= fns
[size
- 1][u
];
9655 g_assert_not_reached();
9658 genenvfn(tcg_rd32
, cpu_env
, tcg_rn
, tcg_rm
);
9659 tcg_gen_extu_i32_i64(tcg_rd
, tcg_rd32
);
9660 tcg_temp_free_i32(tcg_rd32
);
9661 tcg_temp_free_i32(tcg_rn
);
9662 tcg_temp_free_i32(tcg_rm
);
9665 write_fp_dreg(s
, rd
, tcg_rd
);
9667 tcg_temp_free_i64(tcg_rd
);
/* AdvSIMD scalar three same FP16
 *  31 30  29 28       24 23 22 21 20  16 15 14 13    11 10  9  5 4  0
 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
 * | 0 1 | U | 1 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 | Rn | Rd |
 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
 * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400
 * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400
 */
static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
                                                  uint32_t insn)
{
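    /* Informal note: the 5-bit fpopcode switched on below is assembled as
     * opcode | a << 3 | U << 4, e.g. FABD (0x1a) corresponds to
     * U = 1, a = 1, opcode = 0b010.
     */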
9681 int rd
= extract32(insn
, 0, 5);
9682 int rn
= extract32(insn
, 5, 5);
9683 int opcode
= extract32(insn
, 11, 3);
9684 int rm
= extract32(insn
, 16, 5);
9685 bool u
= extract32(insn
, 29, 1);
9686 bool a
= extract32(insn
, 23, 1);
9687 int fpopcode
= opcode
| (a
<< 3) | (u
<< 4);
9694 case 0x03: /* FMULX */
9695 case 0x04: /* FCMEQ (reg) */
9696 case 0x07: /* FRECPS */
9697 case 0x0f: /* FRSQRTS */
9698 case 0x14: /* FCMGE (reg) */
9699 case 0x15: /* FACGE */
9700 case 0x1a: /* FABD */
9701 case 0x1c: /* FCMGT (reg) */
9702 case 0x1d: /* FACGT */
9705 unallocated_encoding(s
);
9709 if (!dc_isar_feature(aa64_fp16
, s
)) {
9710 unallocated_encoding(s
);
9713 if (!fp_access_check(s
)) {
9717 fpst
= fpstatus_ptr(FPST_FPCR_F16
);
9719 tcg_op1
= read_fp_hreg(s
, rn
);
9720 tcg_op2
= read_fp_hreg(s
, rm
);
9721 tcg_res
= tcg_temp_new_i32();
9724 case 0x03: /* FMULX */
9725 gen_helper_advsimd_mulxh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9727 case 0x04: /* FCMEQ (reg) */
9728 gen_helper_advsimd_ceq_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9730 case 0x07: /* FRECPS */
9731 gen_helper_recpsf_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9733 case 0x0f: /* FRSQRTS */
9734 gen_helper_rsqrtsf_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9736 case 0x14: /* FCMGE (reg) */
9737 gen_helper_advsimd_cge_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9739 case 0x15: /* FACGE */
9740 gen_helper_advsimd_acge_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9742 case 0x1a: /* FABD */
9743 gen_helper_advsimd_subh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9744 tcg_gen_andi_i32(tcg_res
, tcg_res
, 0x7fff);
9746 case 0x1c: /* FCMGT (reg) */
9747 gen_helper_advsimd_cgt_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9749 case 0x1d: /* FACGT */
9750 gen_helper_advsimd_acgt_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
9753 g_assert_not_reached();
9756 write_fp_sreg(s
, rd
, tcg_res
);
9759 tcg_temp_free_i32(tcg_res
);
9760 tcg_temp_free_i32(tcg_op1
);
9761 tcg_temp_free_i32(tcg_op2
);
9762 tcg_temp_free_ptr(fpst
);
/* AdvSIMD scalar three same extra
 *  31 30  29 28       24 23  22  21 20  16  15 14    11  10 9  5 4  0
 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
 * | 0 1 | U | 1 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
 */
static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
                                                   uint32_t insn)
{
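    /* Informal note: this "extra" group currently decodes only
     * SQRDMLAH/SQRDMLSH (see the switch below), which are additionally
     * gated on dc_isar_feature(aa64_rdm, s).
     */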
9774 int rd
= extract32(insn
, 0, 5);
9775 int rn
= extract32(insn
, 5, 5);
9776 int opcode
= extract32(insn
, 11, 4);
9777 int rm
= extract32(insn
, 16, 5);
9778 int size
= extract32(insn
, 22, 2);
9779 bool u
= extract32(insn
, 29, 1);
9780 TCGv_i32 ele1
, ele2
, ele3
;
9784 switch (u
* 16 + opcode
) {
9785 case 0x10: /* SQRDMLAH (vector) */
9786 case 0x11: /* SQRDMLSH (vector) */
9787 if (size
!= 1 && size
!= 2) {
9788 unallocated_encoding(s
);
9791 feature
= dc_isar_feature(aa64_rdm
, s
);
9794 unallocated_encoding(s
);
9798 unallocated_encoding(s
);
9801 if (!fp_access_check(s
)) {
9805 /* Do a single operation on the lowest element in the vector.
9806 * We use the standard Neon helpers and rely on 0 OP 0 == 0
9807 * with no side effects for all these operations.
9808 * OPTME: special-purpose helpers would avoid doing some
9809 * unnecessary work in the helper for the 16 bit cases.
9811 ele1
= tcg_temp_new_i32();
9812 ele2
= tcg_temp_new_i32();
9813 ele3
= tcg_temp_new_i32();
9815 read_vec_element_i32(s
, ele1
, rn
, 0, size
);
9816 read_vec_element_i32(s
, ele2
, rm
, 0, size
);
9817 read_vec_element_i32(s
, ele3
, rd
, 0, size
);
9820 case 0x0: /* SQRDMLAH */
9822 gen_helper_neon_qrdmlah_s16(ele3
, cpu_env
, ele1
, ele2
, ele3
);
9824 gen_helper_neon_qrdmlah_s32(ele3
, cpu_env
, ele1
, ele2
, ele3
);
9827 case 0x1: /* SQRDMLSH */
9829 gen_helper_neon_qrdmlsh_s16(ele3
, cpu_env
, ele1
, ele2
, ele3
);
9831 gen_helper_neon_qrdmlsh_s32(ele3
, cpu_env
, ele1
, ele2
, ele3
);
9835 g_assert_not_reached();
9837 tcg_temp_free_i32(ele1
);
9838 tcg_temp_free_i32(ele2
);
9840 res
= tcg_temp_new_i64();
9841 tcg_gen_extu_i32_i64(res
, ele3
);
9842 tcg_temp_free_i32(ele3
);
9844 write_fp_dreg(s
, rd
, res
);
9845 tcg_temp_free_i64(res
);
static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
    /* Handle 64->64 opcodes which are shared between the scalar and
     * vector 2-reg-misc groups. We cover every integer opcode where size == 3
     * is valid in either group and also the double-precision fp ops.
     * The caller need only provide tcg_rmode and tcg_fpstatus if the op
     * requires them.
     */
    TCGCond cond;

    switch (opcode) {
9861 case 0x4: /* CLS, CLZ */
9863 tcg_gen_clzi_i64(tcg_rd
, tcg_rn
, 64);
9865 tcg_gen_clrsb_i64(tcg_rd
, tcg_rn
);
9869 /* This opcode is shared with CNT and RBIT but we have earlier
9870 * enforced that size == 3 if and only if this is the NOT insn.
9872 tcg_gen_not_i64(tcg_rd
, tcg_rn
);
9874 case 0x7: /* SQABS, SQNEG */
9876 gen_helper_neon_qneg_s64(tcg_rd
, cpu_env
, tcg_rn
);
9878 gen_helper_neon_qabs_s64(tcg_rd
, cpu_env
, tcg_rn
);
9881 case 0xa: /* CMLT */
9882 /* 64 bit integer comparison against zero, result is
9883 * test ? (2^64 - 1) : 0. We implement via setcond(!test) and
9888 tcg_gen_setcondi_i64(cond
, tcg_rd
, tcg_rn
, 0);
9889 tcg_gen_neg_i64(tcg_rd
, tcg_rd
);
9891 case 0x8: /* CMGT, CMGE */
9892 cond
= u
? TCG_COND_GE
: TCG_COND_GT
;
9894 case 0x9: /* CMEQ, CMLE */
9895 cond
= u
? TCG_COND_LE
: TCG_COND_EQ
;
9897 case 0xb: /* ABS, NEG */
9899 tcg_gen_neg_i64(tcg_rd
, tcg_rn
);
9901 tcg_gen_abs_i64(tcg_rd
, tcg_rn
);
9904 case 0x2f: /* FABS */
9905 gen_helper_vfp_absd(tcg_rd
, tcg_rn
);
9907 case 0x6f: /* FNEG */
9908 gen_helper_vfp_negd(tcg_rd
, tcg_rn
);
9910 case 0x7f: /* FSQRT */
9911 gen_helper_vfp_sqrtd(tcg_rd
, tcg_rn
, cpu_env
);
9913 case 0x1a: /* FCVTNS */
9914 case 0x1b: /* FCVTMS */
9915 case 0x1c: /* FCVTAS */
9916 case 0x3a: /* FCVTPS */
9917 case 0x3b: /* FCVTZS */
9918 gen_helper_vfp_tosqd(tcg_rd
, tcg_rn
, tcg_constant_i32(0), tcg_fpstatus
);
9920 case 0x5a: /* FCVTNU */
9921 case 0x5b: /* FCVTMU */
9922 case 0x5c: /* FCVTAU */
9923 case 0x7a: /* FCVTPU */
9924 case 0x7b: /* FCVTZU */
9925 gen_helper_vfp_touqd(tcg_rd
, tcg_rn
, tcg_constant_i32(0), tcg_fpstatus
);
9927 case 0x18: /* FRINTN */
9928 case 0x19: /* FRINTM */
9929 case 0x38: /* FRINTP */
9930 case 0x39: /* FRINTZ */
9931 case 0x58: /* FRINTA */
9932 case 0x79: /* FRINTI */
9933 gen_helper_rintd(tcg_rd
, tcg_rn
, tcg_fpstatus
);
9935 case 0x59: /* FRINTX */
9936 gen_helper_rintd_exact(tcg_rd
, tcg_rn
, tcg_fpstatus
);
9938 case 0x1e: /* FRINT32Z */
9939 case 0x5e: /* FRINT32X */
9940 gen_helper_frint32_d(tcg_rd
, tcg_rn
, tcg_fpstatus
);
9942 case 0x1f: /* FRINT64Z */
9943 case 0x5f: /* FRINT64X */
9944 gen_helper_frint64_d(tcg_rd
, tcg_rn
, tcg_fpstatus
);
9947 g_assert_not_reached();
9951 static void handle_2misc_fcmp_zero(DisasContext
*s
, int opcode
,
9952 bool is_scalar
, bool is_u
, bool is_q
,
9953 int size
, int rn
, int rd
)
9955 bool is_double
= (size
== MO_64
);
9958 if (!fp_access_check(s
)) {
9962 fpst
= fpstatus_ptr(size
== MO_16
? FPST_FPCR_F16
: FPST_FPCR
);
9965 TCGv_i64 tcg_op
= tcg_temp_new_i64();
9966 TCGv_i64 tcg_zero
= tcg_constant_i64(0);
9967 TCGv_i64 tcg_res
= tcg_temp_new_i64();
9968 NeonGenTwoDoubleOpFn
*genfn
;
9973 case 0x2e: /* FCMLT (zero) */
9976 case 0x2c: /* FCMGT (zero) */
9977 genfn
= gen_helper_neon_cgt_f64
;
9979 case 0x2d: /* FCMEQ (zero) */
9980 genfn
= gen_helper_neon_ceq_f64
;
9982 case 0x6d: /* FCMLE (zero) */
9985 case 0x6c: /* FCMGE (zero) */
9986 genfn
= gen_helper_neon_cge_f64
;
9989 g_assert_not_reached();
9992 for (pass
= 0; pass
< (is_scalar
? 1 : 2); pass
++) {
9993 read_vec_element(s
, tcg_op
, rn
, pass
, MO_64
);
9995 genfn(tcg_res
, tcg_zero
, tcg_op
, fpst
);
9997 genfn(tcg_res
, tcg_op
, tcg_zero
, fpst
);
9999 write_vec_element(s
, tcg_res
, rd
, pass
, MO_64
);
10001 tcg_temp_free_i64(tcg_res
);
10002 tcg_temp_free_i64(tcg_op
);
10004 clear_vec_high(s
, !is_scalar
, rd
);
10006 TCGv_i32 tcg_op
= tcg_temp_new_i32();
10007 TCGv_i32 tcg_zero
= tcg_constant_i32(0);
10008 TCGv_i32 tcg_res
= tcg_temp_new_i32();
10009 NeonGenTwoSingleOpFn
*genfn
;
10011 int pass
, maxpasses
;
10013 if (size
== MO_16
) {
10015 case 0x2e: /* FCMLT (zero) */
10018 case 0x2c: /* FCMGT (zero) */
10019 genfn
= gen_helper_advsimd_cgt_f16
;
10021 case 0x2d: /* FCMEQ (zero) */
10022 genfn
= gen_helper_advsimd_ceq_f16
;
10024 case 0x6d: /* FCMLE (zero) */
10027 case 0x6c: /* FCMGE (zero) */
10028 genfn
= gen_helper_advsimd_cge_f16
;
10031 g_assert_not_reached();
10035 case 0x2e: /* FCMLT (zero) */
10038 case 0x2c: /* FCMGT (zero) */
10039 genfn
= gen_helper_neon_cgt_f32
;
10041 case 0x2d: /* FCMEQ (zero) */
10042 genfn
= gen_helper_neon_ceq_f32
;
10044 case 0x6d: /* FCMLE (zero) */
10047 case 0x6c: /* FCMGE (zero) */
10048 genfn
= gen_helper_neon_cge_f32
;
10051 g_assert_not_reached();
10058 int vector_size
= 8 << is_q
;
10059 maxpasses
= vector_size
>> size
;
10062 for (pass
= 0; pass
< maxpasses
; pass
++) {
10063 read_vec_element_i32(s
, tcg_op
, rn
, pass
, size
);
10065 genfn(tcg_res
, tcg_zero
, tcg_op
, fpst
);
10067 genfn(tcg_res
, tcg_op
, tcg_zero
, fpst
);
10070 write_fp_sreg(s
, rd
, tcg_res
);
10072 write_vec_element_i32(s
, tcg_res
, rd
, pass
, size
);
10075 tcg_temp_free_i32(tcg_res
);
10076 tcg_temp_free_i32(tcg_op
);
10078 clear_vec_high(s
, is_q
, rd
);
10082 tcg_temp_free_ptr(fpst
);
10085 static void handle_2misc_reciprocal(DisasContext
*s
, int opcode
,
10086 bool is_scalar
, bool is_u
, bool is_q
,
10087 int size
, int rn
, int rd
)
10089 bool is_double
= (size
== 3);
10090 TCGv_ptr fpst
= fpstatus_ptr(FPST_FPCR
);
10093 TCGv_i64 tcg_op
= tcg_temp_new_i64();
10094 TCGv_i64 tcg_res
= tcg_temp_new_i64();
10097 for (pass
= 0; pass
< (is_scalar
? 1 : 2); pass
++) {
10098 read_vec_element(s
, tcg_op
, rn
, pass
, MO_64
);
10100 case 0x3d: /* FRECPE */
10101 gen_helper_recpe_f64(tcg_res
, tcg_op
, fpst
);
10103 case 0x3f: /* FRECPX */
10104 gen_helper_frecpx_f64(tcg_res
, tcg_op
, fpst
);
10106 case 0x7d: /* FRSQRTE */
10107 gen_helper_rsqrte_f64(tcg_res
, tcg_op
, fpst
);
10110 g_assert_not_reached();
10112 write_vec_element(s
, tcg_res
, rd
, pass
, MO_64
);
10114 tcg_temp_free_i64(tcg_res
);
10115 tcg_temp_free_i64(tcg_op
);
10116 clear_vec_high(s
, !is_scalar
, rd
);
10118 TCGv_i32 tcg_op
= tcg_temp_new_i32();
10119 TCGv_i32 tcg_res
= tcg_temp_new_i32();
10120 int pass
, maxpasses
;
10125 maxpasses
= is_q
? 4 : 2;
10128 for (pass
= 0; pass
< maxpasses
; pass
++) {
10129 read_vec_element_i32(s
, tcg_op
, rn
, pass
, MO_32
);
10132 case 0x3c: /* URECPE */
10133 gen_helper_recpe_u32(tcg_res
, tcg_op
);
10135 case 0x3d: /* FRECPE */
10136 gen_helper_recpe_f32(tcg_res
, tcg_op
, fpst
);
10138 case 0x3f: /* FRECPX */
10139 gen_helper_frecpx_f32(tcg_res
, tcg_op
, fpst
);
10141 case 0x7d: /* FRSQRTE */
10142 gen_helper_rsqrte_f32(tcg_res
, tcg_op
, fpst
);
10145 g_assert_not_reached();
10149 write_fp_sreg(s
, rd
, tcg_res
);
10151 write_vec_element_i32(s
, tcg_res
, rd
, pass
, MO_32
);
10154 tcg_temp_free_i32(tcg_res
);
10155 tcg_temp_free_i32(tcg_op
);
10157 clear_vec_high(s
, is_q
, rd
);
10160 tcg_temp_free_ptr(fpst
);
static void handle_2misc_narrow(DisasContext *s, bool scalar,
                                int opcode, bool u, bool is_q,
                                int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
     * in the source becomes a size element in the destination).
     */
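    /* Informal example: for XTN with size == 1, each 32-bit source element
     * is narrowed to a 16-bit result; the vector form runs two 64-bit passes
     * and writes the results to the low half of Vd, or to the high half for
     * the "2" variant (destelt below is 2 when is_q is set).
     */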
10171 TCGv_i32 tcg_res
[2];
10172 int destelt
= is_q
? 2 : 0;
10173 int passes
= scalar
? 1 : 2;
10176 tcg_res
[1] = tcg_constant_i32(0);
10179 for (pass
= 0; pass
< passes
; pass
++) {
10180 TCGv_i64 tcg_op
= tcg_temp_new_i64();
10181 NeonGenNarrowFn
*genfn
= NULL
;
10182 NeonGenNarrowEnvFn
*genenvfn
= NULL
;
10185 read_vec_element(s
, tcg_op
, rn
, pass
, size
+ 1);
10187 read_vec_element(s
, tcg_op
, rn
, pass
, MO_64
);
10189 tcg_res
[pass
] = tcg_temp_new_i32();
10192 case 0x12: /* XTN, SQXTUN */
10194 static NeonGenNarrowFn
* const xtnfns
[3] = {
10195 gen_helper_neon_narrow_u8
,
10196 gen_helper_neon_narrow_u16
,
10197 tcg_gen_extrl_i64_i32
,
10199 static NeonGenNarrowEnvFn
* const sqxtunfns
[3] = {
10200 gen_helper_neon_unarrow_sat8
,
10201 gen_helper_neon_unarrow_sat16
,
10202 gen_helper_neon_unarrow_sat32
,
10205 genenvfn
= sqxtunfns
[size
];
10207 genfn
= xtnfns
[size
];
10211 case 0x14: /* SQXTN, UQXTN */
10213 static NeonGenNarrowEnvFn
* const fns
[3][2] = {
10214 { gen_helper_neon_narrow_sat_s8
,
10215 gen_helper_neon_narrow_sat_u8
},
10216 { gen_helper_neon_narrow_sat_s16
,
10217 gen_helper_neon_narrow_sat_u16
},
10218 { gen_helper_neon_narrow_sat_s32
,
10219 gen_helper_neon_narrow_sat_u32
},
10221 genenvfn
= fns
[size
][u
];
10224 case 0x16: /* FCVTN, FCVTN2 */
10225 /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
10227 gen_helper_vfp_fcvtsd(tcg_res
[pass
], tcg_op
, cpu_env
);
10229 TCGv_i32 tcg_lo
= tcg_temp_new_i32();
10230 TCGv_i32 tcg_hi
= tcg_temp_new_i32();
10231 TCGv_ptr fpst
= fpstatus_ptr(FPST_FPCR
);
10232 TCGv_i32 ahp
= get_ahp_flag();
10234 tcg_gen_extr_i64_i32(tcg_lo
, tcg_hi
, tcg_op
);
10235 gen_helper_vfp_fcvt_f32_to_f16(tcg_lo
, tcg_lo
, fpst
, ahp
);
10236 gen_helper_vfp_fcvt_f32_to_f16(tcg_hi
, tcg_hi
, fpst
, ahp
);
10237 tcg_gen_deposit_i32(tcg_res
[pass
], tcg_lo
, tcg_hi
, 16, 16);
10238 tcg_temp_free_i32(tcg_lo
);
10239 tcg_temp_free_i32(tcg_hi
);
10240 tcg_temp_free_ptr(fpst
);
10241 tcg_temp_free_i32(ahp
);
10244 case 0x36: /* BFCVTN, BFCVTN2 */
10246 TCGv_ptr fpst
= fpstatus_ptr(FPST_FPCR
);
10247 gen_helper_bfcvt_pair(tcg_res
[pass
], tcg_op
, fpst
);
10248 tcg_temp_free_ptr(fpst
);
10251 case 0x56: /* FCVTXN, FCVTXN2 */
10252 /* 64 bit to 32 bit float conversion
10253 * with von Neumann rounding (round to odd)
10256 gen_helper_fcvtx_f64_to_f32(tcg_res
[pass
], tcg_op
, cpu_env
);
10259 g_assert_not_reached();
10263 genfn(tcg_res
[pass
], tcg_op
);
10264 } else if (genenvfn
) {
10265 genenvfn(tcg_res
[pass
], cpu_env
, tcg_op
);
10268 tcg_temp_free_i64(tcg_op
);
10271 for (pass
= 0; pass
< 2; pass
++) {
10272 write_vec_element_i32(s
, tcg_res
[pass
], rd
, destelt
+ pass
, MO_32
);
10273 tcg_temp_free_i32(tcg_res
[pass
]);
10275 clear_vec_high(s
, is_q
, rd
);
/* Remaining saturating accumulating ops */
static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
                                bool is_q, int size, int rn, int rd)
{
10282 bool is_double
= (size
== 3);
10285 TCGv_i64 tcg_rn
= tcg_temp_new_i64();
10286 TCGv_i64 tcg_rd
= tcg_temp_new_i64();
10289 for (pass
= 0; pass
< (is_scalar
? 1 : 2); pass
++) {
10290 read_vec_element(s
, tcg_rn
, rn
, pass
, MO_64
);
10291 read_vec_element(s
, tcg_rd
, rd
, pass
, MO_64
);
10293 if (is_u
) { /* USQADD */
10294 gen_helper_neon_uqadd_s64(tcg_rd
, cpu_env
, tcg_rn
, tcg_rd
);
10295 } else { /* SUQADD */
10296 gen_helper_neon_sqadd_u64(tcg_rd
, cpu_env
, tcg_rn
, tcg_rd
);
10298 write_vec_element(s
, tcg_rd
, rd
, pass
, MO_64
);
10300 tcg_temp_free_i64(tcg_rd
);
10301 tcg_temp_free_i64(tcg_rn
);
10302 clear_vec_high(s
, !is_scalar
, rd
);
10304 TCGv_i32 tcg_rn
= tcg_temp_new_i32();
10305 TCGv_i32 tcg_rd
= tcg_temp_new_i32();
10306 int pass
, maxpasses
;
10311 maxpasses
= is_q
? 4 : 2;
10314 for (pass
= 0; pass
< maxpasses
; pass
++) {
10316 read_vec_element_i32(s
, tcg_rn
, rn
, pass
, size
);
10317 read_vec_element_i32(s
, tcg_rd
, rd
, pass
, size
);
10319 read_vec_element_i32(s
, tcg_rn
, rn
, pass
, MO_32
);
10320 read_vec_element_i32(s
, tcg_rd
, rd
, pass
, MO_32
);
10323 if (is_u
) { /* USQADD */
10326 gen_helper_neon_uqadd_s8(tcg_rd
, cpu_env
, tcg_rn
, tcg_rd
);
10329 gen_helper_neon_uqadd_s16(tcg_rd
, cpu_env
, tcg_rn
, tcg_rd
);
10332 gen_helper_neon_uqadd_s32(tcg_rd
, cpu_env
, tcg_rn
, tcg_rd
);
10335 g_assert_not_reached();
10337 } else { /* SUQADD */
10340 gen_helper_neon_sqadd_u8(tcg_rd
, cpu_env
, tcg_rn
, tcg_rd
);
10343 gen_helper_neon_sqadd_u16(tcg_rd
, cpu_env
, tcg_rn
, tcg_rd
);
10346 gen_helper_neon_sqadd_u32(tcg_rd
, cpu_env
, tcg_rn
, tcg_rd
);
10349 g_assert_not_reached();
10354 write_vec_element(s
, tcg_constant_i64(0), rd
, 0, MO_64
);
10356 write_vec_element_i32(s
, tcg_rd
, rd
, pass
, MO_32
);
10358 tcg_temp_free_i32(tcg_rd
);
10359 tcg_temp_free_i32(tcg_rn
);
10360 clear_vec_high(s
, is_q
, rd
);
/* AdvSIMD scalar two reg misc
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
10372 int rd
= extract32(insn
, 0, 5);
10373 int rn
= extract32(insn
, 5, 5);
10374 int opcode
= extract32(insn
, 12, 5);
10375 int size
= extract32(insn
, 22, 2);
10376 bool u
= extract32(insn
, 29, 1);
10377 bool is_fcvt
= false;
10379 TCGv_i32 tcg_rmode
;
10380 TCGv_ptr tcg_fpstatus
;
10383 case 0x3: /* USQADD / SUQADD*/
10384 if (!fp_access_check(s
)) {
10387 handle_2misc_satacc(s
, true, u
, false, size
, rn
, rd
);
10389 case 0x7: /* SQABS / SQNEG */
10391 case 0xa: /* CMLT */
10393 unallocated_encoding(s
);
10397 case 0x8: /* CMGT, CMGE */
10398 case 0x9: /* CMEQ, CMLE */
10399 case 0xb: /* ABS, NEG */
10401 unallocated_encoding(s
);
10405 case 0x12: /* SQXTUN */
10407 unallocated_encoding(s
);
10411 case 0x14: /* SQXTN, UQXTN */
10413 unallocated_encoding(s
);
10416 if (!fp_access_check(s
)) {
10419 handle_2misc_narrow(s
, true, opcode
, u
, false, size
, rn
, rd
);
10422 case 0x16 ... 0x1d:
10424 /* Floating point: U, size[1] and opcode indicate operation;
10425 * size[0] indicates single or double precision.
10427 opcode
|= (extract32(size
, 1, 1) << 5) | (u
<< 6);
10428 size
= extract32(size
, 0, 1) ? 3 : 2;
10430 case 0x2c: /* FCMGT (zero) */
10431 case 0x2d: /* FCMEQ (zero) */
10432 case 0x2e: /* FCMLT (zero) */
10433 case 0x6c: /* FCMGE (zero) */
10434 case 0x6d: /* FCMLE (zero) */
10435 handle_2misc_fcmp_zero(s
, opcode
, true, u
, true, size
, rn
, rd
);
10437 case 0x1d: /* SCVTF */
10438 case 0x5d: /* UCVTF */
10440 bool is_signed
= (opcode
== 0x1d);
10441 if (!fp_access_check(s
)) {
10444 handle_simd_intfp_conv(s
, rd
, rn
, 1, is_signed
, 0, size
);
10447 case 0x3d: /* FRECPE */
10448 case 0x3f: /* FRECPX */
10449 case 0x7d: /* FRSQRTE */
10450 if (!fp_access_check(s
)) {
10453 handle_2misc_reciprocal(s
, opcode
, true, u
, true, size
, rn
, rd
);
10455 case 0x1a: /* FCVTNS */
10456 case 0x1b: /* FCVTMS */
10457 case 0x3a: /* FCVTPS */
10458 case 0x3b: /* FCVTZS */
10459 case 0x5a: /* FCVTNU */
10460 case 0x5b: /* FCVTMU */
10461 case 0x7a: /* FCVTPU */
10462 case 0x7b: /* FCVTZU */
10464 rmode
= extract32(opcode
, 5, 1) | (extract32(opcode
, 0, 1) << 1);
10466 case 0x1c: /* FCVTAS */
10467 case 0x5c: /* FCVTAU */
10468 /* TIEAWAY doesn't fit in the usual rounding mode encoding */
10470 rmode
= FPROUNDING_TIEAWAY
;
10472 case 0x56: /* FCVTXN, FCVTXN2 */
10474 unallocated_encoding(s
);
10477 if (!fp_access_check(s
)) {
10480 handle_2misc_narrow(s
, true, opcode
, u
, false, size
- 1, rn
, rd
);
10483 unallocated_encoding(s
);
10488 unallocated_encoding(s
);
10492 if (!fp_access_check(s
)) {
10497 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
10498 tcg_fpstatus
= fpstatus_ptr(FPST_FPCR
);
10499 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, tcg_fpstatus
);
10502 tcg_fpstatus
= NULL
;
10506 TCGv_i64 tcg_rn
= read_fp_dreg(s
, rn
);
10507 TCGv_i64 tcg_rd
= tcg_temp_new_i64();
10509 handle_2misc_64(s
, opcode
, u
, tcg_rd
, tcg_rn
, tcg_rmode
, tcg_fpstatus
);
10510 write_fp_dreg(s
, rd
, tcg_rd
);
10511 tcg_temp_free_i64(tcg_rd
);
10512 tcg_temp_free_i64(tcg_rn
);
10514 TCGv_i32 tcg_rn
= tcg_temp_new_i32();
10515 TCGv_i32 tcg_rd
= tcg_temp_new_i32();
10517 read_vec_element_i32(s
, tcg_rn
, rn
, 0, size
);
10520 case 0x7: /* SQABS, SQNEG */
10522 NeonGenOneOpEnvFn
*genfn
;
10523 static NeonGenOneOpEnvFn
* const fns
[3][2] = {
10524 { gen_helper_neon_qabs_s8
, gen_helper_neon_qneg_s8
},
10525 { gen_helper_neon_qabs_s16
, gen_helper_neon_qneg_s16
},
10526 { gen_helper_neon_qabs_s32
, gen_helper_neon_qneg_s32
},
10528 genfn
= fns
[size
][u
];
10529 genfn(tcg_rd
, cpu_env
, tcg_rn
);
10532 case 0x1a: /* FCVTNS */
10533 case 0x1b: /* FCVTMS */
10534 case 0x1c: /* FCVTAS */
10535 case 0x3a: /* FCVTPS */
10536 case 0x3b: /* FCVTZS */
10537 gen_helper_vfp_tosls(tcg_rd
, tcg_rn
, tcg_constant_i32(0),
10540 case 0x5a: /* FCVTNU */
10541 case 0x5b: /* FCVTMU */
10542 case 0x5c: /* FCVTAU */
10543 case 0x7a: /* FCVTPU */
10544 case 0x7b: /* FCVTZU */
10545 gen_helper_vfp_touls(tcg_rd
, tcg_rn
, tcg_constant_i32(0),
10549 g_assert_not_reached();
10552 write_fp_sreg(s
, rd
, tcg_rd
);
10553 tcg_temp_free_i32(tcg_rd
);
10554 tcg_temp_free_i32(tcg_rn
);
10558 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, tcg_fpstatus
);
10559 tcg_temp_free_i32(tcg_rmode
);
10560 tcg_temp_free_ptr(tcg_fpstatus
);
/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    GVecGen2iFn *gvec_fn;

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }
    tcg_debug_assert(size <= 3);

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
        break;

    case 0x08: /* SRI */
        gvec_fn = gen_gvec_sri;
        break;

    case 0x00: /* SSHR / USHR */
        if (is_u) {
            if (shift == 8 << size) {
                /* Shift count the same size as element size produces zero. */
                tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
                                     is_q ? 16 : 8, vec_full_reg_size(s), 0);
                return;
            }
            gvec_fn = tcg_gen_gvec_shri;
        } else {
            /* Shift count the same size as element size produces all sign. */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gvec_fn = tcg_gen_gvec_sari;
        }
        break;

    case 0x04: /* SRSHR / URSHR (rounding) */
        gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
        break;

    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
        break;

    default:
        g_assert_not_reached();
    }

    gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
}
/* SHL/SLI - Vector shift left */
static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);

    /* Range of size is limited by decode: immh is a non-zero 4 bit field */
    assert(size >= 0 && size <= 3);

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (insert) {
        gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
    } else {
        gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
    }
}
/* USHLL/SHLL - Vector shift left with widening */
static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
                                  int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    int i;

    if (size >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* For the LL variants the store is larger than the load,
     * so if rd == rn we would overwrite parts of our input.
     * So load everything right now and use shifts in the main loop.
     */
    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);

    for (i = 0; i < elements; i++) {
        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
        write_vec_element(s, tcg_rd, rd, i, size + 1);
    }
}
/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    int shift = (2 * esize) - immhb;
    bool round = extract32(opcode, 0, 1);
    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
    TCGv_i64 tcg_round;
    int i;

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_final = tcg_temp_new_i64();
    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, size + 1);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, true, size + 1, shift);

        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
/* AdvSIMD shift by immediate
 *  31  30   29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +---+---+---+-------------+------+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-------------+------+------+--------+---+------+------+
 */
static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
{
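    /* Informal worked example: immh:immb encode both the element size and
     * the shift amount. With immh = 0b0001, immb = 0b010 the element size
     * is 8 bits (size = 32 - clz32(immh) - 1 = 0) and immhb = 0b0001010 = 10,
     * giving a right-shift count of 2 * (8 << size) - immhb = 6 or a
     * left-shift count of immhb - (8 << size) = 2, as computed in the
     * handlers above.
     */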
10753 int rd
= extract32(insn
, 0, 5);
10754 int rn
= extract32(insn
, 5, 5);
10755 int opcode
= extract32(insn
, 11, 5);
10756 int immb
= extract32(insn
, 16, 3);
10757 int immh
= extract32(insn
, 19, 4);
10758 bool is_u
= extract32(insn
, 29, 1);
10759 bool is_q
= extract32(insn
, 30, 1);
10761 /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */
10765 case 0x08: /* SRI */
10767 unallocated_encoding(s
);
10771 case 0x00: /* SSHR / USHR */
10772 case 0x02: /* SSRA / USRA (accumulate) */
10773 case 0x04: /* SRSHR / URSHR (rounding) */
10774 case 0x06: /* SRSRA / URSRA (accum + rounding) */
10775 handle_vec_simd_shri(s
, is_q
, is_u
, immh
, immb
, opcode
, rn
, rd
);
10777 case 0x0a: /* SHL / SLI */
10778 handle_vec_simd_shli(s
, is_q
, is_u
, immh
, immb
, opcode
, rn
, rd
);
10780 case 0x10: /* SHRN */
10781 case 0x11: /* RSHRN / SQRSHRUN */
10783 handle_vec_simd_sqshrn(s
, false, is_q
, false, true, immh
, immb
,
10786 handle_vec_simd_shrn(s
, is_q
, immh
, immb
, opcode
, rn
, rd
);
10789 case 0x12: /* SQSHRN / UQSHRN */
10790 case 0x13: /* SQRSHRN / UQRSHRN */
10791 handle_vec_simd_sqshrn(s
, false, is_q
, is_u
, is_u
, immh
, immb
,
10794 case 0x14: /* SSHLL / USHLL */
10795 handle_vec_simd_wshli(s
, is_q
, is_u
, immh
, immb
, opcode
, rn
, rd
);
10797 case 0x1c: /* SCVTF / UCVTF */
10798 handle_simd_shift_intfp_conv(s
, false, is_q
, is_u
, immh
, immb
,
10801 case 0xc: /* SQSHLU */
10803 unallocated_encoding(s
);
10806 handle_simd_qshl(s
, false, is_q
, false, true, immh
, immb
, rn
, rd
);
10808 case 0xe: /* SQSHL, UQSHL */
10809 handle_simd_qshl(s
, false, is_q
, is_u
, is_u
, immh
, immb
, rn
, rd
);
10811 case 0x1f: /* FCVTZS/ FCVTZU */
10812 handle_simd_shift_fpint_conv(s
, false, is_q
, is_u
, immh
, immb
, rn
, rd
);
10815 unallocated_encoding(s
);
/* Generate code to do a "long" addition or subtraction, ie one done in
 * TCGv_i64 on vector lanes twice the width specified by size.
 */
static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
                          TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
{
    static NeonGenTwo64OpFn * const fns[3][2] = {
        { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
        { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
        { tcg_gen_add_i64, tcg_gen_sub_i64 },
    };
    NeonGenTwo64OpFn *genfn;
    assert(size < 3);

    genfn = fns[size][is_sub];
    genfn(tcg_res, tcg_op1, tcg_op2);
}
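
/* Informal note on gen_neon_addl() above: size selects the lane width of the
 * "long" operation: size 0 and 1 use the packed 16-bit and 32-bit helpers,
 * while size 2 is a plain 64-bit tcg add/subtract.
 */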
10838 static void handle_3rd_widening(DisasContext
*s
, int is_q
, int is_u
, int size
,
10839 int opcode
, int rd
, int rn
, int rm
)
10841 /* 3-reg-different widening insns: 64 x 64 -> 128 */
10842 TCGv_i64 tcg_res
[2];
10845 tcg_res
[0] = tcg_temp_new_i64();
10846 tcg_res
[1] = tcg_temp_new_i64();
10848 /* Does this op do an adding accumulate, a subtracting accumulate,
10849 * or no accumulate at all?
10867 read_vec_element(s
, tcg_res
[0], rd
, 0, MO_64
);
10868 read_vec_element(s
, tcg_res
[1], rd
, 1, MO_64
);
10871 /* size == 2 means two 32x32->64 operations; this is worth special
10872 * casing because we can generally handle it inline.
10875 for (pass
= 0; pass
< 2; pass
++) {
10876 TCGv_i64 tcg_op1
= tcg_temp_new_i64();
10877 TCGv_i64 tcg_op2
= tcg_temp_new_i64();
10878 TCGv_i64 tcg_passres
;
10879 MemOp memop
= MO_32
| (is_u
? 0 : MO_SIGN
);
10881 int elt
= pass
+ is_q
* 2;
10883 read_vec_element(s
, tcg_op1
, rn
, elt
, memop
);
10884 read_vec_element(s
, tcg_op2
, rm
, elt
, memop
);
10887 tcg_passres
= tcg_res
[pass
];
10889 tcg_passres
= tcg_temp_new_i64();
10893 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10894 tcg_gen_add_i64(tcg_passres
, tcg_op1
, tcg_op2
);
10896 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10897 tcg_gen_sub_i64(tcg_passres
, tcg_op1
, tcg_op2
);
10899 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10900 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10902 TCGv_i64 tcg_tmp1
= tcg_temp_new_i64();
10903 TCGv_i64 tcg_tmp2
= tcg_temp_new_i64();
10905 tcg_gen_sub_i64(tcg_tmp1
, tcg_op1
, tcg_op2
);
10906 tcg_gen_sub_i64(tcg_tmp2
, tcg_op2
, tcg_op1
);
10907 tcg_gen_movcond_i64(is_u
? TCG_COND_GEU
: TCG_COND_GE
,
10909 tcg_op1
, tcg_op2
, tcg_tmp1
, tcg_tmp2
);
10910 tcg_temp_free_i64(tcg_tmp1
);
10911 tcg_temp_free_i64(tcg_tmp2
);
10914 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10915 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10916 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
10917 tcg_gen_mul_i64(tcg_passres
, tcg_op1
, tcg_op2
);
10919 case 9: /* SQDMLAL, SQDMLAL2 */
10920 case 11: /* SQDMLSL, SQDMLSL2 */
10921 case 13: /* SQDMULL, SQDMULL2 */
10922 tcg_gen_mul_i64(tcg_passres
, tcg_op1
, tcg_op2
);
10923 gen_helper_neon_addl_saturate_s64(tcg_passres
, cpu_env
,
10924 tcg_passres
, tcg_passres
);
10927 g_assert_not_reached();
10930 if (opcode
== 9 || opcode
== 11) {
10931 /* saturating accumulate ops */
10933 tcg_gen_neg_i64(tcg_passres
, tcg_passres
);
10935 gen_helper_neon_addl_saturate_s64(tcg_res
[pass
], cpu_env
,
10936 tcg_res
[pass
], tcg_passres
);
10937 } else if (accop
> 0) {
10938 tcg_gen_add_i64(tcg_res
[pass
], tcg_res
[pass
], tcg_passres
);
10939 } else if (accop
< 0) {
10940 tcg_gen_sub_i64(tcg_res
[pass
], tcg_res
[pass
], tcg_passres
);
10944 tcg_temp_free_i64(tcg_passres
);
10947 tcg_temp_free_i64(tcg_op1
);
10948 tcg_temp_free_i64(tcg_op2
);
10951 /* size 0 or 1, generally helper functions */
10952 for (pass
= 0; pass
< 2; pass
++) {
10953 TCGv_i32 tcg_op1
= tcg_temp_new_i32();
10954 TCGv_i32 tcg_op2
= tcg_temp_new_i32();
10955 TCGv_i64 tcg_passres
;
10956 int elt
= pass
+ is_q
* 2;
10958 read_vec_element_i32(s
, tcg_op1
, rn
, elt
, MO_32
);
10959 read_vec_element_i32(s
, tcg_op2
, rm
, elt
, MO_32
);
10962 tcg_passres
= tcg_res
[pass
];
10964 tcg_passres
= tcg_temp_new_i64();
10968 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10969 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10971 TCGv_i64 tcg_op2_64
= tcg_temp_new_i64();
10972 static NeonGenWidenFn
* const widenfns
[2][2] = {
10973 { gen_helper_neon_widen_s8
, gen_helper_neon_widen_u8
},
10974 { gen_helper_neon_widen_s16
, gen_helper_neon_widen_u16
},
10976 NeonGenWidenFn
*widenfn
= widenfns
[size
][is_u
];
10978 widenfn(tcg_op2_64
, tcg_op2
);
10979 widenfn(tcg_passres
, tcg_op1
);
10980 gen_neon_addl(size
, (opcode
== 2), tcg_passres
,
10981 tcg_passres
, tcg_op2_64
);
10982 tcg_temp_free_i64(tcg_op2_64
);
10985 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10986 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10989 gen_helper_neon_abdl_u16(tcg_passres
, tcg_op1
, tcg_op2
);
10991 gen_helper_neon_abdl_s16(tcg_passres
, tcg_op1
, tcg_op2
);
10995 gen_helper_neon_abdl_u32(tcg_passres
, tcg_op1
, tcg_op2
);
10997 gen_helper_neon_abdl_s32(tcg_passres
, tcg_op1
, tcg_op2
);
11001 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
11002 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
11003 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
11006 gen_helper_neon_mull_u8(tcg_passres
, tcg_op1
, tcg_op2
);
11008 gen_helper_neon_mull_s8(tcg_passres
, tcg_op1
, tcg_op2
);
11012 gen_helper_neon_mull_u16(tcg_passres
, tcg_op1
, tcg_op2
);
11014 gen_helper_neon_mull_s16(tcg_passres
, tcg_op1
, tcg_op2
);
11018 case 9: /* SQDMLAL, SQDMLAL2 */
11019 case 11: /* SQDMLSL, SQDMLSL2 */
11020 case 13: /* SQDMULL, SQDMULL2 */
11022 gen_helper_neon_mull_s16(tcg_passres
, tcg_op1
, tcg_op2
);
11023 gen_helper_neon_addl_saturate_s32(tcg_passres
, cpu_env
,
11024 tcg_passres
, tcg_passres
);
11027 g_assert_not_reached();
11029 tcg_temp_free_i32(tcg_op1
);
11030 tcg_temp_free_i32(tcg_op2
);
11033 if (opcode
== 9 || opcode
== 11) {
11034 /* saturating accumulate ops */
11036 gen_helper_neon_negl_u32(tcg_passres
, tcg_passres
);
11038 gen_helper_neon_addl_saturate_s32(tcg_res
[pass
], cpu_env
,
11042 gen_neon_addl(size
, (accop
< 0), tcg_res
[pass
],
11043 tcg_res
[pass
], tcg_passres
);
11045 tcg_temp_free_i64(tcg_passres
);
11050 write_vec_element(s
, tcg_res
[0], rd
, 0, MO_64
);
11051 write_vec_element(s
, tcg_res
[1], rd
, 1, MO_64
);
11052 tcg_temp_free_i64(tcg_res
[0]);
11053 tcg_temp_free_i64(tcg_res
[1]);
static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
                            int opcode, int rd, int rn, int rm)
{
    TCGv_i64 tcg_res[2];
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
        static NeonGenWidenFn * const widenfns[3][2] = {
            { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
            { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
            { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
        };
        NeonGenWidenFn *widenfn = widenfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
        widenfn(tcg_op2_wide, tcg_op2);
        tcg_temp_free_i32(tcg_op2);
        tcg_res[pass] = tcg_temp_new_i64();
        gen_neon_addl(size, (opcode == 3),
                      tcg_res[pass], tcg_op1, tcg_op2_wide);
        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2_wide);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
{
    tcg_gen_addi_i64(in, in, 1U << 31);
    tcg_gen_extrh_i64_i32(res, in);
}
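
/* Informal note: do_narrow_round_high_u32() above adds the rounding constant
 * 1 << 31 and then takes the high 32 bits; it is used as the rounding
 * size == 2 entry of the narrowfns[] table below (RADDHN/RSUBHN).
 */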
static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
                                 int opcode, int rd, int rn, int rm)
{
    TCGv_i32 tcg_res[2];
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_wideres = tcg_temp_new_i64();
        static NeonGenNarrowFn * const narrowfns[3][2] = {
            { gen_helper_neon_narrow_high_u8,
              gen_helper_neon_narrow_round_high_u8 },
            { gen_helper_neon_narrow_high_u16,
              gen_helper_neon_narrow_round_high_u16 },
            { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
        };
        NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);

        tcg_res[pass] = tcg_temp_new_i32();
        gennarrow(tcg_res[pass], tcg_wideres);
        tcg_temp_free_i64(tcg_wideres);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}
/* AdvSIMD three different
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
{
    /* Instructions in this group fall into three basic classes
     * (in each case with the operation working on each element in
     * the input vectors):
     * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
     *     128 bit input)
     * (2) wide 64 x 128 -> 128
     * (3) narrowing 128 x 128 -> 64
     * Here we do initial decode, catch unallocated cases and
     * dispatch to separate functions for each class.
     */
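    /* Informal examples of the three classes: SADDL/UADDL (opcode 0) are
     * widening, SADDW/UADDW (opcode 1) are wide, and ADDHN/RADDHN (opcode 4)
     * are narrowing; the switch below dispatches accordingly.
     */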
11155 int is_q
= extract32(insn
, 30, 1);
11156 int is_u
= extract32(insn
, 29, 1);
11157 int size
= extract32(insn
, 22, 2);
11158 int opcode
= extract32(insn
, 12, 4);
11159 int rm
= extract32(insn
, 16, 5);
11160 int rn
= extract32(insn
, 5, 5);
11161 int rd
= extract32(insn
, 0, 5);
11164 case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
11165 case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
11166 /* 64 x 128 -> 128 */
11168 unallocated_encoding(s
);
11171 if (!fp_access_check(s
)) {
11174 handle_3rd_wide(s
, is_q
, is_u
, size
, opcode
, rd
, rn
, rm
);
11176 case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
11177 case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
11178 /* 128 x 128 -> 64 */
11180 unallocated_encoding(s
);
11183 if (!fp_access_check(s
)) {
11186 handle_3rd_narrowing(s
, is_q
, is_u
, size
, opcode
, rd
, rn
, rm
);
11188 case 14: /* PMULL, PMULL2 */
11190 unallocated_encoding(s
);
11194 case 0: /* PMULL.P8 */
11195 if (!fp_access_check(s
)) {
11198 /* The Q field specifies lo/hi half input for this insn. */
11199 gen_gvec_op3_ool(s
, true, rd
, rn
, rm
, is_q
,
11200 gen_helper_neon_pmull_h
);
11203 case 3: /* PMULL.P64 */
11204 if (!dc_isar_feature(aa64_pmull
, s
)) {
11205 unallocated_encoding(s
);
11208 if (!fp_access_check(s
)) {
11211 /* The Q field specifies lo/hi half input for this insn. */
11212 gen_gvec_op3_ool(s
, true, rd
, rn
, rm
, is_q
,
11213 gen_helper_gvec_pmull_q
);
11217 unallocated_encoding(s
);
11221 case 9: /* SQDMLAL, SQDMLAL2 */
11222 case 11: /* SQDMLSL, SQDMLSL2 */
11223 case 13: /* SQDMULL, SQDMULL2 */
11224 if (is_u
|| size
== 0) {
11225 unallocated_encoding(s
);
11229 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
11230 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
11231 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
11232 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
11233 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
11234 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
11235 case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
11236 /* 64 x 64 -> 128 */
11238 unallocated_encoding(s
);
11241 if (!fp_access_check(s
)) {
11245 handle_3rd_widening(s
, is_q
, is_u
, size
, opcode
, rd
, rn
, rm
);
11248 /* opcode 15 not allocated */
11249 unallocated_encoding(s
);
/* Logic op (opcode == 3) subgroup of C3.6.16. */
static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    if (!fp_access_check(s)) {
        return;
    }

    switch (size + 4 * is_u) {
    case 0: /* AND */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
        return;
    case 1: /* BIC */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
        return;
    case 2: /* ORR */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
        return;
    case 3: /* ORN */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
        return;
    case 4: /* EOR */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
        return;

    case 5: /* BSL bitwise select */
        gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
        return;
    case 6: /* BIT, bitwise insert if true */
        gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
        return;
    case 7: /* BIF, bitwise insert if false */
        gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
        return;

    default:
        g_assert_not_reached();
    }
}
/* Pairwise op subgroup of C3.6.16.
 *
 * This is called directly or via the handle_3same_float for float pairwise
 * operations where the opcode and size are calculated differently.
 */
static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
                                   int size, int rn, int rm, int rd)
{
11311 /* Floating point operations need fpst */
11312 if (opcode
>= 0x58) {
11313 fpst
= fpstatus_ptr(FPST_FPCR
);
11318 if (!fp_access_check(s
)) {
    /* These operations work on the concatenated rm:rn, with each pair of
     * adjacent elements being operated on to produce an element in the result.
     */
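    /* Informal example: for ADDP on four 32-bit lanes the result is
     * res[0] = rn[0] + rn[1], res[1] = rn[2] + rn[3],
     * res[2] = rm[0] + rm[1], res[3] = rm[2] + rm[3].
     */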
11326 TCGv_i64 tcg_res
[2];
11328 for (pass
= 0; pass
< 2; pass
++) {
11329 TCGv_i64 tcg_op1
= tcg_temp_new_i64();
11330 TCGv_i64 tcg_op2
= tcg_temp_new_i64();
11331 int passreg
= (pass
== 0) ? rn
: rm
;
11333 read_vec_element(s
, tcg_op1
, passreg
, 0, MO_64
);
11334 read_vec_element(s
, tcg_op2
, passreg
, 1, MO_64
);
11335 tcg_res
[pass
] = tcg_temp_new_i64();
11338 case 0x17: /* ADDP */
11339 tcg_gen_add_i64(tcg_res
[pass
], tcg_op1
, tcg_op2
);
11341 case 0x58: /* FMAXNMP */
11342 gen_helper_vfp_maxnumd(tcg_res
[pass
], tcg_op1
, tcg_op2
, fpst
);
11344 case 0x5a: /* FADDP */
11345 gen_helper_vfp_addd(tcg_res
[pass
], tcg_op1
, tcg_op2
, fpst
);
11347 case 0x5e: /* FMAXP */
11348 gen_helper_vfp_maxd(tcg_res
[pass
], tcg_op1
, tcg_op2
, fpst
);
11350 case 0x78: /* FMINNMP */
11351 gen_helper_vfp_minnumd(tcg_res
[pass
], tcg_op1
, tcg_op2
, fpst
);
11353 case 0x7e: /* FMINP */
11354 gen_helper_vfp_mind(tcg_res
[pass
], tcg_op1
, tcg_op2
, fpst
);
11357 g_assert_not_reached();
11360 tcg_temp_free_i64(tcg_op1
);
11361 tcg_temp_free_i64(tcg_op2
);
11364 for (pass
= 0; pass
< 2; pass
++) {
11365 write_vec_element(s
, tcg_res
[pass
], rd
, pass
, MO_64
);
11366 tcg_temp_free_i64(tcg_res
[pass
]);
11369 int maxpass
= is_q
? 4 : 2;
11370 TCGv_i32 tcg_res
[4];
11372 for (pass
= 0; pass
< maxpass
; pass
++) {
11373 TCGv_i32 tcg_op1
= tcg_temp_new_i32();
11374 TCGv_i32 tcg_op2
= tcg_temp_new_i32();
11375 NeonGenTwoOpFn
*genfn
= NULL
;
11376 int passreg
= pass
< (maxpass
/ 2) ? rn
: rm
;
11377 int passelt
= (is_q
&& (pass
& 1)) ? 2 : 0;
11379 read_vec_element_i32(s
, tcg_op1
, passreg
, passelt
, MO_32
);
11380 read_vec_element_i32(s
, tcg_op2
, passreg
, passelt
+ 1, MO_32
);
11381 tcg_res
[pass
] = tcg_temp_new_i32();
11384 case 0x17: /* ADDP */
11386 static NeonGenTwoOpFn
* const fns
[3] = {
11387 gen_helper_neon_padd_u8
,
11388 gen_helper_neon_padd_u16
,
11394 case 0x14: /* SMAXP, UMAXP */
11396 static NeonGenTwoOpFn
* const fns
[3][2] = {
11397 { gen_helper_neon_pmax_s8
, gen_helper_neon_pmax_u8
},
11398 { gen_helper_neon_pmax_s16
, gen_helper_neon_pmax_u16
},
11399 { tcg_gen_smax_i32
, tcg_gen_umax_i32
},
11401 genfn
= fns
[size
][u
];
11404 case 0x15: /* SMINP, UMINP */
11406 static NeonGenTwoOpFn
* const fns
[3][2] = {
11407 { gen_helper_neon_pmin_s8
, gen_helper_neon_pmin_u8
},
11408 { gen_helper_neon_pmin_s16
, gen_helper_neon_pmin_u16
},
11409 { tcg_gen_smin_i32
, tcg_gen_umin_i32
},
11411 genfn
= fns
[size
][u
];
11414 /* The FP operations are all on single floats (32 bit) */
11415 case 0x58: /* FMAXNMP */
11416 gen_helper_vfp_maxnums(tcg_res
[pass
], tcg_op1
, tcg_op2
, fpst
);
11418 case 0x5a: /* FADDP */
11419 gen_helper_vfp_adds(tcg_res
[pass
], tcg_op1
, tcg_op2
, fpst
);
11421 case 0x5e: /* FMAXP */
11422 gen_helper_vfp_maxs(tcg_res
[pass
], tcg_op1
, tcg_op2
, fpst
);
11424 case 0x78: /* FMINNMP */
11425 gen_helper_vfp_minnums(tcg_res
[pass
], tcg_op1
, tcg_op2
, fpst
);
11427 case 0x7e: /* FMINP */
11428 gen_helper_vfp_mins(tcg_res
[pass
], tcg_op1
, tcg_op2
, fpst
);
11431 g_assert_not_reached();
11434 /* FP ops called directly, otherwise call now */
11436 genfn(tcg_res
[pass
], tcg_op1
, tcg_op2
);
11439 tcg_temp_free_i32(tcg_op1
);
11440 tcg_temp_free_i32(tcg_op2
);
11443 for (pass
= 0; pass
< maxpass
; pass
++) {
11444 write_vec_element_i32(s
, tcg_res
[pass
], rd
, pass
, MO_32
);
11445 tcg_temp_free_i32(tcg_res
[pass
]);
11447 clear_vec_high(s
, is_q
, rd
);
11451 tcg_temp_free_ptr(fpst
);
/* Floating point op subgroup of C3.6.16. */
static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
{
    /* For floating point ops, the U, size[1] and opcode bits
     * together indicate the operation. size[0] indicates single
     * or double.
     */
11462 int fpopcode
= extract32(insn
, 11, 5)
11463 | (extract32(insn
, 23, 1) << 5)
11464 | (extract32(insn
, 29, 1) << 6);
    int is_q = extract32(insn, 30, 1);
    int size = extract32(insn, 22, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    int datasize = is_q ? 128 : 64;
    int esize = 32 << size;
    int elements = datasize / esize;

    if (size == 1 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    switch (fpopcode) {
    case 0x58: /* FMAXNMP */
    case 0x5a: /* FADDP */
    case 0x5e: /* FMAXP */
    case 0x78: /* FMINNMP */
    case 0x7e: /* FMINP */
        if (size && !is_q) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
                               rn, rm, rd);
        return;
    case 0x1b: /* FMULX */
    case 0x1f: /* FRECPS */
    case 0x3f: /* FRSQRTS */
    case 0x5d: /* FACGE */
    case 0x7d: /* FACGT */
    case 0x19: /* FMLA */
    case 0x39: /* FMLS */
    case 0x18: /* FMAXNM */
    case 0x1a: /* FADD */
    case 0x1c: /* FCMEQ */
    case 0x1e: /* FMAX */
    case 0x38: /* FMINNM */
    case 0x3a: /* FSUB */
    case 0x3e: /* FMIN */
    case 0x5b: /* FMUL */
    case 0x5c: /* FCMGE */
    case 0x5f: /* FDIV */
    case 0x7a: /* FABD */
    case 0x7c: /* FCMGT */
        if (!fp_access_check(s)) {
            return;
        }
        handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
        return;

    case 0x1d: /* FMLAL */
    case 0x3d: /* FMLSL */
    case 0x59: /* FMLAL2 */
    case 0x79: /* FMLSL2 */
        if (size & 1 || !dc_isar_feature(aa64_fhm, s)) {
            unallocated_encoding(s);
            return;
        }
        if (fp_access_check(s)) {
            int is_s = extract32(insn, 23, 1);
            int is_2 = extract32(insn, 29, 1);
            int data = (is_2 << 1) | is_s;
            tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm), cpu_env,
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               data, gen_helper_gvec_fmlal_a64);
        }
        return;

    default:
        unallocated_encoding(s);
        return;
    }
}
/* Integer op subgroup of C3.6.16. */
static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pass;
    TCGCond cond;

    switch (opcode) {
11558 case 0x13: /* MUL, PMUL */
11559 if (u
&& size
!= 0) {
11560 unallocated_encoding(s
);
11564 case 0x0: /* SHADD, UHADD */
11565 case 0x2: /* SRHADD, URHADD */
11566 case 0x4: /* SHSUB, UHSUB */
11567 case 0xc: /* SMAX, UMAX */
11568 case 0xd: /* SMIN, UMIN */
11569 case 0xe: /* SABD, UABD */
11570 case 0xf: /* SABA, UABA */
11571 case 0x12: /* MLA, MLS */
11573 unallocated_encoding(s
);
11577 case 0x16: /* SQDMULH, SQRDMULH */
11578 if (size
== 0 || size
== 3) {
11579 unallocated_encoding(s
);
11584 if (size
== 3 && !is_q
) {
11585 unallocated_encoding(s
);
11591 if (!fp_access_check(s
)) {
11596 case 0x01: /* SQADD, UQADD */
11598 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_uqadd_qc
, size
);
11600 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_sqadd_qc
, size
);
11603 case 0x05: /* SQSUB, UQSUB */
11605 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_uqsub_qc
, size
);
11607 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_sqsub_qc
, size
);
11610 case 0x08: /* SSHL, USHL */
11612 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_ushl
, size
);
11614 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_sshl
, size
);
11617 case 0x0c: /* SMAX, UMAX */
11619 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_umax
, size
);
11621 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_smax
, size
);
11624 case 0x0d: /* SMIN, UMIN */
11626 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_umin
, size
);
11628 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_smin
, size
);
11631 case 0xe: /* SABD, UABD */
11633 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_uabd
, size
);
11635 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_sabd
, size
);
11638 case 0xf: /* SABA, UABA */
11640 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_uaba
, size
);
11642 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_saba
, size
);
11645 case 0x10: /* ADD, SUB */
11647 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_sub
, size
);
11649 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_add
, size
);
11652 case 0x13: /* MUL, PMUL */
11653 if (!u
) { /* MUL */
11654 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, tcg_gen_gvec_mul
, size
);
11655 } else { /* PMUL */
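            /*
             * PMUL is a polynomial (carry-less) multiplication of the byte
             * elements over GF(2).
             */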
11656 gen_gvec_op3_ool(s
, is_q
, rd
, rn
, rm
, 0, gen_helper_gvec_pmul_b
);
11659 case 0x12: /* MLA, MLS */
11661 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_mls
, size
);
11663 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_mla
, size
);
11666 case 0x16: /* SQDMULH, SQRDMULH */
11668 static gen_helper_gvec_3_ptr
* const fns
[2][2] = {
11669 { gen_helper_neon_sqdmulh_h
, gen_helper_neon_sqrdmulh_h
},
11670 { gen_helper_neon_sqdmulh_s
, gen_helper_neon_sqrdmulh_s
},
11672 gen_gvec_op3_qc(s
, is_q
, rd
, rn
, rm
, fns
[size
- 1][u
]);
11676 if (!u
) { /* CMTST */
11677 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_cmtst
, size
);
11681 cond
= TCG_COND_EQ
;
11683 case 0x06: /* CMGT, CMHI */
11684 cond
= u
? TCG_COND_GTU
: TCG_COND_GT
;
11686 case 0x07: /* CMGE, CMHS */
11687 cond
= u
? TCG_COND_GEU
: TCG_COND_GE
;
11689 tcg_gen_gvec_cmp(cond
, size
, vec_full_reg_offset(s
, rd
),
11690 vec_full_reg_offset(s
, rn
),
11691 vec_full_reg_offset(s
, rm
),
11692 is_q
? 16 : 8, vec_full_reg_size(s
));
11698 for (pass
= 0; pass
< 2; pass
++) {
11699 TCGv_i64 tcg_op1
= tcg_temp_new_i64();
11700 TCGv_i64 tcg_op2
= tcg_temp_new_i64();
11701 TCGv_i64 tcg_res
= tcg_temp_new_i64();
11703 read_vec_element(s
, tcg_op1
, rn
, pass
, MO_64
);
11704 read_vec_element(s
, tcg_op2
, rm
, pass
, MO_64
);
11706 handle_3same_64(s
, opcode
, u
, tcg_res
, tcg_op1
, tcg_op2
);
11708 write_vec_element(s
, tcg_res
, rd
, pass
, MO_64
);
11710 tcg_temp_free_i64(tcg_res
);
11711 tcg_temp_free_i64(tcg_op1
);
11712 tcg_temp_free_i64(tcg_op2
);
11715 for (pass
= 0; pass
< (is_q
? 4 : 2); pass
++) {
11716 TCGv_i32 tcg_op1
= tcg_temp_new_i32();
11717 TCGv_i32 tcg_op2
= tcg_temp_new_i32();
11718 TCGv_i32 tcg_res
= tcg_temp_new_i32();
11719 NeonGenTwoOpFn
*genfn
= NULL
;
11720 NeonGenTwoOpEnvFn
*genenvfn
= NULL
;
11722 read_vec_element_i32(s
, tcg_op1
, rn
, pass
, MO_32
);
11723 read_vec_element_i32(s
, tcg_op2
, rm
, pass
, MO_32
);
11726 case 0x0: /* SHADD, UHADD */
11728 static NeonGenTwoOpFn
* const fns
[3][2] = {
11729 { gen_helper_neon_hadd_s8
, gen_helper_neon_hadd_u8
},
11730 { gen_helper_neon_hadd_s16
, gen_helper_neon_hadd_u16
},
11731 { gen_helper_neon_hadd_s32
, gen_helper_neon_hadd_u32
},
11733 genfn
= fns
[size
][u
];
11736 case 0x2: /* SRHADD, URHADD */
11738 static NeonGenTwoOpFn
* const fns
[3][2] = {
11739 { gen_helper_neon_rhadd_s8
, gen_helper_neon_rhadd_u8
},
11740 { gen_helper_neon_rhadd_s16
, gen_helper_neon_rhadd_u16
},
11741 { gen_helper_neon_rhadd_s32
, gen_helper_neon_rhadd_u32
},
11743 genfn
= fns
[size
][u
];
11746 case 0x4: /* SHSUB, UHSUB */
11748 static NeonGenTwoOpFn
* const fns
[3][2] = {
11749 { gen_helper_neon_hsub_s8
, gen_helper_neon_hsub_u8
},
11750 { gen_helper_neon_hsub_s16
, gen_helper_neon_hsub_u16
},
11751 { gen_helper_neon_hsub_s32
, gen_helper_neon_hsub_u32
},
11753 genfn
= fns
[size
][u
];
11756 case 0x9: /* SQSHL, UQSHL */
11758 static NeonGenTwoOpEnvFn
* const fns
[3][2] = {
11759 { gen_helper_neon_qshl_s8
, gen_helper_neon_qshl_u8
},
11760 { gen_helper_neon_qshl_s16
, gen_helper_neon_qshl_u16
},
11761 { gen_helper_neon_qshl_s32
, gen_helper_neon_qshl_u32
},
11763 genenvfn
= fns
[size
][u
];
11766 case 0xa: /* SRSHL, URSHL */
11768 static NeonGenTwoOpFn
* const fns
[3][2] = {
11769 { gen_helper_neon_rshl_s8
, gen_helper_neon_rshl_u8
},
11770 { gen_helper_neon_rshl_s16
, gen_helper_neon_rshl_u16
},
11771 { gen_helper_neon_rshl_s32
, gen_helper_neon_rshl_u32
},
11773 genfn
= fns
[size
][u
];
11776 case 0xb: /* SQRSHL, UQRSHL */
11778 static NeonGenTwoOpEnvFn
* const fns
[3][2] = {
11779 { gen_helper_neon_qrshl_s8
, gen_helper_neon_qrshl_u8
},
11780 { gen_helper_neon_qrshl_s16
, gen_helper_neon_qrshl_u16
},
11781 { gen_helper_neon_qrshl_s32
, gen_helper_neon_qrshl_u32
},
11783 genenvfn
= fns
[size
][u
];
11787 g_assert_not_reached();
11791 genenvfn(tcg_res
, cpu_env
, tcg_op1
, tcg_op2
);
11793 genfn(tcg_res
, tcg_op1
, tcg_op2
);
11796 write_vec_element_i32(s
, tcg_res
, rd
, pass
, MO_32
);
11798 tcg_temp_free_i32(tcg_res
);
11799 tcg_temp_free_i32(tcg_op1
);
11800 tcg_temp_free_i32(tcg_op2
);
11803 clear_vec_high(s
, is_q
, rd
);
/* AdvSIMD three same
 *  31  30  29  28       24 23  22  21 20  16 15    11  10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+---+------+------+
 */
static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 11, 5);

    switch (opcode) {
    case 0x3: /* logic ops */
        disas_simd_3same_logic(s, insn);
        break;
    case 0x17: /* ADDP */
    case 0x14: /* SMAXP, UMAXP */
    case 0x15: /* SMINP, UMINP */
    {
        /* Pairwise operations */
        int is_q = extract32(insn, 30, 1);
        int u = extract32(insn, 29, 1);
        int size = extract32(insn, 22, 2);
        int rm = extract32(insn, 16, 5);
        int rn = extract32(insn, 5, 5);
        int rd = extract32(insn, 0, 5);
        if (opcode == 0x17) {
            if (u || (size == 3 && !is_q)) {
                unallocated_encoding(s);
                return;
            }
        } else {
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
        }
        handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
        break;
    }
    case 0x18 ... 0x31:
        /* floating point ops, sz[1] and U are part of opcode */
        disas_simd_3same_float(s, insn);
        break;
    default:
        disas_simd_3same_int(s, insn);
        break;
    }
}
/*
 * Advanced SIMD three same (ARMv8.2 FP16 variants)
 *
 *  31  30  29  28       24 23 22 21 20  16 15 14 13    11 10  9    5 4    0
 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
 *
 * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE
 * (register), FACGE, FABD, FCMGT (register) and FACGT.
 */
static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 11, 3);
    int u = extract32(insn, 29, 1);
    int a = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    /*
     * For these floating point ops, the U, a and opcode bits
     * together indicate the operation.
     */
    int fpopcode = opcode | (a << 3) | (u << 4);
    int datasize = is_q ? 128 : 64;
    int elements = datasize / 16;
    bool pairwise;
    TCGv_ptr fpst;
    int pass;

    switch (fpopcode) {
11888 case 0x0: /* FMAXNM */
11889 case 0x1: /* FMLA */
11890 case 0x2: /* FADD */
11891 case 0x3: /* FMULX */
11892 case 0x4: /* FCMEQ */
11893 case 0x6: /* FMAX */
11894 case 0x7: /* FRECPS */
11895 case 0x8: /* FMINNM */
11896 case 0x9: /* FMLS */
11897 case 0xa: /* FSUB */
11898 case 0xe: /* FMIN */
11899 case 0xf: /* FRSQRTS */
11900 case 0x13: /* FMUL */
11901 case 0x14: /* FCMGE */
11902 case 0x15: /* FACGE */
11903 case 0x17: /* FDIV */
11904 case 0x1a: /* FABD */
11905 case 0x1c: /* FCMGT */
11906 case 0x1d: /* FACGT */
11909 case 0x10: /* FMAXNMP */
11910 case 0x12: /* FADDP */
11911 case 0x16: /* FMAXP */
11912 case 0x18: /* FMINNMP */
11913 case 0x1e: /* FMINP */
11917 unallocated_encoding(s
);
11921 if (!dc_isar_feature(aa64_fp16
, s
)) {
11922 unallocated_encoding(s
);
11926 if (!fp_access_check(s
)) {
11930 fpst
= fpstatus_ptr(FPST_FPCR_F16
);
11933 int maxpass
= is_q
? 8 : 4;
11934 TCGv_i32 tcg_op1
= tcg_temp_new_i32();
11935 TCGv_i32 tcg_op2
= tcg_temp_new_i32();
11936 TCGv_i32 tcg_res
[8];
11938 for (pass
= 0; pass
< maxpass
; pass
++) {
11939 int passreg
= pass
< (maxpass
/ 2) ? rn
: rm
;
11940 int passelt
= (pass
<< 1) & (maxpass
- 1);
11942 read_vec_element_i32(s
, tcg_op1
, passreg
, passelt
, MO_16
);
11943 read_vec_element_i32(s
, tcg_op2
, passreg
, passelt
+ 1, MO_16
);
11944 tcg_res
[pass
] = tcg_temp_new_i32();
11946 switch (fpopcode
) {
11947 case 0x10: /* FMAXNMP */
11948 gen_helper_advsimd_maxnumh(tcg_res
[pass
], tcg_op1
, tcg_op2
,
11951 case 0x12: /* FADDP */
11952 gen_helper_advsimd_addh(tcg_res
[pass
], tcg_op1
, tcg_op2
, fpst
);
11954 case 0x16: /* FMAXP */
11955 gen_helper_advsimd_maxh(tcg_res
[pass
], tcg_op1
, tcg_op2
, fpst
);
11957 case 0x18: /* FMINNMP */
11958 gen_helper_advsimd_minnumh(tcg_res
[pass
], tcg_op1
, tcg_op2
,
11961 case 0x1e: /* FMINP */
11962 gen_helper_advsimd_minh(tcg_res
[pass
], tcg_op1
, tcg_op2
, fpst
);
11965 g_assert_not_reached();
11969 for (pass
= 0; pass
< maxpass
; pass
++) {
11970 write_vec_element_i32(s
, tcg_res
[pass
], rd
, pass
, MO_16
);
11971 tcg_temp_free_i32(tcg_res
[pass
]);
11974 tcg_temp_free_i32(tcg_op1
);
11975 tcg_temp_free_i32(tcg_op2
);
11978 for (pass
= 0; pass
< elements
; pass
++) {
11979 TCGv_i32 tcg_op1
= tcg_temp_new_i32();
11980 TCGv_i32 tcg_op2
= tcg_temp_new_i32();
11981 TCGv_i32 tcg_res
= tcg_temp_new_i32();
11983 read_vec_element_i32(s
, tcg_op1
, rn
, pass
, MO_16
);
11984 read_vec_element_i32(s
, tcg_op2
, rm
, pass
, MO_16
);
11986 switch (fpopcode
) {
11987 case 0x0: /* FMAXNM */
11988 gen_helper_advsimd_maxnumh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
11990 case 0x1: /* FMLA */
11991 read_vec_element_i32(s
, tcg_res
, rd
, pass
, MO_16
);
11992 gen_helper_advsimd_muladdh(tcg_res
, tcg_op1
, tcg_op2
, tcg_res
,
11995 case 0x2: /* FADD */
11996 gen_helper_advsimd_addh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
11998 case 0x3: /* FMULX */
11999 gen_helper_advsimd_mulxh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
12001 case 0x4: /* FCMEQ */
12002 gen_helper_advsimd_ceq_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
12004 case 0x6: /* FMAX */
12005 gen_helper_advsimd_maxh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
12007 case 0x7: /* FRECPS */
12008 gen_helper_recpsf_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
12010 case 0x8: /* FMINNM */
12011 gen_helper_advsimd_minnumh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
12013 case 0x9: /* FMLS */
12014 /* As usual for ARM, separate negation for fused multiply-add */
12015 tcg_gen_xori_i32(tcg_op1
, tcg_op1
, 0x8000);
12016 read_vec_element_i32(s
, tcg_res
, rd
, pass
, MO_16
);
12017 gen_helper_advsimd_muladdh(tcg_res
, tcg_op1
, tcg_op2
, tcg_res
,
12020 case 0xa: /* FSUB */
12021 gen_helper_advsimd_subh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
12023 case 0xe: /* FMIN */
12024 gen_helper_advsimd_minh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
12026 case 0xf: /* FRSQRTS */
12027 gen_helper_rsqrtsf_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
12029 case 0x13: /* FMUL */
12030 gen_helper_advsimd_mulh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
12032 case 0x14: /* FCMGE */
12033 gen_helper_advsimd_cge_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
12035 case 0x15: /* FACGE */
12036 gen_helper_advsimd_acge_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
12038 case 0x17: /* FDIV */
12039 gen_helper_advsimd_divh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
12041 case 0x1a: /* FABD */
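                /* FABD: compute a - b, then clear the sign bit to get |a - b|. */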
12042 gen_helper_advsimd_subh(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
12043 tcg_gen_andi_i32(tcg_res
, tcg_res
, 0x7fff);
12045 case 0x1c: /* FCMGT */
12046 gen_helper_advsimd_cgt_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
12048 case 0x1d: /* FACGT */
12049 gen_helper_advsimd_acgt_f16(tcg_res
, tcg_op1
, tcg_op2
, fpst
);
12052 g_assert_not_reached();
12055 write_vec_element_i32(s
, tcg_res
, rd
, pass
, MO_16
);
12056 tcg_temp_free_i32(tcg_res
);
12057 tcg_temp_free_i32(tcg_op1
);
12058 tcg_temp_free_i32(tcg_op2
);
12062 tcg_temp_free_ptr(fpst
);
12064 clear_vec_high(s
, is_q
, rd
);
/* AdvSIMD three same extra
 *  31   30  29 28       24 23  22  21 20  16  15 14    11  10 9  5 4  0
 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
 * | 0 | Q | U | 0 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
 */
static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    bool feature;
    int rot;

    switch (u * 16 + opcode) {
12086 case 0x10: /* SQRDMLAH (vector) */
12087 case 0x11: /* SQRDMLSH (vector) */
12088 if (size
!= 1 && size
!= 2) {
12089 unallocated_encoding(s
);
12092 feature
= dc_isar_feature(aa64_rdm
, s
);
12094 case 0x02: /* SDOT (vector) */
12095 case 0x12: /* UDOT (vector) */
12096 if (size
!= MO_32
) {
12097 unallocated_encoding(s
);
12100 feature
= dc_isar_feature(aa64_dp
, s
);
12102 case 0x03: /* USDOT */
12103 if (size
!= MO_32
) {
12104 unallocated_encoding(s
);
12107 feature
= dc_isar_feature(aa64_i8mm
, s
);
12109 case 0x04: /* SMMLA */
12110 case 0x14: /* UMMLA */
12111 case 0x05: /* USMMLA */
12112 if (!is_q
|| size
!= MO_32
) {
12113 unallocated_encoding(s
);
12116 feature
= dc_isar_feature(aa64_i8mm
, s
);
12118 case 0x18: /* FCMLA, #0 */
12119 case 0x19: /* FCMLA, #90 */
12120 case 0x1a: /* FCMLA, #180 */
12121 case 0x1b: /* FCMLA, #270 */
12122 case 0x1c: /* FCADD, #90 */
12123 case 0x1e: /* FCADD, #270 */
12125 || (size
== 1 && !dc_isar_feature(aa64_fp16
, s
))
12126 || (size
== 3 && !is_q
)) {
12127 unallocated_encoding(s
);
12130 feature
= dc_isar_feature(aa64_fcma
, s
);
12132 case 0x1d: /* BFMMLA */
12133 if (size
!= MO_16
|| !is_q
) {
12134 unallocated_encoding(s
);
12137 feature
= dc_isar_feature(aa64_bf16
, s
);
12141 case 1: /* BFDOT */
12142 case 3: /* BFMLAL{B,T} */
12143 feature
= dc_isar_feature(aa64_bf16
, s
);
12146 unallocated_encoding(s
);
12151 unallocated_encoding(s
);
12155 unallocated_encoding(s
);
12158 if (!fp_access_check(s
)) {
12163 case 0x0: /* SQRDMLAH (vector) */
12164 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_sqrdmlah_qc
, size
);
12167 case 0x1: /* SQRDMLSH (vector) */
12168 gen_gvec_fn3(s
, is_q
, rd
, rn
, rm
, gen_gvec_sqrdmlsh_qc
, size
);
12171 case 0x2: /* SDOT / UDOT */
12172 gen_gvec_op4_ool(s
, is_q
, rd
, rn
, rm
, rd
, 0,
12173 u
? gen_helper_gvec_udot_b
: gen_helper_gvec_sdot_b
);
12176 case 0x3: /* USDOT */
12177 gen_gvec_op4_ool(s
, is_q
, rd
, rn
, rm
, rd
, 0, gen_helper_gvec_usdot_b
);
12180 case 0x04: /* SMMLA, UMMLA */
12181 gen_gvec_op4_ool(s
, 1, rd
, rn
, rm
, rd
, 0,
12182 u
? gen_helper_gvec_ummla_b
12183 : gen_helper_gvec_smmla_b
);
12185 case 0x05: /* USMMLA */
12186 gen_gvec_op4_ool(s
, 1, rd
, rn
, rm
, rd
, 0, gen_helper_gvec_usmmla_b
);
12189 case 0x8: /* FCMLA, #0 */
12190 case 0x9: /* FCMLA, #90 */
12191 case 0xa: /* FCMLA, #180 */
12192 case 0xb: /* FCMLA, #270 */
12193 rot
= extract32(opcode
, 0, 2);
12196 gen_gvec_op4_fpst(s
, is_q
, rd
, rn
, rm
, rd
, true, rot
,
12197 gen_helper_gvec_fcmlah
);
12200 gen_gvec_op4_fpst(s
, is_q
, rd
, rn
, rm
, rd
, false, rot
,
12201 gen_helper_gvec_fcmlas
);
12204 gen_gvec_op4_fpst(s
, is_q
, rd
, rn
, rm
, rd
, false, rot
,
12205 gen_helper_gvec_fcmlad
);
12208 g_assert_not_reached();
12212 case 0xc: /* FCADD, #90 */
12213 case 0xe: /* FCADD, #270 */
12214 rot
= extract32(opcode
, 1, 1);
12217 gen_gvec_op3_fpst(s
, is_q
, rd
, rn
, rm
, size
== 1, rot
,
12218 gen_helper_gvec_fcaddh
);
12221 gen_gvec_op3_fpst(s
, is_q
, rd
, rn
, rm
, size
== 1, rot
,
12222 gen_helper_gvec_fcadds
);
12225 gen_gvec_op3_fpst(s
, is_q
, rd
, rn
, rm
, size
== 1, rot
,
12226 gen_helper_gvec_fcaddd
);
12229 g_assert_not_reached();
12233 case 0xd: /* BFMMLA */
12234 gen_gvec_op4_ool(s
, is_q
, rd
, rn
, rm
, rd
, 0, gen_helper_gvec_bfmmla
);
12238 case 1: /* BFDOT */
12239 gen_gvec_op4_ool(s
, is_q
, rd
, rn
, rm
, rd
, 0, gen_helper_gvec_bfdot
);
12241 case 3: /* BFMLAL{B,T} */
12242 gen_gvec_op4_fpst(s
, 1, rd
, rn
, rm
, rd
, false, is_q
,
12243 gen_helper_gvec_bfmlal
);
12246 g_assert_not_reached();
12251 g_assert_not_reached();
static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
                                  int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are widening (so each size element
     * in the source becomes a 2*size element in the destination.
     * The only instruction like this is FCVTL.
     */
    int pass;

    if (size == 3) {
        /* 32 -> 64 bit fp conversion */
        TCGv_i64 tcg_res[2];
        int srcelt = is_q ? 2 : 0;

        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
            gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
            tcg_temp_free_i32(tcg_op);
        }
        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    } else {
        /* 16 -> 32 bit fp conversion */
        int srcelt = is_q ? 4 : 0;
        TCGv_i32 tcg_res[4];
        TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
        TCGv_i32 ahp = get_ahp_flag();

        for (pass = 0; pass < 4; pass++) {
            tcg_res[pass] = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
            gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
                                           fpst, ahp);
        }
        for (pass = 0; pass < 4; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
            tcg_temp_free_i32(tcg_res[pass]);
        }

        tcg_temp_free_ptr(fpst);
        tcg_temp_free_i32(ahp);
    }
}
static void handle_rev(DisasContext *s, int opcode, bool u,
                       bool is_q, int size, int rn, int rd)
{
    int op = (opcode << 1) | u;
    int opsz = op + size;
    int grp_size = 3 - opsz;
    int dsize = is_q ? 128 : 64;
    int i;

    if (opsz >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 0) {
        /* Special case bytes, use bswap op on each group of elements */
        int groups = dsize / (8 << grp_size);

        for (i = 0; i < groups; i++) {
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            read_vec_element(s, tcg_tmp, rn, i, grp_size);
            switch (grp_size) {
            case MO_16:
                tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
                break;
            case MO_32:
                tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
                break;
            case MO_64:
                tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_tmp, rd, i, grp_size);
            tcg_temp_free_i64(tcg_tmp);
        }
        clear_vec_high(s, is_q, rd);
    } else {
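        /*
         * General case: reverse the elements within each group by XOR-ing
         * each element index with revmask, then deposit every source
         * element at its reversed position in the 128-bit result.
         */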
        int revmask = (1 << grp_size) - 1;
        int esize = 8 << size;
        int elements = dsize / esize;
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_const_i64(0);
        TCGv_i64 tcg_rd_hi = tcg_const_i64(0);

        for (i = 0; i < elements; i++) {
            int e_rev = (i & 0xf) ^ revmask;
            int off = e_rev * esize;
            read_vec_element(s, tcg_rn, rn, i, size);
            if (off >= 64) {
                tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
                                    tcg_rn, off - 64, esize);
            } else {
                tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
            }
        }
        write_vec_element(s, tcg_rd, rd, 0, MO_64);
        write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);

        tcg_temp_free_i64(tcg_rd_hi);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    }
}
static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
                                  bool is_q, int size, int rn, int rd)
{
    /* Implement the pairwise operations from 2-misc:
     * SADDLP, UADDLP, SADALP, UADALP.
     * These all add pairs of elements in the input to produce a
     * double-width result element in the output (possibly accumulating).
     */
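    /*
     * For example, SADDLP with halfword source elements adds each pair of
     * adjacent 16-bit elements to produce a 32-bit result element; SADALP
     * additionally accumulates into the existing destination element.
     */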
    bool accum = (opcode == 0x6);
    int maxpass = is_q ? 2 : 1;
    int pass;
    TCGv_i64 tcg_res[2];

    if (size == 2) {
        /* 32 + 32 -> 64 op */
        MemOp memop = size + (u ? 0 : MO_SIGN);

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass * 2, memop);
            read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
            tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
            if (accum) {
                read_vec_element(s, tcg_op1, rd, pass, MO_64);
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            NeonGenOne64OpFn *genfn;
            static NeonGenOne64OpFn * const fns[2][2] = {
                { gen_helper_neon_addlp_s8,  gen_helper_neon_addlp_u8 },
                { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
            };

            genfn = fns[size][u];

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_res[pass], tcg_op);

            if (accum) {
                read_vec_element(s, tcg_op, rd, pass, MO_64);
                if (size == 0) {
                    gen_helper_neon_addl_u16(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                } else {
                    gen_helper_neon_addl_u32(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                }
            }

            tcg_temp_free_i64(tcg_op);
        }
    }
    if (!is_q) {
        tcg_res[1] = tcg_constant_i64(0);
    }
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
{
    /* Implement SHLL and SHLL2 */
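    /*
     * Each source element is widened to double its size and then shifted
     * left by the original element width, e.g. SHLL Vd.8H, Vn.8B, #8.
     */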
    int pass;
    int part = is_q ? 2 : 0;
    TCGv_i64 tcg_res[2];

    for (pass = 0; pass < 2; pass++) {
        static NeonGenWidenFn * const widenfns[3] = {
            gen_helper_neon_widen_u8,
            gen_helper_neon_widen_u16,
            tcg_gen_extu_i32_i64,
        };
        NeonGenWidenFn *widenfn = widenfns[size];
        TCGv_i32 tcg_op = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
        tcg_res[pass] = tcg_temp_new_i64();
        widenfn(tcg_res[pass], tcg_op);
        tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);

        tcg_temp_free_i32(tcg_op);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
/* AdvSIMD two reg misc
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool need_fpstatus = false;
    bool need_rmode = false;
    int rmode = -1;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
12499 case 0x0: /* REV64, REV32 */
12500 case 0x1: /* REV16 */
12501 handle_rev(s
, opcode
, u
, is_q
, size
, rn
, rd
);
12503 case 0x5: /* CNT, NOT, RBIT */
12504 if (u
&& size
== 0) {
12507 } else if (u
&& size
== 1) {
12510 } else if (!u
&& size
== 0) {
12514 unallocated_encoding(s
);
12516 case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
12517 case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
12519 unallocated_encoding(s
);
12522 if (!fp_access_check(s
)) {
12526 handle_2misc_narrow(s
, false, opcode
, u
, is_q
, size
, rn
, rd
);
12528 case 0x4: /* CLS, CLZ */
12530 unallocated_encoding(s
);
12534 case 0x2: /* SADDLP, UADDLP */
12535 case 0x6: /* SADALP, UADALP */
12537 unallocated_encoding(s
);
12540 if (!fp_access_check(s
)) {
12543 handle_2misc_pairwise(s
, opcode
, u
, is_q
, size
, rn
, rd
);
12545 case 0x13: /* SHLL, SHLL2 */
12546 if (u
== 0 || size
== 3) {
12547 unallocated_encoding(s
);
12550 if (!fp_access_check(s
)) {
12553 handle_shll(s
, is_q
, size
, rn
, rd
);
12555 case 0xa: /* CMLT */
12557 unallocated_encoding(s
);
12561 case 0x8: /* CMGT, CMGE */
12562 case 0x9: /* CMEQ, CMLE */
12563 case 0xb: /* ABS, NEG */
12564 if (size
== 3 && !is_q
) {
12565 unallocated_encoding(s
);
12569 case 0x3: /* SUQADD, USQADD */
12570 if (size
== 3 && !is_q
) {
12571 unallocated_encoding(s
);
12574 if (!fp_access_check(s
)) {
12577 handle_2misc_satacc(s
, false, u
, is_q
, size
, rn
, rd
);
12579 case 0x7: /* SQABS, SQNEG */
12580 if (size
== 3 && !is_q
) {
12581 unallocated_encoding(s
);
12586 case 0x16 ... 0x1f:
12588 /* Floating point: U, size[1] and opcode indicate operation;
12589 * size[0] indicates single or double precision.
12591 int is_double
= extract32(size
, 0, 1);
12592 opcode
|= (extract32(size
, 1, 1) << 5) | (u
<< 6);
12593 size
= is_double
? 3 : 2;
12595 case 0x2f: /* FABS */
12596 case 0x6f: /* FNEG */
12597 if (size
== 3 && !is_q
) {
12598 unallocated_encoding(s
);
12602 case 0x1d: /* SCVTF */
12603 case 0x5d: /* UCVTF */
12605 bool is_signed
= (opcode
== 0x1d) ? true : false;
12606 int elements
= is_double
? 2 : is_q
? 4 : 2;
12607 if (is_double
&& !is_q
) {
12608 unallocated_encoding(s
);
12611 if (!fp_access_check(s
)) {
12614 handle_simd_intfp_conv(s
, rd
, rn
, elements
, is_signed
, 0, size
);
12617 case 0x2c: /* FCMGT (zero) */
12618 case 0x2d: /* FCMEQ (zero) */
12619 case 0x2e: /* FCMLT (zero) */
12620 case 0x6c: /* FCMGE (zero) */
12621 case 0x6d: /* FCMLE (zero) */
12622 if (size
== 3 && !is_q
) {
12623 unallocated_encoding(s
);
12626 handle_2misc_fcmp_zero(s
, opcode
, false, u
, is_q
, size
, rn
, rd
);
12628 case 0x7f: /* FSQRT */
12629 if (size
== 3 && !is_q
) {
12630 unallocated_encoding(s
);
12634 case 0x1a: /* FCVTNS */
12635 case 0x1b: /* FCVTMS */
12636 case 0x3a: /* FCVTPS */
12637 case 0x3b: /* FCVTZS */
12638 case 0x5a: /* FCVTNU */
12639 case 0x5b: /* FCVTMU */
12640 case 0x7a: /* FCVTPU */
12641 case 0x7b: /* FCVTZU */
12642 need_fpstatus
= true;
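        /*
         * Bits 5 and 0 of the extended opcode select the rounding mode:
         * FCVTN* uses FPROUNDING_TIEEVEN, FCVTM* NEGINF, FCVTP* POSINF
         * and FCVTZ* ZERO.
         */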
12644 rmode
= extract32(opcode
, 5, 1) | (extract32(opcode
, 0, 1) << 1);
12645 if (size
== 3 && !is_q
) {
12646 unallocated_encoding(s
);
12650 case 0x5c: /* FCVTAU */
12651 case 0x1c: /* FCVTAS */
12652 need_fpstatus
= true;
12654 rmode
= FPROUNDING_TIEAWAY
;
12655 if (size
== 3 && !is_q
) {
12656 unallocated_encoding(s
);
12660 case 0x3c: /* URECPE */
12662 unallocated_encoding(s
);
12666 case 0x3d: /* FRECPE */
12667 case 0x7d: /* FRSQRTE */
12668 if (size
== 3 && !is_q
) {
12669 unallocated_encoding(s
);
12672 if (!fp_access_check(s
)) {
12675 handle_2misc_reciprocal(s
, opcode
, false, u
, is_q
, size
, rn
, rd
);
12677 case 0x56: /* FCVTXN, FCVTXN2 */
12679 unallocated_encoding(s
);
12683 case 0x16: /* FCVTN, FCVTN2 */
12684 /* handle_2misc_narrow does a 2*size -> size operation, but these
12685 * instructions encode the source size rather than dest size.
12687 if (!fp_access_check(s
)) {
12690 handle_2misc_narrow(s
, false, opcode
, 0, is_q
, size
- 1, rn
, rd
);
12692 case 0x36: /* BFCVTN, BFCVTN2 */
12693 if (!dc_isar_feature(aa64_bf16
, s
) || size
!= 2) {
12694 unallocated_encoding(s
);
12697 if (!fp_access_check(s
)) {
12700 handle_2misc_narrow(s
, false, opcode
, 0, is_q
, size
- 1, rn
, rd
);
12702 case 0x17: /* FCVTL, FCVTL2 */
12703 if (!fp_access_check(s
)) {
12706 handle_2misc_widening(s
, opcode
, is_q
, size
, rn
, rd
);
12708 case 0x18: /* FRINTN */
12709 case 0x19: /* FRINTM */
12710 case 0x38: /* FRINTP */
12711 case 0x39: /* FRINTZ */
12713 rmode
= extract32(opcode
, 5, 1) | (extract32(opcode
, 0, 1) << 1);
12715 case 0x59: /* FRINTX */
12716 case 0x79: /* FRINTI */
12717 need_fpstatus
= true;
12718 if (size
== 3 && !is_q
) {
12719 unallocated_encoding(s
);
12723 case 0x58: /* FRINTA */
12725 rmode
= FPROUNDING_TIEAWAY
;
12726 need_fpstatus
= true;
12727 if (size
== 3 && !is_q
) {
12728 unallocated_encoding(s
);
12732 case 0x7c: /* URSQRTE */
12734 unallocated_encoding(s
);
12738 case 0x1e: /* FRINT32Z */
12739 case 0x1f: /* FRINT64Z */
12741 rmode
= FPROUNDING_ZERO
;
12743 case 0x5e: /* FRINT32X */
12744 case 0x5f: /* FRINT64X */
12745 need_fpstatus
= true;
12746 if ((size
== 3 && !is_q
) || !dc_isar_feature(aa64_frint
, s
)) {
12747 unallocated_encoding(s
);
12752 unallocated_encoding(s
);
12758 unallocated_encoding(s
);
12762 if (!fp_access_check(s
)) {
12766 if (need_fpstatus
|| need_rmode
) {
12767 tcg_fpstatus
= fpstatus_ptr(FPST_FPCR
);
12769 tcg_fpstatus
= NULL
;
12772 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
12773 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, tcg_fpstatus
);
12780 if (u
&& size
== 0) { /* NOT */
12781 gen_gvec_fn2(s
, is_q
, rd
, rn
, tcg_gen_gvec_not
, 0);
12785 case 0x8: /* CMGT, CMGE */
12787 gen_gvec_fn2(s
, is_q
, rd
, rn
, gen_gvec_cge0
, size
);
12789 gen_gvec_fn2(s
, is_q
, rd
, rn
, gen_gvec_cgt0
, size
);
12792 case 0x9: /* CMEQ, CMLE */
12794 gen_gvec_fn2(s
, is_q
, rd
, rn
, gen_gvec_cle0
, size
);
12796 gen_gvec_fn2(s
, is_q
, rd
, rn
, gen_gvec_ceq0
, size
);
12799 case 0xa: /* CMLT */
12800 gen_gvec_fn2(s
, is_q
, rd
, rn
, gen_gvec_clt0
, size
);
12803 if (u
) { /* ABS, NEG */
12804 gen_gvec_fn2(s
, is_q
, rd
, rn
, tcg_gen_gvec_neg
, size
);
12806 gen_gvec_fn2(s
, is_q
, rd
, rn
, tcg_gen_gvec_abs
, size
);
12812 /* All 64-bit element operations can be shared with scalar 2misc */
12815 /* Coverity claims (size == 3 && !is_q) has been eliminated
12816 * from all paths leading to here.
12818 tcg_debug_assert(is_q
);
12819 for (pass
= 0; pass
< 2; pass
++) {
12820 TCGv_i64 tcg_op
= tcg_temp_new_i64();
12821 TCGv_i64 tcg_res
= tcg_temp_new_i64();
12823 read_vec_element(s
, tcg_op
, rn
, pass
, MO_64
);
12825 handle_2misc_64(s
, opcode
, u
, tcg_res
, tcg_op
,
12826 tcg_rmode
, tcg_fpstatus
);
12828 write_vec_element(s
, tcg_res
, rd
, pass
, MO_64
);
12830 tcg_temp_free_i64(tcg_res
);
12831 tcg_temp_free_i64(tcg_op
);
12836 for (pass
= 0; pass
< (is_q
? 4 : 2); pass
++) {
12837 TCGv_i32 tcg_op
= tcg_temp_new_i32();
12838 TCGv_i32 tcg_res
= tcg_temp_new_i32();
12840 read_vec_element_i32(s
, tcg_op
, rn
, pass
, MO_32
);
12843 /* Special cases for 32 bit elements */
12845 case 0x4: /* CLS */
12847 tcg_gen_clzi_i32(tcg_res
, tcg_op
, 32);
12849 tcg_gen_clrsb_i32(tcg_res
, tcg_op
);
12852 case 0x7: /* SQABS, SQNEG */
12854 gen_helper_neon_qneg_s32(tcg_res
, cpu_env
, tcg_op
);
12856 gen_helper_neon_qabs_s32(tcg_res
, cpu_env
, tcg_op
);
12859 case 0x2f: /* FABS */
12860 gen_helper_vfp_abss(tcg_res
, tcg_op
);
12862 case 0x6f: /* FNEG */
12863 gen_helper_vfp_negs(tcg_res
, tcg_op
);
12865 case 0x7f: /* FSQRT */
12866 gen_helper_vfp_sqrts(tcg_res
, tcg_op
, cpu_env
);
12868 case 0x1a: /* FCVTNS */
12869 case 0x1b: /* FCVTMS */
12870 case 0x1c: /* FCVTAS */
12871 case 0x3a: /* FCVTPS */
12872 case 0x3b: /* FCVTZS */
12873 gen_helper_vfp_tosls(tcg_res
, tcg_op
,
12874 tcg_constant_i32(0), tcg_fpstatus
);
12876 case 0x5a: /* FCVTNU */
12877 case 0x5b: /* FCVTMU */
12878 case 0x5c: /* FCVTAU */
12879 case 0x7a: /* FCVTPU */
12880 case 0x7b: /* FCVTZU */
12881 gen_helper_vfp_touls(tcg_res
, tcg_op
,
12882 tcg_constant_i32(0), tcg_fpstatus
);
12884 case 0x18: /* FRINTN */
12885 case 0x19: /* FRINTM */
12886 case 0x38: /* FRINTP */
12887 case 0x39: /* FRINTZ */
12888 case 0x58: /* FRINTA */
12889 case 0x79: /* FRINTI */
12890 gen_helper_rints(tcg_res
, tcg_op
, tcg_fpstatus
);
12892 case 0x59: /* FRINTX */
12893 gen_helper_rints_exact(tcg_res
, tcg_op
, tcg_fpstatus
);
12895 case 0x7c: /* URSQRTE */
12896 gen_helper_rsqrte_u32(tcg_res
, tcg_op
);
12898 case 0x1e: /* FRINT32Z */
12899 case 0x5e: /* FRINT32X */
12900 gen_helper_frint32_s(tcg_res
, tcg_op
, tcg_fpstatus
);
12902 case 0x1f: /* FRINT64Z */
12903 case 0x5f: /* FRINT64X */
12904 gen_helper_frint64_s(tcg_res
, tcg_op
, tcg_fpstatus
);
12907 g_assert_not_reached();
12910 /* Use helpers for 8 and 16 bit elements */
12912 case 0x5: /* CNT, RBIT */
12913 /* For these two insns size is part of the opcode specifier
12914 * (handled earlier); they always operate on byte elements.
12917 gen_helper_neon_rbit_u8(tcg_res
, tcg_op
);
12919 gen_helper_neon_cnt_u8(tcg_res
, tcg_op
);
12922 case 0x7: /* SQABS, SQNEG */
12924 NeonGenOneOpEnvFn
*genfn
;
12925 static NeonGenOneOpEnvFn
* const fns
[2][2] = {
12926 { gen_helper_neon_qabs_s8
, gen_helper_neon_qneg_s8
},
12927 { gen_helper_neon_qabs_s16
, gen_helper_neon_qneg_s16
},
12929 genfn
= fns
[size
][u
];
12930 genfn(tcg_res
, cpu_env
, tcg_op
);
12933 case 0x4: /* CLS, CLZ */
12936 gen_helper_neon_clz_u8(tcg_res
, tcg_op
);
12938 gen_helper_neon_clz_u16(tcg_res
, tcg_op
);
12942 gen_helper_neon_cls_s8(tcg_res
, tcg_op
);
12944 gen_helper_neon_cls_s16(tcg_res
, tcg_op
);
12949 g_assert_not_reached();
12953 write_vec_element_i32(s
, tcg_res
, rd
, pass
, MO_32
);
12955 tcg_temp_free_i32(tcg_res
);
12956 tcg_temp_free_i32(tcg_op
);
12959 clear_vec_high(s
, is_q
, rd
);
12962 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, tcg_fpstatus
);
12963 tcg_temp_free_i32(tcg_rmode
);
12965 if (need_fpstatus
) {
12966 tcg_temp_free_ptr(tcg_fpstatus
);
12970 /* AdvSIMD [scalar] two register miscellaneous (FP16)
12972 * 31 30 29 28 27 24 23 22 21 17 16 12 11 10 9 5 4 0
12973 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
12974 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 | Rn | Rd |
12975 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
12976 * mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
12977 * val: 0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
12979 * This actually covers two groups where scalar access is governed by
12980 * bit 28. A bunch of the instructions (float to integral) only exist
12981 * in the vector form and are un-allocated for the scalar decode. Also
12982 * in the scalar decode Q is always 1.
12984 static void disas_simd_two_reg_misc_fp16(DisasContext
*s
, uint32_t insn
)
12986 int fpop
, opcode
, a
, u
;
12990 bool only_in_vector
= false;
12993 TCGv_i32 tcg_rmode
= NULL
;
12994 TCGv_ptr tcg_fpstatus
= NULL
;
12995 bool need_rmode
= false;
12996 bool need_fpst
= true;
12999 if (!dc_isar_feature(aa64_fp16
, s
)) {
13000 unallocated_encoding(s
);
13004 rd
= extract32(insn
, 0, 5);
13005 rn
= extract32(insn
, 5, 5);
13007 a
= extract32(insn
, 23, 1);
13008 u
= extract32(insn
, 29, 1);
13009 is_scalar
= extract32(insn
, 28, 1);
13010 is_q
= extract32(insn
, 30, 1);
13012 opcode
= extract32(insn
, 12, 5);
13013 fpop
= deposit32(opcode
, 5, 1, a
);
13014 fpop
= deposit32(fpop
, 6, 1, u
);
13017 case 0x1d: /* SCVTF */
13018 case 0x5d: /* UCVTF */
13025 elements
= (is_q
? 8 : 4);
13028 if (!fp_access_check(s
)) {
13031 handle_simd_intfp_conv(s
, rd
, rn
, elements
, !u
, 0, MO_16
);
13035 case 0x2c: /* FCMGT (zero) */
13036 case 0x2d: /* FCMEQ (zero) */
13037 case 0x2e: /* FCMLT (zero) */
13038 case 0x6c: /* FCMGE (zero) */
13039 case 0x6d: /* FCMLE (zero) */
13040 handle_2misc_fcmp_zero(s
, fpop
, is_scalar
, 0, is_q
, MO_16
, rn
, rd
);
13042 case 0x3d: /* FRECPE */
13043 case 0x3f: /* FRECPX */
13045 case 0x18: /* FRINTN */
13047 only_in_vector
= true;
13048 rmode
= FPROUNDING_TIEEVEN
;
13050 case 0x19: /* FRINTM */
13052 only_in_vector
= true;
13053 rmode
= FPROUNDING_NEGINF
;
13055 case 0x38: /* FRINTP */
13057 only_in_vector
= true;
13058 rmode
= FPROUNDING_POSINF
;
13060 case 0x39: /* FRINTZ */
13062 only_in_vector
= true;
13063 rmode
= FPROUNDING_ZERO
;
13065 case 0x58: /* FRINTA */
13067 only_in_vector
= true;
13068 rmode
= FPROUNDING_TIEAWAY
;
13070 case 0x59: /* FRINTX */
13071 case 0x79: /* FRINTI */
13072 only_in_vector
= true;
13073 /* current rounding mode */
13075 case 0x1a: /* FCVTNS */
13077 rmode
= FPROUNDING_TIEEVEN
;
13079 case 0x1b: /* FCVTMS */
13081 rmode
= FPROUNDING_NEGINF
;
13083 case 0x1c: /* FCVTAS */
13085 rmode
= FPROUNDING_TIEAWAY
;
13087 case 0x3a: /* FCVTPS */
13089 rmode
= FPROUNDING_POSINF
;
13091 case 0x3b: /* FCVTZS */
13093 rmode
= FPROUNDING_ZERO
;
13095 case 0x5a: /* FCVTNU */
13097 rmode
= FPROUNDING_TIEEVEN
;
13099 case 0x5b: /* FCVTMU */
13101 rmode
= FPROUNDING_NEGINF
;
13103 case 0x5c: /* FCVTAU */
13105 rmode
= FPROUNDING_TIEAWAY
;
13107 case 0x7a: /* FCVTPU */
13109 rmode
= FPROUNDING_POSINF
;
13111 case 0x7b: /* FCVTZU */
13113 rmode
= FPROUNDING_ZERO
;
13115 case 0x2f: /* FABS */
13116 case 0x6f: /* FNEG */
13119 case 0x7d: /* FRSQRTE */
13120 case 0x7f: /* FSQRT (vector) */
13123 unallocated_encoding(s
);
13128 /* Check additional constraints for the scalar encoding */
13131 unallocated_encoding(s
);
13134 /* FRINTxx is only in the vector form */
13135 if (only_in_vector
) {
13136 unallocated_encoding(s
);
13141 if (!fp_access_check(s
)) {
13145 if (need_rmode
|| need_fpst
) {
13146 tcg_fpstatus
= fpstatus_ptr(FPST_FPCR_F16
);
13150 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
13151 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, tcg_fpstatus
);
13155 TCGv_i32 tcg_op
= read_fp_hreg(s
, rn
);
13156 TCGv_i32 tcg_res
= tcg_temp_new_i32();
13159 case 0x1a: /* FCVTNS */
13160 case 0x1b: /* FCVTMS */
13161 case 0x1c: /* FCVTAS */
13162 case 0x3a: /* FCVTPS */
13163 case 0x3b: /* FCVTZS */
13164 gen_helper_advsimd_f16tosinth(tcg_res
, tcg_op
, tcg_fpstatus
);
13166 case 0x3d: /* FRECPE */
13167 gen_helper_recpe_f16(tcg_res
, tcg_op
, tcg_fpstatus
);
13169 case 0x3f: /* FRECPX */
13170 gen_helper_frecpx_f16(tcg_res
, tcg_op
, tcg_fpstatus
);
13172 case 0x5a: /* FCVTNU */
13173 case 0x5b: /* FCVTMU */
13174 case 0x5c: /* FCVTAU */
13175 case 0x7a: /* FCVTPU */
13176 case 0x7b: /* FCVTZU */
13177 gen_helper_advsimd_f16touinth(tcg_res
, tcg_op
, tcg_fpstatus
);
13179 case 0x6f: /* FNEG */
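            /* FNEG on a half-precision value just flips the sign bit. */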
13180 tcg_gen_xori_i32(tcg_res
, tcg_op
, 0x8000);
13182 case 0x7d: /* FRSQRTE */
13183 gen_helper_rsqrte_f16(tcg_res
, tcg_op
, tcg_fpstatus
);
13186 g_assert_not_reached();
13189 /* limit any sign extension going on */
13190 tcg_gen_andi_i32(tcg_res
, tcg_res
, 0xffff);
13191 write_fp_sreg(s
, rd
, tcg_res
);
13193 tcg_temp_free_i32(tcg_res
);
13194 tcg_temp_free_i32(tcg_op
);
13196 for (pass
= 0; pass
< (is_q
? 8 : 4); pass
++) {
13197 TCGv_i32 tcg_op
= tcg_temp_new_i32();
13198 TCGv_i32 tcg_res
= tcg_temp_new_i32();
13200 read_vec_element_i32(s
, tcg_op
, rn
, pass
, MO_16
);
13203 case 0x1a: /* FCVTNS */
13204 case 0x1b: /* FCVTMS */
13205 case 0x1c: /* FCVTAS */
13206 case 0x3a: /* FCVTPS */
13207 case 0x3b: /* FCVTZS */
13208 gen_helper_advsimd_f16tosinth(tcg_res
, tcg_op
, tcg_fpstatus
);
13210 case 0x3d: /* FRECPE */
13211 gen_helper_recpe_f16(tcg_res
, tcg_op
, tcg_fpstatus
);
13213 case 0x5a: /* FCVTNU */
13214 case 0x5b: /* FCVTMU */
13215 case 0x5c: /* FCVTAU */
13216 case 0x7a: /* FCVTPU */
13217 case 0x7b: /* FCVTZU */
13218 gen_helper_advsimd_f16touinth(tcg_res
, tcg_op
, tcg_fpstatus
);
13220 case 0x18: /* FRINTN */
13221 case 0x19: /* FRINTM */
13222 case 0x38: /* FRINTP */
13223 case 0x39: /* FRINTZ */
13224 case 0x58: /* FRINTA */
13225 case 0x79: /* FRINTI */
13226 gen_helper_advsimd_rinth(tcg_res
, tcg_op
, tcg_fpstatus
);
13228 case 0x59: /* FRINTX */
13229 gen_helper_advsimd_rinth_exact(tcg_res
, tcg_op
, tcg_fpstatus
);
13231 case 0x2f: /* FABS */
13232 tcg_gen_andi_i32(tcg_res
, tcg_op
, 0x7fff);
13234 case 0x6f: /* FNEG */
13235 tcg_gen_xori_i32(tcg_res
, tcg_op
, 0x8000);
13237 case 0x7d: /* FRSQRTE */
13238 gen_helper_rsqrte_f16(tcg_res
, tcg_op
, tcg_fpstatus
);
13240 case 0x7f: /* FSQRT */
13241 gen_helper_sqrt_f16(tcg_res
, tcg_op
, tcg_fpstatus
);
13244 g_assert_not_reached();
13247 write_vec_element_i32(s
, tcg_res
, rd
, pass
, MO_16
);
13249 tcg_temp_free_i32(tcg_res
);
13250 tcg_temp_free_i32(tcg_op
);
13253 clear_vec_high(s
, is_q
, rd
);
13257 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, tcg_fpstatus
);
13258 tcg_temp_free_i32(tcg_rmode
);
13261 if (tcg_fpstatus
) {
13262 tcg_temp_free_ptr(tcg_fpstatus
);
13266 /* AdvSIMD scalar x indexed element
13267 * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
13268 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
13269 * | 0 1 | U | 1 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
13270 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
13271 * AdvSIMD vector x indexed element
13272 * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
13273 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
13274 * | 0 | Q | U | 0 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
13275 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
13277 static void disas_simd_indexed(DisasContext
*s
, uint32_t insn
)
13279 /* This encoding has two kinds of instruction:
13280 * normal, where we perform elt x idxelt => elt for each
13281 * element in the vector
13282 * long, where we perform elt x idxelt and generate a result of
13283 * double the width of the input element
13284 * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
13286 bool is_scalar
= extract32(insn
, 28, 1);
13287 bool is_q
= extract32(insn
, 30, 1);
13288 bool u
= extract32(insn
, 29, 1);
13289 int size
= extract32(insn
, 22, 2);
13290 int l
= extract32(insn
, 21, 1);
13291 int m
= extract32(insn
, 20, 1);
13292 /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
13293 int rm
= extract32(insn
, 16, 4);
13294 int opcode
= extract32(insn
, 12, 4);
13295 int h
= extract32(insn
, 11, 1);
13296 int rn
= extract32(insn
, 5, 5);
13297 int rd
= extract32(insn
, 0, 5);
13298 bool is_long
= false;
13300 bool is_fp16
= false;
13304 switch (16 * u
+ opcode
) {
13305 case 0x08: /* MUL */
13306 case 0x10: /* MLA */
13307 case 0x14: /* MLS */
13309 unallocated_encoding(s
);
13313 case 0x02: /* SMLAL, SMLAL2 */
13314 case 0x12: /* UMLAL, UMLAL2 */
13315 case 0x06: /* SMLSL, SMLSL2 */
13316 case 0x16: /* UMLSL, UMLSL2 */
13317 case 0x0a: /* SMULL, SMULL2 */
13318 case 0x1a: /* UMULL, UMULL2 */
13320 unallocated_encoding(s
);
13325 case 0x03: /* SQDMLAL, SQDMLAL2 */
13326 case 0x07: /* SQDMLSL, SQDMLSL2 */
13327 case 0x0b: /* SQDMULL, SQDMULL2 */
13330 case 0x0c: /* SQDMULH */
13331 case 0x0d: /* SQRDMULH */
13333 case 0x01: /* FMLA */
13334 case 0x05: /* FMLS */
13335 case 0x09: /* FMUL */
13336 case 0x19: /* FMULX */
13339 case 0x1d: /* SQRDMLAH */
13340 case 0x1f: /* SQRDMLSH */
13341 if (!dc_isar_feature(aa64_rdm
, s
)) {
13342 unallocated_encoding(s
);
13346 case 0x0e: /* SDOT */
13347 case 0x1e: /* UDOT */
13348 if (is_scalar
|| size
!= MO_32
|| !dc_isar_feature(aa64_dp
, s
)) {
13349 unallocated_encoding(s
);
13355 case 0: /* SUDOT */
13356 case 2: /* USDOT */
13357 if (is_scalar
|| !dc_isar_feature(aa64_i8mm
, s
)) {
13358 unallocated_encoding(s
);
13363 case 1: /* BFDOT */
13364 if (is_scalar
|| !dc_isar_feature(aa64_bf16
, s
)) {
13365 unallocated_encoding(s
);
13370 case 3: /* BFMLAL{B,T} */
13371 if (is_scalar
|| !dc_isar_feature(aa64_bf16
, s
)) {
13372 unallocated_encoding(s
);
13375 /* can't set is_fp without other incorrect size checks */
13379 unallocated_encoding(s
);
13383 case 0x11: /* FCMLA #0 */
13384 case 0x13: /* FCMLA #90 */
13385 case 0x15: /* FCMLA #180 */
13386 case 0x17: /* FCMLA #270 */
13387 if (is_scalar
|| !dc_isar_feature(aa64_fcma
, s
)) {
13388 unallocated_encoding(s
);
13393 case 0x00: /* FMLAL */
13394 case 0x04: /* FMLSL */
13395 case 0x18: /* FMLAL2 */
13396 case 0x1c: /* FMLSL2 */
13397 if (is_scalar
|| size
!= MO_32
|| !dc_isar_feature(aa64_fhm
, s
)) {
13398 unallocated_encoding(s
);
13402 /* is_fp, but we pass cpu_env not fp_status. */
13405 unallocated_encoding(s
);
13410 case 1: /* normal fp */
13411 /* convert insn encoded size to MemOp size */
13413 case 0: /* half-precision */
13417 case MO_32
: /* single precision */
13418 case MO_64
: /* double precision */
13421 unallocated_encoding(s
);
13426 case 2: /* complex fp */
13427 /* Each indexable element is a complex pair. */
13432 unallocated_encoding(s
);
13440 unallocated_encoding(s
);
13445 default: /* integer */
13449 unallocated_encoding(s
);
13454 if (is_fp16
&& !dc_isar_feature(aa64_fp16
, s
)) {
13455 unallocated_encoding(s
);
13459 /* Given MemOp size, adjust register and indexing. */
13462 index
= h
<< 2 | l
<< 1 | m
;
13465 index
= h
<< 1 | l
;
13470 unallocated_encoding(s
);
13477 g_assert_not_reached();
13480 if (!fp_access_check(s
)) {
13485 fpst
= fpstatus_ptr(is_fp16
? FPST_FPCR_F16
: FPST_FPCR
);
13490 switch (16 * u
+ opcode
) {
13491 case 0x0e: /* SDOT */
13492 case 0x1e: /* UDOT */
13493 gen_gvec_op4_ool(s
, is_q
, rd
, rn
, rm
, rd
, index
,
13494 u
? gen_helper_gvec_udot_idx_b
13495 : gen_helper_gvec_sdot_idx_b
);
13498 switch (extract32(insn
, 22, 2)) {
13499 case 0: /* SUDOT */
13500 gen_gvec_op4_ool(s
, is_q
, rd
, rn
, rm
, rd
, index
,
13501 gen_helper_gvec_sudot_idx_b
);
13503 case 1: /* BFDOT */
13504 gen_gvec_op4_ool(s
, is_q
, rd
, rn
, rm
, rd
, index
,
13505 gen_helper_gvec_bfdot_idx
);
13507 case 2: /* USDOT */
13508 gen_gvec_op4_ool(s
, is_q
, rd
, rn
, rm
, rd
, index
,
13509 gen_helper_gvec_usdot_idx_b
);
13511 case 3: /* BFMLAL{B,T} */
13512 gen_gvec_op4_fpst(s
, 1, rd
, rn
, rm
, rd
, 0, (index
<< 1) | is_q
,
13513 gen_helper_gvec_bfmlal_idx
);
13516 g_assert_not_reached();
13517 case 0x11: /* FCMLA #0 */
13518 case 0x13: /* FCMLA #90 */
13519 case 0x15: /* FCMLA #180 */
13520 case 0x17: /* FCMLA #270 */
13522 int rot
= extract32(insn
, 13, 2);
13523 int data
= (index
<< 2) | rot
;
13524 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s
, rd
),
13525 vec_full_reg_offset(s
, rn
),
13526 vec_full_reg_offset(s
, rm
),
13527 vec_full_reg_offset(s
, rd
), fpst
,
13528 is_q
? 16 : 8, vec_full_reg_size(s
), data
,
13530 ? gen_helper_gvec_fcmlas_idx
13531 : gen_helper_gvec_fcmlah_idx
);
13532 tcg_temp_free_ptr(fpst
);
13536 case 0x00: /* FMLAL */
13537 case 0x04: /* FMLSL */
13538 case 0x18: /* FMLAL2 */
13539 case 0x1c: /* FMLSL2 */
13541 int is_s
= extract32(opcode
, 2, 1);
13543 int data
= (index
<< 2) | (is_2
<< 1) | is_s
;
13544 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s
, rd
),
13545 vec_full_reg_offset(s
, rn
),
13546 vec_full_reg_offset(s
, rm
), cpu_env
,
13547 is_q
? 16 : 8, vec_full_reg_size(s
),
13548 data
, gen_helper_gvec_fmlal_idx_a64
);
13552 case 0x08: /* MUL */
13553 if (!is_long
&& !is_scalar
) {
13554 static gen_helper_gvec_3
* const fns
[3] = {
13555 gen_helper_gvec_mul_idx_h
,
13556 gen_helper_gvec_mul_idx_s
,
13557 gen_helper_gvec_mul_idx_d
,
13559 tcg_gen_gvec_3_ool(vec_full_reg_offset(s
, rd
),
13560 vec_full_reg_offset(s
, rn
),
13561 vec_full_reg_offset(s
, rm
),
13562 is_q
? 16 : 8, vec_full_reg_size(s
),
13563 index
, fns
[size
- 1]);
13568 case 0x10: /* MLA */
13569 if (!is_long
&& !is_scalar
) {
13570 static gen_helper_gvec_4
* const fns
[3] = {
13571 gen_helper_gvec_mla_idx_h
,
13572 gen_helper_gvec_mla_idx_s
,
13573 gen_helper_gvec_mla_idx_d
,
13575 tcg_gen_gvec_4_ool(vec_full_reg_offset(s
, rd
),
13576 vec_full_reg_offset(s
, rn
),
13577 vec_full_reg_offset(s
, rm
),
13578 vec_full_reg_offset(s
, rd
),
13579 is_q
? 16 : 8, vec_full_reg_size(s
),
13580 index
, fns
[size
- 1]);
13585 case 0x14: /* MLS */
13586 if (!is_long
&& !is_scalar
) {
13587 static gen_helper_gvec_4
* const fns
[3] = {
13588 gen_helper_gvec_mls_idx_h
,
13589 gen_helper_gvec_mls_idx_s
,
13590 gen_helper_gvec_mls_idx_d
,
13592 tcg_gen_gvec_4_ool(vec_full_reg_offset(s
, rd
),
13593 vec_full_reg_offset(s
, rn
),
13594 vec_full_reg_offset(s
, rm
),
13595 vec_full_reg_offset(s
, rd
),
13596 is_q
? 16 : 8, vec_full_reg_size(s
),
13597 index
, fns
[size
- 1]);
13604 TCGv_i64 tcg_idx
= tcg_temp_new_i64();
13607 assert(is_fp
&& is_q
&& !is_long
);
13609 read_vec_element(s
, tcg_idx
, rm
, index
, MO_64
);
13611 for (pass
= 0; pass
< (is_scalar
? 1 : 2); pass
++) {
13612 TCGv_i64 tcg_op
= tcg_temp_new_i64();
13613 TCGv_i64 tcg_res
= tcg_temp_new_i64();
13615 read_vec_element(s
, tcg_op
, rn
, pass
, MO_64
);
13617 switch (16 * u
+ opcode
) {
13618 case 0x05: /* FMLS */
13619 /* As usual for ARM, separate negation for fused multiply-add */
13620 gen_helper_vfp_negd(tcg_op
, tcg_op
);
13622 case 0x01: /* FMLA */
13623 read_vec_element(s
, tcg_res
, rd
, pass
, MO_64
);
13624 gen_helper_vfp_muladdd(tcg_res
, tcg_op
, tcg_idx
, tcg_res
, fpst
);
13626 case 0x09: /* FMUL */
13627 gen_helper_vfp_muld(tcg_res
, tcg_op
, tcg_idx
, fpst
);
13629 case 0x19: /* FMULX */
13630 gen_helper_vfp_mulxd(tcg_res
, tcg_op
, tcg_idx
, fpst
);
13633 g_assert_not_reached();
13636 write_vec_element(s
, tcg_res
, rd
, pass
, MO_64
);
13637 tcg_temp_free_i64(tcg_op
);
13638 tcg_temp_free_i64(tcg_res
);
13641 tcg_temp_free_i64(tcg_idx
);
13642 clear_vec_high(s
, !is_scalar
, rd
);
13643 } else if (!is_long
) {
13644 /* 32 bit floating point, or 16 or 32 bit integer.
13645 * For the 16 bit scalar case we use the usual Neon helpers and
13646 * rely on the fact that 0 op 0 == 0 with no side effects.
13648 TCGv_i32 tcg_idx
= tcg_temp_new_i32();
13649 int pass
, maxpasses
;
13654 maxpasses
= is_q
? 4 : 2;
13657 read_vec_element_i32(s
, tcg_idx
, rm
, index
, size
);
13659 if (size
== 1 && !is_scalar
) {
13660 /* The simplest way to handle the 16x16 indexed ops is to duplicate
13661 * the index into both halves of the 32 bit tcg_idx and then use
13662 * the usual Neon helpers.
13664 tcg_gen_deposit_i32(tcg_idx
, tcg_idx
, tcg_idx
, 16, 16);
13667 for (pass
= 0; pass
< maxpasses
; pass
++) {
13668 TCGv_i32 tcg_op
= tcg_temp_new_i32();
13669 TCGv_i32 tcg_res
= tcg_temp_new_i32();
13671 read_vec_element_i32(s
, tcg_op
, rn
, pass
, is_scalar
? size
: MO_32
);
13673 switch (16 * u
+ opcode
) {
13674 case 0x08: /* MUL */
13675 case 0x10: /* MLA */
13676 case 0x14: /* MLS */
13678 static NeonGenTwoOpFn
* const fns
[2][2] = {
13679 { gen_helper_neon_add_u16
, gen_helper_neon_sub_u16
},
13680 { tcg_gen_add_i32
, tcg_gen_sub_i32
},
13682 NeonGenTwoOpFn
*genfn
;
13683 bool is_sub
= opcode
== 0x4;
13686 gen_helper_neon_mul_u16(tcg_res
, tcg_op
, tcg_idx
);
13688 tcg_gen_mul_i32(tcg_res
, tcg_op
, tcg_idx
);
13690 if (opcode
== 0x8) {
13693 read_vec_element_i32(s
, tcg_op
, rd
, pass
, MO_32
);
13694 genfn
= fns
[size
- 1][is_sub
];
13695 genfn(tcg_res
, tcg_op
, tcg_res
);
13698 case 0x05: /* FMLS */
13699 case 0x01: /* FMLA */
13700 read_vec_element_i32(s
, tcg_res
, rd
, pass
,
13701 is_scalar
? size
: MO_32
);
13704 if (opcode
== 0x5) {
13705 /* As usual for ARM, separate negation for fused
13707 tcg_gen_xori_i32(tcg_op
, tcg_op
, 0x80008000);
13710 gen_helper_advsimd_muladdh(tcg_res
, tcg_op
, tcg_idx
,
13713 gen_helper_advsimd_muladd2h(tcg_res
, tcg_op
, tcg_idx
,
13718 if (opcode
== 0x5) {
13719 /* As usual for ARM, separate negation for
13720 * fused multiply-add */
13721 tcg_gen_xori_i32(tcg_op
, tcg_op
, 0x80000000);
13723 gen_helper_vfp_muladds(tcg_res
, tcg_op
, tcg_idx
,
13727 g_assert_not_reached();
13730 case 0x09: /* FMUL */
13734 gen_helper_advsimd_mulh(tcg_res
, tcg_op
,
13737 gen_helper_advsimd_mul2h(tcg_res
, tcg_op
,
13742 gen_helper_vfp_muls(tcg_res
, tcg_op
, tcg_idx
, fpst
);
13745 g_assert_not_reached();
13748 case 0x19: /* FMULX */
13752 gen_helper_advsimd_mulxh(tcg_res
, tcg_op
,
13755 gen_helper_advsimd_mulx2h(tcg_res
, tcg_op
,
13760 gen_helper_vfp_mulxs(tcg_res
, tcg_op
, tcg_idx
, fpst
);
13763 g_assert_not_reached();
13766 case 0x0c: /* SQDMULH */
13768 gen_helper_neon_qdmulh_s16(tcg_res
, cpu_env
,
13771 gen_helper_neon_qdmulh_s32(tcg_res
, cpu_env
,
13775 case 0x0d: /* SQRDMULH */
13777 gen_helper_neon_qrdmulh_s16(tcg_res
, cpu_env
,
13780 gen_helper_neon_qrdmulh_s32(tcg_res
, cpu_env
,
13784 case 0x1d: /* SQRDMLAH */
13785 read_vec_element_i32(s
, tcg_res
, rd
, pass
,
13786 is_scalar
? size
: MO_32
);
13788 gen_helper_neon_qrdmlah_s16(tcg_res
, cpu_env
,
13789 tcg_op
, tcg_idx
, tcg_res
);
13791 gen_helper_neon_qrdmlah_s32(tcg_res
, cpu_env
,
13792 tcg_op
, tcg_idx
, tcg_res
);
13795 case 0x1f: /* SQRDMLSH */
13796 read_vec_element_i32(s
, tcg_res
, rd
, pass
,
13797 is_scalar
? size
: MO_32
);
13799 gen_helper_neon_qrdmlsh_s16(tcg_res
, cpu_env
,
13800 tcg_op
, tcg_idx
, tcg_res
);
13802 gen_helper_neon_qrdmlsh_s32(tcg_res
, cpu_env
,
13803 tcg_op
, tcg_idx
, tcg_res
);
13807 g_assert_not_reached();
13811 write_fp_sreg(s
, rd
, tcg_res
);
13813 write_vec_element_i32(s
, tcg_res
, rd
, pass
, MO_32
);
13816 tcg_temp_free_i32(tcg_op
);
13817 tcg_temp_free_i32(tcg_res
);
13820 tcg_temp_free_i32(tcg_idx
);
13821 clear_vec_high(s
, is_q
, rd
);
13823 /* long ops: 16x16->32 or 32x32->64 */
13824 TCGv_i64 tcg_res
[2];
13826 bool satop
= extract32(opcode
, 0, 1);
13827 MemOp memop
= MO_32
;
13834 TCGv_i64 tcg_idx
= tcg_temp_new_i64();
13836 read_vec_element(s
, tcg_idx
, rm
, index
, memop
);
13838 for (pass
= 0; pass
< (is_scalar
? 1 : 2); pass
++) {
13839 TCGv_i64 tcg_op
= tcg_temp_new_i64();
13840 TCGv_i64 tcg_passres
;
13846 passelt
= pass
+ (is_q
* 2);
13849 read_vec_element(s
, tcg_op
, rn
, passelt
, memop
);
13851 tcg_res
[pass
] = tcg_temp_new_i64();
13853 if (opcode
== 0xa || opcode
== 0xb) {
13854 /* Non-accumulating ops */
13855 tcg_passres
= tcg_res
[pass
];
13857 tcg_passres
= tcg_temp_new_i64();
13860 tcg_gen_mul_i64(tcg_passres
, tcg_op
, tcg_idx
);
13861 tcg_temp_free_i64(tcg_op
);
13864 /* saturating, doubling */
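                /* The doubling is a saturating add of the product to itself. */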
13865 gen_helper_neon_addl_saturate_s64(tcg_passres
, cpu_env
,
13866 tcg_passres
, tcg_passres
);
13869 if (opcode
== 0xa || opcode
== 0xb) {
13873 /* Accumulating op: handle accumulate step */
13874 read_vec_element(s
, tcg_res
[pass
], rd
, pass
, MO_64
);
13877 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
13878 tcg_gen_add_i64(tcg_res
[pass
], tcg_res
[pass
], tcg_passres
);
13880 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
13881 tcg_gen_sub_i64(tcg_res
[pass
], tcg_res
[pass
], tcg_passres
);
13883 case 0x7: /* SQDMLSL, SQDMLSL2 */
13884 tcg_gen_neg_i64(tcg_passres
, tcg_passres
);
13886 case 0x3: /* SQDMLAL, SQDMLAL2 */
13887 gen_helper_neon_addl_saturate_s64(tcg_res
[pass
], cpu_env
,
13892 g_assert_not_reached();
13894 tcg_temp_free_i64(tcg_passres
);
13896 tcg_temp_free_i64(tcg_idx
);
13898 clear_vec_high(s
, !is_scalar
, rd
);
13900 TCGv_i32 tcg_idx
= tcg_temp_new_i32();
13903 read_vec_element_i32(s
, tcg_idx
, rm
, index
, size
);
13906 /* The simplest way to handle the 16x16 indexed ops is to
13907 * duplicate the index into both halves of the 32 bit tcg_idx
13908 * and then use the usual Neon helpers.
13910 tcg_gen_deposit_i32(tcg_idx
, tcg_idx
, tcg_idx
, 16, 16);
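                /*
                 * tcg_idx now holds the selected 16-bit element in both
                 * halves of the 32-bit value, so the 2x16 Neon helpers
                 * below apply the same index element to both lanes.
                 */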
            }

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i32 tcg_op = tcg_temp_new_i32();
                TCGv_i64 tcg_passres;

                if (is_scalar) {
                    read_vec_element_i32(s, tcg_op, rn, pass, size);
                } else {
                    read_vec_element_i32(s, tcg_op, rn,
                                         pass + (is_q * 2), MO_32);
                }

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                if (memop & MO_SIGN) {
                    gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
                }
                if (satop) {
                    gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                      tcg_passres, tcg_passres);
                }
                tcg_temp_free_i32(tcg_op);

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
                tcg_temp_free_i64(tcg_passres);
            }
            tcg_temp_free_i32(tcg_idx);

            if (is_scalar) {
                tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
            }
        }

        if (is_scalar) {
            tcg_res[1] = tcg_constant_i64(0);
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}

/* Crypto AES
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_aes(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int decrypt;
    gen_helper_gvec_2 *genfn2 = NULL;
    gen_helper_gvec_3 *genfn3 = NULL;

    if (!dc_isar_feature(aa64_aes, s) || size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x4: /* AESE */
        decrypt = 0;
        genfn3 = gen_helper_crypto_aese;
        break;
    case 0x6: /* AESMC */
        decrypt = 0;
        genfn2 = gen_helper_crypto_aesmc;
        break;
    case 0x5: /* AESD */
        decrypt = 1;
        genfn3 = gen_helper_crypto_aese;
        break;
    case 0x7: /* AESIMC */
        decrypt = 1;
        genfn2 = gen_helper_crypto_aesmc;
        break;
    default:
        unallocated_encoding(s);
        return;
    }
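
    /*
     * AESE/AESD share one out-of-line helper (as do AESMC/AESIMC); the
     * encrypt-vs-decrypt choice travels to the helper as the gvec "data"
     * argument passed below.
     */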

    if (!fp_access_check(s)) {
        return;
    }

    if (genfn2) {
        gen_gvec_op2_ool(s, true, rd, rn, decrypt, genfn2);
    } else {
        gen_gvec_op3_ool(s, true, rd, rd, rn, decrypt, genfn3);
    }
}

/* Crypto three-reg SHA
 *  31             24 23  22  21 20  16  15 14    12 11 10 9    5 4    0
 * +-----------------+------+---+------+---+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 0 |  Rm  | 0 | opcode | 0 0 |  Rn  |  Rd  |
 * +-----------------+------+---+------+---+--------+-----+------+------+
 */
static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 3);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    gen_helper_gvec_3 *genfn;
    bool feature;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1C */
        genfn = gen_helper_crypto_sha1c;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 1: /* SHA1P */
        genfn = gen_helper_crypto_sha1p;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 2: /* SHA1M */
        genfn = gen_helper_crypto_sha1m;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 3: /* SHA1SU0 */
        genfn = gen_helper_crypto_sha1su0;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 4: /* SHA256H */
        genfn = gen_helper_crypto_sha256h;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    case 5: /* SHA256H2 */
        genfn = gen_helper_crypto_sha256h2;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    case 6: /* SHA256SU1 */
        genfn = gen_helper_crypto_sha256su1;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
    gen_gvec_op3_ool(s, true, rd, rn, rm, 0, genfn);
}

/* Crypto two-reg SHA
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    gen_helper_gvec_2 *genfn;
    bool feature;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1H */
        feature = dc_isar_feature(aa64_sha1, s);
        genfn = gen_helper_crypto_sha1h;
        break;
    case 1: /* SHA1SU1 */
        feature = dc_isar_feature(aa64_sha1, s);
        genfn = gen_helper_crypto_sha1su1;
        break;
    case 2: /* SHA256SU0 */
        feature = dc_isar_feature(aa64_sha256, s);
        genfn = gen_helper_crypto_sha256su0;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
    gen_gvec_op2_ool(s, true, rd, rn, 0, genfn);
}
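
/*
 * RAX1 (part of FEAT_SHA3): per 64-bit lane, d = n ^ rol64(m, 1).
 * Provided below as an i64 op, a generic vector op and an out-of-line
 * helper, so the gvec expander can pick whichever the host supports.
 */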
static void gen_rax1_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)
{
    tcg_gen_rotli_i64(d, m, 1);
    tcg_gen_xor_i64(d, d, n);
}

static void gen_rax1_vec(unsigned vece, TCGv_vec d, TCGv_vec n, TCGv_vec m)
{
    tcg_gen_rotli_vec(vece, d, m, 1);
    tcg_gen_xor_vec(vece, d, d, n);
}

void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
    static const GVecGen3 op = {
        .fni8 = gen_rax1_i64,
        .fniv = gen_rax1_vec,
        .opt_opc = vecop_list,
        .fno = gen_helper_crypto_rax1,
        .vece = MO_64,
    };

    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &op);
}

/* Crypto three-reg SHA512
 *  31                   21 20  16 15 14 13 12  11 10 9    5 4    0
 * +-----------------------+------+---+---+-----+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 1 |  Rm  | 1 | O | 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+---+---+-----+--------+------+------+
 */
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int o = extract32(insn, 14, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;
    gen_helper_gvec_3 *oolfn = NULL;
    GVecGen3Fn *gvecfn = NULL;

    if (o == 0) {
        switch (opcode) {
        case 0: /* SHA512H */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h;
            break;
        case 1: /* SHA512H2 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h2;
            break;
        case 2: /* SHA512SU1 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512su1;
            break;
        case 3: /* RAX1 */
            feature = dc_isar_feature(aa64_sha3, s);
            gvecfn = gen_gvec_rax1;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        switch (opcode) {
        case 0: /* SM3PARTW1 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw1;
            break;
        case 1: /* SM3PARTW2 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw2;
            break;
        case 2: /* SM4EKEY */
            feature = dc_isar_feature(aa64_sm4, s);
            oolfn = gen_helper_crypto_sm4ekey;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (oolfn) {
        gen_gvec_op3_ool(s, true, rd, rn, rm, 0, oolfn);
    } else {
        gen_gvec_fn3(s, true, rd, rn, rm, gvecfn, MO_64);
    }
}

/* Crypto two-reg SHA512
 *  31                                       12 11 10 9    5 4    0
 * +-----------------------------------------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------------------------+--------+------+------+
 */
static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;

    switch (opcode) {
    case 0: /* SHA512SU0 */
        feature = dc_isar_feature(aa64_sha512, s);
        break;
    case 1: /* SM4E */
        feature = dc_isar_feature(aa64_sm4, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0: /* SHA512SU0 */
        gen_gvec_op2_ool(s, true, rd, rn, 0, gen_helper_crypto_sha512su0);
        break;
    case 1: /* SM4E */
        gen_gvec_op3_ool(s, true, rd, rd, rn, 0, gen_helper_crypto_sm4e);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Crypto four-register
 *  31               23 22 21 20  16 15  14  10 9    5 4    0
 * +-------------------+-----+------+---+------+------+------+
 * | 1 1 0 0 1 1 1 0 0 | Op0 |  Rm  | 0 |  Ra  |  Rn  |  Rd  |
 * +-------------------+-----+------+---+------+------+------+
 */
static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 21, 2);
    int rm = extract32(insn, 16, 5);
    int ra = extract32(insn, 10, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;

    switch (op0) {
    case 0: /* EOR3 */
    case 1: /* BCAX */
        feature = dc_isar_feature(aa64_sha3, s);
        break;
    case 2: /* SM3SS1 */
        feature = dc_isar_feature(aa64_sm3, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (op0 < 2) {
        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_op3 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);
            read_vec_element(s, tcg_op3, ra, pass, MO_64);

            if (op0 == 0) {
                /* EOR3 */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
            } else {
                /* BCAX */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
            }
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_op3);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    } else {
        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;

        tcg_op1 = tcg_temp_new_i32();
        tcg_op2 = tcg_temp_new_i32();
        tcg_op3 = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_zero = tcg_constant_i32(0);

        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);
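
        /*
         * SM3SS1 computes ROL32(ROL32(n, 12) + m + a, 7) into element 3;
         * the right-rotates by 20 and 25 below are those same left
         * rotations expressed as rotri.
         */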
        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_op3);
        tcg_temp_free_i32(tcg_res);
    }
}
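
/*
 * XAR (part of FEAT_SHA3): per 64-bit lane, Vd = ROR(Vn ^ Vm, imm6).
 */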

/* Crypto XAR
 *  31                   21 20  16 15    10 9    5 4    0
 * +-----------------------+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 0 0 |  Rm  |  imm6  |  Rn  |  Rd  |
 * +-----------------------+------+--------+------+------+
 */
static void disas_crypto_xar(DisasContext *s, uint32_t insn)
{
    int rm = extract32(insn, 16, 5);
    int imm6 = extract32(insn, 10, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (!dc_isar_feature(aa64_sha3, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd),
                 vec_full_reg_offset(s, rn),
                 vec_full_reg_offset(s, rm), imm6, 16,
                 vec_full_reg_size(s));
}

/* Crypto three-reg imm2
 *  31                   21 20  16 15 14 13 12  11 10  9    5 4    0
 * +-----------------------+------+-----+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 0 |  Rm  | 1 0 | imm2 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+-----+------+--------+------+------+
 */
static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_crypto_sm3tt1a, gen_helper_crypto_sm3tt1b,
        gen_helper_crypto_sm3tt2a, gen_helper_crypto_sm3tt2b,
    };
    int opcode = extract32(insn, 10, 2);
    int imm2 = extract32(insn, 12, 2);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (!dc_isar_feature(aa64_sm3, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    gen_gvec_op3_ool(s, true, rd, rn, rm, imm2, fns[opcode]);
}

/* C3.6 Data processing - SIMD, inc Crypto
 *
 * As the decode gets a little complex we are using a table based
 * approach for this part of the decode.
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
    { 0xce000000, 0xff808000, disas_crypto_four_reg },
    { 0xce800000, 0xffe00000, disas_crypto_xar },
    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
    { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
    { 0x00000000, 0x00000000, NULL }
};
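
/*
 * A table entry matches when (insn & mask) == pattern, and the lookup
 * returns the first matching fn; more specific patterns (such as
 * simd_mod_imm above) must therefore come before the ones they overlap.
 */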

static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
{
    /* Note that this is called with all non-FP cases from
     * table C3-6 so it must UNDEF for entries not specifically
     * allocated to instructions in that table.
     */
    AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
    if (fn) {
        fn(s, insn);
    } else {
        unallocated_encoding(s);
    }
}

/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
        disas_data_proc_fp(s, insn);
    } else {
        /* SIMD, including crypto */
        disas_data_proc_simd(s, insn);
    }
}

/**
 * is_guarded_page:
 * @env: The cpu environment
 * @s: The DisasContext
 *
 * Return true if the page is guarded.
 */
static bool is_guarded_page(CPUARMState *env, DisasContext *s)
{
    uint64_t addr = s->base.pc_first;
#ifdef CONFIG_USER_ONLY
    return page_get_flags(addr) & PAGE_BTI;
#else
    int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
    unsigned int index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    /*
     * We test this immediately after reading an insn, which means
     * that any normal page must be in the TLB.  The only exception
     * would be for executing from flash or device memory, which
     * does not retain the TLB entry.
     *
     * FIXME: Assume false for those, for now.  We could use
     * arm_cpu_get_phys_page_attrs_debug to re-read the page
     * table entry even for that case.
     */
    return (tlb_hit(entry->addr_code, addr) &&
            arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].iotlb[index].attrs));
#endif
}

/**
 * btype_destination_ok:
 * @insn: The instruction at the branch destination
 * @bt: SCTLR_ELx.BT
 * @btype: PSTATE.BTYPE, and is non-zero
 *
 * On a guarded page, there are a limited number of insns
 * that may be present at the branch target:
 *   - branch target identifiers,
 *   - paciasp, pacibsp,
 *   - BRK insn,
 *   - HLT insn.
 * Anything else causes a Branch Target Exception.
 *
 * Return true if the branch is compatible, false to raise BTITRAP.
 */
static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
{
    if ((insn & 0xfffff01fu) == 0xd503201fu) {
        /* HINT space */
        switch (extract32(insn, 5, 7)) {
        case 0b011001: /* PACIASP */
        case 0b011011: /* PACIBSP */
            /*
             * If SCTLR_ELx.BT, then PACI*SP are not compatible
             * with btype == 3.  Otherwise all btype are ok.
             */
            return !bt || btype != 3;
        case 0b100000: /* BTI */
            /* Not compatible with any btype.  */
            return false;
        case 0b100010: /* BTI c */
            /* Not compatible with btype == 3 */
            return btype != 3;
        case 0b100100: /* BTI j */
            /* Not compatible with btype == 2 */
            return btype != 2;
        case 0b100110: /* BTI jc */
            /* Compatible with any btype.  */
            return true;
        }
    } else {
        switch (insn & 0xffe0001fu) {
        case 0xd4200000u: /* BRK */
        case 0xd4400000u: /* HLT */
            /* Give priority to the breakpoint exception.  */
            return true;
        }
    }
    return false;
}

static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    ARMCPU *arm_cpu = env_archcpu(env);
    CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
    int bound, core_mmu_idx;

    dc->isar = &arm_cpu->isar;
    dc->condjmp = 0;

    dc->aarch64 = true;
    dc->thumb = false;
    dc->sctlr_b = 0;
    dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
    dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
    dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
    dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
    dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
    dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
    dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
    dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
    dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL);
    dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;
    dc->svl = (EX_TBFLAG_A64(tb_flags, SVL) + 1) * 16;
    dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
    dc->bt = EX_TBFLAG_A64(tb_flags, BT);
    dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
    dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
    dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
    dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
    dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
    dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
    dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;
    dc->features = env->features;
    dc->dcz_blocksize = arm_cpu->dcz_blocksize;

#ifdef CONFIG_USER_ONLY
    /* In sve_probe_page, we assume TBI is enabled. */
    tcg_debug_assert(dc->tbid & 1);
#endif

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
    dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
    dc->is_ldex = false;

    /* Bound the number of insns to execute to those left on the page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
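    /*
     * (TARGET_PAGE_MASK has all bits set above the in-page offset, so the
     * negated OR above is the number of bytes left on the page, and each
     * A64 insn is a fixed 4 bytes.)
     */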
    /* If architectural single step active, limit to 1. */
    if (dc->ss_active) {
        bound = 1;
    }
    dc->base.max_insns = MIN(dc->base.max_insns, bound);

    init_tmp_a64_array(dc);
}

static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, 0, 0);
    dc->insn_start = tcg_last_op();
}

static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *s = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint64_t pc = s->base.pc_next;
    uint32_t insn;

    /* Singlestep exceptions have the highest priority. */
    if (s->ss_active && !s->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(s->base.num_insns == 1);
        gen_swstep_exception(s, 0, 0);
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = pc + 4;
        return;
    }

    if (pc & 3) {
        /*
         * PC alignment fault.  This has priority over the instruction abort
         * that we would receive from a translation fault via arm_ldl_code.
         * This should only be possible after an indirect branch, at the
         * start of the TB.
         */
        assert(s->base.num_insns == 1);
        gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
        return;
    }

    s->pc_curr = pc;
    insn = arm_ldl_code(env, &s->base, pc, s->sctlr_b);
    s->insn = insn;
    s->base.pc_next = pc + 4;

    s->fp_access_checked = false;
    s->sve_access_checked = false;

    if (s->pstate_il) {
        /*
         * Illegal execution state. This has priority over BTI
         * exceptions, but comes after instruction abort exceptions.
         */
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_illegalstate());
        return;
    }

    if (dc_isar_feature(aa64_bti, s)) {
        if (s->base.num_insns == 1) {
            /*
             * At the first insn of the TB, compute s->guarded_page.
             * We delayed computing this until successfully reading
             * the first insn of the TB, above.  This (mostly) ensures
             * that the softmmu tlb entry has been populated, and the
             * page table GP bit is available.
             *
             * Note that we need to compute this even if btype == 0,
             * because this value is used for BR instructions later
             * where ENV is not available.
             */
            s->guarded_page = is_guarded_page(env, s);

            /* First insn can have btype set to non-zero. */
            tcg_debug_assert(s->btype >= 0);

            /*
             * Note that the Branch Target Exception has fairly high
             * priority -- below debugging exceptions but above most
             * everything else.  This allows us to handle this now
             * instead of waiting until the insn is otherwise decoded.
             */
            if (s->btype != 0
                && s->guarded_page
                && !btype_destination_ok(insn, s->bt, s->btype)) {
                gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                                   syn_btitrap(s->btype));
                return;
            }
        } else {
            /* Not the first insn: btype must be 0. */
            tcg_debug_assert(s->btype == 0);
        }
    }

    switch (extract32(insn, 25, 4)) {
    case 0x0: case 0x1: case 0x3: /* UNALLOCATED */
        unallocated_encoding(s);
        break;
    case 0x2:
        if (!disas_sve(s, insn)) {
            unallocated_encoding(s);
        }
        break;
    case 0x8: case 0x9: /* Data processing - immediate */
        disas_data_proc_imm(s, insn);
        break;
    case 0xa: case 0xb: /* Branch, exception generation and system insns */
        disas_b_exc_sys(s, insn);
        break;
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xe:      /* Loads and stores */
        disas_ldst(s, insn);
        break;
    case 0x5:
    case 0xd:      /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf:      /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        assert(FALSE); /* all 15 cases should be handled above */
        break;
    }

    /* if we allocated any temporaries, free them here */
    free_tmp_a64(s);

    /*
     * After execution of most insns, btype is reset to 0.
     * Note that we set btype == -1 when the insn sets btype.
     */
    if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
        reset_btype(s);
    }

    translator_loop_temp_check(&s->base);
}

static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ss_active)) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        switch (dc->base.is_jmp) {
        default:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            gen_step_complete_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->base.pc_next);
            break;
        default:
        case DISAS_UPDATE_EXIT:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_EXIT:
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_UPDATE_NOCHAIN:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_JUMP:
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NORETURN:
        case DISAS_SWI:
            break;
        case DISAS_WFE:
            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_yield(cpu_env);
            break;
        case DISAS_WFI:
            /*
             * This is a special case because we don't want to just halt
             * the CPU if trying to debug across a WFI.
             */
            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_wfi(cpu_env, tcg_constant_i32(4));
            /*
             * The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
    }
}

static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
                                 CPUState *cpu, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
    target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
}

const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start           = aarch64_tr_tb_start,
    .insn_start         = aarch64_tr_insn_start,
    .translate_insn     = aarch64_tr_translate_insn,
    .tb_stop            = aarch64_tr_tb_stop,
    .disas_log          = aarch64_tr_disas_log,
};