/*
 * AArch64 translation
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "exec/exec-all.h"
#include "translate.h"
#include "translate-a64.h"
#include "qemu/log.h"
#include "semihosting/semihost.h"
#include "cpregs.h"

static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_high;

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};
enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};
/*
 * Helpers for extracting complex instruction fields
 */

/*
 * For load/store with an unsigned 12 bit immediate scaled by the element
 * size. The input has the immediate field in bits [14:3] and the element
 * size in [2:0].
 */
static int uimm_scaled(DisasContext *s, int x)
{
    unsigned imm = x >> 3;
    unsigned scale = extract32(x, 0, 3);
    return imm << scale;
}
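/*
 * Worked example (illustrative only, not from the decode tables): with
 * x == 0b101_011, extract32(x, 0, 3) == 3 selects an 8-byte element and
 * x >> 3 == 5 is the unscaled immediate, so the returned byte offset
 * is 5 << 3 == 40.
 */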
/* For load/store memory tags: scale offset by LOG2_TAG_GRANULE */
static int scale_by_log2_tag_granule(DisasContext *s, int x)
{
    return x << LOG2_TAG_GRANULE;
}

/*
 * Include the generated decoders.
 */

#include "decode-sme-fa64.c.inc"
#include "decode-a64.c.inc"
/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;
/* initialize TCG globals. */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(tcg_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(tcg_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(tcg_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}
/*
 * Return the core mmu_idx to use for A64 load/store insns which
 * have an "unprivileged load/store" variant. Those insns access
 * EL0 if executed from an EL which has control over EL0 (usually
 * EL1) but behave like normal loads and stores if executed from
 * elsewhere (eg EL3).
 *
 * @unpriv : true for the unprivileged encoding; false for the
 *           normal encoding (in which case we will return the same
 *           thing as get_mem_index()).
 */
static int get_a64_user_mem_index(DisasContext *s, bool unpriv)
{
    /*
     * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
     * which is the usual mmu_idx for this cpu state.
     */
    ARMMMUIdx useridx = s->mmu_idx;

    if (unpriv && s->unpriv) {
        /*
         * We have pre-computed the condition for AccType_UNPRIV.
         * Therefore we should never get here with a mmu_idx for
         * which we do not know the corresponding user mmu_idx.
         */
        switch (useridx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            useridx = ARMMMUIdx_E10_0;
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            useridx = ARMMMUIdx_E20_0;
            break;
        default:
            g_assert_not_reached();
        }
    }
    return arm_to_core_mmu_idx(useridx);
}
static void set_btype_raw(int val)
{
    tcg_gen_st_i32(tcg_constant_i32(val), tcg_env,
                   offsetof(CPUARMState, btype));
}

static void set_btype(DisasContext *s, int val)
{
    /* BTYPE is a 2-bit field, and 0 should be done with reset_btype. */
    tcg_debug_assert(val >= 1 && val <= 3);
    set_btype_raw(val);
    s->btype = -1;
}

static void reset_btype(DisasContext *s)
{
    if (s->btype != 0) {
        set_btype_raw(0);
        s->btype = 0;
    }
}

static void gen_pc_plus_diff(DisasContext *s, TCGv_i64 dest, target_long diff)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_i64(dest, cpu_pc, (s->pc_curr - s->pc_save) + diff);
    } else {
        tcg_gen_movi_i64(dest, s->pc_curr + diff);
    }
}

void gen_a64_update_pc(DisasContext *s, target_long diff)
{
    gen_pc_plus_diff(s, cpu_pc, diff);
    s->pc_save = s->pc_curr + diff;
}
/*
 * Handle Top Byte Ignore (TBI) bits.
 *
 * If address tagging is enabled via the TCR TBI bits:
 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 *    then the address is zero-extended, clearing bits [63:56]
 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 *    and TBI1 controls addresses with bit 55 == 1.
 *  If the appropriate TBI bit is set for the address then
 *    the address is sign-extended from bit 55 into bits [63:56]
 *
 * Here we have concatenated TBI{1,0} into tbi.
 */
static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
                                TCGv_i64 src, int tbi)
{
    if (tbi == 0) {
        /* Load unmodified address */
        tcg_gen_mov_i64(dst, src);
    } else if (!regime_has_2_ranges(s->mmu_idx)) {
        /* Force tag byte to all zero */
        tcg_gen_extract_i64(dst, src, 0, 56);
    } else {
        /* Sign-extend from bit 55. */
        tcg_gen_sextract_i64(dst, src, 0, 56);

        switch (tbi) {
        case 1:
            /* tbi0 but !tbi1: only use the extension if positive */
            tcg_gen_and_i64(dst, dst, src);
            break;
        case 2:
            /* !tbi0 but tbi1: only use the extension if negative */
            tcg_gen_or_i64(dst, dst, src);
            break;
        case 3:
            /* tbi0 and tbi1: always use the extension */
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    /*
     * If address tagging is enabled for instructions via the TCR TBI bits,
     * then loading an address into the PC will clear out any tag.
     */
    gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
    s->pc_save = -1;
}
/*
 * Handle MTE and/or TBI.
 *
 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register.  But for user-only
 * mode we do not have a TLB with which to implement this, so we must
 * remove the top byte now.
 *
 * Always return a fresh temporary that we can increment independently
 * of the write-back address.
 */
TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
{
    TCGv_i64 clean = tcg_temp_new_i64();
#ifdef CONFIG_USER_ONLY
    gen_top_byte_ignore(s, clean, addr, s->tbid);
#else
    tcg_gen_mov_i64(clean, addr);
#endif
    return clean;
}

/* Insert a zero tag into src, with the result at dst. */
static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, ~MAKE_64BIT_MASK(56, 4));
}
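/*
 * Illustrative arithmetic: MAKE_64BIT_MASK(56, 4) covers bits [59:56],
 * the allocation tag, so 0xf700_0000_0000_1234 becomes
 * 0xf000_0000_0000_1234 -- the tag nibble is cleared while bits [63:60]
 * of the top byte are preserved.
 */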
static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
                             MMUAccessType acc, int log2_size)
{
    gen_helper_probe_access(tcg_env, ptr,
                            tcg_constant_i32(acc),
                            tcg_constant_i32(get_mem_index(s)),
                            tcg_constant_i32(1 << log2_size));
}

/*
 * For MTE, check a single logical or atomic access.  This probes a single
 * address, the exact one specified.  The size and alignment of the access
 * is not relevant to MTE, per se, but watchpoints do require the size,
 * and we want to recognize those before making any other changes to state.
 */
static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
                                      bool is_write, bool tag_checked,
                                      MemOp memop, bool is_unpriv,
                                      int core_idx)
{
    if (tag_checked && s->mte_active[is_unpriv]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(memop));
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1);

        ret = tcg_temp_new_i64();
        gen_helper_mte_check(ret, tcg_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}

TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, MemOp memop)
{
    return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, memop,
                                 false, get_mem_index(s));
}

/*
 * For MTE, check multiple logical sequential accesses.
 */
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int total_size, MemOp single_mop)
{
    if (tag_checked && s->mte_active[0]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(single_mop));
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);

        ret = tcg_temp_new_i64();
        gen_helper_mte_check(ret, tcg_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}
/*
 * Generate the special alignment check that applies to AccType_ATOMIC
 * and AccType_ORDERED insns under FEAT_LSE2: the access need not be
 * naturally aligned, but it must not cross a 16-byte boundary.
 * See AArch64.CheckAlignment().
 */
static void check_lse2_align(DisasContext *s, int rn, int imm,
                             bool is_write, MemOp mop)
{
    TCGv_i32 tmp;
    TCGv_i64 addr;
    TCGLabel *over_label;
    MMUAccessType type;
    int mmu_idx;

    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, cpu_reg_sp(s, rn));
    tcg_gen_addi_i32(tmp, tmp, imm & 15);
    tcg_gen_andi_i32(tmp, tmp, 15);
    tcg_gen_addi_i32(tmp, tmp, memop_size(mop));

    over_label = gen_new_label();
    tcg_gen_brcondi_i32(TCG_COND_LEU, tmp, 16, over_label);

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm);

    type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD,
    mmu_idx = get_mem_index(s);
    gen_helper_unaligned_access(tcg_env, addr, tcg_constant_i32(type),
                                tcg_constant_i32(mmu_idx));

    gen_set_label(over_label);
}
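/*
 * Illustrative case: for an 8-byte access whose address has 12 in its
 * low four bits, tmp is 12 + 8 = 20 > 16, so the access would cross a
 * 16-byte boundary and the unaligned-access helper is invoked; with 8
 * in the low bits the sum is exactly 16 and the branch skips the helper.
 */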
/* Handle the alignment check for AccType_ATOMIC instructions. */
static MemOp check_atomic_align(DisasContext *s, int rn, MemOp mop)
{
    MemOp size = mop & MO_SIZE;

    if (size == MO_8) {
        return mop;
    }

    /*
     * If size == MO_128, this is a LDXP, and the operation is single-copy
     * atomic for each doubleword, not the entire quadword; it still must
     * be quadword aligned.
     */
    if (size == MO_128) {
        return finalize_memop_atom(s, MO_128 | MO_ALIGN,
                                   MO_ATOM_IFALIGN_PAIR);
    }
    if (dc_isar_feature(aa64_lse2, s)) {
        check_lse2_align(s, rn, 0, true, mop);
    } else {
        mop |= MO_ALIGN;
    }
    return finalize_memop(s, mop);
}

/* Handle the alignment check for AccType_ORDERED instructions. */
static MemOp check_ordered_align(DisasContext *s, int rn, int imm,
                                 bool is_write, MemOp mop)
{
    MemOp size = mop & MO_SIZE;

    if (size == MO_8) {
        return mop;
    }
    if (size == MO_128) {
        return finalize_memop_atom(s, MO_128 | MO_ALIGN,
                                   MO_ATOM_IFALIGN_PAIR);
    }
    if (!dc_isar_feature(aa64_lse2, s)) {
        mop |= MO_ALIGN;
    } else if (!s->naa) {
        check_lse2_align(s, rn, imm, is_write, mop);
    }
    return finalize_memop(s, mop);
}
typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /*
     * Sign-extend the 32-bit value so that the GE/LT comparisons work
     * properly.  The NE/EQ comparisons are also fine with this choice.
     */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);
}

static void gen_rebuild_hflags(DisasContext *s)
{
    gen_helper_rebuild_hflags_a64(tcg_env, tcg_constant_i32(s->current_el));
}

static void gen_exception_internal(int excp)
{
    assert(excp_is_internal(excp));
    gen_helper_exception_internal(tcg_env, tcg_constant_i32(excp));
}

static void gen_exception_internal_insn(DisasContext *s, int excp)
{
    gen_a64_update_pc(s, 0);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
    gen_a64_update_pc(s, 0);
    gen_helper_exception_bkpt_insn(tcg_env, tcg_constant_i32(syndrome));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}
static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (s->ss_active) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
{
    if (use_goto_tb(s, s->pc_curr + diff)) {
        /*
         * For pcrel, the pc must always be up-to-date on entry to
         * the linked TB, so that it can use simple additions for all
         * further adjustments.  For !pcrel, the linked TB is compiled
         * to know its full virtual address, so we can delay the
         * update to pc to the unlinked path.  A long chain of links
         * can thus avoid many updates to the PC.
         */
        if (tb_cflags(s->base.tb) & CF_PCREL) {
            gen_a64_update_pc(s, diff);
            tcg_gen_goto_tb(n);
        } else {
            tcg_gen_goto_tb(n);
            gen_a64_update_pc(s, diff);
        }
        tcg_gen_exit_tb(s->base.tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_update_pc(s, diff);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}
/*
 * Register access functions
 *
 * These functions are used for directly accessing a register in where
 * changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_movi_i64(t, 0);
        return t;
    } else {
        return cpu_X[reg];
    }
}

/* register access for when 31 == SP */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}

/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = tcg_temp_new_i64();
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = tcg_temp_new_i64();
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}
/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}

/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}

/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that unlike the GP register accessors, the values returned
 * by the read functions must be manually freed.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, tcg_env, fp_reg_offset(s, reg, MO_64));
    return v;
}

static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, tcg_env, fp_reg_offset(s, reg, MO_32));
    return v;
}

static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld16u_i32(v, tcg_env, fp_reg_offset(s, reg, MO_16));
    return v;
}
/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is not enabled, then there are only 128 bits in the vector.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    /* Nop move, with side effect of clearing the tail. */
    tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
}

void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, tcg_env, ofs);
    clear_vec_high(s, false, reg);
}

static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
}

/* Expand a 2-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an expander function.
 */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 3-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 4-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         int rx, GVecGen4Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
            is_q ? 16 : 8, vec_full_reg_size(s));
}
/* Expand a 2-operand operation using an out-of-line helper. */
static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int data, gen_helper_gvec_2 *fn)
{
    tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand operation using an out-of-line helper. */
static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, int data, gen_helper_gvec_3 *fn)
{
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 4-operand operation using an out-of-line helper. */
static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
                             int rm, int ra, int data, gen_helper_gvec_4 *fn)
{
    tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/*
 * Expand a 4-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, int ra, bool is_fp16, int data,
                              gen_helper_gvec_4_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}
/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}
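/*
 * Flag representation reminder (the convention used throughout this
 * translator): cpu_ZF is zero iff the Z flag is set, and cpu_NF holds
 * the sign in bit 31.  E.g. for result == 0x00000001_00000000, extr
 * yields ZF = 0 and NF = 1, and the OR makes ZF non-zero, correctly
 * recording that the 64-bit result was not zero.
 */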
/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}

/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 result, flag, tmp;
    result = tcg_temp_new_i64();
    flag = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();

    tcg_gen_movi_i64(tmp, 0);
    tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

    tcg_gen_extrl_i64_i32(cpu_CF, flag);

    gen_set_NZ64(result);

    tcg_gen_xor_i64(flag, result, t0);
    tcg_gen_xor_i64(tmp, t0, t1);
    tcg_gen_andc_i64(flag, flag, tmp);
    tcg_gen_extrh_i64_i32(cpu_VF, flag);

    tcg_gen_mov_i64(dest, result);
}
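/*
 * Illustrative case: with t0 = t1 = 0x8000000000000000, add2 produces
 * the 128-bit sum 0x1_0000000000000000, so result = 0 and flag = 1;
 * cpu_CF becomes 1 and gen_set_NZ64() records the zero result, matching
 * ADDS flag semantics.
 */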
static void gen_add32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i32 t0_32 = tcg_temp_new_i32();
    TCGv_i32 t1_32 = tcg_temp_new_i32();
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_extrl_i64_i32(t0_32, t0);
    tcg_gen_extrl_i64_i32(t1_32, t1);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
    tcg_gen_xor_i32(tmp, t0_32, t1_32);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_gen_extu_i32_i64(dest, cpu_NF);
}

static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        gen_add64_CC(dest, t0, t1);
    } else {
        gen_add32_CC(dest, t0, t1);
    }
}

/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    /* 64 bit arithmetic */
    TCGv_i64 result, flag, tmp;

    result = tcg_temp_new_i64();
    flag = tcg_temp_new_i64();
    tcg_gen_sub_i64(result, t0, t1);

    gen_set_NZ64(result);

    tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
    tcg_gen_extrl_i64_i32(cpu_CF, flag);

    tcg_gen_xor_i64(flag, result, t0);
    tmp = tcg_temp_new_i64();
    tcg_gen_xor_i64(tmp, t0, t1);
    tcg_gen_and_i64(flag, flag, tmp);
    tcg_gen_extrh_i64_i32(cpu_VF, flag);
    tcg_gen_mov_i64(dest, result);
}

static void gen_sub32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    /* 32 bit arithmetic */
    TCGv_i32 t0_32 = tcg_temp_new_i32();
    TCGv_i32 t1_32 = tcg_temp_new_i32();
    TCGv_i32 tmp;

    tcg_gen_extrl_i64_i32(t0_32, t0);
    tcg_gen_extrl_i64_i32(t1_32, t1);
    tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0_32, t1_32);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_gen_extu_i32_i64(dest, cpu_NF);
}

static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        gen_sub64_CC(dest, t0, t1);
    } else {
        gen_sub32_CC(dest, t0, t1);
    }
}
/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}

/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result = tcg_temp_new_i64();
        TCGv_i64 cf_64 = tcg_temp_new_i64();
        TCGv_i64 vf_64 = tcg_temp_new_i64();
        TCGv_i64 tmp = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_constant_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);
    } else {
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();
        TCGv_i32 zero = tcg_constant_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, MemOp memop, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, MemOp memop,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Load from memory to GPR register
 */
static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                             MemOp memop, bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && (memop & MO_SIGN)) {
        g_assert((memop & MO_SIZE) <= MO_32);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      (memop & MO_SIGN) != 0,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                      MemOp memop, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, MemOp mop)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmplo = tcg_temp_new_i64();

    tcg_gen_ld_i64(tmplo, tcg_env, fp_reg_offset(s, srcidx, MO_64));

    if ((mop & MO_SIZE) < MO_128) {
        tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        TCGv_i64 tmphi = tcg_temp_new_i64();
        TCGv_i128 t16 = tcg_temp_new_i128();

        tcg_gen_ld_i64(tmphi, tcg_env, fp_reg_hi_offset(s, srcidx));
        tcg_gen_concat_i64_i128(t16, tmplo, tmphi);

        tcg_gen_qemu_st_i128(t16, tcg_addr, get_mem_index(s), mop);
    }
}

/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, MemOp mop)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi = NULL;

    if ((mop & MO_SIZE) < MO_128) {
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        TCGv_i128 t16 = tcg_temp_new_i128();

        tcg_gen_qemu_ld_i128(t16, tcg_addr, get_mem_index(s), mop);

        tmphi = tcg_temp_new_i64();
        tcg_gen_extr_i128_i64(tmplo, tmphi, t16);
    }

    tcg_gen_st_i64(tmplo, tcg_env, fp_reg_offset(s, destidx, MO_64));

    if (tmphi) {
        tcg_gen_st_i64(tmphi, tcg_env, fp_reg_hi_offset(s, destidx));
    }
    clear_vec_high(s, tmphi != NULL, destidx);
}
/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */

/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch ((unsigned)memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_8 | MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16 | MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_32 | MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_64:
    case MO_64 | MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_8 | MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16 | MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_32:
    case MO_32 | MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, tcg_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, tcg_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, tcg_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
}

/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
    write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);
}
/* Check that FP/Neon access is enabled. If it is, return
 * true. If not, emit code to generate an appropriate exception,
 * and return false; the caller should not emit any code for
 * the instruction. Note that this check must happen after all
 * unallocated-encoding checks (otherwise the syndrome information
 * for the resulting exception will be incorrect).
 */
static bool fp_access_check_only(DisasContext *s)
{
    if (s->fp_excp_el) {
        assert(!s->fp_access_checked);
        s->fp_access_checked = true;

        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_fp_access_trap(1, 0xe, false, 0),
                              s->fp_excp_el);
        return false;
    }
    s->fp_access_checked = true;
    return true;
}

static bool fp_access_check(DisasContext *s)
{
    if (!fp_access_check_only(s)) {
        return false;
    }
    if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_Streaming, false));
        return false;
    }
    return true;
}

/*
 * Check that SVE access is enabled.  If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 * This function corresponds to CheckSVEEnabled().
 */
bool sve_access_check(DisasContext *s)
{
    if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
        assert(dc_isar_feature(aa64_sme, s));
        if (!sme_sm_enabled_check(s)) {
            goto fail_exit;
        }
    } else if (s->sve_excp_el) {
        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_sve_access_trap(), s->sve_excp_el);
        goto fail_exit;
    }
    s->sve_access_checked = true;
    return fp_access_check(s);

 fail_exit:
    /* Assert that we only raise one exception per instruction. */
    assert(!s->sve_access_checked);
    s->sve_access_checked = true;
    return false;
}

/*
 * Check that SME access is enabled, raise an exception if not.
 * Note that this function corresponds to CheckSMEAccess and is
 * only used directly for cpregs.
 */
static bool sme_access_check(DisasContext *s)
{
    if (s->sme_excp_el) {
        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_smetrap(SME_ET_AccessTrap, false),
                              s->sme_excp_el);
        return false;
    }
    return true;
}

/* This function corresponds to CheckSMEEnabled. */
bool sme_enabled_check(DisasContext *s)
{
    /*
     * Note that unlike sve_excp_el, we have not constrained sme_excp_el
     * to be zero when fp_excp_el has priority.  This is because we need
     * sme_excp_el by itself for cpregs access checks.
     */
    if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
        s->fp_access_checked = true;
        return sme_access_check(s);
    }
    return fp_access_check_only(s);
}

/* Common subroutine for CheckSMEAnd*Enabled. */
bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
{
    if (!sme_enabled_check(s)) {
        return false;
    }
    if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_NotStreaming, false));
        return false;
    }
    if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_InactiveZA, false));
        return false;
    }
    return true;
}
/*
 * Expanders for AdvSIMD translation functions.
 */

static bool do_gvec_op2_ool(DisasContext *s, arg_qrr_e *a, int data,
                            gen_helper_gvec_2 *fn)
{
    if (!a->q && a->esz == MO_64) {
        return false;
    }
    if (fp_access_check(s)) {
        gen_gvec_op2_ool(s, a->q, a->rd, a->rn, data, fn);
    }
    return true;
}

static bool do_gvec_op3_ool(DisasContext *s, arg_qrrr_e *a, int data,
                            gen_helper_gvec_3 *fn)
{
    if (!a->q && a->esz == MO_64) {
        return false;
    }
    if (fp_access_check(s)) {
        gen_gvec_op3_ool(s, a->q, a->rd, a->rn, a->rm, data, fn);
    }
    return true;
}

static bool do_gvec_fn3(DisasContext *s, arg_qrrr_e *a, GVecGen3Fn *fn)
{
    if (!a->q && a->esz == MO_64) {
        return false;
    }
    if (fp_access_check(s)) {
        gen_gvec_fn3(s, a->q, a->rd, a->rn, a->rm, fn, a->esz);
    }
    return true;
}

static bool do_gvec_fn3_no64(DisasContext *s, arg_qrrr_e *a, GVecGen3Fn *fn)
{
    if (a->esz == MO_64) {
        return false;
    }
    if (fp_access_check(s)) {
        gen_gvec_fn3(s, a->q, a->rd, a->rn, a->rm, fn, a->esz);
    }
    return true;
}

static bool do_gvec_fn3_no8_no64(DisasContext *s, arg_qrrr_e *a, GVecGen3Fn *fn)
{
    if (a->esz == MO_8) {
        return false;
    }
    return do_gvec_fn3_no64(s, a, fn);
}

static bool do_gvec_fn4(DisasContext *s, arg_qrrrr_e *a, GVecGen4Fn *fn)
{
    if (!a->q && a->esz == MO_64) {
        return false;
    }
    if (fp_access_check(s)) {
        gen_gvec_fn4(s, a->q, a->rd, a->rn, a->rm, a->ra, fn, a->esz);
    }
    return true;
}
/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    tcg_gen_ext_i64(tcg_out, tcg_in, extsize | (is_signed ? MO_SIGN : 0));
    tcg_gen_shli_i64(tcg_out, tcg_out, shift);
}
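/*
 * Illustrative decode: option == 2 gives extsize == MO_32 with
 * is_signed == 0, i.e. UXTW, so ADD X0, X1, W2, UXTW #2 zero-extends
 * the low 32 bits of the source and shifts the result left by 2.
 */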
static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}

/*
 * This provides a simple table based lookup decoder. It is
 * intended to be used when the relevant bits for decode are too
 * awkwardly placed and switch/if based logic would be confusing and
 * deeply nested. Since it's a linear search through the table, tables
 * should be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0)
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}
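/*
 * Sketch of intended usage; the pattern/mask values and handler name
 * here are hypothetical:
 *
 *   static const AArch64DecodeTable disas_example[] = {
 *       { 0x0e200400, 0x9f200400, disas_three_reg_same_example },
 *       { 0x00000000, 0x00000000, NULL }
 *   };
 *   AArch64DecodeFn *fn = lookup_disas_fn(&disas_example[0], insn);
 *   if (fn) {
 *       fn(s, insn);
 *   }
 */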
/*
 * The instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter C4
 * of the ARM Architecture Reference Manual (DDI0487B_a);
 * classification names and decode diagrams here should generally
 * match up with those in the manual.
 */

static bool trans_B(DisasContext *s, arg_i *a)
{
    reset_btype(s);
    gen_goto_tb(s, 0, a->imm);
    return true;
}

static bool trans_BL(DisasContext *s, arg_i *a)
{
    gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s));
    reset_btype(s);
    gen_goto_tb(s, 0, a->imm);
    return true;
}

static bool trans_CBZ(DisasContext *s, arg_cbz *a)
{
    DisasLabel match;
    TCGv_i64 tcg_cmp;

    tcg_cmp = read_cpu_reg(s, a->rt, a->sf);
    reset_btype(s);

    match = gen_disas_label(s);
    tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, match.label);
    gen_goto_tb(s, 0, 4);
    set_disas_label(s, match);
    gen_goto_tb(s, 1, a->imm);
    return true;
}

static bool trans_TBZ(DisasContext *s, arg_tbz *a)
{
    DisasLabel match;
    TCGv_i64 tcg_cmp;

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, a->rt), 1ULL << a->bitpos);

    reset_btype(s);

    match = gen_disas_label(s);
    tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, match.label);
    gen_goto_tb(s, 0, 4);
    set_disas_label(s, match);
    gen_goto_tb(s, 1, a->imm);
    return true;
}

static bool trans_B_cond(DisasContext *s, arg_B_cond *a)
{
    /* BC.cond is only present with FEAT_HBC */
    if (a->c && !dc_isar_feature(aa64_hbc, s)) {
        return false;
    }
    reset_btype(s);
    if (a->cond < 0x0e) {
        /* genuinely conditional branches */
        DisasLabel match = gen_disas_label(s);
        arm_gen_test_cc(a->cond, match.label);
        gen_goto_tb(s, 0, 4);
        set_disas_label(s, match);
        gen_goto_tb(s, 1, a->imm);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, a->imm);
    }
    return true;
}

static void set_btype_for_br(DisasContext *s, int rn)
{
    if (dc_isar_feature(aa64_bti, s)) {
        /* BR to {x16,x17} or !guard -> 1, else 3.  */
        set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
    }
}

static void set_btype_for_blr(DisasContext *s)
{
    if (dc_isar_feature(aa64_bti, s)) {
        /* BLR sets BTYPE to 2, regardless of source guarded page.  */
        set_btype(s, 2);
    }
}
static bool trans_BR(DisasContext *s, arg_r *a)
{
    gen_a64_set_pc(s, cpu_reg(s, a->rn));
    set_btype_for_br(s, a->rn);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_BLR(DisasContext *s, arg_r *a)
{
    TCGv_i64 dst = cpu_reg(s, a->rn);
    TCGv_i64 lr = cpu_reg(s, 30);
    if (dst == lr) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_mov_i64(tmp, dst);
        dst = tmp;
    }
    gen_pc_plus_diff(s, lr, curr_insn_len(s));
    gen_a64_set_pc(s, dst);
    set_btype_for_blr(s);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_RET(DisasContext *s, arg_r *a)
{
    gen_a64_set_pc(s, cpu_reg(s, a->rn));
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static TCGv_i64 auth_branch_target(DisasContext *s, TCGv_i64 dst,
                                   TCGv_i64 modifier, bool use_key_a)
{
    TCGv_i64 truedst;
    /*
     * Return the branch target for a BRAA/RETA/etc, which is either
     * just the destination dst, or that value with the pauth check
     * done and the code removed from the high bits.
     */
    if (!s->pauth_active) {
        return dst;
    }

    truedst = tcg_temp_new_i64();
    if (use_key_a) {
        gen_helper_autia_combined(truedst, tcg_env, dst, modifier);
    } else {
        gen_helper_autib_combined(truedst, tcg_env, dst, modifier);
    }
    return truedst;
}
static bool trans_BRAZ(DisasContext *s, arg_braz *a)
{
    TCGv_i64 dst;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }

    dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
    gen_a64_set_pc(s, dst);
    set_btype_for_br(s, a->rn);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_BLRAZ(DisasContext *s, arg_braz *a)
{
    TCGv_i64 dst, lr;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }

    dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
    lr = cpu_reg(s, 30);
    if (dst == lr) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_mov_i64(tmp, dst);
        dst = tmp;
    }
    gen_pc_plus_diff(s, lr, curr_insn_len(s));
    gen_a64_set_pc(s, dst);
    set_btype_for_blr(s);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_RETA(DisasContext *s, arg_reta *a)
{
    TCGv_i64 dst;

    dst = auth_branch_target(s, cpu_reg(s, 30), cpu_X[31], !a->m);
    gen_a64_set_pc(s, dst);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_BRA(DisasContext *s, arg_bra *a)
{
    TCGv_i64 dst;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }
    dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
    gen_a64_set_pc(s, dst);
    set_btype_for_br(s, a->rn);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_BLRA(DisasContext *s, arg_bra *a)
{
    TCGv_i64 dst, lr;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }
    dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
    lr = cpu_reg(s, 30);
    if (dst == lr) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_mov_i64(tmp, dst);
        dst = tmp;
    }
    gen_pc_plus_diff(s, lr, curr_insn_len(s));
    gen_a64_set_pc(s, dst);
    set_btype_for_blr(s);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}
static bool trans_ERET(DisasContext *s, arg_ERET *a)
{
    TCGv_i64 dst;

    if (s->current_el == 0) {
        return false;
    }
    if (s->trap_eret) {
        gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(0), 2);
        return true;
    }
    dst = tcg_temp_new_i64();
    tcg_gen_ld_i64(dst, tcg_env,
                   offsetof(CPUARMState, elr_el[s->current_el]));

    translator_io_start(&s->base);

    gen_helper_exception_return(tcg_env, dst);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_ERETA(DisasContext *s, arg_reta *a)
{
    TCGv_i64 dst;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }
    if (s->current_el == 0) {
        return false;
    }
    /* The FGT trap takes precedence over an auth trap. */
    if (s->trap_eret) {
        gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(a->m ? 3 : 2), 2);
        return true;
    }
    dst = tcg_temp_new_i64();
    tcg_gen_ld_i64(dst, tcg_env,
                   offsetof(CPUARMState, elr_el[s->current_el]));

    dst = auth_branch_target(s, dst, cpu_X[31], !a->m);

    translator_io_start(&s->base);

    gen_helper_exception_return(tcg_env, dst);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
    return true;
}
static bool trans_NOP(DisasContext *s, arg_NOP *a)
{
    return true;
}

static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
{
    /*
     * When running in MTTCG we don't generate jumps to the yield and
     * WFE helpers as it won't affect the scheduling of other vCPUs.
     * If we wanted to more completely model WFE/SEV so we don't busy
     * spin unnecessarily we would need to do something more involved.
     */
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        s->base.is_jmp = DISAS_YIELD;
    }
    return true;
}

static bool trans_WFI(DisasContext *s, arg_WFI *a)
{
    s->base.is_jmp = DISAS_WFI;
    return true;
}

static bool trans_WFE(DisasContext *s, arg_WFI *a)
{
    /*
     * When running in MTTCG we don't generate jumps to the yield and
     * WFE helpers as it won't affect the scheduling of other vCPUs.
     * If we wanted to more completely model WFE/SEV so we don't busy
     * spin unnecessarily we would need to do something more involved.
     */
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        s->base.is_jmp = DISAS_WFE;
    }
    return true;
}

static bool trans_WFIT(DisasContext *s, arg_WFIT *a)
{
    if (!dc_isar_feature(aa64_wfxt, s)) {
        return false;
    }

    /*
     * Because we need to pass the register value to the helper,
     * it's easier to emit the code now, unlike trans_WFI which
     * defers it to aarch64_tr_tb_stop(). That means we need to
     * check ss_active so that single-stepping a WFIT doesn't halt.
     */
    if (s->ss_active) {
        /* Act like a NOP under architectural singlestep */
        return true;
    }

    gen_a64_update_pc(s, 4);
    gen_helper_wfit(tcg_env, cpu_reg(s, a->rd));
    /* Go back to the main loop to check for interrupts */
    s->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_WFET(DisasContext *s, arg_WFET *a)
{
    if (!dc_isar_feature(aa64_wfxt, s)) {
        return false;
    }

    /*
     * We rely here on our WFE implementation being a NOP, so we
     * don't need to do anything different to handle the WFET timeout
     * from what trans_WFE does.
     */
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        s->base.is_jmp = DISAS_WFE;
    }
    return true;
}
static bool trans_XPACLRI(DisasContext *s, arg_XPACLRI *a)
{
    if (s->pauth_active) {
        gen_helper_xpaci(cpu_X[30], tcg_env, cpu_X[30]);
    }
    return true;
}

static bool trans_PACIA1716(DisasContext *s, arg_PACIA1716 *a)
{
    if (s->pauth_active) {
        gen_helper_pacia(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
    }
    return true;
}

static bool trans_PACIB1716(DisasContext *s, arg_PACIB1716 *a)
{
    if (s->pauth_active) {
        gen_helper_pacib(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
    }
    return true;
}

static bool trans_AUTIA1716(DisasContext *s, arg_AUTIA1716 *a)
{
    if (s->pauth_active) {
        gen_helper_autia(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
    }
    return true;
}

static bool trans_AUTIB1716(DisasContext *s, arg_AUTIB1716 *a)
{
    if (s->pauth_active) {
        gen_helper_autib(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
    }
    return true;
}

static bool trans_ESB(DisasContext *s, arg_ESB *a)
{
    /* Without RAS, we must implement this as NOP. */
    if (dc_isar_feature(aa64_ras, s)) {
        /*
         * QEMU does not have a source of physical SErrors,
         * so we are only concerned with virtual SErrors.
         * The pseudocode in the ARM for this case is
         *   if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
         *      AArch64.vESBOperation();
         * Most of the condition can be evaluated at translation time.
         * Test for EL2 present, and defer test for SEL2 to runtime.
         */
        if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
            gen_helper_vesb(tcg_env);
        }
    }
    return true;
}

static bool trans_PACIAZ(DisasContext *s, arg_PACIAZ *a)
{
    if (s->pauth_active) {
        gen_helper_pacia(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
    }
    return true;
}

static bool trans_PACIASP(DisasContext *s, arg_PACIASP *a)
{
    if (s->pauth_active) {
        gen_helper_pacia(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
    }
    return true;
}

static bool trans_PACIBZ(DisasContext *s, arg_PACIBZ *a)
{
    if (s->pauth_active) {
        gen_helper_pacib(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
    }
    return true;
}

static bool trans_PACIBSP(DisasContext *s, arg_PACIBSP *a)
{
    if (s->pauth_active) {
        gen_helper_pacib(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
    }
    return true;
}

static bool trans_AUTIAZ(DisasContext *s, arg_AUTIAZ *a)
{
    if (s->pauth_active) {
        gen_helper_autia(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
    }
    return true;
}

static bool trans_AUTIASP(DisasContext *s, arg_AUTIASP *a)
{
    if (s->pauth_active) {
        gen_helper_autia(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
    }
    return true;
}

static bool trans_AUTIBZ(DisasContext *s, arg_AUTIBZ *a)
{
    if (s->pauth_active) {
        gen_helper_autib(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
    }
    return true;
}

static bool trans_AUTIBSP(DisasContext *s, arg_AUTIBSP *a)
{
    if (s->pauth_active) {
        gen_helper_autib(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
    }
    return true;
}

static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
    return true;
}
static bool trans_DSB_DMB(DisasContext *s, arg_DSB_DMB *a)
{
    /* We handle DSB and DMB the same way */
    TCGBar bar;

    switch (a->types) {
    case 1: /* MBReqTypes_Reads */
        bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
        break;
    case 2: /* MBReqTypes_Writes */
        bar = TCG_BAR_SC | TCG_MO_ST_ST;
        break;
    default: /* MBReqTypes_All */
        bar = TCG_BAR_SC | TCG_MO_ALL;
        break;
    }
    tcg_gen_mb(bar);
    return true;
}

static bool trans_ISB(DisasContext *s, arg_ISB *a)
{
    /*
     * We need to break the TB after this insn to execute
     * self-modifying code correctly and also to take
     * any pending interrupts immediately.
     */
    reset_btype(s);
    gen_goto_tb(s, 0, 4);
    return true;
}

static bool trans_SB(DisasContext *s, arg_SB *a)
{
    if (!dc_isar_feature(aa64_sb, s)) {
        return false;
    }
    /*
     * TODO: There is no speculation barrier opcode for TCG;
     * MB and end the TB instead.
     */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    gen_goto_tb(s, 0, 4);
    return true;
}

static bool trans_CFINV(DisasContext *s, arg_CFINV *a)
{
    if (!dc_isar_feature(aa64_condm_4, s)) {
        return false;
    }
    tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
    return true;
}

static bool trans_XAFLAG(DisasContext *s, arg_XAFLAG *a)
{
    TCGv_i32 z;

    if (!dc_isar_feature(aa64_condm_5, s)) {
        return false;
    }

    z = tcg_temp_new_i32();

    tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);

    /*
     * (!C & !Z) << 31
     * (!(C | Z)) << 31
     * ~((C | Z) << 31)
     * ~-(C | Z)
     * (C | Z) - 1
     */
    tcg_gen_or_i32(cpu_NF, cpu_CF, z);
    tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);

    /* !(Z & C) */
    tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
    tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);

    /* (!C & Z) << 31 -> -(Z & ~C) */
    tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
    tcg_gen_neg_i32(cpu_VF, cpu_VF);

    /* C | Z */
    tcg_gen_or_i32(cpu_CF, cpu_CF, z);

    return true;
}

static bool trans_AXFLAG(DisasContext *s, arg_AXFLAG *a)
{
    if (!dc_isar_feature(aa64_condm_5, s)) {
        return false;
    }

    tcg_gen_sari_i32(cpu_VF, cpu_VF, 31);         /* V ? -1 : 0 */
    tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF);     /* C & !V */

    /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
    tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);

    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);

    return true;
}
static bool trans_MSR_i_UAO(DisasContext *s, arg_i *a)
{
    if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
        return false;
    }
    if (a->imm & 1) {
        set_pstate_bits(PSTATE_UAO);
    } else {
        clear_pstate_bits(PSTATE_UAO);
    }
    gen_rebuild_hflags(s);
    s->base.is_jmp = DISAS_TOO_MANY;
    return true;
}

static bool trans_MSR_i_PAN(DisasContext *s, arg_i *a)
{
    if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
        return false;
    }
    if (a->imm & 1) {
        set_pstate_bits(PSTATE_PAN);
    } else {
        clear_pstate_bits(PSTATE_PAN);
    }
    gen_rebuild_hflags(s);
    s->base.is_jmp = DISAS_TOO_MANY;
    return true;
}

static bool trans_MSR_i_SPSEL(DisasContext *s, arg_i *a)
{
    if (s->current_el == 0) {
        return false;
    }
    gen_helper_msr_i_spsel(tcg_env, tcg_constant_i32(a->imm & PSTATE_SP));
    s->base.is_jmp = DISAS_TOO_MANY;
    return true;
}

static bool trans_MSR_i_SBSS(DisasContext *s, arg_i *a)
{
    if (!dc_isar_feature(aa64_ssbs, s)) {
        return false;
    }
    if (a->imm & 1) {
        set_pstate_bits(PSTATE_SSBS);
    } else {
        clear_pstate_bits(PSTATE_SSBS);
    }
    /* Don't need to rebuild hflags since SSBS is a nop */
    s->base.is_jmp = DISAS_TOO_MANY;
    return true;
}

static bool trans_MSR_i_DIT(DisasContext *s, arg_i *a)
{
    if (!dc_isar_feature(aa64_dit, s)) {
        return false;
    }
    if (a->imm & 1) {
        set_pstate_bits(PSTATE_DIT);
    } else {
        clear_pstate_bits(PSTATE_DIT);
    }
    /* There's no need to rebuild hflags because DIT is a nop */
    s->base.is_jmp = DISAS_TOO_MANY;
    return true;
}

static bool trans_MSR_i_TCO(DisasContext *s, arg_i *a)
{
    if (dc_isar_feature(aa64_mte, s)) {
        /* Full MTE is enabled -- set the TCO bit as directed. */
        if (a->imm & 1) {
            set_pstate_bits(PSTATE_TCO);
        } else {
            clear_pstate_bits(PSTATE_TCO);
        }
        gen_rebuild_hflags(s);
        /* Many factors, including TCO, go into MTE_ACTIVE. */
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        return true;
    } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
        /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI. */
        return true;
    } else {
        /* Insn not present */
        return false;
    }
}

static bool trans_MSR_i_DAIFSET(DisasContext *s, arg_i *a)
{
    gen_helper_msr_i_daifset(tcg_env, tcg_constant_i32(a->imm));
    s->base.is_jmp = DISAS_TOO_MANY;
    return true;
}

static bool trans_MSR_i_DAIFCLEAR(DisasContext *s, arg_i *a)
{
    gen_helper_msr_i_daifclear(tcg_env, tcg_constant_i32(a->imm));
    /* Exit the cpu loop to re-evaluate pending IRQs. */
    s->base.is_jmp = DISAS_UPDATE_EXIT;
    return true;
}

static bool trans_MSR_i_ALLINT(DisasContext *s, arg_i *a)
{
    if (!dc_isar_feature(aa64_nmi, s) || s->current_el == 0) {
        return false;
    }

    if (a->imm == 0) {
        clear_pstate_bits(PSTATE_ALLINT);
    } else if (s->current_el > 1) {
        set_pstate_bits(PSTATE_ALLINT);
    } else {
        gen_helper_msr_set_allint_el1(tcg_env);
    }

    /* Exit the cpu loop to re-evaluate pending IRQs. */
    s->base.is_jmp = DISAS_UPDATE_EXIT;
    return true;
}

static bool trans_MSR_i_SVCR(DisasContext *s, arg_MSR_i_SVCR *a)
{
    if (!dc_isar_feature(aa64_sme, s) || a->mask == 0) {
        return false;
    }
    if (sme_access_check(s)) {
        int old = s->pstate_sm | (s->pstate_za << 1);
        int new = a->imm * 3;

        if ((old ^ new) & a->mask) {
            /* At least one bit changes. */
            gen_helper_set_svcr(tcg_env, tcg_constant_i32(new),
                                tcg_constant_i32(a->mask));
            s->base.is_jmp = DISAS_TOO_MANY;
        }
    }
    return true;
}
static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);
}
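/*
 * Illustrative result: with N and C set but Z and V clear, the value
 * built above is (1U << 31) | (1 << 29) == 0xa0000000, zero-extended
 * into the 64-bit destination as an MRS of NZCV would see it.
 */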
static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
}

static void gen_sysreg_undef(DisasContext *s, bool isread,
                             uint8_t op0, uint8_t op1, uint8_t op2,
                             uint8_t crn, uint8_t crm, uint8_t rt)
{
    /*
     * Generate code to emit an UNDEF with correct syndrome
     * information for a failed system register access.
     * This is EC_UNCATEGORIZED (ie a standard UNDEF) in most cases,
     * but if FEAT_IDST is implemented then read accesses to registers
     * in the feature ID space are reported with the EC_SYSTEMREGISTERTRAP
     * syndrome.
     */
    uint32_t syndrome;

    if (isread && dc_isar_feature(aa64_ids, s) &&
        arm_cpreg_encoding_in_idspace(op0, op1, op2, crn, crm)) {
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
    } else {
        syndrome = syn_uncategorized();
    }
    gen_exception_insn(s, 0, EXCP_UDEF, syndrome);
}
/* MRS - move from system register
 * MSR (register) - move to system register
 * SYS
 * SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    uint32_t key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                      crn, crm, op0, op1, op2);
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key);
    bool need_exit_tb = false;
    bool nv_trap_to_el2 = false;
    bool nv_redirect_reg = false;
    bool skip_fp_access_checks = false;
    bool nv2_mem_redirect = false;
    TCGv_ptr tcg_ri = NULL;
    TCGv_i64 tcg_rt;
    uint32_t syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);

    if (crn == 11 || crn == 15) {
        /*
         * Check for TIDCP trap, which must take precedence over
         * the UNDEF for "no such register" etc.
         */
        switch (s->current_el) {
        case 0:
            if (dc_isar_feature(aa64_tidcp1, s)) {
                gen_helper_tidcp_el0(tcg_env, tcg_constant_i32(syndrome));
            }
            break;
        case 1:
            gen_helper_tidcp_el1(tcg_env, tcg_constant_i32(syndrome));
            break;
        }
    }

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
        return;
    }

    if (s->nv2 && ri->nv2_redirect_offset) {
        /*
         * Some registers always redirect to memory; some only do so if
         * HCR_EL2.NV1 is 0, and some only if NV1 is 1 (these come in
         * pairs which share an offset; see the table in R_CSRPQ).
         */
        if (ri->nv2_redirect_offset & NV2_REDIR_NV1) {
            nv2_mem_redirect = s->nv1;
        } else if (ri->nv2_redirect_offset & NV2_REDIR_NO_NV1) {
            nv2_mem_redirect = !s->nv1;
        } else {
            nv2_mem_redirect = true;
        }
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        /*
         * FEAT_NV/NV2 handling does not do the usual FP access checks
         * for registers only accessible at EL2 (though it *does* do them
         * for registers accessible at EL1).
         */
        skip_fp_access_checks = true;
        if (s->nv2 && (ri->type & ARM_CP_NV2_REDIRECT)) {
            /*
             * This is one of the few EL2 registers which should redirect
             * to the equivalent EL1 register. We do that after running
             * the EL2 register's accessfn.
             */
            nv_redirect_reg = true;
            assert(!nv2_mem_redirect);
        } else if (nv2_mem_redirect) {
            /*
             * NV2 redirect-to-memory takes precedence over trap to EL2 or
             * UNDEF to EL1.
             */
        } else if (s->nv && arm_cpreg_traps_in_nv(ri)) {
            /*
             * This register / instruction exists and is an EL2 register, so
             * we must trap to EL2 if accessed in nested virtualization EL1
             * instead of UNDEFing. We'll do that after the usual access checks.
             * (This makes a difference only for a couple of registers like
             * VSTTBR_EL2 where the "UNDEF if NonSecure" should take priority
             * over the trap-to-EL2. Most trapped-by-FEAT_NV registers have
             * an accessfn which does nothing when called from EL1, because
             * the trap-to-EL3 controls which would apply to that register
             * at EL2 don't take priority over the FEAT_NV trap-to-EL2.)
             */
            nv_trap_to_el2 = true;
        } else {
            gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
            return;
        }
    }

    if (ri->accessfn || (ri->fgt && s->fgt_active)) {
        /* Emit code to perform further access permissions checks at
         * runtime; this may result in an exception.
         */
        gen_a64_update_pc(s, 0);
        tcg_ri = tcg_temp_new_ptr();
        gen_helper_access_check_cp_reg(tcg_ri, tcg_env,
                                       tcg_constant_i32(key
,
2348 tcg_constant_i32(key
),
2349 tcg_constant_i32(syndrome
),
2350 tcg_constant_i32(isread
));
2351 } else if (ri
->type
& ARM_CP_RAISES_EXC
) {
2353 * The readfn or writefn might raise an exception;
2354 * synchronize the CPU state in case it does.
2356 gen_a64_update_pc(s
, 0);
2359 if (!skip_fp_access_checks
) {
2360 if ((ri
->type
& ARM_CP_FPU
) && !fp_access_check_only(s
)) {
2362 } else if ((ri
->type
& ARM_CP_SVE
) && !sve_access_check(s
)) {
2364 } else if ((ri
->type
& ARM_CP_SME
) && !sme_access_check(s
)) {
2369 if (nv_trap_to_el2
) {
2370 gen_exception_insn_el(s
, 0, EXCP_UDEF
, syndrome
, 2);
2374 if (nv_redirect_reg
) {
2376 * FEAT_NV2 redirection of an EL2 register to an EL1 register.
2377 * Conveniently in all cases the encoding of the EL1 register is
2378 * identical to the EL2 register except that opc1 is 0.
2379 * Get the reginfo for the EL1 register to use for the actual access.
2380 * We don't use the EL1 register's access function, and
2381 * fine-grained-traps on EL1 also do not apply here.
2383 key
= ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP
,
2384 crn
, crm
, op0
, 0, op2
);
2385 ri
= get_arm_cp_reginfo(s
->cp_regs
, key
);
2387 assert(cp_access_ok(s
->current_el
, ri
, isread
));
2389 * We might not have done an update_pc earlier, so check we don't
2390 * need it. We could support this in future if necessary.
2392 assert(!(ri
->type
& ARM_CP_RAISES_EXC
));
2395 if (nv2_mem_redirect
) {
2397 * This system register is being redirected into an EL2 memory access.
2398 * This means it is not an IO operation, doesn't change hflags,
2399 * and need not end the TB, because it has no side effects.
2401 * The access is 64-bit single copy atomic, guaranteed aligned because
2402 * of the definition of VCNR_EL2. Its endianness depends on
2403 * SCTLR_EL2.EE, not on the data endianness of EL1.
2404 * It is done under either the EL2 translation regime or the EL2&0
2405 * translation regime, depending on HCR_EL2.E2H. It behaves as if
2408 TCGv_i64 ptr
= tcg_temp_new_i64();
2409 MemOp mop
= MO_64
| MO_ALIGN
| MO_ATOM_IFALIGN
;
2410 ARMMMUIdx armmemidx
= s
->nv2_mem_e20
? ARMMMUIdx_E20_2
: ARMMMUIdx_E2
;
2411 int memidx
= arm_to_core_mmu_idx(armmemidx
);
2414 mop
|= (s
->nv2_mem_be
? MO_BE
: MO_LE
);
2416 tcg_gen_ld_i64(ptr
, tcg_env
, offsetof(CPUARMState
, cp15
.vncr_el2
));
2417 tcg_gen_addi_i64(ptr
, ptr
,
2418 (ri
->nv2_redirect_offset
& ~NV2_REDIR_FLAG_MASK
));
2419 tcg_rt
= cpu_reg(s
, rt
);
2421 syn
= syn_data_abort_vncr(0, !isread
, 0);
2422 disas_set_insn_syndrome(s
, syn
);
2424 tcg_gen_qemu_ld_i64(tcg_rt
, ptr
, memidx
, mop
);
2426 tcg_gen_qemu_st_i64(tcg_rt
, ptr
, memidx
, mop
);
2431 /* Handle special cases first */
2432 switch (ri
->type
& ARM_CP_SPECIAL_MASK
) {
2438 tcg_rt
= cpu_reg(s
, rt
);
2440 gen_get_nzcv(tcg_rt
);
2442 gen_set_nzcv(tcg_rt
);
2445 case ARM_CP_CURRENTEL
:
2448 * Reads as current EL value from pstate, which is
2449 * guaranteed to be constant by the tb flags.
2450 * For nested virt we should report EL2.
2452 int el
= s
->nv
? 2 : s
->current_el
;
2453 tcg_rt
= cpu_reg(s
, rt
);
2454 tcg_gen_movi_i64(tcg_rt
, el
<< 2);
2458 /* Writes clear the aligned block of memory which rt points into. */
2459 if (s
->mte_active
[0]) {
2462 desc
= FIELD_DP32(desc
, MTEDESC
, MIDX
, get_mem_index(s
));
2463 desc
= FIELD_DP32(desc
, MTEDESC
, TBI
, s
->tbid
);
2464 desc
= FIELD_DP32(desc
, MTEDESC
, TCMA
, s
->tcma
);
2466 tcg_rt
= tcg_temp_new_i64();
2467 gen_helper_mte_check_zva(tcg_rt
, tcg_env
,
2468 tcg_constant_i32(desc
), cpu_reg(s
, rt
));
2470 tcg_rt
= clean_data_tbi(s
, cpu_reg(s
, rt
));
2472 gen_helper_dc_zva(tcg_env
, tcg_rt
);
2476 TCGv_i64 clean_addr
, tag
;
2479 * DC_GVA, like DC_ZVA, requires that we supply the original
2480 * pointer for an invalid page. Probe that address first.
2482 tcg_rt
= cpu_reg(s
, rt
);
2483 clean_addr
= clean_data_tbi(s
, tcg_rt
);
2484 gen_probe_access(s
, clean_addr
, MMU_DATA_STORE
, MO_8
);
2487 /* Extract the tag from the register to match STZGM. */
2488 tag
= tcg_temp_new_i64();
2489 tcg_gen_shri_i64(tag
, tcg_rt
, 56);
2490 gen_helper_stzgm_tags(tcg_env
, clean_addr
, tag
);
2494 case ARM_CP_DC_GZVA
:
2496 TCGv_i64 clean_addr
, tag
;
2498 /* For DC_GZVA, we can rely on DC_ZVA for the proper fault. */
2499 tcg_rt
= cpu_reg(s
, rt
);
2500 clean_addr
= clean_data_tbi(s
, tcg_rt
);
2501 gen_helper_dc_zva(tcg_env
, clean_addr
);
2504 /* Extract the tag from the register to match STZGM. */
2505 tag
= tcg_temp_new_i64();
2506 tcg_gen_shri_i64(tag
, tcg_rt
, 56);
2507 gen_helper_stzgm_tags(tcg_env
, clean_addr
, tag
);
2512 g_assert_not_reached();
2515 if (ri
->type
& ARM_CP_IO
) {
2516 /* I/O operations must end the TB here (whether read or write) */
2517 need_exit_tb
= translator_io_start(&s
->base
);
2520 tcg_rt
= cpu_reg(s
, rt
);
2523 if (ri
->type
& ARM_CP_CONST
) {
2524 tcg_gen_movi_i64(tcg_rt
, ri
->resetvalue
);
2525 } else if (ri
->readfn
) {
2527 tcg_ri
= gen_lookup_cp_reg(key
);
2529 gen_helper_get_cp_reg64(tcg_rt
, tcg_env
, tcg_ri
);
2531 tcg_gen_ld_i64(tcg_rt
, tcg_env
, ri
->fieldoffset
);
2534 if (ri
->type
& ARM_CP_CONST
) {
2535 /* If not forbidden by access permissions, treat as WI */
2537 } else if (ri
->writefn
) {
2539 tcg_ri
= gen_lookup_cp_reg(key
);
2541 gen_helper_set_cp_reg64(tcg_env
, tcg_ri
, tcg_rt
);
2543 tcg_gen_st_i64(tcg_rt
, tcg_env
, ri
->fieldoffset
);
2547 if (!isread
&& !(ri
->type
& ARM_CP_SUPPRESS_TB_END
)) {
2549 * A write to any coprocessor register that ends a TB
2550 * must rebuild the hflags for the next TB.
2552 gen_rebuild_hflags(s
);
2554 * We default to ending the TB on a coprocessor register write,
2555 * but allow this to be suppressed by the register definition
2556 * (usually only necessary to work around guest bugs).
2558 need_exit_tb
= true;
2561 s
->base
.is_jmp
= DISAS_UPDATE_EXIT
;
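/*
 * Worked example (illustrative, not from the original source): the guest
 * instruction "mrs x0, cntvct_el0" decodes to op0=3 op1=3 crn=14 crm=0
 * op2=2, so handle_sys() looks the register up under the key
 * ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, 14, 0, 3, 3, 2). The counter
 * is registered as an ARM_CP_IO register, so the read also ends the TB
 * via translator_io_start().
 */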
static bool trans_SYS(DisasContext *s, arg_SYS *a)
{
    handle_sys(s, a->l, a->op0, a->op1, a->op2, a->crn, a->crm, a->rt);
    return true;
}
static bool trans_SVC(DisasContext *s, arg_i *a)
{
    /*
     * For SVC, HVC and SMC we advance the single-step state
     * machine before taking the exception. This is architecturally
     * mandated, to ensure that single-stepping a system call
     * instruction works properly.
     */
    uint32_t syndrome = syn_aa64_svc(a->imm);
    if (s->fgt_svc) {
        gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
        return true;
    }
    gen_ss_advance(s);
    gen_exception_insn(s, 4, EXCP_SWI, syndrome);
    return true;
}
static bool trans_HVC(DisasContext *s, arg_i *a)
{
    int target_el = s->current_el == 3 ? 3 : 2;

    if (s->current_el == 0) {
        unallocated_encoding(s);
        return true;
    }
    /*
     * The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration.
     */
    gen_a64_update_pc(s, 0);
    gen_helper_pre_hvc(tcg_env);
    /* Architecture requires ss advance before we do the actual work */
    gen_ss_advance(s);
    gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(a->imm), target_el);
    return true;
}
static bool trans_SMC(DisasContext *s, arg_i *a)
{
    if (s->current_el == 0) {
        unallocated_encoding(s);
        return true;
    }
    gen_a64_update_pc(s, 0);
    gen_helper_pre_smc(tcg_env, tcg_constant_i32(syn_aa64_smc(a->imm)));
    /* Architecture requires ss advance before we do the actual work */
    gen_ss_advance(s);
    gen_exception_insn_el(s, 4, EXCP_SMC, syn_aa64_smc(a->imm), 3);
    return true;
}
static bool trans_BRK(DisasContext *s, arg_i *a)
{
    gen_exception_bkpt_insn(s, syn_aa64_bkpt(a->imm));
    return true;
}
static bool trans_HLT(DisasContext *s, arg_i *a)
{
    /*
     * HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
     */
    if (semihosting_enabled(s->current_el == 0) && a->imm == 0xf000) {
        gen_exception_internal_insn(s, EXCP_SEMIHOST);
    } else {
        unallocated_encoding(s);
    }
    return true;
}
/*
 * Load/Store exclusive instructions are implemented by remembering
 * the value/address loaded, and seeing if these are the same
 * when the store is performed. This is not actually the architecturally
 * mandated semantics, but it works for typical guest code sequences
 * and avoids having to monitor regular stores.
 *
 * The store exclusive uses the atomic cmpxchg primitives to avoid
 * races in multi-threaded linux-user and when MTTCG softmmu is
 * enabled.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2, int rn,
                               int size, bool is_pair)
{
    int idx = get_mem_index(s);
    TCGv_i64 dirty_addr, clean_addr;
    MemOp memop = check_atomic_align(s, rn, size + is_pair);

    s->is_ldex = true;
    dirty_addr = cpu_reg_sp(s, rn);
    clean_addr = gen_mte_check1(s, dirty_addr, false, rn != 31, memop);

    g_assert(size <= 3);
    if (is_pair) {
        g_assert(size >= 2);
        if (size == 2) {
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop);
            if (s->be_data == MO_LE) {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
            } else {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
            }
        } else {
            TCGv_i128 t16 = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t16, clean_addr, idx, memop);

            if (s->be_data == MO_LE) {
                tcg_gen_extr_i128_i64(cpu_exclusive_val,
                                      cpu_exclusive_high, t16);
            } else {
                tcg_gen_extr_i128_i64(cpu_exclusive_high,
                                      cpu_exclusive_val, t16);
            }
            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
        }
    } else {
        tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop);
        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_exclusive_addr, clean_addr);
}
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                int rn, int size, int is_pair)
{
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *     [addr] = {Rt};
     *     if (is_pair) {
     *         [addr + datasize] = {Rt2};
     *     }
     *     {Rd} = 0;
     * } else {
     *     {Rd} = 1;
     * }
     * env->exclusive_addr = -1;
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    TCGv_i64 tmp, clean_addr;
    MemOp memop;

    /*
     * FIXME: We are out of spec here. We have recorded only the address
     * from load_exclusive, not the entire range, and we assume that the
     * size of the access on both sides match. The architecture allows the
     * store to be smaller than the load, so long as the stored bytes are
     * within the range recorded by the load.
     */

    /* See AArch64.ExclusiveMonitorsPass() and AArch64.IsExclusiveVA(). */
    clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
    tcg_gen_brcond_i64(TCG_COND_NE, clean_addr, cpu_exclusive_addr, fail_label);

    /*
     * The write, and any associated faults, only happen if the virtual
     * and physical addresses pass the exclusive monitor check. These
     * faults are exceedingly unlikely, because normally the guest uses
     * the exact same address register for the load_exclusive, and we
     * would have recognized these faults there.
     *
     * It is possible to trigger an alignment fault pre-LSE2, e.g. with an
     * unaligned 4-byte write within the range of an aligned 8-byte load.
     * With LSE2, the store would need to cross a 16-byte boundary when the
     * load did not, which would mean the store is outside the range
     * recorded for the monitor, which would have failed a corrected monitor
     * check above. For now, we assume no size change and retain the
     * MO_ALIGN to let tcg know what we checked in the load_exclusive.
     *
     * It is possible to trigger an MTE fault, by performing the load with
     * a virtual address with a valid tag and performing the store with the
     * same virtual address and a different invalid tag.
     */
    memop = size + is_pair;
    if (memop == MO_128 || !dc_isar_feature(aa64_lse2, s)) {
        memop |= MO_ALIGN;
    }
    memop = finalize_memop(s, memop);
    gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop);

    tmp = tcg_temp_new_i64();
    if (is_pair) {
        if (size == 2) {
            if (s->be_data == MO_LE) {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
            } else {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
            }
            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
                                       cpu_exclusive_val, tmp,
                                       get_mem_index(s), memop);
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else {
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv_i128 c16 = tcg_temp_new_i128();
            TCGv_i64 a, b;

            if (s->be_data == MO_LE) {
                tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt), cpu_reg(s, rt2));
                tcg_gen_concat_i64_i128(c16, cpu_exclusive_val,
                                        cpu_exclusive_high);
            } else {
                tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt2), cpu_reg(s, rt));
                tcg_gen_concat_i64_i128(c16, cpu_exclusive_high,
                                        cpu_exclusive_val);
            }

            tcg_gen_atomic_cmpxchg_i128(t16, cpu_exclusive_addr, c16, t16,
                                        get_mem_index(s), memop);

            a = tcg_temp_new_i64();
            b = tcg_temp_new_i64();
            if (s->be_data == MO_LE) {
                tcg_gen_extr_i128_i64(a, b, t16);
            } else {
                tcg_gen_extr_i128_i64(b, a, t16);
            }

            tcg_gen_xor_i64(a, a, cpu_exclusive_val);
            tcg_gen_xor_i64(b, b, cpu_exclusive_high);
            tcg_gen_or_i64(tmp, a, b);

            tcg_gen_setcondi_i64(TCG_COND_NE, tmp, tmp, 0);
        }
    } else {
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s), memop);
        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
                                 int rn, int size)
{
    TCGv_i64 tcg_rs = cpu_reg(s, rs);
    TCGv_i64 tcg_rt = cpu_reg(s, rt);
    int memidx = get_mem_index(s);
    TCGv_i64 clean_addr;
    MemOp memop;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    memop = check_atomic_align(s, rn, size);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop);
    tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt,
                               memidx, memop);
}
static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
                                      int rn, int size)
{
    TCGv_i64 s1 = cpu_reg(s, rs);
    TCGv_i64 s2 = cpu_reg(s, rs + 1);
    TCGv_i64 t1 = cpu_reg(s, rt);
    TCGv_i64 t2 = cpu_reg(s, rt + 1);
    TCGv_i64 clean_addr;
    int memidx = get_mem_index(s);
    MemOp memop;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* This is a single atomic access, despite the "pair". */
    memop = check_atomic_align(s, rn, size + 1);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop);

    if (size == 2) {
        TCGv_i64 cmp = tcg_temp_new_i64();
        TCGv_i64 val = tcg_temp_new_i64();

        if (s->be_data == MO_LE) {
            tcg_gen_concat32_i64(val, t1, t2);
            tcg_gen_concat32_i64(cmp, s1, s2);
        } else {
            tcg_gen_concat32_i64(val, t2, t1);
            tcg_gen_concat32_i64(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx, memop);

        if (s->be_data == MO_LE) {
            tcg_gen_extr32_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr32_i64(s2, s1, cmp);
        }
    } else {
        TCGv_i128 cmp = tcg_temp_new_i128();
        TCGv_i128 val = tcg_temp_new_i128();

        if (s->be_data == MO_LE) {
            tcg_gen_concat_i64_i128(val, t1, t2);
            tcg_gen_concat_i64_i128(cmp, s1, s2);
        } else {
            tcg_gen_concat_i64_i128(val, t2, t1);
            tcg_gen_concat_i64_i128(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i128(cmp, clean_addr, cmp, val, memidx, memop);

        if (s->be_data == MO_LE) {
            tcg_gen_extr_i128_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr_i128_i64(s2, s1, cmp);
        }
    }
}
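/*
 * Illustrative note (not original text): for CASP with 32-bit registers
 * (size == 2), the code above folds the {Rs, Rs+1} pair into one 64-bit
 * compare value and {Rt, Rt+1} into one 64-bit store value, so a single
 * tcg_gen_atomic_cmpxchg_i64 provides the required single-copy atomicity;
 * the 64-bit register case uses the i128 variant in the same way.
 */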
/*
 * Compute the ISS.SF bit for syndrome information if an exception
 * is taken on a load or store. This indicates whether the instruction
 * is accessing a 32-bit or 64-bit register. This logic is derived
 * from the ARMv8 specs for LDR (Shared decode for all encodings).
 */
static bool ldst_iss_sf(int size, bool sign, bool ext)
{
    if (sign) {
        /*
         * Signed loads are 64 bit results if we are not going to
         * do a zero-extend from 32 to 64 after the load.
         * (For a store, sign and ext are always false.)
         */
        return !ext;
    } else {
        /* Unsigned loads/stores work at the specified size */
        return size == MO_64;
    }
}
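/*
 * Worked examples (illustrative, not original text):
 *   LDRSH x0, [x1]  -> size=MO_16, sign=1, ext=0 -> SF=1 (64-bit result)
 *   LDRSH w0, [x1]  -> size=MO_16, sign=1, ext=1 -> SF=0
 *   LDR   w0, [x1]  -> size=MO_32, sign=0        -> SF=0
 *   LDR   x0, [x1]  -> size=MO_64, sign=0        -> SF=1
 */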
static bool trans_STXR(DisasContext *s, arg_stxr *a)
{
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    if (a->lasr) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }
    gen_store_exclusive(s, a->rs, a->rt, a->rt2, a->rn, a->sz, false);
    return true;
}
static bool trans_LDXR(DisasContext *s, arg_stxr *a)
{
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    gen_load_exclusive(s, a->rt, a->rt2, a->rn, a->sz, false);
    if (a->lasr) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
    return true;
}
static bool trans_STLR(DisasContext *s, arg_stlr *a)
{
    TCGv_i64 clean_addr;
    MemOp memop;
    bool iss_sf = ldst_iss_sf(a->sz, false, false);

    /*
     * StoreLORelease is the same as Store-Release for QEMU, but
     * needs the feature-test.
     */
    if (!a->lasr && !dc_isar_feature(aa64_lor, s)) {
        return false;
    }
    /* Generate ISS for non-exclusive accesses including LASR. */
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    memop = check_ordered_align(s, a->rn, 0, true, a->sz);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn),
                                true, a->rn != 31, memop);
    do_gpr_st(s, cpu_reg(s, a->rt), clean_addr, memop, true, a->rt,
              iss_sf, a->lasr);
    return true;
}
static bool trans_LDAR(DisasContext *s, arg_stlr *a)
{
    TCGv_i64 clean_addr;
    MemOp memop;
    bool iss_sf = ldst_iss_sf(a->sz, false, false);

    /* LoadLOAcquire is the same as Load-Acquire for QEMU. */
    if (!a->lasr && !dc_isar_feature(aa64_lor, s)) {
        return false;
    }
    /* Generate ISS for non-exclusive accesses including LASR. */
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    memop = check_ordered_align(s, a->rn, 0, false, a->sz);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn),
                                false, a->rn != 31, memop);
    do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, memop, false, true,
              a->rt, iss_sf, a->lasr);
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    return true;
}
static bool trans_STXP(DisasContext *s, arg_stxr *a)
{
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    if (a->lasr) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }
    gen_store_exclusive(s, a->rs, a->rt, a->rt2, a->rn, a->sz, true);
    return true;
}
static bool trans_LDXP(DisasContext *s, arg_stxr *a)
{
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    gen_load_exclusive(s, a->rt, a->rt2, a->rn, a->sz, true);
    if (a->lasr) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
    return true;
}
static bool trans_CASP(DisasContext *s, arg_CASP *a)
{
    if (!dc_isar_feature(aa64_atomics, s)) {
        return false;
    }
    if (((a->rt | a->rs) & 1) != 0) {
        return false;
    }

    gen_compare_and_swap_pair(s, a->rs, a->rt, a->rn, a->sz);
    return true;
}
static bool trans_CAS(DisasContext *s, arg_CAS *a)
{
    if (!dc_isar_feature(aa64_atomics, s)) {
        return false;
    }
    gen_compare_and_swap(s, a->rs, a->rt, a->rn, a->sz);
    return true;
}
static bool trans_LD_lit(DisasContext *s, arg_ldlit *a)
{
    bool iss_sf = ldst_iss_sf(a->sz, a->sign, false);
    TCGv_i64 tcg_rt = cpu_reg(s, a->rt);
    TCGv_i64 clean_addr = tcg_temp_new_i64();
    MemOp memop = finalize_memop(s, a->sz + a->sign * MO_SIGN);

    gen_pc_plus_diff(s, clean_addr, a->imm);
    do_gpr_ld(s, tcg_rt, clean_addr, memop,
              false, true, a->rt, iss_sf, false);
    return true;
}
static bool trans_LD_lit_v(DisasContext *s, arg_ldlit *a)
{
    /* Load register (literal), vector version */
    TCGv_i64 clean_addr;
    MemOp memop;

    if (!fp_access_check(s)) {
        return true;
    }
    memop = finalize_memop_asimd(s, a->sz);
    clean_addr = tcg_temp_new_i64();
    gen_pc_plus_diff(s, clean_addr, a->imm);
    do_fp_ld(s, a->rt, clean_addr, memop);
    return true;
}
static void op_addr_ldstpair_pre(DisasContext *s, arg_ldstpair *a,
                                 TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr,
                                 uint64_t offset, bool is_store, MemOp mop)
{
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    *dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
    if (!a->p) {
        tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset);
    }

    *clean_addr = gen_mte_checkN(s, *dirty_addr, is_store,
                                 (a->w || a->rn != 31), 2 << a->sz, mop);
}
static void op_addr_ldstpair_post(DisasContext *s, arg_ldstpair *a,
                                  TCGv_i64 dirty_addr, uint64_t offset)
{
    if (a->w) {
        if (a->p) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr);
    }
}
*s
, arg_ldstpair
*a
)
3096 uint64_t offset
= a
->imm
<< a
->sz
;
3097 TCGv_i64 clean_addr
, dirty_addr
, tcg_rt
, tcg_rt2
;
3098 MemOp mop
= finalize_memop(s
, a
->sz
);
3100 op_addr_ldstpair_pre(s
, a
, &clean_addr
, &dirty_addr
, offset
, true, mop
);
3101 tcg_rt
= cpu_reg(s
, a
->rt
);
3102 tcg_rt2
= cpu_reg(s
, a
->rt2
);
3104 * We built mop above for the single logical access -- rebuild it
3105 * now for the paired operation.
3107 * With LSE2, non-sign-extending pairs are treated atomically if
3108 * aligned, and if unaligned one of the pair will be completely
3109 * within a 16-byte block and that element will be atomic.
3110 * Otherwise each element is separately atomic.
3111 * In all cases, issue one operation with the correct atomicity.
3115 mop
|= (a
->sz
== 2 ? MO_ALIGN_4
: MO_ALIGN_8
);
3117 mop
= finalize_memop_pair(s
, mop
);
3119 TCGv_i64 tmp
= tcg_temp_new_i64();
3121 if (s
->be_data
== MO_LE
) {
3122 tcg_gen_concat32_i64(tmp
, tcg_rt
, tcg_rt2
);
3124 tcg_gen_concat32_i64(tmp
, tcg_rt2
, tcg_rt
);
3126 tcg_gen_qemu_st_i64(tmp
, clean_addr
, get_mem_index(s
), mop
);
3128 TCGv_i128 tmp
= tcg_temp_new_i128();
3130 if (s
->be_data
== MO_LE
) {
3131 tcg_gen_concat_i64_i128(tmp
, tcg_rt
, tcg_rt2
);
3133 tcg_gen_concat_i64_i128(tmp
, tcg_rt2
, tcg_rt
);
3135 tcg_gen_qemu_st_i128(tmp
, clean_addr
, get_mem_index(s
), mop
);
3137 op_addr_ldstpair_post(s
, a
, dirty_addr
, offset
);
static bool trans_LDP(DisasContext *s, arg_ldstpair *a)
{
    uint64_t offset = a->imm << a->sz;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2;
    MemOp mop = finalize_memop(s, a->sz);

    op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, false, mop);
    tcg_rt = cpu_reg(s, a->rt);
    tcg_rt2 = cpu_reg(s, a->rt2);

    /*
     * We built mop above for the single logical access -- rebuild it
     * now for the paired operation.
     *
     * With LSE2, non-sign-extending pairs are treated atomically if
     * aligned, and if unaligned one of the pair will be completely
     * within a 16-byte block and that element will be atomic.
     * Otherwise each element is separately atomic.
     * In all cases, issue one operation with the correct atomicity.
     *
     * This treats sign-extending loads like zero-extending loads,
     * since that reuses the most code below.
     */
    mop = a->sz + 1;
    if (s->align_mem) {
        mop |= (a->sz == 2 ? MO_ALIGN_4 : MO_ALIGN_8);
    }
    mop = finalize_memop_pair(s, mop);
    if (a->sz == 2) {
        int o2 = s->be_data == MO_LE ? 32 : 0;
        int o1 = o2 ^ 32;

        tcg_gen_qemu_ld_i64(tcg_rt, clean_addr, get_mem_index(s), mop);
        if (a->sign) {
            tcg_gen_sextract_i64(tcg_rt2, tcg_rt, o2, 32);
            tcg_gen_sextract_i64(tcg_rt, tcg_rt, o1, 32);
        } else {
            tcg_gen_extract_i64(tcg_rt2, tcg_rt, o2, 32);
            tcg_gen_extract_i64(tcg_rt, tcg_rt, o1, 32);
        }
    } else {
        TCGv_i128 tmp = tcg_temp_new_i128();

        tcg_gen_qemu_ld_i128(tmp, clean_addr, get_mem_index(s), mop);
        if (s->be_data == MO_LE) {
            tcg_gen_extr_i128_i64(tcg_rt, tcg_rt2, tmp);
        } else {
            tcg_gen_extr_i128_i64(tcg_rt2, tcg_rt, tmp);
        }
    }
    op_addr_ldstpair_post(s, a, dirty_addr, offset);
    return true;
}
static bool trans_STP_v(DisasContext *s, arg_ldstpair *a)
{
    uint64_t offset = a->imm << a->sz;
    TCGv_i64 clean_addr, dirty_addr;
    MemOp mop;

    if (!fp_access_check(s)) {
        return true;
    }

    /* LSE2 does not merge FP pairs; leave these as separate operations. */
    mop = finalize_memop_asimd(s, a->sz);
    op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, true, mop);
    do_fp_st(s, a->rt, clean_addr, mop);
    tcg_gen_addi_i64(clean_addr, clean_addr, 1 << a->sz);
    do_fp_st(s, a->rt2, clean_addr, mop);
    op_addr_ldstpair_post(s, a, dirty_addr, offset);
    return true;
}
static bool trans_LDP_v(DisasContext *s, arg_ldstpair *a)
{
    uint64_t offset = a->imm << a->sz;
    TCGv_i64 clean_addr, dirty_addr;
    MemOp mop;

    if (!fp_access_check(s)) {
        return true;
    }

    /* LSE2 does not merge FP pairs; leave these as separate operations. */
    mop = finalize_memop_asimd(s, a->sz);
    op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, false, mop);
    do_fp_ld(s, a->rt, clean_addr, mop);
    tcg_gen_addi_i64(clean_addr, clean_addr, 1 << a->sz);
    do_fp_ld(s, a->rt2, clean_addr, mop);
    op_addr_ldstpair_post(s, a, dirty_addr, offset);
    return true;
}
static bool trans_STGP(DisasContext *s, arg_ldstpair *a)
{
    TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2;
    uint64_t offset = a->imm << LOG2_TAG_GRANULE;
    MemOp mop;
    TCGv_i128 tmp;

    /* STGP only comes in one size. */
    tcg_debug_assert(a->sz == MO_64);

    if (!dc_isar_feature(aa64_mte_insn_reg, s)) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
    if (!a->p) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    }

    clean_addr = clean_data_tbi(s, dirty_addr);
    tcg_rt = cpu_reg(s, a->rt);
    tcg_rt2 = cpu_reg(s, a->rt2);

    /*
     * STGP is defined as two 8-byte memory operations, aligned to TAG_GRANULE,
     * and one tag operation. We implement it as one single aligned 16-byte
     * memory operation for convenience. Note that the alignment ensures
     * MO_ATOM_IFALIGN_PAIR produces 8-byte atomicity for the memory store.
     */
    mop = finalize_memop_atom(s, MO_128 | MO_ALIGN, MO_ATOM_IFALIGN_PAIR);

    tmp = tcg_temp_new_i128();
    if (s->be_data == MO_LE) {
        tcg_gen_concat_i64_i128(tmp, tcg_rt, tcg_rt2);
    } else {
        tcg_gen_concat_i64_i128(tmp, tcg_rt2, tcg_rt);
    }
    tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop);

    /* Perform the tag store, if tag access enabled. */
    if (s->ata[0]) {
        if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            gen_helper_stg_parallel(tcg_env, dirty_addr, dirty_addr);
        } else {
            gen_helper_stg(tcg_env, dirty_addr, dirty_addr);
        }
    }

    op_addr_ldstpair_post(s, a, dirty_addr, offset);
    return true;
}
static void op_addr_ldst_imm_pre(DisasContext *s, arg_ldst_imm *a,
                                 TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr,
                                 uint64_t offset, bool is_store, MemOp mop)
{
    int memidx;

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    *dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
    if (!a->p) {
        tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset);
    }
    memidx = get_a64_user_mem_index(s, a->unpriv);
    *clean_addr = gen_mte_check1_mmuidx(s, *dirty_addr, is_store,
                                        a->w || a->rn != 31,
                                        mop, a->unpriv, memidx);
}
static void op_addr_ldst_imm_post(DisasContext *s, arg_ldst_imm *a,
                                  TCGv_i64 dirty_addr, uint64_t offset)
{
    if (a->w) {
        if (a->p) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr);
    }
}
static bool trans_STR_i(DisasContext *s, arg_ldst_imm *a)
{
    bool iss_sf, iss_valid = !a->w;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
    int memidx = get_a64_user_mem_index(s, a->unpriv);
    MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);

    op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop);

    tcg_rt = cpu_reg(s, a->rt);
    iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);

    do_gpr_st_memidx(s, tcg_rt, clean_addr, mop, memidx,
                     iss_valid, a->rt, iss_sf, false);
    op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
    return true;
}
static bool trans_LDR_i(DisasContext *s, arg_ldst_imm *a)
{
    bool iss_sf, iss_valid = !a->w;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
    int memidx = get_a64_user_mem_index(s, a->unpriv);
    MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);

    op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop);

    tcg_rt = cpu_reg(s, a->rt);
    iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);

    do_gpr_ld_memidx(s, tcg_rt, clean_addr, mop,
                     a->ext, memidx, iss_valid, a->rt, iss_sf, false);
    op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
    return true;
}
static bool trans_STR_v_i(DisasContext *s, arg_ldst_imm *a)
{
    TCGv_i64 clean_addr, dirty_addr;
    MemOp mop;

    if (!fp_access_check(s)) {
        return true;
    }
    mop = finalize_memop_asimd(s, a->sz);
    op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop);
    do_fp_st(s, a->rt, clean_addr, mop);
    op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
    return true;
}
static bool trans_LDR_v_i(DisasContext *s, arg_ldst_imm *a)
{
    TCGv_i64 clean_addr, dirty_addr;
    MemOp mop;

    if (!fp_access_check(s)) {
        return true;
    }
    mop = finalize_memop_asimd(s, a->sz);
    op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop);
    do_fp_ld(s, a->rt, clean_addr, mop);
    op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
    return true;
}
static void op_addr_ldst_pre(DisasContext *s, arg_ldst *a,
                             TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr,
                             bool is_store, MemOp memop)
{
    TCGv_i64 tcg_rm;

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    *dirty_addr = read_cpu_reg_sp(s, a->rn, 1);

    tcg_rm = read_cpu_reg(s, a->rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, a->opt, a->s ? a->sz : 0);

    tcg_gen_add_i64(*dirty_addr, *dirty_addr, tcg_rm);
    *clean_addr = gen_mte_check1(s, *dirty_addr, is_store, true, memop);
}
static bool trans_LDR(DisasContext *s, arg_ldst *a)
{
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
    bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
    MemOp memop;

    if (extract32(a->opt, 1, 1) == 0) {
        return false;
    }

    memop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
    op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, false, memop);
    tcg_rt = cpu_reg(s, a->rt);
    do_gpr_ld(s, tcg_rt, clean_addr, memop,
              a->ext, true, a->rt, iss_sf, false);
    return true;
}
static bool trans_STR(DisasContext *s, arg_ldst *a)
{
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
    bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
    MemOp memop;

    if (extract32(a->opt, 1, 1) == 0) {
        return false;
    }

    memop = finalize_memop(s, a->sz);
    op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, true, memop);
    tcg_rt = cpu_reg(s, a->rt);
    do_gpr_st(s, tcg_rt, clean_addr, memop, true, a->rt, iss_sf, false);
    return true;
}
static bool trans_LDR_v(DisasContext *s, arg_ldst *a)
{
    TCGv_i64 clean_addr, dirty_addr;
    MemOp memop;

    if (extract32(a->opt, 1, 1) == 0) {
        return false;
    }

    if (!fp_access_check(s)) {
        return true;
    }

    memop = finalize_memop_asimd(s, a->sz);
    op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, false, memop);
    do_fp_ld(s, a->rt, clean_addr, memop);
    return true;
}
static bool trans_STR_v(DisasContext *s, arg_ldst *a)
{
    TCGv_i64 clean_addr, dirty_addr;
    MemOp memop;

    if (extract32(a->opt, 1, 1) == 0) {
        return false;
    }

    if (!fp_access_check(s)) {
        return true;
    }

    memop = finalize_memop_asimd(s, a->sz);
    op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, true, memop);
    do_fp_st(s, a->rt, clean_addr, memop);
    return true;
}
static bool do_atomic_ld(DisasContext *s, arg_atomic *a, AtomicThreeOpFn *fn,
                         int sign, bool invert)
{
    MemOp mop = a->sz | sign;
    TCGv_i64 clean_addr, tcg_rs, tcg_rt;

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    mop = check_atomic_align(s, a->rn, mop);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn), false,
                                a->rn != 31, mop);
    tcg_rs = read_cpu_reg(s, a->rs, true);
    tcg_rt = cpu_reg(s, a->rt);
    if (invert) {
        tcg_gen_not_i64(tcg_rs, tcg_rs);
    }
    /*
     * The tcg atomic primitives are all full barriers. Therefore we
     * can ignore the Acquire and Release bits of this instruction.
     */
    fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);

    if (mop & MO_SIGN) {
        switch (a->sz) {
        case MO_8:
            tcg_gen_ext8u_i64(tcg_rt, tcg_rt);
            break;
        case MO_16:
            tcg_gen_ext16u_i64(tcg_rt, tcg_rt);
            break;
        case MO_32:
            tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
            break;
        case MO_64:
            break;
        default:
            g_assert_not_reached();
        }
    }
    return true;
}
TRANS_FEAT(LDADD, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_add_i64, 0, false)
TRANS_FEAT(LDCLR, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_and_i64, 0, true)
TRANS_FEAT(LDEOR, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_xor_i64, 0, false)
TRANS_FEAT(LDSET, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_or_i64, 0, false)
TRANS_FEAT(LDSMAX, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_smax_i64, MO_SIGN, false)
TRANS_FEAT(LDSMIN, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_smin_i64, MO_SIGN, false)
TRANS_FEAT(LDUMAX, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_umax_i64, 0, false)
TRANS_FEAT(LDUMIN, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_umin_i64, 0, false)
TRANS_FEAT(SWP, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_xchg_i64, 0, false)
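/*
 * Explanatory addition (not original text): LDCLR is the only entry above
 * with invert=true -- architecturally it is an atomic "AND NOT" (BIC), so
 * do_atomic_ld() complements Rs first and then applies
 * tcg_gen_atomic_fetch_and_i64.
 */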
static bool trans_LDAPR(DisasContext *s, arg_LDAPR *a)
{
    bool iss_sf = ldst_iss_sf(a->sz, false, false);
    TCGv_i64 clean_addr;
    MemOp mop;

    if (!dc_isar_feature(aa64_atomics, s) ||
        !dc_isar_feature(aa64_rcpc_8_3, s)) {
        return false;
    }
    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    mop = check_atomic_align(s, a->rn, a->sz);
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn), false,
                                a->rn != 31, mop);
    /*
     * LDAPR* are a special case because they are a simple load, not a
     * fetch-and-do-something op.
     * The architectural consistency requirements here are weaker than
     * full load-acquire (we only need "load-acquire processor consistent"),
     * but we choose to implement them as full LDAQ.
     */
    do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, mop, false,
              true, a->rt, iss_sf, true);
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    return true;
}
static bool trans_LDRA(DisasContext *s, arg_LDRA *a)
{
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
    MemOp memop;

    /* Load with pointer authentication */
    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, a->rn, 1);

    if (s->pauth_active) {
        if (!a->m) {
            gen_helper_autda_combined(dirty_addr, tcg_env, dirty_addr,
                                      tcg_constant_i64(0));
        } else {
            gen_helper_autdb_combined(dirty_addr, tcg_env, dirty_addr,
                                      tcg_constant_i64(0));
        }
    }

    tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm);

    memop = finalize_memop(s, MO_64);

    /* Note that "clean" and "dirty" here refer to TBI not PAC. */
    clean_addr = gen_mte_check1(s, dirty_addr, false,
                                a->w || a->rn != 31, memop);

    tcg_rt = cpu_reg(s, a->rt);
    do_gpr_ld(s, tcg_rt, clean_addr, memop,
              /* extend */ false, /* iss_valid */ !a->w,
              /* iss_srt */ a->rt, /* iss_sf */ true, /* iss_ar */ false);

    if (a->w) {
        tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr);
    }
    return true;
}
static bool trans_LDAPR_i(DisasContext *s, arg_ldapr_stlr_i *a)
{
    TCGv_i64 clean_addr, dirty_addr;
    MemOp mop = a->sz | (a->sign ? MO_SIGN : 0);
    bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);

    if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    mop = check_ordered_align(s, a->rn, a->imm, false, mop);
    dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm);
    clean_addr = clean_data_tbi(s, dirty_addr);

    /*
     * Load-AcquirePC semantics; we implement as the slightly more
     * restrictive Load-Acquire.
     */
    do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, mop, a->ext, true,
              a->rt, iss_sf, true);
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    return true;
}
static bool trans_STLR_i(DisasContext *s, arg_ldapr_stlr_i *a)
{
    TCGv_i64 clean_addr, dirty_addr;
    MemOp mop = a->sz;
    bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);

    if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
        return false;
    }

    /* TODO: ARMv8.4-LSE SCTLR.nAA */

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    mop = check_ordered_align(s, a->rn, a->imm, true, mop);
    dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm);
    clean_addr = clean_data_tbi(s, dirty_addr);

    /* Store-Release semantics */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    do_gpr_st(s, cpu_reg(s, a->rt), clean_addr, mop, true, a->rt, iss_sf, true);
    return true;
}
static bool trans_LD_mult(DisasContext *s, arg_ldst_mult *a)
{
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp endian, align, mop;

    int total;    /* total bytes */
    int elements; /* elements per vector */
    int r;
    int size = a->sz;

    if (!a->p && a->rm != 0) {
        /* For non-postindexed accesses the Rm field must be 0 */
        return false;
    }
    if (size == 3 && !a->q && a->selem != 1) {
        return false;
    }
    if (!fp_access_check(s)) {
        return true;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* For our purposes, bytes are always little-endian. */
    endian = s->be_data;
    if (size == 0) {
        endian = MO_LE;
    }

    total = a->rpt * a->selem * (a->q ? 16 : 8);
    tcg_rn = cpu_reg_sp(s, a->rn);

    /*
     * Issue the MTE check vs the logical repeat count, before we
     * promote consecutive little-endian elements below.
     */
    clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31, total,
                                finalize_memop_asimd(s, size));

    /*
     * Consecutive little-endian elements from a single register
     * can be promoted to a larger little-endian operation.
     */
    align = MO_ALIGN;
    if (a->selem == 1 && endian == MO_LE) {
        align = pow2_align(size);
        size = 3;
    }
    if (!s->align_mem) {
        align = 0;
    }
    mop = endian | size | align;

    elements = (a->q ? 16 : 8) >> size;
    tcg_ebytes = tcg_constant_i64(1 << size);
    for (r = 0; r < a->rpt; r++) {
        int e;

        for (e = 0; e < elements; e++) {
            int xs;

            for (xs = 0; xs < a->selem; xs++) {
                int tt = (a->rt + r + xs) % 32;
                do_vec_ld(s, tt, e, clean_addr, mop);
                tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
            }
        }
    }

    /*
     * For non-quad operations, setting a slice of the low 64 bits of
     * the register clears the high 64 bits (in the ARM ARM pseudocode
     * this is implicit in the fact that 'rval' is a 64 bit wide
     * variable). For quad operations, we might still need to zero
     * the high bits of SVE.
     */
    for (r = 0; r < a->rpt * a->selem; r++) {
        int tt = (a->rt + r) % 32;
        clear_vec_high(s, a->q, tt);
    }

    if (a->p) {
        if (a->rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
        }
    }
    return true;
}
static bool trans_ST_mult(DisasContext *s, arg_ldst_mult *a)
{
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp endian, align, mop;

    int total;    /* total bytes */
    int elements; /* elements per vector */
    int r;
    int size = a->sz;

    if (!a->p && a->rm != 0) {
        /* For non-postindexed accesses the Rm field must be 0 */
        return false;
    }
    if (size == 3 && !a->q && a->selem != 1) {
        return false;
    }
    if (!fp_access_check(s)) {
        return true;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* For our purposes, bytes are always little-endian. */
    endian = s->be_data;
    if (size == 0) {
        endian = MO_LE;
    }

    total = a->rpt * a->selem * (a->q ? 16 : 8);
    tcg_rn = cpu_reg_sp(s, a->rn);

    /*
     * Issue the MTE check vs the logical repeat count, before we
     * promote consecutive little-endian elements below.
     */
    clean_addr = gen_mte_checkN(s, tcg_rn, true, a->p || a->rn != 31, total,
                                finalize_memop_asimd(s, size));

    /*
     * Consecutive little-endian elements from a single register
     * can be promoted to a larger little-endian operation.
     */
    align = MO_ALIGN;
    if (a->selem == 1 && endian == MO_LE) {
        align = pow2_align(size);
        size = 3;
    }
    if (!s->align_mem) {
        align = 0;
    }
    mop = endian | size | align;

    elements = (a->q ? 16 : 8) >> size;
    tcg_ebytes = tcg_constant_i64(1 << size);
    for (r = 0; r < a->rpt; r++) {
        int e;

        for (e = 0; e < elements; e++) {
            int xs;

            for (xs = 0; xs < a->selem; xs++) {
                int tt = (a->rt + r + xs) % 32;
                do_vec_st(s, tt, e, clean_addr, mop);
                tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
            }
        }
    }

    if (a->p) {
        if (a->rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
        }
    }
    return true;
}
static bool trans_ST_single(DisasContext *s, arg_ldst_single *a)
{
    int xs, total, rt;
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp mop;

    if (!a->p && a->rm != 0) {
        return false;
    }
    if (!fp_access_check(s)) {
        return true;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    total = a->selem << a->scale;
    tcg_rn = cpu_reg_sp(s, a->rn);

    mop = finalize_memop_asimd(s, a->scale);
    clean_addr = gen_mte_checkN(s, tcg_rn, true, a->p || a->rn != 31,
                                total, mop);

    tcg_ebytes = tcg_constant_i64(1 << a->scale);
    for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) {
        do_vec_st(s, rt, a->index, clean_addr, mop);
        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
    }

    if (a->p) {
        if (a->rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
        }
    }
    return true;
}
static bool trans_LD_single(DisasContext *s, arg_ldst_single *a)
{
    int xs, total, rt;
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp mop;

    if (!a->p && a->rm != 0) {
        return false;
    }
    if (!fp_access_check(s)) {
        return true;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    total = a->selem << a->scale;
    tcg_rn = cpu_reg_sp(s, a->rn);

    mop = finalize_memop_asimd(s, a->scale);
    clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31,
                                total, mop);

    tcg_ebytes = tcg_constant_i64(1 << a->scale);
    for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) {
        do_vec_ld(s, rt, a->index, clean_addr, mop);
        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
    }

    if (a->p) {
        if (a->rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
        }
    }
    return true;
}
static bool trans_LD_single_repl(DisasContext *s, arg_LD_single_repl *a)
{
    int xs, total, rt;
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp mop;

    if (!a->p && a->rm != 0) {
        return false;
    }
    if (!fp_access_check(s)) {
        return true;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    total = a->selem << a->scale;
    tcg_rn = cpu_reg_sp(s, a->rn);

    mop = finalize_memop_asimd(s, a->scale);
    clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31,
                                total, mop);

    tcg_ebytes = tcg_constant_i64(1 << a->scale);
    for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) {
        /* Load and replicate to all elements */
        TCGv_i64 tcg_tmp = tcg_temp_new_i64();

        tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
        tcg_gen_gvec_dup_i64(a->scale, vec_full_reg_offset(s, rt),
                             (a->q + 1) * 8, vec_full_reg_size(s), tcg_tmp);
        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
    }

    if (a->p) {
        if (a->rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
        }
    }
    return true;
}
static bool trans_STZGM(DisasContext *s, arg_ldst_tag *a)
{
    TCGv_i64 addr, clean_addr, tcg_rt;
    int size = 4 << s->dcz_blocksize;

    if (!dc_isar_feature(aa64_mte, s)) {
        return false;
    }
    if (s->current_el == 0) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, a->rn, true);
    tcg_gen_addi_i64(addr, addr, a->imm);
    tcg_rt = cpu_reg(s, a->rt);

    if (s->ata[0]) {
        gen_helper_stzgm_tags(tcg_env, addr, tcg_rt);
    }
    /*
     * The non-tags portion of STZGM is mostly like DC_ZVA,
     * except the alignment happens before the access.
     */
    clean_addr = clean_data_tbi(s, addr);
    tcg_gen_andi_i64(clean_addr, clean_addr, -size);
    gen_helper_dc_zva(tcg_env, clean_addr);
    return true;
}
static bool trans_STGM(DisasContext *s, arg_ldst_tag *a)
{
    TCGv_i64 addr, clean_addr, tcg_rt;

    if (!dc_isar_feature(aa64_mte, s)) {
        return false;
    }
    if (s->current_el == 0) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, a->rn, true);
    tcg_gen_addi_i64(addr, addr, a->imm);
    tcg_rt = cpu_reg(s, a->rt);

    if (s->ata[0]) {
        gen_helper_stgm(tcg_env, addr, tcg_rt);
    } else {
        MMUAccessType acc = MMU_DATA_STORE;
        int size = 4 << s->gm_blocksize;

        clean_addr = clean_data_tbi(s, addr);
        tcg_gen_andi_i64(clean_addr, clean_addr, -size);
        gen_probe_access(s, clean_addr, acc, size);
    }
    return true;
}
static bool trans_LDGM(DisasContext *s, arg_ldst_tag *a)
{
    TCGv_i64 addr, clean_addr, tcg_rt;

    if (!dc_isar_feature(aa64_mte, s)) {
        return false;
    }
    if (s->current_el == 0) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, a->rn, true);
    tcg_gen_addi_i64(addr, addr, a->imm);
    tcg_rt = cpu_reg(s, a->rt);

    if (s->ata[0]) {
        gen_helper_ldgm(tcg_rt, tcg_env, addr);
    } else {
        MMUAccessType acc = MMU_DATA_LOAD;
        int size = 4 << s->gm_blocksize;

        clean_addr = clean_data_tbi(s, addr);
        tcg_gen_andi_i64(clean_addr, clean_addr, -size);
        gen_probe_access(s, clean_addr, acc, size);
        /* The result tags are zeros. */
        tcg_gen_movi_i64(tcg_rt, 0);
    }
    return true;
}
static bool trans_LDG(DisasContext *s, arg_ldst_tag *a)
{
    TCGv_i64 addr, clean_addr, tcg_rt;

    if (!dc_isar_feature(aa64_mte_insn_reg, s)) {
        return false;
    }

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, a->rn, true);
    if (!a->p) {
        /* pre-index or signed offset */
        tcg_gen_addi_i64(addr, addr, a->imm);
    }

    tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
    tcg_rt = cpu_reg(s, a->rt);
    if (s->ata[0]) {
        gen_helper_ldg(tcg_rt, tcg_env, addr, tcg_rt);
    } else {
        /*
         * Tag access disabled: we must check for aborts on the load
         * from [rn+offset], and then insert a 0 tag into rt.
         */
        clean_addr = clean_data_tbi(s, addr);
        gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
        gen_address_with_allocation_tag0(tcg_rt, tcg_rt);
    }

    if (a->w) {
        /* pre-index or post-index */
        if (a->p) {
            /* post-index */
            tcg_gen_addi_i64(addr, addr, a->imm);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), addr);
    }
    return true;
}
static bool do_STG(DisasContext *s, arg_ldst_tag *a, bool is_zero, bool is_pair)
{
    TCGv_i64 addr, tcg_rt;

    if (a->rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, a->rn, true);
    if (!a->p) {
        /* pre-index or signed offset */
        tcg_gen_addi_i64(addr, addr, a->imm);
    }
    tcg_rt = cpu_reg_sp(s, a->rt);
    if (!s->ata[0]) {
        /*
         * For STG and ST2G, we need to check alignment and probe memory.
         * TODO: For STZG and STZ2G, we could rely on the stores below,
         * at least for system mode; user-only won't enforce alignment.
         */
        if (is_pair) {
            gen_helper_st2g_stub(tcg_env, addr);
        } else {
            gen_helper_stg_stub(tcg_env, addr);
        }
    } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        if (is_pair) {
            gen_helper_st2g_parallel(tcg_env, addr, tcg_rt);
        } else {
            gen_helper_stg_parallel(tcg_env, addr, tcg_rt);
        }
    } else {
        if (is_pair) {
            gen_helper_st2g(tcg_env, addr, tcg_rt);
        } else {
            gen_helper_stg(tcg_env, addr, tcg_rt);
        }
    }

    if (is_zero) {
        TCGv_i64 clean_addr = clean_data_tbi(s, addr);
        TCGv_i64 zero64 = tcg_constant_i64(0);
        TCGv_i128 zero128 = tcg_temp_new_i128();
        int mem_index = get_mem_index(s);
        MemOp mop = finalize_memop(s, MO_128 | MO_ALIGN);

        tcg_gen_concat_i64_i128(zero128, zero64, zero64);

        /* This is 1 or 2 atomic 16-byte operations. */
        tcg_gen_qemu_st_i128(zero128, clean_addr, mem_index, mop);
        if (is_pair) {
            tcg_gen_addi_i64(clean_addr, clean_addr, 16);
            tcg_gen_qemu_st_i128(zero128, clean_addr, mem_index, mop);
        }
    }

    if (a->w) {
        /* pre-index or post-index */
        if (a->p) {
            /* post-index */
            tcg_gen_addi_i64(addr, addr, a->imm);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), addr);
    }
    return true;
}
TRANS_FEAT(STG, aa64_mte_insn_reg, do_STG, a, false, false)
TRANS_FEAT(STZG, aa64_mte_insn_reg, do_STG, a, true, false)
TRANS_FEAT(ST2G, aa64_mte_insn_reg, do_STG, a, false, true)
TRANS_FEAT(STZ2G, aa64_mte_insn_reg, do_STG, a, true, true)
typedef void SetFn(TCGv_env, TCGv_i32, TCGv_i32);

static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue,
                   bool is_setg, SetFn fn)
{
    int memidx;
    uint32_t syndrome, desc = 0;

    if (is_setg && !dc_isar_feature(aa64_mte, s)) {
        return false;
    }

    /*
     * UNPREDICTABLE cases: we choose to UNDEF, which allows
     * us to pull this check before the CheckMOPSEnabled() test
     * (which we do in the helper function)
     */
    if (a->rs == a->rn || a->rs == a->rd || a->rn == a->rd ||
        a->rd == 31 || a->rn == 31) {
        return false;
    }

    memidx = get_a64_user_mem_index(s, a->unpriv);

    /*
     * We pass option_a == true, matching our implementation;
     * we pass wrong_option == false: helper function may set that bit.
     */
    syndrome = syn_mop(true, is_setg, (a->nontemp << 1) | a->unpriv,
                       is_epilogue, false, true, a->rd, a->rs, a->rn);

    if (is_setg ? s->ata[a->unpriv] : s->mte_active[a->unpriv]) {
        /* We may need to do MTE tag checking, so assemble the descriptor */
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, true);
        /* SIZEM1 and ALIGN we leave 0 (byte write) */
    }
    /* The helper function always needs the memidx even with MTE disabled */
    desc = FIELD_DP32(desc, MTEDESC, MIDX, memidx);

    /*
     * The helper needs the register numbers, but since they're in
     * the syndrome anyway, we let it extract them from there rather
     * than passing in an extra three integer arguments.
     */
    fn(tcg_env, tcg_constant_i32(syndrome), tcg_constant_i32(desc));
    return true;
}
TRANS_FEAT(SETP, aa64_mops, do_SET, a, false, false, gen_helper_setp)
TRANS_FEAT(SETM, aa64_mops, do_SET, a, false, false, gen_helper_setm)
TRANS_FEAT(SETE, aa64_mops, do_SET, a, true, false, gen_helper_sete)
TRANS_FEAT(SETGP, aa64_mops, do_SET, a, false, true, gen_helper_setgp)
TRANS_FEAT(SETGM, aa64_mops, do_SET, a, false, true, gen_helper_setgm)
TRANS_FEAT(SETGE, aa64_mops, do_SET, a, true, true, gen_helper_setge)
typedef void CpyFn(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32);

static bool do_CPY(DisasContext *s, arg_cpy *a, bool is_epilogue, CpyFn fn)
{
    int rmemidx, wmemidx;
    uint32_t syndrome, rdesc = 0, wdesc = 0;
    bool wunpriv = extract32(a->options, 0, 1);
    bool runpriv = extract32(a->options, 1, 1);

    /*
     * UNPREDICTABLE cases: we choose to UNDEF, which allows
     * us to pull this check before the CheckMOPSEnabled() test
     * (which we do in the helper function)
     */
    if (a->rs == a->rn || a->rs == a->rd || a->rn == a->rd ||
        a->rd == 31 || a->rs == 31 || a->rn == 31) {
        return false;
    }

    rmemidx = get_a64_user_mem_index(s, runpriv);
    wmemidx = get_a64_user_mem_index(s, wunpriv);

    /*
     * We pass option_a == true, matching our implementation;
     * we pass wrong_option == false: helper function may set that bit.
     */
    syndrome = syn_mop(false, false, a->options, is_epilogue,
                       false, true, a->rd, a->rs, a->rn);

    /* If we need to do MTE tag checking, assemble the descriptors */
    if (s->mte_active[runpriv]) {
        rdesc = FIELD_DP32(rdesc, MTEDESC, TBI, s->tbid);
        rdesc = FIELD_DP32(rdesc, MTEDESC, TCMA, s->tcma);
    }
    if (s->mte_active[wunpriv]) {
        wdesc = FIELD_DP32(wdesc, MTEDESC, TBI, s->tbid);
        wdesc = FIELD_DP32(wdesc, MTEDESC, TCMA, s->tcma);
        wdesc = FIELD_DP32(wdesc, MTEDESC, WRITE, true);
    }
    /* The helper function needs these parts of the descriptor regardless */
    rdesc = FIELD_DP32(rdesc, MTEDESC, MIDX, rmemidx);
    wdesc = FIELD_DP32(wdesc, MTEDESC, MIDX, wmemidx);

    /*
     * The helper needs the register numbers, but since they're in
     * the syndrome anyway, we let it extract them from there rather
     * than passing in an extra three integer arguments.
     */
    fn(tcg_env, tcg_constant_i32(syndrome), tcg_constant_i32(wdesc),
       tcg_constant_i32(rdesc));
    return true;
}
TRANS_FEAT(CPYP, aa64_mops, do_CPY, a, false, gen_helper_cpyp)
TRANS_FEAT(CPYM, aa64_mops, do_CPY, a, false, gen_helper_cpym)
TRANS_FEAT(CPYE, aa64_mops, do_CPY, a, true, gen_helper_cpye)
TRANS_FEAT(CPYFP, aa64_mops, do_CPY, a, false, gen_helper_cpyfp)
TRANS_FEAT(CPYFM, aa64_mops, do_CPY, a, false, gen_helper_cpyfm)
TRANS_FEAT(CPYFE, aa64_mops, do_CPY, a, true, gen_helper_cpyfe)
typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64);

static bool gen_rri(DisasContext *s, arg_rri_sf *a,
                    bool rd_sp, bool rn_sp, ArithTwoOp *fn)
{
    TCGv_i64 tcg_rn = rn_sp ? cpu_reg_sp(s, a->rn) : cpu_reg(s, a->rn);
    TCGv_i64 tcg_rd = rd_sp ? cpu_reg_sp(s, a->rd) : cpu_reg(s, a->rd);
    TCGv_i64 tcg_imm = tcg_constant_i64(a->imm);

    fn(tcg_rd, tcg_rn, tcg_imm);
    if (!a->sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
    return true;
}
/*
 * PC-rel. addressing
 */

static bool trans_ADR(DisasContext *s, arg_ri *a)
{
    gen_pc_plus_diff(s, cpu_reg(s, a->rd), a->imm);
    return true;
}
static bool trans_ADRP(DisasContext *s, arg_ri *a)
{
    int64_t offset = (int64_t)a->imm << 12;

    /* The page offset is ok for CF_PCREL. */
    offset -= s->pc_curr & 0xfff;
    gen_pc_plus_diff(s, cpu_reg(s, a->rd), offset);
    return true;
}
/*
 * Add/subtract (immediate)
 */
TRANS(ADD_i, gen_rri, a, 1, 1, tcg_gen_add_i64)
TRANS(SUB_i, gen_rri, a, 1, 1, tcg_gen_sub_i64)
TRANS(ADDS_i, gen_rri, a, 0, 1, a->sf ? gen_add64_CC : gen_add32_CC)
TRANS(SUBS_i, gen_rri, a, 0, 1, a->sf ? gen_sub64_CC : gen_sub32_CC)

/*
 * Add/subtract (immediate, with tags)
 */

static bool gen_add_sub_imm_with_tags(DisasContext *s, arg_rri_tag *a,
                                      bool sub_op)
{
    TCGv_i64 tcg_rn, tcg_rd;
    int imm;

    imm = a->uimm6 << LOG2_TAG_GRANULE;
    if (sub_op) {
        imm = -imm;
    }

    tcg_rn = cpu_reg_sp(s, a->rn);
    tcg_rd = cpu_reg_sp(s, a->rd);

    if (s->ata[0]) {
        gen_helper_addsubg(tcg_rd, tcg_env, tcg_rn,
                           tcg_constant_i32(imm),
                           tcg_constant_i32(a->uimm4));
    } else {
        tcg_gen_addi_i64(tcg_rd, tcg_rn, imm);
        gen_address_with_allocation_tag0(tcg_rd, tcg_rd);
    }
    return true;
}

TRANS_FEAT(ADDG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, false)
TRANS_FEAT(SUBG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, true)
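
/*
 * For example (illustrative encoding): ADDG X0, X1, #16, #2 adds 16
 * bytes (uimm6 = 1 scaled by the 16-byte tag granule) to the address
 * bits of X1 and steps the logical allocation tag by uimm4 = 2;
 * skipping excluded tags is handled inside gen_helper_addsubg when
 * tag access is active.
 */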

/* The input should be a value in the bottom e bits (with higher
 * bits zero); returns that value replicated into every element
 * of size e in a 64 bit integer.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    assert(e != 0);
    while (e < 64) {
        mask |= mask << e;
        e *= 2;
    }
    return mask;
}
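
/*
 * For example, bitfield_replicate(0b11, 8) yields 0x0303030303030303:
 * the two-bit run is doubled to fill 16, 32 and then all 64 bits.
 */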

/*
 * Logical (immediate)
 */

/*
 * Simplified variant of pseudocode DecodeBitMasks() for the case where we
 * only require the wmask. Returns false if the imms/immr/immn are a reserved
 * value (ie should cause a guest UNDEF exception), and true if they are
 * valid, in which case the decoded bit pattern is written to result.
 */
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                            unsigned int imms, unsigned int immr)
{
    uint64_t mask;
    unsigned e, levels, s, r;
    int len;

    assert(immn < 2 && imms < 64 && immr < 64);

    /* The bit patterns we create here are 64 bit patterns which
     * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
     * 64 bits each. Each element contains the same value: a run
     * of between 1 and e-1 non-zero bits, rotated within the
     * element by between 0 and e-1 bits.
     *
     * The element size and run length are encoded into immn (1 bit)
     * and imms (6 bits) as follows:
     * 64 bit elements: immn = 1, imms = <length of run - 1>
     * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
     * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
     *  8 bit elements: immn = 0, imms = 110 : <length of run - 1>
     *  4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
     *  2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
     * Notice that immn = 0, imms = 11111x is the only combination
     * not covered by one of the above options; this is reserved.
     * Further, <length of run - 1> all-ones is a reserved pattern.
     *
     * In all cases the rotation is by immr % e (and immr is 6 bits).
     */

    /* First determine the element size */
    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
    if (len < 1) {
        /* This is the immn == 0, imms == 0b11111x case */
        return false;
    }
    e = 1 << len;

    levels = e - 1;
    s = imms & levels;
    r = immr & levels;

    if (s == levels) {
        /* <length of run - 1> mustn't be all-ones. */
        return false;
    }

    /* Create the value of one element: s+1 set bits rotated
     * by r within the element (which is e bits wide)...
     */
    mask = MAKE_64BIT_MASK(0, s + 1);
    if (r) {
        mask = (mask >> r) | (mask << (e - r));
        mask &= MAKE_64BIT_MASK(0, e);
    }
    /* ...then replicate the element over the whole 64 bit value */
    mask = bitfield_replicate(mask, e);
    *result = mask;
    return true;
}
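
/*
 * Worked example (illustrative): immn = 0, imms = 0b110001, immr = 0
 * gives len = 3, so e = 8 and s = 1: each byte holds a run of two set
 * bits, and the decoded wmask is 0x0303030303030303.
 */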

static bool gen_rri_log(DisasContext *s, arg_rri_log *a, bool set_cc,
                        void (*fn)(TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 tcg_rd, tcg_rn;
    uint64_t imm;

    /* Some immediate field values are reserved. */
    if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
                                extract32(a->dbm, 0, 6),
                                extract32(a->dbm, 6, 6))) {
        return false;
    }
    if (!a->sf) {
        imm &= 0xffffffffull;
    }

    tcg_rd = set_cc ? cpu_reg(s, a->rd) : cpu_reg_sp(s, a->rd);
    tcg_rn = cpu_reg(s, a->rn);

    fn(tcg_rd, tcg_rn, imm);
    if (set_cc) {
        gen_logic_CC(a->sf, tcg_rd);
    }
    if (!a->sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
    return true;
}

TRANS(AND_i, gen_rri_log, a, false, tcg_gen_andi_i64)
TRANS(ORR_i, gen_rri_log, a, false, tcg_gen_ori_i64)
TRANS(EOR_i, gen_rri_log, a, false, tcg_gen_xori_i64)
TRANS(ANDS_i, gen_rri_log, a, true, tcg_gen_andi_i64)

/*
 * Move wide (immediate)
 */

static bool trans_MOVZ(DisasContext *s, arg_movw *a)
{
    int pos = a->hw << 4;
    tcg_gen_movi_i64(cpu_reg(s, a->rd), (uint64_t)a->imm << pos);
    return true;
}

static bool trans_MOVN(DisasContext *s, arg_movw *a)
{
    int pos = a->hw << 4;
    uint64_t imm = a->imm;

    imm = ~(imm << pos);
    if (!a->sf) {
        imm = (uint32_t)imm;
    }
    tcg_gen_movi_i64(cpu_reg(s, a->rd), imm);
    return true;
}

static bool trans_MOVK(DisasContext *s, arg_movw *a)
{
    int pos = a->hw << 4;
    TCGv_i64 tcg_rd, tcg_im;

    tcg_rd = cpu_reg(s, a->rd);
    tcg_im = tcg_constant_i64(a->imm);
    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_im, pos, 16);
    if (!a->sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
    return true;
}
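
/*
 * Note that MOVN with hw = 0 and imm = 0 is the canonical way to load
 * all-ones: ~(0 << 0) = 0xffffffffffffffff, or 0xffffffff for the
 * 32-bit form after the truncation above.
 */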

/*
 * Bitfield
 */

static bool trans_SBFM(DisasContext *s, arg_SBFM *a)
{
    TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
    TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
    unsigned int bitsize = a->sf ? 64 : 32;
    unsigned int ri = a->immr;
    unsigned int si = a->imms;
    unsigned int pos, len;

    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
        if (!a->sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
        return true;
    }

    /* Wd<32+s-r,32-r> = Wn<s:0> */
    len = si + 1;
    pos = (bitsize - ri) & (bitsize - 1);

    if (len < ri) {
        /*
         * Sign extend the destination field from len to fill the
         * balance of the word.  Let the deposit below insert all
         * of those sign bits.
         */
        tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
        len = ri;
    }

    /*
     * We start with zero, and we haven't modified any bits outside
     * bitsize, therefore no final zero-extension is needed for !sf.
     */
    tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
    return true;
}

static bool trans_UBFM(DisasContext *s, arg_UBFM *a)
{
    TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
    TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
    unsigned int bitsize = a->sf ? 64 : 32;
    unsigned int ri = a->immr;
    unsigned int si = a->imms;
    unsigned int pos, len;

    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
    } else {
        /* Wd<32+s-r,32-r> = Wn<s:0> */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
    }
    return true;
}

static bool trans_BFM(DisasContext *s, arg_BFM *a)
{
    TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
    TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
    unsigned int bitsize = a->sf ? 64 : 32;
    unsigned int ri = a->immr;
    unsigned int si = a->imms;
    unsigned int pos, len;

    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
        len = (si - ri) + 1;
        pos = 0;
    } else {
        /* Wd<32+s-r,32-r> = Wn<s:0> */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
    }

    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
    if (!a->sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
    return true;
}

/*
 * Extract
 */

static bool trans_EXTR(DisasContext *s, arg_extract *a)
{
    TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

    tcg_rd = cpu_reg(s, a->rd);

    if (unlikely(a->imm == 0)) {
        /*
         * tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
         * so an extract from bit 0 is a special case.
         */
        if (a->sf) {
            tcg_gen_mov_i64(tcg_rd, cpu_reg(s, a->rm));
        } else {
            tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, a->rm));
        }
    } else {
        tcg_rm = cpu_reg(s, a->rm);
        tcg_rn = cpu_reg(s, a->rn);

        if (a->sf) {
            /* Specialization to ROR happens in EXTRACT2. */
            tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, a->imm);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();

            tcg_gen_extrl_i64_i32(t0, tcg_rm);
            if (a->rm == a->rn) {
                tcg_gen_rotri_i32(t0, t0, a->imm);
            } else {
                TCGv_i32 t1 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(t1, tcg_rn);
                tcg_gen_extract2_i32(t0, t0, t1, a->imm);
            }
            tcg_gen_extu_i32_i64(tcg_rd, t0);
        }
    }
    return true;
}
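
/*
 * When Rn == Rm, EXTR is the ROR (immediate) alias, which is why the
 * 32-bit path above can use a plain tcg_gen_rotri_i32 for that case.
 */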

/*
 * Cryptographic AES, SHA, SHA512
 */

TRANS_FEAT(AESE, aa64_aes, do_gvec_op3_ool, a, 0, gen_helper_crypto_aese)
TRANS_FEAT(AESD, aa64_aes, do_gvec_op3_ool, a, 0, gen_helper_crypto_aesd)
TRANS_FEAT(AESMC, aa64_aes, do_gvec_op2_ool, a, 0, gen_helper_crypto_aesmc)
TRANS_FEAT(AESIMC, aa64_aes, do_gvec_op2_ool, a, 0, gen_helper_crypto_aesimc)

TRANS_FEAT(SHA1C, aa64_sha1, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha1c)
TRANS_FEAT(SHA1P, aa64_sha1, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha1p)
TRANS_FEAT(SHA1M, aa64_sha1, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha1m)
TRANS_FEAT(SHA1SU0, aa64_sha1, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha1su0)

TRANS_FEAT(SHA256H, aa64_sha256, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha256h)
TRANS_FEAT(SHA256H2, aa64_sha256, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha256h2)
TRANS_FEAT(SHA256SU1, aa64_sha256, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha256su1)

TRANS_FEAT(SHA1H, aa64_sha1, do_gvec_op2_ool, a, 0, gen_helper_crypto_sha1h)
TRANS_FEAT(SHA1SU1, aa64_sha1, do_gvec_op2_ool, a, 0, gen_helper_crypto_sha1su1)
TRANS_FEAT(SHA256SU0, aa64_sha256, do_gvec_op2_ool, a, 0, gen_helper_crypto_sha256su0)

TRANS_FEAT(SHA512H, aa64_sha512, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha512h)
TRANS_FEAT(SHA512H2, aa64_sha512, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha512h2)
TRANS_FEAT(SHA512SU1, aa64_sha512, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha512su1)
TRANS_FEAT(RAX1, aa64_sha3, do_gvec_fn3, a, gen_gvec_rax1)
TRANS_FEAT(SM3PARTW1, aa64_sm3, do_gvec_op3_ool, a, 0, gen_helper_crypto_sm3partw1)
TRANS_FEAT(SM3PARTW2, aa64_sm3, do_gvec_op3_ool, a, 0, gen_helper_crypto_sm3partw2)
TRANS_FEAT(SM4EKEY, aa64_sm4, do_gvec_op3_ool, a, 0, gen_helper_crypto_sm4ekey)

TRANS_FEAT(SHA512SU0, aa64_sha512, do_gvec_op2_ool, a, 0, gen_helper_crypto_sha512su0)
TRANS_FEAT(SM4E, aa64_sm4, do_gvec_op3_ool, a, 0, gen_helper_crypto_sm4e)

TRANS_FEAT(EOR3, aa64_sha3, do_gvec_fn4, a, gen_gvec_eor3)
TRANS_FEAT(BCAX, aa64_sha3, do_gvec_fn4, a, gen_gvec_bcax)

static bool trans_SM3SS1(DisasContext *s, arg_SM3SS1 *a)
{
    if (!dc_isar_feature(aa64_sm3, s)) {
        return false;
    }
    if (fp_access_check(s)) {
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_op3 = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        unsigned vsz, dofs;

        read_vec_element_i32(s, tcg_op1, a->rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, a->rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, a->ra, 3, MO_32);

        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        /* Clear the whole register first, then store bits [127:96]. */
        vsz = vec_full_reg_size(s);
        dofs = vec_full_reg_offset(s, a->rd);
        tcg_gen_gvec_dup_imm(MO_64, dofs, vsz, vsz, 0);
        write_vec_element_i32(s, tcg_res, a->rd, 3, MO_32);
    }
    return true;
}
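
/*
 * The rotate-rights above implement left-rotates: rotri by 20 is
 * rol32(x, 12) and rotri by 25 is rol32(x, 7), matching the SM3
 * SS1 = ROL32(ROL32(n, 12) + m + a, 7) computation.
 */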

static bool do_crypto3i(DisasContext *s, arg_crypto3i *a, gen_helper_gvec_3 *fn)
{
    if (fp_access_check(s)) {
        gen_gvec_op3_ool(s, true, a->rd, a->rn, a->rm, a->imm, fn);
    }
    return true;
}
TRANS_FEAT(SM3TT1A, aa64_sm3, do_crypto3i, a, gen_helper_crypto_sm3tt1a)
TRANS_FEAT(SM3TT1B, aa64_sm3, do_crypto3i, a, gen_helper_crypto_sm3tt1b)
TRANS_FEAT(SM3TT2A, aa64_sm3, do_crypto3i, a, gen_helper_crypto_sm3tt2a)
TRANS_FEAT(SM3TT2B, aa64_sm3, do_crypto3i, a, gen_helper_crypto_sm3tt2b)

static bool trans_XAR(DisasContext *s, arg_XAR *a)
{
    if (!dc_isar_feature(aa64_sha3, s)) {
        return false;
    }
    if (fp_access_check(s)) {
        gen_gvec_xar(MO_64, vec_full_reg_offset(s, a->rd),
                     vec_full_reg_offset(s, a->rn),
                     vec_full_reg_offset(s, a->rm), a->imm, 16,
                     vec_full_reg_size(s));
    }
    return true;
}

/*
 * Advanced SIMD copy
 */

static bool decode_esz_idx(int imm, MemOp *pesz, unsigned *pidx)
{
    unsigned esz = ctz32(imm);

    if (esz > MO_64) {
        return false;
    }
    *pesz = esz;
    *pidx = imm >> (esz + 1);
    return true;
}
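
/*
 * For example (illustrative), an imm5 of 0b00110 has ctz32 == 1, so
 * esz = MO_16 and idx = 0b00110 >> 2 = 1: the second 16-bit element.
 */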

static bool trans_DUP_element_s(DisasContext *s, arg_DUP_element_s *a)
{
    unsigned idx;
    MemOp esz;

    if (!decode_esz_idx(a->imm, &esz, &idx)) {
        return false;
    }
    if (fp_access_check(s)) {
        /*
         * This instruction just extracts the specified element and
         * zero-extends it into the bottom of the destination register.
         */
        TCGv_i64 tmp = tcg_temp_new_i64();
        read_vec_element(s, tmp, a->rn, idx, esz);
        write_fp_dreg(s, a->rd, tmp);
    }
    return true;
}

static bool trans_DUP_element_v(DisasContext *s, arg_DUP_element_v *a)
{
    unsigned idx;
    MemOp esz;

    if (!decode_esz_idx(a->imm, &esz, &idx)) {
        return false;
    }
    if (esz == MO_64 && !a->q) {
        return false;
    }
    if (fp_access_check(s)) {
        tcg_gen_gvec_dup_mem(esz, vec_full_reg_offset(s, a->rd),
                             vec_reg_offset(s, a->rn, idx, esz),
                             a->q ? 16 : 8, vec_full_reg_size(s));
    }
    return true;
}

static bool trans_DUP_general(DisasContext *s, arg_DUP_general *a)
{
    unsigned idx;
    MemOp esz;

    if (!decode_esz_idx(a->imm, &esz, &idx)) {
        return false;
    }
    if (esz == MO_64 && !a->q) {
        return false;
    }
    if (fp_access_check(s)) {
        tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd),
                             a->q ? 16 : 8, vec_full_reg_size(s),
                             cpu_reg(s, a->rn));
    }
    return true;
}

static bool do_smov_umov(DisasContext *s, arg_SMOV *a, MemOp is_signed)
{
    unsigned idx;
    MemOp esz;

    if (!decode_esz_idx(a->imm, &esz, &idx)) {
        return false;
    }
    if (is_signed) {
        if (esz == MO_64 || (esz == MO_32 && !a->q)) {
            return false;
        }
    } else {
        if (esz == MO_64 ? !a->q : a->q) {
            return false;
        }
    }
    if (fp_access_check(s)) {
        TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
        read_vec_element(s, tcg_rd, a->rn, idx, esz | is_signed);
        if (is_signed && !a->q) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
    }
    return true;
}

TRANS(SMOV, do_smov_umov, a, MO_SIGN)
TRANS(UMOV, do_smov_umov, a, 0)

static bool trans_INS_general(DisasContext *s, arg_INS_general *a)
{
    unsigned idx;
    MemOp esz;

    if (!decode_esz_idx(a->imm, &esz, &idx)) {
        return false;
    }
    if (fp_access_check(s)) {
        write_vec_element(s, cpu_reg(s, a->rn), a->rd, idx, esz);
        clear_vec_high(s, true, a->rd);
    }
    return true;
}

static bool trans_INS_element(DisasContext *s, arg_INS_element *a)
{
    MemOp esz;
    unsigned didx, sidx;

    if (!decode_esz_idx(a->di, &esz, &didx)) {
        return false;
    }
    sidx = a->si >> esz;
    if (fp_access_check(s)) {
        TCGv_i64 tmp = tcg_temp_new_i64();

        read_vec_element(s, tmp, a->rn, sidx, esz);
        write_vec_element(s, tmp, a->rd, didx, esz);

        /* INS is considered a 128-bit write for SVE. */
        clear_vec_high(s, true, a->rd);
    }
    return true;
}

/*
 * Advanced SIMD three same
 */

typedef struct FPScalar {
    void (*gen_h)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
    void (*gen_s)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
    void (*gen_d)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
} FPScalar;

static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f)
{
    switch (a->esz) {
    case MO_64:
        if (fp_access_check(s)) {
            TCGv_i64 t0 = read_fp_dreg(s, a->rn);
            TCGv_i64 t1 = read_fp_dreg(s, a->rm);
            f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
            write_fp_dreg(s, a->rd, t0);
        }
        break;
    case MO_32:
        if (fp_access_check(s)) {
            TCGv_i32 t0 = read_fp_sreg(s, a->rn);
            TCGv_i32 t1 = read_fp_sreg(s, a->rm);
            f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
            write_fp_sreg(s, a->rd, t0);
        }
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        if (fp_access_check(s)) {
            TCGv_i32 t0 = read_fp_hreg(s, a->rn);
            TCGv_i32 t1 = read_fp_hreg(s, a->rm);
            f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_FPCR_F16));
            write_fp_sreg(s, a->rd, t0);
        }
        break;
    default:
        return false;
    }
    return true;
}
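
/*
 * Each scalar FP operation below supplies one FPScalar table holding
 * its half-, single- and double-precision helpers; do_fp3_scalar()
 * dispatches on a->esz and picks the matching FP status pointer.
 */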

static const FPScalar f_scalar_fadd = {
    gen_helper_vfp_addh,
    gen_helper_vfp_adds,
    gen_helper_vfp_addd,
};
TRANS(FADD_s, do_fp3_scalar, a, &f_scalar_fadd)

static const FPScalar f_scalar_fsub = {
    gen_helper_vfp_subh,
    gen_helper_vfp_subs,
    gen_helper_vfp_subd,
};
TRANS(FSUB_s, do_fp3_scalar, a, &f_scalar_fsub)

static const FPScalar f_scalar_fdiv = {
    gen_helper_vfp_divh,
    gen_helper_vfp_divs,
    gen_helper_vfp_divd,
};
TRANS(FDIV_s, do_fp3_scalar, a, &f_scalar_fdiv)

static const FPScalar f_scalar_fmul = {
    gen_helper_vfp_mulh,
    gen_helper_vfp_muls,
    gen_helper_vfp_muld,
};
TRANS(FMUL_s, do_fp3_scalar, a, &f_scalar_fmul)

static const FPScalar f_scalar_fmax = {
    gen_helper_advsimd_maxh,
    gen_helper_vfp_maxs,
    gen_helper_vfp_maxd,
};
TRANS(FMAX_s, do_fp3_scalar, a, &f_scalar_fmax)

static const FPScalar f_scalar_fmin = {
    gen_helper_advsimd_minh,
    gen_helper_vfp_mins,
    gen_helper_vfp_mind,
};
TRANS(FMIN_s, do_fp3_scalar, a, &f_scalar_fmin)

static const FPScalar f_scalar_fmaxnm = {
    gen_helper_advsimd_maxnumh,
    gen_helper_vfp_maxnums,
    gen_helper_vfp_maxnumd,
};
TRANS(FMAXNM_s, do_fp3_scalar, a, &f_scalar_fmaxnm)

static const FPScalar f_scalar_fminnm = {
    gen_helper_advsimd_minnumh,
    gen_helper_vfp_minnums,
    gen_helper_vfp_minnumd,
};
TRANS(FMINNM_s, do_fp3_scalar, a, &f_scalar_fminnm)

static const FPScalar f_scalar_fmulx = {
    gen_helper_advsimd_mulxh,
    gen_helper_vfp_mulxs,
    gen_helper_vfp_mulxd,
};
TRANS(FMULX_s, do_fp3_scalar, a, &f_scalar_fmulx)

static void gen_fnmul_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s)
{
    gen_helper_vfp_mulh(d, n, m, s);
    gen_vfp_negh(d, d);
}

static void gen_fnmul_s(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s)
{
    gen_helper_vfp_muls(d, n, m, s);
    gen_vfp_negs(d, d);
}

static void gen_fnmul_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s)
{
    gen_helper_vfp_muld(d, n, m, s);
    gen_vfp_negd(d, d);
}

static const FPScalar f_scalar_fnmul = {
    gen_fnmul_h,
    gen_fnmul_s,
    gen_fnmul_d,
};
TRANS(FNMUL_s, do_fp3_scalar, a, &f_scalar_fnmul)

static const FPScalar f_scalar_fcmeq = {
    gen_helper_advsimd_ceq_f16,
    gen_helper_neon_ceq_f32,
    gen_helper_neon_ceq_f64,
};
TRANS(FCMEQ_s, do_fp3_scalar, a, &f_scalar_fcmeq)

static const FPScalar f_scalar_fcmge = {
    gen_helper_advsimd_cge_f16,
    gen_helper_neon_cge_f32,
    gen_helper_neon_cge_f64,
};
TRANS(FCMGE_s, do_fp3_scalar, a, &f_scalar_fcmge)

static const FPScalar f_scalar_fcmgt = {
    gen_helper_advsimd_cgt_f16,
    gen_helper_neon_cgt_f32,
    gen_helper_neon_cgt_f64,
};
TRANS(FCMGT_s, do_fp3_scalar, a, &f_scalar_fcmgt)

static const FPScalar f_scalar_facge = {
    gen_helper_advsimd_acge_f16,
    gen_helper_neon_acge_f32,
    gen_helper_neon_acge_f64,
};
TRANS(FACGE_s, do_fp3_scalar, a, &f_scalar_facge)

static const FPScalar f_scalar_facgt = {
    gen_helper_advsimd_acgt_f16,
    gen_helper_neon_acgt_f32,
    gen_helper_neon_acgt_f64,
};
TRANS(FACGT_s, do_fp3_scalar, a, &f_scalar_facgt)

static void gen_fabd_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s)
{
    gen_helper_vfp_subh(d, n, m, s);
    gen_vfp_absh(d, d);
}

static void gen_fabd_s(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s)
{
    gen_helper_vfp_subs(d, n, m, s);
    gen_vfp_abss(d, d);
}

static void gen_fabd_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s)
{
    gen_helper_vfp_subd(d, n, m, s);
    gen_vfp_absd(d, d);
}

static const FPScalar f_scalar_fabd = {
    gen_fabd_h,
    gen_fabd_s,
    gen_fabd_d,
};
TRANS(FABD_s, do_fp3_scalar, a, &f_scalar_fabd)

static const FPScalar f_scalar_frecps = {
    gen_helper_recpsf_f16,
    gen_helper_recpsf_f32,
    gen_helper_recpsf_f64,
};
TRANS(FRECPS_s, do_fp3_scalar, a, &f_scalar_frecps)

static const FPScalar f_scalar_frsqrts = {
    gen_helper_rsqrtsf_f16,
    gen_helper_rsqrtsf_f32,
    gen_helper_rsqrtsf_f64,
};
TRANS(FRSQRTS_s, do_fp3_scalar, a, &f_scalar_frsqrts)

static bool do_satacc_s(DisasContext *s, arg_rrr_e *a,
                        MemOp sgn_n, MemOp sgn_m,
                        void (*gen_bhs)(TCGv_i64, TCGv_i64, TCGv_i64,
                                        TCGv_i64, MemOp),
                        void (*gen_d)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0, t1, t2, qc;
    MemOp esz = a->esz;

    if (!fp_access_check(s)) {
        return true;
    }

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    qc = tcg_temp_new_i64();
    read_vec_element(s, t1, a->rn, 0, esz | sgn_n);
    read_vec_element(s, t2, a->rm, 0, esz | sgn_m);
    tcg_gen_ld_i64(qc, tcg_env, offsetof(CPUARMState, vfp.qc));

    if (esz == MO_64) {
        gen_d(t0, qc, t1, t2);
    } else {
        gen_bhs(t0, qc, t1, t2, esz);
        tcg_gen_ext_i64(t0, t0, esz);
    }

    write_fp_dreg(s, a->rd, t0);
    tcg_gen_st_i64(qc, tcg_env, offsetof(CPUARMState, vfp.qc));
    return true;
}

TRANS(SQADD_s, do_satacc_s, a, MO_SIGN, MO_SIGN, gen_sqadd_bhs, gen_sqadd_d)
TRANS(SQSUB_s, do_satacc_s, a, MO_SIGN, MO_SIGN, gen_sqsub_bhs, gen_sqsub_d)
TRANS(UQADD_s, do_satacc_s, a, 0, 0, gen_uqadd_bhs, gen_uqadd_d)
TRANS(UQSUB_s, do_satacc_s, a, 0, 0, gen_uqsub_bhs, gen_uqsub_d)
TRANS(SUQADD_s, do_satacc_s, a, MO_SIGN, 0, gen_suqadd_bhs, gen_suqadd_d)
TRANS(USQADD_s, do_satacc_s, a, 0, MO_SIGN, gen_usqadd_bhs, gen_usqadd_d)
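
/*
 * The saturation helpers accumulate their sticky saturation status in
 * the qc temporary, which is loaded from and stored back to vfp.qc so
 * that FPSR.QC is set if the operation saturated.
 */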

static bool do_int3_scalar_d(DisasContext *s, arg_rrr_e *a,
                             void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    if (fp_access_check(s)) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        read_vec_element(s, t0, a->rn, 0, MO_64);
        read_vec_element(s, t1, a->rm, 0, MO_64);
        fn(t0, t0, t1);
        write_fp_dreg(s, a->rd, t0);
    }
    return true;
}

TRANS(SSHL_s, do_int3_scalar_d, a, gen_sshl_i64)
TRANS(USHL_s, do_int3_scalar_d, a, gen_ushl_i64)
TRANS(SRSHL_s, do_int3_scalar_d, a, gen_helper_neon_rshl_s64)
TRANS(URSHL_s, do_int3_scalar_d, a, gen_helper_neon_rshl_u64)
TRANS(ADD_s, do_int3_scalar_d, a, tcg_gen_add_i64)
TRANS(SUB_s, do_int3_scalar_d, a, tcg_gen_sub_i64)

typedef struct ENVScalar2 {
    NeonGenTwoOpEnvFn *gen_bhs[3];
    NeonGenTwo64OpEnvFn *gen_d;
} ENVScalar2;

static bool do_env_scalar2(DisasContext *s, arg_rrr_e *a, const ENVScalar2 *f)
{
    if (!fp_access_check(s)) {
        return true;
    }
    if (a->esz == MO_64) {
        TCGv_i64 t0 = read_fp_dreg(s, a->rn);
        TCGv_i64 t1 = read_fp_dreg(s, a->rm);
        f->gen_d(t0, tcg_env, t0, t1);
        write_fp_dreg(s, a->rd, t0);
    } else {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();

        read_vec_element_i32(s, t0, a->rn, 0, a->esz);
        read_vec_element_i32(s, t1, a->rm, 0, a->esz);
        f->gen_bhs[a->esz](t0, tcg_env, t0, t1);
        write_fp_sreg(s, a->rd, t0);
    }
    return true;
}

static const ENVScalar2 f_scalar_sqshl = {
    { gen_helper_neon_qshl_s8,
      gen_helper_neon_qshl_s16,
      gen_helper_neon_qshl_s32 },
    gen_helper_neon_qshl_s64,
};
TRANS(SQSHL_s, do_env_scalar2, a, &f_scalar_sqshl)

static const ENVScalar2 f_scalar_uqshl = {
    { gen_helper_neon_qshl_u8,
      gen_helper_neon_qshl_u16,
      gen_helper_neon_qshl_u32 },
    gen_helper_neon_qshl_u64,
};
TRANS(UQSHL_s, do_env_scalar2, a, &f_scalar_uqshl)

static const ENVScalar2 f_scalar_sqrshl = {
    { gen_helper_neon_qrshl_s8,
      gen_helper_neon_qrshl_s16,
      gen_helper_neon_qrshl_s32 },
    gen_helper_neon_qrshl_s64,
};
TRANS(SQRSHL_s, do_env_scalar2, a, &f_scalar_sqrshl)

static const ENVScalar2 f_scalar_uqrshl = {
    { gen_helper_neon_qrshl_u8,
      gen_helper_neon_qrshl_u16,
      gen_helper_neon_qrshl_u32 },
    gen_helper_neon_qrshl_u64,
};
TRANS(UQRSHL_s, do_env_scalar2, a, &f_scalar_uqrshl)

static bool do_env_scalar2_hs(DisasContext *s, arg_rrr_e *a,
                              const ENVScalar2 *f)
{
    if (a->esz == MO_16 || a->esz == MO_32) {
        return do_env_scalar2(s, a, f);
    }
    return false;
}

static const ENVScalar2 f_scalar_sqdmulh = {
    { NULL, gen_helper_neon_qdmulh_s16, gen_helper_neon_qdmulh_s32 }
};
TRANS(SQDMULH_s, do_env_scalar2_hs, a, &f_scalar_sqdmulh)

static const ENVScalar2 f_scalar_sqrdmulh = {
    { NULL, gen_helper_neon_qrdmulh_s16, gen_helper_neon_qrdmulh_s32 }
};
TRANS(SQRDMULH_s, do_env_scalar2_hs, a, &f_scalar_sqrdmulh)

typedef struct ENVScalar3 {
    NeonGenThreeOpEnvFn *gen_hs[2];
} ENVScalar3;

static bool do_env_scalar3_hs(DisasContext *s, arg_rrr_e *a,
                              const ENVScalar3 *f)
{
    TCGv_i32 t0, t1, t2;

    if (a->esz != MO_16 && a->esz != MO_32) {
        return false;
    }
    if (!fp_access_check(s)) {
        return true;
    }

    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    t2 = tcg_temp_new_i32();
    read_vec_element_i32(s, t0, a->rn, 0, a->esz);
    read_vec_element_i32(s, t1, a->rm, 0, a->esz);
    read_vec_element_i32(s, t2, a->rd, 0, a->esz);
    f->gen_hs[a->esz - 1](t0, tcg_env, t0, t1, t2);
    write_fp_sreg(s, a->rd, t0);
    return true;
}

static const ENVScalar3 f_scalar_sqrdmlah = {
    { gen_helper_neon_qrdmlah_s16, gen_helper_neon_qrdmlah_s32 }
};
TRANS_FEAT(SQRDMLAH_s, aa64_rdm, do_env_scalar3_hs, a, &f_scalar_sqrdmlah)

static const ENVScalar3 f_scalar_sqrdmlsh = {
    { gen_helper_neon_qrdmlsh_s16, gen_helper_neon_qrdmlsh_s32 }
};
TRANS_FEAT(SQRDMLSH_s, aa64_rdm, do_env_scalar3_hs, a, &f_scalar_sqrdmlsh)

static bool do_cmop_d(DisasContext *s, arg_rrr_e *a, TCGCond cond)
{
    if (fp_access_check(s)) {
        TCGv_i64 t0 = read_fp_dreg(s, a->rn);
        TCGv_i64 t1 = read_fp_dreg(s, a->rm);
        tcg_gen_negsetcond_i64(cond, t0, t0, t1);
        write_fp_dreg(s, a->rd, t0);
    }
    return true;
}

TRANS(CMGT_s, do_cmop_d, a, TCG_COND_GT)
TRANS(CMHI_s, do_cmop_d, a, TCG_COND_GTU)
TRANS(CMGE_s, do_cmop_d, a, TCG_COND_GE)
TRANS(CMHS_s, do_cmop_d, a, TCG_COND_GEU)
TRANS(CMEQ_s, do_cmop_d, a, TCG_COND_EQ)
TRANS(CMTST_s, do_cmop_d, a, TCG_COND_TSTNE)
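
/*
 * tcg_gen_negsetcond_i64 produces 0 or -1, i.e. an all-zeros or
 * all-ones mask, which is exactly the AdvSIMD integer compare result.
 */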

static bool do_fp3_vector(DisasContext *s, arg_qrrr_e *a,
                          gen_helper_gvec_3_ptr * const fns[3])
{
    MemOp esz = a->esz;

    switch (esz) {
    case MO_64:
        if (!a->q) {
            return false;
        }
        break;
    case MO_32:
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        break;
    default:
        return false;
    }
    if (fp_access_check(s)) {
        gen_gvec_op3_fpst(s, a->q, a->rd, a->rn, a->rm,
                          esz == MO_16, 0, fns[esz - 1]);
    }
    return true;
}
* const f_vector_fadd
[3] = {
5322 gen_helper_gvec_fadd_h
,
5323 gen_helper_gvec_fadd_s
,
5324 gen_helper_gvec_fadd_d
,
5326 TRANS(FADD_v
, do_fp3_vector
, a
, f_vector_fadd
)
5328 static gen_helper_gvec_3_ptr
* const f_vector_fsub
[3] = {
5329 gen_helper_gvec_fsub_h
,
5330 gen_helper_gvec_fsub_s
,
5331 gen_helper_gvec_fsub_d
,
5333 TRANS(FSUB_v
, do_fp3_vector
, a
, f_vector_fsub
)
5335 static gen_helper_gvec_3_ptr
* const f_vector_fdiv
[3] = {
5336 gen_helper_gvec_fdiv_h
,
5337 gen_helper_gvec_fdiv_s
,
5338 gen_helper_gvec_fdiv_d
,
5340 TRANS(FDIV_v
, do_fp3_vector
, a
, f_vector_fdiv
)
5342 static gen_helper_gvec_3_ptr
* const f_vector_fmul
[3] = {
5343 gen_helper_gvec_fmul_h
,
5344 gen_helper_gvec_fmul_s
,
5345 gen_helper_gvec_fmul_d
,
5347 TRANS(FMUL_v
, do_fp3_vector
, a
, f_vector_fmul
)
5349 static gen_helper_gvec_3_ptr
* const f_vector_fmax
[3] = {
5350 gen_helper_gvec_fmax_h
,
5351 gen_helper_gvec_fmax_s
,
5352 gen_helper_gvec_fmax_d
,
5354 TRANS(FMAX_v
, do_fp3_vector
, a
, f_vector_fmax
)
5356 static gen_helper_gvec_3_ptr
* const f_vector_fmin
[3] = {
5357 gen_helper_gvec_fmin_h
,
5358 gen_helper_gvec_fmin_s
,
5359 gen_helper_gvec_fmin_d
,
5361 TRANS(FMIN_v
, do_fp3_vector
, a
, f_vector_fmin
)
5363 static gen_helper_gvec_3_ptr
* const f_vector_fmaxnm
[3] = {
5364 gen_helper_gvec_fmaxnum_h
,
5365 gen_helper_gvec_fmaxnum_s
,
5366 gen_helper_gvec_fmaxnum_d
,
5368 TRANS(FMAXNM_v
, do_fp3_vector
, a
, f_vector_fmaxnm
)
5370 static gen_helper_gvec_3_ptr
* const f_vector_fminnm
[3] = {
5371 gen_helper_gvec_fminnum_h
,
5372 gen_helper_gvec_fminnum_s
,
5373 gen_helper_gvec_fminnum_d
,
5375 TRANS(FMINNM_v
, do_fp3_vector
, a
, f_vector_fminnm
)
5377 static gen_helper_gvec_3_ptr
* const f_vector_fmulx
[3] = {
5378 gen_helper_gvec_fmulx_h
,
5379 gen_helper_gvec_fmulx_s
,
5380 gen_helper_gvec_fmulx_d
,
5382 TRANS(FMULX_v
, do_fp3_vector
, a
, f_vector_fmulx
)
5384 static gen_helper_gvec_3_ptr
* const f_vector_fmla
[3] = {
5385 gen_helper_gvec_vfma_h
,
5386 gen_helper_gvec_vfma_s
,
5387 gen_helper_gvec_vfma_d
,
5389 TRANS(FMLA_v
, do_fp3_vector
, a
, f_vector_fmla
)
5391 static gen_helper_gvec_3_ptr
* const f_vector_fmls
[3] = {
5392 gen_helper_gvec_vfms_h
,
5393 gen_helper_gvec_vfms_s
,
5394 gen_helper_gvec_vfms_d
,
5396 TRANS(FMLS_v
, do_fp3_vector
, a
, f_vector_fmls
)
5398 static gen_helper_gvec_3_ptr
* const f_vector_fcmeq
[3] = {
5399 gen_helper_gvec_fceq_h
,
5400 gen_helper_gvec_fceq_s
,
5401 gen_helper_gvec_fceq_d
,
5403 TRANS(FCMEQ_v
, do_fp3_vector
, a
, f_vector_fcmeq
)
5405 static gen_helper_gvec_3_ptr
* const f_vector_fcmge
[3] = {
5406 gen_helper_gvec_fcge_h
,
5407 gen_helper_gvec_fcge_s
,
5408 gen_helper_gvec_fcge_d
,
5410 TRANS(FCMGE_v
, do_fp3_vector
, a
, f_vector_fcmge
)
5412 static gen_helper_gvec_3_ptr
* const f_vector_fcmgt
[3] = {
5413 gen_helper_gvec_fcgt_h
,
5414 gen_helper_gvec_fcgt_s
,
5415 gen_helper_gvec_fcgt_d
,
5417 TRANS(FCMGT_v
, do_fp3_vector
, a
, f_vector_fcmgt
)
5419 static gen_helper_gvec_3_ptr
* const f_vector_facge
[3] = {
5420 gen_helper_gvec_facge_h
,
5421 gen_helper_gvec_facge_s
,
5422 gen_helper_gvec_facge_d
,
5424 TRANS(FACGE_v
, do_fp3_vector
, a
, f_vector_facge
)
5426 static gen_helper_gvec_3_ptr
* const f_vector_facgt
[3] = {
5427 gen_helper_gvec_facgt_h
,
5428 gen_helper_gvec_facgt_s
,
5429 gen_helper_gvec_facgt_d
,
5431 TRANS(FACGT_v
, do_fp3_vector
, a
, f_vector_facgt
)
5433 static gen_helper_gvec_3_ptr
* const f_vector_fabd
[3] = {
5434 gen_helper_gvec_fabd_h
,
5435 gen_helper_gvec_fabd_s
,
5436 gen_helper_gvec_fabd_d
,
5438 TRANS(FABD_v
, do_fp3_vector
, a
, f_vector_fabd
)
5440 static gen_helper_gvec_3_ptr
* const f_vector_frecps
[3] = {
5441 gen_helper_gvec_recps_h
,
5442 gen_helper_gvec_recps_s
,
5443 gen_helper_gvec_recps_d
,
5445 TRANS(FRECPS_v
, do_fp3_vector
, a
, f_vector_frecps
)
5447 static gen_helper_gvec_3_ptr
* const f_vector_frsqrts
[3] = {
5448 gen_helper_gvec_rsqrts_h
,
5449 gen_helper_gvec_rsqrts_s
,
5450 gen_helper_gvec_rsqrts_d
,
5452 TRANS(FRSQRTS_v
, do_fp3_vector
, a
, f_vector_frsqrts
)
5454 static gen_helper_gvec_3_ptr
* const f_vector_faddp
[3] = {
5455 gen_helper_gvec_faddp_h
,
5456 gen_helper_gvec_faddp_s
,
5457 gen_helper_gvec_faddp_d
,
5459 TRANS(FADDP_v
, do_fp3_vector
, a
, f_vector_faddp
)
5461 static gen_helper_gvec_3_ptr
* const f_vector_fmaxp
[3] = {
5462 gen_helper_gvec_fmaxp_h
,
5463 gen_helper_gvec_fmaxp_s
,
5464 gen_helper_gvec_fmaxp_d
,
5466 TRANS(FMAXP_v
, do_fp3_vector
, a
, f_vector_fmaxp
)
5468 static gen_helper_gvec_3_ptr
* const f_vector_fminp
[3] = {
5469 gen_helper_gvec_fminp_h
,
5470 gen_helper_gvec_fminp_s
,
5471 gen_helper_gvec_fminp_d
,
5473 TRANS(FMINP_v
, do_fp3_vector
, a
, f_vector_fminp
)
5475 static gen_helper_gvec_3_ptr
* const f_vector_fmaxnmp
[3] = {
5476 gen_helper_gvec_fmaxnump_h
,
5477 gen_helper_gvec_fmaxnump_s
,
5478 gen_helper_gvec_fmaxnump_d
,
5480 TRANS(FMAXNMP_v
, do_fp3_vector
, a
, f_vector_fmaxnmp
)
5482 static gen_helper_gvec_3_ptr
* const f_vector_fminnmp
[3] = {
5483 gen_helper_gvec_fminnump_h
,
5484 gen_helper_gvec_fminnump_s
,
5485 gen_helper_gvec_fminnump_d
,
5487 TRANS(FMINNMP_v
, do_fp3_vector
, a
, f_vector_fminnmp
)

static bool do_fmlal(DisasContext *s, arg_qrrr_e *a, bool is_s, bool is_2)
{
    if (fp_access_check(s)) {
        int data = (is_2 << 1) | is_s;
        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm), tcg_env,
                           a->q ? 16 : 8, vec_full_reg_size(s),
                           data, gen_helper_gvec_fmlal_a64);
    }
    return true;
}

TRANS_FEAT(FMLAL_v, aa64_fhm, do_fmlal, a, false, false)
TRANS_FEAT(FMLSL_v, aa64_fhm, do_fmlal, a, true, false)
TRANS_FEAT(FMLAL2_v, aa64_fhm, do_fmlal, a, false, true)
TRANS_FEAT(FMLSL2_v, aa64_fhm, do_fmlal, a, true, true)

TRANS(ADDP_v, do_gvec_fn3, a, gen_gvec_addp)
TRANS(SMAXP_v, do_gvec_fn3_no64, a, gen_gvec_smaxp)
TRANS(SMINP_v, do_gvec_fn3_no64, a, gen_gvec_sminp)
TRANS(UMAXP_v, do_gvec_fn3_no64, a, gen_gvec_umaxp)
TRANS(UMINP_v, do_gvec_fn3_no64, a, gen_gvec_uminp)

TRANS(AND_v, do_gvec_fn3, a, tcg_gen_gvec_and)
TRANS(BIC_v, do_gvec_fn3, a, tcg_gen_gvec_andc)
TRANS(ORR_v, do_gvec_fn3, a, tcg_gen_gvec_or)
TRANS(ORN_v, do_gvec_fn3, a, tcg_gen_gvec_orc)
TRANS(EOR_v, do_gvec_fn3, a, tcg_gen_gvec_xor)

static bool do_bitsel(DisasContext *s, bool is_q, int d, int a, int b, int c)
{
    if (fp_access_check(s)) {
        gen_gvec_fn4(s, is_q, d, a, b, c, tcg_gen_gvec_bitsel, 0);
    }
    return true;
}

TRANS(BSL_v, do_bitsel, a->q, a->rd, a->rd, a->rn, a->rm)
TRANS(BIT_v, do_bitsel, a->q, a->rd, a->rm, a->rn, a->rd)
TRANS(BIF_v, do_bitsel, a->q, a->rd, a->rm, a->rd, a->rn)
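
/*
 * tcg_gen_gvec_bitsel(d, a, b, c) computes d = (b & a) | (c & ~a), so
 * the first source operand is the selector: BSL selects by Rd while
 * BIT and BIF select by Rm, matching the operand orders above.
 */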

TRANS(SQADD_v, do_gvec_fn3, a, gen_gvec_sqadd_qc)
TRANS(UQADD_v, do_gvec_fn3, a, gen_gvec_uqadd_qc)
TRANS(SQSUB_v, do_gvec_fn3, a, gen_gvec_sqsub_qc)
TRANS(UQSUB_v, do_gvec_fn3, a, gen_gvec_uqsub_qc)
TRANS(SUQADD_v, do_gvec_fn3, a, gen_gvec_suqadd_qc)
TRANS(USQADD_v, do_gvec_fn3, a, gen_gvec_usqadd_qc)

TRANS(SSHL_v, do_gvec_fn3, a, gen_gvec_sshl)
TRANS(USHL_v, do_gvec_fn3, a, gen_gvec_ushl)
TRANS(SRSHL_v, do_gvec_fn3, a, gen_gvec_srshl)
TRANS(URSHL_v, do_gvec_fn3, a, gen_gvec_urshl)
TRANS(SQSHL_v, do_gvec_fn3, a, gen_neon_sqshl)
TRANS(UQSHL_v, do_gvec_fn3, a, gen_neon_uqshl)
TRANS(SQRSHL_v, do_gvec_fn3, a, gen_neon_sqrshl)
TRANS(UQRSHL_v, do_gvec_fn3, a, gen_neon_uqrshl)

TRANS(ADD_v, do_gvec_fn3, a, tcg_gen_gvec_add)
TRANS(SUB_v, do_gvec_fn3, a, tcg_gen_gvec_sub)
TRANS(SHADD_v, do_gvec_fn3_no64, a, gen_gvec_shadd)
TRANS(UHADD_v, do_gvec_fn3_no64, a, gen_gvec_uhadd)
TRANS(SHSUB_v, do_gvec_fn3_no64, a, gen_gvec_shsub)
TRANS(UHSUB_v, do_gvec_fn3_no64, a, gen_gvec_uhsub)
TRANS(SRHADD_v, do_gvec_fn3_no64, a, gen_gvec_srhadd)
TRANS(URHADD_v, do_gvec_fn3_no64, a, gen_gvec_urhadd)
TRANS(SMAX_v, do_gvec_fn3_no64, a, tcg_gen_gvec_smax)
TRANS(UMAX_v, do_gvec_fn3_no64, a, tcg_gen_gvec_umax)
TRANS(SMIN_v, do_gvec_fn3_no64, a, tcg_gen_gvec_smin)
TRANS(UMIN_v, do_gvec_fn3_no64, a, tcg_gen_gvec_umin)
TRANS(SABA_v, do_gvec_fn3_no64, a, gen_gvec_saba)
TRANS(UABA_v, do_gvec_fn3_no64, a, gen_gvec_uaba)
TRANS(SABD_v, do_gvec_fn3_no64, a, gen_gvec_sabd)
TRANS(UABD_v, do_gvec_fn3_no64, a, gen_gvec_uabd)
TRANS(MUL_v, do_gvec_fn3_no64, a, tcg_gen_gvec_mul)
TRANS(PMUL_v, do_gvec_op3_ool, a, 0, gen_helper_gvec_pmul_b)
TRANS(MLA_v, do_gvec_fn3_no64, a, gen_gvec_mla)
TRANS(MLS_v, do_gvec_fn3_no64, a, gen_gvec_mls)

static bool do_cmop_v(DisasContext *s, arg_qrrr_e *a, TCGCond cond)
{
    if (a->esz == MO_64 && !a->q) {
        return false;
    }
    if (fp_access_check(s)) {
        tcg_gen_gvec_cmp(cond, a->esz,
                         vec_full_reg_offset(s, a->rd),
                         vec_full_reg_offset(s, a->rn),
                         vec_full_reg_offset(s, a->rm),
                         a->q ? 16 : 8, vec_full_reg_size(s));
    }
    return true;
}

TRANS(CMGT_v, do_cmop_v, a, TCG_COND_GT)
TRANS(CMHI_v, do_cmop_v, a, TCG_COND_GTU)
TRANS(CMGE_v, do_cmop_v, a, TCG_COND_GE)
TRANS(CMHS_v, do_cmop_v, a, TCG_COND_GEU)
TRANS(CMEQ_v, do_cmop_v, a, TCG_COND_EQ)
TRANS(CMTST_v, do_gvec_fn3, a, gen_gvec_cmtst)

TRANS(SQDMULH_v, do_gvec_fn3_no8_no64, a, gen_gvec_sqdmulh_qc)
TRANS(SQRDMULH_v, do_gvec_fn3_no8_no64, a, gen_gvec_sqrdmulh_qc)
TRANS_FEAT(SQRDMLAH_v, aa64_rdm, do_gvec_fn3_no8_no64, a, gen_gvec_sqrdmlah_qc)
TRANS_FEAT(SQRDMLSH_v, aa64_rdm, do_gvec_fn3_no8_no64, a, gen_gvec_sqrdmlsh_qc)

static bool do_dot_vector(DisasContext *s, arg_qrrr_e *a,
                          gen_helper_gvec_4 *fn)
{
    if (fp_access_check(s)) {
        gen_gvec_op4_ool(s, a->q, a->rd, a->rn, a->rm, a->rd, 0, fn);
    }
    return true;
}

TRANS_FEAT(SDOT_v, aa64_dp, do_dot_vector, a, gen_helper_gvec_sdot_b)
TRANS_FEAT(UDOT_v, aa64_dp, do_dot_vector, a, gen_helper_gvec_udot_b)
TRANS_FEAT(USDOT_v, aa64_i8mm, do_dot_vector, a, gen_helper_gvec_usdot_b)
TRANS_FEAT(BFDOT_v, aa64_bf16, do_dot_vector, a, gen_helper_gvec_bfdot)

static bool trans_BFMLAL_v(DisasContext *s, arg_qrrr_e *a)
{
    if (!dc_isar_feature(aa64_bf16, s)) {
        return false;
    }
    if (fp_access_check(s)) {
        /* Q bit selects BFMLALB vs BFMLALT. */
        gen_gvec_op4_fpst(s, true, a->rd, a->rn, a->rm, a->rd, false, a->q,
                          gen_helper_gvec_bfmlal);
    }
    return true;
}

/*
 * Advanced SIMD scalar/vector x indexed element
 */

static bool do_fp3_scalar_idx(DisasContext *s, arg_rrx_e *a, const FPScalar *f)
{
    switch (a->esz) {
    case MO_64:
        if (fp_access_check(s)) {
            TCGv_i64 t0 = read_fp_dreg(s, a->rn);
            TCGv_i64 t1 = tcg_temp_new_i64();

            read_vec_element(s, t1, a->rm, a->idx, MO_64);
            f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
            write_fp_dreg(s, a->rd, t0);
        }
        break;
    case MO_32:
        if (fp_access_check(s)) {
            TCGv_i32 t0 = read_fp_sreg(s, a->rn);
            TCGv_i32 t1 = tcg_temp_new_i32();

            read_vec_element_i32(s, t1, a->rm, a->idx, MO_32);
            f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
            write_fp_sreg(s, a->rd, t0);
        }
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        if (fp_access_check(s)) {
            TCGv_i32 t0 = read_fp_hreg(s, a->rn);
            TCGv_i32 t1 = tcg_temp_new_i32();

            read_vec_element_i32(s, t1, a->rm, a->idx, MO_16);
            f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_FPCR_F16));
            write_fp_sreg(s, a->rd, t0);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

TRANS(FMUL_si, do_fp3_scalar_idx, a, &f_scalar_fmul)
TRANS(FMULX_si, do_fp3_scalar_idx, a, &f_scalar_fmulx)

static bool do_fmla_scalar_idx(DisasContext *s, arg_rrx_e *a, bool neg)
{
    switch (a->esz) {
    case MO_64:
        if (fp_access_check(s)) {
            TCGv_i64 t0 = read_fp_dreg(s, a->rd);
            TCGv_i64 t1 = read_fp_dreg(s, a->rn);
            TCGv_i64 t2 = tcg_temp_new_i64();

            read_vec_element(s, t2, a->rm, a->idx, MO_64);
            if (neg) {
                gen_vfp_negd(t1, t1);
            }
            gen_helper_vfp_muladdd(t0, t1, t2, t0, fpstatus_ptr(FPST_FPCR));
            write_fp_dreg(s, a->rd, t0);
        }
        break;
    case MO_32:
        if (fp_access_check(s)) {
            TCGv_i32 t0 = read_fp_sreg(s, a->rd);
            TCGv_i32 t1 = read_fp_sreg(s, a->rn);
            TCGv_i32 t2 = tcg_temp_new_i32();

            read_vec_element_i32(s, t2, a->rm, a->idx, MO_32);
            if (neg) {
                gen_vfp_negs(t1, t1);
            }
            gen_helper_vfp_muladds(t0, t1, t2, t0, fpstatus_ptr(FPST_FPCR));
            write_fp_sreg(s, a->rd, t0);
        }
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        if (fp_access_check(s)) {
            TCGv_i32 t0 = read_fp_hreg(s, a->rd);
            TCGv_i32 t1 = read_fp_hreg(s, a->rn);
            TCGv_i32 t2 = tcg_temp_new_i32();

            read_vec_element_i32(s, t2, a->rm, a->idx, MO_16);
            if (neg) {
                gen_vfp_negh(t1, t1);
            }
            gen_helper_advsimd_muladdh(t0, t1, t2, t0,
                                       fpstatus_ptr(FPST_FPCR_F16));
            write_fp_sreg(s, a->rd, t0);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

TRANS(FMLA_si, do_fmla_scalar_idx, a, false)
TRANS(FMLS_si, do_fmla_scalar_idx, a, true)

static bool do_env_scalar2_idx_hs(DisasContext *s, arg_rrx_e *a,
                                  const ENVScalar2 *f)
{
    if (a->esz < MO_16 || a->esz > MO_32) {
        return false;
    }
    if (fp_access_check(s)) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();

        read_vec_element_i32(s, t0, a->rn, 0, a->esz);
        read_vec_element_i32(s, t1, a->rm, a->idx, a->esz);
        f->gen_bhs[a->esz](t0, tcg_env, t0, t1);
        write_fp_sreg(s, a->rd, t0);
    }
    return true;
}

TRANS(SQDMULH_si, do_env_scalar2_idx_hs, a, &f_scalar_sqdmulh)
TRANS(SQRDMULH_si, do_env_scalar2_idx_hs, a, &f_scalar_sqrdmulh)

static bool do_env_scalar3_idx_hs(DisasContext *s, arg_rrx_e *a,
                                  const ENVScalar3 *f)
{
    if (a->esz < MO_16 || a->esz > MO_32) {
        return false;
    }
    if (fp_access_check(s)) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_temp_new_i32();

        read_vec_element_i32(s, t0, a->rn, 0, a->esz);
        read_vec_element_i32(s, t1, a->rm, a->idx, a->esz);
        read_vec_element_i32(s, t2, a->rd, 0, a->esz);
        f->gen_hs[a->esz - 1](t0, tcg_env, t0, t1, t2);
        write_fp_sreg(s, a->rd, t0);
    }
    return true;
}

TRANS_FEAT(SQRDMLAH_si, aa64_rdm, do_env_scalar3_idx_hs, a, &f_scalar_sqrdmlah)
TRANS_FEAT(SQRDMLSH_si, aa64_rdm, do_env_scalar3_idx_hs, a, &f_scalar_sqrdmlsh)

static bool do_fp3_vector_idx(DisasContext *s, arg_qrrx_e *a,
                              gen_helper_gvec_3_ptr * const fns[3])
{
    MemOp esz = a->esz;

    switch (esz) {
    case MO_64:
        if (!a->q) {
            return false;
        }
        break;
    case MO_32:
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }
    if (fp_access_check(s)) {
        gen_gvec_op3_fpst(s, a->q, a->rd, a->rn, a->rm,
                          esz == MO_16, a->idx, fns[esz - 1]);
    }
    return true;
}

static gen_helper_gvec_3_ptr * const f_vector_idx_fmul[3] = {
    gen_helper_gvec_fmul_idx_h,
    gen_helper_gvec_fmul_idx_s,
    gen_helper_gvec_fmul_idx_d,
};
TRANS(FMUL_vi, do_fp3_vector_idx, a, f_vector_idx_fmul)

static gen_helper_gvec_3_ptr * const f_vector_idx_fmulx[3] = {
    gen_helper_gvec_fmulx_idx_h,
    gen_helper_gvec_fmulx_idx_s,
    gen_helper_gvec_fmulx_idx_d,
};
TRANS(FMULX_vi, do_fp3_vector_idx, a, f_vector_idx_fmulx)

static bool do_fmla_vector_idx(DisasContext *s, arg_qrrx_e *a, bool neg)
{
    static gen_helper_gvec_4_ptr * const fns[3] = {
        gen_helper_gvec_fmla_idx_h,
        gen_helper_gvec_fmla_idx_s,
        gen_helper_gvec_fmla_idx_d,
    };
    MemOp esz = a->esz;

    switch (esz) {
    case MO_64:
        if (!a->q) {
            return false;
        }
        break;
    case MO_32:
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }
    if (fp_access_check(s)) {
        gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd,
                          esz == MO_16, (a->idx << 1) | neg,
                          fns[esz - 1]);
    }
    return true;
}

TRANS(FMLA_vi, do_fmla_vector_idx, a, false)
TRANS(FMLS_vi, do_fmla_vector_idx, a, true)

static bool do_fmlal_idx(DisasContext *s, arg_qrrx_e *a, bool is_s, bool is_2)
{
    if (fp_access_check(s)) {
        int data = (a->idx << 2) | (is_2 << 1) | is_s;
        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm), tcg_env,
                           a->q ? 16 : 8, vec_full_reg_size(s),
                           data, gen_helper_gvec_fmlal_idx_a64);
    }
    return true;
}

TRANS_FEAT(FMLAL_vi, aa64_fhm, do_fmlal_idx, a, false, false)
TRANS_FEAT(FMLSL_vi, aa64_fhm, do_fmlal_idx, a, true, false)
TRANS_FEAT(FMLAL2_vi, aa64_fhm, do_fmlal_idx, a, false, true)
TRANS_FEAT(FMLSL2_vi, aa64_fhm, do_fmlal_idx, a, true, true)

static bool do_int3_vector_idx(DisasContext *s, arg_qrrx_e *a,
                               gen_helper_gvec_3 * const fns[2])
{
    assert(a->esz == MO_16 || a->esz == MO_32);
    if (fp_access_check(s)) {
        gen_gvec_op3_ool(s, a->q, a->rd, a->rn, a->rm, a->idx, fns[a->esz - 1]);
    }
    return true;
}

static gen_helper_gvec_3 * const f_vector_idx_mul[2] = {
    gen_helper_gvec_mul_idx_h,
    gen_helper_gvec_mul_idx_s,
};
TRANS(MUL_vi, do_int3_vector_idx, a, f_vector_idx_mul)

static bool do_mla_vector_idx(DisasContext *s, arg_qrrx_e *a, bool sub)
{
    static gen_helper_gvec_4 * const fns[2][2] = {
        { gen_helper_gvec_mla_idx_h, gen_helper_gvec_mls_idx_h },
        { gen_helper_gvec_mla_idx_s, gen_helper_gvec_mls_idx_s },
    };

    assert(a->esz == MO_16 || a->esz == MO_32);
    if (fp_access_check(s)) {
        gen_gvec_op4_ool(s, a->q, a->rd, a->rn, a->rm, a->rd,
                         a->idx, fns[a->esz - 1][sub]);
    }
    return true;
}

TRANS(MLA_vi, do_mla_vector_idx, a, false)
TRANS(MLS_vi, do_mla_vector_idx, a, true)

static bool do_int3_qc_vector_idx(DisasContext *s, arg_qrrx_e *a,
                                  gen_helper_gvec_4 * const fns[2])
{
    assert(a->esz == MO_16 || a->esz == MO_32);
    if (fp_access_check(s)) {
        tcg_gen_gvec_4_ool(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           offsetof(CPUARMState, vfp.qc),
                           a->q ? 16 : 8, vec_full_reg_size(s),
                           a->idx, fns[a->esz - 1]);
    }
    return true;
}

static gen_helper_gvec_4 * const f_vector_idx_sqdmulh[2] = {
    gen_helper_neon_sqdmulh_idx_h,
    gen_helper_neon_sqdmulh_idx_s,
};
TRANS(SQDMULH_vi, do_int3_qc_vector_idx, a, f_vector_idx_sqdmulh)

static gen_helper_gvec_4 * const f_vector_idx_sqrdmulh[2] = {
    gen_helper_neon_sqrdmulh_idx_h,
    gen_helper_neon_sqrdmulh_idx_s,
};
TRANS(SQRDMULH_vi, do_int3_qc_vector_idx, a, f_vector_idx_sqrdmulh)

static gen_helper_gvec_4 * const f_vector_idx_sqrdmlah[2] = {
    gen_helper_neon_sqrdmlah_idx_h,
    gen_helper_neon_sqrdmlah_idx_s,
};
TRANS_FEAT(SQRDMLAH_vi, aa64_rdm, do_int3_qc_vector_idx, a,
           f_vector_idx_sqrdmlah)

static gen_helper_gvec_4 * const f_vector_idx_sqrdmlsh[2] = {
    gen_helper_neon_sqrdmlsh_idx_h,
    gen_helper_neon_sqrdmlsh_idx_s,
};
TRANS_FEAT(SQRDMLSH_vi, aa64_rdm, do_int3_qc_vector_idx, a,
           f_vector_idx_sqrdmlsh)

static bool do_dot_vector_idx(DisasContext *s, arg_qrrx_e *a,
                              gen_helper_gvec_4 *fn)
{
    if (fp_access_check(s)) {
        gen_gvec_op4_ool(s, a->q, a->rd, a->rn, a->rm, a->rd, a->idx, fn);
    }
    return true;
}

TRANS_FEAT(SDOT_vi, aa64_dp, do_dot_vector_idx, a, gen_helper_gvec_sdot_idx_b)
TRANS_FEAT(UDOT_vi, aa64_dp, do_dot_vector_idx, a, gen_helper_gvec_udot_idx_b)
TRANS_FEAT(SUDOT_vi, aa64_i8mm, do_dot_vector_idx, a,
           gen_helper_gvec_sudot_idx_b)
TRANS_FEAT(USDOT_vi, aa64_i8mm, do_dot_vector_idx, a,
           gen_helper_gvec_usdot_idx_b)
TRANS_FEAT(BFDOT_vi, aa64_bf16, do_dot_vector_idx, a,
           gen_helper_gvec_bfdot_idx)

static bool trans_BFMLAL_vi(DisasContext *s, arg_qrrx_e *a)
{
    if (!dc_isar_feature(aa64_bf16, s)) {
        return false;
    }
    if (fp_access_check(s)) {
        /* Q bit selects BFMLALB vs BFMLALT. */
        gen_gvec_op4_fpst(s, true, a->rd, a->rn, a->rm, a->rd, 0,
                          (a->idx << 1) | a->q,
                          gen_helper_gvec_bfmlal_idx);
    }
    return true;
}

/*
 * Advanced SIMD scalar pairwise
 */

static bool do_fp3_scalar_pair(DisasContext *s, arg_rr_e *a, const FPScalar *f)
{
    switch (a->esz) {
    case MO_64:
        if (fp_access_check(s)) {
            TCGv_i64 t0 = tcg_temp_new_i64();
            TCGv_i64 t1 = tcg_temp_new_i64();

            read_vec_element(s, t0, a->rn, 0, MO_64);
            read_vec_element(s, t1, a->rn, 1, MO_64);
            f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
            write_fp_dreg(s, a->rd, t0);
        }
        break;
    case MO_32:
        if (fp_access_check(s)) {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i32 t1 = tcg_temp_new_i32();

            read_vec_element_i32(s, t0, a->rn, 0, MO_32);
            read_vec_element_i32(s, t1, a->rn, 1, MO_32);
            f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
            write_fp_sreg(s, a->rd, t0);
        }
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        if (fp_access_check(s)) {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i32 t1 = tcg_temp_new_i32();

            read_vec_element_i32(s, t0, a->rn, 0, MO_16);
            read_vec_element_i32(s, t1, a->rn, 1, MO_16);
            f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_FPCR_F16));
            write_fp_sreg(s, a->rd, t0);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

TRANS(FADDP_s, do_fp3_scalar_pair, a, &f_scalar_fadd)
TRANS(FMAXP_s, do_fp3_scalar_pair, a, &f_scalar_fmax)
TRANS(FMINP_s, do_fp3_scalar_pair, a, &f_scalar_fmin)
TRANS(FMAXNMP_s, do_fp3_scalar_pair, a, &f_scalar_fmaxnm)
TRANS(FMINNMP_s, do_fp3_scalar_pair, a, &f_scalar_fminnm)

static bool trans_ADDP_s(DisasContext *s, arg_rr_e *a)
{
    if (fp_access_check(s)) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        read_vec_element(s, t0, a->rn, 0, MO_64);
        read_vec_element(s, t1, a->rn, 1, MO_64);
        tcg_gen_add_i64(t0, t0, t1);
        write_fp_dreg(s, a->rd, t0);
    }
    return true;
}

/*
 * Floating-point conditional select
 */

static bool trans_FCSEL(DisasContext *s, arg_FCSEL *a)
{
    TCGv_i64 t_true, t_false;
    DisasCompare64 c;

    switch (a->esz) {
    case MO_32:
    case MO_64:
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        break;
    default:
        return false;
    }

    if (!fp_access_check(s)) {
        return true;
    }

    /* Zero extend sreg & hreg inputs to 64 bits now.  */
    t_true = tcg_temp_new_i64();
    t_false = tcg_temp_new_i64();
    read_vec_element(s, t_true, a->rn, 0, a->esz);
    read_vec_element(s, t_false, a->rm, 0, a->esz);

    a64_test_cc(&c, a->cond);
    tcg_gen_movcond_i64(c.cond, t_true, c.value, tcg_constant_i64(0),
                        t_true, t_false);

    /*
     * Note that sregs & hregs write back zeros to the high bits,
     * and we've already done the zero-extension.
     */
    write_fp_dreg(s, a->rd, t_true);
    return true;
}

/*
 * Floating-point data-processing (3 source)
 */

static bool do_fmadd(DisasContext *s, arg_rrrr_e *a, bool neg_a, bool neg_n)
{
    TCGv_ptr fpst;

    /*
     * These are fused multiply-add. Note that doing the negations here
     * as separate steps is correct: an input NaN should come out with
     * its sign bit flipped if it is a negated-input.
     */
    switch (a->esz) {
    case MO_64:
        if (fp_access_check(s)) {
            TCGv_i64 tn = read_fp_dreg(s, a->rn);
            TCGv_i64 tm = read_fp_dreg(s, a->rm);
            TCGv_i64 ta = read_fp_dreg(s, a->ra);

            if (neg_a) {
                gen_vfp_negd(ta, ta);
            }
            if (neg_n) {
                gen_vfp_negd(tn, tn);
            }
            fpst = fpstatus_ptr(FPST_FPCR);
            gen_helper_vfp_muladdd(ta, tn, tm, ta, fpst);
            write_fp_dreg(s, a->rd, ta);
        }
        break;
    case MO_32:
        if (fp_access_check(s)) {
            TCGv_i32 tn = read_fp_sreg(s, a->rn);
            TCGv_i32 tm = read_fp_sreg(s, a->rm);
            TCGv_i32 ta = read_fp_sreg(s, a->ra);

            if (neg_a) {
                gen_vfp_negs(ta, ta);
            }
            if (neg_n) {
                gen_vfp_negs(tn, tn);
            }
            fpst = fpstatus_ptr(FPST_FPCR);
            gen_helper_vfp_muladds(ta, tn, tm, ta, fpst);
            write_fp_sreg(s, a->rd, ta);
        }
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        if (fp_access_check(s)) {
            TCGv_i32 tn = read_fp_hreg(s, a->rn);
            TCGv_i32 tm = read_fp_hreg(s, a->rm);
            TCGv_i32 ta = read_fp_hreg(s, a->ra);

            if (neg_a) {
                gen_vfp_negh(ta, ta);
            }
            if (neg_n) {
                gen_vfp_negh(tn, tn);
            }
            fpst = fpstatus_ptr(FPST_FPCR_F16);
            gen_helper_advsimd_muladdh(ta, tn, tm, ta, fpst);
            write_fp_sreg(s, a->rd, ta);
        }
        break;
    default:
        return false;
    }
    return true;
}

TRANS(FMADD, do_fmadd, a, false, false)
TRANS(FNMADD, do_fmadd, a, true, true)
TRANS(FMSUB, do_fmadd, a, false, true)
TRANS(FNMSUB, do_fmadd, a, true, false)
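
/*
 * The neg_a/neg_n flags map the four encodings onto fused muladd:
 * FMADD = a + n*m, FMSUB = a - n*m, FNMADD = -a - n*m, FNMSUB = -a + n*m.
 */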

/* Shift a TCGv src by TCGv shift_amount, put result in dst.
 * Note that it is the caller's responsibility to ensure that the
 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
 * mandated semantics for out of range shifts.
 */
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
{
    switch (shift_type) {
    case A64_SHIFT_TYPE_LSL:
        tcg_gen_shl_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_LSR:
        tcg_gen_shr_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_ASR:
        if (!sf) {
            tcg_gen_ext32s_i64(dst, src);
        }
        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
        break;
    case A64_SHIFT_TYPE_ROR:
        if (sf) {
            tcg_gen_rotr_i64(dst, src, shift_amount);
        } else {
            TCGv_i32 t0, t1;
            t0 = tcg_temp_new_i32();
            t1 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t0, src);
            tcg_gen_extrl_i64_i32(t1, shift_amount);
            tcg_gen_rotr_i32(t0, t0, t1);
            tcg_gen_extu_i32_i64(dst, t0);
        }
        break;
    default:
        assert(FALSE); /* all shift types should be handled */
        break;
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(dst, dst);
    }
}

/* Shift a TCGv src by immediate, put result in dst.
 * The shift amount must be in range (this should always be true as the
 * relevant instructions will UNDEF on bad shift immediates).
 */
static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
                          enum a64_shift_type shift_type, unsigned int shift_i)
{
    assert(shift_i < (sf ? 64 : 32));

    if (shift_i == 0) {
        tcg_gen_mov_i64(dst, src);
    } else {
        shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i));
    }
}

/* Logical (shifted register)
 *   31  30 29 28       24 23   22 21  20  16 15    10 9    5 4    0
 * +----+-----+-----------+-------+---+------+--------+------+------+
 * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
 * +----+-----+-----------+-------+---+------+--------+------+------+
 */
static void disas_logic_reg(DisasContext *s, uint32_t insn)
{
    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    shift_type = extract32(insn, 22, 2);
    invert = extract32(insn, 21, 1);
    rm = extract32(insn, 16, 5);
    shift_amount = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && (shift_amount & (1 << 5))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
         * register-register MOV and MVN, so it is worth special casing.
         */
        tcg_rm = cpu_reg(s, rm);
        if (invert) {
            tcg_gen_not_i64(tcg_rd, tcg_rm);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, tcg_rm);
            } else {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
            }
        }
        return;
    }

    tcg_rm = read_cpu_reg(s, rm, sf);

    if (shift_amount) {
        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
    }

    tcg_rn = cpu_reg(s, rn);

    switch (opc | (invert << 2)) {
    case 0: /* AND */
    case 3: /* ANDS */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 1: /* ORR */
        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 2: /* EOR */
        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 4: /* BIC */
    case 7: /* BICS */
        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 5: /* ORN */
        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 6: /* EON */
        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    default:
        assert(FALSE);
        break;
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) {
        gen_logic_CC(sf, tcg_rd);
    }
}

/*
 * Add/subtract (extended register)
 *
 *  31|30|29|28       24|23 22|21|20   16|15  13|12  10|9  5|4  0|
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 * |sf|op| S| 0 1 0 1 1 | opt | 1|  Rm   |option| imm3 | Rn | Rd |
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 *
 *  sf: 0 -> 32bit, 1 -> 64bit
 *  op: 0 -> add  , 1 -> sub
 *   S: 1 -> set flags
 * opt: 00
 * option: extension type (see DecodeRegExtend)
 * imm3: optional shift to Rm
 *
 * Rd = Rn + LSL(extend(Rm), amount)
 */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);
    int option = extract32(insn, 13, 3);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn; /* temps */
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    if (imm3 > 4 || opt != 0) {
        unallocated_encoding(s);
        return;
    }

    /* non-flag setting ops may use SP */
    if (!setflags) {
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rd = cpu_reg(s, rd);
    }
    tcg_rn = read_cpu_reg_sp(s, rn, sf);

    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }
}

/*
 * Add/subtract (shifted register)
 *
 *  31 30 29 28       24 23 22 21 20   16 15     10 9    5 4    0
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 * |sf|op| S| 0 1 0 1 1 |shift| 0|  Rm   |  imm6   |  Rn  |  Rd  |
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
 *  imm6: Shift amount to apply to Rm before the add/sub
 */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 10, 6);
    int rm = extract32(insn, 16, 5);
    int shift_type = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn, tcg_rm;
    TCGv_i64 tcg_result;

    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, sf);
    tcg_rm = read_cpu_reg(s, rm, sf);

    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }
}
/* Data-processing (3 source)
 *
 *    31 30  29 28       24 23 21  20  16  15  14  10 9    5 4    0
 *  +--+------+-----------+------+------+----+------+------+------+
 *  |sf| op54 | 1 1 0 1 1 | op31 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 *  +--+------+-----------+------+------+----+------+------+------+
 */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    int op_id = (extract32(insn, 29, 3) << 4) |
        (extract32(insn, 21, 3) << 1) |
        extract32(insn, 15, 1);
    bool sf = extract32(insn, 31, 1);
    bool is_sub = extract32(op_id, 0, 1);
    bool is_high = extract32(op_id, 2, 1);
    bool is_signed = false;
    TCGv_i64 tcg_op1, tcg_op2, tcg_tmp;

    /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
    switch (op_id) {
    case 0x42: /* SMADDL */
    case 0x43: /* SMSUBL */
    case 0x44: /* SMULH */
        is_signed = true;
        break;
    case 0x0: /* MADD (32bit) */
    case 0x1: /* MSUB (32bit) */
    case 0x40: /* MADD (64bit) */
    case 0x41: /* MSUB (64bit) */
    case 0x4a: /* UMADDL */
    case 0x4b: /* UMSUBL */
    case 0x4c: /* UMULH */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_high) {
        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tcg_rm = cpu_reg(s, rm);

        if (is_signed) {
            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        }
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_tmp = tcg_temp_new_i64();

    if (op_id < 0x42) {
        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
    } else {
        if (is_signed) {
            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
        } else {
            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
        }
    }

    if (ra == 31 && !is_sub) {
        /* Special-case MADD with rA == XZR; it is the standard MUL alias */
        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
    } else {
        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
        if (is_sub) {
            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        } else {
            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        }
    }

    if (!sf) {
        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
    }
}
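
/*
 * Example: SMULH returns the high 64 bits of the 128-bit signed product,
 * so with X1 = X2 = 1 << 32 the product is 1 << 64 and "SMULH X0, X1, X2"
 * leaves X0 = 1. The MUL alias (MADD with Ra == XZR) is the special case
 * above that needs only a single tcg_gen_mul_i64.
 */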
/* Add/subtract (with carry)
 *  31 30 29 28 27 26 25 24 23 22 21  20  16  15       10  9  5 4   0
 * +--+--+--+------------------------+------+-------------+------+-----+
 * |sf|op| S| 1  1  0  1  0  0  0  0 |  rm  | 0 0 0 0 0 0 |  Rn  |  Rd |
 * +--+--+--+------------------------+------+-------------+------+-----+
 */

static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, setflags, rm, rn, rd;
    TCGv_i64 tcg_y, tcg_rn, tcg_rd;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    setflags = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (op) {
        tcg_y = tcg_temp_new_i64();
        tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
    } else {
        tcg_y = cpu_reg(s, rm);
    }

    if (setflags) {
        gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
    } else {
        gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
    }
}
/*
 * Rotate right into flags
 *  31 30 29                21       15          10      5  4      0
 * +--+--+--+-----------------+--------+-----------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 |  imm6  | 0 0 0 0 1 |  Rn  |o2| mask |
 * +--+--+--+-----------------+--------+-----------+------+--+------+
 */
static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
{
    int mask = extract32(insn, 0, 4);
    int o2 = extract32(insn, 4, 1);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 15, 6);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i64 tcg_rn;
    TCGv_i32 nzcv;

    if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, 1);
    tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);

    nzcv = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(nzcv, tcg_rn);

    if (mask & 8) { /* N */
        tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
    }
    if (mask & 4) { /* Z */
        tcg_gen_not_i32(cpu_ZF, nzcv);
        tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
    }
    if (mask & 2) { /* C */
        tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
    }
    if (mask & 1) { /* V */
        tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
    }
}
/*
 * Evaluate into flags
 *  31 30 29                21        15   14        10      5  4      0
 * +--+--+--+-----------------+---------+----+---------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 |  Rn  |o3| mask |
 * +--+--+--+-----------------+---------+----+---------+------+--+------+
 */
static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
{
    int o3_mask = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int o2 = extract32(insn, 15, 6);
    int sz = extract32(insn, 14, 1);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i32 tmp;
    int shift;

    if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
        !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }
    shift = sz ? 16 : 24;  /* SETF16 or SETF8 */

    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
    tcg_gen_shli_i32(cpu_NF, tmp, shift);
    tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
}
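
/*
 * Example: SETF8 W0 with the low byte of W0 = 0x80 gives N=1 (bit 7),
 * Z=0 (byte is non-zero) and V=1 (bit 8 XOR bit 7); C is left unchanged.
 * The shifts above place those bits in the sign positions of NF/VF, and
 * ZF becomes zero exactly when the low byte is zero.
 */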
/* Conditional compare (immediate / register)
 *  31 30 29 28 27 26 25 24 23 22 21  20    16 15  12  11  10  9   5  4 3   0
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 * |sf|op| S| 1  1  0  1  0  0  1  0 |imm5/rm | cond |i/r |o2|  Rn  |o3|nzcv |
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 */
static void disas_cc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
    DisasCompare c;

    if (!extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }
    if (insn & (1 << 10 | 1 << 4)) {
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    is_imm = extract32(insn, 11, 1);
    y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    nzcv = extract32(insn, 0, 4);

    /* Set T0 = !COND.  */
    tcg_t0 = tcg_temp_new_i32();
    arm_test_cc(&c, cond);
    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);

    /* Load the arguments for the new comparison.  */
    if (is_imm) {
        tcg_y = tcg_temp_new_i64();
        tcg_gen_movi_i64(tcg_y, y);
    } else {
        tcg_y = cpu_reg(s, y);
    }
    tcg_rn = cpu_reg(s, rn);

    /* Set the flags for the new comparison.  */
    tcg_tmp = tcg_temp_new_i64();
    if (op) {
        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    } else {
        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    }

    /* If COND was false, force the flags to #nzcv.  Compute two masks
     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
     * For tcg hosts that support ANDC, we can make do with just T1.
     * In either case, allow the tcg optimizer to delete any unused mask.
     */
    tcg_t1 = tcg_temp_new_i32();
    tcg_t2 = tcg_temp_new_i32();
    tcg_gen_neg_i32(tcg_t1, tcg_t0);
    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);

    if (nzcv & 8) { /* N */
        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
        }
    }
    if (nzcv & 4) { /* Z */
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
        }
    } else {
        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
    }
    if (nzcv & 2) { /* C */
        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
        }
    }
    if (nzcv & 1) { /* V */
        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
        }
    }
}
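
/*
 * Example: "CCMP X0, X1, #0, EQ" sets the flags from X0 - X1 when the
 * Z flag was set, and forces NZCV to 0b0000 otherwise; the T1/T2 masks
 * above merge the two outcomes without generating a branch.
 */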
/* Conditional select
 *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
 * +----+----+---+-----------------+------+------+-----+------+------+
 * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
 * +----+----+---+-----------------+------+------+-----+------+------+
 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, zero;
    DisasCompare64 c;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);

    a64_test_cc(&c, cond);
    zero = tcg_constant_i64(0);

    if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
        /* CSET & CSETM.  */
        if (else_inv) {
            tcg_gen_negsetcond_i64(tcg_invert_cond(c.cond),
                                   tcg_rd, c.value, zero);
        } else {
            tcg_gen_setcond_i64(tcg_invert_cond(c.cond),
                                tcg_rd, c.value, zero);
        }
    } else {
        TCGv_i64 t_true = cpu_reg(s, rn);
        TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
        if (else_inv && else_inc) {
            tcg_gen_neg_i64(t_false, t_false);
        } else if (else_inv) {
            tcg_gen_not_i64(t_false, t_false);
        } else if (else_inc) {
            tcg_gen_addi_i64(t_false, t_false, 1);
        }
        tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
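
/*
 * Example: "CSET X0, EQ" is the alias of "CSINC X0, XZR, XZR, NE";
 * it hits the rn == rm == 31 fast path above and becomes a single
 * setcond rather than a movcond plus increment.
 */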
static void handle_clz(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
    }
}
static void handle_cls(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
    }
}
static void handle_rbit(DisasContext *s, unsigned int sf,
                        unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_rbit64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_rbit(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
    }
}
/* REV with sf==1, opcode==3 ("REV64") */
static void handle_rev64(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    if (!sf) {
        unallocated_encoding(s);
        return;
    }
    tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
}
/* REV with sf==0, opcode==2
 * REV32 (sf==1, opcode==2)
 */
static void handle_rev32(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
        tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
    } else {
        tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
    }
}
/* REV16 (opcode==1) */
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
    TCGv_i64 mask = tcg_constant_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);

    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
    tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
    tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
    tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
    tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
}
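
/*
 * Example: REV16 swaps the bytes within each 16-bit halfword, so
 * W1 = 0x11223344 gives W0 = 0x22114433 via the mask/shift/or
 * sequence above.
 */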
/* Data-processing (1 source)
 *   31  30  29  28             21 20     16 15    10 9    5 4    0
 * +----+---+---+-----------------+---------+--------+------+------+
 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+---------+--------+------+------+
 */
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opcode, opcode2, rn, rd;
    TCGv_i64 tcg_rd;

    if (extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    opcode = extract32(insn, 10, 6);
    opcode2 = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

#define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))

    switch (MAP(sf, opcode2, opcode)) {
    case MAP(0, 0x00, 0x00): /* RBIT */
    case MAP(1, 0x00, 0x00):
        handle_rbit(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x01): /* REV16 */
    case MAP(1, 0x00, 0x01):
        handle_rev16(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x02): /* REV/REV32 */
    case MAP(1, 0x00, 0x02):
        handle_rev32(s, sf, rn, rd);
        break;
    case MAP(1, 0x00, 0x03): /* REV64 */
        handle_rev64(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x04): /* CLZ */
    case MAP(1, 0x00, 0x04):
        handle_clz(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x05): /* CLS */
    case MAP(1, 0x00, 0x05):
        handle_cls(s, sf, rn, rd);
        break;
    case MAP(1, 0x01, 0x00): /* PACIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x01): /* PACIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x02): /* PACDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x03): /* PACDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x04): /* AUTIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x05): /* AUTIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x06): /* AUTDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x07): /* AUTDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x08): /* PACIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x09): /* PACIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0a): /* PACDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0b): /* PACDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0c): /* AUTIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0d): /* AUTIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0e): /* AUTDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0f): /* AUTDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x10): /* XPACI */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpaci(tcg_rd, tcg_env, tcg_rd);
        }
        break;
    case MAP(1, 0x01, 0x11): /* XPACD */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpacd(tcg_rd, tcg_env, tcg_rd);
        }
        break;
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }

#undef MAP
}
static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
                       unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_n, tcg_m, tcg_rd;
    tcg_rd = cpu_reg(s, rd);

    if (!sf && is_signed) {
        tcg_n = tcg_temp_new_i64();
        tcg_m = tcg_temp_new_i64();
        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
    } else {
        tcg_n = read_cpu_reg(s, rn, sf);
        tcg_m = read_cpu_reg(s, rm, sf);
    }

    if (is_signed) {
        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
    } else {
        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
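
/*
 * The AArch64 divide semantics live in the sdiv64/udiv64 helpers:
 * division rounds towards zero, division by zero returns 0, and
 * INT64_MIN / -1 wraps to INT64_MIN; none of these cases trap.
 */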
/* LSLV, LSRV, ASRV, RORV */
static void handle_shift_reg(DisasContext *s,
                             enum a64_shift_type shift_type, unsigned int sf,
                             unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_shift = tcg_temp_new_i64();
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
    shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
}
/* CRC32[BHWX], CRC32C[BHWX] */
static void handle_crc32(DisasContext *s,
                         unsigned int sf, unsigned int sz, bool crc32c,
                         unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_acc, tcg_val;
    TCGv_i32 tcg_bytes;

    if (!dc_isar_feature(aa64_crc32, s)
        || (sf == 1 && sz != 3)
        || (sf == 0 && sz == 3)) {
        unallocated_encoding(s);
        return;
    }

    if (sz == 3) {
        tcg_val = cpu_reg(s, rm);
    } else {
        uint64_t mask;
        switch (sz) {
        case 0:
            mask = 0xFF;
            break;
        case 1:
            mask = 0xFFFF;
            break;
        case 2:
            mask = 0xFFFFFFFF;
            break;
        default:
            g_assert_not_reached();
        }
        tcg_val = tcg_temp_new_i64();
        tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
    }

    tcg_acc = cpu_reg(s, rn);
    tcg_bytes = tcg_constant_i32(1 << sz);

    if (crc32c) {
        gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    } else {
        gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    }
}
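
/*
 * Example: "CRC32W W0, W1, W2" has sz == 2, so tcg_bytes is 4 and the
 * low 32 bits of Rm are folded into the running CRC held in Rn. The
 * crc32c variants use the Castagnoli polynomial rather than the IEEE
 * 802.3 one, hence the separate helper.
 */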
/* Data-processing (2 source)
 *   31   30  29 28             21 20  16 15    10 9    5 4    0
 * +----+---+---+-----------------+------+--------+------+------+
 * | sf | 0 | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+------+--------+------+------+
 */
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, rm, opcode, rn, rd, setflag;
    sf = extract32(insn, 31, 1);
    setflag = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (setflag && opcode != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SUBP(S) */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 tcg_n, tcg_m, tcg_d;

            tcg_n = read_cpu_reg_sp(s, rn, true);
            tcg_m = read_cpu_reg_sp(s, rm, true);
            tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56);
            tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56);
            tcg_d = cpu_reg(s, rd);

            if (setflag) {
                gen_sub_CC(true, tcg_d, tcg_n, tcg_m);
            } else {
                tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m);
            }
        }
        break;
    case 2: /* UDIV */
        handle_div(s, false, sf, rm, rn, rd);
        break;
    case 3: /* SDIV */
        handle_div(s, true, sf, rm, rn, rd);
        break;
    case 4: /* IRG */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        }
        if (s->ata[0]) {
            gen_helper_irg(cpu_reg_sp(s, rd), tcg_env,
                           cpu_reg_sp(s, rn), cpu_reg(s, rm));
        } else {
            gen_address_with_allocation_tag0(cpu_reg_sp(s, rd),
                                             cpu_reg_sp(s, rn));
        }
        break;
    case 5: /* GMI */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 t = tcg_temp_new_i64();

            tcg_gen_extract_i64(t, cpu_reg_sp(s, rn), 56, 4);
            tcg_gen_shl_i64(t, tcg_constant_i64(1), t);
            tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t);
        }
        break;
    case 8: /* LSLV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
        break;
    case 9: /* LSRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
        break;
    case 10: /* ASRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
        break;
    case 11: /* RORV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
        break;
    case 12: /* PACGA */
        if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        gen_helper_pacga(cpu_reg(s, rd), tcg_env,
                         cpu_reg(s, rn), cpu_reg_sp(s, rm));
        break;
    case 16:
    case 17:
    case 18:
    case 19:
    case 20:
    case 21:
    case 22:
    case 23: /* CRC32 */
    {
        int sz = extract32(opcode, 0, 2);
        bool crc32c = extract32(opcode, 2, 1);
        handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
        break;
    }
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}
/*
 * Data processing - register
 *  31  30 29  28      25    21  20  16      10         0
 * +--+---+--+---+-------+-----+-------+-------+---------+
 * |  |op0|  |op1| 1 0 1 | op2 |       |  op3  |         |
 * +--+---+--+---+-------+-----+-------+-------+---------+
 */
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 30, 1);
    int op1 = extract32(insn, 28, 1);
    int op2 = extract32(insn, 21, 4);
    int op3 = extract32(insn, 10, 6);

    if (!op1) {
        if (op2 & 8) {
            if (op2 & 1) {
                /* Add/sub (extended register) */
                disas_add_sub_ext_reg(s, insn);
            } else {
                /* Add/sub (shifted register) */
                disas_add_sub_reg(s, insn);
            }
        } else {
            /* Logical (shifted register) */
            disas_logic_reg(s, insn);
        }
        return;
    }

    switch (op2) {
    case 0x0:
        switch (op3) {
        case 0x00: /* Add/subtract (with carry) */
            disas_adc_sbc(s, insn);
            break;

        case 0x01: /* Rotate right into flags */
        case 0x21:
            disas_rotate_right_into_flags(s, insn);
            break;

        case 0x02: /* Evaluate into flags */
        case 0x12:
        case 0x22:
        case 0x32:
            disas_evaluate_into_flags(s, insn);
            break;

        default:
            goto do_unallocated;
        }
        break;

    case 0x2: /* Conditional compare */
        disas_cc(s, insn); /* both imm and reg forms */
        break;

    case 0x4: /* Conditional select */
        disas_cond_select(s, insn);
        break;

    case 0x6: /* Data-processing */
        if (op0) {    /* (1 source) */
            disas_data_proc_1src(s, insn);
        } else {      /* (2 source) */
            disas_data_proc_2src(s, insn);
        }
        break;
    case 0x8 ... 0xf: /* (3 source) */
        disas_data_proc_3src(s, insn);
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}
static void handle_fp_compare(DisasContext *s, int size,
                              unsigned int rn, unsigned int rm,
                              bool cmp_with_zero, bool signal_all_nans)
{
    TCGv_i64 tcg_flags = tcg_temp_new_i64();
    TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (size == MO_64) {
        TCGv_i64 tcg_vn, tcg_vm;

        tcg_vn = read_fp_dreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_constant_i64(0);
        } else {
            tcg_vm = read_fp_dreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
    } else {
        TCGv_i32 tcg_vn = tcg_temp_new_i32();
        TCGv_i32 tcg_vm = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_vn, rn, 0, size);
        if (cmp_with_zero) {
            tcg_gen_movi_i32(tcg_vm, 0);
        } else {
            read_vec_element_i32(s, tcg_vm, rm, 0, size);
        }

        switch (size) {
        case MO_32:
            if (signal_all_nans) {
                gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        case MO_16:
            if (signal_all_nans) {
                gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    gen_set_nzcv(tcg_flags);
}
/* Floating point compare
 *   31  30  29 28       24 23  22  21 20  16 15 14 13  10    9    5 4     0
 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | op  | 1 0 0 0 |  Rn  |  op2  |
 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
 */
static void disas_fp_compare(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, op, rn, opc, op2r;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    op = extract32(insn, 14, 2);
    rn = extract32(insn, 5, 5);
    opc = extract32(insn, 3, 2);
    op2r = extract32(insn, 0, 3);

    if (mos || op || op2r) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
}
/* Floating point conditional compare
 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5  4   3    0
 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 0 1 |  Rn  | op | nzcv |
 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 */
static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, op, nzcv;
    TCGLabel *label_continue = NULL;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    op = extract32(insn, 4, 1);
    nzcv = extract32(insn, 0, 4);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cond < 0x0e) { /* not always */
        TCGLabel *label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        /* nomatch: */
        gen_set_nzcv(tcg_constant_i64(nzcv << 28));
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }

    handle_fp_compare(s, size, rn, rm, false, op);

    if (cond < 0x0e) {
        gen_set_label(label_continue);
    }
}
/* Floating-point data-processing (1 source) - half precision */
static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst = NULL;
    TCGv_i32 tcg_op = read_fp_hreg(s, rn);
    TCGv_i32 tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS */
        gen_vfp_absh(tcg_res, tcg_op);
        break;
    case 0x2: /* FNEG */
        gen_vfp_negh(tcg_res, tcg_op);
        break;
    case 0x3: /* FSQRT */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        TCGv_i32 tcg_rmode;

        fpst = fpstatus_ptr(FPST_FPCR_F16);
        tcg_rmode = gen_set_rmode(opcode & 7, fpst);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        gen_restore_rmode(tcg_rmode, fpst);
        break;
    }
    case 0xe: /* FRINTX */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);
}
/* Floating-point data-processing (1 source) - single precision */
static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
    TCGv_i32 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;

    tcg_op = read_fp_sreg(s, rn);
    tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        goto done;
    case 0x1: /* FABS */
        gen_vfp_abss(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_vfp_negs(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrts(tcg_res, tcg_op, tcg_env);
        goto done;
    case 0x6: /* BFCVT */
        gen_fpst = gen_helper_bfcvt;
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = opcode & 7;
        gen_fpst = gen_helper_rints;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rints_exact;
        break;
    case 0xf: /* FRINTI */
        gen_fpst = gen_helper_rints;
        break;
    case 0x10: /* FRINT32Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x12: /* FRINT64Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint64_s;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_s;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_restore_rmode(tcg_rmode, fpst);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }

 done:
    write_fp_sreg(s, rd, tcg_res);
}
/* Floating-point data-processing (1 source) - double precision */
static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
    TCGv_i64 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;

    switch (opcode) {
    case 0x0: /* FMOV */
        gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
        return;
    }

    tcg_op = read_fp_dreg(s, rn);
    tcg_res = tcg_temp_new_i64();

    switch (opcode) {
    case 0x1: /* FABS */
        gen_vfp_absd(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_vfp_negd(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_res, tcg_op, tcg_env);
        goto done;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = opcode & 7;
        gen_fpst = gen_helper_rintd;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rintd_exact;
        break;
    case 0xf: /* FRINTI */
        gen_fpst = gen_helper_rintd;
        break;
    case 0x10: /* FRINT32Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x12: /* FRINT64Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint64_d;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_d;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_restore_rmode(tcg_rmode, fpst);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }

 done:
    write_fp_dreg(s, rd, tcg_res);
}
static void handle_fp_fcvt(DisasContext *s, int opcode,
                           int rd, int rn, int dtype, int ntype)
{
    switch (ntype) {
    case 0x0:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        if (dtype == 1) {
            /* Single to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, tcg_env);
            write_fp_dreg(s, rd, tcg_rd);
        } else {
            /* Single to half */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            TCGv_i32 ahp = get_ahp_flag();
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

            gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
            write_fp_sreg(s, rd, tcg_rd);
        }
        break;
    }
    case 0x1:
    {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        if (dtype == 0) {
            /* Double to single */
            gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, tcg_env);
        } else {
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
            TCGv_i32 ahp = get_ahp_flag();
            /* Double to half */
            gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
        }
        write_fp_sreg(s, rd, tcg_rd);
        break;
    }
    case 0x3:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR);
        TCGv_i32 tcg_ahp = get_ahp_flag();
        tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
        if (dtype == 0) {
            /* Half to single */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_sreg(s, rd, tcg_rd);
        } else {
            /* Half to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_dreg(s, rd, tcg_rd);
        }
        break;
    }
    default:
        g_assert_not_reached();
    }
}
/* Floating point data-processing (1 source)
 *   31  30  29 28       24 23  22  21 20    15 14       10 9    5 4    0
 * +---+---+---+-----------+------+---+--------+-----------+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+--------+-----------+------+------+
 */
static void disas_fp_1src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int opcode = extract32(insn, 15, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (mos) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 0x4: case 0x5: case 0x7:
    {
        /* FCVT between half, single and double precision */
        int dtype = extract32(opcode, 0, 2);
        if (type == 2 || dtype == type) {
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
        break;
    }

    case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
        if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
            goto do_unallocated;
        }
        /* fall through */
    case 0x0 ... 0x3:
    case 0x8 ... 0xc:
    case 0xe ... 0xf:
        /* 32-to-32 and 64-to-64 ops */
        switch (type) {
        case 0:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        case 1:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_double(s, opcode, rd, rn);
            break;
        case 3:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_half(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    case 0x6:
        switch (type) {
        case 1: /* BFCVT */
            if (!dc_isar_feature(aa64_bf16, s)) {
                goto do_unallocated;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}
/* Floating point immediate
 *   31  30  29 28       24 23  22  21 20        13 12   10 9    5 4    0
 * +---+---+---+-----------+------+---+------------+-------+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |    imm8    | 1 0 0 | imm5 |  Rd  |
 * +---+---+---+-----------+------+---+------------+-------+------+------+
 */
static void disas_fp_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int imm5 = extract32(insn, 5, 5);
    int imm8 = extract32(insn, 13, 8);
    int type = extract32(insn, 22, 2);
    int mos = extract32(insn, 29, 3);
    uint64_t imm;
    MemOp sz;

    if (mos || imm5) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    imm = vfp_expand_imm(sz, imm8);
    write_fp_dreg(s, rd, tcg_constant_i64(imm));
}
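
/*
 * Example: "FMOV D0, #1.0" encodes imm8 = 0x70; vfp_expand_imm()
 * rebuilds the sign, biased exponent and 4-bit fraction from the
 * a:b:c:d:efgh fields, so only values of the form +/- (16..31)/16 *
 * 2^(-3..4) are representable as FP immediates.
 */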
/* Handle floating point <=> fixed point conversions. Note that we can
 * also deal with fp <=> integer conversions as a special case (scale == 64)
 * OPTME: consider handling that special case specially or at least skipping
 * the call to scalbn in the helpers for zero shifts.
 */
static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
                           bool itof, int rmode, int scale, int sf, int type)
{
    bool is_signed = !(opcode & 1);
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_shift, tcg_single;
    TCGv_i64 tcg_double;

    tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR);

    tcg_shift = tcg_constant_i32(64 - scale);

    if (itof) {
        TCGv_i64 tcg_int = cpu_reg(s, rn);
        if (!sf) {
            TCGv_i64 tcg_extend = tcg_temp_new_i64();

            if (is_signed) {
                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
            } else {
                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
            }

            tcg_int = tcg_extend;
        }

        switch (type) {
        case 1: /* float64 */
            tcg_double = tcg_temp_new_i64();
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_dreg(s, rd, tcg_double);
            break;

        case 0: /* float32 */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            break;

        case 3: /* float16 */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_int = cpu_reg(s, rd);
        TCGv_i32 tcg_rmode;

        if (extract32(opcode, 2, 1)) {
            /* There are too many rounding modes to all fit into rmode,
             * so FCVTA[US] is a special case.
             */
            rmode = FPROUNDING_TIEAWAY;
        }

        tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);

        switch (type) {
        case 1: /* float64 */
            tcg_double = read_fp_dreg(s, rn);
            if (is_signed) {
                if (!sf) {
                    gen_helper_vfp_tosld(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                if (!sf) {
                    gen_helper_vfp_tould(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            }
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_int, tcg_int);
            }
            break;

        case 0: /* float32 */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
            }
            break;

        case 3: /* float16 */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_toslh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_toulh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
            }
            break;

        default:
            g_assert_not_reached();
        }

        gen_restore_rmode(tcg_rmode, tcg_fpstatus);
    }
}
/* Floating point <-> fixed point conversions
 *   31   30  29 28       24 23  22  21 20   19 18    16 15   10 9    5 4    0
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale |  Rn  |  Rd  |
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 */
static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int scale = extract32(insn, 10, 6);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof;

    if (sbit || (!sf && scale < 32)) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0: /* float32 */
    case 1: /* float64 */
        break;
    case 3: /* float16 */
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    switch ((rmode << 3) | opcode) {
    case 0x2: /* SCVTF */
    case 0x3: /* UCVTF */
        itof = true;
        break;
    case 0x18: /* FCVTZS */
    case 0x19: /* FCVTZU */
        itof = false;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
}
static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    /* FMOV: gpr to or from float, double, or top half of quad fp reg,
     * without conversion.
     */
    if (itof) {
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tmp;

        switch (type) {
        case 0:
            /* 32 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            break;
        case 1:
            /* 64 bit */
            write_fp_dreg(s, rd, tcg_rn);
            break;
        case 2:
            /* 64 bit to top half. */
            tcg_gen_st_i64(tcg_rn, tcg_env, fp_reg_hi_offset(s, rd));
            clear_vec_high(s, true, rd);
            break;
        case 3:
            /* 16 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext16u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_64));
            break;
        case 2:
            /* 64 bits from top half */
            tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_hi_offset(s, rn));
            break;
        case 3:
            /* 16 bit */
            tcg_gen_ld16u_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_16));
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
{
    TCGv_i64 t = read_fp_dreg(s, rn);
    TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR);

    gen_helper_fjcvtzs(t, t, fpstatus);

    tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
    tcg_gen_extrh_i64_i32(cpu_ZF, t);
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
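
/*
 * FJCVTZS implements the JavaScript ToInt32 conversion: round towards
 * zero, with out-of-range results wrapping modulo 2^32. The helper
 * returns the 32-bit result in the low half of t and the value for
 * cpu_ZF in the high half, so Z ends up set only when the conversion
 * was exact; N, C and V are always cleared.
 */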
/* Floating point <-> integer conversions
 *   31   30  29 28       24 23  22  21 20   19 18 16 15         10 9  5 4  0
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 */
static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof = false;

    if (sbit) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 2: /* SCVTF */
    case 3: /* UCVTF */
        itof = true;
        /* fallthru */
    case 4: /* FCVTAS */
    case 5: /* FCVTAU */
        if (rmode != 0) {
            goto do_unallocated;
        }
        /* fallthru */
    case 0: /* FCVT[NPMZ]S */
    case 1: /* FCVT[NPMZ]U */
        switch (type) {
        case 0: /* float32 */
        case 1: /* float64 */
            break;
        case 3: /* float16 */
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            break;
        default:
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
        break;

    default:
        switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
        case 0b01100110: /* FMOV half <-> 32-bit int */
        case 0b01100111:
        case 0b11100110: /* FMOV half <-> 64-bit int */
        case 0b11100111:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            /* fallthru */
        case 0b00000110: /* FMOV 32-bit */
        case 0b00000111:
        case 0b10100110: /* FMOV 64-bit */
        case 0b10100111:
        case 0b11001110: /* FMOV top half of 128-bit */
        case 0b11001111:
            if (!fp_access_check(s)) {
                return;
            }
            itof = opcode & 1;
            handle_fmov(s, rd, rn, type, itof);
            break;

        case 0b00111110: /* FJCVTZS */
            if (!dc_isar_feature(aa64_jscvt, s)) {
                goto do_unallocated;
            } else if (fp_access_check(s)) {
                handle_fjcvtzs(s, rd, rn);
            }
            break;

        default:
        do_unallocated:
            unallocated_encoding(s);
            return;
        }
        break;
    }
}
/* FP-specific subcases of table C3-6 (SIMD and FP data processing)
 *   31  30  29 28     25 24                          0
 * +---+---+---+---------+-----------------------------+
 * |   | 0 |   | 1 1 1 1 |                             |
 * +---+---+---+---------+-----------------------------+
 */
static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 24, 1)) {
        unallocated_encoding(s); /* in decodetree */
    } else if (extract32(insn, 21, 1) == 0) {
        /* Floating point to fixed point conversions */
        disas_fp_fixed_conv(s, insn);
    } else {
        switch (extract32(insn, 10, 2)) {
        case 1:
            /* Floating point conditional compare */
            disas_fp_ccomp(s, insn);
            break;
        case 2:
            /* Floating point data-processing (2 source) */
            unallocated_encoding(s); /* in decodetree */
            break;
        case 3:
            /* Floating point conditional select */
            unallocated_encoding(s); /* in decodetree */
            break;
        case 0:
            switch (ctz32(extract32(insn, 12, 4))) {
            case 0: /* [15:12] == xxx1 */
                /* Floating point immediate */
                disas_fp_imm(s, insn);
                break;
            case 1: /* [15:12] == xx10 */
                /* Floating point compare */
                disas_fp_compare(s, insn);
                break;
            case 2: /* [15:12] == x100 */
                /* Floating point data-processing (1 source) */
                disas_fp_1src(s, insn);
                break;
            case 3: /* [15:12] == 1000 */
                unallocated_encoding(s);
                break;
            default: /* [15:12] == 0000 */
                /* Floating point <-> integer conversions */
                disas_fp_int_conv(s, insn);
                break;
            }
            break;
        }
    }
}
static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
                     int pos)
{
    /* Extract 64 bits from the middle of two concatenated 64 bit
     * vector register slices left:right. The extracted bits start
     * at 'pos' bits into the right (least significant) side.
     * We return the result in tcg_right, and guarantee not to
     * trash tcg_left.
     */
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    assert(pos > 0 && pos < 64);

    tcg_gen_shri_i64(tcg_right, tcg_right, pos);
    tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
    tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
}
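
/*
 * Example: with pos == 24 the result in tcg_right becomes
 *   (right >> 24) | (left << 40)
 * i.e. the 64-bit window starting 3 bytes into the left:right pair,
 * which is exactly what a 64-bit EXT with imm4 == 3 needs.
 */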
/* EXT
 *   31  30 29         24 23 22  21 20  16 15  14  11 10  9    5 4    0
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | imm4 | 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 */
static void disas_simd_ext(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int op2 = extract32(insn, 22, 2);
    int imm4 = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pos = imm4 << 3;
    TCGv_i64 tcg_resl, tcg_resh;

    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resh = tcg_temp_new_i64();
    tcg_resl = tcg_temp_new_i64();

    /* Vd gets bits starting at pos bits into Vm:Vn. This is
     * either extracting 128 bits from a 128:128 concatenation, or
     * extracting 64 bits from a 64:64 concatenation.
     */
    if (!is_q) {
        read_vec_element(s, tcg_resl, rn, 0, MO_64);
        if (pos != 0) {
            read_vec_element(s, tcg_resh, rm, 0, MO_64);
            do_ext64(s, tcg_resh, tcg_resl, pos);
        }
    } else {
        TCGv_i64 tcg_hh;
        typedef struct {
            int reg;
            int elt;
        } EltPosns;
        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
        EltPosns *elt = eltposns;

        if (pos >= 64) {
            elt++;
            pos -= 64;
        }

        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
        elt++;
        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
        elt++;
        if (pos != 0) {
            do_ext64(s, tcg_resh, tcg_resl, pos);
            tcg_hh = tcg_temp_new_i64();
            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
            do_ext64(s, tcg_hh, tcg_resh, pos);
        }
    }

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    if (is_q) {
        write_vec_element(s, tcg_resh, rd, 1, MO_64);
    }
    clear_vec_high(s, is_q, rd);
}
/* TBL/TBX
 *   31  30 29         24 23 22  21 20  16 15 14 13  12  11 10 9    5 4    0
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | len | op | 0 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 */
static void disas_simd_tb(DisasContext *s, uint32_t insn)
{
    int op2 = extract32(insn, 22, 2);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int is_tbx = extract32(insn, 12, 1);
    int len = (extract32(insn, 13, 2) + 1) * 16;

    if (op2 != 0) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rm), tcg_env,
                       is_q ? 16 : 8, vec_full_reg_size(s),
                       (len << 6) | (is_tbx << 5) | rn,
                       gen_helper_simd_tblx);
}
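
/*
 * Example: "TBL V0.16B, { V1.16B, V2.16B }, V3.16B" has len == 32;
 * each byte of V3 selects one of the 32 table bytes, and out-of-range
 * indexes produce 0 (TBL) or leave the destination byte unchanged
 * (TBX). The helper decodes all of that from the packed
 * (len, is_tbx, rn) descriptor built above.
 */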
/* ZIP/UZP/TRN
 *   31  30 29         24 23  22  21 20   16 15 14 12 11 10 9    5 4    0
 * +---+---+-------------+------+---+------+---+-----+-----+------+------+
 * | 0 | Q | 0 0 1 1 1 0 | size | 0 |  Rm  | 0 | opc | 1 0 |  Rn  |  Rd  |
 * +---+---+-------------+------+---+------+---+-----+-----+------+------+
 */
static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    /* opc field bits [1:0] indicate ZIP/UZP/TRN;
     * bit 2 indicates 1 vs 2 variant of the insn.
     */
    int opcode = extract32(insn, 12, 2);
    bool part = extract32(insn, 14, 1);
    bool is_q = extract32(insn, 30, 1);
    int esize = 8 << size;
    int i;
    int datasize = is_q ? 128 : 64;
    int elements = datasize / esize;
    TCGv_i64 tcg_res[2], tcg_ele;

    if (opcode == 0 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = is_q ? tcg_temp_new_i64() : NULL;
    tcg_ele = tcg_temp_new_i64();

    for (i = 0; i < elements; i++) {
        int o, w;

        switch (opcode) {
        case 1: /* UZP1/2 */
        {
            int midpoint = elements / 2;
            if (i < midpoint) {
                read_vec_element(s, tcg_ele, rn, 2 * i + part, size);
            } else {
                read_vec_element(s, tcg_ele, rm,
                                 2 * (i - midpoint) + part, size);
            }
            break;
        }
        case 2: /* TRN1/2 */
            if (i & 1) {
                read_vec_element(s, tcg_ele, rm, (i & ~1) + part, size);
            } else {
                read_vec_element(s, tcg_ele, rn, (i & ~1) + part, size);
            }
            break;
        case 3: /* ZIP1/2 */
        {
            int base = part * elements / 2;
            if (i & 1) {
                read_vec_element(s, tcg_ele, rm, base + (i >> 1), size);
            } else {
                read_vec_element(s, tcg_ele, rn, base + (i >> 1), size);
            }
            break;
        }
        default:
            g_assert_not_reached();
        }

        w = (i * esize) / 64;
        o = (i * esize) % 64;
        if (o == 0) {
            tcg_gen_mov_i64(tcg_res[w], tcg_ele);
        } else {
            tcg_gen_shli_i64(tcg_ele, tcg_ele, o);
            tcg_gen_or_i64(tcg_res[w], tcg_res[w], tcg_ele);
        }
    }

    for (i = 0; i <= is_q; ++i) {
        write_vec_element(s, tcg_res[i], rd, i, MO_64);
    }
    clear_vec_high(s, is_q, rd);
}
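
/*
 * Example: "ZIP1 V0.8B, V1.8B, V2.8B" interleaves the low halves of
 * the two sources, producing { V1[0], V2[0], V1[1], V2[1], ... V2[3] };
 * ZIP2 (part == 1) does the same starting from the upper halves.
 */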
/*
 * do_reduction_op helper
 *
 * This mirrors the Reduce() pseudocode in the ARM ARM. It is
 * important for correct NaN propagation that we do these
 * operations in exactly the order specified by the pseudocode.
 *
 * This is a recursive function, TCG temps should be freed by the
 * calling function once it is done with the values.
 */
static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
                                int esize, int size, int vmap, TCGv_ptr fpst)
{
    if (esize == size) {
        int element;
        MemOp msize = esize == 16 ? MO_16 : MO_32;
        TCGv_i32 tcg_elem;

        /* We should have one register left here */
        assert(ctpop8(vmap) == 1);
        element = ctz32(vmap);
        assert(element < 8);

        tcg_elem = tcg_temp_new_i32();
        read_vec_element_i32(s, tcg_elem, rn, element, msize);
        return tcg_elem;
    } else {
        int bits = size / 2;
        int shift = ctpop8(vmap) / 2;
        int vmap_lo = (vmap >> shift) & vmap;
        int vmap_hi = (vmap & ~vmap_lo);
        TCGv_i32 tcg_hi, tcg_lo, tcg_res;

        tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
        tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
        tcg_res = tcg_temp_new_i32();

        switch (fpopcode) {
        case 0x0c: /* fmaxnmv half-precision */
            gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x0f: /* fmaxv half-precision */
            gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1c: /* fminnmv half-precision */
            gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1f: /* fminv half-precision */
            gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2c: /* fmaxnmv */
            gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2f: /* fmaxv */
            gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3c: /* fminnmv */
            gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3f: /* fminv */
            gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        default:
            g_assert_not_reached();
        }
        return tcg_res;
    }
}
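
/*
 * Example: for FMAXV on a 4-lane vector the recursion above evaluates
 * max(max(lane0, lane1), max(lane2, lane3)), matching the pairwise
 * tree of the Reduce() pseudocode rather than a left-to-right fold,
 * which matters when NaNs are present.
 */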
/* AdvSIMD across lanes
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool is_q = extract32(insn, 30, 1);
    bool is_u = extract32(insn, 29, 1);
    bool is_fp = false;
    bool is_min = false;
    int esize;
    int elements;
    int i;
    TCGv_i64 tcg_res, tcg_elt;

    switch (opcode) {
    case 0x1b: /* ADDV */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x3: /* SADDLV, UADDLV */
    case 0xa: /* SMAXV, UMAXV */
    case 0x1a: /* SMINV, UMINV */
        if (size == 3 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc: /* FMAXNMV, FMINNMV */
    case 0xf: /* FMAXV, FMINV */
        /* Bit 1 of size field encodes min vs max and the actual size
         * depends on the encoding of the U bit. If not set (and FP16
         * enabled) then we do half-precision float instead of single
         * precision.
         */
        is_min = extract32(size, 1, 1);
        is_fp = true;
        if (!is_u && dc_isar_feature(aa64_fp16, s)) {
            size = 1;
        } else if (!is_u || !is_q || extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        } else {
            size = 2;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    esize = 8 << size;
    elements = (is_q ? 128 : 64) / esize;

    tcg_res = tcg_temp_new_i64();
    tcg_elt = tcg_temp_new_i64();

    /* These instructions operate across all lanes of a vector
     * to produce a single result. We can guarantee that a 64
     * bit intermediate is sufficient:
     * + for [US]ADDLV the maximum element size is 32 bits, and
     *   the result type is 64 bits
     * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
     *   same as the element size, which is 32 bits at most
     * For the integer operations we can choose to work at 64
     * or 32 bits and truncate at the end; for simplicity
     * we use 64 bits always. The floating point
     * ops do require 32 bit intermediates, though.
     */
    if (!is_fp) {
        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));

        for (i = 1; i < elements; i++) {
            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));

            switch (opcode) {
            case 0x03: /* SADDLV / UADDLV */
            case 0x1b: /* ADDV */
                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
                break;
            case 0x0a: /* SMAXV / UMAXV */
                if (is_u) {
                    tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            case 0x1a: /* SMINV / UMINV */
                if (is_u) {
                    tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            default:
                g_assert_not_reached();
            }
        }
    } else {
        /* Floating point vector reduction ops which work across 32
         * bit (single) or 16 bit (half-precision) intermediates.
         * Note that correct NaN propagation requires that we do these
         * operations in exactly the order specified by the pseudocode.
         */
        TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        int fpopcode = opcode | is_min << 4 | is_u << 5;
        int vmap = (1 << elements) - 1;
        TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
                                             (is_q ? 128 : 64), vmap, fpst);
        tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
    }

    /* Now truncate the result to the width required for the final output */
    if (opcode == 0x03) {
        /* SADDLV, UADDLV: result is 2*esize */
        size++;
    }

    switch (size) {
    case 0:
        tcg_gen_ext8u_i64(tcg_res, tcg_res);
        break;
    case 1:
        tcg_gen_ext16u_i64(tcg_res, tcg_res);
        break;
    case 2:
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_dreg(s, rd, tcg_res);
}
/* AdvSIMD modified immediate
 *  31  30   29  28                 19 18 16 15   12  11  10  9     5 4    0
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 *
 * There are a number of operations that can be carried out here:
 *   MOVI - move (shifted) imm into register
 *   MVNI - move inverted (shifted) imm into register
 *   ORR  - bitwise OR of (shifted) imm with register
 *   BIC  - bitwise clear of (shifted) imm with register
 * With ARMv8.2 we also have:
 *   FMOV half-precision
 */
static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int cmode = extract32(insn, 12, 4);
    int o2 = extract32(insn, 11, 1);
    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
    bool is_neg = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    uint64_t imm = 0;

    if (o2) {
        if (cmode != 0xf || is_neg) {
            unallocated_encoding(s);
            return;
        }
        /* FMOV (vector, immediate) - half-precision */
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
        imm = vfp_expand_imm(MO_16, abcdefgh);
        /* now duplicate across the lanes */
        imm = dup_const(MO_16, imm);
    } else {
        if (cmode == 0xf && is_neg && !is_q) {
            unallocated_encoding(s);
            return;
        }
        imm = asimd_imm_const(abcdefgh, cmode, is_neg);
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
        /* MOVI or MVNI, with MVNI negation handled above. */
        tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
                             vec_full_reg_size(s), imm);
    } else {
        /* ORR or BIC, with BIC negation to AND handled above. */
        if (is_neg) {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
        } else {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
        }
    }
}
/* Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
 *
 * This code handles the common shifting code and is used by both
 * the vector and scalar code.
 */
static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                    TCGv_i64 tcg_rnd, bool accumulate,
                                    bool is_u, int size, int shift)
{
    bool extended_result = false;
    bool round = tcg_rnd != NULL;
    int ext_lshift = 0;
    TCGv_i64 tcg_src_hi;

    if (round && size == 3) {
        extended_result = true;
        ext_lshift = 64 - shift;
        tcg_src_hi = tcg_temp_new_i64();
    } else if (shift == 64) {
        if (!accumulate && is_u) {
            /* result is zero */
            tcg_gen_movi_i64(tcg_res, 0);
            return;
        }
    }

    /* Deal with the rounding step */
    if (round) {
        if (extended_result) {
            TCGv_i64 tcg_zero = tcg_constant_i64(0);
            if (!is_u) {
                /* take care of sign extending tcg_res */
                tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_src_hi,
                                 tcg_rnd, tcg_zero);
            } else {
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_zero,
                                 tcg_rnd, tcg_zero);
            }
        } else {
            tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
        }
    }

    /* Now do the shift right */
    if (round && extended_result) {
        /* extended case, >64 bit precision required */
        if (ext_lshift == 0) {
            /* special case, only high bits matter */
            tcg_gen_mov_i64(tcg_src, tcg_src_hi);
        } else {
            tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
            tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
        }
    } else {
        if (is_u) {
            if (shift == 64) {
                /* essentially shifting in 64 zeros */
                tcg_gen_movi_i64(tcg_src, 0);
            } else {
                tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            }
        } else {
            if (shift == 64) {
                /* effectively extending the sign-bit */
                tcg_gen_sari_i64(tcg_src, tcg_src, 63);
            } else {
                tcg_gen_sari_i64(tcg_src, tcg_src, shift);
            }
        }
    }

    if (accumulate) {
        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
    } else {
        tcg_gen_mov_i64(tcg_res, tcg_src);
    }
}
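/*
 * Worked example (URSHR #4 on a byte value 0xff): the rounding constant
 * is 1 << 3 = 8, so 0xff + 8 = 0x107 and 0x107 >> 4 = 0x10.  For
 * size == 3 the 64-bit addition of the rounding constant can carry out
 * of bit 63, which is why the extended_result path above keeps a high
 * word in tcg_src_hi before shifting back down.
 */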
/* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
static void handle_scalar_simd_shri(DisasContext *s,
                                    bool is_u, int immh, int immb,
                                    int opcode, int rn, int rd)
{
    const int size = 3;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    bool round = false;
    bool insert = false;
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_round;

    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = round = true;
        break;
    case 0x08: /* SRI */
        insert = true;
        break;
    }

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        /* shift count same as element size is valid but does nothing;
         * special case to avoid potential shift by 64.
         */
        int esize = 8 << size;
        if (shift != esize) {
            tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
            tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
        }
    } else {
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);
}
/* SHL/SLI - Scalar shift left */
static void handle_scalar_simd_shli(DisasContext *s, bool insert,
                                    int immh, int immb, int opcode,
                                    int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;

    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
    } else {
        tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);
}
/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
 * (signed/unsigned) narrowing */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                   bool is_u_shift, bool is_u_narrow,
                                   int immh, int immb, int opcode,
                                   int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int esize = 8 << size;
    int shift = (2 * esize) - immhb;
    int elements = is_scalar ? 1 : (64 / esize);
    bool round = extract32(opcode, 0, 1);
    MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
    TCGv_i32 tcg_rd_narrowed;
    TCGv_i64 tcg_final;

    static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
        { gen_helper_neon_narrow_sat_s8,
          gen_helper_neon_unarrow_sat8 },
        { gen_helper_neon_narrow_sat_s16,
          gen_helper_neon_unarrow_sat16 },
        { gen_helper_neon_narrow_sat_s32,
          gen_helper_neon_unarrow_sat32 },
        { NULL, NULL },
    };
    static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
        gen_helper_neon_narrow_sat_u8,
        gen_helper_neon_narrow_sat_u16,
        gen_helper_neon_narrow_sat_u32,
        NULL
    };
    NeonGenNarrowEnvFn *narrowfn;

    int i;

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_u_shift) {
        narrowfn = unsigned_narrow_fns[size];
    } else {
        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_rd_narrowed = tcg_temp_new_i32();
    tcg_final = tcg_temp_new_i64();

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, ldop);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, is_u_shift, size+1, shift);
        narrowfn(tcg_rd_narrowed, tcg_env, tcg_rd);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
        if (i == 0) {
            tcg_gen_extract_i64(tcg_final, tcg_rd, 0, esize);
        } else {
            tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
        }
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }
    clear_vec_high(s, is_q, rd);
}
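/*
 * Example: SQSHRN from 16-bit to 8-bit elements with shift #4 computes
 * elt >> 4 and then saturates to the signed 8-bit range, so 0x7fff
 * becomes 0x7ff after the shift and is clamped to 0x7f on narrowing.
 * The saturation itself lives in the neon_narrow_sat_* helpers, which
 * also set the QC flag when a value had to be clamped.
 */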
/* SQSHLU, UQSHL, SQSHL: saturating left shifts */
static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
                             bool src_unsigned, bool dst_unsigned,
                             int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int shift = immhb - (8 << size);
    int pass;

    assert(!(scalar && is_q));

    if (!scalar) {
        if (!is_q && extract32(immh, 3, 1)) {
            unallocated_encoding(s);
            return;
        }

        /* Since we use the variable-shift helpers we must
         * replicate the shift count into each element of
         * the tcg_shift value.
         */
        switch (size) {
        case 0:
            shift |= shift << 8;
            /* fall through */
        case 1:
            shift |= shift << 16;
            break;
        case 2:
        case 3:
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 3) {
        TCGv_i64 tcg_shift = tcg_constant_i64(shift);
        static NeonGenTwo64OpEnvFn * const fns[2][2] = {
            { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
            { NULL, gen_helper_neon_qshl_u64 },
        };
        NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
        int maxpass = is_q ? 2 : 1;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_op, tcg_env, tcg_op, tcg_shift);
            write_vec_element(s, tcg_op, rd, pass, MO_64);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        TCGv_i32 tcg_shift = tcg_constant_i32(shift);
        static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
            {
                { gen_helper_neon_qshl_s8,
                  gen_helper_neon_qshl_s16,
                  gen_helper_neon_qshl_s32 },
                { gen_helper_neon_qshlu_s8,
                  gen_helper_neon_qshlu_s16,
                  gen_helper_neon_qshlu_s32 }
            }, {
                { NULL, NULL, NULL },
                { gen_helper_neon_qshl_u8,
                  gen_helper_neon_qshl_u16,
                  gen_helper_neon_qshl_u32 }
            }
        };
        NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
        MemOp memop = scalar ? size : MO_32;
        int maxpass = scalar ? 1 : is_q ? 4 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, memop);
            genfn(tcg_op, tcg_env, tcg_op, tcg_shift);
            if (scalar) {
                switch (size) {
                case 0:
                    tcg_gen_ext8u_i32(tcg_op, tcg_op);
                    break;
                case 1:
                    tcg_gen_ext16u_i32(tcg_op, tcg_op);
                    break;
                case 2:
                    break;
                default:
                    g_assert_not_reached();
                }
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }
        }

        if (!scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
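/*
 * The shift-count replication above is needed because the neon_qshl_*
 * helpers are variable-shift helpers: they interpret their shift operand
 * per lane, so an operation on packed 8-bit lanes needs the same count
 * copied into every byte of tcg_shift.
 */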
/* Common vector code for handling integer to FP conversion */
static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
                                   int elements, int is_signed,
                                   int fracbits, int size)
{
    TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    TCGv_i32 tcg_shift = NULL;

    MemOp mop = size | (is_signed ? MO_SIGN : 0);
    int pass;

    if (fracbits || size == MO_64) {
        tcg_shift = tcg_constant_i32(fracbits);
    }

    if (size == MO_64) {
        TCGv_i64 tcg_int64 = tcg_temp_new_i64();
        TCGv_i64 tcg_double = tcg_temp_new_i64();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element(s, tcg_int64, rn, pass, mop);

            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            }
            if (elements == 1) {
                write_fp_dreg(s, rd, tcg_double);
            } else {
                write_vec_element(s, tcg_double, rd, pass, MO_64);
            }
        }
    } else {
        TCGv_i32 tcg_int32 = tcg_temp_new_i32();
        TCGv_i32 tcg_float = tcg_temp_new_i32();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element_i32(s, tcg_int32, rn, pass, mop);

            switch (size) {
            case MO_32:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            case MO_16:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                write_fp_sreg(s, rd, tcg_float);
            } else {
                write_vec_element_i32(s, tcg_float, rd, pass, size);
            }
        }
    }

    clear_vec_high(s, elements << size == 16, rd);
}
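/*
 * Example of the fixed-point path above: SCVTF with #8 fractional bits
 * maps the integer 0x180 (384) to 384 / 2^8 = 1.5, since the vfp_*to*
 * helpers divide by 2^fracbits after the integer-to-float conversion.
 */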
/* UCVTF/SCVTF - Integer to FP conversion */
static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int opcode,
                                         int rn, int rd)
{
    int size, elements, fracbits;
    int immhb = immh << 3 | immb;

    if (immh & 8) {
        size = MO_64;
        if (!is_scalar && !is_q) {
            unallocated_encoding(s);
            return;
        }
    } else if (immh & 4) {
        size = MO_32;
    } else if (immh & 2) {
        size = MO_16;
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* immh == 0 would be a failure of the decode logic */
        g_assert(immh == 1);
        unallocated_encoding(s);
        return;
    }

    if (is_scalar) {
        elements = 1;
    } else {
        elements = (8 << is_q) >> size;
    }
    fracbits = (16 << size) - immhb;

    if (!fp_access_check(s)) {
        return;
    }

    handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
}
/* FCVTZS, FCVTZU - FP to fixedpoint conversion */
static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int pass, size, fracbits;
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_rmode, tcg_shift;

    if (immh & 0x8) {
        size = MO_64;
        if (!is_scalar && !is_q) {
            unallocated_encoding(s);
            return;
        }
    } else if (immh & 0x4) {
        size = MO_32;
    } else if (immh & 0x2) {
        size = MO_16;
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* Should have split out AdvSIMD modified immediate earlier. */
        assert(immh == 1);
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    assert(!(is_scalar && is_q));

    tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, tcg_fpstatus);
    fracbits = (16 << size) - immhb;
    tcg_shift = tcg_constant_i32(fracbits);

    if (size == MO_64) {
        int maxpass = is_scalar ? 1 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (is_u) {
                gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            }
            write_vec_element(s, tcg_op, rd, pass, MO_64);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
        int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);

        switch (size) {
        case MO_16:
            if (is_u) {
                fn = gen_helper_vfp_touhh;
            } else {
                fn = gen_helper_vfp_toshh;
            }
            break;
        case MO_32:
            if (is_u) {
                fn = gen_helper_vfp_touls;
            } else {
                fn = gen_helper_vfp_tosls;
            }
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, size);
            fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            if (is_scalar) {
                if (size == MO_16 && !is_u) {
                    tcg_gen_ext16u_i32(tcg_op, tcg_op);
                }
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, size);
            }
        }
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    gen_restore_rmode(tcg_rmode, tcg_fpstatus);
}
/* AdvSIMD scalar shift by immediate
 *  31 30  29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +-----+---+-------------+------+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-------------+------+------+--------+---+------+------+
 *
 * This is the scalar version so it works on fixed-size registers.
 */
static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);

    if (immh == 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF, UCVTF */
        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0x10: /* SQSHRUN, SQSHRUN2 */
    case 0x11: /* SQRSHRUN, SQRSHRUN2 */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_vec_simd_sqshrn(s, true, false, false, true,
                               immh, immb, opcode, rn, rd);
        break;
    case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
    case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
                               immh, immb, opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS, FCVTZU */
        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* AdvSIMD scalar three different
 *  31 30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
    bool is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (is_u) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x9: /* SQDMLAL, SQDMLAL2 */
    case 0xb: /* SQDMLSL, SQDMLSL2 */
    case 0xd: /* SQDMULL, SQDMULL2 */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 2) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);

        tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s64(tcg_res, tcg_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            tcg_gen_neg_i64(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
            read_vec_element(s, tcg_op1, rd, 0, MO_64);
            gen_helper_neon_addl_saturate_s64(tcg_res, tcg_env,
                                              tcg_res, tcg_op1);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);
    } else {
        TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
        TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s32(tcg_res, tcg_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            gen_helper_neon_negl_u32(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
        {
            TCGv_i64 tcg_op3 = tcg_temp_new_i64();
            read_vec_element(s, tcg_op3, rd, 0, MO_32);
            gen_helper_neon_addl_saturate_s32(tcg_res, tcg_env,
                                              tcg_res, tcg_op3);
            break;
        }
        default:
            g_assert_not_reached();
        }

        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        write_fp_dreg(s, rd, tcg_res);
    }
}
static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
    /* Handle 64->64 opcodes which are shared between the scalar and
     * vector 2-reg-misc groups. We cover every integer opcode where size == 3
     * is valid in either group and also the double-precision fp ops.
     * The caller need only provide tcg_rmode and tcg_fpstatus if the op
     * requires them.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x4: /* CLS, CLZ */
        if (u) {
            tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
        } else {
            tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x5: /* NOT */
        /* This opcode is shared with CNT and RBIT but we have earlier
         * enforced that size == 3 if and only if this is the NOT insn.
         */
        tcg_gen_not_i64(tcg_rd, tcg_rn);
        break;
    case 0x7: /* SQABS, SQNEG */
        if (u) {
            gen_helper_neon_qneg_s64(tcg_rd, tcg_env, tcg_rn);
        } else {
            gen_helper_neon_qabs_s64(tcg_rd, tcg_env, tcg_rn);
        }
        break;
    case 0xa: /* CMLT */
        cond = TCG_COND_LT;
    do_cmop:
        /* 64 bit integer comparison against zero, result is test ? -1 : 0. */
        tcg_gen_negsetcond_i64(cond, tcg_rd, tcg_rn, tcg_constant_i64(0));
        break;
    case 0x8: /* CMGT, CMGE */
        cond = u ? TCG_COND_GE : TCG_COND_GT;
        goto do_cmop;
    case 0x9: /* CMEQ, CMLE */
        cond = u ? TCG_COND_LE : TCG_COND_EQ;
        goto do_cmop;
    case 0xb: /* ABS, NEG */
        if (u) {
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
        } else {
            tcg_gen_abs_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x2f: /* FABS */
        gen_vfp_absd(tcg_rd, tcg_rn);
        break;
    case 0x6f: /* FNEG */
        gen_vfp_negd(tcg_rd, tcg_rn);
        break;
    case 0x7f: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, tcg_env);
        break;
    case 0x1a: /* FCVTNS */
    case 0x1b: /* FCVTMS */
    case 0x1c: /* FCVTAS */
    case 0x3a: /* FCVTPS */
    case 0x3b: /* FCVTZS */
        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
        break;
    case 0x5a: /* FCVTNU */
    case 0x5b: /* FCVTMU */
    case 0x5c: /* FCVTAU */
    case 0x7a: /* FCVTPU */
    case 0x7b: /* FCVTZU */
        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
        break;
    case 0x18: /* FRINTN */
    case 0x19: /* FRINTM */
    case 0x38: /* FRINTP */
    case 0x39: /* FRINTZ */
    case 0x58: /* FRINTA */
    case 0x79: /* FRINTI */
        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x59: /* FRINTX */
        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1e: /* FRINT32Z */
    case 0x5e: /* FRINT32X */
        gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1f: /* FRINT64Z */
    case 0x5f: /* FRINT64X */
        gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    default:
        g_assert_not_reached();
    }
}
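/*
 * Note on the comparison cases above: tcg_gen_negsetcond_i64() produces
 * -1 (all ones) when the condition holds and 0 otherwise, which matches
 * the AArch64 result of CMGT/CMGE/CMEQ/CMLE/CMLT against zero.
 */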
static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
                                   bool is_scalar, bool is_u, bool is_q,
                                   int size, int rn, int rd)
{
    bool is_double = (size == MO_64);
    TCGv_ptr fpst;

    if (!fp_access_check(s)) {
        return;
    }

    fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_zero = tcg_constant_i64(0);
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        NeonGenTwoDoubleOpFn *genfn;
        bool swap = false;
        int pass;

        switch (opcode) {
        case 0x2e: /* FCMLT (zero) */
            swap = true;
            /* fall through */
        case 0x2c: /* FCMGT (zero) */
            genfn = gen_helper_neon_cgt_f64;
            break;
        case 0x2d: /* FCMEQ (zero) */
            genfn = gen_helper_neon_ceq_f64;
            break;
        case 0x6d: /* FCMLE (zero) */
            swap = true;
            /* fall through */
        case 0x6c: /* FCMGE (zero) */
            genfn = gen_helper_neon_cge_f64;
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }

        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_zero = tcg_constant_i32(0);
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        NeonGenTwoSingleOpFn *genfn;
        bool swap = false;
        int pass, maxpasses;

        if (size == MO_16) {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_advsimd_cgt_f16;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_advsimd_ceq_f16;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_advsimd_cge_f16;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_neon_cgt_f32;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_neon_ceq_f32;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_neon_cge_f32;
                break;
            default:
                g_assert_not_reached();
            }
        }

        if (is_scalar) {
            maxpasses = 1;
        } else {
            int vector_size = 8 << is_q;
            maxpasses = vector_size >> size;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, size);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, size);
            }
        }

        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
static void handle_2misc_reciprocal(DisasContext *s, int opcode,
                                    bool is_scalar, bool is_u, bool is_q,
                                    int size, int rn, int rd)
{
    bool is_double = (size == 3);
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            switch (opcode) {
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            switch (opcode) {
            case 0x3c: /* URECPE */
                gen_helper_recpe_u32(tcg_res, tcg_op);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
static void handle_2misc_narrow(DisasContext *s, bool scalar,
                                int opcode, bool u, bool is_q,
                                int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
     * in the source becomes a size element in the destination).
     */
    int pass;
    TCGv_i32 tcg_res[2];
    int destelt = is_q ? 2 : 0;
    int passes = scalar ? 1 : 2;

    if (scalar) {
        tcg_res[1] = tcg_constant_i32(0);
    }

    for (pass = 0; pass < passes; pass++) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        NeonGenNarrowFn *genfn = NULL;
        NeonGenNarrowEnvFn *genenvfn = NULL;

        if (scalar) {
            read_vec_element(s, tcg_op, rn, pass, size + 1);
        } else {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
        }
        tcg_res[pass] = tcg_temp_new_i32();

        switch (opcode) {
        case 0x12: /* XTN, SQXTUN */
        {
            static NeonGenNarrowFn * const xtnfns[3] = {
                gen_helper_neon_narrow_u8,
                gen_helper_neon_narrow_u16,
                tcg_gen_extrl_i64_i32,
            };
            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
                gen_helper_neon_unarrow_sat8,
                gen_helper_neon_unarrow_sat16,
                gen_helper_neon_unarrow_sat32,
            };
            if (u) {
                genenvfn = sqxtunfns[size];
            } else {
                genfn = xtnfns[size];
            }
            break;
        }
        case 0x14: /* SQXTN, UQXTN */
        {
            static NeonGenNarrowEnvFn * const fns[3][2] = {
                { gen_helper_neon_narrow_sat_s8,
                  gen_helper_neon_narrow_sat_u8 },
                { gen_helper_neon_narrow_sat_s16,
                  gen_helper_neon_narrow_sat_u16 },
                { gen_helper_neon_narrow_sat_s32,
                  gen_helper_neon_narrow_sat_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* FCVTN, FCVTN2 */
            /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
            if (size == 2) {
                gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, tcg_env);
            } else {
                TCGv_i32 tcg_lo = tcg_temp_new_i32();
                TCGv_i32 tcg_hi = tcg_temp_new_i32();
                TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
                TCGv_i32 ahp = get_ahp_flag();

                tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
                tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
            }
            break;
        case 0x36: /* BFCVTN, BFCVTN2 */
        {
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
            gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst);
            break;
        }
        case 0x56: /* FCVTXN, FCVTXN2 */
            /* 64 bit to 32 bit float conversion
             * with von Neumann rounding (round to odd)
             */
            assert(size == 2);
            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, tcg_env);
            break;
        default:
            g_assert_not_reached();
        }

        if (genfn) {
            genfn(tcg_res[pass], tcg_op);
        } else if (genenvfn) {
            genenvfn(tcg_res[pass], tcg_env, tcg_op);
        }
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
    }
    clear_vec_high(s, is_q, rd);
}
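/*
 * FCVTXN's "von Neumann" rounding above rounds to odd: the helper sets
 * the least significant result bit whenever any discarded mantissa bit
 * was nonzero.  That preserves enough information that a later rounding
 * of the 32-bit result cannot introduce a double-rounding error.
 */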
/* AdvSIMD scalar two reg misc
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 12, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_fcvt = false;
    int rmode;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x7: /* SQABS / SQNEG */
        break;
    case 0xa: /* CMLT */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x12: /* SQXTUN */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x14: /* SQXTN, UQXTN */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
        return;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = extract32(size, 0, 1) ? 3 : 2;
        switch (opcode) {
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d);
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
            return;
        }
        case 0x3d: /* FRECPE */
        case 0x3f: /* FRECPX */
        case 0x7d: /* FRSQRTE */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            is_fcvt = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            break;
        case 0x1c: /* FCVTAS */
        case 0x5c: /* FCVTAU */
            /* TIEAWAY doesn't fit in the usual rounding mode encoding */
            is_fcvt = true;
            rmode = FPROUNDING_TIEAWAY;
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
            return;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
    case 0x3: /* USQADD / SUQADD */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fcvt) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
        tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
        tcg_fpstatus = NULL;
    }

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rd = tcg_temp_new_i64();

        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
        write_fp_dreg(s, rd, tcg_rd);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);

        switch (opcode) {
        case 0x7: /* SQABS, SQNEG */
        {
            NeonGenOneOpEnvFn *genfn;
            static NeonGenOneOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
            };
            genfn = fns[size][u];
            genfn(tcg_rd, tcg_env, tcg_rn);
            break;
        }
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_constant_i32(0),
                                 tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_constant_i32(0),
                                 tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_rd);
    }

    if (is_fcvt) {
        gen_restore_rmode(tcg_rmode, tcg_fpstatus);
    }
}
/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    GVecGen2iFn *gvec_fn;

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }
    tcg_debug_assert(size <= 3);

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
        break;

    case 0x08: /* SRI */
        gvec_fn = gen_gvec_sri;
        break;

    case 0x00: /* SSHR / USHR */
        if (is_u) {
            if (shift == 8 << size) {
                /* Shift count the same size as element size produces zero. */
                tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
                                     is_q ? 16 : 8, vec_full_reg_size(s), 0);
                return;
            }
            gvec_fn = tcg_gen_gvec_shri;
        } else {
            /* Shift count the same size as element size produces all sign. */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gvec_fn = tcg_gen_gvec_sari;
        }
        break;

    case 0x04: /* SRSHR / URSHR (rounding) */
        gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
        break;

    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
        break;

    default:
        g_assert_not_reached();
    }

    gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
}
/* SHL/SLI - Vector shift left */
static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);

    /* Range of size is limited by decode: immh is a non-zero 4 bit field */
    assert(size >= 0 && size <= 3);

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (insert) {
        gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
    } else {
        gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
    }
}
/* USHLL/SHLL - Vector shift left with widening */
static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
                                  int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGv_i64 tcg_rn = tcg_temp_new_i64();
    TCGv_i64 tcg_rd = tcg_temp_new_i64();
    int i;

    if (size >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* For the LL variants the store is larger than the load,
     * so if rd == rn we would overwrite parts of our input.
     * So load everything right now and use shifts in the main loop.
     */
    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);

    for (i = 0; i < elements; i++) {
        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
        write_vec_element(s, tcg_rd, rd, i, size + 1);
    }
}
/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    int shift = (2 * esize) - immhb;
    bool round = extract32(opcode, 0, 1);
    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
    TCGv_i64 tcg_round;
    int i;

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_final = tcg_temp_new_i64();
    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, size+1);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, true, size+1, shift);

        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }

    clear_vec_high(s, is_q, rd);
}
/* AdvSIMD shift by immediate
 *  31  30   29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +---+---+---+-------------+------+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-------------+------+------+--------+---+------+------+
 */
static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */
    assert(immh != 0);

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA (accumulate) */
    case 0x04: /* SRSHR / URSHR (rounding) */
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x10: /* SHRN */
    case 0x11: /* RSHRN / SQRSHRUN */
        if (is_u) {
            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
                                   opcode, rn, rd);
        } else {
            handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
        }
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
                               opcode, rn, rd);
        break;
    case 0x14: /* SSHLL / USHLL */
        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF */
        handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS / FCVTZU */
        handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
/* Generate code to do a "long" addition or subtraction, i.e. one done in
 * TCGv_i64 on vector lanes twice the width specified by size.
 */
static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
                          TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
{
    static NeonGenTwo64OpFn * const fns[3][2] = {
        { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
        { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
        { tcg_gen_add_i64, tcg_gen_sub_i64 },
    };
    NeonGenTwo64OpFn *genfn;
    assert(size < 3);

    genfn = fns[size][is_sub];
    genfn(tcg_res, tcg_op1, tcg_op2);
}
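/*
 * For example, gen_neon_addl(0, false, ...) uses gen_helper_neon_addl_u16,
 * which adds the four 16-bit lanes packed into each TCGv_i64 independently
 * (no carry between lanes): the 8-bit elements have already been widened
 * to 16 bits by the callers.
 */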
static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
                                int opcode, int rd, int rn, int rm)
{
    /* 3-reg-different widening insns: 64 x 64 -> 128 */
    TCGv_i64 tcg_res[2];
    int pass, accop;

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    /* Does this op do an adding accumulate, a subtracting accumulate,
     * or no accumulate at all?
     */
    switch (opcode) {
    case 5:
    case 8:
    case 9:
        accop = 1;
        break;
    case 10:
    case 11:
        accop = -1;
        break;
    default:
        accop = 0;
        break;
    }

    if (accop != 0) {
        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
    }

    /* size == 2 means two 32x32->64 operations; this is worth special
     * casing because we can generally handle it inline.
     */
    if (size == 2) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_passres;
            MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);

            int elt = pass + is_q * 2;

            read_vec_element(s, tcg_op1, rn, elt, memop);
            read_vec_element(s, tcg_op2, rm, elt, memop);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
                tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
                tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
            {
                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();

                tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
                tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
                                    tcg_passres,
                                    tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
                break;
            }
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s64(tcg_passres, tcg_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (opcode == 9 || opcode == 11) {
                /* saturating accumulate ops */
                if (accop < 0) {
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                }
                gen_helper_neon_addl_saturate_s64(tcg_res[pass], tcg_env,
                                                  tcg_res[pass], tcg_passres);
            } else if (accop > 0) {
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            } else if (accop < 0) {
                tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            }
        }
    } else {
        /* size 0 or 1, generally helper functions */
        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i64 tcg_passres;
            int elt = pass + is_q * 2;

            read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
            {
                TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
                static NeonGenWidenFn * const widenfns[2][2] = {
                    { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
                    { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
                };
                NeonGenWidenFn *widenfn = widenfns[size][is_u];

                widenfn(tcg_op2_64, tcg_op2);
                widenfn(tcg_passres, tcg_op1);
                gen_neon_addl(size, (opcode == 2), tcg_passres,
                              tcg_passres, tcg_op2_64);
                break;
            }
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                assert(size == 1);
                gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s32(tcg_passres, tcg_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (accop != 0) {
                if (opcode == 9 || opcode == 11) {
                    /* saturating accumulate ops */
                    if (accop < 0) {
                        gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    }
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], tcg_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                } else {
                    gen_neon_addl(size, (accop < 0), tcg_res[pass],
                                  tcg_res[pass], tcg_passres);
                }
            }
        }
    }

    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
}
static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
                            int opcode, int rd, int rn, int rm)
{
    TCGv_i64 tcg_res[2];
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
        static NeonGenWidenFn * const widenfns[3][2] = {
            { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
            { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
            { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
        };
        NeonGenWidenFn *widenfn = widenfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
        widenfn(tcg_op2_wide, tcg_op2);
        tcg_res[pass] = tcg_temp_new_i64();
        gen_neon_addl(size, (opcode == 3),
                      tcg_res[pass], tcg_op1, tcg_op2_wide);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
    }
}
static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
{
    tcg_gen_addi_i64(in, in, 1U << 31);
    tcg_gen_extrh_i64_i32(res, in);
}
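/*
 * Adding 1 << 31 before taking the high half implements round-to-nearest
 * for the narrowing: the result is (in + 2^31) >> 32, i.e. the high word
 * rounded rather than simply truncated.
 */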
static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
                                 int opcode, int rd, int rn, int rm)
{
    TCGv_i32 tcg_res[2];
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_wideres = tcg_temp_new_i64();
        static NeonGenNarrowFn * const narrowfns[3][2] = {
            { gen_helper_neon_narrow_high_u8,
              gen_helper_neon_narrow_round_high_u8 },
            { gen_helper_neon_narrow_high_u16,
              gen_helper_neon_narrow_round_high_u16 },
            { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
        };
        NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);

        tcg_res[pass] = tcg_temp_new_i32();
        gennarrow(tcg_res[pass], tcg_wideres);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
    }
    clear_vec_high(s, is_q, rd);
}
/* AdvSIMD three different
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
{
    /* Instructions in this group fall into three basic classes
     * (in each case with the operation working on each element in
     * the input vectors):
     * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
     *     128 bit input)
     * (2) wide 64 x 128 -> 128
     * (3) narrowing 128 x 128 -> 64
     * Here we do initial decode, catch unallocated cases and
     * dispatch to separate functions for each class.
     */
    int is_q = extract32(insn, 30, 1);
    int is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
    case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
        /* 64 x 128 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
    case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
        /* 128 x 128 -> 64 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 14: /* PMULL, PMULL2 */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        switch (size) {
        case 0: /* PMULL.P8 */
            if (!fp_access_check(s)) {
                return;
            }
            /* The Q field specifies lo/hi half input for this insn. */
            gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
                             gen_helper_neon_pmull_h);
            break;

        case 3: /* PMULL.P64 */
            if (!dc_isar_feature(aa64_pmull, s)) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            /* The Q field specifies lo/hi half input for this insn. */
            gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
                             gen_helper_gvec_pmull_q);
            break;

        default:
            unallocated_encoding(s);
            break;
        }
        return;
    case 9: /* SQDMLAL, SQDMLAL2 */
    case 11: /* SQDMLSL, SQDMLSL2 */
    case 13: /* SQDMULL, SQDMULL2 */
        if (is_u || size == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
    case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
    case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
    case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
    case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
    case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
    case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
        /* 64 x 64 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    default:
        /* opcode 15 not allocated */
        unallocated_encoding(s);
        break;
    }
}
/* AdvSIMD three same extra
 *  31   30  29 28       24 23  22  21 20  16 15 14    11  10 9  5 4  0
 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
 * | 0 | Q | U | 0 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
 */
static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    bool feature;
    int rot;

    switch (u * 16 + opcode) {
    case 0x04: /* SMMLA */
    case 0x14: /* UMMLA */
    case 0x05: /* USMMLA */
        if (!is_q || size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_i8mm, s);
        break;
    case 0x18: /* FCMLA, #0 */
    case 0x19: /* FCMLA, #90 */
    case 0x1a: /* FCMLA, #180 */
    case 0x1b: /* FCMLA, #270 */
    case 0x1c: /* FCADD, #90 */
    case 0x1e: /* FCADD, #270 */
        if (size == 0
            || (size == 1 && !dc_isar_feature(aa64_fp16, s))
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_fcma, s);
        break;
    case 0x1d: /* BFMMLA */
        if (size != MO_16 || !is_q) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_bf16, s);
        break;
    default:
    case 0x02: /* SDOT (vector) */
    case 0x03: /* USDOT */
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
    case 0x12: /* UDOT (vector) */
    case 0x1f: /* BFDOT / BFMLAL */
        unallocated_encoding(s);
        return;
    }
    if (!feature) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x04: /* SMMLA, UMMLA */
        gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0,
                         u ? gen_helper_gvec_ummla_b
                         : gen_helper_gvec_smmla_b);
        return;
    case 0x05: /* USMMLA */
        gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, gen_helper_gvec_usmmla_b);
        return;
    case 0x8: /* FCMLA, #0 */
    case 0x9: /* FCMLA, #90 */
    case 0xa: /* FCMLA, #180 */
    case 0xb: /* FCMLA, #270 */
        rot = extract32(opcode, 0, 2);
        switch (size) {
        case 1:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot,
                              gen_helper_gvec_fcmlah);
            break;
        case 2:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
                              gen_helper_gvec_fcmlas);
            break;
        case 3:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
                              gen_helper_gvec_fcmlad);
            break;
        default:
            g_assert_not_reached();
        }
        return;
    case 0xc: /* FCADD, #90 */
    case 0xe: /* FCADD, #270 */
        rot = extract32(opcode, 1, 1);
        switch (size) {
        case 1:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddh);
            break;
        case 2:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcadds);
            break;
        case 3:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddd);
            break;
        default:
            g_assert_not_reached();
        }
        return;
    case 0xd: /* BFMMLA */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla);
        return;
    default:
        g_assert_not_reached();
    }
}
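/*
 * Rotation encoding used above: for FCMLA the two low opcode bits select
 * 0/90/180/270 degrees, and for FCADD opcode bit 1 selects 90 or 270;
 * the rot value is passed straight through to the gvec helpers.
 */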
static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
                                  int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are widening (so each size element
     * in the source becomes a 2*size element in the destination.
     * The only instruction like this is FCVTL.
     */
    int pass;

    if (size == 3) {
        /* 32 -> 64 bit fp conversion */
        TCGv_i64 tcg_res[2];
        int srcelt = is_q ? 2 : 0;

        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
            gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, tcg_env);
        }
        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        }
    } else {
        /* 16 -> 32 bit fp conversion */
        int srcelt = is_q ? 4 : 0;
        TCGv_i32 tcg_res[4];
        TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
        TCGv_i32 ahp = get_ahp_flag();

        for (pass = 0; pass < 4; pass++) {
            tcg_res[pass] = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
            gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
                                           fpst, ahp);
        }
        for (pass = 0; pass < 4; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
        }
    }
}
static void handle_rev(DisasContext *s, int opcode, bool u,
                       bool is_q, int size, int rn, int rd)
{
    int op = (opcode << 1) | u;
    int opsz = op + size;
    int grp_size = 3 - opsz;
    int dsize = is_q ? 128 : 64;
    int i;

    if (opsz >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 0) {
        /* Special case bytes, use bswap op on each group of elements */
        int groups = dsize / (8 << grp_size);

        for (i = 0; i < groups; i++) {
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            read_vec_element(s, tcg_tmp, rn, i, grp_size);
            switch (grp_size) {
            case MO_16:
                tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
                break;
            case MO_32:
                tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
                break;
            case MO_64:
                tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_tmp, rd, i, grp_size);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        int revmask = (1 << grp_size) - 1;
        int esize = 8 << size;
        int elements = dsize / esize;
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd[2];

        for (i = 0; i < 2; i++) {
            tcg_rd[i] = tcg_temp_new_i64();
            tcg_gen_movi_i64(tcg_rd[i], 0);
        }

        for (i = 0; i < elements; i++) {
            int e_rev = (i & 0xf) ^ revmask;
            int w = (e_rev * esize) / 64;
            int o = (e_rev * esize) % 64;

            read_vec_element(s, tcg_rn, rn, i, size);
            tcg_gen_deposit_i64(tcg_rd[w], tcg_rd[w], tcg_rn, o, esize);
        }

        for (i = 0; i < 2; i++) {
            write_vec_element(s, tcg_rd[i], rd, i, MO_64);
        }
        clear_vec_high(s, true, rd);
    }
}
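/*
 * The XOR trick above reverses element positions within each group:
 * for REV64 on 16-bit elements grp_size is 2, revmask is 3, and lane
 * indices 0,1,2,3 map to 3,2,1,0 within every 64-bit doubleword.
 */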
static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
                                  bool is_q, int size, int rn, int rd)
{
    /* Implement the pairwise operations from 2-misc:
     * SADDLP, UADDLP, SADALP, UADALP.
     * These all add pairs of elements in the input to produce a
     * double-width result element in the output (possibly accumulating).
     */
    bool accum = (opcode == 0x6);
    int maxpass = is_q ? 2 : 1;
    int pass;
    TCGv_i64 tcg_res[2];

    if (size == 2) {
        /* 32 + 32 -> 64 op */
        MemOp memop = size + (u ? 0 : MO_SIGN);

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass * 2, memop);
            read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
            tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
            if (accum) {
                read_vec_element(s, tcg_op1, rd, pass, MO_64);
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
            }
        }
    } else {
        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            NeonGenOne64OpFn *genfn;
            static NeonGenOne64OpFn * const fns[2][2] = {
                { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
                { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
            };

            genfn = fns[size][u];

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_res[pass], tcg_op);

            if (accum) {
                read_vec_element(s, tcg_op, rd, pass, MO_64);
                if (size == 0) {
                    gen_helper_neon_addl_u16(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                } else {
                    gen_helper_neon_addl_u32(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                }
            }
        }
    }
    if (!is_q) {
        tcg_res[1] = tcg_constant_i64(0);
    }
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
    }
}
static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
{
    /* Implement SHLL and SHLL2 */
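    /*
     * For example, SHLL Vd.8H, Vn.8B, #8 widens each byte of the low half
     * of Vn and shifts it left by the source element size (8 << size);
     * SHLL2 (is_q set) reads the high half of Vn instead.
     */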
    int pass;
    int part = is_q ? 2 : 0;
    TCGv_i64 tcg_res[2];

    for (pass = 0; pass < 2; pass++) {
        static NeonGenWidenFn * const widenfns[3] = {
            gen_helper_neon_widen_u8,
            gen_helper_neon_widen_u16,
            tcg_gen_extu_i32_i64,
        };
        NeonGenWidenFn *widenfn = widenfns[size];
        TCGv_i32 tcg_op = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
        tcg_res[pass] = tcg_temp_new_i64();
        widenfn(tcg_res[pass], tcg_op);
        tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
    }
}
/* AdvSIMD two reg misc
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool need_fpstatus = false;
    int rmode = -1;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x0: /* REV64, REV32 */
    case 0x1: /* REV16 */
        handle_rev(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x5: /* CNT, NOT, RBIT */
        if (u && size == 0) {
            /* NOT */
            break;
        } else if (u && size == 1) {
            /* RBIT */
            break;
        } else if (!u && size == 0) {
            /* CNT */
            break;
        }
        unallocated_encoding(s);
        return;
    case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
    case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
        return;
    case 0x4: /* CLS, CLZ */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x2: /* SADDLP, UADDLP */
    case 0x6: /* SADALP, UADALP */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x13: /* SHLL, SHLL2 */
        if (u == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_shll(s, is_q, size, rn, rd);
        return;
    case 0xa: /* CMLT */
        if (u == 1) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x7: /* SQABS, SQNEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc ... 0xf:
    case 0x16 ... 0x1f:
    {
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        int is_double = extract32(size, 0, 1);
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = is_double ? 3 : 2;
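        /*
         * For example, vector FNEG has U == 1, size == 1x and opcode
         * field 0xf, so it becomes case 0x6f below; size[0] is left to
         * select single versus double precision.
         */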
        switch (opcode) {
        case 0x2f: /* FABS */
        case 0x6f: /* FNEG */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d) ? true : false;
            int elements = is_double ? 2 : is_q ? 4 : 2;
            if (is_double && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
            return;
        }
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x7f: /* FSQRT */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            need_fpstatus = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x5c: /* FCVTAU */
        case 0x1c: /* FCVTAS */
            need_fpstatus = true;
            rmode = FPROUNDING_TIEAWAY;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x3c: /* URECPE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x3d: /* FRECPE */
        case 0x7d: /* FRSQRTE */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x16: /* FCVTN, FCVTN2 */
            /* handle_2misc_narrow does a 2*size -> size operation, but these
             * instructions encode the source size rather than dest size.
             */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x36: /* BFCVTN, BFCVTN2 */
            if (!dc_isar_feature(aa64_bf16, s) || size != 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x17: /* FCVTL, FCVTL2 */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_widening(s, opcode, is_q, size, rn, rd);
            return;
        case 0x18: /* FRINTN */
        case 0x19: /* FRINTM */
        case 0x38: /* FRINTP */
        case 0x39: /* FRINTZ */
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            /* fall through */
        case 0x59: /* FRINTX */
        case 0x79: /* FRINTI */
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x58: /* FRINTA */
            rmode = FPROUNDING_TIEAWAY;
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x7c: /* URSQRTE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1e: /* FRINT32Z */
        case 0x1f: /* FRINT64Z */
            rmode = FPROUNDING_ZERO;
            /* fall through */
        case 0x5e: /* FRINT32X */
        case 0x5f: /* FRINT64X */
            need_fpstatus = true;
            if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) {
                unallocated_encoding(s);
                return;
            }
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    default:
    case 0x3: /* SUQADD, USQADD */
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    if (need_fpstatus || rmode >= 0) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
    } else {
        tcg_fpstatus = NULL;
    }
    if (rmode >= 0) {
        tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
    }

    switch (opcode) {
    case 0x5:
        if (u && size == 0) { /* NOT */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
            return;
        }
        break;
    case 0x8: /* CMGT, CMGE */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
        }
        return;
    case 0x9: /* CMEQ, CMLE */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
        }
        return;
    case 0xa: /* CMLT */
        gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
        return;
    case 0xb:
        if (u) { /* ABS, NEG */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
        }
        return;
    }

    if (size == 3) {
        /* All 64-bit element operations can be shared with scalar 2misc */
        int pass;

        /* Coverity claims (size == 3 && !is_q) has been eliminated
         * from all paths leading to here.
         */
        tcg_debug_assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
                            tcg_rmode, tcg_fpstatus);

            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
    } else {
        int pass;

        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            if (size == 2) {
                /* Special cases for 32 bit elements */
                switch (opcode) {
                case 0x4: /* CLS */
                    if (u) {
                        tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
                    } else {
                        tcg_gen_clrsb_i32(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                    if (u) {
                        gen_helper_neon_qneg_s32(tcg_res, tcg_env, tcg_op);
                    } else {
                        gen_helper_neon_qabs_s32(tcg_res, tcg_env, tcg_op);
                    }
                    break;
                case 0x2f: /* FABS */
                    gen_vfp_abss(tcg_res, tcg_op);
                    break;
                case 0x6f: /* FNEG */
                    gen_vfp_negs(tcg_res, tcg_op);
                    break;
                case 0x7f: /* FSQRT */
                    gen_helper_vfp_sqrts(tcg_res, tcg_op, tcg_env);
                    break;
                case 0x1a: /* FCVTNS */
                case 0x1b: /* FCVTMS */
                case 0x1c: /* FCVTAS */
                case 0x3a: /* FCVTPS */
                case 0x3b: /* FCVTZS */
                    gen_helper_vfp_tosls(tcg_res, tcg_op,
                                         tcg_constant_i32(0), tcg_fpstatus);
                    break;
                case 0x5a: /* FCVTNU */
                case 0x5b: /* FCVTMU */
                case 0x5c: /* FCVTAU */
                case 0x7a: /* FCVTPU */
                case 0x7b: /* FCVTZU */
                    gen_helper_vfp_touls(tcg_res, tcg_op,
                                         tcg_constant_i32(0), tcg_fpstatus);
                    break;
                case 0x18: /* FRINTN */
                case 0x19: /* FRINTM */
                case 0x38: /* FRINTP */
                case 0x39: /* FRINTZ */
                case 0x58: /* FRINTA */
                case 0x79: /* FRINTI */
                    gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x59: /* FRINTX */
                    gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x7c: /* URSQRTE */
                    gen_helper_rsqrte_u32(tcg_res, tcg_op);
                    break;
                case 0x1e: /* FRINT32Z */
                case 0x5e: /* FRINT32X */
                    gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x1f: /* FRINT64Z */
                case 0x5f: /* FRINT64X */
                    gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else {
                /* Use helpers for 8 and 16 bit elements */
                switch (opcode) {
                case 0x5: /* CNT, RBIT */
                    /* For these two insns size is part of the opcode specifier
                     * (handled earlier); they always operate on byte elements.
                     */
                    if (u) {
                        gen_helper_neon_rbit_u8(tcg_res, tcg_op);
                    } else {
                        gen_helper_neon_cnt_u8(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                {
                    NeonGenOneOpEnvFn *genfn;
                    static NeonGenOneOpEnvFn * const fns[2][2] = {
                        { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                        { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                    };
                    genfn = fns[size][u];
                    genfn(tcg_res, tcg_env, tcg_op);
                    break;
                }
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        if (size == 0) {
                            gen_helper_neon_clz_u8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_clz_u16(tcg_res, tcg_op);
                        }
                    } else {
                        if (size == 0) {
                            gen_helper_neon_cls_s8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_cls_s16(tcg_res, tcg_op);
                        }
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
        }
    }
    clear_vec_high(s, is_q, rd);

    if (tcg_rmode) {
        gen_restore_rmode(tcg_rmode, tcg_fpstatus);
    }
}
/* AdvSIMD [scalar] two register miscellaneous (FP16)
 *
 *   31  30  29 28  27     24  23 22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
 *   mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
 *   val:  0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
 *
 * This actually covers two groups where scalar access is governed by
 * bit 28. A bunch of the instructions (float to integral) only exist
 * in the vector form and are un-allocated for the scalar decode. Also
 * in the scalar decode Q is always 1.
 */
static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
{
    int fpop, opcode, a, u;
    int rn, rd;
    bool is_q;
    bool is_scalar;
    bool only_in_vector = false;

    int pass;
    TCGv_i32 tcg_rmode = NULL;
    TCGv_ptr tcg_fpstatus = NULL;
    bool need_fpst = true;
    int rmode = -1;

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    rd = extract32(insn, 0, 5);
    rn = extract32(insn, 5, 5);

    a = extract32(insn, 23, 1);
    u = extract32(insn, 29, 1);
    is_scalar = extract32(insn, 28, 1);
    is_q = extract32(insn, 30, 1);

    opcode = extract32(insn, 12, 5);
    fpop = deposit32(opcode, 5, 1, a);
    fpop = deposit32(fpop, 6, 1, u);
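    /*
     * For example, FCVTZU (vector, fp16) has a == 1, U == 1 and opcode
     * field 0x1b, so fpop == 0x7b below.
     */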
    switch (fpop) {
    case 0x1d: /* SCVTF */
    case 0x5d: /* UCVTF */
    {
        int elements;

        if (is_scalar) {
            elements = 1;
        } else {
            elements = (is_q ? 8 : 4);
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
        return;
    }
    case 0x2c: /* FCMGT (zero) */
    case 0x2d: /* FCMEQ (zero) */
    case 0x2e: /* FCMLT (zero) */
    case 0x6c: /* FCMGE (zero) */
    case 0x6d: /* FCMLE (zero) */
        handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
        return;
    case 0x3d: /* FRECPE */
    case 0x3f: /* FRECPX */
        break;
    case 0x18: /* FRINTN */
        only_in_vector = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x19: /* FRINTM */
        only_in_vector = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x38: /* FRINTP */
        only_in_vector = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x39: /* FRINTZ */
        only_in_vector = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x58: /* FRINTA */
        only_in_vector = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x59: /* FRINTX */
    case 0x79: /* FRINTI */
        only_in_vector = true;
        /* current rounding mode */
        break;
    case 0x1a: /* FCVTNS */
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x1b: /* FCVTMS */
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x1c: /* FCVTAS */
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x3a: /* FCVTPS */
        rmode = FPROUNDING_POSINF;
        break;
    case 0x3b: /* FCVTZS */
        rmode = FPROUNDING_ZERO;
        break;
    case 0x5a: /* FCVTNU */
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x5b: /* FCVTMU */
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x5c: /* FCVTAU */
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x7a: /* FCVTPU */
        rmode = FPROUNDING_POSINF;
        break;
    case 0x7b: /* FCVTZU */
        rmode = FPROUNDING_ZERO;
        break;
    case 0x2f: /* FABS */
    case 0x6f: /* FNEG */
        need_fpst = false;
        break;
    case 0x7d: /* FRSQRTE */
    case 0x7f: /* FSQRT (vector) */
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    /* Check additional constraints for the scalar encoding */
    if (is_scalar) {
        if (!is_q) {
            unallocated_encoding(s);
            return;
        }
        /* FRINTxx is only in the vector form */
        if (only_in_vector) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rmode >= 0 || need_fpst) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16);
    }

    if (rmode >= 0) {
        tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
    }

    if (is_scalar) {
        TCGv_i32 tcg_op = read_fp_hreg(s, rn);
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        switch (fpop) {
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3d: /* FRECPE */
            gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3f: /* FRECPX */
            gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x6f: /* FNEG */
            tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
            break;
        case 0x7d: /* FRSQRTE */
            gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        /* limit any sign extension going on */
        tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
        write_fp_sreg(s, rd, tcg_res);
    } else {
        for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_16);

            switch (fpop) {
            case 0x1a: /* FCVTNS */
            case 0x1b: /* FCVTMS */
            case 0x1c: /* FCVTAS */
            case 0x3a: /* FCVTPS */
            case 0x3b: /* FCVTZS */
                gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x5a: /* FCVTNU */
            case 0x5b: /* FCVTMU */
            case 0x5c: /* FCVTAU */
            case 0x7a: /* FCVTPU */
            case 0x7b: /* FCVTZU */
                gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x18: /* FRINTN */
            case 0x19: /* FRINTM */
            case 0x38: /* FRINTP */
            case 0x39: /* FRINTZ */
            case 0x58: /* FRINTA */
            case 0x79: /* FRINTI */
                gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x59: /* FRINTX */
                gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x2f: /* FABS */
                tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
                break;
            case 0x6f: /* FNEG */
                tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x7f: /* FSQRT */
                gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
        }

        clear_vec_high(s, is_q, rd);
    }

    if (tcg_rmode) {
        gen_restore_rmode(tcg_rmode, tcg_fpstatus);
    }
}
/* AdvSIMD scalar x indexed element
 *  31 30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * AdvSIMD vector x indexed element
 *   31  30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 */
static void disas_simd_indexed(DisasContext *s, uint32_t insn)
{
    /* This encoding has two kinds of instruction:
     *  normal, where we perform elt x idxelt => elt for each
     *     element in the vector
     *  long, where we perform elt x idxelt and generate a result of
     *     double the width of the input element
     * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
     */
    bool is_scalar = extract32(insn, 28, 1);
    bool is_q = extract32(insn, 30, 1);
    bool u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int l = extract32(insn, 21, 1);
    int m = extract32(insn, 20, 1);
    /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
    int rm = extract32(insn, 16, 4);
    int opcode = extract32(insn, 12, 4);
    int h = extract32(insn, 11, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool is_long = false;
    int is_fp = 0;
    bool is_fp16 = false;
    int index;
    TCGv_ptr fpst;

    switch (16 * u + opcode) {
    case 0x02: /* SMLAL, SMLAL2 */
    case 0x12: /* UMLAL, UMLAL2 */
    case 0x06: /* SMLSL, SMLSL2 */
    case 0x16: /* UMLSL, UMLSL2 */
    case 0x0a: /* SMULL, SMULL2 */
    case 0x1a: /* UMULL, UMULL2 */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        is_long = true;
        break;
    case 0x03: /* SQDMLAL, SQDMLAL2 */
    case 0x07: /* SQDMLSL, SQDMLSL2 */
    case 0x0b: /* SQDMULL, SQDMULL2 */
        is_long = true;
        break;
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
        if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
            unallocated_encoding(s);
            return;
        }
        is_fp = 2;
        break;
    default:
    case 0x00: /* FMLAL */
    case 0x01: /* FMLA */
    case 0x04: /* FMLSL */
    case 0x05: /* FMLS */
    case 0x08: /* MUL */
    case 0x09: /* FMUL */
    case 0x0c: /* SQDMULH */
    case 0x0d: /* SQRDMULH */
    case 0x0e: /* SDOT */
    case 0x0f: /* SUDOT / BFDOT / USDOT / BFMLAL */
    case 0x10: /* MLA */
    case 0x14: /* MLS */
    case 0x18: /* FMLAL2 */
    case 0x19: /* FMULX */
    case 0x1c: /* FMLSL2 */
    case 0x1d: /* SQRDMLAH */
    case 0x1e: /* UDOT */
    case 0x1f: /* SQRDMLSH */
        unallocated_encoding(s);
        return;
    }

    switch (is_fp) {
    case 1: /* normal fp */
        unallocated_encoding(s); /* in decodetree */
        return;

    case 2: /* complex fp */
        /* Each indexable element is a complex pair. */
        size += 1;
        switch (size) {
        case MO_32:
            if (h && !is_q) {
                unallocated_encoding(s);
                return;
            }
            is_fp16 = true;
            break;
        case MO_64:
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;

    default: /* integer */
        switch (size) {
        case MO_8:
        case MO_64:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    /* Given MemOp size, adjust register and indexing. */
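    /*
     * For example, with 16-bit elements the index is H:L:M (0..7) and Rm
     * can only name V0-V15, while for 32-bit elements the index is H:L
     * and M becomes bit 4 of the register number.
     */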
    switch (size) {
    case MO_16:
        index = h << 2 | l << 1 | m;
        break;
    case MO_32:
        index = h << 1 | l;
        rm |= m << 4;
        break;
    case MO_64:
        if (l || !is_q) {
            unallocated_encoding(s);
            return;
        }
        index = h;
        rm |= m << 4;
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fp) {
        fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    } else {
        fpst = NULL;
    }

    switch (16 * u + opcode) {
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
        {
            int rot = extract32(insn, 13, 2);
            int data = (index << 2) | rot;
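            /*
             * data packs the element index together with the rotation;
             * the helper unpacks it again, with rot selecting one of the
             * 0/90/180/270 degree variants.
             */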
            tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm),
                               vec_full_reg_offset(s, rd), fpst,
                               is_q ? 16 : 8, vec_full_reg_size(s), data,
                               size == MO_64
                               ? gen_helper_gvec_fcmlas_idx
                               : gen_helper_gvec_fcmlah_idx);
        }
        return;
    }

    if (size == 3) {
        g_assert_not_reached();
    } else if (!is_long) {
        /* 32 bit floating point, or 16 or 32 bit integer.
         * For the 16 bit scalar case we use the usual Neon helpers and
         * rely on the fact that 0 op 0 == 0 with no side effects.
         */
        TCGv_i32 tcg_idx = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        read_vec_element_i32(s, tcg_idx, rm, index, size);

        if (size == 1 && !is_scalar) {
            /* The simplest way to handle the 16x16 indexed ops is to duplicate
             * the index into both halves of the 32 bit tcg_idx and then use
             * the usual Neon helpers.
             */
            tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
        }

        for (pass = 0; pass < maxpasses; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);

            switch (16 * u + opcode) {
            case 0x10: /* MLA */
            case 0x14: /* MLS */
            {
                static NeonGenTwoOpFn * const fns[2][2] = {
                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
                };
                NeonGenTwoOpFn *genfn;
                bool is_sub = opcode == 0x4;

                if (size == 1) {
                    gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
                } else {
                    tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
                }
                if (opcode == 0x8) {
                    break;
                }
                read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
                genfn = fns[size - 1][is_sub];
                genfn(tcg_res, tcg_op, tcg_res);
                break;
            }
            case 0x0c: /* SQDMULH */
                if (size == 1) {
                    gen_helper_neon_qdmulh_s16(tcg_res, tcg_env,
                                               tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qdmulh_s32(tcg_res, tcg_env,
                                               tcg_op, tcg_idx);
                }
                break;
            case 0x0d: /* SQRDMULH */
                if (size == 1) {
                    gen_helper_neon_qrdmulh_s16(tcg_res, tcg_env,
                                                tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qrdmulh_s32(tcg_res, tcg_env,
                                                tcg_op, tcg_idx);
                }
                break;
            case 0x01: /* FMLA */
            case 0x05: /* FMLS */
            case 0x09: /* FMUL */
            case 0x19: /* FMULX */
            case 0x1d: /* SQRDMLAH */
            case 0x1f: /* SQRDMLSH */
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }

        clear_vec_high(s, is_q, rd);
    } else {
        /* long ops: 16x16->32 or 32x32->64 */
        TCGv_i64 tcg_res[2];
        int pass;
        bool satop = extract32(opcode, 0, 1);
        MemOp memop = MO_32;

        if (satop || !u) {
            memop |= MO_SIGN;
        }

        if (size == 2) {
            TCGv_i64 tcg_idx = tcg_temp_new_i64();

            read_vec_element(s, tcg_idx, rm, index, memop);

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i64 tcg_op = tcg_temp_new_i64();
                TCGv_i64 tcg_passres;
                int passelt;

                if (is_scalar) {
                    passelt = 0;
                } else {
                    passelt = pass + (is_q * 2);
                }

                read_vec_element(s, tcg_op, rn, passelt, memop);

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);

                if (satop) {
                    /* saturating, doubling */
                    gen_helper_neon_addl_saturate_s64(tcg_passres, tcg_env,
                                                      tcg_passres, tcg_passres);
                }

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s64(tcg_res[pass], tcg_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            clear_vec_high(s, !is_scalar, rd);
        } else {
            TCGv_i32 tcg_idx = tcg_temp_new_i32();

            assert(size == 1);
            read_vec_element_i32(s, tcg_idx, rm, index, size);

            if (!is_scalar) {
                /* The simplest way to handle the 16x16 indexed ops is to
                 * duplicate the index into both halves of the 32 bit tcg_idx
                 * and then use the usual Neon helpers.
                 */
                tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
            }

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i32 tcg_op = tcg_temp_new_i32();
                TCGv_i64 tcg_passres;

                if (is_scalar) {
                    read_vec_element_i32(s, tcg_op, rn, pass, size);
                } else {
                    read_vec_element_i32(s, tcg_op, rn,
                                         pass + (is_q * 2), MO_32);
                }

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                if (memop & MO_SIGN) {
                    gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
                }
                if (satop) {
                    gen_helper_neon_addl_saturate_s32(tcg_passres, tcg_env,
                                                      tcg_passres, tcg_passres);
                }

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], tcg_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
            }
        }

        if (is_scalar) {
            tcg_res[1] = tcg_constant_i64(0);
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        }
    }
}
/* C3.6 Data processing - SIMD, inc Crypto
 *
 * As the decode gets a little complex we are using a table based
 * approach for this part of the decode.
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x00000000, 0x00000000, NULL }
};
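/*
 * The table is searched in order: the first entry whose masked bits match,
 * i.e. (insn & mask) == pattern, supplies the handler, and the all-zeroes
 * sentinel terminates the search with NULL (see lookup_disas_fn).
 */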
static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
{
    /* Note that this is called with all non-FP cases from
     * table C3-6 so it must UNDEF for entries not specifically
     * allocated to instructions in that table.
     */
    AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
    if (fn) {
        fn(s, insn);
    } else {
        unallocated_encoding(s);
    }
}
/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
        disas_data_proc_fp(s, insn);
    } else {
        /* SIMD, including crypto */
        disas_data_proc_simd(s, insn);
    }
}
static bool trans_OK(DisasContext *s, arg_OK *a)
{
    return true;
}

static bool trans_FAIL(DisasContext *s, arg_OK *a)
{
    s->is_nonstreaming = true;
    return true;
}
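/*
 * trans_OK and trans_FAIL are the two targets of the generated SME FA64
 * decoder (decode-sme-fa64.c.inc): insns legal in streaming SVE mode come
 * back through trans_OK, while trans_FAIL marks the insn as non-streaming
 * so that the streaming-mode trap can be raised when the insn later checks
 * FP/SVE access.
 */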
/**
 * is_guarded_page:
 * @env: The cpu environment
 * @s: The DisasContext
 *
 * Return true if the page is guarded.
 */
static bool is_guarded_page(CPUARMState *env, DisasContext *s)
{
    uint64_t addr = s->base.pc_first;
#ifdef CONFIG_USER_ONLY
    return page_get_flags(addr) & PAGE_BTI;
#else
    CPUTLBEntryFull *full;
    void *host;
    int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
    int flags;

    /*
     * We test this immediately after reading an insn, which means
     * that the TLB entry must be present and valid, and thus this
     * access will never raise an exception.
     */
    flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx,
                              false, &host, &full, 0);
    assert(!(flags & TLB_INVALID_MASK));

    return full->extra.arm.guarded;
#endif
}
/**
 * btype_destination_ok:
 * @insn: The instruction at the branch destination
 * @bt: SCTLR_ELx.BT
 * @btype: PSTATE.BTYPE, and is non-zero
 *
 * On a guarded page, there are a limited number of insns
 * that may be present at the branch target:
 *   - branch target identifiers,
 *   - paciasp, pacibsp,
 *   - BRK insn
 *   - HLT insn
 * Anything else causes a Branch Target Exception.
 *
 * Return true if the branch is compatible, false to raise BTITRAP.
 */
static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
{
    if ((insn & 0xfffff01fu) == 0xd503201fu) {
        /* HINT space */
        switch (extract32(insn, 5, 7)) {
        case 0b011001: /* PACIASP */
        case 0b011011: /* PACIBSP */
            /*
             * If SCTLR_ELx.BT, then PACI*SP are not compatible
             * with btype == 3.  Otherwise all btype are ok.
             */
            return !bt || btype != 3;
        case 0b100000: /* BTI */
            /* Not compatible with any btype.  */
            return false;
        case 0b100010: /* BTI c */
            /* Not compatible with btype == 3 */
            return btype != 3;
        case 0b100100: /* BTI j */
            /* Not compatible with btype == 2 */
            return btype != 2;
        case 0b100110: /* BTI jc */
            /* Compatible with any btype.  */
            return true;
        }
    } else {
        switch (insn & 0xffe0001fu) {
        case 0xd4200000u: /* BRK */
        case 0xd4400000u: /* HLT */
            /* Give priority to the breakpoint exception.  */
            return true;
        }
    }
    return false;
}
/* C3.1 A64 instruction index by encoding */
static void disas_a64_legacy(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 4)) {
    case 0x5:
    case 0xd: /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf: /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu_env(cpu);
    ARMCPU *arm_cpu = env_archcpu(env);
    CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
    int bound, core_mmu_idx;

    dc->isar = &arm_cpu->isar;
    dc->condjmp = 0;
    dc->pc_save = dc->base.pc_first;
    dc->aarch64 = true;
    dc->thumb = false;
    dc->sctlr_b = 0;
    dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
    dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
    dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
    dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
    dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
    dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
    dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
    dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE);
    dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC);
    dc->trap_eret = EX_TBFLAG_A64(tb_flags, TRAP_ERET);
    dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
    dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL);
    dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;
    dc->svl = (EX_TBFLAG_A64(tb_flags, SVL) + 1) * 16;
    dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
    dc->bt = EX_TBFLAG_A64(tb_flags, BT);
    dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
    dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
    dc->ata[0] = EX_TBFLAG_A64(tb_flags, ATA);
    dc->ata[1] = EX_TBFLAG_A64(tb_flags, ATA0);
    dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
    dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
    dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
    dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
    dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
    dc->naa = EX_TBFLAG_A64(tb_flags, NAA);
    dc->nv = EX_TBFLAG_A64(tb_flags, NV);
    dc->nv1 = EX_TBFLAG_A64(tb_flags, NV1);
    dc->nv2 = EX_TBFLAG_A64(tb_flags, NV2);
    dc->nv2_mem_e20 = EX_TBFLAG_A64(tb_flags, NV2_MEM_E20);
    dc->nv2_mem_be = EX_TBFLAG_A64(tb_flags, NV2_MEM_BE);
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;
    dc->features = env->features;
    dc->dcz_blocksize = arm_cpu->dcz_blocksize;
    dc->gm_blocksize = arm_cpu->gm_blocksize;

#ifdef CONFIG_USER_ONLY
    /* In sve_probe_page, we assume TBI is enabled. */
    tcg_debug_assert(dc->tbid & 1);
#endif

    dc->lse2 = dc_isar_feature(aa64_lse2, dc);

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
    dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
    dc->is_ldex = false;

    /* Bound the number of insns to execute to those left on the page. */
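    /*
     * -(pc_first | TARGET_PAGE_MASK) is the distance in bytes from
     * pc_first to the end of its page, e.g. 0x10 (four insns) when
     * pc_first is 0xff0 bytes into a 4 KiB page.
     */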
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;

    /* If architectural single step active, limit to 1. */
    if (dc->ss_active) {
        bound = 1;
    }
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;
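    /*
     * With CF_PCREL the generated code must not depend on the TB's
     * absolute virtual address, so only the offset within the page is
     * recorded; the full PC is recovered from the CPU state when the
     * insn_start data is consumed.
     */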
    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, 0, 0);
    dc->insn_start_updated = false;
}
static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *s = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu_env(cpu);
    uint64_t pc = s->base.pc_next;
    uint32_t insn;

    /* Singlestep exceptions have the highest priority. */
    if (s->ss_active && !s->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(s->base.num_insns == 1);
        gen_swstep_exception(s, 0, 0);
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = pc + 4;
        return;
    }

    if (pc & 3) {
        /*
         * PC alignment fault.  This has priority over the instruction abort
         * that we would receive from a translation fault via arm_ldl_code.
         * This should only be possible after an indirect branch, at the
         * start of the TB.
         */
        assert(s->base.num_insns == 1);
        gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc));
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
        return;
    }

    s->pc_curr = pc;
    insn = arm_ldl_code(env, &s->base, pc, s->sctlr_b);
    s->insn = insn;
    s->base.pc_next = pc + 4;

    s->fp_access_checked = false;
    s->sve_access_checked = false;

    if (s->pstate_il) {
        /*
         * Illegal execution state. This has priority over BTI
         * exceptions, but comes after instruction abort exceptions.
         */
        gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate());
        return;
    }

    if (dc_isar_feature(aa64_bti, s)) {
        if (s->base.num_insns == 1) {
            /*
             * At the first insn of the TB, compute s->guarded_page.
             * We delayed computing this until successfully reading
             * the first insn of the TB, above.  This (mostly) ensures
             * that the softmmu tlb entry has been populated, and the
             * page table GP bit is available.
             *
             * Note that we need to compute this even if btype == 0,
             * because this value is used for BR instructions later
             * where ENV is not available.
             */
            s->guarded_page = is_guarded_page(env, s);

            /* First insn can have btype set to non-zero. */
            tcg_debug_assert(s->btype >= 0);

            /*
             * Note that the Branch Target Exception has fairly high
             * priority -- below debugging exceptions but above most
             * everything else.  This allows us to handle this now
             * instead of waiting until the insn is otherwise decoded.
             */
            if (s->btype != 0
                && s->guarded_page
                && !btype_destination_ok(insn, s->bt, s->btype)) {
                gen_exception_insn(s, 0, EXCP_UDEF, syn_btitrap(s->btype));
                return;
            }
        } else {
            /* Not the first insn: btype must be 0. */
            tcg_debug_assert(s->btype == 0);
        }
    }

    s->is_nonstreaming = false;
    if (s->sme_trap_nonstreaming) {
        disas_sme_fa64(s, insn);
    }

    if (!disas_a64(s, insn) &&
        !disas_sme(s, insn) &&
        !disas_sve(s, insn)) {
        disas_a64_legacy(s, insn);
    }

    /*
     * After execution of most insns, btype is reset to 0.
     * Note that we set btype == -1 when the insn sets btype.
     */
    if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
        reset_btype(s);
    }
}
static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ss_active)) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        switch (dc->base.is_jmp) {
        default:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            gen_step_complete_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, 4);
            break;
        default:
        case DISAS_UPDATE_EXIT:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_EXIT:
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_UPDATE_NOCHAIN:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_JUMP:
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NORETURN:
        case DISAS_SWI:
            break;
        case DISAS_WFE:
            gen_a64_update_pc(dc, 4);
            gen_helper_wfe(tcg_env);
            break;
        case DISAS_YIELD:
            gen_a64_update_pc(dc, 4);
            gen_helper_yield(tcg_env);
            break;
        case DISAS_WFI:
            /*
             * This is a special case because we don't want to just halt
             * the CPU if trying to debug across a WFI.
             */
            gen_a64_update_pc(dc, 4);
            gen_helper_wfi(tcg_env, tcg_constant_i32(4));
            /*
             * The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
    }
}
const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start           = aarch64_tr_tb_start,
    .insn_start         = aarch64_tr_insn_start,
    .translate_insn     = aarch64_tr_translate_insn,
    .tb_stop            = aarch64_tr_tb_stop,