/*
 *  AArch64 translation
 *
 *  Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "translate.h"
#include "qemu/host-utils.h"

#include "exec/gen-icount.h"
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;
static TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;

/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_addr;
static TCGv_i64 cpu_exclusive_val;
static TCGv_i64 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i64 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};
enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};

/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;
/* Function prototype for gen_ functions for calling Neon helpers */
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
/* initialize TCG globals. */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif
}
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;

    cpu_fprintf(f, "PC=%016"PRIx64"  SP=%016"PRIx64"\n",
                env->pc, env->xregs[31]);
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
    cpu_fprintf(f, "PSTATE=%08x (flags %c%c%c%c)\n",
                psr,
                psr & PSTATE_N ? 'N' : '-',
                psr & PSTATE_Z ? 'Z' : '-',
                psr & PSTATE_C ? 'C' : '-',
                psr & PSTATE_V ? 'V' : '-');
    cpu_fprintf(f, "\n");

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 32;
        for (i = 0; i < numvfpregs; i += 2) {
            uint64_t vlo = float64_val(env->vfp.regs[i * 2]);
            uint64_t vhi = float64_val(env->vfp.regs[(i * 2) + 1]);
            cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 " ",
                        i, vhi, vlo);
            vlo = float64_val(env->vfp.regs[(i + 1) * 2]);
            vhi = float64_val(env->vfp.regs[((i + 1) * 2) + 1]);
            cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 "\n",
                        i + 1, vhi, vlo);
        }
        cpu_fprintf(f, "FPCR: %08x  FPSR: %08x\n",
                    vfp_get_fpcr(env), vfp_get_fpsr(env));
    }
}
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return 1;
#else
    return s->user;
#endif
}

void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_EXC;
}
static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    /* No direct tb linking with singlestep or deterministic io */
    if (s->singlestep_enabled || (s->tb->cflags & CF_LAST_IO)) {
        return false;
    }

    /* Only link tbs from inside the same guest page */
    if ((s->tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }

    return true;
}

static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->singlestep_enabled) {
            gen_exception(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
            s->is_jmp = DISAS_JUMP;
        }
    }
}
static void unallocated_encoding(DisasContext *s)
{
    gen_exception_insn(s, 4, EXCP_UDEF);
}

#define unsupported_encoding(s, insn)                                   \
    do {                                                                \
        qemu_log_mask(LOG_UNIMP,                                        \
                      "%s:%d: unsupported instruction encoding 0x%08x " \
                      "at pc=%016" PRIx64 "\n",                         \
                      __FILE__, __LINE__, insn, s->pc - 4);             \
        unallocated_encoding(s);                                        \
    } while (0)
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    int i;
    for (i = 0; i < ARRAY_SIZE(s->tmp_a64); i++) {
        TCGV_UNUSED_I64(s->tmp_a64[i]);
    }
#endif
    s->tmp_a64_count = 0;
}

static void free_tmp_a64(DisasContext *s)
{
    int i;
    for (i = 0; i < s->tmp_a64_count; i++) {
        tcg_temp_free_i64(s->tmp_a64[i]);
    }
    init_tmp_a64_array(s);
}

static TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}

static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}
/*
 * Register access functions
 *
 * These functions are used for directly accessing a register in cases
 * where changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
static TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        return new_tmp_a64_zero(s);
    } else {
        return cpu_X[reg];
    }
}

/* register access for when 31 == SP */
static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}
/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}
/* Return the offset into CPUARMState of an element of specified
 * size, 'element' places in from the least significant end of
 * the FP/vector register Qn.
 */
static inline int vec_reg_offset(int regno, int element, TCGMemOp size)
{
    int offs = offsetof(CPUARMState, vfp.regs[regno * 2]);
#ifdef HOST_WORDS_BIGENDIAN
    /* This is complicated slightly because vfp.regs[2n] is
     * still the low half and vfp.regs[2n+1] the high half
     * of the 128 bit vector, even on big endian systems.
     * Calculate the offset assuming a fully bigendian 128 bits,
     * then XOR to account for the order of the two 64 bit halves.
     */
    offs += (16 - ((element + 1) * (1 << size)));
    offs ^= 8;
#else
    offs += element * (1 << size);
#endif
    return offs;
}
/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(int regno, TCGMemOp size)
{
    int offs = offsetof(CPUARMState, vfp.regs[regno * 2]);
#ifdef HOST_WORDS_BIGENDIAN
    offs += (8 - (1 << size));
#endif
    return offs;
}

/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(int regno)
{
    return offsetof(CPUARMState, vfp.regs[regno * 2 + 1]);
}
/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that unlike the GP register accessors, the values returned
 * by the read functions must be manually freed.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(reg, MO_64));
    return v;
}

static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(reg, MO_32));
    return v;
}

static void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    TCGv_i64 tcg_zero = tcg_const_i64(0);

    tcg_gen_st_i64(v, cpu_env, fp_reg_offset(reg, MO_64));
    tcg_gen_st_i64(tcg_zero, cpu_env, fp_reg_hi_offset(reg));
    tcg_temp_free_i64(tcg_zero);
}

static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
    tcg_temp_free_i64(tmp);
}
static TCGv_ptr get_fpstatus_ptr(void)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;

    /* In A64 all instructions (both FP and Neon) use the FPCR;
     * there is no equivalent of the A32 Neon "standard FPSCR value"
     * and all operations use vfp.fp_status.
     */
    offset = offsetof(CPUARMState, vfp.fp_status);
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    TCGv_i64 flag = tcg_temp_new_i64();

    tcg_gen_setcondi_i64(TCG_COND_NE, flag, result, 0);
    tcg_gen_trunc_i64_i32(cpu_ZF, flag);
    tcg_gen_shri_i64(flag, result, 32);
    tcg_gen_trunc_i64_i32(cpu_NF, flag);
    tcg_temp_free_i64(flag);
}
/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_trunc_i64_i32(cpu_ZF, result);
        tcg_gen_trunc_i64_i32(cpu_NF, result);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_trunc_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_shri_i64(flag, flag, 32);
        tcg_gen_trunc_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_trunc_i64_i32(t0_32, t0);
        tcg_gen_trunc_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}
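/* The overflow computation above implements V = (result ^ t0) & ~(t0 ^ t1),
 * sampled at the sign bit: signed overflow on addition is only possible
 * when both operands have the same sign (so ~(t0 ^ t1) has the sign bit
 * set) and the result's sign differs from theirs. For example,
 * 0x7fffffffffffffff + 1 = 0x8000000000000000 sets V: the operands share
 * sign 0 but the result has sign 1.
 */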
/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_trunc_i64_i32(cpu_CF, flag);

        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_shri_i64(flag, flag, 32);
        tcg_gen_trunc_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_trunc_i64_i32(t0_32, t0);
        tcg_gen_trunc_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
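/* For subtraction the dual formula V = (result ^ t0) & (t0 ^ t1) is used:
 * signed overflow is only possible when the operands have different signs.
 * C is computed as t0 >= t1 unsigned, matching the ARM "not borrow"
 * convention for the carry flag after a subtract.
 */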
/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}
/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, cf_64, vf_64, tmp;
        result = tcg_temp_new_i64();
        cf_64 = tcg_temp_new_i64();
        vf_64 = tcg_temp_new_i64();
        tmp = tcg_const_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
        tcg_gen_trunc_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_shri_i64(vf_64, vf_64, 32);
        tcg_gen_trunc_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32, t1_32, tmp;
        t0_32 = tcg_temp_new_i32();
        t1_32 = tcg_temp_new_i32();
        tmp = tcg_const_i32(0);

        tcg_gen_trunc_i64_i32(t0_32, t0);
        tcg_gen_trunc_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}
/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory
 */
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, int size)
{
    g_assert(size <= 3);
    tcg_gen_qemu_st_i64(source, tcg_addr, get_mem_index(s), MO_TE + size);
}

/*
 * Load from memory to GPR register
 */
static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                      int size, bool is_signed, bool extend)
{
    TCGMemOp memop = MO_TE + size;

    g_assert(size <= 3);

    if (is_signed) {
        memop += MO_SIGN;
    }

    tcg_gen_qemu_ld_i64(dest, tcg_addr, get_mem_index(s), memop);

    if (extend && is_signed) {
        g_assert(size < 3);
        tcg_gen_ext32u_i64(dest, dest);
    }
}
/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(srcidx, MO_64));
    if (size < 4) {
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TE + size);
    } else {
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TEQ);
        tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(srcidx));
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(tmp, tcg_hiaddr, get_mem_index(s), MO_TEQ);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_temp_free_i64(tmp);
}
/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi;

    if (size < 4) {
        TCGMemOp memop = MO_TE + size;
        tmphi = tcg_const_i64(0);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
    } else {
        TCGv_i64 tcg_hiaddr;
        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), MO_TEQ);
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(tmphi, tcg_hiaddr, get_mem_index(s), MO_TEQ);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(destidx, MO_64));
    tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(destidx));

    tcg_temp_free_i64(tmplo);
    tcg_temp_free_i64(tmphi);
}
/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */
/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Clear the high 64 bits of a 128 bit vector (in general non-quad
 * vector ops all need to do this).
 */
static void clear_vec_high(DisasContext *s, int rd)
{
    TCGv_i64 tcg_zero = tcg_const_i64(0);

    write_vec_element(s, tcg_zero, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_zero);
}
/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = MO_TE + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, size);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);

    tcg_temp_free_i64(tcg_tmp);
}

/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = MO_TE + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
    write_vec_element(s, tcg_tmp, destidx, element, size);

    tcg_temp_free_i64(tcg_tmp);
}
/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}
static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}
/*
 * This provides a simple table based lookup decoder. It is
 * intended to be used when the relevant bits for decode are too
 * awkwardly placed and switch/if based logic would be confusing and
 * deeply nested. Since it's a linear search through the table, tables
 * should be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0)
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}
/*
 * the instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter 3 (C3)
 * of the ARM Architecture Reference Manual (DDI0487A_a)
 */
/* C3.2.6 Unconditional branch (immediate)
 *   31  30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;

    if (insn & (1 << 31)) {
        /* C5.6.26 BL Branch with link */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
    }

    /* C5.6.20 B Branch / C5.6.26 BL Branch with link */
    gen_goto_tb(s, 0, addr);
}
/* C3.2.1 Compare & branch (immediate)
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |         imm19       |   Rt   |
 * +----+-------------+----+---------------------+--------+
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    int label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
/* C3.2.5 Test & branch (immediate)
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    int label_match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
/* C3.2.2 / C5.6.19 Conditional branch (immediate)
 *  31           25  24  23                  5   4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |         imm19       | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
    cond = extract32(insn, 0, 4);

    if (cond < 0x0e) {
        /* genuinely conditional branches */
        int label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->pc);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}
/* C5.6.68 HINT */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0: /* NOP */
        return;
    case 1: /* YIELD */
    case 2: /* WFE */
    case 3: /* WFI */
    case 4: /* SEV */
    case 5: /* SEVL */
        /* we treat all as NOP at least for now */
        return;
    default:
        /* default specified as NOP equivalent */
        return;
    }
}

static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
    case 6: /* ISB */
        /* We don't emulate caches so barriers are no-ops */
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}

/* C5.6.130 MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsupported_encoding(s, insn);
}
static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1 << 31));
    /* build bit 30, Z */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}

static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_trunc_i64_i32(nzcv, tcg_rt);

    /* bit 31, N */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1 << 31));
    /* bit 30, Z */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}
/* C5.6.129 MRS - move from system register
 * C5.6.131 MSR (register) - move to system register
 * C5.6.204 SYS
 * C5.6.59 SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    const ARMCPRegInfo *ri;
    TCGv_i64 tcg_rt;

    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                               crn, crm, op0, op1, op2));

    if (!ri) {
        /* Unknown register */
        unallocated_encoding(s);
        return;
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_pl, ri, isread)) {
        unallocated_encoding(s);
        return;
    }

    /* Handle special cases first */
    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    default:
        break;
    }

    if (use_icount && (ri->type & ARM_CP_IO)) {
        gen_io_start();
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            TCGv_ptr tmpptr;
            gen_a64_set_pc_im(s->pc - 4);
            tmpptr = tcg_const_ptr(ri);
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            TCGv_ptr tmpptr;
            gen_a64_set_pc_im(s->pc - 4);
            tmpptr = tcg_const_ptr(ri);
            gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if (use_icount && (ri->type & ARM_CP_IO)) {
        /* I/O operations must end the TB here (whether read or write) */
        gen_io_end();
        s->is_jmp = DISAS_UPDATE;
    } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /* We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        s->is_jmp = DISAS_UPDATE;
    }
}
/* C3.2.4 System
 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 */
static void disas_system(DisasContext *s, uint32_t insn)
{
    unsigned int l, op0, op1, crn, crm, op2, rt;
    l = extract32(insn, 21, 1);
    op0 = extract32(insn, 19, 2);
    op1 = extract32(insn, 16, 3);
    crn = extract32(insn, 12, 4);
    crm = extract32(insn, 8, 4);
    op2 = extract32(insn, 5, 3);
    rt = extract32(insn, 0, 5);

    if (op0 == 0) {
        if (l || rt != 31) {
            unallocated_encoding(s);
            return;
        }
        switch (crn) {
        case 2: /* C5.6.68 HINT */
            handle_hint(s, insn, op1, op2, crm);
            break;
        case 3: /* CLREX, DSB, DMB, ISB */
            handle_sync(s, insn, op1, op2, crm);
            break;
        case 4: /* C5.6.130 MSR (immediate) */
            handle_msr_i(s, insn, op1, op2, crm);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    }
    handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
}
/* C3.2.3 Exception generation
 *
 *  31             24 23 21 20                     5 4   2 1  0
 * +-----------------+-----+------------------------+-----+----+
 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
 * +-----------------+-----+------------------------+-----+----+
 */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);

    switch (opc) {
    case 0:
        /* SVC, HVC, SMC; since we don't support the Virtualization
         * or TrustZone extensions these all UNDEF except SVC.
         */
        if (op2_ll != 1) {
            unallocated_encoding(s);
            break;
        }
        gen_exception_insn(s, 0, EXCP_SWI);
        break;
    case 1:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK */
        gen_exception_insn(s, 0, EXCP_BKPT);
        break;
    case 2:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* HLT */
        unsupported_encoding(s, insn);
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1, DCPS2, DCPS3 */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* C3.2.7 Unconditional branch (register)
 *  31           25 24   21 20   16 15   10 9    5 4     0
 * +---------------+-------+-------+-------+------+-------+
 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
 * +---------------+-------+-------+-------+------+-------+
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* BR */
    case 2: /* RET */
        break;
    case 1: /* BLR */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
        break;
    case 4: /* ERET */
    case 5: /* DRPS */
        if (rn != 0x1f) {
            unallocated_encoding(s);
        } else {
            unsupported_encoding(s, insn);
        }
        return;
    default:
        unallocated_encoding(s);
        return;
    }

    tcg_gen_mov_i64(cpu_pc, cpu_reg(s, rn));
    s->is_jmp = DISAS_JUMP;
}
/* C3.2 Branches, exception generating and system instructions */
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 7)) {
    case 0x0a: case 0x0b:
    case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
        disas_uncond_b_imm(s, insn);
        break;
    case 0x1a: case 0x5a: /* Compare & branch (immediate) */
        disas_comp_b_imm(s, insn);
        break;
    case 0x1b: case 0x5b: /* Test & branch (immediate) */
        disas_test_b_imm(s, insn);
        break;
    case 0x2a: /* Conditional branch (immediate) */
        disas_cond_b_imm(s, insn);
        break;
    case 0x6a: /* Exception generation / System */
        if (insn & (1 << 24)) {
            disas_system(s, insn);
        } else {
            disas_exc(s, insn);
        }
        break;
    case 0x6b: /* Unconditional branch (register) */
        disas_uncond_b_reg(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/*
 * Load/Store exclusive instructions are implemented by remembering
 * the value/address loaded, and seeing if these are the same
 * when the store is performed. This is not actually the architecturally
 * mandated semantics, but it works for typical guest code sequences
 * and avoids having to monitor regular stores.
 *
 * In system emulation mode only one CPU will be running at once, so
 * this sequence is effectively atomic. In user emulation mode we
 * throw an exception and handle the atomic operation elsewhere.
 */
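/* A typical guest sequence these helpers are designed for is an atomic
 * increment loop such as:
 *
 *   retry:
 *       ldxr  w0, [x1]         ; load exclusive: remember addr and value
 *       add   w0, w0, #1
 *       stxr  w2, w0, [x1]     ; succeeds iff addr/value still match
 *       cbnz  w2, retry
 *
 * Because the comparison is on the remembered value rather than a real
 * exclusive monitor, a location that is changed and then restored between
 * the load and the store (the ABA case) will spuriously succeed; as noted
 * above, this is acceptable for typical guest code.
 */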
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i64 addr, int size, bool is_pair)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGMemOp memop = MO_TE + size;

    g_assert(size <= 3);
    tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s), memop);

    if (is_pair) {
        TCGv_i64 addr2 = tcg_temp_new_i64();
        TCGv_i64 hitmp = tcg_temp_new_i64();

        g_assert(size >= 2);
        tcg_gen_addi_i64(addr2, addr, 1 << size);
        tcg_gen_qemu_ld_i64(hitmp, addr2, get_mem_index(s), memop);
        tcg_temp_free_i64(addr2);
        tcg_gen_mov_i64(cpu_exclusive_high, hitmp);
        tcg_gen_mov_i64(cpu_reg(s, rt2), hitmp);
        tcg_temp_free_i64(hitmp);
    }

    tcg_gen_mov_i64(cpu_exclusive_val, tmp);
    tcg_gen_mov_i64(cpu_reg(s, rt), tmp);

    tcg_temp_free_i64(tmp);
    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
}
#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    tcg_gen_mov_i64(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | is_pair << 2 | (rd << 4) | (rt << 9) | (rt2 << 14));
    gen_exception_insn(s, 4, EXCP_STREX);
}
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    qemu_log_mask(LOG_UNIMP,
                  "%s:%d: system mode store_exclusive unsupported "
                  "at pc=%016" PRIx64 "\n",
                  __FILE__, __LINE__, s->pc - 4);
}
#endif
/* C3.3.6 Load/store exclusive
 *
 *  31 30 29         24  23  22   21  20  16  15  14   10 9    5 4    0
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 * | sz  | 0 0 1 0 0 0 | o2 | L | o1 |  Rs  | o0 |  Rt2  |  Rn  |  Rt  |
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 *
 *  sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   L: 0 -> store, 1 -> load
 *  o2: 0 -> exclusive, 1 -> not
 *  o1: 0 -> single register, 1 -> register pair
 *  o0: 1 -> load-acquire/store-release, 0 -> not
 *
 *  o0 == 0 AND o2 == 1 is un-allocated
 *  o1 == 1 is un-allocated except for 32 and 64 bit sizes
 */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int is_lasr = extract32(insn, 15, 1);
    int rs = extract32(insn, 16, 5);
    int is_pair = extract32(insn, 21, 1);
    int is_store = !extract32(insn, 22, 1);
    int is_excl = !extract32(insn, 23, 1);
    int size = extract32(insn, 30, 2);
    TCGv_i64 tcg_addr;

    if ((!is_excl && !is_lasr) ||
        (is_pair && size < 2)) {
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    /* Note that since TCG is single threaded load-acquire/store-release
     * semantics require no extra if (is_lasr) { ... } handling.
     */
    if (is_excl) {
        if (!is_store) {
            gen_load_exclusive(s, rt, rt2, tcg_addr, size, is_pair);
        } else {
            gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, is_pair);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, false, false);
        }
        if (is_pair) {
            TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
            tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
            if (is_store) {
                do_gpr_st(s, tcg_rt2, tcg_addr, size);
            } else {
                do_gpr_ld(s, tcg_rt2, tcg_addr, size, false, false);
            }
        }
    }
}
/*
 * C3.3.5 Load register (literal)
 *
 *  31 30 29   27  26 25 24 23                5 4     0
 * +-----+-------+---+-----+-------------------+-------+
 * | opc | 0 1 1 | V | 0 0 |       imm19       |  Rt   |
 * +-----+-------+---+-----+-------------------+-------+
 *
 * V: 1 -> vector (simd/fp)
 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
 *                   10-> 32 bit signed, 11 -> prefetch
 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
 */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int64_t imm = sextract32(insn, 5, 19) << 2;
    bool is_vector = extract32(insn, 26, 1);
    int opc = extract32(insn, 30, 2);
    bool is_signed = false;
    int size = 2;
    TCGv_i64 tcg_rt, tcg_addr;

    if (is_vector) {
        if (opc == 3) {
            unallocated_encoding(s);
            return;
        }
        size = 2 + opc;
    } else {
        if (opc == 3) {
            /* PRFM (literal) : prefetch */
            return;
        }
        size = 2 + extract32(opc, 0, 1);
        is_signed = extract32(opc, 1, 1);
    }

    tcg_rt = cpu_reg(s, rt);

    tcg_addr = tcg_const_i64((s->pc - 4) + imm);
    if (is_vector) {
        do_fp_ld(s, rt, tcg_addr, size);
    } else {
        do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false);
    }
    tcg_temp_free_i64(tcg_addr);
}
/*
 * C5.6.80 LDNP (Load Pair - non-temporal hint)
 * C5.6.81 LDP (Load Pair - non vector)
 * C5.6.82 LDPSW (Load Pair Signed Word - non vector)
 * C5.6.176 STNP (Store Pair - non-temporal hint)
 * C5.6.177 STP (Store Pair - non vector)
 * C6.3.165 LDNP (Load Pair of SIMD&FP - non-temporal hint)
 * C6.3.165 LDP (Load Pair of SIMD&FP)
 * C6.3.284 STNP (Store Pair of SIMD&FP - non-temporal hint)
 * C6.3.284 STP (Store Pair of SIMD&FP)
 *
 *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
 * +-----+-------+---+---+-------+---+-------+-------+------+------+
 * | opc | 1 0 1 | V | 0 | index | L |  imm7 |  Rt2  |  Rn  |  Rt  |
 * +-----+-------+---+---+-------+---+-------+-------+------+------+
 *
 * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
 *      LDPSW                    01
 *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
 *   V: 0 -> GPR, 1 -> Vector
 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
 *      10 -> signed offset, 11 -> pre-index
 *   L: 0 -> Store 1 -> Load
 *
 * Rt, Rt2 = GPR or SIMD registers to be stored
 * Rn = general purpose register containing address
 * imm7 = signed offset (multiple of 4 or 8 depending on size)
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int64_t offset = sextract32(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;

    TCGv_i64 tcg_addr; /* calculated address */
    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        if (!is_load && is_signed) {
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /* signed offset with "non-temporal" hint. Since we don't emulate
         * caches we don't care about hints to the cache system about
         * data access patterns, and handle this identically to plain
         * signed offset.
         */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, rn not updated */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    offset <<= size;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!postindex) {
        tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
    }

    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, tcg_addr, size);
        } else {
            do_fp_st(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_load) {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false);
        } else {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        }
    }
    tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt2, tcg_addr, size);
        } else {
            do_fp_st(s, rt2, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
        if (is_load) {
            do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false);
        } else {
            do_gpr_st(s, tcg_rt2, tcg_addr, size);
        }
    }

    if (wback) {
        if (postindex) {
            tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
        } else {
            tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
    }
}
/*
 * C3.3.8 Load/store (immediate post-indexed)
 * C3.3.9 Load/store (immediate pre-indexed)
 * C3.3.12 Load/store (unscaled immediate)
 *
 *  31 30 29   27  26 25 24 23 22 21  20    12 11 10 9    5 4    0
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 * |size| 1 1 1 | V | 0 0 | opc | 0 |  imm9  | idx |  Rn  |  Rt  |
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 *
 * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
 * V = 0 -> non-vector
 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 */
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm9 = sextract32(insn, 12, 9);
    int opc = extract32(insn, 22, 2);
    int size = extract32(insn, 30, 2);
    int idx = extract32(insn, 10, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_vector = extract32(insn, 26, 1);
    bool post_index;
    bool writeback;

    TCGv_i64 tcg_addr;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = ((opc & 1) == 0);
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = opc & (1 << 1);
        is_extended = (size < 3) && (opc & 1);
    }

    switch (idx) {
    case 0:
        post_index = false;
        writeback = false;
        break;
    case 1:
        post_index = true;
        writeback = true;
        break;
    case 3:
        post_index = false;
        writeback = true;
        break;
    case 2:
        g_assert(false);
        break;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!post_index) {
        tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
    }

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
        }
    }

    if (writeback) {
        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
        if (post_index) {
            tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
        }
        tcg_gen_mov_i64(tcg_rn, tcg_addr);
    }
}
/*
 * C3.3.10 Load/store (register offset)
 *
 *  31 30 29   27  26 25 24 23 22 21  20  16 15 13 12 11 10 9  5 4  0
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 * |size| 1 1 1 | V | 0 0 | opc | 1 |  Rm  | opt | S| 1 0 | Rn | Rt |
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 *
 * For non-vector:
 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * V: 1 -> vector/simd
 * opt: extend encoding (see DecodeRegExtend)
 * S: if S=1 then scale (essentially index by sizeof(size))
 * Rt: register to transfer into/out of
 * Rn: address register or SP for base
 * Rm: offset register or ZR for offset
 */
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int shift = extract32(insn, 12, 1);
    int rm = extract32(insn, 16, 5);
    int opc = extract32(insn, 22, 2);
    int opt = extract32(insn, 13, 3);
    int size = extract32(insn, 30, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_vector = extract32(insn, 26, 1);

    TCGv_i64 tcg_rm;
    TCGv_i64 tcg_addr;

    if (extract32(opt, 1, 1) == 0) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    tcg_rm = read_cpu_reg(s, rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);

    tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
        }
    }
}
/*
 * C3.3.13 Load/store (unsigned immediate)
 *
 *  31 30 29   27  26 25 24 23 22 21        10 9     5 4    0
 * +----+-------+---+-----+-----+------------+-------+------+
 * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
 * +----+-------+---+-----+-----+------------+-------+------+
 *
 * For non-vector:
 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * Rn: base address register (inc SP)
 * Rt: target register
 */
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    unsigned int imm12 = extract32(insn, 10, 12);
    bool is_vector = extract32(insn, 26, 1);
    int size = extract32(insn, 30, 2);
    int opc = extract32(insn, 22, 2);
    unsigned int offset;

    TCGv_i64 tcg_addr;

    bool is_store;
    bool is_signed = false;
    bool is_extended = false;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);
    offset = imm12 << size;
    tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
        }
    }
}
/* Load/store register (immediate forms) */
static void disas_ldst_reg_imm(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 10, 2)) {
    case 0: case 1: case 3:
        /* Load/store register (unscaled immediate) */
        /* Load/store immediate pre/post-indexed */
        disas_ldst_reg_imm9(s, insn);
        break;
    case 2:
        /* Load/store register unprivileged */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* Load/store register (all forms) */
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 2)) {
    case 0:
        if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) {
            disas_ldst_reg_roffset(s, insn);
        } else {
            disas_ldst_reg_imm(s, insn);
        }
        break;
    case 1:
        disas_ldst_reg_unsigned_imm(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* C3.3.1 AdvSIMD load/store multiple structures
 *
 *  31  30  29           23 22  21         16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+-------------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+-------------+--------+------+------+------+
 *
 * C3.3.2 AdvSIMD load/store multiple structures (post-indexed)
 *
 *  31  30  29           23 22  21  20     16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+---+---------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 |   Rm    | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+---+---------+--------+------+------+------+
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size dependent #imm
 */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 10, 2);
    int opcode = extract32(insn, 12, 4);
    bool is_store = !extract32(insn, 22, 1);
    bool is_postidx = extract32(insn, 23, 1);
    bool is_q = extract32(insn, 30, 1);
    TCGv_i64 tcg_addr, tcg_rn;

    int ebytes = 1 << size;
    int elements = (is_q ? 128 : 64) / (8 << size);
    int rpt;    /* num iterations */
    int selem;  /* structure elements */
    int r;

    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
        unallocated_encoding(s);
        return;
    }

    /* From the shared decode logic */
    switch (opcode) {
    case 0x0:
        rpt = 1;
        selem = 4;
        break;
    case 0x2:
        rpt = 4;
        selem = 1;
        break;
    case 0x4:
        rpt = 1;
        selem = 3;
        break;
    case 0x6:
        rpt = 3;
        selem = 1;
        break;
    case 0x7:
        rpt = 1;
        selem = 1;
        break;
    case 0x8:
        rpt = 1;
        selem = 2;
        break;
    case 0xa:
        rpt = 2;
        selem = 1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3 && !is_q && selem != 1) {
        /* reserved */
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_rn = cpu_reg_sp(s, rn);
    tcg_addr = tcg_temp_new_i64();
    tcg_gen_mov_i64(tcg_addr, tcg_rn);

    for (r = 0; r < rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int tt = (rt + r) % 32;
            int xs;
            for (xs = 0; xs < selem; xs++) {
                if (is_store) {
                    do_vec_st(s, tt, e, tcg_addr, size);
                } else {
                    do_vec_ld(s, tt, e, tcg_addr, size);

                    /* For non-quad operations, setting a slice of the low
                     * 64 bits of the register clears the high 64 bits (in
                     * the ARM ARM pseudocode this is implicit in the fact
                     * that 'rval' is a 64 bit wide variable). We optimize
                     * by noticing that we only need to do this the first
                     * time we touch a register.
                     */
                    if (!is_q && e == 0 && (r == 0 || xs == selem - 1)) {
                        clear_vec_high(s, tt);
                    }
                }
                tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
                tt = (tt + 1) % 32;
            }
        }
    }

    if (is_postidx) {
        int rm = extract32(insn, 16, 5);
        if (rm == 31) {
            tcg_gen_mov_i64(tcg_rn, tcg_addr);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
    tcg_temp_free_i64(tcg_addr);
}
/* C3.3.3 AdvSIMD load/store single structure
 *
 *  31  30  29           23 22 21 20       16 15 13 12  11 10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * C3.3.4 AdvSIMD load/store single structure (post-indexed)
 *
 *  31  30  29           23 22 21 20       16 15 13 12  11 10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 1 | L R |    Rm     | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size dependent #imm
 * index = encoded in Q:S:size dependent on size
 *
 * lane_size = encoded in R, opc
 * transfer width = encoded in opc, S, size
 */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 10, 2);
    int S = extract32(insn, 12, 1);
    int opc = extract32(insn, 13, 3);
    int R = extract32(insn, 21, 1);
    int is_load = extract32(insn, 22, 1);
    int is_postidx = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);

    int scale = extract32(opc, 1, 2);
    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
    bool replicate = false;
    int index = is_q << 3 | S << 2 | size;
    int ebytes, xs;
    TCGv_i64 tcg_addr, tcg_rn;

    switch (scale) {
    case 3:
        if (!is_load || S) {
            unallocated_encoding(s);
            return;
        }
        scale = size;
        replicate = true;
        break;
    case 0:
        break;
    case 1:
        if (extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        }
        index >>= 1;
        break;
    case 2:
        if (extract32(size, 1, 1)) {
            unallocated_encoding(s);
            return;
        }
        if (!extract32(size, 0, 1)) {
            index >>= 2;
        } else {
            if (S) {
                unallocated_encoding(s);
                return;
            }
            index >>= 3;
            scale = 3;
        }
        break;
    default:
        g_assert_not_reached();
    }

    ebytes = 1 << scale;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_rn = cpu_reg_sp(s, rn);
    tcg_addr = tcg_temp_new_i64();
    tcg_gen_mov_i64(tcg_addr, tcg_rn);

    for (xs = 0; xs < selem; xs++) {
        if (replicate) {
            /* Load and replicate to all elements */
            uint64_t mulconst;
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
                                get_mem_index(s), MO_TE + scale);
            switch (scale) {
            case 0:
                mulconst = 0x0101010101010101ULL;
                break;
            case 1:
                mulconst = 0x0001000100010001ULL;
                break;
            case 2:
                mulconst = 0x0000000100000001ULL;
                break;
            case 3:
                mulconst = 0;
                break;
            default:
                g_assert_not_reached();
            }
            if (mulconst) {
                tcg_gen_muli_i64(tcg_tmp, tcg_tmp, mulconst);
            }
            write_vec_element(s, tcg_tmp, rt, 0, MO_64);
            if (is_q) {
                write_vec_element(s, tcg_tmp, rt, 1, MO_64);
            } else {
                clear_vec_high(s, rt);
            }
            tcg_temp_free_i64(tcg_tmp);
        } else {
            /* Load/store one element per register */
            if (is_load) {
                do_vec_ld(s, rt, index, tcg_addr, MO_TE + scale);
            } else {
                do_vec_st(s, rt, index, tcg_addr, MO_TE + scale);
            }
        }
        tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
        rt = (rt + 1) % 32;
    }

    if (is_postidx) {
        int rm = extract32(insn, 16, 5);
        if (rm == 31) {
            tcg_gen_mov_i64(tcg_rn, tcg_addr);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
    tcg_temp_free_i64(tcg_addr);
}
/* C3.3 Loads and stores */
static void disas_ldst(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 6)) {
    case 0x08: /* Load/store exclusive */
        disas_ldst_excl(s, insn);
        break;
    case 0x18: case 0x1c: /* Load register (literal) */
        disas_ld_lit(s, insn);
        break;
    case 0x28: case 0x29:
    case 0x2c: case 0x2d: /* Load/store pair (all forms) */
        disas_ldst_pair(s, insn);
        break;
    case 0x38: case 0x39:
    case 0x3c: case 0x3d: /* Load/store register (all forms) */
        disas_ldst_reg(s, insn);
        break;
    case 0x0c: /* AdvSIMD load/store multiple structures */
        disas_ldst_multiple_struct(s, insn);
        break;
    case 0x0d: /* AdvSIMD load/store single structure */
        disas_ldst_single_struct(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* C3.4.6 PC-rel. addressing
 *   31  30   29 28       24 23                5 4    0
 *  +----+-------+-----------+-------------------+------+
 *  | op | immlo | 1 0 0 0 0 |       immhi       |  Rd  |
 *  +----+-------+-----------+-------------------+------+
 */
static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
{
    unsigned int page, rd;
    uint64_t base;
    int64_t offset;

    page = extract32(insn, 31, 1);
    /* SignExtend(immhi:immlo) -> offset */
    offset = ((int64_t)sextract32(insn, 5, 19) << 2) | extract32(insn, 29, 2);
    rd = extract32(insn, 0, 5);
    base = s->pc - 4;

    if (page) {
        /* ADRP (page based) */
        base &= ~0xfff;
        offset <<= 12;
    }

    tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
}
/*
 * C3.4.1 Add/subtract (immediate)
 *
 *  31 30 29 28       24 23 22 21         10 9   5 4   0
 * +--+--+--+-----------+-----+-------------+-----+-----+
 * |sf|op| S| 1 0 0 0 1 |shift|    imm12    |  Rn | Rd  |
 * +--+--+--+-----------+-----+-------------+-----+-----+
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12
 */
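/* For example, ADD w1, w2, #0x5000 is encoded with shift = 01 and
 * imm12 = 5: the immediate is shifted left by 12 before the addition.
 */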
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t imm = extract32(insn, 10, 12);
    int shift = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool is_64bit = extract32(insn, 31, 1);

    TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
    TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
    TCGv_i64 tcg_result;

    switch (shift) {
    case 0x0:
        break;
    case 0x1:
        imm <<= 12;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    tcg_result = tcg_temp_new_i64();
    if (!setflags) {
        if (sub_op) {
            tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
        } else {
            tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
        }
    } else {
        TCGv_i64 tcg_imm = tcg_const_i64(imm);
        if (sub_op) {
            gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        } else {
            gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        }
        tcg_temp_free_i64(tcg_imm);
    }

    if (is_64bit) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
/* The input should be a value in the bottom e bits (with higher
 * bits zero); returns that value replicated into every element
 * of size e in a 64 bit integer.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    assert(e != 0);
    while (e < 64) {
        mask |= mask << e;
        e *= 2;
    }
    return mask;
}
/* Return a value with the bottom len bits set (where 0 < len <= 64) */
static inline uint64_t bitmask64(unsigned int length)
{
    assert(length > 0 && length <= 64);
    return ~0ULL >> (64 - length);
}
/* Simplified variant of pseudocode DecodeBitMasks() for the case where we
 * only require the wmask. Returns false if the imms/immr/immn are a reserved
 * value (ie should cause a guest UNDEF exception), and true if they are
 * valid, in which case the decoded bit pattern is written to result.
 */
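/* Worked example (added for illustration): immn = 0, imms = 000011,
 * immr = 000001 decodes to 32 bit elements (len = 5), a run of
 * imms+1 = 4 set bits rotated right by 1 within each element, ie
 * 0x80000007 replicated across the value: 0x8000000780000007.
 */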
static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                                   unsigned int imms, unsigned int immr)
{
    uint64_t mask;
    unsigned e, levels, s, r;
    int len;

    assert(immn < 2 && imms < 64 && immr < 64);

    /* The bit patterns we create here are 64 bit patterns which
     * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
     * 64 bits each. Each element contains the same value: a run
     * of between 1 and e-1 non-zero bits, rotated within the
     * element by between 0 and e-1 bits.
     *
     * The element size and run length are encoded into immn (1 bit)
     * and imms (6 bits) as follows:
     * 64 bit elements: immn = 1, imms = <length of run - 1>
     * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
     * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
     *  8 bit elements: immn = 0, imms = 110 : <length of run - 1>
     *  4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
     *  2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
     * Notice that immn = 0, imms = 11111x is the only combination
     * not covered by one of the above options; this is reserved.
     * Further, <length of run - 1> all-ones is a reserved pattern.
     *
     * In all cases the rotation is by immr % e (and immr is 6 bits).
     */

    /* First determine the element size */
    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
    if (len < 1) {
        /* This is the immn == 0, imms == 11111x case */
        return false;
    }
    e = 1 << len;

    levels = e - 1;
    s = imms & levels;
    r = immr & levels;

    if (s == levels) {
        /* <length of run - 1> mustn't be all-ones. */
        return false;
    }

    /* Create the value of one element: s+1 set bits rotated
     * by r within the element (which is e bits wide)...
     */
    mask = bitmask64(s + 1);
    if (r) {
        /* Only rotate for r != 0: a shift by (e - 0) could be a shift
         * by 64, which is undefined behaviour in C.
         */
        mask = (mask >> r) | (mask << (e - r));
        mask &= bitmask64(e);
    }
    /* ...then replicate the element over the whole 64 bit value */
    mask = bitfield_replicate(mask, e);
    *result = mask;
    return true;
}
/* C3.4.4 Logical (immediate)
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 *  +----+-----+-------------+---+------+------+------+------+
 *  | sf | opc | 1 0 0 1 0 0 | N | immr | imms |  Rn  |  Rd  |
 *  +----+-----+-------------+---+------+------+------+------+
 */
static void disas_logic_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opc, is_n, immr, imms, rn, rd;
    TCGv_i64 tcg_rd, tcg_rn;
    uint64_t wmask;
    bool is_and = false;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    is_n = extract32(insn, 22, 1);
    immr = extract32(insn, 16, 6);
    imms = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && is_n) {
        unallocated_encoding(s);
        return;
    }

    if (opc == 0x3) { /* ANDS */
        tcg_rd = cpu_reg(s, rd);
    } else {
        tcg_rd = cpu_reg_sp(s, rd);
    }
    tcg_rn = cpu_reg(s, rn);

    if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
        /* some immediate field values are reserved */
        unallocated_encoding(s);
        return;
    }

    if (!sf) {
        wmask &= 0xffffffff;
    }

    switch (opc) {
    case 0x3: /* ANDS */
    case 0x0: /* AND */
        tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
        is_and = true;
        break;
    case 0x1: /* ORR */
        tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
        break;
    case 0x2: /* EOR */
        tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
        break;
    default:
        assert(FALSE); /* must handle all above */
        break;
    }

    if (!sf && !is_and) {
        /* zero extend final result; we know we can skip this for AND
         * since the immediate had the high 32 bits clear.
         */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) { /* ANDS */
        gen_logic_CC(sf, tcg_rd);
    }
}
/*
 * C3.4.5 Move wide (immediate)
 *
 *  31 30 29 28         23 22 21 20             5 4    0
 * +--+-----+-------------+-----+----------------+------+
 * |sf| opc | 1 0 0 1 0 1 |  hw |      imm16     |  Rd  |
 * +--+-----+-------------+-----+----------------+------+
 *
 * sf: 0 -> 32 bit, 1 -> 64 bit
 * opc: 00 -> N, 10 -> Z, 11 -> K
 * hw: shift/16 (0,16, and sf only 32, 48)
 */
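/* For example, MOVK x0, #0xbeef, lsl #16 (opc = 11, hw = 01) deposits
 * imm16 into bits [31:16] of x0 and leaves the remaining bits intact.
 */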
static void disas_movw_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    uint64_t imm = extract32(insn, 5, 16);
    int sf = extract32(insn, 31, 1);
    int opc = extract32(insn, 29, 2);
    int pos = extract32(insn, 21, 2) << 4;
    TCGv_i64 tcg_rd = cpu_reg(s, rd);

    if (!sf && (pos >= 32)) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* MOVN */
    case 2: /* MOVZ */
        imm <<= pos;
        if (opc == 0) {
            imm = ~imm;
        }
        if (!sf) {
            imm &= 0xffffffffu;
        }
        tcg_gen_movi_i64(tcg_rd, imm);
        break;
    case 3: /* MOVK */
    {
        TCGv_i64 tcg_imm;

        tcg_imm = tcg_const_i64(imm);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
        tcg_temp_free_i64(tcg_imm);
        if (!sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
        break;
    }
    default:
        unallocated_encoding(s);
        break;
    }
}
/* C3.4.2 Bitfield
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 *  +----+-----+-------------+---+------+------+------+------+
 *  | sf | opc | 1 0 0 1 1 0 | N | immr | imms |  Rn  |  Rd  |
 *  +----+-----+-------------+---+------+------+------+------+
 */
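/* For example, UBFX w0, w1, #7, #4 is UBFM with immr = 7, imms = 10:
 * si >= ri, so the code below shifts right by 7 and deposits a 4 bit
 * field at position 0.
 */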
static void disas_bitfield(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
    TCGv_i64 tcg_rd, tcg_tmp;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    n = extract32(insn, 22, 1);
    ri = extract32(insn, 16, 6);
    si = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    bitsize = sf ? 64 : 32;

    if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);
    tcg_tmp = read_cpu_reg(s, rn, sf);

    /* OPTME: probably worth recognizing common cases of ext{8,16,32}{u,s} */

    if (opc != 1) { /* SBFM or UBFM */
        tcg_gen_movi_i64(tcg_rd, 0);
    }

    /* do the bit move operation */
    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
        pos = 0;
        len = (si - ri) + 1;
    } else {
        /* Wd<32+s-r,32-r> = Wn<s:0> */
        pos = bitsize - ri;
        len = si + 1;
    }

    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);

    if (opc == 0) { /* SBFM - sign extend the destination field */
        tcg_gen_shli_i64(tcg_rd, tcg_rd, 64 - (pos + len));
        tcg_gen_sari_i64(tcg_rd, tcg_rd, 64 - (pos + len));
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
/* C3.4.3 Extract
 *   31  30  29 28         23 22  21  20  16 15    10 9    5 4    0
 *  +----+------+-------------+---+----+------+--------+------+------+
 *  | sf | op21 | 1 0 0 1 1 1 | N | o0 |  Rm  |  imms  |  Rn  |  Rd  |
 *  +----+------+-------------+---+----+------+--------+------+------+
 */
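/* For example, EXTR x0, x1, x2, #8 computes
 * x0 = (x2 >> 8) | (x1 << 56), ie 64 bits taken from the middle of
 * the x1:x2 concatenation.
 */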
static void disas_extract(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;

    sf = extract32(insn, 31, 1);
    n = extract32(insn, 22, 1);
    rm = extract32(insn, 16, 5);
    imm = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    op21 = extract32(insn, 29, 2);
    op0 = extract32(insn, 21, 1);
    bitsize = sf ? 64 : 32;

    if (sf != n || op21 || op0 || imm >= bitsize) {
        unallocated_encoding(s);
    } else {
        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

        tcg_rd = cpu_reg(s, rd);

        if (imm) {
            /* OPTME: we can special case rm==rn as a rotate */
            tcg_rm = read_cpu_reg(s, rm, sf);
            tcg_rn = read_cpu_reg(s, rn, sf);
            tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
            tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
            tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
             * so an extract from bit 0 is a special case.
             */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
            } else {
                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
            }
        }
    }
}
/* C3.4 Data processing - immediate */
static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 23, 6)) {
    case 0x20: case 0x21: /* PC-rel. addressing */
        disas_pc_rel_adr(s, insn);
        break;
    case 0x22: case 0x23: /* Add/subtract (immediate) */
        disas_add_sub_imm(s, insn);
        break;
    case 0x24: /* Logical (immediate) */
        disas_logic_imm(s, insn);
        break;
    case 0x25: /* Move wide (immediate) */
        disas_movw_imm(s, insn);
        break;
    case 0x26: /* Bitfield */
        disas_bitfield(s, insn);
        break;
    case 0x27: /* Extract */
        disas_extract(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
 * Note that it is the caller's responsibility to ensure that the
 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
 * mandated semantics for out of range shifts.
 */
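/* Note for the 32 bit case: ROR must rotate within the low 32 bits,
 * which is why the code below truncates to i32, rotates, and zero
 * extends rather than using the 64 bit rotate.
 */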
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
{
    switch (shift_type) {
    case A64_SHIFT_TYPE_LSL:
        tcg_gen_shl_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_LSR:
        tcg_gen_shr_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_ASR:
        if (!sf) {
            tcg_gen_ext32s_i64(dst, src);
        }
        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
        break;
    case A64_SHIFT_TYPE_ROR:
        if (sf) {
            tcg_gen_rotr_i64(dst, src, shift_amount);
        } else {
            TCGv_i32 t0, t1;
            t0 = tcg_temp_new_i32();
            t1 = tcg_temp_new_i32();
            tcg_gen_trunc_i64_i32(t0, src);
            tcg_gen_trunc_i64_i32(t1, shift_amount);
            tcg_gen_rotr_i32(t0, t0, t1);
            tcg_gen_extu_i32_i64(dst, t0);
            tcg_temp_free_i32(t0);
            tcg_temp_free_i32(t1);
        }
        break;
    default:
        assert(FALSE); /* all shift types should be handled */
        break;
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(dst, dst);
    }
}
/* Shift a TCGv src by immediate, put result in dst.
 * The shift amount must be in range (this should always be true as the
 * relevant instructions will UNDEF on bad shift immediates).
 */
static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
                          enum a64_shift_type shift_type, unsigned int shift_i)
{
    assert(shift_i < (sf ? 64 : 32));

    if (shift_i == 0) {
        tcg_gen_mov_i64(dst, src);
    } else {
        TCGv_i64 shift_const;

        shift_const = tcg_const_i64(shift_i);
        shift_reg(dst, src, sf, shift_type, shift_const);
        tcg_temp_free_i64(shift_const);
    }
}
/* C3.5.10 Logical (shifted register)
 *   31  30 29 28       24 23   22 21  20  16 15    10 9    5 4    0
 *  +----+-----+-----------+-------+---+------+--------+------+------+
 *  | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
 *  +----+-----+-----------+-------+---+------+--------+------+------+
 */
static void disas_logic_reg(DisasContext *s, uint32_t insn)
{
    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    shift_type = extract32(insn, 22, 2);
    invert = extract32(insn, 21, 1);
    rm = extract32(insn, 16, 5);
    shift_amount = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && (shift_amount & (1 << 5))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
         * register-register MOV and MVN, so it is worth special casing.
         */
        tcg_rm = cpu_reg(s, rm);
        if (invert) {
            tcg_gen_not_i64(tcg_rd, tcg_rm);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, tcg_rm);
            } else {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
            }
        }
        return;
    }

    tcg_rm = read_cpu_reg(s, rm, sf);

    if (shift_amount) {
        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
    }

    tcg_rn = cpu_reg(s, rn);

    switch (opc | (invert << 2)) {
    case 0: /* AND */
    case 3: /* ANDS */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 1: /* ORR */
        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 2: /* EOR */
        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 4: /* BIC */
    case 7: /* BICS */
        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 5: /* ORN */
        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 6: /* EON */
        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    default:
        assert(FALSE);
        break;
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) {
        gen_logic_CC(sf, tcg_rd);
    }
}
/*
 * C3.5.1 Add/subtract (extended register)
 *
 *  31|30|29|28       24|23 22|21|20   16|15  13|12  10|9  5|4  0|
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 * |sf|op| S| 0 1 0 1 1 | opt | 1|  Rm   |option| imm3 | Rn | Rd |
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 *
 *  sf: 0 -> 32bit, 1 -> 64bit
 *  op: 0 -> add  , 1 -> sub
 *   S: 1 -> set flags
 * opt: 00
 * option: extension type (see DecodeRegExtend)
 * imm3: optional shift to Rm
 *
 * Rd = Rn + LSL(extend(Rm), amount)
 */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);
    int option = extract32(insn, 13, 3);
    int rm = extract32(insn, 16, 5);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn; /* temps */
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    if (imm3 > 4) {
        unallocated_encoding(s);
        return;
    }

    /* non-flag setting ops may use SP */
    if (!setflags) {
        tcg_rn = read_cpu_reg_sp(s, rn, sf);
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rn = read_cpu_reg(s, rn, sf);
        tcg_rd = cpu_reg(s, rd);
    }

    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
/*
 * C3.5.2 Add/subtract (shifted register)
 *
 *  31 30 29 28       24 23 22 21 20   16 15     10 9    5 4    0
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 * |sf|op| S| 0 1 0 1 1 |shift| 0|  Rm   |  imm6   |  Rn  |  Rd  |
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
 *  imm6: Shift amount to apply to Rm before the add/sub
 */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 10, 6);
    int rm = extract32(insn, 16, 5);
    int shift_type = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn, tcg_rm;
    TCGv_i64 tcg_result;

    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, sf);
    tcg_rm = read_cpu_reg(s, rm, sf);

    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
/* C3.5.9 Data-processing (3 source)

   31 30  29 28       24 23 21  20  16  15  14  10 9    5 4    0
  +--+------+-----------+------+------+----+------+------+------+
  |sf| op54 | 1 1 0 1 1 | op31 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
  +--+------+-----------+------+------+----+------+------+------+

 */
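/* For example, UMULH x0, x1, x2 has sf = 1, op54 = 00, op31 = 110,
 * o0 = 0, giving op_id = 0x4c: is_high is set and the unsigned
 * mulu2 path below keeps only the high 64 bits of the product.
 */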
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    int op_id = (extract32(insn, 29, 3) << 4) |
        (extract32(insn, 21, 3) << 1) |
        extract32(insn, 15, 1);
    bool sf = extract32(insn, 31, 1);
    bool is_sub = extract32(op_id, 0, 1);
    bool is_high = extract32(op_id, 2, 1);
    bool is_signed = false;
    TCGv_i64 tcg_op1, tcg_op2, tcg_tmp;

    /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
    switch (op_id) {
    case 0x42: /* SMADDL */
    case 0x43: /* SMSUBL */
    case 0x44: /* SMULH */
        is_signed = true;
        break;
    case 0x0: /* MADD (32bit) */
    case 0x1: /* MSUB (32bit) */
    case 0x40: /* MADD (64bit) */
    case 0x41: /* MSUB (64bit) */
    case 0x4a: /* UMADDL */
    case 0x4b: /* UMSUBL */
    case 0x4c: /* UMULH */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_high) {
        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tcg_rm = cpu_reg(s, rm);

        if (is_signed) {
            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        }

        tcg_temp_free_i64(low_bits);
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_tmp = tcg_temp_new_i64();

    if (op_id < 0x42) {
        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
    } else {
        if (is_signed) {
            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
        } else {
            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
        }
    }

    if (ra == 31 && !is_sub) {
        /* Special-case MADD with rA == XZR; it is the standard MUL alias */
        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
    } else {
        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
        if (is_sub) {
            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        } else {
            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        }
    }

    if (!sf) {
        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
    }

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_tmp);
}
/* C3.5.3 - Add/subtract (with carry)
 *  31 30 29 28 27 26 25 24 23 22 21  20  16  15   10  9    5 4   0
 * +--+--+--+------------------------+------+---------+------+-----+
 * |sf|op| S| 1  1  0  1  0  0  0  0 |  rm  | opcode2 |  Rn  |  Rd |
 * +--+--+--+------------------------+------+---------+------+-----+
 *                                            [000000]
 */
static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, setflags, rm, rn, rd;
    TCGv_i64 tcg_y, tcg_rn, tcg_rd;

    if (extract32(insn, 10, 6) != 0) {
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    setflags = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (op) {
        tcg_y = new_tmp_a64(s);
        tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
    } else {
        tcg_y = cpu_reg(s, rm);
    }

    if (setflags) {
        gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
    } else {
        gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
    }
}
/* C3.5.4 - C3.5.5 Conditional compare (immediate / register)
 *  31 30 29 28 27 26 25 24 23 22 21  20    16 15  12  11  10  9   5  4 3   0
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 * |sf|op| S| 1  1  0  1  0  0  1  0 |imm5/rm | cond |i/r |o2|  Rn  |o3|nzcv |
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 *        [1]                                            [0]       [0]
 */
static void disas_cc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
    int label_continue = -1;
    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;

    if (!extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }
    if (insn & (1 << 10 | 1 << 4)) {
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    is_imm = extract32(insn, 11, 1);
    y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    nzcv = extract32(insn, 0, 4);

    if (cond < 0x0e) { /* not always */
        int label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        /* nomatch: */
        tcg_tmp = tcg_temp_new_i64();
        tcg_gen_movi_i64(tcg_tmp, nzcv << 28);
        gen_set_nzcv(tcg_tmp);
        tcg_temp_free_i64(tcg_tmp);
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }
    /* match, or condition is always */
    if (is_imm) {
        tcg_y = new_tmp_a64(s);
        tcg_gen_movi_i64(tcg_y, y);
    } else {
        tcg_y = cpu_reg(s, y);
    }
    tcg_rn = cpu_reg(s, rn);

    tcg_tmp = tcg_temp_new_i64();
    if (op) {
        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    } else {
        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    }
    tcg_temp_free_i64(tcg_tmp);

    if (cond < 0x0e) { /* continue */
        gen_set_label(label_continue);
    }
}
/* C3.5.6 Conditional select
 *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
 *  +----+----+---+-----------------+------+------+-----+------+------+
 *  | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
 *  +----+----+---+-----------------+------+------+-----+------+------+
 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, tcg_src;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (rd == 31) {
        /* silly no-op write; until we use movcond we must special-case
         * this to avoid a dead temporary across basic blocks.
         */
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (cond >= 0x0e) { /* condition "always" */
        tcg_src = read_cpu_reg(s, rn, sf);
        tcg_gen_mov_i64(tcg_rd, tcg_src);
    } else {
        /* OPTME: we could use movcond here, at the cost of duplicating
         * a lot of the arm_gen_test_cc() logic.
         */
        int label_match = gen_new_label();
        int label_continue = gen_new_label();

        arm_gen_test_cc(cond, label_match);
        /* nomatch: */
        tcg_src = cpu_reg(s, rm);

        if (else_inv && else_inc) {
            tcg_gen_neg_i64(tcg_rd, tcg_src);
        } else if (else_inv) {
            tcg_gen_not_i64(tcg_rd, tcg_src);
        } else if (else_inc) {
            tcg_gen_addi_i64(tcg_rd, tcg_src, 1);
        } else {
            tcg_gen_mov_i64(tcg_rd, tcg_src);
        }
        if (!sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
        tcg_gen_br(label_continue);
        /* match: */
        gen_set_label(label_match);
        tcg_src = read_cpu_reg(s, rn, sf);
        tcg_gen_mov_i64(tcg_rd, tcg_src);
        /* continue: */
        gen_set_label(label_continue);
    }
}
static void handle_clz(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_clz64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_clz(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}
static void handle_cls(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_cls64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_cls32(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}
static void handle_rbit(DisasContext *s, unsigned int sf,
                        unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_rbit64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_rbit(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}
/* C5.6.149 REV with sf==1, opcode==3 ("REV64") */
static void handle_rev64(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    if (!sf) {
        unallocated_encoding(s);
        return;
    }
    tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
}
/* C5.6.149 REV with sf==0, opcode==2
 * C5.6.151 REV32 (sf==1, opcode==2)
 */
static void handle_rev32(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);

    if (sf) {
        TCGv_i64 tcg_tmp = tcg_temp_new_i64();
        TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

        /* bswap32_i64 requires zero high word */
        tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
        tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
        tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
        tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);

        tcg_temp_free_i64(tcg_tmp);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
        tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
    }
}
/* C5.6.150 REV16 (opcode==1) */
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    tcg_gen_andi_i64(tcg_tmp, tcg_rn, 0xffff);
    tcg_gen_bswap16_i64(tcg_rd, tcg_tmp);

    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 16);
    tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
    tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 16, 16);

    if (sf) {
        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
        tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
        tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 32, 16);

        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 48);
        tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 48, 16);
    }

    tcg_temp_free_i64(tcg_tmp);
}
/* C3.5.7 Data-processing (1 source)
 *   31  30  29  28             21 20     16 15    10 9    5 4    0
 *  +----+---+---+-----------------+---------+--------+------+------+
 *  | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
 *  +----+---+---+-----------------+---------+--------+------+------+
 */
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opcode, rn, rd;

    if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 0: /* RBIT */
        handle_rbit(s, sf, rn, rd);
        break;
    case 1: /* REV16 */
        handle_rev16(s, sf, rn, rd);
        break;
    case 2: /* REV32 */
        handle_rev32(s, sf, rn, rd);
        break;
    case 3: /* REV64 */
        handle_rev64(s, sf, rn, rd);
        break;
    case 4: /* CLZ */
        handle_clz(s, sf, rn, rd);
        break;
    case 5: /* CLS */
        handle_cls(s, sf, rn, rd);
        break;
    }
}
static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
                       unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_n, tcg_m, tcg_rd;
    tcg_rd = cpu_reg(s, rd);

    if (!sf && is_signed) {
        tcg_n = new_tmp_a64(s);
        tcg_m = new_tmp_a64(s);
        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
    } else {
        tcg_n = read_cpu_reg(s, rn, sf);
        tcg_m = read_cpu_reg(s, rm, sf);
    }

    if (is_signed) {
        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
    } else {
        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
/* C5.6.115 LSLV, C5.6.118 LSRV, C5.6.17 ASRV, C5.6.154 RORV */
static void handle_shift_reg(DisasContext *s,
                             enum a64_shift_type shift_type, unsigned int sf,
                             unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_shift = tcg_temp_new_i64();
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
    shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
    tcg_temp_free_i64(tcg_shift);
}
/* C3.5.8 Data-processing (2 source)
 *   31 30 29 28             21 20  16 15    10 9    5 4    0
 *  +----+---+---+-----------------+------+--------+------+------+
 *  | sf | 0 | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
 *  +----+---+---+-----------------+------+--------+------+------+
 */
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, rm, opcode, rn, rd;
    sf = extract32(insn, 31, 1);
    rm = extract32(insn, 16, 5);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 2: /* UDIV */
        handle_div(s, false, sf, rm, rn, rd);
        break;
    case 3: /* SDIV */
        handle_div(s, true, sf, rm, rn, rd);
        break;
    case 8: /* LSLV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
        break;
    case 9: /* LSRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
        break;
    case 10: /* ASRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
        break;
    case 11: /* RORV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
        break;
    case 16:
    case 17:
    case 18:
    case 19:
    case 20:
    case 21:
    case 22:
    case 23: /* CRC32 */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* C3.5 Data processing - register */
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 5)) {
    case 0x0a: /* Logical (shifted register) */
        disas_logic_reg(s, insn);
        break;
    case 0x0b: /* Add/subtract */
        if (insn & (1 << 21)) { /* (extended register) */
            disas_add_sub_ext_reg(s, insn);
        } else {
            disas_add_sub_reg(s, insn);
        }
        break;
    case 0x1b: /* Data-processing (3 source) */
        disas_data_proc_3src(s, insn);
        break;
    case 0x1a:
        switch (extract32(insn, 21, 3)) {
        case 0x0: /* Add/subtract (with carry) */
            disas_adc_sbc(s, insn);
            break;
        case 0x2: /* Conditional compare */
            disas_cc(s, insn); /* both imm and reg forms */
            break;
        case 0x4: /* Conditional select */
            disas_cond_select(s, insn);
            break;
        case 0x6: /* Data-processing */
            if (insn & (1 << 30)) { /* (1 source) */
                disas_data_proc_1src(s, insn);
            } else {            /* (2 source) */
                disas_data_proc_2src(s, insn);
            }
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
static void handle_fp_compare(DisasContext *s, bool is_double,
                              unsigned int rn, unsigned int rm,
                              bool cmp_with_zero, bool signal_all_nans)
{
    TCGv_i64 tcg_flags = tcg_temp_new_i64();
    TCGv_ptr fpst = get_fpstatus_ptr();

    if (is_double) {
        TCGv_i64 tcg_vn, tcg_vm;

        tcg_vn = read_fp_dreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_const_i64(0);
        } else {
            tcg_vm = read_fp_dreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
        tcg_temp_free_i64(tcg_vn);
        tcg_temp_free_i64(tcg_vm);
    } else {
        TCGv_i32 tcg_vn, tcg_vm;

        tcg_vn = read_fp_sreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_const_i32(0);
        } else {
            tcg_vm = read_fp_sreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
        tcg_temp_free_i32(tcg_vn);
        tcg_temp_free_i32(tcg_vm);
    }

    tcg_temp_free_ptr(fpst);

    gen_set_nzcv(tcg_flags);

    tcg_temp_free_i64(tcg_flags);
}
/* C3.6.22 Floating point compare
 *   31  30  29 28       24 23  22  21 20  16 15 14 13  10    9    5 4     0
 *  +---+---+---+-----------+------+---+------+-----+---------+------+-------+
 *  | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | op  | 1 0 0 0 |  Rn  |  op2  |
 *  +---+---+---+-----------+------+---+------+-----+---------+------+-------+
 */
static void disas_fp_compare(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, op, rn, opc, op2r;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
    rm = extract32(insn, 16, 5);
    op = extract32(insn, 14, 2);
    rn = extract32(insn, 5, 5);
    opc = extract32(insn, 3, 2);
    op2r = extract32(insn, 0, 3);

    if (mos || op || op2r || type > 1) {
        unallocated_encoding(s);
        return;
    }

    handle_fp_compare(s, type, rn, rm, opc & 1, opc & 2);
}
/* C3.6.23 Floating point conditional compare
 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5  4   3    0
 *  +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 *  | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 0 1 |  Rn  | op | nzcv |
 *  +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 */
static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, op, nzcv;
    TCGv_i64 tcg_flags;
    int label_continue = -1;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    op = extract32(insn, 4, 1);
    nzcv = extract32(insn, 0, 4);

    if (mos || type > 1) {
        unallocated_encoding(s);
        return;
    }

    if (cond < 0x0e) { /* not always */
        int label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        /* nomatch: */
        tcg_flags = tcg_const_i64(nzcv << 28);
        gen_set_nzcv(tcg_flags);
        tcg_temp_free_i64(tcg_flags);
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }

    handle_fp_compare(s, type, rn, rm, false, op);

    if (cond < 0x0e) {
        gen_set_label(label_continue);
    }
}
/* copy src FP register to dst FP register; type specifies single or double */
static void gen_mov_fp2fp(DisasContext *s, int type, int dst, int src)
{
    if (type) {
        TCGv_i64 v = read_fp_dreg(s, src);
        write_fp_dreg(s, dst, v);
        tcg_temp_free_i64(v);
    } else {
        TCGv_i32 v = read_fp_sreg(s, src);
        write_fp_sreg(s, dst, v);
        tcg_temp_free_i32(v);
    }
}
/* C3.6.24 Floating point conditional select
 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5 4    0
 *  +---+---+---+-----------+------+---+------+------+-----+------+------+
 *  | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 1 1 |  Rn  |  Rd  |
 *  +---+---+---+-----------+------+---+------+------+-----+------+------+
 */
static void disas_fp_csel(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, rd;
    int label_continue = -1;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (mos || type > 1) {
        unallocated_encoding(s);
        return;
    }

    if (cond < 0x0e) { /* not always */
        int label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        /* nomatch: */
        gen_mov_fp2fp(s, type, rd, rm);
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }

    gen_mov_fp2fp(s, type, rd, rn);

    if (cond < 0x0e) { /* continue */
        gen_set_label(label_continue);
    }
}
/* C3.6.25 Floating-point data-processing (1 source) - single precision */
static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst;
    TCGv_i32 tcg_op;
    TCGv_i32 tcg_res;

    fpst = get_fpstatus_ptr();
    tcg_op = read_fp_sreg(s, rn);
    tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS */
        gen_helper_vfp_abss(tcg_res, tcg_op);
        break;
    case 0x2: /* FNEG */
        gen_helper_vfp_negs(tcg_res, tcg_op);
        break;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
        gen_helper_rints(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX */
        gen_helper_rints_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI */
        gen_helper_rints(tcg_res, tcg_op, fpst);
        break;
    default:
        abort();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
/* C3.6.25 Floating-point data-processing (1 source) - double precision */
static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst;
    TCGv_i64 tcg_op;
    TCGv_i64 tcg_res;

    fpst = get_fpstatus_ptr();
    tcg_op = read_fp_dreg(s, rn);
    tcg_res = tcg_temp_new_i64();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i64(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS */
        gen_helper_vfp_absd(tcg_res, tcg_op);
        break;
    case 0x2: /* FNEG */
        gen_helper_vfp_negd(tcg_res, tcg_op);
        break;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
        gen_helper_rintd(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX */
        gen_helper_rintd_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI */
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        break;
    default:
        abort();
    }

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op);
    tcg_temp_free_i64(tcg_res);
}
static void handle_fp_fcvt(DisasContext *s, int opcode,
                           int rd, int rn, int dtype, int ntype)
{
    switch (ntype) {
    case 0x0:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        if (dtype == 1) {
            /* Single to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        } else {
            /* Single to half */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, cpu_env);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
        }
        tcg_temp_free_i32(tcg_rn);
        break;
    }
    case 0x1:
    {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        if (dtype == 0) {
            /* Double to single */
            gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
        } else {
            /* Double to half */
            gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, cpu_env);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
        }
        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        break;
    }
    case 0x3:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
        if (dtype == 0) {
            /* Half to single */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, cpu_env);
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
        } else {
            /* Half to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, cpu_env);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        }
        tcg_temp_free_i32(tcg_rn);
        break;
    }
    default:
        abort();
    }
}
/* C3.6.25 Floating point data-processing (1 source)
 *   31  30  29 28       24 23  22  21 20    15 14       10 9    5 4    0
 *  +---+---+---+-----------+------+---+--------+-----------+------+------+
 *  | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 |  Rn  |  Rd  |
 *  +---+---+---+-----------+------+---+--------+-----------+------+------+
 */
static void disas_fp_1src(DisasContext *s, uint32_t insn)
{
    int type = extract32(insn, 22, 2);
    int opcode = extract32(insn, 15, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 0x4: case 0x5: case 0x7:
    {
        /* FCVT between half, single and double precision */
        int dtype = extract32(opcode, 0, 2);
        if (type == 2 || dtype == type) {
            unallocated_encoding(s);
            return;
        }
        handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
        break;
    }
    case 0x0 ... 0x3:
    case 0x8 ... 0xf:
        /* 32-to-32 and 64-to-64 ops */
        switch (type) {
        case 0:
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        case 1:
            handle_fp_1src_double(s, opcode, rd, rn);
            break;
        default:
            unallocated_encoding(s);
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* C3.6.26 Floating-point data-processing (2 source) - single precision */
static void handle_fp_2src_single(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = get_fpstatus_ptr();
    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negs(tcg_res, tcg_res);
        break;
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_res);
}
/* C3.6.26 Floating-point data-processing (2 source) - double precision */
static void handle_fp_2src_double(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i64();
    fpst = get_fpstatus_ptr();
    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negd(tcg_res, tcg_res);
        break;
    }

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res);
}
/* C3.6.26 Floating point data-processing (2 source)
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 *  +---+---+---+-----------+------+---+------+--------+-----+------+------+
 *  | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | opcode | 1 0 |  Rn  |  Rd  |
 *  +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_fp_2src(DisasContext *s, uint32_t insn)
{
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int opcode = extract32(insn, 12, 4);

    if (opcode > 8) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        handle_fp_2src_single(s, opcode, rd, rn, rm);
        break;
    case 1:
        handle_fp_2src_double(s, opcode, rd, rn, rm);
        break;
    default:
        unallocated_encoding(s);
    }
}
/* C3.6.27 Floating-point data-processing (3 source) - single precision */
static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = get_fpstatus_ptr();

    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);
    tcg_op3 = read_fp_sreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct : an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
    if (o1 == true) {
        gen_helper_vfp_negs(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negs(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}
/* C3.6.27 Floating-point data-processing (3 source) - double precision */
static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i64 tcg_res = tcg_temp_new_i64();
    TCGv_ptr fpst = get_fpstatus_ptr();

    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);
    tcg_op3 = read_fp_dreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct : an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
    if (o1 == true) {
        gen_helper_vfp_negd(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negd(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_op3);
    tcg_temp_free_i64(tcg_res);
}
/* C3.6.27 Floating point data-processing (3 source)
 *   31  30  29 28       24 23  22  21  20  16  15  14  10 9    5 4    0
 *  +---+---+---+-----------+------+----+------+----+------+------+------+
 *  | M | 0 | S | 1 1 1 1 1 | type | o1 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 *  +---+---+---+-----------+------+----+------+----+------+------+------+
 */
static void disas_fp_3src(DisasContext *s, uint32_t insn)
{
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    bool o0 = extract32(insn, 15, 1);
    bool o1 = extract32(insn, 21, 1);

    switch (type) {
    case 0:
        handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
        break;
    case 1:
        handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
        break;
    default:
        unallocated_encoding(s);
    }
}
/* C3.6.28 Floating point immediate
 *   31  30  29 28       24 23  22  21 20        13 12   10 9    5 4    0
 *  +---+---+---+-----------+------+---+------------+-------+------+------+
 *  | M | 0 | S | 1 1 1 1 0 | type | 1 |    imm8    | 1 0 0 | imm5 |  Rd  |
 *  +---+---+---+-----------+------+---+------------+-------+------+------+
 */
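/* For example, FMOV d0, #1.0 uses imm8 = 0x70: sign 0, imm8<6> set,
 * so the expansion below yields 0x3fc0 | 0x30 = 0x3ff0, which after
 * the 48 bit shift is 0x3ff0000000000000 (1.0).
 */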
static void disas_fp_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int imm8 = extract32(insn, 13, 8);
    int is_double = extract32(insn, 22, 2);
    uint64_t imm;
    TCGv_i64 tcg_res;

    if (is_double > 1) {
        unallocated_encoding(s);
        return;
    }

    /* The imm8 encodes the sign bit, enough bits to represent
     * an exponent in the range 01....1xx to 10....0xx,
     * and the most significant 4 bits of the mantissa; see
     * VFPExpandImm() in the v8 ARM ARM.
     */
    if (is_double) {
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
            extract32(imm8, 0, 6);
        imm <<= 48;
    } else {
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
            (extract32(imm8, 0, 6) << 3);
        imm <<= 16;
    }

    tcg_res = tcg_const_i64(imm);
    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}
/* Handle floating point <=> fixed point conversions. Note that we can
 * also deal with fp <=> integer conversions as a special case (scale == 64)
 * OPTME: consider handling that special case specially or at least skipping
 * the call to scalbn in the helpers for zero shifts.
 */
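/* For example, FCVTZS w0, s1, #16 arrives here with scale = 48, so
 * tcg_shift below is 16 and the helper scales by 2^16 before
 * converting; the integer form (via disas_fp_int_conv) passes
 * scale = 64, giving a shift of 0.
 */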
static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
                           bool itof, int rmode, int scale, int sf, int type)
{
    bool is_signed = !(opcode & 1);
    bool is_double = type;
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_shift;

    tcg_fpstatus = get_fpstatus_ptr();

    tcg_shift = tcg_const_i32(64 - scale);

    if (itof) {
        TCGv_i64 tcg_int = cpu_reg(s, rn);
        if (!sf) {
            TCGv_i64 tcg_extend = new_tmp_a64(s);

            if (is_signed) {
                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
            } else {
                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
            }

            tcg_int = tcg_extend;
        }

        if (is_double) {
            TCGv_i64 tcg_double = tcg_temp_new_i64();
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_dreg(s, rd, tcg_double);
            tcg_temp_free_i64(tcg_double);
        } else {
            TCGv_i32 tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
        }
    } else {
        TCGv_i64 tcg_int = cpu_reg(s, rd);
        TCGv_i32 tcg_rmode;

        if (extract32(opcode, 2, 1)) {
            /* There are too many rounding modes to all fit into rmode,
             * so FCVTA[US] is a special case.
             */
            rmode = FPROUNDING_TIEAWAY;
        }

        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

        if (is_double) {
            TCGv_i64 tcg_double = read_fp_dreg(s, rn);
            if (is_signed) {
                if (!sf) {
                    gen_helper_vfp_tosld(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                if (!sf) {
                    gen_helper_vfp_tould(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            }
            tcg_temp_free_i64(tcg_double);
        } else {
            TCGv_i32 tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
        }

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
        tcg_temp_free_i32(tcg_rmode);

        if (!sf) {
            tcg_gen_ext32u_i64(tcg_int, tcg_int);
        }
    }

    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_shift);
}
/* C3.6.29 Floating point <-> fixed point conversions
 *   31   30  29 28       24 23  22  21 20   19 18    16 15   10 9    5 4    0
 *  +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 *  | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale |  Rn  |  Rd  |
 *  +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 */
static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int scale = extract32(insn, 10, 6);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof;

    if (sbit || (type > 1)
        || (!sf && scale < 32)) {
        unallocated_encoding(s);
        return;
    }

    switch ((rmode << 3) | opcode) {
    case 0x2: /* SCVTF */
    case 0x3: /* UCVTF */
        itof = true;
        break;
    case 0x18: /* FCVTZS */
    case 0x19: /* FCVTZU */
        itof = false;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
}
static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    /* FMOV: gpr to or from float, double, or top half of quad fp reg,
     * without conversion.
     */
    if (itof) {
        TCGv_i64 tcg_rn = cpu_reg(s, rn);

        switch (type) {
        case 0:
        {
            /* 32 bit */
            TCGv_i64 tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            tcg_gen_st_i64(tmp, cpu_env, fp_reg_offset(rd, MO_64));
            tcg_gen_movi_i64(tmp, 0);
            tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(rd));
            tcg_temp_free_i64(tmp);
            break;
        }
        case 1:
        {
            /* 64 bit */
            TCGv_i64 tmp = tcg_const_i64(0);
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_offset(rd, MO_64));
            tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(rd));
            tcg_temp_free_i64(tmp);
            break;
        }
        case 2:
            /* 64 bit to top half. */
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(rd));
            break;
        }
    } else {
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(rn, MO_64));
            break;
        case 2:
            /* 64 bits from top half */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(rn));
            break;
        }
    }
}
/* C3.6.30 Floating point <-> integer conversions
 *   31   30  29 28       24 23  22  21 20   19 18 16 15         10 9  5 4  0
 *  +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 *  | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
 *  +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 */
static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);

    if (sbit) {
        unallocated_encoding(s);
        return;
    }

    if (opcode > 5) {
        /* FMOV */
        bool itof = opcode & 1;

        if (rmode >= 2) {
            unallocated_encoding(s);
            return;
        }

        switch (sf << 3 | type << 1 | rmode) {
        case 0x0: /* 32 bit */
        case 0xa: /* 64 bit */
        case 0xd: /* 64 bit to top half of quad */
            break;
        default:
            /* all other sf/type/rmode combinations are invalid */
            unallocated_encoding(s);
            return;
        }

        handle_fmov(s, rd, rn, type, itof);
    } else {
        /* actual FP conversions */
        bool itof = extract32(opcode, 1, 1);

        if (type > 1 || (rmode != 0 && opcode > 1)) {
            unallocated_encoding(s);
            return;
        }

        handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
    }
}
/* FP-specific subcases of table C3-6 (SIMD and FP data processing)
 *   31  30  29 28     25 24                          0
 * +---+---+---+---------+-----------------------------+
 * |   | 0 |   | 1 1 1 1 |                             |
 * +---+---+---+---------+-----------------------------+
 */
static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 24, 1)) {
        /* Floating point data-processing (3 source) */
        disas_fp_3src(s, insn);
    } else if (extract32(insn, 21, 1) == 0) {
        /* Floating point to fixed point conversions */
        disas_fp_fixed_conv(s, insn);
    } else {
        switch (extract32(insn, 10, 2)) {
        case 1:
            /* Floating point conditional compare */
            disas_fp_ccomp(s, insn);
            break;
        case 2:
            /* Floating point data-processing (2 source) */
            disas_fp_2src(s, insn);
            break;
        case 3:
            /* Floating point conditional select */
            disas_fp_csel(s, insn);
            break;
        case 0:
            switch (ctz32(extract32(insn, 12, 4))) {
            case 0: /* [15:12] == xxx1 */
                /* Floating point immediate */
                disas_fp_imm(s, insn);
                break;
            case 1: /* [15:12] == xx10 */
                /* Floating point compare */
                disas_fp_compare(s, insn);
                break;
            case 2: /* [15:12] == x100 */
                /* Floating point data-processing (1 source) */
                disas_fp_1src(s, insn);
                break;
            case 3: /* [15:12] == 1000 */
                unallocated_encoding(s);
                break;
            default: /* [15:12] == 0000 */
                /* Floating point <-> integer conversions */
                disas_fp_int_conv(s, insn);
                break;
            }
            break;
        }
    }
}

static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
                     int pos)
{
    /* Extract 64 bits from the middle of two concatenated 64 bit
     * vector register slices left:right. The extracted bits start
     * at 'pos' bits into the right (least significant) side.
     * We return the result in tcg_right, and guarantee not to
     * trash tcg_left.
     */
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    assert(pos > 0 && pos < 64);

    tcg_gen_shri_i64(tcg_right, tcg_right, pos);
    tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
    tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);

    tcg_temp_free_i64(tcg_tmp);
}

/* C3.6.1 EXT
 *   31  30 29         24 23 22  21 20  16 15  14  11 10  9    5 4    0
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | imm4 | 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 */
static void disas_simd_ext(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int op2 = extract32(insn, 22, 2);
    int imm4 = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pos = imm4 << 3;
    TCGv_i64 tcg_resl, tcg_resh;

    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
        unallocated_encoding(s);
        return;
    }

    tcg_resh = tcg_temp_new_i64();
    tcg_resl = tcg_temp_new_i64();

    /* Vd gets bits starting at pos bits into Vm:Vn. This is
     * either extracting 128 bits from a 128:128 concatenation, or
     * extracting 64 bits from a 64:64 concatenation.
     */
    if (!is_q) {
        read_vec_element(s, tcg_resl, rn, 0, MO_64);
        if (pos != 0) {
            read_vec_element(s, tcg_resh, rm, 0, MO_64);
            do_ext64(s, tcg_resh, tcg_resl, pos);
        }
        tcg_gen_movi_i64(tcg_resh, 0);
    } else {
        TCGv_i64 tcg_hh;
        typedef struct {
            int reg;
            int elt;
        } EltPosns;
        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
        EltPosns *elt = eltposns;

        if (pos >= 64) {
            elt++;
            pos -= 64;
        }

        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
        elt++;
        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
        elt++;
        if (pos != 0) {
            do_ext64(s, tcg_resh, tcg_resl, pos);
            tcg_hh = tcg_temp_new_i64();
            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
            do_ext64(s, tcg_hh, tcg_resh, pos);
            tcg_temp_free_i64(tcg_hh);
        }
    }

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
}

/* C3.6.2 TBL/TBX
 *   31  30 29         24 23 22  21 20  16 15 14 13  12  11 10 9    5 4    0
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | len | op | 0 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 */
static void disas_simd_tb(DisasContext *s, uint32_t insn)
{
    int op2 = extract32(insn, 22, 2);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int is_tblx = extract32(insn, 12, 1);
    int len = extract32(insn, 13, 2);
    TCGv_i64 tcg_resl, tcg_resh, tcg_idx;
    TCGv_i32 tcg_regno, tcg_numregs;

    if (op2 != 0) {
        unallocated_encoding(s);
        return;
    }

    /* This does a table lookup: for every byte element in the input
     * we index into a table formed from up to four vector registers,
     * and then the output is the result of the lookups. Our helper
     * function does the lookup operation for a single 64 bit part of
     * the input.
     */
    tcg_resl = tcg_temp_new_i64();
    tcg_resh = tcg_temp_new_i64();

    if (is_tblx) {
        read_vec_element(s, tcg_resl, rd, 0, MO_64);
    } else {
        tcg_gen_movi_i64(tcg_resl, 0);
    }
    if (is_tblx && is_q) {
        read_vec_element(s, tcg_resh, rd, 1, MO_64);
    } else {
        tcg_gen_movi_i64(tcg_resh, 0);
    }

    tcg_idx = tcg_temp_new_i64();
    tcg_regno = tcg_const_i32(rn);
    tcg_numregs = tcg_const_i32(len + 1);
    read_vec_element(s, tcg_idx, rm, 0, MO_64);
    gen_helper_simd_tbl(tcg_resl, cpu_env, tcg_resl, tcg_idx,
                        tcg_regno, tcg_numregs);
    if (is_q) {
        read_vec_element(s, tcg_idx, rm, 1, MO_64);
        gen_helper_simd_tbl(tcg_resh, cpu_env, tcg_resh, tcg_idx,
                            tcg_regno, tcg_numregs);
    }
    tcg_temp_free_i64(tcg_idx);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_numregs);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
}

/* C3.6.3 ZIP/UZP/TRN
 *   31  30 29         24 23  22  21 20   16 15 14 12 11 10 9    5 4    0
 * +---+---+-------------+------+---+------+---+------------------+------+
 * | 0 | Q | 0 0 1 1 1 0 | size | 0 |  Rm  | 0 | opc | 1 0 |  Rn  |  Rd  |
 * +---+---+-------------+------+---+------+---+------------------+------+
 */
static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    /* opc field bits [1:0] indicate ZIP/UZP/TRN;
     * bit 2 indicates 1 vs 2 variant of the insn.
     */
    int opcode = extract32(insn, 12, 2);
    bool part = extract32(insn, 14, 1);
    bool is_q = extract32(insn, 30, 1);
    int esize = 8 << size;
    int i, ofs;
    int datasize = is_q ? 128 : 64;
    int elements = datasize / esize;
    TCGv_i64 tcg_res, tcg_resl, tcg_resh;

    if (opcode == 0 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    tcg_resl = tcg_const_i64(0);
    tcg_resh = tcg_const_i64(0);
    tcg_res = tcg_temp_new_i64();

    for (i = 0; i < elements; i++) {
        switch (opcode) {
        case 1: /* UZP1/2 */
        {
            int midpoint = elements / 2;
            if (i < midpoint) {
                read_vec_element(s, tcg_res, rn, 2 * i + part, size);
            } else {
                read_vec_element(s, tcg_res, rm,
                                 2 * (i - midpoint) + part, size);
            }
            break;
        }
        case 2: /* TRN1/2 */
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
            } else {
                read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
            }
            break;
        case 3: /* ZIP1/2 */
        {
            int base = part * elements / 2;
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
            } else {
                read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
            }
            break;
        }
        default:
            g_assert_not_reached();
        }

        ofs = i * esize;
        if (ofs < 64) {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
            tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
        } else {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
            tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
        }
    }

    tcg_temp_free_i64(tcg_res);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
}

static void do_minmaxop(DisasContext *s, TCGv_i32 tcg_elt1, TCGv_i32 tcg_elt2,
                        int opc, bool is_min, TCGv_ptr fpst)
{
    /* Helper function for disas_simd_across_lanes: do a single precision
     * min/max operation on the specified two inputs,
     * and return the result in tcg_elt1.
     */
    if (opc == 0xc) {
        if (is_min) {
            gen_helper_vfp_minnums(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
        } else {
            gen_helper_vfp_maxnums(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
        }
    } else {
        assert(opc == 0xf);
        if (is_min) {
            gen_helper_vfp_mins(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
        } else {
            gen_helper_vfp_maxs(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
        }
    }
}

/* C3.6.4 AdvSIMD across lanes
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool is_q = extract32(insn, 30, 1);
    bool is_u = extract32(insn, 29, 1);
    bool is_fp = false;
    bool is_min = false;
    int esize;
    int elements;
    int i;
    TCGv_i64 tcg_res, tcg_elt;

    switch (opcode) {
    case 0x1b: /* ADDV */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x3: /* SADDLV, UADDLV */
    case 0xa: /* SMAXV, UMAXV */
    case 0x1a: /* SMINV, UMINV */
        if (size == 3 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc: /* FMAXNMV, FMINNMV */
    case 0xf: /* FMAXV, FMINV */
        if (!is_u || !is_q || extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        }
        /* Bit 1 of size field encodes min vs max, and actual size is always
         * 32 bits: adjust the size variable so following code can rely on it
         */
        is_min = extract32(size, 1, 1);
        is_fp = true;
        size = 2;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    esize = 8 << size;
    elements = (is_q ? 128 : 64) / esize;

    tcg_res = tcg_temp_new_i64();
    tcg_elt = tcg_temp_new_i64();

    /* These instructions operate across all lanes of a vector
     * to produce a single result. We can guarantee that a 64
     * bit intermediate is sufficient:
     * + for [US]ADDLV the maximum element size is 32 bits, and
     *   the result type is 64 bits
     * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
     *   same as the element size, which is 32 bits at most
     * For the integer operations we can choose to work at 64
     * or 32 bits and truncate at the end; for simplicity
     * we use 64 bits always. The floating point
     * ops do require 32 bit intermediates, though.
     */
    if (!is_fp) {
        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));

        for (i = 1; i < elements; i++) {
            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));

            switch (opcode) {
            case 0x03: /* SADDLV / UADDLV */
            case 0x1b: /* ADDV */
                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
                break;
            case 0x0a: /* SMAXV / UMAXV */
                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
                                    tcg_res,
                                    tcg_res, tcg_elt, tcg_res, tcg_elt);
                break;
            case 0x1a: /* SMINV / UMINV */
                tcg_gen_movcond_i64(is_u ? TCG_COND_LEU : TCG_COND_LE,
                                    tcg_res,
                                    tcg_res, tcg_elt, tcg_res, tcg_elt);
                break;
            default:
                g_assert_not_reached();
            }
        }
    } else {
        /* Floating point ops which work on 32 bit (single) intermediates.
         * Note that correct NaN propagation requires that we do these
         * operations in exactly the order specified by the pseudocode.
         */
        TCGv_i32 tcg_elt1 = tcg_temp_new_i32();
        TCGv_i32 tcg_elt2 = tcg_temp_new_i32();
        TCGv_i32 tcg_elt3 = tcg_temp_new_i32();
        TCGv_ptr fpst = get_fpstatus_ptr();

        assert(esize == 32);
        assert(elements == 4);

        read_vec_element(s, tcg_elt, rn, 0, MO_32);
        tcg_gen_trunc_i64_i32(tcg_elt1, tcg_elt);
        read_vec_element(s, tcg_elt, rn, 1, MO_32);
        tcg_gen_trunc_i64_i32(tcg_elt2, tcg_elt);

        do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);

        read_vec_element(s, tcg_elt, rn, 2, MO_32);
        tcg_gen_trunc_i64_i32(tcg_elt2, tcg_elt);
        read_vec_element(s, tcg_elt, rn, 3, MO_32);
        tcg_gen_trunc_i64_i32(tcg_elt3, tcg_elt);

        do_minmaxop(s, tcg_elt2, tcg_elt3, opcode, is_min, fpst);

        do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);

        tcg_gen_extu_i32_i64(tcg_res, tcg_elt1);
        tcg_temp_free_i32(tcg_elt1);
        tcg_temp_free_i32(tcg_elt2);
        tcg_temp_free_i32(tcg_elt3);
        tcg_temp_free_ptr(fpst);
    }

    tcg_temp_free_i64(tcg_elt);

    /* Now truncate the result to the width required for the final output */
    if (opcode == 0x03) {
        /* SADDLV, UADDLV: result is 2*esize */
        size++;
    }

    switch (size) {
    case 0:
        tcg_gen_ext8u_i64(tcg_res, tcg_res);
        break;
    case 1:
        tcg_gen_ext16u_i64(tcg_res, tcg_res);
        break;
    case 2:
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}

/* C6.3.31 DUP (Element, Vector)
 *
 *  31  30   29              21 20    16 15        10  9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    int esize = 8 << size;
    int elements = (is_q ? 128 : 64) / esize;
    int index, i;
    TCGv_i64 tmp;

    if (size > 3 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    index = imm5 >> (size + 1);

    tmp = tcg_temp_new_i64();
    read_vec_element(s, tmp, rn, index, size);

    for (i = 0; i < elements; i++) {
        write_vec_element(s, tmp, rd, i, size);
    }

    if (!is_q) {
        clear_vec_high(s, rd);
    }

    tcg_temp_free_i64(tmp);
}

/* C6.3.31 DUP (element, scalar)
 *  31                   21 20    16 15        10  9    5 4    0
 * +-----------------------+--------+-------------+------+------+
 * | 0 1 0 1 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +-----------------------+--------+-------------+------+------+
 */
static void handle_simd_dupes(DisasContext *s, int rd, int rn,
                              int imm5)
{
    int size = ctz32(imm5);
    int index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    index = imm5 >> (size + 1);

    /* This instruction just extracts the specified element and
     * zero-extends it into the bottom of the destination register.
     */
    tmp = tcg_temp_new_i64();
    read_vec_element(s, tmp, rn, index, size);
    write_fp_dreg(s, rd, tmp);
    tcg_temp_free_i64(tmp);
}

/* C6.3.32 DUP (General)
 *
 *  31  30   29              21 20    16 15        10  9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 1 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    int esize = 8 << size;
    int elements = (is_q ? 128 : 64) / esize;
    int i;

    if (size > 3 || ((size == 3) && !is_q)) {
        unallocated_encoding(s);
        return;
    }
    for (i = 0; i < elements; i++) {
        write_vec_element(s, cpu_reg(s, rn), rd, i, size);
    }
    if (!is_q) {
        clear_vec_high(s, rd);
    }
}

/* C6.3.150 INS (Element)
 *
 *  31                   21 20    16 15  14    11  10 9    5 4    0
 * +-----------------------+--------+------------+---+------+------+
 * | 0 1 1 0 1 1 1 0 0 0 0 |  imm5  | 0 |  imm4  | 1 |  Rn  |  Rd  |
 * +-----------------------+--------+------------+---+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 * index: encoded in imm5<4:size+1>
 */
static void handle_simd_inse(DisasContext *s, int rd, int rn,
                             int imm4, int imm5)
{
    int size = ctz32(imm5);
    int src_index, dst_index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }
    dst_index = extract32(imm5, 1 + size, 5);
    src_index = extract32(imm4, size, 4);

    tmp = tcg_temp_new_i64();

    read_vec_element(s, tmp, rn, src_index, size);
    write_vec_element(s, tmp, rd, dst_index, size);

    tcg_temp_free_i64(tmp);
}

/* C6.3.151 INS (General)
 *
 *  31                   21 20    16 15        10  9    5 4    0
 * +-----------------------+--------+-------------+------+------+
 * | 0 1 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 1 1 1 |  Rn  |  Rd  |
 * +-----------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 * index: encoded in imm5<4:size+1>
 */
static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
{
    int size = ctz32(imm5);
    int idx;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    idx = extract32(imm5, 1 + size, 4 - size);
    write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
}

/*
 * C6.3.321 UMOV (General)
 * C6.3.237 SMOV (General)
 *
 *  31  30   29              21 20    16 15    12   10 9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 1 U 1 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * U: unsigned when set
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
                                  int rn, int rd, int imm5)
{
    int size = ctz32(imm5);
    int element;
    TCGv_i64 tcg_rd;

    /* Check for UnallocatedEncodings */
    if (is_signed) {
        if (size > 2 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        if (size > 3
            || (size < 3 && is_q)
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    }
    element = extract32(imm5, 1 + size, 4);

    tcg_rd = cpu_reg(s, rd);
    read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
    if (is_signed && !is_q) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}

/* C3.6.5 AdvSIMD copy
 *   31  30  29  28             21 20  16 15  14  11 10  9    5 4    0
 * +---+---+----+-----------------+------+---+------+---+------+------+
 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
 * +---+---+----+-----------------+------+---+------+---+------+------+
 */
static void disas_simd_copy(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm4 = extract32(insn, 11, 4);
    int op = extract32(insn, 29, 1);
    int is_q = extract32(insn, 30, 1);
    int imm5 = extract32(insn, 16, 5);

    if (op) {
        if (is_q) {
            /* INS (element) */
            handle_simd_inse(s, rd, rn, imm4, imm5);
        } else {
            unallocated_encoding(s);
        }
    } else {
        switch (imm4) {
        case 0:
            /* DUP (element - vector) */
            handle_simd_dupe(s, is_q, rd, rn, imm5);
            break;
        case 1:
            /* DUP (general) */
            handle_simd_dupg(s, is_q, rd, rn, imm5);
            break;
        case 3:
            if (is_q) {
                /* INS (general) */
                handle_simd_insg(s, rd, rn, imm5);
            } else {
                unallocated_encoding(s);
            }
            break;
        case 5:
        case 7:
            /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
            handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
    }
}

/* C3.6.6 AdvSIMD modified immediate
 *  31  30   29  28                 19 18 16 15   12  11  10  9     5 4    0
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 *
 * There are a number of operations that can be carried out here:
 *   MOVI - move (shifted) imm into register
 *   MVNI - move inverted (shifted) imm into register
 *   ORR  - bitwise OR of (shifted) imm with register
 *   BIC  - bitwise clear of (shifted) imm with register
 */
static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int cmode = extract32(insn, 12, 4);
    int cmode_3_1 = extract32(cmode, 1, 3);
    int cmode_0 = extract32(cmode, 0, 1);
    int o2 = extract32(insn, 11, 1);
    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
    bool is_neg = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    uint64_t imm = 0;
    TCGv_i64 tcg_rd, tcg_imm;
    int i;

    if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    /* See AdvSIMDExpandImm() in ARM ARM */
    switch (cmode_3_1) {
    case 0: /* Replicate(Zeros(24):imm8, 2) */
    case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
    case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
    case 3: /* Replicate(imm8:Zeros(24), 2) */
    {
        int shift = cmode_3_1 * 8;
        imm = bitfield_replicate(abcdefgh << shift, 32);
        break;
    }
    case 4: /* Replicate(Zeros(8):imm8, 4) */
    case 5: /* Replicate(imm8:Zeros(8), 4) */
    {
        int shift = (cmode_3_1 & 0x1) * 8;
        imm = bitfield_replicate(abcdefgh << shift, 16);
        break;
    }
    case 6:
        if (cmode_0) {
            /* Replicate(Zeros(8):imm8:Ones(16), 2) */
            imm = (abcdefgh << 16) | 0xffff;
        } else {
            /* Replicate(Zeros(16):imm8:Ones(8), 2) */
            imm = (abcdefgh << 8) | 0xff;
        }
        imm = bitfield_replicate(imm, 32);
        break;
    case 7:
        if (!cmode_0 && !is_neg) {
            imm = bitfield_replicate(abcdefgh, 8);
        } else if (!cmode_0 && is_neg) {
            imm = 0;
            for (i = 0; i < 8; i++) {
                if ((abcdefgh) & (1 << i)) {
                    imm |= 0xffULL << (i * 8);
                }
            }
        } else if (cmode_0) {
            if (is_neg) {
                /* 64 bit floating point immediate */
                imm = (abcdefgh & 0x3f) << 48;
                if (abcdefgh & 0x80) {
                    imm |= 0x8000000000000000ULL;
                }
                if (abcdefgh & 0x40) {
                    imm |= 0x3fc0000000000000ULL;
                } else {
                    imm |= 0x4000000000000000ULL;
                }
            } else {
                /* 32 bit floating point immediate in each half */
                imm = (abcdefgh & 0x3f) << 19;
                if (abcdefgh & 0x80) {
                    imm |= 0x80000000;
                }
                if (abcdefgh & 0x40) {
                    imm |= 0x3e000000;
                } else {
                    imm |= 0x40000000;
                }
                imm |= (imm << 32);
            }
        }
        break;
    }

    if (cmode_3_1 != 7 && is_neg) {
        imm = ~imm;
    }

    tcg_imm = tcg_const_i64(imm);
    tcg_rd = new_tmp_a64(s);

    for (i = 0; i < 2; i++) {
        int foffs = i ? fp_reg_hi_offset(rd) : fp_reg_offset(rd, MO_64);

        if (i == 1 && !is_q) {
            /* non-quad ops clear high half of vector */
            tcg_gen_movi_i64(tcg_rd, 0);
        } else if ((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9) {
            tcg_gen_ld_i64(tcg_rd, cpu_env, foffs);
            if (is_neg) {
                /* AND (BIC) */
                tcg_gen_and_i64(tcg_rd, tcg_rd, tcg_imm);
            } else {
                /* ORR */
                tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_imm);
            }
        } else {
            /* MOVI */
            tcg_gen_mov_i64(tcg_rd, tcg_imm);
        }
        tcg_gen_st_i64(tcg_rd, cpu_env, foffs);
    }

    tcg_temp_free_i64(tcg_imm);
}

/* C3.6.7 AdvSIMD scalar copy
 *  31 30  29  28             21 20  16 15  14  11 10  9    5 4    0
 * +-----+----+-----------------+------+---+------+---+------+------+
 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
 * +-----+----+-----------------+------+---+------+---+------+------+
 */
static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm4 = extract32(insn, 11, 4);
    int imm5 = extract32(insn, 16, 5);
    int op = extract32(insn, 29, 1);

    if (op != 0 || imm4 != 0) {
        unallocated_encoding(s);
        return;
    }

    /* DUP (element, scalar) */
    handle_simd_dupes(s, rd, rn, imm5);
}

/* C3.6.8 AdvSIMD scalar pairwise
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
{
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr fpst;

    /* For some ops (the FP ones), size[1] is part of the encoding.
     * For ADDP strictly it is not but size[1] is always 1 for valid
     * encodings.
     */
    opcode |= (extract32(size, 1, 1) << 5);

    switch (opcode) {
    case 0x3b: /* ADDP */
        if (u || size != 3) {
            unallocated_encoding(s);
            return;
        }
        TCGV_UNUSED_PTR(fpst);
        break;
    case 0xc: /* FMAXNMP */
    case 0xd: /* FADDP */
    case 0xf: /* FMAXP */
    case 0x2c: /* FMINNMP */
    case 0x2f: /* FMINP */
        /* FP op, size[0] is 32 or 64 bit */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        size = extract32(size, 0, 1) ? 3 : 2;
        fpst = get_fpstatus_ptr();
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_64);
        read_vec_element(s, tcg_op2, rn, 1, MO_64);

        switch (opcode) {
        case 0x3b: /* ADDP */
            tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
            break;
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op1, rn, 0, MO_32);
        read_vec_element_i32(s, tcg_op2, rn, 1, MO_32);

        switch (opcode) {
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_res);
    }

    if (!TCGV_IS_UNUSED_PTR(fpst)) {
        tcg_temp_free_ptr(fpst);
    }
}

/*
 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
 *
 * This code handles the common shifting code and is used by both
 * the vector and scalar code.
 */
static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                    TCGv_i64 tcg_rnd, bool accumulate,
                                    bool is_u, int size, int shift)
{
    bool extended_result = false;
    bool round = !TCGV_IS_UNUSED_I64(tcg_rnd);
    int ext_lshift = 0;
    TCGv_i64 tcg_src_hi;

    if (round && size == 3) {
        extended_result = true;
        ext_lshift = 64 - shift;
        tcg_src_hi = tcg_temp_new_i64();
    } else if (shift == 64) {
        if (!accumulate && is_u) {
            /* result is zero */
            tcg_gen_movi_i64(tcg_res, 0);
            return;
        }
    }

    /* Deal with the rounding step */
    if (round) {
        if (extended_result) {
            TCGv_i64 tcg_zero = tcg_const_i64(0);
            if (!is_u) {
                /* take care of sign extending tcg_res */
                tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_src_hi,
                                 tcg_rnd, tcg_zero);
            } else {
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_zero,
                                 tcg_rnd, tcg_zero);
            }
            tcg_temp_free_i64(tcg_zero);
        } else {
            tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
        }
    }

    /* Now do the shift right */
    if (round && extended_result) {
        /* extended case, >64 bit precision required */
        if (ext_lshift == 0) {
            /* special case, only high bits matter */
            tcg_gen_mov_i64(tcg_src, tcg_src_hi);
        } else {
            tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
            tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
        }
    } else {
        if (is_u) {
            if (shift == 64) {
                /* essentially shifting in 64 zeros */
                tcg_gen_movi_i64(tcg_src, 0);
            } else {
                tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            }
        } else {
            if (shift == 64) {
                /* effectively extending the sign-bit */
                tcg_gen_sari_i64(tcg_src, tcg_src, 63);
            } else {
                tcg_gen_sari_i64(tcg_src, tcg_src, shift);
            }
        }
    }

    if (accumulate) {
        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
    } else {
        tcg_gen_mov_i64(tcg_res, tcg_src);
    }

    if (extended_result) {
        tcg_temp_free_i64(tcg_src_hi);
    }
}

/* Common SHL/SLI - Shift left with an optional insert */
static void handle_shli_with_ins(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                 bool insert, int shift)
{
    if (insert) { /* SLI */
        tcg_gen_deposit_i64(tcg_res, tcg_res, tcg_src, shift, 64 - shift);
    } else { /* SHL */
        tcg_gen_shli_i64(tcg_res, tcg_src, shift);
    }
}

/* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
static void handle_scalar_simd_shri(DisasContext *s,
                                    bool is_u, int immh, int immb,
                                    int opcode, int rn, int rd)
{
    const int size = 3;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    bool round = false;
    TCGv_i64 tcg_round;
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;

    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = round = true;
        break;
    }

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        TCGV_UNUSED_I64(tcg_round);
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = accumulate ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                            accumulate, is_u, size, shift);

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
}

/* SHL/SLI - Scalar shift left */
static void handle_scalar_simd_shli(DisasContext *s, bool insert,
                                    int immh, int immb, int opcode,
                                    int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;

    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    handle_shli_with_ins(tcg_rd, tcg_rn, insert, shift);

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
}

/* C3.6.9 AdvSIMD scalar shift by immediate
 *  31 30  29 28         23 22  19 18 16 15    11  10 9    5 4    0
 * +-----+---+-------------+------+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-------------+------+------+--------+---+------+------+
 *
 * This is the scalar version, so it works on fixed size registers.
 */
static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);

    switch (opcode) {
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
        break;
    default:
        unsupported_encoding(s, insn);
        break;
    }
}

/* C3.6.10 AdvSIMD scalar three different
 *  31 30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

static void handle_3same_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
    /* Handle 64x64->64 opcodes which are shared between the scalar
     * and vector 3-same groups. We cover every opcode where size == 3
     * is valid in either the three-reg-same (integer, not pairwise)
     * or scalar-three-reg-same groups.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x1: /* SQADD */
        if (u) {
            gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x5: /* SQSUB */
        if (u) {
            gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x6: /* CMGT, CMHI */
        /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
         * We implement this using setcond (test) and then negating.
         */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
    do_cmop:
        tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x7: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
        goto do_cmop;
    case 0x11: /* CMTST, CMEQ */
        if (u) {
            cond = TCG_COND_EQ;
            goto do_cmop;
        }
        /* CMTST : test is "if (X & Y != 0)". */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        tcg_gen_setcondi_i64(TCG_COND_NE, tcg_rd, tcg_rd, 0);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x8: /* SSHL, USHL */
        if (u) {
            gen_helper_neon_shl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_shl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0x9: /* SQSHL, UQSHL */
        if (u) {
            gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0xa: /* SRSHL, URSHL */
        if (u) {
            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0xb: /* SQRSHL, UQRSHL */
        if (u) {
            gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x10: /* ADD, SUB */
        if (u) {
            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

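/* Note on the setcond-and-negate idiom above (illustrative, not part of the
 * original source): tcg_gen_setcond_i64() produces 0 or 1, and negating
 * maps 1 to 0xffffffffffffffff, giving the all-ones/all-zeroes result the
 * comparison insns require.
 */
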
/* Handle the 3-same-operands float operations; shared by the scalar
 * and vector encodings. The caller must filter out any encodings
 * not allocated for the encoding it is dealing with.
 */
static void handle_3same_float(DisasContext *s, int size, int elements,
                               int fpopcode, int rd, int rn, int rm)
{
    int pass;
    TCGv_ptr fpst = get_fpstatus_ptr();

    for (pass = 0; pass < elements; pass++) {
        if (size) {
            /* Double */
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            switch (fpopcode) {
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_absd(tcg_res, tcg_res);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        } else {
            /* Single */
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (fpopcode) {
            case 0x1a: /* FADD */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_abss(tcg_res, tcg_res);
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                /* scalar single so clear high part */
                TCGv_i64 tcg_tmp = tcg_temp_new_i64();

                tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
                write_vec_element(s, tcg_tmp, rd, pass, MO_64);
                tcg_temp_free_i64(tcg_tmp);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    if ((elements << size) < 4) {
        /* scalar, or non-quad vector op */
        clear_vec_high(s, rd);
    }
}

/* C3.6.11 AdvSIMD scalar three same
 *  31 30  29 28       24 23  22  21 20  16 15    11  10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+---+------+------+
 */
static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i64 tcg_rd;

    if (opcode >= 0x18) {
        /* Floating point: U, size[1] and opcode indicate operation */
        int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
        switch (fpopcode) {
        case 0x1b: /* FMULX */
        case 0x1c: /* FCMEQ */
        case 0x1f: /* FRECPS */
        case 0x3f: /* FRSQRTS */
        case 0x5c: /* FCMGE */
        case 0x5d: /* FACGE */
        case 0x7c: /* FCMGT */
        case 0x7d: /* FACGT */
            unsupported_encoding(s, insn);
            return;
        case 0x7a: /* FABD */
            break;
        default:
            unallocated_encoding(s);
            return;
        }

        handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
        return;
    }

    switch (opcode) {
    case 0x1: /* SQADD, UQADD */
    case 0x5: /* SQSUB, UQSUB */
    case 0x9: /* SQSHL, UQSHL */
    case 0xb: /* SQRSHL, UQRSHL */
        break;
    case 0x8: /* SSHL, USHL */
    case 0xa: /* SRSHL, URSHL */
    case 0x6: /* CMGT, CMHI */
    case 0x7: /* CMGE, CMHS */
    case 0x11: /* CMTST, CMEQ */
    case 0x10: /* ADD, SUB (vector) */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    tcg_rd = tcg_temp_new_i64();

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);

        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
        tcg_temp_free_i64(tcg_rn);
        tcg_temp_free_i64(tcg_rm);
    } else {
        /* Do a single operation on the lowest element in the vector.
         * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
         * no side effects for all these operations.
         * OPTME: special-purpose helpers would avoid doing some
         * unnecessary work in the helper for the 8 and 16 bit cases.
         */
        NeonGenTwoOpEnvFn *genenvfn;
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rm = tcg_temp_new_i32();
        TCGv_i32 tcg_rd32 = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);
        read_vec_element_i32(s, tcg_rm, rm, 0, size);

        switch (opcode) {
        case 0x1: /* SQADD, UQADD */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x5: /* SQSUB, UQSUB */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x9: /* SQSHL, UQSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0xb: /* SQRSHL, UQRSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* SQDMULH, SQRDMULH */
        {
            static NeonGenTwoOpEnvFn * const fns[2][2] = {
                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
            };
            assert(size == 1 || size == 2);
            genenvfn = fns[size - 1][u];
            break;
        }
        default:
            g_assert_not_reached();
        }

        genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
        tcg_temp_free_i32(tcg_rd32);
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_i32(tcg_rm);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rd);
}

static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn)
{
    /* Handle 64->64 opcodes which are shared between the scalar and
     * vector 2-reg-misc groups. We cover every integer opcode where size == 3
     * is valid in either group and also the double-precision fp ops.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x5: /* NOT */
        /* This opcode is shared with CNT and RBIT but we have earlier
         * enforced that size == 3 if and only if this is the NOT insn.
         */
        tcg_gen_not_i64(tcg_rd, tcg_rn);
        break;
    case 0xa: /* CMLT */
        /* 64 bit integer comparison against zero, result is
         * test ? (2^64 - 1) : 0. We implement via setcond(test) and
         * then negating.
         */
        cond = TCG_COND_LT;
    do_cmop:
        tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x8: /* CMGT, CMGE */
        cond = u ? TCG_COND_GE : TCG_COND_GT;
        goto do_cmop;
    case 0x9: /* CMEQ, CMLE */
        cond = u ? TCG_COND_LE : TCG_COND_EQ;
        goto do_cmop;
    case 0xb: /* ABS, NEG */
        if (u) {
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
        } else {
            TCGv_i64 tcg_zero = tcg_const_i64(0);
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
            tcg_gen_movcond_i64(TCG_COND_GT, tcg_rd, tcg_rn, tcg_zero,
                                tcg_rn, tcg_rd);
            tcg_temp_free_i64(tcg_zero);
        }
        break;
    case 0x2f: /* FABS */
        gen_helper_vfp_absd(tcg_rd, tcg_rn);
        break;
    case 0x6f: /* FNEG */
        gen_helper_vfp_negd(tcg_rd, tcg_rn);
        break;
    default:
        g_assert_not_reached();
    }
}

/* C3.6.12 AdvSIMD scalar two reg misc
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 12, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);

    switch (opcode) {
    case 0xa: /* CMLT */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        /* Other categories of encoding in this class:
         *  + floating point (single and double)
         *  + SUQADD/USQADD/SQABS/SQNEG : size 8, 16, 32 or 64
         *  + SQXTN/SQXTN2/SQXTUN/SQXTUN2/UQXTN/UQXTN2:
         *    narrowing saturate ops: size 64/32/16 -> 32/16/8
         */
        unsupported_encoding(s, insn);
        return;
    }

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rd = tcg_temp_new_i64();

        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn);
        write_fp_dreg(s, rd, tcg_rd);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    } else {
        /* the 'size might not be 64' ops aren't implemented yet */
        g_assert_not_reached();
    }
}

/* C3.6.13 AdvSIMD scalar x indexed element
 *  31 30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 */
static void disas_simd_scalar_indexed(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    bool round = false;
    int dsize = is_q ? 128 : 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGMemOp memop = size | (is_u ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    TCGv_i64 tcg_round;
    int i;

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (size > 3 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = round = true;
        break;
    }

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        TCGV_UNUSED_I64(tcg_round);
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, memop);
        if (accumulate) {
            read_vec_element(s, tcg_rd, rd, i, memop);
        }

        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);

        write_vec_element(s, tcg_rd, rd, i, size);
    }

    if (!is_q) {
        clear_vec_high(s, rd);
    }

    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
}

/* SHL/SLI - Vector shift left */
static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    int dsize = is_q ? 128 : 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    int i;

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (size > 3 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, size);
        if (insert) {
            read_vec_element(s, tcg_rd, rd, i, size);
        }

        handle_shli_with_ins(tcg_rd, tcg_rn, insert, shift);

        write_vec_element(s, tcg_rd, rd, i, size);
    }

    if (!is_q) {
        clear_vec_high(s, rd);
    }
}

/* USHLL/SHLL - Vector shift left with widening */
static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
                                  int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    int i;

    if (size >= 3) {
        unallocated_encoding(s);
        return;
    }

    /* For the LL variants the store is larger than the load,
     * so if rd == rn we would overwrite parts of our input.
     * So load everything right now and use shifts in the main loop.
     */
    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);

    for (i = 0; i < elements; i++) {
        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
        write_vec_element(s, tcg_rd, rd, i, size + 1);
    }
}

/* C3.6.14 AdvSIMD shift by immediate
 *  31  30   29 28         23 22  19 18 16 15    11  10 9    5 4    0
 * +---+---+---+-------------+------+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-------------+------+------+--------+---+------+------+
 */
static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    switch (opcode) {
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA (accumulate) */
    case 0x04: /* SRSHR / URSHR (rounding) */
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x14: /* SSHLL / USHLL */
        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    default:
        /* We don't currently implement any of the Narrow or saturating shifts;
         * nor do we implement the fixed-point conversions in this
         * encoding group (SCVTF, FCVTZS, UCVTF, FCVTZU).
         */
        unsupported_encoding(s, insn);
        return;
    }
}

static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
                                int opcode, int rd, int rn, int rm)
{
    /* 3-reg-different widening insns: 64 x 64 -> 128 */
    TCGv_i64 tcg_res[2];
    int pass, accop;

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    /* Does this op do an adding accumulate, a subtracting accumulate,
     * or no accumulate at all?
     */
    switch (opcode) {
    case 5:
    case 8:
    case 9:
        accop = 1;
        break;
    case 10:
    case 11:
        accop = -1;
        break;
    default:
        accop = 0;
        break;
    }

    if (accop != 0) {
        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
    }

    /* size == 2 means two 32x32->64 operations; this is worth special
     * casing because we can generally handle it inline.
     */
    if (size == 2) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_passres;
            TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);

            int elt = pass + is_q * 2;

            read_vec_element(s, tcg_op1, rn, elt, memop);
            read_vec_element(s, tcg_op2, rm, elt, memop);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
            {
                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();

                tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
                tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
                                    tcg_passres,
                                    tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
                tcg_temp_free_i64(tcg_tmp1);
                tcg_temp_free_i64(tcg_tmp2);
                break;
            }
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            default:
                g_assert_not_reached();
            }

            if (accop > 0) {
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                tcg_temp_free_i64(tcg_passres);
            } else if (accop < 0) {
                tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                tcg_temp_free_i64(tcg_passres);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* size 0 or 1, generally helper functions */
        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i64 tcg_passres;
            int elt = pass + is_q * 2;

            read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);

            if (accop > 0) {
                if (size == 0) {
                    gen_helper_neon_addl_u16(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                } else {
                    gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                }
                tcg_temp_free_i64(tcg_passres);
            } else if (accop < 0) {
                if (size == 0) {
                    gen_helper_neon_subl_u16(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                } else {
                    gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                }
                tcg_temp_free_i64(tcg_passres);
            }
        }
    }

    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}

/* C3.6.15 AdvSIMD three different
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
{
    /* Instructions in this group fall into three basic classes
     * (in each case with the operation working on each element in
     * the input vectors):
     * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
     *     128 bit input)
     * (2) wide 64 x 128 -> 128
     * (3) narrowing 128 x 128 -> 64
     * Here we do initial decode, catch unallocated cases and
     * dispatch to separate functions for each class.
     */
    int is_q = extract32(insn, 30, 1);
    int is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
    case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
        /* 64 x 128 -> 128 */
        unsupported_encoding(s, insn);
        break;
    case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
    case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
        /* 128 x 128 -> 64 */
        unsupported_encoding(s, insn);
        break;
    case 9:
    case 11:
    case 13:
    case 14:
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0:
    case 2:
        unsupported_encoding(s, insn);
        break;
    case 5:
    case 7:
    case 8:
    case 10:
    case 12:
        /* 64 x 64 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    default:
        /* opcode 15 not allocated */
        unallocated_encoding(s);
        break;
    }
}

/* Logic op (opcode == 3) subgroup of C3.6.16. */
static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    TCGv_i64 tcg_op1 = tcg_temp_new_i64();
    TCGv_i64 tcg_op2 = tcg_temp_new_i64();
    TCGv_i64 tcg_res[2];
    int pass;

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        if (!is_u) {
            switch (size) {
            case 0: /* AND */
                tcg_gen_and_i64(tcg_res[pass], tcg_op1, tcg_op2);
                break;
            case 1: /* BIC */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op1, tcg_op2);
                break;
            case 2: /* ORR */
                tcg_gen_or_i64(tcg_res[pass], tcg_op1, tcg_op2);
                break;
            case 3: /* ORN */
                tcg_gen_orc_i64(tcg_res[pass], tcg_op1, tcg_op2);
                break;
            }
        } else {
            if (size != 0) {
                /* B* ops need res loaded to operate on */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            }

            switch (size) {
            case 0: /* EOR */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
                break;
            case 1: /* BSL bitwise select */
                tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_op2);
                tcg_gen_and_i64(tcg_op1, tcg_op1, tcg_res[pass]);
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op1);
                break;
            case 2: /* BIT, bitwise insert if true */
                tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_res[pass]);
                tcg_gen_and_i64(tcg_op1, tcg_op1, tcg_op2);
                tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
                break;
            case 3: /* BIF, bitwise insert if false */
                tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_res[pass]);
                tcg_gen_andc_i64(tcg_op1, tcg_op1, tcg_op2);
                tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
                break;
            }
        }
    }

    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    if (!is_q) {
        tcg_gen_movi_i64(tcg_res[1], 0);
    }
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}
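/* The xor/and/xor sequences above compute a bitwise select without a
 * select op. For BSL the result is op2 ^ ((op1 ^ op2) & rd): where a
 * mask bit of rd is 1 this simplifies to the op1 bit, where it is 0 to
 * the op2 bit. BIT and BIF reuse the same identity with op2 (or its
 * complement, via andc) as the mask and the old rd value as one input.
 */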
/* Helper functions for 32 bit comparisons */
static void gen_max_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
{
    tcg_gen_movcond_i32(TCG_COND_GE, res, op1, op2, op1, op2);
}

static void gen_max_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
{
    tcg_gen_movcond_i32(TCG_COND_GEU, res, op1, op2, op1, op2);
}

static void gen_min_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
{
    tcg_gen_movcond_i32(TCG_COND_LE, res, op1, op2, op1, op2);
}

static void gen_min_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
{
    tcg_gen_movcond_i32(TCG_COND_LEU, res, op1, op2, op1, op2);
}
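/* tcg_gen_movcond_i32(cond, res, c1, c2, v1, v2) computes
 * res = cond(c1, c2) ? v1 : v2, so gen_max_s32 above yields
 * res = (op1 >= op2) ? op1 : op2, i.e. the signed maximum; the other
 * three helpers differ only in the condition used.
 */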
/* Pairwise op subgroup of C3.6.16. */
static void disas_simd_3same_pair(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pass;

    if (size == 3 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x14: /* SMAXP, UMAXP */
    case 0x15: /* SMINP, UMINP */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x17: /* ADDP */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        g_assert_not_reached();
    }

    /* These operations work on the concatenated rm:rn, with each pair of
     * adjacent elements being operated on to produce an element in the result.
     */
    if (size == 3) {
        TCGv_i64 tcg_res[2];

        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            int passreg = (pass == 0) ? rn : rm;

            read_vec_element(s, tcg_op1, passreg, 0, MO_64);
            read_vec_element(s, tcg_op2, passreg, 1, MO_64);
            tcg_res[pass] = tcg_temp_new_i64();

            /* The only 64 bit pairwise integer op is ADDP */
            assert(opcode == 0x17);
            tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    } else {
        int maxpass = is_q ? 4 : 2;
        TCGv_i32 tcg_res[4];

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            NeonGenTwoOpFn *genfn;
            int passreg = pass < (maxpass / 2) ? rn : rm;
            int passelt = (is_q && (pass & 1)) ? 2 : 0;

            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
            tcg_res[pass] = tcg_temp_new_i32();

            switch (opcode) {
            case 0x17: /* ADDP */
            {
                static NeonGenTwoOpFn * const fns[3] = {
                    gen_helper_neon_padd_u8,
                    gen_helper_neon_padd_u16,
                    tcg_gen_add_i32,
                };
                genfn = fns[size];
                break;
            }
            case 0x14: /* SMAXP, UMAXP */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
                    { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
                    { gen_max_s32, gen_max_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x15: /* SMINP, UMINP */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
                    { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
                    { gen_min_s32, gen_min_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            default:
                g_assert_not_reached();
            }

            genfn(tcg_res[pass], tcg_op1, tcg_op2);

            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }

        for (pass = 0; pass < maxpass; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
            tcg_temp_free_i32(tcg_res[pass]);
        }
        if (!is_q) {
            clear_vec_high(s, rd);
        }
    }
}
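/* Illustration of the pairwise layout (derived from the passreg/passelt
 * arithmetic above, not a separate code path): for ADDP V0.4S, V1.4S,
 * V2.4S the eight source elements are the concatenation V2:V1 and
 *   V0.S[0] = V1.S[0] + V1.S[1]     V0.S[1] = V1.S[2] + V1.S[3]
 *   V0.S[2] = V2.S[0] + V2.S[1]     V0.S[3] = V2.S[2] + V2.S[3]
 */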
/* Floating point op subgroup of C3.6.16. */
static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
{
    /* For floating point ops, the U, size[1] and opcode bits
     * together indicate the operation. size[0] indicates single
     * or double.
     */
    int fpopcode = extract32(insn, 11, 5)
        | (extract32(insn, 23, 1) << 5)
        | (extract32(insn, 29, 1) << 6);
    int is_q = extract32(insn, 30, 1);
    int size = extract32(insn, 22, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    int datasize = is_q ? 128 : 64;
    int esize = 32 << size;
    int elements = datasize / esize;

    if (size == 1 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    switch (fpopcode) {
    case 0x58: /* FMAXNMP */
    case 0x5a: /* FADDP */
    case 0x5e: /* FMAXP */
    case 0x78: /* FMINNMP */
    case 0x7e: /* FMINP */
        /* pairwise ops */
        unsupported_encoding(s, insn);
        return;
    case 0x1b: /* FMULX */
    case 0x1c: /* FCMEQ */
    case 0x1f: /* FRECPS */
    case 0x3f: /* FRSQRTS */
    case 0x5c: /* FCMGE */
    case 0x5d: /* FACGE */
    case 0x7c: /* FCMGT */
    case 0x7d: /* FACGT */
    case 0x19: /* FMLA */
    case 0x39: /* FMLS */
        unsupported_encoding(s, insn);
        return;
    case 0x18: /* FMAXNM */
    case 0x1a: /* FADD */
    case 0x1e: /* FMAX */
    case 0x38: /* FMINNM */
    case 0x3a: /* FSUB */
    case 0x3e: /* FMIN */
    case 0x5b: /* FMUL */
    case 0x5f: /* FDIV */
    case 0x7a: /* FABD */
        handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
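/* fpopcode above is U:size[1]:opcode<4:0>. For example FADD (U=0,
 * size[1]=0, opcode 0x1a) gives fpopcode 0x1a, while FSUB is the same
 * opcode bits with size[1] set: 0x20 | 0x1a = 0x3a.
 */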
/* Integer op subgroup of C3.6.16. */
static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pass;

    switch (opcode) {
    case 0x13: /* MUL, PMUL */
        if (u && size != 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x0: /* SHADD, UHADD */
    case 0x2: /* SRHADD, URHADD */
    case 0x4: /* SHSUB, UHSUB */
    case 0xc: /* SMAX, UMAX */
    case 0xd: /* SMIN, UMIN */
    case 0xe: /* SABD, UABD */
    case 0xf: /* SABA, UABA */
    case 0x12: /* MLA, MLS */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    }

    if (size == 3) {
        for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();
            NeonGenTwoOpFn *genfn = NULL;
            NeonGenTwoOpEnvFn *genenvfn = NULL;

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (opcode) {
            case 0x0: /* SHADD, UHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
                    { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
                    { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x1: /* SQADD, UQADD */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                    { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                    { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0x2: /* SRHADD, URHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
                    { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
                    { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x4: /* SHSUB, UHSUB */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
                    { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
                    { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x5: /* SQSUB, UQSUB */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                    { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                    { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0x6: /* CMGT, CMHI */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_u8 },
                    { gen_helper_neon_cgt_s16, gen_helper_neon_cgt_u16 },
                    { gen_helper_neon_cgt_s32, gen_helper_neon_cgt_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x7: /* CMGE, CMHS */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_cge_s8, gen_helper_neon_cge_u8 },
                    { gen_helper_neon_cge_s16, gen_helper_neon_cge_u16 },
                    { gen_helper_neon_cge_s32, gen_helper_neon_cge_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x8: /* SSHL, USHL */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 },
                    { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 },
                    { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x9: /* SQSHL, UQSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                    { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                    { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0xa: /* SRSHL, URSHL */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
                    { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
                    { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xb: /* SQRSHL, UQRSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                    { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                    { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0xc: /* SMAX, UMAX */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_max_s8, gen_helper_neon_max_u8 },
                    { gen_helper_neon_max_s16, gen_helper_neon_max_u16 },
                    { gen_max_s32, gen_max_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xd: /* SMIN, UMIN */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_min_s8, gen_helper_neon_min_u8 },
                    { gen_helper_neon_min_s16, gen_helper_neon_min_u16 },
                    { gen_min_s32, gen_min_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xe: /* SABD, UABD */
            case 0xf: /* SABA, UABA */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_abd_s8, gen_helper_neon_abd_u8 },
                    { gen_helper_neon_abd_s16, gen_helper_neon_abd_u16 },
                    { gen_helper_neon_abd_s32, gen_helper_neon_abd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x10: /* ADD, SUB */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_add_u8, gen_helper_neon_sub_u8 },
                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x11: /* CMTST, CMEQ */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_tst_u8, gen_helper_neon_ceq_u8 },
                    { gen_helper_neon_tst_u16, gen_helper_neon_ceq_u16 },
                    { gen_helper_neon_tst_u32, gen_helper_neon_ceq_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x13: /* MUL, PMUL */
                if (u) {
                    /* PMUL */
                    assert(size == 0);
                    genfn = gen_helper_neon_mul_p8;
                    break;
                }
                /* fall through : MUL */
            case 0x12: /* MLA, MLS */
            {
                static NeonGenTwoOpFn * const fns[3] = {
                    gen_helper_neon_mul_u8,
                    gen_helper_neon_mul_u16,
                    tcg_gen_mul_i32,
                };
                genfn = fns[size];
                break;
            }
            case 0x16: /* SQDMULH, SQRDMULH */
            {
                static NeonGenTwoOpEnvFn * const fns[2][2] = {
                    { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                    { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
                };
                assert(size == 1 || size == 2);
                genenvfn = fns[size - 1][u];
                break;
            }
            default:
                g_assert_not_reached();
            }

            if (genenvfn) {
                genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
            } else {
                genfn(tcg_res, tcg_op1, tcg_op2);
            }

            if (opcode == 0xf || opcode == 0x12) {
                /* SABA, UABA, MLA, MLS: accumulating ops */
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_add_u8, gen_helper_neon_sub_u8 },
                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
                };
                bool is_sub = (opcode == 0x12 && u); /* MLS */

                genfn = fns[size][is_sub];
                read_vec_element_i32(s, tcg_op1, rd, pass, MO_32);
                genfn(tcg_res, tcg_res, tcg_op1);
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    if (!is_q) {
        clear_vec_high(s, rd);
    }
}
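/* Most cases above share one shape: a static table of Neon helpers
 * indexed as fns[size][u], e.g. SMAX/UMAX with size=1 (16 bit elements)
 * and u=1 picks gen_helper_neon_max_u16. The *EnvFn variants also take
 * cpu_env because the saturating ops must be able to set the QC
 * (cumulative saturation) flag in the CPU state.
 */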
/* C3.6.16 AdvSIMD three same
 *   31  30  29 28       24 23  22  21 20  16 15    11  10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+---+------+------+
 */
static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 11, 5);

    switch (opcode) {
    case 0x3: /* logic ops */
        disas_simd_3same_logic(s, insn);
        break;
    case 0x17: /* ADDP */
    case 0x14: /* SMAXP, UMAXP */
    case 0x15: /* SMINP, UMINP */
        /* Pairwise operations */
        disas_simd_3same_pair(s, insn);
        break;
    case 0x18 ... 0x31:
        /* floating point ops, sz[1] and U are part of opcode */
        disas_simd_3same_float(s, insn);
        break;
    default:
        disas_simd_3same_int(s, insn);
        break;
    }
}
static void handle_2misc_narrow(DisasContext *s, int opcode, bool u, bool is_q,
                                int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
     * in the source becomes a size element in the destination).
     */
    int pass;
    TCGv_i32 tcg_res[2];
    int destelt = is_q ? 2 : 0;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        NeonGenNarrowFn *genfn = NULL;
        NeonGenNarrowEnvFn *genenvfn = NULL;

        read_vec_element(s, tcg_op, rn, pass, MO_64);
        tcg_res[pass] = tcg_temp_new_i32();

        switch (opcode) {
        case 0x12: /* XTN, SQXTUN */
        {
            static NeonGenNarrowFn * const xtnfns[3] = {
                gen_helper_neon_narrow_u8,
                gen_helper_neon_narrow_u16,
                tcg_gen_trunc_i64_i32,
            };
            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
                gen_helper_neon_unarrow_sat8,
                gen_helper_neon_unarrow_sat16,
                gen_helper_neon_unarrow_sat32,
            };
            if (u) {
                genenvfn = sqxtunfns[size];
            } else {
                genfn = xtnfns[size];
            }
            break;
        }
        case 0x14: /* SQXTN, UQXTN */
        {
            static NeonGenNarrowEnvFn * const fns[3][2] = {
                { gen_helper_neon_narrow_sat_s8,
                  gen_helper_neon_narrow_sat_u8 },
                { gen_helper_neon_narrow_sat_s16,
                  gen_helper_neon_narrow_sat_u16 },
                { gen_helper_neon_narrow_sat_s32,
                  gen_helper_neon_narrow_sat_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        default:
            g_assert_not_reached();
        }

        if (genfn) {
            genfn(tcg_res[pass], tcg_op);
        } else {
            genenvfn(tcg_res[pass], cpu_env, tcg_op);
        }

        tcg_temp_free_i64(tcg_op);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    if (!is_q) {
        clear_vec_high(s, rd);
    }
}
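/* destelt implements the "2" (upper half) forms: XTN V0.8B, V1.8H writes
 * the narrowed results to the low 64 bits of V0 and clear_vec_high zeroes
 * the rest, while XTN2 V0.16B, V1.8H (is_q set) writes them at 32-bit
 * elements 2 and 3, i.e. the high half, leaving the low half intact.
 */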
static void handle_rev(DisasContext *s, int opcode, bool u,
                       bool is_q, int size, int rn, int rd)
{
    int op = (opcode << 1) | u;
    int opsz = op + size;
    int grp_size = 3 - opsz;
    int dsize = is_q ? 128 : 64;
    int i;

    if (opsz >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (size == 0) {
        /* Special case bytes, use bswap op on each group of elements */
        int groups = dsize / (8 << grp_size);

        for (i = 0; i < groups; i++) {
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            read_vec_element(s, tcg_tmp, rn, i, grp_size);
            switch (grp_size) {
            case MO_16:
                tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
                break;
            case MO_32:
                tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
                break;
            case MO_64:
                tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_tmp, rd, i, grp_size);
            tcg_temp_free_i64(tcg_tmp);
        }
        if (!is_q) {
            clear_vec_high(s, rd);
        }
    } else {
        int revmask = (1 << grp_size) - 1;
        int esize = 8 << size;
        int elements = dsize / esize;
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_const_i64(0);
        TCGv_i64 tcg_rd_hi = tcg_const_i64(0);

        for (i = 0; i < elements; i++) {
            int e_rev = (i & 0xf) ^ revmask;
            int off = e_rev * esize;
            read_vec_element(s, tcg_rn, rn, i, size);
            if (off >= 64) {
                tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
                                    tcg_rn, off - 64, esize);
            } else {
                tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
            }
        }
        write_vec_element(s, tcg_rd, rd, 0, MO_64);
        write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);

        tcg_temp_free_i64(tcg_rd_hi);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    }
}
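/* Worked example: REV64 V0.8H, V1.8H has op=0 and size=1, so grp_size=2
 * and revmask=3; source element i is deposited at element i ^ 3, which
 * reverses each group of four 16-bit elements (one 64-bit block). The
 * size==0 path reaches the same result for byte elements more cheaply,
 * with one bswap per group.
 */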
/* C3.6.17 AdvSIMD two reg misc
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 0x0: /* REV64, REV32 */
    case 0x1: /* REV16 */
        handle_rev(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x5: /* CNT, NOT, RBIT */
        if (u && size == 0) {
            /* NOT: adjust size so we can use the 64-bits-at-a-time loop. */
            size = 3;
            break;
        } else if (u && size == 1) {
            /* RBIT */
            break;
        } else if (!u && size == 0) {
            /* CNT */
            break;
        }
        unallocated_encoding(s);
        return;
    case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
    case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        handle_2misc_narrow(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x2: /* SADDLP, UADDLP */
    case 0x4: /* CLS, CLZ */
    case 0x6: /* SADALP, UADALP */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        unsupported_encoding(s, insn);
        return;
    case 0x13: /* SHLL, SHLL2 */
        if (u == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        unsupported_encoding(s, insn);
        return;
    case 0xa: /* CMLT */
        if (u == 1) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x3: /* SUQADD, USQADD */
    case 0x7: /* SQABS, SQNEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        unsupported_encoding(s, insn);
        return;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
    {
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = extract32(size, 0, 1) ? 3 : 2;
        switch (opcode) {
        case 0x2f: /* FABS */
        case 0x6f: /* FNEG */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x16: /* FCVTN, FCVTN2 */
        case 0x17: /* FCVTL, FCVTL2 */
        case 0x18: /* FRINTN */
        case 0x19: /* FRINTM */
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x1d: /* SCVTF */
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x38: /* FRINTP */
        case 0x39: /* FRINTZ */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x3c: /* URECPE */
        case 0x3d: /* FRECPE */
        case 0x56: /* FCVTXN, FCVTXN2 */
        case 0x58: /* FRINTA */
        case 0x59: /* FRINTX */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x5d: /* UCVTF */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
        case 0x79: /* FRINTI */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
        case 0x7c: /* URSQRTE */
        case 0x7d: /* FRSQRTE */
        case 0x7f: /* FSQRT */
            unsupported_encoding(s, insn);
            return;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3) {
        /* All 64-bit element operations can be shared with scalar 2misc */
        int pass;

        for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            handle_2misc_64(s, opcode, u, tcg_res, tcg_op);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op);
        }
    } else {
        int pass;

        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();
            TCGCond cond;

            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            if (size == 2) {
                /* Special cases for 32 bit elements */
                switch (opcode) {
                case 0xa: /* CMLT */
                    /* 32 bit integer comparison against zero, result is
                     * test ? (2^32 - 1) : 0. We implement via setcond(test)
                     * and then neg.
                     */
                    cond = TCG_COND_LT;
                do_cmop:
                    tcg_gen_setcondi_i32(cond, tcg_res, tcg_op, 0);
                    tcg_gen_neg_i32(tcg_res, tcg_res);
                    break;
                case 0x8: /* CMGT, CMGE */
                    cond = u ? TCG_COND_GE : TCG_COND_GT;
                    goto do_cmop;
                case 0x9: /* CMEQ, CMLE */
                    cond = u ? TCG_COND_LE : TCG_COND_EQ;
                    goto do_cmop;
                case 0xb: /* ABS, NEG */
                    if (u) {
                        tcg_gen_neg_i32(tcg_res, tcg_op);
                    } else {
                        TCGv_i32 tcg_zero = tcg_const_i32(0);
                        tcg_gen_neg_i32(tcg_res, tcg_op);
                        tcg_gen_movcond_i32(TCG_COND_GT, tcg_res, tcg_op,
                                            tcg_zero, tcg_op, tcg_res);
                        tcg_temp_free_i32(tcg_zero);
                    }
                    break;
                case 0x2f: /* FABS */
                    gen_helper_vfp_abss(tcg_res, tcg_op);
                    break;
                case 0x6f: /* FNEG */
                    gen_helper_vfp_negs(tcg_res, tcg_op);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else {
                /* Use helpers for 8 and 16 bit elements */
                switch (opcode) {
                case 0x5: /* CNT, RBIT */
                    /* For these two insns size is part of the opcode specifier
                     * (handled earlier); they always operate on byte elements.
                     */
                    if (u) {
                        gen_helper_neon_rbit_u8(tcg_res, tcg_op);
                    } else {
                        gen_helper_neon_cnt_u8(tcg_res, tcg_op);
                    }
                    break;
                case 0x8: /* CMGT, CMGE */
                case 0x9: /* CMEQ, CMLE */
                case 0xa: /* CMLT */
                {
                    static NeonGenTwoOpFn * const fns[3][2] = {
                        { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_s16 },
                        { gen_helper_neon_cge_s8, gen_helper_neon_cge_s16 },
                        { gen_helper_neon_ceq_u8, gen_helper_neon_ceq_u16 },
                    };
                    NeonGenTwoOpFn *genfn;
                    int comp;
                    bool reverse;
                    TCGv_i32 tcg_zero = tcg_const_i32(0);

                    /* comp = index into [CMGT, CMGE, CMEQ, CMLE, CMLT] */
                    comp = (opcode - 0x8) * 2 + u;
                    /* ...but LE, LT are implemented as reverse GE, GT */
                    reverse = (comp > 2);
                    if (reverse) {
                        comp = 4 - comp;
                    }
                    genfn = fns[comp][size];
                    if (reverse) {
                        genfn(tcg_res, tcg_zero, tcg_op);
                    } else {
                        genfn(tcg_res, tcg_op, tcg_zero);
                    }
                    tcg_temp_free_i32(tcg_zero);
                    break;
                }
                case 0xb: /* ABS, NEG */
                    if (u) {
                        TCGv_i32 tcg_zero = tcg_const_i32(0);
                        if (size) {
                            gen_helper_neon_sub_u16(tcg_res, tcg_zero, tcg_op);
                        } else {
                            gen_helper_neon_sub_u8(tcg_res, tcg_zero, tcg_op);
                        }
                        tcg_temp_free_i32(tcg_zero);
                    } else {
                        if (size) {
                            gen_helper_neon_abs_s16(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_abs_s8(tcg_res, tcg_op);
                        }
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }
    }
    if (!is_q) {
        clear_vec_high(s, rd);
    }
}
/* C3.6.18 AdvSIMD vector x indexed element
 *   31  30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 */
static void disas_simd_indexed_vector(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}
/* C3.6.19 Crypto AES
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_aes(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* C3.6.20 Crypto three-reg SHA
 *  31             24 23  22  21 20  16  15 14    12 11 10 9    5 4    0
 * +-----------------+------+---+------+---+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 0 |  Rm  | 0 | opcode | 0 0 |  Rn  |  Rd  |
 * +-----------------+------+---+------+---+--------+-----+------+------+
 */
static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* C3.6.21 Crypto two-reg SHA
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}
/* C3.6 Data processing - SIMD, inc Crypto
 *
 * As the decode gets a little complex we are using a table based
 * approach for this part of the decode.
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn        */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed_vector },
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_scalar_indexed },
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0x00000000, 0x00000000, NULL }
};
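/* A table entry matches when (insn & mask) == pattern, and the first
 * match wins. That ordering constraint is why simd_mod_imm sits above
 * simd_shift_imm: its pattern/mask pair is the shift-imm encoding with
 * the immh field additionally constrained to zero, i.e. a strict subset.
 */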
static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
{
    /* Note that this is called with all non-FP cases from
     * table C3-6 so it must UNDEF for entries not specifically
     * allocated to instructions in that table.
     */
    AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
    if (fn) {
        fn(s, insn);
    } else {
        unallocated_encoding(s);
    }
}
/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
        disas_data_proc_fp(s, insn);
    } else {
        /* SIMD, including crypto */
        disas_data_proc_simd(s, insn);
    }
}
/* C3.1 A64 instruction index by encoding */
static void disas_a64_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t insn;

    insn = arm_ldl_code(env, s->pc, s->bswap_code);
    s->insn = insn;
    s->pc += 4;

    switch (extract32(insn, 25, 4)) {
    case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
        unallocated_encoding(s);
        break;
    case 0x8: case 0x9: /* Data processing - immediate */
        disas_data_proc_imm(s, insn);
        break;
    case 0xa: case 0xb: /* Branch, exception generation and system insns */
        disas_b_exc_sys(s, insn);
        break;
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xe: /* Loads and stores */
        disas_ldst(s, insn);
        break;
    case 0x5:
    case 0xd: /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf: /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        assert(FALSE); /* all 15 cases should be handled above */
        break;
    }

    /* if we allocated any temporaries, free them here */
    free_tmp_a64(s);
}
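/* Decode example: ADD X0, X1, X2 encodes as 0x8b020020; bits [28:25]
 * are 0b0101, so extract32(insn, 25, 4) yields 0x5 and the instruction
 * is dispatched to disas_data_proc_reg above.
 */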
void gen_intermediate_code_internal_a64(ARMCPU *cpu,
                                        TranslationBlock *tb,
                                        bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 1;
    dc->thumb = 0;
    dc->bswap_code = 0;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
#if !defined(CONFIG_USER_ONLY)
    dc->user = 0;
#endif
    dc->vfp_enabled = 0;
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = cpu->cp_regs;
    dc->current_pl = arm_current_pl(env);

    init_tmp_a64_array(dc);

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start();

    tcg_clear_temp_count();

    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB. */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }

        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        disas_a64_insn(env, dc);

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached. This
         * ensures prefetch aborts occur at the right place.
         */
        num_insns++;
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    if (unlikely(cs->singlestep_enabled) && dc->is_jmp != DISAS_EXC) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        assert(dc->is_jmp != DISAS_TB_JUMP);
        if (dc->is_jmp != DISAS_JUMP) {
            gen_a64_set_pc_im(dc->pc);
        }
        gen_exception(EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            gen_a64_set_pc_im(dc->pc);
            /* fall through */
        case DISAS_TB_JUMP:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_EXC:
        case DISAS_SWI:
            break;
        case DISAS_WFI:
            /* This is a special case because we don't want to just halt the CPU
             * if trying to debug across a WFI.
             */
            gen_a64_set_pc_im(dc->pc);
            gen_helper_wfi(cpu_env);
            break;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start,
                         4 | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}