target-arm: A64: add support for ld/st with index
[qemu/ar7.git] / target-arm / translate-a64.c
blob a2cc9f0d7a5b23168ec24ffdd190b147dbc83329
/*
 * AArch64 translation
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "translate.h"
#include "qemu/host-utils.h"

#include "exec/gen-icount.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;
static TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};
/* initialize TCG globals. */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
}
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;

    cpu_fprintf(f, "PC=%016" PRIx64 " SP=%016" PRIx64 "\n",
                env->pc, env->xregs[31]);
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "X%02d=%016" PRIx64, i, env->xregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
    cpu_fprintf(f, "PSTATE=%08x (flags %c%c%c%c)\n",
                psr,
                psr & PSTATE_N ? 'N' : '-',
                psr & PSTATE_Z ? 'Z' : '-',
                psr & PSTATE_C ? 'C' : '-',
                psr & PSTATE_V ? 'V' : '-');
    cpu_fprintf(f, "\n");
}
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return 1;
#else
    return s->user;
#endif
}

void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_EXC;
}
static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    /* No direct tb linking with singlestep or deterministic io */
    if (s->singlestep_enabled || (s->tb->cflags & CF_LAST_IO)) {
        return false;
    }

    /* Only link tbs from inside the same guest page */
    if ((s->tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }

    return true;
}

static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->singlestep_enabled) {
            gen_exception(EXCP_DEBUG);
        }
        tcg_gen_exit_tb(0);
        s->is_jmp = DISAS_JUMP;
    }
}
static void unallocated_encoding(DisasContext *s)
{
    gen_exception_insn(s, 4, EXCP_UDEF);
}

/* No trailing semicolon after the while (0): that would break use of
 * this macro in an unbraced if/else.
 */
#define unsupported_encoding(s, insn)                                    \
    do {                                                                 \
        qemu_log_mask(LOG_UNIMP,                                         \
                      "%s:%d: unsupported instruction encoding 0x%08x "  \
                      "at pc=%016" PRIx64 "\n",                          \
                      __FILE__, __LINE__, insn, s->pc - 4);              \
        unallocated_encoding(s);                                         \
    } while (0)
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    int i;
    for (i = 0; i < ARRAY_SIZE(s->tmp_a64); i++) {
        TCGV_UNUSED_I64(s->tmp_a64[i]);
    }
#endif
    s->tmp_a64_count = 0;
}

static void free_tmp_a64(DisasContext *s)
{
    int i;
    for (i = 0; i < s->tmp_a64_count; i++) {
        tcg_temp_free_i64(s->tmp_a64[i]);
    }
    init_tmp_a64_array(s);
}

static TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}

static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}
/*
 * Register access functions
 *
 * These functions are used for directly accessing a register where
 * changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
static TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        return new_tmp_a64_zero(s);
    } else {
        return cpu_X[reg];
    }
}

/* register access for when 31 == SP */
static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}
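
/* For example, "ADD X0, SP, #16" decodes Rn == 31 as SP (so cpu_reg_sp),
 * while "ORR X0, XZR, X1" decodes Rn == 31 as the zero register (cpu_reg,
 * which hands back a discardable zeroed temporary).
 */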
/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}
/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    TCGv_i64 flag = tcg_temp_new_i64();

    tcg_gen_setcondi_i64(TCG_COND_NE, flag, result, 0);
    tcg_gen_trunc_i64_i32(cpu_ZF, flag);
    tcg_gen_shri_i64(flag, result, 32);
    tcg_gen_trunc_i64_i32(cpu_NF, flag);
    tcg_temp_free_i64(flag);
}
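
/* As an illustration: a result of 0xffffffff00000000 leaves cpu_ZF == 1
 * (Z clear, the full 64 bits are non-zero) and cpu_NF == 0xffffffff,
 * whose bit 31 (where QEMU keeps the N flag) is set because result
 * bit 63 was set.
 */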
/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_trunc_i64_i32(cpu_ZF, result);
        tcg_gen_trunc_i64_i32(cpu_NF, result);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory
 */
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, int size)
{
    g_assert(size <= 3);
    tcg_gen_qemu_st_i64(source, tcg_addr, get_mem_index(s), MO_TE + size);
}

/*
 * Load from memory to GPR register
 */
static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                      int size, bool is_signed, bool extend)
{
    TCGMemOp memop = MO_TE + size;

    g_assert(size <= 3);

    if (is_signed) {
        memop += MO_SIGN;
    }

    tcg_gen_qemu_ld_i64(dest, tcg_addr, get_mem_index(s), memop);

    if (extend && is_signed) {
        g_assert(size < 3);
        tcg_gen_ext32u_i64(dest, dest);
    }
}
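
/* e.g. LDRSB W0, [X1] is size == 0, is_signed == true, extend == true:
 * the byte is loaded sign-extended to 64 bits, then the final
 * zero-extension of the low 32 bits gives the correct W-register value.
 */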
/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    int freg_offs = offsetof(CPUARMState, vfp.regs[srcidx * 2]);
    TCGv_i64 tmp = tcg_temp_new_i64();

    if (size < 4) {
        switch (size) {
        case 0:
            tcg_gen_ld8u_i64(tmp, cpu_env, freg_offs);
            break;
        case 1:
            tcg_gen_ld16u_i64(tmp, cpu_env, freg_offs);
            break;
        case 2:
            tcg_gen_ld32u_i64(tmp, cpu_env, freg_offs);
            break;
        case 3:
            tcg_gen_ld_i64(tmp, cpu_env, freg_offs);
            break;
        }
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TE + size);
    } else {
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
        tcg_gen_ld_i64(tmp, cpu_env, freg_offs);
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TEQ);
        tcg_gen_ld_i64(tmp, cpu_env, freg_offs + sizeof(float64));
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(tmp, tcg_hiaddr, get_mem_index(s), MO_TEQ);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_temp_free_i64(tmp);
}
/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    int freg_offs = offsetof(CPUARMState, vfp.regs[destidx * 2]);
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi;

    if (size < 4) {
        TCGMemOp memop = MO_TE + size;
        tmphi = tcg_const_i64(0);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
    } else {
        TCGv_i64 tcg_hiaddr;
        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), MO_TEQ);
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(tmphi, tcg_hiaddr, get_mem_index(s), MO_TEQ);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, freg_offs);
    tcg_gen_st_i64(tmphi, cpu_env, freg_offs + sizeof(float64));

    tcg_temp_free_i64(tmplo);
    tcg_temp_free_i64(tmphi);
}
/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}
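
/* e.g. for LDR X0, [X1, W2, UXTW #3] the offset operand is decoded with
 * option == 0b010 (UXTW) and shift == 3, i.e. ZeroExtend(W2) << 3.
 */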
static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}

/*
 * The instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter 3 (C3)
 * of the ARM Architecture Reference Manual (DDI0487A_a)
 */
/* C3.2.6 Unconditional branch (immediate)
 *   31  30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;

    if (insn & (1 << 31)) {
        /* C5.6.26 BL Branch with link */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
    }

    /* C5.6.20 B Branch / C5.6.26 BL Branch with link */
    gen_goto_tb(s, 0, addr);
}
/* C3.2.1 Compare & branch (immediate)
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |        imm19        |   Rt   |
 * +----+-------------+----+---------------------+--------+
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    int label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
/* C3.2.5 Test & branch (immediate)
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    int label_match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
/* C3.2.2 / C5.6.19 Conditional branch (immediate)
 *  31           25  24  23                  5   4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |        imm19        | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
    cond = extract32(insn, 0, 4);

    if (cond < 0x0e) {
        /* genuinely conditional branches */
        int label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->pc);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}
/* C5.6.68 HINT */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0: /* NOP */
        return;
    case 1: /* YIELD */
    case 2: /* WFE */
    case 3: /* WFI */
    case 4: /* SEV */
    case 5: /* SEVL */
        /* we treat all as NOP at least for now */
        return;
    default:
        /* default specified as NOP equivalent */
        return;
    }
}
/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        unsupported_encoding(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
    case 6: /* ISB */
        /* We don't emulate caches so barriers are no-ops */
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
/* C5.6.130 MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsupported_encoding(s, insn);
}

/* C5.6.204 SYS */
static void handle_sys(DisasContext *s, uint32_t insn, unsigned int l,
                       unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    unsupported_encoding(s, insn);
}

/* C5.6.129 MRS - move from system register */
static void handle_mrs(DisasContext *s, uint32_t insn, unsigned int op0,
                       unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    unsupported_encoding(s, insn);
}

/* C5.6.131 MSR (register) - move to system register */
static void handle_msr(DisasContext *s, uint32_t insn, unsigned int op0,
                       unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    unsupported_encoding(s, insn);
}
/* C3.2.4 System
 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 */
static void disas_system(DisasContext *s, uint32_t insn)
{
    unsigned int l, op0, op1, crn, crm, op2, rt;
    l = extract32(insn, 21, 1);
    op0 = extract32(insn, 19, 2);
    op1 = extract32(insn, 16, 3);
    crn = extract32(insn, 12, 4);
    crm = extract32(insn, 8, 4);
    op2 = extract32(insn, 5, 3);
    rt = extract32(insn, 0, 5);

    if (op0 == 0) {
        if (l || rt != 31) {
            unallocated_encoding(s);
            return;
        }
        switch (crn) {
        case 2: /* C5.6.68 HINT */
            handle_hint(s, insn, op1, op2, crm);
            break;
        case 3: /* CLREX, DSB, DMB, ISB */
            handle_sync(s, insn, op1, op2, crm);
            break;
        case 4: /* C5.6.130 MSR (immediate) */
            handle_msr_i(s, insn, op1, op2, crm);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    }

    if (op0 == 1) {
        /* C5.6.204 SYS */
        handle_sys(s, insn, l, op1, op2, crn, crm, rt);
    } else if (l) { /* op0 > 1 */
        /* C5.6.129 MRS - move from system register */
        handle_mrs(s, insn, op0, op1, op2, crn, crm, rt);
    } else {
        /* C5.6.131 MSR (register) - move to system register */
        handle_msr(s, insn, op0, op1, op2, crn, crm, rt);
    }
}
/* Exception generation */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* C3.2.7 Unconditional branch (register)
 *  31           25 24   21 20   16 15   10 9    5 4     0
 * +---------------+-------+-------+-------+------+-------+
 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
 * +---------------+-------+-------+-------+------+-------+
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* BR */
    case 2: /* RET */
        break;
    case 1: /* BLR */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
        break;
    case 4: /* ERET */
    case 5: /* DRPS */
        if (rn != 0x1f) {
            unallocated_encoding(s);
        } else {
            unsupported_encoding(s, insn);
        }
        return;
    default:
        unallocated_encoding(s);
        return;
    }

    tcg_gen_mov_i64(cpu_pc, cpu_reg(s, rn));
    s->is_jmp = DISAS_JUMP;
}
/* C3.2 Branches, exception generating and system instructions */
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 7)) {
    case 0x0a: case 0x0b:
    case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
        disas_uncond_b_imm(s, insn);
        break;
    case 0x1a: case 0x5a: /* Compare & branch (immediate) */
        disas_comp_b_imm(s, insn);
        break;
    case 0x1b: case 0x5b: /* Test & branch (immediate) */
        disas_test_b_imm(s, insn);
        break;
    case 0x2a: /* Conditional branch (immediate) */
        disas_cond_b_imm(s, insn);
        break;
    case 0x6a: /* Exception generation / System */
        if (insn & (1 << 24)) {
            disas_system(s, insn);
        } else {
            disas_exc(s, insn);
        }
        break;
    case 0x6b: /* Unconditional branch (register) */
        disas_uncond_b_reg(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* Load/store exclusive */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Load register (literal) */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}
/*
 * C5.6.80 LDNP (Load Pair - non-temporal hint)
 * C5.6.81 LDP (Load Pair - non vector)
 * C5.6.82 LDPSW (Load Pair Signed Word - non vector)
 * C5.6.176 STNP (Store Pair - non-temporal hint)
 * C5.6.177 STP (Store Pair - non vector)
 * C6.3.165 LDNP (Load Pair of SIMD&FP - non-temporal hint)
 * C6.3.165 LDP (Load Pair of SIMD&FP)
 * C6.3.284 STNP (Store Pair of SIMD&FP - non-temporal hint)
 * C6.3.284 STP (Store Pair of SIMD&FP)
 *
 *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
 * +-----+-------+---+---+-------+---+-----------------------------+
 * | opc | 1 0 1 | V | 0 | index | L |  imm7 |  Rt2  |  Rn  | Rt   |
 * +-----+-------+---+---+-------+---+-------+-------+------+------+
 *
 * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
 *      LDPSW                    01
 *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
 * V: 0 -> GPR, 1 -> Vector
 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
 *      10 -> signed offset, 11 -> pre-index
 * L: 0 -> Store 1 -> Load
 *
 * Rt, Rt2 = GPR or SIMD registers to be stored
 * Rn = general purpose register containing address
 * imm7 = signed offset (multiple of 4 or 8 depending on size)
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int64_t offset = sextract32(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;

    TCGv_i64 tcg_addr; /* calculated address */
    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        if (!is_load && is_signed) {
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /* signed offset with "non-temporal" hint. Since we don't emulate
         * caches we don't care about hints to the cache system about
         * data access patterns, and handle this identically to plain
         * signed offset.
         */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, rn not updated */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    offset <<= size;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!postindex) {
        tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
    }

    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, tcg_addr, size);
        } else {
            do_fp_st(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_load) {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false);
        } else {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        }
    }
    tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt2, tcg_addr, size);
        } else {
            do_fp_st(s, rt2, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
        if (is_load) {
            do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false);
        } else {
            do_gpr_st(s, tcg_rt2, tcg_addr, size);
        }
    }

    if (wback) {
        if (postindex) {
            tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
        } else {
            tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
    }
}
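
/* A worked example of the decode above: "STP X1, X2, [SP, #-16]!" has
 * opc == 10 (so size == 3), index == 3 (pre-index with writeback), L == 0
 * and imm7 == -2, giving offset == -2 << 3 == -16. We store X1 at SP-16
 * and X2 at SP-8, then write SP-16 back to SP.
 */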
/*
 * C3.3.8 Load/store (immediate post-indexed)
 * C3.3.9 Load/store (immediate pre-indexed)
 * C3.3.12 Load/store (unscaled immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21  20    12 11 10 9    5 4    0
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 * |size| 1 1 1 | V | 0 0 | opc | 0 |  imm9  | idx |  Rn  |  Rt  |
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 *
 * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
 * V = 0 -> non-vector
 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 */
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm9 = sextract32(insn, 12, 9);
    int opc = extract32(insn, 22, 2);
    int size = extract32(insn, 30, 2);
    int idx = extract32(insn, 10, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_vector = extract32(insn, 26, 1);
    bool post_index;
    bool writeback;

    TCGv_i64 tcg_addr;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = ((opc & 1) == 0);
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = opc & (1 << 1);
        is_extended = (size < 3) && (opc & 1);
    }

    switch (idx) {
    case 0:
        post_index = false;
        writeback = false;
        break;
    case 1:
        post_index = true;
        writeback = true;
        break;
    case 3:
        post_index = false;
        writeback = true;
        break;
    case 2:
        g_assert(false);
        break;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!post_index) {
        tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
    }

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
        }
    }

    if (writeback) {
        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
        if (post_index) {
            tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
        }
        tcg_gen_mov_i64(tcg_rn, tcg_addr);
    }
}
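
/* For example "LDR X0, [X1], #8" (post-indexed) has idx == 01: the load
 * uses the unmodified X1, then X1 is updated to X1 + 8. With idx == 11
 * ("LDR X0, [X1, #8]!", pre-indexed) the offset is applied before the
 * access and also written back.
 */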
/*
 * C3.3.10 Load/store (register offset)
 *
 * 31 30 29   27  26 25 24 23 22 21  20  16 15 13 12 11 10 9  5 4  0
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 * |size| 1 1 1 | V | 0 0 | opc | 1 |  Rm  | opt | S| 1 0 | Rn | Rt |
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 *
 * For non-vector:
 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * V: 1 -> vector/simd
 * opt: extend encoding (see DecodeRegExtend)
 * S: if S=1 then scale (essentially index by sizeof(size))
 * Rt: register to transfer into/out of
 * Rn: address register or SP for base
 * Rm: offset register or ZR for offset
 */
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int shift = extract32(insn, 12, 1);
    int rm = extract32(insn, 16, 5);
    int opc = extract32(insn, 22, 2);
    int opt = extract32(insn, 13, 3);
    int size = extract32(insn, 30, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_vector = extract32(insn, 26, 1);

    TCGv_i64 tcg_rm;
    TCGv_i64 tcg_addr;

    if (extract32(opt, 1, 1) == 0) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    tcg_rm = read_cpu_reg(s, rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);

    tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
        }
    }
}
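
/* e.g. "LDR W0, [X1, W2, SXTW #2]" decodes as size == 2, opt == 0b110
 * (SXTW) and S == 1, so the address is X1 + (SignExtend(W2) << 2).
 */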
/*
 * C3.3.13 Load/store (unsigned immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21        10 9     5 4    0
 * +----+-------+---+-----+-----+------------+-------+------+
 * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
 * +----+-------+---+-----+-----+------------+-------+------+
 *
 * For non-vector:
 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * Rn: base address register (inc SP)
 * Rt: target register
 */
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    unsigned int imm12 = extract32(insn, 10, 12);
    bool is_vector = extract32(insn, 26, 1);
    int size = extract32(insn, 30, 2);
    int opc = extract32(insn, 22, 2);
    unsigned int offset;

    TCGv_i64 tcg_addr;

    bool is_store;
    bool is_signed = false;
    bool is_extended = false;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);
    offset = imm12 << size;
    tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
        }
    }
}
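
/* The immediate is scaled by the access size, so e.g. "LDR X0, [X1, #24]"
 * encodes imm12 == 3 with size == 3: offset == 3 << 3 == 24 bytes.
 */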
/* Load/store register (immediate forms) */
static void disas_ldst_reg_imm(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 10, 2)) {
    case 0: case 1: case 3:
        /* Load/store register (unscaled immediate) */
        /* Load/store immediate pre/post-indexed */
        disas_ldst_reg_imm9(s, insn);
        break;
    case 2:
        /* Load/store register unprivileged */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* Load/store register (all forms) */
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 2)) {
    case 0:
        if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) {
            disas_ldst_reg_roffset(s, insn);
        } else {
            disas_ldst_reg_imm(s, insn);
        }
        break;
    case 1:
        disas_ldst_reg_unsigned_imm(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* AdvSIMD load/store multiple structures */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* AdvSIMD load/store single structure */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* C3.3 Loads and stores */
static void disas_ldst(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 6)) {
    case 0x08: /* Load/store exclusive */
        disas_ldst_excl(s, insn);
        break;
    case 0x18: case 0x1c: /* Load register (literal) */
        disas_ld_lit(s, insn);
        break;
    case 0x28: case 0x29:
    case 0x2c: case 0x2d: /* Load/store pair (all forms) */
        disas_ldst_pair(s, insn);
        break;
    case 0x38: case 0x39:
    case 0x3c: case 0x3d: /* Load/store register (all forms) */
        disas_ldst_reg(s, insn);
        break;
    case 0x0c: /* AdvSIMD load/store multiple structures */
        disas_ldst_multiple_struct(s, insn);
        break;
    case 0x0d: /* AdvSIMD load/store single structure */
        disas_ldst_single_struct(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* C3.4.6 PC-rel. addressing
 *   31  30   29 28       24 23                5 4    0
 * +----+-------+-----------+-------------------+------+
 * | op | immlo | 1 0 0 0 0 |       immhi       |  Rd  |
 * +----+-------+-----------+-------------------+------+
 */
static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
{
    unsigned int page, rd;
    uint64_t base;
    int64_t offset;

    page = extract32(insn, 31, 1);
    /* SignExtend(immhi:immlo) -> offset */
    offset = ((int64_t)sextract32(insn, 5, 19) << 2) | extract32(insn, 29, 2);
    rd = extract32(insn, 0, 5);
    base = s->pc - 4;

    if (page) {
        /* ADRP (page based) */
        base &= ~0xfff;
        offset <<= 12;
    }

    tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
}
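
/* For example, an ADRP at 0x401050 with immhi:immlo == 3 computes
 * (0x401050 & ~0xfff) + (3 << 12) == 0x404000. Note base is s->pc - 4
 * because s->pc has already advanced past this instruction.
 */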
/* Add/subtract (immediate) */
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* The input should be a value in the bottom e bits (with higher
 * bits zero); returns that value replicated into every element
 * of size e in a 64 bit integer.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    assert(e != 0);
    while (e < 64) {
        mask |= mask << e;
        e *= 2;
    }
    return mask;
}
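
/* e.g. bitfield_replicate(0x01, 8) == 0x0101010101010101 */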
/* Return a value with the bottom len bits set (where 0 < len <= 64) */
static inline uint64_t bitmask64(unsigned int length)
{
    assert(length > 0 && length <= 64);
    return ~0ULL >> (64 - length);
}

/* Simplified variant of pseudocode DecodeBitMasks() for the case where we
 * only require the wmask. Returns false if the imms/immr/immn are a reserved
 * value (ie should cause a guest UNDEF exception), and true if they are
 * valid, in which case the decoded bit pattern is written to result.
 */
static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                                   unsigned int imms, unsigned int immr)
{
    uint64_t mask;
    unsigned e, levels, s, r;
    int len;

    assert(immn < 2 && imms < 64 && immr < 64);

    /* The bit patterns we create here are 64 bit patterns which
     * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
     * 64 bits each. Each element contains the same value: a run
     * of between 1 and e-1 non-zero bits, rotated within the
     * element by between 0 and e-1 bits.
     *
     * The element size and run length are encoded into immn (1 bit)
     * and imms (6 bits) as follows:
     * 64 bit elements: immn = 1, imms = <length of run - 1>
     * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
     * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
     *  8 bit elements: immn = 0, imms = 110 : <length of run - 1>
     *  4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
     *  2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
     * Notice that immn = 0, imms = 11111x is the only combination
     * not covered by one of the above options; this is reserved.
     * Further, <length of run - 1> all-ones is a reserved pattern.
     *
     * In all cases the rotation is by immr % e (and immr is 6 bits).
     */

    /* First determine the element size */
    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
    if (len < 1) {
        /* This is the immn == 0, imms == 11111x case */
        return false;
    }
    e = 1 << len;

    levels = e - 1;
    s = imms & levels;
    r = immr & levels;

    if (s == levels) {
        /* <length of run - 1> mustn't be all-ones. */
        return false;
    }

    /* Create the value of one element: s+1 set bits rotated
     * by r within the element (which is e bits wide)...
     */
    mask = bitmask64(s + 1);
    if (r) {
        /* Guarded so we never do an undefined full-width shift
         * (mask << e with e == 64 when r == 0).
         */
        mask = (mask >> r) | (mask << (e - r));
        mask &= bitmask64(e);
    }
    /* ...then replicate the element over the whole 64 bit value */
    mask = bitfield_replicate(mask, e);
    *result = mask;
    return true;
}
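
/* A worked decode: immn == 0, imms == 0b000111, immr == 0 gives
 * ~imms & 0x3f == 0b111000, so len == 5, e == 32, levels == 31, s == 7,
 * r == 0: one element is bitmask64(8) == 0xff, and replication yields
 * 0x000000ff000000ff (truncated to 0xff for the !sf case below).
 */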
/* C3.4.4 Logical (immediate)
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 * +----+-----+-------------+---+------+------+------+------+
 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms |  Rn  |  Rd  |
 * +----+-----+-------------+---+------+------+------+------+
 */
static void disas_logic_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opc, is_n, immr, imms, rn, rd;
    TCGv_i64 tcg_rd, tcg_rn;
    uint64_t wmask;
    bool is_and = false;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    is_n = extract32(insn, 22, 1);
    immr = extract32(insn, 16, 6);
    imms = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && is_n) {
        unallocated_encoding(s);
        return;
    }

    if (opc == 0x3) { /* ANDS */
        tcg_rd = cpu_reg(s, rd);
    } else {
        tcg_rd = cpu_reg_sp(s, rd);
    }
    tcg_rn = cpu_reg(s, rn);

    if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
        /* some immediate field values are reserved */
        unallocated_encoding(s);
        return;
    }

    if (!sf) {
        wmask &= 0xffffffff;
    }

    switch (opc) {
    case 0x3: /* ANDS */
    case 0x0: /* AND */
        tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
        is_and = true;
        break;
    case 0x1: /* ORR */
        tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
        break;
    case 0x2: /* EOR */
        tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
        break;
    default:
        assert(FALSE); /* must handle all above */
        break;
    }

    if (!sf && !is_and) {
        /* zero extend final result; we know we can skip this for AND
         * since the immediate had the high 32 bits clear.
         */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) { /* ANDS */
        gen_logic_CC(sf, tcg_rd);
    }
}
/* Move wide (immediate) */
static void disas_movw_imm(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* C3.4.2 Bitfield
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 * +----+-----+-------------+---+------+------+------+------+
 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms |  Rn  |  Rd  |
 * +----+-----+-------------+---+------+------+------+------+
 */
static void disas_bitfield(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
    TCGv_i64 tcg_rd, tcg_tmp;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    n = extract32(insn, 22, 1);
    ri = extract32(insn, 16, 6);
    si = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    bitsize = sf ? 64 : 32;

    if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);
    tcg_tmp = read_cpu_reg(s, rn, sf);

    /* OPTME: probably worth recognizing common cases of ext{8,16,32}{u,s} */

    if (opc != 1) { /* SBFM or UBFM */
        tcg_gen_movi_i64(tcg_rd, 0);
    }

    /* do the bit move operation */
    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
        pos = 0;
        len = (si - ri) + 1;
    } else {
        /* Wd<32+s-r,32-r> = Wn<s:0> */
        pos = bitsize - ri;
        len = si + 1;
    }

    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);

    if (opc == 0) { /* SBFM - sign extend the destination field */
        tcg_gen_shli_i64(tcg_rd, tcg_rd, 64 - (pos + len));
        tcg_gen_sari_i64(tcg_rd, tcg_rd, 64 - (pos + len));
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
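
/* Two illustrations of the decode above: "UBFX X0, X1, #8, #4" is
 * UBFM X0, X1, #8, #11, so si (11) >= ri (8): shift right by 8 and
 * deposit len == 4 bits at position 0. "LSL W0, W1, #3" is
 * UBFM W0, W1, #29, #28, so si < ri: pos == 32 - 29 == 3 and len == 29,
 * depositing Wn<28:0> at bit 3.
 */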
/* C3.4.3 Extract
 *   31  30  29 28         23 22   21  20  16 15    10 9    5 4    0
 * +----+------+-------------+---+----+------+--------+------+------+
 * | sf | op21 | 1 0 0 1 1 1 | N | o0 |  Rm  |  imms  |  Rn  |  Rd  |
 * +----+------+-------------+---+----+------+--------+------+------+
 */
static void disas_extract(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;

    sf = extract32(insn, 31, 1);
    n = extract32(insn, 22, 1);
    rm = extract32(insn, 16, 5);
    imm = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    op21 = extract32(insn, 29, 2);
    op0 = extract32(insn, 21, 1);
    bitsize = sf ? 64 : 32;

    if (sf != n || op21 || op0 || imm >= bitsize) {
        unallocated_encoding(s);
    } else {
        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

        tcg_rd = cpu_reg(s, rd);

        if (imm) {
            /* OPTME: we can special case rm==rn as a rotate */
            tcg_rm = read_cpu_reg(s, rm, sf);
            tcg_rn = read_cpu_reg(s, rn, sf);
            tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
            tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
            tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
             * so an extract from bit 0 is a special case.
             */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
            } else {
                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
            }
        }
    }
}
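
/* e.g. "EXTR X0, X1, X2, #12" computes (X2 >> 12) | (X1 << 52), a 64 bit
 * window into the X1:X2 concatenation; with Rm == Rn this is the
 * ROR-by-immediate alias mentioned in the OPTME note above.
 */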
/* C3.4 Data processing - immediate */
static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 23, 6)) {
    case 0x20: case 0x21: /* PC-rel. addressing */
        disas_pc_rel_adr(s, insn);
        break;
    case 0x22: case 0x23: /* Add/subtract (immediate) */
        disas_add_sub_imm(s, insn);
        break;
    case 0x24: /* Logical (immediate) */
        disas_logic_imm(s, insn);
        break;
    case 0x25: /* Move wide (immediate) */
        disas_movw_imm(s, insn);
        break;
    case 0x26: /* Bitfield */
        disas_bitfield(s, insn);
        break;
    case 0x27: /* Extract */
        disas_extract(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
 * Note that it is the caller's responsibility to ensure that the
 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
 * mandated semantics for out of range shifts.
 */
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
{
    switch (shift_type) {
    case A64_SHIFT_TYPE_LSL:
        tcg_gen_shl_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_LSR:
        tcg_gen_shr_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_ASR:
        if (!sf) {
            tcg_gen_ext32s_i64(dst, src);
        }
        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
        break;
    case A64_SHIFT_TYPE_ROR:
        if (sf) {
            tcg_gen_rotr_i64(dst, src, shift_amount);
        } else {
            TCGv_i32 t0, t1;
            t0 = tcg_temp_new_i32();
            t1 = tcg_temp_new_i32();
            tcg_gen_trunc_i64_i32(t0, src);
            tcg_gen_trunc_i64_i32(t1, shift_amount);
            tcg_gen_rotr_i32(t0, t0, t1);
            tcg_gen_extu_i32_i64(dst, t0);
            tcg_temp_free_i32(t0);
            tcg_temp_free_i32(t1);
        }
        break;
    default:
        assert(FALSE); /* all shift types should be handled */
        break;
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(dst, dst);
    }
}
/* Shift a TCGv src by immediate, put result in dst.
 * The shift amount must be in range (this should always be true as the
 * relevant instructions will UNDEF on bad shift immediates).
 */
static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
                          enum a64_shift_type shift_type, unsigned int shift_i)
{
    assert(shift_i < (sf ? 64 : 32));

    if (shift_i == 0) {
        tcg_gen_mov_i64(dst, src);
    } else {
        TCGv_i64 shift_const;

        shift_const = tcg_const_i64(shift_i);
        shift_reg(dst, src, sf, shift_type, shift_const);
        tcg_temp_free_i64(shift_const);
    }
}
/* C3.5.10 Logical (shifted register)
 *   31  30 29 28       24 23   22 21  20  16 15    10 9    5 4    0
 * +----+-----+-----------+-------+---+------+--------+------+------+
 * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
 * +----+-----+-----------+-------+---+------+--------+------+------+
 */
static void disas_logic_reg(DisasContext *s, uint32_t insn)
{
    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    shift_type = extract32(insn, 22, 2);
    invert = extract32(insn, 21, 1);
    rm = extract32(insn, 16, 5);
    shift_amount = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && (shift_amount & (1 << 5))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
         * register-register MOV and MVN, so it is worth special casing.
         */
        tcg_rm = cpu_reg(s, rm);
        if (invert) {
            tcg_gen_not_i64(tcg_rd, tcg_rm);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, tcg_rm);
            } else {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
            }
        }
        return;
    }

    tcg_rm = read_cpu_reg(s, rm, sf);

    if (shift_amount) {
        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
    }

    tcg_rn = cpu_reg(s, rn);

    switch (opc | (invert << 2)) {
    case 0: /* AND */
    case 3: /* ANDS */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 1: /* ORR */
        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 2: /* EOR */
        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 4: /* BIC */
    case 7: /* BICS */
        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 5: /* ORN */
        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 6: /* EON */
        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    default:
        assert(FALSE);
        break;
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) {
        gen_logic_CC(sf, tcg_rd);
    }
}
/* Add/subtract (extended register) */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Add/subtract (shifted register) */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Data-processing (3 source) */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Add/subtract (with carry) */
static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Conditional compare (immediate) */
static void disas_cc_imm(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Conditional compare (register) */
static void disas_cc_reg(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}
/* C3.5.6 Conditional select
 *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
 * +----+----+---+-----------------+------+------+-----+------+------+
 * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
 * +----+----+---+-----------------+------+------+-----+------+------+
 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, tcg_src;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (rd == 31) {
        /* silly no-op write; until we use movcond we must special-case
         * this to avoid a dead temporary across basic blocks.
         */
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (cond >= 0x0e) { /* condition "always" */
        tcg_src = read_cpu_reg(s, rn, sf);
        tcg_gen_mov_i64(tcg_rd, tcg_src);
    } else {
        /* OPTME: we could use movcond here, at the cost of duplicating
         * a lot of the arm_gen_test_cc() logic.
         */
        int label_match = gen_new_label();
        int label_continue = gen_new_label();

        arm_gen_test_cc(cond, label_match);
        /* nomatch: */
        tcg_src = cpu_reg(s, rm);

        if (else_inv && else_inc) {
            tcg_gen_neg_i64(tcg_rd, tcg_src);
        } else if (else_inv) {
            tcg_gen_not_i64(tcg_rd, tcg_src);
        } else if (else_inc) {
            tcg_gen_addi_i64(tcg_rd, tcg_src, 1);
        } else {
            tcg_gen_mov_i64(tcg_rd, tcg_src);
        }
        if (!sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
        tcg_gen_br(label_continue);
        /* match: */
        gen_set_label(label_match);
        tcg_src = read_cpu_reg(s, rn, sf);
        tcg_gen_mov_i64(tcg_rd, tcg_src);
        /* continue: */
        gen_set_label(label_continue);
    }
}
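
/* The else_inv/else_inc pair (op, op2<0>) distinguishes the four aliases
 * of this encoding: CSEL (00), CSINC (01), CSINV (10) and CSNEG (11, the
 * neg_i64 case, since NOT(x) + 1 == -x in two's complement).
 */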
static void handle_clz(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_clz64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_clz(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}

static void handle_cls(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_cls64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_cls32(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}

static void handle_rbit(DisasContext *s, unsigned int sf,
                        unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_rbit64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_rbit(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}
/* C5.6.149 REV with sf==1, opcode==3 ("REV64") */
static void handle_rev64(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    if (!sf) {
        unallocated_encoding(s);
        return;
    }
    tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
}

/* C5.6.149 REV with sf==0, opcode==2
 * C5.6.151 REV32 (sf==1, opcode==2)
 */
static void handle_rev32(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);

    if (sf) {
        TCGv_i64 tcg_tmp = tcg_temp_new_i64();
        TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

        /* bswap32_i64 requires zero high word */
        tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
        tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
        tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
        tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);

        tcg_temp_free_i64(tcg_tmp);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
        tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
    }
}
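
/* e.g. with sf == 1, 0x1122334455667788 becomes 0x4433221188776655:
 * each 32 bit half is byte-reversed in place.
 */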
/* C5.6.150 REV16 (opcode==1) */
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    tcg_gen_andi_i64(tcg_tmp, tcg_rn, 0xffff);
    tcg_gen_bswap16_i64(tcg_rd, tcg_tmp);

    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 16);
    tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
    tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 16, 16);

    if (sf) {
        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
        tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
        tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 32, 16);

        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 48);
        tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 48, 16);
    }

    tcg_temp_free_i64(tcg_tmp);
}
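
/* e.g. with sf == 1, 0x1122334455667788 becomes 0x2211443366558877:
 * bytes swap within each 16 bit lane.
 */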
/* C3.5.7 Data-processing (1 source)
 *   31  30  29  28             21 20     16 15    10 9    5 4    0
 * +----+---+---+-----------------+---------+--------+------+------+
 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+---------+--------+------+------+
 */
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opcode, rn, rd;

    if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 0: /* RBIT */
        handle_rbit(s, sf, rn, rd);
        break;
    case 1: /* REV16 */
        handle_rev16(s, sf, rn, rd);
        break;
    case 2: /* REV32 */
        handle_rev32(s, sf, rn, rd);
        break;
    case 3: /* REV64 */
        handle_rev64(s, sf, rn, rd);
        break;
    case 4: /* CLZ */
        handle_clz(s, sf, rn, rd);
        break;
    case 5: /* CLS */
        handle_cls(s, sf, rn, rd);
        break;
    }
}
static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
                       unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_n, tcg_m, tcg_rd;
    tcg_rd = cpu_reg(s, rd);

    if (!sf && is_signed) {
        tcg_n = new_tmp_a64(s);
        tcg_m = new_tmp_a64(s);
        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
    } else {
        tcg_n = read_cpu_reg(s, rn, sf);
        tcg_m = read_cpu_reg(s, rm, sf);
    }

    if (is_signed) {
        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
    } else {
        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
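
/* e.g. for "SDIV W0, W1, W2" with W1 == -4 and W2 == 2, the 32 bit
 * inputs must be sign-extended to 64 bits so the 64 bit helper returns
 * -2; the final zero-extension then leaves 0xfffffffe in W0.
 */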
/* C5.6.115 LSLV, C5.6.118 LSRV, C5.6.17 ASRV, C5.6.154 RORV */
static void handle_shift_reg(DisasContext *s,
                             enum a64_shift_type shift_type, unsigned int sf,
                             unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_shift = tcg_temp_new_i64();
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
    shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
    tcg_temp_free_i64(tcg_shift);
}

/* C3.5.8 Data-processing (2 source)
 *   31 30 29  28             21 20  16 15    10 9    5 4    0
 * +----+---+---+-----------------+------+--------+------+------+
 * | sf | 0 | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+------+--------+------+------+
 */
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, rm, opcode, rn, rd;
    sf = extract32(insn, 31, 1);
    rm = extract32(insn, 16, 5);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }
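
    /* Opcodes 16..23 below are the CRC32 group; CRC32 is an optional
     * ARMv8 extension which is not implemented yet, so it is reported
     * as unsupported rather than unallocated.
     */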
    switch (opcode) {
    case 2: /* UDIV */
        handle_div(s, false, sf, rm, rn, rd);
        break;
    case 3: /* SDIV */
        handle_div(s, true, sf, rm, rn, rd);
        break;
    case 8: /* LSLV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
        break;
    case 9: /* LSRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
        break;
    case 10: /* ASRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
        break;
    case 11: /* RORV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
        break;
    case 16:
    case 17:
    case 18:
    case 19:
    case 20:
    case 21:
    case 22:
    case 23: /* CRC32 */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* C3.5 Data processing - register */
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 5)) {
    case 0x0a: /* Logical (shifted register) */
        disas_logic_reg(s, insn);
        break;
    case 0x0b: /* Add/subtract */
        if (insn & (1 << 21)) { /* (extended register) */
            disas_add_sub_ext_reg(s, insn);
        } else {
            disas_add_sub_reg(s, insn);
        }
        break;
    case 0x1b: /* Data-processing (3 source) */
        disas_data_proc_3src(s, insn);
        break;
    case 0x1a:
        switch (extract32(insn, 21, 3)) {
        case 0x0: /* Add/subtract (with carry) */
            disas_adc_sbc(s, insn);
            break;
        case 0x2: /* Conditional compare */
            if (insn & (1 << 11)) { /* (immediate) */
                disas_cc_imm(s, insn);
            } else { /* (register) */
                disas_cc_reg(s, insn);
            }
            break;
        case 0x4: /* Conditional select */
            disas_cond_select(s, insn);
            break;
        case 0x6: /* Data-processing */
            if (insn & (1 << 30)) { /* (1 source) */
                disas_data_proc_1src(s, insn);
            } else { /* (2 source) */
                disas_data_proc_2src(s, insn);
            }
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* C3.1 A64 instruction index by encoding */
static void disas_a64_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t insn;

    insn = arm_ldl_code(env, s->pc, s->bswap_code);
    s->insn = insn;
    s->pc += 4;
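
    /* The top-level A64 decode is over instruction bits [28:25]. */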
    switch (extract32(insn, 25, 4)) {
    case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
        unallocated_encoding(s);
        break;
    case 0x8: case 0x9: /* Data processing - immediate */
        disas_data_proc_imm(s, insn);
        break;
    case 0xa: case 0xb: /* Branch, exception generation and system insns */
        disas_b_exc_sys(s, insn);
        break;
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xe: /* Loads and stores */
        disas_ldst(s, insn);
        break;
    case 0x5:
    case 0xd: /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf: /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        assert(FALSE); /* all 16 cases should be handled above */
        break;
    }

    /* if we allocated any temporaries, free them here */
    free_tmp_a64(s);
}

void gen_intermediate_code_internal_a64(ARMCPU *cpu,
                                        TranslationBlock *tb,
                                        bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 1;
    dc->thumb = 0;
    dc->bswap_code = 0;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
#if !defined(CONFIG_USER_ONLY)
    dc->user = 0;
#endif
    dc->vfp_enabled = 0;
    dc->vec_len = 0;
    dc->vec_stride = 0;

    init_tmp_a64_array(dc);

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
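
    /* The count field of tb->cflags gives the requested maximum number
     * of instructions for this TB; zero means no limit was requested,
     * in which case we translate up to the largest representable count.
     */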
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start();

    tcg_clear_temp_count();

    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC (A64 insns are always 4 bytes) so that
                     * clearing the breakpoint will invalidate this TB.
                     */
                    dc->pc += 4;
                    goto done_generating;
                }
            }
        }
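
        /* In search_pc mode we are retranslating an existing TB in order
         * to recover guest state (e.g. after a fault), so record the PC
         * and instruction count of every op we generate.
         */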
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        disas_a64_insn(env, dc);

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a branch is encountered; otherwise the
         * code following it could get translated several times.
         * Also stop translation when a page boundary is reached. This
         * ensures prefetch aborts occur at the right place.
         */
        num_insns++;
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    if (unlikely(cs->singlestep_enabled) && dc->is_jmp != DISAS_EXC) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        assert(dc->is_jmp != DISAS_TB_JUMP);
        if (dc->is_jmp != DISAS_JUMP) {
            gen_a64_set_pc_im(dc->pc);
        }
        gen_exception(EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
        case DISAS_EXC:
        case DISAS_SWI:
            break;
        case DISAS_WFI:
            /* This is a special case because we don't want to just halt
             * the CPU if trying to debug across a WFI.
             */
            gen_helper_wfi(cpu_env);
            break;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}