target-arm: A64: add support for ld/st unsigned imm
[qemu.git] / target-arm / translate-a64.c
blob: 0edcee124c252f740aa71bd6ba6c73325e17cca1
/*
 * AArch64 translation
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "translate.h"
#include "qemu/host-utils.h"

#include "exec/gen-icount.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;
static TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};

/* initialize TCG globals. */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
}
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;

    cpu_fprintf(f, "PC=%016"PRIx64" SP=%016"PRIx64"\n",
                env->pc, env->xregs[31]);
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
    cpu_fprintf(f, "PSTATE=%08x (flags %c%c%c%c)\n",
                psr,
                psr & PSTATE_N ? 'N' : '-',
                psr & PSTATE_Z ? 'Z' : '-',
                psr & PSTATE_C ? 'C' : '-',
                psr & PSTATE_V ? 'V' : '-');
    cpu_fprintf(f, "\n");
}

static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return 1;
#else
    return s->user;
#endif
}
void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_EXC;
}

static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    /* No direct tb linking with singlestep or deterministic io */
    if (s->singlestep_enabled || (s->tb->cflags & CF_LAST_IO)) {
        return false;
    }

    /* Only link tbs from inside the same guest page */
    if ((s->tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }

    return true;
}

static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->singlestep_enabled) {
            gen_exception(EXCP_DEBUG);
        }
        tcg_gen_exit_tb(0);
        s->is_jmp = DISAS_JUMP;
    }
}

static void unallocated_encoding(DisasContext *s)
{
    gen_exception_insn(s, 4, EXCP_UDEF);
}
#define unsupported_encoding(s, insn)                                    \
    do {                                                                 \
        qemu_log_mask(LOG_UNIMP,                                         \
                      "%s:%d: unsupported instruction encoding 0x%08x "  \
                      "at pc=%016" PRIx64 "\n",                          \
                      __FILE__, __LINE__, insn, s->pc - 4);              \
        unallocated_encoding(s);                                         \
    } while (0)
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    int i;
    for (i = 0; i < ARRAY_SIZE(s->tmp_a64); i++) {
        TCGV_UNUSED_I64(s->tmp_a64[i]);
    }
#endif
    s->tmp_a64_count = 0;
}

static void free_tmp_a64(DisasContext *s)
{
    int i;
    for (i = 0; i < s->tmp_a64_count; i++) {
        tcg_temp_free_i64(s->tmp_a64[i]);
    }
    init_tmp_a64_array(s);
}

static TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}

static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}
/*
 * Register access functions
 *
 * These functions are used for directly accessing a register in cases
 * where changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
static TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        return new_tmp_a64_zero(s);
    } else {
        return cpu_X[reg];
    }
}

/* register access for when 31 == SP */
static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}
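/* For instance, in "LDR x0, [sp, #8]" the base register field of 31
 * means SP, so loads and stores use the _sp accessors, whereas a
 * data-processing source of 31 (e.g. "ORR x0, xzr, x1") reads as zero
 * via cpu_reg()'s discardable temporary.
 */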
/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}

/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    TCGv_i64 flag = tcg_temp_new_i64();

    tcg_gen_setcondi_i64(TCG_COND_NE, flag, result, 0);
    tcg_gen_trunc_i64_i32(cpu_ZF, flag);
    tcg_gen_shri_i64(flag, result, 32);
    tcg_gen_trunc_i64_i32(cpu_NF, flag);
    tcg_temp_free_i64(flag);
}

/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_trunc_i64_i32(cpu_ZF, result);
        tcg_gen_trunc_i64_i32(cpu_NF, result);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory
 */
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, int size)
{
    g_assert(size <= 3);
    tcg_gen_qemu_st_i64(source, tcg_addr, get_mem_index(s), MO_TE + size);
}

/*
 * Load from memory to GPR register
 */
static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                      int size, bool is_signed, bool extend)
{
    TCGMemOp memop = MO_TE + size;

    g_assert(size <= 3);

    if (is_signed) {
        memop += MO_SIGN;
    }

    tcg_gen_qemu_ld_i64(dest, tcg_addr, get_mem_index(s), memop);

    if (extend && is_signed) {
        g_assert(size < 3);
        tcg_gen_ext32u_i64(dest, dest);
    }
}
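/* For example, "LDRSB w0, [x1]" uses size=0 with is_signed and extend
 * both set: the byte is sign-extended to 64 bits by the MO_SIGN memop,
 * and the final ext32u then clears bits [63:32], as required when the
 * destination is a W register.
 */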
/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    int freg_offs = offsetof(CPUARMState, vfp.regs[srcidx * 2]);
    TCGv_i64 tmp = tcg_temp_new_i64();

    if (size < 4) {
        switch (size) {
        case 0:
            tcg_gen_ld8u_i64(tmp, cpu_env, freg_offs);
            break;
        case 1:
            tcg_gen_ld16u_i64(tmp, cpu_env, freg_offs);
            break;
        case 2:
            tcg_gen_ld32u_i64(tmp, cpu_env, freg_offs);
            break;
        case 3:
            tcg_gen_ld_i64(tmp, cpu_env, freg_offs);
            break;
        }
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TE + size);
    } else {
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
        tcg_gen_ld_i64(tmp, cpu_env, freg_offs);
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TEQ);
        tcg_gen_ld_i64(tmp, cpu_env, freg_offs + sizeof(float64));
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(tmp, tcg_hiaddr, get_mem_index(s), MO_TEQ);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_temp_free_i64(tmp);
}
/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    int freg_offs = offsetof(CPUARMState, vfp.regs[destidx * 2]);
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi;

    if (size < 4) {
        TCGMemOp memop = MO_TE + size;
        tmphi = tcg_const_i64(0);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
    } else {
        TCGv_i64 tcg_hiaddr;
        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), MO_TEQ);
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(tmphi, tcg_hiaddr, get_mem_index(s), MO_TEQ);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, freg_offs);
    tcg_gen_st_i64(tmphi, cpu_env, freg_offs + sizeof(float64));

    tcg_temp_free_i64(tmplo);
    tcg_temp_free_i64(tmphi);
}

static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}

/*
 * the instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter 3 (C3)
 * of the ARM Architecture Reference Manual (DDI0487A_a)
 */
/* C3.2.6 Unconditional branch (immediate)
 *   31  30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;

    if (insn & (1U << 31)) {
        /* C5.6.26 BL Branch with link */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
    }

    /* C5.6.20 B Branch / C5.6.26 BL Branch with link */
    gen_goto_tb(s, 0, addr);
}
/* C3.2.1 Compare & branch (immediate)
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |         imm19       |   Rt   |
 * +----+-------------+----+---------------------+--------+
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    int label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
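/* Worked example: "CBNZ x2, ." (a branch to self) has sf=1, op=1 and
 * imm19=0; because s->pc has already been advanced past the insn, the
 * computed target addr = s->pc - 4 is the address of the CBNZ itself.
 */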
/* C3.2.5 Test & branch (immediate)
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    int label_match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
/* C3.2.2 / C5.6.19 Conditional branch (immediate)
 *  31           25  24  23                  5   4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |         imm19       | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
    cond = extract32(insn, 0, 4);

    if (cond < 0x0e) {
        /* genuinely conditional branches */
        int label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->pc);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}
/* C5.6.68 HINT */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0: /* NOP */
        return;
    case 1: /* YIELD */
    case 2: /* WFE */
    case 3: /* WFI */
    case 4: /* SEV */
    case 5: /* SEVL */
        /* we treat all as NOP at least for now */
        return;
    default:
        /* default specified as NOP equivalent */
        return;
    }
}

/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        unsupported_encoding(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
    case 6: /* ISB */
        /* We don't emulate caches so barriers are no-ops */
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
/* C5.6.130 MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsupported_encoding(s, insn);
}

/* C5.6.204 SYS */
static void handle_sys(DisasContext *s, uint32_t insn, unsigned int l,
                       unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    unsupported_encoding(s, insn);
}

/* C5.6.129 MRS - move from system register */
static void handle_mrs(DisasContext *s, uint32_t insn, unsigned int op0,
                       unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    unsupported_encoding(s, insn);
}

/* C5.6.131 MSR (register) - move to system register */
static void handle_msr(DisasContext *s, uint32_t insn, unsigned int op0,
                       unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    unsupported_encoding(s, insn);
}
/* C3.2.4 System
 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 */
static void disas_system(DisasContext *s, uint32_t insn)
{
    unsigned int l, op0, op1, crn, crm, op2, rt;
    l = extract32(insn, 21, 1);
    op0 = extract32(insn, 19, 2);
    op1 = extract32(insn, 16, 3);
    crn = extract32(insn, 12, 4);
    crm = extract32(insn, 8, 4);
    op2 = extract32(insn, 5, 3);
    rt = extract32(insn, 0, 5);

    if (op0 == 0) {
        if (l || rt != 31) {
            unallocated_encoding(s);
            return;
        }
        switch (crn) {
        case 2: /* C5.6.68 HINT */
            handle_hint(s, insn, op1, op2, crm);
            break;
        case 3: /* CLREX, DSB, DMB, ISB */
            handle_sync(s, insn, op1, op2, crm);
            break;
        case 4: /* C5.6.130 MSR (immediate) */
            handle_msr_i(s, insn, op1, op2, crm);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    }

    if (op0 == 1) {
        /* C5.6.204 SYS */
        handle_sys(s, insn, l, op1, op2, crn, crm, rt);
    } else if (l) { /* op0 > 1 */
        /* C5.6.129 MRS - move from system register */
        handle_mrs(s, insn, op0, op1, op2, crn, crm, rt);
    } else {
        /* C5.6.131 MSR (register) - move to system register */
        handle_msr(s, insn, op0, op1, op2, crn, crm, rt);
    }
}
/* Exception generation */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* C3.2.7 Unconditional branch (register)
 *  31           25 24   21 20    16 15   10 9    5 4     0
 * +---------------+-------+-------+-------+------+-------+
 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
 * +---------------+-------+-------+-------+------+-------+
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* BR */
    case 2: /* RET */
        break;
    case 1: /* BLR */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
        break;
    case 4: /* ERET */
    case 5: /* DRPS */
        if (rn != 0x1f) {
            unallocated_encoding(s);
        } else {
            unsupported_encoding(s, insn);
        }
        return;
    default:
        unallocated_encoding(s);
        return;
    }

    tcg_gen_mov_i64(cpu_pc, cpu_reg(s, rn));
    s->is_jmp = DISAS_JUMP;
}
/* C3.2 Branches, exception generating and system instructions */
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 7)) {
    case 0x0a: case 0x0b:
    case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
        disas_uncond_b_imm(s, insn);
        break;
    case 0x1a: case 0x5a: /* Compare & branch (immediate) */
        disas_comp_b_imm(s, insn);
        break;
    case 0x1b: case 0x5b: /* Test & branch (immediate) */
        disas_test_b_imm(s, insn);
        break;
    case 0x2a: /* Conditional branch (immediate) */
        disas_cond_b_imm(s, insn);
        break;
    case 0x6a: /* Exception generation / System */
        if (insn & (1 << 24)) {
            disas_system(s, insn);
        } else {
            disas_exc(s, insn);
        }
        break;
    case 0x6b: /* Unconditional branch (register) */
        disas_uncond_b_reg(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* Load/store exclusive */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Load register (literal) */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/*
 * C5.6.80 LDNP (Load Pair - non-temporal hint)
 * C5.6.81 LDP (Load Pair - non vector)
 * C5.6.82 LDPSW (Load Pair Signed Word - non vector)
 * C5.6.176 STNP (Store Pair - non-temporal hint)
 * C5.6.177 STP (Store Pair - non vector)
 * C6.3.165 LDNP (Load Pair of SIMD&FP - non-temporal hint)
 * C6.3.165 LDP (Load Pair of SIMD&FP)
 * C6.3.284 STNP (Store Pair of SIMD&FP - non-temporal hint)
 * C6.3.284 STP (Store Pair of SIMD&FP)
 *
 *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
 * +-----+-------+---+---+-------+---+-----------------------------+
 * | opc | 1 0 1 | V | 0 | index | L |  imm7 |  Rt2  |  Rn  |  Rt  |
 * +-----+-------+---+---+-------+---+-------+-------+------+------+
 *
 * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
 *      LDPSW                    01
 *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
 *   V: 0 -> GPR, 1 -> Vector
 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
 *      10 -> signed offset, 11 -> pre-index
 *   L: 0 -> Store 1 -> Load
 *
 * Rt, Rt2 = GPR or SIMD registers to be stored
 * Rn = general purpose register containing address
 * imm7 = signed offset (multiple of 4 or 8 depending on size)
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int64_t offset = sextract32(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;

    TCGv_i64 tcg_addr; /* calculated address */
    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        if (!is_load && is_signed) {
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /* signed offset with "non-temporal" hint. Since we don't emulate
         * caches we don't care about hints to the cache system about
         * data access patterns, and handle this identically to plain
         * signed offset.
         */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, rn not updated */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    offset <<= size;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!postindex) {
        tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
    }

    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, tcg_addr, size);
        } else {
            do_fp_st(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_load) {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false);
        } else {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        }
    }
    tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt2, tcg_addr, size);
        } else {
            do_fp_st(s, rt2, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
        if (is_load) {
            do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false);
        } else {
            do_gpr_st(s, tcg_rt2, tcg_addr, size);
        }
    }

    if (wback) {
        if (postindex) {
            tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
        } else {
            tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
    }
}
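/* Worked example: "LDP x0, x1, [sp, #16]" has opc=0b10 (64 bit), V=0,
 * index=0b10 (signed offset), L=1 and imm7=2, so size=3 and
 * offset = 2 << 3 = 16; x0 is loaded from sp+16 and x1 from
 * sp + 16 + (1 << size) = sp+24.
 */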
/*
 * C3.3.13 Load/store (unsigned immediate)
 *
 *  31 30 29   27  26 25 24 23 22 21        10 9     5 4    0
 * +----+-------+---+-----+-----+------------+-------+------+
 * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
 * +----+-------+---+-----+-----+------------+-------+------+
 *
 * For non-vector:
 *   size: 00 -> byte, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * Rn: base address register (inc SP)
 * Rt: target register
 */
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    unsigned int imm12 = extract32(insn, 10, 12);
    bool is_vector = extract32(insn, 26, 1);
    int size = extract32(insn, 30, 2);
    int opc = extract32(insn, 22, 2);
    unsigned int offset;

    TCGv_i64 tcg_addr;

    bool is_store;
    bool is_signed = false;
    bool is_extended = false;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);
    offset = imm12 << size;
    tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
        }
    }
}
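/* Worked example: "LDR x0, [x1, #16]" encodes size=0b11, V=0, opc=0b01,
 * imm12=2; the decode above scales the unsigned immediate by the access
 * size, so offset = 2 << 3 = 16. "LDRSW x0, [x1, #4]" is size=0b10,
 * opc=0b10 with imm12=1: is_signed but not is_extended, since the
 * 32 bit value is sign-extended into an X register.
 */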
/* Load/store register (all forms) */
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 2)) {
    case 0:
        unsupported_encoding(s, insn);
        break;
    case 1:
        disas_ldst_reg_unsigned_imm(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* AdvSIMD load/store multiple structures */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* AdvSIMD load/store single structure */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* C3.3 Loads and stores */
static void disas_ldst(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 6)) {
    case 0x08: /* Load/store exclusive */
        disas_ldst_excl(s, insn);
        break;
    case 0x18: case 0x1c: /* Load register (literal) */
        disas_ld_lit(s, insn);
        break;
    case 0x28: case 0x29:
    case 0x2c: case 0x2d: /* Load/store pair (all forms) */
        disas_ldst_pair(s, insn);
        break;
    case 0x38: case 0x39:
    case 0x3c: case 0x3d: /* Load/store register (all forms) */
        disas_ldst_reg(s, insn);
        break;
    case 0x0c: /* AdvSIMD load/store multiple structures */
        disas_ldst_multiple_struct(s, insn);
        break;
    case 0x0d: /* AdvSIMD load/store single structure */
        disas_ldst_single_struct(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* C3.4.6 PC-rel. addressing
 *   31  30   29 28       24 23                5 4    0
 * +----+-------+-----------+-------------------+------+
 * | op | immlo | 1 0 0 0 0 |       immhi       |  Rd  |
 * +----+-------+-----------+-------------------+------+
 */
static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
{
    unsigned int page, rd;
    uint64_t base;
    int64_t offset;

    page = extract32(insn, 31, 1);
    /* SignExtend(immhi:immlo) -> offset */
    offset = ((int64_t)sextract32(insn, 5, 19) << 2) | extract32(insn, 29, 2);
    rd = extract32(insn, 0, 5);
    base = s->pc - 4;

    if (page) {
        /* ADRP (page based) */
        base &= ~0xfff;
        offset <<= 12;
    }

    tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
}
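/* For instance, "ADRP x0, label" with immhi:immlo = 1 writes
 * (pc & ~0xfff) + 0x1000 to x0: the page address of the instruction
 * plus the 21 bit signed immediate scaled by the 4K page size.
 */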
/* Add/subtract (immediate) */
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* The input should be a value in the bottom e bits (with higher
 * bits zero); returns that value replicated into every element
 * of size e in a 64 bit integer.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    assert(e != 0);
    while (e < 64) {
        mask |= mask << e;
        e *= 2;
    }
    return mask;
}

/* Return a value with the bottom len bits set (where 0 < len <= 64) */
static inline uint64_t bitmask64(unsigned int length)
{
    assert(length > 0 && length <= 64);
    return ~0ULL >> (64 - length);
}
/* Simplified variant of pseudocode DecodeBitMasks() for the case where we
 * only require the wmask. Returns false if the imms/immr/immn are a reserved
 * value (ie should cause a guest UNDEF exception), and true if they are
 * valid, in which case the decoded bit pattern is written to result.
 */
static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                                   unsigned int imms, unsigned int immr)
{
    uint64_t mask;
    unsigned e, levels, s, r;
    int len;

    assert(immn < 2 && imms < 64 && immr < 64);

    /* The bit patterns we create here are 64 bit patterns which
     * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
     * 64 bits each. Each element contains the same value: a run
     * of between 1 and e-1 non-zero bits, rotated within the
     * element by between 0 and e-1 bits.
     *
     * The element size and run length are encoded into immn (1 bit)
     * and imms (6 bits) as follows:
     * 64 bit elements: immn = 1, imms = <length of run - 1>
     * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
     * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
     *  8 bit elements: immn = 0, imms = 110 : <length of run - 1>
     *  4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
     *  2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
     * Notice that immn = 0, imms = 11111x is the only combination
     * not covered by one of the above options; this is reserved.
     * Further, <length of run - 1> all-ones is a reserved pattern.
     *
     * In all cases the rotation is by immr % e (and immr is 6 bits).
     */

    /* First determine the element size */
    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
    if (len < 1) {
        /* This is the immn == 0, imms == 11111x case */
        return false;
    }
    e = 1 << len;

    levels = e - 1;
    s = imms & levels;
    r = immr & levels;

    if (s == levels) {
        /* <length of run - 1> mustn't be all-ones. */
        return false;
    }

    /* Create the value of one element: s+1 set bits rotated
     * by r within the element (which is e bits wide)...
     * (the rotate is guarded because a shift by e == 64 when r == 0
     * would be undefined behaviour in C)
     */
    mask = bitmask64(s + 1);
    if (r) {
        mask = (mask >> r) | (mask << (e - r));
        mask &= bitmask64(e);
    }
    /* ...then replicate the element over the whole 64 bit value */
    mask = bitfield_replicate(mask, e);
    *result = mask;
    return true;
}
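/* Worked example: immn=0, imms=0b111100, immr=0 selects 2 bit elements
 * (~imms & 0x3f = 0b000011 so len=1, e=2) with run length s+1 = 1 and
 * no rotation; each element is 0b01 and the decoded wmask is
 * 0x5555555555555555.
 */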
/* C3.4.4 Logical (immediate)
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 * +----+-----+-------------+---+------+------+------+------+
 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms |  Rn  |  Rd  |
 * +----+-----+-------------+---+------+------+------+------+
 */
static void disas_logic_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opc, is_n, immr, imms, rn, rd;
    TCGv_i64 tcg_rd, tcg_rn;
    uint64_t wmask;
    bool is_and = false;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    is_n = extract32(insn, 22, 1);
    immr = extract32(insn, 16, 6);
    imms = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && is_n) {
        unallocated_encoding(s);
        return;
    }

    if (opc == 0x3) { /* ANDS */
        tcg_rd = cpu_reg(s, rd);
    } else {
        tcg_rd = cpu_reg_sp(s, rd);
    }
    tcg_rn = cpu_reg(s, rn);

    if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
        /* some immediate field values are reserved */
        unallocated_encoding(s);
        return;
    }

    if (!sf) {
        wmask &= 0xffffffff;
    }

    switch (opc) {
    case 0x3: /* ANDS */
    case 0x0: /* AND */
        tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
        is_and = true;
        break;
    case 0x1: /* ORR */
        tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
        break;
    case 0x2: /* EOR */
        tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
        break;
    default:
        assert(FALSE); /* must handle all above */
        break;
    }

    if (!sf && !is_and) {
        /* zero extend final result; we know we can skip this for AND
         * since the immediate had the high 32 bits clear.
         */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) { /* ANDS */
        gen_logic_CC(sf, tcg_rd);
    }
}
/* Move wide (immediate) */
static void disas_movw_imm(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* C3.4.2 Bitfield
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 * +----+-----+-------------+---+------+------+------+------+
 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms |  Rn  |  Rd  |
 * +----+-----+-------------+---+------+------+------+------+
 */
static void disas_bitfield(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
    TCGv_i64 tcg_rd, tcg_tmp;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    n = extract32(insn, 22, 1);
    ri = extract32(insn, 16, 6);
    si = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    bitsize = sf ? 64 : 32;

    if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);
    tcg_tmp = read_cpu_reg(s, rn, sf);

    /* OPTME: probably worth recognizing common cases of ext{8,16,32}{u,s} */

    if (opc != 1) { /* SBFM or UBFM */
        tcg_gen_movi_i64(tcg_rd, 0);
    }

    /* do the bit move operation */
    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
        pos = 0;
        len = (si - ri) + 1;
    } else {
        /* Wd<32+s-r,32-r> = Wn<s:0> */
        pos = bitsize - ri;
        len = si + 1;
    }

    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);

    if (opc == 0) { /* SBFM - sign extend the destination field */
        tcg_gen_shli_i64(tcg_rd, tcg_rd, 64 - (pos + len));
        tcg_gen_sari_i64(tcg_rd, tcg_rd, 64 - (pos + len));
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
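/* Worked example: "UBFX x0, x1, #8, #4" is UBFM with ri=8, si=11;
 * si >= ri, so the source is shifted right by 8 and len=4 bits are
 * deposited at pos=0, extracting bits [11:8]. The alias
 * "ASR x0, x1, #8" is SBFM with ri=8, si=63, where the final shl/sar
 * pair sign-extends the 56 bit deposited field.
 */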
/* C3.4.3 Extract
 *   31  30  29 28         23 22  21  20  16 15    10 9    5 4    0
 * +----+------+-------------+---+----+------+--------+------+------+
 * | sf | op21 | 1 0 0 1 1 1 | N | o0 |  Rm  |  imms  |  Rn  |  Rd  |
 * +----+------+-------------+---+----+------+--------+------+------+
 */
static void disas_extract(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;

    sf = extract32(insn, 31, 1);
    n = extract32(insn, 22, 1);
    rm = extract32(insn, 16, 5);
    imm = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    op21 = extract32(insn, 29, 2);
    op0 = extract32(insn, 21, 1);
    bitsize = sf ? 64 : 32;

    if (sf != n || op21 || op0 || imm >= bitsize) {
        unallocated_encoding(s);
    } else {
        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

        tcg_rd = cpu_reg(s, rd);

        if (imm) {
            /* OPTME: we can special case rm==rn as a rotate */
            tcg_rm = read_cpu_reg(s, rm, sf);
            tcg_rn = read_cpu_reg(s, rn, sf);
            tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
            tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
            tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
             * so an extract from bit 0 is a special case.
             */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
            } else {
                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
            }
        }
    }
}
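/* For example, "ROR x0, x1, #imm" is the preferred alias of
 * "EXTR x0, x1, x1, #imm": with rm == rn the shri/shli/or sequence
 * above is exactly a rotate right by imm.
 */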
/* C3.4 Data processing - immediate */
static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 23, 6)) {
    case 0x20: case 0x21: /* PC-rel. addressing */
        disas_pc_rel_adr(s, insn);
        break;
    case 0x22: case 0x23: /* Add/subtract (immediate) */
        disas_add_sub_imm(s, insn);
        break;
    case 0x24: /* Logical (immediate) */
        disas_logic_imm(s, insn);
        break;
    case 0x25: /* Move wide (immediate) */
        disas_movw_imm(s, insn);
        break;
    case 0x26: /* Bitfield */
        disas_bitfield(s, insn);
        break;
    case 0x27: /* Extract */
        disas_extract(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
 * Note that it is the caller's responsibility to ensure that the
 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
 * mandated semantics for out of range shifts.
 */
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
{
    switch (shift_type) {
    case A64_SHIFT_TYPE_LSL:
        tcg_gen_shl_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_LSR:
        tcg_gen_shr_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_ASR:
        if (!sf) {
            tcg_gen_ext32s_i64(dst, src);
        }
        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
        break;
    case A64_SHIFT_TYPE_ROR:
        if (sf) {
            tcg_gen_rotr_i64(dst, src, shift_amount);
        } else {
            TCGv_i32 t0, t1;
            t0 = tcg_temp_new_i32();
            t1 = tcg_temp_new_i32();
            tcg_gen_trunc_i64_i32(t0, src);
            tcg_gen_trunc_i64_i32(t1, shift_amount);
            tcg_gen_rotr_i32(t0, t0, t1);
            tcg_gen_extu_i32_i64(dst, t0);
            tcg_temp_free_i32(t0);
            tcg_temp_free_i32(t1);
        }
        break;
    default:
        assert(FALSE); /* all shift types should be handled */
        break;
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(dst, dst);
    }
}

/* Shift a TCGv src by immediate, put result in dst.
 * The shift amount must be in range (this should always be true as the
 * relevant instructions will UNDEF on bad shift immediates).
 */
static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
                          enum a64_shift_type shift_type, unsigned int shift_i)
{
    assert(shift_i < (sf ? 64 : 32));

    if (shift_i == 0) {
        tcg_gen_mov_i64(dst, src);
    } else {
        TCGv_i64 shift_const;

        shift_const = tcg_const_i64(shift_i);
        shift_reg(dst, src, sf, shift_type, shift_const);
        tcg_temp_free_i64(shift_const);
    }
}
/* C3.5.10 Logical (shifted register)
 *   31  30 29 28       24 23   22 21  20  16 15    10 9    5 4    0
 * +----+-----+-----------+-------+---+------+--------+------+------+
 * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
 * +----+-----+-----------+-------+---+------+--------+------+------+
 */
static void disas_logic_reg(DisasContext *s, uint32_t insn)
{
    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    shift_type = extract32(insn, 22, 2);
    invert = extract32(insn, 21, 1);
    rm = extract32(insn, 16, 5);
    shift_amount = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && (shift_amount & (1 << 5))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
         * register-register MOV and MVN, so it is worth special casing.
         */
        tcg_rm = cpu_reg(s, rm);
        if (invert) {
            tcg_gen_not_i64(tcg_rd, tcg_rm);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, tcg_rm);
            } else {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
            }
        }
        return;
    }

    tcg_rm = read_cpu_reg(s, rm, sf);

    if (shift_amount) {
        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
    }

    tcg_rn = cpu_reg(s, rn);

    switch (opc | (invert << 2)) {
    case 0: /* AND */
    case 3: /* ANDS */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 1: /* ORR */
        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 2: /* EOR */
        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 4: /* BIC */
    case 7: /* BICS */
        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 5: /* ORN */
        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 6: /* EON */
        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    default:
        assert(FALSE);
        break;
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) {
        gen_logic_CC(sf, tcg_rd);
    }
}
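/* For example, "MOV x0, x1" assembles as "ORR x0, xzr, x1" (opc=1,
 * shift_type=0, shift_amount=0, rn=31) and so takes the special-cased
 * path above; "MVN w0, w1" is the same encoding with N (invert) set.
 */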
/* Add/subtract (extended register) */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Add/subtract (shifted register) */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Data-processing (3 source) */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Add/subtract (with carry) */
static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Conditional compare (immediate) */
static void disas_cc_imm(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Conditional compare (register) */
static void disas_cc_reg(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}
/* C3.5.6 Conditional select
 *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
 * +----+----+---+-----------------+------+------+-----+------+------+
 * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
 * +----+----+---+-----------------+------+------+-----+------+------+
 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, tcg_src;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (rd == 31) {
        /* silly no-op write; until we use movcond we must special-case
         * this to avoid a dead temporary across basic blocks.
         */
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (cond >= 0x0e) { /* condition "always" */
        tcg_src = read_cpu_reg(s, rn, sf);
        tcg_gen_mov_i64(tcg_rd, tcg_src);
    } else {
        /* OPTME: we could use movcond here, at the cost of duplicating
         * a lot of the arm_gen_test_cc() logic.
         */
        int label_match = gen_new_label();
        int label_continue = gen_new_label();

        arm_gen_test_cc(cond, label_match);
        /* nomatch: */
        tcg_src = cpu_reg(s, rm);

        if (else_inv && else_inc) {
            tcg_gen_neg_i64(tcg_rd, tcg_src);
        } else if (else_inv) {
            tcg_gen_not_i64(tcg_rd, tcg_src);
        } else if (else_inc) {
            tcg_gen_addi_i64(tcg_rd, tcg_src, 1);
        } else {
            tcg_gen_mov_i64(tcg_rd, tcg_src);
        }
        if (!sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
        tcg_gen_br(label_continue);
        /* match: */
        gen_set_label(label_match);
        tcg_src = read_cpu_reg(s, rn, sf);
        tcg_gen_mov_i64(tcg_rd, tcg_src);
        /* continue: */
        gen_set_label(label_continue);
    }
}
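/* For instance, the alias "CSET w0, eq" is "CSINC w0, wzr, wzr, ne":
 * the tested condition is NE, so the no-match (EQ) path computes
 * wzr + 1 = 1 while the match (NE) path moves wzr, i.e. 0.
 */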
static void handle_clz(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_clz64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_clz(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}

static void handle_cls(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_cls64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_cls32(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}

static void handle_rbit(DisasContext *s, unsigned int sf,
                        unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_rbit64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_rbit(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}
/* C5.6.149 REV with sf==1, opcode==3 ("REV64") */
static void handle_rev64(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    if (!sf) {
        unallocated_encoding(s);
        return;
    }
    tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
}

/* C5.6.149 REV with sf==0, opcode==2
 * C5.6.151 REV32 (sf==1, opcode==2)
 */
static void handle_rev32(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);

    if (sf) {
        TCGv_i64 tcg_tmp = tcg_temp_new_i64();
        TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

        /* bswap32_i64 requires zero high word */
        tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
        tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
        tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
        tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);

        tcg_temp_free_i64(tcg_tmp);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
        tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
    }
}
/* C5.6.150 REV16 (opcode==1) */
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    tcg_gen_andi_i64(tcg_tmp, tcg_rn, 0xffff);
    tcg_gen_bswap16_i64(tcg_rd, tcg_tmp);

    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 16);
    tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
    tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 16, 16);

    if (sf) {
        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
        tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
        tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 32, 16);

        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 48);
        tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 48, 16);
    }

    tcg_temp_free_i64(tcg_tmp);
}
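/* For example, "REV16 x0, x1" with x1 = 0x0123456789abcdef byte-swaps
 * each 16 bit lane independently, producing 0x23016745ab89efcd.
 */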
/* C3.5.7 Data-processing (1 source)
 *   31  30  29  28             21 20     16 15    10 9    5 4    0
 * +----+---+---+-----------------+---------+--------+------+------+
 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+---------+--------+------+------+
 */
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opcode, rn, rd;

    if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 0: /* RBIT */
        handle_rbit(s, sf, rn, rd);
        break;
    case 1: /* REV16 */
        handle_rev16(s, sf, rn, rd);
        break;
    case 2: /* REV32 */
        handle_rev32(s, sf, rn, rd);
        break;
    case 3: /* REV64 */
        handle_rev64(s, sf, rn, rd);
        break;
    case 4: /* CLZ */
        handle_clz(s, sf, rn, rd);
        break;
    case 5: /* CLS */
        handle_cls(s, sf, rn, rd);
        break;
    }
}
static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
                       unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_n, tcg_m, tcg_rd;
    tcg_rd = cpu_reg(s, rd);

    if (!sf && is_signed) {
        tcg_n = new_tmp_a64(s);
        tcg_m = new_tmp_a64(s);
        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
    } else {
        tcg_n = read_cpu_reg(s, rn, sf);
        tcg_m = read_cpu_reg(s, rm, sf);
    }

    if (is_signed) {
        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
    } else {
        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}

/* C5.6.115 LSLV, C5.6.118 LSRV, C5.6.17 ASRV, C5.6.154 RORV */
static void handle_shift_reg(DisasContext *s,
                             enum a64_shift_type shift_type, unsigned int sf,
                             unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_shift = tcg_temp_new_i64();
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
    shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
    tcg_temp_free_i64(tcg_shift);
}
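/* Note the andi above: "LSLV w0, w1, w2" with w2 = 33 shifts by
 * 33 & 31 = 1, the architected modulo-datasize behaviour for variable
 * shifts, which also guarantees shift_reg() never sees an out of range
 * amount.
 */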
/* C3.5.8 Data-processing (2 source)
 *   31  30  29  28             21 20  16 15    10 9    5 4    0
 * +----+---+---+-----------------+------+--------+------+------+
 * | sf | 0 | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+------+--------+------+------+
 */
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, rm, opcode, rn, rd;
    sf = extract32(insn, 31, 1);
    rm = extract32(insn, 16, 5);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 2: /* UDIV */
        handle_div(s, false, sf, rm, rn, rd);
        break;
    case 3: /* SDIV */
        handle_div(s, true, sf, rm, rn, rd);
        break;
    case 8: /* LSLV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
        break;
    case 9: /* LSRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
        break;
    case 10: /* ASRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
        break;
    case 11: /* RORV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
        break;
    case 16:
    case 17:
    case 18:
    case 19:
    case 20:
    case 21:
    case 22:
    case 23: /* CRC32 */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* C3.5 Data processing - register */
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 5)) {
    case 0x0a: /* Logical (shifted register) */
        disas_logic_reg(s, insn);
        break;
    case 0x0b: /* Add/subtract */
        if (insn & (1 << 21)) { /* (extended register) */
            disas_add_sub_ext_reg(s, insn);
        } else {
            disas_add_sub_reg(s, insn);
        }
        break;
    case 0x1b: /* Data-processing (3 source) */
        disas_data_proc_3src(s, insn);
        break;
    case 0x1a:
        switch (extract32(insn, 21, 3)) {
        case 0x0: /* Add/subtract (with carry) */
            disas_adc_sbc(s, insn);
            break;
        case 0x2: /* Conditional compare */
            if (insn & (1 << 11)) { /* (immediate) */
                disas_cc_imm(s, insn);
            } else {            /* (register) */
                disas_cc_reg(s, insn);
            }
            break;
        case 0x4: /* Conditional select */
            disas_cond_select(s, insn);
            break;
        case 0x6: /* Data-processing */
            if (insn & (1 << 30)) { /* (1 source) */
                disas_data_proc_1src(s, insn);
            } else {            /* (2 source) */
                disas_data_proc_2src(s, insn);
            }
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}
/* C3.1 A64 instruction index by encoding */
static void disas_a64_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t insn;

    insn = arm_ldl_code(env, s->pc, s->bswap_code);
    s->insn = insn;
    s->pc += 4;

    switch (extract32(insn, 25, 4)) {
    case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
        unallocated_encoding(s);
        break;
    case 0x8: case 0x9: /* Data processing - immediate */
        disas_data_proc_imm(s, insn);
        break;
    case 0xa: case 0xb: /* Branch, exception generation and system insns */
        disas_b_exc_sys(s, insn);
        break;
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xe:      /* Loads and stores */
        disas_ldst(s, insn);
        break;
    case 0x5:
    case 0xd:      /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf:      /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        assert(FALSE); /* all 15 cases should be handled above */
        break;
    }

    /* if we allocated any temporaries, free them here */
    free_tmp_a64(s);
}
void gen_intermediate_code_internal_a64(ARMCPU *cpu,
                                        TranslationBlock *tb,
                                        bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 1;
    dc->thumb = 0;
    dc->bswap_code = 0;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
#if !defined(CONFIG_USER_ONLY)
    dc->user = 0;
#endif
    dc->vfp_enabled = 0;
    dc->vec_len = 0;
    dc->vec_stride = 0;

    init_tmp_a64_array(dc);

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start();

    tcg_clear_temp_count();

    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB. */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }

        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        disas_a64_insn(env, dc);

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached. This
         * ensures prefetch aborts occur at the right place.
         */
        num_insns++;
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    if (unlikely(cs->singlestep_enabled) && dc->is_jmp != DISAS_EXC) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        assert(dc->is_jmp != DISAS_TB_JUMP);
        if (dc->is_jmp != DISAS_JUMP) {
            gen_a64_set_pc_im(dc->pc);
        }
        gen_exception(EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
        case DISAS_EXC:
        case DISAS_SWI:
            break;
        case DISAS_WFI:
            /* This is a special case because we don't want to just halt the
             * CPU if trying to debug across a WFI.
             */
            gen_helper_wfi(cpu_env);
            break;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}