target-arm: A64: add support for 2-src data processing and DIV
[qemu/kevin.git] / target-arm / translate-a64.c (blob a0bc876b8d4c4091b5f37157096016c1b621b493)
/*
 * AArch64 translation
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "translate.h"
#include "qemu/host-utils.h"

#include "exec/gen-icount.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;
static TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};

/* initialize TCG globals. */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
}

void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;

    cpu_fprintf(f, "PC=%016"PRIx64" SP=%016"PRIx64"\n",
                env->pc, env->xregs[31]);
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
    cpu_fprintf(f, "PSTATE=%08x (flags %c%c%c%c)\n",
                psr,
                psr & PSTATE_N ? 'N' : '-',
                psr & PSTATE_Z ? 'Z' : '-',
                psr & PSTATE_C ? 'C' : '-',
                psr & PSTATE_V ? 'V' : '-');
    cpu_fprintf(f, "\n");
}

void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_EXC;
}

static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    /* No direct tb linking with singlestep or deterministic io */
    if (s->singlestep_enabled || (s->tb->cflags & CF_LAST_IO)) {
        return false;
    }

    /* Only link tbs from inside the same guest page */
    if ((s->tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }

    return true;
}

static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->singlestep_enabled) {
            gen_exception(EXCP_DEBUG);
        }
        tcg_gen_exit_tb(0);
        s->is_jmp = DISAS_JUMP;
    }
}

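/* A note on the two exit paths above: tcg_gen_exit_tb((tcg_target_long)tb + n)
 * returns to the execution loop with the current TB pointer and the jump slot
 * index n encoded together, which lets the loop patch slot n of this TB so
 * that later executions jump straight to the next TB.  Callers in this file
 * use slot 0 for the fall-through/not-taken path and slot 1 for the taken
 * branch, so a conditional branch can chain both successors; exit_tb(0) gives
 * up chaining and forces a hash-table lookup of the next TB instead.
 */
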
static void unallocated_encoding(DisasContext *s)
{
    gen_exception_insn(s, 4, EXCP_UDEF);
}

#define unsupported_encoding(s, insn)                                    \
    do {                                                                 \
        qemu_log_mask(LOG_UNIMP,                                         \
                      "%s:%d: unsupported instruction encoding 0x%08x "  \
                      "at pc=%016" PRIx64 "\n",                          \
                      __FILE__, __LINE__, insn, s->pc - 4);              \
        unallocated_encoding(s);                                         \
    } while (0);

static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    int i;
    for (i = 0; i < ARRAY_SIZE(s->tmp_a64); i++) {
        TCGV_UNUSED_I64(s->tmp_a64[i]);
    }
#endif
    s->tmp_a64_count = 0;
}

static void free_tmp_a64(DisasContext *s)
{
    int i;
    for (i = 0; i < s->tmp_a64_count; i++) {
        tcg_temp_free_i64(s->tmp_a64[i]);
    }
    init_tmp_a64_array(s);
}

static TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}

static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}

static TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        return new_tmp_a64_zero(s);
    } else {
        return cpu_X[reg];
    }
}

/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

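/* The condition flag fields declared above follow the same storage
 * convention as the 32-bit translator: cpu_ZF holds a value that is zero
 * if and only if the Z flag is set, cpu_NF holds the N flag in bit 31,
 * cpu_CF holds 0 or 1, and cpu_VF holds the V flag in bit 31.
 * gen_set_NZ64() below relies on this, which is why it truncates the high
 * half of a 64-bit result into cpu_NF rather than copying the whole value.
 */
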
/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    TCGv_i64 flag = tcg_temp_new_i64();

    tcg_gen_setcondi_i64(TCG_COND_NE, flag, result, 0);
    tcg_gen_trunc_i64_i32(cpu_ZF, flag);
    tcg_gen_shri_i64(flag, result, 32);
    tcg_gen_trunc_i64_i32(cpu_NF, flag);
    tcg_temp_free_i64(flag);
}

/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_trunc_i64_i32(cpu_ZF, result);
        tcg_gen_trunc_i64_i32(cpu_NF, result);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}

/*
 * the instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter 3 (C3)
 * of the ARM Architecture Reference Manual (DDI0487A_a)
 */

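/* Note that by the time the branch decoders below run, s->pc has already
 * been advanced past the instruction being decoded, so address computations
 * of the form "s->pc + offset * 4 - 4" make the offset relative to the
 * address of the branch instruction itself, as the architecture requires.
 */
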
/* C3.2.7 Unconditional branch (immediate)
 *   31  30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;

    if (insn & (1 << 31)) {
        /* C5.6.26 BL Branch with link */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
    }

    /* C5.6.20 B Branch / C5.6.26 BL Branch with link */
    gen_goto_tb(s, 0, addr);
}

/* C3.2.1 Compare & branch (immediate)
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |        imm19        |   Rt   |
 * +----+-------------+----+---------------------+--------+
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    int label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}

/* C3.2.5 Test & branch (immediate)
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    int label_match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}

/* C3.2.2 / C5.6.19 Conditional branch (immediate)
 *  31           25  24  23                  5  4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |        imm19        | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
    cond = extract32(insn, 0, 4);

    if (cond < 0x0e) {
        /* genuinely conditional branches */
        int label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->pc);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}

/* C5.6.68 HINT */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0: /* NOP */
        return;
    case 1: /* YIELD */
    case 2: /* WFE */
    case 3: /* WFI */
    case 4: /* SEV */
    case 5: /* SEVL */
        /* we treat all as NOP at least for now */
        return;
    default:
        /* default specified as NOP equivalent */
        return;
    }
}

/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        unsupported_encoding(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
    case 6: /* ISB */
        /* We don't emulate caches so barriers are no-ops */
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}

/* C5.6.130 MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsupported_encoding(s, insn);
}

/* C5.6.204 SYS */
static void handle_sys(DisasContext *s, uint32_t insn, unsigned int l,
                       unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    unsupported_encoding(s, insn);
}

/* C5.6.129 MRS - move from system register */
static void handle_mrs(DisasContext *s, uint32_t insn, unsigned int op0,
                       unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    unsupported_encoding(s, insn);
}

/* C5.6.131 MSR (register) - move to system register */
static void handle_msr(DisasContext *s, uint32_t insn, unsigned int op0,
                       unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    unsupported_encoding(s, insn);
}

/* C3.2.4 System
 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 */
static void disas_system(DisasContext *s, uint32_t insn)
{
    unsigned int l, op0, op1, crn, crm, op2, rt;
    l = extract32(insn, 21, 1);
    op0 = extract32(insn, 19, 2);
    op1 = extract32(insn, 16, 3);
    crn = extract32(insn, 12, 4);
    crm = extract32(insn, 8, 4);
    op2 = extract32(insn, 5, 3);
    rt = extract32(insn, 0, 5);

    if (op0 == 0) {
        if (l || rt != 31) {
            unallocated_encoding(s);
            return;
        }
        switch (crn) {
        case 2: /* C5.6.68 HINT */
            handle_hint(s, insn, op1, op2, crm);
            break;
        case 3: /* CLREX, DSB, DMB, ISB */
            handle_sync(s, insn, op1, op2, crm);
            break;
        case 4: /* C5.6.130 MSR (immediate) */
            handle_msr_i(s, insn, op1, op2, crm);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    }

    if (op0 == 1) {
        /* C5.6.204 SYS */
        handle_sys(s, insn, l, op1, op2, crn, crm, rt);
    } else if (l) { /* op0 > 1 */
        /* C5.6.129 MRS - move from system register */
        handle_mrs(s, insn, op0, op1, op2, crn, crm, rt);
    } else {
        /* C5.6.131 MSR (register) - move to system register */
        handle_msr(s, insn, op0, op1, op2, crn, crm, rt);
    }
}

/* Exception generation */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* C3.2.7 Unconditional branch (register)
 *  31           25 24   21 20   16 15   10 9    5 4     0
 * +---------------+-------+-------+-------+------+-------+
 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
 * +---------------+-------+-------+-------+------+-------+
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* BR */
    case 2: /* RET */
        break;
    case 1: /* BLR */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
        break;
    case 4: /* ERET */
    case 5: /* DRPS */
        if (rn != 0x1f) {
            unallocated_encoding(s);
        } else {
            unsupported_encoding(s, insn);
        }
        return;
    default:
        unallocated_encoding(s);
        return;
    }

    tcg_gen_mov_i64(cpu_pc, cpu_reg(s, rn));
    s->is_jmp = DISAS_JUMP;
}

/* C3.2 Branches, exception generating and system instructions */
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 7)) {
    case 0x0a: case 0x0b:
    case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
        disas_uncond_b_imm(s, insn);
        break;
    case 0x1a: case 0x5a: /* Compare & branch (immediate) */
        disas_comp_b_imm(s, insn);
        break;
    case 0x1b: case 0x5b: /* Test & branch (immediate) */
        disas_test_b_imm(s, insn);
        break;
    case 0x2a: /* Conditional branch (immediate) */
        disas_cond_b_imm(s, insn);
        break;
    case 0x6a: /* Exception generation / System */
        if (insn & (1 << 24)) {
            disas_system(s, insn);
        } else {
            disas_exc(s, insn);
        }
        break;
    case 0x6b: /* Unconditional branch (register) */
        disas_uncond_b_reg(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* Load/store exclusive */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Load register (literal) */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Load/store pair (all forms) */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Load/store register (all forms) */
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* AdvSIMD load/store multiple structures */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* AdvSIMD load/store single structure */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* C3.3 Loads and stores */
static void disas_ldst(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 6)) {
    case 0x08: /* Load/store exclusive */
        disas_ldst_excl(s, insn);
        break;
    case 0x18: case 0x1c: /* Load register (literal) */
        disas_ld_lit(s, insn);
        break;
    case 0x28: case 0x29:
    case 0x2c: case 0x2d: /* Load/store pair (all forms) */
        disas_ldst_pair(s, insn);
        break;
    case 0x38: case 0x39:
    case 0x3c: case 0x3d: /* Load/store register (all forms) */
        disas_ldst_reg(s, insn);
        break;
    case 0x0c: /* AdvSIMD load/store multiple structures */
        disas_ldst_multiple_struct(s, insn);
        break;
    case 0x0d: /* AdvSIMD load/store single structure */
        disas_ldst_single_struct(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* C3.4.6 PC-rel. addressing
 *   31  30   29 28       24 23                5 4    0
 * +----+-------+-----------+-------------------+------+
 * | op | immlo | 1 0 0 0 0 |       immhi       |  Rd  |
 * +----+-------+-----------+-------------------+------+
 */
static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
{
    unsigned int page, rd;
    uint64_t base;
    int64_t offset;

    page = extract32(insn, 31, 1);
    /* SignExtend(immhi:immlo) -> offset */
    offset = ((int64_t)sextract32(insn, 5, 19) << 2) | extract32(insn, 29, 2);
    rd = extract32(insn, 0, 5);
    base = s->pc - 4;

    if (page) {
        /* ADRP (page based) */
        base &= ~0xfff;
        offset <<= 12;
    }

    tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
}

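/* Worked example: with the instruction at 0x0000000000010234, "ADR Xd, #+8"
 * yields 0x1023c (base is the instruction address), whereas "ADRP Xd, #+1"
 * clears the low 12 bits of the base and scales the offset by 4KB, giving
 * 0x0000000000011000.
 */
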
/* Add/subtract (immediate) */
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Logical (immediate) */
static void disas_logic_imm(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Move wide (immediate) */
static void disas_movw_imm(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Bitfield */
static void disas_bitfield(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* C3.4.3 Extract
 *   31  30  29 28         23 22  21  20  16 15    10 9    5 4    0
 * +----+------+-------------+---+----+------+--------+------+------+
 * | sf | op21 | 1 0 0 1 1 1 | N | o0 |  Rm  |  imms  |  Rn  |  Rd  |
 * +----+------+-------------+---+----+------+--------+------+------+
 */
static void disas_extract(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;

    sf = extract32(insn, 31, 1);
    n = extract32(insn, 22, 1);
    rm = extract32(insn, 16, 5);
    imm = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    op21 = extract32(insn, 29, 2);
    op0 = extract32(insn, 21, 1);
    bitsize = sf ? 64 : 32;

    if (sf != n || op21 || op0 || imm >= bitsize) {
        unallocated_encoding(s);
    } else {
        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

        tcg_rd = cpu_reg(s, rd);

        if (imm) {
            /* OPTME: we can special case rm==rn as a rotate */
            tcg_rm = read_cpu_reg(s, rm, sf);
            tcg_rn = read_cpu_reg(s, rn, sf);
            tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
            tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
            tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
             * so an extract from bit 0 is a special case.
             */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
            } else {
                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
            }
        }
    }
}

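/* In other words, EXTR Rd, Rn, Rm, #imm extracts a register-sized field
 * starting at bit <imm> of the concatenation Rn:Rm: the low part comes from
 * Rm shifted right by imm and the high part from Rn shifted left by
 * (bitsize - imm).  With Rn == Rm this degenerates to ROR Rd, Rn, #imm,
 * which is why the OPTME note above suggests special-casing it.
 */
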
/* C3.4 Data processing - immediate */
static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 23, 6)) {
    case 0x20: case 0x21: /* PC-rel. addressing */
        disas_pc_rel_adr(s, insn);
        break;
    case 0x22: case 0x23: /* Add/subtract (immediate) */
        disas_add_sub_imm(s, insn);
        break;
    case 0x24: /* Logical (immediate) */
        disas_logic_imm(s, insn);
        break;
    case 0x25: /* Move wide (immediate) */
        disas_movw_imm(s, insn);
        break;
    case 0x26: /* Bitfield */
        disas_bitfield(s, insn);
        break;
    case 0x27: /* Extract */
        disas_extract(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* Shift a TCGv src by TCGv shift_amount, put result in dst.
 * Note that it is the caller's responsibility to ensure that the
 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
 * mandated semantics for out of range shifts.
 */
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
{
    switch (shift_type) {
    case A64_SHIFT_TYPE_LSL:
        tcg_gen_shl_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_LSR:
        tcg_gen_shr_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_ASR:
        if (!sf) {
            tcg_gen_ext32s_i64(dst, src);
        }
        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
        break;
    case A64_SHIFT_TYPE_ROR:
        if (sf) {
            tcg_gen_rotr_i64(dst, src, shift_amount);
        } else {
            TCGv_i32 t0, t1;
            t0 = tcg_temp_new_i32();
            t1 = tcg_temp_new_i32();
            tcg_gen_trunc_i64_i32(t0, src);
            tcg_gen_trunc_i64_i32(t1, shift_amount);
            tcg_gen_rotr_i32(t0, t0, t1);
            tcg_gen_extu_i32_i64(dst, t0);
            tcg_temp_free_i32(t0);
            tcg_temp_free_i32(t1);
        }
        break;
    default:
        assert(FALSE); /* all shift types should be handled */
        break;
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(dst, dst);
    }
}

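/* Two details of shift_reg() worth noting: for a 32-bit ASR the source is
 * sign-extended into dst first so the 64-bit arithmetic shift sees the
 * correct sign bit, and a 32-bit ROR is done with 32-bit TCG ops because
 * rotating the zero-extended 64-bit value would pull zero bits into the
 * high end of the 32-bit result.  The final ext32u then re-narrows every
 * !sf result.
 */
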
/* Shift a TCGv src by immediate, put result in dst.
 * The shift amount must be in range (this should always be true as the
 * relevant instructions will UNDEF on bad shift immediates).
 */
static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
                          enum a64_shift_type shift_type, unsigned int shift_i)
{
    assert(shift_i < (sf ? 64 : 32));

    if (shift_i == 0) {
        tcg_gen_mov_i64(dst, src);
    } else {
        TCGv_i64 shift_const;

        shift_const = tcg_const_i64(shift_i);
        shift_reg(dst, src, sf, shift_type, shift_const);
        tcg_temp_free_i64(shift_const);
    }
}

/* C3.5.10 Logical (shifted register)
 *   31  30 29 28       24 23   22 21  20  16 15    10 9    5 4    0
 * +----+-----+-----------+-------+---+------+--------+------+------+
 * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
 * +----+-----+-----------+-------+---+------+--------+------+------+
 */
static void disas_logic_reg(DisasContext *s, uint32_t insn)
{
    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    shift_type = extract32(insn, 22, 2);
    invert = extract32(insn, 21, 1);
    rm = extract32(insn, 16, 5);
    shift_amount = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && (shift_amount & (1 << 5))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
         * register-register MOV and MVN, so it is worth special casing.
         */
        tcg_rm = cpu_reg(s, rm);
        if (invert) {
            tcg_gen_not_i64(tcg_rd, tcg_rm);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, tcg_rm);
            } else {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
            }
        }
        return;
    }

    tcg_rm = read_cpu_reg(s, rm, sf);

    if (shift_amount) {
        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
    }

    tcg_rn = cpu_reg(s, rn);

    switch (opc | (invert << 2)) {
    case 0: /* AND */
    case 3: /* ANDS */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 1: /* ORR */
        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 2: /* EOR */
        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 4: /* BIC */
    case 7: /* BICS */
        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 5: /* ORN */
        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 6: /* EON */
        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    default:
        assert(FALSE);
        break;
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) {
        gen_logic_CC(sf, tcg_rd);
    }
}

/* Add/subtract (extended register) */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Add/subtract (shifted register) */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Data-processing (3 source) */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Add/subtract (with carry) */
static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Conditional compare (immediate) */
static void disas_cc_imm(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* Conditional compare (register) */
static void disas_cc_reg(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* C3.5.6 Conditional select
 *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
 * +----+----+---+-----------------+------+------+-----+------+------+
 * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
 * +----+----+---+-----------------+------+------+-----+------+------+
 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, tcg_src;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (rd == 31) {
        /* silly no-op write; until we use movcond we must special-case
         * this to avoid a dead temporary across basic blocks.
         */
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (cond >= 0x0e) { /* condition "always" */
        tcg_src = read_cpu_reg(s, rn, sf);
        tcg_gen_mov_i64(tcg_rd, tcg_src);
    } else {
        /* OPTME: we could use movcond here, at the cost of duplicating
         * a lot of the arm_gen_test_cc() logic.
         */
        int label_match = gen_new_label();
        int label_continue = gen_new_label();

        arm_gen_test_cc(cond, label_match);
        /* nomatch: */
        tcg_src = cpu_reg(s, rm);

        if (else_inv && else_inc) {
            tcg_gen_neg_i64(tcg_rd, tcg_src);
        } else if (else_inv) {
            tcg_gen_not_i64(tcg_rd, tcg_src);
        } else if (else_inc) {
            tcg_gen_addi_i64(tcg_rd, tcg_src, 1);
        } else {
            tcg_gen_mov_i64(tcg_rd, tcg_src);
        }
        if (!sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
        tcg_gen_br(label_continue);
        /* match: */
        gen_set_label(label_match);
        tcg_src = read_cpu_reg(s, rn, sf);
        tcg_gen_mov_i64(tcg_rd, tcg_src);
        /* continue: */
        gen_set_label(label_continue);
    }
}

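/* The else_inv/else_inc pair decoded above distinguishes the four members
 * of this group: CSEL (neither set), CSINC (else_inc), CSINV (else_inv)
 * and CSNEG (both), which is why the no-match path applies increment, not,
 * or negate to Rm before the result is written back.
 */
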
/* Data-processing (1 source) */
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
                       unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_n, tcg_m, tcg_rd;
    tcg_rd = cpu_reg(s, rd);

    if (!sf && is_signed) {
        tcg_n = new_tmp_a64(s);
        tcg_m = new_tmp_a64(s);
        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
    } else {
        tcg_n = read_cpu_reg(s, rn, sf);
        tcg_m = read_cpu_reg(s, rm, sf);
    }

    if (is_signed) {
        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
    } else {
        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}

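/* For a 32-bit SDIV the operands are sign-extended to 64 bits so that the
 * 64-bit helper computes the correct signed quotient, and the result is
 * zero-extended back down afterwards.  The division helpers are expected to
 * follow the AArch64 rules for the awkward cases (division by zero yields 0,
 * and the INT_MIN / -1 overflow case wraps) rather than trapping.
 */
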
/* C3.5.8 Data-processing (2 source)
 *   31 30  29 28             21 20  16 15    10 9    5 4    0
 * +----+---+---+-----------------+------+--------+------+------+
 * | sf | 0 | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+------+--------+------+------+
 */
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, rm, opcode, rn, rd;
    sf = extract32(insn, 31, 1);
    rm = extract32(insn, 16, 5);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 2: /* UDIV */
        handle_div(s, false, sf, rm, rn, rd);
        break;
    case 3: /* SDIV */
        handle_div(s, true, sf, rm, rn, rd);
        break;
    case 8: /* LSLV */
    case 9: /* LSRV */
    case 10: /* ASRV */
    case 11: /* RORV */
    case 16:
    case 17:
    case 18:
    case 19:
    case 20:
    case 21:
    case 22:
    case 23: /* CRC32 */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

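/* Opcodes 8-11 above are the variable shift group (LSLV/LSRV/ASRV/RORV) and
 * opcodes 16-23 are the CRC32/CRC32C byte, halfword, word and doubleword
 * forms; both groups are recognised here but still routed to
 * unsupported_encoding() until they are implemented.
 */
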
/* C3.5 Data processing - register */
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 5)) {
    case 0x0a: /* Logical (shifted register) */
        disas_logic_reg(s, insn);
        break;
    case 0x0b: /* Add/subtract */
        if (insn & (1 << 21)) { /* (extended register) */
            disas_add_sub_ext_reg(s, insn);
        } else {
            disas_add_sub_reg(s, insn);
        }
        break;
    case 0x1b: /* Data-processing (3 source) */
        disas_data_proc_3src(s, insn);
        break;
    case 0x1a:
        switch (extract32(insn, 21, 3)) {
        case 0x0: /* Add/subtract (with carry) */
            disas_adc_sbc(s, insn);
            break;
        case 0x2: /* Conditional compare */
            if (insn & (1 << 11)) { /* (immediate) */
                disas_cc_imm(s, insn);
            } else {             /* (register) */
                disas_cc_reg(s, insn);
            }
            break;
        case 0x4: /* Conditional select */
            disas_cond_select(s, insn);
            break;
        case 0x6: /* Data-processing */
            if (insn & (1 << 30)) { /* (1 source) */
                disas_data_proc_1src(s, insn);
            } else {            /* (2 source) */
                disas_data_proc_2src(s, insn);
            }
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}

/* C3.1 A64 instruction index by encoding */
static void disas_a64_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t insn;

    insn = arm_ldl_code(env, s->pc, s->bswap_code);
    s->insn = insn;
    s->pc += 4;

    switch (extract32(insn, 25, 4)) {
    case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
        unallocated_encoding(s);
        break;
    case 0x8: case 0x9: /* Data processing - immediate */
        disas_data_proc_imm(s, insn);
        break;
    case 0xa: case 0xb: /* Branch, exception generation and system insns */
        disas_b_exc_sys(s, insn);
        break;
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xe: /* Loads and stores */
        disas_ldst(s, insn);
        break;
    case 0x5:
    case 0xd: /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf: /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        assert(FALSE); /* all 15 cases should be handled above */
        break;
    }

    /* if we allocated any temporaries, free them here */
    free_tmp_a64(s);
}

void gen_intermediate_code_internal_a64(ARMCPU *cpu,
                                        TranslationBlock *tb,
                                        bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 1;
    dc->thumb = 0;
    dc->bswap_code = 0;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
#if !defined(CONFIG_USER_ONLY)
    dc->user = 0;
#endif
    dc->vfp_enabled = 0;
    dc->vec_len = 0;
    dc->vec_stride = 0;

    init_tmp_a64_array(dc);

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start();

    tcg_clear_temp_count();

    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB. */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }

        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        disas_a64_insn(env, dc);

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached. This
         * ensures prefetch aborts occur at the right place.
         */
        num_insns++;
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    if (unlikely(cs->singlestep_enabled) && dc->is_jmp != DISAS_EXC) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        assert(dc->is_jmp != DISAS_TB_JUMP);
        if (dc->is_jmp != DISAS_JUMP) {
            gen_a64_set_pc_im(dc->pc);
        }
        gen_exception(EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
        case DISAS_EXC:
        case DISAS_SWI:
            break;
        case DISAS_WFI:
            /* This is a special case because we don't want to just halt the CPU
             * if trying to debug across a WFI.
             */
            gen_helper_wfi(cpu_env);
            break;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}