target-arm: A64: add support for conditional compare insns
[qemu/ar7.git] / target-arm / translate-a64.c
blob538d69e634b0df09e3951ecb9fe1a679b84463b6
1 /*
2 * AArch64 translation
4 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
25 #include "cpu.h"
26 #include "tcg-op.h"
27 #include "qemu/log.h"
28 #include "translate.h"
29 #include "qemu/host-utils.h"
31 #include "exec/gen-icount.h"
33 #include "helper.h"
34 #define GEN_HELPER 1
35 #include "helper.h"
/* TCG globals caching the AArch64 CPU state: general purpose registers,
 * the program counter, and the NZCV condition flag fields.
 */
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Flag cache (same convention as the AArch32 translator, see gen_get_nzcv):
 * NF/VF hold the flag value in bit 31, CF is 0/1, and Z is set iff ZF == 0.
 */
static TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
/* Names used when registering the TCG globals for cpu_X[]; note x30 is
 * shown as "lr" and index 31 is the stack pointer (see cpu_reg_sp()).
 */
static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};
/* Shift types for shifted-register operands; the values match the 2-bit
 * "shift" field in the instruction encodings.
 */
enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};
/* initialize TCG globals. */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    /* Flag fields share storage with the AArch32 translator's flags */
    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
}
/* Dump the AArch64 register state (PC, SP, X0-X30, PSTATE flags) to @f,
 * four X registers per output line. Used by the -d cpu debug logging.
 */
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;

    cpu_fprintf(f, "PC=%016"PRIx64" SP=%016"PRIx64"\n",
            env->pc, env->xregs[31]);
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
    cpu_fprintf(f, "PSTATE=%08x (flags %c%c%c%c)\n",
                psr,
                psr & PSTATE_N ? 'N' : '-',
                psr & PSTATE_Z ? 'Z' : '-',
                psr & PSTATE_C ? 'C' : '-',
                psr & PSTATE_V ? 'V' : '-');
    cpu_fprintf(f, "\n");
}
/* Return the softmmu memory index to use for guest loads/stores.
 * For user-only emulation there is no distinction; for system emulation
 * it depends on whether we are in user (EL0) mode.
 */
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return 1;
#else
    return s->user;
#endif
}
/* Emit TCG to set the PC to the immediate value @val. */
void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}
116 static void gen_exception(int excp)
118 TCGv_i32 tmp = tcg_temp_new_i32();
119 tcg_gen_movi_i32(tmp, excp);
120 gen_helper_exception(cpu_env, tmp);
121 tcg_temp_free_i32(tmp);
/* Raise exception @excp for the current instruction: rewind the PC by
 * @offset bytes (s->pc has already been advanced past the insn) so the
 * exception is reported at the right address, then end the TB.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_EXC;
}
131 static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
133 /* No direct tb linking with singlestep or deterministic io */
134 if (s->singlestep_enabled || (s->tb->cflags & CF_LAST_IO)) {
135 return false;
138 /* Only link tbs from inside the same guest page */
139 if ((s->tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
140 return false;
143 return true;
/* Emit a jump to @dest, either as a chained goto_tb (fast path) or as a
 * plain exit back to the main loop. Note the goto_tb op must be emitted
 * before the PC update for the chaining machinery to patch correctly.
 */
static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        /* encode (tb pointer | slot index) as the exit value for chaining */
        tcg_gen_exit_tb((tcg_target_long)tb + n);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->singlestep_enabled) {
            gen_exception(EXCP_DEBUG);
        }
        tcg_gen_exit_tb(0);
        s->is_jmp = DISAS_JUMP;
    }
}
/* Raise UNDEF for an unallocated (architecturally invalid) encoding;
 * offset 4 rewinds the PC back to the offending instruction.
 */
static void unallocated_encoding(DisasContext *s)
{
    gen_exception_insn(s, 4, EXCP_UDEF);
}
/* Diagnose an instruction we decode but do not yet implement: log it via
 * -d unimp, then behave like unallocated_encoding() (raise UNDEF).
 * No semicolon after "while (0)": the do/while idiom exists precisely so
 * the caller's trailing ';' completes the statement, keeping the macro
 * legal in unbraced if/else bodies.
 */
#define unsupported_encoding(s, insn)                                    \
    do {                                                                 \
        qemu_log_mask(LOG_UNIMP,                                         \
                      "%s:%d: unsupported instruction encoding 0x%08x "  \
                      "at pc=%016" PRIx64 "\n",                          \
                      __FILE__, __LINE__, insn, s->pc - 4);              \
        unallocated_encoding(s);                                         \
    } while (0)
/* Reset the per-instruction auto-freed temporary pool. With debug TCG,
 * also poison the slots so stale use is caught.
 */
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    int i;
    for (i = 0; i < ARRAY_SIZE(s->tmp_a64); i++) {
        TCGV_UNUSED_I64(s->tmp_a64[i]);
    }
#endif
    s->tmp_a64_count = 0;
}
191 static void free_tmp_a64(DisasContext *s)
193 int i;
194 for (i = 0; i < s->tmp_a64_count; i++) {
195 tcg_temp_free_i64(s->tmp_a64[i]);
197 init_tmp_a64_array(s);
/* Allocate a new i64 temporary that is automatically freed at the end of
 * the instruction (by free_tmp_a64()); asserts if the pool is exhausted.
 */
static TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}
/* Allocate an auto-freed i64 temporary preloaded with zero (used to
 * materialize the zero register, XZR/WZR).
 */
static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}
214 * Register access functions
216 * These functions are used for directly accessing a register in where
217 * changes to the final register value are likely to be made. If you
218 * need to use a register for temporary calculation (e.g. index type
219 * operations) use the read_* form.
221 * B1.2.1 Register mappings
223 * In instruction register encoding 31 can refer to ZR (zero register) or
224 * the SP (stack pointer) depending on context. In QEMU's case we map SP
225 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
226 * This is the point of the _sp forms.
228 static TCGv_i64 cpu_reg(DisasContext *s, int reg)
230 if (reg == 31) {
231 return new_tmp_a64_zero(s);
232 } else {
233 return cpu_X[reg];
/* register access for when 31 == SP */
static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}
243 /* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
244 * representing the register contents. This TCGv is an auto-freed
245 * temporary so it need not be explicitly freed, and may be modified.
247 static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
249 TCGv_i64 v = new_tmp_a64(s);
250 if (reg != 31) {
251 if (sf) {
252 tcg_gen_mov_i64(v, cpu_X[reg]);
253 } else {
254 tcg_gen_ext32u_i64(v, cpu_X[reg]);
256 } else {
257 tcg_gen_movi_i64(v, 0);
259 return v;
262 static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
264 TCGv_i64 v = new_tmp_a64(s);
265 if (sf) {
266 tcg_gen_mov_i64(v, cpu_X[reg]);
267 } else {
268 tcg_gen_ext32u_i64(v, cpu_X[reg]);
270 return v;
/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    TCGv_i64 flag = tcg_temp_new_i64();

    /* Z flag is "set" when cpu_ZF == 0, so store (result != 0) into ZF */
    tcg_gen_setcondi_i64(TCG_COND_NE, flag, result, 0);
    tcg_gen_trunc_i64_i32(cpu_ZF, flag);
    /* N flag lives in bit 31 of cpu_NF: shift bit 63 of the result down */
    tcg_gen_shri_i64(flag, result, 32);
    tcg_gen_trunc_i64_i32(cpu_NF, flag);
    tcg_temp_free_i64(flag);
}
/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        /* 32-bit: low word serves both as the ZF value and the NF sign */
        tcg_gen_trunc_i64_i32(cpu_ZF, result);
        tcg_gen_trunc_i64_i32(cpu_NF, result);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        /* 128-bit add of (t0,0)+(t1,0): high word is the carry out */
        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_trunc_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        /* V = (result ^ t0) & ~(t0 ^ t1): overflow iff operands agree in
         * sign but the result's sign differs; bit 63 moved to VF bit 31.
         */
        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_shri_i64(flag, flag, 32);
        tcg_gen_trunc_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_trunc_i64_i32(t0_32, t0);
        tcg_gen_trunc_i64_i32(t1_32, t1);
        /* add2 produces sum in NF and carry-out in CF directly */
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        /* 32-bit result is zero-extended into the 64-bit destination */
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}
/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        /* ARM subtraction carry = NOT borrow: C set when t0 >= t1 unsigned */
        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_trunc_i64_i32(cpu_CF, flag);

        /* V = (result ^ t0) & (t0 ^ t1); bit 63 into VF bit 31 */
        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_shri_i64(flag, flag, 32);
        tcg_gen_trunc_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_trunc_i64_i32(t0_32, t0);
        tcg_gen_trunc_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        /* 32-bit form: clear the upper half of the destination */
        tcg_gen_ext32u_i64(dest, dest);
    }
}
/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, cf_64, vf_64, tmp;
        result = tcg_temp_new_i64();
        cf_64 = tcg_temp_new_i64();
        vf_64 = tcg_temp_new_i64();
        tmp = tcg_const_i64(0);

        /* Two carrying adds: t0 + CF, then + t1, accumulating carry-out */
        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
        tcg_gen_trunc_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        /* Overflow as in gen_add_CC: (result^t0) & ~(t0^t1), bit 63 */
        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_shri_i64(vf_64, vf_64, 32);
        tcg_gen_trunc_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32, t1_32, tmp;
        t0_32 = tcg_temp_new_i32();
        t1_32 = tcg_temp_new_i32();
        tmp = tcg_const_i32(0);

        tcg_gen_trunc_i64_i32(t0_32, t0);
        tcg_gen_trunc_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}
/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory. @size is log2 of the access width
 * in bytes (0..3), endianness per the target default (MO_TE).
 */
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, int size)
{
    g_assert(size <= 3);
    tcg_gen_qemu_st_i64(source, tcg_addr, get_mem_index(s), MO_TE + size);
}
/*
 * Load from memory to GPR register. @size is log2 of the width (0..3);
 * @is_signed selects a sign-extending load, and @extend requests the
 * 32-bit (Wn) destination form, which zero-fills bits 63:32.
 */
static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                      int size, bool is_signed, bool extend)
{
    TCGMemOp memop = MO_TE + size;

    g_assert(size <= 3);

    if (is_signed) {
        memop += MO_SIGN;
    }

    tcg_gen_qemu_ld_i64(dest, tcg_addr, get_mem_index(s), memop);

    if (extend && is_signed) {
        g_assert(size < 3);
        /* sign-extended within 32 bits, then zero-extended to 64 */
        tcg_gen_ext32u_i64(dest, dest);
    }
}
498 * Store from FP register to memory
500 static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
502 /* This writes the bottom N bits of a 128 bit wide vector to memory */
503 int freg_offs = offsetof(CPUARMState, vfp.regs[srcidx * 2]);
504 TCGv_i64 tmp = tcg_temp_new_i64();
506 if (size < 4) {
507 switch (size) {
508 case 0:
509 tcg_gen_ld8u_i64(tmp, cpu_env, freg_offs);
510 break;
511 case 1:
512 tcg_gen_ld16u_i64(tmp, cpu_env, freg_offs);
513 break;
514 case 2:
515 tcg_gen_ld32u_i64(tmp, cpu_env, freg_offs);
516 break;
517 case 3:
518 tcg_gen_ld_i64(tmp, cpu_env, freg_offs);
519 break;
521 tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TE + size);
522 } else {
523 TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
524 tcg_gen_ld_i64(tmp, cpu_env, freg_offs);
525 tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TEQ);
526 tcg_gen_qemu_st64(tmp, tcg_addr, get_mem_index(s));
527 tcg_gen_ld_i64(tmp, cpu_env, freg_offs + sizeof(float64));
528 tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
529 tcg_gen_qemu_st_i64(tmp, tcg_hiaddr, get_mem_index(s), MO_TEQ);
530 tcg_temp_free_i64(tcg_hiaddr);
533 tcg_temp_free_i64(tmp);
/*
 * Load from memory to FP register. @size as for do_fp_st; sizes below
 * 128 bits zero-fill the remainder of the vector register.
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    int freg_offs = offsetof(CPUARMState, vfp.regs[destidx * 2]);
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi;

    if (size < 4) {
        TCGMemOp memop = MO_TE + size;
        tmphi = tcg_const_i64(0);      /* high half is zeroed */
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
    } else {
        TCGv_i64 tcg_hiaddr;
        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        /* 128 bit load: two 64-bit halves, low then high (addr + 8) */
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), MO_TEQ);
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(tmphi, tcg_hiaddr, get_mem_index(s), MO_TEQ);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, freg_offs);
    tcg_gen_st_i64(tmphi, cpu_env, freg_offs + sizeof(float64));

    tcg_temp_free_i64(tmplo);
    tcg_temp_free_i64(tmphi);
}
569 * This utility function is for doing register extension with an
570 * optional shift. You will likely want to pass a temporary for the
571 * destination register. See DecodeRegExtend() in the ARM ARM.
573 static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
574 int option, unsigned int shift)
576 int extsize = extract32(option, 0, 2);
577 bool is_signed = extract32(option, 2, 1);
579 if (is_signed) {
580 switch (extsize) {
581 case 0:
582 tcg_gen_ext8s_i64(tcg_out, tcg_in);
583 break;
584 case 1:
585 tcg_gen_ext16s_i64(tcg_out, tcg_in);
586 break;
587 case 2:
588 tcg_gen_ext32s_i64(tcg_out, tcg_in);
589 break;
590 case 3:
591 tcg_gen_mov_i64(tcg_out, tcg_in);
592 break;
594 } else {
595 switch (extsize) {
596 case 0:
597 tcg_gen_ext8u_i64(tcg_out, tcg_in);
598 break;
599 case 1:
600 tcg_gen_ext16u_i64(tcg_out, tcg_in);
601 break;
602 case 2:
603 tcg_gen_ext32u_i64(tcg_out, tcg_in);
604 break;
605 case 3:
606 tcg_gen_mov_i64(tcg_out, tcg_in);
607 break;
611 if (shift) {
612 tcg_gen_shli_i64(tcg_out, tcg_out, shift);
static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}
630 * the instruction disassembly implemented here matches
631 * the instruction encoding classifications in chapter 3 (C3)
632 * of the ARM Architecture Reference Manual (DDI0487A_a)
/* C3.2.7 Unconditional branch (immediate)
 *   31  30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    /* s->pc is already past this insn, hence the -4 to get its address */
    uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;

    if (insn & (1 << 31)) {
        /* C5.6.26 BL Branch with link: write return address to X30 */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
    }

    /* C5.6.20 B Branch / C5.6.26 BL Branch with link */
    gen_goto_tb(s, 0, addr);
}
/* C3.2.1 Compare & branch (immediate)
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |         imm19       |   Rt   |
 * +----+-------------+----+---------------------+--------+
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    int label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    /* fall-through path (condition false), then the taken branch */
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
/* C3.2.5 Test & branch (immediate)
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    int label_match;
    TCGv_i64 tcg_cmp;

    /* tested bit index is b5:b40 (6 bits, 0..63) */
    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
/* C3.2.2 / C5.6.19 Conditional branch (immediate)
 *  31           25  24  23                  5   4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |         imm19       | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    /* o0 and o1 must both be zero */
    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
    cond = extract32(insn, 0, 4);

    if (cond < 0x0e) {
        /* genuinely conditional branches */
        int label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->pc);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}
/* C5.6.68 HINT: NOP/YIELD/WFE/WFI/SEV/SEVL are all treated as NOPs here. */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0: /* NOP */
        return;
    case 1: /* YIELD */
    case 2: /* WFE */
    case 3: /* WFI */
    case 4: /* SEV */
    case 5: /* SEVL */
        /* we treat all as NOP at least for now */
        return;
    default:
        /* default specified as NOP equivalent */
        return;
    }
}
/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        unsupported_encoding(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
    case 6: /* ISB */
        /* We don't emulate caches so barriers are no-ops */
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
/* C5.6.130 MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    /* Not implemented yet: logged and raised as UNDEF */
    unsupported_encoding(s, insn);
}
/* Assemble the architectural NZCV value (bits 31..28) from the cached
 * flag variables into the 64-bit destination @tcg_rt.
 */
static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1 << 31));
    /* build bit 30, Z: architectural Z is set iff cpu_ZF == 0 */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V: cpu_VF keeps the flag in bit 31 */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}
/* Scatter an architectural NZCV value (bits 31..28 of @tcg_rt) back into
 * the cached flag variables, inverse of gen_get_nzcv().
 */
static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_trunc_i64_i32(nzcv, tcg_rt);

    /* bit 31, N: cpu_NF wants the flag in bit 31, already in place */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1 << 31));
    /* bit 30, Z: invert into the "Z set iff cpu_ZF == 0" convention */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C: cpu_CF is a plain 0/1 value */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V: shift up into bit 31 for cpu_VF */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}
/* C5.6.129 MRS - move from system register
 * C5.6.131 MSR (register) - move to system register
 * C5.6.204 SYS
 * C5.6.205 SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    const ARMCPRegInfo *ri;
    TCGv_i64 tcg_rt;

    /* Look the register up in the AArch64 system register table */
    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                               crn, crm, op0, op1, op2));

    if (!ri) {
        /* Unknown register */
        unallocated_encoding(s);
        return;
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_pl, ri, isread)) {
        unallocated_encoding(s);
        return;
    }

    /* Handle special cases first */
    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        /* NZCV pseudo-register: read/write via the cached flags */
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    default:
        break;
    }

    if (use_icount && (ri->type & ARM_CP_IO)) {
        gen_io_start();
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            TCGv_ptr tmpptr;
            /* sync PC: the read helper may raise an exception */
            gen_a64_set_pc_im(s->pc - 4);
            tmpptr = tcg_const_ptr(ri);
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            TCGv_ptr tmpptr;
            /* sync PC: the write helper may raise an exception */
            gen_a64_set_pc_im(s->pc - 4);
            tmpptr = tcg_const_ptr(ri);
            gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if (use_icount && (ri->type & ARM_CP_IO)) {
        /* I/O operations must end the TB here (whether read or write) */
        gen_io_end();
        s->is_jmp = DISAS_UPDATE;
    } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /* We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        s->is_jmp = DISAS_UPDATE;
    }
}
/* C3.2.4 System
 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 */
static void disas_system(DisasContext *s, uint32_t insn)
{
    unsigned int l, op0, op1, crn, crm, op2, rt;
    l = extract32(insn, 21, 1);
    op0 = extract32(insn, 19, 2);
    op1 = extract32(insn, 16, 3);
    crn = extract32(insn, 12, 4);
    crm = extract32(insn, 8, 4);
    op2 = extract32(insn, 5, 3);
    rt = extract32(insn, 0, 5);

    if (op0 == 0) {
        /* op0 == 0: hints, barriers and MSR-immediate; Rt must be 31 */
        if (l || rt != 31) {
            unallocated_encoding(s);
            return;
        }
        switch (crn) {
        case 2: /* C5.6.68 HINT */
            handle_hint(s, insn, op1, op2, crm);
            break;
        case 3: /* CLREX, DSB, DMB, ISB */
            handle_sync(s, insn, op1, op2, crm);
            break;
        case 4: /* C5.6.130 MSR (immediate) */
            handle_msr_i(s, insn, op1, op2, crm);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    }
    handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
}
/* C3.2.3 Exception generation
 *
 *  31             24 23 21 20                     5 4   2 1  0
 * +-----------------+-----+------------------------+-----+----+
 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
 * +-----------------------+------------------------+----------+
 */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);

    switch (opc) {
    case 0:
        /* SVC, HVC, SMC; since we don't support the Virtualization
         * or TrustZone extensions these all UNDEF except SVC.
         */
        if (op2_ll != 1) {
            unallocated_encoding(s);
            break;
        }
        /* offset 0: exception PC is the insn after the SVC */
        gen_exception_insn(s, 0, EXCP_SWI);
        break;
    case 1:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK */
        gen_exception_insn(s, 0, EXCP_BKPT);
        break;
    case 2:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* HLT */
        unsupported_encoding(s, insn);
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1, DCPS2, DCPS3 */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* C3.2.7 Unconditional branch (register)
 *  31           25 24   21 20   16 15   10 9    5 4     0
 * +---------------+-------+-------+-------+------+-------+
 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
 * +---------------+-------+-------+-------+------+-------+
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* BR */
    case 2: /* RET */
        /* both simply branch to Rn (done below) */
        break;
    case 1: /* BLR: also write the return address to X30 first */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
        break;
    case 4: /* ERET */
    case 5: /* DRPS */
        if (rn != 0x1f) {
            unallocated_encoding(s);
        } else {
            unsupported_encoding(s, insn);
        }
        return;
    default:
        unallocated_encoding(s);
        return;
    }

    tcg_gen_mov_i64(cpu_pc, cpu_reg(s, rn));
    s->is_jmp = DISAS_JUMP;
}
/* C3.2 Branches, exception generating and system instructions:
 * dispatch on bits [31:25] to the per-class decoders above.
 */
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 7)) {
    case 0x0a: case 0x0b:
    case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
        disas_uncond_b_imm(s, insn);
        break;
    case 0x1a: case 0x5a: /* Compare & branch (immediate) */
        disas_comp_b_imm(s, insn);
        break;
    case 0x1b: case 0x5b: /* Test & branch (immediate) */
        disas_test_b_imm(s, insn);
        break;
    case 0x2a: /* Conditional branch (immediate) */
        disas_cond_b_imm(s, insn);
        break;
    case 0x6a: /* Exception generation / System */
        if (insn & (1 << 24)) {
            disas_system(s, insn);
        } else {
            disas_exc(s, insn);
        }
        break;
    case 0x6b: /* Unconditional branch (register) */
        disas_uncond_b_reg(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* Load/store exclusive — not yet implemented */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}
/* Load register (literal) — not yet implemented */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
    unsupported_encoding(s, insn);
}
/*
 * C5.6.80 LDNP (Load Pair - non-temporal hint)
 * C5.6.81 LDP (Load Pair - non vector)
 * C5.6.82 LDPSW (Load Pair Signed Word - non vector)
 * C5.6.176 STNP (Store Pair - non-temporal hint)
 * C5.6.177 STP (Store Pair - non vector)
 * C6.3.165 LDNP (Load Pair of SIMD&FP - non-temporal hint)
 * C6.3.165 LDP (Load Pair of SIMD&FP)
 * C6.3.284 STNP (Store Pair of SIMD&FP - non-temporal hint)
 * C6.3.284 STP (Store Pair of SIMD&FP)
 *
 *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
 * +-----+-------+---+---+-------+---+-----------------------------+
 * | opc | 1 0 1 | V | 0 | index | L |  imm7 |  Rt2  |  Rn  | Rt   |
 * +-----+-------+---+---+-------+---+-------+-------+------+------+
 *
 * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
 *      LDPSW                    01
 *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
 *   V: 0 -> GPR, 1 -> Vector
 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
 *      10 -> signed offset, 11 -> pre-index
 *   L: 0 -> Store 1 -> Load
 *
 * Rt, Rt2 = GPR or SIMD registers to be stored
 * Rn = general purpose register containing address
 * imm7 = signed offset (multiple of 4 or 8 depending on size)
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int64_t offset = sextract32(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;

    TCGv_i64 tcg_addr; /* calculated address */
    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        if (!is_load && is_signed) {
            /* there is no store-pair-signed-word */
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /* signed offset with "non-temporal" hint. Since we don't emulate
         * caches we don't care about hints to the cache system about
         * data access patterns, and handle this identically to plain
         * signed offset.
         */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, rn not updated */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    /* imm7 is scaled by the access size */
    offset <<= size;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!postindex) {
        tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
    }

    /* first register of the pair */
    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, tcg_addr, size);
        } else {
            do_fp_st(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_load) {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false);
        } else {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        }
    }
    /* second register of the pair, at addr + access size */
    tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt2, tcg_addr, size);
        } else {
            do_fp_st(s, rt2, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
        if (is_load) {
            do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false);
        } else {
            do_gpr_st(s, tcg_rt2, tcg_addr, size);
        }
    }

    if (wback) {
        /* tcg_addr currently points at the second element; rebase it to
         * the value Rn should be updated with before writing it back.
         */
        if (postindex) {
            tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
        } else {
            tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
    }
}
1263 * C3.3.8 Load/store (immediate post-indexed)
1264 * C3.3.9 Load/store (immediate pre-indexed)
1265 * C3.3.12 Load/store (unscaled immediate)
1267 * 31 30 29 27 26 25 24 23 22 21 20 12 11 10 9 5 4 0
1268 * +----+-------+---+-----+-----+---+--------+-----+------+------+
1269 * |size| 1 1 1 | V | 0 0 | opc | 0 | imm9 | idx | Rn | Rt |
1270 * +----+-------+---+-----+-----+---+--------+-----+------+------+
1272 * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
1273 * V = 0 -> non-vector
1274 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
1275 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm9 = sextract32(insn, 12, 9);
    int opc = extract32(insn, 22, 2);
    int size = extract32(insn, 30, 2);
    int idx = extract32(insn, 10, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_vector = extract32(insn, 26, 1);
    bool post_index;
    bool writeback;

    TCGv_i64 tcg_addr;

    if (is_vector) {
        /* vector forms: effective size is opc<1>:size<1:0>;
         * anything above 128 bits (size > 4) is reserved
         */
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = ((opc & 1) == 0);
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            /* LDRSW/LDRSH/LDRSB 32-bit forms only exist for size 0/1 */
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = opc & (1<<1);
        is_extended = (size < 3) && (opc & 1);
    }

    switch (idx) {
    case 0:
        /* unscaled immediate: no writeback */
        post_index = false;
        writeback = false;
        break;
    case 1:
        post_index = true;
        writeback = true;
        break;
    case 3:
        post_index = false;
        writeback = true;
        break;
    case 2:
        /* idx == 2 is a different instruction class; the caller
         * (disas_ldst_reg_imm) never routes it here.
         */
        g_assert(false);
        break;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!post_index) {
        tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
    }

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
        }
    }

    if (writeback) {
        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
        if (post_index) {
            /* offset was not applied before the access; apply it now */
            tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
        }
        tcg_gen_mov_i64(tcg_rn, tcg_addr);
    }
}
1367 * C3.3.10 Load/store (register offset)
1369 * 31 30 29 27 26 25 24 23 22 21 20 16 15 13 12 11 10 9 5 4 0
1370 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
1371 * |size| 1 1 1 | V | 0 0 | opc | 1 | Rm | opt | S| 1 0 | Rn | Rt |
1372 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
1374 * For non-vector:
1375 * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
1376 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
1377 * For vector:
1378 * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
1379 * opc<0>: 0 -> store, 1 -> load
1380 * V: 1 -> vector/simd
1381 * opt: extend encoding (see DecodeRegExtend)
1382 * S: if S=1 then scale (essentially index by sizeof(size))
1383 * Rt: register to transfer into/out of
1384 * Rn: address register or SP for base
1385 * Rm: offset register or ZR for offset
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int shift = extract32(insn, 12, 1);
    int rm = extract32(insn, 16, 5);
    int opc = extract32(insn, 22, 2);
    int opt = extract32(insn, 13, 3);
    int size = extract32(insn, 30, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_vector = extract32(insn, 26, 1);

    TCGv_i64 tcg_rm;
    TCGv_i64 tcg_addr;

    if (extract32(opt, 1, 1) == 0) {
        /* option<1> == 0 extend encodings are reserved */
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        /* vector forms: effective size is opc<1>:size<1:0> */
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    /* extend Rm, optionally scaling it by the access size (S bit) */
    tcg_rm = read_cpu_reg(s, rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);

    tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
        }
    }
}
1457 * C3.3.13 Load/store (unsigned immediate)
1459 * 31 30 29 27 26 25 24 23 22 21 10 9 5
1460 * +----+-------+---+-----+-----+------------+-------+------+
1461 * |size| 1 1 1 | V | 0 1 | opc | imm12 | Rn | Rt |
1462 * +----+-------+---+-----+-----+------------+-------+------+
1464 * For non-vector:
1465 * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
1466 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
1467 * For vector:
1468 * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
1469 * opc<0>: 0 -> store, 1 -> load
1470 * Rn: base address register (inc SP)
1471 * Rt: target register
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    unsigned int imm12 = extract32(insn, 10, 12);
    bool is_vector = extract32(insn, 26, 1);
    int size = extract32(insn, 30, 2);
    int opc = extract32(insn, 22, 2);
    unsigned int offset;

    TCGv_i64 tcg_addr;

    bool is_store;
    bool is_signed = false;
    bool is_extended = false;

    if (is_vector) {
        /* vector forms: effective size is opc<1>:size<1:0> */
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);
    /* imm12 is an unsigned offset scaled by the access size */
    offset = imm12 << size;
    tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
        }
    }
}
1533 /* Load/store register (immediate forms) */
1534 static void disas_ldst_reg_imm(DisasContext *s, uint32_t insn)
1536 switch (extract32(insn, 10, 2)) {
1537 case 0: case 1: case 3:
1538 /* Load/store register (unscaled immediate) */
1539 /* Load/store immediate pre/post-indexed */
1540 disas_ldst_reg_imm9(s, insn);
1541 break;
1542 case 2:
1543 /* Load/store register unprivileged */
1544 unsupported_encoding(s, insn);
1545 break;
1546 default:
1547 unallocated_encoding(s);
1548 break;
1552 /* Load/store register (all forms) */
1553 static void disas_ldst_reg(DisasContext *s, uint32_t insn)
1555 switch (extract32(insn, 24, 2)) {
1556 case 0:
1557 if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) {
1558 disas_ldst_reg_roffset(s, insn);
1559 } else {
1560 disas_ldst_reg_imm(s, insn);
1562 break;
1563 case 1:
1564 disas_ldst_reg_unsigned_imm(s, insn);
1565 break;
1566 default:
1567 unallocated_encoding(s);
1568 break;
1572 /* AdvSIMD load/store multiple structures */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    /* Not implemented yet: raise an exception rather than mis-decode */
    unsupported_encoding(s, insn);
}
1578 /* AdvSIMD load/store single structure */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    /* Not implemented yet: raise an exception rather than mis-decode */
    unsupported_encoding(s, insn);
}
1584 /* C3.3 Loads and stores */
static void disas_ldst(DisasContext *s, uint32_t insn)
{
    /* Top-level loads-and-stores dispatch on insn<29:24> */
    switch (extract32(insn, 24, 6)) {
    case 0x08: /* Load/store exclusive */
        disas_ldst_excl(s, insn);
        break;
    case 0x18: case 0x1c: /* Load register (literal) */
        disas_ld_lit(s, insn);
        break;
    case 0x28: case 0x29:
    case 0x2c: case 0x2d: /* Load/store pair (all forms) */
        disas_ldst_pair(s, insn);
        break;
    case 0x38: case 0x39:
    case 0x3c: case 0x3d: /* Load/store register (all forms) */
        disas_ldst_reg(s, insn);
        break;
    case 0x0c: /* AdvSIMD load/store multiple structures */
        disas_ldst_multiple_struct(s, insn);
        break;
    case 0x0d: /* AdvSIMD load/store single structure */
        disas_ldst_single_struct(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
1614 /* C3.4.6 PC-rel. addressing
1615 * 31 30 29 28 24 23 5 4 0
1616 * +----+-------+-----------+-------------------+------+
1617 * | op | immlo | 1 0 0 0 0 | immhi | Rd |
1618 * +----+-------+-----------+-------------------+------+
static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
{
    unsigned int page, rd;
    uint64_t base;
    int64_t offset;

    page = extract32(insn, 31, 1);
    /* SignExtend(immhi:immlo) -> offset */
    offset = ((int64_t)sextract32(insn, 5, 19) << 2) | extract32(insn, 29, 2);
    rd = extract32(insn, 0, 5);
    /* the -4 recovers this insn's own address (s->pc points past it) */
    base = s->pc - 4;

    if (page) {
        /* ADRP (page based): 4K-page-align the base, offset is in pages */
        base &= ~0xfff;
        offset <<= 12;
    }

    tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
}
1642 * C3.4.1 Add/subtract (immediate)
1644 * 31 30 29 28 24 23 22 21 10 9 5 4 0
1645 * +--+--+--+-----------+-----+-------------+-----+-----+
1646 * |sf|op| S| 1 0 0 0 1 |shift| imm12 | Rn | Rd |
1647 * +--+--+--+-----------+-----+-------------+-----+-----+
1649 * sf: 0 -> 32bit, 1 -> 64bit
1650 * op: 0 -> add , 1 -> sub
1651 * S: 1 -> set flags
1652 * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t imm = extract32(insn, 10, 12);
    int shift = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool is_64bit = extract32(insn, 31, 1);

    /* Rn is always SP-capable; Rd is XZR (not SP) for flag-setting forms */
    TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
    TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
    TCGv_i64 tcg_result;

    switch (shift) {
    case 0x0:
        break;
    case 0x1:
        /* LSL #12 form */
        imm <<= 12;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    tcg_result = tcg_temp_new_i64();
    if (!setflags) {
        if (sub_op) {
            tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
        } else {
            tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
        }
    } else {
        /* flag-setting helpers need the immediate in a TCG value */
        TCGv_i64 tcg_imm = tcg_const_i64(imm);
        if (sub_op) {
            gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        } else {
            gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        }
        tcg_temp_free_i64(tcg_imm);
    }

    if (is_64bit) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        /* 32-bit form: zero-extend the result into the 64-bit register */
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
1705 /* The input should be a value in the bottom e bits (with higher
1706 * bits zero); returns that value replicated into every element
1707 * of size e in a 64 bit integer.
/* Replicate a value held in the bottom e bits of 'mask' across all
 * e-bit elements of a 64-bit integer. e must be a power of two no
 * larger than 64 and the bits above e must already be zero.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    unsigned int width;

    assert(e != 0);
    /* double the populated width until the whole 64 bits are covered */
    for (width = e; width < 64; width *= 2) {
        mask |= mask << width;
    }
    return mask;
}
1719 /* Return a value with the bottom len bits set (where 0 < len <= 64) */
/* Return a value with the bottom 'length' bits set (0 < length <= 64). */
static inline uint64_t bitmask64(unsigned int length)
{
    assert(length > 0 && length <= 64);
    /* avoid shifting a 64-bit value by 64 in the all-bits case */
    return length == 64 ? ~0ULL : (1ULL << length) - 1;
}
1726 /* Simplified variant of pseudocode DecodeBitMasks() for the case where we
1727 * only require the wmask. Returns false if the imms/immr/immn are a reserved
1728 * value (ie should cause a guest UNDEF exception), and true if they are
1729 * valid, in which case the decoded bit pattern is written to result.
static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                                   unsigned int imms, unsigned int immr)
{
    uint64_t mask;
    unsigned e, levels, s, r;
    int len;

    assert(immn < 2 && imms < 64 && immr < 64);

    /* The bit patterns we create here are 64 bit patterns which
     * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
     * 64 bits each. Each element contains the same value: a run
     * of between 1 and e-1 non-zero bits, rotated within the
     * element by between 0 and e-1 bits.
     *
     * The element size and run length are encoded into immn (1 bit)
     * and imms (6 bits) as follows:
     * 64 bit elements: immn = 1, imms = <length of run - 1>
     * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
     * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
     * 8 bit elements: immn = 0, imms = 110 : <length of run - 1>
     * 4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
     * 2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
     * Notice that immn = 0, imms = 11111x is the only combination
     * not covered by one of the above options; this is reserved.
     * Further, <length of run - 1> all-ones is a reserved pattern.
     *
     * In all cases the rotation is by immr % e (and immr is 6 bits).
     */

    /* First determine the element size */
    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
    if (len < 1) {
        /* This is the immn == 0, imms == 0x11111x case */
        return false;
    }
    e = 1 << len;

    levels = e - 1;
    s = imms & levels;
    r = immr & levels;

    if (s == levels) {
        /* <length of run - 1> mustn't be all-ones. */
        return false;
    }

    /* Create the value of one element: s+1 set bits rotated
     * by r within the element (which is e bits wide)...
     */
    mask = bitmask64(s + 1);
    if (r) {
        /* Only rotate when r is non-zero: with r == 0 and e == 64 the
         * expression "mask << (e - r)" would shift a uint64_t by 64,
         * which is undefined behaviour in C.
         */
        mask = (mask >> r) | (mask << (e - r));
        /* Discard any bits the left shift pushed above the element
         * width; bitfield_replicate requires its input confined to
         * the bottom e bits.
         */
        mask &= bitmask64(e);
    }
    /* ...then replicate the element over the whole 64 bit value */
    mask = bitfield_replicate(mask, e);
    *result = mask;
    return true;
}
1789 /* C3.4.4 Logical (immediate)
1790 * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
1791 * +----+-----+-------------+---+------+------+------+------+
1792 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd |
1793 * +----+-----+-------------+---+------+------+------+------+
static void disas_logic_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opc, is_n, immr, imms, rn, rd;
    TCGv_i64 tcg_rd, tcg_rn;
    uint64_t wmask;
    bool is_and = false;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    is_n = extract32(insn, 22, 1);
    immr = extract32(insn, 16, 6);
    imms = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && is_n) {
        /* N == 1 is reserved for the 32-bit variant */
        unallocated_encoding(s);
        return;
    }

    if (opc == 0x3) { /* ANDS */
        tcg_rd = cpu_reg(s, rd);
    } else {
        /* AND/ORR/EOR immediate may write SP */
        tcg_rd = cpu_reg_sp(s, rd);
    }
    tcg_rn = cpu_reg(s, rn);

    if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
        /* some immediate field values are reserved */
        unallocated_encoding(s);
        return;
    }

    if (!sf) {
        wmask &= 0xffffffff;
    }

    switch (opc) {
    case 0x3: /* ANDS */
    case 0x0: /* AND */
        tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
        is_and = true;
        break;
    case 0x1: /* ORR */
        tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
        break;
    case 0x2: /* EOR */
        tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
        break;
    default:
        assert(FALSE); /* must handle all above */
        break;
    }

    if (!sf && !is_and) {
        /* zero extend final result; we know we can skip this for AND
         * since the immediate had the high 32 bits clear.
         */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) { /* ANDS */
        gen_logic_CC(sf, tcg_rd);
    }
}
1862 * C3.4.5 Move wide (immediate)
1864 * 31 30 29 28 23 22 21 20 5 4 0
1865 * +--+-----+-------------+-----+----------------+------+
1866 * |sf| opc | 1 0 0 1 0 1 | hw | imm16 | Rd |
1867 * +--+-----+-------------+-----+----------------+------+
1869 * sf: 0 -> 32 bit, 1 -> 64 bit
1870 * opc: 00 -> N, 10 -> Z, 11 -> K
1871 * hw: shift/16 (0,16, and sf only 32, 48)
static void disas_movw_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    uint64_t imm = extract32(insn, 5, 16);
    int sf = extract32(insn, 31, 1);
    int opc = extract32(insn, 29, 2);
    /* hw field scaled to a bit position: 0, 16, 32 or 48 */
    int pos = extract32(insn, 21, 2) << 4;
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_imm;

    if (!sf && (pos >= 32)) {
        /* 32-bit variant only allows shifts of 0 or 16 */
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* MOVN */
    case 2: /* MOVZ */
        imm <<= pos;
        if (opc == 0) {
            /* MOVN writes the bitwise-inverse of the shifted immediate */
            imm = ~imm;
        }
        if (!sf) {
            imm &= 0xffffffffu;
        }
        tcg_gen_movi_i64(tcg_rd, imm);
        break;
    case 3: /* MOVK */
        /* insert the 16-bit immediate at pos, keeping the other bits */
        tcg_imm = tcg_const_i64(imm);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
        tcg_temp_free_i64(tcg_imm);
        if (!sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
        break;
    default:
        /* opc == 1 is reserved */
        unallocated_encoding(s);
        break;
    }
}
1914 /* C3.4.2 Bitfield
1915 * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
1916 * +----+-----+-------------+---+------+------+------+------+
1917 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd |
1918 * +----+-----+-------------+---+------+------+------+------+
static void disas_bitfield(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
    TCGv_i64 tcg_rd, tcg_tmp;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    n = extract32(insn, 22, 1);
    ri = extract32(insn, 16, 6);
    si = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    bitsize = sf ? 64 : 32;

    if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
        /* N must equal sf and immr/imms must fit the operand width */
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);
    tcg_tmp = read_cpu_reg(s, rn, sf);

    /* OPTME: probably worth recognizing common cases of ext{8,16,32}{u,s} */

    if (opc != 1) { /* SBFM or UBFM */
        /* SBFM/UBFM insert into a zeroed destination; only BFM (opc 1)
         * preserves the existing destination bits.
         */
        tcg_gen_movi_i64(tcg_rd, 0);
    }

    /* do the bit move operation */
    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
        pos = 0;
        len = (si - ri) + 1;
    } else {
        /* Wd<32+s-r,32-r> = Wn<s:0> */
        pos = bitsize - ri;
        len = si + 1;
    }

    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);

    if (opc == 0) { /* SBFM - sign extend the destination field */
        tcg_gen_shli_i64(tcg_rd, tcg_rd, 64 - (pos + len));
        tcg_gen_sari_i64(tcg_rd, tcg_rd, 64 - (pos + len));
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
1972 /* C3.4.3 Extract
1973 * 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0
1974 * +----+------+-------------+---+----+------+--------+------+------+
1975 * | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd |
1976 * +----+------+-------------+---+----+------+--------+------+------+
static void disas_extract(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;

    sf = extract32(insn, 31, 1);
    n = extract32(insn, 22, 1);
    rm = extract32(insn, 16, 5);
    imm = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    op21 = extract32(insn, 29, 2);
    op0 = extract32(insn, 21, 1);
    bitsize = sf ? 64 : 32;

    if (sf != n || op21 || op0 || imm >= bitsize) {
        unallocated_encoding(s);
    } else {
        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

        tcg_rd = cpu_reg(s, rd);

        if (imm) {
            /* OPTME: we can special case rm==rn as a rotate */
            /* EXTR: Rd = (Rn:Rm)<imm + bitsize - 1 : imm> */
            tcg_rm = read_cpu_reg(s, rm, sf);
            tcg_rn = read_cpu_reg(s, rn, sf);
            tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
            tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
            tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
             * so an extract from bit 0 is a special case.
             */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
            } else {
                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
            }
        }
    }
}
2023 /* C3.4 Data processing - immediate */
2024 static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
2026 switch (extract32(insn, 23, 6)) {
2027 case 0x20: case 0x21: /* PC-rel. addressing */
2028 disas_pc_rel_adr(s, insn);
2029 break;
2030 case 0x22: case 0x23: /* Add/subtract (immediate) */
2031 disas_add_sub_imm(s, insn);
2032 break;
2033 case 0x24: /* Logical (immediate) */
2034 disas_logic_imm(s, insn);
2035 break;
2036 case 0x25: /* Move wide (immediate) */
2037 disas_movw_imm(s, insn);
2038 break;
2039 case 0x26: /* Bitfield */
2040 disas_bitfield(s, insn);
2041 break;
2042 case 0x27: /* Extract */
2043 disas_extract(s, insn);
2044 break;
2045 default:
2046 unallocated_encoding(s);
2047 break;
2051 /* Shift a TCGv src by TCGv shift_amount, put result in dst.
2052 * Note that it is the caller's responsibility to ensure that the
2053 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
2054 * mandated semantics for out of range shifts.
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
{
    switch (shift_type) {
    case A64_SHIFT_TYPE_LSL:
        tcg_gen_shl_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_LSR:
        tcg_gen_shr_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_ASR:
        if (!sf) {
            /* sign-extend the 32-bit value first so the 64-bit
             * arithmetic shift pulls in the correct sign bits
             */
            tcg_gen_ext32s_i64(dst, src);
        }
        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
        break;
    case A64_SHIFT_TYPE_ROR:
        if (sf) {
            tcg_gen_rotr_i64(dst, src, shift_amount);
        } else {
            /* 32-bit rotate: perform it at 32-bit width, then widen */
            TCGv_i32 t0, t1;
            t0 = tcg_temp_new_i32();
            t1 = tcg_temp_new_i32();
            tcg_gen_trunc_i64_i32(t0, src);
            tcg_gen_trunc_i64_i32(t1, shift_amount);
            tcg_gen_rotr_i32(t0, t0, t1);
            tcg_gen_extu_i32_i64(dst, t0);
            tcg_temp_free_i32(t0);
            tcg_temp_free_i32(t1);
        }
        break;
    default:
        assert(FALSE); /* all shift types should be handled */
        break;
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(dst, dst);
    }
}
2097 /* Shift a TCGv src by immediate, put result in dst.
2098 * The shift amount must be in range (this should always be true as the
2099 * relevant instructions will UNDEF on bad shift immediates).
2101 static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
2102 enum a64_shift_type shift_type, unsigned int shift_i)
2104 assert(shift_i < (sf ? 64 : 32));
2106 if (shift_i == 0) {
2107 tcg_gen_mov_i64(dst, src);
2108 } else {
2109 TCGv_i64 shift_const;
2111 shift_const = tcg_const_i64(shift_i);
2112 shift_reg(dst, src, sf, shift_type, shift_const);
2113 tcg_temp_free_i64(shift_const);
2117 /* C3.5.10 Logical (shifted register)
2118 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
2119 * +----+-----+-----------+-------+---+------+--------+------+------+
2120 * | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd |
2121 * +----+-----+-----------+-------+---+------+--------+------+------+
static void disas_logic_reg(DisasContext *s, uint32_t insn)
{
    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    shift_type = extract32(insn, 22, 2);
    invert = extract32(insn, 21, 1);
    rm = extract32(insn, 16, 5);
    shift_amount = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && (shift_amount & (1 << 5))) {
        /* 32-bit form cannot shift by 32 or more */
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
         * register-register MOV and MVN, so it is worth special casing.
         */
        tcg_rm = cpu_reg(s, rm);
        if (invert) {
            /* MVN */
            tcg_gen_not_i64(tcg_rd, tcg_rm);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            /* MOV */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, tcg_rm);
            } else {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
            }
        }
        return;
    }

    tcg_rm = read_cpu_reg(s, rm, sf);

    if (shift_amount) {
        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
    }

    tcg_rn = cpu_reg(s, rn);

    /* fold the N (invert) bit into the opcode to get one of 8 operations */
    switch (opc | (invert << 2)) {
    case 0: /* AND */
    case 3: /* ANDS */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 1: /* ORR */
        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 2: /* EOR */
        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 4: /* BIC */
    case 7: /* BICS */
        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 5: /* ORN */
        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 6: /* EON */
        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    default:
        assert(FALSE);
        break;
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) {
        /* ANDS/BICS also set the condition flags */
        gen_logic_CC(sf, tcg_rd);
    }
}
2208 * C3.5.1 Add/subtract (extended register)
2210 * 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0|
2211 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
2212 * |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd |
2213 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
2215 * sf: 0 -> 32bit, 1 -> 64bit
2216 * op: 0 -> add , 1 -> sub
2217 * S: 1 -> set flags
2218 * opt: 00
2219 * option: extension type (see DecodeRegExtend)
2220 * imm3: optional shift to Rm
2222 * Rd = Rn + LSL(extend(Rm), amount)
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);
    int option = extract32(insn, 13, 3);
    int rm = extract32(insn, 16, 5);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn; /* temps */
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    if (imm3 > 4) {
        /* shift amounts of 5-7 are reserved */
        unallocated_encoding(s);
        return;
    }

    /* non-flag setting ops may use SP */
    if (!setflags) {
        tcg_rn = read_cpu_reg_sp(s, rn, sf);
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rn = read_cpu_reg(s, rn, sf);
        tcg_rd = cpu_reg(s, rd);
    }

    /* apply the extend-then-shift to the second operand */
    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        /* 32-bit form: zero-extend the result into the 64-bit register */
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
2282 * C3.5.2 Add/subtract (shifted register)
2284 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
2285 * +--+--+--+-----------+-----+--+-------+---------+------+------+
2286 * |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd |
2287 * +--+--+--+-----------+-----+--+-------+---------+------+------+
2289 * sf: 0 -> 32bit, 1 -> 64bit
2290 * op: 0 -> add , 1 -> sub
2291 * S: 1 -> set flags
2292 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
2293 * imm6: Shift amount to apply to Rm before the add/sub
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 10, 6);
    int rm = extract32(insn, 16, 5);
    int shift_type = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn, tcg_rm;
    TCGv_i64 tcg_result;

    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
        /* ROR is reserved here, and 32-bit forms cannot shift >= 32 */
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, sf);
    tcg_rm = read_cpu_reg(s, rm, sf);

    /* pre-shift the second operand */
    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        /* 32-bit form: zero-extend the result into the 64-bit register */
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
2345 /* C3.5.9 Data-processing (3 source)
2347 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0
2348 +--+------+-----------+------+------+----+------+------+------+
2349 |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd |
2350 +--+------+-----------+------+------+----+------+------+------+
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    int op_id = (extract32(insn, 29, 3) << 4) |
        (extract32(insn, 21, 3) << 1) |
        extract32(insn, 15, 1);
    bool sf = extract32(insn, 31, 1);
    bool is_sub = extract32(op_id, 0, 1);
    bool is_high = extract32(op_id, 2, 1);
    bool is_signed = false;
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_tmp;

    /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
    switch (op_id) {
    case 0x42: /* SMADDL */
    case 0x43: /* SMSUBL */
    case 0x44: /* SMULH */
        is_signed = true;
        break;
    case 0x0: /* MADD (32bit) */
    case 0x1: /* MSUB (32bit) */
    case 0x40: /* MADD (64bit) */
    case 0x41: /* MSUB (64bit) */
    case 0x4a: /* UMADDL */
    case 0x4b: /* UMSUBL */
    case 0x4c: /* UMULH */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_high) {
        /* SMULH/UMULH: Rd gets the high 64 bits of the 128-bit product */
        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tcg_rm = cpu_reg(s, rm);

        if (is_signed) {
            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        }

        tcg_temp_free_i64(low_bits);
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_tmp = tcg_temp_new_i64();

    if (op_id < 0x42) {
        /* plain 32/64-bit MADD/MSUB: operands used as-is */
        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
    } else {
        /* widening forms: extend the 32-bit sources to 64 bits first */
        if (is_signed) {
            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
        } else {
            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
        }
    }

    if (ra == 31 && !is_sub) {
        /* Special-case MADD with rA == XZR; it is the standard MUL alias */
        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
    } else {
        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
        if (is_sub) {
            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        } else {
            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        }
    }

    if (!sf) {
        /* 32-bit forms zero-extend the result */
        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
    }

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_tmp);
}
2444 /* C3.5.3 - Add/subtract (with carry)
2445 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0
2446 * +--+--+--+------------------------+------+---------+------+-----+
2447 * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | opcode2 | Rn | Rd |
2448 * +--+--+--+------------------------+------+---------+------+-----+
2449 * [000000]
2452 static void disas_adc_sbc(DisasContext *s, uint32_t insn)
2454 unsigned int sf, op, setflags, rm, rn, rd;
2455 TCGv_i64 tcg_y, tcg_rn, tcg_rd;
2457 if (extract32(insn, 10, 6) != 0) {
2458 unallocated_encoding(s);
2459 return;
2462 sf = extract32(insn, 31, 1);
2463 op = extract32(insn, 30, 1);
2464 setflags = extract32(insn, 29, 1);
2465 rm = extract32(insn, 16, 5);
2466 rn = extract32(insn, 5, 5);
2467 rd = extract32(insn, 0, 5);
2469 tcg_rd = cpu_reg(s, rd);
2470 tcg_rn = cpu_reg(s, rn);
2472 if (op) {
2473 tcg_y = new_tmp_a64(s);
2474 tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
2475 } else {
2476 tcg_y = cpu_reg(s, rm);
2479 if (setflags) {
2480 gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
2481 } else {
2482 gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
2486 /* C3.5.4 - C3.5.5 Conditional compare (immediate / register)
2487 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
2488 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
2489 * |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv |
2490 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
2491 * [1] y [0] [0]
2493 static void disas_cc(DisasContext *s, uint32_t insn)
2495 unsigned int sf, op, y, cond, rn, nzcv, is_imm;
2496 int label_continue = -1;
2497 TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
2499 if (!extract32(insn, 29, 1)) {
2500 unallocated_encoding(s);
2501 return;
2503 if (insn & (1 << 10 | 1 << 4)) {
2504 unallocated_encoding(s);
2505 return;
2507 sf = extract32(insn, 31, 1);
2508 op = extract32(insn, 30, 1);
2509 is_imm = extract32(insn, 11, 1);
2510 y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
2511 cond = extract32(insn, 12, 4);
2512 rn = extract32(insn, 5, 5);
2513 nzcv = extract32(insn, 0, 4);
2515 if (cond < 0x0e) { /* not always */
2516 int label_match = gen_new_label();
2517 label_continue = gen_new_label();
2518 arm_gen_test_cc(cond, label_match);
2519 /* nomatch: */
2520 tcg_tmp = tcg_temp_new_i64();
2521 tcg_gen_movi_i64(tcg_tmp, nzcv << 28);
2522 gen_set_nzcv(tcg_tmp);
2523 tcg_temp_free_i64(tcg_tmp);
2524 tcg_gen_br(label_continue);
2525 gen_set_label(label_match);
2527 /* match, or condition is always */
2528 if (is_imm) {
2529 tcg_y = new_tmp_a64(s);
2530 tcg_gen_movi_i64(tcg_y, y);
2531 } else {
2532 tcg_y = cpu_reg(s, y);
2534 tcg_rn = cpu_reg(s, rn);
2536 tcg_tmp = tcg_temp_new_i64();
2537 if (op) {
2538 gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
2539 } else {
2540 gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
2542 tcg_temp_free_i64(tcg_tmp);
2544 if (cond < 0x0e) { /* continue */
2545 gen_set_label(label_continue);
2549 /* C3.5.6 Conditional select
2550 * 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0
2551 * +----+----+---+-----------------+------+------+-----+------+------+
2552 * | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd |
2553 * +----+----+---+-----------------+------+------+-----+------+------+
2555 static void disas_cond_select(DisasContext *s, uint32_t insn)
2557 unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
2558 TCGv_i64 tcg_rd, tcg_src;
2560 if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
2561 /* S == 1 or op2<1> == 1 */
2562 unallocated_encoding(s);
2563 return;
2565 sf = extract32(insn, 31, 1);
2566 else_inv = extract32(insn, 30, 1);
2567 rm = extract32(insn, 16, 5);
2568 cond = extract32(insn, 12, 4);
2569 else_inc = extract32(insn, 10, 1);
2570 rn = extract32(insn, 5, 5);
2571 rd = extract32(insn, 0, 5);
2573 if (rd == 31) {
2574 /* silly no-op write; until we use movcond we must special-case
2575 * this to avoid a dead temporary across basic blocks.
2577 return;
2580 tcg_rd = cpu_reg(s, rd);
2582 if (cond >= 0x0e) { /* condition "always" */
2583 tcg_src = read_cpu_reg(s, rn, sf);
2584 tcg_gen_mov_i64(tcg_rd, tcg_src);
2585 } else {
2586 /* OPTME: we could use movcond here, at the cost of duplicating
2587 * a lot of the arm_gen_test_cc() logic.
2589 int label_match = gen_new_label();
2590 int label_continue = gen_new_label();
2592 arm_gen_test_cc(cond, label_match);
2593 /* nomatch: */
2594 tcg_src = cpu_reg(s, rm);
2596 if (else_inv && else_inc) {
2597 tcg_gen_neg_i64(tcg_rd, tcg_src);
2598 } else if (else_inv) {
2599 tcg_gen_not_i64(tcg_rd, tcg_src);
2600 } else if (else_inc) {
2601 tcg_gen_addi_i64(tcg_rd, tcg_src, 1);
2602 } else {
2603 tcg_gen_mov_i64(tcg_rd, tcg_src);
2605 if (!sf) {
2606 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
2608 tcg_gen_br(label_continue);
2609 /* match: */
2610 gen_set_label(label_match);
2611 tcg_src = read_cpu_reg(s, rn, sf);
2612 tcg_gen_mov_i64(tcg_rd, tcg_src);
2613 /* continue: */
2614 gen_set_label(label_continue);
2618 static void handle_clz(DisasContext *s, unsigned int sf,
2619 unsigned int rn, unsigned int rd)
2621 TCGv_i64 tcg_rd, tcg_rn;
2622 tcg_rd = cpu_reg(s, rd);
2623 tcg_rn = cpu_reg(s, rn);
2625 if (sf) {
2626 gen_helper_clz64(tcg_rd, tcg_rn);
2627 } else {
2628 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
2629 tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
2630 gen_helper_clz(tcg_tmp32, tcg_tmp32);
2631 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
2632 tcg_temp_free_i32(tcg_tmp32);
2636 static void handle_cls(DisasContext *s, unsigned int sf,
2637 unsigned int rn, unsigned int rd)
2639 TCGv_i64 tcg_rd, tcg_rn;
2640 tcg_rd = cpu_reg(s, rd);
2641 tcg_rn = cpu_reg(s, rn);
2643 if (sf) {
2644 gen_helper_cls64(tcg_rd, tcg_rn);
2645 } else {
2646 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
2647 tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
2648 gen_helper_cls32(tcg_tmp32, tcg_tmp32);
2649 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
2650 tcg_temp_free_i32(tcg_tmp32);
2654 static void handle_rbit(DisasContext *s, unsigned int sf,
2655 unsigned int rn, unsigned int rd)
2657 TCGv_i64 tcg_rd, tcg_rn;
2658 tcg_rd = cpu_reg(s, rd);
2659 tcg_rn = cpu_reg(s, rn);
2661 if (sf) {
2662 gen_helper_rbit64(tcg_rd, tcg_rn);
2663 } else {
2664 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
2665 tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
2666 gen_helper_rbit(tcg_tmp32, tcg_tmp32);
2667 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
2668 tcg_temp_free_i32(tcg_tmp32);
2672 /* C5.6.149 REV with sf==1, opcode==3 ("REV64") */
2673 static void handle_rev64(DisasContext *s, unsigned int sf,
2674 unsigned int rn, unsigned int rd)
2676 if (!sf) {
2677 unallocated_encoding(s);
2678 return;
2680 tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
2683 /* C5.6.149 REV with sf==0, opcode==2
2684 * C5.6.151 REV32 (sf==1, opcode==2)
2686 static void handle_rev32(DisasContext *s, unsigned int sf,
2687 unsigned int rn, unsigned int rd)
2689 TCGv_i64 tcg_rd = cpu_reg(s, rd);
2691 if (sf) {
2692 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
2693 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
2695 /* bswap32_i64 requires zero high word */
2696 tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
2697 tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
2698 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
2699 tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
2700 tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);
2702 tcg_temp_free_i64(tcg_tmp);
2703 } else {
2704 tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
2705 tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
2709 /* C5.6.150 REV16 (opcode==1) */
2710 static void handle_rev16(DisasContext *s, unsigned int sf,
2711 unsigned int rn, unsigned int rd)
2713 TCGv_i64 tcg_rd = cpu_reg(s, rd);
2714 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
2715 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
2717 tcg_gen_andi_i64(tcg_tmp, tcg_rn, 0xffff);
2718 tcg_gen_bswap16_i64(tcg_rd, tcg_tmp);
2720 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 16);
2721 tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
2722 tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
2723 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 16, 16);
2725 if (sf) {
2726 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
2727 tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
2728 tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
2729 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 32, 16);
2731 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 48);
2732 tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
2733 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 48, 16);
2736 tcg_temp_free_i64(tcg_tmp);
2739 /* C3.5.7 Data-processing (1 source)
2740 * 31 30 29 28 21 20 16 15 10 9 5 4 0
2741 * +----+---+---+-----------------+---------+--------+------+------+
2742 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd |
2743 * +----+---+---+-----------------+---------+--------+------+------+
2745 static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
2747 unsigned int sf, opcode, rn, rd;
2749 if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
2750 unallocated_encoding(s);
2751 return;
2754 sf = extract32(insn, 31, 1);
2755 opcode = extract32(insn, 10, 6);
2756 rn = extract32(insn, 5, 5);
2757 rd = extract32(insn, 0, 5);
2759 switch (opcode) {
2760 case 0: /* RBIT */
2761 handle_rbit(s, sf, rn, rd);
2762 break;
2763 case 1: /* REV16 */
2764 handle_rev16(s, sf, rn, rd);
2765 break;
2766 case 2: /* REV32 */
2767 handle_rev32(s, sf, rn, rd);
2768 break;
2769 case 3: /* REV64 */
2770 handle_rev64(s, sf, rn, rd);
2771 break;
2772 case 4: /* CLZ */
2773 handle_clz(s, sf, rn, rd);
2774 break;
2775 case 5: /* CLS */
2776 handle_cls(s, sf, rn, rd);
2777 break;
2781 static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
2782 unsigned int rm, unsigned int rn, unsigned int rd)
2784 TCGv_i64 tcg_n, tcg_m, tcg_rd;
2785 tcg_rd = cpu_reg(s, rd);
2787 if (!sf && is_signed) {
2788 tcg_n = new_tmp_a64(s);
2789 tcg_m = new_tmp_a64(s);
2790 tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
2791 tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
2792 } else {
2793 tcg_n = read_cpu_reg(s, rn, sf);
2794 tcg_m = read_cpu_reg(s, rm, sf);
2797 if (is_signed) {
2798 gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
2799 } else {
2800 gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
2803 if (!sf) { /* zero extend final result */
2804 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
2808 /* C5.6.115 LSLV, C5.6.118 LSRV, C5.6.17 ASRV, C5.6.154 RORV */
2809 static void handle_shift_reg(DisasContext *s,
2810 enum a64_shift_type shift_type, unsigned int sf,
2811 unsigned int rm, unsigned int rn, unsigned int rd)
2813 TCGv_i64 tcg_shift = tcg_temp_new_i64();
2814 TCGv_i64 tcg_rd = cpu_reg(s, rd);
2815 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
2817 tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
2818 shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
2819 tcg_temp_free_i64(tcg_shift);
2822 /* C3.5.8 Data-processing (2 source)
2823 * 31 30 29 28 21 20 16 15 10 9 5 4 0
2824 * +----+---+---+-----------------+------+--------+------+------+
2825 * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd |
2826 * +----+---+---+-----------------+------+--------+------+------+
2828 static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
2830 unsigned int sf, rm, opcode, rn, rd;
2831 sf = extract32(insn, 31, 1);
2832 rm = extract32(insn, 16, 5);
2833 opcode = extract32(insn, 10, 6);
2834 rn = extract32(insn, 5, 5);
2835 rd = extract32(insn, 0, 5);
2837 if (extract32(insn, 29, 1)) {
2838 unallocated_encoding(s);
2839 return;
2842 switch (opcode) {
2843 case 2: /* UDIV */
2844 handle_div(s, false, sf, rm, rn, rd);
2845 break;
2846 case 3: /* SDIV */
2847 handle_div(s, true, sf, rm, rn, rd);
2848 break;
2849 case 8: /* LSLV */
2850 handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
2851 break;
2852 case 9: /* LSRV */
2853 handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
2854 break;
2855 case 10: /* ASRV */
2856 handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
2857 break;
2858 case 11: /* RORV */
2859 handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
2860 break;
2861 case 16:
2862 case 17:
2863 case 18:
2864 case 19:
2865 case 20:
2866 case 21:
2867 case 22:
2868 case 23: /* CRC32 */
2869 unsupported_encoding(s, insn);
2870 break;
2871 default:
2872 unallocated_encoding(s);
2873 break;
2877 /* C3.5 Data processing - register */
2878 static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
2880 switch (extract32(insn, 24, 5)) {
2881 case 0x0a: /* Logical (shifted register) */
2882 disas_logic_reg(s, insn);
2883 break;
2884 case 0x0b: /* Add/subtract */
2885 if (insn & (1 << 21)) { /* (extended register) */
2886 disas_add_sub_ext_reg(s, insn);
2887 } else {
2888 disas_add_sub_reg(s, insn);
2890 break;
2891 case 0x1b: /* Data-processing (3 source) */
2892 disas_data_proc_3src(s, insn);
2893 break;
2894 case 0x1a:
2895 switch (extract32(insn, 21, 3)) {
2896 case 0x0: /* Add/subtract (with carry) */
2897 disas_adc_sbc(s, insn);
2898 break;
2899 case 0x2: /* Conditional compare */
2900 disas_cc(s, insn); /* both imm and reg forms */
2901 break;
2902 case 0x4: /* Conditional select */
2903 disas_cond_select(s, insn);
2904 break;
2905 case 0x6: /* Data-processing */
2906 if (insn & (1 << 30)) { /* (1 source) */
2907 disas_data_proc_1src(s, insn);
2908 } else { /* (2 source) */
2909 disas_data_proc_2src(s, insn);
2911 break;
2912 default:
2913 unallocated_encoding(s);
2914 break;
2916 break;
2917 default:
2918 unallocated_encoding(s);
2919 break;
2923 /* C3.6.22 Floating point compare
2924 * 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0
2925 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
2926 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 |
2927 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
2929 static void disas_fp_compare(DisasContext *s, uint32_t insn)
2931 unsupported_encoding(s, insn);
2934 /* C3.6.23 Floating point conditional compare
2935 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
2936 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
2937 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv |
2938 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
2940 static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
2942 unsupported_encoding(s, insn);
2945 /* C3.6.24 Floating point conditional select
2946 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
2947 * +---+---+---+-----------+------+---+------+------+-----+------+------+
2948 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 1 1 | Rn | Rd |
2949 * +---+---+---+-----------+------+---+------+------+-----+------+------+
2951 static void disas_fp_csel(DisasContext *s, uint32_t insn)
2953 unsupported_encoding(s, insn);
2956 /* C3.6.25 Floating point data-processing (1 source)
2957 * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0
2958 * +---+---+---+-----------+------+---+--------+-----------+------+------+
2959 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd |
2960 * +---+---+---+-----------+------+---+--------+-----------+------+------+
2962 static void disas_fp_1src(DisasContext *s, uint32_t insn)
2964 unsupported_encoding(s, insn);
2967 /* C3.6.26 Floating point data-processing (2 source)
2968 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
2969 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
2970 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | opcode | 1 0 | Rn | Rd |
2971 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
2973 static void disas_fp_2src(DisasContext *s, uint32_t insn)
2975 unsupported_encoding(s, insn);
2978 /* C3.6.27 Floating point data-processing (3 source)
2979 * 31 30 29 28 24 23 22 21 20 16 15 14 10 9 5 4 0
2980 * +---+---+---+-----------+------+----+------+----+------+------+------+
2981 * | M | 0 | S | 1 1 1 1 1 | type | o1 | Rm | o0 | Ra | Rn | Rd |
2982 * +---+---+---+-----------+------+----+------+----+------+------+------+
2984 static void disas_fp_3src(DisasContext *s, uint32_t insn)
2986 unsupported_encoding(s, insn);
2989 /* C3.6.28 Floating point immediate
2990 * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
2991 * +---+---+---+-----------+------+---+------------+-------+------+------+
2992 * | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd |
2993 * +---+---+---+-----------+------+---+------------+-------+------+------+
2995 static void disas_fp_imm(DisasContext *s, uint32_t insn)
2997 unsupported_encoding(s, insn);
3000 /* C3.6.29 Floating point <-> fixed point conversions
3001 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
3002 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
3003 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd |
3004 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
3006 static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
3008 unsupported_encoding(s, insn);
3011 static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
3013 /* FMOV: gpr to or from float, double, or top half of quad fp reg,
3014 * without conversion.
3017 if (itof) {
3018 int freg_offs = offsetof(CPUARMState, vfp.regs[rd * 2]);
3019 TCGv_i64 tcg_rn = cpu_reg(s, rn);
3021 switch (type) {
3022 case 0:
3024 /* 32 bit */
3025 TCGv_i64 tmp = tcg_temp_new_i64();
3026 tcg_gen_ext32u_i64(tmp, tcg_rn);
3027 tcg_gen_st_i64(tmp, cpu_env, freg_offs);
3028 tcg_gen_movi_i64(tmp, 0);
3029 tcg_gen_st_i64(tmp, cpu_env, freg_offs + sizeof(float64));
3030 tcg_temp_free_i64(tmp);
3031 break;
3033 case 1:
3035 /* 64 bit */
3036 TCGv_i64 tmp = tcg_const_i64(0);
3037 tcg_gen_st_i64(tcg_rn, cpu_env, freg_offs);
3038 tcg_gen_st_i64(tmp, cpu_env, freg_offs + sizeof(float64));
3039 tcg_temp_free_i64(tmp);
3040 break;
3042 case 2:
3043 /* 64 bit to top half. */
3044 tcg_gen_st_i64(tcg_rn, cpu_env, freg_offs + sizeof(float64));
3045 break;
3047 } else {
3048 int freg_offs = offsetof(CPUARMState, vfp.regs[rn * 2]);
3049 TCGv_i64 tcg_rd = cpu_reg(s, rd);
3051 switch (type) {
3052 case 0:
3053 /* 32 bit */
3054 tcg_gen_ld32u_i64(tcg_rd, cpu_env, freg_offs);
3055 break;
3056 case 2:
3057 /* 64 bits from top half */
3058 freg_offs += sizeof(float64);
3059 /* fall through */
3060 case 1:
3061 /* 64 bit */
3062 tcg_gen_ld_i64(tcg_rd, cpu_env, freg_offs);
3063 break;
3068 /* C3.6.30 Floating point <-> integer conversions
3069 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
3070 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
3071 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
3072 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
3074 static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
3076 int rd = extract32(insn, 0, 5);
3077 int rn = extract32(insn, 5, 5);
3078 int opcode = extract32(insn, 16, 3);
3079 int rmode = extract32(insn, 19, 2);
3080 int type = extract32(insn, 22, 2);
3081 bool sbit = extract32(insn, 29, 1);
3082 bool sf = extract32(insn, 31, 1);
3084 if (!sbit && (rmode < 2) && (opcode > 5)) {
3085 /* FMOV */
3086 bool itof = opcode & 1;
3088 switch (sf << 3 | type << 1 | rmode) {
3089 case 0x0: /* 32 bit */
3090 case 0xa: /* 64 bit */
3091 case 0xd: /* 64 bit to top half of quad */
3092 break;
3093 default:
3094 /* all other sf/type/rmode combinations are invalid */
3095 unallocated_encoding(s);
3096 break;
3099 handle_fmov(s, rd, rn, type, itof);
3100 } else {
3101 /* actual FP conversions */
3102 unsupported_encoding(s, insn);
3106 /* FP-specific subcases of table C3-6 (SIMD and FP data processing)
3107 * 31 30 29 28 25 24 0
3108 * +---+---+---+---------+-----------------------------+
3109 * | | 0 | | 1 1 1 1 | |
3110 * +---+---+---+---------+-----------------------------+
3112 static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
3114 if (extract32(insn, 24, 1)) {
3115 /* Floating point data-processing (3 source) */
3116 disas_fp_3src(s, insn);
3117 } else if (extract32(insn, 21, 1) == 0) {
3118 /* Floating point to fixed point conversions */
3119 disas_fp_fixed_conv(s, insn);
3120 } else {
3121 switch (extract32(insn, 10, 2)) {
3122 case 1:
3123 /* Floating point conditional compare */
3124 disas_fp_ccomp(s, insn);
3125 break;
3126 case 2:
3127 /* Floating point data-processing (2 source) */
3128 disas_fp_2src(s, insn);
3129 break;
3130 case 3:
3131 /* Floating point conditional select */
3132 disas_fp_csel(s, insn);
3133 break;
3134 case 0:
3135 switch (ctz32(extract32(insn, 12, 4))) {
3136 case 0: /* [15:12] == xxx1 */
3137 /* Floating point immediate */
3138 disas_fp_imm(s, insn);
3139 break;
3140 case 1: /* [15:12] == xx10 */
3141 /* Floating point compare */
3142 disas_fp_compare(s, insn);
3143 break;
3144 case 2: /* [15:12] == x100 */
3145 /* Floating point data-processing (1 source) */
3146 disas_fp_1src(s, insn);
3147 break;
3148 case 3: /* [15:12] == 1000 */
3149 unallocated_encoding(s);
3150 break;
3151 default: /* [15:12] == 0000 */
3152 /* Floating point <-> integer conversions */
3153 disas_fp_int_conv(s, insn);
3154 break;
3156 break;
3161 static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
3163 /* Note that this is called with all non-FP cases from
3164 * table C3-6 so it must UNDEF for entries not specifically
3165 * allocated to instructions in that table.
3167 unsupported_encoding(s, insn);
3170 /* C3.6 Data processing - SIMD and floating point */
3171 static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
3173 if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
3174 disas_data_proc_fp(s, insn);
3175 } else {
3176 /* SIMD, including crypto */
3177 disas_data_proc_simd(s, insn);
3181 /* C3.1 A64 instruction index by encoding */
3182 static void disas_a64_insn(CPUARMState *env, DisasContext *s)
3184 uint32_t insn;
3186 insn = arm_ldl_code(env, s->pc, s->bswap_code);
3187 s->insn = insn;
3188 s->pc += 4;
3190 switch (extract32(insn, 25, 4)) {
3191 case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
3192 unallocated_encoding(s);
3193 break;
3194 case 0x8: case 0x9: /* Data processing - immediate */
3195 disas_data_proc_imm(s, insn);
3196 break;
3197 case 0xa: case 0xb: /* Branch, exception generation and system insns */
3198 disas_b_exc_sys(s, insn);
3199 break;
3200 case 0x4:
3201 case 0x6:
3202 case 0xc:
3203 case 0xe: /* Loads and stores */
3204 disas_ldst(s, insn);
3205 break;
3206 case 0x5:
3207 case 0xd: /* Data processing - register */
3208 disas_data_proc_reg(s, insn);
3209 break;
3210 case 0x7:
3211 case 0xf: /* Data processing - SIMD and floating point */
3212 disas_data_proc_simd_fp(s, insn);
3213 break;
3214 default:
3215 assert(FALSE); /* all 15 cases should be handled above */
3216 break;
3219 /* if we allocated any temporaries, free them here */
3220 free_tmp_a64(s);
3223 void gen_intermediate_code_internal_a64(ARMCPU *cpu,
3224 TranslationBlock *tb,
3225 bool search_pc)
3227 CPUState *cs = CPU(cpu);
3228 CPUARMState *env = &cpu->env;
3229 DisasContext dc1, *dc = &dc1;
3230 CPUBreakpoint *bp;
3231 uint16_t *gen_opc_end;
3232 int j, lj;
3233 target_ulong pc_start;
3234 target_ulong next_page_start;
3235 int num_insns;
3236 int max_insns;
3238 pc_start = tb->pc;
3240 dc->tb = tb;
3242 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
3244 dc->is_jmp = DISAS_NEXT;
3245 dc->pc = pc_start;
3246 dc->singlestep_enabled = cs->singlestep_enabled;
3247 dc->condjmp = 0;
3249 dc->aarch64 = 1;
3250 dc->thumb = 0;
3251 dc->bswap_code = 0;
3252 dc->condexec_mask = 0;
3253 dc->condexec_cond = 0;
3254 #if !defined(CONFIG_USER_ONLY)
3255 dc->user = 0;
3256 #endif
3257 dc->vfp_enabled = 0;
3258 dc->vec_len = 0;
3259 dc->vec_stride = 0;
3260 dc->cp_regs = cpu->cp_regs;
3261 dc->current_pl = arm_current_pl(env);
3263 init_tmp_a64_array(dc);
3265 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
3266 lj = -1;
3267 num_insns = 0;
3268 max_insns = tb->cflags & CF_COUNT_MASK;
3269 if (max_insns == 0) {
3270 max_insns = CF_COUNT_MASK;
3273 gen_tb_start();
3275 tcg_clear_temp_count();
3277 do {
3278 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3279 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3280 if (bp->pc == dc->pc) {
3281 gen_exception_insn(dc, 0, EXCP_DEBUG);
3282 /* Advance PC so that clearing the breakpoint will
3283 invalidate this TB. */
3284 dc->pc += 2;
3285 goto done_generating;
3290 if (search_pc) {
3291 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3292 if (lj < j) {
3293 lj++;
3294 while (lj < j) {
3295 tcg_ctx.gen_opc_instr_start[lj++] = 0;
3298 tcg_ctx.gen_opc_pc[lj] = dc->pc;
3299 tcg_ctx.gen_opc_instr_start[lj] = 1;
3300 tcg_ctx.gen_opc_icount[lj] = num_insns;
3303 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
3304 gen_io_start();
3307 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
3308 tcg_gen_debug_insn_start(dc->pc);
3311 disas_a64_insn(env, dc);
3313 if (tcg_check_temp_count()) {
3314 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
3315 dc->pc);
3318 /* Translation stops when a conditional branch is encountered.
3319 * Otherwise the subsequent code could get translated several times.
3320 * Also stop translation when a page boundary is reached. This
3321 * ensures prefetch aborts occur at the right place.
3323 num_insns++;
3324 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
3325 !cs->singlestep_enabled &&
3326 !singlestep &&
3327 dc->pc < next_page_start &&
3328 num_insns < max_insns);
3330 if (tb->cflags & CF_LAST_IO) {
3331 gen_io_end();
3334 if (unlikely(cs->singlestep_enabled) && dc->is_jmp != DISAS_EXC) {
3335 /* Note that this means single stepping WFI doesn't halt the CPU.
3336 * For conditional branch insns this is harmless unreachable code as
3337 * gen_goto_tb() has already handled emitting the debug exception
3338 * (and thus a tb-jump is not possible when singlestepping).
3340 assert(dc->is_jmp != DISAS_TB_JUMP);
3341 if (dc->is_jmp != DISAS_JUMP) {
3342 gen_a64_set_pc_im(dc->pc);
3344 gen_exception(EXCP_DEBUG);
3345 } else {
3346 switch (dc->is_jmp) {
3347 case DISAS_NEXT:
3348 gen_goto_tb(dc, 1, dc->pc);
3349 break;
3350 default:
3351 case DISAS_UPDATE:
3352 gen_a64_set_pc_im(dc->pc);
3353 /* fall through */
3354 case DISAS_JUMP:
3355 /* indicate that the hash table must be used to find the next TB */
3356 tcg_gen_exit_tb(0);
3357 break;
3358 case DISAS_TB_JUMP:
3359 case DISAS_EXC:
3360 case DISAS_SWI:
3361 break;
3362 case DISAS_WFI:
3363 /* This is a special case because we don't want to just halt the CPU
3364 * if trying to debug across a WFI.
3366 gen_helper_wfi(cpu_env);
3367 break;
3371 done_generating:
3372 gen_tb_end(tb, num_insns);
3373 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
3375 #ifdef DEBUG_DISAS
3376 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3377 qemu_log("----------------\n");
3378 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3379 log_target_disas(env, pc_start, dc->pc - pc_start,
3380 dc->thumb | (dc->bswap_code << 1));
3381 qemu_log("\n");
3383 #endif
3384 if (search_pc) {
3385 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3386 lj++;
3387 while (lj <= j) {
3388 tcg_ctx.gen_opc_instr_start[lj++] = 0;
3390 } else {
3391 tb->size = dc->pc - pc_start;
3392 tb->icount = num_insns;