qemu/ar7.git: target/arm/translate.c
1 /*
2 * ARM translation
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "cpu.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "qemu/log.h"
30 #include "qemu/bitops.h"
31 #include "arm_ldst.h"
32 #include "hw/semihosting/semihost.h"
34 #include "exec/helper-proto.h"
35 #include "exec/helper-gen.h"
37 #include "trace-tcg.h"
38 #include "exec/log.h"
41 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
43 /* currently all emulated v5 cores are also v5TE, so don't bother */
44 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
45 #define ENABLE_ARCH_5J dc_isar_feature(aa32_jazelle, s)
46 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
52 #include "translate.h"
54 #if defined(CONFIG_USER_ONLY)
55 #define IS_USER(s) 1
56 #else
57 #define IS_USER(s) (s->user)
58 #endif
60 /* These are TCG temporaries used only by the legacy iwMMXt decoder */
61 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
62 /* These are TCG globals which alias CPUARMState fields */
63 static TCGv_i32 cpu_R[16];
64 TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
65 TCGv_i64 cpu_exclusive_addr;
66 TCGv_i64 cpu_exclusive_val;
68 #include "exec/gen-icount.h"
70 static const char * const regnames[] =
71 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
72 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
74 /* Function prototypes for gen_ functions calling Neon helpers. */
75 typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
76 TCGv_i32, TCGv_i32);
77 /* Function prototypes for gen_ functions for fix point conversions */
78 typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
80 /* initialize TCG globals. */
81 void arm_translate_init(void)
83 int i;
85 for (i = 0; i < 16; i++) {
86 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
87 offsetof(CPUARMState, regs[i]),
88 regnames[i]);
90 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
91 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
92 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
93 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
95 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
96 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
97 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
98 offsetof(CPUARMState, exclusive_val), "exclusive_val");
100 a64_translate_init();
103 /* Flags for the disas_set_da_iss info argument:
104 * lower bits hold the Rt register number, higher bits are flags.
106 typedef enum ISSInfo {
107 ISSNone = 0,
108 ISSRegMask = 0x1f,
109 ISSInvalid = (1 << 5),
110 ISSIsAcqRel = (1 << 6),
111 ISSIsWrite = (1 << 7),
112 ISSIs16Bit = (1 << 8),
113 } ISSInfo;
115 /* Save the syndrome information for a Data Abort */
116 static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo)
118 uint32_t syn;
119 int sas = memop & MO_SIZE;
120 bool sse = memop & MO_SIGN;
121 bool is_acqrel = issinfo & ISSIsAcqRel;
122 bool is_write = issinfo & ISSIsWrite;
123 bool is_16bit = issinfo & ISSIs16Bit;
124 int srt = issinfo & ISSRegMask;
126 if (issinfo & ISSInvalid) {
127 /* Some callsites want to conditionally provide ISS info,
128 * eg "only if this was not a writeback"
130 return;
133 if (srt == 15) {
134 /* For AArch32, insns where the src/dest is R15 never generate
135 * ISS information. Catching that here saves checking at all
136 * the call sites.
138 return;
141 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
142 0, 0, 0, is_write, 0, is_16bit);
143 disas_set_insn_syndrome(s, syn);
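/*
 * Illustrative sketch only (hypothetical call site, not taken from this
 * file): callers compose the ISSInfo argument by putting Rt in the low
 * bits and ORing in the flag bits defined above.
 */
static inline void example_store_iss(DisasContext *s, int rt, bool is_16bit)
{
    /* Word-sized store of Rt, optionally from a 16-bit Thumb encoding. */
    disas_set_da_iss(s, MO_UL,
                     rt | ISSIsWrite | (is_16bit ? ISSIs16Bit : ISSNone));
}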
146 static inline int get_a32_user_mem_index(DisasContext *s)
148 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
149 * insns:
150 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
151 * otherwise, access as if at PL0.
153 switch (s->mmu_idx) {
154 case ARMMMUIdx_E2: /* this one is UNPREDICTABLE */
155 case ARMMMUIdx_E10_0:
156 case ARMMMUIdx_E10_1:
157 case ARMMMUIdx_E10_1_PAN:
158 return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
159 case ARMMMUIdx_SE3:
160 case ARMMMUIdx_SE10_0:
161 case ARMMMUIdx_SE10_1:
162 case ARMMMUIdx_SE10_1_PAN:
163 return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0);
164 case ARMMMUIdx_MUser:
165 case ARMMMUIdx_MPriv:
166 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
167 case ARMMMUIdx_MUserNegPri:
168 case ARMMMUIdx_MPrivNegPri:
169 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
170 case ARMMMUIdx_MSUser:
171 case ARMMMUIdx_MSPriv:
172 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
173 case ARMMMUIdx_MSUserNegPri:
174 case ARMMMUIdx_MSPrivNegPri:
175 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
176 default:
177 g_assert_not_reached();
181 static inline TCGv_i32 load_cpu_offset(int offset)
183 TCGv_i32 tmp = tcg_temp_new_i32();
184 tcg_gen_ld_i32(tmp, cpu_env, offset);
185 return tmp;
188 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
190 static inline void store_cpu_offset(TCGv_i32 var, int offset)
192 tcg_gen_st_i32(var, cpu_env, offset);
193 tcg_temp_free_i32(var);
196 #define store_cpu_field(var, name) \
197 store_cpu_offset(var, offsetof(CPUARMState, name))
199 /* The architectural value of PC. */
200 static uint32_t read_pc(DisasContext *s)
202 return s->pc_curr + (s->thumb ? 4 : 8);
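/*
 * Illustrative sketch only: the architectural PC reads as two instructions
 * ahead of the instruction's own address, e.g. an ARM-state instruction at
 * 0x00001000 sees PC == 0x00001008, a Thumb one sees 0x00001004.
 */
static inline uint32_t example_arch_pc(uint32_t insn_addr, bool is_thumb)
{
    return insn_addr + (is_thumb ? 4 : 8);
}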
205 /* Set a variable to the value of a CPU register. */
206 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
208 if (reg == 15) {
209 tcg_gen_movi_i32(var, read_pc(s));
210 } else {
211 tcg_gen_mov_i32(var, cpu_R[reg]);
215 /* Create a new temporary and set it to the value of a CPU register. */
216 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
218 TCGv_i32 tmp = tcg_temp_new_i32();
219 load_reg_var(s, tmp, reg);
220 return tmp;
224 * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
225 * This is used for load/store for which use of PC implies (literal),
226 * or ADD that implies ADR.
228 static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
230 TCGv_i32 tmp = tcg_temp_new_i32();
232 if (reg == 15) {
233 tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
234 } else {
235 tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
237 return tmp;
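/*
 * Illustrative sketch only: for literal loads and ADR the PC operand is
 * first word-aligned, so a Thumb LDR (literal) at 0x1002 (architectural
 * PC 0x1006) uses 0x1004 as the base before the immediate is added.
 */
static inline uint32_t example_lit_base(uint32_t arch_pc, int ofs)
{
    return (arch_pc & ~3u) + ofs;
}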
240 /* Set a CPU register. The source must be a temporary and will be
241 marked as dead. */
242 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
244 if (reg == 15) {
245 /* In Thumb mode, we must ignore bit 0.
246 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
247 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
248 * We choose to ignore [1:0] in ARM mode for all architecture versions.
250 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
251 s->base.is_jmp = DISAS_JUMP;
253 tcg_gen_mov_i32(cpu_R[reg], var);
254 tcg_temp_free_i32(var);
258 * Variant of store_reg which applies v8M stack-limit checks before updating
259 * SP. If the check fails this will result in an exception being taken.
260 * We disable the stack checks for CONFIG_USER_ONLY because we have
261 * no idea what the stack limits should be in that case.
262 * If stack checking is not being done this just acts like store_reg().
264 static void store_sp_checked(DisasContext *s, TCGv_i32 var)
266 #ifndef CONFIG_USER_ONLY
267 if (s->v8m_stackcheck) {
268 gen_helper_v8m_stackcheck(cpu_env, var);
270 #endif
271 store_reg(s, 13, var);
274 /* Value extensions. */
275 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
276 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
277 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
278 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
280 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
281 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
284 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
286 TCGv_i32 tmp_mask = tcg_const_i32(mask);
287 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
288 tcg_temp_free_i32(tmp_mask);
290 /* Set NZCV flags from the high 4 bits of var. */
291 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
293 static void gen_exception_internal(int excp)
295 TCGv_i32 tcg_excp = tcg_const_i32(excp);
297 assert(excp_is_internal(excp));
298 gen_helper_exception_internal(cpu_env, tcg_excp);
299 tcg_temp_free_i32(tcg_excp);
302 static void gen_step_complete_exception(DisasContext *s)
304 /* We just completed step of an insn. Move from Active-not-pending
305 * to Active-pending, and then also take the swstep exception.
306 * This corresponds to making the (IMPDEF) choice to prioritize
307 * swstep exceptions over asynchronous exceptions taken to an exception
308 * level where debug is disabled. This choice has the advantage that
309 * we do not need to maintain internal state corresponding to the
310 * ISV/EX syndrome bits between completion of the step and generation
311 * of the exception, and our syndrome information is always correct.
313 gen_ss_advance(s);
314 gen_swstep_exception(s, 1, s->is_ldex);
315 s->base.is_jmp = DISAS_NORETURN;
318 static void gen_singlestep_exception(DisasContext *s)
320 /* Generate the right kind of exception for singlestep, which is
321 * either the architectural singlestep or EXCP_DEBUG for QEMU's
322 * gdb singlestepping.
324 if (s->ss_active) {
325 gen_step_complete_exception(s);
326 } else {
327 gen_exception_internal(EXCP_DEBUG);
331 static inline bool is_singlestepping(DisasContext *s)
333 /* Return true if we are singlestepping either because of
334 * architectural singlestep or QEMU gdbstub singlestep. This does
335 * not include the command line '-singlestep' mode which is rather
336 * misnamed as it only means "one instruction per TB" and doesn't
337 * affect the code we generate.
339 return s->base.singlestep_enabled || s->ss_active;
342 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
344 TCGv_i32 tmp1 = tcg_temp_new_i32();
345 TCGv_i32 tmp2 = tcg_temp_new_i32();
346 tcg_gen_ext16s_i32(tmp1, a);
347 tcg_gen_ext16s_i32(tmp2, b);
348 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
349 tcg_temp_free_i32(tmp2);
350 tcg_gen_sari_i32(a, a, 16);
351 tcg_gen_sari_i32(b, b, 16);
352 tcg_gen_mul_i32(b, b, a);
353 tcg_gen_mov_i32(a, tmp1);
354 tcg_temp_free_i32(tmp1);
357 /* Byteswap each halfword. */
358 static void gen_rev16(TCGv_i32 dest, TCGv_i32 var)
360 TCGv_i32 tmp = tcg_temp_new_i32();
361 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
362 tcg_gen_shri_i32(tmp, var, 8);
363 tcg_gen_and_i32(tmp, tmp, mask);
364 tcg_gen_and_i32(var, var, mask);
365 tcg_gen_shli_i32(var, var, 8);
366 tcg_gen_or_i32(dest, var, tmp);
367 tcg_temp_free_i32(mask);
368 tcg_temp_free_i32(tmp);
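/*
 * Illustrative sketch only: the same mask-and-shift halfword byteswap on a
 * host integer, swapping the bytes within each 16-bit lane independently,
 * e.g. 0xAABBCCDD -> 0xBBAADDCC.
 */
static inline uint32_t example_rev16(uint32_t x)
{
    uint32_t hi = (x >> 8) & 0x00ff00ff;    /* high byte of each lane */
    uint32_t lo = (x & 0x00ff00ff) << 8;    /* low byte of each lane  */

    return hi | lo;
}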
371 /* Byteswap low halfword and sign extend. */
372 static void gen_revsh(TCGv_i32 dest, TCGv_i32 var)
374 tcg_gen_ext16u_i32(var, var);
375 tcg_gen_bswap16_i32(var, var);
376 tcg_gen_ext16s_i32(dest, var);
379 /* Swap low and high halfwords. */
380 static void gen_swap_half(TCGv_i32 dest, TCGv_i32 var)
382 tcg_gen_rotri_i32(dest, var, 16);
385 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
386 tmp = (t0 ^ t1) & 0x8000;
387 t0 &= ~0x8000;
388 t1 &= ~0x8000;
389 t0 = (t0 + t1) ^ tmp;
392 static void gen_add16(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
394 TCGv_i32 tmp = tcg_temp_new_i32();
395 tcg_gen_xor_i32(tmp, t0, t1);
396 tcg_gen_andi_i32(tmp, tmp, 0x8000);
397 tcg_gen_andi_i32(t0, t0, ~0x8000);
398 tcg_gen_andi_i32(t1, t1, ~0x8000);
399 tcg_gen_add_i32(t0, t0, t1);
400 tcg_gen_xor_i32(dest, t0, tmp);
401 tcg_temp_free_i32(tmp);
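/*
 * Illustrative sketch only: the same lane-separated 16-bit addition on a
 * host integer.  Clearing bit 15 of both operands means the low halfword
 * sum can never carry into bit 16, and the true bit 15 of the low lane is
 * restored afterwards by XORing back the XOR of the original bit 15s.
 */
static inline uint32_t example_add16(uint32_t t0, uint32_t t1)
{
    uint32_t tmp = (t0 ^ t1) & 0x8000;

    t0 &= ~0x8000u;
    t1 &= ~0x8000u;
    return (t0 + t1) ^ tmp;
}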
404 /* Set N and Z flags from var. */
405 static inline void gen_logic_CC(TCGv_i32 var)
407 tcg_gen_mov_i32(cpu_NF, var);
408 tcg_gen_mov_i32(cpu_ZF, var);
411 /* dest = T0 + T1 + CF. */
412 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
414 tcg_gen_add_i32(dest, t0, t1);
415 tcg_gen_add_i32(dest, dest, cpu_CF);
418 /* dest = T0 - T1 + CF - 1. */
419 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
421 tcg_gen_sub_i32(dest, t0, t1);
422 tcg_gen_add_i32(dest, dest, cpu_CF);
423 tcg_gen_subi_i32(dest, dest, 1);
426 /* dest = T0 + T1. Compute C, N, V and Z flags */
427 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
429 TCGv_i32 tmp = tcg_temp_new_i32();
430 tcg_gen_movi_i32(tmp, 0);
431 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
432 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
433 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
434 tcg_gen_xor_i32(tmp, t0, t1);
435 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
436 tcg_temp_free_i32(tmp);
437 tcg_gen_mov_i32(dest, cpu_NF);
440 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
441 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
443 TCGv_i32 tmp = tcg_temp_new_i32();
444 if (TCG_TARGET_HAS_add2_i32) {
445 tcg_gen_movi_i32(tmp, 0);
446 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
447 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
448 } else {
449 TCGv_i64 q0 = tcg_temp_new_i64();
450 TCGv_i64 q1 = tcg_temp_new_i64();
451 tcg_gen_extu_i32_i64(q0, t0);
452 tcg_gen_extu_i32_i64(q1, t1);
453 tcg_gen_add_i64(q0, q0, q1);
454 tcg_gen_extu_i32_i64(q1, cpu_CF);
455 tcg_gen_add_i64(q0, q0, q1);
456 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
457 tcg_temp_free_i64(q0);
458 tcg_temp_free_i64(q1);
460 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
461 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
462 tcg_gen_xor_i32(tmp, t0, t1);
463 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
464 tcg_temp_free_i32(tmp);
465 tcg_gen_mov_i32(dest, cpu_NF);
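/*
 * Illustrative sketch only: the ADC flag computation above, expressed with
 * a 64-bit widening add on host integers instead of the add2 or widening
 * TCG sequences.  NF/ZF hold the 32-bit result (N is its sign bit, Z means
 * "ZF == 0"), CF is the carry out of bit 31, and V is the sign bit of VF.
 */
static inline uint32_t example_adc_flags(uint32_t t0, uint32_t t1,
                                         uint32_t carry_in, uint32_t *nf,
                                         uint32_t *zf, uint32_t *cf,
                                         uint32_t *vf)
{
    uint64_t sum = (uint64_t)t0 + t1 + (carry_in & 1);
    uint32_t res = (uint32_t)sum;

    *nf = res;
    *zf = res;
    *cf = (uint32_t)(sum >> 32);
    /* Overflow: the operands agree in sign but the result does not. */
    *vf = (res ^ t0) & ~(t0 ^ t1);
    return res;
}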
468 /* dest = T0 - T1. Compute C, N, V and Z flags */
469 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
471 TCGv_i32 tmp;
472 tcg_gen_sub_i32(cpu_NF, t0, t1);
473 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
474 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
475 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
476 tmp = tcg_temp_new_i32();
477 tcg_gen_xor_i32(tmp, t0, t1);
478 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
479 tcg_temp_free_i32(tmp);
480 tcg_gen_mov_i32(dest, cpu_NF);
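/*
 * Illustrative sketch only: SUBS flag computation on host integers.  The
 * AArch32 carry flag for a subtraction means "no borrow occurred", i.e.
 * t0 >= t1 unsigned, which is exactly what the TCG_COND_GEU setcond above
 * produces.
 */
static inline uint32_t example_sub_flags(uint32_t t0, uint32_t t1,
                                         uint32_t *nf, uint32_t *zf,
                                         uint32_t *cf, uint32_t *vf)
{
    uint32_t res = t0 - t1;

    *nf = res;
    *zf = res;
    *cf = (t0 >= t1);               /* C == NOT borrow */
    *vf = (res ^ t0) & (t0 ^ t1);   /* operand signs differ and the result
                                     * sign differs from t0 */
    return res;
}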
483 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
484 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
486 TCGv_i32 tmp = tcg_temp_new_i32();
487 tcg_gen_not_i32(tmp, t1);
488 gen_adc_CC(dest, t0, tmp);
489 tcg_temp_free_i32(tmp);
492 #define GEN_SHIFT(name) \
493 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
495 TCGv_i32 tmp1, tmp2, tmp3; \
496 tmp1 = tcg_temp_new_i32(); \
497 tcg_gen_andi_i32(tmp1, t1, 0xff); \
498 tmp2 = tcg_const_i32(0); \
499 tmp3 = tcg_const_i32(0x1f); \
500 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
501 tcg_temp_free_i32(tmp3); \
502 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
503 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
504 tcg_temp_free_i32(tmp2); \
505 tcg_temp_free_i32(tmp1); \
507 GEN_SHIFT(shl)
508 GEN_SHIFT(shr)
509 #undef GEN_SHIFT
511 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
513 TCGv_i32 tmp1, tmp2;
514 tmp1 = tcg_temp_new_i32();
515 tcg_gen_andi_i32(tmp1, t1, 0xff);
516 tmp2 = tcg_const_i32(0x1f);
517 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
518 tcg_temp_free_i32(tmp2);
519 tcg_gen_sar_i32(dest, t0, tmp1);
520 tcg_temp_free_i32(tmp1);
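/*
 * Illustrative sketch only, assuming the host performs arithmetic right
 * shifts on signed values (as QEMU requires): register-specified shifts
 * use the low byte of the shift register, LSL/LSR by 32 or more produce 0
 * (the movcond above), and ASR clamps the amount to 31 so that larger
 * shifts replicate the sign bit.
 */
static inline uint32_t example_lsr_reg(uint32_t value, uint32_t shift_reg)
{
    uint32_t amount = shift_reg & 0xff;

    return amount >= 32 ? 0 : value >> amount;
}

static inline int32_t example_asr_reg(int32_t value, uint32_t shift_reg)
{
    uint32_t amount = shift_reg & 0xff;

    return value >> (amount > 31 ? 31 : amount);
}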
523 static void shifter_out_im(TCGv_i32 var, int shift)
525 tcg_gen_extract_i32(cpu_CF, var, shift, 1);
528 /* Shift by immediate. Includes special handling for shift == 0. */
529 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
530 int shift, int flags)
532 switch (shiftop) {
533 case 0: /* LSL */
534 if (shift != 0) {
535 if (flags)
536 shifter_out_im(var, 32 - shift);
537 tcg_gen_shli_i32(var, var, shift);
539 break;
540 case 1: /* LSR */
541 if (shift == 0) {
542 if (flags) {
543 tcg_gen_shri_i32(cpu_CF, var, 31);
545 tcg_gen_movi_i32(var, 0);
546 } else {
547 if (flags)
548 shifter_out_im(var, shift - 1);
549 tcg_gen_shri_i32(var, var, shift);
551 break;
552 case 2: /* ASR */
553 if (shift == 0)
554 shift = 32;
555 if (flags)
556 shifter_out_im(var, shift - 1);
557 if (shift == 32)
558 shift = 31;
559 tcg_gen_sari_i32(var, var, shift);
560 break;
561 case 3: /* ROR/RRX */
562 if (shift != 0) {
563 if (flags)
564 shifter_out_im(var, shift - 1);
565 tcg_gen_rotri_i32(var, var, shift); break;
566 } else {
567 TCGv_i32 tmp = tcg_temp_new_i32();
568 tcg_gen_shli_i32(tmp, cpu_CF, 31);
569 if (flags)
570 shifter_out_im(var, 0);
571 tcg_gen_shri_i32(var, var, 1);
572 tcg_gen_or_i32(var, var, tmp);
573 tcg_temp_free_i32(tmp);
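/*
 * Illustrative sketch only: in the immediate-shift encoding a count of 0
 * is re-purposed, which is what the special cases above implement:
 * LSR #0 encodes LSR #32, ASR #0 encodes ASR #32, and ROR #0 encodes RRX,
 * a rotate right by one through the carry flag.
 */
static inline uint32_t example_rrx(uint32_t value, uint32_t carry_in)
{
    /* carry_in is 0 or 1; it becomes the new bit 31 */
    return (carry_in << 31) | (value >> 1);
}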
578 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
579 TCGv_i32 shift, int flags)
581 if (flags) {
582 switch (shiftop) {
583 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
584 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
585 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
586 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
588 } else {
589 switch (shiftop) {
590 case 0:
591 gen_shl(var, var, shift);
592 break;
593 case 1:
594 gen_shr(var, var, shift);
595 break;
596 case 2:
597 gen_sar(var, var, shift);
598 break;
599 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
600 tcg_gen_rotr_i32(var, var, shift); break;
603 tcg_temp_free_i32(shift);
607 * Generate a conditional based on ARM condition code cc.
608 * This is common between ARM and Aarch64 targets.
610 void arm_test_cc(DisasCompare *cmp, int cc)
612 TCGv_i32 value;
613 TCGCond cond;
614 bool global = true;
616 switch (cc) {
617 case 0: /* eq: Z */
618 case 1: /* ne: !Z */
619 cond = TCG_COND_EQ;
620 value = cpu_ZF;
621 break;
623 case 2: /* cs: C */
624 case 3: /* cc: !C */
625 cond = TCG_COND_NE;
626 value = cpu_CF;
627 break;
629 case 4: /* mi: N */
630 case 5: /* pl: !N */
631 cond = TCG_COND_LT;
632 value = cpu_NF;
633 break;
635 case 6: /* vs: V */
636 case 7: /* vc: !V */
637 cond = TCG_COND_LT;
638 value = cpu_VF;
639 break;
641 case 8: /* hi: C && !Z */
642 case 9: /* ls: !C || Z -> !(C && !Z) */
643 cond = TCG_COND_NE;
644 value = tcg_temp_new_i32();
645 global = false;
646 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
647 ZF is non-zero for !Z; so AND the two subexpressions. */
648 tcg_gen_neg_i32(value, cpu_CF);
649 tcg_gen_and_i32(value, value, cpu_ZF);
650 break;
652 case 10: /* ge: N == V -> N ^ V == 0 */
653 case 11: /* lt: N != V -> N ^ V != 0 */
654 /* Since we're only interested in the sign bit, == 0 is >= 0. */
655 cond = TCG_COND_GE;
656 value = tcg_temp_new_i32();
657 global = false;
658 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
659 break;
661 case 12: /* gt: !Z && N == V */
662 case 13: /* le: Z || N != V */
663 cond = TCG_COND_NE;
664 value = tcg_temp_new_i32();
665 global = false;
666 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
667 * the sign bit then AND with ZF to yield the result. */
668 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
669 tcg_gen_sari_i32(value, value, 31);
670 tcg_gen_andc_i32(value, cpu_ZF, value);
671 break;
673 case 14: /* always */
674 case 15: /* always */
675 /* Use the ALWAYS condition, which will fold early.
676 * It doesn't matter what we use for the value. */
677 cond = TCG_COND_ALWAYS;
678 value = cpu_ZF;
679 goto no_invert;
681 default:
682 fprintf(stderr, "Bad condition code 0x%x\n", cc);
683 abort();
686 if (cc & 1) {
687 cond = tcg_invert_cond(cond);
690 no_invert:
691 cmp->cond = cond;
692 cmp->value = value;
693 cmp->value_global = global;
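/*
 * Illustrative sketch only: the "hi" (C && !Z) case above avoids a branch
 * by turning CF (0 or 1) into an all-zeroes/all-ones mask via negation and
 * ANDing it with ZF, which is non-zero exactly when Z is clear.
 */
static inline bool example_cond_hi(uint32_t cf, uint32_t zf)
{
    return ((0u - cf) & zf) != 0;
}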
696 void arm_free_cc(DisasCompare *cmp)
698 if (!cmp->value_global) {
699 tcg_temp_free_i32(cmp->value);
703 void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
705 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
708 void arm_gen_test_cc(int cc, TCGLabel *label)
710 DisasCompare cmp;
711 arm_test_cc(&cmp, cc);
712 arm_jump_cc(&cmp, label);
713 arm_free_cc(&cmp);
716 static inline void gen_set_condexec(DisasContext *s)
718 if (s->condexec_mask) {
719 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
720 TCGv_i32 tmp = tcg_temp_new_i32();
721 tcg_gen_movi_i32(tmp, val);
722 store_cpu_field(tmp, condexec_bits);
726 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
728 tcg_gen_movi_i32(cpu_R[15], val);
731 /* Set PC and Thumb state from var. var is marked as dead. */
732 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
734 s->base.is_jmp = DISAS_JUMP;
735 tcg_gen_andi_i32(cpu_R[15], var, ~1);
736 tcg_gen_andi_i32(var, var, 1);
737 store_cpu_field(var, thumb);
741 * Set PC and Thumb state from var. var is marked as dead.
742 * For M-profile CPUs, include logic to detect exception-return
743 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
744 * and BX reg, and no others, and happens only for code in Handler mode.
745 * The Security Extension also requires us to check for the FNC_RETURN
746 * which signals a function return from non-secure state; this can happen
747 * in both Handler and Thread mode.
748 * To avoid having to do multiple comparisons in inline generated code,
749 * we make the check we do here loose, so it will match for EXC_RETURN
750 * in Thread mode. For system emulation do_v7m_exception_exit() checks
751 * for these spurious cases and returns without doing anything (giving
752 * the same behaviour as for a branch to a non-magic address).
754 * In linux-user mode it is unclear what the right behaviour for an
755 * attempted FNC_RETURN should be, because in real hardware this will go
756 * directly to Secure code (ie not the Linux kernel) which will then treat
757 * the error in any way it chooses. For QEMU we opt to make the FNC_RETURN
758 * attempt behave the way it would on a CPU without the security extension,
759 * which is to say "like a normal branch". That means we can simply treat
760 * all branches as normal with no magic address behaviour.
762 static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
764 /* Generate the same code here as for a simple bx, but flag via
765 * s->base.is_jmp that we need to do the rest of the work later.
767 gen_bx(s, var);
768 #ifndef CONFIG_USER_ONLY
769 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
770 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
771 s->base.is_jmp = DISAS_BX_EXCRET;
773 #endif
776 static inline void gen_bx_excret_final_code(DisasContext *s)
778 /* Generate the code to finish possible exception return and end the TB */
779 TCGLabel *excret_label = gen_new_label();
780 uint32_t min_magic;
782 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
783 /* Covers FNC_RETURN and EXC_RETURN magic */
784 min_magic = FNC_RETURN_MIN_MAGIC;
785 } else {
786 /* EXC_RETURN magic only */
787 min_magic = EXC_RETURN_MIN_MAGIC;
790 /* Is the new PC value in the magic range indicating exception return? */
791 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
792 /* No: end the TB as we would for a DISAS_JMP */
793 if (is_singlestepping(s)) {
794 gen_singlestep_exception(s);
795 } else {
796 tcg_gen_exit_tb(NULL, 0);
798 gen_set_label(excret_label);
799 /* Yes: this is an exception return.
800 * At this point in runtime env->regs[15] and env->thumb will hold
801 * the exception-return magic number, which do_v7m_exception_exit()
802 * will read. Nothing else will be able to see those values because
803 * the cpu-exec main loop guarantees that we will always go straight
804 * from raising the exception to the exception-handling code.
806 * gen_ss_advance(s) does nothing on M profile currently but
807 * calling it is conceptually the right thing as we have executed
808 * this instruction (compare SWI, HVC, SMC handling).
810 gen_ss_advance(s);
811 gen_exception_internal(EXCP_EXCEPTION_EXIT);
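/*
 * Illustrative sketch only: the brcondi above reduces the magic-address
 * test to a single unsigned comparison, because the FNC_RETURN range sits
 * below the EXC_RETURN range, so one minimum value covers both when the
 * Security Extension is present.
 */
static inline bool example_is_magic_return(uint32_t new_pc, bool have_security)
{
    uint32_t min_magic = have_security ? FNC_RETURN_MIN_MAGIC
                                       : EXC_RETURN_MIN_MAGIC;

    return new_pc >= min_magic;
}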
814 static inline void gen_bxns(DisasContext *s, int rm)
816 TCGv_i32 var = load_reg(s, rm);
818 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
819 * we need to sync state before calling it, but:
820 * - we don't need to do gen_set_pc_im() because the bxns helper will
821 * always set the PC itself
822 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
823 * unless it's outside an IT block or the last insn in an IT block,
824 * so we know that condexec == 0 (already set at the top of the TB)
825 * is correct in the non-UNPREDICTABLE cases, and we can choose
826 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
828 gen_helper_v7m_bxns(cpu_env, var);
829 tcg_temp_free_i32(var);
830 s->base.is_jmp = DISAS_EXIT;
833 static inline void gen_blxns(DisasContext *s, int rm)
835 TCGv_i32 var = load_reg(s, rm);
837 /* We don't need to sync condexec state, for the same reason as bxns.
838 * We do however need to set the PC, because the blxns helper reads it.
839 * The blxns helper may throw an exception.
841 gen_set_pc_im(s, s->base.pc_next);
842 gen_helper_v7m_blxns(cpu_env, var);
843 tcg_temp_free_i32(var);
844 s->base.is_jmp = DISAS_EXIT;
847 /* Variant of store_reg which uses branch&exchange logic when storing
848 to r15 in ARM architecture v7 and above. The source must be a temporary
849 and will be marked as dead. */
850 static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
852 if (reg == 15 && ENABLE_ARCH_7) {
853 gen_bx(s, var);
854 } else {
855 store_reg(s, reg, var);
859 /* Variant of store_reg which uses branch&exchange logic when storing
860 * to r15 in ARM architecture v5T and above. This is used for storing
861 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
862 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
863 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
865 if (reg == 15 && ENABLE_ARCH_5) {
866 gen_bx_excret(s, var);
867 } else {
868 store_reg(s, reg, var);
872 #ifdef CONFIG_USER_ONLY
873 #define IS_USER_ONLY 1
874 #else
875 #define IS_USER_ONLY 0
876 #endif
878 /* Abstractions of "generate code to do a guest load/store for
879 * AArch32", where a vaddr is always 32 bits (and is zero
880 * extended if we're a 64 bit core) and data is also
881 * 32 bits unless specifically doing a 64 bit access.
882 * These functions work like tcg_gen_qemu_{ld,st}* except
883 * that the address argument is TCGv_i32 rather than TCGv.
886 static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
888 TCGv addr = tcg_temp_new();
889 tcg_gen_extu_i32_tl(addr, a32);
891 /* Not needed for user-mode BE32, where we use MO_BE instead. */
892 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
893 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
895 return addr;
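/*
 * Illustrative sketch only: with SCTLR.B set (legacy BE32), sub-word
 * accesses are big-endianized by XORing the low address bits inside the
 * containing word, as the xori above does: a byte at 0x1001 is fetched
 * from 0x1002, a halfword at 0x1002 from 0x1000, and word or larger
 * accesses are untouched.
 */
static inline uint32_t example_be32_adjust(uint32_t addr, unsigned size_log2)
{
    /* size_log2: 0 = byte, 1 = halfword, 2+ = word or larger */
    return size_log2 < 2 ? addr ^ (4 - (1u << size_log2)) : addr;
}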
898 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
899 int index, MemOp opc)
901 TCGv addr;
903 if (arm_dc_feature(s, ARM_FEATURE_M) &&
904 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
905 opc |= MO_ALIGN;
908 addr = gen_aa32_addr(s, a32, opc);
909 tcg_gen_qemu_ld_i32(val, addr, index, opc);
910 tcg_temp_free(addr);
913 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
914 int index, MemOp opc)
916 TCGv addr;
918 if (arm_dc_feature(s, ARM_FEATURE_M) &&
919 !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
920 opc |= MO_ALIGN;
923 addr = gen_aa32_addr(s, a32, opc);
924 tcg_gen_qemu_st_i32(val, addr, index, opc);
925 tcg_temp_free(addr);
928 #define DO_GEN_LD(SUFF, OPC) \
929 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
930 TCGv_i32 a32, int index) \
932 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
935 #define DO_GEN_ST(SUFF, OPC) \
936 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
937 TCGv_i32 a32, int index) \
939 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
942 static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
944 /* Not needed for user-mode BE32, where we use MO_BE instead. */
945 if (!IS_USER_ONLY && s->sctlr_b) {
946 tcg_gen_rotri_i64(val, val, 32);
950 static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
951 int index, MemOp opc)
953 TCGv addr = gen_aa32_addr(s, a32, opc);
954 tcg_gen_qemu_ld_i64(val, addr, index, opc);
955 gen_aa32_frob64(s, val);
956 tcg_temp_free(addr);
959 static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
960 TCGv_i32 a32, int index)
962 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
965 static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
966 int index, MemOp opc)
968 TCGv addr = gen_aa32_addr(s, a32, opc);
970 /* Not needed for user-mode BE32, where we use MO_BE instead. */
971 if (!IS_USER_ONLY && s->sctlr_b) {
972 TCGv_i64 tmp = tcg_temp_new_i64();
973 tcg_gen_rotri_i64(tmp, val, 32);
974 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
975 tcg_temp_free_i64(tmp);
976 } else {
977 tcg_gen_qemu_st_i64(val, addr, index, opc);
979 tcg_temp_free(addr);
982 static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
983 TCGv_i32 a32, int index)
985 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
988 DO_GEN_LD(8u, MO_UB)
989 DO_GEN_LD(16u, MO_UW)
990 DO_GEN_LD(32u, MO_UL)
991 DO_GEN_ST(8, MO_UB)
992 DO_GEN_ST(16, MO_UW)
993 DO_GEN_ST(32, MO_UL)
995 static inline void gen_hvc(DisasContext *s, int imm16)
997 /* The pre HVC helper handles cases when HVC gets trapped
998 * as an undefined insn by runtime configuration (ie before
999 * the insn really executes).
1001 gen_set_pc_im(s, s->pc_curr);
1002 gen_helper_pre_hvc(cpu_env);
1003 /* Otherwise we will treat this as a real exception which
1004 * happens after execution of the insn. (The distinction matters
1005 * for the PC value reported to the exception handler and also
1006 * for single stepping.)
1008 s->svc_imm = imm16;
1009 gen_set_pc_im(s, s->base.pc_next);
1010 s->base.is_jmp = DISAS_HVC;
1013 static inline void gen_smc(DisasContext *s)
1015 /* As with HVC, we may take an exception either before or after
1016 * the insn executes.
1018 TCGv_i32 tmp;
1020 gen_set_pc_im(s, s->pc_curr);
1021 tmp = tcg_const_i32(syn_aa32_smc());
1022 gen_helper_pre_smc(cpu_env, tmp);
1023 tcg_temp_free_i32(tmp);
1024 gen_set_pc_im(s, s->base.pc_next);
1025 s->base.is_jmp = DISAS_SMC;
1028 static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
1030 gen_set_condexec(s);
1031 gen_set_pc_im(s, pc);
1032 gen_exception_internal(excp);
1033 s->base.is_jmp = DISAS_NORETURN;
1036 static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
1037 int syn, uint32_t target_el)
1039 gen_set_condexec(s);
1040 gen_set_pc_im(s, pc);
1041 gen_exception(excp, syn, target_el);
1042 s->base.is_jmp = DISAS_NORETURN;
1045 static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
1047 TCGv_i32 tcg_syn;
1049 gen_set_condexec(s);
1050 gen_set_pc_im(s, s->pc_curr);
1051 tcg_syn = tcg_const_i32(syn);
1052 gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
1053 tcg_temp_free_i32(tcg_syn);
1054 s->base.is_jmp = DISAS_NORETURN;
1057 static void unallocated_encoding(DisasContext *s)
1059 /* Unallocated and reserved encodings are uncategorized */
1060 gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
1061 default_exception_el(s));
1064 /* Force a TB lookup after an instruction that changes the CPU state. */
1065 static inline void gen_lookup_tb(DisasContext *s)
1067 tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
1068 s->base.is_jmp = DISAS_EXIT;
1071 static inline void gen_hlt(DisasContext *s, int imm)
1073 /* HLT. This has two purposes.
1074 * Architecturally, it is an external halting debug instruction.
1075 * Since QEMU doesn't implement external debug, we treat this as
1076 * the architecture requires when halting debug is disabled: it will UNDEF.
1077 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1078 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1079 * must trigger semihosting even for ARMv7 and earlier, where
1080 * HLT was an undefined encoding.
1081 * In system mode, we don't allow userspace access to
1082 * semihosting, to provide some semblance of security
1083 * (and for consistency with our 32-bit semihosting).
1085 if (semihosting_enabled() &&
1086 #ifndef CONFIG_USER_ONLY
1087 s->current_el != 0 &&
1088 #endif
1089 (imm == (s->thumb ? 0x3c : 0xf000))) {
1090 gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
1091 return;
1094 unallocated_encoding(s);
1097 static inline long vfp_reg_offset(bool dp, unsigned reg)
1099 if (dp) {
1100 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
1101 } else {
1102 long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
1103 if (reg & 1) {
1104 ofs += offsetof(CPU_DoubleU, l.upper);
1105 } else {
1106 ofs += offsetof(CPU_DoubleU, l.lower);
1108 return ofs;
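/*
 * Illustrative sketch only: VFP registers live inside the zregs array, so
 * D<n> is zregs[n >> 1].d[n & 1] and S<n> is one 32-bit half of
 * zregs[n >> 2].d[(n >> 1) & 1]; e.g. S5 is the upper half of D2.
 */
static inline void example_sreg_location(unsigned sreg, unsigned *dreg,
                                         bool *is_upper_half)
{
    *dreg = sreg >> 1;           /* S<2n> and S<2n+1> alias D<n> */
    *is_upper_half = sreg & 1;   /* odd S registers are the high 32 bits */
}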
1112 /* Return the offset of a 32-bit piece of a NEON register.
1113 zero is the least significant end of the register. */
1114 static inline long
1115 neon_reg_offset (int reg, int n)
1117 int sreg;
1118 sreg = reg * 2 + n;
1119 return vfp_reg_offset(0, sreg);
1122 static TCGv_i32 neon_load_reg(int reg, int pass)
1124 TCGv_i32 tmp = tcg_temp_new_i32();
1125 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1126 return tmp;
1129 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1131 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1132 tcg_temp_free_i32(var);
1135 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1137 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1140 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1142 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1145 static inline void neon_load_reg32(TCGv_i32 var, int reg)
1147 tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
1150 static inline void neon_store_reg32(TCGv_i32 var, int reg)
1152 tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
1155 static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1157 TCGv_ptr ret = tcg_temp_new_ptr();
1158 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1159 return ret;
1162 #define ARM_CP_RW_BIT (1 << 20)
1164 /* Include the VFP and Neon decoders */
1165 #include "decode-m-nocp.c.inc"
1166 #include "translate-vfp.c.inc"
1167 #include "translate-neon.c.inc"
1169 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1171 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1174 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1176 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1179 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1181 TCGv_i32 var = tcg_temp_new_i32();
1182 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1183 return var;
1186 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1188 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1189 tcg_temp_free_i32(var);
1192 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1194 iwmmxt_store_reg(cpu_M0, rn);
1197 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1199 iwmmxt_load_reg(cpu_M0, rn);
1202 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1204 iwmmxt_load_reg(cpu_V1, rn);
1205 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1208 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1210 iwmmxt_load_reg(cpu_V1, rn);
1211 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1214 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1216 iwmmxt_load_reg(cpu_V1, rn);
1217 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1220 #define IWMMXT_OP(name) \
1221 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1223 iwmmxt_load_reg(cpu_V1, rn); \
1224 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1227 #define IWMMXT_OP_ENV(name) \
1228 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1230 iwmmxt_load_reg(cpu_V1, rn); \
1231 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1234 #define IWMMXT_OP_ENV_SIZE(name) \
1235 IWMMXT_OP_ENV(name##b) \
1236 IWMMXT_OP_ENV(name##w) \
1237 IWMMXT_OP_ENV(name##l)
1239 #define IWMMXT_OP_ENV1(name) \
1240 static inline void gen_op_iwmmxt_##name##_M0(void) \
1242 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1245 IWMMXT_OP(maddsq)
1246 IWMMXT_OP(madduq)
1247 IWMMXT_OP(sadb)
1248 IWMMXT_OP(sadw)
1249 IWMMXT_OP(mulslw)
1250 IWMMXT_OP(mulshw)
1251 IWMMXT_OP(mululw)
1252 IWMMXT_OP(muluhw)
1253 IWMMXT_OP(macsw)
1254 IWMMXT_OP(macuw)
1256 IWMMXT_OP_ENV_SIZE(unpackl)
1257 IWMMXT_OP_ENV_SIZE(unpackh)
1259 IWMMXT_OP_ENV1(unpacklub)
1260 IWMMXT_OP_ENV1(unpackluw)
1261 IWMMXT_OP_ENV1(unpacklul)
1262 IWMMXT_OP_ENV1(unpackhub)
1263 IWMMXT_OP_ENV1(unpackhuw)
1264 IWMMXT_OP_ENV1(unpackhul)
1265 IWMMXT_OP_ENV1(unpacklsb)
1266 IWMMXT_OP_ENV1(unpacklsw)
1267 IWMMXT_OP_ENV1(unpacklsl)
1268 IWMMXT_OP_ENV1(unpackhsb)
1269 IWMMXT_OP_ENV1(unpackhsw)
1270 IWMMXT_OP_ENV1(unpackhsl)
1272 IWMMXT_OP_ENV_SIZE(cmpeq)
1273 IWMMXT_OP_ENV_SIZE(cmpgtu)
1274 IWMMXT_OP_ENV_SIZE(cmpgts)
1276 IWMMXT_OP_ENV_SIZE(mins)
1277 IWMMXT_OP_ENV_SIZE(minu)
1278 IWMMXT_OP_ENV_SIZE(maxs)
1279 IWMMXT_OP_ENV_SIZE(maxu)
1281 IWMMXT_OP_ENV_SIZE(subn)
1282 IWMMXT_OP_ENV_SIZE(addn)
1283 IWMMXT_OP_ENV_SIZE(subu)
1284 IWMMXT_OP_ENV_SIZE(addu)
1285 IWMMXT_OP_ENV_SIZE(subs)
1286 IWMMXT_OP_ENV_SIZE(adds)
1288 IWMMXT_OP_ENV(avgb0)
1289 IWMMXT_OP_ENV(avgb1)
1290 IWMMXT_OP_ENV(avgw0)
1291 IWMMXT_OP_ENV(avgw1)
1293 IWMMXT_OP_ENV(packuw)
1294 IWMMXT_OP_ENV(packul)
1295 IWMMXT_OP_ENV(packuq)
1296 IWMMXT_OP_ENV(packsw)
1297 IWMMXT_OP_ENV(packsl)
1298 IWMMXT_OP_ENV(packsq)
1300 static void gen_op_iwmmxt_set_mup(void)
1302 TCGv_i32 tmp;
1303 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1304 tcg_gen_ori_i32(tmp, tmp, 2);
1305 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1308 static void gen_op_iwmmxt_set_cup(void)
1310 TCGv_i32 tmp;
1311 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1312 tcg_gen_ori_i32(tmp, tmp, 1);
1313 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1316 static void gen_op_iwmmxt_setpsr_nz(void)
1318 TCGv_i32 tmp = tcg_temp_new_i32();
1319 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1320 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1323 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1325 iwmmxt_load_reg(cpu_V1, rn);
1326 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1327 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1330 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1331 TCGv_i32 dest)
1333 int rd;
1334 uint32_t offset;
1335 TCGv_i32 tmp;
1337 rd = (insn >> 16) & 0xf;
1338 tmp = load_reg(s, rd);
1340 offset = (insn & 0xff) << ((insn >> 7) & 2);
1341 if (insn & (1 << 24)) {
1342 /* Pre indexed */
1343 if (insn & (1 << 23))
1344 tcg_gen_addi_i32(tmp, tmp, offset);
1345 else
1346 tcg_gen_addi_i32(tmp, tmp, -offset);
1347 tcg_gen_mov_i32(dest, tmp);
1348 if (insn & (1 << 21))
1349 store_reg(s, rd, tmp);
1350 else
1351 tcg_temp_free_i32(tmp);
1352 } else if (insn & (1 << 21)) {
1353 /* Post indexed */
1354 tcg_gen_mov_i32(dest, tmp);
1355 if (insn & (1 << 23))
1356 tcg_gen_addi_i32(tmp, tmp, offset);
1357 else
1358 tcg_gen_addi_i32(tmp, tmp, -offset);
1359 store_reg(s, rd, tmp);
1360 } else if (!(insn & (1 << 23)))
1361 return 1;
1362 return 0;
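/*
 * Illustrative sketch only: the iwMMXt transfer offset decoded above is an
 * 8-bit immediate scaled by 4 when insn bit 8 is set; (insn >> 7) & 2
 * yields a shift of 0 or 2, i.e. imm8 or imm8 * 4.
 */
static inline uint32_t example_iwmmxt_offset(uint32_t insn)
{
    return (insn & 0xff) << ((insn >> 7) & 2);
}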
1365 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1367 int rd = (insn >> 0) & 0xf;
1368 TCGv_i32 tmp;
1370 if (insn & (1 << 8)) {
1371 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1372 return 1;
1373 } else {
1374 tmp = iwmmxt_load_creg(rd);
1376 } else {
1377 tmp = tcg_temp_new_i32();
1378 iwmmxt_load_reg(cpu_V0, rd);
1379 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1381 tcg_gen_andi_i32(tmp, tmp, mask);
1382 tcg_gen_mov_i32(dest, tmp);
1383 tcg_temp_free_i32(tmp);
1384 return 0;
1387 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1388 (ie. an undefined instruction). */
1389 static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1391 int rd, wrd;
1392 int rdhi, rdlo, rd0, rd1, i;
1393 TCGv_i32 addr;
1394 TCGv_i32 tmp, tmp2, tmp3;
1396 if ((insn & 0x0e000e00) == 0x0c000000) {
1397 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1398 wrd = insn & 0xf;
1399 rdlo = (insn >> 12) & 0xf;
1400 rdhi = (insn >> 16) & 0xf;
1401 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1402 iwmmxt_load_reg(cpu_V0, wrd);
1403 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1404 tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
1405 } else { /* TMCRR */
1406 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1407 iwmmxt_store_reg(cpu_V0, wrd);
1408 gen_op_iwmmxt_set_mup();
1410 return 0;
1413 wrd = (insn >> 12) & 0xf;
1414 addr = tcg_temp_new_i32();
1415 if (gen_iwmmxt_address(s, insn, addr)) {
1416 tcg_temp_free_i32(addr);
1417 return 1;
1419 if (insn & ARM_CP_RW_BIT) {
1420 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1421 tmp = tcg_temp_new_i32();
1422 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1423 iwmmxt_store_creg(wrd, tmp);
1424 } else {
1425 i = 1;
1426 if (insn & (1 << 8)) {
1427 if (insn & (1 << 22)) { /* WLDRD */
1428 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1429 i = 0;
1430 } else { /* WLDRW wRd */
1431 tmp = tcg_temp_new_i32();
1432 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1434 } else {
1435 tmp = tcg_temp_new_i32();
1436 if (insn & (1 << 22)) { /* WLDRH */
1437 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1438 } else { /* WLDRB */
1439 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1442 if (i) {
1443 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1444 tcg_temp_free_i32(tmp);
1446 gen_op_iwmmxt_movq_wRn_M0(wrd);
1448 } else {
1449 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1450 tmp = iwmmxt_load_creg(wrd);
1451 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1452 } else {
1453 gen_op_iwmmxt_movq_M0_wRn(wrd);
1454 tmp = tcg_temp_new_i32();
1455 if (insn & (1 << 8)) {
1456 if (insn & (1 << 22)) { /* WSTRD */
1457 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1458 } else { /* WSTRW wRd */
1459 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1460 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1462 } else {
1463 if (insn & (1 << 22)) { /* WSTRH */
1464 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1465 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1466 } else { /* WSTRB */
1467 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1468 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1472 tcg_temp_free_i32(tmp);
1474 tcg_temp_free_i32(addr);
1475 return 0;
1478 if ((insn & 0x0f000000) != 0x0e000000)
1479 return 1;
1481 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1482 case 0x000: /* WOR */
1483 wrd = (insn >> 12) & 0xf;
1484 rd0 = (insn >> 0) & 0xf;
1485 rd1 = (insn >> 16) & 0xf;
1486 gen_op_iwmmxt_movq_M0_wRn(rd0);
1487 gen_op_iwmmxt_orq_M0_wRn(rd1);
1488 gen_op_iwmmxt_setpsr_nz();
1489 gen_op_iwmmxt_movq_wRn_M0(wrd);
1490 gen_op_iwmmxt_set_mup();
1491 gen_op_iwmmxt_set_cup();
1492 break;
1493 case 0x011: /* TMCR */
1494 if (insn & 0xf)
1495 return 1;
1496 rd = (insn >> 12) & 0xf;
1497 wrd = (insn >> 16) & 0xf;
1498 switch (wrd) {
1499 case ARM_IWMMXT_wCID:
1500 case ARM_IWMMXT_wCASF:
1501 break;
1502 case ARM_IWMMXT_wCon:
1503 gen_op_iwmmxt_set_cup();
1504 /* Fall through. */
1505 case ARM_IWMMXT_wCSSF:
1506 tmp = iwmmxt_load_creg(wrd);
1507 tmp2 = load_reg(s, rd);
1508 tcg_gen_andc_i32(tmp, tmp, tmp2);
1509 tcg_temp_free_i32(tmp2);
1510 iwmmxt_store_creg(wrd, tmp);
1511 break;
1512 case ARM_IWMMXT_wCGR0:
1513 case ARM_IWMMXT_wCGR1:
1514 case ARM_IWMMXT_wCGR2:
1515 case ARM_IWMMXT_wCGR3:
1516 gen_op_iwmmxt_set_cup();
1517 tmp = load_reg(s, rd);
1518 iwmmxt_store_creg(wrd, tmp);
1519 break;
1520 default:
1521 return 1;
1523 break;
1524 case 0x100: /* WXOR */
1525 wrd = (insn >> 12) & 0xf;
1526 rd0 = (insn >> 0) & 0xf;
1527 rd1 = (insn >> 16) & 0xf;
1528 gen_op_iwmmxt_movq_M0_wRn(rd0);
1529 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1530 gen_op_iwmmxt_setpsr_nz();
1531 gen_op_iwmmxt_movq_wRn_M0(wrd);
1532 gen_op_iwmmxt_set_mup();
1533 gen_op_iwmmxt_set_cup();
1534 break;
1535 case 0x111: /* TMRC */
1536 if (insn & 0xf)
1537 return 1;
1538 rd = (insn >> 12) & 0xf;
1539 wrd = (insn >> 16) & 0xf;
1540 tmp = iwmmxt_load_creg(wrd);
1541 store_reg(s, rd, tmp);
1542 break;
1543 case 0x300: /* WANDN */
1544 wrd = (insn >> 12) & 0xf;
1545 rd0 = (insn >> 0) & 0xf;
1546 rd1 = (insn >> 16) & 0xf;
1547 gen_op_iwmmxt_movq_M0_wRn(rd0);
1548 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1549 gen_op_iwmmxt_andq_M0_wRn(rd1);
1550 gen_op_iwmmxt_setpsr_nz();
1551 gen_op_iwmmxt_movq_wRn_M0(wrd);
1552 gen_op_iwmmxt_set_mup();
1553 gen_op_iwmmxt_set_cup();
1554 break;
1555 case 0x200: /* WAND */
1556 wrd = (insn >> 12) & 0xf;
1557 rd0 = (insn >> 0) & 0xf;
1558 rd1 = (insn >> 16) & 0xf;
1559 gen_op_iwmmxt_movq_M0_wRn(rd0);
1560 gen_op_iwmmxt_andq_M0_wRn(rd1);
1561 gen_op_iwmmxt_setpsr_nz();
1562 gen_op_iwmmxt_movq_wRn_M0(wrd);
1563 gen_op_iwmmxt_set_mup();
1564 gen_op_iwmmxt_set_cup();
1565 break;
1566 case 0x810: case 0xa10: /* WMADD */
1567 wrd = (insn >> 12) & 0xf;
1568 rd0 = (insn >> 0) & 0xf;
1569 rd1 = (insn >> 16) & 0xf;
1570 gen_op_iwmmxt_movq_M0_wRn(rd0);
1571 if (insn & (1 << 21))
1572 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1573 else
1574 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1575 gen_op_iwmmxt_movq_wRn_M0(wrd);
1576 gen_op_iwmmxt_set_mup();
1577 break;
1578 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1579 wrd = (insn >> 12) & 0xf;
1580 rd0 = (insn >> 16) & 0xf;
1581 rd1 = (insn >> 0) & 0xf;
1582 gen_op_iwmmxt_movq_M0_wRn(rd0);
1583 switch ((insn >> 22) & 3) {
1584 case 0:
1585 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1586 break;
1587 case 1:
1588 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1589 break;
1590 case 2:
1591 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1592 break;
1593 case 3:
1594 return 1;
1596 gen_op_iwmmxt_movq_wRn_M0(wrd);
1597 gen_op_iwmmxt_set_mup();
1598 gen_op_iwmmxt_set_cup();
1599 break;
1600 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1601 wrd = (insn >> 12) & 0xf;
1602 rd0 = (insn >> 16) & 0xf;
1603 rd1 = (insn >> 0) & 0xf;
1604 gen_op_iwmmxt_movq_M0_wRn(rd0);
1605 switch ((insn >> 22) & 3) {
1606 case 0:
1607 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1608 break;
1609 case 1:
1610 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1611 break;
1612 case 2:
1613 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1614 break;
1615 case 3:
1616 return 1;
1618 gen_op_iwmmxt_movq_wRn_M0(wrd);
1619 gen_op_iwmmxt_set_mup();
1620 gen_op_iwmmxt_set_cup();
1621 break;
1622 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1623 wrd = (insn >> 12) & 0xf;
1624 rd0 = (insn >> 16) & 0xf;
1625 rd1 = (insn >> 0) & 0xf;
1626 gen_op_iwmmxt_movq_M0_wRn(rd0);
1627 if (insn & (1 << 22))
1628 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1629 else
1630 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1631 if (!(insn & (1 << 20)))
1632 gen_op_iwmmxt_addl_M0_wRn(wrd);
1633 gen_op_iwmmxt_movq_wRn_M0(wrd);
1634 gen_op_iwmmxt_set_mup();
1635 break;
1636 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1637 wrd = (insn >> 12) & 0xf;
1638 rd0 = (insn >> 16) & 0xf;
1639 rd1 = (insn >> 0) & 0xf;
1640 gen_op_iwmmxt_movq_M0_wRn(rd0);
1641 if (insn & (1 << 21)) {
1642 if (insn & (1 << 20))
1643 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1644 else
1645 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1646 } else {
1647 if (insn & (1 << 20))
1648 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1649 else
1650 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1652 gen_op_iwmmxt_movq_wRn_M0(wrd);
1653 gen_op_iwmmxt_set_mup();
1654 break;
1655 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1656 wrd = (insn >> 12) & 0xf;
1657 rd0 = (insn >> 16) & 0xf;
1658 rd1 = (insn >> 0) & 0xf;
1659 gen_op_iwmmxt_movq_M0_wRn(rd0);
1660 if (insn & (1 << 21))
1661 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1662 else
1663 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1664 if (!(insn & (1 << 20))) {
1665 iwmmxt_load_reg(cpu_V1, wrd);
1666 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1668 gen_op_iwmmxt_movq_wRn_M0(wrd);
1669 gen_op_iwmmxt_set_mup();
1670 break;
1671 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1672 wrd = (insn >> 12) & 0xf;
1673 rd0 = (insn >> 16) & 0xf;
1674 rd1 = (insn >> 0) & 0xf;
1675 gen_op_iwmmxt_movq_M0_wRn(rd0);
1676 switch ((insn >> 22) & 3) {
1677 case 0:
1678 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1679 break;
1680 case 1:
1681 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1682 break;
1683 case 2:
1684 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1685 break;
1686 case 3:
1687 return 1;
1689 gen_op_iwmmxt_movq_wRn_M0(wrd);
1690 gen_op_iwmmxt_set_mup();
1691 gen_op_iwmmxt_set_cup();
1692 break;
1693 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1694 wrd = (insn >> 12) & 0xf;
1695 rd0 = (insn >> 16) & 0xf;
1696 rd1 = (insn >> 0) & 0xf;
1697 gen_op_iwmmxt_movq_M0_wRn(rd0);
1698 if (insn & (1 << 22)) {
1699 if (insn & (1 << 20))
1700 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1701 else
1702 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1703 } else {
1704 if (insn & (1 << 20))
1705 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1706 else
1707 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1709 gen_op_iwmmxt_movq_wRn_M0(wrd);
1710 gen_op_iwmmxt_set_mup();
1711 gen_op_iwmmxt_set_cup();
1712 break;
1713 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1714 wrd = (insn >> 12) & 0xf;
1715 rd0 = (insn >> 16) & 0xf;
1716 rd1 = (insn >> 0) & 0xf;
1717 gen_op_iwmmxt_movq_M0_wRn(rd0);
1718 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1719 tcg_gen_andi_i32(tmp, tmp, 7);
1720 iwmmxt_load_reg(cpu_V1, rd1);
1721 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1722 tcg_temp_free_i32(tmp);
1723 gen_op_iwmmxt_movq_wRn_M0(wrd);
1724 gen_op_iwmmxt_set_mup();
1725 break;
1726 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1727 if (((insn >> 6) & 3) == 3)
1728 return 1;
1729 rd = (insn >> 12) & 0xf;
1730 wrd = (insn >> 16) & 0xf;
1731 tmp = load_reg(s, rd);
1732 gen_op_iwmmxt_movq_M0_wRn(wrd);
1733 switch ((insn >> 6) & 3) {
1734 case 0:
1735 tmp2 = tcg_const_i32(0xff);
1736 tmp3 = tcg_const_i32((insn & 7) << 3);
1737 break;
1738 case 1:
1739 tmp2 = tcg_const_i32(0xffff);
1740 tmp3 = tcg_const_i32((insn & 3) << 4);
1741 break;
1742 case 2:
1743 tmp2 = tcg_const_i32(0xffffffff);
1744 tmp3 = tcg_const_i32((insn & 1) << 5);
1745 break;
1746 default:
1747 tmp2 = NULL;
1748 tmp3 = NULL;
1750 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1751 tcg_temp_free_i32(tmp3);
1752 tcg_temp_free_i32(tmp2);
1753 tcg_temp_free_i32(tmp);
1754 gen_op_iwmmxt_movq_wRn_M0(wrd);
1755 gen_op_iwmmxt_set_mup();
1756 break;
1757 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1758 rd = (insn >> 12) & 0xf;
1759 wrd = (insn >> 16) & 0xf;
1760 if (rd == 15 || ((insn >> 22) & 3) == 3)
1761 return 1;
1762 gen_op_iwmmxt_movq_M0_wRn(wrd);
1763 tmp = tcg_temp_new_i32();
1764 switch ((insn >> 22) & 3) {
1765 case 0:
1766 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1767 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1768 if (insn & 8) {
1769 tcg_gen_ext8s_i32(tmp, tmp);
1770 } else {
1771 tcg_gen_andi_i32(tmp, tmp, 0xff);
1773 break;
1774 case 1:
1775 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1776 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1777 if (insn & 8) {
1778 tcg_gen_ext16s_i32(tmp, tmp);
1779 } else {
1780 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1782 break;
1783 case 2:
1784 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1785 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1786 break;
1788 store_reg(s, rd, tmp);
1789 break;
1790 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1791 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1792 return 1;
1793 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1794 switch ((insn >> 22) & 3) {
1795 case 0:
1796 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1797 break;
1798 case 1:
1799 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1800 break;
1801 case 2:
1802 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1803 break;
1805 tcg_gen_shli_i32(tmp, tmp, 28);
1806 gen_set_nzcv(tmp);
1807 tcg_temp_free_i32(tmp);
1808 break;
1809 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1810 if (((insn >> 6) & 3) == 3)
1811 return 1;
1812 rd = (insn >> 12) & 0xf;
1813 wrd = (insn >> 16) & 0xf;
1814 tmp = load_reg(s, rd);
1815 switch ((insn >> 6) & 3) {
1816 case 0:
1817 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1818 break;
1819 case 1:
1820 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1821 break;
1822 case 2:
1823 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1824 break;
1826 tcg_temp_free_i32(tmp);
1827 gen_op_iwmmxt_movq_wRn_M0(wrd);
1828 gen_op_iwmmxt_set_mup();
1829 break;
1830 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1831 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1832 return 1;
1833 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1834 tmp2 = tcg_temp_new_i32();
1835 tcg_gen_mov_i32(tmp2, tmp);
1836 switch ((insn >> 22) & 3) {
1837 case 0:
1838 for (i = 0; i < 7; i ++) {
1839 tcg_gen_shli_i32(tmp2, tmp2, 4);
1840 tcg_gen_and_i32(tmp, tmp, tmp2);
1842 break;
1843 case 1:
1844 for (i = 0; i < 3; i ++) {
1845 tcg_gen_shli_i32(tmp2, tmp2, 8);
1846 tcg_gen_and_i32(tmp, tmp, tmp2);
1848 break;
1849 case 2:
1850 tcg_gen_shli_i32(tmp2, tmp2, 16);
1851 tcg_gen_and_i32(tmp, tmp, tmp2);
1852 break;
1854 gen_set_nzcv(tmp);
1855 tcg_temp_free_i32(tmp2);
1856 tcg_temp_free_i32(tmp);
1857 break;
1858 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1859 wrd = (insn >> 12) & 0xf;
1860 rd0 = (insn >> 16) & 0xf;
1861 gen_op_iwmmxt_movq_M0_wRn(rd0);
1862 switch ((insn >> 22) & 3) {
1863 case 0:
1864 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1865 break;
1866 case 1:
1867 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1868 break;
1869 case 2:
1870 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1871 break;
1872 case 3:
1873 return 1;
1875 gen_op_iwmmxt_movq_wRn_M0(wrd);
1876 gen_op_iwmmxt_set_mup();
1877 break;
1878 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1879 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1880 return 1;
1881 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1882 tmp2 = tcg_temp_new_i32();
1883 tcg_gen_mov_i32(tmp2, tmp);
1884 switch ((insn >> 22) & 3) {
1885 case 0:
1886 for (i = 0; i < 7; i ++) {
1887 tcg_gen_shli_i32(tmp2, tmp2, 4);
1888 tcg_gen_or_i32(tmp, tmp, tmp2);
1890 break;
1891 case 1:
1892 for (i = 0; i < 3; i ++) {
1893 tcg_gen_shli_i32(tmp2, tmp2, 8);
1894 tcg_gen_or_i32(tmp, tmp, tmp2);
1896 break;
1897 case 2:
1898 tcg_gen_shli_i32(tmp2, tmp2, 16);
1899 tcg_gen_or_i32(tmp, tmp, tmp2);
1900 break;
1902 gen_set_nzcv(tmp);
1903 tcg_temp_free_i32(tmp2);
1904 tcg_temp_free_i32(tmp);
1905 break;
1906 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1907 rd = (insn >> 12) & 0xf;
1908 rd0 = (insn >> 16) & 0xf;
1909 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1910 return 1;
1911 gen_op_iwmmxt_movq_M0_wRn(rd0);
1912 tmp = tcg_temp_new_i32();
1913 switch ((insn >> 22) & 3) {
1914 case 0:
1915 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1916 break;
1917 case 1:
1918 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1919 break;
1920 case 2:
1921 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1922 break;
1924 store_reg(s, rd, tmp);
1925 break;
1926 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1927 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1928 wrd = (insn >> 12) & 0xf;
1929 rd0 = (insn >> 16) & 0xf;
1930 rd1 = (insn >> 0) & 0xf;
1931 gen_op_iwmmxt_movq_M0_wRn(rd0);
1932 switch ((insn >> 22) & 3) {
1933 case 0:
1934 if (insn & (1 << 21))
1935 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1936 else
1937 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1938 break;
1939 case 1:
1940 if (insn & (1 << 21))
1941 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1942 else
1943 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1944 break;
1945 case 2:
1946 if (insn & (1 << 21))
1947 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1948 else
1949 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1950 break;
1951 case 3:
1952 return 1;
1954 gen_op_iwmmxt_movq_wRn_M0(wrd);
1955 gen_op_iwmmxt_set_mup();
1956 gen_op_iwmmxt_set_cup();
1957 break;
1958 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1959 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1960 wrd = (insn >> 12) & 0xf;
1961 rd0 = (insn >> 16) & 0xf;
1962 gen_op_iwmmxt_movq_M0_wRn(rd0);
1963 switch ((insn >> 22) & 3) {
1964 case 0:
1965 if (insn & (1 << 21))
1966 gen_op_iwmmxt_unpacklsb_M0();
1967 else
1968 gen_op_iwmmxt_unpacklub_M0();
1969 break;
1970 case 1:
1971 if (insn & (1 << 21))
1972 gen_op_iwmmxt_unpacklsw_M0();
1973 else
1974 gen_op_iwmmxt_unpackluw_M0();
1975 break;
1976 case 2:
1977 if (insn & (1 << 21))
1978 gen_op_iwmmxt_unpacklsl_M0();
1979 else
1980 gen_op_iwmmxt_unpacklul_M0();
1981 break;
1982 case 3:
1983 return 1;
1985 gen_op_iwmmxt_movq_wRn_M0(wrd);
1986 gen_op_iwmmxt_set_mup();
1987 gen_op_iwmmxt_set_cup();
1988 break;
1989 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1990 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1991 wrd = (insn >> 12) & 0xf;
1992 rd0 = (insn >> 16) & 0xf;
1993 gen_op_iwmmxt_movq_M0_wRn(rd0);
1994 switch ((insn >> 22) & 3) {
1995 case 0:
1996 if (insn & (1 << 21))
1997 gen_op_iwmmxt_unpackhsb_M0();
1998 else
1999 gen_op_iwmmxt_unpackhub_M0();
2000 break;
2001 case 1:
2002 if (insn & (1 << 21))
2003 gen_op_iwmmxt_unpackhsw_M0();
2004 else
2005 gen_op_iwmmxt_unpackhuw_M0();
2006 break;
2007 case 2:
2008 if (insn & (1 << 21))
2009 gen_op_iwmmxt_unpackhsl_M0();
2010 else
2011 gen_op_iwmmxt_unpackhul_M0();
2012 break;
2013 case 3:
2014 return 1;
2016 gen_op_iwmmxt_movq_wRn_M0(wrd);
2017 gen_op_iwmmxt_set_mup();
2018 gen_op_iwmmxt_set_cup();
2019 break;
2020 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2021 case 0x214: case 0x614: case 0xa14: case 0xe14:
2022 if (((insn >> 22) & 3) == 0)
2023 return 1;
2024 wrd = (insn >> 12) & 0xf;
2025 rd0 = (insn >> 16) & 0xf;
2026 gen_op_iwmmxt_movq_M0_wRn(rd0);
2027 tmp = tcg_temp_new_i32();
2028 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2029 tcg_temp_free_i32(tmp);
2030 return 1;
2032 switch ((insn >> 22) & 3) {
2033 case 1:
2034 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2035 break;
2036 case 2:
2037 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2038 break;
2039 case 3:
2040 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2041 break;
2043 tcg_temp_free_i32(tmp);
2044 gen_op_iwmmxt_movq_wRn_M0(wrd);
2045 gen_op_iwmmxt_set_mup();
2046 gen_op_iwmmxt_set_cup();
2047 break;
2048 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2049 case 0x014: case 0x414: case 0x814: case 0xc14:
2050 if (((insn >> 22) & 3) == 0)
2051 return 1;
2052 wrd = (insn >> 12) & 0xf;
2053 rd0 = (insn >> 16) & 0xf;
2054 gen_op_iwmmxt_movq_M0_wRn(rd0);
2055 tmp = tcg_temp_new_i32();
2056 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2057 tcg_temp_free_i32(tmp);
2058 return 1;
2060 switch ((insn >> 22) & 3) {
2061 case 1:
2062 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2063 break;
2064 case 2:
2065 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2066 break;
2067 case 3:
2068 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2069 break;
2071 tcg_temp_free_i32(tmp);
2072 gen_op_iwmmxt_movq_wRn_M0(wrd);
2073 gen_op_iwmmxt_set_mup();
2074 gen_op_iwmmxt_set_cup();
2075 break;
2076 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2077 case 0x114: case 0x514: case 0x914: case 0xd14:
2078 if (((insn >> 22) & 3) == 0)
2079 return 1;
2080 wrd = (insn >> 12) & 0xf;
2081 rd0 = (insn >> 16) & 0xf;
2082 gen_op_iwmmxt_movq_M0_wRn(rd0);
2083 tmp = tcg_temp_new_i32();
2084 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2085 tcg_temp_free_i32(tmp);
2086 return 1;
2088 switch ((insn >> 22) & 3) {
2089 case 1:
2090 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2091 break;
2092 case 2:
2093 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2094 break;
2095 case 3:
2096 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2097 break;
2099 tcg_temp_free_i32(tmp);
2100 gen_op_iwmmxt_movq_wRn_M0(wrd);
2101 gen_op_iwmmxt_set_mup();
2102 gen_op_iwmmxt_set_cup();
2103 break;
2104 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2105 case 0x314: case 0x714: case 0xb14: case 0xf14:
2106 if (((insn >> 22) & 3) == 0)
2107 return 1;
2108 wrd = (insn >> 12) & 0xf;
2109 rd0 = (insn >> 16) & 0xf;
2110 gen_op_iwmmxt_movq_M0_wRn(rd0);
2111 tmp = tcg_temp_new_i32();
2112 switch ((insn >> 22) & 3) {
2113 case 1:
2114 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2115 tcg_temp_free_i32(tmp);
2116 return 1;
2118 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2119 break;
2120 case 2:
2121 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2122 tcg_temp_free_i32(tmp);
2123 return 1;
2125 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2126 break;
2127 case 3:
2128 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2129 tcg_temp_free_i32(tmp);
2130 return 1;
2132 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2133 break;
2135 tcg_temp_free_i32(tmp);
2136 gen_op_iwmmxt_movq_wRn_M0(wrd);
2137 gen_op_iwmmxt_set_mup();
2138 gen_op_iwmmxt_set_cup();
2139 break;
2140 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2141 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2142 wrd = (insn >> 12) & 0xf;
2143 rd0 = (insn >> 16) & 0xf;
2144 rd1 = (insn >> 0) & 0xf;
2145 gen_op_iwmmxt_movq_M0_wRn(rd0);
2146 switch ((insn >> 22) & 3) {
2147 case 0:
2148 if (insn & (1 << 21))
2149 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2150 else
2151 gen_op_iwmmxt_minub_M0_wRn(rd1);
2152 break;
2153 case 1:
2154 if (insn & (1 << 21))
2155 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2156 else
2157 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2158 break;
2159 case 2:
2160 if (insn & (1 << 21))
2161 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2162 else
2163 gen_op_iwmmxt_minul_M0_wRn(rd1);
2164 break;
2165 case 3:
2166 return 1;
2168 gen_op_iwmmxt_movq_wRn_M0(wrd);
2169 gen_op_iwmmxt_set_mup();
2170 break;
2171 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2172 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2173 wrd = (insn >> 12) & 0xf;
2174 rd0 = (insn >> 16) & 0xf;
2175 rd1 = (insn >> 0) & 0xf;
2176 gen_op_iwmmxt_movq_M0_wRn(rd0);
2177 switch ((insn >> 22) & 3) {
2178 case 0:
2179 if (insn & (1 << 21))
2180 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2181 else
2182 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2183 break;
2184 case 1:
2185 if (insn & (1 << 21))
2186 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2187 else
2188 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2189 break;
2190 case 2:
2191 if (insn & (1 << 21))
2192 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2193 else
2194 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2195 break;
2196 case 3:
2197 return 1;
2199 gen_op_iwmmxt_movq_wRn_M0(wrd);
2200 gen_op_iwmmxt_set_mup();
2201 break;
2202 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2203 case 0x402: case 0x502: case 0x602: case 0x702:
2204 wrd = (insn >> 12) & 0xf;
2205 rd0 = (insn >> 16) & 0xf;
2206 rd1 = (insn >> 0) & 0xf;
2207 gen_op_iwmmxt_movq_M0_wRn(rd0);
2208 tmp = tcg_const_i32((insn >> 20) & 3);
2209 iwmmxt_load_reg(cpu_V1, rd1);
2210 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2211 tcg_temp_free_i32(tmp);
2212 gen_op_iwmmxt_movq_wRn_M0(wrd);
2213 gen_op_iwmmxt_set_mup();
2214 break;
2215 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2216 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2217 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2218 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2219 wrd = (insn >> 12) & 0xf;
2220 rd0 = (insn >> 16) & 0xf;
2221 rd1 = (insn >> 0) & 0xf;
2222 gen_op_iwmmxt_movq_M0_wRn(rd0);
2223 switch ((insn >> 20) & 0xf) {
2224 case 0x0:
2225 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2226 break;
2227 case 0x1:
2228 gen_op_iwmmxt_subub_M0_wRn(rd1);
2229 break;
2230 case 0x3:
2231 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2232 break;
2233 case 0x4:
2234 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2235 break;
2236 case 0x5:
2237 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2238 break;
2239 case 0x7:
2240 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2241 break;
2242 case 0x8:
2243 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2244 break;
2245 case 0x9:
2246 gen_op_iwmmxt_subul_M0_wRn(rd1);
2247 break;
2248 case 0xb:
2249 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2250 break;
2251 default:
2252 return 1;
2254 gen_op_iwmmxt_movq_wRn_M0(wrd);
2255 gen_op_iwmmxt_set_mup();
2256 gen_op_iwmmxt_set_cup();
2257 break;
2258 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2259 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2260 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2261 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2262 wrd = (insn >> 12) & 0xf;
2263 rd0 = (insn >> 16) & 0xf;
2264 gen_op_iwmmxt_movq_M0_wRn(rd0);
2265 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2266 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2267 tcg_temp_free_i32(tmp);
2268 gen_op_iwmmxt_movq_wRn_M0(wrd);
2269 gen_op_iwmmxt_set_mup();
2270 gen_op_iwmmxt_set_cup();
2271 break;
2272 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2273 case 0x418: case 0x518: case 0x618: case 0x718:
2274 case 0x818: case 0x918: case 0xa18: case 0xb18:
2275 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2276 wrd = (insn >> 12) & 0xf;
2277 rd0 = (insn >> 16) & 0xf;
2278 rd1 = (insn >> 0) & 0xf;
2279 gen_op_iwmmxt_movq_M0_wRn(rd0);
2280 switch ((insn >> 20) & 0xf) {
2281 case 0x0:
2282 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2283 break;
2284 case 0x1:
2285 gen_op_iwmmxt_addub_M0_wRn(rd1);
2286 break;
2287 case 0x3:
2288 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2289 break;
2290 case 0x4:
2291 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2292 break;
2293 case 0x5:
2294 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2295 break;
2296 case 0x7:
2297 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2298 break;
2299 case 0x8:
2300 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2301 break;
2302 case 0x9:
2303 gen_op_iwmmxt_addul_M0_wRn(rd1);
2304 break;
2305 case 0xb:
2306 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2307 break;
2308 default:
2309 return 1;
2311 gen_op_iwmmxt_movq_wRn_M0(wrd);
2312 gen_op_iwmmxt_set_mup();
2313 gen_op_iwmmxt_set_cup();
2314 break;
2315 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2316 case 0x408: case 0x508: case 0x608: case 0x708:
2317 case 0x808: case 0x908: case 0xa08: case 0xb08:
2318 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2319 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2320 return 1;
2321 wrd = (insn >> 12) & 0xf;
2322 rd0 = (insn >> 16) & 0xf;
2323 rd1 = (insn >> 0) & 0xf;
2324 gen_op_iwmmxt_movq_M0_wRn(rd0);
2325 switch ((insn >> 22) & 3) {
2326 case 1:
2327 if (insn & (1 << 21))
2328 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2329 else
2330 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2331 break;
2332 case 2:
2333 if (insn & (1 << 21))
2334 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2335 else
2336 gen_op_iwmmxt_packul_M0_wRn(rd1);
2337 break;
2338 case 3:
2339 if (insn & (1 << 21))
2340 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2341 else
2342 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2343 break;
2345 gen_op_iwmmxt_movq_wRn_M0(wrd);
2346 gen_op_iwmmxt_set_mup();
2347 gen_op_iwmmxt_set_cup();
2348 break;
2349 case 0x201: case 0x203: case 0x205: case 0x207:
2350 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2351 case 0x211: case 0x213: case 0x215: case 0x217:
2352 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2353 wrd = (insn >> 5) & 0xf;
2354 rd0 = (insn >> 12) & 0xf;
2355 rd1 = (insn >> 0) & 0xf;
2356 if (rd0 == 0xf || rd1 == 0xf)
2357 return 1;
2358 gen_op_iwmmxt_movq_M0_wRn(wrd);
2359 tmp = load_reg(s, rd0);
2360 tmp2 = load_reg(s, rd1);
2361 switch ((insn >> 16) & 0xf) {
2362 case 0x0: /* TMIA */
2363 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2364 break;
2365 case 0x8: /* TMIAPH */
2366 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2367 break;
2368 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2369 if (insn & (1 << 16))
2370 tcg_gen_shri_i32(tmp, tmp, 16);
2371 if (insn & (1 << 17))
2372 tcg_gen_shri_i32(tmp2, tmp2, 16);
2373 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2374 break;
2375 default:
2376 tcg_temp_free_i32(tmp2);
2377 tcg_temp_free_i32(tmp);
2378 return 1;
2380 tcg_temp_free_i32(tmp2);
2381 tcg_temp_free_i32(tmp);
2382 gen_op_iwmmxt_movq_wRn_M0(wrd);
2383 gen_op_iwmmxt_set_mup();
2384 break;
2385 default:
2386 return 1;
2389 return 0;
2392 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2393 (i.e. an undefined instruction). */
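/*
 * As a quick illustration of the decode below (derived from the code,
 * added here for clarity): for the accumulate format, rd0 comes from
 * bits [15:12], rd1 from bits [3:0] and the accumulator index from
 * bits [7:5]; bits [19:16] then select MIA (0x0), MIAPH (0x8) or one
 * of the MIAxy forms (0xc..0xf).
 */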
2394 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2396 int acc, rd0, rd1, rdhi, rdlo;
2397 TCGv_i32 tmp, tmp2;
2399 if ((insn & 0x0ff00f10) == 0x0e200010) {
2400 /* Multiply with Internal Accumulate Format */
2401 rd0 = (insn >> 12) & 0xf;
2402 rd1 = insn & 0xf;
2403 acc = (insn >> 5) & 7;
2405 if (acc != 0)
2406 return 1;
2408 tmp = load_reg(s, rd0);
2409 tmp2 = load_reg(s, rd1);
2410 switch ((insn >> 16) & 0xf) {
2411 case 0x0: /* MIA */
2412 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2413 break;
2414 case 0x8: /* MIAPH */
2415 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2416 break;
2417 case 0xc: /* MIABB */
2418 case 0xd: /* MIABT */
2419 case 0xe: /* MIATB */
2420 case 0xf: /* MIATT */
2421 if (insn & (1 << 16))
2422 tcg_gen_shri_i32(tmp, tmp, 16);
2423 if (insn & (1 << 17))
2424 tcg_gen_shri_i32(tmp2, tmp2, 16);
2425 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2426 break;
2427 default:
2428 return 1;
2430 tcg_temp_free_i32(tmp2);
2431 tcg_temp_free_i32(tmp);
2433 gen_op_iwmmxt_movq_wRn_M0(acc);
2434 return 0;
2437 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2438 /* Internal Accumulator Access Format */
2439 rdhi = (insn >> 16) & 0xf;
2440 rdlo = (insn >> 12) & 0xf;
2441 acc = insn & 7;
2443 if (acc != 0)
2444 return 1;
2446 if (insn & ARM_CP_RW_BIT) { /* MRA */
2447 iwmmxt_load_reg(cpu_V0, acc);
2448 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2449 tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
2450 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2451 } else { /* MAR */
2452 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2453 iwmmxt_store_reg(cpu_V0, acc);
2455 return 0;
2458 return 1;
2461 static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
2463 #ifndef CONFIG_USER_ONLY
2464 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
2465 ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
2466 #else
2467 return true;
2468 #endif
2471 static void gen_goto_ptr(void)
2473 tcg_gen_lookup_and_goto_ptr();
2476 /* This will end the TB but doesn't guarantee we'll return to
2477 * cpu_loop_exec. Any live exit_requests will be processed as we
2478 * enter the next TB.
2479 */
2480 static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
2482 if (use_goto_tb(s, dest)) {
2483 tcg_gen_goto_tb(n);
2484 gen_set_pc_im(s, dest);
2485 tcg_gen_exit_tb(s->base.tb, n);
2486 } else {
2487 gen_set_pc_im(s, dest);
2488 gen_goto_ptr();
2490 s->base.is_jmp = DISAS_NORETURN;
2493 static inline void gen_jmp (DisasContext *s, uint32_t dest)
2495 if (unlikely(is_singlestepping(s))) {
2496 /* An indirect jump so that we still trigger the debug exception. */
2497 gen_set_pc_im(s, dest);
2498 s->base.is_jmp = DISAS_JUMP;
2499 } else {
2500 gen_goto_tb(s, 0, dest);
2504 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
2506 if (x)
2507 tcg_gen_sari_i32(t0, t0, 16);
2508 else
2509 gen_sxth(t0);
2510 if (y)
2511 tcg_gen_sari_i32(t1, t1, 16);
2512 else
2513 gen_sxth(t1);
2514 tcg_gen_mul_i32(t0, t0, t1);
2517 /* Return the mask of PSR bits set by a MSR instruction. */
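/*
 * As a quick illustration of the mapping below: a field mask of
 * 0b1001 selects the low byte (0x000000ff) and the high byte
 * (0xff000000), i.e. mask == 0xff0000ff before the reserved,
 * execution-state and privileged bits are cleared.
 */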
2518 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
2520 uint32_t mask = 0;
2522 if (flags & (1 << 0)) {
2523 mask |= 0xff;
2525 if (flags & (1 << 1)) {
2526 mask |= 0xff00;
2528 if (flags & (1 << 2)) {
2529 mask |= 0xff0000;
2531 if (flags & (1 << 3)) {
2532 mask |= 0xff000000;
2535 /* Mask out undefined and reserved bits. */
2536 mask &= aarch32_cpsr_valid_mask(s->features, s->isar);
2538 /* Mask out execution state. */
2539 if (!spsr) {
2540 mask &= ~CPSR_EXEC;
2543 /* Mask out privileged bits. */
2544 if (IS_USER(s)) {
2545 mask &= CPSR_USER;
2547 return mask;
2550 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
2551 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
2553 TCGv_i32 tmp;
2554 if (spsr) {
2555 /* ??? This is also undefined in system mode. */
2556 if (IS_USER(s))
2557 return 1;
2559 tmp = load_cpu_field(spsr);
2560 tcg_gen_andi_i32(tmp, tmp, ~mask);
2561 tcg_gen_andi_i32(t0, t0, mask);
2562 tcg_gen_or_i32(tmp, tmp, t0);
2563 store_cpu_field(tmp, spsr);
2564 } else {
2565 gen_set_cpsr(t0, mask);
2567 tcg_temp_free_i32(t0);
2568 gen_lookup_tb(s);
2569 return 0;
2572 /* Returns nonzero if access to the PSR is not permitted. */
2573 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
2575 TCGv_i32 tmp;
2576 tmp = tcg_temp_new_i32();
2577 tcg_gen_movi_i32(tmp, val);
2578 return gen_set_psr(s, mask, spsr, tmp);
2581 static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
2582 int *tgtmode, int *regno)
2584 /* Decode the r and sysm fields of MSR/MRS banked accesses into
2585 * the target mode and register number, and identify the various
2586 * unpredictable cases.
2587 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
2588 * + executed in user mode
2589 * + using R15 as the src/dest register
2590 * + accessing an unimplemented register
2591 * + accessing a register that's inaccessible at current PL/security state*
2592 * + accessing a register that you could access with a different insn
2593 * We choose to UNDEF in all these cases.
2594 * Since we don't know which of the various AArch32 modes we are in
2595 * we have to defer some checks to runtime.
2596 * Accesses to Monitor mode registers from Secure EL1 (which implies
2597 * that EL3 is AArch64) must trap to EL3.
2599 * If the access checks fail this function will emit code to take
2600 * an exception and return false. Otherwise it will return true,
2601 * and set *tgtmode and *regno appropriately.
2602 */
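/*
 * Decode examples, following the tables below: r == 0, sysm == 0x0a
 * selects r10_fiq (tgtmode FIQ, regno 10); r == 1, sysm == 0x10
 * selects SPSR_irq (tgtmode IRQ, regno 16).
 */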
2603 int exc_target = default_exception_el(s);
2605 /* These instructions are present only in ARMv8, or in ARMv7 with the
2606 * Virtualization Extensions.
2608 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
2609 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
2610 goto undef;
2613 if (IS_USER(s) || rn == 15) {
2614 goto undef;
2617 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
2618 * of registers into (r, sysm).
2619 */
2620 if (r) {
2621 /* SPSRs for other modes */
2622 switch (sysm) {
2623 case 0xe: /* SPSR_fiq */
2624 *tgtmode = ARM_CPU_MODE_FIQ;
2625 break;
2626 case 0x10: /* SPSR_irq */
2627 *tgtmode = ARM_CPU_MODE_IRQ;
2628 break;
2629 case 0x12: /* SPSR_svc */
2630 *tgtmode = ARM_CPU_MODE_SVC;
2631 break;
2632 case 0x14: /* SPSR_abt */
2633 *tgtmode = ARM_CPU_MODE_ABT;
2634 break;
2635 case 0x16: /* SPSR_und */
2636 *tgtmode = ARM_CPU_MODE_UND;
2637 break;
2638 case 0x1c: /* SPSR_mon */
2639 *tgtmode = ARM_CPU_MODE_MON;
2640 break;
2641 case 0x1e: /* SPSR_hyp */
2642 *tgtmode = ARM_CPU_MODE_HYP;
2643 break;
2644 default: /* unallocated */
2645 goto undef;
2647 /* We arbitrarily assign SPSR a register number of 16. */
2648 *regno = 16;
2649 } else {
2650 /* general purpose registers for other modes */
2651 switch (sysm) {
2652 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
2653 *tgtmode = ARM_CPU_MODE_USR;
2654 *regno = sysm + 8;
2655 break;
2656 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
2657 *tgtmode = ARM_CPU_MODE_FIQ;
2658 *regno = sysm;
2659 break;
2660 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
2661 *tgtmode = ARM_CPU_MODE_IRQ;
2662 *regno = sysm & 1 ? 13 : 14;
2663 break;
2664 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
2665 *tgtmode = ARM_CPU_MODE_SVC;
2666 *regno = sysm & 1 ? 13 : 14;
2667 break;
2668 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
2669 *tgtmode = ARM_CPU_MODE_ABT;
2670 *regno = sysm & 1 ? 13 : 14;
2671 break;
2672 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
2673 *tgtmode = ARM_CPU_MODE_UND;
2674 *regno = sysm & 1 ? 13 : 14;
2675 break;
2676 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
2677 *tgtmode = ARM_CPU_MODE_MON;
2678 *regno = sysm & 1 ? 13 : 14;
2679 break;
2680 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
2681 *tgtmode = ARM_CPU_MODE_HYP;
2682 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
2683 *regno = sysm & 1 ? 13 : 17;
2684 break;
2685 default: /* unallocated */
2686 goto undef;
2690 /* Catch the 'accessing inaccessible register' cases we can detect
2691 * at translate time.
2692 */
2693 switch (*tgtmode) {
2694 case ARM_CPU_MODE_MON:
2695 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
2696 goto undef;
2698 if (s->current_el == 1) {
2699 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
2700 * then accesses to Mon registers trap to EL3.
2701 */
2702 exc_target = 3;
2703 goto undef;
2705 break;
2706 case ARM_CPU_MODE_HYP:
2707 /*
2708 * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
2709 * (and so we can forbid accesses from EL2 or below). elr_hyp
2710 * can be accessed also from Hyp mode, so forbid accesses from
2711 * EL0 or EL1.
2712 */
2713 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
2714 (s->current_el < 3 && *regno != 17)) {
2715 goto undef;
2717 break;
2718 default:
2719 break;
2722 return true;
2724 undef:
2725 /* If we get here then some access check did not pass */
2726 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
2727 syn_uncategorized(), exc_target);
2728 return false;
2731 static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
2733 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
2734 int tgtmode = 0, regno = 0;
2736 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
2737 return;
2740 /* Sync state because msr_banked() can raise exceptions */
2741 gen_set_condexec(s);
2742 gen_set_pc_im(s, s->pc_curr);
2743 tcg_reg = load_reg(s, rn);
2744 tcg_tgtmode = tcg_const_i32(tgtmode);
2745 tcg_regno = tcg_const_i32(regno);
2746 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
2747 tcg_temp_free_i32(tcg_tgtmode);
2748 tcg_temp_free_i32(tcg_regno);
2749 tcg_temp_free_i32(tcg_reg);
2750 s->base.is_jmp = DISAS_UPDATE_EXIT;
2753 static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
2755 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
2756 int tgtmode = 0, regno = 0;
2758 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
2759 return;
2762 /* Sync state because mrs_banked() can raise exceptions */
2763 gen_set_condexec(s);
2764 gen_set_pc_im(s, s->pc_curr);
2765 tcg_reg = tcg_temp_new_i32();
2766 tcg_tgtmode = tcg_const_i32(tgtmode);
2767 tcg_regno = tcg_const_i32(regno);
2768 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
2769 tcg_temp_free_i32(tcg_tgtmode);
2770 tcg_temp_free_i32(tcg_regno);
2771 store_reg(s, rn, tcg_reg);
2772 s->base.is_jmp = DISAS_UPDATE_EXIT;
2775 /* Store value to PC as for an exception return (i.e. don't
2776 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
2777 * will do the masking based on the new value of the Thumb bit.
2778 */
2779 static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
2781 tcg_gen_mov_i32(cpu_R[15], pc);
2782 tcg_temp_free_i32(pc);
2785 /* Generate a v6 exception return. Marks both values as dead. */
2786 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2788 store_pc_exc_ret(s, pc);
2789 /* The cpsr_write_eret helper will mask the low bits of PC
2790 * appropriately depending on the new Thumb bit, so it must
2791 * be called after storing the new PC.
2792 */
2793 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
2794 gen_io_start();
2796 gen_helper_cpsr_write_eret(cpu_env, cpsr);
2797 tcg_temp_free_i32(cpsr);
2798 /* Must exit loop to check un-masked IRQs */
2799 s->base.is_jmp = DISAS_EXIT;
2802 /* Generate an old-style exception return. Marks pc as dead. */
2803 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
2805 gen_rfe(s, pc, load_cpu_field(spsr));
2808 static void gen_gvec_fn3_qc(uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs,
2809 uint32_t opr_sz, uint32_t max_sz,
2810 gen_helper_gvec_3_ptr *fn)
2812 TCGv_ptr qc_ptr = tcg_temp_new_ptr();
2814 tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
2815 tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, qc_ptr,
2816 opr_sz, max_sz, 0, fn);
2817 tcg_temp_free_ptr(qc_ptr);
2820 void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
2821 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
2823 static gen_helper_gvec_3_ptr * const fns[2] = {
2824 gen_helper_gvec_qrdmlah_s16, gen_helper_gvec_qrdmlah_s32
2826 tcg_debug_assert(vece >= 1 && vece <= 2);
2827 gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
2830 void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
2831 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
2833 static gen_helper_gvec_3_ptr * const fns[2] = {
2834 gen_helper_gvec_qrdmlsh_s16, gen_helper_gvec_qrdmlsh_s32
2836 tcg_debug_assert(vece >= 1 && vece <= 2);
2837 gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
2840 #define GEN_CMP0(NAME, COND) \
2841 static void gen_##NAME##0_i32(TCGv_i32 d, TCGv_i32 a) \
2843 tcg_gen_setcondi_i32(COND, d, a, 0); \
2844 tcg_gen_neg_i32(d, d); \
2846 static void gen_##NAME##0_i64(TCGv_i64 d, TCGv_i64 a) \
2848 tcg_gen_setcondi_i64(COND, d, a, 0); \
2849 tcg_gen_neg_i64(d, d); \
2851 static void gen_##NAME##0_vec(unsigned vece, TCGv_vec d, TCGv_vec a) \
2853 TCGv_vec zero = tcg_const_zeros_vec_matching(d); \
2854 tcg_gen_cmp_vec(COND, vece, d, a, zero); \
2855 tcg_temp_free_vec(zero); \
2857 void gen_gvec_##NAME##0(unsigned vece, uint32_t d, uint32_t m, \
2858 uint32_t opr_sz, uint32_t max_sz) \
2860 const GVecGen2 op[4] = { \
2861 { .fno = gen_helper_gvec_##NAME##0_b, \
2862 .fniv = gen_##NAME##0_vec, \
2863 .opt_opc = vecop_list_cmp, \
2864 .vece = MO_8 }, \
2865 { .fno = gen_helper_gvec_##NAME##0_h, \
2866 .fniv = gen_##NAME##0_vec, \
2867 .opt_opc = vecop_list_cmp, \
2868 .vece = MO_16 }, \
2869 { .fni4 = gen_##NAME##0_i32, \
2870 .fniv = gen_##NAME##0_vec, \
2871 .opt_opc = vecop_list_cmp, \
2872 .vece = MO_32 }, \
2873 { .fni8 = gen_##NAME##0_i64, \
2874 .fniv = gen_##NAME##0_vec, \
2875 .opt_opc = vecop_list_cmp, \
2876 .prefer_i64 = TCG_TARGET_REG_BITS == 64, \
2877 .vece = MO_64 }, \
2878 }; \
2879 tcg_gen_gvec_2(d, m, opr_sz, max_sz, &op[vece]); \
2882 static const TCGOpcode vecop_list_cmp[] = {
2883 INDEX_op_cmp_vec, 0
2886 GEN_CMP0(ceq, TCG_COND_EQ)
2887 GEN_CMP0(cle, TCG_COND_LE)
2888 GEN_CMP0(cge, TCG_COND_GE)
2889 GEN_CMP0(clt, TCG_COND_LT)
2890 GEN_CMP0(cgt, TCG_COND_GT)
2892 #undef GEN_CMP0
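/*
 * For example, GEN_CMP0(ceq, TCG_COND_EQ) above defines
 * gen_gvec_ceq0(), which sets every element that equals zero to
 * all-ones and every other element to zero, e.g. bytes {0x00, 0x7f}
 * become {0xff, 0x00} for vece == MO_8.
 */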
2894 static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
2896 tcg_gen_vec_sar8i_i64(a, a, shift);
2897 tcg_gen_vec_add8_i64(d, d, a);
2900 static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
2902 tcg_gen_vec_sar16i_i64(a, a, shift);
2903 tcg_gen_vec_add16_i64(d, d, a);
2906 static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
2908 tcg_gen_sari_i32(a, a, shift);
2909 tcg_gen_add_i32(d, d, a);
2912 static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
2914 tcg_gen_sari_i64(a, a, shift);
2915 tcg_gen_add_i64(d, d, a);
2918 static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
2920 tcg_gen_sari_vec(vece, a, a, sh);
2921 tcg_gen_add_vec(vece, d, d, a);
2924 void gen_gvec_ssra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
2925 int64_t shift, uint32_t opr_sz, uint32_t max_sz)
2927 static const TCGOpcode vecop_list[] = {
2928 INDEX_op_sari_vec, INDEX_op_add_vec, 0
2930 static const GVecGen2i ops[4] = {
2931 { .fni8 = gen_ssra8_i64,
2932 .fniv = gen_ssra_vec,
2933 .fno = gen_helper_gvec_ssra_b,
2934 .load_dest = true,
2935 .opt_opc = vecop_list,
2936 .vece = MO_8 },
2937 { .fni8 = gen_ssra16_i64,
2938 .fniv = gen_ssra_vec,
2939 .fno = gen_helper_gvec_ssra_h,
2940 .load_dest = true,
2941 .opt_opc = vecop_list,
2942 .vece = MO_16 },
2943 { .fni4 = gen_ssra32_i32,
2944 .fniv = gen_ssra_vec,
2945 .fno = gen_helper_gvec_ssra_s,
2946 .load_dest = true,
2947 .opt_opc = vecop_list,
2948 .vece = MO_32 },
2949 { .fni8 = gen_ssra64_i64,
2950 .fniv = gen_ssra_vec,
2951 .fno = gen_helper_gvec_ssra_d,
2952 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
2953 .opt_opc = vecop_list,
2954 .load_dest = true,
2955 .vece = MO_64 },
2958 /* tszimm encoding produces immediates in the range [1..esize]. */
2959 tcg_debug_assert(shift > 0);
2960 tcg_debug_assert(shift <= (8 << vece));
2962 /*
2963 * Shifts larger than the element size are architecturally valid.
2964 * A signed shift of this size produces all sign bits in each element.
2965 */
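/*
 * For example, with vece == MO_8 a shift of 8 is clamped to 7 below,
 * so each byte becomes 0x00 or 0xff (a copy of its sign bit) before
 * being accumulated into the destination.
 */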
2966 shift = MIN(shift, (8 << vece) - 1);
2967 tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
2970 static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
2972 tcg_gen_vec_shr8i_i64(a, a, shift);
2973 tcg_gen_vec_add8_i64(d, d, a);
2976 static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
2978 tcg_gen_vec_shr16i_i64(a, a, shift);
2979 tcg_gen_vec_add16_i64(d, d, a);
2982 static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
2984 tcg_gen_shri_i32(a, a, shift);
2985 tcg_gen_add_i32(d, d, a);
2988 static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
2990 tcg_gen_shri_i64(a, a, shift);
2991 tcg_gen_add_i64(d, d, a);
2994 static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
2996 tcg_gen_shri_vec(vece, a, a, sh);
2997 tcg_gen_add_vec(vece, d, d, a);
3000 void gen_gvec_usra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
3001 int64_t shift, uint32_t opr_sz, uint32_t max_sz)
3003 static const TCGOpcode vecop_list[] = {
3004 INDEX_op_shri_vec, INDEX_op_add_vec, 0
3006 static const GVecGen2i ops[4] = {
3007 { .fni8 = gen_usra8_i64,
3008 .fniv = gen_usra_vec,
3009 .fno = gen_helper_gvec_usra_b,
3010 .load_dest = true,
3011 .opt_opc = vecop_list,
3012 .vece = MO_8, },
3013 { .fni8 = gen_usra16_i64,
3014 .fniv = gen_usra_vec,
3015 .fno = gen_helper_gvec_usra_h,
3016 .load_dest = true,
3017 .opt_opc = vecop_list,
3018 .vece = MO_16, },
3019 { .fni4 = gen_usra32_i32,
3020 .fniv = gen_usra_vec,
3021 .fno = gen_helper_gvec_usra_s,
3022 .load_dest = true,
3023 .opt_opc = vecop_list,
3024 .vece = MO_32, },
3025 { .fni8 = gen_usra64_i64,
3026 .fniv = gen_usra_vec,
3027 .fno = gen_helper_gvec_usra_d,
3028 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3029 .load_dest = true,
3030 .opt_opc = vecop_list,
3031 .vece = MO_64, },
3034 /* tszimm encoding produces immediates in the range [1..esize]. */
3035 tcg_debug_assert(shift > 0);
3036 tcg_debug_assert(shift <= (8 << vece));
3038 /*
3039 * Shifts larger than the element size are architecturally valid.
3040 * An unsigned shift of this size produces all zeros, so accumulating it is a nop.
3041 */
3042 if (shift < (8 << vece)) {
3043 tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
3044 } else {
3045 /* Nop, but we do need to clear the tail. */
3046 tcg_gen_gvec_mov(vece, rd_ofs, rd_ofs, opr_sz, max_sz);
3050 /*
3051 * Shift one less than the requested amount, and the low bit is
3052 * the rounding bit. For the 8 and 16-bit operations, because we
3053 * mask the low bit, we can perform a normal integer shift instead
3054 * of a vector shift.
3055 */
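/*
 * Worked example for gen_srshr8_i64 with sh == 2 and a byte value of
 * 7: the rounding bit is (7 >> 1) & 1 == 1 and the shifted value is
 * 7 >> 2 == 1, so the result is 2, i.e. (7 + 2) >> 2.
 */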
3056 static void gen_srshr8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
3058 TCGv_i64 t = tcg_temp_new_i64();
3060 tcg_gen_shri_i64(t, a, sh - 1);
3061 tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
3062 tcg_gen_vec_sar8i_i64(d, a, sh);
3063 tcg_gen_vec_add8_i64(d, d, t);
3064 tcg_temp_free_i64(t);
3067 static void gen_srshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
3069 TCGv_i64 t = tcg_temp_new_i64();
3071 tcg_gen_shri_i64(t, a, sh - 1);
3072 tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
3073 tcg_gen_vec_sar16i_i64(d, a, sh);
3074 tcg_gen_vec_add16_i64(d, d, t);
3075 tcg_temp_free_i64(t);
3078 static void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
3080 TCGv_i32 t = tcg_temp_new_i32();
3082 tcg_gen_extract_i32(t, a, sh - 1, 1);
3083 tcg_gen_sari_i32(d, a, sh);
3084 tcg_gen_add_i32(d, d, t);
3085 tcg_temp_free_i32(t);
3088 static void gen_srshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
3090 TCGv_i64 t = tcg_temp_new_i64();
3092 tcg_gen_extract_i64(t, a, sh - 1, 1);
3093 tcg_gen_sari_i64(d, a, sh);
3094 tcg_gen_add_i64(d, d, t);
3095 tcg_temp_free_i64(t);
3098 static void gen_srshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
3100 TCGv_vec t = tcg_temp_new_vec_matching(d);
3101 TCGv_vec ones = tcg_temp_new_vec_matching(d);
3103 tcg_gen_shri_vec(vece, t, a, sh - 1);
3104 tcg_gen_dupi_vec(vece, ones, 1);
3105 tcg_gen_and_vec(vece, t, t, ones);
3106 tcg_gen_sari_vec(vece, d, a, sh);
3107 tcg_gen_add_vec(vece, d, d, t);
3109 tcg_temp_free_vec(t);
3110 tcg_temp_free_vec(ones);
3113 void gen_gvec_srshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
3114 int64_t shift, uint32_t opr_sz, uint32_t max_sz)
3116 static const TCGOpcode vecop_list[] = {
3117 INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
3119 static const GVecGen2i ops[4] = {
3120 { .fni8 = gen_srshr8_i64,
3121 .fniv = gen_srshr_vec,
3122 .fno = gen_helper_gvec_srshr_b,
3123 .opt_opc = vecop_list,
3124 .vece = MO_8 },
3125 { .fni8 = gen_srshr16_i64,
3126 .fniv = gen_srshr_vec,
3127 .fno = gen_helper_gvec_srshr_h,
3128 .opt_opc = vecop_list,
3129 .vece = MO_16 },
3130 { .fni4 = gen_srshr32_i32,
3131 .fniv = gen_srshr_vec,
3132 .fno = gen_helper_gvec_srshr_s,
3133 .opt_opc = vecop_list,
3134 .vece = MO_32 },
3135 { .fni8 = gen_srshr64_i64,
3136 .fniv = gen_srshr_vec,
3137 .fno = gen_helper_gvec_srshr_d,
3138 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3139 .opt_opc = vecop_list,
3140 .vece = MO_64 },
3143 /* tszimm encoding produces immediates in the range [1..esize] */
3144 tcg_debug_assert(shift > 0);
3145 tcg_debug_assert(shift <= (8 << vece));
3147 if (shift == (8 << vece)) {
3148 /*
3149 * Shifts larger than the element size are architecturally valid.
3150 * A signed shift of this size produces all sign bits. With rounding,
3151 * this produces (-1 + 1) >> 1 == 0, or (0 + 1) >> 1 == 0.
3152 * I.e. always zero.
3153 */
3154 tcg_gen_gvec_dup_imm(vece, rd_ofs, opr_sz, max_sz, 0);
3155 } else {
3156 tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
3160 static void gen_srsra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
3162 TCGv_i64 t = tcg_temp_new_i64();
3164 gen_srshr8_i64(t, a, sh);
3165 tcg_gen_vec_add8_i64(d, d, t);
3166 tcg_temp_free_i64(t);
3169 static void gen_srsra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
3171 TCGv_i64 t = tcg_temp_new_i64();
3173 gen_srshr16_i64(t, a, sh);
3174 tcg_gen_vec_add16_i64(d, d, t);
3175 tcg_temp_free_i64(t);
3178 static void gen_srsra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
3180 TCGv_i32 t = tcg_temp_new_i32();
3182 gen_srshr32_i32(t, a, sh);
3183 tcg_gen_add_i32(d, d, t);
3184 tcg_temp_free_i32(t);
3187 static void gen_srsra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
3189 TCGv_i64 t = tcg_temp_new_i64();
3191 gen_srshr64_i64(t, a, sh);
3192 tcg_gen_add_i64(d, d, t);
3193 tcg_temp_free_i64(t);
3196 static void gen_srsra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
3198 TCGv_vec t = tcg_temp_new_vec_matching(d);
3200 gen_srshr_vec(vece, t, a, sh);
3201 tcg_gen_add_vec(vece, d, d, t);
3202 tcg_temp_free_vec(t);
3205 void gen_gvec_srsra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
3206 int64_t shift, uint32_t opr_sz, uint32_t max_sz)
3208 static const TCGOpcode vecop_list[] = {
3209 INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
3211 static const GVecGen2i ops[4] = {
3212 { .fni8 = gen_srsra8_i64,
3213 .fniv = gen_srsra_vec,
3214 .fno = gen_helper_gvec_srsra_b,
3215 .opt_opc = vecop_list,
3216 .load_dest = true,
3217 .vece = MO_8 },
3218 { .fni8 = gen_srsra16_i64,
3219 .fniv = gen_srsra_vec,
3220 .fno = gen_helper_gvec_srsra_h,
3221 .opt_opc = vecop_list,
3222 .load_dest = true,
3223 .vece = MO_16 },
3224 { .fni4 = gen_srsra32_i32,
3225 .fniv = gen_srsra_vec,
3226 .fno = gen_helper_gvec_srsra_s,
3227 .opt_opc = vecop_list,
3228 .load_dest = true,
3229 .vece = MO_32 },
3230 { .fni8 = gen_srsra64_i64,
3231 .fniv = gen_srsra_vec,
3232 .fno = gen_helper_gvec_srsra_d,
3233 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3234 .opt_opc = vecop_list,
3235 .load_dest = true,
3236 .vece = MO_64 },
3239 /* tszimm encoding produces immediates in the range [1..esize] */
3240 tcg_debug_assert(shift > 0);
3241 tcg_debug_assert(shift <= (8 << vece));
3243 /*
3244 * Shifts larger than the element size are architecturally valid.
3245 * A signed shift of this size produces all sign bits. With rounding,
3246 * this produces (-1 + 1) >> 1 == 0, or (0 + 1) >> 1 == 0.
3247 * I.e. always zero. With accumulation, this leaves D unchanged.
3248 */
3249 if (shift == (8 << vece)) {
3250 /* Nop, but we do need to clear the tail. */
3251 tcg_gen_gvec_mov(vece, rd_ofs, rd_ofs, opr_sz, max_sz);
3252 } else {
3253 tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
3257 static void gen_urshr8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
3259 TCGv_i64 t = tcg_temp_new_i64();
3261 tcg_gen_shri_i64(t, a, sh - 1);
3262 tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
3263 tcg_gen_vec_shr8i_i64(d, a, sh);
3264 tcg_gen_vec_add8_i64(d, d, t);
3265 tcg_temp_free_i64(t);
3268 static void gen_urshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
3270 TCGv_i64 t = tcg_temp_new_i64();
3272 tcg_gen_shri_i64(t, a, sh - 1);
3273 tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
3274 tcg_gen_vec_shr16i_i64(d, a, sh);
3275 tcg_gen_vec_add16_i64(d, d, t);
3276 tcg_temp_free_i64(t);
3279 static void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
3281 TCGv_i32 t = tcg_temp_new_i32();
3283 tcg_gen_extract_i32(t, a, sh - 1, 1);
3284 tcg_gen_shri_i32(d, a, sh);
3285 tcg_gen_add_i32(d, d, t);
3286 tcg_temp_free_i32(t);
3289 static void gen_urshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
3291 TCGv_i64 t = tcg_temp_new_i64();
3293 tcg_gen_extract_i64(t, a, sh - 1, 1);
3294 tcg_gen_shri_i64(d, a, sh);
3295 tcg_gen_add_i64(d, d, t);
3296 tcg_temp_free_i64(t);
3299 static void gen_urshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t shift)
3301 TCGv_vec t = tcg_temp_new_vec_matching(d);
3302 TCGv_vec ones = tcg_temp_new_vec_matching(d);
3304 tcg_gen_shri_vec(vece, t, a, shift - 1);
3305 tcg_gen_dupi_vec(vece, ones, 1);
3306 tcg_gen_and_vec(vece, t, t, ones);
3307 tcg_gen_shri_vec(vece, d, a, shift);
3308 tcg_gen_add_vec(vece, d, d, t);
3310 tcg_temp_free_vec(t);
3311 tcg_temp_free_vec(ones);
3314 void gen_gvec_urshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
3315 int64_t shift, uint32_t opr_sz, uint32_t max_sz)
3317 static const TCGOpcode vecop_list[] = {
3318 INDEX_op_shri_vec, INDEX_op_add_vec, 0
3320 static const GVecGen2i ops[4] = {
3321 { .fni8 = gen_urshr8_i64,
3322 .fniv = gen_urshr_vec,
3323 .fno = gen_helper_gvec_urshr_b,
3324 .opt_opc = vecop_list,
3325 .vece = MO_8 },
3326 { .fni8 = gen_urshr16_i64,
3327 .fniv = gen_urshr_vec,
3328 .fno = gen_helper_gvec_urshr_h,
3329 .opt_opc = vecop_list,
3330 .vece = MO_16 },
3331 { .fni4 = gen_urshr32_i32,
3332 .fniv = gen_urshr_vec,
3333 .fno = gen_helper_gvec_urshr_s,
3334 .opt_opc = vecop_list,
3335 .vece = MO_32 },
3336 { .fni8 = gen_urshr64_i64,
3337 .fniv = gen_urshr_vec,
3338 .fno = gen_helper_gvec_urshr_d,
3339 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3340 .opt_opc = vecop_list,
3341 .vece = MO_64 },
3344 /* tszimm encoding produces immediates in the range [1..esize] */
3345 tcg_debug_assert(shift > 0);
3346 tcg_debug_assert(shift <= (8 << vece));
3348 if (shift == (8 << vece)) {
3349 /*
3350 * Shifts larger than the element size are architecturally valid.
3351 * An unsigned shift of this size produces zero. With rounding, this
3352 * produces a copy of the most significant bit.
3353 */
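/*
 * For example, with vece == MO_8 this is (a + 0x80) >> 8, which is
 * just bit 7 of a, hence the plain shift by (shift - 1) below.
 */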
3354 tcg_gen_gvec_shri(vece, rd_ofs, rm_ofs, shift - 1, opr_sz, max_sz);
3355 } else {
3356 tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
3360 static void gen_ursra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
3362 TCGv_i64 t = tcg_temp_new_i64();
3364 if (sh == 8) {
3365 tcg_gen_vec_shr8i_i64(t, a, 7);
3366 } else {
3367 gen_urshr8_i64(t, a, sh);
3369 tcg_gen_vec_add8_i64(d, d, t);
3370 tcg_temp_free_i64(t);
3373 static void gen_ursra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
3375 TCGv_i64 t = tcg_temp_new_i64();
3377 if (sh == 16) {
3378 tcg_gen_vec_shr16i_i64(t, a, 15);
3379 } else {
3380 gen_urshr16_i64(t, a, sh);
3382 tcg_gen_vec_add16_i64(d, d, t);
3383 tcg_temp_free_i64(t);
3386 static void gen_ursra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
3388 TCGv_i32 t = tcg_temp_new_i32();
3390 if (sh == 32) {
3391 tcg_gen_shri_i32(t, a, 31);
3392 } else {
3393 gen_urshr32_i32(t, a, sh);
3395 tcg_gen_add_i32(d, d, t);
3396 tcg_temp_free_i32(t);
3399 static void gen_ursra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
3401 TCGv_i64 t = tcg_temp_new_i64();
3403 if (sh == 64) {
3404 tcg_gen_shri_i64(t, a, 63);
3405 } else {
3406 gen_urshr64_i64(t, a, sh);
3408 tcg_gen_add_i64(d, d, t);
3409 tcg_temp_free_i64(t);
3412 static void gen_ursra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
3414 TCGv_vec t = tcg_temp_new_vec_matching(d);
3416 if (sh == (8 << vece)) {
3417 tcg_gen_shri_vec(vece, t, a, sh - 1);
3418 } else {
3419 gen_urshr_vec(vece, t, a, sh);
3421 tcg_gen_add_vec(vece, d, d, t);
3422 tcg_temp_free_vec(t);
3425 void gen_gvec_ursra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
3426 int64_t shift, uint32_t opr_sz, uint32_t max_sz)
3428 static const TCGOpcode vecop_list[] = {
3429 INDEX_op_shri_vec, INDEX_op_add_vec, 0
3431 static const GVecGen2i ops[4] = {
3432 { .fni8 = gen_ursra8_i64,
3433 .fniv = gen_ursra_vec,
3434 .fno = gen_helper_gvec_ursra_b,
3435 .opt_opc = vecop_list,
3436 .load_dest = true,
3437 .vece = MO_8 },
3438 { .fni8 = gen_ursra16_i64,
3439 .fniv = gen_ursra_vec,
3440 .fno = gen_helper_gvec_ursra_h,
3441 .opt_opc = vecop_list,
3442 .load_dest = true,
3443 .vece = MO_16 },
3444 { .fni4 = gen_ursra32_i32,
3445 .fniv = gen_ursra_vec,
3446 .fno = gen_helper_gvec_ursra_s,
3447 .opt_opc = vecop_list,
3448 .load_dest = true,
3449 .vece = MO_32 },
3450 { .fni8 = gen_ursra64_i64,
3451 .fniv = gen_ursra_vec,
3452 .fno = gen_helper_gvec_ursra_d,
3453 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3454 .opt_opc = vecop_list,
3455 .load_dest = true,
3456 .vece = MO_64 },
3459 /* tszimm encoding produces immediates in the range [1..esize] */
3460 tcg_debug_assert(shift > 0);
3461 tcg_debug_assert(shift <= (8 << vece));
3463 tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
3466 static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3468 uint64_t mask = dup_const(MO_8, 0xff >> shift);
3469 TCGv_i64 t = tcg_temp_new_i64();
3471 tcg_gen_shri_i64(t, a, shift);
3472 tcg_gen_andi_i64(t, t, mask);
3473 tcg_gen_andi_i64(d, d, ~mask);
3474 tcg_gen_or_i64(d, d, t);
3475 tcg_temp_free_i64(t);
3478 static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3480 uint64_t mask = dup_const(MO_16, 0xffff >> shift);
3481 TCGv_i64 t = tcg_temp_new_i64();
3483 tcg_gen_shri_i64(t, a, shift);
3484 tcg_gen_andi_i64(t, t, mask);
3485 tcg_gen_andi_i64(d, d, ~mask);
3486 tcg_gen_or_i64(d, d, t);
3487 tcg_temp_free_i64(t);
3490 static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
3492 tcg_gen_shri_i32(a, a, shift);
3493 tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
3496 static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3498 tcg_gen_shri_i64(a, a, shift);
3499 tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
3502 static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
3504 TCGv_vec t = tcg_temp_new_vec_matching(d);
3505 TCGv_vec m = tcg_temp_new_vec_matching(d);
3507 tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
3508 tcg_gen_shri_vec(vece, t, a, sh);
3509 tcg_gen_and_vec(vece, d, d, m);
3510 tcg_gen_or_vec(vece, d, d, t);
3512 tcg_temp_free_vec(t);
3513 tcg_temp_free_vec(m);
3516 void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
3517 int64_t shift, uint32_t opr_sz, uint32_t max_sz)
3519 static const TCGOpcode vecop_list[] = { INDEX_op_shri_vec, 0 };
3520 const GVecGen2i ops[4] = {
3521 { .fni8 = gen_shr8_ins_i64,
3522 .fniv = gen_shr_ins_vec,
3523 .fno = gen_helper_gvec_sri_b,
3524 .load_dest = true,
3525 .opt_opc = vecop_list,
3526 .vece = MO_8 },
3527 { .fni8 = gen_shr16_ins_i64,
3528 .fniv = gen_shr_ins_vec,
3529 .fno = gen_helper_gvec_sri_h,
3530 .load_dest = true,
3531 .opt_opc = vecop_list,
3532 .vece = MO_16 },
3533 { .fni4 = gen_shr32_ins_i32,
3534 .fniv = gen_shr_ins_vec,
3535 .fno = gen_helper_gvec_sri_s,
3536 .load_dest = true,
3537 .opt_opc = vecop_list,
3538 .vece = MO_32 },
3539 { .fni8 = gen_shr64_ins_i64,
3540 .fniv = gen_shr_ins_vec,
3541 .fno = gen_helper_gvec_sri_d,
3542 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3543 .load_dest = true,
3544 .opt_opc = vecop_list,
3545 .vece = MO_64 },
3548 /* tszimm encoding produces immediates in the range [1..esize]. */
3549 tcg_debug_assert(shift > 0);
3550 tcg_debug_assert(shift <= (8 << vece));
3552 /* Shift of esize leaves destination unchanged. */
3553 if (shift < (8 << vece)) {
3554 tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
3555 } else {
3556 /* Nop, but we do need to clear the tail. */
3557 tcg_gen_gvec_mov(vece, rd_ofs, rd_ofs, opr_sz, max_sz);
3561 static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3563 uint64_t mask = dup_const(MO_8, 0xff << shift);
3564 TCGv_i64 t = tcg_temp_new_i64();
3566 tcg_gen_shli_i64(t, a, shift);
3567 tcg_gen_andi_i64(t, t, mask);
3568 tcg_gen_andi_i64(d, d, ~mask);
3569 tcg_gen_or_i64(d, d, t);
3570 tcg_temp_free_i64(t);
3573 static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3575 uint64_t mask = dup_const(MO_16, 0xffff << shift);
3576 TCGv_i64 t = tcg_temp_new_i64();
3578 tcg_gen_shli_i64(t, a, shift);
3579 tcg_gen_andi_i64(t, t, mask);
3580 tcg_gen_andi_i64(d, d, ~mask);
3581 tcg_gen_or_i64(d, d, t);
3582 tcg_temp_free_i64(t);
3585 static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
3587 tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
3590 static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
3592 tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
3595 static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
3597 TCGv_vec t = tcg_temp_new_vec_matching(d);
3598 TCGv_vec m = tcg_temp_new_vec_matching(d);
3600 tcg_gen_shli_vec(vece, t, a, sh);
3601 tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
3602 tcg_gen_and_vec(vece, d, d, m);
3603 tcg_gen_or_vec(vece, d, d, t);
3605 tcg_temp_free_vec(t);
3606 tcg_temp_free_vec(m);
3609 void gen_gvec_sli(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
3610 int64_t shift, uint32_t opr_sz, uint32_t max_sz)
3612 static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 };
3613 const GVecGen2i ops[4] = {
3614 { .fni8 = gen_shl8_ins_i64,
3615 .fniv = gen_shl_ins_vec,
3616 .fno = gen_helper_gvec_sli_b,
3617 .load_dest = true,
3618 .opt_opc = vecop_list,
3619 .vece = MO_8 },
3620 { .fni8 = gen_shl16_ins_i64,
3621 .fniv = gen_shl_ins_vec,
3622 .fno = gen_helper_gvec_sli_h,
3623 .load_dest = true,
3624 .opt_opc = vecop_list,
3625 .vece = MO_16 },
3626 { .fni4 = gen_shl32_ins_i32,
3627 .fniv = gen_shl_ins_vec,
3628 .fno = gen_helper_gvec_sli_s,
3629 .load_dest = true,
3630 .opt_opc = vecop_list,
3631 .vece = MO_32 },
3632 { .fni8 = gen_shl64_ins_i64,
3633 .fniv = gen_shl_ins_vec,
3634 .fno = gen_helper_gvec_sli_d,
3635 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3636 .load_dest = true,
3637 .opt_opc = vecop_list,
3638 .vece = MO_64 },
3641 /* tszimm encoding produces immediates in the range [0..esize-1]. */
3642 tcg_debug_assert(shift >= 0);
3643 tcg_debug_assert(shift < (8 << vece));
3645 if (shift == 0) {
3646 tcg_gen_gvec_mov(vece, rd_ofs, rm_ofs, opr_sz, max_sz);
3647 } else {
3648 tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
3652 static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
3654 gen_helper_neon_mul_u8(a, a, b);
3655 gen_helper_neon_add_u8(d, d, a);
3658 static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
3660 gen_helper_neon_mul_u8(a, a, b);
3661 gen_helper_neon_sub_u8(d, d, a);
3664 static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
3666 gen_helper_neon_mul_u16(a, a, b);
3667 gen_helper_neon_add_u16(d, d, a);
3670 static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
3672 gen_helper_neon_mul_u16(a, a, b);
3673 gen_helper_neon_sub_u16(d, d, a);
3676 static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
3678 tcg_gen_mul_i32(a, a, b);
3679 tcg_gen_add_i32(d, d, a);
3682 static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
3684 tcg_gen_mul_i32(a, a, b);
3685 tcg_gen_sub_i32(d, d, a);
3688 static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
3690 tcg_gen_mul_i64(a, a, b);
3691 tcg_gen_add_i64(d, d, a);
3694 static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
3696 tcg_gen_mul_i64(a, a, b);
3697 tcg_gen_sub_i64(d, d, a);
3700 static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
3702 tcg_gen_mul_vec(vece, a, a, b);
3703 tcg_gen_add_vec(vece, d, d, a);
3706 static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
3708 tcg_gen_mul_vec(vece, a, a, b);
3709 tcg_gen_sub_vec(vece, d, d, a);
3712 /* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
3713 * these tables are shared with AArch64 which does support them.
3714 */
3715 void gen_gvec_mla(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
3716 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
3718 static const TCGOpcode vecop_list[] = {
3719 INDEX_op_mul_vec, INDEX_op_add_vec, 0
3721 static const GVecGen3 ops[4] = {
3722 { .fni4 = gen_mla8_i32,
3723 .fniv = gen_mla_vec,
3724 .load_dest = true,
3725 .opt_opc = vecop_list,
3726 .vece = MO_8 },
3727 { .fni4 = gen_mla16_i32,
3728 .fniv = gen_mla_vec,
3729 .load_dest = true,
3730 .opt_opc = vecop_list,
3731 .vece = MO_16 },
3732 { .fni4 = gen_mla32_i32,
3733 .fniv = gen_mla_vec,
3734 .load_dest = true,
3735 .opt_opc = vecop_list,
3736 .vece = MO_32 },
3737 { .fni8 = gen_mla64_i64,
3738 .fniv = gen_mla_vec,
3739 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3740 .load_dest = true,
3741 .opt_opc = vecop_list,
3742 .vece = MO_64 },
3744 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
3747 void gen_gvec_mls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
3748 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
3750 static const TCGOpcode vecop_list[] = {
3751 INDEX_op_mul_vec, INDEX_op_sub_vec, 0
3753 static const GVecGen3 ops[4] = {
3754 { .fni4 = gen_mls8_i32,
3755 .fniv = gen_mls_vec,
3756 .load_dest = true,
3757 .opt_opc = vecop_list,
3758 .vece = MO_8 },
3759 { .fni4 = gen_mls16_i32,
3760 .fniv = gen_mls_vec,
3761 .load_dest = true,
3762 .opt_opc = vecop_list,
3763 .vece = MO_16 },
3764 { .fni4 = gen_mls32_i32,
3765 .fniv = gen_mls_vec,
3766 .load_dest = true,
3767 .opt_opc = vecop_list,
3768 .vece = MO_32 },
3769 { .fni8 = gen_mls64_i64,
3770 .fniv = gen_mls_vec,
3771 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3772 .load_dest = true,
3773 .opt_opc = vecop_list,
3774 .vece = MO_64 },
3776 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
3779 /* CMTST : test is "if (X & Y != 0)". */
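/*
 * For example, on 32-bit elements CMTST of 0x0000000f and 0x00000003
 * yields 0xffffffff (a common bit is set), while 0x0000000f and
 * 0x000000f0 yields 0.
 */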
3780 static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
3782 tcg_gen_and_i32(d, a, b);
3783 tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
3784 tcg_gen_neg_i32(d, d);
3787 void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
3789 tcg_gen_and_i64(d, a, b);
3790 tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
3791 tcg_gen_neg_i64(d, d);
3794 static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
3796 tcg_gen_and_vec(vece, d, a, b);
3797 tcg_gen_dupi_vec(vece, a, 0);
3798 tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
3801 void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
3802 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
3804 static const TCGOpcode vecop_list[] = { INDEX_op_cmp_vec, 0 };
3805 static const GVecGen3 ops[4] = {
3806 { .fni4 = gen_helper_neon_tst_u8,
3807 .fniv = gen_cmtst_vec,
3808 .opt_opc = vecop_list,
3809 .vece = MO_8 },
3810 { .fni4 = gen_helper_neon_tst_u16,
3811 .fniv = gen_cmtst_vec,
3812 .opt_opc = vecop_list,
3813 .vece = MO_16 },
3814 { .fni4 = gen_cmtst_i32,
3815 .fniv = gen_cmtst_vec,
3816 .opt_opc = vecop_list,
3817 .vece = MO_32 },
3818 { .fni8 = gen_cmtst_i64,
3819 .fniv = gen_cmtst_vec,
3820 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3821 .opt_opc = vecop_list,
3822 .vece = MO_64 },
3824 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
3827 void gen_ushl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
3829 TCGv_i32 lval = tcg_temp_new_i32();
3830 TCGv_i32 rval = tcg_temp_new_i32();
3831 TCGv_i32 lsh = tcg_temp_new_i32();
3832 TCGv_i32 rsh = tcg_temp_new_i32();
3833 TCGv_i32 zero = tcg_const_i32(0);
3834 TCGv_i32 max = tcg_const_i32(32);
3836 /*
3837 * Rely on the TCG guarantee that out of range shifts produce
3838 * unspecified results, not undefined behaviour (i.e. no trap).
3839 * Discard out-of-range results after the fact.
3840 */
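/*
 * For example, a shift operand of -3 (0xfd) gives lsh == -3, which is
 * not unsigned-less-than 32, so the left-shift result is replaced by
 * zero; rsh == 3 then selects src >> 3 via the second movcond.
 */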
3841 tcg_gen_ext8s_i32(lsh, shift);
3842 tcg_gen_neg_i32(rsh, lsh);
3843 tcg_gen_shl_i32(lval, src, lsh);
3844 tcg_gen_shr_i32(rval, src, rsh);
3845 tcg_gen_movcond_i32(TCG_COND_LTU, dst, lsh, max, lval, zero);
3846 tcg_gen_movcond_i32(TCG_COND_LTU, dst, rsh, max, rval, dst);
3848 tcg_temp_free_i32(lval);
3849 tcg_temp_free_i32(rval);
3850 tcg_temp_free_i32(lsh);
3851 tcg_temp_free_i32(rsh);
3852 tcg_temp_free_i32(zero);
3853 tcg_temp_free_i32(max);
3856 void gen_ushl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift)
3858 TCGv_i64 lval = tcg_temp_new_i64();
3859 TCGv_i64 rval = tcg_temp_new_i64();
3860 TCGv_i64 lsh = tcg_temp_new_i64();
3861 TCGv_i64 rsh = tcg_temp_new_i64();
3862 TCGv_i64 zero = tcg_const_i64(0);
3863 TCGv_i64 max = tcg_const_i64(64);
3865 /*
3866 * Rely on the TCG guarantee that out of range shifts produce
3867 * unspecified results, not undefined behaviour (i.e. no trap).
3868 * Discard out-of-range results after the fact.
3869 */
3870 tcg_gen_ext8s_i64(lsh, shift);
3871 tcg_gen_neg_i64(rsh, lsh);
3872 tcg_gen_shl_i64(lval, src, lsh);
3873 tcg_gen_shr_i64(rval, src, rsh);
3874 tcg_gen_movcond_i64(TCG_COND_LTU, dst, lsh, max, lval, zero);
3875 tcg_gen_movcond_i64(TCG_COND_LTU, dst, rsh, max, rval, dst);
3877 tcg_temp_free_i64(lval);
3878 tcg_temp_free_i64(rval);
3879 tcg_temp_free_i64(lsh);
3880 tcg_temp_free_i64(rsh);
3881 tcg_temp_free_i64(zero);
3882 tcg_temp_free_i64(max);
3885 static void gen_ushl_vec(unsigned vece, TCGv_vec dst,
3886 TCGv_vec src, TCGv_vec shift)
3888 TCGv_vec lval = tcg_temp_new_vec_matching(dst);
3889 TCGv_vec rval = tcg_temp_new_vec_matching(dst);
3890 TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
3891 TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
3892 TCGv_vec msk, max;
3894 tcg_gen_neg_vec(vece, rsh, shift);
3895 if (vece == MO_8) {
3896 tcg_gen_mov_vec(lsh, shift);
3897 } else {
3898 msk = tcg_temp_new_vec_matching(dst);
3899 tcg_gen_dupi_vec(vece, msk, 0xff);
3900 tcg_gen_and_vec(vece, lsh, shift, msk);
3901 tcg_gen_and_vec(vece, rsh, rsh, msk);
3902 tcg_temp_free_vec(msk);
3905 /*
3906 * Rely on the TCG guarantee that out of range shifts produce
3907 * unspecified results, not undefined behaviour (i.e. no trap).
3908 * Discard out-of-range results after the fact.
3909 */
3910 tcg_gen_shlv_vec(vece, lval, src, lsh);
3911 tcg_gen_shrv_vec(vece, rval, src, rsh);
3913 max = tcg_temp_new_vec_matching(dst);
3914 tcg_gen_dupi_vec(vece, max, 8 << vece);
3916 /*
3917 * The choice of LT (signed) and GEU (unsigned) is biased toward
3918 * the instructions of the x86_64 host. For MO_8, the whole byte
3919 * is significant so we must use an unsigned compare; otherwise we
3920 * have already masked to a byte and so a signed compare works.
3921 * Other tcg hosts have a full set of comparisons and do not care.
3922 */
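/*
 * For example, a shift byte of 0x90 must be treated as 144 and
 * discarded by the unsigned compare; a signed compare against 8
 * would see -112 and wrongly keep the shifted value.
 */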
3923 if (vece == MO_8) {
3924 tcg_gen_cmp_vec(TCG_COND_GEU, vece, lsh, lsh, max);
3925 tcg_gen_cmp_vec(TCG_COND_GEU, vece, rsh, rsh, max);
3926 tcg_gen_andc_vec(vece, lval, lval, lsh);
3927 tcg_gen_andc_vec(vece, rval, rval, rsh);
3928 } else {
3929 tcg_gen_cmp_vec(TCG_COND_LT, vece, lsh, lsh, max);
3930 tcg_gen_cmp_vec(TCG_COND_LT, vece, rsh, rsh, max);
3931 tcg_gen_and_vec(vece, lval, lval, lsh);
3932 tcg_gen_and_vec(vece, rval, rval, rsh);
3934 tcg_gen_or_vec(vece, dst, lval, rval);
3936 tcg_temp_free_vec(max);
3937 tcg_temp_free_vec(lval);
3938 tcg_temp_free_vec(rval);
3939 tcg_temp_free_vec(lsh);
3940 tcg_temp_free_vec(rsh);
3943 void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
3944 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
3946 static const TCGOpcode vecop_list[] = {
3947 INDEX_op_neg_vec, INDEX_op_shlv_vec,
3948 INDEX_op_shrv_vec, INDEX_op_cmp_vec, 0
3950 static const GVecGen3 ops[4] = {
3951 { .fniv = gen_ushl_vec,
3952 .fno = gen_helper_gvec_ushl_b,
3953 .opt_opc = vecop_list,
3954 .vece = MO_8 },
3955 { .fniv = gen_ushl_vec,
3956 .fno = gen_helper_gvec_ushl_h,
3957 .opt_opc = vecop_list,
3958 .vece = MO_16 },
3959 { .fni4 = gen_ushl_i32,
3960 .fniv = gen_ushl_vec,
3961 .opt_opc = vecop_list,
3962 .vece = MO_32 },
3963 { .fni8 = gen_ushl_i64,
3964 .fniv = gen_ushl_vec,
3965 .opt_opc = vecop_list,
3966 .vece = MO_64 },
3968 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
3971 void gen_sshl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
3973 TCGv_i32 lval = tcg_temp_new_i32();
3974 TCGv_i32 rval = tcg_temp_new_i32();
3975 TCGv_i32 lsh = tcg_temp_new_i32();
3976 TCGv_i32 rsh = tcg_temp_new_i32();
3977 TCGv_i32 zero = tcg_const_i32(0);
3978 TCGv_i32 max = tcg_const_i32(31);
3980 /*
3981 * Rely on the TCG guarantee that out of range shifts produce
3982 * unspecified results, not undefined behaviour (i.e. no trap).
3983 * Discard out-of-range results after the fact.
3984 */
3985 tcg_gen_ext8s_i32(lsh, shift);
3986 tcg_gen_neg_i32(rsh, lsh);
3987 tcg_gen_shl_i32(lval, src, lsh);
3988 tcg_gen_umin_i32(rsh, rsh, max);
3989 tcg_gen_sar_i32(rval, src, rsh);
3990 tcg_gen_movcond_i32(TCG_COND_LEU, lval, lsh, max, lval, zero);
3991 tcg_gen_movcond_i32(TCG_COND_LT, dst, lsh, zero, rval, lval);
3993 tcg_temp_free_i32(lval);
3994 tcg_temp_free_i32(rval);
3995 tcg_temp_free_i32(lsh);
3996 tcg_temp_free_i32(rsh);
3997 tcg_temp_free_i32(zero);
3998 tcg_temp_free_i32(max);
4001 void gen_sshl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift)
4003 TCGv_i64 lval = tcg_temp_new_i64();
4004 TCGv_i64 rval = tcg_temp_new_i64();
4005 TCGv_i64 lsh = tcg_temp_new_i64();
4006 TCGv_i64 rsh = tcg_temp_new_i64();
4007 TCGv_i64 zero = tcg_const_i64(0);
4008 TCGv_i64 max = tcg_const_i64(63);
4010 /*
4011 * Rely on the TCG guarantee that out of range shifts produce
4012 * unspecified results, not undefined behaviour (i.e. no trap).
4013 * Discard out-of-range results after the fact.
4014 */
4015 tcg_gen_ext8s_i64(lsh, shift);
4016 tcg_gen_neg_i64(rsh, lsh);
4017 tcg_gen_shl_i64(lval, src, lsh);
4018 tcg_gen_umin_i64(rsh, rsh, max);
4019 tcg_gen_sar_i64(rval, src, rsh);
4020 tcg_gen_movcond_i64(TCG_COND_LEU, lval, lsh, max, lval, zero);
4021 tcg_gen_movcond_i64(TCG_COND_LT, dst, lsh, zero, rval, lval);
4023 tcg_temp_free_i64(lval);
4024 tcg_temp_free_i64(rval);
4025 tcg_temp_free_i64(lsh);
4026 tcg_temp_free_i64(rsh);
4027 tcg_temp_free_i64(zero);
4028 tcg_temp_free_i64(max);
4031 static void gen_sshl_vec(unsigned vece, TCGv_vec dst,
4032 TCGv_vec src, TCGv_vec shift)
4034 TCGv_vec lval = tcg_temp_new_vec_matching(dst);
4035 TCGv_vec rval = tcg_temp_new_vec_matching(dst);
4036 TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
4037 TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
4038 TCGv_vec tmp = tcg_temp_new_vec_matching(dst);
4040 /*
4041 * Rely on the TCG guarantee that out of range shifts produce
4042 * unspecified results, not undefined behaviour (i.e. no trap).
4043 * Discard out-of-range results after the fact.
4044 */
4045 tcg_gen_neg_vec(vece, rsh, shift);
4046 if (vece == MO_8) {
4047 tcg_gen_mov_vec(lsh, shift);
4048 } else {
4049 tcg_gen_dupi_vec(vece, tmp, 0xff);
4050 tcg_gen_and_vec(vece, lsh, shift, tmp);
4051 tcg_gen_and_vec(vece, rsh, rsh, tmp);
4054 /* Bound rsh so out of bound right shift gets -1. */
4055 tcg_gen_dupi_vec(vece, tmp, (8 << vece) - 1);
4056 tcg_gen_umin_vec(vece, rsh, rsh, tmp);
4057 tcg_gen_cmp_vec(TCG_COND_GT, vece, tmp, lsh, tmp);
4059 tcg_gen_shlv_vec(vece, lval, src, lsh);
4060 tcg_gen_sarv_vec(vece, rval, src, rsh);
4062 /* Select in-bound left shift. */
4063 tcg_gen_andc_vec(vece, lval, lval, tmp);
4065 /* Select between left and right shift. */
4066 if (vece == MO_8) {
4067 tcg_gen_dupi_vec(vece, tmp, 0);
4068 tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, rval, lval);
4069 } else {
4070 tcg_gen_dupi_vec(vece, tmp, 0x80);
4071 tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, lval, rval);
4074 tcg_temp_free_vec(lval);
4075 tcg_temp_free_vec(rval);
4076 tcg_temp_free_vec(lsh);
4077 tcg_temp_free_vec(rsh);
4078 tcg_temp_free_vec(tmp);
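/*
 * Editor's note (not part of the original source): the signed variant
 * differs from USHL in how out-of-range counts are handled.  The right
 * shift count is clamped with umin to (element bits - 1), so a large
 * negative count still produces the sign-fill value, while out-of-range
 * positive counts are masked to zero via the cmp/andc against tmp.  The
 * final cmpsel picks the right-shift result whenever the low byte of
 * the count is negative.
 */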
4081 void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
4082 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
4084 static const TCGOpcode vecop_list[] = {
4085 INDEX_op_neg_vec, INDEX_op_umin_vec, INDEX_op_shlv_vec,
4086 INDEX_op_sarv_vec, INDEX_op_cmp_vec, INDEX_op_cmpsel_vec, 0
4088 static const GVecGen3 ops[4] = {
4089 { .fniv = gen_sshl_vec,
4090 .fno = gen_helper_gvec_sshl_b,
4091 .opt_opc = vecop_list,
4092 .vece = MO_8 },
4093 { .fniv = gen_sshl_vec,
4094 .fno = gen_helper_gvec_sshl_h,
4095 .opt_opc = vecop_list,
4096 .vece = MO_16 },
4097 { .fni4 = gen_sshl_i32,
4098 .fniv = gen_sshl_vec,
4099 .opt_opc = vecop_list,
4100 .vece = MO_32 },
4101 { .fni8 = gen_sshl_i64,
4102 .fniv = gen_sshl_vec,
4103 .opt_opc = vecop_list,
4104 .vece = MO_64 },
4106 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
4109 static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4110 TCGv_vec a, TCGv_vec b)
4112 TCGv_vec x = tcg_temp_new_vec_matching(t);
4113 tcg_gen_add_vec(vece, x, a, b);
4114 tcg_gen_usadd_vec(vece, t, a, b);
4115 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4116 tcg_gen_or_vec(vece, sat, sat, x);
4117 tcg_temp_free_vec(x);
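/*
 * Editor's note (not part of the original source): the saturation flag
 * is tracked by computing both the wrapping sum (add_vec into x) and the
 * saturating sum (usadd_vec into t); lanes where the two differ produce
 * an all-ones compare result, which is ORed into the 'sat' operand,
 * i.e. the vfp.qc accumulator passed via write_aofs below.
 */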
4120 void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
4121 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
4123 static const TCGOpcode vecop_list[] = {
4124 INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
4126 static const GVecGen4 ops[4] = {
4127 { .fniv = gen_uqadd_vec,
4128 .fno = gen_helper_gvec_uqadd_b,
4129 .write_aofs = true,
4130 .opt_opc = vecop_list,
4131 .vece = MO_8 },
4132 { .fniv = gen_uqadd_vec,
4133 .fno = gen_helper_gvec_uqadd_h,
4134 .write_aofs = true,
4135 .opt_opc = vecop_list,
4136 .vece = MO_16 },
4137 { .fniv = gen_uqadd_vec,
4138 .fno = gen_helper_gvec_uqadd_s,
4139 .write_aofs = true,
4140 .opt_opc = vecop_list,
4141 .vece = MO_32 },
4142 { .fniv = gen_uqadd_vec,
4143 .fno = gen_helper_gvec_uqadd_d,
4144 .write_aofs = true,
4145 .opt_opc = vecop_list,
4146 .vece = MO_64 },
4148 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
4149 rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
4152 static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4153 TCGv_vec a, TCGv_vec b)
4155 TCGv_vec x = tcg_temp_new_vec_matching(t);
4156 tcg_gen_add_vec(vece, x, a, b);
4157 tcg_gen_ssadd_vec(vece, t, a, b);
4158 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4159 tcg_gen_or_vec(vece, sat, sat, x);
4160 tcg_temp_free_vec(x);
4163 void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
4164 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
4166 static const TCGOpcode vecop_list[] = {
4167 INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
4169 static const GVecGen4 ops[4] = {
4170 { .fniv = gen_sqadd_vec,
4171 .fno = gen_helper_gvec_sqadd_b,
4172 .opt_opc = vecop_list,
4173 .write_aofs = true,
4174 .vece = MO_8 },
4175 { .fniv = gen_sqadd_vec,
4176 .fno = gen_helper_gvec_sqadd_h,
4177 .opt_opc = vecop_list,
4178 .write_aofs = true,
4179 .vece = MO_16 },
4180 { .fniv = gen_sqadd_vec,
4181 .fno = gen_helper_gvec_sqadd_s,
4182 .opt_opc = vecop_list,
4183 .write_aofs = true,
4184 .vece = MO_32 },
4185 { .fniv = gen_sqadd_vec,
4186 .fno = gen_helper_gvec_sqadd_d,
4187 .opt_opc = vecop_list,
4188 .write_aofs = true,
4189 .vece = MO_64 },
4191 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
4192 rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
4195 static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4196 TCGv_vec a, TCGv_vec b)
4198 TCGv_vec x = tcg_temp_new_vec_matching(t);
4199 tcg_gen_sub_vec(vece, x, a, b);
4200 tcg_gen_ussub_vec(vece, t, a, b);
4201 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4202 tcg_gen_or_vec(vece, sat, sat, x);
4203 tcg_temp_free_vec(x);
4206 void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
4207 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
4209 static const TCGOpcode vecop_list[] = {
4210 INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
4212 static const GVecGen4 ops[4] = {
4213 { .fniv = gen_uqsub_vec,
4214 .fno = gen_helper_gvec_uqsub_b,
4215 .opt_opc = vecop_list,
4216 .write_aofs = true,
4217 .vece = MO_8 },
4218 { .fniv = gen_uqsub_vec,
4219 .fno = gen_helper_gvec_uqsub_h,
4220 .opt_opc = vecop_list,
4221 .write_aofs = true,
4222 .vece = MO_16 },
4223 { .fniv = gen_uqsub_vec,
4224 .fno = gen_helper_gvec_uqsub_s,
4225 .opt_opc = vecop_list,
4226 .write_aofs = true,
4227 .vece = MO_32 },
4228 { .fniv = gen_uqsub_vec,
4229 .fno = gen_helper_gvec_uqsub_d,
4230 .opt_opc = vecop_list,
4231 .write_aofs = true,
4232 .vece = MO_64 },
4234 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
4235 rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
4238 static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
4239 TCGv_vec a, TCGv_vec b)
4241 TCGv_vec x = tcg_temp_new_vec_matching(t);
4242 tcg_gen_sub_vec(vece, x, a, b);
4243 tcg_gen_sssub_vec(vece, t, a, b);
4244 tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
4245 tcg_gen_or_vec(vece, sat, sat, x);
4246 tcg_temp_free_vec(x);
4249 void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
4250 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
4252 static const TCGOpcode vecop_list[] = {
4253 INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
4255 static const GVecGen4 ops[4] = {
4256 { .fniv = gen_sqsub_vec,
4257 .fno = gen_helper_gvec_sqsub_b,
4258 .opt_opc = vecop_list,
4259 .write_aofs = true,
4260 .vece = MO_8 },
4261 { .fniv = gen_sqsub_vec,
4262 .fno = gen_helper_gvec_sqsub_h,
4263 .opt_opc = vecop_list,
4264 .write_aofs = true,
4265 .vece = MO_16 },
4266 { .fniv = gen_sqsub_vec,
4267 .fno = gen_helper_gvec_sqsub_s,
4268 .opt_opc = vecop_list,
4269 .write_aofs = true,
4270 .vece = MO_32 },
4271 { .fniv = gen_sqsub_vec,
4272 .fno = gen_helper_gvec_sqsub_d,
4273 .opt_opc = vecop_list,
4274 .write_aofs = true,
4275 .vece = MO_64 },
4277 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
4278 rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
4281 static void gen_sabd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4283 TCGv_i32 t = tcg_temp_new_i32();
4285 tcg_gen_sub_i32(t, a, b);
4286 tcg_gen_sub_i32(d, b, a);
4287 tcg_gen_movcond_i32(TCG_COND_LT, d, a, b, d, t);
4288 tcg_temp_free_i32(t);
4291 static void gen_sabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4293 TCGv_i64 t = tcg_temp_new_i64();
4295 tcg_gen_sub_i64(t, a, b);
4296 tcg_gen_sub_i64(d, b, a);
4297 tcg_gen_movcond_i64(TCG_COND_LT, d, a, b, d, t);
4298 tcg_temp_free_i64(t);
4301 static void gen_sabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4303 TCGv_vec t = tcg_temp_new_vec_matching(d);
4305 tcg_gen_smin_vec(vece, t, a, b);
4306 tcg_gen_smax_vec(vece, d, a, b);
4307 tcg_gen_sub_vec(vece, d, d, t);
4308 tcg_temp_free_vec(t);
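/*
 * Editor's note (not part of the original source): the absolute
 * difference is formed without a separate abs step.  The scalar versions
 * compute both a-b and b-a and movcond on a<b; the vector version uses
 * max(a,b) - min(a,b), which gives the same value modulo 2^esize.
 * E.g. a=3, b=10: max-min = 7 = |3-10|.
 */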
4311 void gen_gvec_sabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
4312 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
4314 static const TCGOpcode vecop_list[] = {
4315 INDEX_op_sub_vec, INDEX_op_smin_vec, INDEX_op_smax_vec, 0
4317 static const GVecGen3 ops[4] = {
4318 { .fniv = gen_sabd_vec,
4319 .fno = gen_helper_gvec_sabd_b,
4320 .opt_opc = vecop_list,
4321 .vece = MO_8 },
4322 { .fniv = gen_sabd_vec,
4323 .fno = gen_helper_gvec_sabd_h,
4324 .opt_opc = vecop_list,
4325 .vece = MO_16 },
4326 { .fni4 = gen_sabd_i32,
4327 .fniv = gen_sabd_vec,
4328 .fno = gen_helper_gvec_sabd_s,
4329 .opt_opc = vecop_list,
4330 .vece = MO_32 },
4331 { .fni8 = gen_sabd_i64,
4332 .fniv = gen_sabd_vec,
4333 .fno = gen_helper_gvec_sabd_d,
4334 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4335 .opt_opc = vecop_list,
4336 .vece = MO_64 },
4338 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
4341 static void gen_uabd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4343 TCGv_i32 t = tcg_temp_new_i32();
4345 tcg_gen_sub_i32(t, a, b);
4346 tcg_gen_sub_i32(d, b, a);
4347 tcg_gen_movcond_i32(TCG_COND_LTU, d, a, b, d, t);
4348 tcg_temp_free_i32(t);
4351 static void gen_uabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4353 TCGv_i64 t = tcg_temp_new_i64();
4355 tcg_gen_sub_i64(t, a, b);
4356 tcg_gen_sub_i64(d, b, a);
4357 tcg_gen_movcond_i64(TCG_COND_LTU, d, a, b, d, t);
4358 tcg_temp_free_i64(t);
4361 static void gen_uabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4363 TCGv_vec t = tcg_temp_new_vec_matching(d);
4365 tcg_gen_umin_vec(vece, t, a, b);
4366 tcg_gen_umax_vec(vece, d, a, b);
4367 tcg_gen_sub_vec(vece, d, d, t);
4368 tcg_temp_free_vec(t);
4371 void gen_gvec_uabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
4372 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
4374 static const TCGOpcode vecop_list[] = {
4375 INDEX_op_sub_vec, INDEX_op_umin_vec, INDEX_op_umax_vec, 0
4377 static const GVecGen3 ops[4] = {
4378 { .fniv = gen_uabd_vec,
4379 .fno = gen_helper_gvec_uabd_b,
4380 .opt_opc = vecop_list,
4381 .vece = MO_8 },
4382 { .fniv = gen_uabd_vec,
4383 .fno = gen_helper_gvec_uabd_h,
4384 .opt_opc = vecop_list,
4385 .vece = MO_16 },
4386 { .fni4 = gen_uabd_i32,
4387 .fniv = gen_uabd_vec,
4388 .fno = gen_helper_gvec_uabd_s,
4389 .opt_opc = vecop_list,
4390 .vece = MO_32 },
4391 { .fni8 = gen_uabd_i64,
4392 .fniv = gen_uabd_vec,
4393 .fno = gen_helper_gvec_uabd_d,
4394 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4395 .opt_opc = vecop_list,
4396 .vece = MO_64 },
4398 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
4401 static void gen_saba_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4403 TCGv_i32 t = tcg_temp_new_i32();
4404 gen_sabd_i32(t, a, b);
4405 tcg_gen_add_i32(d, d, t);
4406 tcg_temp_free_i32(t);
4409 static void gen_saba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4411 TCGv_i64 t = tcg_temp_new_i64();
4412 gen_sabd_i64(t, a, b);
4413 tcg_gen_add_i64(d, d, t);
4414 tcg_temp_free_i64(t);
4417 static void gen_saba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4419 TCGv_vec t = tcg_temp_new_vec_matching(d);
4420 gen_sabd_vec(vece, t, a, b);
4421 tcg_gen_add_vec(vece, d, d, t);
4422 tcg_temp_free_vec(t);
4425 void gen_gvec_saba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
4426 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
4428 static const TCGOpcode vecop_list[] = {
4429 INDEX_op_sub_vec, INDEX_op_add_vec,
4430 INDEX_op_smin_vec, INDEX_op_smax_vec, 0
4432 static const GVecGen3 ops[4] = {
4433 { .fniv = gen_saba_vec,
4434 .fno = gen_helper_gvec_saba_b,
4435 .opt_opc = vecop_list,
4436 .load_dest = true,
4437 .vece = MO_8 },
4438 { .fniv = gen_saba_vec,
4439 .fno = gen_helper_gvec_saba_h,
4440 .opt_opc = vecop_list,
4441 .load_dest = true,
4442 .vece = MO_16 },
4443 { .fni4 = gen_saba_i32,
4444 .fniv = gen_saba_vec,
4445 .fno = gen_helper_gvec_saba_s,
4446 .opt_opc = vecop_list,
4447 .load_dest = true,
4448 .vece = MO_32 },
4449 { .fni8 = gen_saba_i64,
4450 .fniv = gen_saba_vec,
4451 .fno = gen_helper_gvec_saba_d,
4452 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4453 .opt_opc = vecop_list,
4454 .load_dest = true,
4455 .vece = MO_64 },
4457 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
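/*
 * Editor's note (not part of the original source): SABA/UABA are simply
 * the corresponding absolute-difference operation followed by an
 * accumulate into the destination, hence .load_dest = true so that the
 * previous contents of rd are read before being added to.
 */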
4460 static void gen_uaba_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
4462 TCGv_i32 t = tcg_temp_new_i32();
4463 gen_uabd_i32(t, a, b);
4464 tcg_gen_add_i32(d, d, t);
4465 tcg_temp_free_i32(t);
4468 static void gen_uaba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
4470 TCGv_i64 t = tcg_temp_new_i64();
4471 gen_uabd_i64(t, a, b);
4472 tcg_gen_add_i64(d, d, t);
4473 tcg_temp_free_i64(t);
4476 static void gen_uaba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
4478 TCGv_vec t = tcg_temp_new_vec_matching(d);
4479 gen_uabd_vec(vece, t, a, b);
4480 tcg_gen_add_vec(vece, d, d, t);
4481 tcg_temp_free_vec(t);
4484 void gen_gvec_uaba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
4485 uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
4487 static const TCGOpcode vecop_list[] = {
4488 INDEX_op_sub_vec, INDEX_op_add_vec,
4489 INDEX_op_umin_vec, INDEX_op_umax_vec, 0
4491 static const GVecGen3 ops[4] = {
4492 { .fniv = gen_uaba_vec,
4493 .fno = gen_helper_gvec_uaba_b,
4494 .opt_opc = vecop_list,
4495 .load_dest = true,
4496 .vece = MO_8 },
4497 { .fniv = gen_uaba_vec,
4498 .fno = gen_helper_gvec_uaba_h,
4499 .opt_opc = vecop_list,
4500 .load_dest = true,
4501 .vece = MO_16 },
4502 { .fni4 = gen_uaba_i32,
4503 .fniv = gen_uaba_vec,
4504 .fno = gen_helper_gvec_uaba_s,
4505 .opt_opc = vecop_list,
4506 .load_dest = true,
4507 .vece = MO_32 },
4508 { .fni8 = gen_uaba_i64,
4509 .fniv = gen_uaba_vec,
4510 .fno = gen_helper_gvec_uaba_d,
4511 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
4512 .opt_opc = vecop_list,
4513 .load_dest = true,
4514 .vece = MO_64 },
4516 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
4519 static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
4520 int opc1, int crn, int crm, int opc2,
4521 bool isread, int rt, int rt2)
4523 const ARMCPRegInfo *ri;
4525 ri = get_arm_cp_reginfo(s->cp_regs,
4526 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4527 if (ri) {
4528 bool need_exit_tb;
4530 /* Check access permissions */
4531 if (!cp_access_ok(s->current_el, ri, isread)) {
4532 unallocated_encoding(s);
4533 return;
4536 if (s->hstr_active || ri->accessfn ||
4537 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
4538 /* Emit code to perform further access permissions checks at
4539 * runtime; this may result in an exception.
4540 * Note that on XScale all cp0..cp13 registers do an access check
4541 * call in order to handle c15_cpar.
4542 */
4543 TCGv_ptr tmpptr;
4544 TCGv_i32 tcg_syn, tcg_isread;
4545 uint32_t syndrome;
4547 /* Note that since we are an implementation which takes an
4548 * exception on a trapped conditional instruction only if the
4549 * instruction passes its condition code check, we can take
4550 * advantage of the clause in the ARM ARM that allows us to set
4551 * the COND field in the instruction to 0xE in all cases.
4552 * We could fish the actual condition out of the insn (ARM)
4553 * or the condexec bits (Thumb) but it isn't necessary.
4554 */
4555 switch (cpnum) {
4556 case 14:
4557 if (is64) {
4558 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4559 isread, false);
4560 } else {
4561 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4562 rt, isread, false);
4564 break;
4565 case 15:
4566 if (is64) {
4567 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4568 isread, false);
4569 } else {
4570 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4571 rt, isread, false);
4573 break;
4574 default:
4575 /* ARMv8 defines that only coprocessors 14 and 15 exist,
4576 * so this can only happen if this is an ARMv7 or earlier CPU,
4577 * in which case the syndrome information won't actually be
4578 * guest visible.
4579 */
4580 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
4581 syndrome = syn_uncategorized();
4582 break;
4585 gen_set_condexec(s);
4586 gen_set_pc_im(s, s->pc_curr);
4587 tmpptr = tcg_const_ptr(ri);
4588 tcg_syn = tcg_const_i32(syndrome);
4589 tcg_isread = tcg_const_i32(isread);
4590 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
4591 tcg_isread);
4592 tcg_temp_free_ptr(tmpptr);
4593 tcg_temp_free_i32(tcg_syn);
4594 tcg_temp_free_i32(tcg_isread);
4595 } else if (ri->type & ARM_CP_RAISES_EXC) {
4597 * The readfn or writefn might raise an exception;
4598 * synchronize the CPU state in case it does.
4600 gen_set_condexec(s);
4601 gen_set_pc_im(s, s->pc_curr);
4604 /* Handle special cases first */
4605 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
4606 case ARM_CP_NOP:
4607 return;
4608 case ARM_CP_WFI:
4609 if (isread) {
4610 unallocated_encoding(s);
4611 return;
4613 gen_set_pc_im(s, s->base.pc_next);
4614 s->base.is_jmp = DISAS_WFI;
4615 return;
4616 default:
4617 break;
4620 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
4621 gen_io_start();
4624 if (isread) {
4625 /* Read */
4626 if (is64) {
4627 TCGv_i64 tmp64;
4628 TCGv_i32 tmp;
4629 if (ri->type & ARM_CP_CONST) {
4630 tmp64 = tcg_const_i64(ri->resetvalue);
4631 } else if (ri->readfn) {
4632 TCGv_ptr tmpptr;
4633 tmp64 = tcg_temp_new_i64();
4634 tmpptr = tcg_const_ptr(ri);
4635 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
4636 tcg_temp_free_ptr(tmpptr);
4637 } else {
4638 tmp64 = tcg_temp_new_i64();
4639 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
4641 tmp = tcg_temp_new_i32();
4642 tcg_gen_extrl_i64_i32(tmp, tmp64);
4643 store_reg(s, rt, tmp);
4644 tmp = tcg_temp_new_i32();
4645 tcg_gen_extrh_i64_i32(tmp, tmp64);
4646 tcg_temp_free_i64(tmp64);
4647 store_reg(s, rt2, tmp);
4648 } else {
4649 TCGv_i32 tmp;
4650 if (ri->type & ARM_CP_CONST) {
4651 tmp = tcg_const_i32(ri->resetvalue);
4652 } else if (ri->readfn) {
4653 TCGv_ptr tmpptr;
4654 tmp = tcg_temp_new_i32();
4655 tmpptr = tcg_const_ptr(ri);
4656 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
4657 tcg_temp_free_ptr(tmpptr);
4658 } else {
4659 tmp = load_cpu_offset(ri->fieldoffset);
4661 if (rt == 15) {
4662 /* Destination register of r15 for 32 bit loads sets
4663 * the condition codes from the high 4 bits of the value
4665 gen_set_nzcv(tmp);
4666 tcg_temp_free_i32(tmp);
4667 } else {
4668 store_reg(s, rt, tmp);
4671 } else {
4672 /* Write */
4673 if (ri->type & ARM_CP_CONST) {
4674 /* If not forbidden by access permissions, treat as WI */
4675 return;
4678 if (is64) {
4679 TCGv_i32 tmplo, tmphi;
4680 TCGv_i64 tmp64 = tcg_temp_new_i64();
4681 tmplo = load_reg(s, rt);
4682 tmphi = load_reg(s, rt2);
4683 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
4684 tcg_temp_free_i32(tmplo);
4685 tcg_temp_free_i32(tmphi);
4686 if (ri->writefn) {
4687 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4688 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
4689 tcg_temp_free_ptr(tmpptr);
4690 } else {
4691 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
4693 tcg_temp_free_i64(tmp64);
4694 } else {
4695 if (ri->writefn) {
4696 TCGv_i32 tmp;
4697 TCGv_ptr tmpptr;
4698 tmp = load_reg(s, rt);
4699 tmpptr = tcg_const_ptr(ri);
4700 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
4701 tcg_temp_free_ptr(tmpptr);
4702 tcg_temp_free_i32(tmp);
4703 } else {
4704 TCGv_i32 tmp = load_reg(s, rt);
4705 store_cpu_offset(tmp, ri->fieldoffset);
4710 /* I/O operations must end the TB here (whether read or write) */
4711 need_exit_tb = ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) &&
4712 (ri->type & ARM_CP_IO));
4714 if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4716 * A write to any coprocessor register that ends a TB
4717 * must rebuild the hflags for the next TB.
4719 TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
4720 if (arm_dc_feature(s, ARM_FEATURE_M)) {
4721 gen_helper_rebuild_hflags_m32(cpu_env, tcg_el);
4722 } else {
4723 if (ri->type & ARM_CP_NEWEL) {
4724 gen_helper_rebuild_hflags_a32_newel(cpu_env);
4725 } else {
4726 gen_helper_rebuild_hflags_a32(cpu_env, tcg_el);
4729 tcg_temp_free_i32(tcg_el);
4731 * We default to ending the TB on a coprocessor register write,
4732 * but allow this to be suppressed by the register definition
4733 * (usually only necessary to work around guest bugs).
4735 need_exit_tb = true;
4737 if (need_exit_tb) {
4738 gen_lookup_tb(s);
4741 return;
4744 /* Unknown register; this might be a guest error or a QEMU
4745 * unimplemented feature.
4747 if (is64) {
4748 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
4749 "64 bit system register cp:%d opc1: %d crm:%d "
4750 "(%s)\n",
4751 isread ? "read" : "write", cpnum, opc1, crm,
4752 s->ns ? "non-secure" : "secure");
4753 } else {
4754 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
4755 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
4756 "(%s)\n",
4757 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
4758 s->ns ? "non-secure" : "secure");
4761 unallocated_encoding(s);
4762 return;
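/*
 * Editor's note (not part of the original source): in outline,
 * do_coproc_insn() above (1) looks up the ARMCPRegInfo for the encoding,
 * (2) performs static and, where required, runtime access checks,
 * (3) handles the special NOP/WFI register types, (4) emits the actual
 * read or write either as a direct CPUARMState field access or via the
 * get/set_cp_reg helpers, and (5) ends the TB after I/O-type accesses
 * and after most writes so that rebuilt hflags take effect for the
 * next TB.
 */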
4765 /* Decode XScale DSP or iWMMXt insn (in the copro space, cp=0 or 1) */
4766 static void disas_xscale_insn(DisasContext *s, uint32_t insn)
4768 int cpnum = (insn >> 8) & 0xf;
4770 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
4771 unallocated_encoding(s);
4772 } else if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
4773 if (disas_iwmmxt_insn(s, insn)) {
4774 unallocated_encoding(s);
4776 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
4777 if (disas_dsp_insn(s, insn)) {
4778 unallocated_encoding(s);
4783 /* Store a 64-bit value to a register pair. Clobbers val. */
4784 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
4786 TCGv_i32 tmp;
4787 tmp = tcg_temp_new_i32();
4788 tcg_gen_extrl_i64_i32(tmp, val);
4789 store_reg(s, rlow, tmp);
4790 tmp = tcg_temp_new_i32();
4791 tcg_gen_extrh_i64_i32(tmp, val);
4792 store_reg(s, rhigh, tmp);
4795 /* load and add a 64-bit value from a register pair. */
4796 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
4798 TCGv_i64 tmp;
4799 TCGv_i32 tmpl;
4800 TCGv_i32 tmph;
4802 /* Load 64-bit value rd:rn. */
4803 tmpl = load_reg(s, rlow);
4804 tmph = load_reg(s, rhigh);
4805 tmp = tcg_temp_new_i64();
4806 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
4807 tcg_temp_free_i32(tmpl);
4808 tcg_temp_free_i32(tmph);
4809 tcg_gen_add_i64(val, val, tmp);
4810 tcg_temp_free_i64(tmp);
4813 /* Set N and Z flags from hi|lo. */
4814 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
4816 tcg_gen_mov_i32(cpu_NF, hi);
4817 tcg_gen_or_i32(cpu_ZF, lo, hi);
4820 /* Load/Store exclusive instructions are implemented by remembering
4821 the value/address loaded, and seeing if these are the same
4822 when the store is performed. This should be sufficient to implement
4823 the architecturally mandated semantics, and avoids having to monitor
4824 regular stores. The compare vs the remembered value is done during
4825 the cmpxchg operation, but we must compare the addresses manually. */
4826 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
4827 TCGv_i32 addr, int size)
4829 TCGv_i32 tmp = tcg_temp_new_i32();
4830 MemOp opc = size | MO_ALIGN | s->be_data;
4832 s->is_ldex = true;
4834 if (size == 3) {
4835 TCGv_i32 tmp2 = tcg_temp_new_i32();
4836 TCGv_i64 t64 = tcg_temp_new_i64();
4838 /* For AArch32, architecturally the 32-bit word at the lowest
4839 * address is always Rt and the one at addr+4 is Rt2, even if
4840 * the CPU is big-endian. That means we don't want to do a
4841 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
4842 * for an architecturally 64-bit access, but instead do a
4843 * 64-bit access using MO_BE if appropriate and then split
4844 * the two halves.
4845 * This only makes a difference for BE32 user-mode, where
4846 * frob64() must not flip the two halves of the 64-bit data
4847 * but this code must treat BE32 user-mode like BE32 system.
4849 TCGv taddr = gen_aa32_addr(s, addr, opc);
4851 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
4852 tcg_temp_free(taddr);
4853 tcg_gen_mov_i64(cpu_exclusive_val, t64);
4854 if (s->be_data == MO_BE) {
4855 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
4856 } else {
4857 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
4859 tcg_temp_free_i64(t64);
4861 store_reg(s, rt2, tmp2);
4862 } else {
4863 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
4864 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
4867 store_reg(s, rt, tmp);
4868 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
4871 static void gen_clrex(DisasContext *s)
4873 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
4876 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
4877 TCGv_i32 addr, int size)
4879 TCGv_i32 t0, t1, t2;
4880 TCGv_i64 extaddr;
4881 TCGv taddr;
4882 TCGLabel *done_label;
4883 TCGLabel *fail_label;
4884 MemOp opc = size | MO_ALIGN | s->be_data;
4886 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
4887 [addr] = {Rt};
4888 {Rd} = 0;
4889 } else {
4890 {Rd} = 1;
4891 } */
4892 fail_label = gen_new_label();
4893 done_label = gen_new_label();
4894 extaddr = tcg_temp_new_i64();
4895 tcg_gen_extu_i32_i64(extaddr, addr);
4896 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
4897 tcg_temp_free_i64(extaddr);
4899 taddr = gen_aa32_addr(s, addr, opc);
4900 t0 = tcg_temp_new_i32();
4901 t1 = load_reg(s, rt);
4902 if (size == 3) {
4903 TCGv_i64 o64 = tcg_temp_new_i64();
4904 TCGv_i64 n64 = tcg_temp_new_i64();
4906 t2 = load_reg(s, rt2);
4907 /* For AArch32, architecturally the 32-bit word at the lowest
4908 * address is always Rt and the one at addr+4 is Rt2, even if
4909 * the CPU is big-endian. Since we're going to treat this as a
4910 * single 64-bit BE store, we need to put the two halves in the
4911 * opposite order for BE to LE, so that they end up in the right
4912 * places.
4913 * We don't want gen_aa32_frob64() because that does the wrong
4914 * thing for BE32 usermode.
4916 if (s->be_data == MO_BE) {
4917 tcg_gen_concat_i32_i64(n64, t2, t1);
4918 } else {
4919 tcg_gen_concat_i32_i64(n64, t1, t2);
4921 tcg_temp_free_i32(t2);
4923 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
4924 get_mem_index(s), opc);
4925 tcg_temp_free_i64(n64);
4927 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
4928 tcg_gen_extrl_i64_i32(t0, o64);
4930 tcg_temp_free_i64(o64);
4931 } else {
4932 t2 = tcg_temp_new_i32();
4933 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
4934 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
4935 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
4936 tcg_temp_free_i32(t2);
4938 tcg_temp_free_i32(t1);
4939 tcg_temp_free(taddr);
4940 tcg_gen_mov_i32(cpu_R[rd], t0);
4941 tcg_temp_free_i32(t0);
4942 tcg_gen_br(done_label);
4944 gen_set_label(fail_label);
4945 tcg_gen_movi_i32(cpu_R[rd], 1);
4946 gen_set_label(done_label);
4947 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
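/*
 * Editor's note (not part of the original source): a rough C sketch of
 * what the generated STREX sequence above does for a 32-bit access
 * (names here are illustrative, not QEMU APIs):
 *
 *   if (env->exclusive_addr != addr) goto fail;
 *   old = atomic_cmpxchg(&mem[addr], env->exclusive_val, newval);
 *   rd = (old != env->exclusive_val);   // 0 on success, 1 on failure
 *   goto done;
 * fail:
 *   rd = 1;
 * done:
 *   env->exclusive_addr = -1;           // clear the monitor
 */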
4950 /* gen_srs:
4951 * @env: CPUARMState
4952 * @s: DisasContext
4953 * @mode: mode field from insn (which stack to store to)
4954 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
4955 * @writeback: true if writeback bit set
4957 * Generate code for the SRS (Store Return State) insn.
4959 static void gen_srs(DisasContext *s,
4960 uint32_t mode, uint32_t amode, bool writeback)
4962 int32_t offset;
4963 TCGv_i32 addr, tmp;
4964 bool undef = false;
4966 /* SRS is:
4967 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
4968 * and specified mode is monitor mode
4969 * - UNDEFINED in Hyp mode
4970 * - UNPREDICTABLE in User or System mode
4971 * - UNPREDICTABLE if the specified mode is:
4972 * -- not implemented
4973 * -- not a valid mode number
4974 * -- a mode that's at a higher exception level
4975 * -- Monitor, if we are Non-secure
4976 * For the UNPREDICTABLE cases we choose to UNDEF.
4977 */
4978 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
4979 gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
4980 return;
4983 if (s->current_el == 0 || s->current_el == 2) {
4984 undef = true;
4987 switch (mode) {
4988 case ARM_CPU_MODE_USR:
4989 case ARM_CPU_MODE_FIQ:
4990 case ARM_CPU_MODE_IRQ:
4991 case ARM_CPU_MODE_SVC:
4992 case ARM_CPU_MODE_ABT:
4993 case ARM_CPU_MODE_UND:
4994 case ARM_CPU_MODE_SYS:
4995 break;
4996 case ARM_CPU_MODE_HYP:
4997 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
4998 undef = true;
5000 break;
5001 case ARM_CPU_MODE_MON:
5002 /* No need to check specifically for "are we non-secure" because
5003 * we've already made EL0 UNDEF and handled the trap for S-EL1;
5004 * so if this isn't EL3 then we must be non-secure.
5006 if (s->current_el != 3) {
5007 undef = true;
5009 break;
5010 default:
5011 undef = true;
5014 if (undef) {
5015 unallocated_encoding(s);
5016 return;
5019 addr = tcg_temp_new_i32();
5020 tmp = tcg_const_i32(mode);
5021 /* get_r13_banked() will raise an exception if called from System mode */
5022 gen_set_condexec(s);
5023 gen_set_pc_im(s, s->pc_curr);
5024 gen_helper_get_r13_banked(addr, cpu_env, tmp);
5025 tcg_temp_free_i32(tmp);
5026 switch (amode) {
5027 case 0: /* DA */
5028 offset = -4;
5029 break;
5030 case 1: /* IA */
5031 offset = 0;
5032 break;
5033 case 2: /* DB */
5034 offset = -8;
5035 break;
5036 case 3: /* IB */
5037 offset = 4;
5038 break;
5039 default:
5040 abort();
5042 tcg_gen_addi_i32(addr, addr, offset);
5043 tmp = load_reg(s, 14);
5044 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5045 tcg_temp_free_i32(tmp);
5046 tmp = load_cpu_field(spsr);
5047 tcg_gen_addi_i32(addr, addr, 4);
5048 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5049 tcg_temp_free_i32(tmp);
5050 if (writeback) {
5051 switch (amode) {
5052 case 0:
5053 offset = -8;
5054 break;
5055 case 1:
5056 offset = 4;
5057 break;
5058 case 2:
5059 offset = -4;
5060 break;
5061 case 3:
5062 offset = 0;
5063 break;
5064 default:
5065 abort();
5067 tcg_gen_addi_i32(addr, addr, offset);
5068 tmp = tcg_const_i32(mode);
5069 gen_helper_set_r13_banked(cpu_env, tmp, addr);
5070 tcg_temp_free_i32(tmp);
5072 tcg_temp_free_i32(addr);
5073 s->base.is_jmp = DISAS_UPDATE_EXIT;
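/*
 * Editor's note (not part of the original source): the two amode
 * switches above encode the four SRS addressing modes.  The first
 * offset places the store of LR relative to the banked SP (DA:-4,
 * IA:0, DB:-8, IB:+4), with the SPSR stored 4 bytes above LR; the
 * second offset is added to the SPSR slot address to form the
 * written-back SP, so overall DA/DB decrement SP by 8 and IA/IB
 * increment it by 8.
 */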
5076 /* Generate a label used for skipping this instruction */
5077 static void arm_gen_condlabel(DisasContext *s)
5079 if (!s->condjmp) {
5080 s->condlabel = gen_new_label();
5081 s->condjmp = 1;
5085 /* Skip this instruction if the ARM condition is false */
5086 static void arm_skip_unless(DisasContext *s, uint32_t cond)
5088 arm_gen_condlabel(s);
5089 arm_gen_test_cc(cond ^ 1, s->condlabel);
5094 * Constant expanders for the decoders.
5097 static int negate(DisasContext *s, int x)
5099 return -x;
5102 static int plus_2(DisasContext *s, int x)
5104 return x + 2;
5107 static int times_2(DisasContext *s, int x)
5109 return x * 2;
5112 static int times_4(DisasContext *s, int x)
5114 return x * 4;
5117 /* Return only the rotation part of T32ExpandImm. */
5118 static int t32_expandimm_rot(DisasContext *s, int x)
5120 return x & 0xc00 ? extract32(x, 7, 5) : 0;
5123 /* Return the unrotated immediate from T32ExpandImm. */
5124 static int t32_expandimm_imm(DisasContext *s, int x)
5126 int imm = extract32(x, 0, 8);
5128 switch (extract32(x, 8, 4)) {
5129 case 0: /* XY */
5130 /* Nothing to do. */
5131 break;
5132 case 1: /* 00XY00XY */
5133 imm *= 0x00010001;
5134 break;
5135 case 2: /* XY00XY00 */
5136 imm *= 0x01000100;
5137 break;
5138 case 3: /* XYXYXYXY */
5139 imm *= 0x01010101;
5140 break;
5141 default:
5142 /* Rotated constant. */
5143 imm |= 0x80;
5144 break;
5146 return imm;
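/*
 * Editor's note (not part of the original source): worked example of the
 * T32ExpandImm split.  For imm12 = 0x42b, bits [11:8] are nonzero, so
 * this is a rotated constant: t32_expandimm_imm() returns 0x2b | 0x80 =
 * 0xab and t32_expandimm_rot() returns bits [11:7] = 8; the rotation is
 * applied later (e.g. ror32(0xab, 8) in op_s_rri_rot()) giving
 * 0xab000000.  When bits [11:10] are clear the rotation is 0 and only
 * the replication patterns above apply.
 */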
5149 static int t32_branch24(DisasContext *s, int x)
5151 /* Convert J1:J2 at x[22:21] to I2:I1, which involves I=J^~S. */
5152 x ^= !(x < 0) * (3 << 21);
5153 /* Append the final zero. */
5154 return x << 1;
5157 static int t16_setflags(DisasContext *s)
5159 return s->condexec_mask == 0;
5162 static int t16_push_list(DisasContext *s, int x)
5164 return (x & 0xff) | (x & 0x100) << (14 - 8);
5167 static int t16_pop_list(DisasContext *s, int x)
5169 return (x & 0xff) | (x & 0x100) << (15 - 8);
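/*
 * Editor's note (not part of the original source): the two expanders
 * above map the Thumb-1 register-list encodings: bit 8 of the insn
 * ('R') becomes LR (bit 14) in a PUSH list and PC (bit 15) in a POP
 * list, while bits [7:0] select r0-r7 directly.
 */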
5173 * Include the generated decoders.
5176 #include "decode-a32.c.inc"
5177 #include "decode-a32-uncond.c.inc"
5178 #include "decode-t32.c.inc"
5179 #include "decode-t16.c.inc"
5181 static bool valid_cp(DisasContext *s, int cp)
5184 * Return true if this coprocessor field indicates something
5185 * that's really a possible coprocessor.
5186 * For v7 and earlier, coprocessors 8..15 were reserved for Arm use,
5187 * and of those only cp14 and cp15 were used for registers.
5188 * cp10 and cp11 were used for VFP and Neon, whose decode is
5189 * dealt with elsewhere. With the advent of fp16, cp9 is also
5190 * now part of VFP.
5191 * For v8A and later, the encoding has been tightened so that
5192 * only cp14 and cp15 are valid, and other values aren't considered
5193 * to be in the coprocessor-instruction space at all. v8M still
5194 * permits coprocessors 0..7.
5196 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
5197 !arm_dc_feature(s, ARM_FEATURE_M)) {
5198 return cp >= 14;
5200 return cp < 8 || cp >= 14;
5203 static bool trans_MCR(DisasContext *s, arg_MCR *a)
5205 if (!valid_cp(s, a->cp)) {
5206 return false;
5208 do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2,
5209 false, a->rt, 0);
5210 return true;
5213 static bool trans_MRC(DisasContext *s, arg_MRC *a)
5215 if (!valid_cp(s, a->cp)) {
5216 return false;
5218 do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2,
5219 true, a->rt, 0);
5220 return true;
5223 static bool trans_MCRR(DisasContext *s, arg_MCRR *a)
5225 if (!valid_cp(s, a->cp)) {
5226 return false;
5228 do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0,
5229 false, a->rt, a->rt2);
5230 return true;
5233 static bool trans_MRRC(DisasContext *s, arg_MRRC *a)
5235 if (!valid_cp(s, a->cp)) {
5236 return false;
5238 do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0,
5239 true, a->rt, a->rt2);
5240 return true;
5243 /* Helpers to swap operands for reverse-subtract. */
5244 static void gen_rsb(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
5246 tcg_gen_sub_i32(dst, b, a);
5249 static void gen_rsb_CC(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
5251 gen_sub_CC(dst, b, a);
5254 static void gen_rsc(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
5256 gen_sub_carry(dest, b, a);
5259 static void gen_rsc_CC(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
5261 gen_sbc_CC(dest, b, a);
5265 * Helpers for the data processing routines.
5267 * After the computation store the results back.
5268 * This may be suppressed altogether (STREG_NONE), require a runtime
5269 * check against the stack limits (STREG_SP_CHECK), or generate an
5270 * exception return. Oh, or store into a register.
5272 * Always return true, indicating success for a trans_* function.
5274 typedef enum {
5275 STREG_NONE,
5276 STREG_NORMAL,
5277 STREG_SP_CHECK,
5278 STREG_EXC_RET,
5279 } StoreRegKind;
5281 static bool store_reg_kind(DisasContext *s, int rd,
5282 TCGv_i32 val, StoreRegKind kind)
5284 switch (kind) {
5285 case STREG_NONE:
5286 tcg_temp_free_i32(val);
5287 return true;
5288 case STREG_NORMAL:
5289 /* See ALUWritePC: Interworking only from a32 mode. */
5290 if (s->thumb) {
5291 store_reg(s, rd, val);
5292 } else {
5293 store_reg_bx(s, rd, val);
5295 return true;
5296 case STREG_SP_CHECK:
5297 store_sp_checked(s, val);
5298 return true;
5299 case STREG_EXC_RET:
5300 gen_exception_return(s, val);
5301 return true;
5303 g_assert_not_reached();
5307 * Data Processing (register)
5309 * Operate, with set flags, one register source,
5310 * one immediate shifted register source, and a destination.
5312 static bool op_s_rrr_shi(DisasContext *s, arg_s_rrr_shi *a,
5313 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
5314 int logic_cc, StoreRegKind kind)
5316 TCGv_i32 tmp1, tmp2;
5318 tmp2 = load_reg(s, a->rm);
5319 gen_arm_shift_im(tmp2, a->shty, a->shim, logic_cc);
5320 tmp1 = load_reg(s, a->rn);
5322 gen(tmp1, tmp1, tmp2);
5323 tcg_temp_free_i32(tmp2);
5325 if (logic_cc) {
5326 gen_logic_CC(tmp1);
5328 return store_reg_kind(s, a->rd, tmp1, kind);
5331 static bool op_s_rxr_shi(DisasContext *s, arg_s_rrr_shi *a,
5332 void (*gen)(TCGv_i32, TCGv_i32),
5333 int logic_cc, StoreRegKind kind)
5335 TCGv_i32 tmp;
5337 tmp = load_reg(s, a->rm);
5338 gen_arm_shift_im(tmp, a->shty, a->shim, logic_cc);
5340 gen(tmp, tmp);
5341 if (logic_cc) {
5342 gen_logic_CC(tmp);
5344 return store_reg_kind(s, a->rd, tmp, kind);
5348 * Data-processing (register-shifted register)
5350 * Operate, with set flags, one register source,
5351 * one register shifted register source, and a destination.
5353 static bool op_s_rrr_shr(DisasContext *s, arg_s_rrr_shr *a,
5354 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
5355 int logic_cc, StoreRegKind kind)
5357 TCGv_i32 tmp1, tmp2;
5359 tmp1 = load_reg(s, a->rs);
5360 tmp2 = load_reg(s, a->rm);
5361 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
5362 tmp1 = load_reg(s, a->rn);
5364 gen(tmp1, tmp1, tmp2);
5365 tcg_temp_free_i32(tmp2);
5367 if (logic_cc) {
5368 gen_logic_CC(tmp1);
5370 return store_reg_kind(s, a->rd, tmp1, kind);
5373 static bool op_s_rxr_shr(DisasContext *s, arg_s_rrr_shr *a,
5374 void (*gen)(TCGv_i32, TCGv_i32),
5375 int logic_cc, StoreRegKind kind)
5377 TCGv_i32 tmp1, tmp2;
5379 tmp1 = load_reg(s, a->rs);
5380 tmp2 = load_reg(s, a->rm);
5381 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
5383 gen(tmp2, tmp2);
5384 if (logic_cc) {
5385 gen_logic_CC(tmp2);
5387 return store_reg_kind(s, a->rd, tmp2, kind);
5391 * Data-processing (immediate)
5393 * Operate, with set flags, one register source,
5394 * one rotated immediate, and a destination.
5396 * Note that logic_cc && a->rot setting CF based on the msb of the
5397 * immediate is the reason why we must pass in the unrotated form
5398 * of the immediate.
5400 static bool op_s_rri_rot(DisasContext *s, arg_s_rri_rot *a,
5401 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
5402 int logic_cc, StoreRegKind kind)
5404 TCGv_i32 tmp1, tmp2;
5405 uint32_t imm;
5407 imm = ror32(a->imm, a->rot);
5408 if (logic_cc && a->rot) {
5409 tcg_gen_movi_i32(cpu_CF, imm >> 31);
5411 tmp2 = tcg_const_i32(imm);
5412 tmp1 = load_reg(s, a->rn);
5414 gen(tmp1, tmp1, tmp2);
5415 tcg_temp_free_i32(tmp2);
5417 if (logic_cc) {
5418 gen_logic_CC(tmp1);
5420 return store_reg_kind(s, a->rd, tmp1, kind);
5423 static bool op_s_rxi_rot(DisasContext *s, arg_s_rri_rot *a,
5424 void (*gen)(TCGv_i32, TCGv_i32),
5425 int logic_cc, StoreRegKind kind)
5427 TCGv_i32 tmp;
5428 uint32_t imm;
5430 imm = ror32(a->imm, a->rot);
5431 if (logic_cc && a->rot) {
5432 tcg_gen_movi_i32(cpu_CF, imm >> 31);
5434 tmp = tcg_const_i32(imm);
5436 gen(tmp, tmp);
5437 if (logic_cc) {
5438 gen_logic_CC(tmp);
5440 return store_reg_kind(s, a->rd, tmp, kind);
5443 #define DO_ANY3(NAME, OP, L, K) \
5444 static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a) \
5445 { StoreRegKind k = (K); return op_s_rrr_shi(s, a, OP, L, k); } \
5446 static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a) \
5447 { StoreRegKind k = (K); return op_s_rrr_shr(s, a, OP, L, k); } \
5448 static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a) \
5449 { StoreRegKind k = (K); return op_s_rri_rot(s, a, OP, L, k); }
5451 #define DO_ANY2(NAME, OP, L, K) \
5452 static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a) \
5453 { StoreRegKind k = (K); return op_s_rxr_shi(s, a, OP, L, k); } \
5454 static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a) \
5455 { StoreRegKind k = (K); return op_s_rxr_shr(s, a, OP, L, k); } \
5456 static bool trans_##NAME##_rxi(DisasContext *s, arg_s_rri_rot *a) \
5457 { StoreRegKind k = (K); return op_s_rxi_rot(s, a, OP, L, k); }
5459 #define DO_CMP2(NAME, OP, L) \
5460 static bool trans_##NAME##_xrri(DisasContext *s, arg_s_rrr_shi *a) \
5461 { return op_s_rrr_shi(s, a, OP, L, STREG_NONE); } \
5462 static bool trans_##NAME##_xrrr(DisasContext *s, arg_s_rrr_shr *a) \
5463 { return op_s_rrr_shr(s, a, OP, L, STREG_NONE); } \
5464 static bool trans_##NAME##_xri(DisasContext *s, arg_s_rri_rot *a) \
5465 { return op_s_rri_rot(s, a, OP, L, STREG_NONE); }
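/*
 * Editor's note (not part of the original source): for reference, a use
 * such as DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL) below
 * expands to three trans_* entry points, e.g.:
 *
 *   static bool trans_AND_rrri(DisasContext *s, arg_s_rrr_shi *a)
 *   { StoreRegKind k = (STREG_NORMAL);
 *     return op_s_rrr_shi(s, a, tcg_gen_and_i32, a->s, k); }
 *
 * plus the _rrrr and _rri forms; the K argument may also be a GCC
 * statement expression, as in the SUB and MOV cases further down.
 */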
5467 DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
5468 DO_ANY3(EOR, tcg_gen_xor_i32, a->s, STREG_NORMAL)
5469 DO_ANY3(ORR, tcg_gen_or_i32, a->s, STREG_NORMAL)
5470 DO_ANY3(BIC, tcg_gen_andc_i32, a->s, STREG_NORMAL)
5472 DO_ANY3(RSB, a->s ? gen_rsb_CC : gen_rsb, false, STREG_NORMAL)
5473 DO_ANY3(ADC, a->s ? gen_adc_CC : gen_add_carry, false, STREG_NORMAL)
5474 DO_ANY3(SBC, a->s ? gen_sbc_CC : gen_sub_carry, false, STREG_NORMAL)
5475 DO_ANY3(RSC, a->s ? gen_rsc_CC : gen_rsc, false, STREG_NORMAL)
5477 DO_CMP2(TST, tcg_gen_and_i32, true)
5478 DO_CMP2(TEQ, tcg_gen_xor_i32, true)
5479 DO_CMP2(CMN, gen_add_CC, false)
5480 DO_CMP2(CMP, gen_sub_CC, false)
5482 DO_ANY3(ADD, a->s ? gen_add_CC : tcg_gen_add_i32, false,
5483 a->rd == 13 && a->rn == 13 ? STREG_SP_CHECK : STREG_NORMAL)
5485 /*
5486 * Note that for the computation of StoreRegKind we return out of the
5487 * middle of the functions that are expanded by DO_ANY3, and that
5488 * we modify a->s via that parameter before it is used by OP.
5489 */
5490 DO_ANY3(SUB, a->s ? gen_sub_CC : tcg_gen_sub_i32, false,
5491 ({
5492 StoreRegKind ret = STREG_NORMAL;
5493 if (a->rd == 15 && a->s) {
5495 * See ALUExceptionReturn:
5496 * In User mode, UNPREDICTABLE; we choose UNDEF.
5497 * In Hyp mode, UNDEFINED.
5499 if (IS_USER(s) || s->current_el == 2) {
5500 unallocated_encoding(s);
5501 return true;
5503 /* There is no writeback of nzcv to PSTATE. */
5504 a->s = 0;
5505 ret = STREG_EXC_RET;
5506 } else if (a->rd == 13 && a->rn == 13) {
5507 ret = STREG_SP_CHECK;
5509 ret;
5510 }))
5512 DO_ANY2(MOV, tcg_gen_mov_i32, a->s,
5513 ({
5514 StoreRegKind ret = STREG_NORMAL;
5515 if (a->rd == 15 && a->s) {
5517 * See ALUExceptionReturn:
5518 * In User mode, UNPREDICTABLE; we choose UNDEF.
5519 * In Hyp mode, UNDEFINED.
5521 if (IS_USER(s) || s->current_el == 2) {
5522 unallocated_encoding(s);
5523 return true;
5525 /* There is no writeback of nzcv to PSTATE. */
5526 a->s = 0;
5527 ret = STREG_EXC_RET;
5528 } else if (a->rd == 13) {
5529 ret = STREG_SP_CHECK;
5531 ret;
5532 }))
5534 DO_ANY2(MVN, tcg_gen_not_i32, a->s, STREG_NORMAL)
5537 * ORN is only available with T32, so there is no register-shifted-register
5538 * form of the insn. Using the DO_ANY3 macro would create an unused function.
5540 static bool trans_ORN_rrri(DisasContext *s, arg_s_rrr_shi *a)
5542 return op_s_rrr_shi(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
5545 static bool trans_ORN_rri(DisasContext *s, arg_s_rri_rot *a)
5547 return op_s_rri_rot(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
5550 #undef DO_ANY3
5551 #undef DO_ANY2
5552 #undef DO_CMP2
5554 static bool trans_ADR(DisasContext *s, arg_ri *a)
5556 store_reg_bx(s, a->rd, add_reg_for_lit(s, 15, a->imm));
5557 return true;
5560 static bool trans_MOVW(DisasContext *s, arg_MOVW *a)
5562 TCGv_i32 tmp;
5564 if (!ENABLE_ARCH_6T2) {
5565 return false;
5568 tmp = tcg_const_i32(a->imm);
5569 store_reg(s, a->rd, tmp);
5570 return true;
5573 static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
5575 TCGv_i32 tmp;
5577 if (!ENABLE_ARCH_6T2) {
5578 return false;
5581 tmp = load_reg(s, a->rd);
5582 tcg_gen_ext16u_i32(tmp, tmp);
5583 tcg_gen_ori_i32(tmp, tmp, a->imm << 16);
5584 store_reg(s, a->rd, tmp);
5585 return true;
5589 * Multiply and multiply accumulate
5592 static bool op_mla(DisasContext *s, arg_s_rrrr *a, bool add)
5594 TCGv_i32 t1, t2;
5596 t1 = load_reg(s, a->rn);
5597 t2 = load_reg(s, a->rm);
5598 tcg_gen_mul_i32(t1, t1, t2);
5599 tcg_temp_free_i32(t2);
5600 if (add) {
5601 t2 = load_reg(s, a->ra);
5602 tcg_gen_add_i32(t1, t1, t2);
5603 tcg_temp_free_i32(t2);
5605 if (a->s) {
5606 gen_logic_CC(t1);
5608 store_reg(s, a->rd, t1);
5609 return true;
5612 static bool trans_MUL(DisasContext *s, arg_MUL *a)
5614 return op_mla(s, a, false);
5617 static bool trans_MLA(DisasContext *s, arg_MLA *a)
5619 return op_mla(s, a, true);
5622 static bool trans_MLS(DisasContext *s, arg_MLS *a)
5624 TCGv_i32 t1, t2;
5626 if (!ENABLE_ARCH_6T2) {
5627 return false;
5629 t1 = load_reg(s, a->rn);
5630 t2 = load_reg(s, a->rm);
5631 tcg_gen_mul_i32(t1, t1, t2);
5632 tcg_temp_free_i32(t2);
5633 t2 = load_reg(s, a->ra);
5634 tcg_gen_sub_i32(t1, t2, t1);
5635 tcg_temp_free_i32(t2);
5636 store_reg(s, a->rd, t1);
5637 return true;
5640 static bool op_mlal(DisasContext *s, arg_s_rrrr *a, bool uns, bool add)
5642 TCGv_i32 t0, t1, t2, t3;
5644 t0 = load_reg(s, a->rm);
5645 t1 = load_reg(s, a->rn);
5646 if (uns) {
5647 tcg_gen_mulu2_i32(t0, t1, t0, t1);
5648 } else {
5649 tcg_gen_muls2_i32(t0, t1, t0, t1);
5651 if (add) {
5652 t2 = load_reg(s, a->ra);
5653 t3 = load_reg(s, a->rd);
5654 tcg_gen_add2_i32(t0, t1, t0, t1, t2, t3);
5655 tcg_temp_free_i32(t2);
5656 tcg_temp_free_i32(t3);
5658 if (a->s) {
5659 gen_logicq_cc(t0, t1);
5661 store_reg(s, a->ra, t0);
5662 store_reg(s, a->rd, t1);
5663 return true;
5666 static bool trans_UMULL(DisasContext *s, arg_UMULL *a)
5668 return op_mlal(s, a, true, false);
5671 static bool trans_SMULL(DisasContext *s, arg_SMULL *a)
5673 return op_mlal(s, a, false, false);
5676 static bool trans_UMLAL(DisasContext *s, arg_UMLAL *a)
5678 return op_mlal(s, a, true, true);
5681 static bool trans_SMLAL(DisasContext *s, arg_SMLAL *a)
5683 return op_mlal(s, a, false, true);
5686 static bool trans_UMAAL(DisasContext *s, arg_UMAAL *a)
5688 TCGv_i32 t0, t1, t2, zero;
5690 if (s->thumb
5691 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5692 : !ENABLE_ARCH_6) {
5693 return false;
5696 t0 = load_reg(s, a->rm);
5697 t1 = load_reg(s, a->rn);
5698 tcg_gen_mulu2_i32(t0, t1, t0, t1);
5699 zero = tcg_const_i32(0);
5700 t2 = load_reg(s, a->ra);
5701 tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
5702 tcg_temp_free_i32(t2);
5703 t2 = load_reg(s, a->rd);
5704 tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
5705 tcg_temp_free_i32(t2);
5706 tcg_temp_free_i32(zero);
5707 store_reg(s, a->ra, t0);
5708 store_reg(s, a->rd, t1);
5709 return true;
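/*
 * Editor's note (not part of the original source): UMAAL cannot overflow
 * 64 bits, since (2^32 - 1)^2 + 2 * (2^32 - 1) = 2^64 - 1; this is why
 * the two 32-bit accumulators can simply be chained through
 * tcg_gen_add2_i32 with a zero high word.
 */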
5713 * Saturating addition and subtraction
5716 static bool op_qaddsub(DisasContext *s, arg_rrr *a, bool add, bool doub)
5718 TCGv_i32 t0, t1;
5720 if (s->thumb
5721 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5722 : !ENABLE_ARCH_5TE) {
5723 return false;
5726 t0 = load_reg(s, a->rm);
5727 t1 = load_reg(s, a->rn);
5728 if (doub) {
5729 gen_helper_add_saturate(t1, cpu_env, t1, t1);
5731 if (add) {
5732 gen_helper_add_saturate(t0, cpu_env, t0, t1);
5733 } else {
5734 gen_helper_sub_saturate(t0, cpu_env, t0, t1);
5736 tcg_temp_free_i32(t1);
5737 store_reg(s, a->rd, t0);
5738 return true;
5741 #define DO_QADDSUB(NAME, ADD, DOUB) \
5742 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
5744 return op_qaddsub(s, a, ADD, DOUB); \
5747 DO_QADDSUB(QADD, true, false)
5748 DO_QADDSUB(QSUB, false, false)
5749 DO_QADDSUB(QDADD, true, true)
5750 DO_QADDSUB(QDSUB, false, true)
5752 #undef DO_QADDSUB
5755 * Halfword multiply and multiply accumulate
5758 static bool op_smlaxxx(DisasContext *s, arg_rrrr *a,
5759 int add_long, bool nt, bool mt)
5761 TCGv_i32 t0, t1, tl, th;
5763 if (s->thumb
5764 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
5765 : !ENABLE_ARCH_5TE) {
5766 return false;
5769 t0 = load_reg(s, a->rn);
5770 t1 = load_reg(s, a->rm);
5771 gen_mulxy(t0, t1, nt, mt);
5772 tcg_temp_free_i32(t1);
5774 switch (add_long) {
5775 case 0:
5776 store_reg(s, a->rd, t0);
5777 break;
5778 case 1:
5779 t1 = load_reg(s, a->ra);
5780 gen_helper_add_setq(t0, cpu_env, t0, t1);
5781 tcg_temp_free_i32(t1);
5782 store_reg(s, a->rd, t0);
5783 break;
5784 case 2:
5785 tl = load_reg(s, a->ra);
5786 th = load_reg(s, a->rd);
5787 /* Sign-extend the 32-bit product to 64 bits. */
5788 t1 = tcg_temp_new_i32();
5789 tcg_gen_sari_i32(t1, t0, 31);
5790 tcg_gen_add2_i32(tl, th, tl, th, t0, t1);
5791 tcg_temp_free_i32(t0);
5792 tcg_temp_free_i32(t1);
5793 store_reg(s, a->ra, tl);
5794 store_reg(s, a->rd, th);
5795 break;
5796 default:
5797 g_assert_not_reached();
5799 return true;
5802 #define DO_SMLAX(NAME, add, nt, mt) \
5803 static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
5805 return op_smlaxxx(s, a, add, nt, mt); \
5808 DO_SMLAX(SMULBB, 0, 0, 0)
5809 DO_SMLAX(SMULBT, 0, 0, 1)
5810 DO_SMLAX(SMULTB, 0, 1, 0)
5811 DO_SMLAX(SMULTT, 0, 1, 1)
5813 DO_SMLAX(SMLABB, 1, 0, 0)
5814 DO_SMLAX(SMLABT, 1, 0, 1)
5815 DO_SMLAX(SMLATB, 1, 1, 0)
5816 DO_SMLAX(SMLATT, 1, 1, 1)
5818 DO_SMLAX(SMLALBB, 2, 0, 0)
5819 DO_SMLAX(SMLALBT, 2, 0, 1)
5820 DO_SMLAX(SMLALTB, 2, 1, 0)
5821 DO_SMLAX(SMLALTT, 2, 1, 1)
5823 #undef DO_SMLAX
5825 static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt)
5827 TCGv_i32 t0, t1;
5829 if (!ENABLE_ARCH_5TE) {
5830 return false;
5833 t0 = load_reg(s, a->rn);
5834 t1 = load_reg(s, a->rm);
5836 * Since the nominal result is product<47:16>, shift the 16-bit
5837 * input up by 16 bits, so that the result is at product<63:32>.
5839 if (mt) {
5840 tcg_gen_andi_i32(t1, t1, 0xffff0000);
5841 } else {
5842 tcg_gen_shli_i32(t1, t1, 16);
5844 tcg_gen_muls2_i32(t0, t1, t0, t1);
5845 tcg_temp_free_i32(t0);
5846 if (add) {
5847 t0 = load_reg(s, a->ra);
5848 gen_helper_add_setq(t1, cpu_env, t1, t0);
5849 tcg_temp_free_i32(t0);
5851 store_reg(s, a->rd, t1);
5852 return true;
5855 #define DO_SMLAWX(NAME, add, mt) \
5856 static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
5858 return op_smlawx(s, a, add, mt); \
5861 DO_SMLAWX(SMULWB, 0, 0)
5862 DO_SMLAWX(SMULWT, 0, 1)
5863 DO_SMLAWX(SMLAWB, 1, 0)
5864 DO_SMLAWX(SMLAWT, 1, 1)
5866 #undef DO_SMLAWX
5869 * MSR (immediate) and hints
5872 static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
5875 * When running single-threaded TCG code, use the helper to ensure that
5876 * the next round-robin scheduled vCPU gets a crack. When running in
5877 * MTTCG we don't generate jumps to the helper as it won't affect the
5878 * scheduling of other vCPUs.
5880 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
5881 gen_set_pc_im(s, s->base.pc_next);
5882 s->base.is_jmp = DISAS_YIELD;
5884 return true;
5887 static bool trans_WFE(DisasContext *s, arg_WFE *a)
5890 * When running single-threaded TCG code, use the helper to ensure that
5891 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
5892 * just skip this instruction. Currently the SEV/SEVL instructions,
5893 * which are *one* of many ways to wake the CPU from WFE, are not
5894 * implemented so we can't sleep like WFI does.
5896 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
5897 gen_set_pc_im(s, s->base.pc_next);
5898 s->base.is_jmp = DISAS_WFE;
5900 return true;
5903 static bool trans_WFI(DisasContext *s, arg_WFI *a)
5905 /* For WFI, halt the vCPU until an IRQ. */
5906 gen_set_pc_im(s, s->base.pc_next);
5907 s->base.is_jmp = DISAS_WFI;
5908 return true;
5911 static bool trans_NOP(DisasContext *s, arg_NOP *a)
5913 return true;
5916 static bool trans_MSR_imm(DisasContext *s, arg_MSR_imm *a)
5918 uint32_t val = ror32(a->imm, a->rot * 2);
5919 uint32_t mask = msr_mask(s, a->mask, a->r);
5921 if (gen_set_psr_im(s, mask, a->r, val)) {
5922 unallocated_encoding(s);
5924 return true;
5928 * Cyclic Redundancy Check
5931 static bool op_crc32(DisasContext *s, arg_rrr *a, bool c, MemOp sz)
5933 TCGv_i32 t1, t2, t3;
5935 if (!dc_isar_feature(aa32_crc32, s)) {
5936 return false;
5939 t1 = load_reg(s, a->rn);
5940 t2 = load_reg(s, a->rm);
5941 switch (sz) {
5942 case MO_8:
5943 gen_uxtb(t2);
5944 break;
5945 case MO_16:
5946 gen_uxth(t2);
5947 break;
5948 case MO_32:
5949 break;
5950 default:
5951 g_assert_not_reached();
5953 t3 = tcg_const_i32(1 << sz);
5954 if (c) {
5955 gen_helper_crc32c(t1, t1, t2, t3);
5956 } else {
5957 gen_helper_crc32(t1, t1, t2, t3);
5959 tcg_temp_free_i32(t2);
5960 tcg_temp_free_i32(t3);
5961 store_reg(s, a->rd, t1);
5962 return true;
5965 #define DO_CRC32(NAME, c, sz) \
5966 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
5967 { return op_crc32(s, a, c, sz); }
5969 DO_CRC32(CRC32B, false, MO_8)
5970 DO_CRC32(CRC32H, false, MO_16)
5971 DO_CRC32(CRC32W, false, MO_32)
5972 DO_CRC32(CRC32CB, true, MO_8)
5973 DO_CRC32(CRC32CH, true, MO_16)
5974 DO_CRC32(CRC32CW, true, MO_32)
5976 #undef DO_CRC32
5979 * Miscellaneous instructions
5982 static bool trans_MRS_bank(DisasContext *s, arg_MRS_bank *a)
5984 if (arm_dc_feature(s, ARM_FEATURE_M)) {
5985 return false;
5987 gen_mrs_banked(s, a->r, a->sysm, a->rd);
5988 return true;
5991 static bool trans_MSR_bank(DisasContext *s, arg_MSR_bank *a)
5993 if (arm_dc_feature(s, ARM_FEATURE_M)) {
5994 return false;
5996 gen_msr_banked(s, a->r, a->sysm, a->rn);
5997 return true;
6000 static bool trans_MRS_reg(DisasContext *s, arg_MRS_reg *a)
6002 TCGv_i32 tmp;
6004 if (arm_dc_feature(s, ARM_FEATURE_M)) {
6005 return false;
6007 if (a->r) {
6008 if (IS_USER(s)) {
6009 unallocated_encoding(s);
6010 return true;
6012 tmp = load_cpu_field(spsr);
6013 } else {
6014 tmp = tcg_temp_new_i32();
6015 gen_helper_cpsr_read(tmp, cpu_env);
6017 store_reg(s, a->rd, tmp);
6018 return true;
6021 static bool trans_MSR_reg(DisasContext *s, arg_MSR_reg *a)
6023 TCGv_i32 tmp;
6024 uint32_t mask = msr_mask(s, a->mask, a->r);
6026 if (arm_dc_feature(s, ARM_FEATURE_M)) {
6027 return false;
6029 tmp = load_reg(s, a->rn);
6030 if (gen_set_psr(s, mask, a->r, tmp)) {
6031 unallocated_encoding(s);
6033 return true;
6036 static bool trans_MRS_v7m(DisasContext *s, arg_MRS_v7m *a)
6038 TCGv_i32 tmp;
6040 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
6041 return false;
6043 tmp = tcg_const_i32(a->sysm);
6044 gen_helper_v7m_mrs(tmp, cpu_env, tmp);
6045 store_reg(s, a->rd, tmp);
6046 return true;
6049 static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a)
6051 TCGv_i32 addr, reg;
6053 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
6054 return false;
6056 addr = tcg_const_i32((a->mask << 10) | a->sysm);
6057 reg = load_reg(s, a->rn);
6058 gen_helper_v7m_msr(cpu_env, addr, reg);
6059 tcg_temp_free_i32(addr);
6060 tcg_temp_free_i32(reg);
6061 /* If we wrote to CONTROL, the EL might have changed */
6062 gen_helper_rebuild_hflags_m32_newel(cpu_env);
6063 gen_lookup_tb(s);
6064 return true;
6067 static bool trans_BX(DisasContext *s, arg_BX *a)
6069 if (!ENABLE_ARCH_4T) {
6070 return false;
6072 gen_bx_excret(s, load_reg(s, a->rm));
6073 return true;
6076 static bool trans_BXJ(DisasContext *s, arg_BXJ *a)
6078 if (!ENABLE_ARCH_5J || arm_dc_feature(s, ARM_FEATURE_M)) {
6079 return false;
6081 /* Trivial implementation equivalent to bx. */
6082 gen_bx(s, load_reg(s, a->rm));
6083 return true;
6086 static bool trans_BLX_r(DisasContext *s, arg_BLX_r *a)
6088 TCGv_i32 tmp;
6090 if (!ENABLE_ARCH_5) {
6091 return false;
6093 tmp = load_reg(s, a->rm);
6094 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
6095 gen_bx(s, tmp);
6096 return true;
6100 * BXNS/BLXNS: only exist for v8M with the security extensions,
6101 * and always UNDEF if NonSecure. We don't implement these in
6102 * the user-only mode either (in theory you can use them from
6103 * Secure User mode but they are too tied in to system emulation).
6105 static bool trans_BXNS(DisasContext *s, arg_BXNS *a)
6107 if (!s->v8m_secure || IS_USER_ONLY) {
6108 unallocated_encoding(s);
6109 } else {
6110 gen_bxns(s, a->rm);
6112 return true;
6115 static bool trans_BLXNS(DisasContext *s, arg_BLXNS *a)
6117 if (!s->v8m_secure || IS_USER_ONLY) {
6118 unallocated_encoding(s);
6119 } else {
6120 gen_blxns(s, a->rm);
6122 return true;
6125 static bool trans_CLZ(DisasContext *s, arg_CLZ *a)
6127 TCGv_i32 tmp;
6129 if (!ENABLE_ARCH_5) {
6130 return false;
6132 tmp = load_reg(s, a->rm);
6133 tcg_gen_clzi_i32(tmp, tmp, 32);
6134 store_reg(s, a->rd, tmp);
6135 return true;
6138 static bool trans_ERET(DisasContext *s, arg_ERET *a)
6140 TCGv_i32 tmp;
6142 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
6143 return false;
6145 if (IS_USER(s)) {
6146 unallocated_encoding(s);
6147 return true;
6149 if (s->current_el == 2) {
6150 /* ERET from Hyp uses ELR_Hyp, not LR */
6151 tmp = load_cpu_field(elr_el[2]);
6152 } else {
6153 tmp = load_reg(s, 14);
6155 gen_exception_return(s, tmp);
6156 return true;
6159 static bool trans_HLT(DisasContext *s, arg_HLT *a)
6161 gen_hlt(s, a->imm);
6162 return true;
6165 static bool trans_BKPT(DisasContext *s, arg_BKPT *a)
6167 if (!ENABLE_ARCH_5) {
6168 return false;
6170 if (arm_dc_feature(s, ARM_FEATURE_M) &&
6171 semihosting_enabled() &&
6172 #ifndef CONFIG_USER_ONLY
6173 !IS_USER(s) &&
6174 #endif
6175 (a->imm == 0xab)) {
6176 gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
6177 } else {
6178 gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false));
6180 return true;
6183 static bool trans_HVC(DisasContext *s, arg_HVC *a)
6185 if (!ENABLE_ARCH_7 || arm_dc_feature(s, ARM_FEATURE_M)) {
6186 return false;
6188 if (IS_USER(s)) {
6189 unallocated_encoding(s);
6190 } else {
6191 gen_hvc(s, a->imm);
6193 return true;
6196 static bool trans_SMC(DisasContext *s, arg_SMC *a)
6198 if (!ENABLE_ARCH_6K || arm_dc_feature(s, ARM_FEATURE_M)) {
6199 return false;
6201 if (IS_USER(s)) {
6202 unallocated_encoding(s);
6203 } else {
6204 gen_smc(s);
6206 return true;
6209 static bool trans_SG(DisasContext *s, arg_SG *a)
6211 if (!arm_dc_feature(s, ARM_FEATURE_M) ||
6212 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6213 return false;
6216 * SG (v8M only)
6217 * The bulk of the behaviour for this instruction is implemented
6218 * in v7m_handle_execute_nsc(), which deals with the insn when
6219 * it is executed by a CPU in non-secure state from memory
6220 * which is Secure & NonSecure-Callable.
6221 * Here we only need to handle the remaining cases:
6222 * * in NS memory (including the "security extension not
6223 * implemented" case) : NOP
6224 * * in S memory but CPU already secure (clear IT bits)
6225 * We know that the attribute for the memory this insn is
6226 * in must match the current CPU state, because otherwise
6227 * get_phys_addr_pmsav8 would have generated an exception.
6229 if (s->v8m_secure) {
6230 /* Like the IT insn, we don't need to generate any code */
6231 s->condexec_cond = 0;
6232 s->condexec_mask = 0;
6234 return true;
6237 static bool trans_TT(DisasContext *s, arg_TT *a)
6239 TCGv_i32 addr, tmp;
6241 if (!arm_dc_feature(s, ARM_FEATURE_M) ||
6242 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6243 return false;
6245 if (a->rd == 13 || a->rd == 15 || a->rn == 15) {
6246 /* We UNDEF for these UNPREDICTABLE cases */
6247 unallocated_encoding(s);
6248 return true;
6250 if (a->A && !s->v8m_secure) {
6251 /* This case is UNDEFINED. */
6252 unallocated_encoding(s);
6253 return true;
6256 addr = load_reg(s, a->rn);
6257 tmp = tcg_const_i32((a->A << 1) | a->T);
6258 gen_helper_v7m_tt(tmp, cpu_env, addr, tmp);
6259 tcg_temp_free_i32(addr);
6260 store_reg(s, a->rd, tmp);
6261 return true;
6265 * Load/store register index
6268 static ISSInfo make_issinfo(DisasContext *s, int rd, bool p, bool w)
6270 ISSInfo ret;
6272 /* ISS not valid if writeback */
6273 if (p && !w) {
6274 ret = rd;
6275 if (s->base.pc_next - s->pc_curr == 2) {
6276 ret |= ISSIs16Bit;
6278 } else {
6279 ret = ISSInvalid;
6281 return ret;
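/*
 * Informal note on the helper above: a syndrome ISS is only provided for
 * loads/stores without writeback (p && !w), and ISSIs16Bit is inferred
 * from the instruction length, since pc_next - pc_curr == 2 can only be
 * true for a 16-bit Thumb encoding.
 */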
6284 static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a)
6286 TCGv_i32 addr = load_reg(s, a->rn);
6288 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
6289 gen_helper_v8m_stackcheck(cpu_env, addr);
6292 if (a->p) {
6293 TCGv_i32 ofs = load_reg(s, a->rm);
6294 gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
6295 if (a->u) {
6296 tcg_gen_add_i32(addr, addr, ofs);
6297 } else {
6298 tcg_gen_sub_i32(addr, addr, ofs);
6300 tcg_temp_free_i32(ofs);
6302 return addr;
6305 static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a,
6306 TCGv_i32 addr, int address_offset)
6308 if (!a->p) {
6309 TCGv_i32 ofs = load_reg(s, a->rm);
6310 gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
6311 if (a->u) {
6312 tcg_gen_add_i32(addr, addr, ofs);
6313 } else {
6314 tcg_gen_sub_i32(addr, addr, ofs);
6316 tcg_temp_free_i32(ofs);
6317 } else if (!a->w) {
6318 tcg_temp_free_i32(addr);
6319 return;
6321 tcg_gen_addi_i32(addr, addr, address_offset);
6322 store_reg(s, a->rn, addr);
6325 static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
6326 MemOp mop, int mem_idx)
6328 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
6329 TCGv_i32 addr, tmp;
6331 addr = op_addr_rr_pre(s, a);
6333 tmp = tcg_temp_new_i32();
6334 gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
6335 disas_set_da_iss(s, mop, issinfo);
6338 * Perform base writeback before the loaded value to
6339 * ensure correct behavior with overlapping index registers.
6341 op_addr_rr_post(s, a, addr, 0);
6342 store_reg_from_load(s, a->rt, tmp);
6343 return true;
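/*
 * Informal example of the ordering above: for a post-indexed load such as
 * "ldr r2, [r3], r2" the index register and the destination overlap, so
 * the base writeback (which re-reads r2 to compute the new r3) must be
 * generated before the loaded value is written into r2.
 */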
6346 static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
6347 MemOp mop, int mem_idx)
6349 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
6350 TCGv_i32 addr, tmp;
6352 addr = op_addr_rr_pre(s, a);
6354 tmp = load_reg(s, a->rt);
6355 gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
6356 disas_set_da_iss(s, mop, issinfo);
6357 tcg_temp_free_i32(tmp);
6359 op_addr_rr_post(s, a, addr, 0);
6360 return true;
6363 static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
6365 int mem_idx = get_mem_index(s);
6366 TCGv_i32 addr, tmp;
6368 if (!ENABLE_ARCH_5TE) {
6369 return false;
6371 if (a->rt & 1) {
6372 unallocated_encoding(s);
6373 return true;
6375 addr = op_addr_rr_pre(s, a);
6377 tmp = tcg_temp_new_i32();
6378 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
6379 store_reg(s, a->rt, tmp);
6381 tcg_gen_addi_i32(addr, addr, 4);
6383 tmp = tcg_temp_new_i32();
6384 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
6385 store_reg(s, a->rt + 1, tmp);
6387 /* LDRD w/ base writeback is undefined if the registers overlap. */
6388 op_addr_rr_post(s, a, addr, -4);
6389 return true;
6392 static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
6394 int mem_idx = get_mem_index(s);
6395 TCGv_i32 addr, tmp;
6397 if (!ENABLE_ARCH_5TE) {
6398 return false;
6400 if (a->rt & 1) {
6401 unallocated_encoding(s);
6402 return true;
6404 addr = op_addr_rr_pre(s, a);
6406 tmp = load_reg(s, a->rt);
6407 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
6408 tcg_temp_free_i32(tmp);
6410 tcg_gen_addi_i32(addr, addr, 4);
6412 tmp = load_reg(s, a->rt + 1);
6413 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
6414 tcg_temp_free_i32(tmp);
6416 op_addr_rr_post(s, a, addr, -4);
6417 return true;
6421 * Load/store immediate index
6424 static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a)
6426 int ofs = a->imm;
6428 if (!a->u) {
6429 ofs = -ofs;
6432 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
6434 * Stackcheck. Here we know 'addr' is the current SP;
6435 * U is set if we're moving SP up, else down. It is
6436 * UNKNOWN whether the limit check triggers when SP starts
6437 * below the limit and ends up above it; we choose to trigger the check in that case.
6439 if (!a->u) {
6440 TCGv_i32 newsp = tcg_temp_new_i32();
6441 tcg_gen_addi_i32(newsp, cpu_R[13], ofs);
6442 gen_helper_v8m_stackcheck(cpu_env, newsp);
6443 tcg_temp_free_i32(newsp);
6444 } else {
6445 gen_helper_v8m_stackcheck(cpu_env, cpu_R[13]);
6449 return add_reg_for_lit(s, a->rn, a->p ? ofs : 0);
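/*
 * Informal note: add_reg_for_lit() is used here instead of load_reg() so
 * that (presumably) an rn of 15 yields the aligned PC value needed for
 * literal (PC-relative) loads; for the pre-indexed forms the signed
 * immediate offset is folded in at the same time.
 */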
6452 static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a,
6453 TCGv_i32 addr, int address_offset)
6455 if (!a->p) {
6456 if (a->u) {
6457 address_offset += a->imm;
6458 } else {
6459 address_offset -= a->imm;
6461 } else if (!a->w) {
6462 tcg_temp_free_i32(addr);
6463 return;
6465 tcg_gen_addi_i32(addr, addr, address_offset);
6466 store_reg(s, a->rn, addr);
6469 static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
6470 MemOp mop, int mem_idx)
6472 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
6473 TCGv_i32 addr, tmp;
6475 addr = op_addr_ri_pre(s, a);
6477 tmp = tcg_temp_new_i32();
6478 gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
6479 disas_set_da_iss(s, mop, issinfo);
6482 * Perform base writeback before the loaded value to
6483 * ensure correct behavior with overlapping index registers.
6485 op_addr_ri_post(s, a, addr, 0);
6486 store_reg_from_load(s, a->rt, tmp);
6487 return true;
6490 static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
6491 MemOp mop, int mem_idx)
6493 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
6494 TCGv_i32 addr, tmp;
6496 addr = op_addr_ri_pre(s, a);
6498 tmp = load_reg(s, a->rt);
6499 gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
6500 disas_set_da_iss(s, mop, issinfo);
6501 tcg_temp_free_i32(tmp);
6503 op_addr_ri_post(s, a, addr, 0);
6504 return true;
6507 static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
6509 int mem_idx = get_mem_index(s);
6510 TCGv_i32 addr, tmp;
6512 addr = op_addr_ri_pre(s, a);
6514 tmp = tcg_temp_new_i32();
6515 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
6516 store_reg(s, a->rt, tmp);
6518 tcg_gen_addi_i32(addr, addr, 4);
6520 tmp = tcg_temp_new_i32();
6521 gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
6522 store_reg(s, rt2, tmp);
6524 /* LDRD w/ base writeback is undefined if the registers overlap. */
6525 op_addr_ri_post(s, a, addr, -4);
6526 return true;
6529 static bool trans_LDRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
6531 if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
6532 return false;
6534 return op_ldrd_ri(s, a, a->rt + 1);
6537 static bool trans_LDRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
6539 arg_ldst_ri b = {
6540 .u = a->u, .w = a->w, .p = a->p,
6541 .rn = a->rn, .rt = a->rt, .imm = a->imm
6543 return op_ldrd_ri(s, &b, a->rt2);
6546 static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
6548 int mem_idx = get_mem_index(s);
6549 TCGv_i32 addr, tmp;
6551 addr = op_addr_ri_pre(s, a);
6553 tmp = load_reg(s, a->rt);
6554 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
6555 tcg_temp_free_i32(tmp);
6557 tcg_gen_addi_i32(addr, addr, 4);
6559 tmp = load_reg(s, rt2);
6560 gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
6561 tcg_temp_free_i32(tmp);
6563 op_addr_ri_post(s, a, addr, -4);
6564 return true;
6567 static bool trans_STRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
6569 if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
6570 return false;
6572 return op_strd_ri(s, a, a->rt + 1);
6575 static bool trans_STRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
6577 arg_ldst_ri b = {
6578 .u = a->u, .w = a->w, .p = a->p,
6579 .rn = a->rn, .rt = a->rt, .imm = a->imm
6581 return op_strd_ri(s, &b, a->rt2);
6584 #define DO_LDST(NAME, WHICH, MEMOP) \
6585 static bool trans_##NAME##_ri(DisasContext *s, arg_ldst_ri *a) \
6587 return op_##WHICH##_ri(s, a, MEMOP, get_mem_index(s)); \
6589 static bool trans_##NAME##T_ri(DisasContext *s, arg_ldst_ri *a) \
6591 return op_##WHICH##_ri(s, a, MEMOP, get_a32_user_mem_index(s)); \
6593 static bool trans_##NAME##_rr(DisasContext *s, arg_ldst_rr *a) \
6595 return op_##WHICH##_rr(s, a, MEMOP, get_mem_index(s)); \
6597 static bool trans_##NAME##T_rr(DisasContext *s, arg_ldst_rr *a) \
6599 return op_##WHICH##_rr(s, a, MEMOP, get_a32_user_mem_index(s)); \
6602 DO_LDST(LDR, load, MO_UL)
6603 DO_LDST(LDRB, load, MO_UB)
6604 DO_LDST(LDRH, load, MO_UW)
6605 DO_LDST(LDRSB, load, MO_SB)
6606 DO_LDST(LDRSH, load, MO_SW)
6608 DO_LDST(STR, store, MO_UL)
6609 DO_LDST(STRB, store, MO_UB)
6610 DO_LDST(STRH, store, MO_UW)
6612 #undef DO_LDST
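/*
 * Illustrative expansion (informal): each DO_LDST use above produces four
 * wrappers, e.g. DO_LDST(LDRB, load, MO_UB) provides trans_LDRB_ri,
 * trans_LDRBT_ri, trans_LDRB_rr and trans_LDRBT_rr.  The plain forms use
 * the current memory index, while the 'T' (unprivileged) forms use
 * get_a32_user_mem_index(), so LDRBT/STRBT and friends access memory with
 * user permissions even when executed from a privileged mode.
 */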
6615 * Synchronization primitives
6618 static bool op_swp(DisasContext *s, arg_SWP *a, MemOp opc)
6620 TCGv_i32 addr, tmp;
6621 TCGv taddr;
6623 opc |= s->be_data;
6624 addr = load_reg(s, a->rn);
6625 taddr = gen_aa32_addr(s, addr, opc);
6626 tcg_temp_free_i32(addr);
6628 tmp = load_reg(s, a->rt2);
6629 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp, get_mem_index(s), opc);
6630 tcg_temp_free(taddr);
6632 store_reg(s, a->rt, tmp);
6633 return true;
6636 static bool trans_SWP(DisasContext *s, arg_SWP *a)
6638 return op_swp(s, a, MO_UL | MO_ALIGN);
6641 static bool trans_SWPB(DisasContext *s, arg_SWP *a)
6643 return op_swp(s, a, MO_UB);
6647 * Load/Store Exclusive and Load-Acquire/Store-Release
6650 static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel)
6652 TCGv_i32 addr;
6653 /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
6654 bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);
6656 /* We UNDEF for these UNPREDICTABLE cases. */
6657 if (a->rd == 15 || a->rn == 15 || a->rt == 15
6658 || a->rd == a->rn || a->rd == a->rt
6659 || (!v8a && s->thumb && (a->rd == 13 || a->rt == 13))
6660 || (mop == MO_64
6661 && (a->rt2 == 15
6662 || a->rd == a->rt2
6663 || (!v8a && s->thumb && a->rt2 == 13)))) {
6664 unallocated_encoding(s);
6665 return true;
6668 if (rel) {
6669 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
6672 addr = tcg_temp_local_new_i32();
6673 load_reg_var(s, addr, a->rn);
6674 tcg_gen_addi_i32(addr, addr, a->imm);
6676 gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop);
6677 tcg_temp_free_i32(addr);
6678 return true;
6681 static bool trans_STREX(DisasContext *s, arg_STREX *a)
6683 if (!ENABLE_ARCH_6) {
6684 return false;
6686 return op_strex(s, a, MO_32, false);
6689 static bool trans_STREXD_a32(DisasContext *s, arg_STREX *a)
6691 if (!ENABLE_ARCH_6K) {
6692 return false;
6694 /* We UNDEF for these UNPREDICTABLE cases. */
6695 if (a->rt & 1) {
6696 unallocated_encoding(s);
6697 return true;
6699 a->rt2 = a->rt + 1;
6700 return op_strex(s, a, MO_64, false);
6703 static bool trans_STREXD_t32(DisasContext *s, arg_STREX *a)
6705 return op_strex(s, a, MO_64, false);
6708 static bool trans_STREXB(DisasContext *s, arg_STREX *a)
6710 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
6711 return false;
6713 return op_strex(s, a, MO_8, false);
6716 static bool trans_STREXH(DisasContext *s, arg_STREX *a)
6718 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
6719 return false;
6721 return op_strex(s, a, MO_16, false);
6724 static bool trans_STLEX(DisasContext *s, arg_STREX *a)
6726 if (!ENABLE_ARCH_8) {
6727 return false;
6729 return op_strex(s, a, MO_32, true);
6732 static bool trans_STLEXD_a32(DisasContext *s, arg_STREX *a)
6734 if (!ENABLE_ARCH_8) {
6735 return false;
6737 /* We UNDEF for these UNPREDICTABLE cases. */
6738 if (a->rt & 1) {
6739 unallocated_encoding(s);
6740 return true;
6742 a->rt2 = a->rt + 1;
6743 return op_strex(s, a, MO_64, true);
6746 static bool trans_STLEXD_t32(DisasContext *s, arg_STREX *a)
6748 if (!ENABLE_ARCH_8) {
6749 return false;
6751 return op_strex(s, a, MO_64, true);
6754 static bool trans_STLEXB(DisasContext *s, arg_STREX *a)
6756 if (!ENABLE_ARCH_8) {
6757 return false;
6759 return op_strex(s, a, MO_8, true);
6762 static bool trans_STLEXH(DisasContext *s, arg_STREX *a)
6764 if (!ENABLE_ARCH_8) {
6765 return false;
6767 return op_strex(s, a, MO_16, true);
6770 static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
6772 TCGv_i32 addr, tmp;
6774 if (!ENABLE_ARCH_8) {
6775 return false;
6777 /* We UNDEF for these UNPREDICTABLE cases. */
6778 if (a->rn == 15 || a->rt == 15) {
6779 unallocated_encoding(s);
6780 return true;
6783 addr = load_reg(s, a->rn);
6784 tmp = load_reg(s, a->rt);
6785 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
6786 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
6787 disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);
6789 tcg_temp_free_i32(tmp);
6790 tcg_temp_free_i32(addr);
6791 return true;
6794 static bool trans_STL(DisasContext *s, arg_STL *a)
6796 return op_stl(s, a, MO_UL);
6799 static bool trans_STLB(DisasContext *s, arg_STL *a)
6801 return op_stl(s, a, MO_UB);
6804 static bool trans_STLH(DisasContext *s, arg_STL *a)
6806 return op_stl(s, a, MO_UW);
6809 static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq)
6811 TCGv_i32 addr;
6812 /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
6813 bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);
6815 /* We UNDEF for these UNPREDICTABLE cases. */
6816 if (a->rn == 15 || a->rt == 15
6817 || (!v8a && s->thumb && a->rt == 13)
6818 || (mop == MO_64
6819 && (a->rt2 == 15 || a->rt == a->rt2
6820 || (!v8a && s->thumb && a->rt2 == 13)))) {
6821 unallocated_encoding(s);
6822 return true;
6825 addr = tcg_temp_local_new_i32();
6826 load_reg_var(s, addr, a->rn);
6827 tcg_gen_addi_i32(addr, addr, a->imm);
6829 gen_load_exclusive(s, a->rt, a->rt2, addr, mop);
6830 tcg_temp_free_i32(addr);
6832 if (acq) {
6833 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
6835 return true;
6838 static bool trans_LDREX(DisasContext *s, arg_LDREX *a)
6840 if (!ENABLE_ARCH_6) {
6841 return false;
6843 return op_ldrex(s, a, MO_32, false);
6846 static bool trans_LDREXD_a32(DisasContext *s, arg_LDREX *a)
6848 if (!ENABLE_ARCH_6K) {
6849 return false;
6851 /* We UNDEF for these UNPREDICTABLE cases. */
6852 if (a->rt & 1) {
6853 unallocated_encoding(s);
6854 return true;
6856 a->rt2 = a->rt + 1;
6857 return op_ldrex(s, a, MO_64, false);
6860 static bool trans_LDREXD_t32(DisasContext *s, arg_LDREX *a)
6862 return op_ldrex(s, a, MO_64, false);
6865 static bool trans_LDREXB(DisasContext *s, arg_LDREX *a)
6867 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
6868 return false;
6870 return op_ldrex(s, a, MO_8, false);
6873 static bool trans_LDREXH(DisasContext *s, arg_LDREX *a)
6875 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
6876 return false;
6878 return op_ldrex(s, a, MO_16, false);
6881 static bool trans_LDAEX(DisasContext *s, arg_LDREX *a)
6883 if (!ENABLE_ARCH_8) {
6884 return false;
6886 return op_ldrex(s, a, MO_32, true);
6889 static bool trans_LDAEXD_a32(DisasContext *s, arg_LDREX *a)
6891 if (!ENABLE_ARCH_8) {
6892 return false;
6894 /* We UNDEF for these UNPREDICTABLE cases. */
6895 if (a->rt & 1) {
6896 unallocated_encoding(s);
6897 return true;
6899 a->rt2 = a->rt + 1;
6900 return op_ldrex(s, a, MO_64, true);
6903 static bool trans_LDAEXD_t32(DisasContext *s, arg_LDREX *a)
6905 if (!ENABLE_ARCH_8) {
6906 return false;
6908 return op_ldrex(s, a, MO_64, true);
6911 static bool trans_LDAEXB(DisasContext *s, arg_LDREX *a)
6913 if (!ENABLE_ARCH_8) {
6914 return false;
6916 return op_ldrex(s, a, MO_8, true);
6919 static bool trans_LDAEXH(DisasContext *s, arg_LDREX *a)
6921 if (!ENABLE_ARCH_8) {
6922 return false;
6924 return op_ldrex(s, a, MO_16, true);
6927 static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)
6929 TCGv_i32 addr, tmp;
6931 if (!ENABLE_ARCH_8) {
6932 return false;
6934 /* We UNDEF for these UNPREDICTABLE cases. */
6935 if (a->rn == 15 || a->rt == 15) {
6936 unallocated_encoding(s);
6937 return true;
6940 addr = load_reg(s, a->rn);
6941 tmp = tcg_temp_new_i32();
6942 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
6943 disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
6944 tcg_temp_free_i32(addr);
6946 store_reg(s, a->rt, tmp);
6947 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
6948 return true;
6951 static bool trans_LDA(DisasContext *s, arg_LDA *a)
6953 return op_lda(s, a, MO_UL);
6956 static bool trans_LDAB(DisasContext *s, arg_LDA *a)
6958 return op_lda(s, a, MO_UB);
6961 static bool trans_LDAH(DisasContext *s, arg_LDA *a)
6963 return op_lda(s, a, MO_UW);
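/*
 * Informal note on barrier placement in the acquire/release forms above:
 * the store-release ops (STL*, STLEX*) emit a TCG_BAR_STRL barrier before
 * the store, while the load-acquire ops (LDA*, LDAEX*) emit a
 * TCG_BAR_LDAQ barrier after the load, matching the one-way ordering
 * semantics of the architectural acquire/release instructions.
 */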
6967 * Media instructions
6970 static bool trans_USADA8(DisasContext *s, arg_USADA8 *a)
6972 TCGv_i32 t1, t2;
6974 if (!ENABLE_ARCH_6) {
6975 return false;
6978 t1 = load_reg(s, a->rn);
6979 t2 = load_reg(s, a->rm);
6980 gen_helper_usad8(t1, t1, t2);
6981 tcg_temp_free_i32(t2);
6982 if (a->ra != 15) {
6983 t2 = load_reg(s, a->ra);
6984 tcg_gen_add_i32(t1, t1, t2);
6985 tcg_temp_free_i32(t2);
6987 store_reg(s, a->rd, t1);
6988 return true;
6991 static bool op_bfx(DisasContext *s, arg_UBFX *a, bool u)
6993 TCGv_i32 tmp;
6994 int width = a->widthm1 + 1;
6995 int shift = a->lsb;
6997 if (!ENABLE_ARCH_6T2) {
6998 return false;
7000 if (shift + width > 32) {
7001 /* UNPREDICTABLE; we choose to UNDEF */
7002 unallocated_encoding(s);
7003 return true;
7006 tmp = load_reg(s, a->rn);
7007 if (u) {
7008 tcg_gen_extract_i32(tmp, tmp, shift, width);
7009 } else {
7010 tcg_gen_sextract_i32(tmp, tmp, shift, width);
7012 store_reg(s, a->rd, tmp);
7013 return true;
7016 static bool trans_SBFX(DisasContext *s, arg_SBFX *a)
7018 return op_bfx(s, a, false);
7021 static bool trans_UBFX(DisasContext *s, arg_UBFX *a)
7023 return op_bfx(s, a, true);
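/*
 * Worked example (informal): UBFX r0, r1, #8, #4 decodes with lsb = 8 and
 * widthm1 = 3, so width = 4 and the extract copies bits [11:8] of r1 into
 * bits [3:0] of r0, zero-extended; SBFX uses the signed extract, so bit
 * 11 would instead be replicated through bits [31:4].
 */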
7026 static bool trans_BFCI(DisasContext *s, arg_BFCI *a)
7028 TCGv_i32 tmp;
7029 int msb = a->msb, lsb = a->lsb;
7030 int width;
7032 if (!ENABLE_ARCH_6T2) {
7033 return false;
7035 if (msb < lsb) {
7036 /* UNPREDICTABLE; we choose to UNDEF */
7037 unallocated_encoding(s);
7038 return true;
7041 width = msb + 1 - lsb;
7042 if (a->rn == 15) {
7043 /* BFC */
7044 tmp = tcg_const_i32(0);
7045 } else {
7046 /* BFI */
7047 tmp = load_reg(s, a->rn);
7049 if (width != 32) {
7050 TCGv_i32 tmp2 = load_reg(s, a->rd);
7051 tcg_gen_deposit_i32(tmp, tmp2, tmp, lsb, width);
7052 tcg_temp_free_i32(tmp2);
7054 store_reg(s, a->rd, tmp);
7055 return true;
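/*
 * Worked example (informal): BFI r0, r1, #4, #8 gives lsb = 4, msb = 11,
 * so width = 8 and the deposit replaces bits [11:4] of r0 with bits [7:0]
 * of r1.  BFC is the same operation with a constant-zero source, selected
 * above by rn == 15.
 */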
7058 static bool trans_UDF(DisasContext *s, arg_UDF *a)
7060 unallocated_encoding(s);
7061 return true;
7065 * Parallel addition and subtraction
7068 static bool op_par_addsub(DisasContext *s, arg_rrr *a,
7069 void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
7071 TCGv_i32 t0, t1;
7073 if (s->thumb
7074 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
7075 : !ENABLE_ARCH_6) {
7076 return false;
7079 t0 = load_reg(s, a->rn);
7080 t1 = load_reg(s, a->rm);
7082 gen(t0, t0, t1);
7084 tcg_temp_free_i32(t1);
7085 store_reg(s, a->rd, t0);
7086 return true;
7089 static bool op_par_addsub_ge(DisasContext *s, arg_rrr *a,
7090 void (*gen)(TCGv_i32, TCGv_i32,
7091 TCGv_i32, TCGv_ptr))
7093 TCGv_i32 t0, t1;
7094 TCGv_ptr ge;
7096 if (s->thumb
7097 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
7098 : !ENABLE_ARCH_6) {
7099 return false;
7102 t0 = load_reg(s, a->rn);
7103 t1 = load_reg(s, a->rm);
7105 ge = tcg_temp_new_ptr();
7106 tcg_gen_addi_ptr(ge, cpu_env, offsetof(CPUARMState, GE));
7107 gen(t0, t0, t1, ge);
7109 tcg_temp_free_ptr(ge);
7110 tcg_temp_free_i32(t1);
7111 store_reg(s, a->rd, t0);
7112 return true;
7115 #define DO_PAR_ADDSUB(NAME, helper) \
7116 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
7118 return op_par_addsub(s, a, helper); \
7121 #define DO_PAR_ADDSUB_GE(NAME, helper) \
7122 static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
7124 return op_par_addsub_ge(s, a, helper); \
7127 DO_PAR_ADDSUB_GE(SADD16, gen_helper_sadd16)
7128 DO_PAR_ADDSUB_GE(SASX, gen_helper_saddsubx)
7129 DO_PAR_ADDSUB_GE(SSAX, gen_helper_ssubaddx)
7130 DO_PAR_ADDSUB_GE(SSUB16, gen_helper_ssub16)
7131 DO_PAR_ADDSUB_GE(SADD8, gen_helper_sadd8)
7132 DO_PAR_ADDSUB_GE(SSUB8, gen_helper_ssub8)
7134 DO_PAR_ADDSUB_GE(UADD16, gen_helper_uadd16)
7135 DO_PAR_ADDSUB_GE(UASX, gen_helper_uaddsubx)
7136 DO_PAR_ADDSUB_GE(USAX, gen_helper_usubaddx)
7137 DO_PAR_ADDSUB_GE(USUB16, gen_helper_usub16)
7138 DO_PAR_ADDSUB_GE(UADD8, gen_helper_uadd8)
7139 DO_PAR_ADDSUB_GE(USUB8, gen_helper_usub8)
7141 DO_PAR_ADDSUB(QADD16, gen_helper_qadd16)
7142 DO_PAR_ADDSUB(QASX, gen_helper_qaddsubx)
7143 DO_PAR_ADDSUB(QSAX, gen_helper_qsubaddx)
7144 DO_PAR_ADDSUB(QSUB16, gen_helper_qsub16)
7145 DO_PAR_ADDSUB(QADD8, gen_helper_qadd8)
7146 DO_PAR_ADDSUB(QSUB8, gen_helper_qsub8)
7148 DO_PAR_ADDSUB(UQADD16, gen_helper_uqadd16)
7149 DO_PAR_ADDSUB(UQASX, gen_helper_uqaddsubx)
7150 DO_PAR_ADDSUB(UQSAX, gen_helper_uqsubaddx)
7151 DO_PAR_ADDSUB(UQSUB16, gen_helper_uqsub16)
7152 DO_PAR_ADDSUB(UQADD8, gen_helper_uqadd8)
7153 DO_PAR_ADDSUB(UQSUB8, gen_helper_uqsub8)
7155 DO_PAR_ADDSUB(SHADD16, gen_helper_shadd16)
7156 DO_PAR_ADDSUB(SHASX, gen_helper_shaddsubx)
7157 DO_PAR_ADDSUB(SHSAX, gen_helper_shsubaddx)
7158 DO_PAR_ADDSUB(SHSUB16, gen_helper_shsub16)
7159 DO_PAR_ADDSUB(SHADD8, gen_helper_shadd8)
7160 DO_PAR_ADDSUB(SHSUB8, gen_helper_shsub8)
7162 DO_PAR_ADDSUB(UHADD16, gen_helper_uhadd16)
7163 DO_PAR_ADDSUB(UHASX, gen_helper_uhaddsubx)
7164 DO_PAR_ADDSUB(UHSAX, gen_helper_uhsubaddx)
7165 DO_PAR_ADDSUB(UHSUB16, gen_helper_uhsub16)
7166 DO_PAR_ADDSUB(UHADD8, gen_helper_uhadd8)
7167 DO_PAR_ADDSUB(UHSUB8, gen_helper_uhsub8)
7169 #undef DO_PAR_ADDSUB
7170 #undef DO_PAR_ADDSUB_GE
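/*
 * Informal note: the *_GE helpers take a pointer to the CPUARMState GE
 * field so they can record the per-lane greater-or-equal results that SEL
 * consumes later (e.g. for SADD16, whether each 16-bit signed sum was
 * >= 0).  The saturating (Q*/UQ*) and halving (SH*/UH*) variants do not
 * affect GE and therefore go through the plain op_par_addsub() path.
 */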
7173 * Packing, unpacking, saturation, and reversal
7176 static bool trans_PKH(DisasContext *s, arg_PKH *a)
7178 TCGv_i32 tn, tm;
7179 int shift = a->imm;
7181 if (s->thumb
7182 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
7183 : !ENABLE_ARCH_6) {
7184 return false;
7187 tn = load_reg(s, a->rn);
7188 tm = load_reg(s, a->rm);
7189 if (a->tb) {
7190 /* PKHTB */
7191 if (shift == 0) {
7192 shift = 31;
7194 tcg_gen_sari_i32(tm, tm, shift);
7195 tcg_gen_deposit_i32(tn, tn, tm, 0, 16);
7196 } else {
7197 /* PKHBT */
7198 tcg_gen_shli_i32(tm, tm, shift);
7199 tcg_gen_deposit_i32(tn, tm, tn, 0, 16);
7201 tcg_temp_free_i32(tm);
7202 store_reg(s, a->rd, tn);
7203 return true;
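/*
 * Worked example (informal): PKHBT rd, rn, rm, LSL #8 keeps the bottom
 * halfword of rn and takes the top halfword from (rm << 8), while PKHTB
 * rd, rn, rm, ASR #8 keeps the top halfword of rn and takes the bottom
 * halfword from (rm >> 8).  A PKHTB shift field of 0 encodes ASR #32;
 * shifting by 31 instead produces the same all-sign-bits value, hence the
 * adjustment above.
 */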
7206 static bool op_sat(DisasContext *s, arg_sat *a,
7207 void (*gen)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
7209 TCGv_i32 tmp, satimm;
7210 int shift = a->imm;
7212 if (!ENABLE_ARCH_6) {
7213 return false;
7216 tmp = load_reg(s, a->rn);
7217 if (a->sh) {
7218 tcg_gen_sari_i32(tmp, tmp, shift ? shift : 31);
7219 } else {
7220 tcg_gen_shli_i32(tmp, tmp, shift);
7223 satimm = tcg_const_i32(a->satimm);
7224 gen(tmp, cpu_env, tmp, satimm);
7225 tcg_temp_free_i32(satimm);
7227 store_reg(s, a->rd, tmp);
7228 return true;
7231 static bool trans_SSAT(DisasContext *s, arg_sat *a)
7233 return op_sat(s, a, gen_helper_ssat);
7236 static bool trans_USAT(DisasContext *s, arg_sat *a)
7238 return op_sat(s, a, gen_helper_usat);
7241 static bool trans_SSAT16(DisasContext *s, arg_sat *a)
7243 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
7244 return false;
7246 return op_sat(s, a, gen_helper_ssat16);
7249 static bool trans_USAT16(DisasContext *s, arg_sat *a)
7251 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
7252 return false;
7254 return op_sat(s, a, gen_helper_usat16);
7257 static bool op_xta(DisasContext *s, arg_rrr_rot *a,
7258 void (*gen_extract)(TCGv_i32, TCGv_i32),
7259 void (*gen_add)(TCGv_i32, TCGv_i32, TCGv_i32))
7261 TCGv_i32 tmp;
7263 if (!ENABLE_ARCH_6) {
7264 return false;
7267 tmp = load_reg(s, a->rm);
7269 * TODO: In many cases we could do a shift instead of a rotate.
7270 * Combined with a simple extend, that becomes an extract.
7272 tcg_gen_rotri_i32(tmp, tmp, a->rot * 8);
7273 gen_extract(tmp, tmp);
7275 if (a->rn != 15) {
7276 TCGv_i32 tmp2 = load_reg(s, a->rn);
7277 gen_add(tmp, tmp, tmp2);
7278 tcg_temp_free_i32(tmp2);
7280 store_reg(s, a->rd, tmp);
7281 return true;
7284 static bool trans_SXTAB(DisasContext *s, arg_rrr_rot *a)
7286 return op_xta(s, a, tcg_gen_ext8s_i32, tcg_gen_add_i32);
7289 static bool trans_SXTAH(DisasContext *s, arg_rrr_rot *a)
7291 return op_xta(s, a, tcg_gen_ext16s_i32, tcg_gen_add_i32);
7294 static bool trans_SXTAB16(DisasContext *s, arg_rrr_rot *a)
7296 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
7297 return false;
7299 return op_xta(s, a, gen_helper_sxtb16, gen_add16);
7302 static bool trans_UXTAB(DisasContext *s, arg_rrr_rot *a)
7304 return op_xta(s, a, tcg_gen_ext8u_i32, tcg_gen_add_i32);
7307 static bool trans_UXTAH(DisasContext *s, arg_rrr_rot *a)
7309 return op_xta(s, a, tcg_gen_ext16u_i32, tcg_gen_add_i32);
7312 static bool trans_UXTAB16(DisasContext *s, arg_rrr_rot *a)
7314 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
7315 return false;
7317 return op_xta(s, a, gen_helper_uxtb16, gen_add16);
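/*
 * Informal example: SXTAH r0, r1, r2, ROR #16 rotates r2 right by 16
 * bits, sign-extends the resulting low halfword and adds it to r1.  An
 * rn of 15 selects the non-accumulating SXTB/SXTH/UXTB/... encodings,
 * which is why the add is skipped in that case.
 */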
7320 static bool trans_SEL(DisasContext *s, arg_rrr *a)
7322 TCGv_i32 t1, t2, t3;
7324 if (s->thumb
7325 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
7326 : !ENABLE_ARCH_6) {
7327 return false;
7330 t1 = load_reg(s, a->rn);
7331 t2 = load_reg(s, a->rm);
7332 t3 = tcg_temp_new_i32();
7333 tcg_gen_ld_i32(t3, cpu_env, offsetof(CPUARMState, GE));
7334 gen_helper_sel_flags(t1, t3, t1, t2);
7335 tcg_temp_free_i32(t3);
7336 tcg_temp_free_i32(t2);
7337 store_reg(s, a->rd, t1);
7338 return true;
7341 static bool op_rr(DisasContext *s, arg_rr *a,
7342 void (*gen)(TCGv_i32, TCGv_i32))
7344 TCGv_i32 tmp;
7346 tmp = load_reg(s, a->rm);
7347 gen(tmp, tmp);
7348 store_reg(s, a->rd, tmp);
7349 return true;
7352 static bool trans_REV(DisasContext *s, arg_rr *a)
7354 if (!ENABLE_ARCH_6) {
7355 return false;
7357 return op_rr(s, a, tcg_gen_bswap32_i32);
7360 static bool trans_REV16(DisasContext *s, arg_rr *a)
7362 if (!ENABLE_ARCH_6) {
7363 return false;
7365 return op_rr(s, a, gen_rev16);
7368 static bool trans_REVSH(DisasContext *s, arg_rr *a)
7370 if (!ENABLE_ARCH_6) {
7371 return false;
7373 return op_rr(s, a, gen_revsh);
7376 static bool trans_RBIT(DisasContext *s, arg_rr *a)
7378 if (!ENABLE_ARCH_6T2) {
7379 return false;
7381 return op_rr(s, a, gen_helper_rbit);
7385 * Signed multiply, signed and unsigned divide
7388 static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
7390 TCGv_i32 t1, t2;
7392 if (!ENABLE_ARCH_6) {
7393 return false;
7396 t1 = load_reg(s, a->rn);
7397 t2 = load_reg(s, a->rm);
7398 if (m_swap) {
7399 gen_swap_half(t2, t2);
7401 gen_smul_dual(t1, t2);
7403 if (sub) {
7404 /* This subtraction cannot overflow. */
7405 tcg_gen_sub_i32(t1, t1, t2);
7406 } else {
7408 * This addition cannot overflow 32 bits; however it may
7409 * overflow considered as a signed operation, in which case
7410 * we must set the Q flag.
7412 gen_helper_add_setq(t1, cpu_env, t1, t2);
7414 tcg_temp_free_i32(t2);
7416 if (a->ra != 15) {
7417 t2 = load_reg(s, a->ra);
7418 gen_helper_add_setq(t1, cpu_env, t1, t2);
7419 tcg_temp_free_i32(t2);
7421 store_reg(s, a->rd, t1);
7422 return true;
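/*
 * Informal note: for SMLAD rd, rn, rm, ra the code above computes
 * (rn[15:0] * rm[15:0]) + (rn[31:16] * rm[31:16]) + ra, setting the Q
 * flag if either addition overflows as a signed 32-bit operation.  The
 * 'X' forms swap the halfwords of rm first, and the SMLSD forms subtract
 * the two products (which cannot overflow) before accumulating.
 */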
7425 static bool trans_SMLAD(DisasContext *s, arg_rrrr *a)
7427 return op_smlad(s, a, false, false);
7430 static bool trans_SMLADX(DisasContext *s, arg_rrrr *a)
7432 return op_smlad(s, a, true, false);
7435 static bool trans_SMLSD(DisasContext *s, arg_rrrr *a)
7437 return op_smlad(s, a, false, true);
7440 static bool trans_SMLSDX(DisasContext *s, arg_rrrr *a)
7442 return op_smlad(s, a, true, true);
7445 static bool op_smlald(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
7447 TCGv_i32 t1, t2;
7448 TCGv_i64 l1, l2;
7450 if (!ENABLE_ARCH_6) {
7451 return false;
7454 t1 = load_reg(s, a->rn);
7455 t2 = load_reg(s, a->rm);
7456 if (m_swap) {
7457 gen_swap_half(t2, t2);
7459 gen_smul_dual(t1, t2);
7461 l1 = tcg_temp_new_i64();
7462 l2 = tcg_temp_new_i64();
7463 tcg_gen_ext_i32_i64(l1, t1);
7464 tcg_gen_ext_i32_i64(l2, t2);
7465 tcg_temp_free_i32(t1);
7466 tcg_temp_free_i32(t2);
7468 if (sub) {
7469 tcg_gen_sub_i64(l1, l1, l2);
7470 } else {
7471 tcg_gen_add_i64(l1, l1, l2);
7473 tcg_temp_free_i64(l2);
7475 gen_addq(s, l1, a->ra, a->rd);
7476 gen_storeq_reg(s, a->ra, a->rd, l1);
7477 tcg_temp_free_i64(l1);
7478 return true;
7481 static bool trans_SMLALD(DisasContext *s, arg_rrrr *a)
7483 return op_smlald(s, a, false, false);
7486 static bool trans_SMLALDX(DisasContext *s, arg_rrrr *a)
7488 return op_smlald(s, a, true, false);
7491 static bool trans_SMLSLD(DisasContext *s, arg_rrrr *a)
7493 return op_smlald(s, a, false, true);
7496 static bool trans_SMLSLDX(DisasContext *s, arg_rrrr *a)
7498 return op_smlald(s, a, true, true);
7501 static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub)
7503 TCGv_i32 t1, t2;
7505 if (s->thumb
7506 ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
7507 : !ENABLE_ARCH_6) {
7508 return false;
7511 t1 = load_reg(s, a->rn);
7512 t2 = load_reg(s, a->rm);
7513 tcg_gen_muls2_i32(t2, t1, t1, t2);
7515 if (a->ra != 15) {
7516 TCGv_i32 t3 = load_reg(s, a->ra);
7517 if (sub) {
7519 * For SMMLS, we need a 64-bit subtract: a borrow can be
7520 * caused by a non-zero multiplicand lowpart, and we need
7521 * the correct result lowpart for the rounding step below.
7523 TCGv_i32 zero = tcg_const_i32(0);
7524 tcg_gen_sub2_i32(t2, t1, zero, t3, t2, t1);
7525 tcg_temp_free_i32(zero);
7526 } else {
7527 tcg_gen_add_i32(t1, t1, t3);
7529 tcg_temp_free_i32(t3);
7531 if (round) {
7533 * Adding 0x80000000 to the 64-bit quantity means that there is
7534 * a carry into the high word when the low word has its msb set.
7536 tcg_gen_shri_i32(t2, t2, 31);
7537 tcg_gen_add_i32(t1, t1, t2);
7539 tcg_temp_free_i32(t2);
7540 store_reg(s, a->rd, t1);
7541 return true;
7544 static bool trans_SMMLA(DisasContext *s, arg_rrrr *a)
7546 return op_smmla(s, a, false, false);
7549 static bool trans_SMMLAR(DisasContext *s, arg_rrrr *a)
7551 return op_smmla(s, a, true, false);
7554 static bool trans_SMMLS(DisasContext *s, arg_rrrr *a)
7556 return op_smmla(s, a, false, true);
7559 static bool trans_SMMLSR(DisasContext *s, arg_rrrr *a)
7561 return op_smmla(s, a, true, true);
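/*
 * Informal note: SMMLA/SMMLS produce bits [63:32] of
 * (ra << 32) +/- (rn * rm), with ra == 15 giving the non-accumulating
 * SMMUL.  The 'R' (rounding) forms conceptually add 0x80000000 to the
 * 64-bit value first, implemented above by adding bit 31 of the low word
 * into the high word.
 */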
7564 static bool op_div(DisasContext *s, arg_rrr *a, bool u)
7566 TCGv_i32 t1, t2;
7568 if (s->thumb
7569 ? !dc_isar_feature(aa32_thumb_div, s)
7570 : !dc_isar_feature(aa32_arm_div, s)) {
7571 return false;
7574 t1 = load_reg(s, a->rn);
7575 t2 = load_reg(s, a->rm);
7576 if (u) {
7577 gen_helper_udiv(t1, t1, t2);
7578 } else {
7579 gen_helper_sdiv(t1, t1, t2);
7581 tcg_temp_free_i32(t2);
7582 store_reg(s, a->rd, t1);
7583 return true;
7586 static bool trans_SDIV(DisasContext *s, arg_rrr *a)
7588 return op_div(s, a, false);
7591 static bool trans_UDIV(DisasContext *s, arg_rrr *a)
7593 return op_div(s, a, true);
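/*
 * Informal note: the udiv/sdiv helpers are expected to implement the
 * architected integer division behaviour, i.e. a result of 0 for division
 * by zero and a wrapped result (INT_MIN) for INT_MIN / -1, so no explicit
 * checks are generated here.
 */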
7597 * Block data transfer
7600 static TCGv_i32 op_addr_block_pre(DisasContext *s, arg_ldst_block *a, int n)
7602 TCGv_i32 addr = load_reg(s, a->rn);
7604 if (a->b) {
7605 if (a->i) {
7606 /* pre increment */
7607 tcg_gen_addi_i32(addr, addr, 4);
7608 } else {
7609 /* pre decrement */
7610 tcg_gen_addi_i32(addr, addr, -(n * 4));
7612 } else if (!a->i && n != 1) {
7613 /* post decrement */
7614 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7617 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
7619 * If the writeback is incrementing SP rather than
7620 * decrementing it, and the initial SP is below the
7621 * stack limit but the final written-back SP would
7622 * be above, then we must not perform any memory
7623 * accesses, but it is IMPDEF whether we generate
7624 * an exception. We choose to do so in this case.
7625 * At this point 'addr' is the lowest address, so
7626 * either the original SP (if incrementing) or our
7627 * final SP (if decrementing), so that's what we check.
7629 gen_helper_v8m_stackcheck(cpu_env, addr);
7632 return addr;
7635 static void op_addr_block_post(DisasContext *s, arg_ldst_block *a,
7636 TCGv_i32 addr, int n)
7638 if (a->w) {
7639 /* write back */
7640 if (!a->b) {
7641 if (a->i) {
7642 /* post increment */
7643 tcg_gen_addi_i32(addr, addr, 4);
7644 } else {
7645 /* post decrement */
7646 tcg_gen_addi_i32(addr, addr, -(n * 4));
7648 } else if (!a->i && n != 1) {
7649 /* pre decrement */
7650 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7652 store_reg(s, a->rn, addr);
7653 } else {
7654 tcg_temp_free_i32(addr);
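/*
 * Worked example (informal): for STMDB sp!, {r0,r1,r2} we have b=1, i=0,
 * n=3, so op_addr_block_pre() starts the transfer at sp - 12 and the
 * words go to sp-12, sp-8 and sp-4; op_addr_block_post() then steps addr
 * back by (n-1)*4 and writes sp - 12 into the base register.
 */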
7658 static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
7660 int i, j, n, list, mem_idx;
7661 bool user = a->u;
7662 TCGv_i32 addr, tmp, tmp2;
7664 if (user) {
7665 /* STM (user) */
7666 if (IS_USER(s)) {
7667 /* Only usable in supervisor mode. */
7668 unallocated_encoding(s);
7669 return true;
7673 list = a->list;
7674 n = ctpop16(list);
7675 if (n < min_n || a->rn == 15) {
7676 unallocated_encoding(s);
7677 return true;
7680 addr = op_addr_block_pre(s, a, n);
7681 mem_idx = get_mem_index(s);
7683 for (i = j = 0; i < 16; i++) {
7684 if (!(list & (1 << i))) {
7685 continue;
7688 if (user && i != 15) {
7689 tmp = tcg_temp_new_i32();
7690 tmp2 = tcg_const_i32(i);
7691 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
7692 tcg_temp_free_i32(tmp2);
7693 } else {
7694 tmp = load_reg(s, i);
7696 gen_aa32_st32(s, tmp, addr, mem_idx);
7697 tcg_temp_free_i32(tmp);
7699 /* No need to add after the last transfer. */
7700 if (++j != n) {
7701 tcg_gen_addi_i32(addr, addr, 4);
7705 op_addr_block_post(s, a, addr, n);
7706 return true;
7709 static bool trans_STM(DisasContext *s, arg_ldst_block *a)
7711 /* BitCount(list) < 1 is UNPREDICTABLE */
7712 return op_stm(s, a, 1);
7715 static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a)
7717 /* Writeback register in register list is UNPREDICTABLE for T32. */
7718 if (a->w && (a->list & (1 << a->rn))) {
7719 unallocated_encoding(s);
7720 return true;
7722 /* BitCount(list) < 2 is UNPREDICTABLE */
7723 return op_stm(s, a, 2);
7726 static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
7728 int i, j, n, list, mem_idx;
7729 bool loaded_base;
7730 bool user = a->u;
7731 bool exc_return = false;
7732 TCGv_i32 addr, tmp, tmp2, loaded_var;
7734 if (user) {
7735 /* LDM (user), LDM (exception return) */
7736 if (IS_USER(s)) {
7737 /* Only usable in supervisor mode. */
7738 unallocated_encoding(s);
7739 return true;
7741 if (extract32(a->list, 15, 1)) {
7742 exc_return = true;
7743 user = false;
7744 } else {
7745 /* LDM (user) does not allow writeback. */
7746 if (a->w) {
7747 unallocated_encoding(s);
7748 return true;
7753 list = a->list;
7754 n = ctpop16(list);
7755 if (n < min_n || a->rn == 15) {
7756 unallocated_encoding(s);
7757 return true;
7760 addr = op_addr_block_pre(s, a, n);
7761 mem_idx = get_mem_index(s);
7762 loaded_base = false;
7763 loaded_var = NULL;
7765 for (i = j = 0; i < 16; i++) {
7766 if (!(list & (1 << i))) {
7767 continue;
7770 tmp = tcg_temp_new_i32();
7771 gen_aa32_ld32u(s, tmp, addr, mem_idx);
7772 if (user) {
7773 tmp2 = tcg_const_i32(i);
7774 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
7775 tcg_temp_free_i32(tmp2);
7776 tcg_temp_free_i32(tmp);
7777 } else if (i == a->rn) {
7778 loaded_var = tmp;
7779 loaded_base = true;
7780 } else if (i == 15 && exc_return) {
7781 store_pc_exc_ret(s, tmp);
7782 } else {
7783 store_reg_from_load(s, i, tmp);
7786 /* No need to add after the last transfer. */
7787 if (++j != n) {
7788 tcg_gen_addi_i32(addr, addr, 4);
7792 op_addr_block_post(s, a, addr, n);
7794 if (loaded_base) {
7795 /* Note that we reject base == pc above. */
7796 store_reg(s, a->rn, loaded_var);
7799 if (exc_return) {
7800 /* Restore CPSR from SPSR. */
7801 tmp = load_cpu_field(spsr);
7802 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
7803 gen_io_start();
7805 gen_helper_cpsr_write_eret(cpu_env, tmp);
7806 tcg_temp_free_i32(tmp);
7807 /* Must exit loop to check un-masked IRQs */
7808 s->base.is_jmp = DISAS_EXIT;
7810 return true;
7813 static bool trans_LDM_a32(DisasContext *s, arg_ldst_block *a)
7816 * Writeback register in register list is UNPREDICTABLE
7817 * for ArchVersion() >= 7. Prior to v7, A32 would write
7818 * an UNKNOWN value to the base register.
7820 if (ENABLE_ARCH_7 && a->w && (a->list & (1 << a->rn))) {
7821 unallocated_encoding(s);
7822 return true;
7824 /* BitCount(list) < 1 is UNPREDICTABLE */
7825 return do_ldm(s, a, 1);
7828 static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a)
7830 /* Writeback register in register list is UNPREDICTABLE for T32. */
7831 if (a->w && (a->list & (1 << a->rn))) {
7832 unallocated_encoding(s);
7833 return true;
7835 /* BitCount(list) < 2 is UNPREDICTABLE */
7836 return do_ldm(s, a, 2);
7839 static bool trans_LDM_t16(DisasContext *s, arg_ldst_block *a)
7841 /* Writeback is conditional on the base register not being loaded. */
7842 a->w = !(a->list & (1 << a->rn));
7843 /* BitCount(list) < 1 is UNPREDICTABLE */
7844 return do_ldm(s, a, 1);
7848 * Branch, branch with link
7851 static bool trans_B(DisasContext *s, arg_i *a)
7853 gen_jmp(s, read_pc(s) + a->imm);
7854 return true;
7857 static bool trans_B_cond_thumb(DisasContext *s, arg_ci *a)
7859 /* This has cond from encoding, required to be outside IT block. */
7860 if (a->cond >= 0xe) {
7861 return false;
7863 if (s->condexec_mask) {
7864 unallocated_encoding(s);
7865 return true;
7867 arm_skip_unless(s, a->cond);
7868 gen_jmp(s, read_pc(s) + a->imm);
7869 return true;
7872 static bool trans_BL(DisasContext *s, arg_i *a)
7874 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
7875 gen_jmp(s, read_pc(s) + a->imm);
7876 return true;
7879 static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
7881 TCGv_i32 tmp;
7883 /* For A32, ARM_FEATURE_V5 is checked near the start of the uncond block. */
7884 if (s->thumb && (a->imm & 2)) {
7885 return false;
7887 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
7888 tmp = tcg_const_i32(!s->thumb);
7889 store_cpu_field(tmp, thumb);
7890 gen_jmp(s, (read_pc(s) & ~3) + a->imm);
7891 return true;
7894 static bool trans_BL_BLX_prefix(DisasContext *s, arg_BL_BLX_prefix *a)
7896 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
7897 tcg_gen_movi_i32(cpu_R[14], read_pc(s) + (a->imm << 12));
7898 return true;
7901 static bool trans_BL_suffix(DisasContext *s, arg_BL_suffix *a)
7903 TCGv_i32 tmp = tcg_temp_new_i32();
7905 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
7906 tcg_gen_addi_i32(tmp, cpu_R[14], (a->imm << 1) | 1);
7907 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
7908 gen_bx(s, tmp);
7909 return true;
7912 static bool trans_BLX_suffix(DisasContext *s, arg_BLX_suffix *a)
7914 TCGv_i32 tmp;
7916 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
7917 if (!ENABLE_ARCH_5) {
7918 return false;
7920 tmp = tcg_temp_new_i32();
7921 tcg_gen_addi_i32(tmp, cpu_R[14], a->imm << 1);
7922 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7923 tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
7924 gen_bx(s, tmp);
7925 return true;
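/*
 * Informal note: on Thumb-1 cores the 32-bit BL/BLX is executed as two
 * 16-bit halves.  The prefix stashes PC + (imm << 12) in LR; the suffix
 * then forms the target from LR + (imm << 1) (keeping the Thumb bit set
 * for BL), stores the return address (next PC | 1) back into LR and
 * branches.  The BLX suffix also clears the low two bits of the target so
 * execution resumes in ARM state at a word-aligned address.
 */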
7928 static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
7930 TCGv_i32 addr, tmp;
7932 tmp = load_reg(s, a->rm);
7933 if (half) {
7934 tcg_gen_add_i32(tmp, tmp, tmp);
7936 addr = load_reg(s, a->rn);
7937 tcg_gen_add_i32(addr, addr, tmp);
7939 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
7940 half ? MO_UW | s->be_data : MO_UB);
7941 tcg_temp_free_i32(addr);
7943 tcg_gen_add_i32(tmp, tmp, tmp);
7944 tcg_gen_addi_i32(tmp, tmp, read_pc(s));
7945 store_reg(s, 15, tmp);
7946 return true;
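/*
 * Worked example (informal): for TBH [rn, rm, LSL #1] the code loads the
 * halfword at rn + 2*rm, doubles it and adds the current PC value
 * (read_pc), so the branch target is PC + 2 * table[rm]; TBB is the same
 * with byte-sized table entries.
 */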
7949 static bool trans_TBB(DisasContext *s, arg_tbranch *a)
7951 return op_tbranch(s, a, false);
7954 static bool trans_TBH(DisasContext *s, arg_tbranch *a)
7956 return op_tbranch(s, a, true);
7959 static bool trans_CBZ(DisasContext *s, arg_CBZ *a)
7961 TCGv_i32 tmp = load_reg(s, a->rn);
7963 arm_gen_condlabel(s);
7964 tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE,
7965 tmp, 0, s->condlabel);
7966 tcg_temp_free_i32(tmp);
7967 gen_jmp(s, read_pc(s) + a->imm);
7968 return true;
7972 * Supervisor call - both T32 & A32 come here so we need to check
7973 * which mode we are in when checking for semihosting.
7976 static bool trans_SVC(DisasContext *s, arg_SVC *a)
7978 const uint32_t semihost_imm = s->thumb ? 0xab : 0x123456;
7980 if (!arm_dc_feature(s, ARM_FEATURE_M) && semihosting_enabled() &&
7981 #ifndef CONFIG_USER_ONLY
7982 !IS_USER(s) &&
7983 #endif
7984 (a->imm == semihost_imm)) {
7985 gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
7986 } else {
7987 gen_set_pc_im(s, s->base.pc_next);
7988 s->svc_imm = a->imm;
7989 s->base.is_jmp = DISAS_SWI;
7991 return true;
7995 * Unconditional system instructions
7998 static bool trans_RFE(DisasContext *s, arg_RFE *a)
8000 static const int8_t pre_offset[4] = {
8001 /* DA */ -4, /* IA */ 0, /* DB */ -8, /* IB */ 4
8003 static const int8_t post_offset[4] = {
8004 /* DA */ -8, /* IA */ 4, /* DB */ -4, /* IB */ 0
8006 TCGv_i32 addr, t1, t2;
8008 if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
8009 return false;
8011 if (IS_USER(s)) {
8012 unallocated_encoding(s);
8013 return true;
8016 addr = load_reg(s, a->rn);
8017 tcg_gen_addi_i32(addr, addr, pre_offset[a->pu]);
8019 /* Load PC into tmp and CPSR into tmp2. */
8020 t1 = tcg_temp_new_i32();
8021 gen_aa32_ld32u(s, t1, addr, get_mem_index(s));
8022 tcg_gen_addi_i32(addr, addr, 4);
8023 t2 = tcg_temp_new_i32();
8024 gen_aa32_ld32u(s, t2, addr, get_mem_index(s));
8026 if (a->w) {
8027 /* Base writeback. */
8028 tcg_gen_addi_i32(addr, addr, post_offset[a->pu]);
8029 store_reg(s, a->rn, addr);
8030 } else {
8031 tcg_temp_free_i32(addr);
8033 gen_rfe(s, t1, t2);
8034 return true;
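/*
 * Worked example (informal): for RFEIA (pu == 1) the pre offset is 0 and
 * the post offset is 4, so the return PC is loaded from [rn] and the
 * saved CPSR from [rn + 4]; with writeback the base ends up at rn + 8,
 * just past the two words.  The DA/DB/IB rows adjust the start and
 * writeback addresses accordingly.
 */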
8037 static bool trans_SRS(DisasContext *s, arg_SRS *a)
8039 if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
8040 return false;
8042 gen_srs(s, a->mode, a->pu, a->w);
8043 return true;
8046 static bool trans_CPS(DisasContext *s, arg_CPS *a)
8048 uint32_t mask, val;
8050 if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
8051 return false;
8053 if (IS_USER(s)) {
8054 /* Implemented as NOP in user mode. */
8055 return true;
8057 /* TODO: There are quite a lot of UNPREDICTABLE argument combinations. */
8059 mask = val = 0;
8060 if (a->imod & 2) {
8061 if (a->A) {
8062 mask |= CPSR_A;
8064 if (a->I) {
8065 mask |= CPSR_I;
8067 if (a->F) {
8068 mask |= CPSR_F;
8070 if (a->imod & 1) {
8071 val |= mask;
8074 if (a->M) {
8075 mask |= CPSR_M;
8076 val |= a->mode;
8078 if (mask) {
8079 gen_set_psr_im(s, mask, 0, val);
8081 return true;
8084 static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a)
8086 TCGv_i32 tmp, addr, el;
8088 if (!arm_dc_feature(s, ARM_FEATURE_M)) {
8089 return false;
8091 if (IS_USER(s)) {
8092 /* Implemented as NOP in user mode. */
8093 return true;
8096 tmp = tcg_const_i32(a->im);
8097 /* FAULTMASK */
8098 if (a->F) {
8099 addr = tcg_const_i32(19);
8100 gen_helper_v7m_msr(cpu_env, addr, tmp);
8101 tcg_temp_free_i32(addr);
8103 /* PRIMASK */
8104 if (a->I) {
8105 addr = tcg_const_i32(16);
8106 gen_helper_v7m_msr(cpu_env, addr, tmp);
8107 tcg_temp_free_i32(addr);
8109 el = tcg_const_i32(s->current_el);
8110 gen_helper_rebuild_hflags_m32(cpu_env, el);
8111 tcg_temp_free_i32(el);
8112 tcg_temp_free_i32(tmp);
8113 gen_lookup_tb(s);
8114 return true;
8118 * Clear-Exclusive, Barriers
8121 static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
8123 if (s->thumb
8124 ? !ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)
8125 : !ENABLE_ARCH_6K) {
8126 return false;
8128 gen_clrex(s);
8129 return true;
8132 static bool trans_DSB(DisasContext *s, arg_DSB *a)
8134 if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
8135 return false;
8137 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
8138 return true;
8141 static bool trans_DMB(DisasContext *s, arg_DMB *a)
8143 return trans_DSB(s, NULL);
8146 static bool trans_ISB(DisasContext *s, arg_ISB *a)
8148 if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
8149 return false;
8152 * We need to break the TB after this insn to execute
8153 * self-modifying code correctly and also to take
8154 * any pending interrupts immediately.
8156 gen_goto_tb(s, 0, s->base.pc_next);
8157 return true;
8160 static bool trans_SB(DisasContext *s, arg_SB *a)
8162 if (!dc_isar_feature(aa32_sb, s)) {
8163 return false;
8166 * TODO: There is no speculation barrier opcode
8167 * for TCG; MB and end the TB instead.
8169 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
8170 gen_goto_tb(s, 0, s->base.pc_next);
8171 return true;
8174 static bool trans_SETEND(DisasContext *s, arg_SETEND *a)
8176 if (!ENABLE_ARCH_6) {
8177 return false;
8179 if (a->E != (s->be_data == MO_BE)) {
8180 gen_helper_setend(cpu_env);
8181 s->base.is_jmp = DISAS_UPDATE_EXIT;
8183 return true;
8187 * Preload instructions
8188 * All are nops, contingent on the appropriate arch level.
8191 static bool trans_PLD(DisasContext *s, arg_PLD *a)
8193 return ENABLE_ARCH_5TE;
8196 static bool trans_PLDW(DisasContext *s, arg_PLD *a)
8198 return arm_dc_feature(s, ARM_FEATURE_V7MP);
8201 static bool trans_PLI(DisasContext *s, arg_PLD *a)
8203 return ENABLE_ARCH_7;
8207 * If-then
8210 static bool trans_IT(DisasContext *s, arg_IT *a)
8212 int cond_mask = a->cond_mask;
8215 * No actual code generated for this insn, just setup state.
8217 * Combinations of firstcond and mask which set up an 0b1111
8218 * condition are UNPREDICTABLE; we take the CONSTRAINED
8219 * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
8220 * i.e. both meaning "execute always".
8222 s->condexec_cond = (cond_mask >> 4) & 0xe;
8223 s->condexec_mask = cond_mask & 0x1f;
8224 return true;
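/*
 * Informal note: cond_mask is the 8-bit {firstcond, mask} field from the
 * IT encoding.  The top three bits of firstcond become condexec_cond (the
 * base condition shared by the whole block), and the remaining five bits
 * (firstcond[0] plus the 4-bit mask) become condexec_mask, mirroring the
 * ITSTATE layout that is advanced as each instruction in the block is
 * translated.
 */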
8228 * Legacy decoder.
8231 static void disas_arm_insn(DisasContext *s, unsigned int insn)
8233 unsigned int cond = insn >> 28;
8235 /* M variants do not implement ARM mode; this must raise the INVSTATE
8236 * UsageFault exception.
8238 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8239 gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
8240 default_exception_el(s));
8241 return;
8244 if (cond == 0xf) {
8245 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8246 * choose to UNDEF. In ARMv5 and above the space is used
8247 * for miscellaneous unconditional instructions.
8249 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
8250 unallocated_encoding(s);
8251 return;
8254 /* Unconditional instructions. */
8255 /* TODO: Perhaps merge these into one decodetree output file. */
8256 if (disas_a32_uncond(s, insn) ||
8257 disas_vfp_uncond(s, insn) ||
8258 disas_neon_dp(s, insn) ||
8259 disas_neon_ls(s, insn) ||
8260 disas_neon_shared(s, insn)) {
8261 return;
8263 /* fall back to legacy decoder */
8265 if ((insn & 0x0e000f00) == 0x0c000100) {
8266 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
8267 /* iWMMXt register transfer. */
8268 if (extract32(s->c15_cpar, 1, 1)) {
8269 if (!disas_iwmmxt_insn(s, insn)) {
8270 return;
8275 goto illegal_op;
8277 if (cond != 0xe) {
8278 /* if not always execute, we generate a conditional jump to
8279 next instruction */
8280 arm_skip_unless(s, cond);
8283 /* TODO: Perhaps merge these into one decodetree output file. */
8284 if (disas_a32(s, insn) ||
8285 disas_vfp(s, insn)) {
8286 return;
8288 /* fall back to legacy decoder */
8289 /* TODO: convert xscale/iwmmxt decoder to decodetree ?? */
8290 if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
8291 if (((insn & 0x0c000e00) == 0x0c000000)
8292 && ((insn & 0x03000000) != 0x03000000)) {
8293 /* Coprocessor insn, coprocessor 0 or 1 */
8294 disas_xscale_insn(s, insn);
8295 return;
8299 illegal_op:
8300 unallocated_encoding(s);
8303 static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
8306 * Return true if this is a 16 bit instruction. We must be precise
8307 * about this (matching the decode).
8309 if ((insn >> 11) < 0x1d) {
8310 /* Definitely a 16-bit instruction */
8311 return true;
8314 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
8315 * first half of a 32-bit Thumb insn. Thumb-1 cores might
8316 * end up actually treating this as two 16-bit insns, though,
8317 * if it's half of a bl/blx pair that might span a page boundary.
8319 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
8320 arm_dc_feature(s, ARM_FEATURE_M)) {
8321 /* Thumb2 cores (including all M profile ones) always treat
8322 * 32-bit insns as 32-bit.
8324 return false;
8327 if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
8328 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
8329 * is not on the next page; we merge this into a 32-bit
8330 * insn.
8332 return false;
8334 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
8335 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
8336 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
8337 * -- handle as single 16 bit insn
8339 return true;
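/*
 * Informal example: the top five bits of the first halfword decide the
 * length, so 0x4668 ("mov r0, sp", top bits 0b01000) is always 16-bit,
 * while 0xf3bf (top bits 0b11110) is the first half of a 32-bit encoding
 * on Thumb-2 capable cores.
 */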
8342 /* Translate a 32-bit thumb instruction. */
8343 static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
8346 * ARMv6-M supports a limited subset of Thumb2 instructions.
8347 * Other Thumb1 architectures allow only 32-bit
8348 * combined BL/BLX prefix and suffix.
8350 if (arm_dc_feature(s, ARM_FEATURE_M) &&
8351 !arm_dc_feature(s, ARM_FEATURE_V7)) {
8352 int i;
8353 bool found = false;
8354 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
8355 0xf3b08040 /* dsb */,
8356 0xf3b08050 /* dmb */,
8357 0xf3b08060 /* isb */,
8358 0xf3e08000 /* mrs */,
8359 0xf000d000 /* bl */};
8360 static const uint32_t armv6m_mask[] = {0xffe0d000,
8361 0xfff0d0f0,
8362 0xfff0d0f0,
8363 0xfff0d0f0,
8364 0xffe0d000,
8365 0xf800d000};
8367 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
8368 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
8369 found = true;
8370 break;
8373 if (!found) {
8374 goto illegal_op;
8376 } else if ((insn & 0xf800e800) != 0xf000e800) {
8377 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
8378 unallocated_encoding(s);
8379 return;
8383 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8385 * NOCP takes precedence over any UNDEF for (almost) the
8386 * entire wide range of coprocessor-space encodings, so check
8387 * for it first before proceeding to actually decode eg VFP
8388 * insns. This decode also handles the few insns which are
8389 * in copro space but do not have NOCP checks (eg VLLDM, VLSTM).
8391 if (disas_m_nocp(s, insn)) {
8392 return;
8396 if ((insn & 0xef000000) == 0xef000000) {
8398 * T32 encodings 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
8399 * transform into
8400 * A32 encodings 0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
8402 uint32_t a32_insn = (insn & 0xe2ffffff) |
8403 ((insn & (1 << 28)) >> 4) | (1 << 28);
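/*
 * Informal worked example: this moves the T32 'p' bit from bit 28 down to
 * bit 24 and forces bits [31:28] to 0xf, so 0xef012345 (p = 0) becomes
 * 0xf2012345 and 0xff012345 (p = 1) becomes 0xf3012345, i.e. the
 * corresponding A32 Neon data-processing encoding.
 */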
8405 if (disas_neon_dp(s, a32_insn)) {
8406 return;
8410 if ((insn & 0xff100000) == 0xf9000000) {
8412 * T32 encodings 0b1111_1001_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
8413 * transform into
8414 * A32 encodings 0b1111_0100_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
8416 uint32_t a32_insn = (insn & 0x00ffffff) | 0xf4000000;
8418 if (disas_neon_ls(s, a32_insn)) {
8419 return;
8424 * TODO: Perhaps merge these into one decodetree output file.
8425 * Note disas_vfp is written for a32 with cond field in the
8426 * top nibble. The t32 encoding requires 0xe in the top nibble.
8428 if (disas_t32(s, insn) ||
8429 disas_vfp_uncond(s, insn) ||
8430 disas_neon_shared(s, insn) ||
8431 ((insn >> 28) == 0xe && disas_vfp(s, insn))) {
8432 return;
8435 illegal_op:
8436 unallocated_encoding(s);
8439 static void disas_thumb_insn(DisasContext *s, uint32_t insn)
8441 if (!disas_t16(s, insn)) {
8442 unallocated_encoding(s);
8446 static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
8448 /* Return true if the insn at dc->base.pc_next might cross a page boundary.
8449 * (False positives are OK, false negatives are not.)
8450 * We know this is a Thumb insn, and our caller ensures we are
8451 * only called if dc->base.pc_next is less than 4 bytes from the page
8452 * boundary, so we cross the page if the first 16 bits indicate
8453 * that this is a 32 bit insn.
8455 uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);
8457 return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
8460 static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
8462 DisasContext *dc = container_of(dcbase, DisasContext, base);
8463 CPUARMState *env = cs->env_ptr;
8464 ARMCPU *cpu = env_archcpu(env);
8465 uint32_t tb_flags = dc->base.tb->flags;
8466 uint32_t condexec, core_mmu_idx;
8468 dc->isar = &cpu->isar;
8469 dc->condjmp = 0;
8471 dc->aarch64 = 0;
8472 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
8473 * there is no secure EL1, so we route exceptions to EL3.
8475 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
8476 !arm_el_is_aa64(env, 3);
8477 dc->thumb = FIELD_EX32(tb_flags, TBFLAG_AM32, THUMB);
8478 dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
8479 condexec = FIELD_EX32(tb_flags, TBFLAG_AM32, CONDEXEC);
8480 dc->condexec_mask = (condexec & 0xf) << 1;
8481 dc->condexec_cond = condexec >> 4;
8483 core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
8484 dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
8485 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
8486 #if !defined(CONFIG_USER_ONLY)
8487 dc->user = (dc->current_el == 0);
8488 #endif
8489 dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
8491 if (arm_feature(env, ARM_FEATURE_M)) {
8492 dc->vfp_enabled = 1;
8493 dc->be_data = MO_TE;
8494 dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_M32, HANDLER);
8495 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
8496 regime_is_secure(env, dc->mmu_idx);
8497 dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_M32, STACKCHECK);
8498 dc->v8m_fpccr_s_wrong =
8499 FIELD_EX32(tb_flags, TBFLAG_M32, FPCCR_S_WRONG);
8500 dc->v7m_new_fp_ctxt_needed =
8501 FIELD_EX32(tb_flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED);
8502 dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_M32, LSPACT);
8503 } else {
8504 dc->be_data =
8505 FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
8506 dc->debug_target_el =
8507 FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
8508 dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
8509 dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
8510 dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
8511 dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
8512 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
8513 dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
8514 } else {
8515 dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
8516 dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
8517 }
8518 }
8519 dc->cp_regs = cpu->cp_regs;
8520 dc->features = env->features;
8522 /* Single step state. The code-generation logic here is:
8523 * SS_ACTIVE == 0:
8524 * generate code with no special handling for single-stepping (except
8525 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
8526 * this happens anyway because those changes are all system register or
8527 * PSTATE writes).
8528 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
8529 * emit code for one insn
8530 * emit code to clear PSTATE.SS
8531 * emit code to generate software step exception for completed step
8532 * end TB (as usual for having generated an exception)
8533 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
8534 * emit code to generate a software step exception
8535 * end the TB
8536 */
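/*
 * (The three cases above are implemented further down in this file:
 * the active-pending check is in arm_pre_translate_insn(), and the
 * active-not-pending "completed step" exception is emitted from
 * arm_tr_tb_stop() when is_singlestepping() is true.)
 */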
8537 dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
8538 dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
8539 dc->is_ldex = false;
8541 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
8543 /* If architectural single step active, limit to 1. */
8544 if (is_singlestepping(dc)) {
8545 dc->base.max_insns = 1;
8546 }
8548 /* ARM is a fixed-length ISA. Bound the number of insns to execute
8549 to those left on the page. */
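/*
 * Worked example of the bound computed below, assuming 4K pages:
 * (pc_first | TARGET_PAGE_MASK) is, as a signed value, the offset
 * within the page minus TARGET_PAGE_SIZE, so negating it gives the
 * bytes left on the page; an offset of 0xff0 leaves 0x10 bytes and
 * hence a bound of 4 instructions.
 */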
8550 if (!dc->thumb) {
8551 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
8552 dc->base.max_insns = MIN(dc->base.max_insns, bound);
8553 }
8555 cpu_V0 = tcg_temp_new_i64();
8556 cpu_V1 = tcg_temp_new_i64();
8557 cpu_M0 = tcg_temp_new_i64();
8558 }
8560 static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
8561 {
8562 DisasContext *dc = container_of(dcbase, DisasContext, base);
8564 /* A note on handling of the condexec (IT) bits:
8566 * We want to avoid the overhead of having to write the updated condexec
8567 * bits back to the CPUARMState for every instruction in an IT block. So:
8568 * (1) if the condexec bits are not already zero then we write
8569 * zero back into the CPUARMState now. This avoids complications trying
8570 * to do it at the end of the block. (For example if we don't do this
8571 * it's hard to identify whether we can safely skip writing condexec
8572 * at the end of the TB, which we definitely want to do for the case
8573 * where a TB doesn't do anything with the IT state at all.)
8574 * (2) if we are going to leave the TB then we call gen_set_condexec()
8575 * which will write the correct value into CPUARMState if zero is wrong.
8576 * This is done both for leaving the TB at the end, and for leaving
8577 * it because of an exception we know will happen, which is done in
8578 * gen_exception_insn(). The latter is necessary because we need to
8579 * leave the TB with the PC/IT state just prior to execution of the
8580 * instruction which caused the exception.
8581 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
8582 * then the CPUARMState will be wrong and we need to reset it.
8583 * This is handled in the same way as restoration of the
8584 * PC in these situations; we save the value of the condexec bits
8585 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
8586 * then uses this to restore them after an exception.
8588 * Note that there are no instructions which can read the condexec
8589 * bits, and none which can write non-static values to them, so
8590 * we don't need to care about whether CPUARMState is correct in the
8591 * middle of a TB.
8592 */
8594 /* Reset the conditional execution bits immediately. This avoids
8595 complications trying to do it at the end of the block. */
8596 if (dc->condexec_mask || dc->condexec_cond) {
8597 TCGv_i32 tmp = tcg_temp_new_i32();
8598 tcg_gen_movi_i32(tmp, 0);
8599 store_cpu_field(tmp, condexec_bits);
8600 }
8601 }
8603 static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
8604 {
8605 DisasContext *dc = container_of(dcbase, DisasContext, base);
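/*
 * The three words recorded below are what restore_state_to_opc() (at
 * the end of this file) unpacks after an exception: data[0] is the PC,
 * data[1] the packed IT/condexec state, and data[2], written as 0 here,
 * carries the syndrome information that restore_state_to_opc() shifts
 * by ARM_INSN_START_WORD2_SHIFT.
 */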
8607 tcg_gen_insn_start(dc->base.pc_next,
8608 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
8609 0);
8610 dc->insn_start = tcg_last_op();
8611 }
8613 static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
8614 const CPUBreakpoint *bp)
8615 {
8616 DisasContext *dc = container_of(dcbase, DisasContext, base);
8618 if (bp->flags & BP_CPU) {
8619 gen_set_condexec(dc);
8620 gen_set_pc_im(dc, dc->base.pc_next);
8621 gen_helper_check_breakpoints(cpu_env);
8622 /* End the TB early; it's likely not going to be executed */
8623 dc->base.is_jmp = DISAS_TOO_MANY;
8624 } else {
8625 gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
8626 /* The address covered by the breakpoint must be
8627 included in [tb->pc, tb->pc + tb->size) in order
8628 for it to be properly cleared -- thus we
8629 increment the PC here so that the logic setting
8630 tb->size below does the right thing. */
8631 /* TODO: Advance PC by correct instruction length to
8632 * avoid disassembler error messages */
8633 dc->base.pc_next += 2;
8634 dc->base.is_jmp = DISAS_NORETURN;
8635 }
8637 return true;
8638 }
8640 static bool arm_pre_translate_insn(DisasContext *dc)
8641 {
8642 #ifdef CONFIG_USER_ONLY
8643 /* Intercept jump to the magic kernel page. */
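/*
 * This is the region where the Linux kuser helpers (TLS read, cmpxchg,
 * memory barrier) live in a linux-user guest; EXCP_KERNEL_TRAP is
 * handled by the linux-user main loop, outside this file, which
 * emulates those helpers instead of translating code from this page.
 */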
8644 if (dc->base.pc_next >= 0xffff0000) {
8645 /* We always get here via a jump, so we know we are not in a
8646 conditional execution block. */
8647 gen_exception_internal(EXCP_KERNEL_TRAP);
8648 dc->base.is_jmp = DISAS_NORETURN;
8649 return true;
8650 }
8651 #endif
8653 if (dc->ss_active && !dc->pstate_ss) {
8654 /* Singlestep state is Active-pending.
8655 * If we're in this state at the start of a TB then either
8656 * a) we just took an exception to an EL which is being debugged
8657 * and this is the first insn in the exception handler
8658 * b) debug exceptions were masked and we just unmasked them
8659 * without changing EL (eg by clearing PSTATE.D)
8660 * In either case we're going to take a swstep exception in the
8661 * "did not step an insn" case, and so the syndrome ISV and EX
8662 * bits should be zero.
8663 */
8664 assert(dc->base.num_insns == 1);
8665 gen_swstep_exception(dc, 0, 0);
8666 dc->base.is_jmp = DISAS_NORETURN;
8667 return true;
8668 }
8670 return false;
8671 }
8673 static void arm_post_translate_insn(DisasContext *dc)
8674 {
8675 if (dc->condjmp && !dc->base.is_jmp) {
8676 gen_set_label(dc->condlabel);
8677 dc->condjmp = 0;
8678 }
8679 translator_loop_temp_check(&dc->base);
8680 }
8682 static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
8683 {
8684 DisasContext *dc = container_of(dcbase, DisasContext, base);
8685 CPUARMState *env = cpu->env_ptr;
8686 unsigned int insn;
8688 if (arm_pre_translate_insn(dc)) {
8689 return;
8690 }
8692 dc->pc_curr = dc->base.pc_next;
8693 insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
8694 dc->insn = insn;
8695 dc->base.pc_next += 4;
8696 disas_arm_insn(dc, insn);
8698 arm_post_translate_insn(dc);
8700 /* ARM is a fixed-length ISA. We performed the cross-page check
8701 in init_disas_context by adjusting max_insns. */
8702 }
8704 static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
8705 {
8706 /* Return true if this Thumb insn is always unconditional,
8707 * even inside an IT block. This is true of only a very few
8708 * instructions: BKPT, HLT, and SG.
8710 * A larger class of instructions are UNPREDICTABLE if used
8711 * inside an IT block; we do not need to detect those here, because
8712 * what we do by default (perform the cc check and update the IT
8713 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
8714 * choice for those situations.
8716 * insn is either a 16-bit or a 32-bit instruction; the two are
8717 * distinguishable because for the 16-bit case the top 16 bits
8718 * are zeroes, and that isn't a valid 32-bit encoding.
8719 */
8720 if ((insn & 0xffffff00) == 0xbe00) {
8721 /* BKPT */
8722 return true;
8723 }
8725 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
8726 !arm_dc_feature(s, ARM_FEATURE_M)) {
8727 /* HLT: v8A only. This is unconditional even when it is going to
8728 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
8729 * For v7 cores this was a plain old undefined encoding and so
8730 * honours its cc check. (We might be using the encoding as
8731 * a semihosting trap, but we don't change the cc check behaviour
8732 * on that account, because a debugger connected to a real v7A
8733 * core and emulating semihosting traps by catching the UNDEF
8734 * exception would also only see cases where the cc check passed.
8735 * No guest code should be trying to do a HLT semihosting trap
8736 * in an IT block anyway.
8737 */
8738 return true;
8739 }
8741 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
8742 arm_dc_feature(s, ARM_FEATURE_M)) {
8743 /* SG: v8M only */
8744 return true;
8745 }
8747 return false;
8748 }
8750 static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
8751 {
8752 DisasContext *dc = container_of(dcbase, DisasContext, base);
8753 CPUARMState *env = cpu->env_ptr;
8754 uint32_t insn;
8755 bool is_16bit;
8757 if (arm_pre_translate_insn(dc)) {
8758 return;
8759 }
8761 dc->pc_curr = dc->base.pc_next;
8762 insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
8763 is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
8764 dc->base.pc_next += 2;
8765 if (!is_16bit) {
8766 uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
8768 insn = insn << 16 | insn2;
8769 dc->base.pc_next += 2;
8770 }
8771 dc->insn = insn;
8773 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
8774 uint32_t cond = dc->condexec_cond;
8776 /*
8777 * Conditionally skip the insn. Note that both 0xe and 0xf mean
8778 * "always"; 0xf is not "never".
8779 */
8780 if (cond < 0x0e) {
8781 arm_skip_unless(dc, cond);
8782 }
8783 }
8785 if (is_16bit) {
8786 disas_thumb_insn(dc, insn);
8787 } else {
8788 disas_thumb2_insn(dc, insn);
8789 }
8791 /* Advance the Thumb condexec condition. */
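/*
 * Sketch of how this advance works for "ITTE EQ" (ITSTATE 0b0000_0110):
 * translation starts with condexec_cond == 0x0 (EQ) and condexec_mask
 * == 0b01100; the update below leaves EQ for the second insn, shifts in
 * a 1 to give NE (0x1) for the third, and then the mask runs out and
 * the cond field is cleared.
 */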
8792 if (dc->condexec_mask) {
8793 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
8794 ((dc->condexec_mask >> 4) & 1));
8795 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8796 if (dc->condexec_mask == 0) {
8797 dc->condexec_cond = 0;
8798 }
8799 }
8801 arm_post_translate_insn(dc);
8803 /* Thumb is a variable-length ISA. Stop translation when the next insn
8804 * will touch a new page. This ensures that prefetch aborts occur at
8805 * the right place.
8807 * We want to stop the TB if the next insn starts in a new page,
8808 * or if it spans between this page and the next. This means that
8809 * if we're looking at the last halfword in the page we need to
8810 * see if it's a 16-bit Thumb insn (which will fit in this TB)
8811 * or a 32-bit Thumb insn (which won't).
8812 * This is to avoid generating a silly TB with a single 16-bit insn
8813 * in it at the end of this page (which would execute correctly
8814 * but isn't very efficient).
8815 */
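/*
 * Concretely (assuming 4K pages): the TARGET_PAGE_SIZE - 3 test catches
 * the case where pc_next points at the final halfword of the page, and
 * insn_crosses_page() then decides whether that halfword starts a
 * 32-bit insn which would spill onto the next page.
 */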
8816 if (dc->base.is_jmp == DISAS_NEXT
8817 && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
8818 || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
8819 && insn_crosses_page(env, dc)))) {
8820 dc->base.is_jmp = DISAS_TOO_MANY;
8821 }
8822 }
8824 static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
8825 {
8826 DisasContext *dc = container_of(dcbase, DisasContext, base);
8828 if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
8829 /* FIXME: This can theoretically happen with self-modifying code. */
8830 cpu_abort(cpu, "IO on conditional branch instruction");
8831 }
8833 /* At this stage dc->condjmp will only be set when the skipped
8834 instruction was a conditional branch or trap, and the PC has
8835 already been written. */
8836 gen_set_condexec(dc);
8837 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
8838 /* Exception return branches need some special case code at the
8839 * end of the TB, which is complex enough that it has to
8840 * handle the single-step vs not and the condition-failed
8841 * insn codepath itself.
8842 */
8843 gen_bx_excret_final_code(dc);
8844 } else if (unlikely(is_singlestepping(dc))) {
8845 /* Unconditional and "condition passed" instruction codepath. */
8846 switch (dc->base.is_jmp) {
8847 case DISAS_SWI:
8848 gen_ss_advance(dc);
8849 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
8850 default_exception_el(dc));
8851 break;
8852 case DISAS_HVC:
8853 gen_ss_advance(dc);
8854 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
8855 break;
8856 case DISAS_SMC:
8857 gen_ss_advance(dc);
8858 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
8859 break;
8860 case DISAS_NEXT:
8861 case DISAS_TOO_MANY:
8862 case DISAS_UPDATE_EXIT:
8863 case DISAS_UPDATE_NOCHAIN:
8864 gen_set_pc_im(dc, dc->base.pc_next);
8865 /* fall through */
8866 default:
8867 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
8868 gen_singlestep_exception(dc);
8869 break;
8870 case DISAS_NORETURN:
8871 break;
8872 }
8873 } else {
8874 /* While branches must always occur at the end of an IT block,
8875 there are a few other things that can cause us to terminate
8876 the TB in the middle of an IT block:
8877 - Exception generating instructions (bkpt, swi, undefined).
8878 - Page boundaries.
8879 - Hardware watchpoints.
8880 Hardware breakpoints have already been handled and skip this code.
8881 */
8882 switch(dc->base.is_jmp) {
8883 case DISAS_NEXT:
8884 case DISAS_TOO_MANY:
8885 gen_goto_tb(dc, 1, dc->base.pc_next);
8886 break;
8887 case DISAS_UPDATE_NOCHAIN:
8888 gen_set_pc_im(dc, dc->base.pc_next);
8889 /* fall through */
8890 case DISAS_JUMP:
8891 gen_goto_ptr();
8892 break;
8893 case DISAS_UPDATE_EXIT:
8894 gen_set_pc_im(dc, dc->base.pc_next);
8895 /* fall through */
8896 default:
8897 /* indicate that the hash table must be used to find the next TB */
8898 tcg_gen_exit_tb(NULL, 0);
8899 break;
8900 case DISAS_NORETURN:
8901 /* nothing more to generate */
8902 break;
8903 case DISAS_WFI:
8904 {
8905 TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
8906 !(dc->insn & (1U << 31))) ? 2 : 4);
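/*
 * The constant is the byte length of the WFI insn passed to the helper:
 * for a 16-bit Thumb encoding dc->insn holds only a halfword (see
 * thumb_tr_translate_insn above), so bit 31 is clear and we pass 2;
 * otherwise we pass 4.
 */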
8908 gen_helper_wfi(cpu_env, tmp);
8909 tcg_temp_free_i32(tmp);
8910 /* The helper doesn't necessarily throw an exception, but we
8911 * must go back to the main loop to check for interrupts anyway.
8912 */
8913 tcg_gen_exit_tb(NULL, 0);
8914 break;
8915 }
8916 case DISAS_WFE:
8917 gen_helper_wfe(cpu_env);
8918 break;
8919 case DISAS_YIELD:
8920 gen_helper_yield(cpu_env);
8921 break;
8922 case DISAS_SWI:
8923 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
8924 default_exception_el(dc));
8925 break;
8926 case DISAS_HVC:
8927 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
8928 break;
8929 case DISAS_SMC:
8930 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
8931 break;
8932 }
8933 }
8935 if (dc->condjmp) {
8936 /* "Condition failed" instruction codepath for the branch/trap insn */
8937 gen_set_label(dc->condlabel);
8938 gen_set_condexec(dc);
8939 if (unlikely(is_singlestepping(dc))) {
8940 gen_set_pc_im(dc, dc->base.pc_next);
8941 gen_singlestep_exception(dc);
8942 } else {
8943 gen_goto_tb(dc, 1, dc->base.pc_next);
8944 }
8945 }
8946 }
8948 static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
8949 {
8950 DisasContext *dc = container_of(dcbase, DisasContext, base);
8952 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
8953 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
8954 }
8956 static const TranslatorOps arm_translator_ops = {
8957 .init_disas_context = arm_tr_init_disas_context,
8958 .tb_start = arm_tr_tb_start,
8959 .insn_start = arm_tr_insn_start,
8960 .breakpoint_check = arm_tr_breakpoint_check,
8961 .translate_insn = arm_tr_translate_insn,
8962 .tb_stop = arm_tr_tb_stop,
8963 .disas_log = arm_tr_disas_log,
8964 };
8966 static const TranslatorOps thumb_translator_ops = {
8967 .init_disas_context = arm_tr_init_disas_context,
8968 .tb_start = arm_tr_tb_start,
8969 .insn_start = arm_tr_insn_start,
8970 .breakpoint_check = arm_tr_breakpoint_check,
8971 .translate_insn = thumb_tr_translate_insn,
8972 .tb_stop = arm_tr_tb_stop,
8973 .disas_log = arm_tr_disas_log,
8974 };
8976 /* generate intermediate code for basic block 'tb'. */
8977 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
8978 {
8979 DisasContext dc = { };
8980 const TranslatorOps *ops = &arm_translator_ops;
8982 if (FIELD_EX32(tb->flags, TBFLAG_AM32, THUMB)) {
8983 ops = &thumb_translator_ops;
8984 }
8985 #ifdef TARGET_AARCH64
8986 if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
8987 ops = &aarch64_translator_ops;
8988 }
8989 #endif
8991 translator_loop(ops, &dc.base, cpu, tb, max_insns);
8992 }
8994 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
8995 target_ulong *data)
8996 {
8997 if (is_a64(env)) {
8998 env->pc = data[0];
8999 env->condexec_bits = 0;
9000 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
9001 } else {
9002 env->regs[15] = data[0];
9003 env->condexec_bits = data[1];
9004 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;