4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
28 #include "tcg-op-gvec.h"
30 #include "qemu/bitops.h"
31 #include "qemu/qemu-print.h"
33 #include "hw/semihosting/semihost.h"
35 #include "exec/helper-proto.h"
36 #include "exec/helper-gen.h"
38 #include "trace-tcg.h"
42 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
43 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
44 /* currently all emulated v5 cores are also v5TE, so don't bother */
45 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
46 #define ENABLE_ARCH_5J dc_isar_feature(jazelle, s)
47 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
48 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
49 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
50 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
51 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
53 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
55 #include "translate.h"
57 #if defined(CONFIG_USER_ONLY)
60 #define IS_USER(s) (s->user)
63 /* We reuse the same 64-bit temporaries for efficiency. */
64 static TCGv_i64 cpu_V0
, cpu_V1
, cpu_M0
;
65 static TCGv_i32 cpu_R
[16];
66 TCGv_i32 cpu_CF
, cpu_NF
, cpu_VF
, cpu_ZF
;
67 TCGv_i64 cpu_exclusive_addr
;
68 TCGv_i64 cpu_exclusive_val
;
70 /* FIXME: These should be removed. */
71 static TCGv_i32 cpu_F0s
, cpu_F1s
;
72 static TCGv_i64 cpu_F0d
, cpu_F1d
;
74 #include "exec/gen-icount.h"
76 static const char * const regnames
[] =
77 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
78 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
80 /* Function prototypes for gen_ functions calling Neon helpers. */
81 typedef void NeonGenThreeOpEnvFn(TCGv_i32
, TCGv_env
, TCGv_i32
,
84 /* initialize TCG globals. */
85 void arm_translate_init(void)
89 for (i
= 0; i
< 16; i
++) {
90 cpu_R
[i
] = tcg_global_mem_new_i32(cpu_env
,
91 offsetof(CPUARMState
, regs
[i
]),
94 cpu_CF
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUARMState
, CF
), "CF");
95 cpu_NF
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUARMState
, NF
), "NF");
96 cpu_VF
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUARMState
, VF
), "VF");
97 cpu_ZF
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUARMState
, ZF
), "ZF");
99 cpu_exclusive_addr
= tcg_global_mem_new_i64(cpu_env
,
100 offsetof(CPUARMState
, exclusive_addr
), "exclusive_addr");
101 cpu_exclusive_val
= tcg_global_mem_new_i64(cpu_env
,
102 offsetof(CPUARMState
, exclusive_val
), "exclusive_val");
104 a64_translate_init();
/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;
119 /* Save the syndrome information for a Data Abort */
120 static void disas_set_da_iss(DisasContext
*s
, TCGMemOp memop
, ISSInfo issinfo
)
123 int sas
= memop
& MO_SIZE
;
124 bool sse
= memop
& MO_SIGN
;
125 bool is_acqrel
= issinfo
& ISSIsAcqRel
;
126 bool is_write
= issinfo
& ISSIsWrite
;
127 bool is_16bit
= issinfo
& ISSIs16Bit
;
128 int srt
= issinfo
& ISSRegMask
;
130 if (issinfo
& ISSInvalid
) {
131 /* Some callsites want to conditionally provide ISS info,
132 * eg "only if this was not a writeback"
138 /* For AArch32, insns where the src/dest is R15 never generate
139 * ISS information. Catching that here saves checking at all
145 syn
= syn_data_abort_with_iss(0, sas
, sse
, srt
, 0, is_acqrel
,
146 0, 0, 0, is_write
, 0, is_16bit
);
147 disas_set_insn_syndrome(s
, syn
);
150 static inline int get_a32_user_mem_index(DisasContext
*s
)
152 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
154 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
155 * otherwise, access as if at PL0.
157 switch (s
->mmu_idx
) {
158 case ARMMMUIdx_S1E2
: /* this one is UNPREDICTABLE */
159 case ARMMMUIdx_S12NSE0
:
160 case ARMMMUIdx_S12NSE1
:
161 return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0
);
163 case ARMMMUIdx_S1SE0
:
164 case ARMMMUIdx_S1SE1
:
165 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0
);
166 case ARMMMUIdx_MUser
:
167 case ARMMMUIdx_MPriv
:
168 return arm_to_core_mmu_idx(ARMMMUIdx_MUser
);
169 case ARMMMUIdx_MUserNegPri
:
170 case ARMMMUIdx_MPrivNegPri
:
171 return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri
);
172 case ARMMMUIdx_MSUser
:
173 case ARMMMUIdx_MSPriv
:
174 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser
);
175 case ARMMMUIdx_MSUserNegPri
:
176 case ARMMMUIdx_MSPrivNegPri
:
177 return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri
);
180 g_assert_not_reached();
184 static inline TCGv_i32
load_cpu_offset(int offset
)
186 TCGv_i32 tmp
= tcg_temp_new_i32();
187 tcg_gen_ld_i32(tmp
, cpu_env
, offset
);
191 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
193 static inline void store_cpu_offset(TCGv_i32 var
, int offset
)
195 tcg_gen_st_i32(var
, cpu_env
, offset
);
196 tcg_temp_free_i32(var
);
199 #define store_cpu_field(var, name) \
200 store_cpu_offset(var, offsetof(CPUARMState, name))
202 /* Set a variable to the value of a CPU register. */
203 static void load_reg_var(DisasContext
*s
, TCGv_i32 var
, int reg
)
207 /* normally, since we updated PC, we need only to add one insn */
209 addr
= (long)s
->pc
+ 2;
211 addr
= (long)s
->pc
+ 4;
212 tcg_gen_movi_i32(var
, addr
);
214 tcg_gen_mov_i32(var
, cpu_R
[reg
]);
218 /* Create a new temporary and set it to the value of a CPU register. */
219 static inline TCGv_i32
load_reg(DisasContext
*s
, int reg
)
221 TCGv_i32 tmp
= tcg_temp_new_i32();
222 load_reg_var(s
, tmp
, reg
);
226 /* Set a CPU register. The source must be a temporary and will be
228 static void store_reg(DisasContext
*s
, int reg
, TCGv_i32 var
)
231 /* In Thumb mode, we must ignore bit 0.
232 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
233 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
234 * We choose to ignore [1:0] in ARM mode for all architecture versions.
236 tcg_gen_andi_i32(var
, var
, s
->thumb
? ~1 : ~3);
237 s
->base
.is_jmp
= DISAS_JUMP
;
239 tcg_gen_mov_i32(cpu_R
[reg
], var
);
240 tcg_temp_free_i32(var
);
244 * Variant of store_reg which applies v8M stack-limit checks before updating
245 * SP. If the check fails this will result in an exception being taken.
246 * We disable the stack checks for CONFIG_USER_ONLY because we have
247 * no idea what the stack limits should be in that case.
248 * If stack checking is not being done this just acts like store_reg().
250 static void store_sp_checked(DisasContext
*s
, TCGv_i32 var
)
252 #ifndef CONFIG_USER_ONLY
253 if (s
->v8m_stackcheck
) {
254 gen_helper_v8m_stackcheck(cpu_env
, var
);
257 store_reg(s
, 13, var
);
260 /* Value extensions. */
261 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
262 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
263 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
264 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
266 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
267 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
270 static inline void gen_set_cpsr(TCGv_i32 var
, uint32_t mask
)
272 TCGv_i32 tmp_mask
= tcg_const_i32(mask
);
273 gen_helper_cpsr_write(cpu_env
, var
, tmp_mask
);
274 tcg_temp_free_i32(tmp_mask
);
276 /* Set NZCV flags from the high 4 bits of var. */
277 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
279 static void gen_exception_internal(int excp
)
281 TCGv_i32 tcg_excp
= tcg_const_i32(excp
);
283 assert(excp_is_internal(excp
));
284 gen_helper_exception_internal(cpu_env
, tcg_excp
);
285 tcg_temp_free_i32(tcg_excp
);
288 static void gen_exception(int excp
, uint32_t syndrome
, uint32_t target_el
)
290 TCGv_i32 tcg_excp
= tcg_const_i32(excp
);
291 TCGv_i32 tcg_syn
= tcg_const_i32(syndrome
);
292 TCGv_i32 tcg_el
= tcg_const_i32(target_el
);
294 gen_helper_exception_with_syndrome(cpu_env
, tcg_excp
,
297 tcg_temp_free_i32(tcg_el
);
298 tcg_temp_free_i32(tcg_syn
);
299 tcg_temp_free_i32(tcg_excp
);
302 static void gen_step_complete_exception(DisasContext
*s
)
304 /* We just completed step of an insn. Move from Active-not-pending
305 * to Active-pending, and then also take the swstep exception.
306 * This corresponds to making the (IMPDEF) choice to prioritize
307 * swstep exceptions over asynchronous exceptions taken to an exception
308 * level where debug is disabled. This choice has the advantage that
309 * we do not need to maintain internal state corresponding to the
310 * ISV/EX syndrome bits between completion of the step and generation
311 * of the exception, and our syndrome information is always correct.
314 gen_exception(EXCP_UDEF
, syn_swstep(s
->ss_same_el
, 1, s
->is_ldex
),
315 default_exception_el(s
));
316 s
->base
.is_jmp
= DISAS_NORETURN
;
319 static void gen_singlestep_exception(DisasContext
*s
)
321 /* Generate the right kind of exception for singlestep, which is
322 * either the architectural singlestep or EXCP_DEBUG for QEMU's
323 * gdb singlestepping.
326 gen_step_complete_exception(s
);
328 gen_exception_internal(EXCP_DEBUG
);
332 static inline bool is_singlestepping(DisasContext
*s
)
334 /* Return true if we are singlestepping either because of
335 * architectural singlestep or QEMU gdbstub singlestep. This does
336 * not include the command line '-singlestep' mode which is rather
337 * misnamed as it only means "one instruction per TB" and doesn't
338 * affect the code we generate.
340 return s
->base
.singlestep_enabled
|| s
->ss_active
;
343 static void gen_smul_dual(TCGv_i32 a
, TCGv_i32 b
)
345 TCGv_i32 tmp1
= tcg_temp_new_i32();
346 TCGv_i32 tmp2
= tcg_temp_new_i32();
347 tcg_gen_ext16s_i32(tmp1
, a
);
348 tcg_gen_ext16s_i32(tmp2
, b
);
349 tcg_gen_mul_i32(tmp1
, tmp1
, tmp2
);
350 tcg_temp_free_i32(tmp2
);
351 tcg_gen_sari_i32(a
, a
, 16);
352 tcg_gen_sari_i32(b
, b
, 16);
353 tcg_gen_mul_i32(b
, b
, a
);
354 tcg_gen_mov_i32(a
, tmp1
);
355 tcg_temp_free_i32(tmp1
);
358 /* Byteswap each halfword. */
359 static void gen_rev16(TCGv_i32 var
)
361 TCGv_i32 tmp
= tcg_temp_new_i32();
362 TCGv_i32 mask
= tcg_const_i32(0x00ff00ff);
363 tcg_gen_shri_i32(tmp
, var
, 8);
364 tcg_gen_and_i32(tmp
, tmp
, mask
);
365 tcg_gen_and_i32(var
, var
, mask
);
366 tcg_gen_shli_i32(var
, var
, 8);
367 tcg_gen_or_i32(var
, var
, tmp
);
368 tcg_temp_free_i32(mask
);
369 tcg_temp_free_i32(tmp
);
372 /* Byteswap low halfword and sign extend. */
373 static void gen_revsh(TCGv_i32 var
)
375 tcg_gen_ext16u_i32(var
, var
);
376 tcg_gen_bswap16_i32(var
, var
);
377 tcg_gen_ext16s_i32(var
, var
);
380 /* Return (b << 32) + a. Mark inputs as dead */
381 static TCGv_i64
gen_addq_msw(TCGv_i64 a
, TCGv_i32 b
)
383 TCGv_i64 tmp64
= tcg_temp_new_i64();
385 tcg_gen_extu_i32_i64(tmp64
, b
);
386 tcg_temp_free_i32(b
);
387 tcg_gen_shli_i64(tmp64
, tmp64
, 32);
388 tcg_gen_add_i64(a
, tmp64
, a
);
390 tcg_temp_free_i64(tmp64
);
394 /* Return (b << 32) - a. Mark inputs as dead. */
395 static TCGv_i64
gen_subq_msw(TCGv_i64 a
, TCGv_i32 b
)
397 TCGv_i64 tmp64
= tcg_temp_new_i64();
399 tcg_gen_extu_i32_i64(tmp64
, b
);
400 tcg_temp_free_i32(b
);
401 tcg_gen_shli_i64(tmp64
, tmp64
, 32);
402 tcg_gen_sub_i64(a
, tmp64
, a
);
404 tcg_temp_free_i64(tmp64
);
408 /* 32x32->64 multiply. Marks inputs as dead. */
409 static TCGv_i64
gen_mulu_i64_i32(TCGv_i32 a
, TCGv_i32 b
)
411 TCGv_i32 lo
= tcg_temp_new_i32();
412 TCGv_i32 hi
= tcg_temp_new_i32();
415 tcg_gen_mulu2_i32(lo
, hi
, a
, b
);
416 tcg_temp_free_i32(a
);
417 tcg_temp_free_i32(b
);
419 ret
= tcg_temp_new_i64();
420 tcg_gen_concat_i32_i64(ret
, lo
, hi
);
421 tcg_temp_free_i32(lo
);
422 tcg_temp_free_i32(hi
);
427 static TCGv_i64
gen_muls_i64_i32(TCGv_i32 a
, TCGv_i32 b
)
429 TCGv_i32 lo
= tcg_temp_new_i32();
430 TCGv_i32 hi
= tcg_temp_new_i32();
433 tcg_gen_muls2_i32(lo
, hi
, a
, b
);
434 tcg_temp_free_i32(a
);
435 tcg_temp_free_i32(b
);
437 ret
= tcg_temp_new_i64();
438 tcg_gen_concat_i32_i64(ret
, lo
, hi
);
439 tcg_temp_free_i32(lo
);
440 tcg_temp_free_i32(hi
);
445 /* Swap low and high halfwords. */
446 static void gen_swap_half(TCGv_i32 var
)
448 TCGv_i32 tmp
= tcg_temp_new_i32();
449 tcg_gen_shri_i32(tmp
, var
, 16);
450 tcg_gen_shli_i32(var
, var
, 16);
451 tcg_gen_or_i32(var
, var
, tmp
);
452 tcg_temp_free_i32(tmp
);
455 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
456 tmp = (t0 ^ t1) & 0x8000;
459 t0 = (t0 + t1) ^ tmp;
462 static void gen_add16(TCGv_i32 t0
, TCGv_i32 t1
)
464 TCGv_i32 tmp
= tcg_temp_new_i32();
465 tcg_gen_xor_i32(tmp
, t0
, t1
);
466 tcg_gen_andi_i32(tmp
, tmp
, 0x8000);
467 tcg_gen_andi_i32(t0
, t0
, ~0x8000);
468 tcg_gen_andi_i32(t1
, t1
, ~0x8000);
469 tcg_gen_add_i32(t0
, t0
, t1
);
470 tcg_gen_xor_i32(t0
, t0
, tmp
);
471 tcg_temp_free_i32(tmp
);
472 tcg_temp_free_i32(t1
);
475 /* Set CF to the top bit of var. */
476 static void gen_set_CF_bit31(TCGv_i32 var
)
478 tcg_gen_shri_i32(cpu_CF
, var
, 31);
481 /* Set N and Z flags from var. */
482 static inline void gen_logic_CC(TCGv_i32 var
)
484 tcg_gen_mov_i32(cpu_NF
, var
);
485 tcg_gen_mov_i32(cpu_ZF
, var
);
489 static void gen_adc(TCGv_i32 t0
, TCGv_i32 t1
)
491 tcg_gen_add_i32(t0
, t0
, t1
);
492 tcg_gen_add_i32(t0
, t0
, cpu_CF
);
495 /* dest = T0 + T1 + CF. */
496 static void gen_add_carry(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
498 tcg_gen_add_i32(dest
, t0
, t1
);
499 tcg_gen_add_i32(dest
, dest
, cpu_CF
);
502 /* dest = T0 - T1 + CF - 1. */
503 static void gen_sub_carry(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
505 tcg_gen_sub_i32(dest
, t0
, t1
);
506 tcg_gen_add_i32(dest
, dest
, cpu_CF
);
507 tcg_gen_subi_i32(dest
, dest
, 1);
510 /* dest = T0 + T1. Compute C, N, V and Z flags */
511 static void gen_add_CC(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
513 TCGv_i32 tmp
= tcg_temp_new_i32();
514 tcg_gen_movi_i32(tmp
, 0);
515 tcg_gen_add2_i32(cpu_NF
, cpu_CF
, t0
, tmp
, t1
, tmp
);
516 tcg_gen_mov_i32(cpu_ZF
, cpu_NF
);
517 tcg_gen_xor_i32(cpu_VF
, cpu_NF
, t0
);
518 tcg_gen_xor_i32(tmp
, t0
, t1
);
519 tcg_gen_andc_i32(cpu_VF
, cpu_VF
, tmp
);
520 tcg_temp_free_i32(tmp
);
521 tcg_gen_mov_i32(dest
, cpu_NF
);
524 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
525 static void gen_adc_CC(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
527 TCGv_i32 tmp
= tcg_temp_new_i32();
528 if (TCG_TARGET_HAS_add2_i32
) {
529 tcg_gen_movi_i32(tmp
, 0);
530 tcg_gen_add2_i32(cpu_NF
, cpu_CF
, t0
, tmp
, cpu_CF
, tmp
);
531 tcg_gen_add2_i32(cpu_NF
, cpu_CF
, cpu_NF
, cpu_CF
, t1
, tmp
);
533 TCGv_i64 q0
= tcg_temp_new_i64();
534 TCGv_i64 q1
= tcg_temp_new_i64();
535 tcg_gen_extu_i32_i64(q0
, t0
);
536 tcg_gen_extu_i32_i64(q1
, t1
);
537 tcg_gen_add_i64(q0
, q0
, q1
);
538 tcg_gen_extu_i32_i64(q1
, cpu_CF
);
539 tcg_gen_add_i64(q0
, q0
, q1
);
540 tcg_gen_extr_i64_i32(cpu_NF
, cpu_CF
, q0
);
541 tcg_temp_free_i64(q0
);
542 tcg_temp_free_i64(q1
);
544 tcg_gen_mov_i32(cpu_ZF
, cpu_NF
);
545 tcg_gen_xor_i32(cpu_VF
, cpu_NF
, t0
);
546 tcg_gen_xor_i32(tmp
, t0
, t1
);
547 tcg_gen_andc_i32(cpu_VF
, cpu_VF
, tmp
);
548 tcg_temp_free_i32(tmp
);
549 tcg_gen_mov_i32(dest
, cpu_NF
);
552 /* dest = T0 - T1. Compute C, N, V and Z flags */
553 static void gen_sub_CC(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
556 tcg_gen_sub_i32(cpu_NF
, t0
, t1
);
557 tcg_gen_mov_i32(cpu_ZF
, cpu_NF
);
558 tcg_gen_setcond_i32(TCG_COND_GEU
, cpu_CF
, t0
, t1
);
559 tcg_gen_xor_i32(cpu_VF
, cpu_NF
, t0
);
560 tmp
= tcg_temp_new_i32();
561 tcg_gen_xor_i32(tmp
, t0
, t1
);
562 tcg_gen_and_i32(cpu_VF
, cpu_VF
, tmp
);
563 tcg_temp_free_i32(tmp
);
564 tcg_gen_mov_i32(dest
, cpu_NF
);
567 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
568 static void gen_sbc_CC(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
570 TCGv_i32 tmp
= tcg_temp_new_i32();
571 tcg_gen_not_i32(tmp
, t1
);
572 gen_adc_CC(dest
, t0
, tmp
);
573 tcg_temp_free_i32(tmp
);
576 #define GEN_SHIFT(name) \
577 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
579 TCGv_i32 tmp1, tmp2, tmp3; \
580 tmp1 = tcg_temp_new_i32(); \
581 tcg_gen_andi_i32(tmp1, t1, 0xff); \
582 tmp2 = tcg_const_i32(0); \
583 tmp3 = tcg_const_i32(0x1f); \
584 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
585 tcg_temp_free_i32(tmp3); \
586 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
587 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
588 tcg_temp_free_i32(tmp2); \
589 tcg_temp_free_i32(tmp1); \
595 static void gen_sar(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
598 tmp1
= tcg_temp_new_i32();
599 tcg_gen_andi_i32(tmp1
, t1
, 0xff);
600 tmp2
= tcg_const_i32(0x1f);
601 tcg_gen_movcond_i32(TCG_COND_GTU
, tmp1
, tmp1
, tmp2
, tmp2
, tmp1
);
602 tcg_temp_free_i32(tmp2
);
603 tcg_gen_sar_i32(dest
, t0
, tmp1
);
604 tcg_temp_free_i32(tmp1
);
607 static void shifter_out_im(TCGv_i32 var
, int shift
)
610 tcg_gen_andi_i32(cpu_CF
, var
, 1);
612 tcg_gen_shri_i32(cpu_CF
, var
, shift
);
614 tcg_gen_andi_i32(cpu_CF
, cpu_CF
, 1);
619 /* Shift by immediate. Includes special handling for shift == 0. */
620 static inline void gen_arm_shift_im(TCGv_i32 var
, int shiftop
,
621 int shift
, int flags
)
627 shifter_out_im(var
, 32 - shift
);
628 tcg_gen_shli_i32(var
, var
, shift
);
634 tcg_gen_shri_i32(cpu_CF
, var
, 31);
636 tcg_gen_movi_i32(var
, 0);
639 shifter_out_im(var
, shift
- 1);
640 tcg_gen_shri_i32(var
, var
, shift
);
647 shifter_out_im(var
, shift
- 1);
650 tcg_gen_sari_i32(var
, var
, shift
);
652 case 3: /* ROR/RRX */
655 shifter_out_im(var
, shift
- 1);
656 tcg_gen_rotri_i32(var
, var
, shift
); break;
658 TCGv_i32 tmp
= tcg_temp_new_i32();
659 tcg_gen_shli_i32(tmp
, cpu_CF
, 31);
661 shifter_out_im(var
, 0);
662 tcg_gen_shri_i32(var
, var
, 1);
663 tcg_gen_or_i32(var
, var
, tmp
);
664 tcg_temp_free_i32(tmp
);
669 static inline void gen_arm_shift_reg(TCGv_i32 var
, int shiftop
,
670 TCGv_i32 shift
, int flags
)
674 case 0: gen_helper_shl_cc(var
, cpu_env
, var
, shift
); break;
675 case 1: gen_helper_shr_cc(var
, cpu_env
, var
, shift
); break;
676 case 2: gen_helper_sar_cc(var
, cpu_env
, var
, shift
); break;
677 case 3: gen_helper_ror_cc(var
, cpu_env
, var
, shift
); break;
682 gen_shl(var
, var
, shift
);
685 gen_shr(var
, var
, shift
);
688 gen_sar(var
, var
, shift
);
690 case 3: tcg_gen_andi_i32(shift
, shift
, 0x1f);
691 tcg_gen_rotr_i32(var
, var
, shift
); break;
694 tcg_temp_free_i32(shift
);
697 #define PAS_OP(pfx) \
699 case 0: gen_pas_helper(glue(pfx,add16)); break; \
700 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
701 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
702 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
703 case 4: gen_pas_helper(glue(pfx,add8)); break; \
704 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
706 static void gen_arm_parallel_addsub(int op1
, int op2
, TCGv_i32 a
, TCGv_i32 b
)
711 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
713 tmp
= tcg_temp_new_ptr();
714 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
716 tcg_temp_free_ptr(tmp
);
719 tmp
= tcg_temp_new_ptr();
720 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
722 tcg_temp_free_ptr(tmp
);
724 #undef gen_pas_helper
725 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
738 #undef gen_pas_helper
743 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
744 #define PAS_OP(pfx) \
746 case 0: gen_pas_helper(glue(pfx,add8)); break; \
747 case 1: gen_pas_helper(glue(pfx,add16)); break; \
748 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
749 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
750 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
751 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
753 static void gen_thumb2_parallel_addsub(int op1
, int op2
, TCGv_i32 a
, TCGv_i32 b
)
758 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
760 tmp
= tcg_temp_new_ptr();
761 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
763 tcg_temp_free_ptr(tmp
);
766 tmp
= tcg_temp_new_ptr();
767 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
769 tcg_temp_free_ptr(tmp
);
771 #undef gen_pas_helper
772 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
785 #undef gen_pas_helper
791 * Generate a conditional based on ARM condition code cc.
792 * This is common between ARM and Aarch64 targets.
794 void arm_test_cc(DisasCompare
*cmp
, int cc
)
825 case 8: /* hi: C && !Z */
826 case 9: /* ls: !C || Z -> !(C && !Z) */
828 value
= tcg_temp_new_i32();
830 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
831 ZF is non-zero for !Z; so AND the two subexpressions. */
832 tcg_gen_neg_i32(value
, cpu_CF
);
833 tcg_gen_and_i32(value
, value
, cpu_ZF
);
836 case 10: /* ge: N == V -> N ^ V == 0 */
837 case 11: /* lt: N != V -> N ^ V != 0 */
838 /* Since we're only interested in the sign bit, == 0 is >= 0. */
840 value
= tcg_temp_new_i32();
842 tcg_gen_xor_i32(value
, cpu_VF
, cpu_NF
);
845 case 12: /* gt: !Z && N == V */
846 case 13: /* le: Z || N != V */
848 value
= tcg_temp_new_i32();
850 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
851 * the sign bit then AND with ZF to yield the result. */
852 tcg_gen_xor_i32(value
, cpu_VF
, cpu_NF
);
853 tcg_gen_sari_i32(value
, value
, 31);
854 tcg_gen_andc_i32(value
, cpu_ZF
, value
);
857 case 14: /* always */
858 case 15: /* always */
859 /* Use the ALWAYS condition, which will fold early.
860 * It doesn't matter what we use for the value. */
861 cond
= TCG_COND_ALWAYS
;
866 fprintf(stderr
, "Bad condition code 0x%x\n", cc
);
871 cond
= tcg_invert_cond(cond
);
877 cmp
->value_global
= global
;
880 void arm_free_cc(DisasCompare
*cmp
)
882 if (!cmp
->value_global
) {
883 tcg_temp_free_i32(cmp
->value
);
887 void arm_jump_cc(DisasCompare
*cmp
, TCGLabel
*label
)
889 tcg_gen_brcondi_i32(cmp
->cond
, cmp
->value
, 0, label
);
892 void arm_gen_test_cc(int cc
, TCGLabel
*label
)
895 arm_test_cc(&cmp
, cc
);
896 arm_jump_cc(&cmp
, label
);
900 static const uint8_t table_logic_cc
[16] = {
919 static inline void gen_set_condexec(DisasContext
*s
)
921 if (s
->condexec_mask
) {
922 uint32_t val
= (s
->condexec_cond
<< 4) | (s
->condexec_mask
>> 1);
923 TCGv_i32 tmp
= tcg_temp_new_i32();
924 tcg_gen_movi_i32(tmp
, val
);
925 store_cpu_field(tmp
, condexec_bits
);
929 static inline void gen_set_pc_im(DisasContext
*s
, target_ulong val
)
931 tcg_gen_movi_i32(cpu_R
[15], val
);
934 /* Set PC and Thumb state from an immediate address. */
935 static inline void gen_bx_im(DisasContext
*s
, uint32_t addr
)
939 s
->base
.is_jmp
= DISAS_JUMP
;
940 if (s
->thumb
!= (addr
& 1)) {
941 tmp
= tcg_temp_new_i32();
942 tcg_gen_movi_i32(tmp
, addr
& 1);
943 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUARMState
, thumb
));
944 tcg_temp_free_i32(tmp
);
946 tcg_gen_movi_i32(cpu_R
[15], addr
& ~1);
949 /* Set PC and Thumb state from var. var is marked as dead. */
950 static inline void gen_bx(DisasContext
*s
, TCGv_i32 var
)
952 s
->base
.is_jmp
= DISAS_JUMP
;
953 tcg_gen_andi_i32(cpu_R
[15], var
, ~1);
954 tcg_gen_andi_i32(var
, var
, 1);
955 store_cpu_field(var
, thumb
);
958 /* Set PC and Thumb state from var. var is marked as dead.
959 * For M-profile CPUs, include logic to detect exception-return
960 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
961 * and BX reg, and no others, and happens only for code in Handler mode.
963 static inline void gen_bx_excret(DisasContext
*s
, TCGv_i32 var
)
965 /* Generate the same code here as for a simple bx, but flag via
966 * s->base.is_jmp that we need to do the rest of the work later.
969 if (arm_dc_feature(s
, ARM_FEATURE_M_SECURITY
) ||
970 (s
->v7m_handler_mode
&& arm_dc_feature(s
, ARM_FEATURE_M
))) {
971 s
->base
.is_jmp
= DISAS_BX_EXCRET
;
975 static inline void gen_bx_excret_final_code(DisasContext
*s
)
977 /* Generate the code to finish possible exception return and end the TB */
978 TCGLabel
*excret_label
= gen_new_label();
981 if (arm_dc_feature(s
, ARM_FEATURE_M_SECURITY
)) {
982 /* Covers FNC_RETURN and EXC_RETURN magic */
983 min_magic
= FNC_RETURN_MIN_MAGIC
;
985 /* EXC_RETURN magic only */
986 min_magic
= EXC_RETURN_MIN_MAGIC
;
989 /* Is the new PC value in the magic range indicating exception return? */
990 tcg_gen_brcondi_i32(TCG_COND_GEU
, cpu_R
[15], min_magic
, excret_label
);
991 /* No: end the TB as we would for a DISAS_JMP */
992 if (is_singlestepping(s
)) {
993 gen_singlestep_exception(s
);
995 tcg_gen_exit_tb(NULL
, 0);
997 gen_set_label(excret_label
);
998 /* Yes: this is an exception return.
999 * At this point in runtime env->regs[15] and env->thumb will hold
1000 * the exception-return magic number, which do_v7m_exception_exit()
1001 * will read. Nothing else will be able to see those values because
1002 * the cpu-exec main loop guarantees that we will always go straight
1003 * from raising the exception to the exception-handling code.
1005 * gen_ss_advance(s) does nothing on M profile currently but
1006 * calling it is conceptually the right thing as we have executed
1007 * this instruction (compare SWI, HVC, SMC handling).
1010 gen_exception_internal(EXCP_EXCEPTION_EXIT
);
1013 static inline void gen_bxns(DisasContext
*s
, int rm
)
1015 TCGv_i32 var
= load_reg(s
, rm
);
1017 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1018 * we need to sync state before calling it, but:
1019 * - we don't need to do gen_set_pc_im() because the bxns helper will
1020 * always set the PC itself
1021 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1022 * unless it's outside an IT block or the last insn in an IT block,
1023 * so we know that condexec == 0 (already set at the top of the TB)
1024 * is correct in the non-UNPREDICTABLE cases, and we can choose
1025 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1027 gen_helper_v7m_bxns(cpu_env
, var
);
1028 tcg_temp_free_i32(var
);
1029 s
->base
.is_jmp
= DISAS_EXIT
;
1032 static inline void gen_blxns(DisasContext
*s
, int rm
)
1034 TCGv_i32 var
= load_reg(s
, rm
);
1036 /* We don't need to sync condexec state, for the same reason as bxns.
1037 * We do however need to set the PC, because the blxns helper reads it.
1038 * The blxns helper may throw an exception.
1040 gen_set_pc_im(s
, s
->pc
);
1041 gen_helper_v7m_blxns(cpu_env
, var
);
1042 tcg_temp_free_i32(var
);
1043 s
->base
.is_jmp
= DISAS_EXIT
;
1046 /* Variant of store_reg which uses branch&exchange logic when storing
1047 to r15 in ARM architecture v7 and above. The source must be a temporary
1048 and will be marked as dead. */
1049 static inline void store_reg_bx(DisasContext
*s
, int reg
, TCGv_i32 var
)
1051 if (reg
== 15 && ENABLE_ARCH_7
) {
1054 store_reg(s
, reg
, var
);
1058 /* Variant of store_reg which uses branch&exchange logic when storing
1059 * to r15 in ARM architecture v5T and above. This is used for storing
1060 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1061 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
1062 static inline void store_reg_from_load(DisasContext
*s
, int reg
, TCGv_i32 var
)
1064 if (reg
== 15 && ENABLE_ARCH_5
) {
1065 gen_bx_excret(s
, var
);
1067 store_reg(s
, reg
, var
);
1071 #ifdef CONFIG_USER_ONLY
1072 #define IS_USER_ONLY 1
1074 #define IS_USER_ONLY 0
1077 /* Abstractions of "generate code to do a guest load/store for
1078 * AArch32", where a vaddr is always 32 bits (and is zero
1079 * extended if we're a 64 bit core) and data is also
1080 * 32 bits unless specifically doing a 64 bit access.
1081 * These functions work like tcg_gen_qemu_{ld,st}* except
1082 * that the address argument is TCGv_i32 rather than TCGv.
1085 static inline TCGv
gen_aa32_addr(DisasContext
*s
, TCGv_i32 a32
, TCGMemOp op
)
1087 TCGv addr
= tcg_temp_new();
1088 tcg_gen_extu_i32_tl(addr
, a32
);
1090 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1091 if (!IS_USER_ONLY
&& s
->sctlr_b
&& (op
& MO_SIZE
) < MO_32
) {
1092 tcg_gen_xori_tl(addr
, addr
, 4 - (1 << (op
& MO_SIZE
)));
1097 static void gen_aa32_ld_i32(DisasContext
*s
, TCGv_i32 val
, TCGv_i32 a32
,
1098 int index
, TCGMemOp opc
)
1102 if (arm_dc_feature(s
, ARM_FEATURE_M
) &&
1103 !arm_dc_feature(s
, ARM_FEATURE_M_MAIN
)) {
1107 addr
= gen_aa32_addr(s
, a32
, opc
);
1108 tcg_gen_qemu_ld_i32(val
, addr
, index
, opc
);
1109 tcg_temp_free(addr
);
1112 static void gen_aa32_st_i32(DisasContext
*s
, TCGv_i32 val
, TCGv_i32 a32
,
1113 int index
, TCGMemOp opc
)
1117 if (arm_dc_feature(s
, ARM_FEATURE_M
) &&
1118 !arm_dc_feature(s
, ARM_FEATURE_M_MAIN
)) {
1122 addr
= gen_aa32_addr(s
, a32
, opc
);
1123 tcg_gen_qemu_st_i32(val
, addr
, index
, opc
);
1124 tcg_temp_free(addr
);
1127 #define DO_GEN_LD(SUFF, OPC) \
1128 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
1129 TCGv_i32 a32, int index) \
1131 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
1133 static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1135 TCGv_i32 a32, int index, \
1138 gen_aa32_ld##SUFF(s, val, a32, index); \
1139 disas_set_da_iss(s, OPC, issinfo); \
1142 #define DO_GEN_ST(SUFF, OPC) \
1143 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1144 TCGv_i32 a32, int index) \
1146 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
1148 static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1150 TCGv_i32 a32, int index, \
1153 gen_aa32_st##SUFF(s, val, a32, index); \
1154 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
1157 static inline void gen_aa32_frob64(DisasContext
*s
, TCGv_i64 val
)
1159 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1160 if (!IS_USER_ONLY
&& s
->sctlr_b
) {
1161 tcg_gen_rotri_i64(val
, val
, 32);
1165 static void gen_aa32_ld_i64(DisasContext
*s
, TCGv_i64 val
, TCGv_i32 a32
,
1166 int index
, TCGMemOp opc
)
1168 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
1169 tcg_gen_qemu_ld_i64(val
, addr
, index
, opc
);
1170 gen_aa32_frob64(s
, val
);
1171 tcg_temp_free(addr
);
1174 static inline void gen_aa32_ld64(DisasContext
*s
, TCGv_i64 val
,
1175 TCGv_i32 a32
, int index
)
1177 gen_aa32_ld_i64(s
, val
, a32
, index
, MO_Q
| s
->be_data
);
1180 static void gen_aa32_st_i64(DisasContext
*s
, TCGv_i64 val
, TCGv_i32 a32
,
1181 int index
, TCGMemOp opc
)
1183 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
1185 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1186 if (!IS_USER_ONLY
&& s
->sctlr_b
) {
1187 TCGv_i64 tmp
= tcg_temp_new_i64();
1188 tcg_gen_rotri_i64(tmp
, val
, 32);
1189 tcg_gen_qemu_st_i64(tmp
, addr
, index
, opc
);
1190 tcg_temp_free_i64(tmp
);
1192 tcg_gen_qemu_st_i64(val
, addr
, index
, opc
);
1194 tcg_temp_free(addr
);
1197 static inline void gen_aa32_st64(DisasContext
*s
, TCGv_i64 val
,
1198 TCGv_i32 a32
, int index
)
1200 gen_aa32_st_i64(s
, val
, a32
, index
, MO_Q
| s
->be_data
);
1203 DO_GEN_LD(8s
, MO_SB
)
1204 DO_GEN_LD(8u, MO_UB
)
1205 DO_GEN_LD(16s
, MO_SW
)
1206 DO_GEN_LD(16u, MO_UW
)
1207 DO_GEN_LD(32u, MO_UL
)
1209 DO_GEN_ST(16, MO_UW
)
1210 DO_GEN_ST(32, MO_UL
)
/*
 * Generate an HVC (hypervisor call).  The PC is synced twice because
 * the exception may be raised either before the insn executes (UNDEF
 * trap from the pre-check helper) or after it (the real HVC exception).
 */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_HVC;
}
/*
 * Generate an SMC (secure monitor call).  As with HVC, the PC is synced
 * both before (for the pre-check trap path) and after (for the real
 * exception) the insn.
 */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_SMC;
}
/*
 * Raise a QEMU-internal exception at the current insn: rewind the PC
 * by @offset bytes to the start of the insn, sync the IT-block state,
 * and terminate the TB.
 */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}
/*
 * Raise an architectural exception @excp with syndrome @syn targeting
 * @target_el, attributed to the insn starting @offset bytes back from
 * the current PC.  Terminates the TB.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}
/*
 * Raise a BKPT exception with syndrome @syn, attributed to the insn
 * starting @offset bytes back from the current PC.  Terminates the TB.
 */
static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
{
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    tcg_syn = tcg_const_i32(syn);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    /* Bit 0 of the PC is never set for A32/T32 code addresses. */
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->base.is_jmp = DISAS_EXIT;
}
static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    /* Not a semihosting trap: UNDEF, sized per the current insn set. */
    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
/*
 * Apply the addressing-mode offset of an A32 data-transfer insn to @var:
 * either an immediate (bit 25 clear) or a shifted register (bit 25 set);
 * bit 23 selects add vs subtract.
 */
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = insn & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
/*
 * Apply the addressing-mode offset of an A32 halfword/doubleword
 * transfer insn to @var: split 8-bit immediate (bit 22 set) or plain
 * register (bit 22 clear); bit 23 selects add vs subtract.  @extra is
 * an additional constant adjustment applied in both forms.
 */
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = insn & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
1364 static TCGv_ptr
get_fpstatus_ptr(int neon
)
1366 TCGv_ptr statusptr
= tcg_temp_new_ptr();
1369 offset
= offsetof(CPUARMState
, vfp
.standard_fp_status
);
1371 offset
= offsetof(CPUARMState
, vfp
.fp_status
);
1373 tcg_gen_addi_ptr(statusptr
, cpu_env
, offset
);
1377 static inline void gen_vfp_abs(int dp
)
1380 gen_helper_vfp_absd(cpu_F0d
, cpu_F0d
);
1382 gen_helper_vfp_abss(cpu_F0s
, cpu_F0s
);
1385 static inline void gen_vfp_neg(int dp
)
1388 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
1390 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
/*
 * Integer-to-float conversions: the source is always the 32-bit F0s
 * scratch, the destination is F0d (double) or F0s (single).
 */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}
/*
 * Float-to-integer conversions: the destination is always the 32-bit
 * F0s scratch, the source is F0d (double) or F0s (single).
 */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}
/*
 * Fixed-point conversions with an explicit fraction-bit count @shift;
 * @round selects the helper's rounding-mode suffix (may be empty).
 */
#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
/* Instantiate the fixed-point conversion helpers used by the decoder. */
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
/*
 * Return the CPUARMState offset of VFP register @reg: a D register when
 * @dp is true, otherwise an S register (an S register is one 32-bit
 * half of a D register; the halves are laid out host-endian, hence the
 * CPU_DoubleU upper/lower selection).
 */
static inline long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
    } else {
        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
        if (reg & 1) {
            ofs += offsetof(CPU_DoubleU, l.upper);
        } else {
            ofs += offsetof(CPU_DoubleU, l.lower);
        }
        return ofs;
    }
}
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    /* NEON Dn maps to the S-register pair {2n, 2n+1}. */
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
 * where 0 is the least significant end of the register.
 */
static inline long
neon_element_offset(int reg, int element, TCGMemOp size)
{
    int element_size = 1 << size;
    int ofs = element * element_size;
#ifdef HOST_WORDS_BIGENDIAN
    /* Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     */
    if (element_size < 8) {
        ofs ^= 8 - element_size;
    }
#endif
    return neon_reg_offset(reg, 0) + ofs;
}
1492 static TCGv_i32
neon_load_reg(int reg
, int pass
)
1494 TCGv_i32 tmp
= tcg_temp_new_i32();
1495 tcg_gen_ld_i32(tmp
, cpu_env
, neon_reg_offset(reg
, pass
));
/*
 * Load one element of NEON register @reg into the 32-bit temp @var,
 * zero-extending; @mop gives the element size.
 */
static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i32(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i32(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
/*
 * Load one element of NEON register @reg into the 64-bit temp @var,
 * zero-extending; @mop gives the element size.
 */
static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i64(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i64(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld32u_i64(var, cpu_env, offset);
        break;
    case MO_Q:
        tcg_gen_ld_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
/*
 * Store @var into 32-bit lane @pass of NEON register @reg.
 * Consumes (frees) @var.
 */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
/* Store the low 2**@size bytes of @var into element @ele of NEON reg @reg. */
static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i32(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i32(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Store the low 2**@size bytes of @var into element @ele of NEON reg @reg. */
static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i64(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i64(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st32_i64(var, cpu_env, offset);
        break;
    case MO_64:
        tcg_gen_st_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Load the whole 64-bit NEON/VFP D register @reg into @var. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
/* Store @var into the 64-bit NEON/VFP D register @reg. */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
/* Load the 32-bit VFP S register @reg into @var. */
static inline void neon_load_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
}
/* Store @var into the 32-bit VFP S register @reg. */
static inline void neon_store_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
}
1607 static TCGv_ptr
vfp_reg_ptr(bool dp
, int reg
)
1609 TCGv_ptr ret
= tcg_temp_new_ptr();
1610 tcg_gen_addi_ptr(ret
, cpu_env
, vfp_reg_offset(dp
, reg
));
/* Float loads/stores are plain integer loads/stores of the same width. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
/* Load VFP register @reg into the F0 scratch (double or single). */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
/* Load VFP register @reg into the F1 scratch (double or single). */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}
/* Store the F0 scratch into VFP register @reg (double or single). */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
1643 #define ARM_CP_RW_BIT (1 << 20)
1645 /* Include the VFP decoder */
1646 #include "translate-vfp.inc.c"
/* Load iwMMXt data register wR[@reg] into @var. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
/* Store @var into iwMMXt data register wR[@reg]. */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1658 static inline TCGv_i32
iwmmxt_load_creg(int reg
)
1660 TCGv_i32 var
= tcg_temp_new_i32();
1661 tcg_gen_ld_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
/* Store @var into iwMMXt control register wC[@reg]; consumes @var. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
/* Write the M0 scratch back to wR[@rn]. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}
/* Load wR[@rn] into the M0 scratch. */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
/* M0 |= wR[@rn]. */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}
/* M0 &= wR[@rn]. */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}
/* M0 ^= wR[@rn]. */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
/* Two-operand iwMMXt op: M0 = helper(M0, wR[rn]). */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also takes cpu_env (touches CPU state). */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate byte/word/long variants of an env op. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* One-operand env op: M0 = helper(env, M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
/* Instantiate the gen_op_iwmmxt_* wrappers used by the iwMMXt decoder. */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
/* Set the MUP bit (bit 1) in wCon: an iwMMXt data register was updated. */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
/* Set the CUP bit (bit 0) in wCon: an iwMMXt control register was updated. */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
/* Compute the N/Z SIMD flags from M0 and store them into wCASF. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
/* M0 += zero-extended low 32 bits of wR[@rn]. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
/*
 * Compute the effective address of an iwMMXt load/store insn into @dest,
 * handling pre-indexed (bit 24), post-indexed writeback (bit 21) and
 * up/down (bit 23) forms.  Returns 1 for an invalid addressing mode
 * (down without pre-index or writeback), 0 on success.
 */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    /* 8-bit immediate, optionally scaled by 4 (bit 8 of insn). */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
/*
 * Fetch the shift amount for an iwMMXt shift insn into @dest, masked
 * by @mask: from wCGR0..wCGR3 when bit 8 is set (returning 1 for any
 * other control register), else from the low half of wR[rd].
 * Returns 0 on success.
 */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1866 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1867 (ie. an undefined instruction). */
1868 static int disas_iwmmxt_insn(DisasContext
*s
, uint32_t insn
)
1871 int rdhi
, rdlo
, rd0
, rd1
, i
;
1873 TCGv_i32 tmp
, tmp2
, tmp3
;
1875 if ((insn
& 0x0e000e00) == 0x0c000000) {
1876 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1878 rdlo
= (insn
>> 12) & 0xf;
1879 rdhi
= (insn
>> 16) & 0xf;
1880 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1881 iwmmxt_load_reg(cpu_V0
, wrd
);
1882 tcg_gen_extrl_i64_i32(cpu_R
[rdlo
], cpu_V0
);
1883 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
1884 tcg_gen_extrl_i64_i32(cpu_R
[rdhi
], cpu_V0
);
1885 } else { /* TMCRR */
1886 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
1887 iwmmxt_store_reg(cpu_V0
, wrd
);
1888 gen_op_iwmmxt_set_mup();
1893 wrd
= (insn
>> 12) & 0xf;
1894 addr
= tcg_temp_new_i32();
1895 if (gen_iwmmxt_address(s
, insn
, addr
)) {
1896 tcg_temp_free_i32(addr
);
1899 if (insn
& ARM_CP_RW_BIT
) {
1900 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1901 tmp
= tcg_temp_new_i32();
1902 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
1903 iwmmxt_store_creg(wrd
, tmp
);
1906 if (insn
& (1 << 8)) {
1907 if (insn
& (1 << 22)) { /* WLDRD */
1908 gen_aa32_ld64(s
, cpu_M0
, addr
, get_mem_index(s
));
1910 } else { /* WLDRW wRd */
1911 tmp
= tcg_temp_new_i32();
1912 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
1915 tmp
= tcg_temp_new_i32();
1916 if (insn
& (1 << 22)) { /* WLDRH */
1917 gen_aa32_ld16u(s
, tmp
, addr
, get_mem_index(s
));
1918 } else { /* WLDRB */
1919 gen_aa32_ld8u(s
, tmp
, addr
, get_mem_index(s
));
1923 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1924 tcg_temp_free_i32(tmp
);
1926 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1929 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1930 tmp
= iwmmxt_load_creg(wrd
);
1931 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
1933 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1934 tmp
= tcg_temp_new_i32();
1935 if (insn
& (1 << 8)) {
1936 if (insn
& (1 << 22)) { /* WSTRD */
1937 gen_aa32_st64(s
, cpu_M0
, addr
, get_mem_index(s
));
1938 } else { /* WSTRW wRd */
1939 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1940 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
1943 if (insn
& (1 << 22)) { /* WSTRH */
1944 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1945 gen_aa32_st16(s
, tmp
, addr
, get_mem_index(s
));
1946 } else { /* WSTRB */
1947 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1948 gen_aa32_st8(s
, tmp
, addr
, get_mem_index(s
));
1952 tcg_temp_free_i32(tmp
);
1954 tcg_temp_free_i32(addr
);
1958 if ((insn
& 0x0f000000) != 0x0e000000)
1961 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1962 case 0x000: /* WOR */
1963 wrd
= (insn
>> 12) & 0xf;
1964 rd0
= (insn
>> 0) & 0xf;
1965 rd1
= (insn
>> 16) & 0xf;
1966 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1967 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1968 gen_op_iwmmxt_setpsr_nz();
1969 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1970 gen_op_iwmmxt_set_mup();
1971 gen_op_iwmmxt_set_cup();
1973 case 0x011: /* TMCR */
1976 rd
= (insn
>> 12) & 0xf;
1977 wrd
= (insn
>> 16) & 0xf;
1979 case ARM_IWMMXT_wCID
:
1980 case ARM_IWMMXT_wCASF
:
1982 case ARM_IWMMXT_wCon
:
1983 gen_op_iwmmxt_set_cup();
1985 case ARM_IWMMXT_wCSSF
:
1986 tmp
= iwmmxt_load_creg(wrd
);
1987 tmp2
= load_reg(s
, rd
);
1988 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1989 tcg_temp_free_i32(tmp2
);
1990 iwmmxt_store_creg(wrd
, tmp
);
1992 case ARM_IWMMXT_wCGR0
:
1993 case ARM_IWMMXT_wCGR1
:
1994 case ARM_IWMMXT_wCGR2
:
1995 case ARM_IWMMXT_wCGR3
:
1996 gen_op_iwmmxt_set_cup();
1997 tmp
= load_reg(s
, rd
);
1998 iwmmxt_store_creg(wrd
, tmp
);
2004 case 0x100: /* WXOR */
2005 wrd
= (insn
>> 12) & 0xf;
2006 rd0
= (insn
>> 0) & 0xf;
2007 rd1
= (insn
>> 16) & 0xf;
2008 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2009 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
2010 gen_op_iwmmxt_setpsr_nz();
2011 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2012 gen_op_iwmmxt_set_mup();
2013 gen_op_iwmmxt_set_cup();
2015 case 0x111: /* TMRC */
2018 rd
= (insn
>> 12) & 0xf;
2019 wrd
= (insn
>> 16) & 0xf;
2020 tmp
= iwmmxt_load_creg(wrd
);
2021 store_reg(s
, rd
, tmp
);
2023 case 0x300: /* WANDN */
2024 wrd
= (insn
>> 12) & 0xf;
2025 rd0
= (insn
>> 0) & 0xf;
2026 rd1
= (insn
>> 16) & 0xf;
2027 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2028 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
2029 gen_op_iwmmxt_andq_M0_wRn(rd1
);
2030 gen_op_iwmmxt_setpsr_nz();
2031 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2032 gen_op_iwmmxt_set_mup();
2033 gen_op_iwmmxt_set_cup();
2035 case 0x200: /* WAND */
2036 wrd
= (insn
>> 12) & 0xf;
2037 rd0
= (insn
>> 0) & 0xf;
2038 rd1
= (insn
>> 16) & 0xf;
2039 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2040 gen_op_iwmmxt_andq_M0_wRn(rd1
);
2041 gen_op_iwmmxt_setpsr_nz();
2042 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2043 gen_op_iwmmxt_set_mup();
2044 gen_op_iwmmxt_set_cup();
2046 case 0x810: case 0xa10: /* WMADD */
2047 wrd
= (insn
>> 12) & 0xf;
2048 rd0
= (insn
>> 0) & 0xf;
2049 rd1
= (insn
>> 16) & 0xf;
2050 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2051 if (insn
& (1 << 21))
2052 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
2054 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
2055 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2056 gen_op_iwmmxt_set_mup();
2058 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
2059 wrd
= (insn
>> 12) & 0xf;
2060 rd0
= (insn
>> 16) & 0xf;
2061 rd1
= (insn
>> 0) & 0xf;
2062 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2063 switch ((insn
>> 22) & 3) {
2065 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
2068 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
2071 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
2076 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2077 gen_op_iwmmxt_set_mup();
2078 gen_op_iwmmxt_set_cup();
2080 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
2081 wrd
= (insn
>> 12) & 0xf;
2082 rd0
= (insn
>> 16) & 0xf;
2083 rd1
= (insn
>> 0) & 0xf;
2084 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2085 switch ((insn
>> 22) & 3) {
2087 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
2090 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
2093 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
2098 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2099 gen_op_iwmmxt_set_mup();
2100 gen_op_iwmmxt_set_cup();
2102 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2103 wrd
= (insn
>> 12) & 0xf;
2104 rd0
= (insn
>> 16) & 0xf;
2105 rd1
= (insn
>> 0) & 0xf;
2106 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2107 if (insn
& (1 << 22))
2108 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
2110 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
2111 if (!(insn
& (1 << 20)))
2112 gen_op_iwmmxt_addl_M0_wRn(wrd
);
2113 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2114 gen_op_iwmmxt_set_mup();
2116 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2117 wrd
= (insn
>> 12) & 0xf;
2118 rd0
= (insn
>> 16) & 0xf;
2119 rd1
= (insn
>> 0) & 0xf;
2120 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2121 if (insn
& (1 << 21)) {
2122 if (insn
& (1 << 20))
2123 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
2125 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
2127 if (insn
& (1 << 20))
2128 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
2130 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
2132 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2133 gen_op_iwmmxt_set_mup();
2135 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2136 wrd
= (insn
>> 12) & 0xf;
2137 rd0
= (insn
>> 16) & 0xf;
2138 rd1
= (insn
>> 0) & 0xf;
2139 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2140 if (insn
& (1 << 21))
2141 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
2143 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
2144 if (!(insn
& (1 << 20))) {
2145 iwmmxt_load_reg(cpu_V1
, wrd
);
2146 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
2148 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2149 gen_op_iwmmxt_set_mup();
2151 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2152 wrd
= (insn
>> 12) & 0xf;
2153 rd0
= (insn
>> 16) & 0xf;
2154 rd1
= (insn
>> 0) & 0xf;
2155 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2156 switch ((insn
>> 22) & 3) {
2158 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
2161 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
2164 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
2169 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2170 gen_op_iwmmxt_set_mup();
2171 gen_op_iwmmxt_set_cup();
2173 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2174 wrd
= (insn
>> 12) & 0xf;
2175 rd0
= (insn
>> 16) & 0xf;
2176 rd1
= (insn
>> 0) & 0xf;
2177 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2178 if (insn
& (1 << 22)) {
2179 if (insn
& (1 << 20))
2180 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
2182 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
2184 if (insn
& (1 << 20))
2185 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
2187 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
2189 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2190 gen_op_iwmmxt_set_mup();
2191 gen_op_iwmmxt_set_cup();
2193 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2194 wrd
= (insn
>> 12) & 0xf;
2195 rd0
= (insn
>> 16) & 0xf;
2196 rd1
= (insn
>> 0) & 0xf;
2197 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2198 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
2199 tcg_gen_andi_i32(tmp
, tmp
, 7);
2200 iwmmxt_load_reg(cpu_V1
, rd1
);
2201 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2202 tcg_temp_free_i32(tmp
);
2203 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2204 gen_op_iwmmxt_set_mup();
2206 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2207 if (((insn
>> 6) & 3) == 3)
2209 rd
= (insn
>> 12) & 0xf;
2210 wrd
= (insn
>> 16) & 0xf;
2211 tmp
= load_reg(s
, rd
);
2212 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2213 switch ((insn
>> 6) & 3) {
2215 tmp2
= tcg_const_i32(0xff);
2216 tmp3
= tcg_const_i32((insn
& 7) << 3);
2219 tmp2
= tcg_const_i32(0xffff);
2220 tmp3
= tcg_const_i32((insn
& 3) << 4);
2223 tmp2
= tcg_const_i32(0xffffffff);
2224 tmp3
= tcg_const_i32((insn
& 1) << 5);
2230 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, tmp
, tmp2
, tmp3
);
2231 tcg_temp_free_i32(tmp3
);
2232 tcg_temp_free_i32(tmp2
);
2233 tcg_temp_free_i32(tmp
);
2234 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2235 gen_op_iwmmxt_set_mup();
2237 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2238 rd
= (insn
>> 12) & 0xf;
2239 wrd
= (insn
>> 16) & 0xf;
2240 if (rd
== 15 || ((insn
>> 22) & 3) == 3)
2242 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2243 tmp
= tcg_temp_new_i32();
2244 switch ((insn
>> 22) & 3) {
2246 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 7) << 3);
2247 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2249 tcg_gen_ext8s_i32(tmp
, tmp
);
2251 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
2255 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 3) << 4);
2256 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2258 tcg_gen_ext16s_i32(tmp
, tmp
);
2260 tcg_gen_andi_i32(tmp
, tmp
, 0xffff);
2264 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 1) << 5);
2265 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2268 store_reg(s
, rd
, tmp
);
2270 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2271 if ((insn
& 0x000ff008) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2273 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2274 switch ((insn
>> 22) & 3) {
2276 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 7) << 2) + 0);
2279 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 3) << 3) + 4);
2282 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 1) << 4) + 12);
2285 tcg_gen_shli_i32(tmp
, tmp
, 28);
2287 tcg_temp_free_i32(tmp
);
2289 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2290 if (((insn
>> 6) & 3) == 3)
2292 rd
= (insn
>> 12) & 0xf;
2293 wrd
= (insn
>> 16) & 0xf;
2294 tmp
= load_reg(s
, rd
);
2295 switch ((insn
>> 6) & 3) {
2297 gen_helper_iwmmxt_bcstb(cpu_M0
, tmp
);
2300 gen_helper_iwmmxt_bcstw(cpu_M0
, tmp
);
2303 gen_helper_iwmmxt_bcstl(cpu_M0
, tmp
);
2306 tcg_temp_free_i32(tmp
);
2307 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2308 gen_op_iwmmxt_set_mup();
2310 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2311 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2313 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2314 tmp2
= tcg_temp_new_i32();
2315 tcg_gen_mov_i32(tmp2
, tmp
);
2316 switch ((insn
>> 22) & 3) {
2318 for (i
= 0; i
< 7; i
++) {
2319 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
2320 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2324 for (i
= 0; i
< 3; i
++) {
2325 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
2326 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2330 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
2331 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2335 tcg_temp_free_i32(tmp2
);
2336 tcg_temp_free_i32(tmp
);
2338 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2339 wrd
= (insn
>> 12) & 0xf;
2340 rd0
= (insn
>> 16) & 0xf;
2341 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2342 switch ((insn
>> 22) & 3) {
2344 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
2347 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
2350 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
2355 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2356 gen_op_iwmmxt_set_mup();
2358 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2359 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2361 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2362 tmp2
= tcg_temp_new_i32();
2363 tcg_gen_mov_i32(tmp2
, tmp
);
2364 switch ((insn
>> 22) & 3) {
2366 for (i
= 0; i
< 7; i
++) {
2367 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
2368 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2372 for (i
= 0; i
< 3; i
++) {
2373 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
2374 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2378 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
2379 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2383 tcg_temp_free_i32(tmp2
);
2384 tcg_temp_free_i32(tmp
);
2386 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2387 rd
= (insn
>> 12) & 0xf;
2388 rd0
= (insn
>> 16) & 0xf;
2389 if ((insn
& 0xf) != 0 || ((insn
>> 22) & 3) == 3)
2391 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2392 tmp
= tcg_temp_new_i32();
2393 switch ((insn
>> 22) & 3) {
2395 gen_helper_iwmmxt_msbb(tmp
, cpu_M0
);
2398 gen_helper_iwmmxt_msbw(tmp
, cpu_M0
);
2401 gen_helper_iwmmxt_msbl(tmp
, cpu_M0
);
2404 store_reg(s
, rd
, tmp
);
2406 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2407 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2408 wrd
= (insn
>> 12) & 0xf;
2409 rd0
= (insn
>> 16) & 0xf;
2410 rd1
= (insn
>> 0) & 0xf;
2411 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2412 switch ((insn
>> 22) & 3) {
2414 if (insn
& (1 << 21))
2415 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
2417 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
2420 if (insn
& (1 << 21))
2421 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
2423 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
2426 if (insn
& (1 << 21))
2427 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
2429 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
2434 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2435 gen_op_iwmmxt_set_mup();
2436 gen_op_iwmmxt_set_cup();
2438 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2439 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2440 wrd
= (insn
>> 12) & 0xf;
2441 rd0
= (insn
>> 16) & 0xf;
2442 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2443 switch ((insn
>> 22) & 3) {
2445 if (insn
& (1 << 21))
2446 gen_op_iwmmxt_unpacklsb_M0();
2448 gen_op_iwmmxt_unpacklub_M0();
2451 if (insn
& (1 << 21))
2452 gen_op_iwmmxt_unpacklsw_M0();
2454 gen_op_iwmmxt_unpackluw_M0();
2457 if (insn
& (1 << 21))
2458 gen_op_iwmmxt_unpacklsl_M0();
2460 gen_op_iwmmxt_unpacklul_M0();
2465 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2466 gen_op_iwmmxt_set_mup();
2467 gen_op_iwmmxt_set_cup();
2469 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2470 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2471 wrd
= (insn
>> 12) & 0xf;
2472 rd0
= (insn
>> 16) & 0xf;
2473 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2474 switch ((insn
>> 22) & 3) {
2476 if (insn
& (1 << 21))
2477 gen_op_iwmmxt_unpackhsb_M0();
2479 gen_op_iwmmxt_unpackhub_M0();
2482 if (insn
& (1 << 21))
2483 gen_op_iwmmxt_unpackhsw_M0();
2485 gen_op_iwmmxt_unpackhuw_M0();
2488 if (insn
& (1 << 21))
2489 gen_op_iwmmxt_unpackhsl_M0();
2491 gen_op_iwmmxt_unpackhul_M0();
2496 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2497 gen_op_iwmmxt_set_mup();
2498 gen_op_iwmmxt_set_cup();
2500 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2501 case 0x214: case 0x614: case 0xa14: case 0xe14:
2502 if (((insn
>> 22) & 3) == 0)
2504 wrd
= (insn
>> 12) & 0xf;
2505 rd0
= (insn
>> 16) & 0xf;
2506 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2507 tmp
= tcg_temp_new_i32();
2508 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2509 tcg_temp_free_i32(tmp
);
2512 switch ((insn
>> 22) & 3) {
2514 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2517 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2520 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2523 tcg_temp_free_i32(tmp
);
2524 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2525 gen_op_iwmmxt_set_mup();
2526 gen_op_iwmmxt_set_cup();
2528 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2529 case 0x014: case 0x414: case 0x814: case 0xc14:
2530 if (((insn
>> 22) & 3) == 0)
2532 wrd
= (insn
>> 12) & 0xf;
2533 rd0
= (insn
>> 16) & 0xf;
2534 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2535 tmp
= tcg_temp_new_i32();
2536 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2537 tcg_temp_free_i32(tmp
);
2540 switch ((insn
>> 22) & 3) {
2542 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2545 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2548 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2551 tcg_temp_free_i32(tmp
);
2552 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2553 gen_op_iwmmxt_set_mup();
2554 gen_op_iwmmxt_set_cup();
2556 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2557 case 0x114: case 0x514: case 0x914: case 0xd14:
2558 if (((insn
>> 22) & 3) == 0)
2560 wrd
= (insn
>> 12) & 0xf;
2561 rd0
= (insn
>> 16) & 0xf;
2562 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2563 tmp
= tcg_temp_new_i32();
2564 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2565 tcg_temp_free_i32(tmp
);
2568 switch ((insn
>> 22) & 3) {
2570 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2573 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2576 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2579 tcg_temp_free_i32(tmp
);
2580 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2581 gen_op_iwmmxt_set_mup();
2582 gen_op_iwmmxt_set_cup();
2584 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2585 case 0x314: case 0x714: case 0xb14: case 0xf14:
2586 if (((insn
>> 22) & 3) == 0)
2588 wrd
= (insn
>> 12) & 0xf;
2589 rd0
= (insn
>> 16) & 0xf;
2590 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2591 tmp
= tcg_temp_new_i32();
2592 switch ((insn
>> 22) & 3) {
2594 if (gen_iwmmxt_shift(insn
, 0xf, tmp
)) {
2595 tcg_temp_free_i32(tmp
);
2598 gen_helper_iwmmxt_rorw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2601 if (gen_iwmmxt_shift(insn
, 0x1f, tmp
)) {
2602 tcg_temp_free_i32(tmp
);
2605 gen_helper_iwmmxt_rorl(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2608 if (gen_iwmmxt_shift(insn
, 0x3f, tmp
)) {
2609 tcg_temp_free_i32(tmp
);
2612 gen_helper_iwmmxt_rorq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2615 tcg_temp_free_i32(tmp
);
2616 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2617 gen_op_iwmmxt_set_mup();
2618 gen_op_iwmmxt_set_cup();
2620 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2621 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2622 wrd
= (insn
>> 12) & 0xf;
2623 rd0
= (insn
>> 16) & 0xf;
2624 rd1
= (insn
>> 0) & 0xf;
2625 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2626 switch ((insn
>> 22) & 3) {
2628 if (insn
& (1 << 21))
2629 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
2631 gen_op_iwmmxt_minub_M0_wRn(rd1
);
2634 if (insn
& (1 << 21))
2635 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
2637 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
2640 if (insn
& (1 << 21))
2641 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
2643 gen_op_iwmmxt_minul_M0_wRn(rd1
);
2648 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2649 gen_op_iwmmxt_set_mup();
2651 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2652 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2653 wrd
= (insn
>> 12) & 0xf;
2654 rd0
= (insn
>> 16) & 0xf;
2655 rd1
= (insn
>> 0) & 0xf;
2656 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2657 switch ((insn
>> 22) & 3) {
2659 if (insn
& (1 << 21))
2660 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
2662 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
2665 if (insn
& (1 << 21))
2666 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
2668 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
2671 if (insn
& (1 << 21))
2672 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
2674 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
2679 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2680 gen_op_iwmmxt_set_mup();
2682 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2683 case 0x402: case 0x502: case 0x602: case 0x702:
2684 wrd
= (insn
>> 12) & 0xf;
2685 rd0
= (insn
>> 16) & 0xf;
2686 rd1
= (insn
>> 0) & 0xf;
2687 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2688 tmp
= tcg_const_i32((insn
>> 20) & 3);
2689 iwmmxt_load_reg(cpu_V1
, rd1
);
2690 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2691 tcg_temp_free_i32(tmp
);
2692 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2693 gen_op_iwmmxt_set_mup();
2695 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2696 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2697 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2698 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2699 wrd
= (insn
>> 12) & 0xf;
2700 rd0
= (insn
>> 16) & 0xf;
2701 rd1
= (insn
>> 0) & 0xf;
2702 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2703 switch ((insn
>> 20) & 0xf) {
2705 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2708 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2711 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2714 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2717 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2720 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2723 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2726 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2729 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2734 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2735 gen_op_iwmmxt_set_mup();
2736 gen_op_iwmmxt_set_cup();
2738 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2739 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2740 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2741 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2742 wrd
= (insn
>> 12) & 0xf;
2743 rd0
= (insn
>> 16) & 0xf;
2744 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2745 tmp
= tcg_const_i32(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2746 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2747 tcg_temp_free_i32(tmp
);
2748 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2749 gen_op_iwmmxt_set_mup();
2750 gen_op_iwmmxt_set_cup();
2752 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2753 case 0x418: case 0x518: case 0x618: case 0x718:
2754 case 0x818: case 0x918: case 0xa18: case 0xb18:
2755 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2756 wrd
= (insn
>> 12) & 0xf;
2757 rd0
= (insn
>> 16) & 0xf;
2758 rd1
= (insn
>> 0) & 0xf;
2759 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2760 switch ((insn
>> 20) & 0xf) {
2762 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2765 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2768 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2771 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2774 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2777 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2780 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2783 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2786 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2791 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2792 gen_op_iwmmxt_set_mup();
2793 gen_op_iwmmxt_set_cup();
2795 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2796 case 0x408: case 0x508: case 0x608: case 0x708:
2797 case 0x808: case 0x908: case 0xa08: case 0xb08:
2798 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2799 if (!(insn
& (1 << 20)) || ((insn
>> 22) & 3) == 0)
2801 wrd
= (insn
>> 12) & 0xf;
2802 rd0
= (insn
>> 16) & 0xf;
2803 rd1
= (insn
>> 0) & 0xf;
2804 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2805 switch ((insn
>> 22) & 3) {
2807 if (insn
& (1 << 21))
2808 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
2810 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
2813 if (insn
& (1 << 21))
2814 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
2816 gen_op_iwmmxt_packul_M0_wRn(rd1
);
2819 if (insn
& (1 << 21))
2820 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
2822 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
2825 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2826 gen_op_iwmmxt_set_mup();
2827 gen_op_iwmmxt_set_cup();
2829 case 0x201: case 0x203: case 0x205: case 0x207:
2830 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2831 case 0x211: case 0x213: case 0x215: case 0x217:
2832 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2833 wrd
= (insn
>> 5) & 0xf;
2834 rd0
= (insn
>> 12) & 0xf;
2835 rd1
= (insn
>> 0) & 0xf;
2836 if (rd0
== 0xf || rd1
== 0xf)
2838 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2839 tmp
= load_reg(s
, rd0
);
2840 tmp2
= load_reg(s
, rd1
);
2841 switch ((insn
>> 16) & 0xf) {
2842 case 0x0: /* TMIA */
2843 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2845 case 0x8: /* TMIAPH */
2846 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2848 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2849 if (insn
& (1 << 16))
2850 tcg_gen_shri_i32(tmp
, tmp
, 16);
2851 if (insn
& (1 << 17))
2852 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2853 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2856 tcg_temp_free_i32(tmp2
);
2857 tcg_temp_free_i32(tmp
);
2860 tcg_temp_free_i32(tmp2
);
2861 tcg_temp_free_i32(tmp
);
2862 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2863 gen_op_iwmmxt_set_mup();
2872 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2873 (ie. an undefined instruction). */
2874 static int disas_dsp_insn(DisasContext
*s
, uint32_t insn
)
2876 int acc
, rd0
, rd1
, rdhi
, rdlo
;
2879 if ((insn
& 0x0ff00f10) == 0x0e200010) {
2880 /* Multiply with Internal Accumulate Format */
2881 rd0
= (insn
>> 12) & 0xf;
2883 acc
= (insn
>> 5) & 7;
2888 tmp
= load_reg(s
, rd0
);
2889 tmp2
= load_reg(s
, rd1
);
2890 switch ((insn
>> 16) & 0xf) {
2892 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2894 case 0x8: /* MIAPH */
2895 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2897 case 0xc: /* MIABB */
2898 case 0xd: /* MIABT */
2899 case 0xe: /* MIATB */
2900 case 0xf: /* MIATT */
2901 if (insn
& (1 << 16))
2902 tcg_gen_shri_i32(tmp
, tmp
, 16);
2903 if (insn
& (1 << 17))
2904 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2905 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2910 tcg_temp_free_i32(tmp2
);
2911 tcg_temp_free_i32(tmp
);
2913 gen_op_iwmmxt_movq_wRn_M0(acc
);
2917 if ((insn
& 0x0fe00ff8) == 0x0c400000) {
2918 /* Internal Accumulator Access Format */
2919 rdhi
= (insn
>> 16) & 0xf;
2920 rdlo
= (insn
>> 12) & 0xf;
2926 if (insn
& ARM_CP_RW_BIT
) { /* MRA */
2927 iwmmxt_load_reg(cpu_V0
, acc
);
2928 tcg_gen_extrl_i64_i32(cpu_R
[rdlo
], cpu_V0
);
2929 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
2930 tcg_gen_extrl_i64_i32(cpu_R
[rdhi
], cpu_V0
);
2931 tcg_gen_andi_i32(cpu_R
[rdhi
], cpu_R
[rdhi
], (1 << (40 - 32)) - 1);
2933 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
2934 iwmmxt_store_reg(cpu_V0
, acc
);
2942 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2943 #define VFP_SREG(insn, bigbit, smallbit) \
2944 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2945 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2946 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2947 reg = (((insn) >> (bigbit)) & 0x0f) \
2948 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2950 if (insn & (1 << (smallbit))) \
2952 reg = ((insn) >> (bigbit)) & 0x0f; \
2955 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2956 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2957 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2958 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2959 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2960 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2962 static void gen_neon_dup_low16(TCGv_i32 var
)
2964 TCGv_i32 tmp
= tcg_temp_new_i32();
2965 tcg_gen_ext16u_i32(var
, var
);
2966 tcg_gen_shli_i32(tmp
, var
, 16);
2967 tcg_gen_or_i32(var
, var
, tmp
);
2968 tcg_temp_free_i32(tmp
);
2971 static void gen_neon_dup_high16(TCGv_i32 var
)
2973 TCGv_i32 tmp
= tcg_temp_new_i32();
2974 tcg_gen_andi_i32(var
, var
, 0xffff0000);
2975 tcg_gen_shri_i32(tmp
, var
, 16);
2976 tcg_gen_or_i32(var
, var
, tmp
);
2977 tcg_temp_free_i32(tmp
);
2981 * Disassemble a VFP instruction. Returns nonzero if an error occurred
2982 * (ie. an undefined instruction).
2984 static int disas_vfp_insn(DisasContext
*s
, uint32_t insn
)
2986 uint32_t rd
, rn
, rm
, op
, delta_d
, delta_m
, bank_mask
;
2989 if (!arm_dc_feature(s
, ARM_FEATURE_VFP
)) {
2994 * If the decodetree decoder handles this insn it will always
2995 * emit code to either execute the insn or generate an appropriate
2996 * exception; so we don't need to ever return non-zero to tell
2997 * the calling code to emit an UNDEF exception.
2999 if (extract32(insn
, 28, 4) == 0xf) {
3000 if (disas_vfp_uncond(s
, insn
)) {
3004 if (disas_vfp(s
, insn
)) {
3009 if (extract32(insn
, 28, 4) == 0xf) {
3011 * Encodings with T=1 (Thumb) or unconditional (ARM): these
3012 * were all handled by the decodetree decoder, so any insn
3013 * patterns which get here must be UNDEF.
3019 * FIXME: this access check should not take precedence over UNDEF
3020 * for invalid encodings; we will generate incorrect syndrome information
3021 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3023 if (!vfp_access_check(s
)) {
3027 dp
= ((insn
& 0xf00) == 0xb00);
3028 switch ((insn
>> 24) & 0xf) {
3030 if (insn
& (1 << 4)) {
3031 /* already handled by decodetree */
3034 /* data processing */
3037 bool no_output
= false;
3039 /* The opcode is in bits 23, 21, 20 and 6. */
3040 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
3041 rn
= VFP_SREG_N(insn
);
3045 /* Already handled by decodetree */
3051 /* Already handled by decodetree */
3061 /* rn is opcode, encoded as per VFP_SREG_N. */
3063 case 0x18: /* vcvtr.u32.fxx */
3064 case 0x19: /* vcvtz.u32.fxx */
3065 case 0x1a: /* vcvtr.s32.fxx */
3066 case 0x1b: /* vcvtz.s32.fxx */
3074 /* rn is register number */
3075 VFP_DREG_N(rn
, insn
);
3079 VFP_DREG_D(rd
, insn
);
3081 rd
= VFP_SREG_D(insn
);
3084 VFP_DREG_M(rm
, insn
);
3086 rm
= VFP_SREG_M(insn
);
3089 veclen
= s
->vec_len
;
3090 if (op
== 15 && rn
> 3) {
3094 /* Shut up compiler warnings. */
3105 /* Figure out what type of vector operation this is. */
3106 if ((rd
& bank_mask
) == 0) {
3111 delta_d
= (s
->vec_stride
>> 1) + 1;
3113 delta_d
= s
->vec_stride
+ 1;
3115 if ((rm
& bank_mask
) == 0) {
3116 /* mixed scalar/vector */
3125 /* Load the initial operands. */
3129 /* One source operand. */
3130 gen_mov_F0_vreg(rm_is_dp
, rm
);
3134 /* Two source operands. */
3135 gen_mov_F0_vreg(dp
, rn
);
3136 gen_mov_F1_vreg(dp
, rm
);
3140 /* Perform the calculation. */
3142 case 15: /* extension space */
3144 case 24: /* ftoui */
3145 gen_vfp_toui(dp
, 0);
3147 case 25: /* ftouiz */
3148 gen_vfp_touiz(dp
, 0);
3150 case 26: /* ftosi */
3151 gen_vfp_tosi(dp
, 0);
3153 case 27: /* ftosiz */
3154 gen_vfp_tosiz(dp
, 0);
3156 default: /* undefined */
3157 g_assert_not_reached();
3160 default: /* undefined */
3164 /* Write back the result, if any. */
3166 gen_mov_vreg_F0(rd_is_dp
, rd
);
3169 /* break out of the loop if we have finished */
3174 if (op
== 15 && delta_m
== 0) {
3175 /* single source one-many */
3177 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3179 gen_mov_vreg_F0(dp
, rd
);
3183 /* Setup the next operands. */
3185 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3189 /* One source operand. */
3190 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3192 gen_mov_F0_vreg(dp
, rm
);
3194 /* Two source operands. */
3195 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
3197 gen_mov_F0_vreg(dp
, rn
);
3199 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3201 gen_mov_F1_vreg(dp
, rm
);
3209 /* Already handled by decodetree */
3212 /* Should never happen. */
3218 static inline bool use_goto_tb(DisasContext
*s
, target_ulong dest
)
3220 #ifndef CONFIG_USER_ONLY
3221 return (s
->base
.tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) ||
3222 ((s
->pc
- 1) & TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
);
3228 static void gen_goto_ptr(void)
3230 tcg_gen_lookup_and_goto_ptr();
3233 /* This will end the TB but doesn't guarantee we'll return to
3234 * cpu_loop_exec. Any live exit_requests will be processed as we
3235 * enter the next TB.
3237 static void gen_goto_tb(DisasContext
*s
, int n
, target_ulong dest
)
3239 if (use_goto_tb(s
, dest
)) {
3241 gen_set_pc_im(s
, dest
);
3242 tcg_gen_exit_tb(s
->base
.tb
, n
);
3244 gen_set_pc_im(s
, dest
);
3247 s
->base
.is_jmp
= DISAS_NORETURN
;
3250 static inline void gen_jmp (DisasContext
*s
, uint32_t dest
)
3252 if (unlikely(is_singlestepping(s
))) {
3253 /* An indirect jump so that we still trigger the debug exception. */
3258 gen_goto_tb(s
, 0, dest
);
3262 static inline void gen_mulxy(TCGv_i32 t0
, TCGv_i32 t1
, int x
, int y
)
3265 tcg_gen_sari_i32(t0
, t0
, 16);
3269 tcg_gen_sari_i32(t1
, t1
, 16);
3272 tcg_gen_mul_i32(t0
, t0
, t1
);
3275 /* Return the mask of PSR bits set by a MSR instruction. */
3276 static uint32_t msr_mask(DisasContext
*s
, int flags
, int spsr
)
3281 if (flags
& (1 << 0))
3283 if (flags
& (1 << 1))
3285 if (flags
& (1 << 2))
3287 if (flags
& (1 << 3))
3290 /* Mask out undefined bits. */
3291 mask
&= ~CPSR_RESERVED
;
3292 if (!arm_dc_feature(s
, ARM_FEATURE_V4T
)) {
3295 if (!arm_dc_feature(s
, ARM_FEATURE_V5
)) {
3296 mask
&= ~CPSR_Q
; /* V5TE in reality*/
3298 if (!arm_dc_feature(s
, ARM_FEATURE_V6
)) {
3299 mask
&= ~(CPSR_E
| CPSR_GE
);
3301 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB2
)) {
3304 /* Mask out execution state and reserved bits. */
3306 mask
&= ~(CPSR_EXEC
| CPSR_RESERVED
);
3308 /* Mask out privileged bits. */
3314 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3315 static int gen_set_psr(DisasContext
*s
, uint32_t mask
, int spsr
, TCGv_i32 t0
)
3319 /* ??? This is also undefined in system mode. */
3323 tmp
= load_cpu_field(spsr
);
3324 tcg_gen_andi_i32(tmp
, tmp
, ~mask
);
3325 tcg_gen_andi_i32(t0
, t0
, mask
);
3326 tcg_gen_or_i32(tmp
, tmp
, t0
);
3327 store_cpu_field(tmp
, spsr
);
3329 gen_set_cpsr(t0
, mask
);
3331 tcg_temp_free_i32(t0
);
3336 /* Returns nonzero if access to the PSR is not permitted. */
3337 static int gen_set_psr_im(DisasContext
*s
, uint32_t mask
, int spsr
, uint32_t val
)
3340 tmp
= tcg_temp_new_i32();
3341 tcg_gen_movi_i32(tmp
, val
);
3342 return gen_set_psr(s
, mask
, spsr
, tmp
);
3345 static bool msr_banked_access_decode(DisasContext
*s
, int r
, int sysm
, int rn
,
3346 int *tgtmode
, int *regno
)
3348 /* Decode the r and sysm fields of MSR/MRS banked accesses into
3349 * the target mode and register number, and identify the various
3350 * unpredictable cases.
3351 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
3352 * + executed in user mode
3353 * + using R15 as the src/dest register
3354 * + accessing an unimplemented register
3355 * + accessing a register that's inaccessible at current PL/security state*
3356 * + accessing a register that you could access with a different insn
3357 * We choose to UNDEF in all these cases.
3358 * Since we don't know which of the various AArch32 modes we are in
3359 * we have to defer some checks to runtime.
3360 * Accesses to Monitor mode registers from Secure EL1 (which implies
3361 * that EL3 is AArch64) must trap to EL3.
3363 * If the access checks fail this function will emit code to take
3364 * an exception and return false. Otherwise it will return true,
3365 * and set *tgtmode and *regno appropriately.
3367 int exc_target
= default_exception_el(s
);
3369 /* These instructions are present only in ARMv8, or in ARMv7 with the
3370 * Virtualization Extensions.
3372 if (!arm_dc_feature(s
, ARM_FEATURE_V8
) &&
3373 !arm_dc_feature(s
, ARM_FEATURE_EL2
)) {
3377 if (IS_USER(s
) || rn
== 15) {
3381 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
3382 * of registers into (r, sysm).
3385 /* SPSRs for other modes */
3387 case 0xe: /* SPSR_fiq */
3388 *tgtmode
= ARM_CPU_MODE_FIQ
;
3390 case 0x10: /* SPSR_irq */
3391 *tgtmode
= ARM_CPU_MODE_IRQ
;
3393 case 0x12: /* SPSR_svc */
3394 *tgtmode
= ARM_CPU_MODE_SVC
;
3396 case 0x14: /* SPSR_abt */
3397 *tgtmode
= ARM_CPU_MODE_ABT
;
3399 case 0x16: /* SPSR_und */
3400 *tgtmode
= ARM_CPU_MODE_UND
;
3402 case 0x1c: /* SPSR_mon */
3403 *tgtmode
= ARM_CPU_MODE_MON
;
3405 case 0x1e: /* SPSR_hyp */
3406 *tgtmode
= ARM_CPU_MODE_HYP
;
3408 default: /* unallocated */
3411 /* We arbitrarily assign SPSR a register number of 16. */
3414 /* general purpose registers for other modes */
3416 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
3417 *tgtmode
= ARM_CPU_MODE_USR
;
3420 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
3421 *tgtmode
= ARM_CPU_MODE_FIQ
;
3424 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
3425 *tgtmode
= ARM_CPU_MODE_IRQ
;
3426 *regno
= sysm
& 1 ? 13 : 14;
3428 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
3429 *tgtmode
= ARM_CPU_MODE_SVC
;
3430 *regno
= sysm
& 1 ? 13 : 14;
3432 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
3433 *tgtmode
= ARM_CPU_MODE_ABT
;
3434 *regno
= sysm
& 1 ? 13 : 14;
3436 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
3437 *tgtmode
= ARM_CPU_MODE_UND
;
3438 *regno
= sysm
& 1 ? 13 : 14;
3440 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
3441 *tgtmode
= ARM_CPU_MODE_MON
;
3442 *regno
= sysm
& 1 ? 13 : 14;
3444 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
3445 *tgtmode
= ARM_CPU_MODE_HYP
;
3446 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
3447 *regno
= sysm
& 1 ? 13 : 17;
3449 default: /* unallocated */
3454 /* Catch the 'accessing inaccessible register' cases we can detect
3455 * at translate time.
3458 case ARM_CPU_MODE_MON
:
3459 if (!arm_dc_feature(s
, ARM_FEATURE_EL3
) || s
->ns
) {
3462 if (s
->current_el
== 1) {
3463 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
3464 * then accesses to Mon registers trap to EL3
3470 case ARM_CPU_MODE_HYP
:
3472 * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
3473 * (and so we can forbid accesses from EL2 or below). elr_hyp
3474 * can be accessed also from Hyp mode, so forbid accesses from
3477 if (!arm_dc_feature(s
, ARM_FEATURE_EL2
) || s
->current_el
< 2 ||
3478 (s
->current_el
< 3 && *regno
!= 17)) {
3489 /* If we get here then some access check did not pass */
3490 gen_exception_insn(s
, 4, EXCP_UDEF
, syn_uncategorized(), exc_target
);
3494 static void gen_msr_banked(DisasContext
*s
, int r
, int sysm
, int rn
)
3496 TCGv_i32 tcg_reg
, tcg_tgtmode
, tcg_regno
;
3497 int tgtmode
= 0, regno
= 0;
3499 if (!msr_banked_access_decode(s
, r
, sysm
, rn
, &tgtmode
, ®no
)) {
3503 /* Sync state because msr_banked() can raise exceptions */
3504 gen_set_condexec(s
);
3505 gen_set_pc_im(s
, s
->pc
- 4);
3506 tcg_reg
= load_reg(s
, rn
);
3507 tcg_tgtmode
= tcg_const_i32(tgtmode
);
3508 tcg_regno
= tcg_const_i32(regno
);
3509 gen_helper_msr_banked(cpu_env
, tcg_reg
, tcg_tgtmode
, tcg_regno
);
3510 tcg_temp_free_i32(tcg_tgtmode
);
3511 tcg_temp_free_i32(tcg_regno
);
3512 tcg_temp_free_i32(tcg_reg
);
3513 s
->base
.is_jmp
= DISAS_UPDATE
;
3516 static void gen_mrs_banked(DisasContext
*s
, int r
, int sysm
, int rn
)
3518 TCGv_i32 tcg_reg
, tcg_tgtmode
, tcg_regno
;
3519 int tgtmode
= 0, regno
= 0;
3521 if (!msr_banked_access_decode(s
, r
, sysm
, rn
, &tgtmode
, ®no
)) {
3525 /* Sync state because mrs_banked() can raise exceptions */
3526 gen_set_condexec(s
);
3527 gen_set_pc_im(s
, s
->pc
- 4);
3528 tcg_reg
= tcg_temp_new_i32();
3529 tcg_tgtmode
= tcg_const_i32(tgtmode
);
3530 tcg_regno
= tcg_const_i32(regno
);
3531 gen_helper_mrs_banked(tcg_reg
, cpu_env
, tcg_tgtmode
, tcg_regno
);
3532 tcg_temp_free_i32(tcg_tgtmode
);
3533 tcg_temp_free_i32(tcg_regno
);
3534 store_reg(s
, rn
, tcg_reg
);
3535 s
->base
.is_jmp
= DISAS_UPDATE
;
3538 /* Store value to PC as for an exception return (ie don't
3539 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
3540 * will do the masking based on the new value of the Thumb bit.
3542 static void store_pc_exc_ret(DisasContext
*s
, TCGv_i32 pc
)
3544 tcg_gen_mov_i32(cpu_R
[15], pc
);
3545 tcg_temp_free_i32(pc
);
3548 /* Generate a v6 exception return. Marks both values as dead. */
3549 static void gen_rfe(DisasContext
*s
, TCGv_i32 pc
, TCGv_i32 cpsr
)
3551 store_pc_exc_ret(s
, pc
);
3552 /* The cpsr_write_eret helper will mask the low bits of PC
3553 * appropriately depending on the new Thumb bit, so it must
3554 * be called after storing the new PC.
3556 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
3559 gen_helper_cpsr_write_eret(cpu_env
, cpsr
);
3560 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
3563 tcg_temp_free_i32(cpsr
);
3564 /* Must exit loop to check un-masked IRQs */
3565 s
->base
.is_jmp
= DISAS_EXIT
;
3568 /* Generate an old-style exception return. Marks pc as dead. */
3569 static void gen_exception_return(DisasContext
*s
, TCGv_i32 pc
)
3571 gen_rfe(s
, pc
, load_cpu_field(spsr
));
3575 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
3576 * only call the helper when running single threaded TCG code to ensure
3577 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
3578 * just skip this instruction. Currently the SEV/SEVL instructions
3579 * which are *one* of many ways to wake the CPU from WFE are not
3580 * implemented so we can't sleep like WFI does.
3582 static void gen_nop_hint(DisasContext
*s
, int val
)
3585 /* When running in MTTCG we don't generate jumps to the yield and
3586 * WFE helpers as it won't affect the scheduling of other vCPUs.
3587 * If we wanted to more completely model WFE/SEV so we don't busy
3588 * spin unnecessarily we would need to do something more involved.
3591 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
3592 gen_set_pc_im(s
, s
->pc
);
3593 s
->base
.is_jmp
= DISAS_YIELD
;
3597 gen_set_pc_im(s
, s
->pc
);
3598 s
->base
.is_jmp
= DISAS_WFI
;
3601 if (!(tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
3602 gen_set_pc_im(s
, s
->pc
);
3603 s
->base
.is_jmp
= DISAS_WFE
;
3608 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
3614 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3616 static inline void gen_neon_add(int size
, TCGv_i32 t0
, TCGv_i32 t1
)
3619 case 0: gen_helper_neon_add_u8(t0
, t0
, t1
); break;
3620 case 1: gen_helper_neon_add_u16(t0
, t0
, t1
); break;
3621 case 2: tcg_gen_add_i32(t0
, t0
, t1
); break;
3626 static inline void gen_neon_rsb(int size
, TCGv_i32 t0
, TCGv_i32 t1
)
3629 case 0: gen_helper_neon_sub_u8(t0
, t1
, t0
); break;
3630 case 1: gen_helper_neon_sub_u16(t0
, t1
, t0
); break;
3631 case 2: tcg_gen_sub_i32(t0
, t1
, t0
); break;
3636 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3637 #define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
3638 #define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
3639 #define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
3640 #define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
3642 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3643 switch ((size << 1) | u) { \
3645 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3648 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3651 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3654 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3657 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3660 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3662 default: return 1; \
3665 #define GEN_NEON_INTEGER_OP(name) do { \
3666 switch ((size << 1) | u) { \
3668 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3671 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3674 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3677 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3680 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3683 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3685 default: return 1; \
3688 static TCGv_i32
neon_load_scratch(int scratch
)
3690 TCGv_i32 tmp
= tcg_temp_new_i32();
3691 tcg_gen_ld_i32(tmp
, cpu_env
, offsetof(CPUARMState
, vfp
.scratch
[scratch
]));
3695 static void neon_store_scratch(int scratch
, TCGv_i32 var
)
3697 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUARMState
, vfp
.scratch
[scratch
]));
3698 tcg_temp_free_i32(var
);
3701 static inline TCGv_i32
neon_get_scalar(int size
, int reg
)
3705 tmp
= neon_load_reg(reg
& 7, reg
>> 4);
3707 gen_neon_dup_high16(tmp
);
3709 gen_neon_dup_low16(tmp
);
3712 tmp
= neon_load_reg(reg
& 15, reg
>> 4);
3717 static int gen_neon_unzip(int rd
, int rm
, int size
, int q
)
3721 if (!q
&& size
== 2) {
3724 pd
= vfp_reg_ptr(true, rd
);
3725 pm
= vfp_reg_ptr(true, rm
);
3729 gen_helper_neon_qunzip8(pd
, pm
);
3732 gen_helper_neon_qunzip16(pd
, pm
);
3735 gen_helper_neon_qunzip32(pd
, pm
);
3743 gen_helper_neon_unzip8(pd
, pm
);
3746 gen_helper_neon_unzip16(pd
, pm
);
3752 tcg_temp_free_ptr(pd
);
3753 tcg_temp_free_ptr(pm
);
3757 static int gen_neon_zip(int rd
, int rm
, int size
, int q
)
3761 if (!q
&& size
== 2) {
3764 pd
= vfp_reg_ptr(true, rd
);
3765 pm
= vfp_reg_ptr(true, rm
);
3769 gen_helper_neon_qzip8(pd
, pm
);
3772 gen_helper_neon_qzip16(pd
, pm
);
3775 gen_helper_neon_qzip32(pd
, pm
);
3783 gen_helper_neon_zip8(pd
, pm
);
3786 gen_helper_neon_zip16(pd
, pm
);
3792 tcg_temp_free_ptr(pd
);
3793 tcg_temp_free_ptr(pm
);
3797 static void gen_neon_trn_u8(TCGv_i32 t0
, TCGv_i32 t1
)
3801 rd
= tcg_temp_new_i32();
3802 tmp
= tcg_temp_new_i32();
3804 tcg_gen_shli_i32(rd
, t0
, 8);
3805 tcg_gen_andi_i32(rd
, rd
, 0xff00ff00);
3806 tcg_gen_andi_i32(tmp
, t1
, 0x00ff00ff);
3807 tcg_gen_or_i32(rd
, rd
, tmp
);
3809 tcg_gen_shri_i32(t1
, t1
, 8);
3810 tcg_gen_andi_i32(t1
, t1
, 0x00ff00ff);
3811 tcg_gen_andi_i32(tmp
, t0
, 0xff00ff00);
3812 tcg_gen_or_i32(t1
, t1
, tmp
);
3813 tcg_gen_mov_i32(t0
, rd
);
3815 tcg_temp_free_i32(tmp
);
3816 tcg_temp_free_i32(rd
);
3819 static void gen_neon_trn_u16(TCGv_i32 t0
, TCGv_i32 t1
)
3823 rd
= tcg_temp_new_i32();
3824 tmp
= tcg_temp_new_i32();
3826 tcg_gen_shli_i32(rd
, t0
, 16);
3827 tcg_gen_andi_i32(tmp
, t1
, 0xffff);
3828 tcg_gen_or_i32(rd
, rd
, tmp
);
3829 tcg_gen_shri_i32(t1
, t1
, 16);
3830 tcg_gen_andi_i32(tmp
, t0
, 0xffff0000);
3831 tcg_gen_or_i32(t1
, t1
, tmp
);
3832 tcg_gen_mov_i32(t0
, rd
);
3834 tcg_temp_free_i32(tmp
);
3835 tcg_temp_free_i32(rd
);
3843 } const neon_ls_element_type
[11] = {
3857 /* Translate a NEON load/store element instruction. Return nonzero if the
3858 instruction is invalid. */
3859 static int disas_neon_ls_insn(DisasContext
*s
, uint32_t insn
)
3879 /* FIXME: this access check should not take precedence over UNDEF
3880 * for invalid encodings; we will generate incorrect syndrome information
3881 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3883 if (s
->fp_excp_el
) {
3884 gen_exception_insn(s
, 4, EXCP_UDEF
,
3885 syn_simd_access_trap(1, 0xe, false), s
->fp_excp_el
);
3889 if (!s
->vfp_enabled
)
3891 VFP_DREG_D(rd
, insn
);
3892 rn
= (insn
>> 16) & 0xf;
3894 load
= (insn
& (1 << 21)) != 0;
3895 endian
= s
->be_data
;
3896 mmu_idx
= get_mem_index(s
);
3897 if ((insn
& (1 << 23)) == 0) {
3898 /* Load store all elements. */
3899 op
= (insn
>> 8) & 0xf;
3900 size
= (insn
>> 6) & 3;
3903 /* Catch UNDEF cases for bad values of align field */
3906 if (((insn
>> 5) & 1) == 1) {
3911 if (((insn
>> 4) & 3) == 3) {
3918 nregs
= neon_ls_element_type
[op
].nregs
;
3919 interleave
= neon_ls_element_type
[op
].interleave
;
3920 spacing
= neon_ls_element_type
[op
].spacing
;
3921 if (size
== 3 && (interleave
| spacing
) != 1) {
3924 /* For our purposes, bytes are always little-endian. */
3928 /* Consecutive little-endian elements from a single register
3929 * can be promoted to a larger little-endian operation.
3931 if (interleave
== 1 && endian
== MO_LE
) {
3934 tmp64
= tcg_temp_new_i64();
3935 addr
= tcg_temp_new_i32();
3936 tmp2
= tcg_const_i32(1 << size
);
3937 load_reg_var(s
, addr
, rn
);
3938 for (reg
= 0; reg
< nregs
; reg
++) {
3939 for (n
= 0; n
< 8 >> size
; n
++) {
3941 for (xs
= 0; xs
< interleave
; xs
++) {
3942 int tt
= rd
+ reg
+ spacing
* xs
;
3945 gen_aa32_ld_i64(s
, tmp64
, addr
, mmu_idx
, endian
| size
);
3946 neon_store_element64(tt
, n
, size
, tmp64
);
3948 neon_load_element64(tmp64
, tt
, n
, size
);
3949 gen_aa32_st_i64(s
, tmp64
, addr
, mmu_idx
, endian
| size
);
3951 tcg_gen_add_i32(addr
, addr
, tmp2
);
3955 tcg_temp_free_i32(addr
);
3956 tcg_temp_free_i32(tmp2
);
3957 tcg_temp_free_i64(tmp64
);
3958 stride
= nregs
* interleave
* 8;
3960 size
= (insn
>> 10) & 3;
3962 /* Load single element to all lanes. */
3963 int a
= (insn
>> 4) & 1;
3967 size
= (insn
>> 6) & 3;
3968 nregs
= ((insn
>> 8) & 3) + 1;
3971 if (nregs
!= 4 || a
== 0) {
3974 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3977 if (nregs
== 1 && a
== 1 && size
== 0) {
3980 if (nregs
== 3 && a
== 1) {
3983 addr
= tcg_temp_new_i32();
3984 load_reg_var(s
, addr
, rn
);
3986 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
3987 * VLD2/3/4 to all lanes: bit 5 indicates register stride.
3989 stride
= (insn
& (1 << 5)) ? 2 : 1;
3990 vec_size
= nregs
== 1 ? stride
* 8 : 8;
3992 tmp
= tcg_temp_new_i32();
3993 for (reg
= 0; reg
< nregs
; reg
++) {
3994 gen_aa32_ld_i32(s
, tmp
, addr
, get_mem_index(s
),
3996 if ((rd
& 1) && vec_size
== 16) {
3997 /* We cannot write 16 bytes at once because the
3998 * destination is unaligned.
4000 tcg_gen_gvec_dup_i32(size
, neon_reg_offset(rd
, 0),
4002 tcg_gen_gvec_mov(0, neon_reg_offset(rd
+ 1, 0),
4003 neon_reg_offset(rd
, 0), 8, 8);
4005 tcg_gen_gvec_dup_i32(size
, neon_reg_offset(rd
, 0),
4006 vec_size
, vec_size
, tmp
);
4008 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4011 tcg_temp_free_i32(tmp
);
4012 tcg_temp_free_i32(addr
);
4013 stride
= (1 << size
) * nregs
;
4015 /* Single element. */
4016 int idx
= (insn
>> 4) & 0xf;
4020 reg_idx
= (insn
>> 5) & 7;
4024 reg_idx
= (insn
>> 6) & 3;
4025 stride
= (insn
& (1 << 5)) ? 2 : 1;
4028 reg_idx
= (insn
>> 7) & 1;
4029 stride
= (insn
& (1 << 6)) ? 2 : 1;
4034 nregs
= ((insn
>> 8) & 3) + 1;
4035 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4038 if (((idx
& (1 << size
)) != 0) ||
4039 (size
== 2 && ((idx
& 3) == 1 || (idx
& 3) == 2))) {
4044 if ((idx
& 1) != 0) {
4049 if (size
== 2 && (idx
& 2) != 0) {
4054 if ((size
== 2) && ((idx
& 3) == 3)) {
4061 if ((rd
+ stride
* (nregs
- 1)) > 31) {
4062 /* Attempts to write off the end of the register file
4063 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4064 * the neon_load_reg() would write off the end of the array.
4068 tmp
= tcg_temp_new_i32();
4069 addr
= tcg_temp_new_i32();
4070 load_reg_var(s
, addr
, rn
);
4071 for (reg
= 0; reg
< nregs
; reg
++) {
4073 gen_aa32_ld_i32(s
, tmp
, addr
, get_mem_index(s
),
4075 neon_store_element(rd
, reg_idx
, size
, tmp
);
4076 } else { /* Store */
4077 neon_load_element(tmp
, rd
, reg_idx
, size
);
4078 gen_aa32_st_i32(s
, tmp
, addr
, get_mem_index(s
),
4082 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4084 tcg_temp_free_i32(addr
);
4085 tcg_temp_free_i32(tmp
);
4086 stride
= nregs
* (1 << size
);
4092 base
= load_reg(s
, rn
);
4094 tcg_gen_addi_i32(base
, base
, stride
);
4097 index
= load_reg(s
, rm
);
4098 tcg_gen_add_i32(base
, base
, index
);
4099 tcg_temp_free_i32(index
);
4101 store_reg(s
, rn
, base
);
/* Narrow a 64-bit vector of 2*size elements to size elements by
 * truncation (keep the low half of each element); size: 0=8-bit,
 * 1=16-bit, 2=32-bit results.
 */
static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_extrl_i64_i32(dest, src); break;
    default: abort();
    }
}
/* Signed saturating narrow: signed input, signed result; sets QC in
 * CPSR (via cpu_env) on saturation.
 */
static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}
/* Unsigned saturating narrow: unsigned input, unsigned result; sets QC
 * on saturation.
 */
static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}
/* Saturating narrow with signed input but unsigned result (VQMOVUN);
 * sets QC on saturation.
 */
static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
/* Shift (left by a variable amount) for the narrowing-shift insns.
 * q selects rounding (VQRSHRN-style) vs plain shift; u selects the
 * unsigned vs signed helper. Only 16- and 32-bit element sizes occur
 * here: size==0 inputs are widened before narrowing shifts.
 */
static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
/* Widen each element of src to twice its width (zero-extend if u,
 * sign-extend otherwise). Consumes (frees) src.
 */
static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
/* Pairwise long add on the implicit cpu_V0/cpu_V1 operand pair
 * (CPU_V001 expands to cpu_V0, cpu_V0, cpu_V1). size is the *input*
 * element size, so size==2 is a plain 64-bit add.
 */
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}
/* Long subtract on the implicit cpu_V0/cpu_V1 pair; mirror of
 * gen_neon_addl().
 */
static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}
/* Negate each widened element of var in place; size==2 is a single
 * 64-bit negate.
 */
static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}
/* Saturating long add: op0 += op1 with signed saturation, accumulating
 * into op0 and setting QC (via cpu_env) on saturation. Only 32- and
 * 64-bit element sizes are valid here.
 */
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
/* Widening multiply: dest = a * b with double-width result.
 * The (size << 1) | u switch selects signed/unsigned 8/16/32-bit
 * input helpers. Consumes (frees) a and b.
 */
static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        /* 32x32->64: gen_muls_i64_i32 frees its inputs itself. */
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
/* Dispatch the four narrowing variants: op selects truncating vs
 * saturating, u selects the signedness of the saturation.
 */
static void gen_neon_narrow_op(int op, int u, int size,
                               TCGv_i32 dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
4290 /* Symbolic constants for op fields for Neon 3-register same-length.
4291 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4294 #define NEON_3R_VHADD 0
4295 #define NEON_3R_VQADD 1
4296 #define NEON_3R_VRHADD 2
4297 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4298 #define NEON_3R_VHSUB 4
4299 #define NEON_3R_VQSUB 5
4300 #define NEON_3R_VCGT 6
4301 #define NEON_3R_VCGE 7
4302 #define NEON_3R_VSHL 8
4303 #define NEON_3R_VQSHL 9
4304 #define NEON_3R_VRSHL 10
4305 #define NEON_3R_VQRSHL 11
4306 #define NEON_3R_VMAX 12
4307 #define NEON_3R_VMIN 13
4308 #define NEON_3R_VABD 14
4309 #define NEON_3R_VABA 15
4310 #define NEON_3R_VADD_VSUB 16
4311 #define NEON_3R_VTST_VCEQ 17
4312 #define NEON_3R_VML 18 /* VMLA, VMLS */
4313 #define NEON_3R_VMUL 19
4314 #define NEON_3R_VPMAX 20
4315 #define NEON_3R_VPMIN 21
4316 #define NEON_3R_VQDMULH_VQRDMULH 22
4317 #define NEON_3R_VPADD_VQRDMLAH 23
4318 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
4319 #define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
4320 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4321 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4322 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4323 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4324 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4325 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
/* Each entry in this array has bit n set if the 3-reg-same insn allows
 * size value n (otherwise it will UNDEF). Since unallocated op values
 * will have no bits set they always UNDEF.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
4362 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4363 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4366 #define NEON_2RM_VREV64 0
4367 #define NEON_2RM_VREV32 1
4368 #define NEON_2RM_VREV16 2
4369 #define NEON_2RM_VPADDL 4
4370 #define NEON_2RM_VPADDL_U 5
4371 #define NEON_2RM_AESE 6 /* Includes AESD */
4372 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
4373 #define NEON_2RM_VCLS 8
4374 #define NEON_2RM_VCLZ 9
4375 #define NEON_2RM_VCNT 10
4376 #define NEON_2RM_VMVN 11
4377 #define NEON_2RM_VPADAL 12
4378 #define NEON_2RM_VPADAL_U 13
4379 #define NEON_2RM_VQABS 14
4380 #define NEON_2RM_VQNEG 15
4381 #define NEON_2RM_VCGT0 16
4382 #define NEON_2RM_VCGE0 17
4383 #define NEON_2RM_VCEQ0 18
4384 #define NEON_2RM_VCLE0 19
4385 #define NEON_2RM_VCLT0 20
4386 #define NEON_2RM_SHA1H 21
4387 #define NEON_2RM_VABS 22
4388 #define NEON_2RM_VNEG 23
4389 #define NEON_2RM_VCGT0_F 24
4390 #define NEON_2RM_VCGE0_F 25
4391 #define NEON_2RM_VCEQ0_F 26
4392 #define NEON_2RM_VCLE0_F 27
4393 #define NEON_2RM_VCLT0_F 28
4394 #define NEON_2RM_VABS_F 30
4395 #define NEON_2RM_VNEG_F 31
4396 #define NEON_2RM_VSWP 32
4397 #define NEON_2RM_VTRN 33
4398 #define NEON_2RM_VUZP 34
4399 #define NEON_2RM_VZIP 35
4400 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4401 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4402 #define NEON_2RM_VSHLL 38
4403 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
4404 #define NEON_2RM_VRINTN 40
4405 #define NEON_2RM_VRINTX 41
4406 #define NEON_2RM_VRINTA 42
4407 #define NEON_2RM_VRINTZ 43
4408 #define NEON_2RM_VCVT_F16_F32 44
4409 #define NEON_2RM_VRINTM 45
4410 #define NEON_2RM_VCVT_F32_F16 46
4411 #define NEON_2RM_VRINTP 47
4412 #define NEON_2RM_VCVTAU 48
4413 #define NEON_2RM_VCVTAS 49
4414 #define NEON_2RM_VCVTNU 50
4415 #define NEON_2RM_VCVTNS 51
4416 #define NEON_2RM_VCVTPU 52
4417 #define NEON_2RM_VCVTPS 53
4418 #define NEON_2RM_VCVTMU 54
4419 #define NEON_2RM_VCVTMS 55
4420 #define NEON_2RM_VRECPE 56
4421 #define NEON_2RM_VRSQRTE 57
4422 #define NEON_2RM_VRECPE_F 58
4423 #define NEON_2RM_VRSQRTE_F 59
4424 #define NEON_2RM_VCVT_FS 60
4425 #define NEON_2RM_VCVT_FU 61
4426 #define NEON_2RM_VCVT_SF 62
4427 #define NEON_2RM_VCVT_UF 63
4429 static int neon_2rm_is_float_op(int op
)
4431 /* Return true if this neon 2reg-misc op is float-to-float */
4432 return (op
== NEON_2RM_VABS_F
|| op
== NEON_2RM_VNEG_F
||
4433 (op
>= NEON_2RM_VRINTN
&& op
<= NEON_2RM_VRINTZ
) ||
4434 op
== NEON_2RM_VRINTM
||
4435 (op
>= NEON_2RM_VRINTP
&& op
<= NEON_2RM_VCVTMS
) ||
4436 op
>= NEON_2RM_VRECPE_F
);
/* Return true if this neon 2reg-misc op is ARMv8 and up (the round-
 * to-integral and convert-with-rounding-mode group).
 */
static bool neon_2rm_is_v8_op(int op)
{
    switch (op) {
    case NEON_2RM_VRINTN:
    case NEON_2RM_VRINTA:
    case NEON_2RM_VRINTM:
    case NEON_2RM_VRINTP:
    case NEON_2RM_VRINTZ:
    case NEON_2RM_VRINTX:
    case NEON_2RM_VCVTAU:
    case NEON_2RM_VCVTAS:
    case NEON_2RM_VCVTNU:
    case NEON_2RM_VCVTNS:
    case NEON_2RM_VCVTPU:
    case NEON_2RM_VCVTPS:
    case NEON_2RM_VCVTMU:
    case NEON_2RM_VCVTMS:
        return true;
    default:
        return false;
    }
}
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
/* Expand a v8.1 simd helper over Dd/Dn/Dm (q selects 64- vs 128-bit
 * operands). Returns 0 if the insn was expanded, 1 if the RDM feature
 * is absent so the caller should treat the encoding as UNDEF.
 */
static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                         int q, int rd, int rn, int rm)
{
    if (dc_isar_feature(aa32_rdm, s)) {
        int opr_sz = (1 + q) * 8;
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm), cpu_env,
                           opr_sz, opr_sz, 0, fn);
        return 0;
    }
    return 1;
}
/* SSRA (signed shift right and accumulate): per-element-size scalar
 * and vector expanders plus the gvec descriptor table indexed by vece.
 */

static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static const TCGOpcode vecop_list_ssra[] = {
    INDEX_op_sari_vec, INDEX_op_add_vec, 0
};

const GVecGen2i ssra_op[4] = {
    { .fni8 = gen_ssra8_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_8 },
    { .fni8 = gen_ssra16_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_16 },
    { .fni4 = gen_ssra32_i32,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_32 },
    { .fni8 = gen_ssra64_i64,
      .fniv = gen_ssra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_64 },
};
/* USRA (unsigned shift right and accumulate): mirror of the SSRA
 * expanders using logical right shifts.
 */

static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static const TCGOpcode vecop_list_usra[] = {
    INDEX_op_shri_vec, INDEX_op_add_vec, 0
};

const GVecGen2i usra_op[4] = {
    { .fni8 = gen_usra8_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_8, },
    { .fni8 = gen_usra16_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_16, },
    { .fni4 = gen_usra32_i32,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_32, },
    { .fni8 = gen_usra64_i64,
      .fniv = gen_usra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_64, },
};
/* SRI (shift right and insert): shift a right and deposit the result
 * into d, keeping d's bits above the inserted field. The i64 variants
 * mask per element; the 32/64-bit variants use deposit directly.
 */

static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        /* Shift of zero inserts all of a into d. */
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        /* m masks the top sh bits of each element, which d keeps. */
        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
        tcg_gen_shri_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 };

const GVecGen2i sri_op[4] = {
    { .fni8 = gen_shr8_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_8 },
    { .fni8 = gen_shr16_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_16 },
    { .fni4 = gen_shr32_ins_i32,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_32 },
    { .fni8 = gen_shr64_ins_i64,
      .fniv = gen_shr_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_64 },
};
/* SLI (shift left and insert): shift a left and deposit into d,
 * keeping d's low bits below the inserted field.
 */

static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        /* m masks the low sh bits of each element, which d keeps. */
        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
        tcg_gen_shli_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 };

const GVecGen2i sli_op[4] = {
    { .fni8 = gen_shl8_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_8 },
    { .fni8 = gen_shl16_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_16 },
    { .fni4 = gen_shl32_ins_i32,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_32 },
    { .fni8 = gen_shl64_ins_i64,
      .fniv = gen_shl_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_64 },
};
/* VMLA/VMLS (multiply accumulate / multiply subtract) expanders.
 * Each clobbers a with a*b, then adds to / subtracts from d.
 */

static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_add_u8(d, d, a);
}

static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_sub_u8(d, d, a);
}

static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_add_u16(d, d, a);
}

static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_sub_u16(d, d, a);
}

static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_add_i32(d, d, a);
}

static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_sub_i32(d, d, a);
}

static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_add_i64(d, d, a);
}

static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_sub_i64(d, d, a);
}

static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, d, d, a);
}

/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
 * these tables are shared with AArch64 which does support them.
 */

static const TCGOpcode vecop_list_mla[] = {
    INDEX_op_mul_vec, INDEX_op_add_vec, 0
};

static const TCGOpcode vecop_list_mls[] = {
    INDEX_op_mul_vec, INDEX_op_sub_vec, 0
};

const GVecGen3 mla_op[4] = {
    { .fni4 = gen_mla8_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_8 },
    { .fni4 = gen_mla16_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_16 },
    { .fni4 = gen_mla32_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_32 },
    { .fni8 = gen_mla64_i64,
      .fniv = gen_mla_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_64 },
};

const GVecGen3 mls_op[4] = {
    { .fni4 = gen_mls8_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_8 },
    { .fni4 = gen_mls16_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_16 },
    { .fni4 = gen_mls32_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_32 },
    { .fni8 = gen_mls64_i64,
      .fniv = gen_mls_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_64 },
};
/* CMTST : test is "if (X & Y != 0)". Result is all-ones per element
 * when the test passes (setcond produces 0/1, negated to 0/-1).
 */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);
}

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}

static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 };

const GVecGen3 cmtst_op[4] = {
    { .fni4 = gen_helper_neon_tst_u8,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_8 },
    { .fni4 = gen_helper_neon_tst_u16,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_16 },
    { .fni4 = gen_cmtst_i32,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_32 },
    { .fni8 = gen_cmtst_i64,
      .fniv = gen_cmtst_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_64 },
};
/* VQADD (unsigned): t = usadd(a, b); compare against the wrapping add
 * to detect saturation and accumulate the sticky QC flag into sat.
 */
static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_usadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_uqadd[] = {
    INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

const GVecGen4 uqadd_op[4] = {
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_b,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_8 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_h,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_16 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_s,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_32 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_d,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_64 },
};
/* VQADD (signed): as gen_uqadd_vec but with signed saturating add. */
static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_ssadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_sqadd[] = {
    INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

const GVecGen4 sqadd_op[4] = {
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_b,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_h,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_s,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_d,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_64 },
};
/* VQSUB (unsigned): saturating subtract with sticky QC detection. */
static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_ussub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_uqsub[] = {
    INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

const GVecGen4 uqsub_op[4] = {
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_b,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_h,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_s,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_d,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_64 },
};
/* VQSUB (signed): saturating subtract with sticky QC detection. */
static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_sssub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

static const TCGOpcode vecop_list_sqsub[] = {
    INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

const GVecGen4 sqsub_op[4] = {
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_b,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_h,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_s,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_d,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_64 },
};
5138 /* Translate a NEON data processing instruction. Return nonzero if the
5139 instruction is invalid.
5140 We process data in a mixture of 32-bit and 64-bit chunks.
5141 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
5143 static int disas_neon_data_insn(DisasContext
*s
, uint32_t insn
)
5147 int rd
, rn
, rm
, rd_ofs
, rn_ofs
, rm_ofs
;
5156 TCGv_i32 tmp
, tmp2
, tmp3
, tmp4
, tmp5
;
5157 TCGv_ptr ptr1
, ptr2
, ptr3
;
5160 /* FIXME: this access check should not take precedence over UNDEF
5161 * for invalid encodings; we will generate incorrect syndrome information
5162 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5164 if (s
->fp_excp_el
) {
5165 gen_exception_insn(s
, 4, EXCP_UDEF
,
5166 syn_simd_access_trap(1, 0xe, false), s
->fp_excp_el
);
5170 if (!s
->vfp_enabled
)
5172 q
= (insn
& (1 << 6)) != 0;
5173 u
= (insn
>> 24) & 1;
5174 VFP_DREG_D(rd
, insn
);
5175 VFP_DREG_N(rn
, insn
);
5176 VFP_DREG_M(rm
, insn
);
5177 size
= (insn
>> 20) & 3;
5178 vec_size
= q
? 16 : 8;
5179 rd_ofs
= neon_reg_offset(rd
, 0);
5180 rn_ofs
= neon_reg_offset(rn
, 0);
5181 rm_ofs
= neon_reg_offset(rm
, 0);
5183 if ((insn
& (1 << 23)) == 0) {
5184 /* Three register same length. */
5185 op
= ((insn
>> 7) & 0x1e) | ((insn
>> 4) & 1);
5186 /* Catch invalid op and bad size combinations: UNDEF */
5187 if ((neon_3r_sizes
[op
] & (1 << size
)) == 0) {
5190 /* All insns of this form UNDEF for either this condition or the
5191 * superset of cases "Q==1"; we catch the latter later.
5193 if (q
&& ((rd
| rn
| rm
) & 1)) {
5198 /* The SHA-1/SHA-256 3-register instructions require special
5199 * treatment here, as their size field is overloaded as an
5200 * op type selector, and they all consume their input in a
5206 if (!u
) { /* SHA-1 */
5207 if (!dc_isar_feature(aa32_sha1
, s
)) {
5210 ptr1
= vfp_reg_ptr(true, rd
);
5211 ptr2
= vfp_reg_ptr(true, rn
);
5212 ptr3
= vfp_reg_ptr(true, rm
);
5213 tmp4
= tcg_const_i32(size
);
5214 gen_helper_crypto_sha1_3reg(ptr1
, ptr2
, ptr3
, tmp4
);
5215 tcg_temp_free_i32(tmp4
);
5216 } else { /* SHA-256 */
5217 if (!dc_isar_feature(aa32_sha2
, s
) || size
== 3) {
5220 ptr1
= vfp_reg_ptr(true, rd
);
5221 ptr2
= vfp_reg_ptr(true, rn
);
5222 ptr3
= vfp_reg_ptr(true, rm
);
5225 gen_helper_crypto_sha256h(ptr1
, ptr2
, ptr3
);
5228 gen_helper_crypto_sha256h2(ptr1
, ptr2
, ptr3
);
5231 gen_helper_crypto_sha256su1(ptr1
, ptr2
, ptr3
);
5235 tcg_temp_free_ptr(ptr1
);
5236 tcg_temp_free_ptr(ptr2
);
5237 tcg_temp_free_ptr(ptr3
);
5240 case NEON_3R_VPADD_VQRDMLAH
:
5247 return do_v81_helper(s
, gen_helper_gvec_qrdmlah_s16
,
5250 return do_v81_helper(s
, gen_helper_gvec_qrdmlah_s32
,
5255 case NEON_3R_VFM_VQRDMLSH
:
5266 return do_v81_helper(s
, gen_helper_gvec_qrdmlsh_s16
,
5269 return do_v81_helper(s
, gen_helper_gvec_qrdmlsh_s32
,
5274 case NEON_3R_LOGIC
: /* Logic ops. */
5275 switch ((u
<< 2) | size
) {
5277 tcg_gen_gvec_and(0, rd_ofs
, rn_ofs
, rm_ofs
,
5278 vec_size
, vec_size
);
5281 tcg_gen_gvec_andc(0, rd_ofs
, rn_ofs
, rm_ofs
,
5282 vec_size
, vec_size
);
5285 tcg_gen_gvec_or(0, rd_ofs
, rn_ofs
, rm_ofs
,
5286 vec_size
, vec_size
);
5289 tcg_gen_gvec_orc(0, rd_ofs
, rn_ofs
, rm_ofs
,
5290 vec_size
, vec_size
);
5293 tcg_gen_gvec_xor(0, rd_ofs
, rn_ofs
, rm_ofs
,
5294 vec_size
, vec_size
);
5297 tcg_gen_gvec_bitsel(MO_8
, rd_ofs
, rd_ofs
, rn_ofs
, rm_ofs
,
5298 vec_size
, vec_size
);
5301 tcg_gen_gvec_bitsel(MO_8
, rd_ofs
, rm_ofs
, rn_ofs
, rd_ofs
,
5302 vec_size
, vec_size
);
5305 tcg_gen_gvec_bitsel(MO_8
, rd_ofs
, rm_ofs
, rd_ofs
, rn_ofs
,
5306 vec_size
, vec_size
);
5311 case NEON_3R_VADD_VSUB
:
5313 tcg_gen_gvec_sub(size
, rd_ofs
, rn_ofs
, rm_ofs
,
5314 vec_size
, vec_size
);
5316 tcg_gen_gvec_add(size
, rd_ofs
, rn_ofs
, rm_ofs
,
5317 vec_size
, vec_size
);
5322 tcg_gen_gvec_4(rd_ofs
, offsetof(CPUARMState
, vfp
.qc
),
5323 rn_ofs
, rm_ofs
, vec_size
, vec_size
,
5324 (u
? uqadd_op
: sqadd_op
) + size
);
5328 tcg_gen_gvec_4(rd_ofs
, offsetof(CPUARMState
, vfp
.qc
),
5329 rn_ofs
, rm_ofs
, vec_size
, vec_size
,
5330 (u
? uqsub_op
: sqsub_op
) + size
);
5333 case NEON_3R_VMUL
: /* VMUL */
5335 /* Polynomial case allows only P8 and is handled below. */
5340 tcg_gen_gvec_mul(size
, rd_ofs
, rn_ofs
, rm_ofs
,
5341 vec_size
, vec_size
);
5346 case NEON_3R_VML
: /* VMLA, VMLS */
5347 tcg_gen_gvec_3(rd_ofs
, rn_ofs
, rm_ofs
, vec_size
, vec_size
,
5348 u
? &mls_op
[size
] : &mla_op
[size
]);
5351 case NEON_3R_VTST_VCEQ
:
5353 tcg_gen_gvec_cmp(TCG_COND_EQ
, size
, rd_ofs
, rn_ofs
, rm_ofs
,
5354 vec_size
, vec_size
);
5356 tcg_gen_gvec_3(rd_ofs
, rn_ofs
, rm_ofs
,
5357 vec_size
, vec_size
, &cmtst_op
[size
]);
5362 tcg_gen_gvec_cmp(u
? TCG_COND_GTU
: TCG_COND_GT
, size
,
5363 rd_ofs
, rn_ofs
, rm_ofs
, vec_size
, vec_size
);
5367 tcg_gen_gvec_cmp(u
? TCG_COND_GEU
: TCG_COND_GE
, size
,
5368 rd_ofs
, rn_ofs
, rm_ofs
, vec_size
, vec_size
);
5373 tcg_gen_gvec_umax(size
, rd_ofs
, rn_ofs
, rm_ofs
,
5374 vec_size
, vec_size
);
5376 tcg_gen_gvec_smax(size
, rd_ofs
, rn_ofs
, rm_ofs
,
5377 vec_size
, vec_size
);
5382 tcg_gen_gvec_umin(size
, rd_ofs
, rn_ofs
, rm_ofs
,
5383 vec_size
, vec_size
);
5385 tcg_gen_gvec_smin(size
, rd_ofs
, rn_ofs
, rm_ofs
,
5386 vec_size
, vec_size
);
5392 /* 64-bit element instructions. */
5393 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5394 neon_load_reg64(cpu_V0
, rn
+ pass
);
5395 neon_load_reg64(cpu_V1
, rm
+ pass
);
5399 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
5401 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
5406 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
5409 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
5415 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
5417 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
5420 case NEON_3R_VQRSHL
:
5422 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
5425 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
5432 neon_store_reg64(cpu_V0
, rd
+ pass
);
5441 case NEON_3R_VQRSHL
:
5444 /* Shift instruction operands are reversed. */
5450 case NEON_3R_VPADD_VQRDMLAH
:
5455 case NEON_3R_FLOAT_ARITH
:
5456 pairwise
= (u
&& size
< 2); /* if VPADD (float) */
5458 case NEON_3R_FLOAT_MINMAX
:
5459 pairwise
= u
; /* if VPMIN/VPMAX (float) */
5461 case NEON_3R_FLOAT_CMP
:
5463 /* no encoding for U=0 C=1x */
5467 case NEON_3R_FLOAT_ACMP
:
5472 case NEON_3R_FLOAT_MISC
:
5473 /* VMAXNM/VMINNM in ARMv8 */
5474 if (u
&& !arm_dc_feature(s
, ARM_FEATURE_V8
)) {
5478 case NEON_3R_VFM_VQRDMLSH
:
5479 if (!arm_dc_feature(s
, ARM_FEATURE_VFP4
)) {
5487 if (pairwise
&& q
) {
5488 /* All the pairwise insns UNDEF if Q is set */
5492 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5497 tmp
= neon_load_reg(rn
, 0);
5498 tmp2
= neon_load_reg(rn
, 1);
5500 tmp
= neon_load_reg(rm
, 0);
5501 tmp2
= neon_load_reg(rm
, 1);
5505 tmp
= neon_load_reg(rn
, pass
);
5506 tmp2
= neon_load_reg(rm
, pass
);
5510 GEN_NEON_INTEGER_OP(hadd
);
5512 case NEON_3R_VRHADD
:
5513 GEN_NEON_INTEGER_OP(rhadd
);
5516 GEN_NEON_INTEGER_OP(hsub
);
5519 GEN_NEON_INTEGER_OP(shl
);
5522 GEN_NEON_INTEGER_OP_ENV(qshl
);
5525 GEN_NEON_INTEGER_OP(rshl
);
5527 case NEON_3R_VQRSHL
:
5528 GEN_NEON_INTEGER_OP_ENV(qrshl
);
5531 GEN_NEON_INTEGER_OP(abd
);
5534 GEN_NEON_INTEGER_OP(abd
);
5535 tcg_temp_free_i32(tmp2
);
5536 tmp2
= neon_load_reg(rd
, pass
);
5537 gen_neon_add(size
, tmp
, tmp2
);
5540 /* VMUL.P8; other cases already eliminated. */
5541 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
5544 GEN_NEON_INTEGER_OP(pmax
);
5547 GEN_NEON_INTEGER_OP(pmin
);
5549 case NEON_3R_VQDMULH_VQRDMULH
: /* Multiply high. */
5550 if (!u
) { /* VQDMULH */
5553 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5556 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5560 } else { /* VQRDMULH */
5563 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5566 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5572 case NEON_3R_VPADD_VQRDMLAH
:
5574 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
5575 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
5576 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
5580 case NEON_3R_FLOAT_ARITH
: /* Floating point arithmetic. */
5582 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5583 switch ((u
<< 2) | size
) {
5586 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5589 gen_helper_vfp_subs(tmp
, tmp
, tmp2
, fpstatus
);
5592 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
, fpstatus
);
5597 tcg_temp_free_ptr(fpstatus
);
5600 case NEON_3R_FLOAT_MULTIPLY
:
5602 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5603 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
5605 tcg_temp_free_i32(tmp2
);
5606 tmp2
= neon_load_reg(rd
, pass
);
5608 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5610 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
5613 tcg_temp_free_ptr(fpstatus
);
5616 case NEON_3R_FLOAT_CMP
:
5618 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5620 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
5623 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
5625 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
5628 tcg_temp_free_ptr(fpstatus
);
5631 case NEON_3R_FLOAT_ACMP
:
5633 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5635 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
, fpstatus
);
5637 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
5639 tcg_temp_free_ptr(fpstatus
);
5642 case NEON_3R_FLOAT_MINMAX
:
5644 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5646 gen_helper_vfp_maxs(tmp
, tmp
, tmp2
, fpstatus
);
5648 gen_helper_vfp_mins(tmp
, tmp
, tmp2
, fpstatus
);
5650 tcg_temp_free_ptr(fpstatus
);
5653 case NEON_3R_FLOAT_MISC
:
5656 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5658 gen_helper_vfp_maxnums(tmp
, tmp
, tmp2
, fpstatus
);
5660 gen_helper_vfp_minnums(tmp
, tmp
, tmp2
, fpstatus
);
5662 tcg_temp_free_ptr(fpstatus
);
5665 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
5667 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
5671 case NEON_3R_VFM_VQRDMLSH
:
5673 /* VFMA, VFMS: fused multiply-add */
5674 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5675 TCGv_i32 tmp3
= neon_load_reg(rd
, pass
);
5678 gen_helper_vfp_negs(tmp
, tmp
);
5680 gen_helper_vfp_muladds(tmp
, tmp
, tmp2
, tmp3
, fpstatus
);
5681 tcg_temp_free_i32(tmp3
);
5682 tcg_temp_free_ptr(fpstatus
);
5688 tcg_temp_free_i32(tmp2
);
5690 /* Save the result. For elementwise operations we can put it
5691 straight into the destination register. For pairwise operations
5692 we have to be careful to avoid clobbering the source operands. */
5693 if (pairwise
&& rd
== rm
) {
5694 neon_store_scratch(pass
, tmp
);
5696 neon_store_reg(rd
, pass
, tmp
);
5700 if (pairwise
&& rd
== rm
) {
5701 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5702 tmp
= neon_load_scratch(pass
);
5703 neon_store_reg(rd
, pass
, tmp
);
5706 /* End of 3 register same size operations. */
5707 } else if (insn
& (1 << 4)) {
5708 if ((insn
& 0x00380080) != 0) {
5709 /* Two registers and shift. */
5710 op
= (insn
>> 8) & 0xf;
5711 if (insn
& (1 << 7)) {
5719 while ((insn
& (1 << (size
+ 19))) == 0)
5722 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
5724 /* Shift by immediate:
5725 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5726 if (q
&& ((rd
| rm
) & 1)) {
5729 if (!u
&& (op
== 4 || op
== 6)) {
5732 /* Right shifts are encoded as N - shift, where N is the
5733 element size in bits. */
5735 shift
= shift
- (1 << (size
+ 3));
5740 /* Right shift comes here negative. */
5742 /* Shifts larger than the element size are architecturally
5743 * valid. Unsigned results in all zeros; signed results
5747 tcg_gen_gvec_sari(size
, rd_ofs
, rm_ofs
,
5748 MIN(shift
, (8 << size
) - 1),
5749 vec_size
, vec_size
);
5750 } else if (shift
>= 8 << size
) {
5751 tcg_gen_gvec_dup8i(rd_ofs
, vec_size
, vec_size
, 0);
5753 tcg_gen_gvec_shri(size
, rd_ofs
, rm_ofs
, shift
,
5754 vec_size
, vec_size
);
5759 /* Right shift comes here negative. */
5761 /* Shifts larger than the element size are architecturally
5762 * valid. Unsigned results in all zeros; signed results
5766 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, vec_size
, vec_size
,
5767 MIN(shift
, (8 << size
) - 1),
5769 } else if (shift
>= 8 << size
) {
5772 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, vec_size
, vec_size
,
5773 shift
, &usra_op
[size
]);
5781 /* Right shift comes here negative. */
5783 /* Shift out of range leaves destination unchanged. */
5784 if (shift
< 8 << size
) {
5785 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, vec_size
, vec_size
,
5786 shift
, &sri_op
[size
]);
5790 case 5: /* VSHL, VSLI */
5792 /* Shift out of range leaves destination unchanged. */
5793 if (shift
< 8 << size
) {
5794 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, vec_size
,
5795 vec_size
, shift
, &sli_op
[size
]);
5798 /* Shifts larger than the element size are
5799 * architecturally valid and results in zero.
5801 if (shift
>= 8 << size
) {
5802 tcg_gen_gvec_dup8i(rd_ofs
, vec_size
, vec_size
, 0);
5804 tcg_gen_gvec_shli(size
, rd_ofs
, rm_ofs
, shift
,
5805 vec_size
, vec_size
);
5817 /* To avoid excessive duplication of ops we implement shift
5818 * by immediate using the variable shift operations.
5820 imm
= dup_const(size
, shift
);
5822 for (pass
= 0; pass
< count
; pass
++) {
5824 neon_load_reg64(cpu_V0
, rm
+ pass
);
5825 tcg_gen_movi_i64(cpu_V1
, imm
);
5830 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
5832 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
5834 case 6: /* VQSHLU */
5835 gen_helper_neon_qshlu_s64(cpu_V0
, cpu_env
,
5840 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
5843 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
5848 g_assert_not_reached();
5852 neon_load_reg64(cpu_V1
, rd
+ pass
);
5853 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
5855 neon_store_reg64(cpu_V0
, rd
+ pass
);
5856 } else { /* size < 3 */
5857 /* Operands in T0 and T1. */
5858 tmp
= neon_load_reg(rm
, pass
);
5859 tmp2
= tcg_temp_new_i32();
5860 tcg_gen_movi_i32(tmp2
, imm
);
5864 GEN_NEON_INTEGER_OP(rshl
);
5866 case 6: /* VQSHLU */
5869 gen_helper_neon_qshlu_s8(tmp
, cpu_env
,
5873 gen_helper_neon_qshlu_s16(tmp
, cpu_env
,
5877 gen_helper_neon_qshlu_s32(tmp
, cpu_env
,
5885 GEN_NEON_INTEGER_OP_ENV(qshl
);
5888 g_assert_not_reached();
5890 tcg_temp_free_i32(tmp2
);
5894 tmp2
= neon_load_reg(rd
, pass
);
5895 gen_neon_add(size
, tmp
, tmp2
);
5896 tcg_temp_free_i32(tmp2
);
5898 neon_store_reg(rd
, pass
, tmp
);
5901 } else if (op
< 10) {
5902 /* Shift by immediate and narrow:
5903 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5904 int input_unsigned
= (op
== 8) ? !u
: u
;
5908 shift
= shift
- (1 << (size
+ 3));
5911 tmp64
= tcg_const_i64(shift
);
5912 neon_load_reg64(cpu_V0
, rm
);
5913 neon_load_reg64(cpu_V1
, rm
+ 1);
5914 for (pass
= 0; pass
< 2; pass
++) {
5922 if (input_unsigned
) {
5923 gen_helper_neon_rshl_u64(cpu_V0
, in
, tmp64
);
5925 gen_helper_neon_rshl_s64(cpu_V0
, in
, tmp64
);
5928 if (input_unsigned
) {
5929 gen_helper_neon_shl_u64(cpu_V0
, in
, tmp64
);
5931 gen_helper_neon_shl_s64(cpu_V0
, in
, tmp64
);
5934 tmp
= tcg_temp_new_i32();
5935 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5936 neon_store_reg(rd
, pass
, tmp
);
5938 tcg_temp_free_i64(tmp64
);
5941 imm
= (uint16_t)shift
;
5945 imm
= (uint32_t)shift
;
5947 tmp2
= tcg_const_i32(imm
);
5948 tmp4
= neon_load_reg(rm
+ 1, 0);
5949 tmp5
= neon_load_reg(rm
+ 1, 1);
5950 for (pass
= 0; pass
< 2; pass
++) {
5952 tmp
= neon_load_reg(rm
, 0);
5956 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
,
5959 tmp3
= neon_load_reg(rm
, 1);
5963 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
,
5965 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
5966 tcg_temp_free_i32(tmp
);
5967 tcg_temp_free_i32(tmp3
);
5968 tmp
= tcg_temp_new_i32();
5969 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
5970 neon_store_reg(rd
, pass
, tmp
);
5972 tcg_temp_free_i32(tmp2
);
5974 } else if (op
== 10) {
5976 if (q
|| (rd
& 1)) {
5979 tmp
= neon_load_reg(rm
, 0);
5980 tmp2
= neon_load_reg(rm
, 1);
5981 for (pass
= 0; pass
< 2; pass
++) {
5985 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
5988 /* The shift is less than the width of the source
5989 type, so we can just shift the whole register. */
5990 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
5991 /* Widen the result of shift: we need to clear
5992 * the potential overflow bits resulting from
5993 * left bits of the narrow input appearing as
5994 * right bits of left the neighbour narrow
5996 if (size
< 2 || !u
) {
5999 imm
= (0xffu
>> (8 - shift
));
6001 } else if (size
== 1) {
6002 imm
= 0xffff >> (16 - shift
);
6005 imm
= 0xffffffff >> (32 - shift
);
6008 imm64
= imm
| (((uint64_t)imm
) << 32);
6012 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, ~imm64
);
6015 neon_store_reg64(cpu_V0
, rd
+ pass
);
6017 } else if (op
>= 14) {
6018 /* VCVT fixed-point. */
6019 if (!(insn
& (1 << 21)) || (q
&& ((rd
| rm
) & 1))) {
6022 /* We have already masked out the must-be-1 top bit of imm6,
6023 * hence this 32-shift where the ARM ARM has 64-imm6.
6026 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6027 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
6030 gen_vfp_ulto(0, shift
, 1);
6032 gen_vfp_slto(0, shift
, 1);
6035 gen_vfp_toul(0, shift
, 1);
6037 gen_vfp_tosl(0, shift
, 1);
6039 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
6044 } else { /* (insn & 0x00380080) == 0 */
6045 int invert
, reg_ofs
, vec_size
;
6047 if (q
&& (rd
& 1)) {
6051 op
= (insn
>> 8) & 0xf;
6052 /* One register and immediate. */
6053 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
6054 invert
= (insn
& (1 << 5)) != 0;
6055 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6056 * We choose to not special-case this and will behave as if a
6057 * valid constant encoding of 0 had been given.
6076 imm
= (imm
<< 8) | (imm
<< 24);
6079 imm
= (imm
<< 8) | 0xff;
6082 imm
= (imm
<< 16) | 0xffff;
6085 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
6094 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
6095 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
6102 reg_ofs
= neon_reg_offset(rd
, 0);
6103 vec_size
= q
? 16 : 8;
6105 if (op
& 1 && op
< 12) {
6107 /* The immediate value has already been inverted,
6108 * so BIC becomes AND.
6110 tcg_gen_gvec_andi(MO_32
, reg_ofs
, reg_ofs
, imm
,
6111 vec_size
, vec_size
);
6113 tcg_gen_gvec_ori(MO_32
, reg_ofs
, reg_ofs
, imm
,
6114 vec_size
, vec_size
);
6118 if (op
== 14 && invert
) {
6119 TCGv_i64 t64
= tcg_temp_new_i64();
6121 for (pass
= 0; pass
<= q
; ++pass
) {
6125 for (n
= 0; n
< 8; n
++) {
6126 if (imm
& (1 << (n
+ pass
* 8))) {
6127 val
|= 0xffull
<< (n
* 8);
6130 tcg_gen_movi_i64(t64
, val
);
6131 neon_store_reg64(t64
, rd
+ pass
);
6133 tcg_temp_free_i64(t64
);
6135 tcg_gen_gvec_dup32i(reg_ofs
, vec_size
, vec_size
, imm
);
6139 } else { /* (insn & 0x00800010 == 0x00800000) */
6141 op
= (insn
>> 8) & 0xf;
6142 if ((insn
& (1 << 6)) == 0) {
6143 /* Three registers of different lengths. */
6147 /* undefreq: bit 0 : UNDEF if size == 0
6148 * bit 1 : UNDEF if size == 1
6149 * bit 2 : UNDEF if size == 2
6150 * bit 3 : UNDEF if U == 1
6151 * Note that [2:0] set implies 'always UNDEF'
6154 /* prewiden, src1_wide, src2_wide, undefreq */
6155 static const int neon_3reg_wide
[16][4] = {
6156 {1, 0, 0, 0}, /* VADDL */
6157 {1, 1, 0, 0}, /* VADDW */
6158 {1, 0, 0, 0}, /* VSUBL */
6159 {1, 1, 0, 0}, /* VSUBW */
6160 {0, 1, 1, 0}, /* VADDHN */
6161 {0, 0, 0, 0}, /* VABAL */
6162 {0, 1, 1, 0}, /* VSUBHN */
6163 {0, 0, 0, 0}, /* VABDL */
6164 {0, 0, 0, 0}, /* VMLAL */
6165 {0, 0, 0, 9}, /* VQDMLAL */
6166 {0, 0, 0, 0}, /* VMLSL */
6167 {0, 0, 0, 9}, /* VQDMLSL */
6168 {0, 0, 0, 0}, /* Integer VMULL */
6169 {0, 0, 0, 1}, /* VQDMULL */
6170 {0, 0, 0, 0xa}, /* Polynomial VMULL */
6171 {0, 0, 0, 7}, /* Reserved: always UNDEF */
6174 prewiden
= neon_3reg_wide
[op
][0];
6175 src1_wide
= neon_3reg_wide
[op
][1];
6176 src2_wide
= neon_3reg_wide
[op
][2];
6177 undefreq
= neon_3reg_wide
[op
][3];
6179 if ((undefreq
& (1 << size
)) ||
6180 ((undefreq
& 8) && u
)) {
6183 if ((src1_wide
&& (rn
& 1)) ||
6184 (src2_wide
&& (rm
& 1)) ||
6185 (!src2_wide
&& (rd
& 1))) {
6189 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6190 * outside the loop below as it only performs a single pass.
6192 if (op
== 14 && size
== 2) {
6193 TCGv_i64 tcg_rn
, tcg_rm
, tcg_rd
;
6195 if (!dc_isar_feature(aa32_pmull
, s
)) {
6198 tcg_rn
= tcg_temp_new_i64();
6199 tcg_rm
= tcg_temp_new_i64();
6200 tcg_rd
= tcg_temp_new_i64();
6201 neon_load_reg64(tcg_rn
, rn
);
6202 neon_load_reg64(tcg_rm
, rm
);
6203 gen_helper_neon_pmull_64_lo(tcg_rd
, tcg_rn
, tcg_rm
);
6204 neon_store_reg64(tcg_rd
, rd
);
6205 gen_helper_neon_pmull_64_hi(tcg_rd
, tcg_rn
, tcg_rm
);
6206 neon_store_reg64(tcg_rd
, rd
+ 1);
6207 tcg_temp_free_i64(tcg_rn
);
6208 tcg_temp_free_i64(tcg_rm
);
6209 tcg_temp_free_i64(tcg_rd
);
6213 /* Avoid overlapping operands. Wide source operands are
6214 always aligned so will never overlap with wide
6215 destinations in problematic ways. */
6216 if (rd
== rm
&& !src2_wide
) {
6217 tmp
= neon_load_reg(rm
, 1);
6218 neon_store_scratch(2, tmp
);
6219 } else if (rd
== rn
&& !src1_wide
) {
6220 tmp
= neon_load_reg(rn
, 1);
6221 neon_store_scratch(2, tmp
);
6224 for (pass
= 0; pass
< 2; pass
++) {
6226 neon_load_reg64(cpu_V0
, rn
+ pass
);
6229 if (pass
== 1 && rd
== rn
) {
6230 tmp
= neon_load_scratch(2);
6232 tmp
= neon_load_reg(rn
, pass
);
6235 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
6239 neon_load_reg64(cpu_V1
, rm
+ pass
);
6242 if (pass
== 1 && rd
== rm
) {
6243 tmp2
= neon_load_scratch(2);
6245 tmp2
= neon_load_reg(rm
, pass
);
6248 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
6252 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6253 gen_neon_addl(size
);
6255 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6256 gen_neon_subl(size
);
6258 case 5: case 7: /* VABAL, VABDL */
6259 switch ((size
<< 1) | u
) {
6261 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
6264 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
6267 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
6270 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
6273 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
6276 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
6280 tcg_temp_free_i32(tmp2
);
6281 tcg_temp_free_i32(tmp
);
6283 case 8: case 9: case 10: case 11: case 12: case 13:
6284 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6285 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
6287 case 14: /* Polynomial VMULL */
6288 gen_helper_neon_mull_p8(cpu_V0
, tmp
, tmp2
);
6289 tcg_temp_free_i32(tmp2
);
6290 tcg_temp_free_i32(tmp
);
6292 default: /* 15 is RESERVED: caught earlier */
6297 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6298 neon_store_reg64(cpu_V0
, rd
+ pass
);
6299 } else if (op
== 5 || (op
>= 8 && op
<= 11)) {
6301 neon_load_reg64(cpu_V1
, rd
+ pass
);
6303 case 10: /* VMLSL */
6304 gen_neon_negl(cpu_V0
, size
);
6306 case 5: case 8: /* VABAL, VMLAL */
6307 gen_neon_addl(size
);
6309 case 9: case 11: /* VQDMLAL, VQDMLSL */
6310 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6312 gen_neon_negl(cpu_V0
, size
);
6314 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
6319 neon_store_reg64(cpu_V0
, rd
+ pass
);
6320 } else if (op
== 4 || op
== 6) {
6321 /* Narrowing operation. */
6322 tmp
= tcg_temp_new_i32();
6326 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
6329 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
6332 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
6333 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
6340 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
6343 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
6346 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
6347 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
6348 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
6356 neon_store_reg(rd
, 0, tmp3
);
6357 neon_store_reg(rd
, 1, tmp
);
6360 /* Write back the result. */
6361 neon_store_reg64(cpu_V0
, rd
+ pass
);
6365 /* Two registers and a scalar. NB that for ops of this form
6366 * the ARM ARM labels bit 24 as Q, but it is in our variable
6373 case 1: /* Float VMLA scalar */
6374 case 5: /* Floating point VMLS scalar */
6375 case 9: /* Floating point VMUL scalar */
6380 case 0: /* Integer VMLA scalar */
6381 case 4: /* Integer VMLS scalar */
6382 case 8: /* Integer VMUL scalar */
6383 case 12: /* VQDMULH scalar */
6384 case 13: /* VQRDMULH scalar */
6385 if (u
&& ((rd
| rn
) & 1)) {
6388 tmp
= neon_get_scalar(size
, rm
);
6389 neon_store_scratch(0, tmp
);
6390 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
6391 tmp
= neon_load_scratch(0);
6392 tmp2
= neon_load_reg(rn
, pass
);
6395 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
6397 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
6399 } else if (op
== 13) {
6401 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
6403 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
6405 } else if (op
& 1) {
6406 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6407 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
6408 tcg_temp_free_ptr(fpstatus
);
6411 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
6412 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
6413 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
6417 tcg_temp_free_i32(tmp2
);
6420 tmp2
= neon_load_reg(rd
, pass
);
6423 gen_neon_add(size
, tmp
, tmp2
);
6427 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6428 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
6429 tcg_temp_free_ptr(fpstatus
);
6433 gen_neon_rsb(size
, tmp
, tmp2
);
6437 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6438 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
6439 tcg_temp_free_ptr(fpstatus
);
6445 tcg_temp_free_i32(tmp2
);
6447 neon_store_reg(rd
, pass
, tmp
);
6450 case 3: /* VQDMLAL scalar */
6451 case 7: /* VQDMLSL scalar */
6452 case 11: /* VQDMULL scalar */
6457 case 2: /* VMLAL sclar */
6458 case 6: /* VMLSL scalar */
6459 case 10: /* VMULL scalar */
6463 tmp2
= neon_get_scalar(size
, rm
);
6464 /* We need a copy of tmp2 because gen_neon_mull
6465 * deletes it during pass 0. */
6466 tmp4
= tcg_temp_new_i32();
6467 tcg_gen_mov_i32(tmp4
, tmp2
);
6468 tmp3
= neon_load_reg(rn
, 1);
6470 for (pass
= 0; pass
< 2; pass
++) {
6472 tmp
= neon_load_reg(rn
, 0);
6477 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
6479 neon_load_reg64(cpu_V1
, rd
+ pass
);
6483 gen_neon_negl(cpu_V0
, size
);
6486 gen_neon_addl(size
);
6489 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6491 gen_neon_negl(cpu_V0
, size
);
6493 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
6499 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6504 neon_store_reg64(cpu_V0
, rd
+ pass
);
6507 case 14: /* VQRDMLAH scalar */
6508 case 15: /* VQRDMLSH scalar */
6510 NeonGenThreeOpEnvFn
*fn
;
6512 if (!dc_isar_feature(aa32_rdm
, s
)) {
6515 if (u
&& ((rd
| rn
) & 1)) {
6520 fn
= gen_helper_neon_qrdmlah_s16
;
6522 fn
= gen_helper_neon_qrdmlah_s32
;
6526 fn
= gen_helper_neon_qrdmlsh_s16
;
6528 fn
= gen_helper_neon_qrdmlsh_s32
;
6532 tmp2
= neon_get_scalar(size
, rm
);
6533 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
6534 tmp
= neon_load_reg(rn
, pass
);
6535 tmp3
= neon_load_reg(rd
, pass
);
6536 fn(tmp
, cpu_env
, tmp
, tmp2
, tmp3
);
6537 tcg_temp_free_i32(tmp3
);
6538 neon_store_reg(rd
, pass
, tmp
);
6540 tcg_temp_free_i32(tmp2
);
6544 g_assert_not_reached();
6547 } else { /* size == 3 */
6550 imm
= (insn
>> 8) & 0xf;
6555 if (q
&& ((rd
| rn
| rm
) & 1)) {
6560 neon_load_reg64(cpu_V0
, rn
);
6562 neon_load_reg64(cpu_V1
, rn
+ 1);
6564 } else if (imm
== 8) {
6565 neon_load_reg64(cpu_V0
, rn
+ 1);
6567 neon_load_reg64(cpu_V1
, rm
);
6570 tmp64
= tcg_temp_new_i64();
6572 neon_load_reg64(cpu_V0
, rn
);
6573 neon_load_reg64(tmp64
, rn
+ 1);
6575 neon_load_reg64(cpu_V0
, rn
+ 1);
6576 neon_load_reg64(tmp64
, rm
);
6578 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
6579 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
6580 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6582 neon_load_reg64(cpu_V1
, rm
);
6584 neon_load_reg64(cpu_V1
, rm
+ 1);
6587 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
6588 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
6589 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
6590 tcg_temp_free_i64(tmp64
);
6593 neon_load_reg64(cpu_V0
, rn
);
6594 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
6595 neon_load_reg64(cpu_V1
, rm
);
6596 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
6597 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6599 neon_store_reg64(cpu_V0
, rd
);
6601 neon_store_reg64(cpu_V1
, rd
+ 1);
6603 } else if ((insn
& (1 << 11)) == 0) {
6604 /* Two register misc. */
6605 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
6606 size
= (insn
>> 18) & 3;
6607 /* UNDEF for unknown op values and bad op-size combinations */
6608 if ((neon_2rm_sizes
[op
] & (1 << size
)) == 0) {
6611 if (neon_2rm_is_v8_op(op
) &&
6612 !arm_dc_feature(s
, ARM_FEATURE_V8
)) {
6615 if ((op
!= NEON_2RM_VMOVN
&& op
!= NEON_2RM_VQMOVN
) &&
6616 q
&& ((rm
| rd
) & 1)) {
6620 case NEON_2RM_VREV64
:
6621 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
6622 tmp
= neon_load_reg(rm
, pass
* 2);
6623 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
6625 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
6626 case 1: gen_swap_half(tmp
); break;
6627 case 2: /* no-op */ break;
6630 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
6632 neon_store_reg(rd
, pass
* 2, tmp2
);
6635 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
6636 case 1: gen_swap_half(tmp2
); break;
6639 neon_store_reg(rd
, pass
* 2, tmp2
);
6643 case NEON_2RM_VPADDL
: case NEON_2RM_VPADDL_U
:
6644 case NEON_2RM_VPADAL
: case NEON_2RM_VPADAL_U
:
6645 for (pass
= 0; pass
< q
+ 1; pass
++) {
6646 tmp
= neon_load_reg(rm
, pass
* 2);
6647 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
6648 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
6649 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
6651 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
6652 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
6653 case 2: tcg_gen_add_i64(CPU_V001
); break;
6656 if (op
>= NEON_2RM_VPADAL
) {
6658 neon_load_reg64(cpu_V1
, rd
+ pass
);
6659 gen_neon_addl(size
);
6661 neon_store_reg64(cpu_V0
, rd
+ pass
);
6667 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
6668 tmp
= neon_load_reg(rm
, n
);
6669 tmp2
= neon_load_reg(rd
, n
+ 1);
6670 neon_store_reg(rm
, n
, tmp2
);
6671 neon_store_reg(rd
, n
+ 1, tmp
);
6678 if (gen_neon_unzip(rd
, rm
, size
, q
)) {
6683 if (gen_neon_zip(rd
, rm
, size
, q
)) {
6687 case NEON_2RM_VMOVN
: case NEON_2RM_VQMOVN
:
6688 /* also VQMOVUN; op field and mnemonics don't line up */
6693 for (pass
= 0; pass
< 2; pass
++) {
6694 neon_load_reg64(cpu_V0
, rm
+ pass
);
6695 tmp
= tcg_temp_new_i32();
6696 gen_neon_narrow_op(op
== NEON_2RM_VMOVN
, q
, size
,
6701 neon_store_reg(rd
, 0, tmp2
);
6702 neon_store_reg(rd
, 1, tmp
);
6706 case NEON_2RM_VSHLL
:
6707 if (q
|| (rd
& 1)) {
6710 tmp
= neon_load_reg(rm
, 0);
6711 tmp2
= neon_load_reg(rm
, 1);
6712 for (pass
= 0; pass
< 2; pass
++) {
6715 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
6716 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, 8 << size
);
6717 neon_store_reg64(cpu_V0
, rd
+ pass
);
6720 case NEON_2RM_VCVT_F16_F32
:
6725 if (!dc_isar_feature(aa32_fp16_spconv
, s
) ||
6729 tmp
= tcg_temp_new_i32();
6730 tmp2
= tcg_temp_new_i32();
6731 fpst
= get_fpstatus_ptr(true);
6732 ahp
= get_ahp_flag();
6733 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 0));
6734 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, fpst
, ahp
);
6735 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 1));
6736 gen_helper_vfp_fcvt_f32_to_f16(tmp2
, cpu_F0s
, fpst
, ahp
);
6737 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
6738 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
6739 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 2));
6740 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, fpst
, ahp
);
6741 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 3));
6742 neon_store_reg(rd
, 0, tmp2
);
6743 tmp2
= tcg_temp_new_i32();
6744 gen_helper_vfp_fcvt_f32_to_f16(tmp2
, cpu_F0s
, fpst
, ahp
);
6745 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
6746 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
6747 neon_store_reg(rd
, 1, tmp2
);
6748 tcg_temp_free_i32(tmp
);
6749 tcg_temp_free_i32(ahp
);
6750 tcg_temp_free_ptr(fpst
);
6753 case NEON_2RM_VCVT_F32_F16
:
6757 if (!dc_isar_feature(aa32_fp16_spconv
, s
) ||
6761 fpst
= get_fpstatus_ptr(true);
6762 ahp
= get_ahp_flag();
6763 tmp3
= tcg_temp_new_i32();
6764 tmp
= neon_load_reg(rm
, 0);
6765 tmp2
= neon_load_reg(rm
, 1);
6766 tcg_gen_ext16u_i32(tmp3
, tmp
);
6767 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp3
, fpst
, ahp
);
6768 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 0));
6769 tcg_gen_shri_i32(tmp3
, tmp
, 16);
6770 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp3
, fpst
, ahp
);
6771 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 1));
6772 tcg_temp_free_i32(tmp
);
6773 tcg_gen_ext16u_i32(tmp3
, tmp2
);
6774 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp3
, fpst
, ahp
);
6775 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 2));
6776 tcg_gen_shri_i32(tmp3
, tmp2
, 16);
6777 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp3
, fpst
, ahp
);
6778 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 3));
6779 tcg_temp_free_i32(tmp2
);
6780 tcg_temp_free_i32(tmp3
);
6781 tcg_temp_free_i32(ahp
);
6782 tcg_temp_free_ptr(fpst
);
6785 case NEON_2RM_AESE
: case NEON_2RM_AESMC
:
6786 if (!dc_isar_feature(aa32_aes
, s
) || ((rm
| rd
) & 1)) {
6789 ptr1
= vfp_reg_ptr(true, rd
);
6790 ptr2
= vfp_reg_ptr(true, rm
);
6792 /* Bit 6 is the lowest opcode bit; it distinguishes between
6793 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6795 tmp3
= tcg_const_i32(extract32(insn
, 6, 1));
6797 if (op
== NEON_2RM_AESE
) {
6798 gen_helper_crypto_aese(ptr1
, ptr2
, tmp3
);
6800 gen_helper_crypto_aesmc(ptr1
, ptr2
, tmp3
);
6802 tcg_temp_free_ptr(ptr1
);
6803 tcg_temp_free_ptr(ptr2
);
6804 tcg_temp_free_i32(tmp3
);
6806 case NEON_2RM_SHA1H
:
6807 if (!dc_isar_feature(aa32_sha1
, s
) || ((rm
| rd
) & 1)) {
6810 ptr1
= vfp_reg_ptr(true, rd
);
6811 ptr2
= vfp_reg_ptr(true, rm
);
6813 gen_helper_crypto_sha1h(ptr1
, ptr2
);
6815 tcg_temp_free_ptr(ptr1
);
6816 tcg_temp_free_ptr(ptr2
);
6818 case NEON_2RM_SHA1SU1
:
6819 if ((rm
| rd
) & 1) {
6822 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6824 if (!dc_isar_feature(aa32_sha2
, s
)) {
6827 } else if (!dc_isar_feature(aa32_sha1
, s
)) {
6830 ptr1
= vfp_reg_ptr(true, rd
);
6831 ptr2
= vfp_reg_ptr(true, rm
);
6833 gen_helper_crypto_sha256su0(ptr1
, ptr2
);
6835 gen_helper_crypto_sha1su1(ptr1
, ptr2
);
6837 tcg_temp_free_ptr(ptr1
);
6838 tcg_temp_free_ptr(ptr2
);
6842 tcg_gen_gvec_not(0, rd_ofs
, rm_ofs
, vec_size
, vec_size
);
6845 tcg_gen_gvec_neg(size
, rd_ofs
, rm_ofs
, vec_size
, vec_size
);
6848 tcg_gen_gvec_abs(size
, rd_ofs
, rm_ofs
, vec_size
, vec_size
);
6853 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6854 if (neon_2rm_is_float_op(op
)) {
6855 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
6856 neon_reg_offset(rm
, pass
));
6859 tmp
= neon_load_reg(rm
, pass
);
6862 case NEON_2RM_VREV32
:
6864 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
6865 case 1: gen_swap_half(tmp
); break;
6869 case NEON_2RM_VREV16
:
6874 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
6875 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
6876 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
6882 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
6883 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
6884 case 2: tcg_gen_clzi_i32(tmp
, tmp
, 32); break;
6889 gen_helper_neon_cnt_u8(tmp
, tmp
);
6891 case NEON_2RM_VQABS
:
6894 gen_helper_neon_qabs_s8(tmp
, cpu_env
, tmp
);
6897 gen_helper_neon_qabs_s16(tmp
, cpu_env
, tmp
);
6900 gen_helper_neon_qabs_s32(tmp
, cpu_env
, tmp
);
6905 case NEON_2RM_VQNEG
:
6908 gen_helper_neon_qneg_s8(tmp
, cpu_env
, tmp
);
6911 gen_helper_neon_qneg_s16(tmp
, cpu_env
, tmp
);
6914 gen_helper_neon_qneg_s32(tmp
, cpu_env
, tmp
);
6919 case NEON_2RM_VCGT0
: case NEON_2RM_VCLE0
:
6920 tmp2
= tcg_const_i32(0);
6922 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
6923 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
6924 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
6927 tcg_temp_free_i32(tmp2
);
6928 if (op
== NEON_2RM_VCLE0
) {
6929 tcg_gen_not_i32(tmp
, tmp
);
6932 case NEON_2RM_VCGE0
: case NEON_2RM_VCLT0
:
6933 tmp2
= tcg_const_i32(0);
6935 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
6936 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
6937 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
6940 tcg_temp_free_i32(tmp2
);
6941 if (op
== NEON_2RM_VCLT0
) {
6942 tcg_gen_not_i32(tmp
, tmp
);
6945 case NEON_2RM_VCEQ0
:
6946 tmp2
= tcg_const_i32(0);
6948 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
6949 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
6950 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
6953 tcg_temp_free_i32(tmp2
);
6955 case NEON_2RM_VCGT0_F
:
6957 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6958 tmp2
= tcg_const_i32(0);
6959 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
6960 tcg_temp_free_i32(tmp2
);
6961 tcg_temp_free_ptr(fpstatus
);
6964 case NEON_2RM_VCGE0_F
:
6966 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6967 tmp2
= tcg_const_i32(0);
6968 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
6969 tcg_temp_free_i32(tmp2
);
6970 tcg_temp_free_ptr(fpstatus
);
6973 case NEON_2RM_VCEQ0_F
:
6975 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6976 tmp2
= tcg_const_i32(0);
6977 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
6978 tcg_temp_free_i32(tmp2
);
6979 tcg_temp_free_ptr(fpstatus
);
6982 case NEON_2RM_VCLE0_F
:
6984 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6985 tmp2
= tcg_const_i32(0);
6986 gen_helper_neon_cge_f32(tmp
, tmp2
, tmp
, fpstatus
);
6987 tcg_temp_free_i32(tmp2
);
6988 tcg_temp_free_ptr(fpstatus
);
6991 case NEON_2RM_VCLT0_F
:
6993 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6994 tmp2
= tcg_const_i32(0);
6995 gen_helper_neon_cgt_f32(tmp
, tmp2
, tmp
, fpstatus
);
6996 tcg_temp_free_i32(tmp2
);
6997 tcg_temp_free_ptr(fpstatus
);
7000 case NEON_2RM_VABS_F
:
7003 case NEON_2RM_VNEG_F
:
7007 tmp2
= neon_load_reg(rd
, pass
);
7008 neon_store_reg(rm
, pass
, tmp2
);
7011 tmp2
= neon_load_reg(rd
, pass
);
7013 case 0: gen_neon_trn_u8(tmp
, tmp2
); break;
7014 case 1: gen_neon_trn_u16(tmp
, tmp2
); break;
7017 neon_store_reg(rm
, pass
, tmp2
);
7019 case NEON_2RM_VRINTN
:
7020 case NEON_2RM_VRINTA
:
7021 case NEON_2RM_VRINTM
:
7022 case NEON_2RM_VRINTP
:
7023 case NEON_2RM_VRINTZ
:
7026 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7029 if (op
== NEON_2RM_VRINTZ
) {
7030 rmode
= FPROUNDING_ZERO
;
7032 rmode
= fp_decode_rm
[((op
& 0x6) >> 1) ^ 1];
7035 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
7036 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
7038 gen_helper_rints(cpu_F0s
, cpu_F0s
, fpstatus
);
7039 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
7041 tcg_temp_free_ptr(fpstatus
);
7042 tcg_temp_free_i32(tcg_rmode
);
7045 case NEON_2RM_VRINTX
:
7047 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7048 gen_helper_rints_exact(cpu_F0s
, cpu_F0s
, fpstatus
);
7049 tcg_temp_free_ptr(fpstatus
);
7052 case NEON_2RM_VCVTAU
:
7053 case NEON_2RM_VCVTAS
:
7054 case NEON_2RM_VCVTNU
:
7055 case NEON_2RM_VCVTNS
:
7056 case NEON_2RM_VCVTPU
:
7057 case NEON_2RM_VCVTPS
:
7058 case NEON_2RM_VCVTMU
:
7059 case NEON_2RM_VCVTMS
:
7061 bool is_signed
= !extract32(insn
, 7, 1);
7062 TCGv_ptr fpst
= get_fpstatus_ptr(1);
7063 TCGv_i32 tcg_rmode
, tcg_shift
;
7064 int rmode
= fp_decode_rm
[extract32(insn
, 8, 2)];
7066 tcg_shift
= tcg_const_i32(0);
7067 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
7068 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
7072 gen_helper_vfp_tosls(cpu_F0s
, cpu_F0s
,
7075 gen_helper_vfp_touls(cpu_F0s
, cpu_F0s
,
7079 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
7081 tcg_temp_free_i32(tcg_rmode
);
7082 tcg_temp_free_i32(tcg_shift
);
7083 tcg_temp_free_ptr(fpst
);
7086 case NEON_2RM_VRECPE
:
7088 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7089 gen_helper_recpe_u32(tmp
, tmp
, fpstatus
);
7090 tcg_temp_free_ptr(fpstatus
);
7093 case NEON_2RM_VRSQRTE
:
7095 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7096 gen_helper_rsqrte_u32(tmp
, tmp
, fpstatus
);
7097 tcg_temp_free_ptr(fpstatus
);
7100 case NEON_2RM_VRECPE_F
:
7102 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7103 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, fpstatus
);
7104 tcg_temp_free_ptr(fpstatus
);
7107 case NEON_2RM_VRSQRTE_F
:
7109 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7110 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, fpstatus
);
7111 tcg_temp_free_ptr(fpstatus
);
7114 case NEON_2RM_VCVT_FS
: /* VCVT.F32.S32 */
7117 case NEON_2RM_VCVT_FU
: /* VCVT.F32.U32 */
7120 case NEON_2RM_VCVT_SF
: /* VCVT.S32.F32 */
7121 gen_vfp_tosiz(0, 1);
7123 case NEON_2RM_VCVT_UF
: /* VCVT.U32.F32 */
7124 gen_vfp_touiz(0, 1);
7127 /* Reserved op values were caught by the
7128 * neon_2rm_sizes[] check earlier.
7132 if (neon_2rm_is_float_op(op
)) {
7133 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
7134 neon_reg_offset(rd
, pass
));
7136 neon_store_reg(rd
, pass
, tmp
);
7141 } else if ((insn
& (1 << 10)) == 0) {
7143 int n
= ((insn
>> 8) & 3) + 1;
7144 if ((rn
+ n
) > 32) {
7145 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7146 * helper function running off the end of the register file.
7151 if (insn
& (1 << 6)) {
7152 tmp
= neon_load_reg(rd
, 0);
7154 tmp
= tcg_temp_new_i32();
7155 tcg_gen_movi_i32(tmp
, 0);
7157 tmp2
= neon_load_reg(rm
, 0);
7158 ptr1
= vfp_reg_ptr(true, rn
);
7159 tmp5
= tcg_const_i32(n
);
7160 gen_helper_neon_tbl(tmp2
, tmp2
, tmp
, ptr1
, tmp5
);
7161 tcg_temp_free_i32(tmp
);
7162 if (insn
& (1 << 6)) {
7163 tmp
= neon_load_reg(rd
, 1);
7165 tmp
= tcg_temp_new_i32();
7166 tcg_gen_movi_i32(tmp
, 0);
7168 tmp3
= neon_load_reg(rm
, 1);
7169 gen_helper_neon_tbl(tmp3
, tmp3
, tmp
, ptr1
, tmp5
);
7170 tcg_temp_free_i32(tmp5
);
7171 tcg_temp_free_ptr(ptr1
);
7172 neon_store_reg(rd
, 0, tmp2
);
7173 neon_store_reg(rd
, 1, tmp3
);
7174 tcg_temp_free_i32(tmp
);
7175 } else if ((insn
& 0x380) == 0) {
7180 if ((insn
& (7 << 16)) == 0 || (q
&& (rd
& 1))) {
7183 if (insn
& (1 << 16)) {
7185 element
= (insn
>> 17) & 7;
7186 } else if (insn
& (1 << 17)) {
7188 element
= (insn
>> 18) & 3;
7191 element
= (insn
>> 19) & 1;
7193 tcg_gen_gvec_dup_mem(size
, neon_reg_offset(rd
, 0),
7194 neon_element_offset(rm
, element
, size
),
7195 q
? 16 : 8, q
? 16 : 8);
7204 /* Advanced SIMD three registers of the same length extension.
7205 * 31 25 23 22 20 16 12 11 10 9 8 3 0
7206 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7207 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7208 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
7210 static int disas_neon_insn_3same_ext(DisasContext
*s
, uint32_t insn
)
7212 gen_helper_gvec_3
*fn_gvec
= NULL
;
7213 gen_helper_gvec_3_ptr
*fn_gvec_ptr
= NULL
;
7214 int rd
, rn
, rm
, opr_sz
;
7217 bool is_long
= false, q
= extract32(insn
, 6, 1);
7218 bool ptr_is_env
= false;
7220 if ((insn
& 0xfe200f10) == 0xfc200800) {
7221 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
7222 int size
= extract32(insn
, 20, 1);
7223 data
= extract32(insn
, 23, 2); /* rot */
7224 if (!dc_isar_feature(aa32_vcma
, s
)
7225 || (!size
&& !dc_isar_feature(aa32_fp16_arith
, s
))) {
7228 fn_gvec_ptr
= size
? gen_helper_gvec_fcmlas
: gen_helper_gvec_fcmlah
;
7229 } else if ((insn
& 0xfea00f10) == 0xfc800800) {
7230 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
7231 int size
= extract32(insn
, 20, 1);
7232 data
= extract32(insn
, 24, 1); /* rot */
7233 if (!dc_isar_feature(aa32_vcma
, s
)
7234 || (!size
&& !dc_isar_feature(aa32_fp16_arith
, s
))) {
7237 fn_gvec_ptr
= size
? gen_helper_gvec_fcadds
: gen_helper_gvec_fcaddh
;
7238 } else if ((insn
& 0xfeb00f00) == 0xfc200d00) {
7239 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
7240 bool u
= extract32(insn
, 4, 1);
7241 if (!dc_isar_feature(aa32_dp
, s
)) {
7244 fn_gvec
= u
? gen_helper_gvec_udot_b
: gen_helper_gvec_sdot_b
;
7245 } else if ((insn
& 0xff300f10) == 0xfc200810) {
7246 /* VFM[AS]L -- 1111 1100 S.10 .... .... 1000 .Q.1 .... */
7247 int is_s
= extract32(insn
, 23, 1);
7248 if (!dc_isar_feature(aa32_fhm
, s
)) {
7252 data
= is_s
; /* is_2 == 0 */
7253 fn_gvec_ptr
= gen_helper_gvec_fmlal_a32
;
7259 VFP_DREG_D(rd
, insn
);
7263 if (q
|| !is_long
) {
7264 VFP_DREG_N(rn
, insn
);
7265 VFP_DREG_M(rm
, insn
);
7266 if ((rn
| rm
) & q
& !is_long
) {
7269 off_rn
= vfp_reg_offset(1, rn
);
7270 off_rm
= vfp_reg_offset(1, rm
);
7272 rn
= VFP_SREG_N(insn
);
7273 rm
= VFP_SREG_M(insn
);
7274 off_rn
= vfp_reg_offset(0, rn
);
7275 off_rm
= vfp_reg_offset(0, rm
);
7278 if (s
->fp_excp_el
) {
7279 gen_exception_insn(s
, 4, EXCP_UDEF
,
7280 syn_simd_access_trap(1, 0xe, false), s
->fp_excp_el
);
7283 if (!s
->vfp_enabled
) {
7287 opr_sz
= (1 + q
) * 8;
7293 ptr
= get_fpstatus_ptr(1);
7295 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd
), off_rn
, off_rm
, ptr
,
7296 opr_sz
, opr_sz
, data
, fn_gvec_ptr
);
7298 tcg_temp_free_ptr(ptr
);
7301 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd
), off_rn
, off_rm
,
7302 opr_sz
, opr_sz
, data
, fn_gvec
);
7307 /* Advanced SIMD two registers and a scalar extension.
7308 * 31 24 23 22 20 16 12 11 10 9 8 3 0
7309 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7310 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
7311 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
7315 static int disas_neon_insn_2reg_scalar_ext(DisasContext
*s
, uint32_t insn
)
7317 gen_helper_gvec_3
*fn_gvec
= NULL
;
7318 gen_helper_gvec_3_ptr
*fn_gvec_ptr
= NULL
;
7319 int rd
, rn
, rm
, opr_sz
, data
;
7321 bool is_long
= false, q
= extract32(insn
, 6, 1);
7322 bool ptr_is_env
= false;
7324 if ((insn
& 0xff000f10) == 0xfe000800) {
7325 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
7326 int rot
= extract32(insn
, 20, 2);
7327 int size
= extract32(insn
, 23, 1);
7330 if (!dc_isar_feature(aa32_vcma
, s
)) {
7334 if (!dc_isar_feature(aa32_fp16_arith
, s
)) {
7337 /* For fp16, rm is just Vm, and index is M. */
7338 rm
= extract32(insn
, 0, 4);
7339 index
= extract32(insn
, 5, 1);
7341 /* For fp32, rm is the usual M:Vm, and index is 0. */
7342 VFP_DREG_M(rm
, insn
);
7345 data
= (index
<< 2) | rot
;
7346 fn_gvec_ptr
= (size
? gen_helper_gvec_fcmlas_idx
7347 : gen_helper_gvec_fcmlah_idx
);
7348 } else if ((insn
& 0xffb00f00) == 0xfe200d00) {
7349 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
7350 int u
= extract32(insn
, 4, 1);
7352 if (!dc_isar_feature(aa32_dp
, s
)) {
7355 fn_gvec
= u
? gen_helper_gvec_udot_idx_b
: gen_helper_gvec_sdot_idx_b
;
7356 /* rm is just Vm, and index is M. */
7357 data
= extract32(insn
, 5, 1); /* index */
7358 rm
= extract32(insn
, 0, 4);
7359 } else if ((insn
& 0xffa00f10) == 0xfe000810) {
7360 /* VFM[AS]L -- 1111 1110 0.0S .... .... 1000 .Q.1 .... */
7361 int is_s
= extract32(insn
, 20, 1);
7362 int vm20
= extract32(insn
, 0, 3);
7363 int vm3
= extract32(insn
, 3, 1);
7364 int m
= extract32(insn
, 5, 1);
7367 if (!dc_isar_feature(aa32_fhm
, s
)) {
7372 index
= m
* 2 + vm3
;
7378 data
= (index
<< 2) | is_s
; /* is_2 == 0 */
7379 fn_gvec_ptr
= gen_helper_gvec_fmlal_idx_a32
;
7385 VFP_DREG_D(rd
, insn
);
7389 if (q
|| !is_long
) {
7390 VFP_DREG_N(rn
, insn
);
7391 if (rn
& q
& !is_long
) {
7394 off_rn
= vfp_reg_offset(1, rn
);
7395 off_rm
= vfp_reg_offset(1, rm
);
7397 rn
= VFP_SREG_N(insn
);
7398 off_rn
= vfp_reg_offset(0, rn
);
7399 off_rm
= vfp_reg_offset(0, rm
);
7401 if (s
->fp_excp_el
) {
7402 gen_exception_insn(s
, 4, EXCP_UDEF
,
7403 syn_simd_access_trap(1, 0xe, false), s
->fp_excp_el
);
7406 if (!s
->vfp_enabled
) {
7410 opr_sz
= (1 + q
) * 8;
7416 ptr
= get_fpstatus_ptr(1);
7418 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd
), off_rn
, off_rm
, ptr
,
7419 opr_sz
, opr_sz
, data
, fn_gvec_ptr
);
7421 tcg_temp_free_ptr(ptr
);
7424 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd
), off_rn
, off_rm
,
7425 opr_sz
, opr_sz
, data
, fn_gvec
);
7430 static int disas_coproc_insn(DisasContext
*s
, uint32_t insn
)
7432 int cpnum
, is64
, crn
, crm
, opc1
, opc2
, isread
, rt
, rt2
;
7433 const ARMCPRegInfo
*ri
;
7435 cpnum
= (insn
>> 8) & 0xf;
7437 /* First check for coprocessor space used for XScale/iwMMXt insns */
7438 if (arm_dc_feature(s
, ARM_FEATURE_XSCALE
) && (cpnum
< 2)) {
7439 if (extract32(s
->c15_cpar
, cpnum
, 1) == 0) {
7442 if (arm_dc_feature(s
, ARM_FEATURE_IWMMXT
)) {
7443 return disas_iwmmxt_insn(s
, insn
);
7444 } else if (arm_dc_feature(s
, ARM_FEATURE_XSCALE
)) {
7445 return disas_dsp_insn(s
, insn
);
7450 /* Otherwise treat as a generic register access */
7451 is64
= (insn
& (1 << 25)) == 0;
7452 if (!is64
&& ((insn
& (1 << 4)) == 0)) {
7460 opc1
= (insn
>> 4) & 0xf;
7462 rt2
= (insn
>> 16) & 0xf;
7464 crn
= (insn
>> 16) & 0xf;
7465 opc1
= (insn
>> 21) & 7;
7466 opc2
= (insn
>> 5) & 7;
7469 isread
= (insn
>> 20) & 1;
7470 rt
= (insn
>> 12) & 0xf;
7472 ri
= get_arm_cp_reginfo(s
->cp_regs
,
7473 ENCODE_CP_REG(cpnum
, is64
, s
->ns
, crn
, crm
, opc1
, opc2
));
7475 /* Check access permissions */
7476 if (!cp_access_ok(s
->current_el
, ri
, isread
)) {
7481 (arm_dc_feature(s
, ARM_FEATURE_XSCALE
) && cpnum
< 14)) {
7482 /* Emit code to perform further access permissions checks at
7483 * runtime; this may result in an exception.
7484 * Note that on XScale all cp0..c13 registers do an access check
7485 * call in order to handle c15_cpar.
7488 TCGv_i32 tcg_syn
, tcg_isread
;
7491 /* Note that since we are an implementation which takes an
7492 * exception on a trapped conditional instruction only if the
7493 * instruction passes its condition code check, we can take
7494 * advantage of the clause in the ARM ARM that allows us to set
7495 * the COND field in the instruction to 0xE in all cases.
7496 * We could fish the actual condition out of the insn (ARM)
7497 * or the condexec bits (Thumb) but it isn't necessary.
7502 syndrome
= syn_cp14_rrt_trap(1, 0xe, opc1
, crm
, rt
, rt2
,
7505 syndrome
= syn_cp14_rt_trap(1, 0xe, opc1
, opc2
, crn
, crm
,
7511 syndrome
= syn_cp15_rrt_trap(1, 0xe, opc1
, crm
, rt
, rt2
,
7514 syndrome
= syn_cp15_rt_trap(1, 0xe, opc1
, opc2
, crn
, crm
,
7519 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7520 * so this can only happen if this is an ARMv7 or earlier CPU,
7521 * in which case the syndrome information won't actually be
7524 assert(!arm_dc_feature(s
, ARM_FEATURE_V8
));
7525 syndrome
= syn_uncategorized();
7529 gen_set_condexec(s
);
7530 gen_set_pc_im(s
, s
->pc
- 4);
7531 tmpptr
= tcg_const_ptr(ri
);
7532 tcg_syn
= tcg_const_i32(syndrome
);
7533 tcg_isread
= tcg_const_i32(isread
);
7534 gen_helper_access_check_cp_reg(cpu_env
, tmpptr
, tcg_syn
,
7536 tcg_temp_free_ptr(tmpptr
);
7537 tcg_temp_free_i32(tcg_syn
);
7538 tcg_temp_free_i32(tcg_isread
);
7541 /* Handle special cases first */
7542 switch (ri
->type
& ~(ARM_CP_FLAG_MASK
& ~ARM_CP_SPECIAL
)) {
7549 gen_set_pc_im(s
, s
->pc
);
7550 s
->base
.is_jmp
= DISAS_WFI
;
7556 if ((tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) && (ri
->type
& ARM_CP_IO
)) {
7565 if (ri
->type
& ARM_CP_CONST
) {
7566 tmp64
= tcg_const_i64(ri
->resetvalue
);
7567 } else if (ri
->readfn
) {
7569 tmp64
= tcg_temp_new_i64();
7570 tmpptr
= tcg_const_ptr(ri
);
7571 gen_helper_get_cp_reg64(tmp64
, cpu_env
, tmpptr
);
7572 tcg_temp_free_ptr(tmpptr
);
7574 tmp64
= tcg_temp_new_i64();
7575 tcg_gen_ld_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
7577 tmp
= tcg_temp_new_i32();
7578 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
7579 store_reg(s
, rt
, tmp
);
7580 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7581 tmp
= tcg_temp_new_i32();
7582 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
7583 tcg_temp_free_i64(tmp64
);
7584 store_reg(s
, rt2
, tmp
);
7587 if (ri
->type
& ARM_CP_CONST
) {
7588 tmp
= tcg_const_i32(ri
->resetvalue
);
7589 } else if (ri
->readfn
) {
7591 tmp
= tcg_temp_new_i32();
7592 tmpptr
= tcg_const_ptr(ri
);
7593 gen_helper_get_cp_reg(tmp
, cpu_env
, tmpptr
);
7594 tcg_temp_free_ptr(tmpptr
);
7596 tmp
= load_cpu_offset(ri
->fieldoffset
);
7599 /* Destination register of r15 for 32 bit loads sets
7600 * the condition codes from the high 4 bits of the value
7603 tcg_temp_free_i32(tmp
);
7605 store_reg(s
, rt
, tmp
);
7610 if (ri
->type
& ARM_CP_CONST
) {
7611 /* If not forbidden by access permissions, treat as WI */
7616 TCGv_i32 tmplo
, tmphi
;
7617 TCGv_i64 tmp64
= tcg_temp_new_i64();
7618 tmplo
= load_reg(s
, rt
);
7619 tmphi
= load_reg(s
, rt2
);
7620 tcg_gen_concat_i32_i64(tmp64
, tmplo
, tmphi
);
7621 tcg_temp_free_i32(tmplo
);
7622 tcg_temp_free_i32(tmphi
);
7624 TCGv_ptr tmpptr
= tcg_const_ptr(ri
);
7625 gen_helper_set_cp_reg64(cpu_env
, tmpptr
, tmp64
);
7626 tcg_temp_free_ptr(tmpptr
);
7628 tcg_gen_st_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
7630 tcg_temp_free_i64(tmp64
);
7635 tmp
= load_reg(s
, rt
);
7636 tmpptr
= tcg_const_ptr(ri
);
7637 gen_helper_set_cp_reg(cpu_env
, tmpptr
, tmp
);
7638 tcg_temp_free_ptr(tmpptr
);
7639 tcg_temp_free_i32(tmp
);
7641 TCGv_i32 tmp
= load_reg(s
, rt
);
7642 store_cpu_offset(tmp
, ri
->fieldoffset
);
7647 if ((tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) && (ri
->type
& ARM_CP_IO
)) {
7648 /* I/O operations must end the TB here (whether read or write) */
7651 } else if (!isread
&& !(ri
->type
& ARM_CP_SUPPRESS_TB_END
)) {
7652 /* We default to ending the TB on a coprocessor register write,
7653 * but allow this to be suppressed by the register definition
7654 * (usually only necessary to work around guest bugs).
7662 /* Unknown register; this might be a guest error or a QEMU
7663 * unimplemented feature.
7666 qemu_log_mask(LOG_UNIMP
, "%s access to unsupported AArch32 "
7667 "64 bit system register cp:%d opc1: %d crm:%d "
7669 isread
? "read" : "write", cpnum
, opc1
, crm
,
7670 s
->ns
? "non-secure" : "secure");
7672 qemu_log_mask(LOG_UNIMP
, "%s access to unsupported AArch32 "
7673 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7675 isread
? "read" : "write", cpnum
, opc1
, crn
, crm
, opc2
,
7676 s
->ns
? "non-secure" : "secure");
7683 /* Store a 64-bit value to a register pair. Clobbers val. */
7684 static void gen_storeq_reg(DisasContext
*s
, int rlow
, int rhigh
, TCGv_i64 val
)
7687 tmp
= tcg_temp_new_i32();
7688 tcg_gen_extrl_i64_i32(tmp
, val
);
7689 store_reg(s
, rlow
, tmp
);
7690 tmp
= tcg_temp_new_i32();
7691 tcg_gen_shri_i64(val
, val
, 32);
7692 tcg_gen_extrl_i64_i32(tmp
, val
);
7693 store_reg(s
, rhigh
, tmp
);
7696 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
7697 static void gen_addq_lo(DisasContext
*s
, TCGv_i64 val
, int rlow
)
7702 /* Load value and extend to 64 bits. */
7703 tmp
= tcg_temp_new_i64();
7704 tmp2
= load_reg(s
, rlow
);
7705 tcg_gen_extu_i32_i64(tmp
, tmp2
);
7706 tcg_temp_free_i32(tmp2
);
7707 tcg_gen_add_i64(val
, val
, tmp
);
7708 tcg_temp_free_i64(tmp
);
7711 /* load and add a 64-bit value from a register pair. */
7712 static void gen_addq(DisasContext
*s
, TCGv_i64 val
, int rlow
, int rhigh
)
7718 /* Load 64-bit value rd:rn. */
7719 tmpl
= load_reg(s
, rlow
);
7720 tmph
= load_reg(s
, rhigh
);
7721 tmp
= tcg_temp_new_i64();
7722 tcg_gen_concat_i32_i64(tmp
, tmpl
, tmph
);
7723 tcg_temp_free_i32(tmpl
);
7724 tcg_temp_free_i32(tmph
);
7725 tcg_gen_add_i64(val
, val
, tmp
);
7726 tcg_temp_free_i64(tmp
);
7729 /* Set N and Z flags from hi|lo. */
7730 static void gen_logicq_cc(TCGv_i32 lo
, TCGv_i32 hi
)
7732 tcg_gen_mov_i32(cpu_NF
, hi
);
7733 tcg_gen_or_i32(cpu_ZF
, lo
, hi
);
7736 /* Load/Store exclusive instructions are implemented by remembering
7737 the value/address loaded, and seeing if these are the same
7738 when the store is performed. This should be sufficient to implement
7739 the architecturally mandated semantics, and avoids having to monitor
7740 regular stores. The compare vs the remembered value is done during
7741 the cmpxchg operation, but we must compare the addresses manually. */
7742 static void gen_load_exclusive(DisasContext
*s
, int rt
, int rt2
,
7743 TCGv_i32 addr
, int size
)
7745 TCGv_i32 tmp
= tcg_temp_new_i32();
7746 TCGMemOp opc
= size
| MO_ALIGN
| s
->be_data
;
7751 TCGv_i32 tmp2
= tcg_temp_new_i32();
7752 TCGv_i64 t64
= tcg_temp_new_i64();
7754 /* For AArch32, architecturally the 32-bit word at the lowest
7755 * address is always Rt and the one at addr+4 is Rt2, even if
7756 * the CPU is big-endian. That means we don't want to do a
7757 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
7758 * for an architecturally 64-bit access, but instead do a
7759 * 64-bit access using MO_BE if appropriate and then split
7761 * This only makes a difference for BE32 user-mode, where
7762 * frob64() must not flip the two halves of the 64-bit data
7763 * but this code must treat BE32 user-mode like BE32 system.
7765 TCGv taddr
= gen_aa32_addr(s
, addr
, opc
);
7767 tcg_gen_qemu_ld_i64(t64
, taddr
, get_mem_index(s
), opc
);
7768 tcg_temp_free(taddr
);
7769 tcg_gen_mov_i64(cpu_exclusive_val
, t64
);
7770 if (s
->be_data
== MO_BE
) {
7771 tcg_gen_extr_i64_i32(tmp2
, tmp
, t64
);
7773 tcg_gen_extr_i64_i32(tmp
, tmp2
, t64
);
7775 tcg_temp_free_i64(t64
);
7777 store_reg(s
, rt2
, tmp2
);
7779 gen_aa32_ld_i32(s
, tmp
, addr
, get_mem_index(s
), opc
);
7780 tcg_gen_extu_i32_i64(cpu_exclusive_val
, tmp
);
7783 store_reg(s
, rt
, tmp
);
7784 tcg_gen_extu_i32_i64(cpu_exclusive_addr
, addr
);
7787 static void gen_clrex(DisasContext
*s
)
7789 tcg_gen_movi_i64(cpu_exclusive_addr
, -1);
7792 static void gen_store_exclusive(DisasContext
*s
, int rd
, int rt
, int rt2
,
7793 TCGv_i32 addr
, int size
)
7795 TCGv_i32 t0
, t1
, t2
;
7798 TCGLabel
*done_label
;
7799 TCGLabel
*fail_label
;
7800 TCGMemOp opc
= size
| MO_ALIGN
| s
->be_data
;
7802 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7808 fail_label
= gen_new_label();
7809 done_label
= gen_new_label();
7810 extaddr
= tcg_temp_new_i64();
7811 tcg_gen_extu_i32_i64(extaddr
, addr
);
7812 tcg_gen_brcond_i64(TCG_COND_NE
, extaddr
, cpu_exclusive_addr
, fail_label
);
7813 tcg_temp_free_i64(extaddr
);
7815 taddr
= gen_aa32_addr(s
, addr
, opc
);
7816 t0
= tcg_temp_new_i32();
7817 t1
= load_reg(s
, rt
);
7819 TCGv_i64 o64
= tcg_temp_new_i64();
7820 TCGv_i64 n64
= tcg_temp_new_i64();
7822 t2
= load_reg(s
, rt2
);
7823 /* For AArch32, architecturally the 32-bit word at the lowest
7824 * address is always Rt and the one at addr+4 is Rt2, even if
7825 * the CPU is big-endian. Since we're going to treat this as a
7826 * single 64-bit BE store, we need to put the two halves in the
7827 * opposite order for BE to LE, so that they end up in the right
7829 * We don't want gen_aa32_frob64() because that does the wrong
7830 * thing for BE32 usermode.
7832 if (s
->be_data
== MO_BE
) {
7833 tcg_gen_concat_i32_i64(n64
, t2
, t1
);
7835 tcg_gen_concat_i32_i64(n64
, t1
, t2
);
7837 tcg_temp_free_i32(t2
);
7839 tcg_gen_atomic_cmpxchg_i64(o64
, taddr
, cpu_exclusive_val
, n64
,
7840 get_mem_index(s
), opc
);
7841 tcg_temp_free_i64(n64
);
7843 tcg_gen_setcond_i64(TCG_COND_NE
, o64
, o64
, cpu_exclusive_val
);
7844 tcg_gen_extrl_i64_i32(t0
, o64
);
7846 tcg_temp_free_i64(o64
);
7848 t2
= tcg_temp_new_i32();
7849 tcg_gen_extrl_i64_i32(t2
, cpu_exclusive_val
);
7850 tcg_gen_atomic_cmpxchg_i32(t0
, taddr
, t2
, t1
, get_mem_index(s
), opc
);
7851 tcg_gen_setcond_i32(TCG_COND_NE
, t0
, t0
, t2
);
7852 tcg_temp_free_i32(t2
);
7854 tcg_temp_free_i32(t1
);
7855 tcg_temp_free(taddr
);
7856 tcg_gen_mov_i32(cpu_R
[rd
], t0
);
7857 tcg_temp_free_i32(t0
);
7858 tcg_gen_br(done_label
);
7860 gen_set_label(fail_label
);
7861 tcg_gen_movi_i32(cpu_R
[rd
], 1);
7862 gen_set_label(done_label
);
7863 tcg_gen_movi_i64(cpu_exclusive_addr
, -1);
7869 * @mode: mode field from insn (which stack to store to)
7870 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7871 * @writeback: true if writeback bit set
7873 * Generate code for the SRS (Store Return State) insn.
7875 static void gen_srs(DisasContext
*s
,
7876 uint32_t mode
, uint32_t amode
, bool writeback
)
7883 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
7884 * and specified mode is monitor mode
7885 * - UNDEFINED in Hyp mode
7886 * - UNPREDICTABLE in User or System mode
7887 * - UNPREDICTABLE if the specified mode is:
7888 * -- not implemented
7889 * -- not a valid mode number
7890 * -- a mode that's at a higher exception level
7891 * -- Monitor, if we are Non-secure
7892 * For the UNPREDICTABLE cases we choose to UNDEF.
7894 if (s
->current_el
== 1 && !s
->ns
&& mode
== ARM_CPU_MODE_MON
) {
7895 gen_exception_insn(s
, 4, EXCP_UDEF
, syn_uncategorized(), 3);
7899 if (s
->current_el
== 0 || s
->current_el
== 2) {
7904 case ARM_CPU_MODE_USR
:
7905 case ARM_CPU_MODE_FIQ
:
7906 case ARM_CPU_MODE_IRQ
:
7907 case ARM_CPU_MODE_SVC
:
7908 case ARM_CPU_MODE_ABT
:
7909 case ARM_CPU_MODE_UND
:
7910 case ARM_CPU_MODE_SYS
:
7912 case ARM_CPU_MODE_HYP
:
7913 if (s
->current_el
== 1 || !arm_dc_feature(s
, ARM_FEATURE_EL2
)) {
7917 case ARM_CPU_MODE_MON
:
7918 /* No need to check specifically for "are we non-secure" because
7919 * we've already made EL0 UNDEF and handled the trap for S-EL1;
7920 * so if this isn't EL3 then we must be non-secure.
7922 if (s
->current_el
!= 3) {
7931 gen_exception_insn(s
, 4, EXCP_UDEF
, syn_uncategorized(),
7932 default_exception_el(s
));
7936 addr
= tcg_temp_new_i32();
7937 tmp
= tcg_const_i32(mode
);
7938 /* get_r13_banked() will raise an exception if called from System mode */
7939 gen_set_condexec(s
);
7940 gen_set_pc_im(s
, s
->pc
- 4);
7941 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
7942 tcg_temp_free_i32(tmp
);
7959 tcg_gen_addi_i32(addr
, addr
, offset
);
7960 tmp
= load_reg(s
, 14);
7961 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
7962 tcg_temp_free_i32(tmp
);
7963 tmp
= load_cpu_field(spsr
);
7964 tcg_gen_addi_i32(addr
, addr
, 4);
7965 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
7966 tcg_temp_free_i32(tmp
);
7984 tcg_gen_addi_i32(addr
, addr
, offset
);
7985 tmp
= tcg_const_i32(mode
);
7986 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
7987 tcg_temp_free_i32(tmp
);
7989 tcg_temp_free_i32(addr
);
7990 s
->base
.is_jmp
= DISAS_UPDATE
;
7993 /* Generate a label used for skipping this instruction */
7994 static void arm_gen_condlabel(DisasContext
*s
)
7997 s
->condlabel
= gen_new_label();
8002 /* Skip this instruction if the ARM condition is false */
8003 static void arm_skip_unless(DisasContext
*s
, uint32_t cond
)
8005 arm_gen_condlabel(s
);
8006 arm_gen_test_cc(cond
^ 1, s
->condlabel
);
8009 static void disas_arm_insn(DisasContext
*s
, unsigned int insn
)
8011 unsigned int cond
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
8018 /* M variants do not implement ARM mode; this must raise the INVSTATE
8019 * UsageFault exception.
8021 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
8022 gen_exception_insn(s
, 4, EXCP_INVSTATE
, syn_uncategorized(),
8023 default_exception_el(s
));
8028 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8029 * choose to UNDEF. In ARMv5 and above the space is used
8030 * for miscellaneous unconditional instructions.
8034 /* Unconditional instructions. */
8035 if (((insn
>> 25) & 7) == 1) {
8036 /* NEON Data processing. */
8037 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
8041 if (disas_neon_data_insn(s
, insn
)) {
8046 if ((insn
& 0x0f100000) == 0x04000000) {
8047 /* NEON load/store. */
8048 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
8052 if (disas_neon_ls_insn(s
, insn
)) {
8057 if ((insn
& 0x0f000e10) == 0x0e000a00) {
8059 if (disas_vfp_insn(s
, insn
)) {
8064 if (((insn
& 0x0f30f000) == 0x0510f000) ||
8065 ((insn
& 0x0f30f010) == 0x0710f000)) {
8066 if ((insn
& (1 << 22)) == 0) {
8068 if (!arm_dc_feature(s
, ARM_FEATURE_V7MP
)) {
8072 /* Otherwise PLD; v5TE+ */
8076 if (((insn
& 0x0f70f000) == 0x0450f000) ||
8077 ((insn
& 0x0f70f010) == 0x0650f000)) {
8079 return; /* PLI; V7 */
8081 if (((insn
& 0x0f700000) == 0x04100000) ||
8082 ((insn
& 0x0f700010) == 0x06100000)) {
8083 if (!arm_dc_feature(s
, ARM_FEATURE_V7MP
)) {
8086 return; /* v7MP: Unallocated memory hint: must NOP */
8089 if ((insn
& 0x0ffffdff) == 0x01010000) {
8092 if (((insn
>> 9) & 1) != !!(s
->be_data
== MO_BE
)) {
8093 gen_helper_setend(cpu_env
);
8094 s
->base
.is_jmp
= DISAS_UPDATE
;
8097 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
8098 switch ((insn
>> 4) & 0xf) {
8106 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
8109 /* We need to break the TB after this insn to execute
8110 * self-modifying code correctly and also to take
8111 * any pending interrupts immediately.
8113 gen_goto_tb(s
, 0, s
->pc
& ~1);
8116 if ((insn
& 0xf) || !dc_isar_feature(aa32_sb
, s
)) {
8120 * TODO: There is no speculation barrier opcode
8121 * for TCG; MB and end the TB instead.
8123 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
8124 gen_goto_tb(s
, 0, s
->pc
& ~1);
8129 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
8132 gen_srs(s
, (insn
& 0x1f), (insn
>> 23) & 3, insn
& (1 << 21));
8134 } else if ((insn
& 0x0e50ffe0) == 0x08100a00) {
8140 rn
= (insn
>> 16) & 0xf;
8141 addr
= load_reg(s
, rn
);
8142 i
= (insn
>> 23) & 3;
8144 case 0: offset
= -4; break; /* DA */
8145 case 1: offset
= 0; break; /* IA */
8146 case 2: offset
= -8; break; /* DB */
8147 case 3: offset
= 4; break; /* IB */
8151 tcg_gen_addi_i32(addr
, addr
, offset
);
8152 /* Load PC into tmp and CPSR into tmp2. */
8153 tmp
= tcg_temp_new_i32();
8154 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
8155 tcg_gen_addi_i32(addr
, addr
, 4);
8156 tmp2
= tcg_temp_new_i32();
8157 gen_aa32_ld32u(s
, tmp2
, addr
, get_mem_index(s
));
8158 if (insn
& (1 << 21)) {
8159 /* Base writeback. */
8161 case 0: offset
= -8; break;
8162 case 1: offset
= 4; break;
8163 case 2: offset
= -4; break;
8164 case 3: offset
= 0; break;
8168 tcg_gen_addi_i32(addr
, addr
, offset
);
8169 store_reg(s
, rn
, addr
);
8171 tcg_temp_free_i32(addr
);
8173 gen_rfe(s
, tmp
, tmp2
);
8175 } else if ((insn
& 0x0e000000) == 0x0a000000) {
8176 /* branch link and change to thumb (blx <offset>) */
8179 val
= (uint32_t)s
->pc
;
8180 tmp
= tcg_temp_new_i32();
8181 tcg_gen_movi_i32(tmp
, val
);
8182 store_reg(s
, 14, tmp
);
8183 /* Sign-extend the 24-bit offset */
8184 offset
= (((int32_t)insn
) << 8) >> 8;
8185 /* offset * 4 + bit24 * 2 + (thumb bit) */
8186 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
8187 /* pipeline offset */
8189 /* protected by ARCH(5); above, near the start of uncond block */
8192 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
8193 if (arm_dc_feature(s
, ARM_FEATURE_IWMMXT
)) {
8194 /* iWMMXt register transfer. */
8195 if (extract32(s
->c15_cpar
, 1, 1)) {
8196 if (!disas_iwmmxt_insn(s
, insn
)) {
8201 } else if ((insn
& 0x0e000a00) == 0x0c000800
8202 && arm_dc_feature(s
, ARM_FEATURE_V8
)) {
8203 if (disas_neon_insn_3same_ext(s
, insn
)) {
8207 } else if ((insn
& 0x0f000a00) == 0x0e000800
8208 && arm_dc_feature(s
, ARM_FEATURE_V8
)) {
8209 if (disas_neon_insn_2reg_scalar_ext(s
, insn
)) {
8213 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
8214 /* Coprocessor double register transfer. */
8216 } else if ((insn
& 0x0f000010) == 0x0e000010) {
8217 /* Additional coprocessor register transfer. */
8218 } else if ((insn
& 0x0ff10020) == 0x01000000) {
8221 /* cps (privileged) */
8225 if (insn
& (1 << 19)) {
8226 if (insn
& (1 << 8))
8228 if (insn
& (1 << 7))
8230 if (insn
& (1 << 6))
8232 if (insn
& (1 << 18))
8235 if (insn
& (1 << 17)) {
8237 val
|= (insn
& 0x1f);
8240 gen_set_psr_im(s
, mask
, 0, val
);
8247 /* if not always execute, we generate a conditional jump to
8249 arm_skip_unless(s
, cond
);
8251 if ((insn
& 0x0f900000) == 0x03000000) {
8252 if ((insn
& (1 << 21)) == 0) {
8254 rd
= (insn
>> 12) & 0xf;
8255 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
8256 if ((insn
& (1 << 22)) == 0) {
8258 tmp
= tcg_temp_new_i32();
8259 tcg_gen_movi_i32(tmp
, val
);
8262 tmp
= load_reg(s
, rd
);
8263 tcg_gen_ext16u_i32(tmp
, tmp
);
8264 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
8266 store_reg(s
, rd
, tmp
);
8268 if (((insn
>> 12) & 0xf) != 0xf)
8270 if (((insn
>> 16) & 0xf) == 0) {
8271 gen_nop_hint(s
, insn
& 0xff);
8273 /* CPSR = immediate */
8275 shift
= ((insn
>> 8) & 0xf) * 2;
8277 val
= (val
>> shift
) | (val
<< (32 - shift
));
8278 i
= ((insn
& (1 << 22)) != 0);
8279 if (gen_set_psr_im(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
),
8285 } else if ((insn
& 0x0f900000) == 0x01000000
8286 && (insn
& 0x00000090) != 0x00000090) {
8287 /* miscellaneous instructions */
8288 op1
= (insn
>> 21) & 3;
8289 sh
= (insn
>> 4) & 0xf;
8292 case 0x0: /* MSR, MRS */
8293 if (insn
& (1 << 9)) {
8294 /* MSR (banked) and MRS (banked) */
8295 int sysm
= extract32(insn
, 16, 4) |
8296 (extract32(insn
, 8, 1) << 4);
8297 int r
= extract32(insn
, 22, 1);
8301 gen_msr_banked(s
, r
, sysm
, rm
);
8304 int rd
= extract32(insn
, 12, 4);
8306 gen_mrs_banked(s
, r
, sysm
, rd
);
8311 /* MSR, MRS (for PSRs) */
8314 tmp
= load_reg(s
, rm
);
8315 i
= ((op1
& 2) != 0);
8316 if (gen_set_psr(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
8320 rd
= (insn
>> 12) & 0xf;
8324 tmp
= load_cpu_field(spsr
);
8326 tmp
= tcg_temp_new_i32();
8327 gen_helper_cpsr_read(tmp
, cpu_env
);
8329 store_reg(s
, rd
, tmp
);
8334 /* branch/exchange thumb (bx). */
8336 tmp
= load_reg(s
, rm
);
8338 } else if (op1
== 3) {
8341 rd
= (insn
>> 12) & 0xf;
8342 tmp
= load_reg(s
, rm
);
8343 tcg_gen_clzi_i32(tmp
, tmp
, 32);
8344 store_reg(s
, rd
, tmp
);
8352 /* Trivial implementation equivalent to bx. */
8353 tmp
= load_reg(s
, rm
);
8364 /* branch link/exchange thumb (blx) */
8365 tmp
= load_reg(s
, rm
);
8366 tmp2
= tcg_temp_new_i32();
8367 tcg_gen_movi_i32(tmp2
, s
->pc
);
8368 store_reg(s
, 14, tmp2
);
8374 uint32_t c
= extract32(insn
, 8, 4);
8376 /* Check this CPU supports ARMv8 CRC instructions.
8377 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8378 * Bits 8, 10 and 11 should be zero.
8380 if (!dc_isar_feature(aa32_crc32
, s
) || op1
== 0x3 || (c
& 0xd) != 0) {
8384 rn
= extract32(insn
, 16, 4);
8385 rd
= extract32(insn
, 12, 4);
8387 tmp
= load_reg(s
, rn
);
8388 tmp2
= load_reg(s
, rm
);
8390 tcg_gen_andi_i32(tmp2
, tmp2
, 0xff);
8391 } else if (op1
== 1) {
8392 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff);
8394 tmp3
= tcg_const_i32(1 << op1
);
8396 gen_helper_crc32c(tmp
, tmp
, tmp2
, tmp3
);
8398 gen_helper_crc32(tmp
, tmp
, tmp2
, tmp3
);
8400 tcg_temp_free_i32(tmp2
);
8401 tcg_temp_free_i32(tmp3
);
8402 store_reg(s
, rd
, tmp
);
8405 case 0x5: /* saturating add/subtract */
8407 rd
= (insn
>> 12) & 0xf;
8408 rn
= (insn
>> 16) & 0xf;
8409 tmp
= load_reg(s
, rm
);
8410 tmp2
= load_reg(s
, rn
);
8412 gen_helper_double_saturate(tmp2
, cpu_env
, tmp2
);
8414 gen_helper_sub_saturate(tmp
, cpu_env
, tmp
, tmp2
);
8416 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp2
);
8417 tcg_temp_free_i32(tmp2
);
8418 store_reg(s
, rd
, tmp
);
8420 case 0x6: /* ERET */
8424 if (!arm_dc_feature(s
, ARM_FEATURE_V7VE
)) {
8427 if ((insn
& 0x000fff0f) != 0x0000000e) {
8428 /* UNPREDICTABLE; we choose to UNDEF */
8432 if (s
->current_el
== 2) {
8433 tmp
= load_cpu_field(elr_el
[2]);
8435 tmp
= load_reg(s
, 14);
8437 gen_exception_return(s
, tmp
);
8441 int imm16
= extract32(insn
, 0, 4) | (extract32(insn
, 8, 12) << 4);
8450 gen_exception_bkpt_insn(s
, 4, syn_aa32_bkpt(imm16
, false));
8453 /* Hypervisor call (v7) */
8461 /* Secure monitor call (v6+) */
8469 g_assert_not_reached();
8473 case 0x8: /* signed multiply */
8478 rs
= (insn
>> 8) & 0xf;
8479 rn
= (insn
>> 12) & 0xf;
8480 rd
= (insn
>> 16) & 0xf;
8482 /* (32 * 16) >> 16 */
8483 tmp
= load_reg(s
, rm
);
8484 tmp2
= load_reg(s
, rs
);
8486 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
8489 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8490 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
8491 tmp
= tcg_temp_new_i32();
8492 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
8493 tcg_temp_free_i64(tmp64
);
8494 if ((sh
& 2) == 0) {
8495 tmp2
= load_reg(s
, rn
);
8496 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8497 tcg_temp_free_i32(tmp2
);
8499 store_reg(s
, rd
, tmp
);
8502 tmp
= load_reg(s
, rm
);
8503 tmp2
= load_reg(s
, rs
);
8504 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
8505 tcg_temp_free_i32(tmp2
);
8507 tmp64
= tcg_temp_new_i64();
8508 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8509 tcg_temp_free_i32(tmp
);
8510 gen_addq(s
, tmp64
, rn
, rd
);
8511 gen_storeq_reg(s
, rn
, rd
, tmp64
);
8512 tcg_temp_free_i64(tmp64
);
8515 tmp2
= load_reg(s
, rn
);
8516 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8517 tcg_temp_free_i32(tmp2
);
8519 store_reg(s
, rd
, tmp
);
8526 } else if (((insn
& 0x0e000000) == 0 &&
8527 (insn
& 0x00000090) != 0x90) ||
8528 ((insn
& 0x0e000000) == (1 << 25))) {
8529 int set_cc
, logic_cc
, shiftop
;
8531 op1
= (insn
>> 21) & 0xf;
8532 set_cc
= (insn
>> 20) & 1;
8533 logic_cc
= table_logic_cc
[op1
] & set_cc
;
8535 /* data processing instruction */
8536 if (insn
& (1 << 25)) {
8537 /* immediate operand */
8539 shift
= ((insn
>> 8) & 0xf) * 2;
8541 val
= (val
>> shift
) | (val
<< (32 - shift
));
8543 tmp2
= tcg_temp_new_i32();
8544 tcg_gen_movi_i32(tmp2
, val
);
8545 if (logic_cc
&& shift
) {
8546 gen_set_CF_bit31(tmp2
);
8551 tmp2
= load_reg(s
, rm
);
8552 shiftop
= (insn
>> 5) & 3;
8553 if (!(insn
& (1 << 4))) {
8554 shift
= (insn
>> 7) & 0x1f;
8555 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
8557 rs
= (insn
>> 8) & 0xf;
8558 tmp
= load_reg(s
, rs
);
8559 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
8562 if (op1
!= 0x0f && op1
!= 0x0d) {
8563 rn
= (insn
>> 16) & 0xf;
8564 tmp
= load_reg(s
, rn
);
8568 rd
= (insn
>> 12) & 0xf;
8571 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
8575 store_reg_bx(s
, rd
, tmp
);
8578 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
8582 store_reg_bx(s
, rd
, tmp
);
8585 if (set_cc
&& rd
== 15) {
8586 /* SUBS r15, ... is used for exception return. */
8590 gen_sub_CC(tmp
, tmp
, tmp2
);
8591 gen_exception_return(s
, tmp
);
8594 gen_sub_CC(tmp
, tmp
, tmp2
);
8596 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8598 store_reg_bx(s
, rd
, tmp
);
8603 gen_sub_CC(tmp
, tmp2
, tmp
);
8605 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
8607 store_reg_bx(s
, rd
, tmp
);
8611 gen_add_CC(tmp
, tmp
, tmp2
);
8613 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8615 store_reg_bx(s
, rd
, tmp
);
8619 gen_adc_CC(tmp
, tmp
, tmp2
);
8621 gen_add_carry(tmp
, tmp
, tmp2
);
8623 store_reg_bx(s
, rd
, tmp
);
8627 gen_sbc_CC(tmp
, tmp
, tmp2
);
8629 gen_sub_carry(tmp
, tmp
, tmp2
);
8631 store_reg_bx(s
, rd
, tmp
);
8635 gen_sbc_CC(tmp
, tmp2
, tmp
);
8637 gen_sub_carry(tmp
, tmp2
, tmp
);
8639 store_reg_bx(s
, rd
, tmp
);
8643 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
8646 tcg_temp_free_i32(tmp
);
8650 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
8653 tcg_temp_free_i32(tmp
);
8657 gen_sub_CC(tmp
, tmp
, tmp2
);
8659 tcg_temp_free_i32(tmp
);
8663 gen_add_CC(tmp
, tmp
, tmp2
);
8665 tcg_temp_free_i32(tmp
);
8668 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
8672 store_reg_bx(s
, rd
, tmp
);
8675 if (logic_cc
&& rd
== 15) {
8676 /* MOVS r15, ... is used for exception return. */
8680 gen_exception_return(s
, tmp2
);
8685 store_reg_bx(s
, rd
, tmp2
);
8689 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
8693 store_reg_bx(s
, rd
, tmp
);
8697 tcg_gen_not_i32(tmp2
, tmp2
);
8701 store_reg_bx(s
, rd
, tmp2
);
8704 if (op1
!= 0x0f && op1
!= 0x0d) {
8705 tcg_temp_free_i32(tmp2
);
8708 /* other instructions */
8709 op1
= (insn
>> 24) & 0xf;
8713 /* multiplies, extra load/stores */
8714 sh
= (insn
>> 5) & 3;
8717 rd
= (insn
>> 16) & 0xf;
8718 rn
= (insn
>> 12) & 0xf;
8719 rs
= (insn
>> 8) & 0xf;
8721 op1
= (insn
>> 20) & 0xf;
8723 case 0: case 1: case 2: case 3: case 6:
8725 tmp
= load_reg(s
, rs
);
8726 tmp2
= load_reg(s
, rm
);
8727 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
8728 tcg_temp_free_i32(tmp2
);
8729 if (insn
& (1 << 22)) {
8730 /* Subtract (mls) */
8732 tmp2
= load_reg(s
, rn
);
8733 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
8734 tcg_temp_free_i32(tmp2
);
8735 } else if (insn
& (1 << 21)) {
8737 tmp2
= load_reg(s
, rn
);
8738 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8739 tcg_temp_free_i32(tmp2
);
8741 if (insn
& (1 << 20))
8743 store_reg(s
, rd
, tmp
);
8746 /* 64 bit mul double accumulate (UMAAL) */
8748 tmp
= load_reg(s
, rs
);
8749 tmp2
= load_reg(s
, rm
);
8750 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
8751 gen_addq_lo(s
, tmp64
, rn
);
8752 gen_addq_lo(s
, tmp64
, rd
);
8753 gen_storeq_reg(s
, rn
, rd
, tmp64
);
8754 tcg_temp_free_i64(tmp64
);
8756 case 8: case 9: case 10: case 11:
8757 case 12: case 13: case 14: case 15:
8758 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8759 tmp
= load_reg(s
, rs
);
8760 tmp2
= load_reg(s
, rm
);
8761 if (insn
& (1 << 22)) {
8762 tcg_gen_muls2_i32(tmp
, tmp2
, tmp
, tmp2
);
8764 tcg_gen_mulu2_i32(tmp
, tmp2
, tmp
, tmp2
);
8766 if (insn
& (1 << 21)) { /* mult accumulate */
8767 TCGv_i32 al
= load_reg(s
, rn
);
8768 TCGv_i32 ah
= load_reg(s
, rd
);
8769 tcg_gen_add2_i32(tmp
, tmp2
, tmp
, tmp2
, al
, ah
);
8770 tcg_temp_free_i32(al
);
8771 tcg_temp_free_i32(ah
);
8773 if (insn
& (1 << 20)) {
8774 gen_logicq_cc(tmp
, tmp2
);
8776 store_reg(s
, rn
, tmp
);
8777 store_reg(s
, rd
, tmp2
);
8783 rn
= (insn
>> 16) & 0xf;
8784 rd
= (insn
>> 12) & 0xf;
8785 if (insn
& (1 << 23)) {
8786 /* load/store exclusive */
8787 bool is_ld
= extract32(insn
, 20, 1);
8788 bool is_lasr
= !extract32(insn
, 8, 1);
8789 int op2
= (insn
>> 8) & 3;
8790 op1
= (insn
>> 21) & 0x3;
8793 case 0: /* lda/stl */
8799 case 1: /* reserved */
8801 case 2: /* ldaex/stlex */
8804 case 3: /* ldrex/strex */
8813 addr
= tcg_temp_local_new_i32();
8814 load_reg_var(s
, addr
, rn
);
8816 if (is_lasr
&& !is_ld
) {
8817 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_STRL
);
8822 tmp
= tcg_temp_new_i32();
8825 gen_aa32_ld32u_iss(s
, tmp
, addr
,
8830 gen_aa32_ld8u_iss(s
, tmp
, addr
,
8835 gen_aa32_ld16u_iss(s
, tmp
, addr
,
8842 store_reg(s
, rd
, tmp
);
8845 tmp
= load_reg(s
, rm
);
8848 gen_aa32_st32_iss(s
, tmp
, addr
,
8853 gen_aa32_st8_iss(s
, tmp
, addr
,
8858 gen_aa32_st16_iss(s
, tmp
, addr
,
8865 tcg_temp_free_i32(tmp
);
8870 gen_load_exclusive(s
, rd
, 15, addr
, 2);
8872 case 1: /* ldrexd */
8873 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
8875 case 2: /* ldrexb */
8876 gen_load_exclusive(s
, rd
, 15, addr
, 0);
8878 case 3: /* ldrexh */
8879 gen_load_exclusive(s
, rd
, 15, addr
, 1);
8888 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
8890 case 1: /* strexd */
8891 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
8893 case 2: /* strexb */
8894 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
8896 case 3: /* strexh */
8897 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
8903 tcg_temp_free_i32(addr
);
8905 if (is_lasr
&& is_ld
) {
8906 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_LDAQ
);
8908 } else if ((insn
& 0x00300f00) == 0) {
8909 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
8914 TCGMemOp opc
= s
->be_data
;
8918 if (insn
& (1 << 22)) {
8921 opc
|= MO_UL
| MO_ALIGN
;
8924 addr
= load_reg(s
, rn
);
8925 taddr
= gen_aa32_addr(s
, addr
, opc
);
8926 tcg_temp_free_i32(addr
);
8928 tmp
= load_reg(s
, rm
);
8929 tcg_gen_atomic_xchg_i32(tmp
, taddr
, tmp
,
8930 get_mem_index(s
), opc
);
8931 tcg_temp_free(taddr
);
8932 store_reg(s
, rd
, tmp
);
8939 bool load
= insn
& (1 << 20);
8940 bool wbit
= insn
& (1 << 21);
8941 bool pbit
= insn
& (1 << 24);
8942 bool doubleword
= false;
8945 /* Misc load/store */
8946 rn
= (insn
>> 16) & 0xf;
8947 rd
= (insn
>> 12) & 0xf;
8949 /* ISS not valid if writeback */
8950 issinfo
= (pbit
& !wbit
) ? rd
: ISSInvalid
;
8952 if (!load
&& (sh
& 2)) {
8956 /* UNPREDICTABLE; we choose to UNDEF */
8959 load
= (sh
& 1) == 0;
8963 addr
= load_reg(s
, rn
);
8965 gen_add_datah_offset(s
, insn
, 0, addr
);
8972 tmp
= load_reg(s
, rd
);
8973 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
8974 tcg_temp_free_i32(tmp
);
8975 tcg_gen_addi_i32(addr
, addr
, 4);
8976 tmp
= load_reg(s
, rd
+ 1);
8977 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
8978 tcg_temp_free_i32(tmp
);
8981 tmp
= tcg_temp_new_i32();
8982 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
8983 store_reg(s
, rd
, tmp
);
8984 tcg_gen_addi_i32(addr
, addr
, 4);
8985 tmp
= tcg_temp_new_i32();
8986 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
8989 address_offset
= -4;
8992 tmp
= tcg_temp_new_i32();
8995 gen_aa32_ld16u_iss(s
, tmp
, addr
, get_mem_index(s
),
8999 gen_aa32_ld8s_iss(s
, tmp
, addr
, get_mem_index(s
),
9004 gen_aa32_ld16s_iss(s
, tmp
, addr
, get_mem_index(s
),
9010 tmp
= load_reg(s
, rd
);
9011 gen_aa32_st16_iss(s
, tmp
, addr
, get_mem_index(s
), issinfo
);
9012 tcg_temp_free_i32(tmp
);
9014 /* Perform base writeback before the loaded value to
9015 ensure correct behavior with overlapping index registers.
9016 ldrd with base writeback is undefined if the
9017 destination and index registers overlap. */
9019 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
9020 store_reg(s
, rn
, addr
);
9023 tcg_gen_addi_i32(addr
, addr
, address_offset
);
9024 store_reg(s
, rn
, addr
);
9026 tcg_temp_free_i32(addr
);
9029 /* Complete the load. */
9030 store_reg(s
, rd
, tmp
);
9039 if (insn
& (1 << 4)) {
9041 /* Armv6 Media instructions. */
9043 rn
= (insn
>> 16) & 0xf;
9044 rd
= (insn
>> 12) & 0xf;
9045 rs
= (insn
>> 8) & 0xf;
9046 switch ((insn
>> 23) & 3) {
9047 case 0: /* Parallel add/subtract. */
9048 op1
= (insn
>> 20) & 7;
9049 tmp
= load_reg(s
, rn
);
9050 tmp2
= load_reg(s
, rm
);
9051 sh
= (insn
>> 5) & 7;
9052 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
9054 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
9055 tcg_temp_free_i32(tmp2
);
9056 store_reg(s
, rd
, tmp
);
9059 if ((insn
& 0x00700020) == 0) {
9060 /* Halfword pack. */
9061 tmp
= load_reg(s
, rn
);
9062 tmp2
= load_reg(s
, rm
);
9063 shift
= (insn
>> 7) & 0x1f;
9064 if (insn
& (1 << 6)) {
9068 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
9069 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
9070 tcg_gen_ext16u_i32(tmp2
, tmp2
);
9074 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
9075 tcg_gen_ext16u_i32(tmp
, tmp
);
9076 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
9078 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
9079 tcg_temp_free_i32(tmp2
);
9080 store_reg(s
, rd
, tmp
);
9081 } else if ((insn
& 0x00200020) == 0x00200000) {
9083 tmp
= load_reg(s
, rm
);
9084 shift
= (insn
>> 7) & 0x1f;
9085 if (insn
& (1 << 6)) {
9088 tcg_gen_sari_i32(tmp
, tmp
, shift
);
9090 tcg_gen_shli_i32(tmp
, tmp
, shift
);
9092 sh
= (insn
>> 16) & 0x1f;
9093 tmp2
= tcg_const_i32(sh
);
9094 if (insn
& (1 << 22))
9095 gen_helper_usat(tmp
, cpu_env
, tmp
, tmp2
);
9097 gen_helper_ssat(tmp
, cpu_env
, tmp
, tmp2
);
9098 tcg_temp_free_i32(tmp2
);
9099 store_reg(s
, rd
, tmp
);
9100 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
9102 tmp
= load_reg(s
, rm
);
9103 sh
= (insn
>> 16) & 0x1f;
9104 tmp2
= tcg_const_i32(sh
);
9105 if (insn
& (1 << 22))
9106 gen_helper_usat16(tmp
, cpu_env
, tmp
, tmp2
);
9108 gen_helper_ssat16(tmp
, cpu_env
, tmp
, tmp2
);
9109 tcg_temp_free_i32(tmp2
);
9110 store_reg(s
, rd
, tmp
);
9111 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
9113 tmp
= load_reg(s
, rn
);
9114 tmp2
= load_reg(s
, rm
);
9115 tmp3
= tcg_temp_new_i32();
9116 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
9117 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
9118 tcg_temp_free_i32(tmp3
);
9119 tcg_temp_free_i32(tmp2
);
9120 store_reg(s
, rd
, tmp
);
9121 } else if ((insn
& 0x000003e0) == 0x00000060) {
9122 tmp
= load_reg(s
, rm
);
9123 shift
= (insn
>> 10) & 3;
9124 /* ??? In many cases it's not necessary to do a
9125 rotate, a shift is sufficient. */
9127 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
9128 op1
= (insn
>> 20) & 7;
9130 case 0: gen_sxtb16(tmp
); break;
9131 case 2: gen_sxtb(tmp
); break;
9132 case 3: gen_sxth(tmp
); break;
9133 case 4: gen_uxtb16(tmp
); break;
9134 case 6: gen_uxtb(tmp
); break;
9135 case 7: gen_uxth(tmp
); break;
9136 default: goto illegal_op
;
9139 tmp2
= load_reg(s
, rn
);
9140 if ((op1
& 3) == 0) {
9141 gen_add16(tmp
, tmp2
);
9143 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9144 tcg_temp_free_i32(tmp2
);
9147 store_reg(s
, rd
, tmp
);
9148 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
9150 tmp
= load_reg(s
, rm
);
9151 if (insn
& (1 << 22)) {
9152 if (insn
& (1 << 7)) {
9156 gen_helper_rbit(tmp
, tmp
);
9159 if (insn
& (1 << 7))
9162 tcg_gen_bswap32_i32(tmp
, tmp
);
9164 store_reg(s
, rd
, tmp
);
9169 case 2: /* Multiplies (Type 3). */
9170 switch ((insn
>> 20) & 0x7) {
9172 if (((insn
>> 6) ^ (insn
>> 7)) & 1) {
9173 /* op2 not 00x or 11x : UNDEF */
9176 /* Signed multiply most significant [accumulate].
9177 (SMMUL, SMMLA, SMMLS) */
9178 tmp
= load_reg(s
, rm
);
9179 tmp2
= load_reg(s
, rs
);
9180 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
9183 tmp
= load_reg(s
, rd
);
9184 if (insn
& (1 << 6)) {
9185 tmp64
= gen_subq_msw(tmp64
, tmp
);
9187 tmp64
= gen_addq_msw(tmp64
, tmp
);
9190 if (insn
& (1 << 5)) {
9191 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
9193 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
9194 tmp
= tcg_temp_new_i32();
9195 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
9196 tcg_temp_free_i64(tmp64
);
9197 store_reg(s
, rn
, tmp
);
9201 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9202 if (insn
& (1 << 7)) {
9205 tmp
= load_reg(s
, rm
);
9206 tmp2
= load_reg(s
, rs
);
9207 if (insn
& (1 << 5))
9208 gen_swap_half(tmp2
);
9209 gen_smul_dual(tmp
, tmp2
);
9210 if (insn
& (1 << 22)) {
9211 /* smlald, smlsld */
9214 tmp64
= tcg_temp_new_i64();
9215 tmp64_2
= tcg_temp_new_i64();
9216 tcg_gen_ext_i32_i64(tmp64
, tmp
);
9217 tcg_gen_ext_i32_i64(tmp64_2
, tmp2
);
9218 tcg_temp_free_i32(tmp
);
9219 tcg_temp_free_i32(tmp2
);
9220 if (insn
& (1 << 6)) {
9221 tcg_gen_sub_i64(tmp64
, tmp64
, tmp64_2
);
9223 tcg_gen_add_i64(tmp64
, tmp64
, tmp64_2
);
9225 tcg_temp_free_i64(tmp64_2
);
9226 gen_addq(s
, tmp64
, rd
, rn
);
9227 gen_storeq_reg(s
, rd
, rn
, tmp64
);
9228 tcg_temp_free_i64(tmp64
);
9230 /* smuad, smusd, smlad, smlsd */
9231 if (insn
& (1 << 6)) {
9232 /* This subtraction cannot overflow. */
9233 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
9235 /* This addition cannot overflow 32 bits;
9236 * however it may overflow considered as a
9237 * signed operation, in which case we must set
9240 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
9242 tcg_temp_free_i32(tmp2
);
9245 tmp2
= load_reg(s
, rd
);
9246 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
9247 tcg_temp_free_i32(tmp2
);
9249 store_reg(s
, rn
, tmp
);
9255 if (!dc_isar_feature(arm_div
, s
)) {
9258 if (((insn
>> 5) & 7) || (rd
!= 15)) {
9261 tmp
= load_reg(s
, rm
);
9262 tmp2
= load_reg(s
, rs
);
9263 if (insn
& (1 << 21)) {
9264 gen_helper_udiv(tmp
, tmp
, tmp2
);
9266 gen_helper_sdiv(tmp
, tmp
, tmp2
);
9268 tcg_temp_free_i32(tmp2
);
9269 store_reg(s
, rn
, tmp
);
9276 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
9278 case 0: /* Unsigned sum of absolute differences. */
9280 tmp
= load_reg(s
, rm
);
9281 tmp2
= load_reg(s
, rs
);
9282 gen_helper_usad8(tmp
, tmp
, tmp2
);
9283 tcg_temp_free_i32(tmp2
);
9285 tmp2
= load_reg(s
, rd
);
9286 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9287 tcg_temp_free_i32(tmp2
);
9289 store_reg(s
, rn
, tmp
);
9291 case 0x20: case 0x24: case 0x28: case 0x2c:
9292 /* Bitfield insert/clear. */
9294 shift
= (insn
>> 7) & 0x1f;
9295 i
= (insn
>> 16) & 0x1f;
9297 /* UNPREDICTABLE; we choose to UNDEF */
9302 tmp
= tcg_temp_new_i32();
9303 tcg_gen_movi_i32(tmp
, 0);
9305 tmp
= load_reg(s
, rm
);
9308 tmp2
= load_reg(s
, rd
);
9309 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, shift
, i
);
9310 tcg_temp_free_i32(tmp2
);
9312 store_reg(s
, rd
, tmp
);
9314 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9315 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
9317 tmp
= load_reg(s
, rm
);
9318 shift
= (insn
>> 7) & 0x1f;
9319 i
= ((insn
>> 16) & 0x1f) + 1;
9324 tcg_gen_extract_i32(tmp
, tmp
, shift
, i
);
9326 tcg_gen_sextract_i32(tmp
, tmp
, shift
, i
);
9329 store_reg(s
, rd
, tmp
);
9339 /* Check for undefined extension instructions
9340 * per the ARM Bible IE:
9341 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9343 sh
= (0xf << 20) | (0xf << 4);
9344 if (op1
== 0x7 && ((insn
& sh
) == sh
))
9348 /* load/store byte/word */
9349 rn
= (insn
>> 16) & 0xf;
9350 rd
= (insn
>> 12) & 0xf;
9351 tmp2
= load_reg(s
, rn
);
9352 if ((insn
& 0x01200000) == 0x00200000) {
9354 i
= get_a32_user_mem_index(s
);
9356 i
= get_mem_index(s
);
9358 if (insn
& (1 << 24))
9359 gen_add_data_offset(s
, insn
, tmp2
);
9360 if (insn
& (1 << 20)) {
9362 tmp
= tcg_temp_new_i32();
9363 if (insn
& (1 << 22)) {
9364 gen_aa32_ld8u_iss(s
, tmp
, tmp2
, i
, rd
);
9366 gen_aa32_ld32u_iss(s
, tmp
, tmp2
, i
, rd
);
9370 tmp
= load_reg(s
, rd
);
9371 if (insn
& (1 << 22)) {
9372 gen_aa32_st8_iss(s
, tmp
, tmp2
, i
, rd
);
9374 gen_aa32_st32_iss(s
, tmp
, tmp2
, i
, rd
);
9376 tcg_temp_free_i32(tmp
);
9378 if (!(insn
& (1 << 24))) {
9379 gen_add_data_offset(s
, insn
, tmp2
);
9380 store_reg(s
, rn
, tmp2
);
9381 } else if (insn
& (1 << 21)) {
9382 store_reg(s
, rn
, tmp2
);
9384 tcg_temp_free_i32(tmp2
);
9386 if (insn
& (1 << 20)) {
9387 /* Complete the load. */
9388 store_reg_from_load(s
, rd
, tmp
);
9394 int j
, n
, loaded_base
;
9395 bool exc_return
= false;
9396 bool is_load
= extract32(insn
, 20, 1);
9398 TCGv_i32 loaded_var
;
9399 /* load/store multiple words */
9400 /* XXX: store correct base if write back */
9401 if (insn
& (1 << 22)) {
9402 /* LDM (user), LDM (exception return) and STM (user) */
9404 goto illegal_op
; /* only usable in supervisor mode */
9406 if (is_load
&& extract32(insn
, 15, 1)) {
9412 rn
= (insn
>> 16) & 0xf;
9413 addr
= load_reg(s
, rn
);
9415 /* compute total size */
9420 if (insn
& (1 << i
))
9423 /* XXX: test invalid n == 0 case ? */
9424 if (insn
& (1 << 23)) {
9425 if (insn
& (1 << 24)) {
9427 tcg_gen_addi_i32(addr
, addr
, 4);
9429 /* post increment */
9432 if (insn
& (1 << 24)) {
9434 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
9436 /* post decrement */
9438 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
9443 if (insn
& (1 << i
)) {
9446 tmp
= tcg_temp_new_i32();
9447 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9449 tmp2
= tcg_const_i32(i
);
9450 gen_helper_set_user_reg(cpu_env
, tmp2
, tmp
);
9451 tcg_temp_free_i32(tmp2
);
9452 tcg_temp_free_i32(tmp
);
9453 } else if (i
== rn
) {
9456 } else if (i
== 15 && exc_return
) {
9457 store_pc_exc_ret(s
, tmp
);
9459 store_reg_from_load(s
, i
, tmp
);
9464 /* special case: r15 = PC + 8 */
9465 val
= (long)s
->pc
+ 4;
9466 tmp
= tcg_temp_new_i32();
9467 tcg_gen_movi_i32(tmp
, val
);
9469 tmp
= tcg_temp_new_i32();
9470 tmp2
= tcg_const_i32(i
);
9471 gen_helper_get_user_reg(tmp
, cpu_env
, tmp2
);
9472 tcg_temp_free_i32(tmp2
);
9474 tmp
= load_reg(s
, i
);
9476 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9477 tcg_temp_free_i32(tmp
);
9480 /* no need to add after the last transfer */
9482 tcg_gen_addi_i32(addr
, addr
, 4);
9485 if (insn
& (1 << 21)) {
9487 if (insn
& (1 << 23)) {
9488 if (insn
& (1 << 24)) {
9491 /* post increment */
9492 tcg_gen_addi_i32(addr
, addr
, 4);
9495 if (insn
& (1 << 24)) {
9498 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
9500 /* post decrement */
9501 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
9504 store_reg(s
, rn
, addr
);
9506 tcg_temp_free_i32(addr
);
9509 store_reg(s
, rn
, loaded_var
);
9512 /* Restore CPSR from SPSR. */
9513 tmp
= load_cpu_field(spsr
);
9514 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
9517 gen_helper_cpsr_write_eret(cpu_env
, tmp
);
9518 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
9521 tcg_temp_free_i32(tmp
);
9522 /* Must exit loop to check un-masked IRQs */
9523 s
->base
.is_jmp
= DISAS_EXIT
;
9532 /* branch (and link) */
9533 val
= (int32_t)s
->pc
;
9534 if (insn
& (1 << 24)) {
9535 tmp
= tcg_temp_new_i32();
9536 tcg_gen_movi_i32(tmp
, val
);
9537 store_reg(s
, 14, tmp
);
9539 offset
= sextract32(insn
<< 2, 0, 26);
9547 if (((insn
>> 8) & 0xe) == 10) {
9549 if (disas_vfp_insn(s
, insn
)) {
9552 } else if (disas_coproc_insn(s
, insn
)) {
9559 gen_set_pc_im(s
, s
->pc
);
9560 s
->svc_imm
= extract32(insn
, 0, 24);
9561 s
->base
.is_jmp
= DISAS_SWI
;
9565 gen_exception_insn(s
, 4, EXCP_UDEF
, syn_uncategorized(),
9566 default_exception_el(s
));
9572 static bool thumb_insn_is_16bit(DisasContext
*s
, uint32_t insn
)
9574 /* Return true if this is a 16 bit instruction. We must be precise
9575 * about this (matching the decode). We assume that s->pc still
9576 * points to the first 16 bits of the insn.
9578 if ((insn
>> 11) < 0x1d) {
9579 /* Definitely a 16-bit instruction */
9583 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
9584 * first half of a 32-bit Thumb insn. Thumb-1 cores might
9585 * end up actually treating this as two 16-bit insns, though,
9586 * if it's half of a bl/blx pair that might span a page boundary.
9588 if (arm_dc_feature(s
, ARM_FEATURE_THUMB2
) ||
9589 arm_dc_feature(s
, ARM_FEATURE_M
)) {
9590 /* Thumb2 cores (including all M profile ones) always treat
9591 * 32-bit insns as 32-bit.
9596 if ((insn
>> 11) == 0x1e && s
->pc
- s
->page_start
< TARGET_PAGE_SIZE
- 3) {
9597 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
9598 * is not on the next page; we merge this into a 32-bit
9603 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
9604 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
9605 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
9606 * -- handle as single 16 bit insn
/* Return true if this is a Thumb-2 logical op.
 * Ops 0..7 (AND, BIC, ORR, ORN, EOR, ...) are the logical group in the
 * Thumb-2 data-processing encoding; ops 8 and above are arithmetic.
 */
static int
thumb2_logic_op(int op)
{
    return (op < 8);
}
9618 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9619 then set condition code flags based on the result of the operation.
9620 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9621 to the high bit of T1.
9622 Returns zero if the opcode is valid. */
9625 gen_thumb2_data_op(DisasContext
*s
, int op
, int conds
, uint32_t shifter_out
,
9626 TCGv_i32 t0
, TCGv_i32 t1
)
9633 tcg_gen_and_i32(t0
, t0
, t1
);
9637 tcg_gen_andc_i32(t0
, t0
, t1
);
9641 tcg_gen_or_i32(t0
, t0
, t1
);
9645 tcg_gen_orc_i32(t0
, t0
, t1
);
9649 tcg_gen_xor_i32(t0
, t0
, t1
);
9654 gen_add_CC(t0
, t0
, t1
);
9656 tcg_gen_add_i32(t0
, t0
, t1
);
9660 gen_adc_CC(t0
, t0
, t1
);
9666 gen_sbc_CC(t0
, t0
, t1
);
9668 gen_sub_carry(t0
, t0
, t1
);
9673 gen_sub_CC(t0
, t0
, t1
);
9675 tcg_gen_sub_i32(t0
, t0
, t1
);
9679 gen_sub_CC(t0
, t1
, t0
);
9681 tcg_gen_sub_i32(t0
, t1
, t0
);
9683 default: /* 5, 6, 7, 9, 12, 15. */
9689 gen_set_CF_bit31(t1
);
9694 /* Translate a 32-bit thumb instruction. */
9695 static void disas_thumb2_insn(DisasContext
*s
, uint32_t insn
)
9697 uint32_t imm
, shift
, offset
;
9698 uint32_t rd
, rn
, rm
, rs
;
9710 * ARMv6-M supports a limited subset of Thumb2 instructions.
9711 * Other Thumb1 architectures allow only 32-bit
9712 * combined BL/BLX prefix and suffix.
9714 if (arm_dc_feature(s
, ARM_FEATURE_M
) &&
9715 !arm_dc_feature(s
, ARM_FEATURE_V7
)) {
9718 static const uint32_t armv6m_insn
[] = {0xf3808000 /* msr */,
9719 0xf3b08040 /* dsb */,
9720 0xf3b08050 /* dmb */,
9721 0xf3b08060 /* isb */,
9722 0xf3e08000 /* mrs */,
9723 0xf000d000 /* bl */};
9724 static const uint32_t armv6m_mask
[] = {0xffe0d000,
9731 for (i
= 0; i
< ARRAY_SIZE(armv6m_insn
); i
++) {
9732 if ((insn
& armv6m_mask
[i
]) == armv6m_insn
[i
]) {
9740 } else if ((insn
& 0xf800e800) != 0xf000e800) {
9744 rn
= (insn
>> 16) & 0xf;
9745 rs
= (insn
>> 12) & 0xf;
9746 rd
= (insn
>> 8) & 0xf;
9748 switch ((insn
>> 25) & 0xf) {
9749 case 0: case 1: case 2: case 3:
9750 /* 16-bit instructions. Should never happen. */
9753 if (insn
& (1 << 22)) {
9754 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
9755 * - load/store doubleword, load/store exclusive, ldacq/strel,
9758 if (insn
== 0xe97fe97f && arm_dc_feature(s
, ARM_FEATURE_M
) &&
9759 arm_dc_feature(s
, ARM_FEATURE_V8
)) {
9760 /* 0b1110_1001_0111_1111_1110_1001_0111_111
9762 * The bulk of the behaviour for this instruction is implemented
9763 * in v7m_handle_execute_nsc(), which deals with the insn when
9764 * it is executed by a CPU in non-secure state from memory
9765 * which is Secure & NonSecure-Callable.
9766 * Here we only need to handle the remaining cases:
9767 * * in NS memory (including the "security extension not
9768 * implemented" case) : NOP
9769 * * in S memory but CPU already secure (clear IT bits)
9770 * We know that the attribute for the memory this insn is
9771 * in must match the current CPU state, because otherwise
9772 * get_phys_addr_pmsav8 would have generated an exception.
9774 if (s
->v8m_secure
) {
9775 /* Like the IT insn, we don't need to generate any code */
9776 s
->condexec_cond
= 0;
9777 s
->condexec_mask
= 0;
9779 } else if (insn
& 0x01200000) {
9780 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9781 * - load/store dual (post-indexed)
9782 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
9783 * - load/store dual (literal and immediate)
9784 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9785 * - load/store dual (pre-indexed)
9787 bool wback
= extract32(insn
, 21, 1);
9790 if (insn
& (1 << 21)) {
9794 addr
= tcg_temp_new_i32();
9795 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
9797 addr
= load_reg(s
, rn
);
9799 offset
= (insn
& 0xff) * 4;
9800 if ((insn
& (1 << 23)) == 0) {
9804 if (s
->v8m_stackcheck
&& rn
== 13 && wback
) {
9806 * Here 'addr' is the current SP; if offset is +ve we're
9807 * moving SP up, else down. It is UNKNOWN whether the limit
9808 * check triggers when SP starts below the limit and ends
9809 * up above it; check whichever of the current and final
9810 * SP is lower, so QEMU will trigger in that situation.
9812 if ((int32_t)offset
< 0) {
9813 TCGv_i32 newsp
= tcg_temp_new_i32();
9815 tcg_gen_addi_i32(newsp
, addr
, offset
);
9816 gen_helper_v8m_stackcheck(cpu_env
, newsp
);
9817 tcg_temp_free_i32(newsp
);
9819 gen_helper_v8m_stackcheck(cpu_env
, addr
);
9823 if (insn
& (1 << 24)) {
9824 tcg_gen_addi_i32(addr
, addr
, offset
);
9827 if (insn
& (1 << 20)) {
9829 tmp
= tcg_temp_new_i32();
9830 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9831 store_reg(s
, rs
, tmp
);
9832 tcg_gen_addi_i32(addr
, addr
, 4);
9833 tmp
= tcg_temp_new_i32();
9834 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9835 store_reg(s
, rd
, tmp
);
9838 tmp
= load_reg(s
, rs
);
9839 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9840 tcg_temp_free_i32(tmp
);
9841 tcg_gen_addi_i32(addr
, addr
, 4);
9842 tmp
= load_reg(s
, rd
);
9843 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9844 tcg_temp_free_i32(tmp
);
9847 /* Base writeback. */
9848 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
9849 store_reg(s
, rn
, addr
);
9851 tcg_temp_free_i32(addr
);
9853 } else if ((insn
& (1 << 23)) == 0) {
9854 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
9855 * - load/store exclusive word
9859 if (!(insn
& (1 << 20)) &&
9860 arm_dc_feature(s
, ARM_FEATURE_M
) &&
9861 arm_dc_feature(s
, ARM_FEATURE_V8
)) {
9862 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
9865 bool alt
= insn
& (1 << 7);
9866 TCGv_i32 addr
, op
, ttresp
;
9868 if ((insn
& 0x3f) || rd
== 13 || rd
== 15 || rn
== 15) {
9869 /* we UNDEF for these UNPREDICTABLE cases */
9873 if (alt
&& !s
->v8m_secure
) {
9877 addr
= load_reg(s
, rn
);
9878 op
= tcg_const_i32(extract32(insn
, 6, 2));
9879 ttresp
= tcg_temp_new_i32();
9880 gen_helper_v7m_tt(ttresp
, cpu_env
, addr
, op
);
9881 tcg_temp_free_i32(addr
);
9882 tcg_temp_free_i32(op
);
9883 store_reg(s
, rd
, ttresp
);
9888 addr
= tcg_temp_local_new_i32();
9889 load_reg_var(s
, addr
, rn
);
9890 tcg_gen_addi_i32(addr
, addr
, (insn
& 0xff) << 2);
9891 if (insn
& (1 << 20)) {
9892 gen_load_exclusive(s
, rs
, 15, addr
, 2);
9894 gen_store_exclusive(s
, rd
, rs
, 15, addr
, 2);
9896 tcg_temp_free_i32(addr
);
9897 } else if ((insn
& (7 << 5)) == 0) {
9900 addr
= tcg_temp_new_i32();
9901 tcg_gen_movi_i32(addr
, s
->pc
);
9903 addr
= load_reg(s
, rn
);
9905 tmp
= load_reg(s
, rm
);
9906 tcg_gen_add_i32(addr
, addr
, tmp
);
9907 if (insn
& (1 << 4)) {
9909 tcg_gen_add_i32(addr
, addr
, tmp
);
9910 tcg_temp_free_i32(tmp
);
9911 tmp
= tcg_temp_new_i32();
9912 gen_aa32_ld16u(s
, tmp
, addr
, get_mem_index(s
));
9914 tcg_temp_free_i32(tmp
);
9915 tmp
= tcg_temp_new_i32();
9916 gen_aa32_ld8u(s
, tmp
, addr
, get_mem_index(s
));
9918 tcg_temp_free_i32(addr
);
9919 tcg_gen_shli_i32(tmp
, tmp
, 1);
9920 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
9921 store_reg(s
, 15, tmp
);
9923 bool is_lasr
= false;
9924 bool is_ld
= extract32(insn
, 20, 1);
9925 int op2
= (insn
>> 6) & 0x3;
9926 op
= (insn
>> 4) & 0x3;
9931 /* Load/store exclusive byte/halfword/doubleword */
9938 /* Load-acquire/store-release */
9944 /* Load-acquire/store-release exclusive */
9950 if (is_lasr
&& !is_ld
) {
9951 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_STRL
);
9954 addr
= tcg_temp_local_new_i32();
9955 load_reg_var(s
, addr
, rn
);
9958 tmp
= tcg_temp_new_i32();
9961 gen_aa32_ld8u_iss(s
, tmp
, addr
, get_mem_index(s
),
9965 gen_aa32_ld16u_iss(s
, tmp
, addr
, get_mem_index(s
),
9969 gen_aa32_ld32u_iss(s
, tmp
, addr
, get_mem_index(s
),
9975 store_reg(s
, rs
, tmp
);
9977 tmp
= load_reg(s
, rs
);
9980 gen_aa32_st8_iss(s
, tmp
, addr
, get_mem_index(s
),
9984 gen_aa32_st16_iss(s
, tmp
, addr
, get_mem_index(s
),
9988 gen_aa32_st32_iss(s
, tmp
, addr
, get_mem_index(s
),
9994 tcg_temp_free_i32(tmp
);
9997 gen_load_exclusive(s
, rs
, rd
, addr
, op
);
9999 gen_store_exclusive(s
, rm
, rs
, rd
, addr
, op
);
10001 tcg_temp_free_i32(addr
);
10003 if (is_lasr
&& is_ld
) {
10004 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_LDAQ
);
10008 /* Load/store multiple, RFE, SRS. */
10009 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
10010 /* RFE, SRS: not available in user mode or on M profile */
10011 if (IS_USER(s
) || arm_dc_feature(s
, ARM_FEATURE_M
)) {
10014 if (insn
& (1 << 20)) {
10016 addr
= load_reg(s
, rn
);
10017 if ((insn
& (1 << 24)) == 0)
10018 tcg_gen_addi_i32(addr
, addr
, -8);
10019 /* Load PC into tmp and CPSR into tmp2. */
10020 tmp
= tcg_temp_new_i32();
10021 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
10022 tcg_gen_addi_i32(addr
, addr
, 4);
10023 tmp2
= tcg_temp_new_i32();
10024 gen_aa32_ld32u(s
, tmp2
, addr
, get_mem_index(s
));
10025 if (insn
& (1 << 21)) {
10026 /* Base writeback. */
10027 if (insn
& (1 << 24)) {
10028 tcg_gen_addi_i32(addr
, addr
, 4);
10030 tcg_gen_addi_i32(addr
, addr
, -4);
10032 store_reg(s
, rn
, addr
);
10034 tcg_temp_free_i32(addr
);
10036 gen_rfe(s
, tmp
, tmp2
);
10039 gen_srs(s
, (insn
& 0x1f), (insn
& (1 << 24)) ? 1 : 2,
10043 int i
, loaded_base
= 0;
10044 TCGv_i32 loaded_var
;
10045 bool wback
= extract32(insn
, 21, 1);
10046 /* Load/store multiple. */
10047 addr
= load_reg(s
, rn
);
10049 for (i
= 0; i
< 16; i
++) {
10050 if (insn
& (1 << i
))
10054 if (insn
& (1 << 24)) {
10055 tcg_gen_addi_i32(addr
, addr
, -offset
);
10058 if (s
->v8m_stackcheck
&& rn
== 13 && wback
) {
10060 * If the writeback is incrementing SP rather than
10061 * decrementing it, and the initial SP is below the
10062 * stack limit but the final written-back SP would
10063 * be above, then then we must not perform any memory
10064 * accesses, but it is IMPDEF whether we generate
10065 * an exception. We choose to do so in this case.
10066 * At this point 'addr' is the lowest address, so
10067 * either the original SP (if incrementing) or our
10068 * final SP (if decrementing), so that's what we check.
10070 gen_helper_v8m_stackcheck(cpu_env
, addr
);
10074 for (i
= 0; i
< 16; i
++) {
10075 if ((insn
& (1 << i
)) == 0)
10077 if (insn
& (1 << 20)) {
10079 tmp
= tcg_temp_new_i32();
10080 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
10082 gen_bx_excret(s
, tmp
);
10083 } else if (i
== rn
) {
10087 store_reg(s
, i
, tmp
);
10091 tmp
= load_reg(s
, i
);
10092 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
10093 tcg_temp_free_i32(tmp
);
10095 tcg_gen_addi_i32(addr
, addr
, 4);
10098 store_reg(s
, rn
, loaded_var
);
10101 /* Base register writeback. */
10102 if (insn
& (1 << 24)) {
10103 tcg_gen_addi_i32(addr
, addr
, -offset
);
10105 /* Fault if writeback register is in register list. */
10106 if (insn
& (1 << rn
))
10108 store_reg(s
, rn
, addr
);
10110 tcg_temp_free_i32(addr
);
10117 op
= (insn
>> 21) & 0xf;
10119 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10122 /* Halfword pack. */
10123 tmp
= load_reg(s
, rn
);
10124 tmp2
= load_reg(s
, rm
);
10125 shift
= ((insn
>> 10) & 0x1c) | ((insn
>> 6) & 0x3);
10126 if (insn
& (1 << 5)) {
10130 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
10131 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
10132 tcg_gen_ext16u_i32(tmp2
, tmp2
);
10136 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
10137 tcg_gen_ext16u_i32(tmp
, tmp
);
10138 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
10140 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
10141 tcg_temp_free_i32(tmp2
);
10142 store_reg(s
, rd
, tmp
);
10144 /* Data processing register constant shift. */
10146 tmp
= tcg_temp_new_i32();
10147 tcg_gen_movi_i32(tmp
, 0);
10149 tmp
= load_reg(s
, rn
);
10151 tmp2
= load_reg(s
, rm
);
10153 shiftop
= (insn
>> 4) & 3;
10154 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
10155 conds
= (insn
& (1 << 20)) != 0;
10156 logic_cc
= (conds
&& thumb2_logic_op(op
));
10157 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
10158 if (gen_thumb2_data_op(s
, op
, conds
, 0, tmp
, tmp2
))
10160 tcg_temp_free_i32(tmp2
);
10162 ((op
== 2 && rn
== 15) ||
10163 (op
== 8 && rn
== 13) ||
10164 (op
== 13 && rn
== 13))) {
10165 /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
10166 store_sp_checked(s
, tmp
);
10167 } else if (rd
!= 15) {
10168 store_reg(s
, rd
, tmp
);
10170 tcg_temp_free_i32(tmp
);
10174 case 13: /* Misc data processing. */
10175 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
10176 if (op
< 4 && (insn
& 0xf000) != 0xf000)
10179 case 0: /* Register controlled shift. */
10180 tmp
= load_reg(s
, rn
);
10181 tmp2
= load_reg(s
, rm
);
10182 if ((insn
& 0x70) != 0)
10185 * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
10186 * - MOV, MOVS (register-shifted register), flagsetting
10188 op
= (insn
>> 21) & 3;
10189 logic_cc
= (insn
& (1 << 20)) != 0;
10190 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
10193 store_reg(s
, rd
, tmp
);
10195 case 1: /* Sign/zero extend. */
10196 op
= (insn
>> 20) & 7;
10198 case 0: /* SXTAH, SXTH */
10199 case 1: /* UXTAH, UXTH */
10200 case 4: /* SXTAB, SXTB */
10201 case 5: /* UXTAB, UXTB */
10203 case 2: /* SXTAB16, SXTB16 */
10204 case 3: /* UXTAB16, UXTB16 */
10205 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10213 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10217 tmp
= load_reg(s
, rm
);
10218 shift
= (insn
>> 4) & 3;
10219 /* ??? In many cases it's not necessary to do a
10220 rotate, a shift is sufficient. */
10222 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
10223 op
= (insn
>> 20) & 7;
10225 case 0: gen_sxth(tmp
); break;
10226 case 1: gen_uxth(tmp
); break;
10227 case 2: gen_sxtb16(tmp
); break;
10228 case 3: gen_uxtb16(tmp
); break;
10229 case 4: gen_sxtb(tmp
); break;
10230 case 5: gen_uxtb(tmp
); break;
10232 g_assert_not_reached();
10235 tmp2
= load_reg(s
, rn
);
10236 if ((op
>> 1) == 1) {
10237 gen_add16(tmp
, tmp2
);
10239 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10240 tcg_temp_free_i32(tmp2
);
10243 store_reg(s
, rd
, tmp
);
10245 case 2: /* SIMD add/subtract. */
10246 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10249 op
= (insn
>> 20) & 7;
10250 shift
= (insn
>> 4) & 7;
10251 if ((op
& 3) == 3 || (shift
& 3) == 3)
10253 tmp
= load_reg(s
, rn
);
10254 tmp2
= load_reg(s
, rm
);
10255 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
10256 tcg_temp_free_i32(tmp2
);
10257 store_reg(s
, rd
, tmp
);
10259 case 3: /* Other data processing. */
10260 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
10262 /* Saturating add/subtract. */
10263 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10266 tmp
= load_reg(s
, rn
);
10267 tmp2
= load_reg(s
, rm
);
10269 gen_helper_double_saturate(tmp
, cpu_env
, tmp
);
10271 gen_helper_sub_saturate(tmp
, cpu_env
, tmp2
, tmp
);
10273 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp2
);
10274 tcg_temp_free_i32(tmp2
);
10277 case 0x0a: /* rbit */
10278 case 0x08: /* rev */
10279 case 0x09: /* rev16 */
10280 case 0x0b: /* revsh */
10281 case 0x18: /* clz */
10283 case 0x10: /* sel */
10284 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10288 case 0x20: /* crc32/crc32c */
10294 if (!dc_isar_feature(aa32_crc32
, s
)) {
10301 tmp
= load_reg(s
, rn
);
10303 case 0x0a: /* rbit */
10304 gen_helper_rbit(tmp
, tmp
);
10306 case 0x08: /* rev */
10307 tcg_gen_bswap32_i32(tmp
, tmp
);
10309 case 0x09: /* rev16 */
10312 case 0x0b: /* revsh */
10315 case 0x10: /* sel */
10316 tmp2
= load_reg(s
, rm
);
10317 tmp3
= tcg_temp_new_i32();
10318 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
10319 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
10320 tcg_temp_free_i32(tmp3
);
10321 tcg_temp_free_i32(tmp2
);
10323 case 0x18: /* clz */
10324 tcg_gen_clzi_i32(tmp
, tmp
, 32);
10334 uint32_t sz
= op
& 0x3;
10335 uint32_t c
= op
& 0x8;
10337 tmp2
= load_reg(s
, rm
);
10339 tcg_gen_andi_i32(tmp2
, tmp2
, 0xff);
10340 } else if (sz
== 1) {
10341 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff);
10343 tmp3
= tcg_const_i32(1 << sz
);
10345 gen_helper_crc32c(tmp
, tmp
, tmp2
, tmp3
);
10347 gen_helper_crc32(tmp
, tmp
, tmp2
, tmp3
);
10349 tcg_temp_free_i32(tmp2
);
10350 tcg_temp_free_i32(tmp3
);
10354 g_assert_not_reached();
10357 store_reg(s
, rd
, tmp
);
10359 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
10360 switch ((insn
>> 20) & 7) {
10361 case 0: /* 32 x 32 -> 32 */
10362 case 7: /* Unsigned sum of absolute differences. */
10364 case 1: /* 16 x 16 -> 32 */
10365 case 2: /* Dual multiply add. */
10366 case 3: /* 32 * 16 -> 32msb */
10367 case 4: /* Dual multiply subtract. */
10368 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10369 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10374 op
= (insn
>> 4) & 0xf;
10375 tmp
= load_reg(s
, rn
);
10376 tmp2
= load_reg(s
, rm
);
10377 switch ((insn
>> 20) & 7) {
10378 case 0: /* 32 x 32 -> 32 */
10379 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
10380 tcg_temp_free_i32(tmp2
);
10382 tmp2
= load_reg(s
, rs
);
10384 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
10386 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10387 tcg_temp_free_i32(tmp2
);
10390 case 1: /* 16 x 16 -> 32 */
10391 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
10392 tcg_temp_free_i32(tmp2
);
10394 tmp2
= load_reg(s
, rs
);
10395 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
10396 tcg_temp_free_i32(tmp2
);
10399 case 2: /* Dual multiply add. */
10400 case 4: /* Dual multiply subtract. */
10402 gen_swap_half(tmp2
);
10403 gen_smul_dual(tmp
, tmp2
);
10404 if (insn
& (1 << 22)) {
10405 /* This subtraction cannot overflow. */
10406 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
10408 /* This addition cannot overflow 32 bits;
10409 * however it may overflow considered as a signed
10410 * operation, in which case we must set the Q flag.
10412 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
10414 tcg_temp_free_i32(tmp2
);
10417 tmp2
= load_reg(s
, rs
);
10418 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
10419 tcg_temp_free_i32(tmp2
);
10422 case 3: /* 32 * 16 -> 32msb */
10424 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
10427 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
10428 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
10429 tmp
= tcg_temp_new_i32();
10430 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
10431 tcg_temp_free_i64(tmp64
);
10434 tmp2
= load_reg(s
, rs
);
10435 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
10436 tcg_temp_free_i32(tmp2
);
10439 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10440 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
10442 tmp
= load_reg(s
, rs
);
10443 if (insn
& (1 << 20)) {
10444 tmp64
= gen_addq_msw(tmp64
, tmp
);
10446 tmp64
= gen_subq_msw(tmp64
, tmp
);
10449 if (insn
& (1 << 4)) {
10450 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
10452 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
10453 tmp
= tcg_temp_new_i32();
10454 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
10455 tcg_temp_free_i64(tmp64
);
10457 case 7: /* Unsigned sum of absolute differences. */
10458 gen_helper_usad8(tmp
, tmp
, tmp2
);
10459 tcg_temp_free_i32(tmp2
);
10461 tmp2
= load_reg(s
, rs
);
10462 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10463 tcg_temp_free_i32(tmp2
);
10467 store_reg(s
, rd
, tmp
);
10469 case 6: case 7: /* 64-bit multiply, Divide. */
10470 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
10471 tmp
= load_reg(s
, rn
);
10472 tmp2
= load_reg(s
, rm
);
10473 if ((op
& 0x50) == 0x10) {
10475 if (!dc_isar_feature(thumb_div
, s
)) {
10479 gen_helper_udiv(tmp
, tmp
, tmp2
);
10481 gen_helper_sdiv(tmp
, tmp
, tmp2
);
10482 tcg_temp_free_i32(tmp2
);
10483 store_reg(s
, rd
, tmp
);
10484 } else if ((op
& 0xe) == 0xc) {
10485 /* Dual multiply accumulate long. */
10486 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10487 tcg_temp_free_i32(tmp
);
10488 tcg_temp_free_i32(tmp2
);
10492 gen_swap_half(tmp2
);
10493 gen_smul_dual(tmp
, tmp2
);
10495 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
10497 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10499 tcg_temp_free_i32(tmp2
);
10501 tmp64
= tcg_temp_new_i64();
10502 tcg_gen_ext_i32_i64(tmp64
, tmp
);
10503 tcg_temp_free_i32(tmp
);
10504 gen_addq(s
, tmp64
, rs
, rd
);
10505 gen_storeq_reg(s
, rs
, rd
, tmp64
);
10506 tcg_temp_free_i64(tmp64
);
10509 /* Unsigned 64-bit multiply */
10510 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
10514 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10515 tcg_temp_free_i32(tmp2
);
10516 tcg_temp_free_i32(tmp
);
10519 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
10520 tcg_temp_free_i32(tmp2
);
10521 tmp64
= tcg_temp_new_i64();
10522 tcg_gen_ext_i32_i64(tmp64
, tmp
);
10523 tcg_temp_free_i32(tmp
);
10525 /* Signed 64-bit multiply */
10526 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
10531 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10532 tcg_temp_free_i64(tmp64
);
10535 gen_addq_lo(s
, tmp64
, rs
);
10536 gen_addq_lo(s
, tmp64
, rd
);
10537 } else if (op
& 0x40) {
10538 /* 64-bit accumulate. */
10539 gen_addq(s
, tmp64
, rs
, rd
);
10541 gen_storeq_reg(s
, rs
, rd
, tmp64
);
10542 tcg_temp_free_i64(tmp64
);
10547 case 6: case 7: case 14: case 15:
10549 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10550 /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
10551 if (extract32(insn
, 24, 2) == 3) {
10552 goto illegal_op
; /* op0 = 0b11 : unallocated */
10556 * Decode VLLDM and VLSTM first: these are nonstandard because:
10557 * * if there is no FPU then these insns must NOP in
10558 * Secure state and UNDEF in Nonsecure state
10559 * * if there is an FPU then these insns do not have
10560 * the usual behaviour that disas_vfp_insn() provides of
10561 * being controlled by CPACR/NSACR enable bits or the
10562 * lazy-stacking logic.
10564 if (arm_dc_feature(s
, ARM_FEATURE_V8
) &&
10565 (insn
& 0xffa00f00) == 0xec200a00) {
10566 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
10568 * We choose to UNDEF if the RAZ bits are non-zero.
10570 if (!s
->v8m_secure
|| (insn
& 0x0040f0ff)) {
10574 if (arm_dc_feature(s
, ARM_FEATURE_VFP
)) {
10575 TCGv_i32 fptr
= load_reg(s
, rn
);
10577 if (extract32(insn
, 20, 1)) {
10578 gen_helper_v7m_vlldm(cpu_env
, fptr
);
10580 gen_helper_v7m_vlstm(cpu_env
, fptr
);
10582 tcg_temp_free_i32(fptr
);
10584 /* End the TB, because we have updated FP control bits */
10585 s
->base
.is_jmp
= DISAS_UPDATE
;
10589 if (arm_dc_feature(s
, ARM_FEATURE_VFP
) &&
10590 ((insn
>> 8) & 0xe) == 10) {
10591 /* FP, and the CPU supports it */
10592 if (disas_vfp_insn(s
, insn
)) {
10598 /* All other insns: NOCP */
10599 gen_exception_insn(s
, 4, EXCP_NOCP
, syn_uncategorized(),
10600 default_exception_el(s
));
10603 if ((insn
& 0xfe000a00) == 0xfc000800
10604 && arm_dc_feature(s
, ARM_FEATURE_V8
)) {
10605 /* The Thumb2 and ARM encodings are identical. */
10606 if (disas_neon_insn_3same_ext(s
, insn
)) {
10609 } else if ((insn
& 0xff000a00) == 0xfe000800
10610 && arm_dc_feature(s
, ARM_FEATURE_V8
)) {
10611 /* The Thumb2 and ARM encodings are identical. */
10612 if (disas_neon_insn_2reg_scalar_ext(s
, insn
)) {
10615 } else if (((insn
>> 24) & 3) == 3) {
10616 /* Translate into the equivalent ARM encoding. */
10617 insn
= (insn
& 0xe2ffffff) | ((insn
& (1 << 28)) >> 4) | (1 << 28);
10618 if (disas_neon_data_insn(s
, insn
)) {
10621 } else if (((insn
>> 8) & 0xe) == 10) {
10622 if (disas_vfp_insn(s
, insn
)) {
10626 if (insn
& (1 << 28))
10628 if (disas_coproc_insn(s
, insn
)) {
10633 case 8: case 9: case 10: case 11:
10634 if (insn
& (1 << 15)) {
10635 /* Branches, misc control. */
10636 if (insn
& 0x5000) {
10637 /* Unconditional branch. */
10638 /* signextend(hw1[10:0]) -> offset[:12]. */
10639 offset
= ((int32_t)insn
<< 5) >> 9 & ~(int32_t)0xfff;
10640 /* hw1[10:0] -> offset[11:1]. */
10641 offset
|= (insn
& 0x7ff) << 1;
10642 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10643 offset[24:22] already have the same value because of the
10644 sign extension above. */
10645 offset
^= ((~insn
) & (1 << 13)) << 10;
10646 offset
^= ((~insn
) & (1 << 11)) << 11;
10648 if (insn
& (1 << 14)) {
10649 /* Branch and link. */
10650 tcg_gen_movi_i32(cpu_R
[14], s
->pc
| 1);
10654 if (insn
& (1 << 12)) {
10656 gen_jmp(s
, offset
);
10659 offset
&= ~(uint32_t)2;
10660 /* thumb2 bx, no need to check */
10661 gen_bx_im(s
, offset
);
10663 } else if (((insn
>> 23) & 7) == 7) {
10665 if (insn
& (1 << 13))
10668 if (insn
& (1 << 26)) {
10669 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10672 if (!(insn
& (1 << 20))) {
10673 /* Hypervisor call (v7) */
10674 int imm16
= extract32(insn
, 16, 4) << 12
10675 | extract32(insn
, 0, 12);
10682 /* Secure monitor call (v6+) */
10690 op
= (insn
>> 20) & 7;
10692 case 0: /* msr cpsr. */
10693 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10694 tmp
= load_reg(s
, rn
);
10695 /* the constant is the mask and SYSm fields */
10696 addr
= tcg_const_i32(insn
& 0xfff);
10697 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
10698 tcg_temp_free_i32(addr
);
10699 tcg_temp_free_i32(tmp
);
10704 case 1: /* msr spsr. */
10705 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10709 if (extract32(insn
, 5, 1)) {
10711 int sysm
= extract32(insn
, 8, 4) |
10712 (extract32(insn
, 4, 1) << 4);
10715 gen_msr_banked(s
, r
, sysm
, rm
);
10719 /* MSR (for PSRs) */
10720 tmp
= load_reg(s
, rn
);
10722 msr_mask(s
, (insn
>> 8) & 0xf, op
== 1),
10726 case 2: /* cps, nop-hint. */
10727 if (((insn
>> 8) & 7) == 0) {
10728 gen_nop_hint(s
, insn
& 0xff);
10730 /* Implemented as NOP in user mode. */
10735 if (insn
& (1 << 10)) {
10736 if (insn
& (1 << 7))
10738 if (insn
& (1 << 6))
10740 if (insn
& (1 << 5))
10742 if (insn
& (1 << 9))
10743 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
10745 if (insn
& (1 << 8)) {
10747 imm
|= (insn
& 0x1f);
10750 gen_set_psr_im(s
, offset
, 0, imm
);
10753 case 3: /* Special control operations. */
10754 if (!arm_dc_feature(s
, ARM_FEATURE_V7
) &&
10755 !arm_dc_feature(s
, ARM_FEATURE_M
)) {
10758 op
= (insn
>> 4) & 0xf;
10760 case 2: /* clrex */
10765 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
10768 /* We need to break the TB after this insn
10769 * to execute self-modifying code correctly
10770 * and also to take any pending interrupts
10773 gen_goto_tb(s
, 0, s
->pc
& ~1);
10776 if ((insn
& 0xf) || !dc_isar_feature(aa32_sb
, s
)) {
10780 * TODO: There is no speculation barrier opcode
10781 * for TCG; MB and end the TB instead.
10783 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
10784 gen_goto_tb(s
, 0, s
->pc
& ~1);
10791 /* Trivial implementation equivalent to bx.
10792 * This instruction doesn't exist at all for M-profile.
10794 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10797 tmp
= load_reg(s
, rn
);
10800 case 5: /* Exception return. */
10804 if (rn
!= 14 || rd
!= 15) {
10807 if (s
->current_el
== 2) {
10808 /* ERET from Hyp uses ELR_Hyp, not LR */
10812 tmp
= load_cpu_field(elr_el
[2]);
10814 tmp
= load_reg(s
, rn
);
10815 tcg_gen_subi_i32(tmp
, tmp
, insn
& 0xff);
10817 gen_exception_return(s
, tmp
);
10820 if (extract32(insn
, 5, 1) &&
10821 !arm_dc_feature(s
, ARM_FEATURE_M
)) {
10823 int sysm
= extract32(insn
, 16, 4) |
10824 (extract32(insn
, 4, 1) << 4);
10826 gen_mrs_banked(s
, 0, sysm
, rd
);
10830 if (extract32(insn
, 16, 4) != 0xf) {
10833 if (!arm_dc_feature(s
, ARM_FEATURE_M
) &&
10834 extract32(insn
, 0, 8) != 0) {
10839 tmp
= tcg_temp_new_i32();
10840 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10841 addr
= tcg_const_i32(insn
& 0xff);
10842 gen_helper_v7m_mrs(tmp
, cpu_env
, addr
);
10843 tcg_temp_free_i32(addr
);
10845 gen_helper_cpsr_read(tmp
, cpu_env
);
10847 store_reg(s
, rd
, tmp
);
10850 if (extract32(insn
, 5, 1) &&
10851 !arm_dc_feature(s
, ARM_FEATURE_M
)) {
10853 int sysm
= extract32(insn
, 16, 4) |
10854 (extract32(insn
, 4, 1) << 4);
10856 gen_mrs_banked(s
, 1, sysm
, rd
);
10861 /* Not accessible in user mode. */
10862 if (IS_USER(s
) || arm_dc_feature(s
, ARM_FEATURE_M
)) {
10866 if (extract32(insn
, 16, 4) != 0xf ||
10867 extract32(insn
, 0, 8) != 0) {
10871 tmp
= load_cpu_field(spsr
);
10872 store_reg(s
, rd
, tmp
);
10877 /* Conditional branch. */
10878 op
= (insn
>> 22) & 0xf;
10879 /* Generate a conditional jump to next instruction. */
10880 arm_skip_unless(s
, op
);
10882 /* offset[11:1] = insn[10:0] */
10883 offset
= (insn
& 0x7ff) << 1;
10884 /* offset[17:12] = insn[21:16]. */
10885 offset
|= (insn
& 0x003f0000) >> 4;
10886 /* offset[31:20] = insn[26]. */
10887 offset
|= ((int32_t)((insn
<< 5) & 0x80000000)) >> 11;
10888 /* offset[18] = insn[13]. */
10889 offset
|= (insn
& (1 << 13)) << 5;
10890 /* offset[19] = insn[11]. */
10891 offset
|= (insn
& (1 << 11)) << 8;
10893 /* jump to the offset */
10894 gen_jmp(s
, s
->pc
+ offset
);
10898 * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
10899 * - Data-processing (modified immediate, plain binary immediate)
10901 if (insn
& (1 << 25)) {
10903 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
10904 * - Data-processing (plain binary immediate)
10906 if (insn
& (1 << 24)) {
10907 if (insn
& (1 << 20))
10909 /* Bitfield/Saturate. */
10910 op
= (insn
>> 21) & 7;
10912 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
10914 tmp
= tcg_temp_new_i32();
10915 tcg_gen_movi_i32(tmp
, 0);
10917 tmp
= load_reg(s
, rn
);
10920 case 2: /* Signed bitfield extract. */
10922 if (shift
+ imm
> 32)
10925 tcg_gen_sextract_i32(tmp
, tmp
, shift
, imm
);
10928 case 6: /* Unsigned bitfield extract. */
10930 if (shift
+ imm
> 32)
10933 tcg_gen_extract_i32(tmp
, tmp
, shift
, imm
);
10936 case 3: /* Bitfield insert/clear. */
10939 imm
= imm
+ 1 - shift
;
10941 tmp2
= load_reg(s
, rd
);
10942 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, shift
, imm
);
10943 tcg_temp_free_i32(tmp2
);
10948 default: /* Saturate. */
10951 tcg_gen_sari_i32(tmp
, tmp
, shift
);
10953 tcg_gen_shli_i32(tmp
, tmp
, shift
);
10955 tmp2
= tcg_const_i32(imm
);
10958 if ((op
& 1) && shift
== 0) {
10959 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10960 tcg_temp_free_i32(tmp
);
10961 tcg_temp_free_i32(tmp2
);
10964 gen_helper_usat16(tmp
, cpu_env
, tmp
, tmp2
);
10966 gen_helper_usat(tmp
, cpu_env
, tmp
, tmp2
);
10970 if ((op
& 1) && shift
== 0) {
10971 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10972 tcg_temp_free_i32(tmp
);
10973 tcg_temp_free_i32(tmp2
);
10976 gen_helper_ssat16(tmp
, cpu_env
, tmp
, tmp2
);
10978 gen_helper_ssat(tmp
, cpu_env
, tmp
, tmp2
);
10981 tcg_temp_free_i32(tmp2
);
10984 store_reg(s
, rd
, tmp
);
10986 imm
= ((insn
& 0x04000000) >> 15)
10987 | ((insn
& 0x7000) >> 4) | (insn
& 0xff);
10988 if (insn
& (1 << 22)) {
10989 /* 16-bit immediate. */
10990 imm
|= (insn
>> 4) & 0xf000;
10991 if (insn
& (1 << 23)) {
10993 tmp
= load_reg(s
, rd
);
10994 tcg_gen_ext16u_i32(tmp
, tmp
);
10995 tcg_gen_ori_i32(tmp
, tmp
, imm
<< 16);
10998 tmp
= tcg_temp_new_i32();
10999 tcg_gen_movi_i32(tmp
, imm
);
11001 store_reg(s
, rd
, tmp
);
11003 /* Add/sub 12-bit immediate. */
11005 offset
= s
->pc
& ~(uint32_t)3;
11006 if (insn
& (1 << 23))
11010 tmp
= tcg_temp_new_i32();
11011 tcg_gen_movi_i32(tmp
, offset
);
11012 store_reg(s
, rd
, tmp
);
11014 tmp
= load_reg(s
, rn
);
11015 if (insn
& (1 << 23))
11016 tcg_gen_subi_i32(tmp
, tmp
, imm
);
11018 tcg_gen_addi_i32(tmp
, tmp
, imm
);
11019 if (rn
== 13 && rd
== 13) {
11020 /* ADD SP, SP, imm or SUB SP, SP, imm */
11021 store_sp_checked(s
, tmp
);
11023 store_reg(s
, rd
, tmp
);
11030 * 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
11031 * - Data-processing (modified immediate)
11033 int shifter_out
= 0;
11034 /* modified 12-bit immediate. */
11035 shift
= ((insn
& 0x04000000) >> 23) | ((insn
& 0x7000) >> 12);
11036 imm
= (insn
& 0xff);
11039 /* Nothing to do. */
11041 case 1: /* 00XY00XY */
11044 case 2: /* XY00XY00 */
11048 case 3: /* XYXYXYXY */
11052 default: /* Rotated constant. */
11053 shift
= (shift
<< 1) | (imm
>> 7);
11055 imm
= imm
<< (32 - shift
);
11059 tmp2
= tcg_temp_new_i32();
11060 tcg_gen_movi_i32(tmp2
, imm
);
11061 rn
= (insn
>> 16) & 0xf;
11063 tmp
= tcg_temp_new_i32();
11064 tcg_gen_movi_i32(tmp
, 0);
11066 tmp
= load_reg(s
, rn
);
11068 op
= (insn
>> 21) & 0xf;
11069 if (gen_thumb2_data_op(s
, op
, (insn
& (1 << 20)) != 0,
11070 shifter_out
, tmp
, tmp2
))
11072 tcg_temp_free_i32(tmp2
);
11073 rd
= (insn
>> 8) & 0xf;
11074 if (rd
== 13 && rn
== 13
11075 && (op
== 8 || op
== 13)) {
11076 /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
11077 store_sp_checked(s
, tmp
);
11078 } else if (rd
!= 15) {
11079 store_reg(s
, rd
, tmp
);
11081 tcg_temp_free_i32(tmp
);
11086 case 12: /* Load/store single data item. */
11093 if ((insn
& 0x01100000) == 0x01000000) {
11094 if (disas_neon_ls_insn(s
, insn
)) {
11099 op
= ((insn
>> 21) & 3) | ((insn
>> 22) & 4);
11101 if (!(insn
& (1 << 20))) {
11105 /* Byte or halfword load space with dest == r15 : memory hints.
11106 * Catch them early so we don't emit pointless addressing code.
11107 * This space is a mix of:
11108 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
11109 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
11111 * unallocated hints, which must be treated as NOPs
11112 * UNPREDICTABLE space, which we NOP or UNDEF depending on
11113 * which is easiest for the decoding logic
11114 * Some space which must UNDEF
11116 int op1
= (insn
>> 23) & 3;
11117 int op2
= (insn
>> 6) & 0x3f;
11122 /* UNPREDICTABLE, unallocated hint or
11123 * PLD/PLDW/PLI (literal)
11128 return; /* PLD/PLDW/PLI or unallocated hint */
11130 if ((op2
== 0) || ((op2
& 0x3c) == 0x30)) {
11131 return; /* PLD/PLDW/PLI or unallocated hint */
11133 /* UNDEF space, or an UNPREDICTABLE */
11137 memidx
= get_mem_index(s
);
11139 addr
= tcg_temp_new_i32();
11141 /* s->pc has already been incremented by 4. */
11142 imm
= s
->pc
& 0xfffffffc;
11143 if (insn
& (1 << 23))
11144 imm
+= insn
& 0xfff;
11146 imm
-= insn
& 0xfff;
11147 tcg_gen_movi_i32(addr
, imm
);
11149 addr
= load_reg(s
, rn
);
11150 if (insn
& (1 << 23)) {
11151 /* Positive offset. */
11152 imm
= insn
& 0xfff;
11153 tcg_gen_addi_i32(addr
, addr
, imm
);
11156 switch ((insn
>> 8) & 0xf) {
11157 case 0x0: /* Shifted Register. */
11158 shift
= (insn
>> 4) & 0xf;
11160 tcg_temp_free_i32(addr
);
11163 tmp
= load_reg(s
, rm
);
11165 tcg_gen_shli_i32(tmp
, tmp
, shift
);
11166 tcg_gen_add_i32(addr
, addr
, tmp
);
11167 tcg_temp_free_i32(tmp
);
11169 case 0xc: /* Negative offset. */
11170 tcg_gen_addi_i32(addr
, addr
, -imm
);
11172 case 0xe: /* User privilege. */
11173 tcg_gen_addi_i32(addr
, addr
, imm
);
11174 memidx
= get_a32_user_mem_index(s
);
11176 case 0x9: /* Post-decrement. */
11178 /* Fall through. */
11179 case 0xb: /* Post-increment. */
11183 case 0xd: /* Pre-decrement. */
11185 /* Fall through. */
11186 case 0xf: /* Pre-increment. */
11190 tcg_temp_free_i32(addr
);
11196 issinfo
= writeback
? ISSInvalid
: rs
;
11198 if (s
->v8m_stackcheck
&& rn
== 13 && writeback
) {
11200 * Stackcheck. Here we know 'addr' is the current SP;
11201 * if imm is +ve we're moving SP up, else down. It is
11202 * UNKNOWN whether the limit check triggers when SP starts
11203 * below the limit and ends up above it; we chose to do so.
11205 if ((int32_t)imm
< 0) {
11206 TCGv_i32 newsp
= tcg_temp_new_i32();
11208 tcg_gen_addi_i32(newsp
, addr
, imm
);
11209 gen_helper_v8m_stackcheck(cpu_env
, newsp
);
11210 tcg_temp_free_i32(newsp
);
11212 gen_helper_v8m_stackcheck(cpu_env
, addr
);
11216 if (writeback
&& !postinc
) {
11217 tcg_gen_addi_i32(addr
, addr
, imm
);
11220 if (insn
& (1 << 20)) {
11222 tmp
= tcg_temp_new_i32();
11225 gen_aa32_ld8u_iss(s
, tmp
, addr
, memidx
, issinfo
);
11228 gen_aa32_ld8s_iss(s
, tmp
, addr
, memidx
, issinfo
);
11231 gen_aa32_ld16u_iss(s
, tmp
, addr
, memidx
, issinfo
);
11234 gen_aa32_ld16s_iss(s
, tmp
, addr
, memidx
, issinfo
);
11237 gen_aa32_ld32u_iss(s
, tmp
, addr
, memidx
, issinfo
);
11240 tcg_temp_free_i32(tmp
);
11241 tcg_temp_free_i32(addr
);
11245 gen_bx_excret(s
, tmp
);
11247 store_reg(s
, rs
, tmp
);
11251 tmp
= load_reg(s
, rs
);
11254 gen_aa32_st8_iss(s
, tmp
, addr
, memidx
, issinfo
);
11257 gen_aa32_st16_iss(s
, tmp
, addr
, memidx
, issinfo
);
11260 gen_aa32_st32_iss(s
, tmp
, addr
, memidx
, issinfo
);
11263 tcg_temp_free_i32(tmp
);
11264 tcg_temp_free_i32(addr
);
11267 tcg_temp_free_i32(tmp
);
11270 tcg_gen_addi_i32(addr
, addr
, imm
);
11272 store_reg(s
, rn
, addr
);
11274 tcg_temp_free_i32(addr
);
11283 gen_exception_insn(s
, 4, EXCP_UDEF
, syn_uncategorized(),
11284 default_exception_el(s
));
11287 static void disas_thumb_insn(DisasContext
*s
, uint32_t insn
)
11289 uint32_t val
, op
, rm
, rn
, rd
, shift
, cond
;
11296 switch (insn
>> 12) {
11300 op
= (insn
>> 11) & 3;
11303 * 0b0001_1xxx_xxxx_xxxx
11304 * - Add, subtract (three low registers)
11305 * - Add, subtract (two low registers and immediate)
11307 rn
= (insn
>> 3) & 7;
11308 tmp
= load_reg(s
, rn
);
11309 if (insn
& (1 << 10)) {
11311 tmp2
= tcg_temp_new_i32();
11312 tcg_gen_movi_i32(tmp2
, (insn
>> 6) & 7);
11315 rm
= (insn
>> 6) & 7;
11316 tmp2
= load_reg(s
, rm
);
11318 if (insn
& (1 << 9)) {
11319 if (s
->condexec_mask
)
11320 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
11322 gen_sub_CC(tmp
, tmp
, tmp2
);
11324 if (s
->condexec_mask
)
11325 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
11327 gen_add_CC(tmp
, tmp
, tmp2
);
11329 tcg_temp_free_i32(tmp2
);
11330 store_reg(s
, rd
, tmp
);
11332 /* shift immediate */
11333 rm
= (insn
>> 3) & 7;
11334 shift
= (insn
>> 6) & 0x1f;
11335 tmp
= load_reg(s
, rm
);
11336 gen_arm_shift_im(tmp
, op
, shift
, s
->condexec_mask
== 0);
11337 if (!s
->condexec_mask
)
11339 store_reg(s
, rd
, tmp
);
11344 * 0b001x_xxxx_xxxx_xxxx
11345 * - Add, subtract, compare, move (one low register and immediate)
11347 op
= (insn
>> 11) & 3;
11348 rd
= (insn
>> 8) & 0x7;
11349 if (op
== 0) { /* mov */
11350 tmp
= tcg_temp_new_i32();
11351 tcg_gen_movi_i32(tmp
, insn
& 0xff);
11352 if (!s
->condexec_mask
)
11354 store_reg(s
, rd
, tmp
);
11356 tmp
= load_reg(s
, rd
);
11357 tmp2
= tcg_temp_new_i32();
11358 tcg_gen_movi_i32(tmp2
, insn
& 0xff);
11361 gen_sub_CC(tmp
, tmp
, tmp2
);
11362 tcg_temp_free_i32(tmp
);
11363 tcg_temp_free_i32(tmp2
);
11366 if (s
->condexec_mask
)
11367 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
11369 gen_add_CC(tmp
, tmp
, tmp2
);
11370 tcg_temp_free_i32(tmp2
);
11371 store_reg(s
, rd
, tmp
);
11374 if (s
->condexec_mask
)
11375 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
11377 gen_sub_CC(tmp
, tmp
, tmp2
);
11378 tcg_temp_free_i32(tmp2
);
11379 store_reg(s
, rd
, tmp
);
11385 if (insn
& (1 << 11)) {
11386 rd
= (insn
>> 8) & 7;
11387 /* load pc-relative. Bit 1 of PC is ignored. */
11388 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
11389 val
&= ~(uint32_t)2;
11390 addr
= tcg_temp_new_i32();
11391 tcg_gen_movi_i32(addr
, val
);
11392 tmp
= tcg_temp_new_i32();
11393 gen_aa32_ld32u_iss(s
, tmp
, addr
, get_mem_index(s
),
11395 tcg_temp_free_i32(addr
);
11396 store_reg(s
, rd
, tmp
);
11399 if (insn
& (1 << 10)) {
11400 /* 0b0100_01xx_xxxx_xxxx
11401 * - data processing extended, branch and exchange
11403 rd
= (insn
& 7) | ((insn
>> 4) & 8);
11404 rm
= (insn
>> 3) & 0xf;
11405 op
= (insn
>> 8) & 3;
11408 tmp
= load_reg(s
, rd
);
11409 tmp2
= load_reg(s
, rm
);
11410 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
11411 tcg_temp_free_i32(tmp2
);
11413 /* ADD SP, SP, reg */
11414 store_sp_checked(s
, tmp
);
11416 store_reg(s
, rd
, tmp
);
11420 tmp
= load_reg(s
, rd
);
11421 tmp2
= load_reg(s
, rm
);
11422 gen_sub_CC(tmp
, tmp
, tmp2
);
11423 tcg_temp_free_i32(tmp2
);
11424 tcg_temp_free_i32(tmp
);
11426 case 2: /* mov/cpy */
11427 tmp
= load_reg(s
, rm
);
11430 store_sp_checked(s
, tmp
);
11432 store_reg(s
, rd
, tmp
);
11437 /* 0b0100_0111_xxxx_xxxx
11438 * - branch [and link] exchange thumb register
11440 bool link
= insn
& (1 << 7);
11449 /* BXNS/BLXNS: only exists for v8M with the
11450 * security extensions, and always UNDEF if NonSecure.
11451 * We don't implement these in the user-only mode
11452 * either (in theory you can use them from Secure User
11453 * mode but they are too tied in to system emulation.)
11455 if (!s
->v8m_secure
|| IS_USER_ONLY
) {
11466 tmp
= load_reg(s
, rm
);
11468 val
= (uint32_t)s
->pc
| 1;
11469 tmp2
= tcg_temp_new_i32();
11470 tcg_gen_movi_i32(tmp2
, val
);
11471 store_reg(s
, 14, tmp2
);
11474 /* Only BX works as exception-return, not BLX */
11475 gen_bx_excret(s
, tmp
);
11484 * 0b0100_00xx_xxxx_xxxx
11485 * - Data-processing (two low registers)
11488 rm
= (insn
>> 3) & 7;
11489 op
= (insn
>> 6) & 0xf;
11490 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
11491 /* the shift/rotate ops want the operands backwards */
11500 if (op
== 9) { /* neg */
11501 tmp
= tcg_temp_new_i32();
11502 tcg_gen_movi_i32(tmp
, 0);
11503 } else if (op
!= 0xf) { /* mvn doesn't read its first operand */
11504 tmp
= load_reg(s
, rd
);
11509 tmp2
= load_reg(s
, rm
);
11511 case 0x0: /* and */
11512 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
11513 if (!s
->condexec_mask
)
11516 case 0x1: /* eor */
11517 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
11518 if (!s
->condexec_mask
)
11521 case 0x2: /* lsl */
11522 if (s
->condexec_mask
) {
11523 gen_shl(tmp2
, tmp2
, tmp
);
11525 gen_helper_shl_cc(tmp2
, cpu_env
, tmp2
, tmp
);
11526 gen_logic_CC(tmp2
);
11529 case 0x3: /* lsr */
11530 if (s
->condexec_mask
) {
11531 gen_shr(tmp2
, tmp2
, tmp
);
11533 gen_helper_shr_cc(tmp2
, cpu_env
, tmp2
, tmp
);
11534 gen_logic_CC(tmp2
);
11537 case 0x4: /* asr */
11538 if (s
->condexec_mask
) {
11539 gen_sar(tmp2
, tmp2
, tmp
);
11541 gen_helper_sar_cc(tmp2
, cpu_env
, tmp2
, tmp
);
11542 gen_logic_CC(tmp2
);
11545 case 0x5: /* adc */
11546 if (s
->condexec_mask
) {
11547 gen_adc(tmp
, tmp2
);
11549 gen_adc_CC(tmp
, tmp
, tmp2
);
11552 case 0x6: /* sbc */
11553 if (s
->condexec_mask
) {
11554 gen_sub_carry(tmp
, tmp
, tmp2
);
11556 gen_sbc_CC(tmp
, tmp
, tmp2
);
11559 case 0x7: /* ror */
11560 if (s
->condexec_mask
) {
11561 tcg_gen_andi_i32(tmp
, tmp
, 0x1f);
11562 tcg_gen_rotr_i32(tmp2
, tmp2
, tmp
);
11564 gen_helper_ror_cc(tmp2
, cpu_env
, tmp2
, tmp
);
11565 gen_logic_CC(tmp2
);
11568 case 0x8: /* tst */
11569 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
11573 case 0x9: /* neg */
11574 if (s
->condexec_mask
)
11575 tcg_gen_neg_i32(tmp
, tmp2
);
11577 gen_sub_CC(tmp
, tmp
, tmp2
);
11579 case 0xa: /* cmp */
11580 gen_sub_CC(tmp
, tmp
, tmp2
);
11583 case 0xb: /* cmn */
11584 gen_add_CC(tmp
, tmp
, tmp2
);
11587 case 0xc: /* orr */
11588 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
11589 if (!s
->condexec_mask
)
11592 case 0xd: /* mul */
11593 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
11594 if (!s
->condexec_mask
)
11597 case 0xe: /* bic */
11598 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
11599 if (!s
->condexec_mask
)
11602 case 0xf: /* mvn */
11603 tcg_gen_not_i32(tmp2
, tmp2
);
11604 if (!s
->condexec_mask
)
11605 gen_logic_CC(tmp2
);
11612 store_reg(s
, rm
, tmp2
);
11614 tcg_temp_free_i32(tmp
);
11616 store_reg(s
, rd
, tmp
);
11617 tcg_temp_free_i32(tmp2
);
11620 tcg_temp_free_i32(tmp
);
11621 tcg_temp_free_i32(tmp2
);
11626 /* load/store register offset. */
11628 rn
= (insn
>> 3) & 7;
11629 rm
= (insn
>> 6) & 7;
11630 op
= (insn
>> 9) & 7;
11631 addr
= load_reg(s
, rn
);
11632 tmp
= load_reg(s
, rm
);
11633 tcg_gen_add_i32(addr
, addr
, tmp
);
11634 tcg_temp_free_i32(tmp
);
11636 if (op
< 3) { /* store */
11637 tmp
= load_reg(s
, rd
);
11639 tmp
= tcg_temp_new_i32();
11644 gen_aa32_st32_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11647 gen_aa32_st16_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11650 gen_aa32_st8_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11652 case 3: /* ldrsb */
11653 gen_aa32_ld8s_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11656 gen_aa32_ld32u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11659 gen_aa32_ld16u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11662 gen_aa32_ld8u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11664 case 7: /* ldrsh */
11665 gen_aa32_ld16s_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11668 if (op
>= 3) { /* load */
11669 store_reg(s
, rd
, tmp
);
11671 tcg_temp_free_i32(tmp
);
11673 tcg_temp_free_i32(addr
);
11677 /* load/store word immediate offset */
11679 rn
= (insn
>> 3) & 7;
11680 addr
= load_reg(s
, rn
);
11681 val
= (insn
>> 4) & 0x7c;
11682 tcg_gen_addi_i32(addr
, addr
, val
);
11684 if (insn
& (1 << 11)) {
11686 tmp
= tcg_temp_new_i32();
11687 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
11688 store_reg(s
, rd
, tmp
);
11691 tmp
= load_reg(s
, rd
);
11692 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
11693 tcg_temp_free_i32(tmp
);
11695 tcg_temp_free_i32(addr
);
11699 /* load/store byte immediate offset */
11701 rn
= (insn
>> 3) & 7;
11702 addr
= load_reg(s
, rn
);
11703 val
= (insn
>> 6) & 0x1f;
11704 tcg_gen_addi_i32(addr
, addr
, val
);
11706 if (insn
& (1 << 11)) {
11708 tmp
= tcg_temp_new_i32();
11709 gen_aa32_ld8u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11710 store_reg(s
, rd
, tmp
);
11713 tmp
= load_reg(s
, rd
);
11714 gen_aa32_st8_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11715 tcg_temp_free_i32(tmp
);
11717 tcg_temp_free_i32(addr
);
11721 /* load/store halfword immediate offset */
11723 rn
= (insn
>> 3) & 7;
11724 addr
= load_reg(s
, rn
);
11725 val
= (insn
>> 5) & 0x3e;
11726 tcg_gen_addi_i32(addr
, addr
, val
);
11728 if (insn
& (1 << 11)) {
11730 tmp
= tcg_temp_new_i32();
11731 gen_aa32_ld16u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11732 store_reg(s
, rd
, tmp
);
11735 tmp
= load_reg(s
, rd
);
11736 gen_aa32_st16_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11737 tcg_temp_free_i32(tmp
);
11739 tcg_temp_free_i32(addr
);
11743 /* load/store from stack */
11744 rd
= (insn
>> 8) & 7;
11745 addr
= load_reg(s
, 13);
11746 val
= (insn
& 0xff) * 4;
11747 tcg_gen_addi_i32(addr
, addr
, val
);
11749 if (insn
& (1 << 11)) {
11751 tmp
= tcg_temp_new_i32();
11752 gen_aa32_ld32u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11753 store_reg(s
, rd
, tmp
);
11756 tmp
= load_reg(s
, rd
);
11757 gen_aa32_st32_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11758 tcg_temp_free_i32(tmp
);
11760 tcg_temp_free_i32(addr
);
11765 * 0b1010_xxxx_xxxx_xxxx
11766 * - Add PC/SP (immediate)
11768 rd
= (insn
>> 8) & 7;
11769 if (insn
& (1 << 11)) {
11771 tmp
= load_reg(s
, 13);
11773 /* PC. bit 1 is ignored. */
11774 tmp
= tcg_temp_new_i32();
11775 tcg_gen_movi_i32(tmp
, (s
->pc
+ 2) & ~(uint32_t)2);
11777 val
= (insn
& 0xff) * 4;
11778 tcg_gen_addi_i32(tmp
, tmp
, val
);
11779 store_reg(s
, rd
, tmp
);
11784 op
= (insn
>> 8) & 0xf;
11788 * 0b1011_0000_xxxx_xxxx
11789 * - ADD (SP plus immediate)
11790 * - SUB (SP minus immediate)
11792 tmp
= load_reg(s
, 13);
11793 val
= (insn
& 0x7f) * 4;
11794 if (insn
& (1 << 7))
11795 val
= -(int32_t)val
;
11796 tcg_gen_addi_i32(tmp
, tmp
, val
);
11797 store_sp_checked(s
, tmp
);
11800 case 2: /* sign/zero extend. */
11803 rm
= (insn
>> 3) & 7;
11804 tmp
= load_reg(s
, rm
);
11805 switch ((insn
>> 6) & 3) {
11806 case 0: gen_sxth(tmp
); break;
11807 case 1: gen_sxtb(tmp
); break;
11808 case 2: gen_uxth(tmp
); break;
11809 case 3: gen_uxtb(tmp
); break;
11811 store_reg(s
, rd
, tmp
);
11813 case 4: case 5: case 0xc: case 0xd:
11815 * 0b1011_x10x_xxxx_xxxx
11818 addr
= load_reg(s
, 13);
11819 if (insn
& (1 << 8))
11823 for (i
= 0; i
< 8; i
++) {
11824 if (insn
& (1 << i
))
11827 if ((insn
& (1 << 11)) == 0) {
11828 tcg_gen_addi_i32(addr
, addr
, -offset
);
11831 if (s
->v8m_stackcheck
) {
11833 * Here 'addr' is the lower of "old SP" and "new SP";
11834 * if this is a pop that starts below the limit and ends
11835 * above it, it is UNKNOWN whether the limit check triggers;
11836 * we choose to trigger.
11838 gen_helper_v8m_stackcheck(cpu_env
, addr
);
11841 for (i
= 0; i
< 8; i
++) {
11842 if (insn
& (1 << i
)) {
11843 if (insn
& (1 << 11)) {
11845 tmp
= tcg_temp_new_i32();
11846 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
11847 store_reg(s
, i
, tmp
);
11850 tmp
= load_reg(s
, i
);
11851 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
11852 tcg_temp_free_i32(tmp
);
11854 /* advance to the next address. */
11855 tcg_gen_addi_i32(addr
, addr
, 4);
11859 if (insn
& (1 << 8)) {
11860 if (insn
& (1 << 11)) {
11862 tmp
= tcg_temp_new_i32();
11863 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
11864 /* don't set the pc until the rest of the instruction
11868 tmp
= load_reg(s
, 14);
11869 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
11870 tcg_temp_free_i32(tmp
);
11872 tcg_gen_addi_i32(addr
, addr
, 4);
11874 if ((insn
& (1 << 11)) == 0) {
11875 tcg_gen_addi_i32(addr
, addr
, -offset
);
11877 /* write back the new stack pointer */
11878 store_reg(s
, 13, addr
);
11879 /* set the new PC value */
11880 if ((insn
& 0x0900) == 0x0900) {
11881 store_reg_from_load(s
, 15, tmp
);
11885 case 1: case 3: case 9: case 11: /* czb */
11887 tmp
= load_reg(s
, rm
);
11888 arm_gen_condlabel(s
);
11889 if (insn
& (1 << 11))
11890 tcg_gen_brcondi_i32(TCG_COND_EQ
, tmp
, 0, s
->condlabel
);
11892 tcg_gen_brcondi_i32(TCG_COND_NE
, tmp
, 0, s
->condlabel
);
11893 tcg_temp_free_i32(tmp
);
11894 offset
= ((insn
& 0xf8) >> 2) | (insn
& 0x200) >> 3;
11895 val
= (uint32_t)s
->pc
+ 2;
11900 case 15: /* IT, nop-hint. */
11901 if ((insn
& 0xf) == 0) {
11902 gen_nop_hint(s
, (insn
>> 4) & 0xf);
11906 s
->condexec_cond
= (insn
>> 4) & 0xe;
11907 s
->condexec_mask
= insn
& 0x1f;
11908 /* No actual code generated for this insn, just setup state. */
11911 case 0xe: /* bkpt */
11913 int imm8
= extract32(insn
, 0, 8);
11915 gen_exception_bkpt_insn(s
, 2, syn_aa32_bkpt(imm8
, true));
11919 case 0xa: /* rev, and hlt */
11921 int op1
= extract32(insn
, 6, 2);
11925 int imm6
= extract32(insn
, 0, 6);
11931 /* Otherwise this is rev */
11933 rn
= (insn
>> 3) & 0x7;
11935 tmp
= load_reg(s
, rn
);
11937 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
11938 case 1: gen_rev16(tmp
); break;
11939 case 3: gen_revsh(tmp
); break;
11941 g_assert_not_reached();
11943 store_reg(s
, rd
, tmp
);
11948 switch ((insn
>> 5) & 7) {
11952 if (((insn
>> 3) & 1) != !!(s
->be_data
== MO_BE
)) {
11953 gen_helper_setend(cpu_env
);
11954 s
->base
.is_jmp
= DISAS_UPDATE
;
11963 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
11964 tmp
= tcg_const_i32((insn
& (1 << 4)) != 0);
11967 addr
= tcg_const_i32(19);
11968 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
11969 tcg_temp_free_i32(addr
);
11973 addr
= tcg_const_i32(16);
11974 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
11975 tcg_temp_free_i32(addr
);
11977 tcg_temp_free_i32(tmp
);
11980 if (insn
& (1 << 4)) {
11981 shift
= CPSR_A
| CPSR_I
| CPSR_F
;
11985 gen_set_psr_im(s
, ((insn
& 7) << 6), 0, shift
);
12000 /* load/store multiple */
12001 TCGv_i32 loaded_var
= NULL
;
12002 rn
= (insn
>> 8) & 0x7;
12003 addr
= load_reg(s
, rn
);
12004 for (i
= 0; i
< 8; i
++) {
12005 if (insn
& (1 << i
)) {
12006 if (insn
& (1 << 11)) {
12008 tmp
= tcg_temp_new_i32();
12009 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
12013 store_reg(s
, i
, tmp
);
12017 tmp
= load_reg(s
, i
);
12018 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
12019 tcg_temp_free_i32(tmp
);
12021 /* advance to the next address */
12022 tcg_gen_addi_i32(addr
, addr
, 4);
12025 if ((insn
& (1 << rn
)) == 0) {
12026 /* base reg not in list: base register writeback */
12027 store_reg(s
, rn
, addr
);
12029 /* base reg in list: if load, complete it now */
12030 if (insn
& (1 << 11)) {
12031 store_reg(s
, rn
, loaded_var
);
12033 tcg_temp_free_i32(addr
);
12038 /* conditional branch or swi */
12039 cond
= (insn
>> 8) & 0xf;
12045 gen_set_pc_im(s
, s
->pc
);
12046 s
->svc_imm
= extract32(insn
, 0, 8);
12047 s
->base
.is_jmp
= DISAS_SWI
;
12050 /* generate a conditional jump to next instruction */
12051 arm_skip_unless(s
, cond
);
12053 /* jump to the offset */
12054 val
= (uint32_t)s
->pc
+ 2;
12055 offset
= ((int32_t)insn
<< 24) >> 24;
12056 val
+= offset
<< 1;
12061 if (insn
& (1 << 11)) {
12062 /* thumb_insn_is_16bit() ensures we can't get here for
12063 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
12064 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
12066 assert(!arm_dc_feature(s
, ARM_FEATURE_THUMB2
));
12068 offset
= ((insn
& 0x7ff) << 1);
12069 tmp
= load_reg(s
, 14);
12070 tcg_gen_addi_i32(tmp
, tmp
, offset
);
12071 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
12073 tmp2
= tcg_temp_new_i32();
12074 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
12075 store_reg(s
, 14, tmp2
);
12079 /* unconditional branch */
12080 val
= (uint32_t)s
->pc
;
12081 offset
= ((int32_t)insn
<< 21) >> 21;
12082 val
+= (offset
<< 1) + 2;
12087 /* thumb_insn_is_16bit() ensures we can't get here for
12088 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
12090 assert(!arm_dc_feature(s
, ARM_FEATURE_THUMB2
));
12092 if (insn
& (1 << 11)) {
12093 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
12094 offset
= ((insn
& 0x7ff) << 1) | 1;
12095 tmp
= load_reg(s
, 14);
12096 tcg_gen_addi_i32(tmp
, tmp
, offset
);
12098 tmp2
= tcg_temp_new_i32();
12099 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
12100 store_reg(s
, 14, tmp2
);
12103 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
12104 uint32_t uoffset
= ((int32_t)insn
<< 21) >> 9;
12106 tcg_gen_movi_i32(cpu_R
[14], s
->pc
+ 2 + uoffset
);
12113 gen_exception_insn(s
, 2, EXCP_UDEF
, syn_uncategorized(),
12114 default_exception_el(s
));
static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
    /* Return true if the insn at dc->pc might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     * We know this is a Thumb insn, and our caller ensures we are
     * only called if dc->pc is less than 4 bytes from the page
     * boundary, so we cross the page if the first 16 bits indicate
     * that this is a 32 bit insn.
     */
    /* Only the first halfword is fetched here: a 32-bit Thumb encoding
     * is recognizable from it alone, so no cross-page fetch is needed.
     */
    uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);

    return !thumb_insn_is_16bit(s, insn);
}
12131 static void arm_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
12133 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
12134 CPUARMState
*env
= cs
->env_ptr
;
12135 ARMCPU
*cpu
= env_archcpu(env
);
12136 uint32_t tb_flags
= dc
->base
.tb
->flags
;
12137 uint32_t condexec
, core_mmu_idx
;
12139 dc
->isar
= &cpu
->isar
;
12140 dc
->pc
= dc
->base
.pc_first
;
12144 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
12145 * there is no secure EL1, so we route exceptions to EL3.
12147 dc
->secure_routed_to_el3
= arm_feature(env
, ARM_FEATURE_EL3
) &&
12148 !arm_el_is_aa64(env
, 3);
12149 dc
->thumb
= FIELD_EX32(tb_flags
, TBFLAG_A32
, THUMB
);
12150 dc
->sctlr_b
= FIELD_EX32(tb_flags
, TBFLAG_A32
, SCTLR_B
);
12151 dc
->be_data
= FIELD_EX32(tb_flags
, TBFLAG_ANY
, BE_DATA
) ? MO_BE
: MO_LE
;
12152 condexec
= FIELD_EX32(tb_flags
, TBFLAG_A32
, CONDEXEC
);
12153 dc
->condexec_mask
= (condexec
& 0xf) << 1;
12154 dc
->condexec_cond
= condexec
>> 4;
12155 core_mmu_idx
= FIELD_EX32(tb_flags
, TBFLAG_ANY
, MMUIDX
);
12156 dc
->mmu_idx
= core_to_arm_mmu_idx(env
, core_mmu_idx
);
12157 dc
->current_el
= arm_mmu_idx_to_el(dc
->mmu_idx
);
12158 #if !defined(CONFIG_USER_ONLY)
12159 dc
->user
= (dc
->current_el
== 0);
12161 dc
->ns
= FIELD_EX32(tb_flags
, TBFLAG_A32
, NS
);
12162 dc
->fp_excp_el
= FIELD_EX32(tb_flags
, TBFLAG_ANY
, FPEXC_EL
);
12163 dc
->vfp_enabled
= FIELD_EX32(tb_flags
, TBFLAG_A32
, VFPEN
);
12164 dc
->vec_len
= FIELD_EX32(tb_flags
, TBFLAG_A32
, VECLEN
);
12165 if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
12166 dc
->c15_cpar
= FIELD_EX32(tb_flags
, TBFLAG_A32
, XSCALE_CPAR
);
12167 dc
->vec_stride
= 0;
12169 dc
->vec_stride
= FIELD_EX32(tb_flags
, TBFLAG_A32
, VECSTRIDE
);
12172 dc
->v7m_handler_mode
= FIELD_EX32(tb_flags
, TBFLAG_A32
, HANDLER
);
12173 dc
->v8m_secure
= arm_feature(env
, ARM_FEATURE_M_SECURITY
) &&
12174 regime_is_secure(env
, dc
->mmu_idx
);
12175 dc
->v8m_stackcheck
= FIELD_EX32(tb_flags
, TBFLAG_A32
, STACKCHECK
);
12176 dc
->v8m_fpccr_s_wrong
= FIELD_EX32(tb_flags
, TBFLAG_A32
, FPCCR_S_WRONG
);
12177 dc
->v7m_new_fp_ctxt_needed
=
12178 FIELD_EX32(tb_flags
, TBFLAG_A32
, NEW_FP_CTXT_NEEDED
);
12179 dc
->v7m_lspact
= FIELD_EX32(tb_flags
, TBFLAG_A32
, LSPACT
);
12180 dc
->cp_regs
= cpu
->cp_regs
;
12181 dc
->features
= env
->features
;
12183 /* Single step state. The code-generation logic here is:
12185 * generate code with no special handling for single-stepping (except
12186 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
12187 * this happens anyway because those changes are all system register or
12189 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
12190 * emit code for one insn
12191 * emit code to clear PSTATE.SS
12192 * emit code to generate software step exception for completed step
12193 * end TB (as usual for having generated an exception)
12194 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
12195 * emit code to generate a software step exception
12198 dc
->ss_active
= FIELD_EX32(tb_flags
, TBFLAG_ANY
, SS_ACTIVE
);
12199 dc
->pstate_ss
= FIELD_EX32(tb_flags
, TBFLAG_ANY
, PSTATE_SS
);
12200 dc
->is_ldex
= false;
12201 dc
->ss_same_el
= false; /* Can't be true since EL_d must be AArch64 */
12203 dc
->page_start
= dc
->base
.pc_first
& TARGET_PAGE_MASK
;
12205 /* If architectural single step active, limit to 1. */
12206 if (is_singlestepping(dc
)) {
12207 dc
->base
.max_insns
= 1;
12210 /* ARM is a fixed-length ISA. Bound the number of insns to execute
12211 to those left on the page. */
12213 int bound
= -(dc
->base
.pc_first
| TARGET_PAGE_MASK
) / 4;
12214 dc
->base
.max_insns
= MIN(dc
->base
.max_insns
, bound
);
12217 cpu_F0s
= tcg_temp_new_i32();
12218 cpu_F1s
= tcg_temp_new_i32();
12219 cpu_F0d
= tcg_temp_new_i64();
12220 cpu_F1d
= tcg_temp_new_i64();
12223 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
12224 cpu_M0
= tcg_temp_new_i64();
12227 static void arm_tr_tb_start(DisasContextBase
*dcbase
, CPUState
*cpu
)
12229 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
12231 /* A note on handling of the condexec (IT) bits:
12233 * We want to avoid the overhead of having to write the updated condexec
12234 * bits back to the CPUARMState for every instruction in an IT block. So:
12235 * (1) if the condexec bits are not already zero then we write
12236 * zero back into the CPUARMState now. This avoids complications trying
12237 * to do it at the end of the block. (For example if we don't do this
12238 * it's hard to identify whether we can safely skip writing condexec
12239 * at the end of the TB, which we definitely want to do for the case
12240 * where a TB doesn't do anything with the IT state at all.)
12241 * (2) if we are going to leave the TB then we call gen_set_condexec()
12242 * which will write the correct value into CPUARMState if zero is wrong.
12243 * This is done both for leaving the TB at the end, and for leaving
12244 * it because of an exception we know will happen, which is done in
12245 * gen_exception_insn(). The latter is necessary because we need to
12246 * leave the TB with the PC/IT state just prior to execution of the
12247 * instruction which caused the exception.
12248 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
12249 * then the CPUARMState will be wrong and we need to reset it.
12250 * This is handled in the same way as restoration of the
12251 * PC in these situations; we save the value of the condexec bits
12252 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
12253 * then uses this to restore them after an exception.
12255 * Note that there are no instructions which can read the condexec
12256 * bits, and none which can write non-static values to them, so
12257 * we don't need to care about whether CPUARMState is correct in the
12261 /* Reset the conditional execution bits immediately. This avoids
12262 complications trying to do it at the end of the block. */
12263 if (dc
->condexec_mask
|| dc
->condexec_cond
) {
12264 TCGv_i32 tmp
= tcg_temp_new_i32();
12265 tcg_gen_movi_i32(tmp
, 0);
12266 store_cpu_field(tmp
, condexec_bits
);
12270 static void arm_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cpu
)
12272 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
12274 tcg_gen_insn_start(dc
->pc
,
12275 (dc
->condexec_cond
<< 4) | (dc
->condexec_mask
>> 1),
12277 dc
->insn_start
= tcg_last_op();
12280 static bool arm_tr_breakpoint_check(DisasContextBase
*dcbase
, CPUState
*cpu
,
12281 const CPUBreakpoint
*bp
)
12283 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
12285 if (bp
->flags
& BP_CPU
) {
12286 gen_set_condexec(dc
);
12287 gen_set_pc_im(dc
, dc
->pc
);
12288 gen_helper_check_breakpoints(cpu_env
);
12289 /* End the TB early; it's likely not going to be executed */
12290 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
12292 gen_exception_internal_insn(dc
, 0, EXCP_DEBUG
);
12293 /* The address covered by the breakpoint must be
12294 included in [tb->pc, tb->pc + tb->size) in order
12295 to for it to be properly cleared -- thus we
12296 increment the PC here so that the logic setting
12297 tb->size below does the right thing. */
12298 /* TODO: Advance PC by correct instruction length to
12299 * avoid disassembler error messages */
12301 dc
->base
.is_jmp
= DISAS_NORETURN
;
12307 static bool arm_pre_translate_insn(DisasContext
*dc
)
12309 #ifdef CONFIG_USER_ONLY
12310 /* Intercept jump to the magic kernel page. */
12311 if (dc
->pc
>= 0xffff0000) {
12312 /* We always get here via a jump, so know we are not in a
12313 conditional execution block. */
12314 gen_exception_internal(EXCP_KERNEL_TRAP
);
12315 dc
->base
.is_jmp
= DISAS_NORETURN
;
12320 if (dc
->ss_active
&& !dc
->pstate_ss
) {
12321 /* Singlestep state is Active-pending.
12322 * If we're in this state at the start of a TB then either
12323 * a) we just took an exception to an EL which is being debugged
12324 * and this is the first insn in the exception handler
12325 * b) debug exceptions were masked and we just unmasked them
12326 * without changing EL (eg by clearing PSTATE.D)
12327 * In either case we're going to take a swstep exception in the
12328 * "did not step an insn" case, and so the syndrome ISV and EX
12329 * bits should be zero.
12331 assert(dc
->base
.num_insns
== 1);
12332 gen_exception(EXCP_UDEF
, syn_swstep(dc
->ss_same_el
, 0, 0),
12333 default_exception_el(dc
));
12334 dc
->base
.is_jmp
= DISAS_NORETURN
;
12341 static void arm_post_translate_insn(DisasContext
*dc
)
12343 if (dc
->condjmp
&& !dc
->base
.is_jmp
) {
12344 gen_set_label(dc
->condlabel
);
12347 dc
->base
.pc_next
= dc
->pc
;
12348 translator_loop_temp_check(&dc
->base
);
12351 static void arm_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cpu
)
12353 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
12354 CPUARMState
*env
= cpu
->env_ptr
;
12357 if (arm_pre_translate_insn(dc
)) {
12361 insn
= arm_ldl_code(env
, dc
->pc
, dc
->sctlr_b
);
12364 disas_arm_insn(dc
, insn
);
12366 arm_post_translate_insn(dc
);
12368 /* ARM is a fixed-length ISA. We performed the cross-page check
12369 in init_disas_context by adjusting max_insns. */
12372 static bool thumb_insn_is_unconditional(DisasContext
*s
, uint32_t insn
)
12374 /* Return true if this Thumb insn is always unconditional,
12375 * even inside an IT block. This is true of only a very few
12376 * instructions: BKPT, HLT, and SG.
12378 * A larger class of instructions are UNPREDICTABLE if used
12379 * inside an IT block; we do not need to detect those here, because
12380 * what we do by default (perform the cc check and update the IT
12381 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12382 * choice for those situations.
12384 * insn is either a 16-bit or a 32-bit instruction; the two are
12385 * distinguishable because for the 16-bit case the top 16 bits
12386 * are zeroes, and that isn't a valid 32-bit encoding.
12388 if ((insn
& 0xffffff00) == 0xbe00) {
12393 if ((insn
& 0xffffffc0) == 0xba80 && arm_dc_feature(s
, ARM_FEATURE_V8
) &&
12394 !arm_dc_feature(s
, ARM_FEATURE_M
)) {
12395 /* HLT: v8A only. This is unconditional even when it is going to
12396 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12397 * For v7 cores this was a plain old undefined encoding and so
12398 * honours its cc check. (We might be using the encoding as
12399 * a semihosting trap, but we don't change the cc check behaviour
12400 * on that account, because a debugger connected to a real v7A
12401 * core and emulating semihosting traps by catching the UNDEF
12402 * exception would also only see cases where the cc check passed.
12403 * No guest code should be trying to do a HLT semihosting trap
12404 * in an IT block anyway.
12409 if (insn
== 0xe97fe97f && arm_dc_feature(s
, ARM_FEATURE_V8
) &&
12410 arm_dc_feature(s
, ARM_FEATURE_M
)) {
12418 static void thumb_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cpu
)
12420 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
12421 CPUARMState
*env
= cpu
->env_ptr
;
12425 if (arm_pre_translate_insn(dc
)) {
12429 insn
= arm_lduw_code(env
, dc
->pc
, dc
->sctlr_b
);
12430 is_16bit
= thumb_insn_is_16bit(dc
, insn
);
12433 uint32_t insn2
= arm_lduw_code(env
, dc
->pc
, dc
->sctlr_b
);
12435 insn
= insn
<< 16 | insn2
;
12440 if (dc
->condexec_mask
&& !thumb_insn_is_unconditional(dc
, insn
)) {
12441 uint32_t cond
= dc
->condexec_cond
;
12443 if (cond
!= 0x0e) { /* Skip conditional when condition is AL. */
12444 arm_skip_unless(dc
, cond
);
12449 disas_thumb_insn(dc
, insn
);
12451 disas_thumb2_insn(dc
, insn
);
12454 /* Advance the Thumb condexec condition. */
12455 if (dc
->condexec_mask
) {
12456 dc
->condexec_cond
= ((dc
->condexec_cond
& 0xe) |
12457 ((dc
->condexec_mask
>> 4) & 1));
12458 dc
->condexec_mask
= (dc
->condexec_mask
<< 1) & 0x1f;
12459 if (dc
->condexec_mask
== 0) {
12460 dc
->condexec_cond
= 0;
12464 arm_post_translate_insn(dc
);
12466 /* Thumb is a variable-length ISA. Stop translation when the next insn
12467 * will touch a new page. This ensures that prefetch aborts occur at
12470 * We want to stop the TB if the next insn starts in a new page,
12471 * or if it spans between this page and the next. This means that
12472 * if we're looking at the last halfword in the page we need to
12473 * see if it's a 16-bit Thumb insn (which will fit in this TB)
12474 * or a 32-bit Thumb insn (which won't).
12475 * This is to avoid generating a silly TB with a single 16-bit insn
12476 * in it at the end of this page (which would execute correctly
12477 * but isn't very efficient).
12479 if (dc
->base
.is_jmp
== DISAS_NEXT
12480 && (dc
->pc
- dc
->page_start
>= TARGET_PAGE_SIZE
12481 || (dc
->pc
- dc
->page_start
>= TARGET_PAGE_SIZE
- 3
12482 && insn_crosses_page(env
, dc
)))) {
12483 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
12487 static void arm_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cpu
)
12489 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
12491 if (tb_cflags(dc
->base
.tb
) & CF_LAST_IO
&& dc
->condjmp
) {
12492 /* FIXME: This can theoretically happen with self-modifying code. */
12493 cpu_abort(cpu
, "IO on conditional branch instruction");
12496 /* At this stage dc->condjmp will only be set when the skipped
12497 instruction was a conditional branch or trap, and the PC has
12498 already been written. */
12499 gen_set_condexec(dc
);
12500 if (dc
->base
.is_jmp
== DISAS_BX_EXCRET
) {
12501 /* Exception return branches need some special case code at the
12502 * end of the TB, which is complex enough that it has to
12503 * handle the single-step vs not and the condition-failed
12504 * insn codepath itself.
12506 gen_bx_excret_final_code(dc
);
12507 } else if (unlikely(is_singlestepping(dc
))) {
12508 /* Unconditional and "condition passed" instruction codepath. */
12509 switch (dc
->base
.is_jmp
) {
12511 gen_ss_advance(dc
);
12512 gen_exception(EXCP_SWI
, syn_aa32_svc(dc
->svc_imm
, dc
->thumb
),
12513 default_exception_el(dc
));
12516 gen_ss_advance(dc
);
12517 gen_exception(EXCP_HVC
, syn_aa32_hvc(dc
->svc_imm
), 2);
12520 gen_ss_advance(dc
);
12521 gen_exception(EXCP_SMC
, syn_aa32_smc(), 3);
12524 case DISAS_TOO_MANY
:
12526 gen_set_pc_im(dc
, dc
->pc
);
12529 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
12530 gen_singlestep_exception(dc
);
12532 case DISAS_NORETURN
:
12536 /* While branches must always occur at the end of an IT block,
12537 there are a few other things that can cause us to terminate
12538 the TB in the middle of an IT block:
12539 - Exception generating instructions (bkpt, swi, undefined).
12541 - Hardware watchpoints.
12542 Hardware breakpoints have already been handled and skip this code.
12544 switch(dc
->base
.is_jmp
) {
12546 case DISAS_TOO_MANY
:
12547 gen_goto_tb(dc
, 1, dc
->pc
);
12553 gen_set_pc_im(dc
, dc
->pc
);
12556 /* indicate that the hash table must be used to find the next TB */
12557 tcg_gen_exit_tb(NULL
, 0);
12559 case DISAS_NORETURN
:
12560 /* nothing more to generate */
12564 TCGv_i32 tmp
= tcg_const_i32((dc
->thumb
&&
12565 !(dc
->insn
& (1U << 31))) ? 2 : 4);
12567 gen_helper_wfi(cpu_env
, tmp
);
12568 tcg_temp_free_i32(tmp
);
12569 /* The helper doesn't necessarily throw an exception, but we
12570 * must go back to the main loop to check for interrupts anyway.
12572 tcg_gen_exit_tb(NULL
, 0);
12576 gen_helper_wfe(cpu_env
);
12579 gen_helper_yield(cpu_env
);
12582 gen_exception(EXCP_SWI
, syn_aa32_svc(dc
->svc_imm
, dc
->thumb
),
12583 default_exception_el(dc
));
12586 gen_exception(EXCP_HVC
, syn_aa32_hvc(dc
->svc_imm
), 2);
12589 gen_exception(EXCP_SMC
, syn_aa32_smc(), 3);
12595 /* "Condition failed" instruction codepath for the branch/trap insn */
12596 gen_set_label(dc
->condlabel
);
12597 gen_set_condexec(dc
);
12598 if (unlikely(is_singlestepping(dc
))) {
12599 gen_set_pc_im(dc
, dc
->pc
);
12600 gen_singlestep_exception(dc
);
12602 gen_goto_tb(dc
, 1, dc
->pc
);
12606 /* Functions above can change dc->pc, so re-align db->pc_next */
12607 dc
->base
.pc_next
= dc
->pc
;
12610 static void arm_tr_disas_log(const DisasContextBase
*dcbase
, CPUState
*cpu
)
12612 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
12614 qemu_log("IN: %s\n", lookup_symbol(dc
->base
.pc_first
));
12615 log_target_disas(cpu
, dc
->base
.pc_first
, dc
->base
.tb
->size
);
/* Translator-loop callbacks for A32 (ARM-mode) code. */
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
/* Translator-loop callbacks for T16/T32 (Thumb-mode) code; identical to
 * arm_translator_ops except for the per-insn translate hook.
 */
static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12638 /* generate intermediate code for basic block 'tb'. */
12639 void gen_intermediate_code(CPUState
*cpu
, TranslationBlock
*tb
, int max_insns
)
12642 const TranslatorOps
*ops
= &arm_translator_ops
;
12644 if (FIELD_EX32(tb
->flags
, TBFLAG_A32
, THUMB
)) {
12645 ops
= &thumb_translator_ops
;
12647 #ifdef TARGET_AARCH64
12648 if (FIELD_EX32(tb
->flags
, TBFLAG_ANY
, AARCH64_STATE
)) {
12649 ops
= &aarch64_translator_ops
;
12653 translator_loop(ops
, &dc
.base
, cpu
, tb
, max_insns
);
12656 void arm_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
12658 ARMCPU
*cpu
= ARM_CPU(cs
);
12659 CPUARMState
*env
= &cpu
->env
;
12663 aarch64_cpu_dump_state(cs
, f
, flags
);
12667 for(i
=0;i
<16;i
++) {
12668 qemu_fprintf(f
, "R%02d=%08x", i
, env
->regs
[i
]);
12670 qemu_fprintf(f
, "\n");
12672 qemu_fprintf(f
, " ");
12675 if (arm_feature(env
, ARM_FEATURE_M
)) {
12676 uint32_t xpsr
= xpsr_read(env
);
12678 const char *ns_status
= "";
12680 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
12681 ns_status
= env
->v7m
.secure
? "S " : "NS ";
12684 if (xpsr
& XPSR_EXCP
) {
12687 if (env
->v7m
.control
[env
->v7m
.secure
] & R_V7M_CONTROL_NPRIV_MASK
) {
12688 mode
= "unpriv-thread";
12690 mode
= "priv-thread";
12694 qemu_fprintf(f
, "XPSR=%08x %c%c%c%c %c %s%s\n",
12696 xpsr
& XPSR_N
? 'N' : '-',
12697 xpsr
& XPSR_Z
? 'Z' : '-',
12698 xpsr
& XPSR_C
? 'C' : '-',
12699 xpsr
& XPSR_V
? 'V' : '-',
12700 xpsr
& XPSR_T
? 'T' : 'A',
12704 uint32_t psr
= cpsr_read(env
);
12705 const char *ns_status
= "";
12707 if (arm_feature(env
, ARM_FEATURE_EL3
) &&
12708 (psr
& CPSR_M
) != ARM_CPU_MODE_MON
) {
12709 ns_status
= env
->cp15
.scr_el3
& SCR_NS
? "NS " : "S ";
12712 qemu_fprintf(f
, "PSR=%08x %c%c%c%c %c %s%s%d\n",
12714 psr
& CPSR_N
? 'N' : '-',
12715 psr
& CPSR_Z
? 'Z' : '-',
12716 psr
& CPSR_C
? 'C' : '-',
12717 psr
& CPSR_V
? 'V' : '-',
12718 psr
& CPSR_T
? 'T' : 'A',
12720 aarch32_mode_name(psr
), (psr
& 0x10) ? 32 : 26);
12723 if (flags
& CPU_DUMP_FPU
) {
12724 int numvfpregs
= 0;
12725 if (arm_feature(env
, ARM_FEATURE_VFP
)) {
12728 if (arm_feature(env
, ARM_FEATURE_VFP3
)) {
12731 for (i
= 0; i
< numvfpregs
; i
++) {
12732 uint64_t v
= *aa32_vfp_dreg(env
, i
);
12733 qemu_fprintf(f
, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64
"\n",
12734 i
* 2, (uint32_t)v
,
12735 i
* 2 + 1, (uint32_t)(v
>> 32),
12738 qemu_fprintf(f
, "FPSCR: %08x\n", vfp_get_fpscr(env
));
12742 void restore_state_to_opc(CPUARMState
*env
, TranslationBlock
*tb
,
12743 target_ulong
*data
)
12747 env
->condexec_bits
= 0;
12748 env
->exception
.syndrome
= data
[2] << ARM_INSN_START_WORD2_SHIFT
;
12750 env
->regs
[15] = data
[0];
12751 env
->condexec_bits
= data
[1];
12752 env
->exception
.syndrome
= data
[2] << ARM_INSN_START_WORD2_SHIFT
;