/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op-gvec.h"
#include "qemu/bitops.h"
#include "exec/semihost.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    dc_isar_feature(jazelle, s)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* Function prototypes for gen_ functions calling Neon helpers.  */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}
/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;
/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}
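
/*
 * Illustrative (hypothetical) call site, not taken from this file: a call
 * reporting a 16-bit Thumb store of r3 would pass the Rt number in the low
 * bits with the flag bits OR'ed in above it, e.g.
 *
 *     disas_set_da_iss(s, MO_UW, 3 | ISSIsWrite | ISSIs16Bit);
 */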
static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    default:
        g_assert_not_reached();
    }
}
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    store_reg(s, 13, var);
}
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
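
/*
 * Illustration only, not used by the translator: the dual signed 16x16->32
 * multiply above expressed on host integers, assuming the usual packing of
 * two signed halfwords per 32-bit operand ("a" receives the low-half
 * product, "b" the high-half product, matching the TCG sequence).
 */
static inline void smul_dual_scalar_example(uint32_t *a, uint32_t *b)
{
    int32_t lo = (int32_t)(int16_t)*a * (int16_t)*b;
    int32_t hi = ((int32_t)*a >> 16) * ((int32_t)*b >> 16);
    *a = (uint32_t)lo;
    *b = (uint32_t)hi;
}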
/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}
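
/*
 * Illustration only: the mask/shift sequence above is the standard scalar
 * byteswap-within-halfwords, e.g. 0x11223344 -> 0x22114433.
 */
static inline uint32_t rev16_scalar_example(uint32_t x)
{
    return ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8);
}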
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
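
/*
 * Illustration only: the same carry-isolation trick on host integers,
 * assuming two 16-bit lanes packed in one 32-bit word. Clearing bit 15 of
 * both operands stops the low lane's carry from spilling into the high lane,
 * and the saved XOR of the two bit-15 values restores the correct bit 15 of
 * the low lane's sum afterwards.
 */
static inline uint32_t add16_scalar_example(uint32_t t0, uint32_t t1)
{
    uint32_t tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    return (t0 + t1) ^ tmp;
}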
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
/* dest = T0 + T1.  Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
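
/*
 * Illustration only: the V computation above is the usual signed-overflow
 * rule for addition -- overflow iff both operands have the same sign and the
 * result's sign differs, i.e. the sign bit of (res ^ a) & ~(a ^ b).
 */
static inline bool add_overflow_scalar_example(uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    return (((res ^ a) & ~(a ^ b)) >> 31) != 0;
}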
/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
/* dest = T0 - T1.  Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
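
/*
 * Illustration only: for subtraction the overflow rule flips -- overflow iff
 * the operands have different signs and the result's sign differs from the
 * minuend, i.e. the sign bit of (res ^ a) & (a ^ b). C is the "no borrow"
 * condition a >= b (unsigned), matching the TCG_COND_GEU setcond above.
 */
static inline bool sub_overflow_scalar_example(uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    return (((res ^ a) & (a ^ b)) >> 31) != 0;
}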
/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        break;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}
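
/*
 * Illustration only: the "hi" computation above on host integers. CF is
 * stored as 0 or 1 and ZF as "zero iff the Z flag is set", so -CF is an
 * all-ones mask exactly when C is set, and (-CF) & ZF is nonzero exactly
 * when C && !Z.
 */
static inline bool cond_hi_scalar_example(uint32_t cf, uint32_t zf)
{
    return ((0u - cf) & zf) != 0;
}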
void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
/* Which data-processing ops set N and Z as a logical operation (AND, EOR,
 * TST, TEQ, ORR, MOV, BIC, MVN) rather than via the arithmetic flag logic.
 */
static const uint8_t table_logic_cc[16] = {
    1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1,
};
static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->base.is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}
static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}
static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_set_pc_im() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}
static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_set_pc_im(s, s->pc);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx_excret(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif
/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}
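
/*
 * Illustration only: the BE32 (SCTLR.B) address adjustment applied above,
 * on host integers. Sub-word accesses have their address XORed with
 * (4 - access size in bytes), so byte accesses flip the low two address
 * bits and halfword accesses flip bit 1; word and larger accesses are
 * unchanged.
 */
static inline uint32_t be32_addr_scalar_example(uint32_t addr, unsigned size)
{
    return (size < 4) ? (addr ^ (4 - size)) : addr;
}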
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}
#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}
static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}
static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}
static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}
DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)

DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_HVC;
}
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_SMC;
}
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
{
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    tcg_syn = tcg_const_i32(syn);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}
/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->base.is_jmp = DISAS_EXIT;
}
static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}
#define VFP_GEN_ITOF(name)                                             \
static inline void gen_vfp_##name(int dp, int neon)                    \
{                                                                      \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                       \
    if (dp) {                                                          \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr);         \
    } else {                                                           \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);         \
    }                                                                  \
    tcg_temp_free_ptr(statusptr);                                      \
}

#define VFP_GEN_FTOI(name)                                             \
static inline void gen_vfp_##name(int dp, int neon)                    \
{                                                                      \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                       \
    if (dp) {                                                          \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr);         \
    } else {                                                           \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);         \
    }                                                                  \
    tcg_temp_free_ptr(statusptr);                                      \
}

#define VFP_GEN_FIX(name, round)                                       \
static inline void gen_vfp_##name(int dp, int shift, int neon)         \
{                                                                      \
    TCGv_i32 tmp_shift = tcg_const_i32(shift);                         \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                       \
    if (dp) {                                                          \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift,   \
                                        statusptr);                    \
    } else {                                                           \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift,   \
                                        statusptr);                    \
    }                                                                  \
    tcg_temp_free_i32(tmp_shift);                                      \
    tcg_temp_free_ptr(statusptr);                                      \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
    }
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
    }
}
static inline long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
    } else {
        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
        if (reg & 1) {
            ofs += offsetof(CPU_DoubleU, l.upper);
        } else {
            ofs += offsetof(CPU_DoubleU, l.lower);
        }
        return ofs;
    }
}
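
/*
 * Worked example (illustration only): with this layout, single-precision
 * register S5 (dp == false, reg == 5) resolves to zregs[1].d[0] plus the
 * CPU_DoubleU "upper" word, i.e. the odd S register of D2.
 */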
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
 * where 0 is the least significant end of the register.
 */
static inline long
neon_element_offset(int reg, int element, TCGMemOp size)
{
    int element_size = 1 << size;
    int ofs = element * element_size;
#ifdef HOST_WORDS_BIGENDIAN
    /* Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     */
    if (element_size < 8) {
        ofs ^= 8 - element_size;
    }
#endif
    return neon_reg_offset(reg, 0) + ofs;
}
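
/*
 * Worked example (illustration only): on a big-endian host with 16-bit
 * elements (element_size == 2), the XOR above maps element indices 0..3 to
 * byte offsets 6, 4, 2, 0 within the 8-byte unit, so element 0 remains the
 * architecturally least significant one.
 */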
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i32(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i32(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i64(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i64(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld32u_i64(var, cpu_env, offset);
        break;
    case MO_Q:
        tcg_gen_ld_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i32(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i32(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i64(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i64(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st32_i64(var, cpu_env, offset);
        break;
    case MO_64:
        tcg_gen_st_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
    return ret;
}
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
1750 static inline void iwmmxt_load_reg(TCGv_i64 var
, int reg
)
1752 tcg_gen_ld_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1755 static inline void iwmmxt_store_reg(TCGv_i64 var
, int reg
)
1757 tcg_gen_st_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1760 static inline TCGv_i32
iwmmxt_load_creg(int reg
)
1762 TCGv_i32 var
= tcg_temp_new_i32();
1763 tcg_gen_ld_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1767 static inline void iwmmxt_store_creg(int reg
, TCGv_i32 var
)
1769 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1770 tcg_temp_free_i32(var
);
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                         /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                            /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                  /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WLDRD */
                        gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
                        i = 0;
                    } else {                            /* WLDRW wRd */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    tmp = tcg_temp_new_i32();
                    if (insn & (1 << 22)) {             /* WLDRH */
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                    } else {                            /* WLDRB */
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                  /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WSTRD */
                        gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
                    } else {                            /* WSTRW wRd */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    if (insn & (1 << 22)) {             /* WSTRH */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                    } else {                            /* WSTRB */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                    }
                }
            }
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        return 0;
    }
    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                                 /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                                 /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                                 /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                                 /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                                 /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                                 /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                             /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:     /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:     /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:     /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:     /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:     /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:     /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:     /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:     /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:     /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            tmp2 = NULL;
            tmp3 = NULL;
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free_i32(tmp3);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
2339 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2340 rd
= (insn
>> 12) & 0xf;
2341 wrd
= (insn
>> 16) & 0xf;
2342 if (rd
== 15 || ((insn
>> 22) & 3) == 3)
2344 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2345 tmp
= tcg_temp_new_i32();
2346 switch ((insn
>> 22) & 3) {
2348 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 7) << 3);
2349 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2351 tcg_gen_ext8s_i32(tmp
, tmp
);
2353 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
2357 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 3) << 4);
2358 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2360 tcg_gen_ext16s_i32(tmp
, tmp
);
2362 tcg_gen_andi_i32(tmp
, tmp
, 0xffff);
2366 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 1) << 5);
2367 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2370 store_reg(s
, rd
, tmp
);
2372 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2373 if ((insn
& 0x000ff008) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2375 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2376 switch ((insn
>> 22) & 3) {
2378 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 7) << 2) + 0);
2381 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 3) << 3) + 4);
2384 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 1) << 4) + 12);
2387 tcg_gen_shli_i32(tmp
, tmp
, 28);
2389 tcg_temp_free_i32(tmp
);
2391 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2392 if (((insn
>> 6) & 3) == 3)
2394 rd
= (insn
>> 12) & 0xf;
2395 wrd
= (insn
>> 16) & 0xf;
2396 tmp
= load_reg(s
, rd
);
2397 switch ((insn
>> 6) & 3) {
2399 gen_helper_iwmmxt_bcstb(cpu_M0
, tmp
);
2402 gen_helper_iwmmxt_bcstw(cpu_M0
, tmp
);
2405 gen_helper_iwmmxt_bcstl(cpu_M0
, tmp
);
2408 tcg_temp_free_i32(tmp
);
2409 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2410 gen_op_iwmmxt_set_mup();
2412 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2413 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2415 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2416 tmp2
= tcg_temp_new_i32();
2417 tcg_gen_mov_i32(tmp2
, tmp
);
2418 switch ((insn
>> 22) & 3) {
2420 for (i
= 0; i
< 7; i
++) {
2421 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
2422 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2426 for (i
= 0; i
< 3; i
++) {
2427 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
2428 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2432 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
2433 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2437 tcg_temp_free_i32(tmp2
);
2438 tcg_temp_free_i32(tmp
);
2440 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2441 wrd
= (insn
>> 12) & 0xf;
2442 rd0
= (insn
>> 16) & 0xf;
2443 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2444 switch ((insn
>> 22) & 3) {
2446 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
2449 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
2452 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
2457 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2458 gen_op_iwmmxt_set_mup();
2460 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2461 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2463 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2464 tmp2
= tcg_temp_new_i32();
2465 tcg_gen_mov_i32(tmp2
, tmp
);
2466 switch ((insn
>> 22) & 3) {
2468 for (i
= 0; i
< 7; i
++) {
2469 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
2470 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2474 for (i
= 0; i
< 3; i
++) {
2475 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
2476 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2480 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
2481 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2485 tcg_temp_free_i32(tmp2
);
2486 tcg_temp_free_i32(tmp
);
2488 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2489 rd
= (insn
>> 12) & 0xf;
2490 rd0
= (insn
>> 16) & 0xf;
2491 if ((insn
& 0xf) != 0 || ((insn
>> 22) & 3) == 3)
2493 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2494 tmp
= tcg_temp_new_i32();
2495 switch ((insn
>> 22) & 3) {
2497 gen_helper_iwmmxt_msbb(tmp
, cpu_M0
);
2500 gen_helper_iwmmxt_msbw(tmp
, cpu_M0
);
2503 gen_helper_iwmmxt_msbl(tmp
, cpu_M0
);
2506 store_reg(s
, rd
, tmp
);
2508 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2509 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2510 wrd
= (insn
>> 12) & 0xf;
2511 rd0
= (insn
>> 16) & 0xf;
2512 rd1
= (insn
>> 0) & 0xf;
2513 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2514 switch ((insn
>> 22) & 3) {
2516 if (insn
& (1 << 21))
2517 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
2519 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
2522 if (insn
& (1 << 21))
2523 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
2525 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
2528 if (insn
& (1 << 21))
2529 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
2531 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
2536 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2537 gen_op_iwmmxt_set_mup();
2538 gen_op_iwmmxt_set_cup();
2540 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2541 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2542 wrd
= (insn
>> 12) & 0xf;
2543 rd0
= (insn
>> 16) & 0xf;
2544 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2545 switch ((insn
>> 22) & 3) {
2547 if (insn
& (1 << 21))
2548 gen_op_iwmmxt_unpacklsb_M0();
2550 gen_op_iwmmxt_unpacklub_M0();
2553 if (insn
& (1 << 21))
2554 gen_op_iwmmxt_unpacklsw_M0();
2556 gen_op_iwmmxt_unpackluw_M0();
2559 if (insn
& (1 << 21))
2560 gen_op_iwmmxt_unpacklsl_M0();
2562 gen_op_iwmmxt_unpacklul_M0();
2567 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2568 gen_op_iwmmxt_set_mup();
2569 gen_op_iwmmxt_set_cup();
2571 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2572 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2573 wrd
= (insn
>> 12) & 0xf;
2574 rd0
= (insn
>> 16) & 0xf;
2575 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2576 switch ((insn
>> 22) & 3) {
2578 if (insn
& (1 << 21))
2579 gen_op_iwmmxt_unpackhsb_M0();
2581 gen_op_iwmmxt_unpackhub_M0();
2584 if (insn
& (1 << 21))
2585 gen_op_iwmmxt_unpackhsw_M0();
2587 gen_op_iwmmxt_unpackhuw_M0();
2590 if (insn
& (1 << 21))
2591 gen_op_iwmmxt_unpackhsl_M0();
2593 gen_op_iwmmxt_unpackhul_M0();
2598 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2599 gen_op_iwmmxt_set_mup();
2600 gen_op_iwmmxt_set_cup();
2602 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2603 case 0x214: case 0x614: case 0xa14: case 0xe14:
2604 if (((insn
>> 22) & 3) == 0)
2606 wrd
= (insn
>> 12) & 0xf;
2607 rd0
= (insn
>> 16) & 0xf;
2608 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2609 tmp
= tcg_temp_new_i32();
2610 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2611 tcg_temp_free_i32(tmp
);
2614 switch ((insn
>> 22) & 3) {
2616 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2619 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2622 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2625 tcg_temp_free_i32(tmp
);
2626 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2627 gen_op_iwmmxt_set_mup();
2628 gen_op_iwmmxt_set_cup();
2630 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2631 case 0x014: case 0x414: case 0x814: case 0xc14:
2632 if (((insn
>> 22) & 3) == 0)
2634 wrd
= (insn
>> 12) & 0xf;
2635 rd0
= (insn
>> 16) & 0xf;
2636 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2637 tmp
= tcg_temp_new_i32();
2638 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2639 tcg_temp_free_i32(tmp
);
2642 switch ((insn
>> 22) & 3) {
2644 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2647 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2650 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2653 tcg_temp_free_i32(tmp
);
2654 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2655 gen_op_iwmmxt_set_mup();
2656 gen_op_iwmmxt_set_cup();
2658 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2659 case 0x114: case 0x514: case 0x914: case 0xd14:
2660 if (((insn
>> 22) & 3) == 0)
2662 wrd
= (insn
>> 12) & 0xf;
2663 rd0
= (insn
>> 16) & 0xf;
2664 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2665 tmp
= tcg_temp_new_i32();
2666 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2667 tcg_temp_free_i32(tmp
);
2670 switch ((insn
>> 22) & 3) {
2672 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2675 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2678 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2681 tcg_temp_free_i32(tmp
);
2682 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2683 gen_op_iwmmxt_set_mup();
2684 gen_op_iwmmxt_set_cup();
2686 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2687 case 0x314: case 0x714: case 0xb14: case 0xf14:
2688 if (((insn
>> 22) & 3) == 0)
2690 wrd
= (insn
>> 12) & 0xf;
2691 rd0
= (insn
>> 16) & 0xf;
2692 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2693 tmp
= tcg_temp_new_i32();
2694 switch ((insn
>> 22) & 3) {
2696 if (gen_iwmmxt_shift(insn
, 0xf, tmp
)) {
2697 tcg_temp_free_i32(tmp
);
2700 gen_helper_iwmmxt_rorw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2703 if (gen_iwmmxt_shift(insn
, 0x1f, tmp
)) {
2704 tcg_temp_free_i32(tmp
);
2707 gen_helper_iwmmxt_rorl(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2710 if (gen_iwmmxt_shift(insn
, 0x3f, tmp
)) {
2711 tcg_temp_free_i32(tmp
);
2714 gen_helper_iwmmxt_rorq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2717 tcg_temp_free_i32(tmp
);
2718 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2719 gen_op_iwmmxt_set_mup();
2720 gen_op_iwmmxt_set_cup();
2722 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2723 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2724 wrd
= (insn
>> 12) & 0xf;
2725 rd0
= (insn
>> 16) & 0xf;
2726 rd1
= (insn
>> 0) & 0xf;
2727 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2728 switch ((insn
>> 22) & 3) {
2730 if (insn
& (1 << 21))
2731 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
2733 gen_op_iwmmxt_minub_M0_wRn(rd1
);
2736 if (insn
& (1 << 21))
2737 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
2739 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
2742 if (insn
& (1 << 21))
2743 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
2745 gen_op_iwmmxt_minul_M0_wRn(rd1
);
2750 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2751 gen_op_iwmmxt_set_mup();
2753 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2754 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2755 wrd
= (insn
>> 12) & 0xf;
2756 rd0
= (insn
>> 16) & 0xf;
2757 rd1
= (insn
>> 0) & 0xf;
2758 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2759 switch ((insn
>> 22) & 3) {
2761 if (insn
& (1 << 21))
2762 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
2764 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
2767 if (insn
& (1 << 21))
2768 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
2770 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
2773 if (insn
& (1 << 21))
2774 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
2776 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
2781 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2782 gen_op_iwmmxt_set_mup();
2784 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2785 case 0x402: case 0x502: case 0x602: case 0x702:
2786 wrd
= (insn
>> 12) & 0xf;
2787 rd0
= (insn
>> 16) & 0xf;
2788 rd1
= (insn
>> 0) & 0xf;
2789 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2790 tmp
= tcg_const_i32((insn
>> 20) & 3);
2791 iwmmxt_load_reg(cpu_V1
, rd1
);
2792 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2793 tcg_temp_free_i32(tmp
);
2794 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2795 gen_op_iwmmxt_set_mup();
2797 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2798 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2799 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2800 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2801 wrd
= (insn
>> 12) & 0xf;
2802 rd0
= (insn
>> 16) & 0xf;
2803 rd1
= (insn
>> 0) & 0xf;
2804 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2805 switch ((insn
>> 20) & 0xf) {
2807 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2810 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2813 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2816 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2819 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2822 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2825 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2828 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2831 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2836 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2837 gen_op_iwmmxt_set_mup();
2838 gen_op_iwmmxt_set_cup();
2840 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2841 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2842 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2843 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2844 wrd
= (insn
>> 12) & 0xf;
2845 rd0
= (insn
>> 16) & 0xf;
2846 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2847 tmp
= tcg_const_i32(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2848 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2849 tcg_temp_free_i32(tmp
);
2850 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2851 gen_op_iwmmxt_set_mup();
2852 gen_op_iwmmxt_set_cup();
2854 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2855 case 0x418: case 0x518: case 0x618: case 0x718:
2856 case 0x818: case 0x918: case 0xa18: case 0xb18:
2857 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2858 wrd
= (insn
>> 12) & 0xf;
2859 rd0
= (insn
>> 16) & 0xf;
2860 rd1
= (insn
>> 0) & 0xf;
2861 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2862 switch ((insn
>> 20) & 0xf) {
2864 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2867 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2870 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2873 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2876 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2879 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2882 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2885 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2888 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2893 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2894 gen_op_iwmmxt_set_mup();
2895 gen_op_iwmmxt_set_cup();
2897 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2898 case 0x408: case 0x508: case 0x608: case 0x708:
2899 case 0x808: case 0x908: case 0xa08: case 0xb08:
2900 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2901 if (!(insn
& (1 << 20)) || ((insn
>> 22) & 3) == 0)
2903 wrd
= (insn
>> 12) & 0xf;
2904 rd0
= (insn
>> 16) & 0xf;
2905 rd1
= (insn
>> 0) & 0xf;
2906 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2907 switch ((insn
>> 22) & 3) {
2909 if (insn
& (1 << 21))
2910 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
2912 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
2915 if (insn
& (1 << 21))
2916 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
2918 gen_op_iwmmxt_packul_M0_wRn(rd1
);
2921 if (insn
& (1 << 21))
2922 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
2924 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
2927 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2928 gen_op_iwmmxt_set_mup();
2929 gen_op_iwmmxt_set_cup();
2931 case 0x201: case 0x203: case 0x205: case 0x207:
2932 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2933 case 0x211: case 0x213: case 0x215: case 0x217:
2934 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2935 wrd
= (insn
>> 5) & 0xf;
2936 rd0
= (insn
>> 12) & 0xf;
2937 rd1
= (insn
>> 0) & 0xf;
2938 if (rd0
== 0xf || rd1
== 0xf)
2940 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2941 tmp
= load_reg(s
, rd0
);
2942 tmp2
= load_reg(s
, rd1
);
2943 switch ((insn
>> 16) & 0xf) {
2944 case 0x0: /* TMIA */
2945 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2947 case 0x8: /* TMIAPH */
2948 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2950 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2951 if (insn
& (1 << 16))
2952 tcg_gen_shri_i32(tmp
, tmp
, 16);
2953 if (insn
& (1 << 17))
2954 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2955 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2958 tcg_temp_free_i32(tmp2
);
2959 tcg_temp_free_i32(tmp
);
2962 tcg_temp_free_i32(tmp2
);
2963 tcg_temp_free_i32(tmp
);
2964 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2965 gen_op_iwmmxt_set_mup();
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                               /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                               /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                               /* MIABB */
        case 0xd:                               /* MIABT */
        case 0xe:                               /* MIATB */
        case 0xf:                               /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {             /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
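/* Note on the accumulator access format above: the XScale internal
 * accumulator acc0 is 40 bits wide, which is why the MRA path masks the
 * value written to rdhi down to its low 8 bits (bits [39:32] of acc0).
 */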
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            goto illegal_op; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
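/* Illustrative example (not from the original source): for the
 * destination register, VFP_SREG_D() builds Sd = (Vd << 1) | D from
 * insn bits [15:12] (Vd) and bit 22 (D), so Vd == 0b0011 with D == 1
 * decodes to s7; VFP_DREG_D() instead builds Dd = (D << 4) | Vd when
 * VFP3 provides 32 double registers.
 */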
/* Move between integer and VFP cores.  */
static TCGv_i32 gen_vfp_mrs(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv_i32 tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
3096 static int handle_vsel(uint32_t insn
, uint32_t rd
, uint32_t rn
, uint32_t rm
,
3099 uint32_t cc
= extract32(insn
, 20, 2);
3102 TCGv_i64 frn
, frm
, dest
;
3103 TCGv_i64 tmp
, zero
, zf
, nf
, vf
;
3105 zero
= tcg_const_i64(0);
3107 frn
= tcg_temp_new_i64();
3108 frm
= tcg_temp_new_i64();
3109 dest
= tcg_temp_new_i64();
3111 zf
= tcg_temp_new_i64();
3112 nf
= tcg_temp_new_i64();
3113 vf
= tcg_temp_new_i64();
3115 tcg_gen_extu_i32_i64(zf
, cpu_ZF
);
3116 tcg_gen_ext_i32_i64(nf
, cpu_NF
);
3117 tcg_gen_ext_i32_i64(vf
, cpu_VF
);
3119 tcg_gen_ld_f64(frn
, cpu_env
, vfp_reg_offset(dp
, rn
));
3120 tcg_gen_ld_f64(frm
, cpu_env
, vfp_reg_offset(dp
, rm
));
3123 tcg_gen_movcond_i64(TCG_COND_EQ
, dest
, zf
, zero
,
3127 tcg_gen_movcond_i64(TCG_COND_LT
, dest
, vf
, zero
,
3130 case 2: /* ge: N == V -> N ^ V == 0 */
3131 tmp
= tcg_temp_new_i64();
3132 tcg_gen_xor_i64(tmp
, vf
, nf
);
3133 tcg_gen_movcond_i64(TCG_COND_GE
, dest
, tmp
, zero
,
3135 tcg_temp_free_i64(tmp
);
3137 case 3: /* gt: !Z && N == V */
3138 tcg_gen_movcond_i64(TCG_COND_NE
, dest
, zf
, zero
,
3140 tmp
= tcg_temp_new_i64();
3141 tcg_gen_xor_i64(tmp
, vf
, nf
);
3142 tcg_gen_movcond_i64(TCG_COND_GE
, dest
, tmp
, zero
,
3144 tcg_temp_free_i64(tmp
);
3147 tcg_gen_st_f64(dest
, cpu_env
, vfp_reg_offset(dp
, rd
));
3148 tcg_temp_free_i64(frn
);
3149 tcg_temp_free_i64(frm
);
3150 tcg_temp_free_i64(dest
);
3152 tcg_temp_free_i64(zf
);
3153 tcg_temp_free_i64(nf
);
3154 tcg_temp_free_i64(vf
);
3156 tcg_temp_free_i64(zero
);
3158 TCGv_i32 frn
, frm
, dest
;
3161 zero
= tcg_const_i32(0);
3163 frn
= tcg_temp_new_i32();
3164 frm
= tcg_temp_new_i32();
3165 dest
= tcg_temp_new_i32();
3166 tcg_gen_ld_f32(frn
, cpu_env
, vfp_reg_offset(dp
, rn
));
3167 tcg_gen_ld_f32(frm
, cpu_env
, vfp_reg_offset(dp
, rm
));
3170 tcg_gen_movcond_i32(TCG_COND_EQ
, dest
, cpu_ZF
, zero
,
3174 tcg_gen_movcond_i32(TCG_COND_LT
, dest
, cpu_VF
, zero
,
3177 case 2: /* ge: N == V -> N ^ V == 0 */
3178 tmp
= tcg_temp_new_i32();
3179 tcg_gen_xor_i32(tmp
, cpu_VF
, cpu_NF
);
3180 tcg_gen_movcond_i32(TCG_COND_GE
, dest
, tmp
, zero
,
3182 tcg_temp_free_i32(tmp
);
3184 case 3: /* gt: !Z && N == V */
3185 tcg_gen_movcond_i32(TCG_COND_NE
, dest
, cpu_ZF
, zero
,
3187 tmp
= tcg_temp_new_i32();
3188 tcg_gen_xor_i32(tmp
, cpu_VF
, cpu_NF
);
3189 tcg_gen_movcond_i32(TCG_COND_GE
, dest
, tmp
, zero
,
3191 tcg_temp_free_i32(tmp
);
3194 tcg_gen_st_f32(dest
, cpu_env
, vfp_reg_offset(dp
, rd
));
3195 tcg_temp_free_i32(frn
);
3196 tcg_temp_free_i32(frm
);
3197 tcg_temp_free_i32(dest
);
3199 tcg_temp_free_i32(zero
);
static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
                            uint32_t rm, uint32_t dp)
{
    uint32_t vmin = extract32(insn, 6, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);

    if (dp) {
        TCGv_i64 frn, frm, dest;

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);
    } else {
        TCGv_i32 frn, frm, dest;

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();

        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnums(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    tcg_temp_free_ptr(fpst);
    return 0;
}
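/* The minnum/maxnum helpers used above implement the IEEE 754-2008
 * minNum/maxNum semantics required by VMINNM/VMAXNM: when exactly one
 * operand is a quiet NaN, the other (numeric) operand is returned.
 */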
static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                        int rounding)
{
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode;

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rints(tcg_res, tcg_op, fpst);
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return 0;
}
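/* handle_vrint() above and handle_vcvt() below share the same pattern
 * for temporarily overriding the FP rounding mode: the first
 * gen_helper_set_rmode() call installs the encoded mode and leaves the
 * previous mode in tcg_rmode, and the second call restores it once the
 * operation has been emitted.
 */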
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};

static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return 1;
    }

    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    if ((insn & 0x0f800e50) == 0x0e000a00) {
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}
3394 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3395 (ie. an undefined instruction). */
3396 static int disas_vfp_insn(DisasContext
*s
, uint32_t insn
)
3398 uint32_t rd
, rn
, rm
, op
, i
, n
, offset
, delta_d
, delta_m
, bank_mask
;
3404 if (!arm_dc_feature(s
, ARM_FEATURE_VFP
)) {
3408 /* FIXME: this access check should not take precedence over UNDEF
3409 * for invalid encodings; we will generate incorrect syndrome information
3410 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3412 if (s
->fp_excp_el
) {
3413 gen_exception_insn(s
, 4, EXCP_UDEF
,
3414 syn_fp_access_trap(1, 0xe, false), s
->fp_excp_el
);
3418 if (!s
->vfp_enabled
) {
3419 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3420 if ((insn
& 0x0fe00fff) != 0x0ee00a10)
3422 rn
= (insn
>> 16) & 0xf;
3423 if (rn
!= ARM_VFP_FPSID
&& rn
!= ARM_VFP_FPEXC
&& rn
!= ARM_VFP_MVFR2
3424 && rn
!= ARM_VFP_MVFR1
&& rn
!= ARM_VFP_MVFR0
) {
3429 if (extract32(insn
, 28, 4) == 0xf) {
3430 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3431 * only used in v8 and above.
3433 return disas_vfp_v8_insn(s
, insn
);
3436 dp
= ((insn
& 0xf00) == 0xb00);
3437 switch ((insn
>> 24) & 0xf) {
3439 if (insn
& (1 << 4)) {
3440 /* single register transfer */
3441 rd
= (insn
>> 12) & 0xf;
3446 VFP_DREG_N(rn
, insn
);
3449 if (insn
& 0x00c00060
3450 && !arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
3454 pass
= (insn
>> 21) & 1;
3455 if (insn
& (1 << 22)) {
3457 offset
= ((insn
>> 5) & 3) * 8;
3458 } else if (insn
& (1 << 5)) {
3460 offset
= (insn
& (1 << 6)) ? 16 : 0;
3465 if (insn
& ARM_CP_RW_BIT
) {
3467 tmp
= neon_load_reg(rn
, pass
);
3471 tcg_gen_shri_i32(tmp
, tmp
, offset
);
3472 if (insn
& (1 << 23))
3478 if (insn
& (1 << 23)) {
3480 tcg_gen_shri_i32(tmp
, tmp
, 16);
3486 tcg_gen_sari_i32(tmp
, tmp
, 16);
3495 store_reg(s
, rd
, tmp
);
3498 tmp
= load_reg(s
, rd
);
3499 if (insn
& (1 << 23)) {
3501 int vec_size
= pass
? 16 : 8;
3502 tcg_gen_gvec_dup_i32(size
, neon_reg_offset(rn
, 0),
3503 vec_size
, vec_size
, tmp
);
3504 tcg_temp_free_i32(tmp
);
3509 tmp2
= neon_load_reg(rn
, pass
);
3510 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, offset
, 8);
3511 tcg_temp_free_i32(tmp2
);
3514 tmp2
= neon_load_reg(rn
, pass
);
3515 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, offset
, 16);
3516 tcg_temp_free_i32(tmp2
);
3521 neon_store_reg(rn
, pass
, tmp
);
3525 if ((insn
& 0x6f) != 0x00)
3527 rn
= VFP_SREG_N(insn
);
3528 if (insn
& ARM_CP_RW_BIT
) {
3530 if (insn
& (1 << 21)) {
3531 /* system register */
3536 /* VFP2 allows access to FSID from userspace.
3537 VFP3 restricts all id registers to privileged
3540 && arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3543 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3548 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3550 case ARM_VFP_FPINST
:
3551 case ARM_VFP_FPINST2
:
3552 /* Not present in VFP3. */
3554 || arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3557 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3561 tmp
= load_cpu_field(vfp
.xregs
[ARM_VFP_FPSCR
]);
3562 tcg_gen_andi_i32(tmp
, tmp
, 0xf0000000);
3564 tmp
= tcg_temp_new_i32();
3565 gen_helper_vfp_get_fpscr(tmp
, cpu_env
);
3569 if (!arm_dc_feature(s
, ARM_FEATURE_V8
)) {
3576 || !arm_dc_feature(s
, ARM_FEATURE_MVFR
)) {
3579 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3585 gen_mov_F0_vreg(0, rn
);
3586 tmp
= gen_vfp_mrs();
3589 /* Set the 4 flag bits in the CPSR. */
3591 tcg_temp_free_i32(tmp
);
3593 store_reg(s
, rd
, tmp
);
3597 if (insn
& (1 << 21)) {
3599 /* system register */
3604 /* Writes are ignored. */
3607 tmp
= load_reg(s
, rd
);
3608 gen_helper_vfp_set_fpscr(cpu_env
, tmp
);
3609 tcg_temp_free_i32(tmp
);
3615 /* TODO: VFP subarchitecture support.
3616 * For now, keep the EN bit only */
3617 tmp
= load_reg(s
, rd
);
3618 tcg_gen_andi_i32(tmp
, tmp
, 1 << 30);
3619 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
3622 case ARM_VFP_FPINST
:
3623 case ARM_VFP_FPINST2
:
3627 tmp
= load_reg(s
, rd
);
3628 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
3634 tmp
= load_reg(s
, rd
);
3636 gen_mov_vreg_F0(0, rn
);
3641 /* data processing */
3642 /* The opcode is in bits 23, 21, 20 and 6. */
3643 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
3647 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
3649 /* rn is register number */
3650 VFP_DREG_N(rn
, insn
);
3653 if (op
== 15 && (rn
== 15 || ((rn
& 0x1c) == 0x18) ||
3654 ((rn
& 0x1e) == 0x6))) {
3655 /* Integer or single/half precision destination. */
3656 rd
= VFP_SREG_D(insn
);
3658 VFP_DREG_D(rd
, insn
);
3661 (((rn
& 0x1c) == 0x10) || ((rn
& 0x14) == 0x14) ||
3662 ((rn
& 0x1e) == 0x4))) {
3663 /* VCVT from int or half precision is always from S reg
3664 * regardless of dp bit. VCVT with immediate frac_bits
3665 * has same format as SREG_M.
3667 rm
= VFP_SREG_M(insn
);
3669 VFP_DREG_M(rm
, insn
);
3672 rn
= VFP_SREG_N(insn
);
3673 if (op
== 15 && rn
== 15) {
3674 /* Double precision destination. */
3675 VFP_DREG_D(rd
, insn
);
3677 rd
= VFP_SREG_D(insn
);
3679 /* NB that we implicitly rely on the encoding for the frac_bits
3680 * in VCVT of fixed to float being the same as that of an SREG_M
3682 rm
= VFP_SREG_M(insn
);
3685 veclen
= s
->vec_len
;
3686 if (op
== 15 && rn
> 3)
3689 /* Shut up compiler warnings. */
3700 /* Figure out what type of vector operation this is. */
3701 if ((rd
& bank_mask
) == 0) {
3706 delta_d
= (s
->vec_stride
>> 1) + 1;
3708 delta_d
= s
->vec_stride
+ 1;
3710 if ((rm
& bank_mask
) == 0) {
3711 /* mixed scalar/vector */
3720 /* Load the initial operands. */
3725 /* Integer source */
3726 gen_mov_F0_vreg(0, rm
);
3731 gen_mov_F0_vreg(dp
, rd
);
3732 gen_mov_F1_vreg(dp
, rm
);
3736 /* Compare with zero */
3737 gen_mov_F0_vreg(dp
, rd
);
3748 /* Source and destination the same. */
3749 gen_mov_F0_vreg(dp
, rd
);
3755 /* VCVTB, VCVTT: only present with the halfprec extension
3756 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3757 * (we choose to UNDEF)
3759 if ((dp
&& !arm_dc_feature(s
, ARM_FEATURE_V8
)) ||
3760 !arm_dc_feature(s
, ARM_FEATURE_VFP_FP16
)) {
3763 if (!extract32(rn
, 1, 1)) {
3764 /* Half precision source. */
3765 gen_mov_F0_vreg(0, rm
);
3768 /* Otherwise fall through */
3770 /* One source operand. */
3771 gen_mov_F0_vreg(dp
, rm
);
3775 /* Two source operands. */
3776 gen_mov_F0_vreg(dp
, rn
);
3777 gen_mov_F1_vreg(dp
, rm
);
3781 /* Perform the calculation. */
3783 case 0: /* VMLA: fd + (fn * fm) */
3784 /* Note that order of inputs to the add matters for NaNs */
3786 gen_mov_F0_vreg(dp
, rd
);
3789 case 1: /* VMLS: fd + -(fn * fm) */
3792 gen_mov_F0_vreg(dp
, rd
);
3795 case 2: /* VNMLS: -fd + (fn * fm) */
3796 /* Note that it isn't valid to replace (-A + B) with (B - A)
3797 * or similar plausible looking simplifications
3798 * because this will give wrong results for NaNs.
3801 gen_mov_F0_vreg(dp
, rd
);
3805 case 3: /* VNMLA: -fd + -(fn * fm) */
3808 gen_mov_F0_vreg(dp
, rd
);
3812 case 4: /* mul: fn * fm */
3815 case 5: /* nmul: -(fn * fm) */
3819 case 6: /* add: fn + fm */
3822 case 7: /* sub: fn - fm */
3825 case 8: /* div: fn / fm */
3828 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3829 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3830 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3831 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3832 /* These are fused multiply-add, and must be done as one
3833 * floating point operation with no rounding between the
3834 * multiplication and addition steps.
3835 * NB that doing the negations here as separate steps is
3836 * correct : an input NaN should come out with its sign bit
3837 * flipped if it is a negated-input.
3839 if (!arm_dc_feature(s
, ARM_FEATURE_VFP4
)) {
3847 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
3849 frd
= tcg_temp_new_i64();
3850 tcg_gen_ld_f64(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
3853 gen_helper_vfp_negd(frd
, frd
);
3855 fpst
= get_fpstatus_ptr(0);
3856 gen_helper_vfp_muladdd(cpu_F0d
, cpu_F0d
,
3857 cpu_F1d
, frd
, fpst
);
3858 tcg_temp_free_ptr(fpst
);
3859 tcg_temp_free_i64(frd
);
3865 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
3867 frd
= tcg_temp_new_i32();
3868 tcg_gen_ld_f32(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
3870 gen_helper_vfp_negs(frd
, frd
);
3872 fpst
= get_fpstatus_ptr(0);
3873 gen_helper_vfp_muladds(cpu_F0s
, cpu_F0s
,
3874 cpu_F1s
, frd
, fpst
);
3875 tcg_temp_free_ptr(fpst
);
3876 tcg_temp_free_i32(frd
);
3879 case 14: /* fconst */
3880 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3884 n
= (insn
<< 12) & 0x80000000;
3885 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
3892 tcg_gen_movi_i64(cpu_F0d
, ((uint64_t)n
) << 32);
3899 tcg_gen_movi_i32(cpu_F0s
, n
);
3902 case 15: /* extension space */
3916 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3918 TCGv_ptr fpst
= get_fpstatus_ptr(false);
3919 TCGv_i32 ahp_mode
= get_ahp_flag();
3920 tmp
= gen_vfp_mrs();
3921 tcg_gen_ext16u_i32(tmp
, tmp
);
3923 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d
, tmp
,
3926 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
,
3929 tcg_temp_free_i32(ahp_mode
);
3930 tcg_temp_free_ptr(fpst
);
3931 tcg_temp_free_i32(tmp
);
3934 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3936 TCGv_ptr fpst
= get_fpstatus_ptr(false);
3937 TCGv_i32 ahp
= get_ahp_flag();
3938 tmp
= gen_vfp_mrs();
3939 tcg_gen_shri_i32(tmp
, tmp
, 16);
3941 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d
, tmp
,
3944 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
,
3947 tcg_temp_free_i32(tmp
);
3948 tcg_temp_free_i32(ahp
);
3949 tcg_temp_free_ptr(fpst
);
3952 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3954 TCGv_ptr fpst
= get_fpstatus_ptr(false);
3955 TCGv_i32 ahp
= get_ahp_flag();
3956 tmp
= tcg_temp_new_i32();
3959 gen_helper_vfp_fcvt_f64_to_f16(tmp
, cpu_F0d
,
3962 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
,
3965 tcg_temp_free_i32(ahp
);
3966 tcg_temp_free_ptr(fpst
);
3967 gen_mov_F0_vreg(0, rd
);
3968 tmp2
= gen_vfp_mrs();
3969 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
3970 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3971 tcg_temp_free_i32(tmp2
);
3975 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3977 TCGv_ptr fpst
= get_fpstatus_ptr(false);
3978 TCGv_i32 ahp
= get_ahp_flag();
3979 tmp
= tcg_temp_new_i32();
3981 gen_helper_vfp_fcvt_f64_to_f16(tmp
, cpu_F0d
,
3984 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
,
3987 tcg_temp_free_i32(ahp
);
3988 tcg_temp_free_ptr(fpst
);
3989 tcg_gen_shli_i32(tmp
, tmp
, 16);
3990 gen_mov_F0_vreg(0, rd
);
3991 tmp2
= gen_vfp_mrs();
3992 tcg_gen_ext16u_i32(tmp2
, tmp2
);
3993 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3994 tcg_temp_free_i32(tmp2
);
4007 case 11: /* cmpez */
4011 case 12: /* vrintr */
4013 TCGv_ptr fpst
= get_fpstatus_ptr(0);
4015 gen_helper_rintd(cpu_F0d
, cpu_F0d
, fpst
);
4017 gen_helper_rints(cpu_F0s
, cpu_F0s
, fpst
);
4019 tcg_temp_free_ptr(fpst
);
4022 case 13: /* vrintz */
4024 TCGv_ptr fpst
= get_fpstatus_ptr(0);
4026 tcg_rmode
= tcg_const_i32(float_round_to_zero
);
4027 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, fpst
);
4029 gen_helper_rintd(cpu_F0d
, cpu_F0d
, fpst
);
4031 gen_helper_rints(cpu_F0s
, cpu_F0s
, fpst
);
4033 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, fpst
);
4034 tcg_temp_free_i32(tcg_rmode
);
4035 tcg_temp_free_ptr(fpst
);
4038 case 14: /* vrintx */
4040 TCGv_ptr fpst
= get_fpstatus_ptr(0);
4042 gen_helper_rintd_exact(cpu_F0d
, cpu_F0d
, fpst
);
4044 gen_helper_rints_exact(cpu_F0s
, cpu_F0s
, fpst
);
4046 tcg_temp_free_ptr(fpst
);
4049 case 15: /* single<->double conversion */
4051 gen_helper_vfp_fcvtsd(cpu_F0s
, cpu_F0d
, cpu_env
);
4053 gen_helper_vfp_fcvtds(cpu_F0d
, cpu_F0s
, cpu_env
);
4055 case 16: /* fuito */
4056 gen_vfp_uito(dp
, 0);
4058 case 17: /* fsito */
4059 gen_vfp_sito(dp
, 0);
4061 case 20: /* fshto */
4062 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
4065 gen_vfp_shto(dp
, 16 - rm
, 0);
4067 case 21: /* fslto */
4068 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
4071 gen_vfp_slto(dp
, 32 - rm
, 0);
4073 case 22: /* fuhto */
4074 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
4077 gen_vfp_uhto(dp
, 16 - rm
, 0);
4079 case 23: /* fulto */
4080 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
4083 gen_vfp_ulto(dp
, 32 - rm
, 0);
4085 case 24: /* ftoui */
4086 gen_vfp_toui(dp
, 0);
4088 case 25: /* ftouiz */
4089 gen_vfp_touiz(dp
, 0);
4091 case 26: /* ftosi */
4092 gen_vfp_tosi(dp
, 0);
4094 case 27: /* ftosiz */
4095 gen_vfp_tosiz(dp
, 0);
4097 case 28: /* ftosh */
4098 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
4101 gen_vfp_tosh(dp
, 16 - rm
, 0);
4103 case 29: /* ftosl */
4104 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
4107 gen_vfp_tosl(dp
, 32 - rm
, 0);
4109 case 30: /* ftouh */
4110 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
4113 gen_vfp_touh(dp
, 16 - rm
, 0);
4115 case 31: /* ftoul */
4116 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
4119 gen_vfp_toul(dp
, 32 - rm
, 0);
4121 default: /* undefined */
4125 default: /* undefined */
4129 /* Write back the result. */
4130 if (op
== 15 && (rn
>= 8 && rn
<= 11)) {
4131 /* Comparison, do nothing. */
4132 } else if (op
== 15 && dp
&& ((rn
& 0x1c) == 0x18 ||
4133 (rn
& 0x1e) == 0x6)) {
4134 /* VCVT double to int: always integer result.
4135 * VCVT double to half precision is always a single
4138 gen_mov_vreg_F0(0, rd
);
4139 } else if (op
== 15 && rn
== 15) {
4141 gen_mov_vreg_F0(!dp
, rd
);
4143 gen_mov_vreg_F0(dp
, rd
);
4146 /* break out of the loop if we have finished */
4150 if (op
== 15 && delta_m
== 0) {
4151 /* single source one-many */
4153 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
4155 gen_mov_vreg_F0(dp
, rd
);
4159 /* Setup the next operands. */
4161 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
4165 /* One source operand. */
4166 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
4168 gen_mov_F0_vreg(dp
, rm
);
4170 /* Two source operands. */
4171 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
4173 gen_mov_F0_vreg(dp
, rn
);
4175 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
4177 gen_mov_F1_vreg(dp
, rm
);
4185 if ((insn
& 0x03e00000) == 0x00400000) {
4186 /* two-register transfer */
4187 rn
= (insn
>> 16) & 0xf;
4188 rd
= (insn
>> 12) & 0xf;
4190 VFP_DREG_M(rm
, insn
);
4192 rm
= VFP_SREG_M(insn
);
4195 if (insn
& ARM_CP_RW_BIT
) {
4198 gen_mov_F0_vreg(0, rm
* 2);
4199 tmp
= gen_vfp_mrs();
4200 store_reg(s
, rd
, tmp
);
4201 gen_mov_F0_vreg(0, rm
* 2 + 1);
4202 tmp
= gen_vfp_mrs();
4203 store_reg(s
, rn
, tmp
);
4205 gen_mov_F0_vreg(0, rm
);
4206 tmp
= gen_vfp_mrs();
4207 store_reg(s
, rd
, tmp
);
4208 gen_mov_F0_vreg(0, rm
+ 1);
4209 tmp
= gen_vfp_mrs();
4210 store_reg(s
, rn
, tmp
);
4215 tmp
= load_reg(s
, rd
);
4217 gen_mov_vreg_F0(0, rm
* 2);
4218 tmp
= load_reg(s
, rn
);
4220 gen_mov_vreg_F0(0, rm
* 2 + 1);
4222 tmp
= load_reg(s
, rd
);
4224 gen_mov_vreg_F0(0, rm
);
4225 tmp
= load_reg(s
, rn
);
4227 gen_mov_vreg_F0(0, rm
+ 1);
4232 rn
= (insn
>> 16) & 0xf;
4234 VFP_DREG_D(rd
, insn
);
4236 rd
= VFP_SREG_D(insn
);
4237 if ((insn
& 0x01200000) == 0x01000000) {
4238 /* Single load/store */
4239 offset
= (insn
& 0xff) << 2;
4240 if ((insn
& (1 << 23)) == 0)
4242 if (s
->thumb
&& rn
== 15) {
4243 /* This is actually UNPREDICTABLE */
4244 addr
= tcg_temp_new_i32();
4245 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
4247 addr
= load_reg(s
, rn
);
4249 tcg_gen_addi_i32(addr
, addr
, offset
);
4250 if (insn
& (1 << 20)) {
4251 gen_vfp_ld(s
, dp
, addr
);
4252 gen_mov_vreg_F0(dp
, rd
);
4254 gen_mov_F0_vreg(dp
, rd
);
4255 gen_vfp_st(s
, dp
, addr
);
4257 tcg_temp_free_i32(addr
);
4259 /* load/store multiple */
4260 int w
= insn
& (1 << 21);
4262 n
= (insn
>> 1) & 0x7f;
4266 if (w
&& !(((insn
>> 23) ^ (insn
>> 24)) & 1)) {
4267 /* P == U , W == 1 => UNDEF */
4270 if (n
== 0 || (rd
+ n
) > 32 || (dp
&& n
> 16)) {
4271 /* UNPREDICTABLE cases for bad immediates: we choose to
4272 * UNDEF to avoid generating huge numbers of TCG ops
4276 if (rn
== 15 && w
) {
4277 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4281 if (s
->thumb
&& rn
== 15) {
4282 /* This is actually UNPREDICTABLE */
4283 addr
= tcg_temp_new_i32();
4284 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
4286 addr
= load_reg(s
, rn
);
4288 if (insn
& (1 << 24)) /* pre-decrement */
4289 tcg_gen_addi_i32(addr
, addr
, -((insn
& 0xff) << 2));
4291 if (s
->v8m_stackcheck
&& rn
== 13 && w
) {
4293 * Here 'addr' is the lowest address we will store to,
4294 * and is either the old SP (if post-increment) or
4295 * the new SP (if pre-decrement). For post-increment
4296 * where the old value is below the limit and the new
4297 * value is above, it is UNKNOWN whether the limit check
4298 * triggers; we choose to trigger.
4300 gen_helper_v8m_stackcheck(cpu_env
, addr
);
4307 for (i
= 0; i
< n
; i
++) {
4308 if (insn
& ARM_CP_RW_BIT
) {
4310 gen_vfp_ld(s
, dp
, addr
);
4311 gen_mov_vreg_F0(dp
, rd
+ i
);
4314 gen_mov_F0_vreg(dp
, rd
+ i
);
4315 gen_vfp_st(s
, dp
, addr
);
4317 tcg_gen_addi_i32(addr
, addr
, offset
);
4321 if (insn
& (1 << 24))
4322 offset
= -offset
* n
;
4323 else if (dp
&& (insn
& 1))
4329 tcg_gen_addi_i32(addr
, addr
, offset
);
4330 store_reg(s
, rn
, addr
);
4332 tcg_temp_free_i32(addr
);
4338 /* Should never happen. */
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}

/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}

static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(is_singlestepping(s))) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
    }
}

static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
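/* gen_mulxy() above implements operand selection for the signed 16x16
 * multiplies (SMULxy and friends): x and y choose the top or bottom
 * halfword of t0 and t1 respectively, sign-extending it before the
 * 32-bit multiply.
 */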
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
        mask &= ~CPSR_T;
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
        mask &= ~CPSR_Q; /* V5TE in reality*/
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
        mask &= ~(CPSR_E | CPSR_GE);
    }
    if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
        mask &= ~CPSR_IT;
    }
    /* Mask out execution state and reserved bits.  */
    if (!spsr) {
        mask &= ~(CPSR_EXEC | CPSR_RESERVED);
    }
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}

/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
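/* msr_mask() above builds a byte-lane mask from the MSR field bits
 * (c, x, s, f -> bits 0..3) and then clears PSR bits that are not
 * architected on the emulated core or not writable at the current
 * privilege level; gen_set_psr() applies that mask to either the CPSR
 * or the SPSR.
 */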
4471 static bool msr_banked_access_decode(DisasContext
*s
, int r
, int sysm
, int rn
,
4472 int *tgtmode
, int *regno
)
4474 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4475 * the target mode and register number, and identify the various
4476 * unpredictable cases.
4477 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4478 * + executed in user mode
4479 * + using R15 as the src/dest register
4480 * + accessing an unimplemented register
4481 * + accessing a register that's inaccessible at current PL/security state*
4482 * + accessing a register that you could access with a different insn
4483 * We choose to UNDEF in all these cases.
4484 * Since we don't know which of the various AArch32 modes we are in
4485 * we have to defer some checks to runtime.
4486 * Accesses to Monitor mode registers from Secure EL1 (which implies
4487 * that EL3 is AArch64) must trap to EL3.
4489 * If the access checks fail this function will emit code to take
4490 * an exception and return false. Otherwise it will return true,
4491 * and set *tgtmode and *regno appropriately.
4493 int exc_target
= default_exception_el(s
);
4495 /* These instructions are present only in ARMv8, or in ARMv7 with the
4496 * Virtualization Extensions.
4498 if (!arm_dc_feature(s
, ARM_FEATURE_V8
) &&
4499 !arm_dc_feature(s
, ARM_FEATURE_EL2
)) {
4503 if (IS_USER(s
) || rn
== 15) {
4507 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4508 * of registers into (r, sysm).
4511 /* SPSRs for other modes */
4513 case 0xe: /* SPSR_fiq */
4514 *tgtmode
= ARM_CPU_MODE_FIQ
;
4516 case 0x10: /* SPSR_irq */
4517 *tgtmode
= ARM_CPU_MODE_IRQ
;
4519 case 0x12: /* SPSR_svc */
4520 *tgtmode
= ARM_CPU_MODE_SVC
;
4522 case 0x14: /* SPSR_abt */
4523 *tgtmode
= ARM_CPU_MODE_ABT
;
4525 case 0x16: /* SPSR_und */
4526 *tgtmode
= ARM_CPU_MODE_UND
;
4528 case 0x1c: /* SPSR_mon */
4529 *tgtmode
= ARM_CPU_MODE_MON
;
4531 case 0x1e: /* SPSR_hyp */
4532 *tgtmode
= ARM_CPU_MODE_HYP
;
4534 default: /* unallocated */
4537 /* We arbitrarily assign SPSR a register number of 16. */
4540 /* general purpose registers for other modes */
4542 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4543 *tgtmode
= ARM_CPU_MODE_USR
;
4546 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4547 *tgtmode
= ARM_CPU_MODE_FIQ
;
4550 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4551 *tgtmode
= ARM_CPU_MODE_IRQ
;
4552 *regno
= sysm
& 1 ? 13 : 14;
4554 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4555 *tgtmode
= ARM_CPU_MODE_SVC
;
4556 *regno
= sysm
& 1 ? 13 : 14;
4558 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4559 *tgtmode
= ARM_CPU_MODE_ABT
;
4560 *regno
= sysm
& 1 ? 13 : 14;
4562 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4563 *tgtmode
= ARM_CPU_MODE_UND
;
4564 *regno
= sysm
& 1 ? 13 : 14;
4566 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4567 *tgtmode
= ARM_CPU_MODE_MON
;
4568 *regno
= sysm
& 1 ? 13 : 14;
4570 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4571 *tgtmode
= ARM_CPU_MODE_HYP
;
4572 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4573 *regno
= sysm
& 1 ? 13 : 17;
4575 default: /* unallocated */
4580 /* Catch the 'accessing inaccessible register' cases we can detect
4581 * at translate time.
4584 case ARM_CPU_MODE_MON
:
4585 if (!arm_dc_feature(s
, ARM_FEATURE_EL3
) || s
->ns
) {
4588 if (s
->current_el
== 1) {
4589 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4590 * then accesses to Mon registers trap to EL3
4596 case ARM_CPU_MODE_HYP
:
4598 * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
4599 * (and so we can forbid accesses from EL2 or below). elr_hyp
4600 * can be accessed also from Hyp mode, so forbid accesses from
4603 if (!arm_dc_feature(s
, ARM_FEATURE_EL2
) || s
->current_el
< 2 ||
4604 (s
->current_el
< 3 && *regno
!= 17)) {
4615 /* If we get here then some access check did not pass */
4616 gen_exception_insn(s
, 4, EXCP_UDEF
, syn_uncategorized(), exc_target
);
static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}

static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}
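/* Both banked-register accessors above sync the condexec bits and the
 * PC before calling the helper, because the helper can raise an
 * exception at runtime; they then end the TB with DISAS_UPDATE since
 * CPU state may have been changed by the helper.
 */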
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();
    }
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}
/*
 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
 * only call the helper when running single threaded TCG code to ensure
 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
 * just skip this instruction. Currently the SEV/SEVL instructions
 * which are *one* of many ways to wake the CPU from WFE are not
 * implemented so we can't sleep like WFI does.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* yield */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->base.is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: return;
    }
}

/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  tcg_gen_smax_i32
#define gen_helper_neon_pmax_u32  tcg_gen_umax_i32
#define gen_helper_neon_pmin_s32  tcg_gen_smin_i32
#define gen_helper_neon_pmin_u32  tcg_gen_umin_i32

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
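/* The GEN_NEON_INTEGER_OP* macros above dispatch on ((size << 1) | u),
 * i.e. the element size in the upper bits and the unsigned flag in
 * bit 0, so cases 0..5 map to s8/u8/s16/u16/s32/u32; any other
 * combination makes the enclosing decoder return 1 (UNDEF).
 */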
static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}

static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0: gen_helper_neon_qunzip8(pd, pm); break;
        case 1: gen_helper_neon_qunzip16(pd, pm); break;
        case 2: gen_helper_neon_qunzip32(pd, pm); break;
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_unzip8(pd, pm); break;
        case 1: gen_helper_neon_unzip16(pd, pm); break;
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}

static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0: gen_helper_neon_qzip8(pd, pm); break;
        case 1: gen_helper_neon_qzip16(pd, pm); break;
        case 2: gen_helper_neon_qzip32(pd, pm); break;
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_zip8(pd, pm); break;
        case 1: gen_helper_neon_zip16(pd, pm); break;
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
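/*
 * Worked example for the 16-bit version: with t0 = a1:a0 and t1 = b1:b0
 * (high:low halfwords), the sequence above yields t0 = a0:b0 and
 * t1 = a1:b1; combined with the operand ordering at the VTRN call site
 * this produces the architectural element transpose.
 */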
} const neon_ls_element_type[11] = {

/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
5005 /* FIXME: this access check should not take precedence over UNDEF
5006 * for invalid encodings; we will generate incorrect syndrome information
5007 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5009 if (s
->fp_excp_el
) {
5010 gen_exception_insn(s
, 4, EXCP_UDEF
,
5011 syn_simd_access_trap(1, 0xe, false), s
->fp_excp_el
);
5015 if (!s
->vfp_enabled
)
5017 VFP_DREG_D(rd
, insn
);
5018 rn
= (insn
>> 16) & 0xf;
5020 load
= (insn
& (1 << 21)) != 0;
5021 endian
= s
->be_data
;
5022 mmu_idx
= get_mem_index(s
);
5023 if ((insn
& (1 << 23)) == 0) {
5024 /* Load store all elements. */
5025 op
= (insn
>> 8) & 0xf;
5026 size
= (insn
>> 6) & 3;
5029 /* Catch UNDEF cases for bad values of align field */
5032 if (((insn
>> 5) & 1) == 1) {
5037 if (((insn
>> 4) & 3) == 3) {
5044 nregs
= neon_ls_element_type
[op
].nregs
;
5045 interleave
= neon_ls_element_type
[op
].interleave
;
5046 spacing
= neon_ls_element_type
[op
].spacing
;
5047 if (size
== 3 && (interleave
| spacing
) != 1) {
5050 /* For our purposes, bytes are always little-endian. */
5054 /* Consecutive little-endian elements from a single register
5055 * can be promoted to a larger little-endian operation.
5057 if (interleave
== 1 && endian
== MO_LE
) {
5060 tmp64
= tcg_temp_new_i64();
5061 addr
= tcg_temp_new_i32();
5062 tmp2
= tcg_const_i32(1 << size
);
5063 load_reg_var(s
, addr
, rn
);
5064 for (reg
= 0; reg
< nregs
; reg
++) {
5065 for (n
= 0; n
< 8 >> size
; n
++) {
5067 for (xs
= 0; xs
< interleave
; xs
++) {
5068 int tt
= rd
+ reg
+ spacing
* xs
;
5071 gen_aa32_ld_i64(s
, tmp64
, addr
, mmu_idx
, endian
| size
);
5072 neon_store_element64(tt
, n
, size
, tmp64
);
5074 neon_load_element64(tmp64
, tt
, n
, size
);
5075 gen_aa32_st_i64(s
, tmp64
, addr
, mmu_idx
, endian
| size
);
5077 tcg_gen_add_i32(addr
, addr
, tmp2
);
5081 tcg_temp_free_i32(addr
);
5082 tcg_temp_free_i32(tmp2
);
5083 tcg_temp_free_i64(tmp64
);
5084 stride
= nregs
* interleave
* 8;
5086 size
= (insn
>> 10) & 3;
5088 /* Load single element to all lanes. */
5089 int a
= (insn
>> 4) & 1;
5093 size
= (insn
>> 6) & 3;
5094 nregs
= ((insn
>> 8) & 3) + 1;
5097 if (nregs
!= 4 || a
== 0) {
5100 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
5103 if (nregs
== 1 && a
== 1 && size
== 0) {
5106 if (nregs
== 3 && a
== 1) {
5109 addr
= tcg_temp_new_i32();
5110 load_reg_var(s
, addr
, rn
);
5112 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
5113 * VLD2/3/4 to all lanes: bit 5 indicates register stride.
5115 stride
= (insn
& (1 << 5)) ? 2 : 1;
5116 vec_size
= nregs
== 1 ? stride
* 8 : 8;
5118 tmp
= tcg_temp_new_i32();
5119 for (reg
= 0; reg
< nregs
; reg
++) {
5120 gen_aa32_ld_i32(s
, tmp
, addr
, get_mem_index(s
),
5122 if ((rd
& 1) && vec_size
== 16) {
5123 /* We cannot write 16 bytes at once because the
5124 * destination is unaligned.
5126 tcg_gen_gvec_dup_i32(size
, neon_reg_offset(rd
, 0),
5128 tcg_gen_gvec_mov(0, neon_reg_offset(rd
+ 1, 0),
5129 neon_reg_offset(rd
, 0), 8, 8);
5131 tcg_gen_gvec_dup_i32(size
, neon_reg_offset(rd
, 0),
5132 vec_size
, vec_size
, tmp
);
5134 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
5137 tcg_temp_free_i32(tmp
);
5138 tcg_temp_free_i32(addr
);
5139 stride
= (1 << size
) * nregs
;
5141 /* Single element. */
5142 int idx
= (insn
>> 4) & 0xf;
5146 reg_idx
= (insn
>> 5) & 7;
5150 reg_idx
= (insn
>> 6) & 3;
5151 stride
= (insn
& (1 << 5)) ? 2 : 1;
5154 reg_idx
= (insn
>> 7) & 1;
5155 stride
= (insn
& (1 << 6)) ? 2 : 1;
5160 nregs
= ((insn
>> 8) & 3) + 1;
5161 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5164 if (((idx
& (1 << size
)) != 0) ||
5165 (size
== 2 && ((idx
& 3) == 1 || (idx
& 3) == 2))) {
5170 if ((idx
& 1) != 0) {
5175 if (size
== 2 && (idx
& 2) != 0) {
5180 if ((size
== 2) && ((idx
& 3) == 3)) {
5187 if ((rd
+ stride
* (nregs
- 1)) > 31) {
5188 /* Attempts to write off the end of the register file
5189 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5190 * the neon_load_reg() would write off the end of the array.
5194 tmp
= tcg_temp_new_i32();
5195 addr
= tcg_temp_new_i32();
5196 load_reg_var(s
, addr
, rn
);
5197 for (reg
= 0; reg
< nregs
; reg
++) {
5199 gen_aa32_ld_i32(s
, tmp
, addr
, get_mem_index(s
),
5201 neon_store_element(rd
, reg_idx
, size
, tmp
);
5202 } else { /* Store */
5203 neon_load_element(tmp
, rd
, reg_idx
, size
);
5204 gen_aa32_st_i32(s
, tmp
, addr
, get_mem_index(s
),
5208 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
5210 tcg_temp_free_i32(addr
);
5211 tcg_temp_free_i32(tmp
);
5212 stride
= nregs
* (1 << size
);
5218 base
= load_reg(s
, rn
);
5220 tcg_gen_addi_i32(base
, base
, stride
);
5223 index
= load_reg(s
, rm
);
5224 tcg_gen_add_i32(base
, base
, index
);
5225 tcg_temp_free_i32(index
);
5227 store_reg(s
, rn
, base
);
static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_extrl_i64_i32(dest, src); break;
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    }
}

static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            }
        }
    }
}

static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        }
    }
    tcg_temp_free_i32(src);
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2: tcg_gen_neg_i64(var, var); break;
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}

static void gen_neon_narrow_op(int op, int u, int size,
                               TCGv_i32 dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLS */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
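/*
 * Each entry above has bit n set if the op accepts size value n; the
 * decoder checks (neon_3r_sizes[op] & (1 << size)) and treats a clear
 * bit as UNDEF.  For example 0x7 allows the 8/16/32-bit forms, 0xf also
 * allows the 64-bit form, and 0x5 marks entries where size bit 1 selects
 * the operation rather than the element width.
 */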
5488 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
5489 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
5492 #define NEON_2RM_VREV64 0
5493 #define NEON_2RM_VREV32 1
5494 #define NEON_2RM_VREV16 2
5495 #define NEON_2RM_VPADDL 4
5496 #define NEON_2RM_VPADDL_U 5
5497 #define NEON_2RM_AESE 6 /* Includes AESD */
5498 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
5499 #define NEON_2RM_VCLS 8
5500 #define NEON_2RM_VCLZ 9
5501 #define NEON_2RM_VCNT 10
5502 #define NEON_2RM_VMVN 11
5503 #define NEON_2RM_VPADAL 12
5504 #define NEON_2RM_VPADAL_U 13
5505 #define NEON_2RM_VQABS 14
5506 #define NEON_2RM_VQNEG 15
5507 #define NEON_2RM_VCGT0 16
5508 #define NEON_2RM_VCGE0 17
5509 #define NEON_2RM_VCEQ0 18
5510 #define NEON_2RM_VCLE0 19
5511 #define NEON_2RM_VCLT0 20
5512 #define NEON_2RM_SHA1H 21
5513 #define NEON_2RM_VABS 22
5514 #define NEON_2RM_VNEG 23
5515 #define NEON_2RM_VCGT0_F 24
5516 #define NEON_2RM_VCGE0_F 25
5517 #define NEON_2RM_VCEQ0_F 26
5518 #define NEON_2RM_VCLE0_F 27
5519 #define NEON_2RM_VCLT0_F 28
5520 #define NEON_2RM_VABS_F 30
5521 #define NEON_2RM_VNEG_F 31
5522 #define NEON_2RM_VSWP 32
5523 #define NEON_2RM_VTRN 33
5524 #define NEON_2RM_VUZP 34
5525 #define NEON_2RM_VZIP 35
5526 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
5527 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
5528 #define NEON_2RM_VSHLL 38
5529 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
5530 #define NEON_2RM_VRINTN 40
5531 #define NEON_2RM_VRINTX 41
5532 #define NEON_2RM_VRINTA 42
5533 #define NEON_2RM_VRINTZ 43
5534 #define NEON_2RM_VCVT_F16_F32 44
5535 #define NEON_2RM_VRINTM 45
5536 #define NEON_2RM_VCVT_F32_F16 46
5537 #define NEON_2RM_VRINTP 47
5538 #define NEON_2RM_VCVTAU 48
5539 #define NEON_2RM_VCVTAS 49
5540 #define NEON_2RM_VCVTNU 50
5541 #define NEON_2RM_VCVTNS 51
5542 #define NEON_2RM_VCVTPU 52
5543 #define NEON_2RM_VCVTPS 53
5544 #define NEON_2RM_VCVTMU 54
5545 #define NEON_2RM_VCVTMS 55
5546 #define NEON_2RM_VRECPE 56
5547 #define NEON_2RM_VRSQRTE 57
5548 #define NEON_2RM_VRECPE_F 58
5549 #define NEON_2RM_VRSQRTE_F 59
5550 #define NEON_2RM_VCVT_FS 60
5551 #define NEON_2RM_VCVT_FU 61
5552 #define NEON_2RM_VCVT_SF 62
5553 #define NEON_2RM_VCVT_UF 63
static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
            op == NEON_2RM_VRINTM ||
            (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
            op >= NEON_2RM_VRECPE_F);
}

static bool neon_2rm_is_v8_op(int op)
{
    /* Return true if this neon 2reg-misc op is ARMv8 and up */
    switch (op) {
    case NEON_2RM_VRINTN:
    case NEON_2RM_VRINTA:
    case NEON_2RM_VRINTM:
    case NEON_2RM_VRINTP:
    case NEON_2RM_VRINTZ:
    case NEON_2RM_VRINTX:
    case NEON_2RM_VCVTAU:
    case NEON_2RM_VCVTAS:
    case NEON_2RM_VCVTNU:
    case NEON_2RM_VCVTNS:
    case NEON_2RM_VCVTPU:
    case NEON_2RM_VCVTPS:
    case NEON_2RM_VCVTMU:
    case NEON_2RM_VCVTMS:
        return true;
    default:
        return false;
    }
}
5589 /* Each entry in this array has bit n set if the insn allows
5590 * size value n (otherwise it will UNDEF). Since unallocated
5591 * op values will have no bits set they always UNDEF.
5593 static const uint8_t neon_2rm_sizes
[] = {
5594 [NEON_2RM_VREV64
] = 0x7,
5595 [NEON_2RM_VREV32
] = 0x3,
5596 [NEON_2RM_VREV16
] = 0x1,
5597 [NEON_2RM_VPADDL
] = 0x7,
5598 [NEON_2RM_VPADDL_U
] = 0x7,
5599 [NEON_2RM_AESE
] = 0x1,
5600 [NEON_2RM_AESMC
] = 0x1,
5601 [NEON_2RM_VCLS
] = 0x7,
5602 [NEON_2RM_VCLZ
] = 0x7,
5603 [NEON_2RM_VCNT
] = 0x1,
5604 [NEON_2RM_VMVN
] = 0x1,
5605 [NEON_2RM_VPADAL
] = 0x7,
5606 [NEON_2RM_VPADAL_U
] = 0x7,
5607 [NEON_2RM_VQABS
] = 0x7,
5608 [NEON_2RM_VQNEG
] = 0x7,
5609 [NEON_2RM_VCGT0
] = 0x7,
5610 [NEON_2RM_VCGE0
] = 0x7,
5611 [NEON_2RM_VCEQ0
] = 0x7,
5612 [NEON_2RM_VCLE0
] = 0x7,
5613 [NEON_2RM_VCLT0
] = 0x7,
5614 [NEON_2RM_SHA1H
] = 0x4,
5615 [NEON_2RM_VABS
] = 0x7,
5616 [NEON_2RM_VNEG
] = 0x7,
5617 [NEON_2RM_VCGT0_F
] = 0x4,
5618 [NEON_2RM_VCGE0_F
] = 0x4,
5619 [NEON_2RM_VCEQ0_F
] = 0x4,
5620 [NEON_2RM_VCLE0_F
] = 0x4,
5621 [NEON_2RM_VCLT0_F
] = 0x4,
5622 [NEON_2RM_VABS_F
] = 0x4,
5623 [NEON_2RM_VNEG_F
] = 0x4,
5624 [NEON_2RM_VSWP
] = 0x1,
5625 [NEON_2RM_VTRN
] = 0x7,
5626 [NEON_2RM_VUZP
] = 0x7,
5627 [NEON_2RM_VZIP
] = 0x7,
5628 [NEON_2RM_VMOVN
] = 0x7,
5629 [NEON_2RM_VQMOVN
] = 0x7,
5630 [NEON_2RM_VSHLL
] = 0x7,
5631 [NEON_2RM_SHA1SU1
] = 0x4,
5632 [NEON_2RM_VRINTN
] = 0x4,
5633 [NEON_2RM_VRINTX
] = 0x4,
5634 [NEON_2RM_VRINTA
] = 0x4,
5635 [NEON_2RM_VRINTZ
] = 0x4,
5636 [NEON_2RM_VCVT_F16_F32
] = 0x2,
5637 [NEON_2RM_VRINTM
] = 0x4,
5638 [NEON_2RM_VCVT_F32_F16
] = 0x2,
5639 [NEON_2RM_VRINTP
] = 0x4,
5640 [NEON_2RM_VCVTAU
] = 0x4,
5641 [NEON_2RM_VCVTAS
] = 0x4,
5642 [NEON_2RM_VCVTNU
] = 0x4,
5643 [NEON_2RM_VCVTNS
] = 0x4,
5644 [NEON_2RM_VCVTPU
] = 0x4,
5645 [NEON_2RM_VCVTPS
] = 0x4,
5646 [NEON_2RM_VCVTMU
] = 0x4,
5647 [NEON_2RM_VCVTMS
] = 0x4,
5648 [NEON_2RM_VRECPE
] = 0x4,
5649 [NEON_2RM_VRSQRTE
] = 0x4,
5650 [NEON_2RM_VRECPE_F
] = 0x4,
5651 [NEON_2RM_VRSQRTE_F
] = 0x4,
5652 [NEON_2RM_VCVT_FS
] = 0x4,
5653 [NEON_2RM_VCVT_FU
] = 0x4,
5654 [NEON_2RM_VCVT_SF
] = 0x4,
5655 [NEON_2RM_VCVT_UF
] = 0x4,
/* Expand v8.1 simd helper.  */
static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                         int q, int rd, int rn, int rm)
{
    if (dc_isar_feature(aa32_rdm, s)) {
        int opr_sz = (1 + q) * 8;
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm), cpu_env,
                           opr_sz, opr_sz, 0, fn);
        return 0;
    }
    return 1;
}
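/*
 * Used below for the v8.1 VQRDMLAH/VQRDMLSH encodings, roughly as
 *     return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16, q, rd, rn, rm);
 * (1 + q) * 8 selects a 64-bit (D) or 128-bit (Q) operand size for the
 * gvec expansion, and the "feature absent" return value of 1 makes the
 * caller treat the encoding as UNDEF.
 */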
/*
 * Expanders for VBitOps_VBIF, VBIT, VBSL.
 */
static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rm);
    tcg_gen_and_i64(rn, rn, rd);
    tcg_gen_xor_i64(rd, rm, rn);
}

static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);
    tcg_gen_and_i64(rn, rn, rm);
    tcg_gen_xor_i64(rd, rd, rn);
}

static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);
    tcg_gen_andc_i64(rn, rn, rm);
    tcg_gen_xor_i64(rd, rd, rn);
}
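/*
 * These differ only in which operand supplies the select mask.  For VBSL
 * the result is rd = rm ^ ((rn ^ rm) & rd), i.e. (rd & rn) | (~rd & rm):
 * destination bits that are 1 select rn, bits that are 0 select rm.
 * VBIT and VBIF apply the same two-XOR-and-AND trick with rm as the
 * mask, inserting rn where the mask is set (VBIT) or clear (VBIF).
 */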
static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rm);
    tcg_gen_and_vec(vece, rn, rn, rd);
    tcg_gen_xor_vec(vece, rd, rm, rn);
}

static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);
    tcg_gen_and_vec(vece, rn, rn, rm);
    tcg_gen_xor_vec(vece, rd, rd, rn);
}

static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);
    tcg_gen_andc_vec(vece, rn, rn, rm);
    tcg_gen_xor_vec(vece, rd, rd, rn);
}

const GVecGen3 bsl_op = {
    .fni8 = gen_bsl_i64,
    .fniv = gen_bsl_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
};

const GVecGen3 bit_op = {
    .fni8 = gen_bit_i64,
    .fniv = gen_bit_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
};

const GVecGen3 bif_op = {
    .fni8 = gen_bif_i64,
    .fniv = gen_bif_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
};
5740 static void gen_ssra8_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
5742 tcg_gen_vec_sar8i_i64(a
, a
, shift
);
5743 tcg_gen_vec_add8_i64(d
, d
, a
);
5746 static void gen_ssra16_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
5748 tcg_gen_vec_sar16i_i64(a
, a
, shift
);
5749 tcg_gen_vec_add16_i64(d
, d
, a
);
5752 static void gen_ssra32_i32(TCGv_i32 d
, TCGv_i32 a
, int32_t shift
)
5754 tcg_gen_sari_i32(a
, a
, shift
);
5755 tcg_gen_add_i32(d
, d
, a
);
5758 static void gen_ssra64_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
5760 tcg_gen_sari_i64(a
, a
, shift
);
5761 tcg_gen_add_i64(d
, d
, a
);
5764 static void gen_ssra_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, int64_t sh
)
5766 tcg_gen_sari_vec(vece
, a
, a
, sh
);
5767 tcg_gen_add_vec(vece
, d
, d
, a
);
5770 const GVecGen2i ssra_op
[4] = {
5771 { .fni8
= gen_ssra8_i64
,
5772 .fniv
= gen_ssra_vec
,
5774 .opc
= INDEX_op_sari_vec
,
5776 { .fni8
= gen_ssra16_i64
,
5777 .fniv
= gen_ssra_vec
,
5779 .opc
= INDEX_op_sari_vec
,
5781 { .fni4
= gen_ssra32_i32
,
5782 .fniv
= gen_ssra_vec
,
5784 .opc
= INDEX_op_sari_vec
,
5786 { .fni8
= gen_ssra64_i64
,
5787 .fniv
= gen_ssra_vec
,
5788 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
5790 .opc
= INDEX_op_sari_vec
,
5794 static void gen_usra8_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
5796 tcg_gen_vec_shr8i_i64(a
, a
, shift
);
5797 tcg_gen_vec_add8_i64(d
, d
, a
);
5800 static void gen_usra16_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
5802 tcg_gen_vec_shr16i_i64(a
, a
, shift
);
5803 tcg_gen_vec_add16_i64(d
, d
, a
);
5806 static void gen_usra32_i32(TCGv_i32 d
, TCGv_i32 a
, int32_t shift
)
5808 tcg_gen_shri_i32(a
, a
, shift
);
5809 tcg_gen_add_i32(d
, d
, a
);
5812 static void gen_usra64_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
5814 tcg_gen_shri_i64(a
, a
, shift
);
5815 tcg_gen_add_i64(d
, d
, a
);
5818 static void gen_usra_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, int64_t sh
)
5820 tcg_gen_shri_vec(vece
, a
, a
, sh
);
5821 tcg_gen_add_vec(vece
, d
, d
, a
);
5824 const GVecGen2i usra_op
[4] = {
5825 { .fni8
= gen_usra8_i64
,
5826 .fniv
= gen_usra_vec
,
5828 .opc
= INDEX_op_shri_vec
,
5830 { .fni8
= gen_usra16_i64
,
5831 .fniv
= gen_usra_vec
,
5833 .opc
= INDEX_op_shri_vec
,
5835 { .fni4
= gen_usra32_i32
,
5836 .fniv
= gen_usra_vec
,
5838 .opc
= INDEX_op_shri_vec
,
5840 { .fni8
= gen_usra64_i64
,
5841 .fniv
= gen_usra_vec
,
5842 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
5844 .opc
= INDEX_op_shri_vec
,
static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}
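/*
 * Example of the mask logic above, for a per-byte SRI with shift == 3:
 * dup_const(MO_8, 0xff >> 3) gives 0x1f1f1f1f1f1f1f1f, so in every byte
 * lane the low 5 bits receive the shifted-in value while the top 3 bits
 * of the destination are preserved, which is exactly the
 * shift-right-and-insert semantics.
 */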
5860 static void gen_shr16_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
5862 uint64_t mask
= dup_const(MO_16
, 0xffff >> shift
);
5863 TCGv_i64 t
= tcg_temp_new_i64();
5865 tcg_gen_shri_i64(t
, a
, shift
);
5866 tcg_gen_andi_i64(t
, t
, mask
);
5867 tcg_gen_andi_i64(d
, d
, ~mask
);
5868 tcg_gen_or_i64(d
, d
, t
);
5869 tcg_temp_free_i64(t
);
5872 static void gen_shr32_ins_i32(TCGv_i32 d
, TCGv_i32 a
, int32_t shift
)
5874 tcg_gen_shri_i32(a
, a
, shift
);
5875 tcg_gen_deposit_i32(d
, d
, a
, 0, 32 - shift
);
5878 static void gen_shr64_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
5880 tcg_gen_shri_i64(a
, a
, shift
);
5881 tcg_gen_deposit_i64(d
, d
, a
, 0, 64 - shift
);
5884 static void gen_shr_ins_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, int64_t sh
)
5887 tcg_gen_mov_vec(d
, a
);
5889 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
5890 TCGv_vec m
= tcg_temp_new_vec_matching(d
);
5892 tcg_gen_dupi_vec(vece
, m
, MAKE_64BIT_MASK((8 << vece
) - sh
, sh
));
5893 tcg_gen_shri_vec(vece
, t
, a
, sh
);
5894 tcg_gen_and_vec(vece
, d
, d
, m
);
5895 tcg_gen_or_vec(vece
, d
, d
, t
);
5897 tcg_temp_free_vec(t
);
5898 tcg_temp_free_vec(m
);
5902 const GVecGen2i sri_op
[4] = {
5903 { .fni8
= gen_shr8_ins_i64
,
5904 .fniv
= gen_shr_ins_vec
,
5906 .opc
= INDEX_op_shri_vec
,
5908 { .fni8
= gen_shr16_ins_i64
,
5909 .fniv
= gen_shr_ins_vec
,
5911 .opc
= INDEX_op_shri_vec
,
5913 { .fni4
= gen_shr32_ins_i32
,
5914 .fniv
= gen_shr_ins_vec
,
5916 .opc
= INDEX_op_shri_vec
,
5918 { .fni8
= gen_shr64_ins_i64
,
5919 .fniv
= gen_shr_ins_vec
,
5920 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
5922 .opc
= INDEX_op_shri_vec
,
5926 static void gen_shl8_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
5928 uint64_t mask
= dup_const(MO_8
, 0xff << shift
);
5929 TCGv_i64 t
= tcg_temp_new_i64();
5931 tcg_gen_shli_i64(t
, a
, shift
);
5932 tcg_gen_andi_i64(t
, t
, mask
);
5933 tcg_gen_andi_i64(d
, d
, ~mask
);
5934 tcg_gen_or_i64(d
, d
, t
);
5935 tcg_temp_free_i64(t
);
5938 static void gen_shl16_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
5940 uint64_t mask
= dup_const(MO_16
, 0xffff << shift
);
5941 TCGv_i64 t
= tcg_temp_new_i64();
5943 tcg_gen_shli_i64(t
, a
, shift
);
5944 tcg_gen_andi_i64(t
, t
, mask
);
5945 tcg_gen_andi_i64(d
, d
, ~mask
);
5946 tcg_gen_or_i64(d
, d
, t
);
5947 tcg_temp_free_i64(t
);
5950 static void gen_shl32_ins_i32(TCGv_i32 d
, TCGv_i32 a
, int32_t shift
)
5952 tcg_gen_deposit_i32(d
, d
, a
, shift
, 32 - shift
);
5955 static void gen_shl64_ins_i64(TCGv_i64 d
, TCGv_i64 a
, int64_t shift
)
5957 tcg_gen_deposit_i64(d
, d
, a
, shift
, 64 - shift
);
5960 static void gen_shl_ins_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, int64_t sh
)
5963 tcg_gen_mov_vec(d
, a
);
5965 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
5966 TCGv_vec m
= tcg_temp_new_vec_matching(d
);
5968 tcg_gen_dupi_vec(vece
, m
, MAKE_64BIT_MASK(0, sh
));
5969 tcg_gen_shli_vec(vece
, t
, a
, sh
);
5970 tcg_gen_and_vec(vece
, d
, d
, m
);
5971 tcg_gen_or_vec(vece
, d
, d
, t
);
5973 tcg_temp_free_vec(t
);
5974 tcg_temp_free_vec(m
);
5978 const GVecGen2i sli_op
[4] = {
5979 { .fni8
= gen_shl8_ins_i64
,
5980 .fniv
= gen_shl_ins_vec
,
5982 .opc
= INDEX_op_shli_vec
,
5984 { .fni8
= gen_shl16_ins_i64
,
5985 .fniv
= gen_shl_ins_vec
,
5987 .opc
= INDEX_op_shli_vec
,
5989 { .fni4
= gen_shl32_ins_i32
,
5990 .fniv
= gen_shl_ins_vec
,
5992 .opc
= INDEX_op_shli_vec
,
5994 { .fni8
= gen_shl64_ins_i64
,
5995 .fniv
= gen_shl_ins_vec
,
5996 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
5998 .opc
= INDEX_op_shli_vec
,
6002 static void gen_mla8_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
6004 gen_helper_neon_mul_u8(a
, a
, b
);
6005 gen_helper_neon_add_u8(d
, d
, a
);
6008 static void gen_mls8_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
6010 gen_helper_neon_mul_u8(a
, a
, b
);
6011 gen_helper_neon_sub_u8(d
, d
, a
);
6014 static void gen_mla16_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
6016 gen_helper_neon_mul_u16(a
, a
, b
);
6017 gen_helper_neon_add_u16(d
, d
, a
);
6020 static void gen_mls16_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
6022 gen_helper_neon_mul_u16(a
, a
, b
);
6023 gen_helper_neon_sub_u16(d
, d
, a
);
6026 static void gen_mla32_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
6028 tcg_gen_mul_i32(a
, a
, b
);
6029 tcg_gen_add_i32(d
, d
, a
);
6032 static void gen_mls32_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
6034 tcg_gen_mul_i32(a
, a
, b
);
6035 tcg_gen_sub_i32(d
, d
, a
);
6038 static void gen_mla64_i64(TCGv_i64 d
, TCGv_i64 a
, TCGv_i64 b
)
6040 tcg_gen_mul_i64(a
, a
, b
);
6041 tcg_gen_add_i64(d
, d
, a
);
6044 static void gen_mls64_i64(TCGv_i64 d
, TCGv_i64 a
, TCGv_i64 b
)
6046 tcg_gen_mul_i64(a
, a
, b
);
6047 tcg_gen_sub_i64(d
, d
, a
);
6050 static void gen_mla_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, TCGv_vec b
)
6052 tcg_gen_mul_vec(vece
, a
, a
, b
);
6053 tcg_gen_add_vec(vece
, d
, d
, a
);
6056 static void gen_mls_vec(unsigned vece
, TCGv_vec d
, TCGv_vec a
, TCGv_vec b
)
6058 tcg_gen_mul_vec(vece
, a
, a
, b
);
6059 tcg_gen_sub_vec(vece
, d
, d
, a
);
6062 /* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
6063 * these tables are shared with AArch64 which does support them.
6065 const GVecGen3 mla_op
[4] = {
6066 { .fni4
= gen_mla8_i32
,
6067 .fniv
= gen_mla_vec
,
6068 .opc
= INDEX_op_mul_vec
,
6071 { .fni4
= gen_mla16_i32
,
6072 .fniv
= gen_mla_vec
,
6073 .opc
= INDEX_op_mul_vec
,
6076 { .fni4
= gen_mla32_i32
,
6077 .fniv
= gen_mla_vec
,
6078 .opc
= INDEX_op_mul_vec
,
6081 { .fni8
= gen_mla64_i64
,
6082 .fniv
= gen_mla_vec
,
6083 .opc
= INDEX_op_mul_vec
,
6084 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
6089 const GVecGen3 mls_op
[4] = {
6090 { .fni4
= gen_mls8_i32
,
6091 .fniv
= gen_mls_vec
,
6092 .opc
= INDEX_op_mul_vec
,
6095 { .fni4
= gen_mls16_i32
,
6096 .fniv
= gen_mls_vec
,
6097 .opc
= INDEX_op_mul_vec
,
6100 { .fni4
= gen_mls32_i32
,
6101 .fniv
= gen_mls_vec
,
6102 .opc
= INDEX_op_mul_vec
,
6105 { .fni8
= gen_mls64_i64
,
6106 .fniv
= gen_mls_vec
,
6107 .opc
= INDEX_op_mul_vec
,
6108 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
/* CMTST : test is "if (X & Y != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);
}

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}
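/*
 * All three expansions produce the VTST result mask: e.g. with a = 0x0F
 * and b = 0x18 the AND is 0x08 (non-zero), so the element becomes all
 * ones.  The scalar versions get there by negating the 0/1 setcond
 * result; the vector version's cmp_vec yields the mask directly.
 */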
6135 const GVecGen3 cmtst_op
[4] = {
6136 { .fni4
= gen_helper_neon_tst_u8
,
6137 .fniv
= gen_cmtst_vec
,
6139 { .fni4
= gen_helper_neon_tst_u16
,
6140 .fniv
= gen_cmtst_vec
,
6142 { .fni4
= gen_cmtst_i32
,
6143 .fniv
= gen_cmtst_vec
,
6145 { .fni8
= gen_cmtst_i64
,
6146 .fniv
= gen_cmtst_vec
,
6147 .prefer_i64
= TCG_TARGET_REG_BITS
== 64,
static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_usadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}
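/*
 * Saturation tracking: the wrapping add (add_vec) and the saturating add
 * (usadd_vec) are compared lane by lane; any lane where they differ must
 * have saturated, and those all-ones lanes are ORed into "sat", which the
 * callers point at vfp.qc so the sticky QC flag gets set.  The signed and
 * subtracting variants that follow use the same pattern.
 */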
6162 const GVecGen4 uqadd_op
[4] = {
6163 { .fniv
= gen_uqadd_vec
,
6164 .fno
= gen_helper_gvec_uqadd_b
,
6165 .opc
= INDEX_op_usadd_vec
,
6168 { .fniv
= gen_uqadd_vec
,
6169 .fno
= gen_helper_gvec_uqadd_h
,
6170 .opc
= INDEX_op_usadd_vec
,
6173 { .fniv
= gen_uqadd_vec
,
6174 .fno
= gen_helper_gvec_uqadd_s
,
6175 .opc
= INDEX_op_usadd_vec
,
6178 { .fniv
= gen_uqadd_vec
,
6179 .fno
= gen_helper_gvec_uqadd_d
,
6180 .opc
= INDEX_op_usadd_vec
,
6185 static void gen_sqadd_vec(unsigned vece
, TCGv_vec t
, TCGv_vec sat
,
6186 TCGv_vec a
, TCGv_vec b
)
6188 TCGv_vec x
= tcg_temp_new_vec_matching(t
);
6189 tcg_gen_add_vec(vece
, x
, a
, b
);
6190 tcg_gen_ssadd_vec(vece
, t
, a
, b
);
6191 tcg_gen_cmp_vec(TCG_COND_NE
, vece
, x
, x
, t
);
6192 tcg_gen_or_vec(vece
, sat
, sat
, x
);
6193 tcg_temp_free_vec(x
);
6196 const GVecGen4 sqadd_op
[4] = {
6197 { .fniv
= gen_sqadd_vec
,
6198 .fno
= gen_helper_gvec_sqadd_b
,
6199 .opc
= INDEX_op_ssadd_vec
,
6202 { .fniv
= gen_sqadd_vec
,
6203 .fno
= gen_helper_gvec_sqadd_h
,
6204 .opc
= INDEX_op_ssadd_vec
,
6207 { .fniv
= gen_sqadd_vec
,
6208 .fno
= gen_helper_gvec_sqadd_s
,
6209 .opc
= INDEX_op_ssadd_vec
,
6212 { .fniv
= gen_sqadd_vec
,
6213 .fno
= gen_helper_gvec_sqadd_d
,
6214 .opc
= INDEX_op_ssadd_vec
,
6219 static void gen_uqsub_vec(unsigned vece
, TCGv_vec t
, TCGv_vec sat
,
6220 TCGv_vec a
, TCGv_vec b
)
6222 TCGv_vec x
= tcg_temp_new_vec_matching(t
);
6223 tcg_gen_sub_vec(vece
, x
, a
, b
);
6224 tcg_gen_ussub_vec(vece
, t
, a
, b
);
6225 tcg_gen_cmp_vec(TCG_COND_NE
, vece
, x
, x
, t
);
6226 tcg_gen_or_vec(vece
, sat
, sat
, x
);
6227 tcg_temp_free_vec(x
);
6230 const GVecGen4 uqsub_op
[4] = {
6231 { .fniv
= gen_uqsub_vec
,
6232 .fno
= gen_helper_gvec_uqsub_b
,
6233 .opc
= INDEX_op_ussub_vec
,
6236 { .fniv
= gen_uqsub_vec
,
6237 .fno
= gen_helper_gvec_uqsub_h
,
6238 .opc
= INDEX_op_ussub_vec
,
6241 { .fniv
= gen_uqsub_vec
,
6242 .fno
= gen_helper_gvec_uqsub_s
,
6243 .opc
= INDEX_op_ussub_vec
,
6246 { .fniv
= gen_uqsub_vec
,
6247 .fno
= gen_helper_gvec_uqsub_d
,
6248 .opc
= INDEX_op_ussub_vec
,
6253 static void gen_sqsub_vec(unsigned vece
, TCGv_vec t
, TCGv_vec sat
,
6254 TCGv_vec a
, TCGv_vec b
)
6256 TCGv_vec x
= tcg_temp_new_vec_matching(t
);
6257 tcg_gen_sub_vec(vece
, x
, a
, b
);
6258 tcg_gen_sssub_vec(vece
, t
, a
, b
);
6259 tcg_gen_cmp_vec(TCG_COND_NE
, vece
, x
, x
, t
);
6260 tcg_gen_or_vec(vece
, sat
, sat
, x
);
6261 tcg_temp_free_vec(x
);
6264 const GVecGen4 sqsub_op
[4] = {
6265 { .fniv
= gen_sqsub_vec
,
6266 .fno
= gen_helper_gvec_sqsub_b
,
6267 .opc
= INDEX_op_sssub_vec
,
6270 { .fniv
= gen_sqsub_vec
,
6271 .fno
= gen_helper_gvec_sqsub_h
,
6272 .opc
= INDEX_op_sssub_vec
,
6275 { .fniv
= gen_sqsub_vec
,
6276 .fno
= gen_helper_gvec_sqsub_s
,
6277 .opc
= INDEX_op_sssub_vec
,
6280 { .fniv
= gen_sqsub_vec
,
6281 .fno
= gen_helper_gvec_sqsub_d
,
6282 .opc
= INDEX_op_sssub_vec
,
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */
static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_ptr ptr1, ptr2, ptr3;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
        return 1;

    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    vec_size = q ? 16 : 8;
    rd_ofs = neon_reg_offset(rd, 0);
    rn_ofs = neon_reg_offset(rn, 0);
    rm_ofs = neon_reg_offset(rm, 0);

    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
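        /*
         * From here the 3-reg-same decoder dispatches on "op": cases that
         * map directly onto host vector operations are expanded with the
         * tcg_gen_gvec_* interfaces using rd_ofs/rn_ofs/rm_ofs (byte
         * offsets of the D registers) and vec_size (8 or 16 bytes), while
         * the remaining cases fall through to the per-pass 32-bit loop
         * further down.
         */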
6347 /* The SHA-1/SHA-256 3-register instructions require special
6348 * treatment here, as their size field is overloaded as an
6349 * op type selector, and they all consume their input in a
6355 if (!u
) { /* SHA-1 */
6356 if (!dc_isar_feature(aa32_sha1
, s
)) {
6359 ptr1
= vfp_reg_ptr(true, rd
);
6360 ptr2
= vfp_reg_ptr(true, rn
);
6361 ptr3
= vfp_reg_ptr(true, rm
);
6362 tmp4
= tcg_const_i32(size
);
6363 gen_helper_crypto_sha1_3reg(ptr1
, ptr2
, ptr3
, tmp4
);
6364 tcg_temp_free_i32(tmp4
);
6365 } else { /* SHA-256 */
6366 if (!dc_isar_feature(aa32_sha2
, s
) || size
== 3) {
6369 ptr1
= vfp_reg_ptr(true, rd
);
6370 ptr2
= vfp_reg_ptr(true, rn
);
6371 ptr3
= vfp_reg_ptr(true, rm
);
6374 gen_helper_crypto_sha256h(ptr1
, ptr2
, ptr3
);
6377 gen_helper_crypto_sha256h2(ptr1
, ptr2
, ptr3
);
6380 gen_helper_crypto_sha256su1(ptr1
, ptr2
, ptr3
);
6384 tcg_temp_free_ptr(ptr1
);
6385 tcg_temp_free_ptr(ptr2
);
6386 tcg_temp_free_ptr(ptr3
);
6389 case NEON_3R_VPADD_VQRDMLAH
:
6396 return do_v81_helper(s
, gen_helper_gvec_qrdmlah_s16
,
6399 return do_v81_helper(s
, gen_helper_gvec_qrdmlah_s32
,
6404 case NEON_3R_VFM_VQRDMLSH
:
6415 return do_v81_helper(s
, gen_helper_gvec_qrdmlsh_s16
,
6418 return do_v81_helper(s
, gen_helper_gvec_qrdmlsh_s32
,
6423 case NEON_3R_LOGIC
: /* Logic ops. */
6424 switch ((u
<< 2) | size
) {
6426 tcg_gen_gvec_and(0, rd_ofs
, rn_ofs
, rm_ofs
,
6427 vec_size
, vec_size
);
6430 tcg_gen_gvec_andc(0, rd_ofs
, rn_ofs
, rm_ofs
,
6431 vec_size
, vec_size
);
6434 tcg_gen_gvec_or(0, rd_ofs
, rn_ofs
, rm_ofs
,
6435 vec_size
, vec_size
);
6438 tcg_gen_gvec_orc(0, rd_ofs
, rn_ofs
, rm_ofs
,
6439 vec_size
, vec_size
);
6442 tcg_gen_gvec_xor(0, rd_ofs
, rn_ofs
, rm_ofs
,
6443 vec_size
, vec_size
);
6446 tcg_gen_gvec_3(rd_ofs
, rn_ofs
, rm_ofs
,
6447 vec_size
, vec_size
, &bsl_op
);
6450 tcg_gen_gvec_3(rd_ofs
, rn_ofs
, rm_ofs
,
6451 vec_size
, vec_size
, &bit_op
);
6454 tcg_gen_gvec_3(rd_ofs
, rn_ofs
, rm_ofs
,
6455 vec_size
, vec_size
, &bif_op
);
6460 case NEON_3R_VADD_VSUB
:
6462 tcg_gen_gvec_sub(size
, rd_ofs
, rn_ofs
, rm_ofs
,
6463 vec_size
, vec_size
);
6465 tcg_gen_gvec_add(size
, rd_ofs
, rn_ofs
, rm_ofs
,
6466 vec_size
, vec_size
);
6471 tcg_gen_gvec_4(rd_ofs
, offsetof(CPUARMState
, vfp
.qc
),
6472 rn_ofs
, rm_ofs
, vec_size
, vec_size
,
6473 (u
? uqadd_op
: sqadd_op
) + size
);
6477 tcg_gen_gvec_4(rd_ofs
, offsetof(CPUARMState
, vfp
.qc
),
6478 rn_ofs
, rm_ofs
, vec_size
, vec_size
,
6479 (u
? uqsub_op
: sqsub_op
) + size
);
6482 case NEON_3R_VMUL
: /* VMUL */
6484 /* Polynomial case allows only P8 and is handled below. */
6489 tcg_gen_gvec_mul(size
, rd_ofs
, rn_ofs
, rm_ofs
,
6490 vec_size
, vec_size
);
6495 case NEON_3R_VML
: /* VMLA, VMLS */
6496 tcg_gen_gvec_3(rd_ofs
, rn_ofs
, rm_ofs
, vec_size
, vec_size
,
6497 u
? &mls_op
[size
] : &mla_op
[size
]);
6500 case NEON_3R_VTST_VCEQ
:
6502 tcg_gen_gvec_cmp(TCG_COND_EQ
, size
, rd_ofs
, rn_ofs
, rm_ofs
,
6503 vec_size
, vec_size
);
6505 tcg_gen_gvec_3(rd_ofs
, rn_ofs
, rm_ofs
,
6506 vec_size
, vec_size
, &cmtst_op
[size
]);
6511 tcg_gen_gvec_cmp(u
? TCG_COND_GTU
: TCG_COND_GT
, size
,
6512 rd_ofs
, rn_ofs
, rm_ofs
, vec_size
, vec_size
);
6516 tcg_gen_gvec_cmp(u
? TCG_COND_GEU
: TCG_COND_GE
, size
,
6517 rd_ofs
, rn_ofs
, rm_ofs
, vec_size
, vec_size
);
6522 tcg_gen_gvec_umax(size
, rd_ofs
, rn_ofs
, rm_ofs
,
6523 vec_size
, vec_size
);
6525 tcg_gen_gvec_smax(size
, rd_ofs
, rn_ofs
, rm_ofs
,
6526 vec_size
, vec_size
);
6531 tcg_gen_gvec_umin(size
, rd_ofs
, rn_ofs
, rm_ofs
,
6532 vec_size
, vec_size
);
6534 tcg_gen_gvec_smin(size
, rd_ofs
, rn_ofs
, rm_ofs
,
6535 vec_size
, vec_size
);
6541 /* 64-bit element instructions. */
6542 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
6543 neon_load_reg64(cpu_V0
, rn
+ pass
);
6544 neon_load_reg64(cpu_V1
, rm
+ pass
);
6548 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
6550 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
6555 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
6558 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
6564 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
6566 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
6569 case NEON_3R_VQRSHL
:
6571 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
6574 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
6581 neon_store_reg64(cpu_V0
, rd
+ pass
);
6590 case NEON_3R_VQRSHL
:
6593 /* Shift instruction operands are reversed. */
6599 case NEON_3R_VPADD_VQRDMLAH
:
6604 case NEON_3R_FLOAT_ARITH
:
6605 pairwise
= (u
&& size
< 2); /* if VPADD (float) */
6607 case NEON_3R_FLOAT_MINMAX
:
6608 pairwise
= u
; /* if VPMIN/VPMAX (float) */
6610 case NEON_3R_FLOAT_CMP
:
6612 /* no encoding for U=0 C=1x */
6616 case NEON_3R_FLOAT_ACMP
:
6621 case NEON_3R_FLOAT_MISC
:
6622 /* VMAXNM/VMINNM in ARMv8 */
6623 if (u
&& !arm_dc_feature(s
, ARM_FEATURE_V8
)) {
6627 case NEON_3R_VFM_VQRDMLSH
:
6628 if (!arm_dc_feature(s
, ARM_FEATURE_VFP4
)) {
6636 if (pairwise
&& q
) {
6637 /* All the pairwise insns UNDEF if Q is set */
6641 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6646 tmp
= neon_load_reg(rn
, 0);
6647 tmp2
= neon_load_reg(rn
, 1);
6649 tmp
= neon_load_reg(rm
, 0);
6650 tmp2
= neon_load_reg(rm
, 1);
6654 tmp
= neon_load_reg(rn
, pass
);
6655 tmp2
= neon_load_reg(rm
, pass
);
6659 GEN_NEON_INTEGER_OP(hadd
);
6661 case NEON_3R_VRHADD
:
6662 GEN_NEON_INTEGER_OP(rhadd
);
6665 GEN_NEON_INTEGER_OP(hsub
);
6668 GEN_NEON_INTEGER_OP(shl
);
6671 GEN_NEON_INTEGER_OP_ENV(qshl
);
6674 GEN_NEON_INTEGER_OP(rshl
);
6676 case NEON_3R_VQRSHL
:
6677 GEN_NEON_INTEGER_OP_ENV(qrshl
);
6680 GEN_NEON_INTEGER_OP(abd
);
6683 GEN_NEON_INTEGER_OP(abd
);
6684 tcg_temp_free_i32(tmp2
);
6685 tmp2
= neon_load_reg(rd
, pass
);
6686 gen_neon_add(size
, tmp
, tmp2
);
6689 /* VMUL.P8; other cases already eliminated. */
6690 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
6693 GEN_NEON_INTEGER_OP(pmax
);
6696 GEN_NEON_INTEGER_OP(pmin
);
6698 case NEON_3R_VQDMULH_VQRDMULH
: /* Multiply high. */
6699 if (!u
) { /* VQDMULH */
6702 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
6705 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
6709 } else { /* VQRDMULH */
6712 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
6715 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
6721 case NEON_3R_VPADD_VQRDMLAH
:
6723 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
6724 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
6725 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
6729 case NEON_3R_FLOAT_ARITH
: /* Floating point arithmetic. */
6731 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6732 switch ((u
<< 2) | size
) {
6735 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
6738 gen_helper_vfp_subs(tmp
, tmp
, tmp2
, fpstatus
);
6741 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
, fpstatus
);
6746 tcg_temp_free_ptr(fpstatus
);
6749 case NEON_3R_FLOAT_MULTIPLY
:
6751 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6752 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
6754 tcg_temp_free_i32(tmp2
);
6755 tmp2
= neon_load_reg(rd
, pass
);
6757 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
6759 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
6762 tcg_temp_free_ptr(fpstatus
);
6765 case NEON_3R_FLOAT_CMP
:
6767 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6769 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
6772 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
6774 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
6777 tcg_temp_free_ptr(fpstatus
);
6780 case NEON_3R_FLOAT_ACMP
:
6782 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6784 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
, fpstatus
);
6786 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
6788 tcg_temp_free_ptr(fpstatus
);
6791 case NEON_3R_FLOAT_MINMAX
:
6793 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6795 gen_helper_vfp_maxs(tmp
, tmp
, tmp2
, fpstatus
);
6797 gen_helper_vfp_mins(tmp
, tmp
, tmp2
, fpstatus
);
6799 tcg_temp_free_ptr(fpstatus
);
6802 case NEON_3R_FLOAT_MISC
:
6805 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6807 gen_helper_vfp_maxnums(tmp
, tmp
, tmp2
, fpstatus
);
6809 gen_helper_vfp_minnums(tmp
, tmp
, tmp2
, fpstatus
);
6811 tcg_temp_free_ptr(fpstatus
);
6814 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
6816 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
6820 case NEON_3R_VFM_VQRDMLSH
:
6822 /* VFMA, VFMS: fused multiply-add */
6823 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6824 TCGv_i32 tmp3
= neon_load_reg(rd
, pass
);
6827 gen_helper_vfp_negs(tmp
, tmp
);
6829 gen_helper_vfp_muladds(tmp
, tmp
, tmp2
, tmp3
, fpstatus
);
6830 tcg_temp_free_i32(tmp3
);
6831 tcg_temp_free_ptr(fpstatus
);
6837 tcg_temp_free_i32(tmp2
);
6839 /* Save the result. For elementwise operations we can put it
6840 straight into the destination register. For pairwise operations
6841 we have to be careful to avoid clobbering the source operands. */
6842 if (pairwise
&& rd
== rm
) {
6843 neon_store_scratch(pass
, tmp
);
6845 neon_store_reg(rd
, pass
, tmp
);
6849 if (pairwise
&& rd
== rm
) {
6850 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6851 tmp
= neon_load_scratch(pass
);
6852 neon_store_reg(rd
, pass
, tmp
);
6855 /* End of 3 register same size operations. */
6856 } else if (insn
& (1 << 4)) {
6857 if ((insn
& 0x00380080) != 0) {
6858 /* Two registers and shift. */
6859 op
= (insn
>> 8) & 0xf;
6860 if (insn
& (1 << 7)) {
6868 while ((insn
& (1 << (size
+ 19))) == 0)
6871 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
6873 /* Shift by immediate:
6874 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
6875 if (q
&& ((rd
| rm
) & 1)) {
6878 if (!u
&& (op
== 4 || op
== 6)) {
6881 /* Right shifts are encoded as N - shift, where N is the
6882 element size in bits. */
6884 shift
= shift
- (1 << (size
+ 3));
6889 /* Right shift comes here negative. */
6891 /* Shifts larger than the element size are architecturally
6892 * valid. Unsigned results in all zeros; signed results
6896 tcg_gen_gvec_sari(size
, rd_ofs
, rm_ofs
,
6897 MIN(shift
, (8 << size
) - 1),
6898 vec_size
, vec_size
);
6899 } else if (shift
>= 8 << size
) {
6900 tcg_gen_gvec_dup8i(rd_ofs
, vec_size
, vec_size
, 0);
6902 tcg_gen_gvec_shri(size
, rd_ofs
, rm_ofs
, shift
,
6903 vec_size
, vec_size
);
6908 /* Right shift comes here negative. */
6910 /* Shifts larger than the element size are architecturally
6911 * valid. Unsigned results in all zeros; signed results
6915 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, vec_size
, vec_size
,
6916 MIN(shift
, (8 << size
) - 1),
6918 } else if (shift
>= 8 << size
) {
6921 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, vec_size
, vec_size
,
6922 shift
, &usra_op
[size
]);
6930 /* Right shift comes here negative. */
6932 /* Shift out of range leaves destination unchanged. */
6933 if (shift
< 8 << size
) {
6934 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, vec_size
, vec_size
,
6935 shift
, &sri_op
[size
]);
6939 case 5: /* VSHL, VSLI */
6941 /* Shift out of range leaves destination unchanged. */
6942 if (shift
< 8 << size
) {
6943 tcg_gen_gvec_2i(rd_ofs
, rm_ofs
, vec_size
,
6944 vec_size
, shift
, &sli_op
[size
]);
6947 /* Shifts larger than the element size are
6948 * architecturally valid and results in zero.
6950 if (shift
>= 8 << size
) {
6951 tcg_gen_gvec_dup8i(rd_ofs
, vec_size
, vec_size
, 0);
6953 tcg_gen_gvec_shli(size
, rd_ofs
, rm_ofs
, shift
,
6954 vec_size
, vec_size
);
6966 /* To avoid excessive duplication of ops we implement shift
6967 * by immediate using the variable shift operations.
6969 imm
= dup_const(size
, shift
);
6971 for (pass
= 0; pass
< count
; pass
++) {
6973 neon_load_reg64(cpu_V0
, rm
+ pass
);
6974 tcg_gen_movi_i64(cpu_V1
, imm
);
6979 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
6981 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
6983 case 6: /* VQSHLU */
6984 gen_helper_neon_qshlu_s64(cpu_V0
, cpu_env
,
6989 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
6992 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
6997 g_assert_not_reached();
7001 neon_load_reg64(cpu_V1
, rd
+ pass
);
7002 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
7004 neon_store_reg64(cpu_V0
, rd
+ pass
);
7005 } else { /* size < 3 */
7006 /* Operands in T0 and T1. */
7007 tmp
= neon_load_reg(rm
, pass
);
7008 tmp2
= tcg_temp_new_i32();
7009 tcg_gen_movi_i32(tmp2
, imm
);
7013 GEN_NEON_INTEGER_OP(rshl
);
7015 case 6: /* VQSHLU */
7018 gen_helper_neon_qshlu_s8(tmp
, cpu_env
,
7022 gen_helper_neon_qshlu_s16(tmp
, cpu_env
,
7026 gen_helper_neon_qshlu_s32(tmp
, cpu_env
,
7034 GEN_NEON_INTEGER_OP_ENV(qshl
);
7037 g_assert_not_reached();
7039 tcg_temp_free_i32(tmp2
);
7043 tmp2
= neon_load_reg(rd
, pass
);
7044 gen_neon_add(size
, tmp
, tmp2
);
7045 tcg_temp_free_i32(tmp2
);
7047 neon_store_reg(rd
, pass
, tmp
);
7050 } else if (op
< 10) {
7051 /* Shift by immediate and narrow:
7052 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
7053 int input_unsigned
= (op
== 8) ? !u
: u
;
7057 shift
= shift
- (1 << (size
+ 3));
7060 tmp64
= tcg_const_i64(shift
);
7061 neon_load_reg64(cpu_V0
, rm
);
7062 neon_load_reg64(cpu_V1
, rm
+ 1);
7063 for (pass
= 0; pass
< 2; pass
++) {
7071 if (input_unsigned
) {
7072 gen_helper_neon_rshl_u64(cpu_V0
, in
, tmp64
);
7074 gen_helper_neon_rshl_s64(cpu_V0
, in
, tmp64
);
7077 if (input_unsigned
) {
7078 gen_helper_neon_shl_u64(cpu_V0
, in
, tmp64
);
7080 gen_helper_neon_shl_s64(cpu_V0
, in
, tmp64
);
7083 tmp
= tcg_temp_new_i32();
7084 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
7085 neon_store_reg(rd
, pass
, tmp
);
7087 tcg_temp_free_i64(tmp64
);
7090 imm
= (uint16_t)shift
;
7094 imm
= (uint32_t)shift
;
7096 tmp2
= tcg_const_i32(imm
);
7097 tmp4
= neon_load_reg(rm
+ 1, 0);
7098 tmp5
= neon_load_reg(rm
+ 1, 1);
7099 for (pass
= 0; pass
< 2; pass
++) {
7101 tmp
= neon_load_reg(rm
, 0);
7105 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
,
7108 tmp3
= neon_load_reg(rm
, 1);
7112 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
,
7114 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
7115 tcg_temp_free_i32(tmp
);
7116 tcg_temp_free_i32(tmp3
);
7117 tmp
= tcg_temp_new_i32();
7118 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
7119 neon_store_reg(rd
, pass
, tmp
);
7121 tcg_temp_free_i32(tmp2
);
7123 } else if (op
== 10) {
7125 if (q
|| (rd
& 1)) {
7128 tmp
= neon_load_reg(rm
, 0);
7129 tmp2
= neon_load_reg(rm
, 1);
7130 for (pass
= 0; pass
< 2; pass
++) {
7134 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
7137 /* The shift is less than the width of the source
7138 type, so we can just shift the whole register. */
7139 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
/* Widen the result of the shift: we need to clear
 * the potential overflow bits resulting from
 * left bits of the narrow input appearing as
 * right bits of the left neighbour narrow
 * input.  */
7145 if (size
< 2 || !u
) {
7148 imm
= (0xffu
>> (8 - shift
));
7150 } else if (size
== 1) {
7151 imm
= 0xffff >> (16 - shift
);
7154 imm
= 0xffffffff >> (32 - shift
);
7157 imm64
= imm
| (((uint64_t)imm
) << 32);
7161 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, ~imm64
);
7164 neon_store_reg64(cpu_V0
, rd
+ pass
);
7166 } else if (op
>= 14) {
7167 /* VCVT fixed-point. */
7168 if (!(insn
& (1 << 21)) || (q
&& ((rd
| rm
) & 1))) {
7171 /* We have already masked out the must-be-1 top bit of imm6,
7172 * hence this 32-shift where the ARM ARM has 64-imm6.
7175 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
7176 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
7179 gen_vfp_ulto(0, shift
, 1);
7181 gen_vfp_slto(0, shift
, 1);
7184 gen_vfp_toul(0, shift
, 1);
7186 gen_vfp_tosl(0, shift
, 1);
7188 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
7193 } else { /* (insn & 0x00380080) == 0 */
7194 int invert
, reg_ofs
, vec_size
;
7196 if (q
&& (rd
& 1)) {
7200 op
= (insn
>> 8) & 0xf;
7201 /* One register and immediate. */
7202 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
7203 invert
= (insn
& (1 << 5)) != 0;
7204 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
7205 * We choose to not special-case this and will behave as if a
7206 * valid constant encoding of 0 had been given.
7225 imm
= (imm
<< 8) | (imm
<< 24);
7228 imm
= (imm
<< 8) | 0xff;
7231 imm
= (imm
<< 16) | 0xffff;
7234 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
7243 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
7244 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
7251 reg_ofs
= neon_reg_offset(rd
, 0);
7252 vec_size
= q
? 16 : 8;
7254 if (op
& 1 && op
< 12) {
7256 /* The immediate value has already been inverted,
7257 * so BIC becomes AND.
7259 tcg_gen_gvec_andi(MO_32
, reg_ofs
, reg_ofs
, imm
,
7260 vec_size
, vec_size
);
7262 tcg_gen_gvec_ori(MO_32
, reg_ofs
, reg_ofs
, imm
,
7263 vec_size
, vec_size
);
7267 if (op
== 14 && invert
) {
7268 TCGv_i64 t64
= tcg_temp_new_i64();
7270 for (pass
= 0; pass
<= q
; ++pass
) {
7274 for (n
= 0; n
< 8; n
++) {
7275 if (imm
& (1 << (n
+ pass
* 8))) {
7276 val
|= 0xffull
<< (n
* 8);
7279 tcg_gen_movi_i64(t64
, val
);
7280 neon_store_reg64(t64
, rd
+ pass
);
7282 tcg_temp_free_i64(t64
);
7284 tcg_gen_gvec_dup32i(reg_ofs
, vec_size
, vec_size
, imm
);
7288 } else { /* (insn & 0x00800010 == 0x00800000) */
7290 op
= (insn
>> 8) & 0xf;
7291 if ((insn
& (1 << 6)) == 0) {
7292 /* Three registers of different lengths. */
7296 /* undefreq: bit 0 : UNDEF if size == 0
7297 * bit 1 : UNDEF if size == 1
7298 * bit 2 : UNDEF if size == 2
7299 * bit 3 : UNDEF if U == 1
7300 * Note that [2:0] set implies 'always UNDEF'
7303 /* prewiden, src1_wide, src2_wide, undefreq */
7304 static const int neon_3reg_wide
[16][4] = {
7305 {1, 0, 0, 0}, /* VADDL */
7306 {1, 1, 0, 0}, /* VADDW */
7307 {1, 0, 0, 0}, /* VSUBL */
7308 {1, 1, 0, 0}, /* VSUBW */
7309 {0, 1, 1, 0}, /* VADDHN */
7310 {0, 0, 0, 0}, /* VABAL */
7311 {0, 1, 1, 0}, /* VSUBHN */
7312 {0, 0, 0, 0}, /* VABDL */
7313 {0, 0, 0, 0}, /* VMLAL */
7314 {0, 0, 0, 9}, /* VQDMLAL */
7315 {0, 0, 0, 0}, /* VMLSL */
7316 {0, 0, 0, 9}, /* VQDMLSL */
7317 {0, 0, 0, 0}, /* Integer VMULL */
7318 {0, 0, 0, 1}, /* VQDMULL */
7319 {0, 0, 0, 0xa}, /* Polynomial VMULL */
7320 {0, 0, 0, 7}, /* Reserved: always UNDEF */
7323 prewiden
= neon_3reg_wide
[op
][0];
7324 src1_wide
= neon_3reg_wide
[op
][1];
7325 src2_wide
= neon_3reg_wide
[op
][2];
7326 undefreq
= neon_3reg_wide
[op
][3];
7328 if ((undefreq
& (1 << size
)) ||
7329 ((undefreq
& 8) && u
)) {
7332 if ((src1_wide
&& (rn
& 1)) ||
7333 (src2_wide
&& (rm
& 1)) ||
7334 (!src2_wide
&& (rd
& 1))) {
7338 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
7339 * outside the loop below as it only performs a single pass.
7341 if (op
== 14 && size
== 2) {
7342 TCGv_i64 tcg_rn
, tcg_rm
, tcg_rd
;
7344 if (!dc_isar_feature(aa32_pmull
, s
)) {
7347 tcg_rn
= tcg_temp_new_i64();
7348 tcg_rm
= tcg_temp_new_i64();
7349 tcg_rd
= tcg_temp_new_i64();
7350 neon_load_reg64(tcg_rn
, rn
);
7351 neon_load_reg64(tcg_rm
, rm
);
7352 gen_helper_neon_pmull_64_lo(tcg_rd
, tcg_rn
, tcg_rm
);
7353 neon_store_reg64(tcg_rd
, rd
);
7354 gen_helper_neon_pmull_64_hi(tcg_rd
, tcg_rn
, tcg_rm
);
7355 neon_store_reg64(tcg_rd
, rd
+ 1);
7356 tcg_temp_free_i64(tcg_rn
);
7357 tcg_temp_free_i64(tcg_rm
);
7358 tcg_temp_free_i64(tcg_rd
);
7362 /* Avoid overlapping operands. Wide source operands are
7363 always aligned so will never overlap with wide
7364 destinations in problematic ways. */
7365 if (rd
== rm
&& !src2_wide
) {
7366 tmp
= neon_load_reg(rm
, 1);
7367 neon_store_scratch(2, tmp
);
7368 } else if (rd
== rn
&& !src1_wide
) {
7369 tmp
= neon_load_reg(rn
, 1);
7370 neon_store_scratch(2, tmp
);
7373 for (pass
= 0; pass
< 2; pass
++) {
7375 neon_load_reg64(cpu_V0
, rn
+ pass
);
7378 if (pass
== 1 && rd
== rn
) {
7379 tmp
= neon_load_scratch(2);
7381 tmp
= neon_load_reg(rn
, pass
);
7384 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
7388 neon_load_reg64(cpu_V1
, rm
+ pass
);
7391 if (pass
== 1 && rd
== rm
) {
7392 tmp2
= neon_load_scratch(2);
7394 tmp2
= neon_load_reg(rm
, pass
);
7397 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
7401 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
7402 gen_neon_addl(size
);
7404 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
7405 gen_neon_subl(size
);
7407 case 5: case 7: /* VABAL, VABDL */
7408 switch ((size
<< 1) | u
) {
7410 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
7413 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
7416 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
7419 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
7422 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
7425 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
7429 tcg_temp_free_i32(tmp2
);
7430 tcg_temp_free_i32(tmp
);
7432 case 8: case 9: case 10: case 11: case 12: case 13:
7433 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
7434 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
7436 case 14: /* Polynomial VMULL */
7437 gen_helper_neon_mull_p8(cpu_V0
, tmp
, tmp2
);
7438 tcg_temp_free_i32(tmp2
);
7439 tcg_temp_free_i32(tmp
);
7441 default: /* 15 is RESERVED: caught earlier */
7446 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
7447 neon_store_reg64(cpu_V0
, rd
+ pass
);
7448 } else if (op
== 5 || (op
>= 8 && op
<= 11)) {
7450 neon_load_reg64(cpu_V1
, rd
+ pass
);
7452 case 10: /* VMLSL */
7453 gen_neon_negl(cpu_V0
, size
);
7455 case 5: case 8: /* VABAL, VMLAL */
7456 gen_neon_addl(size
);
7458 case 9: case 11: /* VQDMLAL, VQDMLSL */
7459 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
7461 gen_neon_negl(cpu_V0
, size
);
7463 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
7468 neon_store_reg64(cpu_V0
, rd
+ pass
);
7469 } else if (op
== 4 || op
== 6) {
7470 /* Narrowing operation. */
7471 tmp
= tcg_temp_new_i32();
7475 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
7478 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
7481 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
7482 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
7489 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
7492 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
7495 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
7496 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
7497 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
7505 neon_store_reg(rd
, 0, tmp3
);
7506 neon_store_reg(rd
, 1, tmp
);
7509 /* Write back the result. */
7510 neon_store_reg64(cpu_V0
, rd
+ pass
);
7514 /* Two registers and a scalar. NB that for ops of this form
7515 * the ARM ARM labels bit 24 as Q, but it is in our variable
7522 case 1: /* Float VMLA scalar */
7523 case 5: /* Floating point VMLS scalar */
7524 case 9: /* Floating point VMUL scalar */
7529 case 0: /* Integer VMLA scalar */
7530 case 4: /* Integer VMLS scalar */
7531 case 8: /* Integer VMUL scalar */
7532 case 12: /* VQDMULH scalar */
7533 case 13: /* VQRDMULH scalar */
7534 if (u
&& ((rd
| rn
) & 1)) {
7537 tmp
= neon_get_scalar(size
, rm
);
7538 neon_store_scratch(0, tmp
);
7539 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
7540 tmp
= neon_load_scratch(0);
7541 tmp2
= neon_load_reg(rn
, pass
);
7544 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
7546 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
7548 } else if (op
== 13) {
7550 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
7552 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
7554 } else if (op
& 1) {
7555 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7556 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
7557 tcg_temp_free_ptr(fpstatus
);
7560 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
7561 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
7562 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
7566 tcg_temp_free_i32(tmp2
);
7569 tmp2
= neon_load_reg(rd
, pass
);
7572 gen_neon_add(size
, tmp
, tmp2
);
7576 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7577 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
7578 tcg_temp_free_ptr(fpstatus
);
7582 gen_neon_rsb(size
, tmp
, tmp2
);
7586 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7587 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
7588 tcg_temp_free_ptr(fpstatus
);
7594 tcg_temp_free_i32(tmp2
);
7596 neon_store_reg(rd
, pass
, tmp
);
7599 case 3: /* VQDMLAL scalar */
7600 case 7: /* VQDMLSL scalar */
7601 case 11: /* VQDMULL scalar */
7606 case 2: /* VMLAL sclar */
7607 case 6: /* VMLSL scalar */
7608 case 10: /* VMULL scalar */
7612 tmp2
= neon_get_scalar(size
, rm
);
7613 /* We need a copy of tmp2 because gen_neon_mull
7614 * deletes it during pass 0. */
7615 tmp4
= tcg_temp_new_i32();
7616 tcg_gen_mov_i32(tmp4
, tmp2
);
7617 tmp3
= neon_load_reg(rn
, 1);
7619 for (pass
= 0; pass
< 2; pass
++) {
7621 tmp
= neon_load_reg(rn
, 0);
7626 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
7628 neon_load_reg64(cpu_V1
, rd
+ pass
);
7632 gen_neon_negl(cpu_V0
, size
);
7635 gen_neon_addl(size
);
7638 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
7640 gen_neon_negl(cpu_V0
, size
);
7642 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
7648 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
7653 neon_store_reg64(cpu_V0
, rd
+ pass
);
7656 case 14: /* VQRDMLAH scalar */
7657 case 15: /* VQRDMLSH scalar */
7659 NeonGenThreeOpEnvFn
*fn
;
7661 if (!dc_isar_feature(aa32_rdm
, s
)) {
7664 if (u
&& ((rd
| rn
) & 1)) {
7669 fn
= gen_helper_neon_qrdmlah_s16
;
7671 fn
= gen_helper_neon_qrdmlah_s32
;
7675 fn
= gen_helper_neon_qrdmlsh_s16
;
7677 fn
= gen_helper_neon_qrdmlsh_s32
;
7681 tmp2
= neon_get_scalar(size
, rm
);
7682 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
7683 tmp
= neon_load_reg(rn
, pass
);
7684 tmp3
= neon_load_reg(rd
, pass
);
7685 fn(tmp
, cpu_env
, tmp
, tmp2
, tmp3
);
7686 tcg_temp_free_i32(tmp3
);
7687 neon_store_reg(rd
, pass
, tmp
);
7689 tcg_temp_free_i32(tmp2
);
7693 g_assert_not_reached();
7696 } else { /* size == 3 */
7699 imm
= (insn
>> 8) & 0xf;
7704 if (q
&& ((rd
| rn
| rm
) & 1)) {
7709 neon_load_reg64(cpu_V0
, rn
);
7711 neon_load_reg64(cpu_V1
, rn
+ 1);
7713 } else if (imm
== 8) {
7714 neon_load_reg64(cpu_V0
, rn
+ 1);
7716 neon_load_reg64(cpu_V1
, rm
);
7719 tmp64
= tcg_temp_new_i64();
7721 neon_load_reg64(cpu_V0
, rn
);
7722 neon_load_reg64(tmp64
, rn
+ 1);
7724 neon_load_reg64(cpu_V0
, rn
+ 1);
7725 neon_load_reg64(tmp64
, rm
);
7727 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
7728 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
7729 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
7731 neon_load_reg64(cpu_V1
, rm
);
7733 neon_load_reg64(cpu_V1
, rm
+ 1);
7736 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
7737 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
7738 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
7739 tcg_temp_free_i64(tmp64
);
7742 neon_load_reg64(cpu_V0
, rn
);
7743 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
7744 neon_load_reg64(cpu_V1
, rm
);
7745 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
7746 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
7748 neon_store_reg64(cpu_V0
, rd
);
7750 neon_store_reg64(cpu_V1
, rd
+ 1);
7752 } else if ((insn
& (1 << 11)) == 0) {
7753 /* Two register misc. */
7754 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
7755 size
= (insn
>> 18) & 3;
7756 /* UNDEF for unknown op values and bad op-size combinations */
7757 if ((neon_2rm_sizes
[op
] & (1 << size
)) == 0) {
7760 if (neon_2rm_is_v8_op(op
) &&
7761 !arm_dc_feature(s
, ARM_FEATURE_V8
)) {
7764 if ((op
!= NEON_2RM_VMOVN
&& op
!= NEON_2RM_VQMOVN
) &&
7765 q
&& ((rm
| rd
) & 1)) {
7769 case NEON_2RM_VREV64
:
7770 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
7771 tmp
= neon_load_reg(rm
, pass
* 2);
7772 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
7774 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
7775 case 1: gen_swap_half(tmp
); break;
7776 case 2: /* no-op */ break;
7779 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
7781 neon_store_reg(rd
, pass
* 2, tmp2
);
7784 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
7785 case 1: gen_swap_half(tmp2
); break;
7788 neon_store_reg(rd
, pass
* 2, tmp2
);
7792 case NEON_2RM_VPADDL
: case NEON_2RM_VPADDL_U
:
7793 case NEON_2RM_VPADAL
: case NEON_2RM_VPADAL_U
:
7794 for (pass
= 0; pass
< q
+ 1; pass
++) {
7795 tmp
= neon_load_reg(rm
, pass
* 2);
7796 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
7797 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
7798 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
7800 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
7801 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
7802 case 2: tcg_gen_add_i64(CPU_V001
); break;
7805 if (op
>= NEON_2RM_VPADAL
) {
7807 neon_load_reg64(cpu_V1
, rd
+ pass
);
7808 gen_neon_addl(size
);
7810 neon_store_reg64(cpu_V0
, rd
+ pass
);
7816 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
7817 tmp
= neon_load_reg(rm
, n
);
7818 tmp2
= neon_load_reg(rd
, n
+ 1);
7819 neon_store_reg(rm
, n
, tmp2
);
7820 neon_store_reg(rd
, n
+ 1, tmp
);
7827 if (gen_neon_unzip(rd
, rm
, size
, q
)) {
7832 if (gen_neon_zip(rd
, rm
, size
, q
)) {
7836 case NEON_2RM_VMOVN
: case NEON_2RM_VQMOVN
:
7837 /* also VQMOVUN; op field and mnemonics don't line up */
7842 for (pass
= 0; pass
< 2; pass
++) {
7843 neon_load_reg64(cpu_V0
, rm
+ pass
);
7844 tmp
= tcg_temp_new_i32();
7845 gen_neon_narrow_op(op
== NEON_2RM_VMOVN
, q
, size
,
7850 neon_store_reg(rd
, 0, tmp2
);
7851 neon_store_reg(rd
, 1, tmp
);
7855 case NEON_2RM_VSHLL
:
7856 if (q
|| (rd
& 1)) {
7859 tmp
= neon_load_reg(rm
, 0);
7860 tmp2
= neon_load_reg(rm
, 1);
7861 for (pass
= 0; pass
< 2; pass
++) {
7864 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
7865 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, 8 << size
);
7866 neon_store_reg64(cpu_V0
, rd
+ pass
);
7869 case NEON_2RM_VCVT_F16_F32
:
7874 if (!arm_dc_feature(s
, ARM_FEATURE_VFP_FP16
) ||
7878 tmp
= tcg_temp_new_i32();
7879 tmp2
= tcg_temp_new_i32();
7880 fpst
= get_fpstatus_ptr(true);
7881 ahp
= get_ahp_flag();
7882 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 0));
7883 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, fpst
, ahp
);
7884 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 1));
7885 gen_helper_vfp_fcvt_f32_to_f16(tmp2
, cpu_F0s
, fpst
, ahp
);
7886 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
7887 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
7888 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 2));
7889 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
, fpst
, ahp
);
7890 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 3));
7891 neon_store_reg(rd
, 0, tmp2
);
7892 tmp2
= tcg_temp_new_i32();
7893 gen_helper_vfp_fcvt_f32_to_f16(tmp2
, cpu_F0s
, fpst
, ahp
);
7894 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
7895 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
7896 neon_store_reg(rd
, 1, tmp2
);
7897 tcg_temp_free_i32(tmp
);
7898 tcg_temp_free_i32(ahp
);
7899 tcg_temp_free_ptr(fpst
);
7902 case NEON_2RM_VCVT_F32_F16
:
7906 if (!arm_dc_feature(s
, ARM_FEATURE_VFP_FP16
) ||
7910 fpst
= get_fpstatus_ptr(true);
7911 ahp
= get_ahp_flag();
7912 tmp3
= tcg_temp_new_i32();
7913 tmp
= neon_load_reg(rm
, 0);
7914 tmp2
= neon_load_reg(rm
, 1);
7915 tcg_gen_ext16u_i32(tmp3
, tmp
);
7916 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp3
, fpst
, ahp
);
7917 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 0));
7918 tcg_gen_shri_i32(tmp3
, tmp
, 16);
7919 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp3
, fpst
, ahp
);
7920 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 1));
7921 tcg_temp_free_i32(tmp
);
7922 tcg_gen_ext16u_i32(tmp3
, tmp2
);
7923 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp3
, fpst
, ahp
);
7924 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 2));
7925 tcg_gen_shri_i32(tmp3
, tmp2
, 16);
7926 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp3
, fpst
, ahp
);
7927 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 3));
7928 tcg_temp_free_i32(tmp2
);
7929 tcg_temp_free_i32(tmp3
);
7930 tcg_temp_free_i32(ahp
);
7931 tcg_temp_free_ptr(fpst
);
7934 case NEON_2RM_AESE
: case NEON_2RM_AESMC
:
7935 if (!dc_isar_feature(aa32_aes
, s
) || ((rm
| rd
) & 1)) {
7938 ptr1
= vfp_reg_ptr(true, rd
);
7939 ptr2
= vfp_reg_ptr(true, rm
);
7941 /* Bit 6 is the lowest opcode bit; it distinguishes between
7942 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7944 tmp3
= tcg_const_i32(extract32(insn
, 6, 1));
7946 if (op
== NEON_2RM_AESE
) {
7947 gen_helper_crypto_aese(ptr1
, ptr2
, tmp3
);
7949 gen_helper_crypto_aesmc(ptr1
, ptr2
, tmp3
);
7951 tcg_temp_free_ptr(ptr1
);
7952 tcg_temp_free_ptr(ptr2
);
7953 tcg_temp_free_i32(tmp3
);
7955 case NEON_2RM_SHA1H
:
7956 if (!dc_isar_feature(aa32_sha1
, s
) || ((rm
| rd
) & 1)) {
7959 ptr1
= vfp_reg_ptr(true, rd
);
7960 ptr2
= vfp_reg_ptr(true, rm
);
7962 gen_helper_crypto_sha1h(ptr1
, ptr2
);
7964 tcg_temp_free_ptr(ptr1
);
7965 tcg_temp_free_ptr(ptr2
);
7967 case NEON_2RM_SHA1SU1
:
7968 if ((rm
| rd
) & 1) {
7971 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7973 if (!dc_isar_feature(aa32_sha2
, s
)) {
7976 } else if (!dc_isar_feature(aa32_sha1
, s
)) {
7979 ptr1
= vfp_reg_ptr(true, rd
);
7980 ptr2
= vfp_reg_ptr(true, rm
);
7982 gen_helper_crypto_sha256su0(ptr1
, ptr2
);
7984 gen_helper_crypto_sha1su1(ptr1
, ptr2
);
7986 tcg_temp_free_ptr(ptr1
);
7987 tcg_temp_free_ptr(ptr2
);
7991 tcg_gen_gvec_not(0, rd_ofs
, rm_ofs
, vec_size
, vec_size
);
7994 tcg_gen_gvec_neg(size
, rd_ofs
, rm_ofs
, vec_size
, vec_size
);
7999 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
8000 if (neon_2rm_is_float_op(op
)) {
8001 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
8002 neon_reg_offset(rm
, pass
));
8005 tmp
= neon_load_reg(rm
, pass
);
8008 case NEON_2RM_VREV32
:
8010 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
8011 case 1: gen_swap_half(tmp
); break;
8015 case NEON_2RM_VREV16
:
8020 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
8021 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
8022 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
8028 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
8029 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
8030 case 2: tcg_gen_clzi_i32(tmp
, tmp
, 32); break;
8035 gen_helper_neon_cnt_u8(tmp
, tmp
);
8037 case NEON_2RM_VQABS
:
8040 gen_helper_neon_qabs_s8(tmp
, cpu_env
, tmp
);
8043 gen_helper_neon_qabs_s16(tmp
, cpu_env
, tmp
);
8046 gen_helper_neon_qabs_s32(tmp
, cpu_env
, tmp
);
8051 case NEON_2RM_VQNEG
:
8054 gen_helper_neon_qneg_s8(tmp
, cpu_env
, tmp
);
8057 gen_helper_neon_qneg_s16(tmp
, cpu_env
, tmp
);
8060 gen_helper_neon_qneg_s32(tmp
, cpu_env
, tmp
);
8065 case NEON_2RM_VCGT0
: case NEON_2RM_VCLE0
:
8066 tmp2
= tcg_const_i32(0);
8068 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
8069 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
8070 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
8073 tcg_temp_free_i32(tmp2
);
8074 if (op
== NEON_2RM_VCLE0
) {
8075 tcg_gen_not_i32(tmp
, tmp
);
8078 case NEON_2RM_VCGE0
: case NEON_2RM_VCLT0
:
8079 tmp2
= tcg_const_i32(0);
8081 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
8082 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
8083 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
8086 tcg_temp_free_i32(tmp2
);
8087 if (op
== NEON_2RM_VCLT0
) {
8088 tcg_gen_not_i32(tmp
, tmp
);
8091 case NEON_2RM_VCEQ0
:
8092 tmp2
= tcg_const_i32(0);
8094 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
8095 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
8096 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
8099 tcg_temp_free_i32(tmp2
);
8103 case 0: gen_helper_neon_abs_s8(tmp
, tmp
); break;
8104 case 1: gen_helper_neon_abs_s16(tmp
, tmp
); break;
8105 case 2: tcg_gen_abs_i32(tmp
, tmp
); break;
8109 case NEON_2RM_VCGT0_F
:
8111 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
8112 tmp2
= tcg_const_i32(0);
8113 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
8114 tcg_temp_free_i32(tmp2
);
8115 tcg_temp_free_ptr(fpstatus
);
8118 case NEON_2RM_VCGE0_F
:
8120 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
8121 tmp2
= tcg_const_i32(0);
8122 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
8123 tcg_temp_free_i32(tmp2
);
8124 tcg_temp_free_ptr(fpstatus
);
8127 case NEON_2RM_VCEQ0_F
:
8129 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
8130 tmp2
= tcg_const_i32(0);
8131 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
8132 tcg_temp_free_i32(tmp2
);
8133 tcg_temp_free_ptr(fpstatus
);
8136 case NEON_2RM_VCLE0_F
:
8138 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
8139 tmp2
= tcg_const_i32(0);
8140 gen_helper_neon_cge_f32(tmp
, tmp2
, tmp
, fpstatus
);
8141 tcg_temp_free_i32(tmp2
);
8142 tcg_temp_free_ptr(fpstatus
);
8145 case NEON_2RM_VCLT0_F
:
8147 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
8148 tmp2
= tcg_const_i32(0);
8149 gen_helper_neon_cgt_f32(tmp
, tmp2
, tmp
, fpstatus
);
8150 tcg_temp_free_i32(tmp2
);
8151 tcg_temp_free_ptr(fpstatus
);
8154 case NEON_2RM_VABS_F
:
8157 case NEON_2RM_VNEG_F
:
8161 tmp2
= neon_load_reg(rd
, pass
);
8162 neon_store_reg(rm
, pass
, tmp2
);
8165 tmp2
= neon_load_reg(rd
, pass
);
8167 case 0: gen_neon_trn_u8(tmp
, tmp2
); break;
8168 case 1: gen_neon_trn_u16(tmp
, tmp2
); break;
8171 neon_store_reg(rm
, pass
, tmp2
);
8173 case NEON_2RM_VRINTN
:
8174 case NEON_2RM_VRINTA
:
8175 case NEON_2RM_VRINTM
:
8176 case NEON_2RM_VRINTP
:
8177 case NEON_2RM_VRINTZ
:
8180 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
8183 if (op
== NEON_2RM_VRINTZ
) {
8184 rmode
= FPROUNDING_ZERO
;
8186 rmode
= fp_decode_rm
[((op
& 0x6) >> 1) ^ 1];
8189 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
8190 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
8192 gen_helper_rints(cpu_F0s
, cpu_F0s
, fpstatus
);
8193 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
8195 tcg_temp_free_ptr(fpstatus
);
8196 tcg_temp_free_i32(tcg_rmode
);
8199 case NEON_2RM_VRINTX
:
8201 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
8202 gen_helper_rints_exact(cpu_F0s
, cpu_F0s
, fpstatus
);
8203 tcg_temp_free_ptr(fpstatus
);
8206 case NEON_2RM_VCVTAU
:
8207 case NEON_2RM_VCVTAS
:
8208 case NEON_2RM_VCVTNU
:
8209 case NEON_2RM_VCVTNS
:
8210 case NEON_2RM_VCVTPU
:
8211 case NEON_2RM_VCVTPS
:
8212 case NEON_2RM_VCVTMU
:
8213 case NEON_2RM_VCVTMS
:
8215 bool is_signed
= !extract32(insn
, 7, 1);
8216 TCGv_ptr fpst
= get_fpstatus_ptr(1);
8217 TCGv_i32 tcg_rmode
, tcg_shift
;
8218 int rmode
= fp_decode_rm
[extract32(insn
, 8, 2)];
8220 tcg_shift
= tcg_const_i32(0);
8221 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
8222 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
8226 gen_helper_vfp_tosls(cpu_F0s
, cpu_F0s
,
8229 gen_helper_vfp_touls(cpu_F0s
, cpu_F0s
,
8233 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
8235 tcg_temp_free_i32(tcg_rmode
);
8236 tcg_temp_free_i32(tcg_shift
);
8237 tcg_temp_free_ptr(fpst
);
8240 case NEON_2RM_VRECPE
:
8242 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
8243 gen_helper_recpe_u32(tmp
, tmp
, fpstatus
);
8244 tcg_temp_free_ptr(fpstatus
);
8247 case NEON_2RM_VRSQRTE
:
8249 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
8250 gen_helper_rsqrte_u32(tmp
, tmp
, fpstatus
);
8251 tcg_temp_free_ptr(fpstatus
);
8254 case NEON_2RM_VRECPE_F
:
8256 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
8257 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, fpstatus
);
8258 tcg_temp_free_ptr(fpstatus
);
8261 case NEON_2RM_VRSQRTE_F
:
8263 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
8264 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, fpstatus
);
8265 tcg_temp_free_ptr(fpstatus
);
8268 case NEON_2RM_VCVT_FS
: /* VCVT.F32.S32 */
8271 case NEON_2RM_VCVT_FU
: /* VCVT.F32.U32 */
8274 case NEON_2RM_VCVT_SF
: /* VCVT.S32.F32 */
8275 gen_vfp_tosiz(0, 1);
8277 case NEON_2RM_VCVT_UF
: /* VCVT.U32.F32 */
8278 gen_vfp_touiz(0, 1);
8281 /* Reserved op values were caught by the
8282 * neon_2rm_sizes[] check earlier.
8286 if (neon_2rm_is_float_op(op
)) {
8287 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
8288 neon_reg_offset(rd
, pass
));
8290 neon_store_reg(rd
, pass
, tmp
);
8295 } else if ((insn
& (1 << 10)) == 0) {
8297 int n
= ((insn
>> 8) & 3) + 1;
8298 if ((rn
+ n
) > 32) {
8299 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
8300 * helper function running off the end of the register file.
8305 if (insn
& (1 << 6)) {
8306 tmp
= neon_load_reg(rd
, 0);
8308 tmp
= tcg_temp_new_i32();
8309 tcg_gen_movi_i32(tmp
, 0);
8311 tmp2
= neon_load_reg(rm
, 0);
8312 ptr1
= vfp_reg_ptr(true, rn
);
8313 tmp5
= tcg_const_i32(n
);
8314 gen_helper_neon_tbl(tmp2
, tmp2
, tmp
, ptr1
, tmp5
);
8315 tcg_temp_free_i32(tmp
);
8316 if (insn
& (1 << 6)) {
8317 tmp
= neon_load_reg(rd
, 1);
8319 tmp
= tcg_temp_new_i32();
8320 tcg_gen_movi_i32(tmp
, 0);
8322 tmp3
= neon_load_reg(rm
, 1);
8323 gen_helper_neon_tbl(tmp3
, tmp3
, tmp
, ptr1
, tmp5
);
8324 tcg_temp_free_i32(tmp5
);
8325 tcg_temp_free_ptr(ptr1
);
8326 neon_store_reg(rd
, 0, tmp2
);
8327 neon_store_reg(rd
, 1, tmp3
);
8328 tcg_temp_free_i32(tmp
);
8329 } else if ((insn
& 0x380) == 0) {
8334 if ((insn
& (7 << 16)) == 0 || (q
&& (rd
& 1))) {
8337 if (insn
& (1 << 16)) {
8339 element
= (insn
>> 17) & 7;
8340 } else if (insn
& (1 << 17)) {
8342 element
= (insn
>> 18) & 3;
8345 element
= (insn
>> 19) & 1;
8347 tcg_gen_gvec_dup_mem(size
, neon_reg_offset(rd
, 0),
8348 neon_element_offset(rm
, element
, size
),
8349 q
? 16 : 8, q
? 16 : 8);
/* Advanced SIMD three registers of the same length extension.
 *  31           25    23  22    20   16   12  11   10   9    8        3     0
 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
 */
static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
{
    gen_helper_gvec_3 *fn_gvec = NULL;
    gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
    int rd, rn, rm, opr_sz;
    int data = 0;
    bool q;

    q = extract32(insn, 6, 1);
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    if ((rd | rn | rm) & q) {
        return 1;
    }

    if ((insn & 0xfe200f10) == 0xfc200800) {
        /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
        int size = extract32(insn, 20, 1);
        data = extract32(insn, 23, 2); /* rot */
        if (!dc_isar_feature(aa32_vcma, s)
            || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
            return 1;
        }
        fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
    } else if ((insn & 0xfea00f10) == 0xfc800800) {
        /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
        int size = extract32(insn, 20, 1);
        data = extract32(insn, 24, 1); /* rot */
        if (!dc_isar_feature(aa32_vcma, s)
            || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
            return 1;
        }
        fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
    } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
        /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
        bool u = extract32(insn, 4, 1);
        if (!dc_isar_feature(aa32_dp, s)) {
            return 1;
        }
        fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
    } else {
        return 1;
    }

    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }
    if (!s->vfp_enabled) {
        return 1;
    }

    opr_sz = (1 + q) * 8;
    if (fn_gvec_ptr) {
        TCGv_ptr fpst = get_fpstatus_ptr(1);
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm), fpst,
                           opr_sz, opr_sz, data, fn_gvec_ptr);
        tcg_temp_free_ptr(fpst);
    } else {
        tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm),
                           opr_sz, opr_sz, data, fn_gvec);
    }
    return 0;
}

/* Advanced SIMD two registers and a scalar extension.
 *  31             24   23  22   20   16   12  11   10   9    8        3     0
 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
 */
static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
{
    gen_helper_gvec_3 *fn_gvec = NULL;
    gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
    int rd, rn, rm, opr_sz, data;
    bool q;

    q = extract32(insn, 6, 1);
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    if ((rd | rn) & q) {
        return 1;
    }

    if ((insn & 0xff000f10) == 0xfe000800) {
        /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
        int rot = extract32(insn, 20, 2);
        int size = extract32(insn, 23, 1);
        int index;

        if (!dc_isar_feature(aa32_vcma, s)) {
            return 1;
        }
        if (size == 0) {
            if (!dc_isar_feature(aa32_fp16_arith, s)) {
                return 1;
            }
            /* For fp16, rm is just Vm, and index is M. */
            rm = extract32(insn, 0, 4);
            index = extract32(insn, 5, 1);
        } else {
            /* For fp32, rm is the usual M:Vm, and index is 0. */
            VFP_DREG_M(rm, insn);
            index = 0;
        }
        data = (index << 2) | rot;
        fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
                       : gen_helper_gvec_fcmlah_idx);
    } else if ((insn & 0xffb00f00) == 0xfe200d00) {
        /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
        int u = extract32(insn, 4, 1);
        if (!dc_isar_feature(aa32_dp, s)) {
            return 1;
        }
        fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
        /* rm is just Vm, and index is M. */
        data = extract32(insn, 5, 1); /* index */
        rm = extract32(insn, 0, 4);
    } else {
        return 1;
    }

    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }
    if (!s->vfp_enabled) {
        return 1;
    }

    opr_sz = (1 + q) * 8;
    if (fn_gvec_ptr) {
        TCGv_ptr fpst = get_fpstatus_ptr(1);
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm), fpst,
                           opr_sz, opr_sz, data, fn_gvec_ptr);
        tcg_temp_free_ptr(fpst);
    } else {
        tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm),
                           opr_sz, opr_sz, data, fn_gvec);
    }
    return 0;
}
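
/* Decode a coprocessor (cp0..cp15) register access insn: either dispatch to
 * the XScale/iwMMXt handlers, or treat it as a generic system register
 * access and look the register up in the cpregs table.
 */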
8521 static int disas_coproc_insn(DisasContext
*s
, uint32_t insn
)
8523 int cpnum
, is64
, crn
, crm
, opc1
, opc2
, isread
, rt
, rt2
;
8524 const ARMCPRegInfo
*ri
;
8526 cpnum
= (insn
>> 8) & 0xf;
8528 /* First check for coprocessor space used for XScale/iwMMXt insns */
8529 if (arm_dc_feature(s
, ARM_FEATURE_XSCALE
) && (cpnum
< 2)) {
8530 if (extract32(s
->c15_cpar
, cpnum
, 1) == 0) {
8533 if (arm_dc_feature(s
, ARM_FEATURE_IWMMXT
)) {
8534 return disas_iwmmxt_insn(s
, insn
);
8535 } else if (arm_dc_feature(s
, ARM_FEATURE_XSCALE
)) {
8536 return disas_dsp_insn(s
, insn
);
8541 /* Otherwise treat as a generic register access */
8542 is64
= (insn
& (1 << 25)) == 0;
8543 if (!is64
&& ((insn
& (1 << 4)) == 0)) {
8551 opc1
= (insn
>> 4) & 0xf;
8553 rt2
= (insn
>> 16) & 0xf;
8555 crn
= (insn
>> 16) & 0xf;
8556 opc1
= (insn
>> 21) & 7;
8557 opc2
= (insn
>> 5) & 7;
8560 isread
= (insn
>> 20) & 1;
8561 rt
= (insn
>> 12) & 0xf;
8563 ri
= get_arm_cp_reginfo(s
->cp_regs
,
8564 ENCODE_CP_REG(cpnum
, is64
, s
->ns
, crn
, crm
, opc1
, opc2
));
8566 /* Check access permissions */
8567 if (!cp_access_ok(s
->current_el
, ri
, isread
)) {
8572 (arm_dc_feature(s
, ARM_FEATURE_XSCALE
) && cpnum
< 14)) {
8573 /* Emit code to perform further access permissions checks at
8574 * runtime; this may result in an exception.
8575 * Note that on XScale all cp0..c13 registers do an access check
8576 * call in order to handle c15_cpar.
8579 TCGv_i32 tcg_syn
, tcg_isread
;
8582 /* Note that since we are an implementation which takes an
8583 * exception on a trapped conditional instruction only if the
8584 * instruction passes its condition code check, we can take
8585 * advantage of the clause in the ARM ARM that allows us to set
8586 * the COND field in the instruction to 0xE in all cases.
8587 * We could fish the actual condition out of the insn (ARM)
8588 * or the condexec bits (Thumb) but it isn't necessary.
8593 syndrome
= syn_cp14_rrt_trap(1, 0xe, opc1
, crm
, rt
, rt2
,
8596 syndrome
= syn_cp14_rt_trap(1, 0xe, opc1
, opc2
, crn
, crm
,
8602 syndrome
= syn_cp15_rrt_trap(1, 0xe, opc1
, crm
, rt
, rt2
,
8605 syndrome
= syn_cp15_rt_trap(1, 0xe, opc1
, opc2
, crn
, crm
,
8610 /* ARMv8 defines that only coprocessors 14 and 15 exist,
8611 * so this can only happen if this is an ARMv7 or earlier CPU,
8612 * in which case the syndrome information won't actually be
8615 assert(!arm_dc_feature(s
, ARM_FEATURE_V8
));
8616 syndrome
= syn_uncategorized();
8620 gen_set_condexec(s
);
8621 gen_set_pc_im(s
, s
->pc
- 4);
8622 tmpptr
= tcg_const_ptr(ri
);
8623 tcg_syn
= tcg_const_i32(syndrome
);
8624 tcg_isread
= tcg_const_i32(isread
);
8625 gen_helper_access_check_cp_reg(cpu_env
, tmpptr
, tcg_syn
,
8627 tcg_temp_free_ptr(tmpptr
);
8628 tcg_temp_free_i32(tcg_syn
);
8629 tcg_temp_free_i32(tcg_isread
);
8632 /* Handle special cases first */
8633 switch (ri
->type
& ~(ARM_CP_FLAG_MASK
& ~ARM_CP_SPECIAL
)) {
8640 gen_set_pc_im(s
, s
->pc
);
8641 s
->base
.is_jmp
= DISAS_WFI
;
8647 if ((tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) && (ri
->type
& ARM_CP_IO
)) {
8656 if (ri
->type
& ARM_CP_CONST
) {
8657 tmp64
= tcg_const_i64(ri
->resetvalue
);
8658 } else if (ri
->readfn
) {
8660 tmp64
= tcg_temp_new_i64();
8661 tmpptr
= tcg_const_ptr(ri
);
8662 gen_helper_get_cp_reg64(tmp64
, cpu_env
, tmpptr
);
8663 tcg_temp_free_ptr(tmpptr
);
8665 tmp64
= tcg_temp_new_i64();
8666 tcg_gen_ld_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
8668 tmp
= tcg_temp_new_i32();
8669 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
8670 store_reg(s
, rt
, tmp
);
8671 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
8672 tmp
= tcg_temp_new_i32();
8673 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
8674 tcg_temp_free_i64(tmp64
);
8675 store_reg(s
, rt2
, tmp
);
8678 if (ri
->type
& ARM_CP_CONST
) {
8679 tmp
= tcg_const_i32(ri
->resetvalue
);
8680 } else if (ri
->readfn
) {
8682 tmp
= tcg_temp_new_i32();
8683 tmpptr
= tcg_const_ptr(ri
);
8684 gen_helper_get_cp_reg(tmp
, cpu_env
, tmpptr
);
8685 tcg_temp_free_ptr(tmpptr
);
8687 tmp
= load_cpu_offset(ri
->fieldoffset
);
8690 /* Destination register of r15 for 32 bit loads sets
8691 * the condition codes from the high 4 bits of the value
8694 tcg_temp_free_i32(tmp
);
8696 store_reg(s
, rt
, tmp
);
8701 if (ri
->type
& ARM_CP_CONST
) {
8702 /* If not forbidden by access permissions, treat as WI */
8707 TCGv_i32 tmplo
, tmphi
;
8708 TCGv_i64 tmp64
= tcg_temp_new_i64();
8709 tmplo
= load_reg(s
, rt
);
8710 tmphi
= load_reg(s
, rt2
);
8711 tcg_gen_concat_i32_i64(tmp64
, tmplo
, tmphi
);
8712 tcg_temp_free_i32(tmplo
);
8713 tcg_temp_free_i32(tmphi
);
8715 TCGv_ptr tmpptr
= tcg_const_ptr(ri
);
8716 gen_helper_set_cp_reg64(cpu_env
, tmpptr
, tmp64
);
8717 tcg_temp_free_ptr(tmpptr
);
8719 tcg_gen_st_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
8721 tcg_temp_free_i64(tmp64
);
8726 tmp
= load_reg(s
, rt
);
8727 tmpptr
= tcg_const_ptr(ri
);
8728 gen_helper_set_cp_reg(cpu_env
, tmpptr
, tmp
);
8729 tcg_temp_free_ptr(tmpptr
);
8730 tcg_temp_free_i32(tmp
);
8732 TCGv_i32 tmp
= load_reg(s
, rt
);
8733 store_cpu_offset(tmp
, ri
->fieldoffset
);
8738 if ((tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) && (ri
->type
& ARM_CP_IO
)) {
8739 /* I/O operations must end the TB here (whether read or write) */
8742 } else if (!isread
&& !(ri
->type
& ARM_CP_SUPPRESS_TB_END
)) {
8743 /* We default to ending the TB on a coprocessor register write,
8744 * but allow this to be suppressed by the register definition
8745 * (usually only necessary to work around guest bugs).
8753 /* Unknown register; this might be a guest error or a QEMU
8754 * unimplemented feature.
8757 qemu_log_mask(LOG_UNIMP
, "%s access to unsupported AArch32 "
8758 "64 bit system register cp:%d opc1: %d crm:%d "
8760 isread
? "read" : "write", cpnum
, opc1
, crm
,
8761 s
->ns
? "non-secure" : "secure");
8763 qemu_log_mask(LOG_UNIMP
, "%s access to unsupported AArch32 "
8764 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
8766 isread
? "read" : "write", cpnum
, opc1
, crn
, crm
, opc2
,
8767 s
->ns
? "non-secure" : "secure");
/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_extrl_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}

/* load a 32-bit value from a register and perform a 64-bit accumulate. */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp2;

    /* Load value and extend to 64 bits.  */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}

/* load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv_i32 tmpl;
    TCGv_i32 tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}

/* Set N and Z flags from hi|lo.  */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
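/* Note that cpu_ZF stores the zero test inverted (Z is set iff cpu_ZF == 0),
 * so OR-ing the two halves is exactly the 64-bit zero test, and NF takes
 * its sign bit from the high word.
 */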

/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.  The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually.  */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. That means we don't want to do a
         * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
         * for an architecturally 64-bit access, but instead do a
         * 64-bit access using MO_BE if appropriate and then split
         * the two halves.
         * This only makes a difference for BE32 user-mode, where
         * frob64() must not flip the two halves of the 64-bit data
         * but this code must treat BE32 user-mode like BE32 system.
         */
        TCGv taddr = gen_aa32_addr(s, addr, opc);

        tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
        tcg_temp_free(taddr);
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        if (s->be_data == MO_BE) {
            tcg_gen_extr_i64_i32(tmp2, tmp, t64);
        } else {
            tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        }
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
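
/* CLREX: invalidate the recorded exclusive address, so that any subsequent
 * store-exclusive fails its address comparison.
 */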
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}

static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();
    t1 = load_reg(s, rt);
    if (size == 3) {
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        t2 = load_reg(s, rt2);
        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. Since we're going to treat this as a
         * single 64-bit BE store, we need to put the two halves in the
         * opposite order for BE to LE, so that they end up in the right
         * places.
         * We don't want gen_aa32_frob64() because that does the wrong
         * thing for BE32 usermode.
         */
        if (s->be_data == MO_BE) {
            tcg_gen_concat_i32_i64(n64, t2, t1);
        } else {
            tcg_gen_concat_i32_i64(n64, t1, t2);
        }
        tcg_temp_free_i32(t2);

        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        t2 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
8960 * @mode: mode field from insn (which stack to store to)
8961 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
8962 * @writeback: true if writeback bit set
8964 * Generate code for the SRS (Store Return State) insn.
8966 static void gen_srs(DisasContext
*s
,
8967 uint32_t mode
, uint32_t amode
, bool writeback
)
8974 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
8975 * and specified mode is monitor mode
8976 * - UNDEFINED in Hyp mode
8977 * - UNPREDICTABLE in User or System mode
8978 * - UNPREDICTABLE if the specified mode is:
8979 * -- not implemented
8980 * -- not a valid mode number
8981 * -- a mode that's at a higher exception level
8982 * -- Monitor, if we are Non-secure
8983 * For the UNPREDICTABLE cases we choose to UNDEF.
8985 if (s
->current_el
== 1 && !s
->ns
&& mode
== ARM_CPU_MODE_MON
) {
8986 gen_exception_insn(s
, 4, EXCP_UDEF
, syn_uncategorized(), 3);
8990 if (s
->current_el
== 0 || s
->current_el
== 2) {
8995 case ARM_CPU_MODE_USR
:
8996 case ARM_CPU_MODE_FIQ
:
8997 case ARM_CPU_MODE_IRQ
:
8998 case ARM_CPU_MODE_SVC
:
8999 case ARM_CPU_MODE_ABT
:
9000 case ARM_CPU_MODE_UND
:
9001 case ARM_CPU_MODE_SYS
:
9003 case ARM_CPU_MODE_HYP
:
9004 if (s
->current_el
== 1 || !arm_dc_feature(s
, ARM_FEATURE_EL2
)) {
9008 case ARM_CPU_MODE_MON
:
9009 /* No need to check specifically for "are we non-secure" because
9010 * we've already made EL0 UNDEF and handled the trap for S-EL1;
9011 * so if this isn't EL3 then we must be non-secure.
9013 if (s
->current_el
!= 3) {
9022 gen_exception_insn(s
, 4, EXCP_UDEF
, syn_uncategorized(),
9023 default_exception_el(s
));
9027 addr
= tcg_temp_new_i32();
9028 tmp
= tcg_const_i32(mode
);
9029 /* get_r13_banked() will raise an exception if called from System mode */
9030 gen_set_condexec(s
);
9031 gen_set_pc_im(s
, s
->pc
- 4);
9032 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
9033 tcg_temp_free_i32(tmp
);
9050 tcg_gen_addi_i32(addr
, addr
, offset
);
9051 tmp
= load_reg(s
, 14);
9052 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9053 tcg_temp_free_i32(tmp
);
9054 tmp
= load_cpu_field(spsr
);
9055 tcg_gen_addi_i32(addr
, addr
, 4);
9056 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9057 tcg_temp_free_i32(tmp
);
9075 tcg_gen_addi_i32(addr
, addr
, offset
);
9076 tmp
= tcg_const_i32(mode
);
9077 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
9078 tcg_temp_free_i32(tmp
);
9080 tcg_temp_free_i32(addr
);
9081 s
->base
.is_jmp
= DISAS_UPDATE
;
/* Generate a label used for skipping this instruction */
static void arm_gen_condlabel(DisasContext *s)
{
    if (!s->condjmp) {
        s->condlabel = gen_new_label();
        s->condjmp = 1;
    }
}

/* Skip this instruction if the ARM condition is false */
static void arm_skip_unless(DisasContext *s, uint32_t cond)
{
    arm_gen_condlabel(s);
    arm_gen_test_cc(cond ^ 1, s->condlabel);
}
9100 static void disas_arm_insn(DisasContext
*s
, unsigned int insn
)
9102 unsigned int cond
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
9109 /* M variants do not implement ARM mode; this must raise the INVSTATE
9110 * UsageFault exception.
9112 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
9113 gen_exception_insn(s
, 4, EXCP_INVSTATE
, syn_uncategorized(),
9114 default_exception_el(s
));
9119 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
9120 * choose to UNDEF. In ARMv5 and above the space is used
9121 * for miscellaneous unconditional instructions.
9125 /* Unconditional instructions. */
9126 if (((insn
>> 25) & 7) == 1) {
9127 /* NEON Data processing. */
9128 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
9132 if (disas_neon_data_insn(s
, insn
)) {
9137 if ((insn
& 0x0f100000) == 0x04000000) {
9138 /* NEON load/store. */
9139 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
9143 if (disas_neon_ls_insn(s
, insn
)) {
9148 if ((insn
& 0x0f000e10) == 0x0e000a00) {
9150 if (disas_vfp_insn(s
, insn
)) {
9155 if (((insn
& 0x0f30f000) == 0x0510f000) ||
9156 ((insn
& 0x0f30f010) == 0x0710f000)) {
9157 if ((insn
& (1 << 22)) == 0) {
9159 if (!arm_dc_feature(s
, ARM_FEATURE_V7MP
)) {
9163 /* Otherwise PLD; v5TE+ */
9167 if (((insn
& 0x0f70f000) == 0x0450f000) ||
9168 ((insn
& 0x0f70f010) == 0x0650f000)) {
9170 return; /* PLI; V7 */
9172 if (((insn
& 0x0f700000) == 0x04100000) ||
9173 ((insn
& 0x0f700010) == 0x06100000)) {
9174 if (!arm_dc_feature(s
, ARM_FEATURE_V7MP
)) {
9177 return; /* v7MP: Unallocated memory hint: must NOP */
9180 if ((insn
& 0x0ffffdff) == 0x01010000) {
9183 if (((insn
>> 9) & 1) != !!(s
->be_data
== MO_BE
)) {
9184 gen_helper_setend(cpu_env
);
9185 s
->base
.is_jmp
= DISAS_UPDATE
;
9188 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
9189 switch ((insn
>> 4) & 0xf) {
9197 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
9200 /* We need to break the TB after this insn to execute
9201 * self-modifying code correctly and also to take
9202 * any pending interrupts immediately.
9204 gen_goto_tb(s
, 0, s
->pc
& ~1);
9209 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
9212 gen_srs(s
, (insn
& 0x1f), (insn
>> 23) & 3, insn
& (1 << 21));
9214 } else if ((insn
& 0x0e50ffe0) == 0x08100a00) {
9220 rn
= (insn
>> 16) & 0xf;
9221 addr
= load_reg(s
, rn
);
9222 i
= (insn
>> 23) & 3;
9224 case 0: offset
= -4; break; /* DA */
9225 case 1: offset
= 0; break; /* IA */
9226 case 2: offset
= -8; break; /* DB */
9227 case 3: offset
= 4; break; /* IB */
9231 tcg_gen_addi_i32(addr
, addr
, offset
);
9232 /* Load PC into tmp and CPSR into tmp2. */
9233 tmp
= tcg_temp_new_i32();
9234 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9235 tcg_gen_addi_i32(addr
, addr
, 4);
9236 tmp2
= tcg_temp_new_i32();
9237 gen_aa32_ld32u(s
, tmp2
, addr
, get_mem_index(s
));
9238 if (insn
& (1 << 21)) {
9239 /* Base writeback. */
9241 case 0: offset
= -8; break;
9242 case 1: offset
= 4; break;
9243 case 2: offset
= -4; break;
9244 case 3: offset
= 0; break;
9248 tcg_gen_addi_i32(addr
, addr
, offset
);
9249 store_reg(s
, rn
, addr
);
9251 tcg_temp_free_i32(addr
);
9253 gen_rfe(s
, tmp
, tmp2
);
9255 } else if ((insn
& 0x0e000000) == 0x0a000000) {
9256 /* branch link and change to thumb (blx <offset>) */
9259 val
= (uint32_t)s
->pc
;
9260 tmp
= tcg_temp_new_i32();
9261 tcg_gen_movi_i32(tmp
, val
);
9262 store_reg(s
, 14, tmp
);
9263 /* Sign-extend the 24-bit offset */
9264 offset
= (((int32_t)insn
) << 8) >> 8;
9265 /* offset * 4 + bit24 * 2 + (thumb bit) */
9266 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
9267 /* pipeline offset */
9269 /* protected by ARCH(5); above, near the start of uncond block */
9272 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
9273 if (arm_dc_feature(s
, ARM_FEATURE_IWMMXT
)) {
9274 /* iWMMXt register transfer. */
9275 if (extract32(s
->c15_cpar
, 1, 1)) {
9276 if (!disas_iwmmxt_insn(s
, insn
)) {
9281 } else if ((insn
& 0x0e000a00) == 0x0c000800
9282 && arm_dc_feature(s
, ARM_FEATURE_V8
)) {
9283 if (disas_neon_insn_3same_ext(s
, insn
)) {
9287 } else if ((insn
& 0x0f000a00) == 0x0e000800
9288 && arm_dc_feature(s
, ARM_FEATURE_V8
)) {
9289 if (disas_neon_insn_2reg_scalar_ext(s
, insn
)) {
9293 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
9294 /* Coprocessor double register transfer. */
9296 } else if ((insn
& 0x0f000010) == 0x0e000010) {
9297 /* Additional coprocessor register transfer. */
9298 } else if ((insn
& 0x0ff10020) == 0x01000000) {
9301 /* cps (privileged) */
9305 if (insn
& (1 << 19)) {
9306 if (insn
& (1 << 8))
9308 if (insn
& (1 << 7))
9310 if (insn
& (1 << 6))
9312 if (insn
& (1 << 18))
9315 if (insn
& (1 << 17)) {
9317 val
|= (insn
& 0x1f);
9320 gen_set_psr_im(s
, mask
, 0, val
);
9327 /* if not always execute, we generate a conditional jump to
9329 arm_skip_unless(s
, cond
);
9331 if ((insn
& 0x0f900000) == 0x03000000) {
9332 if ((insn
& (1 << 21)) == 0) {
9334 rd
= (insn
>> 12) & 0xf;
9335 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
9336 if ((insn
& (1 << 22)) == 0) {
9338 tmp
= tcg_temp_new_i32();
9339 tcg_gen_movi_i32(tmp
, val
);
9342 tmp
= load_reg(s
, rd
);
9343 tcg_gen_ext16u_i32(tmp
, tmp
);
9344 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
9346 store_reg(s
, rd
, tmp
);
9348 if (((insn
>> 12) & 0xf) != 0xf)
9350 if (((insn
>> 16) & 0xf) == 0) {
9351 gen_nop_hint(s
, insn
& 0xff);
9353 /* CPSR = immediate */
9355 shift
= ((insn
>> 8) & 0xf) * 2;
9357 val
= (val
>> shift
) | (val
<< (32 - shift
));
9358 i
= ((insn
& (1 << 22)) != 0);
9359 if (gen_set_psr_im(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
),
9365 } else if ((insn
& 0x0f900000) == 0x01000000
9366 && (insn
& 0x00000090) != 0x00000090) {
9367 /* miscellaneous instructions */
9368 op1
= (insn
>> 21) & 3;
9369 sh
= (insn
>> 4) & 0xf;
9372 case 0x0: /* MSR, MRS */
9373 if (insn
& (1 << 9)) {
9374 /* MSR (banked) and MRS (banked) */
9375 int sysm
= extract32(insn
, 16, 4) |
9376 (extract32(insn
, 8, 1) << 4);
9377 int r
= extract32(insn
, 22, 1);
9381 gen_msr_banked(s
, r
, sysm
, rm
);
9384 int rd
= extract32(insn
, 12, 4);
9386 gen_mrs_banked(s
, r
, sysm
, rd
);
9391 /* MSR, MRS (for PSRs) */
9394 tmp
= load_reg(s
, rm
);
9395 i
= ((op1
& 2) != 0);
9396 if (gen_set_psr(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
9400 rd
= (insn
>> 12) & 0xf;
9404 tmp
= load_cpu_field(spsr
);
9406 tmp
= tcg_temp_new_i32();
9407 gen_helper_cpsr_read(tmp
, cpu_env
);
9409 store_reg(s
, rd
, tmp
);
9414 /* branch/exchange thumb (bx). */
9416 tmp
= load_reg(s
, rm
);
9418 } else if (op1
== 3) {
9421 rd
= (insn
>> 12) & 0xf;
9422 tmp
= load_reg(s
, rm
);
9423 tcg_gen_clzi_i32(tmp
, tmp
, 32);
9424 store_reg(s
, rd
, tmp
);
9432 /* Trivial implementation equivalent to bx. */
9433 tmp
= load_reg(s
, rm
);
9444 /* branch link/exchange thumb (blx) */
9445 tmp
= load_reg(s
, rm
);
9446 tmp2
= tcg_temp_new_i32();
9447 tcg_gen_movi_i32(tmp2
, s
->pc
);
9448 store_reg(s
, 14, tmp2
);
9454 uint32_t c
= extract32(insn
, 8, 4);
9456 /* Check this CPU supports ARMv8 CRC instructions.
9457 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
9458 * Bits 8, 10 and 11 should be zero.
9460 if (!dc_isar_feature(aa32_crc32
, s
) || op1
== 0x3 || (c
& 0xd) != 0) {
9464 rn
= extract32(insn
, 16, 4);
9465 rd
= extract32(insn
, 12, 4);
9467 tmp
= load_reg(s
, rn
);
9468 tmp2
= load_reg(s
, rm
);
9470 tcg_gen_andi_i32(tmp2
, tmp2
, 0xff);
9471 } else if (op1
== 1) {
9472 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff);
9474 tmp3
= tcg_const_i32(1 << op1
);
9476 gen_helper_crc32c(tmp
, tmp
, tmp2
, tmp3
);
9478 gen_helper_crc32(tmp
, tmp
, tmp2
, tmp3
);
9480 tcg_temp_free_i32(tmp2
);
9481 tcg_temp_free_i32(tmp3
);
9482 store_reg(s
, rd
, tmp
);
9485 case 0x5: /* saturating add/subtract */
9487 rd
= (insn
>> 12) & 0xf;
9488 rn
= (insn
>> 16) & 0xf;
9489 tmp
= load_reg(s
, rm
);
9490 tmp2
= load_reg(s
, rn
);
9492 gen_helper_double_saturate(tmp2
, cpu_env
, tmp2
);
9494 gen_helper_sub_saturate(tmp
, cpu_env
, tmp
, tmp2
);
9496 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp2
);
9497 tcg_temp_free_i32(tmp2
);
9498 store_reg(s
, rd
, tmp
);
9500 case 0x6: /* ERET */
9504 if (!arm_dc_feature(s
, ARM_FEATURE_V7VE
)) {
9507 if ((insn
& 0x000fff0f) != 0x0000000e) {
9508 /* UNPREDICTABLE; we choose to UNDEF */
9512 if (s
->current_el
== 2) {
9513 tmp
= load_cpu_field(elr_el
[2]);
9515 tmp
= load_reg(s
, 14);
9517 gen_exception_return(s
, tmp
);
9521 int imm16
= extract32(insn
, 0, 4) | (extract32(insn
, 8, 12) << 4);
9530 gen_exception_bkpt_insn(s
, 4, syn_aa32_bkpt(imm16
, false));
9533 /* Hypervisor call (v7) */
9541 /* Secure monitor call (v6+) */
9549 g_assert_not_reached();
9553 case 0x8: /* signed multiply */
9558 rs
= (insn
>> 8) & 0xf;
9559 rn
= (insn
>> 12) & 0xf;
9560 rd
= (insn
>> 16) & 0xf;
9562 /* (32 * 16) >> 16 */
9563 tmp
= load_reg(s
, rm
);
9564 tmp2
= load_reg(s
, rs
);
9566 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
9569 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
9570 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
9571 tmp
= tcg_temp_new_i32();
9572 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
9573 tcg_temp_free_i64(tmp64
);
9574 if ((sh
& 2) == 0) {
9575 tmp2
= load_reg(s
, rn
);
9576 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
9577 tcg_temp_free_i32(tmp2
);
9579 store_reg(s
, rd
, tmp
);
9582 tmp
= load_reg(s
, rm
);
9583 tmp2
= load_reg(s
, rs
);
9584 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
9585 tcg_temp_free_i32(tmp2
);
9587 tmp64
= tcg_temp_new_i64();
9588 tcg_gen_ext_i32_i64(tmp64
, tmp
);
9589 tcg_temp_free_i32(tmp
);
9590 gen_addq(s
, tmp64
, rn
, rd
);
9591 gen_storeq_reg(s
, rn
, rd
, tmp64
);
9592 tcg_temp_free_i64(tmp64
);
9595 tmp2
= load_reg(s
, rn
);
9596 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
9597 tcg_temp_free_i32(tmp2
);
9599 store_reg(s
, rd
, tmp
);
9606 } else if (((insn
& 0x0e000000) == 0 &&
9607 (insn
& 0x00000090) != 0x90) ||
9608 ((insn
& 0x0e000000) == (1 << 25))) {
9609 int set_cc
, logic_cc
, shiftop
;
9611 op1
= (insn
>> 21) & 0xf;
9612 set_cc
= (insn
>> 20) & 1;
9613 logic_cc
= table_logic_cc
[op1
] & set_cc
;
9615 /* data processing instruction */
9616 if (insn
& (1 << 25)) {
9617 /* immediate operand */
9619 shift
= ((insn
>> 8) & 0xf) * 2;
9621 val
= (val
>> shift
) | (val
<< (32 - shift
));
9623 tmp2
= tcg_temp_new_i32();
9624 tcg_gen_movi_i32(tmp2
, val
);
9625 if (logic_cc
&& shift
) {
9626 gen_set_CF_bit31(tmp2
);
9631 tmp2
= load_reg(s
, rm
);
9632 shiftop
= (insn
>> 5) & 3;
9633 if (!(insn
& (1 << 4))) {
9634 shift
= (insn
>> 7) & 0x1f;
9635 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
9637 rs
= (insn
>> 8) & 0xf;
9638 tmp
= load_reg(s
, rs
);
9639 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
9642 if (op1
!= 0x0f && op1
!= 0x0d) {
9643 rn
= (insn
>> 16) & 0xf;
9644 tmp
= load_reg(s
, rn
);
9648 rd
= (insn
>> 12) & 0xf;
9651 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
9655 store_reg_bx(s
, rd
, tmp
);
9658 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
9662 store_reg_bx(s
, rd
, tmp
);
9665 if (set_cc
&& rd
== 15) {
9666 /* SUBS r15, ... is used for exception return. */
9670 gen_sub_CC(tmp
, tmp
, tmp2
);
9671 gen_exception_return(s
, tmp
);
9674 gen_sub_CC(tmp
, tmp
, tmp2
);
9676 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
9678 store_reg_bx(s
, rd
, tmp
);
9683 gen_sub_CC(tmp
, tmp2
, tmp
);
9685 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
9687 store_reg_bx(s
, rd
, tmp
);
9691 gen_add_CC(tmp
, tmp
, tmp2
);
9693 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9695 store_reg_bx(s
, rd
, tmp
);
9699 gen_adc_CC(tmp
, tmp
, tmp2
);
9701 gen_add_carry(tmp
, tmp
, tmp2
);
9703 store_reg_bx(s
, rd
, tmp
);
9707 gen_sbc_CC(tmp
, tmp
, tmp2
);
9709 gen_sub_carry(tmp
, tmp
, tmp2
);
9711 store_reg_bx(s
, rd
, tmp
);
9715 gen_sbc_CC(tmp
, tmp2
, tmp
);
9717 gen_sub_carry(tmp
, tmp2
, tmp
);
9719 store_reg_bx(s
, rd
, tmp
);
9723 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
9726 tcg_temp_free_i32(tmp
);
9730 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
9733 tcg_temp_free_i32(tmp
);
9737 gen_sub_CC(tmp
, tmp
, tmp2
);
9739 tcg_temp_free_i32(tmp
);
9743 gen_add_CC(tmp
, tmp
, tmp2
);
9745 tcg_temp_free_i32(tmp
);
9748 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
9752 store_reg_bx(s
, rd
, tmp
);
9755 if (logic_cc
&& rd
== 15) {
9756 /* MOVS r15, ... is used for exception return. */
9760 gen_exception_return(s
, tmp2
);
9765 store_reg_bx(s
, rd
, tmp2
);
9769 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
9773 store_reg_bx(s
, rd
, tmp
);
9777 tcg_gen_not_i32(tmp2
, tmp2
);
9781 store_reg_bx(s
, rd
, tmp2
);
9784 if (op1
!= 0x0f && op1
!= 0x0d) {
9785 tcg_temp_free_i32(tmp2
);
9788 /* other instructions */
9789 op1
= (insn
>> 24) & 0xf;
9793 /* multiplies, extra load/stores */
9794 sh
= (insn
>> 5) & 3;
9797 rd
= (insn
>> 16) & 0xf;
9798 rn
= (insn
>> 12) & 0xf;
9799 rs
= (insn
>> 8) & 0xf;
9801 op1
= (insn
>> 20) & 0xf;
9803 case 0: case 1: case 2: case 3: case 6:
9805 tmp
= load_reg(s
, rs
);
9806 tmp2
= load_reg(s
, rm
);
9807 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
9808 tcg_temp_free_i32(tmp2
);
9809 if (insn
& (1 << 22)) {
9810 /* Subtract (mls) */
9812 tmp2
= load_reg(s
, rn
);
9813 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
9814 tcg_temp_free_i32(tmp2
);
9815 } else if (insn
& (1 << 21)) {
9817 tmp2
= load_reg(s
, rn
);
9818 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9819 tcg_temp_free_i32(tmp2
);
9821 if (insn
& (1 << 20))
9823 store_reg(s
, rd
, tmp
);
9826 /* 64 bit mul double accumulate (UMAAL) */
9828 tmp
= load_reg(s
, rs
);
9829 tmp2
= load_reg(s
, rm
);
9830 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
9831 gen_addq_lo(s
, tmp64
, rn
);
9832 gen_addq_lo(s
, tmp64
, rd
);
9833 gen_storeq_reg(s
, rn
, rd
, tmp64
);
9834 tcg_temp_free_i64(tmp64
);
9836 case 8: case 9: case 10: case 11:
9837 case 12: case 13: case 14: case 15:
9838 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
9839 tmp
= load_reg(s
, rs
);
9840 tmp2
= load_reg(s
, rm
);
9841 if (insn
& (1 << 22)) {
9842 tcg_gen_muls2_i32(tmp
, tmp2
, tmp
, tmp2
);
9844 tcg_gen_mulu2_i32(tmp
, tmp2
, tmp
, tmp2
);
9846 if (insn
& (1 << 21)) { /* mult accumulate */
9847 TCGv_i32 al
= load_reg(s
, rn
);
9848 TCGv_i32 ah
= load_reg(s
, rd
);
9849 tcg_gen_add2_i32(tmp
, tmp2
, tmp
, tmp2
, al
, ah
);
9850 tcg_temp_free_i32(al
);
9851 tcg_temp_free_i32(ah
);
9853 if (insn
& (1 << 20)) {
9854 gen_logicq_cc(tmp
, tmp2
);
9856 store_reg(s
, rn
, tmp
);
9857 store_reg(s
, rd
, tmp2
);
9863 rn
= (insn
>> 16) & 0xf;
9864 rd
= (insn
>> 12) & 0xf;
9865 if (insn
& (1 << 23)) {
9866 /* load/store exclusive */
9867 bool is_ld
= extract32(insn
, 20, 1);
9868 bool is_lasr
= !extract32(insn
, 8, 1);
9869 int op2
= (insn
>> 8) & 3;
9870 op1
= (insn
>> 21) & 0x3;
9873 case 0: /* lda/stl */
9879 case 1: /* reserved */
9881 case 2: /* ldaex/stlex */
9884 case 3: /* ldrex/strex */
9893 addr
= tcg_temp_local_new_i32();
9894 load_reg_var(s
, addr
, rn
);
9896 if (is_lasr
&& !is_ld
) {
9897 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_STRL
);
9902 tmp
= tcg_temp_new_i32();
9905 gen_aa32_ld32u_iss(s
, tmp
, addr
,
9910 gen_aa32_ld8u_iss(s
, tmp
, addr
,
9915 gen_aa32_ld16u_iss(s
, tmp
, addr
,
9922 store_reg(s
, rd
, tmp
);
9925 tmp
= load_reg(s
, rm
);
9928 gen_aa32_st32_iss(s
, tmp
, addr
,
9933 gen_aa32_st8_iss(s
, tmp
, addr
,
9938 gen_aa32_st16_iss(s
, tmp
, addr
,
9945 tcg_temp_free_i32(tmp
);
9950 gen_load_exclusive(s
, rd
, 15, addr
, 2);
9952 case 1: /* ldrexd */
9953 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
9955 case 2: /* ldrexb */
9956 gen_load_exclusive(s
, rd
, 15, addr
, 0);
9958 case 3: /* ldrexh */
9959 gen_load_exclusive(s
, rd
, 15, addr
, 1);
9968 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
9970 case 1: /* strexd */
9971 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
9973 case 2: /* strexb */
9974 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
9976 case 3: /* strexh */
9977 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
9983 tcg_temp_free_i32(addr
);
9985 if (is_lasr
&& is_ld
) {
9986 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_LDAQ
);
9988 } else if ((insn
& 0x00300f00) == 0) {
9989 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
9994 TCGMemOp opc
= s
->be_data
;
9998 if (insn
& (1 << 22)) {
10001 opc
|= MO_UL
| MO_ALIGN
;
10004 addr
= load_reg(s
, rn
);
10005 taddr
= gen_aa32_addr(s
, addr
, opc
);
10006 tcg_temp_free_i32(addr
);
10008 tmp
= load_reg(s
, rm
);
10009 tcg_gen_atomic_xchg_i32(tmp
, taddr
, tmp
,
10010 get_mem_index(s
), opc
);
10011 tcg_temp_free(taddr
);
10012 store_reg(s
, rd
, tmp
);
10018 int address_offset
;
10019 bool load
= insn
& (1 << 20);
10020 bool wbit
= insn
& (1 << 21);
10021 bool pbit
= insn
& (1 << 24);
10022 bool doubleword
= false;
10025 /* Misc load/store */
10026 rn
= (insn
>> 16) & 0xf;
10027 rd
= (insn
>> 12) & 0xf;
10029 /* ISS not valid if writeback */
10030 issinfo
= (pbit
& !wbit
) ? rd
: ISSInvalid
;
10032 if (!load
&& (sh
& 2)) {
10036 /* UNPREDICTABLE; we choose to UNDEF */
10039 load
= (sh
& 1) == 0;
10043 addr
= load_reg(s
, rn
);
10045 gen_add_datah_offset(s
, insn
, 0, addr
);
10047 address_offset
= 0;
10052 tmp
= load_reg(s
, rd
);
10053 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
10054 tcg_temp_free_i32(tmp
);
10055 tcg_gen_addi_i32(addr
, addr
, 4);
10056 tmp
= load_reg(s
, rd
+ 1);
10057 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
10058 tcg_temp_free_i32(tmp
);
10061 tmp
= tcg_temp_new_i32();
10062 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
10063 store_reg(s
, rd
, tmp
);
10064 tcg_gen_addi_i32(addr
, addr
, 4);
10065 tmp
= tcg_temp_new_i32();
10066 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
10069 address_offset
= -4;
10072 tmp
= tcg_temp_new_i32();
10075 gen_aa32_ld16u_iss(s
, tmp
, addr
, get_mem_index(s
),
10079 gen_aa32_ld8s_iss(s
, tmp
, addr
, get_mem_index(s
),
10084 gen_aa32_ld16s_iss(s
, tmp
, addr
, get_mem_index(s
),
10090 tmp
= load_reg(s
, rd
);
10091 gen_aa32_st16_iss(s
, tmp
, addr
, get_mem_index(s
), issinfo
);
10092 tcg_temp_free_i32(tmp
);
10094 /* Perform base writeback before the loaded value to
10095 ensure correct behavior with overlapping index registers.
10096 ldrd with base writeback is undefined if the
10097 destination and index registers overlap. */
gen_add_datah_offset(s, insn, address_offset, addr);
store_reg(s, rn, addr);
if (address_offset)
tcg_gen_addi_i32(addr, addr, address_offset);
store_reg(s, rn, addr);
tcg_temp_free_i32(addr);
/* Complete the load. */
store_reg(s, rd, tmp);
if (insn & (1 << 4)) {
/* Armv6 Media instructions. */
rn = (insn >> 16) & 0xf;
rd = (insn >> 12) & 0xf;
rs = (insn >> 8) & 0xf;
switch ((insn >> 23) & 3) {
case 0: /* Parallel add/subtract. */
op1 = (insn >> 20) & 7;
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
sh = (insn >> 5) & 7;
if ((op1 & 3) == 0 || sh == 5 || sh == 6)
gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
if ((insn & 0x00700020) == 0) {
/* Halfword pack. */
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
shift = (insn >> 7) & 0x1f;
if (insn & (1 << 6)) {
tcg_gen_sari_i32(tmp2, tmp2, shift);
tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
tcg_gen_ext16u_i32(tmp2, tmp2);
tcg_gen_shli_i32(tmp2, tmp2, shift);
tcg_gen_ext16u_i32(tmp, tmp);
tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
tcg_gen_or_i32(tmp, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else if ((insn & 0x00200020) == 0x00200000) {
tmp = load_reg(s, rm);
shift = (insn >> 7) & 0x1f;
if (insn & (1 << 6)) {
tcg_gen_sari_i32(tmp, tmp, shift);
tcg_gen_shli_i32(tmp, tmp, shift);
sh = (insn >> 16) & 0x1f;
tmp2 = tcg_const_i32(sh);
if (insn & (1 << 22))
gen_helper_usat(tmp, cpu_env, tmp, tmp2);
gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else if ((insn & 0x00300fe0) == 0x00200f20) {
tmp = load_reg(s, rm);
sh = (insn >> 16) & 0x1f;
tmp2 = tcg_const_i32(sh);
if (insn & (1 << 22))
gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else if ((insn & 0x00700fe0) == 0x00000fa0) {
/* Select bytes. */
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
tmp3 = tcg_temp_new_i32();
tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
tcg_temp_free_i32(tmp3);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else if ((insn & 0x000003e0) == 0x00000060) {
tmp = load_reg(s, rm);
shift = (insn >> 10) & 3;
/* ??? In many cases it's not necessary to do a
   rotate, a shift is sufficient. */
tcg_gen_rotri_i32(tmp, tmp, shift * 8);
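/* Example of the note above: for an 8-bit extract (sxtb/uxtb) with a
 * rotation of 8, only bits [15:8] of the source reach the extracted
 * byte, so a plain right shift by 8 would give the same result; the
 * rotate is kept because some combinations (e.g. the xtb16 forms)
 * do pick up bits that wrap around.
 */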
op1 = (insn >> 20) & 7;
case 0: gen_sxtb16(tmp); break;
case 2: gen_sxtb(tmp); break;
case 3: gen_sxth(tmp); break;
case 4: gen_uxtb16(tmp); break;
case 6: gen_uxtb(tmp); break;
case 7: gen_uxth(tmp); break;
default: goto illegal_op;
tmp2 = load_reg(s, rn);
if ((op1 & 3) == 0) {
gen_add16(tmp, tmp2);
tcg_gen_add_i32(tmp, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else if ((insn & 0x003f0f60) == 0x003f0f20) {
tmp = load_reg(s, rm);
if (insn & (1 << 22)) {
if (insn & (1 << 7)) {
gen_helper_rbit(tmp, tmp);
if (insn & (1 << 7))
tcg_gen_bswap32_i32(tmp, tmp);
store_reg(s, rd, tmp);
case 2: /* Multiplies (Type 3). */
switch ((insn >> 20) & 0x7) {
if (((insn >> 6) ^ (insn >> 7)) & 1) {
/* op2 not 00x or 11x : UNDEF */
/* Signed multiply most significant [accumulate].
   (SMMUL, SMMLA, SMMLS) */
tmp = load_reg(s, rm);
tmp2 = load_reg(s, rs);
tmp64 = gen_muls_i64_i32(tmp, tmp2);
tmp = load_reg(s, rd);
if (insn & (1 << 6)) {
tmp64 = gen_subq_msw(tmp64, tmp);
tmp64 = gen_addq_msw(tmp64, tmp);
if (insn & (1 << 5)) {
tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
tcg_gen_shri_i64(tmp64, tmp64, 32);
tmp = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
store_reg(s, rn, tmp);
/* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
if (insn & (1 << 7)) {
tmp = load_reg(s, rm);
tmp2 = load_reg(s, rs);
if (insn & (1 << 5))
gen_swap_half(tmp2);
gen_smul_dual(tmp, tmp2);
if (insn & (1 << 22)) {
/* smlald, smlsld */
tmp64 = tcg_temp_new_i64();
tmp64_2 = tcg_temp_new_i64();
tcg_gen_ext_i32_i64(tmp64, tmp);
tcg_gen_ext_i32_i64(tmp64_2, tmp2);
tcg_temp_free_i32(tmp);
tcg_temp_free_i32(tmp2);
if (insn & (1 << 6)) {
tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
tcg_temp_free_i64(tmp64_2);
gen_addq(s, tmp64, rd, rn);
gen_storeq_reg(s, rd, rn, tmp64);
tcg_temp_free_i64(tmp64);
/* smuad, smusd, smlad, smlsd */
if (insn & (1 << 6)) {
/* This subtraction cannot overflow. */
tcg_gen_sub_i32(tmp, tmp, tmp2);
/* This addition cannot overflow 32 bits;
 * however it may overflow considered as a
 * signed operation, in which case we must set
 * the Q flag.
 */
gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
tmp2 = load_reg(s, rd);
gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rn, tmp);
if (!dc_isar_feature(arm_div, s)) {
if (((insn >> 5) & 7) || (rd != 15)) {
tmp = load_reg(s, rm);
tmp2 = load_reg(s, rs);
if (insn & (1 << 21)) {
gen_helper_udiv(tmp, tmp, tmp2);
gen_helper_sdiv(tmp, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rn, tmp);
op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
case 0: /* Unsigned sum of absolute differences. */
tmp = load_reg(s, rm);
tmp2 = load_reg(s, rs);
gen_helper_usad8(tmp, tmp, tmp2);
tcg_temp_free_i32(tmp2);
tmp2 = load_reg(s, rd);
tcg_gen_add_i32(tmp, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rn, tmp);
case 0x20: case 0x24: case 0x28: case 0x2c:
/* Bitfield insert/clear. */
shift = (insn >> 7) & 0x1f;
i = (insn >> 16) & 0x1f;
/* UNPREDICTABLE; we choose to UNDEF */
tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, 0);
tmp = load_reg(s, rm);
tmp2 = load_reg(s, rd);
tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
tmp = load_reg(s, rm);
shift = (insn >> 7) & 0x1f;
i = ((insn >> 16) & 0x1f) + 1;
if (shift + i > 32)
tcg_gen_extract_i32(tmp, tmp, shift, i);
tcg_gen_sextract_i32(tmp, tmp, shift, i);
store_reg(s, rd, tmp);
/* Check for undefined extension instructions
 * per the ARM Bible IE:
 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
 */
sh = (0xf << 20) | (0xf << 4);
if (op1 == 0x7 && ((insn & sh) == sh))
/* load/store byte/word */
rn = (insn >> 16) & 0xf;
rd = (insn >> 12) & 0xf;
tmp2 = load_reg(s, rn);
if ((insn & 0x01200000) == 0x00200000) {
i = get_a32_user_mem_index(s);
i = get_mem_index(s);
if (insn & (1 << 24))
gen_add_data_offset(s, insn, tmp2);
if (insn & (1 << 20)) {
tmp = tcg_temp_new_i32();
if (insn & (1 << 22)) {
gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
tmp = load_reg(s, rd);
if (insn & (1 << 22)) {
gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
tcg_temp_free_i32(tmp);
if (!(insn & (1 << 24))) {
gen_add_data_offset(s, insn, tmp2);
store_reg(s, rn, tmp2);
} else if (insn & (1 << 21)) {
store_reg(s, rn, tmp2);
tcg_temp_free_i32(tmp2);
if (insn & (1 << 20)) {
/* Complete the load. */
store_reg_from_load(s, rd, tmp);
int j, n, loaded_base;
bool exc_return = false;
bool is_load = extract32(insn, 20, 1);
TCGv_i32 loaded_var;
/* load/store multiple words */
/* XXX: store correct base if write back */
if (insn & (1 << 22)) {
/* LDM (user), LDM (exception return) and STM (user) */
goto illegal_op; /* only usable in supervisor mode */
if (is_load && extract32(insn, 15, 1)) {
rn = (insn >> 16) & 0xf;
addr = load_reg(s, rn);
/* compute total size */
for (i = 0; i < 16; i++) {
if (insn & (1 << i))
/* XXX: test invalid n == 0 case ? */
if (insn & (1 << 23)) {
if (insn & (1 << 24)) {
/* pre increment */
tcg_gen_addi_i32(addr, addr, 4);
/* post increment */
if (insn & (1 << 24)) {
/* pre decrement */
tcg_gen_addi_i32(addr, addr, -(n * 4));
/* post decrement */
tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
for (i = 0; i < 16; i++) {
if (insn & (1 << i)) {
tmp = tcg_temp_new_i32();
gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
tmp2 = tcg_const_i32(i);
gen_helper_set_user_reg(cpu_env, tmp2, tmp);
tcg_temp_free_i32(tmp2);
tcg_temp_free_i32(tmp);
} else if (i == rn) {
} else if (rn == 15 && exc_return) {
store_pc_exc_ret(s, tmp);
store_reg_from_load(s, i, tmp);
/* special case: r15 = PC + 8 */
val = (long)s->pc + 4;
tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, val);
tmp = tcg_temp_new_i32();
tmp2 = tcg_const_i32(i);
gen_helper_get_user_reg(tmp, cpu_env, tmp2);
tcg_temp_free_i32(tmp2);
tmp = load_reg(s, i);
gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
/* no need to add after the last transfer */
tcg_gen_addi_i32(addr, addr, 4);
if (insn & (1 << 21)) {
if (insn & (1 << 23)) {
if (insn & (1 << 24)) {
/* pre increment */
/* post increment */
tcg_gen_addi_i32(addr, addr, 4);
if (insn & (1 << 24)) {
/* pre decrement */
tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
/* post decrement */
tcg_gen_addi_i32(addr, addr, -(n * 4));
store_reg(s, rn, addr);
tcg_temp_free_i32(addr);
store_reg(s, rn, loaded_var);
/* Restore CPSR from SPSR. */
tmp = load_cpu_field(spsr);
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_helper_cpsr_write_eret(cpu_env, tmp);
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
tcg_temp_free_i32(tmp);
/* Must exit loop to check un-masked IRQs */
s->base.is_jmp = DISAS_EXIT;
/* branch (and link) */
val = (int32_t)s->pc;
if (insn & (1 << 24)) {
tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, val);
store_reg(s, 14, tmp);
offset = sextract32(insn << 2, 0, 26);
if (((insn >> 8) & 0xe) == 10) {
if (disas_vfp_insn(s, insn)) {
} else if (disas_coproc_insn(s, insn)) {
gen_set_pc_im(s, s->pc);
s->svc_imm = extract32(insn, 0, 24);
s->base.is_jmp = DISAS_SWI;
gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                   default_exception_el(s));
static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
/* Return true if this is a 16 bit instruction. We must be precise
 * about this (matching the decode). We assume that s->pc still
 * points to the first 16 bits of the insn.
 */
if ((insn >> 11) < 0x1d) {
/* Definitely a 16-bit instruction */
/* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
 * first half of a 32-bit Thumb insn. Thumb-1 cores might
 * end up actually treating this as two 16-bit insns, though,
 * if it's half of a bl/blx pair that might span a page boundary.
 */
if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
    arm_dc_feature(s, ARM_FEATURE_M)) {
/* Thumb2 cores (including all M profile ones) always treat
 * 32-bit insns as 32-bit.
 */
if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
/* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
 * is not on the next page; we merge this into a 32-bit
 */
/* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
 * -- handle as single 16 bit insn
 */
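/* Illustration: 0x6800 has top bits 0b01101 (< 0x1d), so it is always
 * a 16-bit insn; 0xf000 has top bits 0b11110, so on Thumb-2 and
 * M-profile cores it is the first half of a 32-bit insn, while a
 * Thumb-1 core only merges it with its suffix when the suffix does
 * not spill onto the next page.
 */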
/* Return true if this is a Thumb-2 logical op. */
thumb2_logic_op(int op)
/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid. */
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
                   TCGv_i32 t0, TCGv_i32 t1)
tcg_gen_and_i32(t0, t0, t1);
tcg_gen_andc_i32(t0, t0, t1);
tcg_gen_or_i32(t0, t0, t1);
tcg_gen_orc_i32(t0, t0, t1);
tcg_gen_xor_i32(t0, t0, t1);
gen_add_CC(t0, t0, t1);
tcg_gen_add_i32(t0, t0, t1);
gen_adc_CC(t0, t0, t1);
gen_sbc_CC(t0, t0, t1);
gen_sub_carry(t0, t0, t1);
gen_sub_CC(t0, t0, t1);
tcg_gen_sub_i32(t0, t0, t1);
gen_sub_CC(t0, t1, t0);
tcg_gen_sub_i32(t0, t1, t0);
default: /* 5, 6, 7, 9, 12, 15. */
gen_set_CF_bit31(t1);
/* Translate a 32-bit thumb instruction. */
static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
uint32_t imm, shift, offset;
uint32_t rd, rn, rm, rs;
/*
 * ARMv6-M supports a limited subset of Thumb2 instructions.
 * Other Thumb1 architectures allow only 32-bit
 * combined BL/BLX prefix and suffix.
 */
if (arm_dc_feature(s, ARM_FEATURE_M) &&
    !arm_dc_feature(s, ARM_FEATURE_V7)) {
bool found = false;
static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
                                       0xf3b08040 /* dsb */,
                                       0xf3b08050 /* dmb */,
                                       0xf3b08060 /* isb */,
                                       0xf3e08000 /* mrs */,
                                       0xf000d000 /* bl */};
static const uint32_t armv6m_mask[] = {0xffe0d000,
for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
} else if ((insn & 0xf800e800) != 0xf000e800) {
rn = (insn >> 16) & 0xf;
rs = (insn >> 12) & 0xf;
rd = (insn >> 8) & 0xf;
switch ((insn >> 25) & 0xf) {
case 0: case 1: case 2: case 3:
/* 16-bit instructions. Should never happen. */
if (insn & (1 << 22)) {
/* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
 * - load/store doubleword, load/store exclusive, ldacq/strel,
 * table branch, TT.
 */
if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
    arm_dc_feature(s, ARM_FEATURE_V8)) {
/* 0b1110_1001_0111_1111_1110_1001_0111_1111
 * The bulk of the behaviour for this instruction is implemented
 * in v7m_handle_execute_nsc(), which deals with the insn when
 * it is executed by a CPU in non-secure state from memory
 * which is Secure & NonSecure-Callable.
 * Here we only need to handle the remaining cases:
 * * in NS memory (including the "security extension not
 *   implemented" case) : NOP
 * * in S memory but CPU already secure (clear IT bits)
 * We know that the attribute for the memory this insn is
 * in must match the current CPU state, because otherwise
 * get_phys_addr_pmsav8 would have generated an exception.
 */
if (s->v8m_secure) {
/* Like the IT insn, we don't need to generate any code */
s->condexec_cond = 0;
s->condexec_mask = 0;
} else if (insn & 0x01200000) {
/* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
 * - load/store dual (post-indexed)
 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
 * - load/store dual (literal and immediate)
 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
 * - load/store dual (pre-indexed)
 */
bool wback = extract32(insn, 21, 1);
if (insn & (1 << 21)) {
/* UNPREDICTABLE */
addr = tcg_temp_new_i32();
tcg_gen_movi_i32(addr, s->pc & ~3);
addr = load_reg(s, rn);
offset = (insn & 0xff) * 4;
if ((insn & (1 << 23)) == 0) {
if (s->v8m_stackcheck && rn == 13 && wback) {
/*
 * Here 'addr' is the current SP; if offset is +ve we're
 * moving SP up, else down. It is UNKNOWN whether the limit
 * check triggers when SP starts below the limit and ends
 * up above it; check whichever of the current and final
 * SP is lower, so QEMU will trigger in that situation.
 */
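/* Illustration: for a store like "strd r0, r1, [sp, #-8]!" the final
 * SP is the lower value, so the check below runs on addr + offset;
 * with a positive offset the current SP is already the lower value
 * and is checked directly.
 */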
10892 if ((int32_t)offset
< 0) {
10893 TCGv_i32 newsp
= tcg_temp_new_i32();
10895 tcg_gen_addi_i32(newsp
, addr
, offset
);
10896 gen_helper_v8m_stackcheck(cpu_env
, newsp
);
10897 tcg_temp_free_i32(newsp
);
10899 gen_helper_v8m_stackcheck(cpu_env
, addr
);
10903 if (insn
& (1 << 24)) {
10904 tcg_gen_addi_i32(addr
, addr
, offset
);
10907 if (insn
& (1 << 20)) {
10909 tmp
= tcg_temp_new_i32();
10910 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
10911 store_reg(s
, rs
, tmp
);
10912 tcg_gen_addi_i32(addr
, addr
, 4);
10913 tmp
= tcg_temp_new_i32();
10914 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
10915 store_reg(s
, rd
, tmp
);
10918 tmp
= load_reg(s
, rs
);
10919 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
10920 tcg_temp_free_i32(tmp
);
10921 tcg_gen_addi_i32(addr
, addr
, 4);
10922 tmp
= load_reg(s
, rd
);
10923 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
10924 tcg_temp_free_i32(tmp
);
10927 /* Base writeback. */
10928 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
10929 store_reg(s
, rn
, addr
);
10931 tcg_temp_free_i32(addr
);
10933 } else if ((insn
& (1 << 23)) == 0) {
10934 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
10935 * - load/store exclusive word
10939 if (!(insn
& (1 << 20)) &&
10940 arm_dc_feature(s
, ARM_FEATURE_M
) &&
10941 arm_dc_feature(s
, ARM_FEATURE_V8
)) {
10942 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
10945 bool alt
= insn
& (1 << 7);
10946 TCGv_i32 addr
, op
, ttresp
;
10948 if ((insn
& 0x3f) || rd
== 13 || rd
== 15 || rn
== 15) {
10949 /* we UNDEF for these UNPREDICTABLE cases */
10953 if (alt
&& !s
->v8m_secure
) {
10957 addr
= load_reg(s
, rn
);
10958 op
= tcg_const_i32(extract32(insn
, 6, 2));
10959 ttresp
= tcg_temp_new_i32();
10960 gen_helper_v7m_tt(ttresp
, cpu_env
, addr
, op
);
10961 tcg_temp_free_i32(addr
);
10962 tcg_temp_free_i32(op
);
10963 store_reg(s
, rd
, ttresp
);
10968 addr
= tcg_temp_local_new_i32();
10969 load_reg_var(s
, addr
, rn
);
10970 tcg_gen_addi_i32(addr
, addr
, (insn
& 0xff) << 2);
10971 if (insn
& (1 << 20)) {
10972 gen_load_exclusive(s
, rs
, 15, addr
, 2);
10974 gen_store_exclusive(s
, rd
, rs
, 15, addr
, 2);
10976 tcg_temp_free_i32(addr
);
} else if ((insn & (7 << 5)) == 0) {
/* Table Branch. */
addr = tcg_temp_new_i32();
tcg_gen_movi_i32(addr, s->pc);
addr = load_reg(s, rn);
tmp = load_reg(s, rm);
tcg_gen_add_i32(addr, addr, tmp);
if (insn & (1 << 4)) {
tcg_gen_add_i32(addr, addr, tmp);
tcg_temp_free_i32(tmp);
tmp = tcg_temp_new_i32();
gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
tmp = tcg_temp_new_i32();
gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(addr);
tcg_gen_shli_i32(tmp, tmp, 1);
tcg_gen_addi_i32(tmp, tmp, s->pc);
store_reg(s, 15, tmp);
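/* The TBB/TBH target is PC + 2 * table[index]: the byte or halfword
 * loaded from the table is a forward offset in units of two bytes,
 * hence the shift left by one before adding the PC value.
 */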
11003 bool is_lasr
= false;
11004 bool is_ld
= extract32(insn
, 20, 1);
11005 int op2
= (insn
>> 6) & 0x3;
11006 op
= (insn
>> 4) & 0x3;
11011 /* Load/store exclusive byte/halfword/doubleword */
11018 /* Load-acquire/store-release */
11024 /* Load-acquire/store-release exclusive */
11030 if (is_lasr
&& !is_ld
) {
11031 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_STRL
);
11034 addr
= tcg_temp_local_new_i32();
11035 load_reg_var(s
, addr
, rn
);
11038 tmp
= tcg_temp_new_i32();
11041 gen_aa32_ld8u_iss(s
, tmp
, addr
, get_mem_index(s
),
11045 gen_aa32_ld16u_iss(s
, tmp
, addr
, get_mem_index(s
),
11049 gen_aa32_ld32u_iss(s
, tmp
, addr
, get_mem_index(s
),
11055 store_reg(s
, rs
, tmp
);
11057 tmp
= load_reg(s
, rs
);
11060 gen_aa32_st8_iss(s
, tmp
, addr
, get_mem_index(s
),
11064 gen_aa32_st16_iss(s
, tmp
, addr
, get_mem_index(s
),
11068 gen_aa32_st32_iss(s
, tmp
, addr
, get_mem_index(s
),
11074 tcg_temp_free_i32(tmp
);
11076 } else if (is_ld
) {
11077 gen_load_exclusive(s
, rs
, rd
, addr
, op
);
11079 gen_store_exclusive(s
, rm
, rs
, rd
, addr
, op
);
11081 tcg_temp_free_i32(addr
);
11083 if (is_lasr
&& is_ld
) {
11084 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_LDAQ
);
11088 /* Load/store multiple, RFE, SRS. */
11089 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
11090 /* RFE, SRS: not available in user mode or on M profile */
11091 if (IS_USER(s
) || arm_dc_feature(s
, ARM_FEATURE_M
)) {
11094 if (insn
& (1 << 20)) {
11096 addr
= load_reg(s
, rn
);
11097 if ((insn
& (1 << 24)) == 0)
11098 tcg_gen_addi_i32(addr
, addr
, -8);
11099 /* Load PC into tmp and CPSR into tmp2. */
11100 tmp
= tcg_temp_new_i32();
11101 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
11102 tcg_gen_addi_i32(addr
, addr
, 4);
11103 tmp2
= tcg_temp_new_i32();
11104 gen_aa32_ld32u(s
, tmp2
, addr
, get_mem_index(s
));
11105 if (insn
& (1 << 21)) {
11106 /* Base writeback. */
11107 if (insn
& (1 << 24)) {
11108 tcg_gen_addi_i32(addr
, addr
, 4);
11110 tcg_gen_addi_i32(addr
, addr
, -4);
11112 store_reg(s
, rn
, addr
);
11114 tcg_temp_free_i32(addr
);
11116 gen_rfe(s
, tmp
, tmp2
);
11119 gen_srs(s
, (insn
& 0x1f), (insn
& (1 << 24)) ? 1 : 2,
11123 int i
, loaded_base
= 0;
11124 TCGv_i32 loaded_var
;
11125 bool wback
= extract32(insn
, 21, 1);
11126 /* Load/store multiple. */
11127 addr
= load_reg(s
, rn
);
11129 for (i
= 0; i
< 16; i
++) {
11130 if (insn
& (1 << i
))
11134 if (insn
& (1 << 24)) {
11135 tcg_gen_addi_i32(addr
, addr
, -offset
);
11138 if (s
->v8m_stackcheck
&& rn
== 13 && wback
) {
11140 * If the writeback is incrementing SP rather than
11141 * decrementing it, and the initial SP is below the
11142 * stack limit but the final written-back SP would
11143 * be above, then then we must not perform any memory
11144 * accesses, but it is IMPDEF whether we generate
11145 * an exception. We choose to do so in this case.
11146 * At this point 'addr' is the lowest address, so
11147 * either the original SP (if incrementing) or our
11148 * final SP (if decrementing), so that's what we check.
11150 gen_helper_v8m_stackcheck(cpu_env
, addr
);
11154 for (i
= 0; i
< 16; i
++) {
11155 if ((insn
& (1 << i
)) == 0)
11157 if (insn
& (1 << 20)) {
11159 tmp
= tcg_temp_new_i32();
11160 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
11162 gen_bx_excret(s
, tmp
);
11163 } else if (i
== rn
) {
11167 store_reg(s
, i
, tmp
);
11171 tmp
= load_reg(s
, i
);
11172 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
11173 tcg_temp_free_i32(tmp
);
11175 tcg_gen_addi_i32(addr
, addr
, 4);
11178 store_reg(s
, rn
, loaded_var
);
11181 /* Base register writeback. */
11182 if (insn
& (1 << 24)) {
11183 tcg_gen_addi_i32(addr
, addr
, -offset
);
11185 /* Fault if writeback register is in register list. */
11186 if (insn
& (1 << rn
))
11188 store_reg(s
, rn
, addr
);
11190 tcg_temp_free_i32(addr
);
11197 op
= (insn
>> 21) & 0xf;
11199 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
11202 /* Halfword pack. */
11203 tmp
= load_reg(s
, rn
);
11204 tmp2
= load_reg(s
, rm
);
11205 shift
= ((insn
>> 10) & 0x1c) | ((insn
>> 6) & 0x3);
11206 if (insn
& (1 << 5)) {
11210 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
11211 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
11212 tcg_gen_ext16u_i32(tmp2
, tmp2
);
11216 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
11217 tcg_gen_ext16u_i32(tmp
, tmp
);
11218 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
11220 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
11221 tcg_temp_free_i32(tmp2
);
11222 store_reg(s
, rd
, tmp
);
11224 /* Data processing register constant shift. */
11226 tmp
= tcg_temp_new_i32();
11227 tcg_gen_movi_i32(tmp
, 0);
11229 tmp
= load_reg(s
, rn
);
11231 tmp2
= load_reg(s
, rm
);
11233 shiftop
= (insn
>> 4) & 3;
11234 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
11235 conds
= (insn
& (1 << 20)) != 0;
11236 logic_cc
= (conds
&& thumb2_logic_op(op
));
11237 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
11238 if (gen_thumb2_data_op(s
, op
, conds
, 0, tmp
, tmp2
))
11240 tcg_temp_free_i32(tmp2
);
11242 ((op
== 2 && rn
== 15) ||
11243 (op
== 8 && rn
== 13) ||
11244 (op
== 13 && rn
== 13))) {
11245 /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
11246 store_sp_checked(s
, tmp
);
11247 } else if (rd
!= 15) {
11248 store_reg(s
, rd
, tmp
);
11250 tcg_temp_free_i32(tmp
);
11254 case 13: /* Misc data processing. */
11255 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
11256 if (op
< 4 && (insn
& 0xf000) != 0xf000)
11259 case 0: /* Register controlled shift. */
11260 tmp
= load_reg(s
, rn
);
11261 tmp2
= load_reg(s
, rm
);
11262 if ((insn
& 0x70) != 0)
11265 * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
11266 * - MOV, MOVS (register-shifted register), flagsetting
11268 op
= (insn
>> 21) & 3;
11269 logic_cc
= (insn
& (1 << 20)) != 0;
11270 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
11273 store_reg(s
, rd
, tmp
);
11275 case 1: /* Sign/zero extend. */
11276 op
= (insn
>> 20) & 7;
11278 case 0: /* SXTAH, SXTH */
11279 case 1: /* UXTAH, UXTH */
11280 case 4: /* SXTAB, SXTB */
11281 case 5: /* UXTAB, UXTB */
11283 case 2: /* SXTAB16, SXTB16 */
11284 case 3: /* UXTAB16, UXTB16 */
11285 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
11293 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
11297 tmp
= load_reg(s
, rm
);
11298 shift
= (insn
>> 4) & 3;
11299 /* ??? In many cases it's not necessary to do a
11300 rotate, a shift is sufficient. */
11302 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
11303 op
= (insn
>> 20) & 7;
11305 case 0: gen_sxth(tmp
); break;
11306 case 1: gen_uxth(tmp
); break;
11307 case 2: gen_sxtb16(tmp
); break;
11308 case 3: gen_uxtb16(tmp
); break;
11309 case 4: gen_sxtb(tmp
); break;
11310 case 5: gen_uxtb(tmp
); break;
11312 g_assert_not_reached();
11315 tmp2
= load_reg(s
, rn
);
11316 if ((op
>> 1) == 1) {
11317 gen_add16(tmp
, tmp2
);
11319 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
11320 tcg_temp_free_i32(tmp2
);
11323 store_reg(s
, rd
, tmp
);
11325 case 2: /* SIMD add/subtract. */
11326 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
11329 op
= (insn
>> 20) & 7;
11330 shift
= (insn
>> 4) & 7;
11331 if ((op
& 3) == 3 || (shift
& 3) == 3)
11333 tmp
= load_reg(s
, rn
);
11334 tmp2
= load_reg(s
, rm
);
11335 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
11336 tcg_temp_free_i32(tmp2
);
11337 store_reg(s
, rd
, tmp
);
11339 case 3: /* Other data processing. */
11340 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
11342 /* Saturating add/subtract. */
11343 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
11346 tmp
= load_reg(s
, rn
);
11347 tmp2
= load_reg(s
, rm
);
11349 gen_helper_double_saturate(tmp
, cpu_env
, tmp
);
11351 gen_helper_sub_saturate(tmp
, cpu_env
, tmp2
, tmp
);
11353 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp2
);
11354 tcg_temp_free_i32(tmp2
);
11357 case 0x0a: /* rbit */
11358 case 0x08: /* rev */
11359 case 0x09: /* rev16 */
11360 case 0x0b: /* revsh */
11361 case 0x18: /* clz */
11363 case 0x10: /* sel */
11364 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
11368 case 0x20: /* crc32/crc32c */
11374 if (!dc_isar_feature(aa32_crc32
, s
)) {
11381 tmp
= load_reg(s
, rn
);
11383 case 0x0a: /* rbit */
11384 gen_helper_rbit(tmp
, tmp
);
11386 case 0x08: /* rev */
11387 tcg_gen_bswap32_i32(tmp
, tmp
);
11389 case 0x09: /* rev16 */
11392 case 0x0b: /* revsh */
11395 case 0x10: /* sel */
11396 tmp2
= load_reg(s
, rm
);
11397 tmp3
= tcg_temp_new_i32();
11398 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
11399 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
11400 tcg_temp_free_i32(tmp3
);
11401 tcg_temp_free_i32(tmp2
);
11403 case 0x18: /* clz */
11404 tcg_gen_clzi_i32(tmp
, tmp
, 32);
11414 uint32_t sz
= op
& 0x3;
11415 uint32_t c
= op
& 0x8;
11417 tmp2
= load_reg(s
, rm
);
11419 tcg_gen_andi_i32(tmp2
, tmp2
, 0xff);
11420 } else if (sz
== 1) {
11421 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff);
11423 tmp3
= tcg_const_i32(1 << sz
);
11425 gen_helper_crc32c(tmp
, tmp
, tmp2
, tmp3
);
11427 gen_helper_crc32(tmp
, tmp
, tmp2
, tmp3
);
11429 tcg_temp_free_i32(tmp2
);
11430 tcg_temp_free_i32(tmp3
);
11434 g_assert_not_reached();
11437 store_reg(s
, rd
, tmp
);
11439 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
11440 switch ((insn
>> 20) & 7) {
11441 case 0: /* 32 x 32 -> 32 */
11442 case 7: /* Unsigned sum of absolute differences. */
11444 case 1: /* 16 x 16 -> 32 */
11445 case 2: /* Dual multiply add. */
11446 case 3: /* 32 * 16 -> 32msb */
11447 case 4: /* Dual multiply subtract. */
11448 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
11449 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
11454 op
= (insn
>> 4) & 0xf;
11455 tmp
= load_reg(s
, rn
);
11456 tmp2
= load_reg(s
, rm
);
11457 switch ((insn
>> 20) & 7) {
11458 case 0: /* 32 x 32 -> 32 */
11459 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
11460 tcg_temp_free_i32(tmp2
);
11462 tmp2
= load_reg(s
, rs
);
11464 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
11466 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
11467 tcg_temp_free_i32(tmp2
);
11470 case 1: /* 16 x 16 -> 32 */
11471 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
11472 tcg_temp_free_i32(tmp2
);
11474 tmp2
= load_reg(s
, rs
);
11475 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
11476 tcg_temp_free_i32(tmp2
);
11479 case 2: /* Dual multiply add. */
11480 case 4: /* Dual multiply subtract. */
11482 gen_swap_half(tmp2
);
11483 gen_smul_dual(tmp
, tmp2
);
11484 if (insn
& (1 << 22)) {
11485 /* This subtraction cannot overflow. */
11486 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
11488 /* This addition cannot overflow 32 bits;
11489 * however it may overflow considered as a signed
11490 * operation, in which case we must set the Q flag.
11492 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
11494 tcg_temp_free_i32(tmp2
);
11497 tmp2
= load_reg(s
, rs
);
11498 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
11499 tcg_temp_free_i32(tmp2
);
11502 case 3: /* 32 * 16 -> 32msb */
11504 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
11507 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
11508 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
11509 tmp
= tcg_temp_new_i32();
11510 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
11511 tcg_temp_free_i64(tmp64
);
11514 tmp2
= load_reg(s
, rs
);
11515 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
11516 tcg_temp_free_i32(tmp2
);
11519 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
11520 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
11522 tmp
= load_reg(s
, rs
);
11523 if (insn
& (1 << 20)) {
11524 tmp64
= gen_addq_msw(tmp64
, tmp
);
11526 tmp64
= gen_subq_msw(tmp64
, tmp
);
11529 if (insn
& (1 << 4)) {
11530 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
11532 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
11533 tmp
= tcg_temp_new_i32();
11534 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
11535 tcg_temp_free_i64(tmp64
);
11537 case 7: /* Unsigned sum of absolute differences. */
11538 gen_helper_usad8(tmp
, tmp
, tmp2
);
11539 tcg_temp_free_i32(tmp2
);
11541 tmp2
= load_reg(s
, rs
);
11542 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
11543 tcg_temp_free_i32(tmp2
);
11547 store_reg(s
, rd
, tmp
);
11549 case 6: case 7: /* 64-bit multiply, Divide. */
11550 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
11551 tmp
= load_reg(s
, rn
);
11552 tmp2
= load_reg(s
, rm
);
11553 if ((op
& 0x50) == 0x10) {
11555 if (!dc_isar_feature(thumb_div
, s
)) {
11559 gen_helper_udiv(tmp
, tmp
, tmp2
);
11561 gen_helper_sdiv(tmp
, tmp
, tmp2
);
11562 tcg_temp_free_i32(tmp2
);
11563 store_reg(s
, rd
, tmp
);
11564 } else if ((op
& 0xe) == 0xc) {
11565 /* Dual multiply accumulate long. */
11566 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
11567 tcg_temp_free_i32(tmp
);
11568 tcg_temp_free_i32(tmp2
);
11572 gen_swap_half(tmp2
);
11573 gen_smul_dual(tmp
, tmp2
);
11575 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
11577 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
11579 tcg_temp_free_i32(tmp2
);
11581 tmp64
= tcg_temp_new_i64();
11582 tcg_gen_ext_i32_i64(tmp64
, tmp
);
11583 tcg_temp_free_i32(tmp
);
11584 gen_addq(s
, tmp64
, rs
, rd
);
11585 gen_storeq_reg(s
, rs
, rd
, tmp64
);
11586 tcg_temp_free_i64(tmp64
);
11589 /* Unsigned 64-bit multiply */
11590 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
11594 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
11595 tcg_temp_free_i32(tmp2
);
11596 tcg_temp_free_i32(tmp
);
11599 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
11600 tcg_temp_free_i32(tmp2
);
11601 tmp64
= tcg_temp_new_i64();
11602 tcg_gen_ext_i32_i64(tmp64
, tmp
);
11603 tcg_temp_free_i32(tmp
);
11605 /* Signed 64-bit multiply */
11606 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
11611 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
11612 tcg_temp_free_i64(tmp64
);
11615 gen_addq_lo(s
, tmp64
, rs
);
11616 gen_addq_lo(s
, tmp64
, rd
);
11617 } else if (op
& 0x40) {
11618 /* 64-bit accumulate. */
11619 gen_addq(s
, tmp64
, rs
, rd
);
11621 gen_storeq_reg(s
, rs
, rd
, tmp64
);
11622 tcg_temp_free_i64(tmp64
);
11627 case 6: case 7: case 14: case 15:
11629 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
11630 /* We don't currently implement M profile FP support,
11631 * so this entire space should give a NOCP fault, with
11632 * the exception of the v8M VLLDM and VLSTM insns, which
11633 * must be NOPs in Secure state and UNDEF in Nonsecure state.
11635 if (arm_dc_feature(s
, ARM_FEATURE_V8
) &&
11636 (insn
& 0xffa00f00) == 0xec200a00) {
11637 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
11639 * We choose to UNDEF if the RAZ bits are non-zero.
11641 if (!s
->v8m_secure
|| (insn
& 0x0040f0ff)) {
11644 /* Just NOP since FP support is not implemented */
11647 /* All other insns: NOCP */
11648 gen_exception_insn(s
, 4, EXCP_NOCP
, syn_uncategorized(),
11649 default_exception_el(s
));
11652 if ((insn
& 0xfe000a00) == 0xfc000800
11653 && arm_dc_feature(s
, ARM_FEATURE_V8
)) {
11654 /* The Thumb2 and ARM encodings are identical. */
11655 if (disas_neon_insn_3same_ext(s
, insn
)) {
11658 } else if ((insn
& 0xff000a00) == 0xfe000800
11659 && arm_dc_feature(s
, ARM_FEATURE_V8
)) {
11660 /* The Thumb2 and ARM encodings are identical. */
11661 if (disas_neon_insn_2reg_scalar_ext(s
, insn
)) {
11664 } else if (((insn
>> 24) & 3) == 3) {
11665 /* Translate into the equivalent ARM encoding. */
11666 insn
= (insn
& 0xe2ffffff) | ((insn
& (1 << 28)) >> 4) | (1 << 28);
11667 if (disas_neon_data_insn(s
, insn
)) {
11670 } else if (((insn
>> 8) & 0xe) == 10) {
11671 if (disas_vfp_insn(s
, insn
)) {
11675 if (insn
& (1 << 28))
11677 if (disas_coproc_insn(s
, insn
)) {
11682 case 8: case 9: case 10: case 11:
if (insn & (1 << 15)) {
/* Branches, misc control. */
if (insn & 0x5000) {
/* Unconditional branch. */
/* signextend(hw1[10:0]) -> offset[:12]. */
offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
/* hw1[10:0] -> offset[11:1]. */
offset |= (insn & 0x7ff) << 1;
/* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
   offset[24:22] already have the same value because of the
   sign extension above. */
offset ^= ((~insn) & (1 << 13)) << 10;
offset ^= ((~insn) & (1 << 11)) << 11;
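/* The 24-bit branch offset is assembled from both halfwords: S (hw1
 * bit 10) provides the sign, hw1[9:0] provides offset[21:12],
 * hw2[10:0] provides offset[11:1], and the two XORs above fold in
 * I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S) as offset[23] and
 * offset[22], where J1 is hw2 bit 13 and J2 is hw2 bit 11.
 */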
11697 if (insn
& (1 << 14)) {
11698 /* Branch and link. */
11699 tcg_gen_movi_i32(cpu_R
[14], s
->pc
| 1);
11703 if (insn
& (1 << 12)) {
11705 gen_jmp(s
, offset
);
11708 offset
&= ~(uint32_t)2;
11709 /* thumb2 bx, no need to check */
11710 gen_bx_im(s
, offset
);
11712 } else if (((insn
>> 23) & 7) == 7) {
11714 if (insn
& (1 << 13))
11717 if (insn
& (1 << 26)) {
11718 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
11721 if (!(insn
& (1 << 20))) {
11722 /* Hypervisor call (v7) */
11723 int imm16
= extract32(insn
, 16, 4) << 12
11724 | extract32(insn
, 0, 12);
11731 /* Secure monitor call (v6+) */
11739 op
= (insn
>> 20) & 7;
11741 case 0: /* msr cpsr. */
11742 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
11743 tmp
= load_reg(s
, rn
);
11744 /* the constant is the mask and SYSm fields */
11745 addr
= tcg_const_i32(insn
& 0xfff);
11746 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
11747 tcg_temp_free_i32(addr
);
11748 tcg_temp_free_i32(tmp
);
11753 case 1: /* msr spsr. */
11754 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
11758 if (extract32(insn
, 5, 1)) {
11760 int sysm
= extract32(insn
, 8, 4) |
11761 (extract32(insn
, 4, 1) << 4);
11764 gen_msr_banked(s
, r
, sysm
, rm
);
11768 /* MSR (for PSRs) */
11769 tmp
= load_reg(s
, rn
);
11771 msr_mask(s
, (insn
>> 8) & 0xf, op
== 1),
11775 case 2: /* cps, nop-hint. */
11776 if (((insn
>> 8) & 7) == 0) {
11777 gen_nop_hint(s
, insn
& 0xff);
11779 /* Implemented as NOP in user mode. */
11784 if (insn
& (1 << 10)) {
11785 if (insn
& (1 << 7))
11787 if (insn
& (1 << 6))
11789 if (insn
& (1 << 5))
11791 if (insn
& (1 << 9))
11792 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
11794 if (insn
& (1 << 8)) {
11796 imm
|= (insn
& 0x1f);
11799 gen_set_psr_im(s
, offset
, 0, imm
);
11802 case 3: /* Special control operations. */
11803 if (!arm_dc_feature(s
, ARM_FEATURE_V7
) &&
11804 !arm_dc_feature(s
, ARM_FEATURE_M
)) {
11807 op
= (insn
>> 4) & 0xf;
11809 case 2: /* clrex */
11814 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
11817 /* We need to break the TB after this insn
11818 * to execute self-modifying code correctly
11819 * and also to take any pending interrupts
11822 gen_goto_tb(s
, 0, s
->pc
& ~1);
11829 /* Trivial implementation equivalent to bx.
11830 * This instruction doesn't exist at all for M-profile.
11832 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
11835 tmp
= load_reg(s
, rn
);
11838 case 5: /* Exception return. */
11842 if (rn
!= 14 || rd
!= 15) {
11845 if (s
->current_el
== 2) {
11846 /* ERET from Hyp uses ELR_Hyp, not LR */
11850 tmp
= load_cpu_field(elr_el
[2]);
11852 tmp
= load_reg(s
, rn
);
11853 tcg_gen_subi_i32(tmp
, tmp
, insn
& 0xff);
11855 gen_exception_return(s
, tmp
);
11858 if (extract32(insn
, 5, 1) &&
11859 !arm_dc_feature(s
, ARM_FEATURE_M
)) {
11861 int sysm
= extract32(insn
, 16, 4) |
11862 (extract32(insn
, 4, 1) << 4);
11864 gen_mrs_banked(s
, 0, sysm
, rd
);
11868 if (extract32(insn
, 16, 4) != 0xf) {
11871 if (!arm_dc_feature(s
, ARM_FEATURE_M
) &&
11872 extract32(insn
, 0, 8) != 0) {
11877 tmp
= tcg_temp_new_i32();
11878 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
11879 addr
= tcg_const_i32(insn
& 0xff);
11880 gen_helper_v7m_mrs(tmp
, cpu_env
, addr
);
11881 tcg_temp_free_i32(addr
);
11883 gen_helper_cpsr_read(tmp
, cpu_env
);
11885 store_reg(s
, rd
, tmp
);
11888 if (extract32(insn
, 5, 1) &&
11889 !arm_dc_feature(s
, ARM_FEATURE_M
)) {
11891 int sysm
= extract32(insn
, 16, 4) |
11892 (extract32(insn
, 4, 1) << 4);
11894 gen_mrs_banked(s
, 1, sysm
, rd
);
11899 /* Not accessible in user mode. */
11900 if (IS_USER(s
) || arm_dc_feature(s
, ARM_FEATURE_M
)) {
11904 if (extract32(insn
, 16, 4) != 0xf ||
11905 extract32(insn
, 0, 8) != 0) {
11909 tmp
= load_cpu_field(spsr
);
11910 store_reg(s
, rd
, tmp
);
/* Conditional branch. */
op = (insn >> 22) & 0xf;
/* Generate a conditional jump to next instruction. */
arm_skip_unless(s, op);
/* offset[11:1] = insn[10:0] */
offset = (insn & 0x7ff) << 1;
/* offset[17:12] = insn[21:16]. */
offset |= (insn & 0x003f0000) >> 4;
/* offset[31:20] = insn[26]. */
offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
/* offset[18] = insn[13]. */
offset |= (insn & (1 << 13)) << 5;
/* offset[19] = insn[11]. */
offset |= (insn & (1 << 11)) << 8;
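/* Taken together this builds the T3 conditional-branch immediate:
 * imm11 -> offset[11:1], imm6 -> offset[17:12], J1 (bit 13) ->
 * offset[18], J2 (bit 11) -> offset[19], with the sign taken from S
 * (bit 26), giving a range of roughly +/-1MB.
 */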
/* jump to the offset */
gen_jmp(s, s->pc + offset);
11936 * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
11937 * - Data-processing (modified immediate, plain binary immediate)
11939 if (insn
& (1 << 25)) {
11941 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
11942 * - Data-processing (plain binary immediate)
11944 if (insn
& (1 << 24)) {
11945 if (insn
& (1 << 20))
11947 /* Bitfield/Saturate. */
11948 op
= (insn
>> 21) & 7;
11950 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
11952 tmp
= tcg_temp_new_i32();
11953 tcg_gen_movi_i32(tmp
, 0);
11955 tmp
= load_reg(s
, rn
);
11958 case 2: /* Signed bitfield extract. */
11960 if (shift
+ imm
> 32)
11963 tcg_gen_sextract_i32(tmp
, tmp
, shift
, imm
);
11966 case 6: /* Unsigned bitfield extract. */
11968 if (shift
+ imm
> 32)
11971 tcg_gen_extract_i32(tmp
, tmp
, shift
, imm
);
11974 case 3: /* Bitfield insert/clear. */
11977 imm
= imm
+ 1 - shift
;
11979 tmp2
= load_reg(s
, rd
);
11980 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, shift
, imm
);
11981 tcg_temp_free_i32(tmp2
);
11986 default: /* Saturate. */
11989 tcg_gen_sari_i32(tmp
, tmp
, shift
);
11991 tcg_gen_shli_i32(tmp
, tmp
, shift
);
11993 tmp2
= tcg_const_i32(imm
);
11996 if ((op
& 1) && shift
== 0) {
11997 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
11998 tcg_temp_free_i32(tmp
);
11999 tcg_temp_free_i32(tmp2
);
12002 gen_helper_usat16(tmp
, cpu_env
, tmp
, tmp2
);
12004 gen_helper_usat(tmp
, cpu_env
, tmp
, tmp2
);
12008 if ((op
& 1) && shift
== 0) {
12009 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
12010 tcg_temp_free_i32(tmp
);
12011 tcg_temp_free_i32(tmp2
);
12014 gen_helper_ssat16(tmp
, cpu_env
, tmp
, tmp2
);
12016 gen_helper_ssat(tmp
, cpu_env
, tmp
, tmp2
);
12019 tcg_temp_free_i32(tmp2
);
12022 store_reg(s
, rd
, tmp
);
12024 imm
= ((insn
& 0x04000000) >> 15)
12025 | ((insn
& 0x7000) >> 4) | (insn
& 0xff);
12026 if (insn
& (1 << 22)) {
12027 /* 16-bit immediate. */
12028 imm
|= (insn
>> 4) & 0xf000;
12029 if (insn
& (1 << 23)) {
12031 tmp
= load_reg(s
, rd
);
12032 tcg_gen_ext16u_i32(tmp
, tmp
);
12033 tcg_gen_ori_i32(tmp
, tmp
, imm
<< 16);
12036 tmp
= tcg_temp_new_i32();
12037 tcg_gen_movi_i32(tmp
, imm
);
12039 store_reg(s
, rd
, tmp
);
12041 /* Add/sub 12-bit immediate. */
12043 offset
= s
->pc
& ~(uint32_t)3;
12044 if (insn
& (1 << 23))
12048 tmp
= tcg_temp_new_i32();
12049 tcg_gen_movi_i32(tmp
, offset
);
12050 store_reg(s
, rd
, tmp
);
12052 tmp
= load_reg(s
, rn
);
12053 if (insn
& (1 << 23))
12054 tcg_gen_subi_i32(tmp
, tmp
, imm
);
12056 tcg_gen_addi_i32(tmp
, tmp
, imm
);
12057 if (rn
== 13 && rd
== 13) {
12058 /* ADD SP, SP, imm or SUB SP, SP, imm */
12059 store_sp_checked(s
, tmp
);
12061 store_reg(s
, rd
, tmp
);
/* 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
 * - Data-processing (modified immediate)
 */
int shifter_out = 0;
/* modified 12-bit immediate. */
shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
imm = (insn & 0xff);
/* Nothing to do. */
case 1: /* 00XY00XY */
case 2: /* XY00XY00 */
case 3: /* XYXYXYXY */
default: /* Rotated constant. */
shift = (shift << 1) | (imm >> 7);
imm = imm << (32 - shift);
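/* Illustration of the rotated-constant case: i:imm3 = 0b1001 with
 * imm8 = 0x0f gives a rotation of (0b1001 << 1) | 0 = 18, and the
 * encoded constant is 0x8f rotated right by 18, i.e. 0x0023c000
 * (the T32 modified-immediate encoding forces bit 7 of imm8 to one
 * before rotating).
 */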
12097 tmp2
= tcg_temp_new_i32();
12098 tcg_gen_movi_i32(tmp2
, imm
);
12099 rn
= (insn
>> 16) & 0xf;
12101 tmp
= tcg_temp_new_i32();
12102 tcg_gen_movi_i32(tmp
, 0);
12104 tmp
= load_reg(s
, rn
);
12106 op
= (insn
>> 21) & 0xf;
12107 if (gen_thumb2_data_op(s
, op
, (insn
& (1 << 20)) != 0,
12108 shifter_out
, tmp
, tmp2
))
12110 tcg_temp_free_i32(tmp2
);
12111 rd
= (insn
>> 8) & 0xf;
12112 if (rd
== 13 && rn
== 13
12113 && (op
== 8 || op
== 13)) {
12114 /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
12115 store_sp_checked(s
, tmp
);
12116 } else if (rd
!= 15) {
12117 store_reg(s
, rd
, tmp
);
12119 tcg_temp_free_i32(tmp
);
12124 case 12: /* Load/store single data item. */
12131 if ((insn
& 0x01100000) == 0x01000000) {
12132 if (disas_neon_ls_insn(s
, insn
)) {
12137 op
= ((insn
>> 21) & 3) | ((insn
>> 22) & 4);
12139 if (!(insn
& (1 << 20))) {
12143 /* Byte or halfword load space with dest == r15 : memory hints.
12144 * Catch them early so we don't emit pointless addressing code.
12145 * This space is a mix of:
12146 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
12147 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
12149 * unallocated hints, which must be treated as NOPs
12150 * UNPREDICTABLE space, which we NOP or UNDEF depending on
12151 * which is easiest for the decoding logic
12152 * Some space which must UNDEF
12154 int op1
= (insn
>> 23) & 3;
12155 int op2
= (insn
>> 6) & 0x3f;
12160 /* UNPREDICTABLE, unallocated hint or
12161 * PLD/PLDW/PLI (literal)
12166 return; /* PLD/PLDW/PLI or unallocated hint */
12168 if ((op2
== 0) || ((op2
& 0x3c) == 0x30)) {
12169 return; /* PLD/PLDW/PLI or unallocated hint */
12171 /* UNDEF space, or an UNPREDICTABLE */
12175 memidx
= get_mem_index(s
);
12177 addr
= tcg_temp_new_i32();
12179 /* s->pc has already been incremented by 4. */
12180 imm
= s
->pc
& 0xfffffffc;
12181 if (insn
& (1 << 23))
12182 imm
+= insn
& 0xfff;
12184 imm
-= insn
& 0xfff;
12185 tcg_gen_movi_i32(addr
, imm
);
12187 addr
= load_reg(s
, rn
);
12188 if (insn
& (1 << 23)) {
12189 /* Positive offset. */
12190 imm
= insn
& 0xfff;
12191 tcg_gen_addi_i32(addr
, addr
, imm
);
12194 switch ((insn
>> 8) & 0xf) {
12195 case 0x0: /* Shifted Register. */
12196 shift
= (insn
>> 4) & 0xf;
12198 tcg_temp_free_i32(addr
);
12201 tmp
= load_reg(s
, rm
);
12203 tcg_gen_shli_i32(tmp
, tmp
, shift
);
12204 tcg_gen_add_i32(addr
, addr
, tmp
);
12205 tcg_temp_free_i32(tmp
);
12207 case 0xc: /* Negative offset. */
12208 tcg_gen_addi_i32(addr
, addr
, -imm
);
12210 case 0xe: /* User privilege. */
12211 tcg_gen_addi_i32(addr
, addr
, imm
);
12212 memidx
= get_a32_user_mem_index(s
);
12214 case 0x9: /* Post-decrement. */
12216 /* Fall through. */
12217 case 0xb: /* Post-increment. */
12221 case 0xd: /* Pre-decrement. */
12223 /* Fall through. */
12224 case 0xf: /* Pre-increment. */
12228 tcg_temp_free_i32(addr
);
12234 issinfo
= writeback
? ISSInvalid
: rs
;
12236 if (s
->v8m_stackcheck
&& rn
== 13 && writeback
) {
12238 * Stackcheck. Here we know 'addr' is the current SP;
12239 * if imm is +ve we're moving SP up, else down. It is
12240 * UNKNOWN whether the limit check triggers when SP starts
12241 * below the limit and ends up above it; we chose to do so.
12243 if ((int32_t)imm
< 0) {
12244 TCGv_i32 newsp
= tcg_temp_new_i32();
12246 tcg_gen_addi_i32(newsp
, addr
, imm
);
12247 gen_helper_v8m_stackcheck(cpu_env
, newsp
);
12248 tcg_temp_free_i32(newsp
);
12250 gen_helper_v8m_stackcheck(cpu_env
, addr
);
12254 if (writeback
&& !postinc
) {
12255 tcg_gen_addi_i32(addr
, addr
, imm
);
12258 if (insn
& (1 << 20)) {
12260 tmp
= tcg_temp_new_i32();
12263 gen_aa32_ld8u_iss(s
, tmp
, addr
, memidx
, issinfo
);
12266 gen_aa32_ld8s_iss(s
, tmp
, addr
, memidx
, issinfo
);
12269 gen_aa32_ld16u_iss(s
, tmp
, addr
, memidx
, issinfo
);
12272 gen_aa32_ld16s_iss(s
, tmp
, addr
, memidx
, issinfo
);
12275 gen_aa32_ld32u_iss(s
, tmp
, addr
, memidx
, issinfo
);
12278 tcg_temp_free_i32(tmp
);
12279 tcg_temp_free_i32(addr
);
12283 gen_bx_excret(s
, tmp
);
12285 store_reg(s
, rs
, tmp
);
12289 tmp
= load_reg(s
, rs
);
12292 gen_aa32_st8_iss(s
, tmp
, addr
, memidx
, issinfo
);
12295 gen_aa32_st16_iss(s
, tmp
, addr
, memidx
, issinfo
);
12298 gen_aa32_st32_iss(s
, tmp
, addr
, memidx
, issinfo
);
12301 tcg_temp_free_i32(tmp
);
12302 tcg_temp_free_i32(addr
);
12305 tcg_temp_free_i32(tmp
);
12308 tcg_gen_addi_i32(addr
, addr
, imm
);
12310 store_reg(s
, rn
, addr
);
12312 tcg_temp_free_i32(addr
);
12321 gen_exception_insn(s
, 4, EXCP_UDEF
, syn_uncategorized(),
12322 default_exception_el(s
));
12325 static void disas_thumb_insn(DisasContext
*s
, uint32_t insn
)
12327 uint32_t val
, op
, rm
, rn
, rd
, shift
, cond
;
12334 switch (insn
>> 12) {
12338 op
= (insn
>> 11) & 3;
12341 * 0b0001_1xxx_xxxx_xxxx
12342 * - Add, subtract (three low registers)
12343 * - Add, subtract (two low registers and immediate)
12345 rn
= (insn
>> 3) & 7;
12346 tmp
= load_reg(s
, rn
);
12347 if (insn
& (1 << 10)) {
12349 tmp2
= tcg_temp_new_i32();
12350 tcg_gen_movi_i32(tmp2
, (insn
>> 6) & 7);
12353 rm
= (insn
>> 6) & 7;
12354 tmp2
= load_reg(s
, rm
);
12356 if (insn
& (1 << 9)) {
12357 if (s
->condexec_mask
)
12358 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
12360 gen_sub_CC(tmp
, tmp
, tmp2
);
12362 if (s
->condexec_mask
)
12363 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
12365 gen_add_CC(tmp
, tmp
, tmp2
);
12367 tcg_temp_free_i32(tmp2
);
12368 store_reg(s
, rd
, tmp
);
12370 /* shift immediate */
12371 rm
= (insn
>> 3) & 7;
12372 shift
= (insn
>> 6) & 0x1f;
12373 tmp
= load_reg(s
, rm
);
12374 gen_arm_shift_im(tmp
, op
, shift
, s
->condexec_mask
== 0);
12375 if (!s
->condexec_mask
)
12377 store_reg(s
, rd
, tmp
);
12382 * 0b001x_xxxx_xxxx_xxxx
12383 * - Add, subtract, compare, move (one low register and immediate)
12385 op
= (insn
>> 11) & 3;
12386 rd
= (insn
>> 8) & 0x7;
12387 if (op
== 0) { /* mov */
12388 tmp
= tcg_temp_new_i32();
12389 tcg_gen_movi_i32(tmp
, insn
& 0xff);
12390 if (!s
->condexec_mask
)
12392 store_reg(s
, rd
, tmp
);
12394 tmp
= load_reg(s
, rd
);
12395 tmp2
= tcg_temp_new_i32();
12396 tcg_gen_movi_i32(tmp2
, insn
& 0xff);
12399 gen_sub_CC(tmp
, tmp
, tmp2
);
12400 tcg_temp_free_i32(tmp
);
12401 tcg_temp_free_i32(tmp2
);
12404 if (s
->condexec_mask
)
12405 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
12407 gen_add_CC(tmp
, tmp
, tmp2
);
12408 tcg_temp_free_i32(tmp2
);
12409 store_reg(s
, rd
, tmp
);
12412 if (s
->condexec_mask
)
12413 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
12415 gen_sub_CC(tmp
, tmp
, tmp2
);
12416 tcg_temp_free_i32(tmp2
);
12417 store_reg(s
, rd
, tmp
);
12423 if (insn
& (1 << 11)) {
12424 rd
= (insn
>> 8) & 7;
12425 /* load pc-relative. Bit 1 of PC is ignored. */
12426 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
12427 val
&= ~(uint32_t)2;
12428 addr
= tcg_temp_new_i32();
12429 tcg_gen_movi_i32(addr
, val
);
12430 tmp
= tcg_temp_new_i32();
12431 gen_aa32_ld32u_iss(s
, tmp
, addr
, get_mem_index(s
),
12433 tcg_temp_free_i32(addr
);
12434 store_reg(s
, rd
, tmp
);
12437 if (insn
& (1 << 10)) {
12438 /* 0b0100_01xx_xxxx_xxxx
12439 * - data processing extended, branch and exchange
12441 rd
= (insn
& 7) | ((insn
>> 4) & 8);
12442 rm
= (insn
>> 3) & 0xf;
12443 op
= (insn
>> 8) & 3;
12446 tmp
= load_reg(s
, rd
);
12447 tmp2
= load_reg(s
, rm
);
12448 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
12449 tcg_temp_free_i32(tmp2
);
12451 /* ADD SP, SP, reg */
12452 store_sp_checked(s
, tmp
);
12454 store_reg(s
, rd
, tmp
);
12458 tmp
= load_reg(s
, rd
);
12459 tmp2
= load_reg(s
, rm
);
12460 gen_sub_CC(tmp
, tmp
, tmp2
);
12461 tcg_temp_free_i32(tmp2
);
12462 tcg_temp_free_i32(tmp
);
12464 case 2: /* mov/cpy */
12465 tmp
= load_reg(s
, rm
);
12468 store_sp_checked(s
, tmp
);
12470 store_reg(s
, rd
, tmp
);
12475 /* 0b0100_0111_xxxx_xxxx
12476 * - branch [and link] exchange thumb register
12478 bool link
= insn
& (1 << 7);
12487 /* BXNS/BLXNS: only exists for v8M with the
12488 * security extensions, and always UNDEF if NonSecure.
12489 * We don't implement these in the user-only mode
12490 * either (in theory you can use them from Secure User
12491 * mode but they are too tied in to system emulation.)
12493 if (!s
->v8m_secure
|| IS_USER_ONLY
) {
12504 tmp
= load_reg(s
, rm
);
12506 val
= (uint32_t)s
->pc
| 1;
12507 tmp2
= tcg_temp_new_i32();
12508 tcg_gen_movi_i32(tmp2
, val
);
12509 store_reg(s
, 14, tmp2
);
12512 /* Only BX works as exception-return, not BLX */
12513 gen_bx_excret(s
, tmp
);
12522 * 0b0100_00xx_xxxx_xxxx
12523 * - Data-processing (two low registers)
12526 rm
= (insn
>> 3) & 7;
12527 op
= (insn
>> 6) & 0xf;
12528 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
12529 /* the shift/rotate ops want the operands backwards */
12538 if (op
== 9) { /* neg */
12539 tmp
= tcg_temp_new_i32();
12540 tcg_gen_movi_i32(tmp
, 0);
12541 } else if (op
!= 0xf) { /* mvn doesn't read its first operand */
12542 tmp
= load_reg(s
, rd
);
12547 tmp2
= load_reg(s
, rm
);
12549 case 0x0: /* and */
12550 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
12551 if (!s
->condexec_mask
)
12554 case 0x1: /* eor */
12555 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
12556 if (!s
->condexec_mask
)
12559 case 0x2: /* lsl */
12560 if (s
->condexec_mask
) {
12561 gen_shl(tmp2
, tmp2
, tmp
);
12563 gen_helper_shl_cc(tmp2
, cpu_env
, tmp2
, tmp
);
12564 gen_logic_CC(tmp2
);
12567 case 0x3: /* lsr */
12568 if (s
->condexec_mask
) {
12569 gen_shr(tmp2
, tmp2
, tmp
);
12571 gen_helper_shr_cc(tmp2
, cpu_env
, tmp2
, tmp
);
12572 gen_logic_CC(tmp2
);
12575 case 0x4: /* asr */
12576 if (s
->condexec_mask
) {
12577 gen_sar(tmp2
, tmp2
, tmp
);
12579 gen_helper_sar_cc(tmp2
, cpu_env
, tmp2
, tmp
);
12580 gen_logic_CC(tmp2
);
12583 case 0x5: /* adc */
12584 if (s
->condexec_mask
) {
12585 gen_adc(tmp
, tmp2
);
12587 gen_adc_CC(tmp
, tmp
, tmp2
);
12590 case 0x6: /* sbc */
12591 if (s
->condexec_mask
) {
12592 gen_sub_carry(tmp
, tmp
, tmp2
);
12594 gen_sbc_CC(tmp
, tmp
, tmp2
);
12597 case 0x7: /* ror */
12598 if (s
->condexec_mask
) {
12599 tcg_gen_andi_i32(tmp
, tmp
, 0x1f);
12600 tcg_gen_rotr_i32(tmp2
, tmp2
, tmp
);
12602 gen_helper_ror_cc(tmp2
, cpu_env
, tmp2
, tmp
);
12603 gen_logic_CC(tmp2
);
12606 case 0x8: /* tst */
12607 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
12611 case 0x9: /* neg */
12612 if (s
->condexec_mask
)
12613 tcg_gen_neg_i32(tmp
, tmp2
);
12615 gen_sub_CC(tmp
, tmp
, tmp2
);
12617 case 0xa: /* cmp */
12618 gen_sub_CC(tmp
, tmp
, tmp2
);
12621 case 0xb: /* cmn */
12622 gen_add_CC(tmp
, tmp
, tmp2
);
12625 case 0xc: /* orr */
12626 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
12627 if (!s
->condexec_mask
)
12630 case 0xd: /* mul */
12631 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
12632 if (!s
->condexec_mask
)
12635 case 0xe: /* bic */
12636 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
12637 if (!s
->condexec_mask
)
12640 case 0xf: /* mvn */
12641 tcg_gen_not_i32(tmp2
, tmp2
);
12642 if (!s
->condexec_mask
)
12643 gen_logic_CC(tmp2
);
12650 store_reg(s
, rm
, tmp2
);
12652 tcg_temp_free_i32(tmp
);
12654 store_reg(s
, rd
, tmp
);
12655 tcg_temp_free_i32(tmp2
);
12658 tcg_temp_free_i32(tmp
);
12659 tcg_temp_free_i32(tmp2
);
12664 /* load/store register offset. */
12666 rn
= (insn
>> 3) & 7;
12667 rm
= (insn
>> 6) & 7;
12668 op
= (insn
>> 9) & 7;
12669 addr
= load_reg(s
, rn
);
12670 tmp
= load_reg(s
, rm
);
12671 tcg_gen_add_i32(addr
, addr
, tmp
);
12672 tcg_temp_free_i32(tmp
);
12674 if (op
< 3) { /* store */
12675 tmp
= load_reg(s
, rd
);
12677 tmp
= tcg_temp_new_i32();
12682 gen_aa32_st32_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
12685 gen_aa32_st16_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
12688 gen_aa32_st8_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
12690 case 3: /* ldrsb */
12691 gen_aa32_ld8s_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
12694 gen_aa32_ld32u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
12697 gen_aa32_ld16u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
12700 gen_aa32_ld8u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
12702 case 7: /* ldrsh */
12703 gen_aa32_ld16s_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
12706 if (op
>= 3) { /* load */
12707 store_reg(s
, rd
, tmp
);
12709 tcg_temp_free_i32(tmp
);
12711 tcg_temp_free_i32(addr
);
12715 /* load/store word immediate offset */
12717 rn
= (insn
>> 3) & 7;
12718 addr
= load_reg(s
, rn
);
12719 val
= (insn
>> 4) & 0x7c;
12720 tcg_gen_addi_i32(addr
, addr
, val
);
12722 if (insn
& (1 << 11)) {
12724 tmp
= tcg_temp_new_i32();
12725 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
12726 store_reg(s
, rd
, tmp
);
12729 tmp
= load_reg(s
, rd
);
12730 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
12731 tcg_temp_free_i32(tmp
);
12733 tcg_temp_free_i32(addr
);
12737 /* load/store byte immediate offset */
12739 rn
= (insn
>> 3) & 7;
12740 addr
= load_reg(s
, rn
);
12741 val
= (insn
>> 6) & 0x1f;
12742 tcg_gen_addi_i32(addr
, addr
, val
);
12744 if (insn
& (1 << 11)) {
12746 tmp
= tcg_temp_new_i32();
12747 gen_aa32_ld8u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
12748 store_reg(s
, rd
, tmp
);
12751 tmp
= load_reg(s
, rd
);
12752 gen_aa32_st8_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
12753 tcg_temp_free_i32(tmp
);
12755 tcg_temp_free_i32(addr
);
12759 /* load/store halfword immediate offset */
12761 rn
= (insn
>> 3) & 7;
12762 addr
= load_reg(s
, rn
);
12763 val
= (insn
>> 5) & 0x3e;
12764 tcg_gen_addi_i32(addr
, addr
, val
);
12766 if (insn
& (1 << 11)) {
12768 tmp
= tcg_temp_new_i32();
12769 gen_aa32_ld16u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
12770 store_reg(s
, rd
, tmp
);
12773 tmp
= load_reg(s
, rd
);
12774 gen_aa32_st16_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
12775 tcg_temp_free_i32(tmp
);
12777 tcg_temp_free_i32(addr
);
    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;
    case 10:
        /*
         * 0b1010_xxxx_xxxx_xxxx
         *  - Add PC/SP (immediate)
         */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;
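
    /*
     * In the PC-relative form above, s->pc has already been advanced past
     * this 2-byte insn, so "s->pc + 2" is the architectural PC (insn address
     * + 4); clearing bit 1 gives the Align(PC, 4) base that ADR requires.
     */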
    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /*
             * 0b1011_0000_xxxx_xxxx
             *  - ADD (SP plus immediate)
             *  - SUB (SP minus immediate)
             */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_sp_checked(s, tmp);
            break;
        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /*
             * 0b1011_x10x_xxxx_xxxx
             *  - push/pop
             */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }

            if (s->v8m_stackcheck) {
                /*
                 * Here 'addr' is the lower of "old SP" and "new SP";
                 * if this is a pop that starts below the limit and ends
                 * above it, it is UNKNOWN whether the limit check triggers;
                 * we choose to trigger.
                 */
                gen_helper_v8m_stackcheck(cpu_env, addr);
            }

            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            tmp = NULL;
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(s, 15, tmp);
            }
            break;
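
        /*
         * Summary of the push/pop sequence above: 'offset' is 4 bytes per
         * register in the list (plus 4 if bit 8 adds LR/PC), the transfers
         * always walk addresses upwards, and for a push the base is
         * pre-decremented both before the accesses and again for the final
         * writeback, so SP ends up at old SP - offset.
         */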
        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            arm_gen_condlabel(s);
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;
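
        /*
         * CBZ/CBNZ above: the branch offset is i:imm5:'0' (bit 9 and bits
         * [7:3]), giving a forward-only displacement of 0..126 bytes from
         * the architectural PC (s->pc + 2).
         */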
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;
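
        /*
         * The IT state is split across two fields: condexec_cond keeps the
         * top three bits of firstcond, while condexec_mask carries
         * firstcond[0] together with the 4-bit mask; thumb_tr_translate_insn()
         * shifts the mask along after each insn in the block.
         */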
        case 0xe: /* bkpt */
        {
            int imm8 = extract32(insn, 0, 8);
            ARCH(5);
            gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
            break;
        }
        case 0xa: /* rev, and hlt */
        {
            int op1 = extract32(insn, 6, 2);

            if (op1 == 2) {
                /* HLT */
                int imm6 = extract32(insn, 0, 6);

                gen_hlt(s, imm6);
                break;
            }

            /* Otherwise this is rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch (op1) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default:
                g_assert_not_reached();
            }
            store_reg(s, rd, tmp);
            break;
        }
        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
                    gen_helper_setend(cpu_env);
                    s->base.is_jmp = DISAS_UPDATE;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    break;
                }
                if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;

        default:
            goto undef;
        }
        break;
    case 12:
    {
        /* load/store multiple */
        TCGv_i32 loaded_var = NULL;
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    if (i == rn) {
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
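
    /*
     * Note on the LDMIA/STMIA above: when the base register also appears in
     * a load's register list, the loaded value is kept in loaded_var and the
     * writeback to the base register is done only after the whole list has
     * been processed.
     */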
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 8);
            s->base.is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        arm_skip_unless(s, cond);

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;
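
    /*
     * The conditional branch above sign-extends imm8 and doubles it, so the
     * reachable range is -256..+254 bytes from the architectural PC
     * (s->pc + 2).
     */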
    case 14:
        if (insn & (1 << 11)) {
            /* thumb_insn_is_16bit() ensures we can't get here for
             * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
             * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
             */
            assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
            ARCH(5);
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);

            /* Return address is the insn after this one, with the Thumb bit set. */
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;
    case 15:
        /* thumb_insn_is_16bit() ensures we can't get here for
         * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
         */
        assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));

        if (insn & (1 << 11)) {
            /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
        } else {
            /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
            uint32_t uoffset = ((int32_t)insn << 21) >> 9;

            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
        }
        break;
    }
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
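
/*
 * A worked example of the split 16-bit BL/BLX pair handled in cases 14/15
 * above: the prefix stores PC + SignExtend(imm11) << 12 into LR, and the
 * suffix then branches to LR + (imm11 << 1) while rewriting LR with the
 * return address (next insn | 1). For instance the classic self-branch
 * "f7ff fffe" is prefix imm11 = 0x7ff (-1) followed by suffix imm11 = 0x7fe,
 * which lands back on the prefix itself.
 */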

static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
    /* Return true if the insn at dc->pc might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     * We know this is a Thumb insn, and our caller ensures we are
     * only called if dc->pc is less than 4 bytes from the page
     * boundary, so we cross the page if the first 16 bits indicate
     * that this is a 32 bit insn.
     */
    uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);

    return !thumb_insn_is_16bit(s, insn);
}

static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t tb_flags = dc->base.tb->flags;
    uint32_t condexec, core_mmu_idx;

    dc->isar = &cpu->isar;
    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
    dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
    dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
    condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
    dc->condexec_mask = (condexec & 0xf) << 1;
    dc->condexec_cond = condexec >> 4;
    core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
    dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
    dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
    dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
    dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
    dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
    dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
    dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                     regime_is_secure(env, dc->mmu_idx);
    dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
    dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;

    /* If architectural single step active, limit to 1.  */
    if (is_singlestepping(dc)) {
        dc->base.max_insns = 1;
    }

    /* ARM is a fixed-length ISA.  Bound the number of insns to execute
       to those left on the page.  */
    if (!dc->thumb) {
        int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
        dc->base.max_insns = MIN(dc->base.max_insns, bound);
    }
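
    /*
     * About the bound above: TARGET_PAGE_MASK is a negative value
     * (~(TARGET_PAGE_SIZE - 1)), so "pc_first | TARGET_PAGE_MASK" equals
     * (offset into page) - TARGET_PAGE_SIZE; negating it gives the bytes
     * left on the page, and dividing by 4 the number of fixed-size ARM insns
     * that still fit. E.g. with 4K pages and pc_first ending in 0xffc the
     * bound is 1.
     */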

    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
}

static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->pc,
                       (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                       0);
    dc->insn_start = tcg_last_op();
}
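
/*
 * The three words recorded per insn above (the PC, the IT bits repacked
 * into the env->condexec_bits layout, and a zero placeholder for the
 * syndrome) are exactly what restore_state_to_opc() at the end of this file
 * consumes as data[0..2] when unwinding to an instruction boundary.
 */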

static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->pc);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing.  */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->pc += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}

static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page.  */
    if (dc->pc >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block.  */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}
*dc
)
13372 if (dc
->condjmp
&& !dc
->base
.is_jmp
) {
13373 gen_set_label(dc
->condlabel
);
13376 dc
->base
.pc_next
= dc
->pc
;
13377 translator_loop_temp_check(&dc
->base
);

static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    unsigned int insn;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
    dc->insn = insn;
    dc->pc += 4;
    disas_arm_insn(dc, insn);

    arm_post_translate_insn(dc);

    /* ARM is a fixed-length ISA.  We performed the cross-page check
       in init_disas_context by adjusting max_insns.  */
}

static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
{
    /* Return true if this Thumb insn is always unconditional,
     * even inside an IT block. This is true of only a very few
     * instructions: BKPT, HLT, and SG.
     *
     * A larger class of instructions are UNPREDICTABLE if used
     * inside an IT block; we do not need to detect those here, because
     * what we do by default (perform the cc check and update the IT
     * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
     * choice for those situations.
     *
     * insn is either a 16-bit or a 32-bit instruction; the two are
     * distinguishable because for the 16-bit case the top 16 bits
     * are zeroes, and that isn't a valid 32-bit encoding.
     */
    if ((insn & 0xffffff00) == 0xbe00) {
        /* BKPT */
        return true;
    }

    if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_M)) {
        /* HLT: v8A only. This is unconditional even when it is going to
         * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
         * For v7 cores this was a plain old undefined encoding and so
         * honours its cc check. (We might be using the encoding as
         * a semihosting trap, but we don't change the cc check behaviour
         * on that account, because a debugger connected to a real v7A
         * core and emulating semihosting traps by catching the UNDEF
         * exception would also only see cases where the cc check passed.
         * No guest code should be trying to do a HLT semihosting trap
         * in an IT block anyway.
         */
        return true;
    }

    if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* SG: v8M only */
        return true;
    }

    return false;
}

static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, insn);
    dc->pc += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->pc += 2;
    }
    dc->insn = insn;

    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition.  */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }
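
    /*
     * In other words: after each insn in an IT block the 5-bit mask shifts
     * left by one, the bit leaving position 4 becomes the low bit of the
     * next condition, and once the mask is empty the stored condition is
     * cleared because the block is over.
     */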

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->pc);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->pc);
        }
    }

    /* Functions above can change dc->pc, so re-align db->pc_next */
    dc->base.pc_next = dc->pc;
}

static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}

static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
{
    DisasContext dc;
    const TranslatorOps *ops = &arm_translator_ops;

    if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
        ops = &thumb_translator_ops;
    }
#ifdef TARGET_AARCH64
    if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
        ops = &aarch64_translator_ops;
    }
#endif

    translator_loop(ops, &dc.base, cpu, tb);
}

void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
        return;
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                    xpsr,
                    xpsr & XPSR_N ? 'N' : '-',
                    xpsr & XPSR_Z ? 'Z' : '-',
                    xpsr & XPSR_C ? 'C' : '-',
                    xpsr & XPSR_V ? 'V' : '-',
                    xpsr & XPSR_T ? 'T' : 'A',
                    ns_status,
                    mode);
    } else {
= cpsr_read(env
);
13735 const char *ns_status
= "";
13737 if (arm_feature(env
, ARM_FEATURE_EL3
) &&
13738 (psr
& CPSR_M
) != ARM_CPU_MODE_MON
) {
13739 ns_status
= env
->cp15
.scr_el3
& SCR_NS
? "NS " : "S ";
13742 cpu_fprintf(f
, "PSR=%08x %c%c%c%c %c %s%s%d\n",
13744 psr
& CPSR_N
? 'N' : '-',
13745 psr
& CPSR_Z
? 'Z' : '-',
13746 psr
& CPSR_C
? 'C' : '-',
13747 psr
& CPSR_V
? 'V' : '-',
13748 psr
& CPSR_T
? 'T' : 'A',
13750 aarch32_mode_name(psr
), (psr
& 0x10) ? 32 : 26);

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = *aa32_vfp_dreg(env, i);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
    }
}

void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}