/*
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "qemu/bitops.h"
#include "exec/semihost.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    arm_dc_feature(s, ARM_FEATURE_JAZELLE)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}
/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;
/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the callsites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}
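/*
 * Added usage note (not part of the original source): the "info" argument
 * combines the transfer register number with the flag bits above, so the
 * value reaching this function for, say, a 16-bit store to r3 would be
 * (3 | ISSIsWrite | ISSIs16Bit), while callers that must suppress ISS
 * reporting (e.g. writeback forms) OR in ISSInvalid instead.
 */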
static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    default:
        g_assert_not_reached();
    }
}
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
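/*
 * Added example (not in the original source): with the masking above, an
 * ARM-state write of 0x00001003 to r15 stores 0x00001000 and a Thumb-state
 * write of the same value stores 0x00001002. Interworking (switching the
 * Thumb bit from bit 0 of the value) is handled separately by gen_bx(), not
 * by this function.
 */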
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}
static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}
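/*
 * Worked example (added for clarity, not in the original source):
 * var = 0xAABBCCDD gives tmp = 0x00AA00CC and var = 0xBB00DD00 just before
 * the final OR, so the result is 0xBBAADDCC, i.e. each halfword byteswapped.
 */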
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
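/*
 * Added note (not in the original source): clearing bit 15 of both inputs
 * before the 32-bit add guarantees that no carry can propagate from the low
 * halfword into the high one; XORing "tmp" back in then restores the correct
 * bit 15 of the low result. E.g. t0 = t1 = 0x00018000 yields 0x00020000
 * rather than the 0x00030000 a plain 32-bit add would produce.
 */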
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
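/*
 * Added note (not in the original source): the overflow flag lives in the
 * sign bit of cpu_VF and is computed as (result ^ t0) & ~(t0 ^ t1): overflow
 * is only possible when both operands have the same sign and the result's
 * sign differs. E.g. 0x7fffffff + 1 = 0x80000000 sets V, while adding two
 * operands of opposite sign never can.
 */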
/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
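/*
 * Added note (not in the original source): ARM defines the C flag for
 * subtraction as "NOT borrow", hence the unsigned t0 >= t1 comparison above:
 * 5 - 7 leaves C clear (a borrow occurred), while 7 - 5 and 5 - 5 both leave
 * C set.
 */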
/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
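/*
 * Added note (not in the original source): this relies on the two's-complement
 * identity T0 - T1 - (1 - CF) == T0 + ~T1 + CF, so subtract-with-carry can
 * simply reuse gen_adc_CC() on the bitwise-inverted second operand.
 */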
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
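/*
 * Added note (not in the original source): ARM register-specified shifts use
 * the bottom byte of the shift register, and LSL/LSR by 32 or more must
 * produce zero. The movcond in the macro above substitutes a zero source when
 * (t1 & 0xff) > 31, and the final "& 0x1f" keeps the host shift amount legal,
 * so e.g. a requested shift of 40 yields dest = 0.
 */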
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
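/*
 * Added note (not in the original source): this computes the shifter
 * carry-out for an immediate shift, i.e. the last bit shifted out of var.
 * For LSL #n that is bit (32 - n) of the original value, so callers pass
 * "32 - shift"; for LSR/ASR/ROR #n they pass "shift - 1".
 */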
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
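/*
 * Added note (not in the original source): when the condition flags are
 * needed, the register-specified shifts are delegated to helpers because a
 * shift amount of zero must leave CF unchanged and amounts of 32 and above
 * have their own carry-out rules, which are awkward to express inline as TCG
 * ops.
 */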
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
#undef gen_pas_helper
    }
}
#undef PAS_OP
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
#undef gen_pas_helper
    }
}
#undef PAS_OP
785 * Generate a conditional based on ARM condition code cc.
786 * This is common between ARM and Aarch64 targets.
788 void arm_test_cc(DisasCompare
*cmp
, int cc
)
819 case 8: /* hi: C && !Z */
820 case 9: /* ls: !C || Z -> !(C && !Z) */
822 value
= tcg_temp_new_i32();
824 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
825 ZF is non-zero for !Z; so AND the two subexpressions. */
826 tcg_gen_neg_i32(value
, cpu_CF
);
827 tcg_gen_and_i32(value
, value
, cpu_ZF
);
830 case 10: /* ge: N == V -> N ^ V == 0 */
831 case 11: /* lt: N != V -> N ^ V != 0 */
832 /* Since we're only interested in the sign bit, == 0 is >= 0. */
834 value
= tcg_temp_new_i32();
836 tcg_gen_xor_i32(value
, cpu_VF
, cpu_NF
);
839 case 12: /* gt: !Z && N == V */
840 case 13: /* le: Z || N != V */
842 value
= tcg_temp_new_i32();
844 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
845 * the sign bit then AND with ZF to yield the result. */
846 tcg_gen_xor_i32(value
, cpu_VF
, cpu_NF
);
847 tcg_gen_sari_i32(value
, value
, 31);
848 tcg_gen_andc_i32(value
, cpu_ZF
, value
);
851 case 14: /* always */
852 case 15: /* always */
853 /* Use the ALWAYS condition, which will fold early.
854 * It doesn't matter what we use for the value. */
855 cond
= TCG_COND_ALWAYS
;
860 fprintf(stderr
, "Bad condition code 0x%x\n", cc
);
865 cond
= tcg_invert_cond(cond
);
871 cmp
->value_global
= global
;
874 void arm_free_cc(DisasCompare
*cmp
)
876 if (!cmp
->value_global
) {
877 tcg_temp_free_i32(cmp
->value
);
881 void arm_jump_cc(DisasCompare
*cmp
, TCGLabel
*label
)
883 tcg_gen_brcondi_i32(cmp
->cond
, cmp
->value
, 0, label
);
886 void arm_gen_test_cc(int cc
, TCGLabel
*label
)
889 arm_test_cc(&cmp
, cc
);
890 arm_jump_cc(&cmp
, label
);
894 static const uint8_t table_logic_cc
[16] = {
913 static inline void gen_set_condexec(DisasContext
*s
)
915 if (s
->condexec_mask
) {
916 uint32_t val
= (s
->condexec_cond
<< 4) | (s
->condexec_mask
>> 1);
917 TCGv_i32 tmp
= tcg_temp_new_i32();
918 tcg_gen_movi_i32(tmp
, val
);
919 store_cpu_field(tmp
, condexec_bits
);
923 static inline void gen_set_pc_im(DisasContext
*s
, target_ulong val
)
925 tcg_gen_movi_i32(cpu_R
[15], val
);
928 /* Set PC and Thumb state from an immediate address. */
929 static inline void gen_bx_im(DisasContext
*s
, uint32_t addr
)
933 s
->base
.is_jmp
= DISAS_JUMP
;
934 if (s
->thumb
!= (addr
& 1)) {
935 tmp
= tcg_temp_new_i32();
936 tcg_gen_movi_i32(tmp
, addr
& 1);
937 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUARMState
, thumb
));
938 tcg_temp_free_i32(tmp
);
940 tcg_gen_movi_i32(cpu_R
[15], addr
& ~1);
943 /* Set PC and Thumb state from var. var is marked as dead. */
944 static inline void gen_bx(DisasContext
*s
, TCGv_i32 var
)
946 s
->base
.is_jmp
= DISAS_JUMP
;
947 tcg_gen_andi_i32(cpu_R
[15], var
, ~1);
948 tcg_gen_andi_i32(var
, var
, 1);
949 store_cpu_field(var
, thumb
);
952 /* Set PC and Thumb state from var. var is marked as dead.
953 * For M-profile CPUs, include logic to detect exception-return
954 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
955 * and BX reg, and no others, and happens only for code in Handler mode.
957 static inline void gen_bx_excret(DisasContext
*s
, TCGv_i32 var
)
959 /* Generate the same code here as for a simple bx, but flag via
960 * s->base.is_jmp that we need to do the rest of the work later.
963 if (s
->v7m_handler_mode
&& arm_dc_feature(s
, ARM_FEATURE_M
)) {
964 s
->base
.is_jmp
= DISAS_BX_EXCRET
;
968 static inline void gen_bx_excret_final_code(DisasContext
*s
)
970 /* Generate the code to finish possible exception return and end the TB */
971 TCGLabel
*excret_label
= gen_new_label();
973 /* Is the new PC value in the magic range indicating exception return? */
974 tcg_gen_brcondi_i32(TCG_COND_GEU
, cpu_R
[15], 0xff000000, excret_label
);
975 /* No: end the TB as we would for a DISAS_JMP */
976 if (is_singlestepping(s
)) {
977 gen_singlestep_exception(s
);
981 gen_set_label(excret_label
);
982 /* Yes: this is an exception return.
983 * At this point in runtime env->regs[15] and env->thumb will hold
984 * the exception-return magic number, which do_v7m_exception_exit()
985 * will read. Nothing else will be able to see those values because
986 * the cpu-exec main loop guarantees that we will always go straight
987 * from raising the exception to the exception-handling code.
989 * gen_ss_advance(s) does nothing on M profile currently but
990 * calling it is conceptually the right thing as we have executed
991 * this instruction (compare SWI, HVC, SMC handling).
994 gen_exception_internal(EXCP_EXCEPTION_EXIT
);
997 static inline void gen_bxns(DisasContext
*s
, int rm
)
999 TCGv_i32 var
= load_reg(s
, rm
);
1001 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1002 * we need to sync state before calling it, but:
1003 * - we don't need to do gen_set_pc_im() because the bxns helper will
1004 * always set the PC itself
1005 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1006 * unless it's outside an IT block or the last insn in an IT block,
1007 * so we know that condexec == 0 (already set at the top of the TB)
1008 * is correct in the non-UNPREDICTABLE cases, and we can choose
1009 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1011 gen_helper_v7m_bxns(cpu_env
, var
);
1012 tcg_temp_free_i32(var
);
1013 s
->base
.is_jmp
= DISAS_EXIT
;
1016 /* Variant of store_reg which uses branch&exchange logic when storing
1017 to r15 in ARM architecture v7 and above. The source must be a temporary
1018 and will be marked as dead. */
1019 static inline void store_reg_bx(DisasContext
*s
, int reg
, TCGv_i32 var
)
1021 if (reg
== 15 && ENABLE_ARCH_7
) {
1024 store_reg(s
, reg
, var
);
1028 /* Variant of store_reg which uses branch&exchange logic when storing
1029 * to r15 in ARM architecture v5T and above. This is used for storing
1030 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1031 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
1032 static inline void store_reg_from_load(DisasContext
*s
, int reg
, TCGv_i32 var
)
1034 if (reg
== 15 && ENABLE_ARCH_5
) {
1035 gen_bx_excret(s
, var
);
1037 store_reg(s
, reg
, var
);
1041 #ifdef CONFIG_USER_ONLY
1042 #define IS_USER_ONLY 1
1044 #define IS_USER_ONLY 0
1047 /* Abstractions of "generate code to do a guest load/store for
1048 * AArch32", where a vaddr is always 32 bits (and is zero
1049 * extended if we're a 64 bit core) and data is also
1050 * 32 bits unless specifically doing a 64 bit access.
1051 * These functions work like tcg_gen_qemu_{ld,st}* except
1052 * that the address argument is TCGv_i32 rather than TCGv.
1055 static inline TCGv
gen_aa32_addr(DisasContext
*s
, TCGv_i32 a32
, TCGMemOp op
)
1057 TCGv addr
= tcg_temp_new();
1058 tcg_gen_extu_i32_tl(addr
, a32
);
1060 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1061 if (!IS_USER_ONLY
&& s
->sctlr_b
&& (op
& MO_SIZE
) < MO_32
) {
1062 tcg_gen_xori_tl(addr
, addr
, 4 - (1 << (op
& MO_SIZE
)));
1067 static void gen_aa32_ld_i32(DisasContext
*s
, TCGv_i32 val
, TCGv_i32 a32
,
1068 int index
, TCGMemOp opc
)
1070 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
1071 tcg_gen_qemu_ld_i32(val
, addr
, index
, opc
);
1072 tcg_temp_free(addr
);
1075 static void gen_aa32_st_i32(DisasContext
*s
, TCGv_i32 val
, TCGv_i32 a32
,
1076 int index
, TCGMemOp opc
)
1078 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
1079 tcg_gen_qemu_st_i32(val
, addr
, index
, opc
);
1080 tcg_temp_free(addr
);
1083 #define DO_GEN_LD(SUFF, OPC) \
1084 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
1085 TCGv_i32 a32, int index) \
1087 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
1089 static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1091 TCGv_i32 a32, int index, \
1094 gen_aa32_ld##SUFF(s, val, a32, index); \
1095 disas_set_da_iss(s, OPC, issinfo); \
1098 #define DO_GEN_ST(SUFF, OPC) \
1099 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1100 TCGv_i32 a32, int index) \
1102 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
1104 static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1106 TCGv_i32 a32, int index, \
1109 gen_aa32_st##SUFF(s, val, a32, index); \
1110 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
1113 static inline void gen_aa32_frob64(DisasContext
*s
, TCGv_i64 val
)
1115 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1116 if (!IS_USER_ONLY
&& s
->sctlr_b
) {
1117 tcg_gen_rotri_i64(val
, val
, 32);
1121 static void gen_aa32_ld_i64(DisasContext
*s
, TCGv_i64 val
, TCGv_i32 a32
,
1122 int index
, TCGMemOp opc
)
1124 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
1125 tcg_gen_qemu_ld_i64(val
, addr
, index
, opc
);
1126 gen_aa32_frob64(s
, val
);
1127 tcg_temp_free(addr
);
1130 static inline void gen_aa32_ld64(DisasContext
*s
, TCGv_i64 val
,
1131 TCGv_i32 a32
, int index
)
1133 gen_aa32_ld_i64(s
, val
, a32
, index
, MO_Q
| s
->be_data
);
1136 static void gen_aa32_st_i64(DisasContext
*s
, TCGv_i64 val
, TCGv_i32 a32
,
1137 int index
, TCGMemOp opc
)
1139 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
1141 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1142 if (!IS_USER_ONLY
&& s
->sctlr_b
) {
1143 TCGv_i64 tmp
= tcg_temp_new_i64();
1144 tcg_gen_rotri_i64(tmp
, val
, 32);
1145 tcg_gen_qemu_st_i64(tmp
, addr
, index
, opc
);
1146 tcg_temp_free_i64(tmp
);
1148 tcg_gen_qemu_st_i64(val
, addr
, index
, opc
);
1150 tcg_temp_free(addr
);
1153 static inline void gen_aa32_st64(DisasContext
*s
, TCGv_i64 val
,
1154 TCGv_i32 a32
, int index
)
1156 gen_aa32_st_i64(s
, val
, a32
, index
, MO_Q
| s
->be_data
);
1159 DO_GEN_LD(8s
, MO_SB
)
1160 DO_GEN_LD(8u, MO_UB
)
1161 DO_GEN_LD(16s
, MO_SW
)
1162 DO_GEN_LD(16u, MO_UW
)
1163 DO_GEN_LD(32u, MO_UL
)
1165 DO_GEN_ST(16, MO_UW
)
1166 DO_GEN_ST(32, MO_UL
)
1168 static inline void gen_hvc(DisasContext
*s
, int imm16
)
1170 /* The pre HVC helper handles cases when HVC gets trapped
1171 * as an undefined insn by runtime configuration (ie before
1172 * the insn really executes).
1174 gen_set_pc_im(s
, s
->pc
- 4);
1175 gen_helper_pre_hvc(cpu_env
);
1176 /* Otherwise we will treat this as a real exception which
1177 * happens after execution of the insn. (The distinction matters
1178 * for the PC value reported to the exception handler and also
1179 * for single stepping.)
1182 gen_set_pc_im(s
, s
->pc
);
1183 s
->base
.is_jmp
= DISAS_HVC
;
1186 static inline void gen_smc(DisasContext
*s
)
1188 /* As with HVC, we may take an exception either before or after
1189 * the insn executes.
1193 gen_set_pc_im(s
, s
->pc
- 4);
1194 tmp
= tcg_const_i32(syn_aa32_smc());
1195 gen_helper_pre_smc(cpu_env
, tmp
);
1196 tcg_temp_free_i32(tmp
);
1197 gen_set_pc_im(s
, s
->pc
);
1198 s
->base
.is_jmp
= DISAS_SMC
;
1201 static void gen_exception_internal_insn(DisasContext
*s
, int offset
, int excp
)
1203 gen_set_condexec(s
);
1204 gen_set_pc_im(s
, s
->pc
- offset
);
1205 gen_exception_internal(excp
);
1206 s
->base
.is_jmp
= DISAS_NORETURN
;
1209 static void gen_exception_insn(DisasContext
*s
, int offset
, int excp
,
1210 int syn
, uint32_t target_el
)
1212 gen_set_condexec(s
);
1213 gen_set_pc_im(s
, s
->pc
- offset
);
1214 gen_exception(excp
, syn
, target_el
);
1215 s
->base
.is_jmp
= DISAS_NORETURN
;
1218 /* Force a TB lookup after an instruction that changes the CPU state. */
1219 static inline void gen_lookup_tb(DisasContext
*s
)
1221 tcg_gen_movi_i32(cpu_R
[15], s
->pc
& ~1);
1222 s
->base
.is_jmp
= DISAS_EXIT
;
1225 static inline void gen_hlt(DisasContext
*s
, int imm
)
1227 /* HLT. This has two purposes.
1228 * Architecturally, it is an external halting debug instruction.
1229 * Since QEMU doesn't implement external debug, we treat this as
1230 * it is required for halting debug disabled: it will UNDEF.
1231 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1232 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1233 * must trigger semihosting even for ARMv7 and earlier, where
1234 * HLT was an undefined encoding.
1235 * In system mode, we don't allow userspace access to
1236 * semihosting, to provide some semblance of security
1237 * (and for consistency with our 32-bit semihosting).
1239 if (semihosting_enabled() &&
1240 #ifndef CONFIG_USER_ONLY
1241 s
->current_el
!= 0 &&
1243 (imm
== (s
->thumb
? 0x3c : 0xf000))) {
1244 gen_exception_internal_insn(s
, 0, EXCP_SEMIHOST
);
1248 gen_exception_insn(s
, s
->thumb
? 2 : 4, EXCP_UDEF
, syn_uncategorized(),
1249 default_exception_el(s
));
1252 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
,
1255 int val
, rm
, shift
, shiftop
;
1258 if (!(insn
& (1 << 25))) {
1261 if (!(insn
& (1 << 23)))
1264 tcg_gen_addi_i32(var
, var
, val
);
1266 /* shift/register */
1268 shift
= (insn
>> 7) & 0x1f;
1269 shiftop
= (insn
>> 5) & 3;
1270 offset
= load_reg(s
, rm
);
1271 gen_arm_shift_im(offset
, shiftop
, shift
, 0);
1272 if (!(insn
& (1 << 23)))
1273 tcg_gen_sub_i32(var
, var
, offset
);
1275 tcg_gen_add_i32(var
, var
, offset
);
1276 tcg_temp_free_i32(offset
);
1280 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
,
1281 int extra
, TCGv_i32 var
)
1286 if (insn
& (1 << 22)) {
1288 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
1289 if (!(insn
& (1 << 23)))
1293 tcg_gen_addi_i32(var
, var
, val
);
1297 tcg_gen_addi_i32(var
, var
, extra
);
1299 offset
= load_reg(s
, rm
);
1300 if (!(insn
& (1 << 23)))
1301 tcg_gen_sub_i32(var
, var
, offset
);
1303 tcg_gen_add_i32(var
, var
, offset
);
1304 tcg_temp_free_i32(offset
);
1308 static TCGv_ptr
get_fpstatus_ptr(int neon
)
1310 TCGv_ptr statusptr
= tcg_temp_new_ptr();
1313 offset
= offsetof(CPUARMState
, vfp
.standard_fp_status
);
1315 offset
= offsetof(CPUARMState
, vfp
.fp_status
);
1317 tcg_gen_addi_ptr(statusptr
, cpu_env
, offset
);
1321 #define VFP_OP2(name) \
1322 static inline void gen_vfp_##name(int dp) \
1324 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1326 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1328 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1330 tcg_temp_free_ptr(fpst); \
1340 static inline void gen_vfp_F1_mul(int dp
)
1342 /* Like gen_vfp_mul() but put result in F1 */
1343 TCGv_ptr fpst
= get_fpstatus_ptr(0);
1345 gen_helper_vfp_muld(cpu_F1d
, cpu_F0d
, cpu_F1d
, fpst
);
1347 gen_helper_vfp_muls(cpu_F1s
, cpu_F0s
, cpu_F1s
, fpst
);
1349 tcg_temp_free_ptr(fpst
);
1352 static inline void gen_vfp_F1_neg(int dp
)
1354 /* Like gen_vfp_neg() but put result in F1 */
1356 gen_helper_vfp_negd(cpu_F1d
, cpu_F0d
);
1358 gen_helper_vfp_negs(cpu_F1s
, cpu_F0s
);
1362 static inline void gen_vfp_abs(int dp
)
1365 gen_helper_vfp_absd(cpu_F0d
, cpu_F0d
);
1367 gen_helper_vfp_abss(cpu_F0s
, cpu_F0s
);
1370 static inline void gen_vfp_neg(int dp
)
1373 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
1375 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
1378 static inline void gen_vfp_sqrt(int dp
)
1381 gen_helper_vfp_sqrtd(cpu_F0d
, cpu_F0d
, cpu_env
);
1383 gen_helper_vfp_sqrts(cpu_F0s
, cpu_F0s
, cpu_env
);
1386 static inline void gen_vfp_cmp(int dp
)
1389 gen_helper_vfp_cmpd(cpu_F0d
, cpu_F1d
, cpu_env
);
1391 gen_helper_vfp_cmps(cpu_F0s
, cpu_F1s
, cpu_env
);
1394 static inline void gen_vfp_cmpe(int dp
)
1397 gen_helper_vfp_cmped(cpu_F0d
, cpu_F1d
, cpu_env
);
1399 gen_helper_vfp_cmpes(cpu_F0s
, cpu_F1s
, cpu_env
);
1402 static inline void gen_vfp_F1_ld0(int dp
)
1405 tcg_gen_movi_i64(cpu_F1d
, 0);
1407 tcg_gen_movi_i32(cpu_F1s
, 0);
1410 #define VFP_GEN_ITOF(name) \
1411 static inline void gen_vfp_##name(int dp, int neon) \
1413 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1415 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1417 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1419 tcg_temp_free_ptr(statusptr); \
1426 #define VFP_GEN_FTOI(name) \
1427 static inline void gen_vfp_##name(int dp, int neon) \
1429 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1431 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1433 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1435 tcg_temp_free_ptr(statusptr); \
1444 #define VFP_GEN_FIX(name, round) \
1445 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1447 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1448 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1450 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1453 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1456 tcg_temp_free_i32(tmp_shift); \
1457 tcg_temp_free_ptr(statusptr); \
1459 VFP_GEN_FIX(tosh
, _round_to_zero
)
1460 VFP_GEN_FIX(tosl
, _round_to_zero
)
1461 VFP_GEN_FIX(touh
, _round_to_zero
)
1462 VFP_GEN_FIX(toul
, _round_to_zero
)
1469 static inline void gen_vfp_ld(DisasContext
*s
, int dp
, TCGv_i32 addr
)
1472 gen_aa32_ld64(s
, cpu_F0d
, addr
, get_mem_index(s
));
1474 gen_aa32_ld32u(s
, cpu_F0s
, addr
, get_mem_index(s
));
1478 static inline void gen_vfp_st(DisasContext
*s
, int dp
, TCGv_i32 addr
)
1481 gen_aa32_st64(s
, cpu_F0d
, addr
, get_mem_index(s
));
1483 gen_aa32_st32(s
, cpu_F0s
, addr
, get_mem_index(s
));
1488 vfp_reg_offset (int dp
, int reg
)
1491 return offsetof(CPUARMState
, vfp
.regs
[reg
]);
1493 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1494 + offsetof(CPU_DoubleU
, l
.upper
);
1496 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1497 + offsetof(CPU_DoubleU
, l
.lower
);
1501 /* Return the offset of a 32-bit piece of a NEON register.
1502 zero is the least significant end of the register. */
1504 neon_reg_offset (int reg
, int n
)
1508 return vfp_reg_offset(0, sreg
);
1511 static TCGv_i32
neon_load_reg(int reg
, int pass
)
1513 TCGv_i32 tmp
= tcg_temp_new_i32();
1514 tcg_gen_ld_i32(tmp
, cpu_env
, neon_reg_offset(reg
, pass
));
1518 static void neon_store_reg(int reg
, int pass
, TCGv_i32 var
)
1520 tcg_gen_st_i32(var
, cpu_env
, neon_reg_offset(reg
, pass
));
1521 tcg_temp_free_i32(var
);
1524 static inline void neon_load_reg64(TCGv_i64 var
, int reg
)
1526 tcg_gen_ld_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1529 static inline void neon_store_reg64(TCGv_i64 var
, int reg
)
1531 tcg_gen_st_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1534 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1535 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1536 #define tcg_gen_st_f32 tcg_gen_st_i32
1537 #define tcg_gen_st_f64 tcg_gen_st_i64
1539 static inline void gen_mov_F0_vreg(int dp
, int reg
)
1542 tcg_gen_ld_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1544 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1547 static inline void gen_mov_F1_vreg(int dp
, int reg
)
1550 tcg_gen_ld_f64(cpu_F1d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1552 tcg_gen_ld_f32(cpu_F1s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1555 static inline void gen_mov_vreg_F0(int dp
, int reg
)
1558 tcg_gen_st_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1560 tcg_gen_st_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1563 #define ARM_CP_RW_BIT (1 << 20)
1565 static inline void iwmmxt_load_reg(TCGv_i64 var
, int reg
)
1567 tcg_gen_ld_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1570 static inline void iwmmxt_store_reg(TCGv_i64 var
, int reg
)
1572 tcg_gen_st_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1575 static inline TCGv_i32
iwmmxt_load_creg(int reg
)
1577 TCGv_i32 var
= tcg_temp_new_i32();
1578 tcg_gen_ld_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1582 static inline void iwmmxt_store_creg(int reg
, TCGv_i32 var
)
1584 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1585 tcg_temp_free_i32(var
);
1588 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn
)
1590 iwmmxt_store_reg(cpu_M0
, rn
);
1593 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn
)
1595 iwmmxt_load_reg(cpu_M0
, rn
);
1598 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn
)
1600 iwmmxt_load_reg(cpu_V1
, rn
);
1601 tcg_gen_or_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1604 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn
)
1606 iwmmxt_load_reg(cpu_V1
, rn
);
1607 tcg_gen_and_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1610 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn
)
1612 iwmmxt_load_reg(cpu_V1
, rn
);
1613 tcg_gen_xor_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1616 #define IWMMXT_OP(name) \
1617 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1619 iwmmxt_load_reg(cpu_V1, rn); \
1620 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1623 #define IWMMXT_OP_ENV(name) \
1624 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1626 iwmmxt_load_reg(cpu_V1, rn); \
1627 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1630 #define IWMMXT_OP_ENV_SIZE(name) \
1631 IWMMXT_OP_ENV(name##b) \
1632 IWMMXT_OP_ENV(name##w) \
1633 IWMMXT_OP_ENV(name##l)
1635 #define IWMMXT_OP_ENV1(name) \
1636 static inline void gen_op_iwmmxt_##name##_M0(void) \
1638 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1652 IWMMXT_OP_ENV_SIZE(unpackl
)
1653 IWMMXT_OP_ENV_SIZE(unpackh
)
1655 IWMMXT_OP_ENV1(unpacklub
)
1656 IWMMXT_OP_ENV1(unpackluw
)
1657 IWMMXT_OP_ENV1(unpacklul
)
1658 IWMMXT_OP_ENV1(unpackhub
)
1659 IWMMXT_OP_ENV1(unpackhuw
)
1660 IWMMXT_OP_ENV1(unpackhul
)
1661 IWMMXT_OP_ENV1(unpacklsb
)
1662 IWMMXT_OP_ENV1(unpacklsw
)
1663 IWMMXT_OP_ENV1(unpacklsl
)
1664 IWMMXT_OP_ENV1(unpackhsb
)
1665 IWMMXT_OP_ENV1(unpackhsw
)
1666 IWMMXT_OP_ENV1(unpackhsl
)
1668 IWMMXT_OP_ENV_SIZE(cmpeq
)
1669 IWMMXT_OP_ENV_SIZE(cmpgtu
)
1670 IWMMXT_OP_ENV_SIZE(cmpgts
)
1672 IWMMXT_OP_ENV_SIZE(mins
)
1673 IWMMXT_OP_ENV_SIZE(minu
)
1674 IWMMXT_OP_ENV_SIZE(maxs
)
1675 IWMMXT_OP_ENV_SIZE(maxu
)
1677 IWMMXT_OP_ENV_SIZE(subn
)
1678 IWMMXT_OP_ENV_SIZE(addn
)
1679 IWMMXT_OP_ENV_SIZE(subu
)
1680 IWMMXT_OP_ENV_SIZE(addu
)
1681 IWMMXT_OP_ENV_SIZE(subs
)
1682 IWMMXT_OP_ENV_SIZE(adds
)
1684 IWMMXT_OP_ENV(avgb0
)
1685 IWMMXT_OP_ENV(avgb1
)
1686 IWMMXT_OP_ENV(avgw0
)
1687 IWMMXT_OP_ENV(avgw1
)
1689 IWMMXT_OP_ENV(packuw
)
1690 IWMMXT_OP_ENV(packul
)
1691 IWMMXT_OP_ENV(packuq
)
1692 IWMMXT_OP_ENV(packsw
)
1693 IWMMXT_OP_ENV(packsl
)
1694 IWMMXT_OP_ENV(packsq
)
1696 static void gen_op_iwmmxt_set_mup(void)
1699 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1700 tcg_gen_ori_i32(tmp
, tmp
, 2);
1701 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1704 static void gen_op_iwmmxt_set_cup(void)
1707 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1708 tcg_gen_ori_i32(tmp
, tmp
, 1);
1709 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1712 static void gen_op_iwmmxt_setpsr_nz(void)
1714 TCGv_i32 tmp
= tcg_temp_new_i32();
1715 gen_helper_iwmmxt_setpsr_nz(tmp
, cpu_M0
);
1716 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCASF
]);
1719 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn
)
1721 iwmmxt_load_reg(cpu_V1
, rn
);
1722 tcg_gen_ext32u_i64(cpu_V1
, cpu_V1
);
1723 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1726 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
,
1733 rd
= (insn
>> 16) & 0xf;
1734 tmp
= load_reg(s
, rd
);
1736 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
1737 if (insn
& (1 << 24)) {
1739 if (insn
& (1 << 23))
1740 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1742 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1743 tcg_gen_mov_i32(dest
, tmp
);
1744 if (insn
& (1 << 21))
1745 store_reg(s
, rd
, tmp
);
1747 tcg_temp_free_i32(tmp
);
1748 } else if (insn
& (1 << 21)) {
1750 tcg_gen_mov_i32(dest
, tmp
);
1751 if (insn
& (1 << 23))
1752 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1754 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1755 store_reg(s
, rd
, tmp
);
1756 } else if (!(insn
& (1 << 23)))
1761 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
, TCGv_i32 dest
)
1763 int rd
= (insn
>> 0) & 0xf;
1766 if (insn
& (1 << 8)) {
1767 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
) {
1770 tmp
= iwmmxt_load_creg(rd
);
1773 tmp
= tcg_temp_new_i32();
1774 iwmmxt_load_reg(cpu_V0
, rd
);
1775 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
1777 tcg_gen_andi_i32(tmp
, tmp
, mask
);
1778 tcg_gen_mov_i32(dest
, tmp
);
1779 tcg_temp_free_i32(tmp
);
1783 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1784 (ie. an undefined instruction). */
1785 static int disas_iwmmxt_insn(DisasContext
*s
, uint32_t insn
)
1788 int rdhi
, rdlo
, rd0
, rd1
, i
;
1790 TCGv_i32 tmp
, tmp2
, tmp3
;
1792 if ((insn
& 0x0e000e00) == 0x0c000000) {
1793 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1795 rdlo
= (insn
>> 12) & 0xf;
1796 rdhi
= (insn
>> 16) & 0xf;
1797 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1798 iwmmxt_load_reg(cpu_V0
, wrd
);
1799 tcg_gen_extrl_i64_i32(cpu_R
[rdlo
], cpu_V0
);
1800 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
1801 tcg_gen_extrl_i64_i32(cpu_R
[rdhi
], cpu_V0
);
1802 } else { /* TMCRR */
1803 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
1804 iwmmxt_store_reg(cpu_V0
, wrd
);
1805 gen_op_iwmmxt_set_mup();
1810 wrd
= (insn
>> 12) & 0xf;
1811 addr
= tcg_temp_new_i32();
1812 if (gen_iwmmxt_address(s
, insn
, addr
)) {
1813 tcg_temp_free_i32(addr
);
1816 if (insn
& ARM_CP_RW_BIT
) {
1817 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1818 tmp
= tcg_temp_new_i32();
1819 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
1820 iwmmxt_store_creg(wrd
, tmp
);
1823 if (insn
& (1 << 8)) {
1824 if (insn
& (1 << 22)) { /* WLDRD */
1825 gen_aa32_ld64(s
, cpu_M0
, addr
, get_mem_index(s
));
1827 } else { /* WLDRW wRd */
1828 tmp
= tcg_temp_new_i32();
1829 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
1832 tmp
= tcg_temp_new_i32();
1833 if (insn
& (1 << 22)) { /* WLDRH */
1834 gen_aa32_ld16u(s
, tmp
, addr
, get_mem_index(s
));
1835 } else { /* WLDRB */
1836 gen_aa32_ld8u(s
, tmp
, addr
, get_mem_index(s
));
1840 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1841 tcg_temp_free_i32(tmp
);
1843 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1846 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1847 tmp
= iwmmxt_load_creg(wrd
);
1848 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
1850 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1851 tmp
= tcg_temp_new_i32();
1852 if (insn
& (1 << 8)) {
1853 if (insn
& (1 << 22)) { /* WSTRD */
1854 gen_aa32_st64(s
, cpu_M0
, addr
, get_mem_index(s
));
1855 } else { /* WSTRW wRd */
1856 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1857 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
1860 if (insn
& (1 << 22)) { /* WSTRH */
1861 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1862 gen_aa32_st16(s
, tmp
, addr
, get_mem_index(s
));
1863 } else { /* WSTRB */
1864 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1865 gen_aa32_st8(s
, tmp
, addr
, get_mem_index(s
));
1869 tcg_temp_free_i32(tmp
);
1871 tcg_temp_free_i32(addr
);
1875 if ((insn
& 0x0f000000) != 0x0e000000)
1878 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1879 case 0x000: /* WOR */
1880 wrd
= (insn
>> 12) & 0xf;
1881 rd0
= (insn
>> 0) & 0xf;
1882 rd1
= (insn
>> 16) & 0xf;
1883 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1884 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1885 gen_op_iwmmxt_setpsr_nz();
1886 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1887 gen_op_iwmmxt_set_mup();
1888 gen_op_iwmmxt_set_cup();
1890 case 0x011: /* TMCR */
1893 rd
= (insn
>> 12) & 0xf;
1894 wrd
= (insn
>> 16) & 0xf;
1896 case ARM_IWMMXT_wCID
:
1897 case ARM_IWMMXT_wCASF
:
1899 case ARM_IWMMXT_wCon
:
1900 gen_op_iwmmxt_set_cup();
1902 case ARM_IWMMXT_wCSSF
:
1903 tmp
= iwmmxt_load_creg(wrd
);
1904 tmp2
= load_reg(s
, rd
);
1905 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1906 tcg_temp_free_i32(tmp2
);
1907 iwmmxt_store_creg(wrd
, tmp
);
1909 case ARM_IWMMXT_wCGR0
:
1910 case ARM_IWMMXT_wCGR1
:
1911 case ARM_IWMMXT_wCGR2
:
1912 case ARM_IWMMXT_wCGR3
:
1913 gen_op_iwmmxt_set_cup();
1914 tmp
= load_reg(s
, rd
);
1915 iwmmxt_store_creg(wrd
, tmp
);
1921 case 0x100: /* WXOR */
1922 wrd
= (insn
>> 12) & 0xf;
1923 rd0
= (insn
>> 0) & 0xf;
1924 rd1
= (insn
>> 16) & 0xf;
1925 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1926 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1927 gen_op_iwmmxt_setpsr_nz();
1928 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1929 gen_op_iwmmxt_set_mup();
1930 gen_op_iwmmxt_set_cup();
1932 case 0x111: /* TMRC */
1935 rd
= (insn
>> 12) & 0xf;
1936 wrd
= (insn
>> 16) & 0xf;
1937 tmp
= iwmmxt_load_creg(wrd
);
1938 store_reg(s
, rd
, tmp
);
1940 case 0x300: /* WANDN */
1941 wrd
= (insn
>> 12) & 0xf;
1942 rd0
= (insn
>> 0) & 0xf;
1943 rd1
= (insn
>> 16) & 0xf;
1944 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1945 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1946 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1947 gen_op_iwmmxt_setpsr_nz();
1948 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1949 gen_op_iwmmxt_set_mup();
1950 gen_op_iwmmxt_set_cup();
1952 case 0x200: /* WAND */
1953 wrd
= (insn
>> 12) & 0xf;
1954 rd0
= (insn
>> 0) & 0xf;
1955 rd1
= (insn
>> 16) & 0xf;
1956 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1957 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1958 gen_op_iwmmxt_setpsr_nz();
1959 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1960 gen_op_iwmmxt_set_mup();
1961 gen_op_iwmmxt_set_cup();
1963 case 0x810: case 0xa10: /* WMADD */
1964 wrd
= (insn
>> 12) & 0xf;
1965 rd0
= (insn
>> 0) & 0xf;
1966 rd1
= (insn
>> 16) & 0xf;
1967 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1968 if (insn
& (1 << 21))
1969 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1971 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1972 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1973 gen_op_iwmmxt_set_mup();
1975 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1976 wrd
= (insn
>> 12) & 0xf;
1977 rd0
= (insn
>> 16) & 0xf;
1978 rd1
= (insn
>> 0) & 0xf;
1979 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1980 switch ((insn
>> 22) & 3) {
1982 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
1985 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
1988 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
1993 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1994 gen_op_iwmmxt_set_mup();
1995 gen_op_iwmmxt_set_cup();
1997 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1998 wrd
= (insn
>> 12) & 0xf;
1999 rd0
= (insn
>> 16) & 0xf;
2000 rd1
= (insn
>> 0) & 0xf;
2001 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2002 switch ((insn
>> 22) & 3) {
2004 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
2007 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
2010 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
2015 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2016 gen_op_iwmmxt_set_mup();
2017 gen_op_iwmmxt_set_cup();
2019 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2020 wrd
= (insn
>> 12) & 0xf;
2021 rd0
= (insn
>> 16) & 0xf;
2022 rd1
= (insn
>> 0) & 0xf;
2023 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2024 if (insn
& (1 << 22))
2025 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
2027 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
2028 if (!(insn
& (1 << 20)))
2029 gen_op_iwmmxt_addl_M0_wRn(wrd
);
2030 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2031 gen_op_iwmmxt_set_mup();
2033 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2034 wrd
= (insn
>> 12) & 0xf;
2035 rd0
= (insn
>> 16) & 0xf;
2036 rd1
= (insn
>> 0) & 0xf;
2037 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2038 if (insn
& (1 << 21)) {
2039 if (insn
& (1 << 20))
2040 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
2042 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
2044 if (insn
& (1 << 20))
2045 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
2047 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
2049 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2050 gen_op_iwmmxt_set_mup();
2052 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2053 wrd
= (insn
>> 12) & 0xf;
2054 rd0
= (insn
>> 16) & 0xf;
2055 rd1
= (insn
>> 0) & 0xf;
2056 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2057 if (insn
& (1 << 21))
2058 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
2060 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
2061 if (!(insn
& (1 << 20))) {
2062 iwmmxt_load_reg(cpu_V1
, wrd
);
2063 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
2065 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2066 gen_op_iwmmxt_set_mup();
2068 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2069 wrd
= (insn
>> 12) & 0xf;
2070 rd0
= (insn
>> 16) & 0xf;
2071 rd1
= (insn
>> 0) & 0xf;
2072 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2073 switch ((insn
>> 22) & 3) {
2075 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
2078 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
2081 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
2086 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2087 gen_op_iwmmxt_set_mup();
2088 gen_op_iwmmxt_set_cup();
2090 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2091 wrd
= (insn
>> 12) & 0xf;
2092 rd0
= (insn
>> 16) & 0xf;
2093 rd1
= (insn
>> 0) & 0xf;
2094 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2095 if (insn
& (1 << 22)) {
2096 if (insn
& (1 << 20))
2097 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
2099 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
2101 if (insn
& (1 << 20))
2102 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
2104 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
2106 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2107 gen_op_iwmmxt_set_mup();
2108 gen_op_iwmmxt_set_cup();
2110 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2111 wrd
= (insn
>> 12) & 0xf;
2112 rd0
= (insn
>> 16) & 0xf;
2113 rd1
= (insn
>> 0) & 0xf;
2114 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2115 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
2116 tcg_gen_andi_i32(tmp
, tmp
, 7);
2117 iwmmxt_load_reg(cpu_V1
, rd1
);
2118 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2119 tcg_temp_free_i32(tmp
);
2120 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2121 gen_op_iwmmxt_set_mup();
2123 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
2124 if (((insn
>> 6) & 3) == 3)
2126 rd
= (insn
>> 12) & 0xf;
2127 wrd
= (insn
>> 16) & 0xf;
2128 tmp
= load_reg(s
, rd
);
2129 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2130 switch ((insn
>> 6) & 3) {
2132 tmp2
= tcg_const_i32(0xff);
2133 tmp3
= tcg_const_i32((insn
& 7) << 3);
2136 tmp2
= tcg_const_i32(0xffff);
2137 tmp3
= tcg_const_i32((insn
& 3) << 4);
2140 tmp2
= tcg_const_i32(0xffffffff);
2141 tmp3
= tcg_const_i32((insn
& 1) << 5);
2144 TCGV_UNUSED_I32(tmp2
);
2145 TCGV_UNUSED_I32(tmp3
);
2147 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, tmp
, tmp2
, tmp3
);
2148 tcg_temp_free_i32(tmp3
);
2149 tcg_temp_free_i32(tmp2
);
2150 tcg_temp_free_i32(tmp
);
2151 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2152 gen_op_iwmmxt_set_mup();
2154 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2155 rd
= (insn
>> 12) & 0xf;
2156 wrd
= (insn
>> 16) & 0xf;
2157 if (rd
== 15 || ((insn
>> 22) & 3) == 3)
2159 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2160 tmp
= tcg_temp_new_i32();
2161 switch ((insn
>> 22) & 3) {
2163 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 7) << 3);
2164 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2166 tcg_gen_ext8s_i32(tmp
, tmp
);
2168 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
2172 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 3) << 4);
2173 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2175 tcg_gen_ext16s_i32(tmp
, tmp
);
2177 tcg_gen_andi_i32(tmp
, tmp
, 0xffff);
2181 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 1) << 5);
2182 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2185 store_reg(s
, rd
, tmp
);
2187 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2188 if ((insn
& 0x000ff008) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2190 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2191 switch ((insn
>> 22) & 3) {
2193 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 7) << 2) + 0);
2196 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 3) << 3) + 4);
2199 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 1) << 4) + 12);
2202 tcg_gen_shli_i32(tmp
, tmp
, 28);
2204 tcg_temp_free_i32(tmp
);
2206 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2207 if (((insn
>> 6) & 3) == 3)
2209 rd
= (insn
>> 12) & 0xf;
2210 wrd
= (insn
>> 16) & 0xf;
2211 tmp
= load_reg(s
, rd
);
2212 switch ((insn
>> 6) & 3) {
2214 gen_helper_iwmmxt_bcstb(cpu_M0
, tmp
);
2217 gen_helper_iwmmxt_bcstw(cpu_M0
, tmp
);
2220 gen_helper_iwmmxt_bcstl(cpu_M0
, tmp
);
2223 tcg_temp_free_i32(tmp
);
2224 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2225 gen_op_iwmmxt_set_mup();
2227 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2228 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2230 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2231 tmp2
= tcg_temp_new_i32();
2232 tcg_gen_mov_i32(tmp2
, tmp
);
2233 switch ((insn
>> 22) & 3) {
2235 for (i
= 0; i
< 7; i
++) {
2236 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
2237 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2241 for (i
= 0; i
< 3; i
++) {
2242 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
2243 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2247 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
2248 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2252 tcg_temp_free_i32(tmp2
);
2253 tcg_temp_free_i32(tmp
);
2255 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2256 wrd
= (insn
>> 12) & 0xf;
2257 rd0
= (insn
>> 16) & 0xf;
2258 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2259 switch ((insn
>> 22) & 3) {
2261 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
2264 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
2267 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
2272 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2273 gen_op_iwmmxt_set_mup();
2275 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2276 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2278 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2279 tmp2
= tcg_temp_new_i32();
2280 tcg_gen_mov_i32(tmp2
, tmp
);
2281 switch ((insn
>> 22) & 3) {
2283 for (i
= 0; i
< 7; i
++) {
2284 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
2285 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2289 for (i
= 0; i
< 3; i
++) {
2290 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
2291 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2295 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
2296 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2300 tcg_temp_free_i32(tmp2
);
2301 tcg_temp_free_i32(tmp
);
2303 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2304 rd
= (insn
>> 12) & 0xf;
2305 rd0
= (insn
>> 16) & 0xf;
2306 if ((insn
& 0xf) != 0 || ((insn
>> 22) & 3) == 3)
2308 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2309 tmp
= tcg_temp_new_i32();
2310 switch ((insn
>> 22) & 3) {
2312 gen_helper_iwmmxt_msbb(tmp
, cpu_M0
);
2315 gen_helper_iwmmxt_msbw(tmp
, cpu_M0
);
2318 gen_helper_iwmmxt_msbl(tmp
, cpu_M0
);
2321 store_reg(s
, rd
, tmp
);
2323 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2324 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2325 wrd
= (insn
>> 12) & 0xf;
2326 rd0
= (insn
>> 16) & 0xf;
2327 rd1
= (insn
>> 0) & 0xf;
2328 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2329 switch ((insn
>> 22) & 3) {
2331 if (insn
& (1 << 21))
2332 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
2334 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
2337 if (insn
& (1 << 21))
2338 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
2340 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
2343 if (insn
& (1 << 21))
2344 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
2346 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
2351 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2352 gen_op_iwmmxt_set_mup();
2353 gen_op_iwmmxt_set_cup();
2355 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2356 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2357 wrd
= (insn
>> 12) & 0xf;
2358 rd0
= (insn
>> 16) & 0xf;
2359 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2360 switch ((insn
>> 22) & 3) {
2362 if (insn
& (1 << 21))
2363 gen_op_iwmmxt_unpacklsb_M0();
2365 gen_op_iwmmxt_unpacklub_M0();
2368 if (insn
& (1 << 21))
2369 gen_op_iwmmxt_unpacklsw_M0();
2371 gen_op_iwmmxt_unpackluw_M0();
2374 if (insn
& (1 << 21))
2375 gen_op_iwmmxt_unpacklsl_M0();
2377 gen_op_iwmmxt_unpacklul_M0();
2382 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2383 gen_op_iwmmxt_set_mup();
2384 gen_op_iwmmxt_set_cup();
2386 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2387 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2388 wrd
= (insn
>> 12) & 0xf;
2389 rd0
= (insn
>> 16) & 0xf;
2390 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2391 switch ((insn
>> 22) & 3) {
2393 if (insn
& (1 << 21))
2394 gen_op_iwmmxt_unpackhsb_M0();
2396 gen_op_iwmmxt_unpackhub_M0();
2399 if (insn
& (1 << 21))
2400 gen_op_iwmmxt_unpackhsw_M0();
2402 gen_op_iwmmxt_unpackhuw_M0();
2405 if (insn
& (1 << 21))
2406 gen_op_iwmmxt_unpackhsl_M0();
2408 gen_op_iwmmxt_unpackhul_M0();
2413 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2414 gen_op_iwmmxt_set_mup();
2415 gen_op_iwmmxt_set_cup();
2417 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2418 case 0x214: case 0x614: case 0xa14: case 0xe14:
2419 if (((insn
>> 22) & 3) == 0)
2421 wrd
= (insn
>> 12) & 0xf;
2422 rd0
= (insn
>> 16) & 0xf;
2423 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2424 tmp
= tcg_temp_new_i32();
2425 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2426 tcg_temp_free_i32(tmp
);
2429 switch ((insn
>> 22) & 3) {
2431 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2434 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2437 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2440 tcg_temp_free_i32(tmp
);
2441 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2442 gen_op_iwmmxt_set_mup();
2443 gen_op_iwmmxt_set_cup();
2445 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2446 case 0x014: case 0x414: case 0x814: case 0xc14:
2447 if (((insn
>> 22) & 3) == 0)
2449 wrd
= (insn
>> 12) & 0xf;
2450 rd0
= (insn
>> 16) & 0xf;
2451 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2452 tmp
= tcg_temp_new_i32();
2453 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2454 tcg_temp_free_i32(tmp
);
2457 switch ((insn
>> 22) & 3) {
2459 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2462 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2465 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2468 tcg_temp_free_i32(tmp
);
2469 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2470 gen_op_iwmmxt_set_mup();
2471 gen_op_iwmmxt_set_cup();
2473 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2474 case 0x114: case 0x514: case 0x914: case 0xd14:
2475 if (((insn
>> 22) & 3) == 0)
2477 wrd
= (insn
>> 12) & 0xf;
2478 rd0
= (insn
>> 16) & 0xf;
2479 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2480 tmp
= tcg_temp_new_i32();
2481 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2482 tcg_temp_free_i32(tmp
);
2485 switch ((insn
>> 22) & 3) {
2487 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2490 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2493 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2496 tcg_temp_free_i32(tmp
);
2497 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2498 gen_op_iwmmxt_set_mup();
2499 gen_op_iwmmxt_set_cup();
2501 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2502 case 0x314: case 0x714: case 0xb14: case 0xf14:
2503 if (((insn
>> 22) & 3) == 0)
2505 wrd
= (insn
>> 12) & 0xf;
2506 rd0
= (insn
>> 16) & 0xf;
2507 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2508 tmp
= tcg_temp_new_i32();
2509 switch ((insn
>> 22) & 3) {
2511 if (gen_iwmmxt_shift(insn
, 0xf, tmp
)) {
2512 tcg_temp_free_i32(tmp
);
2515 gen_helper_iwmmxt_rorw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2518 if (gen_iwmmxt_shift(insn
, 0x1f, tmp
)) {
2519 tcg_temp_free_i32(tmp
);
2522 gen_helper_iwmmxt_rorl(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2525 if (gen_iwmmxt_shift(insn
, 0x3f, tmp
)) {
2526 tcg_temp_free_i32(tmp
);
2529 gen_helper_iwmmxt_rorq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2532 tcg_temp_free_i32(tmp
);
2533 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2534 gen_op_iwmmxt_set_mup();
2535 gen_op_iwmmxt_set_cup();
2537 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2538 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2539 wrd
= (insn
>> 12) & 0xf;
2540 rd0
= (insn
>> 16) & 0xf;
2541 rd1
= (insn
>> 0) & 0xf;
2542 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2543 switch ((insn
>> 22) & 3) {
2545 if (insn
& (1 << 21))
2546 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
2548 gen_op_iwmmxt_minub_M0_wRn(rd1
);
2551 if (insn
& (1 << 21))
2552 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
2554 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
2557 if (insn
& (1 << 21))
2558 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
2560 gen_op_iwmmxt_minul_M0_wRn(rd1
);
2565 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2566 gen_op_iwmmxt_set_mup();
2568 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2569 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2570 wrd
= (insn
>> 12) & 0xf;
2571 rd0
= (insn
>> 16) & 0xf;
2572 rd1
= (insn
>> 0) & 0xf;
2573 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2574 switch ((insn
>> 22) & 3) {
2576 if (insn
& (1 << 21))
2577 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
2579 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
2582 if (insn
& (1 << 21))
2583 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
2585 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
2588 if (insn
& (1 << 21))
2589 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
2591 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
2596 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2597 gen_op_iwmmxt_set_mup();
2599 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2600 case 0x402: case 0x502: case 0x602: case 0x702:
2601 wrd
= (insn
>> 12) & 0xf;
2602 rd0
= (insn
>> 16) & 0xf;
2603 rd1
= (insn
>> 0) & 0xf;
2604 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2605 tmp
= tcg_const_i32((insn
>> 20) & 3);
2606 iwmmxt_load_reg(cpu_V1
, rd1
);
2607 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2608 tcg_temp_free_i32(tmp
);
2609 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2610 gen_op_iwmmxt_set_mup();
2612 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2613 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2614 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2615 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2616 wrd
= (insn
>> 12) & 0xf;
2617 rd0
= (insn
>> 16) & 0xf;
2618 rd1
= (insn
>> 0) & 0xf;
2619 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2620 switch ((insn
>> 20) & 0xf) {
2622 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2625 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2628 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2631 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2634 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2637 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2640 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2643 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2646 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2651 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2652 gen_op_iwmmxt_set_mup();
2653 gen_op_iwmmxt_set_cup();
2655 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2656 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2657 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2658 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2659 wrd
= (insn
>> 12) & 0xf;
2660 rd0
= (insn
>> 16) & 0xf;
2661 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2662 tmp
= tcg_const_i32(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2663 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2664 tcg_temp_free_i32(tmp
);
2665 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2666 gen_op_iwmmxt_set_mup();
2667 gen_op_iwmmxt_set_cup();
2669 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2670 case 0x418: case 0x518: case 0x618: case 0x718:
2671 case 0x818: case 0x918: case 0xa18: case 0xb18:
2672 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2673 wrd
= (insn
>> 12) & 0xf;
2674 rd0
= (insn
>> 16) & 0xf;
2675 rd1
= (insn
>> 0) & 0xf;
2676 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2677 switch ((insn
>> 20) & 0xf) {
2679 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2682 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2685 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2688 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2691 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2694 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2697 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2700 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2703 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2708 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2709 gen_op_iwmmxt_set_mup();
2710 gen_op_iwmmxt_set_cup();
2712 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2713 case 0x408: case 0x508: case 0x608: case 0x708:
2714 case 0x808: case 0x908: case 0xa08: case 0xb08:
2715 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2716 if (!(insn
& (1 << 20)) || ((insn
>> 22) & 3) == 0)
2718 wrd
= (insn
>> 12) & 0xf;
2719 rd0
= (insn
>> 16) & 0xf;
2720 rd1
= (insn
>> 0) & 0xf;
2721 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2722 switch ((insn
>> 22) & 3) {
2724 if (insn
& (1 << 21))
2725 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
2727 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
2730 if (insn
& (1 << 21))
2731 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
2733 gen_op_iwmmxt_packul_M0_wRn(rd1
);
2736 if (insn
& (1 << 21))
2737 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
2739 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
2742 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2743 gen_op_iwmmxt_set_mup();
2744 gen_op_iwmmxt_set_cup();
2746 case 0x201: case 0x203: case 0x205: case 0x207:
2747 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2748 case 0x211: case 0x213: case 0x215: case 0x217:
2749 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2750 wrd
= (insn
>> 5) & 0xf;
2751 rd0
= (insn
>> 12) & 0xf;
2752 rd1
= (insn
>> 0) & 0xf;
2753 if (rd0
== 0xf || rd1
== 0xf)
2755 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2756 tmp
= load_reg(s
, rd0
);
2757 tmp2
= load_reg(s
, rd1
);
2758 switch ((insn
>> 16) & 0xf) {
2759 case 0x0: /* TMIA */
2760 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2762 case 0x8: /* TMIAPH */
2763 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2765 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2766 if (insn
& (1 << 16))
2767 tcg_gen_shri_i32(tmp
, tmp
, 16);
2768 if (insn
& (1 << 17))
2769 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2770 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2773 tcg_temp_free_i32(tmp2
);
2774 tcg_temp_free_i32(tmp
);
2777 tcg_temp_free_i32(tmp2
);
2778 tcg_temp_free_i32(tmp
);
2779 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2780 gen_op_iwmmxt_set_mup();
2789 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2790 (ie. an undefined instruction). */
2791 static int disas_dsp_insn(DisasContext
*s
, uint32_t insn
)
2793 int acc
, rd0
, rd1
, rdhi
, rdlo
;
2796 if ((insn
& 0x0ff00f10) == 0x0e200010) {
2797 /* Multiply with Internal Accumulate Format */
2798 rd0
= (insn
>> 12) & 0xf;
2800 acc
= (insn
>> 5) & 7;
2805 tmp
= load_reg(s
, rd0
);
2806 tmp2
= load_reg(s
, rd1
);
2807 switch ((insn
>> 16) & 0xf) {
2809 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2811 case 0x8: /* MIAPH */
2812 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2814 case 0xc: /* MIABB */
2815 case 0xd: /* MIABT */
2816 case 0xe: /* MIATB */
2817 case 0xf: /* MIATT */
2818 if (insn
& (1 << 16))
2819 tcg_gen_shri_i32(tmp
, tmp
, 16);
2820 if (insn
& (1 << 17))
2821 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2822 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2827 tcg_temp_free_i32(tmp2
);
2828 tcg_temp_free_i32(tmp
);
2830 gen_op_iwmmxt_movq_wRn_M0(acc
);
2834 if ((insn
& 0x0fe00ff8) == 0x0c400000) {
2835 /* Internal Accumulator Access Format */
2836 rdhi
= (insn
>> 16) & 0xf;
2837 rdlo
= (insn
>> 12) & 0xf;
2843 if (insn
& ARM_CP_RW_BIT
) { /* MRA */
2844 iwmmxt_load_reg(cpu_V0
, acc
);
2845 tcg_gen_extrl_i64_i32(cpu_R
[rdlo
], cpu_V0
);
2846 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
2847 tcg_gen_extrl_i64_i32(cpu_R
[rdhi
], cpu_V0
);
2848 tcg_gen_andi_i32(cpu_R
[rdhi
], cpu_R
[rdhi
], (1 << (40 - 32)) - 1);
2850 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
2851 iwmmxt_store_reg(cpu_V0
, acc
);
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            goto illegal_op; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while(0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
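/* Worked example of the decode above (illustrative): for a double-precision
 * destination the 4-bit Vd field sits at insn[15:12] and the D bit at
 * insn[22].  With VFP3 present (32 D registers) VFP_DREG_D forms D:Vd, so
 * D=1, Vd=0b0011 selects d19; VFP_SREG_D instead forms Vd:D, so the same
 * bits select s7.  Without VFP3, a set D/N/M bit makes the encoding UNDEF.
 */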
/* Move between integer and VFP cores.  */
static TCGv_i32 gen_vfp_mrs(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv_i32 tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_u8(TCGv_i32 var, int shift)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv_i32 tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
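/* The size argument uses the usual Neon element-size encoding (0 = 8-bit,
 * 1 = 16-bit, 2 = 32-bit).  Only the narrower loads need a replication
 * step; a 32-bit load already fills the TCG register.
 */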
2946 static int handle_vsel(uint32_t insn
, uint32_t rd
, uint32_t rn
, uint32_t rm
,
2949 uint32_t cc
= extract32(insn
, 20, 2);
2952 TCGv_i64 frn
, frm
, dest
;
2953 TCGv_i64 tmp
, zero
, zf
, nf
, vf
;
2955 zero
= tcg_const_i64(0);
2957 frn
= tcg_temp_new_i64();
2958 frm
= tcg_temp_new_i64();
2959 dest
= tcg_temp_new_i64();
2961 zf
= tcg_temp_new_i64();
2962 nf
= tcg_temp_new_i64();
2963 vf
= tcg_temp_new_i64();
2965 tcg_gen_extu_i32_i64(zf
, cpu_ZF
);
2966 tcg_gen_ext_i32_i64(nf
, cpu_NF
);
2967 tcg_gen_ext_i32_i64(vf
, cpu_VF
);
2969 tcg_gen_ld_f64(frn
, cpu_env
, vfp_reg_offset(dp
, rn
));
2970 tcg_gen_ld_f64(frm
, cpu_env
, vfp_reg_offset(dp
, rm
));
2973 tcg_gen_movcond_i64(TCG_COND_EQ
, dest
, zf
, zero
,
2977 tcg_gen_movcond_i64(TCG_COND_LT
, dest
, vf
, zero
,
2980 case 2: /* ge: N == V -> N ^ V == 0 */
2981 tmp
= tcg_temp_new_i64();
2982 tcg_gen_xor_i64(tmp
, vf
, nf
);
2983 tcg_gen_movcond_i64(TCG_COND_GE
, dest
, tmp
, zero
,
2985 tcg_temp_free_i64(tmp
);
2987 case 3: /* gt: !Z && N == V */
2988 tcg_gen_movcond_i64(TCG_COND_NE
, dest
, zf
, zero
,
2990 tmp
= tcg_temp_new_i64();
2991 tcg_gen_xor_i64(tmp
, vf
, nf
);
2992 tcg_gen_movcond_i64(TCG_COND_GE
, dest
, tmp
, zero
,
2994 tcg_temp_free_i64(tmp
);
2997 tcg_gen_st_f64(dest
, cpu_env
, vfp_reg_offset(dp
, rd
));
2998 tcg_temp_free_i64(frn
);
2999 tcg_temp_free_i64(frm
);
3000 tcg_temp_free_i64(dest
);
3002 tcg_temp_free_i64(zf
);
3003 tcg_temp_free_i64(nf
);
3004 tcg_temp_free_i64(vf
);
3006 tcg_temp_free_i64(zero
);
3008 TCGv_i32 frn
, frm
, dest
;
3011 zero
= tcg_const_i32(0);
3013 frn
= tcg_temp_new_i32();
3014 frm
= tcg_temp_new_i32();
3015 dest
= tcg_temp_new_i32();
3016 tcg_gen_ld_f32(frn
, cpu_env
, vfp_reg_offset(dp
, rn
));
3017 tcg_gen_ld_f32(frm
, cpu_env
, vfp_reg_offset(dp
, rm
));
3020 tcg_gen_movcond_i32(TCG_COND_EQ
, dest
, cpu_ZF
, zero
,
3024 tcg_gen_movcond_i32(TCG_COND_LT
, dest
, cpu_VF
, zero
,
3027 case 2: /* ge: N == V -> N ^ V == 0 */
3028 tmp
= tcg_temp_new_i32();
3029 tcg_gen_xor_i32(tmp
, cpu_VF
, cpu_NF
);
3030 tcg_gen_movcond_i32(TCG_COND_GE
, dest
, tmp
, zero
,
3032 tcg_temp_free_i32(tmp
);
3034 case 3: /* gt: !Z && N == V */
3035 tcg_gen_movcond_i32(TCG_COND_NE
, dest
, cpu_ZF
, zero
,
3037 tmp
= tcg_temp_new_i32();
3038 tcg_gen_xor_i32(tmp
, cpu_VF
, cpu_NF
);
3039 tcg_gen_movcond_i32(TCG_COND_GE
, dest
, tmp
, zero
,
3041 tcg_temp_free_i32(tmp
);
3044 tcg_gen_st_f32(dest
, cpu_env
, vfp_reg_offset(dp
, rd
));
3045 tcg_temp_free_i32(frn
);
3046 tcg_temp_free_i32(frm
);
3047 tcg_temp_free_i32(dest
);
3049 tcg_temp_free_i32(zero
);
3055 static int handle_vminmaxnm(uint32_t insn
, uint32_t rd
, uint32_t rn
,
3056 uint32_t rm
, uint32_t dp
)
3058 uint32_t vmin
= extract32(insn
, 6, 1);
3059 TCGv_ptr fpst
= get_fpstatus_ptr(0);
3062 TCGv_i64 frn
, frm
, dest
;
3064 frn
= tcg_temp_new_i64();
3065 frm
= tcg_temp_new_i64();
3066 dest
= tcg_temp_new_i64();
3068 tcg_gen_ld_f64(frn
, cpu_env
, vfp_reg_offset(dp
, rn
));
3069 tcg_gen_ld_f64(frm
, cpu_env
, vfp_reg_offset(dp
, rm
));
3071 gen_helper_vfp_minnumd(dest
, frn
, frm
, fpst
);
3073 gen_helper_vfp_maxnumd(dest
, frn
, frm
, fpst
);
3075 tcg_gen_st_f64(dest
, cpu_env
, vfp_reg_offset(dp
, rd
));
3076 tcg_temp_free_i64(frn
);
3077 tcg_temp_free_i64(frm
);
3078 tcg_temp_free_i64(dest
);
3080 TCGv_i32 frn
, frm
, dest
;
3082 frn
= tcg_temp_new_i32();
3083 frm
= tcg_temp_new_i32();
3084 dest
= tcg_temp_new_i32();
3086 tcg_gen_ld_f32(frn
, cpu_env
, vfp_reg_offset(dp
, rn
));
3087 tcg_gen_ld_f32(frm
, cpu_env
, vfp_reg_offset(dp
, rm
));
3089 gen_helper_vfp_minnums(dest
, frn
, frm
, fpst
);
3091 gen_helper_vfp_maxnums(dest
, frn
, frm
, fpst
);
3093 tcg_gen_st_f32(dest
, cpu_env
, vfp_reg_offset(dp
, rd
));
3094 tcg_temp_free_i32(frn
);
3095 tcg_temp_free_i32(frm
);
3096 tcg_temp_free_i32(dest
);
3099 tcg_temp_free_ptr(fpst
);
3103 static int handle_vrint(uint32_t insn
, uint32_t rd
, uint32_t rm
, uint32_t dp
,
3106 TCGv_ptr fpst
= get_fpstatus_ptr(0);
3109 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rounding
));
3110 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
3115 tcg_op
= tcg_temp_new_i64();
3116 tcg_res
= tcg_temp_new_i64();
3117 tcg_gen_ld_f64(tcg_op
, cpu_env
, vfp_reg_offset(dp
, rm
));
3118 gen_helper_rintd(tcg_res
, tcg_op
, fpst
);
3119 tcg_gen_st_f64(tcg_res
, cpu_env
, vfp_reg_offset(dp
, rd
));
3120 tcg_temp_free_i64(tcg_op
);
3121 tcg_temp_free_i64(tcg_res
);
3125 tcg_op
= tcg_temp_new_i32();
3126 tcg_res
= tcg_temp_new_i32();
3127 tcg_gen_ld_f32(tcg_op
, cpu_env
, vfp_reg_offset(dp
, rm
));
3128 gen_helper_rints(tcg_res
, tcg_op
, fpst
);
3129 tcg_gen_st_f32(tcg_res
, cpu_env
, vfp_reg_offset(dp
, rd
));
3130 tcg_temp_free_i32(tcg_op
);
3131 tcg_temp_free_i32(tcg_res
);
3134 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
3135 tcg_temp_free_i32(tcg_rmode
);
3137 tcg_temp_free_ptr(fpst
);
3141 static int handle_vcvt(uint32_t insn
, uint32_t rd
, uint32_t rm
, uint32_t dp
,
3144 bool is_signed
= extract32(insn
, 7, 1);
3145 TCGv_ptr fpst
= get_fpstatus_ptr(0);
3146 TCGv_i32 tcg_rmode
, tcg_shift
;
3148 tcg_shift
= tcg_const_i32(0);
3150 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rounding
));
3151 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
3154 TCGv_i64 tcg_double
, tcg_res
;
3156 /* Rd is encoded as a single precision register even when the source
3157 * is double precision.
3159 rd
= ((rd
<< 1) & 0x1e) | ((rd
>> 4) & 0x1);
3160 tcg_double
= tcg_temp_new_i64();
3161 tcg_res
= tcg_temp_new_i64();
3162 tcg_tmp
= tcg_temp_new_i32();
3163 tcg_gen_ld_f64(tcg_double
, cpu_env
, vfp_reg_offset(1, rm
));
3165 gen_helper_vfp_tosld(tcg_res
, tcg_double
, tcg_shift
, fpst
);
3167 gen_helper_vfp_tould(tcg_res
, tcg_double
, tcg_shift
, fpst
);
3169 tcg_gen_extrl_i64_i32(tcg_tmp
, tcg_res
);
3170 tcg_gen_st_f32(tcg_tmp
, cpu_env
, vfp_reg_offset(0, rd
));
3171 tcg_temp_free_i32(tcg_tmp
);
3172 tcg_temp_free_i64(tcg_res
);
3173 tcg_temp_free_i64(tcg_double
);
3175 TCGv_i32 tcg_single
, tcg_res
;
3176 tcg_single
= tcg_temp_new_i32();
3177 tcg_res
= tcg_temp_new_i32();
3178 tcg_gen_ld_f32(tcg_single
, cpu_env
, vfp_reg_offset(0, rm
));
3180 gen_helper_vfp_tosls(tcg_res
, tcg_single
, tcg_shift
, fpst
);
3182 gen_helper_vfp_touls(tcg_res
, tcg_single
, tcg_shift
, fpst
);
3184 tcg_gen_st_f32(tcg_res
, cpu_env
, vfp_reg_offset(0, rd
));
3185 tcg_temp_free_i32(tcg_res
);
3186 tcg_temp_free_i32(tcg_single
);
3189 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
3190 tcg_temp_free_i32(tcg_rmode
);
3192 tcg_temp_free_i32(tcg_shift
);
3194 tcg_temp_free_ptr(fpst
);
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};

static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return 1;
    }

    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    if ((insn & 0x0f800e50) == 0x0e000a00) {
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}
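/* Note on the rounding-mode decode above: the RM field is insn[17:16] and,
 * per the FPDecodeRM() pseudocode referenced earlier, selects ties-to-away
 * (0b00), ties-to-even (0b01), towards +infinity (0b10) or towards
 * -infinity (0b11), which is the order of fp_decode_rm[].  For example a
 * VRINTP encoding (RM = 0b10) reaches handle_vrint() with
 * FPROUNDING_POSINF.
 */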
3244 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3245 (ie. an undefined instruction). */
3246 static int disas_vfp_insn(DisasContext
*s
, uint32_t insn
)
3248 uint32_t rd
, rn
, rm
, op
, i
, n
, offset
, delta_d
, delta_m
, bank_mask
;
3254 if (!arm_dc_feature(s
, ARM_FEATURE_VFP
)) {
3258 /* FIXME: this access check should not take precedence over UNDEF
3259 * for invalid encodings; we will generate incorrect syndrome information
3260 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3262 if (s
->fp_excp_el
) {
3263 gen_exception_insn(s
, 4, EXCP_UDEF
,
3264 syn_fp_access_trap(1, 0xe, false), s
->fp_excp_el
);
3268 if (!s
->vfp_enabled
) {
3269 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3270 if ((insn
& 0x0fe00fff) != 0x0ee00a10)
3272 rn
= (insn
>> 16) & 0xf;
3273 if (rn
!= ARM_VFP_FPSID
&& rn
!= ARM_VFP_FPEXC
&& rn
!= ARM_VFP_MVFR2
3274 && rn
!= ARM_VFP_MVFR1
&& rn
!= ARM_VFP_MVFR0
) {
3279 if (extract32(insn
, 28, 4) == 0xf) {
3280 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3281 * only used in v8 and above.
3283 return disas_vfp_v8_insn(s
, insn
);
3286 dp
= ((insn
& 0xf00) == 0xb00);
3287 switch ((insn
>> 24) & 0xf) {
3289 if (insn
& (1 << 4)) {
3290 /* single register transfer */
3291 rd
= (insn
>> 12) & 0xf;
3296 VFP_DREG_N(rn
, insn
);
3299 if (insn
& 0x00c00060
3300 && !arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
3304 pass
= (insn
>> 21) & 1;
3305 if (insn
& (1 << 22)) {
3307 offset
= ((insn
>> 5) & 3) * 8;
3308 } else if (insn
& (1 << 5)) {
3310 offset
= (insn
& (1 << 6)) ? 16 : 0;
3315 if (insn
& ARM_CP_RW_BIT
) {
3317 tmp
= neon_load_reg(rn
, pass
);
3321 tcg_gen_shri_i32(tmp
, tmp
, offset
);
3322 if (insn
& (1 << 23))
3328 if (insn
& (1 << 23)) {
3330 tcg_gen_shri_i32(tmp
, tmp
, 16);
3336 tcg_gen_sari_i32(tmp
, tmp
, 16);
3345 store_reg(s
, rd
, tmp
);
3348 tmp
= load_reg(s
, rd
);
3349 if (insn
& (1 << 23)) {
3352 gen_neon_dup_u8(tmp
, 0);
3353 } else if (size
== 1) {
3354 gen_neon_dup_low16(tmp
);
3356 for (n
= 0; n
<= pass
* 2; n
++) {
3357 tmp2
= tcg_temp_new_i32();
3358 tcg_gen_mov_i32(tmp2
, tmp
);
3359 neon_store_reg(rn
, n
, tmp2
);
3361 neon_store_reg(rn
, n
, tmp
);
3366 tmp2
= neon_load_reg(rn
, pass
);
3367 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, offset
, 8);
3368 tcg_temp_free_i32(tmp2
);
3371 tmp2
= neon_load_reg(rn
, pass
);
3372 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, offset
, 16);
3373 tcg_temp_free_i32(tmp2
);
3378 neon_store_reg(rn
, pass
, tmp
);
3382 if ((insn
& 0x6f) != 0x00)
3384 rn
= VFP_SREG_N(insn
);
3385 if (insn
& ARM_CP_RW_BIT
) {
3387 if (insn
& (1 << 21)) {
3388 /* system register */
3393 /* VFP2 allows access to FSID from userspace.
3394 VFP3 restricts all id registers to privileged
3397 && arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3400 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3405 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3407 case ARM_VFP_FPINST
:
3408 case ARM_VFP_FPINST2
:
3409 /* Not present in VFP3. */
3411 || arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3414 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3418 tmp
= load_cpu_field(vfp
.xregs
[ARM_VFP_FPSCR
]);
3419 tcg_gen_andi_i32(tmp
, tmp
, 0xf0000000);
3421 tmp
= tcg_temp_new_i32();
3422 gen_helper_vfp_get_fpscr(tmp
, cpu_env
);
3426 if (!arm_dc_feature(s
, ARM_FEATURE_V8
)) {
3433 || !arm_dc_feature(s
, ARM_FEATURE_MVFR
)) {
3436 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3442 gen_mov_F0_vreg(0, rn
);
3443 tmp
= gen_vfp_mrs();
3446 /* Set the 4 flag bits in the CPSR. */
3448 tcg_temp_free_i32(tmp
);
3450 store_reg(s
, rd
, tmp
);
3454 if (insn
& (1 << 21)) {
3456 /* system register */
3461 /* Writes are ignored. */
3464 tmp
= load_reg(s
, rd
);
3465 gen_helper_vfp_set_fpscr(cpu_env
, tmp
);
3466 tcg_temp_free_i32(tmp
);
3472 /* TODO: VFP subarchitecture support.
3473 * For now, keep the EN bit only */
3474 tmp
= load_reg(s
, rd
);
3475 tcg_gen_andi_i32(tmp
, tmp
, 1 << 30);
3476 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
3479 case ARM_VFP_FPINST
:
3480 case ARM_VFP_FPINST2
:
3484 tmp
= load_reg(s
, rd
);
3485 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
3491 tmp
= load_reg(s
, rd
);
3493 gen_mov_vreg_F0(0, rn
);
3498 /* data processing */
3499 /* The opcode is in bits 23, 21, 20 and 6. */
3500 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
3504 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
3506 /* rn is register number */
3507 VFP_DREG_N(rn
, insn
);
3510 if (op
== 15 && (rn
== 15 || ((rn
& 0x1c) == 0x18) ||
3511 ((rn
& 0x1e) == 0x6))) {
3512 /* Integer or single/half precision destination. */
3513 rd
= VFP_SREG_D(insn
);
3515 VFP_DREG_D(rd
, insn
);
3518 (((rn
& 0x1c) == 0x10) || ((rn
& 0x14) == 0x14) ||
3519 ((rn
& 0x1e) == 0x4))) {
3520 /* VCVT from int or half precision is always from S reg
3521 * regardless of dp bit. VCVT with immediate frac_bits
3522 * has same format as SREG_M.
3524 rm
= VFP_SREG_M(insn
);
3526 VFP_DREG_M(rm
, insn
);
3529 rn
= VFP_SREG_N(insn
);
3530 if (op
== 15 && rn
== 15) {
3531 /* Double precision destination. */
3532 VFP_DREG_D(rd
, insn
);
3534 rd
= VFP_SREG_D(insn
);
3536 /* NB that we implicitly rely on the encoding for the frac_bits
3537 * in VCVT of fixed to float being the same as that of an SREG_M
3539 rm
= VFP_SREG_M(insn
);
3542 veclen
= s
->vec_len
;
3543 if (op
== 15 && rn
> 3)
3546 /* Shut up compiler warnings. */
3557 /* Figure out what type of vector operation this is. */
3558 if ((rd
& bank_mask
) == 0) {
3563 delta_d
= (s
->vec_stride
>> 1) + 1;
3565 delta_d
= s
->vec_stride
+ 1;
3567 if ((rm
& bank_mask
) == 0) {
3568 /* mixed scalar/vector */
3577 /* Load the initial operands. */
3582 /* Integer source */
3583 gen_mov_F0_vreg(0, rm
);
3588 gen_mov_F0_vreg(dp
, rd
);
3589 gen_mov_F1_vreg(dp
, rm
);
3593 /* Compare with zero */
3594 gen_mov_F0_vreg(dp
, rd
);
3605 /* Source and destination the same. */
3606 gen_mov_F0_vreg(dp
, rd
);
3612 /* VCVTB, VCVTT: only present with the halfprec extension
3613 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3614 * (we choose to UNDEF)
3616 if ((dp
&& !arm_dc_feature(s
, ARM_FEATURE_V8
)) ||
3617 !arm_dc_feature(s
, ARM_FEATURE_VFP_FP16
)) {
3620 if (!extract32(rn
, 1, 1)) {
3621 /* Half precision source. */
3622 gen_mov_F0_vreg(0, rm
);
3625 /* Otherwise fall through */
3627 /* One source operand. */
3628 gen_mov_F0_vreg(dp
, rm
);
3632 /* Two source operands. */
3633 gen_mov_F0_vreg(dp
, rn
);
3634 gen_mov_F1_vreg(dp
, rm
);
3638 /* Perform the calculation. */
3640 case 0: /* VMLA: fd + (fn * fm) */
3641 /* Note that order of inputs to the add matters for NaNs */
3643 gen_mov_F0_vreg(dp
, rd
);
3646 case 1: /* VMLS: fd + -(fn * fm) */
3649 gen_mov_F0_vreg(dp
, rd
);
3652 case 2: /* VNMLS: -fd + (fn * fm) */
3653 /* Note that it isn't valid to replace (-A + B) with (B - A)
3654 * or similar plausible looking simplifications
3655 * because this will give wrong results for NaNs.
3658 gen_mov_F0_vreg(dp
, rd
);
3662 case 3: /* VNMLA: -fd + -(fn * fm) */
3665 gen_mov_F0_vreg(dp
, rd
);
3669 case 4: /* mul: fn * fm */
3672 case 5: /* nmul: -(fn * fm) */
3676 case 6: /* add: fn + fm */
3679 case 7: /* sub: fn - fm */
3682 case 8: /* div: fn / fm */
3685 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3686 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3687 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3688 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3689 /* These are fused multiply-add, and must be done as one
3690 * floating point operation with no rounding between the
3691 * multiplication and addition steps.
3692 * NB that doing the negations here as separate steps is
3693 * correct : an input NaN should come out with its sign bit
3694 * flipped if it is a negated-input.
3696 if (!arm_dc_feature(s
, ARM_FEATURE_VFP4
)) {
3704 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
3706 frd
= tcg_temp_new_i64();
3707 tcg_gen_ld_f64(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
3710 gen_helper_vfp_negd(frd
, frd
);
3712 fpst
= get_fpstatus_ptr(0);
3713 gen_helper_vfp_muladdd(cpu_F0d
, cpu_F0d
,
3714 cpu_F1d
, frd
, fpst
);
3715 tcg_temp_free_ptr(fpst
);
3716 tcg_temp_free_i64(frd
);
3722 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
3724 frd
= tcg_temp_new_i32();
3725 tcg_gen_ld_f32(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
3727 gen_helper_vfp_negs(frd
, frd
);
3729 fpst
= get_fpstatus_ptr(0);
3730 gen_helper_vfp_muladds(cpu_F0s
, cpu_F0s
,
3731 cpu_F1s
, frd
, fpst
);
3732 tcg_temp_free_ptr(fpst
);
3733 tcg_temp_free_i32(frd
);
3736 case 14: /* fconst */
3737 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3741 n
= (insn
<< 12) & 0x80000000;
3742 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
3749 tcg_gen_movi_i64(cpu_F0d
, ((uint64_t)n
) << 32);
3756 tcg_gen_movi_i32(cpu_F0s
, n
);
3759 case 15: /* extension space */
3773 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3774 tmp
= gen_vfp_mrs();
3775 tcg_gen_ext16u_i32(tmp
, tmp
);
3777 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d
, tmp
,
3780 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
,
3783 tcg_temp_free_i32(tmp
);
3785 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3786 tmp
= gen_vfp_mrs();
3787 tcg_gen_shri_i32(tmp
, tmp
, 16);
3789 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d
, tmp
,
3792 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
,
3795 tcg_temp_free_i32(tmp
);
3797 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3798 tmp
= tcg_temp_new_i32();
3800 gen_helper_vfp_fcvt_f64_to_f16(tmp
, cpu_F0d
,
3803 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
,
3806 gen_mov_F0_vreg(0, rd
);
3807 tmp2
= gen_vfp_mrs();
3808 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
3809 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3810 tcg_temp_free_i32(tmp2
);
3813 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3814 tmp
= tcg_temp_new_i32();
3816 gen_helper_vfp_fcvt_f64_to_f16(tmp
, cpu_F0d
,
3819 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
,
3822 tcg_gen_shli_i32(tmp
, tmp
, 16);
3823 gen_mov_F0_vreg(0, rd
);
3824 tmp2
= gen_vfp_mrs();
3825 tcg_gen_ext16u_i32(tmp2
, tmp2
);
3826 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3827 tcg_temp_free_i32(tmp2
);
3839 case 11: /* cmpez */
3843 case 12: /* vrintr */
3845 TCGv_ptr fpst
= get_fpstatus_ptr(0);
3847 gen_helper_rintd(cpu_F0d
, cpu_F0d
, fpst
);
3849 gen_helper_rints(cpu_F0s
, cpu_F0s
, fpst
);
3851 tcg_temp_free_ptr(fpst
);
3854 case 13: /* vrintz */
3856 TCGv_ptr fpst
= get_fpstatus_ptr(0);
3858 tcg_rmode
= tcg_const_i32(float_round_to_zero
);
3859 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
3861 gen_helper_rintd(cpu_F0d
, cpu_F0d
, fpst
);
3863 gen_helper_rints(cpu_F0s
, cpu_F0s
, fpst
);
3865 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
3866 tcg_temp_free_i32(tcg_rmode
);
3867 tcg_temp_free_ptr(fpst
);
3870 case 14: /* vrintx */
3872 TCGv_ptr fpst
= get_fpstatus_ptr(0);
3874 gen_helper_rintd_exact(cpu_F0d
, cpu_F0d
, fpst
);
3876 gen_helper_rints_exact(cpu_F0s
, cpu_F0s
, fpst
);
3878 tcg_temp_free_ptr(fpst
);
3881 case 15: /* single<->double conversion */
3883 gen_helper_vfp_fcvtsd(cpu_F0s
, cpu_F0d
, cpu_env
);
3885 gen_helper_vfp_fcvtds(cpu_F0d
, cpu_F0s
, cpu_env
);
3887 case 16: /* fuito */
3888 gen_vfp_uito(dp
, 0);
3890 case 17: /* fsito */
3891 gen_vfp_sito(dp
, 0);
3893 case 20: /* fshto */
3894 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3897 gen_vfp_shto(dp
, 16 - rm
, 0);
3899 case 21: /* fslto */
3900 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3903 gen_vfp_slto(dp
, 32 - rm
, 0);
3905 case 22: /* fuhto */
3906 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3909 gen_vfp_uhto(dp
, 16 - rm
, 0);
3911 case 23: /* fulto */
3912 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3915 gen_vfp_ulto(dp
, 32 - rm
, 0);
3917 case 24: /* ftoui */
3918 gen_vfp_toui(dp
, 0);
3920 case 25: /* ftouiz */
3921 gen_vfp_touiz(dp
, 0);
3923 case 26: /* ftosi */
3924 gen_vfp_tosi(dp
, 0);
3926 case 27: /* ftosiz */
3927 gen_vfp_tosiz(dp
, 0);
3929 case 28: /* ftosh */
3930 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3933 gen_vfp_tosh(dp
, 16 - rm
, 0);
3935 case 29: /* ftosl */
3936 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3939 gen_vfp_tosl(dp
, 32 - rm
, 0);
3941 case 30: /* ftouh */
3942 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3945 gen_vfp_touh(dp
, 16 - rm
, 0);
3947 case 31: /* ftoul */
3948 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3951 gen_vfp_toul(dp
, 32 - rm
, 0);
3953 default: /* undefined */
3957 default: /* undefined */
3961 /* Write back the result. */
3962 if (op
== 15 && (rn
>= 8 && rn
<= 11)) {
3963 /* Comparison, do nothing. */
3964 } else if (op
== 15 && dp
&& ((rn
& 0x1c) == 0x18 ||
3965 (rn
& 0x1e) == 0x6)) {
3966 /* VCVT double to int: always integer result.
3967 * VCVT double to half precision is always a single
3970 gen_mov_vreg_F0(0, rd
);
3971 } else if (op
== 15 && rn
== 15) {
3973 gen_mov_vreg_F0(!dp
, rd
);
3975 gen_mov_vreg_F0(dp
, rd
);
3978 /* break out of the loop if we have finished */
3982 if (op
== 15 && delta_m
== 0) {
3983 /* single source one-many */
3985 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3987 gen_mov_vreg_F0(dp
, rd
);
3991 /* Setup the next operands. */
3993 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3997 /* One source operand. */
3998 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
4000 gen_mov_F0_vreg(dp
, rm
);
4002 /* Two source operands. */
4003 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
4005 gen_mov_F0_vreg(dp
, rn
);
4007 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
4009 gen_mov_F1_vreg(dp
, rm
);
4017 if ((insn
& 0x03e00000) == 0x00400000) {
4018 /* two-register transfer */
4019 rn
= (insn
>> 16) & 0xf;
4020 rd
= (insn
>> 12) & 0xf;
4022 VFP_DREG_M(rm
, insn
);
4024 rm
= VFP_SREG_M(insn
);
4027 if (insn
& ARM_CP_RW_BIT
) {
4030 gen_mov_F0_vreg(0, rm
* 2);
4031 tmp
= gen_vfp_mrs();
4032 store_reg(s
, rd
, tmp
);
4033 gen_mov_F0_vreg(0, rm
* 2 + 1);
4034 tmp
= gen_vfp_mrs();
4035 store_reg(s
, rn
, tmp
);
4037 gen_mov_F0_vreg(0, rm
);
4038 tmp
= gen_vfp_mrs();
4039 store_reg(s
, rd
, tmp
);
4040 gen_mov_F0_vreg(0, rm
+ 1);
4041 tmp
= gen_vfp_mrs();
4042 store_reg(s
, rn
, tmp
);
4047 tmp
= load_reg(s
, rd
);
4049 gen_mov_vreg_F0(0, rm
* 2);
4050 tmp
= load_reg(s
, rn
);
4052 gen_mov_vreg_F0(0, rm
* 2 + 1);
4054 tmp
= load_reg(s
, rd
);
4056 gen_mov_vreg_F0(0, rm
);
4057 tmp
= load_reg(s
, rn
);
4059 gen_mov_vreg_F0(0, rm
+ 1);
4064 rn
= (insn
>> 16) & 0xf;
4066 VFP_DREG_D(rd
, insn
);
4068 rd
= VFP_SREG_D(insn
);
4069 if ((insn
& 0x01200000) == 0x01000000) {
4070 /* Single load/store */
4071 offset
= (insn
& 0xff) << 2;
4072 if ((insn
& (1 << 23)) == 0)
4074 if (s
->thumb
&& rn
== 15) {
4075 /* This is actually UNPREDICTABLE */
4076 addr
= tcg_temp_new_i32();
4077 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
4079 addr
= load_reg(s
, rn
);
4081 tcg_gen_addi_i32(addr
, addr
, offset
);
4082 if (insn
& (1 << 20)) {
4083 gen_vfp_ld(s
, dp
, addr
);
4084 gen_mov_vreg_F0(dp
, rd
);
4086 gen_mov_F0_vreg(dp
, rd
);
4087 gen_vfp_st(s
, dp
, addr
);
4089 tcg_temp_free_i32(addr
);
4091 /* load/store multiple */
4092 int w
= insn
& (1 << 21);
4094 n
= (insn
>> 1) & 0x7f;
4098 if (w
&& !(((insn
>> 23) ^ (insn
>> 24)) & 1)) {
4099 /* P == U , W == 1 => UNDEF */
4102 if (n
== 0 || (rd
+ n
) > 32 || (dp
&& n
> 16)) {
4103 /* UNPREDICTABLE cases for bad immediates: we choose to
4104 * UNDEF to avoid generating huge numbers of TCG ops
4108 if (rn
== 15 && w
) {
4109 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4113 if (s
->thumb
&& rn
== 15) {
4114 /* This is actually UNPREDICTABLE */
4115 addr
= tcg_temp_new_i32();
4116 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
4118 addr
= load_reg(s
, rn
);
4120 if (insn
& (1 << 24)) /* pre-decrement */
4121 tcg_gen_addi_i32(addr
, addr
, -((insn
& 0xff) << 2));
4127 for (i
= 0; i
< n
; i
++) {
4128 if (insn
& ARM_CP_RW_BIT
) {
4130 gen_vfp_ld(s
, dp
, addr
);
4131 gen_mov_vreg_F0(dp
, rd
+ i
);
4134 gen_mov_F0_vreg(dp
, rd
+ i
);
4135 gen_vfp_st(s
, dp
, addr
);
4137 tcg_gen_addi_i32(addr
, addr
, offset
);
4141 if (insn
& (1 << 24))
4142 offset
= -offset
* n
;
4143 else if (dp
&& (insn
& 1))
4149 tcg_gen_addi_i32(addr
, addr
, offset
);
4150 store_reg(s
, rn
, addr
);
4152 tcg_temp_free_i32(addr
);
4158 /* Should never happen. */
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_ptr(void)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, cpu_R[15]);
    tcg_gen_lookup_and_goto_ptr(addr);
    tcg_temp_free(addr);
}

/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb((uintptr_t)s->base.tb + n);
    } else {
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}

static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(is_singlestepping(s))) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
    }
}
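/* Example of the use_goto_tb() restriction (4K guest pages assumed): a
 * branch from a TB on page 0x00401000 to a destination such as 0x00402000
 * crosses a guest page boundary, so it falls back to gen_goto_ptr() and a
 * run-time TB lookup; a branch to 0x00401800 stays on the same page and
 * may use the direct tcg_gen_goto_tb()/tcg_gen_exit_tb() chaining path.
 */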
static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
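/* gen_mulxy() feeds the signed 16x16->32 multiplies (the SMULxy/SMLAxy
 * family): x and y select the top (1) or bottom (0) halfword of t0 and t1
 * respectively, each sign-extended before the 32-bit multiply.  For
 * example x = 1, y = 0 multiplies the high half of t0 by the low half
 * of t1.
 */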
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
        mask &= ~CPSR_T;
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
        mask &= ~CPSR_Q; /* V5TE in reality*/
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
        mask &= ~(CPSR_E | CPSR_GE);
    }
    if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
        mask &= ~CPSR_IT;
    }
    /* Mask out execution state and reserved bits.  */
    if (!spsr) {
        mask &= ~(CPSR_EXEC | CPSR_RESERVED);
    }
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
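/* The flags argument mirrors the MSR field mask: bit 0 selects the control
 * byte (PSR[7:0]), bit 1 the extension byte, bit 2 the status byte and
 * bit 3 the flags byte.  For instance flags = 0b1001 ("fc") starts from
 * mask = 0xff0000ff before the feature- and mode-dependent bits are
 * stripped above.
 */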
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
4294 static bool msr_banked_access_decode(DisasContext
*s
, int r
, int sysm
, int rn
,
4295 int *tgtmode
, int *regno
)
4297 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4298 * the target mode and register number, and identify the various
4299 * unpredictable cases.
4300 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4301 * + executed in user mode
4302 * + using R15 as the src/dest register
4303 * + accessing an unimplemented register
4304 * + accessing a register that's inaccessible at current PL/security state*
4305 * + accessing a register that you could access with a different insn
4306 * We choose to UNDEF in all these cases.
4307 * Since we don't know which of the various AArch32 modes we are in
4308 * we have to defer some checks to runtime.
4309 * Accesses to Monitor mode registers from Secure EL1 (which implies
4310 * that EL3 is AArch64) must trap to EL3.
4312 * If the access checks fail this function will emit code to take
4313 * an exception and return false. Otherwise it will return true,
4314 * and set *tgtmode and *regno appropriately.
4316 int exc_target
= default_exception_el(s
);
4318 /* These instructions are present only in ARMv8, or in ARMv7 with the
4319 * Virtualization Extensions.
4321 if (!arm_dc_feature(s
, ARM_FEATURE_V8
) &&
4322 !arm_dc_feature(s
, ARM_FEATURE_EL2
)) {
4326 if (IS_USER(s
) || rn
== 15) {
4330 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4331 * of registers into (r, sysm).
4334 /* SPSRs for other modes */
4336 case 0xe: /* SPSR_fiq */
4337 *tgtmode
= ARM_CPU_MODE_FIQ
;
4339 case 0x10: /* SPSR_irq */
4340 *tgtmode
= ARM_CPU_MODE_IRQ
;
4342 case 0x12: /* SPSR_svc */
4343 *tgtmode
= ARM_CPU_MODE_SVC
;
4345 case 0x14: /* SPSR_abt */
4346 *tgtmode
= ARM_CPU_MODE_ABT
;
4348 case 0x16: /* SPSR_und */
4349 *tgtmode
= ARM_CPU_MODE_UND
;
4351 case 0x1c: /* SPSR_mon */
4352 *tgtmode
= ARM_CPU_MODE_MON
;
4354 case 0x1e: /* SPSR_hyp */
4355 *tgtmode
= ARM_CPU_MODE_HYP
;
4357 default: /* unallocated */
4360 /* We arbitrarily assign SPSR a register number of 16. */
4363 /* general purpose registers for other modes */
4365 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4366 *tgtmode
= ARM_CPU_MODE_USR
;
4369 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4370 *tgtmode
= ARM_CPU_MODE_FIQ
;
4373 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4374 *tgtmode
= ARM_CPU_MODE_IRQ
;
4375 *regno
= sysm
& 1 ? 13 : 14;
4377 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4378 *tgtmode
= ARM_CPU_MODE_SVC
;
4379 *regno
= sysm
& 1 ? 13 : 14;
4381 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4382 *tgtmode
= ARM_CPU_MODE_ABT
;
4383 *regno
= sysm
& 1 ? 13 : 14;
4385 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4386 *tgtmode
= ARM_CPU_MODE_UND
;
4387 *regno
= sysm
& 1 ? 13 : 14;
4389 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4390 *tgtmode
= ARM_CPU_MODE_MON
;
4391 *regno
= sysm
& 1 ? 13 : 14;
4393 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4394 *tgtmode
= ARM_CPU_MODE_HYP
;
4395 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4396 *regno
= sysm
& 1 ? 13 : 17;
4398 default: /* unallocated */
4403 /* Catch the 'accessing inaccessible register' cases we can detect
4404 * at translate time.
4407 case ARM_CPU_MODE_MON
:
4408 if (!arm_dc_feature(s
, ARM_FEATURE_EL3
) || s
->ns
) {
4411 if (s
->current_el
== 1) {
4412 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4413 * then accesses to Mon registers trap to EL3
4419 case ARM_CPU_MODE_HYP
:
4420 /* Note that we can forbid accesses from EL2 here because they
4421 * must be from Hyp mode itself
4423 if (!arm_dc_feature(s
, ARM_FEATURE_EL2
) || s
->current_el
< 3) {
4434 /* If we get here then some access check did not pass */
4435 gen_exception_insn(s
, 4, EXCP_UDEF
, syn_uncategorized(), exc_target
);
4439 static void gen_msr_banked(DisasContext
*s
, int r
, int sysm
, int rn
)
4441 TCGv_i32 tcg_reg
, tcg_tgtmode
, tcg_regno
;
4442 int tgtmode
= 0, regno
= 0;
4444 if (!msr_banked_access_decode(s
, r
, sysm
, rn
, &tgtmode
, ®no
)) {
4448 /* Sync state because msr_banked() can raise exceptions */
4449 gen_set_condexec(s
);
4450 gen_set_pc_im(s
, s
->pc
- 4);
4451 tcg_reg
= load_reg(s
, rn
);
4452 tcg_tgtmode
= tcg_const_i32(tgtmode
);
4453 tcg_regno
= tcg_const_i32(regno
);
4454 gen_helper_msr_banked(cpu_env
, tcg_reg
, tcg_tgtmode
, tcg_regno
);
4455 tcg_temp_free_i32(tcg_tgtmode
);
4456 tcg_temp_free_i32(tcg_regno
);
4457 tcg_temp_free_i32(tcg_reg
);
4458 s
->base
.is_jmp
= DISAS_UPDATE
;
4461 static void gen_mrs_banked(DisasContext
*s
, int r
, int sysm
, int rn
)
4463 TCGv_i32 tcg_reg
, tcg_tgtmode
, tcg_regno
;
4464 int tgtmode
= 0, regno
= 0;
4466 if (!msr_banked_access_decode(s
, r
, sysm
, rn
, &tgtmode
, ®no
)) {
4470 /* Sync state because mrs_banked() can raise exceptions */
4471 gen_set_condexec(s
);
4472 gen_set_pc_im(s
, s
->pc
- 4);
4473 tcg_reg
= tcg_temp_new_i32();
4474 tcg_tgtmode
= tcg_const_i32(tgtmode
);
4475 tcg_regno
= tcg_const_i32(regno
);
4476 gen_helper_mrs_banked(tcg_reg
, cpu_env
, tcg_tgtmode
, tcg_regno
);
4477 tcg_temp_free_i32(tcg_tgtmode
);
4478 tcg_temp_free_i32(tcg_regno
);
4479 store_reg(s
, rn
, tcg_reg
);
4480 s
->base
.is_jmp
= DISAS_UPDATE
;
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}
/*
 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
 * only call the helper when running single threaded TCG code to ensure
 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
 * just skip this instruction. Currently the SEV/SEVL instructions
 * which are *one* of many ways to wake the CPU from WFE are not
 * implemented so we can't sleep like WFI does.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 1: /* yield */
        if (!parallel_cpus) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->base.is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!parallel_cpus) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance. */
    default: /* nop */
        break;
    }
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: abort();
    }
}
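/* Note the operand order in gen_neon_rsb(): it computes t1 - t0 (a
 * reversed subtract), so for 32-bit elements t0 = 5, t1 = 12 leaves
 * t0 = 7.  gen_neon_add() is the plain wrapping elementwise add.
 */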
4570 /* 32-bit pairwise ops end up the same as the elementwise versions. */
4571 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4572 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4573 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4574 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
4576 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
4577 switch ((size << 1) | u) { \
4579 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
4582 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
4585 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
4588 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
4591 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
4594 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
4596 default: return 1; \
4599 #define GEN_NEON_INTEGER_OP(name) do { \
4600 switch ((size << 1) | u) { \
4602 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
4605 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
4608 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
4611 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
4614 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
4617 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
4619 default: return 1; \
static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
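/* neon_get_scalar() unpacks a (register, element) pair from 'reg': for
 * 16-bit scalars bits [2:0] give the D register, bit 4 selects the 32-bit
 * word within it (the neon_load_reg() pass) and bit 3 the halfword, which
 * is then broadcast to both halves of the result; for 32-bit scalars bits
 * [3:0] give the register and bit 4 the word.
 */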
4651 static int gen_neon_unzip(int rd
, int rm
, int size
, int q
)
4654 if (!q
&& size
== 2) {
4657 tmp
= tcg_const_i32(rd
);
4658 tmp2
= tcg_const_i32(rm
);
4662 gen_helper_neon_qunzip8(cpu_env
, tmp
, tmp2
);
4665 gen_helper_neon_qunzip16(cpu_env
, tmp
, tmp2
);
4668 gen_helper_neon_qunzip32(cpu_env
, tmp
, tmp2
);
4676 gen_helper_neon_unzip8(cpu_env
, tmp
, tmp2
);
4679 gen_helper_neon_unzip16(cpu_env
, tmp
, tmp2
);
4685 tcg_temp_free_i32(tmp
);
4686 tcg_temp_free_i32(tmp2
);
4690 static int gen_neon_zip(int rd
, int rm
, int size
, int q
)
4693 if (!q
&& size
== 2) {
4696 tmp
= tcg_const_i32(rd
);
4697 tmp2
= tcg_const_i32(rm
);
4701 gen_helper_neon_qzip8(cpu_env
, tmp
, tmp2
);
4704 gen_helper_neon_qzip16(cpu_env
, tmp
, tmp2
);
4707 gen_helper_neon_qzip32(cpu_env
, tmp
, tmp2
);
4715 gen_helper_neon_zip8(cpu_env
, tmp
, tmp2
);
4718 gen_helper_neon_zip16(cpu_env
, tmp
, tmp2
);
4724 tcg_temp_free_i32(tmp
);
4725 tcg_temp_free_i32(tmp2
);
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}

static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
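/* Worked example of the transposition helpers above: with
 * t0 = 0xA3A2A1A0 and t1 = 0xB3B2B1B0, gen_neon_trn_u8() leaves
 * t0 = 0xA2B2A0B0 and t1 = 0xA3B3A1B1, gathering the even-indexed bytes
 * of both inputs into t0 and the odd-indexed bytes into t1.  Similarly
 * gen_neon_trn_u16() turns t0 = 0x22221111, t1 = 0x44443333 into
 * t0 = 0x11113333 and t1 = 0x22224444.
 */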
4775 } neon_ls_element_type
[11] = {
4789 /* Translate a NEON load/store element instruction. Return nonzero if the
4790 instruction is invalid. */
4791 static int disas_neon_ls_insn(DisasContext
*s
, uint32_t insn
)
4810 /* FIXME: this access check should not take precedence over UNDEF
4811 * for invalid encodings; we will generate incorrect syndrome information
4812 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4814 if (s
->fp_excp_el
) {
4815 gen_exception_insn(s
, 4, EXCP_UDEF
,
4816 syn_fp_access_trap(1, 0xe, false), s
->fp_excp_el
);
4820 if (!s
->vfp_enabled
)
4822 VFP_DREG_D(rd
, insn
);
4823 rn
= (insn
>> 16) & 0xf;
4825 load
= (insn
& (1 << 21)) != 0;
4826 if ((insn
& (1 << 23)) == 0) {
4827 /* Load store all elements. */
4828 op
= (insn
>> 8) & 0xf;
4829 size
= (insn
>> 6) & 3;
4832 /* Catch UNDEF cases for bad values of align field */
4835 if (((insn
>> 5) & 1) == 1) {
4840 if (((insn
>> 4) & 3) == 3) {
4847 nregs
= neon_ls_element_type
[op
].nregs
;
4848 interleave
= neon_ls_element_type
[op
].interleave
;
4849 spacing
= neon_ls_element_type
[op
].spacing
;
4850 if (size
== 3 && (interleave
| spacing
) != 1)
4852 addr
= tcg_temp_new_i32();
4853 load_reg_var(s
, addr
, rn
);
4854 stride
= (1 << size
) * interleave
;
4855 for (reg
= 0; reg
< nregs
; reg
++) {
4856 if (interleave
> 2 || (interleave
== 2 && nregs
== 2)) {
4857 load_reg_var(s
, addr
, rn
);
4858 tcg_gen_addi_i32(addr
, addr
, (1 << size
) * reg
);
4859 } else if (interleave
== 2 && nregs
== 4 && reg
== 2) {
4860 load_reg_var(s
, addr
, rn
);
4861 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4864 tmp64
= tcg_temp_new_i64();
4866 gen_aa32_ld64(s
, tmp64
, addr
, get_mem_index(s
));
4867 neon_store_reg64(tmp64
, rd
);
4869 neon_load_reg64(tmp64
, rd
);
4870 gen_aa32_st64(s
, tmp64
, addr
, get_mem_index(s
));
4872 tcg_temp_free_i64(tmp64
);
4873 tcg_gen_addi_i32(addr
, addr
, stride
);
4875 for (pass
= 0; pass
< 2; pass
++) {
4878 tmp
= tcg_temp_new_i32();
4879 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
4880 neon_store_reg(rd
, pass
, tmp
);
4882 tmp
= neon_load_reg(rd
, pass
);
4883 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
4884 tcg_temp_free_i32(tmp
);
4886 tcg_gen_addi_i32(addr
, addr
, stride
);
4887 } else if (size
== 1) {
4889 tmp
= tcg_temp_new_i32();
4890 gen_aa32_ld16u(s
, tmp
, addr
, get_mem_index(s
));
4891 tcg_gen_addi_i32(addr
, addr
, stride
);
4892 tmp2
= tcg_temp_new_i32();
4893 gen_aa32_ld16u(s
, tmp2
, addr
, get_mem_index(s
));
4894 tcg_gen_addi_i32(addr
, addr
, stride
);
4895 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
4896 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4897 tcg_temp_free_i32(tmp2
);
4898 neon_store_reg(rd
, pass
, tmp
);
4900 tmp
= neon_load_reg(rd
, pass
);
4901 tmp2
= tcg_temp_new_i32();
4902 tcg_gen_shri_i32(tmp2
, tmp
, 16);
4903 gen_aa32_st16(s
, tmp
, addr
, get_mem_index(s
));
4904 tcg_temp_free_i32(tmp
);
4905 tcg_gen_addi_i32(addr
, addr
, stride
);
4906 gen_aa32_st16(s
, tmp2
, addr
, get_mem_index(s
));
4907 tcg_temp_free_i32(tmp2
);
4908 tcg_gen_addi_i32(addr
, addr
, stride
);
4910 } else /* size == 0 */ {
4912 TCGV_UNUSED_I32(tmp2
);
4913 for (n
= 0; n
< 4; n
++) {
4914 tmp
= tcg_temp_new_i32();
4915 gen_aa32_ld8u(s
, tmp
, addr
, get_mem_index(s
));
4916 tcg_gen_addi_i32(addr
, addr
, stride
);
4920 tcg_gen_shli_i32(tmp
, tmp
, n
* 8);
4921 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
4922 tcg_temp_free_i32(tmp
);
4925 neon_store_reg(rd
, pass
, tmp2
);
4927 tmp2
= neon_load_reg(rd
, pass
);
4928 for (n
= 0; n
< 4; n
++) {
4929 tmp
= tcg_temp_new_i32();
4931 tcg_gen_mov_i32(tmp
, tmp2
);
4933 tcg_gen_shri_i32(tmp
, tmp2
, n
* 8);
4935 gen_aa32_st8(s
, tmp
, addr
, get_mem_index(s
));
4936 tcg_temp_free_i32(tmp
);
4937 tcg_gen_addi_i32(addr
, addr
, stride
);
4939 tcg_temp_free_i32(tmp2
);
4946 tcg_temp_free_i32(addr
);
4949 size
= (insn
>> 10) & 3;
4951 /* Load single element to all lanes. */
4952 int a
= (insn
>> 4) & 1;
4956 size
= (insn
>> 6) & 3;
4957 nregs
= ((insn
>> 8) & 3) + 1;
4960 if (nregs
!= 4 || a
== 0) {
4963 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4966 if (nregs
== 1 && a
== 1 && size
== 0) {
4969 if (nregs
== 3 && a
== 1) {
4972 addr
= tcg_temp_new_i32();
4973 load_reg_var(s
, addr
, rn
);
4975 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4976 tmp
= gen_load_and_replicate(s
, addr
, size
);
4977 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
4978 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
4979 if (insn
& (1 << 5)) {
4980 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 0));
4981 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 1));
4983 tcg_temp_free_i32(tmp
);
4985 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4986 stride
= (insn
& (1 << 5)) ? 2 : 1;
4987 for (reg
= 0; reg
< nregs
; reg
++) {
4988 tmp
= gen_load_and_replicate(s
, addr
, size
);
4989 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
4990 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
4991 tcg_temp_free_i32(tmp
);
4992 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4996 tcg_temp_free_i32(addr
);
4997 stride
= (1 << size
) * nregs
;
4999 /* Single element. */
5000 int idx
= (insn
>> 4) & 0xf;
5001 pass
= (insn
>> 7) & 1;
5004 shift
= ((insn
>> 5) & 3) * 8;
5008 shift
= ((insn
>> 6) & 1) * 16;
5009 stride
= (insn
& (1 << 5)) ? 2 : 1;
5013 stride
= (insn
& (1 << 6)) ? 2 : 1;
5018 nregs
= ((insn
>> 8) & 3) + 1;
5019 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5022 if (((idx
& (1 << size
)) != 0) ||
5023 (size
== 2 && ((idx
& 3) == 1 || (idx
& 3) == 2))) {
5028 if ((idx
& 1) != 0) {
5033 if (size
== 2 && (idx
& 2) != 0) {
5038 if ((size
== 2) && ((idx
& 3) == 3)) {
        if ((rd + stride * (nregs - 1)) > 31) {
            /* Attempts to write off the end of the register file
             * are UNPREDICTABLE; we choose to UNDEF because otherwise
             * the neon_load_reg() would write off the end of the array.
             */
            return 1;
        }
5052 addr
= tcg_temp_new_i32();
5053 load_reg_var(s
, addr
, rn
);
5054 for (reg
= 0; reg
< nregs
; reg
++) {
5056 tmp
= tcg_temp_new_i32();
5059 gen_aa32_ld8u(s
, tmp
, addr
, get_mem_index(s
));
5062 gen_aa32_ld16u(s
, tmp
, addr
, get_mem_index(s
));
5065 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
5067 default: /* Avoid compiler warnings. */
5071 tmp2
= neon_load_reg(rd
, pass
);
5072 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
,
5073 shift
, size
? 16 : 8);
5074 tcg_temp_free_i32(tmp2
);
5076 neon_store_reg(rd
, pass
, tmp
);
5077 } else { /* Store */
5078 tmp
= neon_load_reg(rd
, pass
);
5080 tcg_gen_shri_i32(tmp
, tmp
, shift
);
5083 gen_aa32_st8(s
, tmp
, addr
, get_mem_index(s
));
5086 gen_aa32_st16(s
, tmp
, addr
, get_mem_index(s
));
5089 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
5092 tcg_temp_free_i32(tmp
);
5095 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
5097 tcg_temp_free_i32(addr
);
5098 stride
= nregs
* (1 << size
);
5104 base
= load_reg(s
, rn
);
5106 tcg_gen_addi_i32(base
, base
, stride
);
5109 index
= load_reg(s
, rm
);
5110 tcg_gen_add_i32(base
, base
, index
);
5111 tcg_temp_free_i32(index
);
5113 store_reg(s
, rn
, base
);
/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
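/*
 * Equivalently, dest = (t & c) | (f & ~c): each result bit comes from t
 * where the corresponding bit of c is set and from f where it is clear.
 * The NEON_3R_LOGIC handling in disas_neon_data_insn() relies on this
 * per-bit behaviour, permuting which operand acts as the selector to
 * implement VBSL, VBIT and VBIF with the same helper.
 */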
static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_extrl_i64_i32(dest, src); break;
    default: abort();
    }
}
static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}
static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}
static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
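/*
 * Only the 16-bit and 32-bit element cases are needed here: the 64-bit
 * source elements of a narrowing shift are shifted directly on cpu_V0/cpu_V1
 * with the i64 shift helpers in disas_neon_data_insn() before being narrowed
 * (see the VSHRN/VRSHRN/VQSHRN/VQRSHRN handling below).
 */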
static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}
static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}
static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
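/*
 * Only size values 1 and 2 appear here (the saturating add is done on the
 * 32-bit or 64-bit widened elements): the long saturating-doubling ops that
 * use this helper (VQDMLAL, VQDMLSL, VQDMULL) UNDEF for 8-bit source
 * elements, as reflected in the undefreq entries of the neon_3reg_wide
 * table further down.
 */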
static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
static void gen_neon_narrow_op(int op, int u, int size,
                               TCGv_i32 dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
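/*
 * The op number is recovered from the instruction word in
 * disas_neon_data_insn() below as
 *
 *     op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
 *
 * i.e. insn bits [11:8] become op bits [4:1] and insn bit 4 becomes op bit 0.
 */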
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
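/*
 * Each entry above is a mask of the size values (0..3) that the op accepts;
 * disas_neon_data_insn() rejects anything else up front:
 *
 *     if ((neon_3r_sizes[op] & (1 << size)) == 0) {
 *         return 1;
 *     }
 */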
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
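/*
 * For this group the op number is assembled from insn bits [17:16] and
 * [10:7], matching the decode in the two-register-misc path of
 * disas_neon_data_insn():
 *
 *     op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
 */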
static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
            op == NEON_2RM_VRINTM ||
            (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
            op >= NEON_2RM_VRECPE_F);
}
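/*
 * Ops matching this predicate are handled via the scratch register cpu_F0s
 * in the two-register-misc loop of disas_neon_data_insn(): the element is
 * loaded with tcg_gen_ld_f32() instead of neon_load_reg() and stored back
 * with tcg_gen_st_f32() after the operation.
 */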
static bool neon_2rm_is_v8_op(int op)
{
    /* Return true if this neon 2reg-misc op is ARMv8 and up */
    switch (op) {
    case NEON_2RM_VRINTN:
    case NEON_2RM_VRINTA:
    case NEON_2RM_VRINTM:
    case NEON_2RM_VRINTP:
    case NEON_2RM_VRINTZ:
    case NEON_2RM_VRINTX:
    case NEON_2RM_VCVTAU:
    case NEON_2RM_VCVTAS:
    case NEON_2RM_VCVTNU:
    case NEON_2RM_VCVTNS:
    case NEON_2RM_VCVTPU:
    case NEON_2RM_VCVTPS:
    case NEON_2RM_VCVTMU:
    case NEON_2RM_VCVTMS:
        return true;
    default:
        return false;
    }
}
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
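/*
 * As with neon_3r_sizes[], the decoder checks
 * (neon_2rm_sizes[op] & (1 << size)) before emitting any code, and in
 * addition requires ARM_FEATURE_V8 for the ops flagged by
 * neon_2rm_is_v8_op() above.
 */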
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions. */

static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }
    if (!s->vfp_enabled)
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length. */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        /*
         * The SHA-1/SHA-256 3-register instructions require special treatment
         * here, as their size field is overloaded as an op type selector, and
         * they all consume their input in a single pass.
         */
5608 if (op
== NEON_3R_SHA
) {
5612 if (!u
) { /* SHA-1 */
5613 if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA1
)) {
5616 tmp
= tcg_const_i32(rd
);
5617 tmp2
= tcg_const_i32(rn
);
5618 tmp3
= tcg_const_i32(rm
);
5619 tmp4
= tcg_const_i32(size
);
5620 gen_helper_crypto_sha1_3reg(cpu_env
, tmp
, tmp2
, tmp3
, tmp4
);
5621 tcg_temp_free_i32(tmp4
);
5622 } else { /* SHA-256 */
5623 if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA256
) || size
== 3) {
5626 tmp
= tcg_const_i32(rd
);
5627 tmp2
= tcg_const_i32(rn
);
5628 tmp3
= tcg_const_i32(rm
);
5631 gen_helper_crypto_sha256h(cpu_env
, tmp
, tmp2
, tmp3
);
5634 gen_helper_crypto_sha256h2(cpu_env
, tmp
, tmp2
, tmp3
);
5637 gen_helper_crypto_sha256su1(cpu_env
, tmp
, tmp2
, tmp3
);
5641 tcg_temp_free_i32(tmp
);
5642 tcg_temp_free_i32(tmp2
);
5643 tcg_temp_free_i32(tmp3
);
5646 if (size
== 3 && op
!= NEON_3R_LOGIC
) {
5647 /* 64-bit element instructions. */
5648 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5649 neon_load_reg64(cpu_V0
, rn
+ pass
);
5650 neon_load_reg64(cpu_V1
, rm
+ pass
);
5654 gen_helper_neon_qadd_u64(cpu_V0
, cpu_env
,
5657 gen_helper_neon_qadd_s64(cpu_V0
, cpu_env
,
5663 gen_helper_neon_qsub_u64(cpu_V0
, cpu_env
,
5666 gen_helper_neon_qsub_s64(cpu_V0
, cpu_env
,
5672 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
5674 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
5679 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
5682 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
5688 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
5690 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
5693 case NEON_3R_VQRSHL
:
5695 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
5698 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
5702 case NEON_3R_VADD_VSUB
:
5704 tcg_gen_sub_i64(CPU_V001
);
5706 tcg_gen_add_i64(CPU_V001
);
5712 neon_store_reg64(cpu_V0
, rd
+ pass
);
5721 case NEON_3R_VQRSHL
:
5724 /* Shift instruction operands are reversed. */
5739 case NEON_3R_FLOAT_ARITH
:
5740 pairwise
= (u
&& size
< 2); /* if VPADD (float) */
5742 case NEON_3R_FLOAT_MINMAX
:
5743 pairwise
= u
; /* if VPMIN/VPMAX (float) */
5745 case NEON_3R_FLOAT_CMP
:
5747 /* no encoding for U=0 C=1x */
5751 case NEON_3R_FLOAT_ACMP
:
5756 case NEON_3R_FLOAT_MISC
:
5757 /* VMAXNM/VMINNM in ARMv8 */
5758 if (u
&& !arm_dc_feature(s
, ARM_FEATURE_V8
)) {
5763 if (u
&& (size
!= 0)) {
5764 /* UNDEF on invalid size for polynomial subcase */
5769 if (!arm_dc_feature(s
, ARM_FEATURE_VFP4
) || u
) {
5777 if (pairwise
&& q
) {
5778 /* All the pairwise insns UNDEF if Q is set */
5782 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5787 tmp
= neon_load_reg(rn
, 0);
5788 tmp2
= neon_load_reg(rn
, 1);
5790 tmp
= neon_load_reg(rm
, 0);
5791 tmp2
= neon_load_reg(rm
, 1);
5795 tmp
= neon_load_reg(rn
, pass
);
5796 tmp2
= neon_load_reg(rm
, pass
);
5800 GEN_NEON_INTEGER_OP(hadd
);
5803 GEN_NEON_INTEGER_OP_ENV(qadd
);
5805 case NEON_3R_VRHADD
:
5806 GEN_NEON_INTEGER_OP(rhadd
);
5808 case NEON_3R_LOGIC
: /* Logic ops. */
5809 switch ((u
<< 2) | size
) {
5811 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
5814 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
5817 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
5820 tcg_gen_orc_i32(tmp
, tmp
, tmp2
);
5823 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
5826 tmp3
= neon_load_reg(rd
, pass
);
5827 gen_neon_bsl(tmp
, tmp
, tmp2
, tmp3
);
5828 tcg_temp_free_i32(tmp3
);
5831 tmp3
= neon_load_reg(rd
, pass
);
5832 gen_neon_bsl(tmp
, tmp
, tmp3
, tmp2
);
5833 tcg_temp_free_i32(tmp3
);
5836 tmp3
= neon_load_reg(rd
, pass
);
5837 gen_neon_bsl(tmp
, tmp3
, tmp
, tmp2
);
5838 tcg_temp_free_i32(tmp3
);
5843 GEN_NEON_INTEGER_OP(hsub
);
5846 GEN_NEON_INTEGER_OP_ENV(qsub
);
5849 GEN_NEON_INTEGER_OP(cgt
);
5852 GEN_NEON_INTEGER_OP(cge
);
5855 GEN_NEON_INTEGER_OP(shl
);
5858 GEN_NEON_INTEGER_OP_ENV(qshl
);
5861 GEN_NEON_INTEGER_OP(rshl
);
5863 case NEON_3R_VQRSHL
:
5864 GEN_NEON_INTEGER_OP_ENV(qrshl
);
5867 GEN_NEON_INTEGER_OP(max
);
5870 GEN_NEON_INTEGER_OP(min
);
5873 GEN_NEON_INTEGER_OP(abd
);
5876 GEN_NEON_INTEGER_OP(abd
);
5877 tcg_temp_free_i32(tmp2
);
5878 tmp2
= neon_load_reg(rd
, pass
);
5879 gen_neon_add(size
, tmp
, tmp2
);
5881 case NEON_3R_VADD_VSUB
:
5882 if (!u
) { /* VADD */
5883 gen_neon_add(size
, tmp
, tmp2
);
5886 case 0: gen_helper_neon_sub_u8(tmp
, tmp
, tmp2
); break;
5887 case 1: gen_helper_neon_sub_u16(tmp
, tmp
, tmp2
); break;
5888 case 2: tcg_gen_sub_i32(tmp
, tmp
, tmp2
); break;
5893 case NEON_3R_VTST_VCEQ
:
5894 if (!u
) { /* VTST */
5896 case 0: gen_helper_neon_tst_u8(tmp
, tmp
, tmp2
); break;
5897 case 1: gen_helper_neon_tst_u16(tmp
, tmp
, tmp2
); break;
5898 case 2: gen_helper_neon_tst_u32(tmp
, tmp
, tmp2
); break;
5903 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
5904 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
5905 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
5910 case NEON_3R_VML
: /* VMLA, VMLAL, VMLS,VMLSL */
5912 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5913 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5914 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5917 tcg_temp_free_i32(tmp2
);
5918 tmp2
= neon_load_reg(rd
, pass
);
5920 gen_neon_rsb(size
, tmp
, tmp2
);
5922 gen_neon_add(size
, tmp
, tmp2
);
5926 if (u
) { /* polynomial */
5927 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
5928 } else { /* Integer */
5930 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5931 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5932 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5938 GEN_NEON_INTEGER_OP(pmax
);
5941 GEN_NEON_INTEGER_OP(pmin
);
5943 case NEON_3R_VQDMULH_VQRDMULH
: /* Multiply high. */
5944 if (!u
) { /* VQDMULH */
5947 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5950 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5954 } else { /* VQRDMULH */
5957 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5960 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5968 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
5969 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
5970 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
5974 case NEON_3R_FLOAT_ARITH
: /* Floating point arithmetic. */
5976 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5977 switch ((u
<< 2) | size
) {
5980 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5983 gen_helper_vfp_subs(tmp
, tmp
, tmp2
, fpstatus
);
5986 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
, fpstatus
);
5991 tcg_temp_free_ptr(fpstatus
);
5994 case NEON_3R_FLOAT_MULTIPLY
:
5996 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5997 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
5999 tcg_temp_free_i32(tmp2
);
6000 tmp2
= neon_load_reg(rd
, pass
);
6002 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
6004 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
6007 tcg_temp_free_ptr(fpstatus
);
6010 case NEON_3R_FLOAT_CMP
:
6012 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6014 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
6017 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
6019 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
6022 tcg_temp_free_ptr(fpstatus
);
6025 case NEON_3R_FLOAT_ACMP
:
6027 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6029 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
, fpstatus
);
6031 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
6033 tcg_temp_free_ptr(fpstatus
);
6036 case NEON_3R_FLOAT_MINMAX
:
6038 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6040 gen_helper_vfp_maxs(tmp
, tmp
, tmp2
, fpstatus
);
6042 gen_helper_vfp_mins(tmp
, tmp
, tmp2
, fpstatus
);
6044 tcg_temp_free_ptr(fpstatus
);
6047 case NEON_3R_FLOAT_MISC
:
6050 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6052 gen_helper_vfp_maxnums(tmp
, tmp
, tmp2
, fpstatus
);
6054 gen_helper_vfp_minnums(tmp
, tmp
, tmp2
, fpstatus
);
6056 tcg_temp_free_ptr(fpstatus
);
6059 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
6061 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
6067 /* VFMA, VFMS: fused multiply-add */
6068 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6069 TCGv_i32 tmp3
= neon_load_reg(rd
, pass
);
6072 gen_helper_vfp_negs(tmp
, tmp
);
6074 gen_helper_vfp_muladds(tmp
, tmp
, tmp2
, tmp3
, fpstatus
);
6075 tcg_temp_free_i32(tmp3
);
6076 tcg_temp_free_ptr(fpstatus
);
6082 tcg_temp_free_i32(tmp2
);
6084 /* Save the result. For elementwise operations we can put it
6085 straight into the destination register. For pairwise operations
6086 we have to be careful to avoid clobbering the source operands. */
6087 if (pairwise
&& rd
== rm
) {
6088 neon_store_scratch(pass
, tmp
);
6090 neon_store_reg(rd
, pass
, tmp
);
6094 if (pairwise
&& rd
== rm
) {
6095 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6096 tmp
= neon_load_scratch(pass
);
6097 neon_store_reg(rd
, pass
, tmp
);
6100 /* End of 3 register same size operations. */
6101 } else if (insn
& (1 << 4)) {
6102 if ((insn
& 0x00380080) != 0) {
6103 /* Two registers and shift. */
6104 op
= (insn
>> 8) & 0xf;
6105 if (insn
& (1 << 7)) {
6113 while ((insn
& (1 << (size
+ 19))) == 0)
6116 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
6117 /* To avoid excessive duplication of ops we implement shift
6118 by immediate using the variable shift operations. */
6120 /* Shift by immediate:
6121 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
6122 if (q
&& ((rd
| rm
) & 1)) {
6125 if (!u
&& (op
== 4 || op
== 6)) {
6128 /* Right shifts are encoded as N - shift, where N is the
6129 element size in bits. */
6131 shift
= shift
- (1 << (size
+ 3));
6139 imm
= (uint8_t) shift
;
6144 imm
= (uint16_t) shift
;
6155 for (pass
= 0; pass
< count
; pass
++) {
6157 neon_load_reg64(cpu_V0
, rm
+ pass
);
6158 tcg_gen_movi_i64(cpu_V1
, imm
);
6163 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
6165 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
6170 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
6172 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
6175 case 5: /* VSHL, VSLI */
6176 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
6178 case 6: /* VQSHLU */
6179 gen_helper_neon_qshlu_s64(cpu_V0
, cpu_env
,
6184 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
6187 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
6192 if (op
== 1 || op
== 3) {
6194 neon_load_reg64(cpu_V1
, rd
+ pass
);
6195 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6196 } else if (op
== 4 || (op
== 5 && u
)) {
6198 neon_load_reg64(cpu_V1
, rd
+ pass
);
6200 if (shift
< -63 || shift
> 63) {
6204 mask
= 0xffffffffffffffffull
>> -shift
;
6206 mask
= 0xffffffffffffffffull
<< shift
;
6209 tcg_gen_andi_i64(cpu_V1
, cpu_V1
, ~mask
);
6210 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6212 neon_store_reg64(cpu_V0
, rd
+ pass
);
6213 } else { /* size < 3 */
6214 /* Operands in T0 and T1. */
6215 tmp
= neon_load_reg(rm
, pass
);
6216 tmp2
= tcg_temp_new_i32();
6217 tcg_gen_movi_i32(tmp2
, imm
);
6221 GEN_NEON_INTEGER_OP(shl
);
6225 GEN_NEON_INTEGER_OP(rshl
);
6228 case 5: /* VSHL, VSLI */
6230 case 0: gen_helper_neon_shl_u8(tmp
, tmp
, tmp2
); break;
6231 case 1: gen_helper_neon_shl_u16(tmp
, tmp
, tmp2
); break;
6232 case 2: gen_helper_neon_shl_u32(tmp
, tmp
, tmp2
); break;
6236 case 6: /* VQSHLU */
6239 gen_helper_neon_qshlu_s8(tmp
, cpu_env
,
6243 gen_helper_neon_qshlu_s16(tmp
, cpu_env
,
6247 gen_helper_neon_qshlu_s32(tmp
, cpu_env
,
6255 GEN_NEON_INTEGER_OP_ENV(qshl
);
6258 tcg_temp_free_i32(tmp2
);
6260 if (op
== 1 || op
== 3) {
6262 tmp2
= neon_load_reg(rd
, pass
);
6263 gen_neon_add(size
, tmp
, tmp2
);
6264 tcg_temp_free_i32(tmp2
);
6265 } else if (op
== 4 || (op
== 5 && u
)) {
6270 mask
= 0xff >> -shift
;
6272 mask
= (uint8_t)(0xff << shift
);
6278 mask
= 0xffff >> -shift
;
6280 mask
= (uint16_t)(0xffff << shift
);
6284 if (shift
< -31 || shift
> 31) {
6288 mask
= 0xffffffffu
>> -shift
;
6290 mask
= 0xffffffffu
<< shift
;
6296 tmp2
= neon_load_reg(rd
, pass
);
6297 tcg_gen_andi_i32(tmp
, tmp
, mask
);
6298 tcg_gen_andi_i32(tmp2
, tmp2
, ~mask
);
6299 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
6300 tcg_temp_free_i32(tmp2
);
6302 neon_store_reg(rd
, pass
, tmp
);
6305 } else if (op
< 10) {
6306 /* Shift by immediate and narrow:
6307 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
6308 int input_unsigned
= (op
== 8) ? !u
: u
;
6312 shift
= shift
- (1 << (size
+ 3));
6315 tmp64
= tcg_const_i64(shift
);
6316 neon_load_reg64(cpu_V0
, rm
);
6317 neon_load_reg64(cpu_V1
, rm
+ 1);
6318 for (pass
= 0; pass
< 2; pass
++) {
6326 if (input_unsigned
) {
6327 gen_helper_neon_rshl_u64(cpu_V0
, in
, tmp64
);
6329 gen_helper_neon_rshl_s64(cpu_V0
, in
, tmp64
);
6332 if (input_unsigned
) {
6333 gen_helper_neon_shl_u64(cpu_V0
, in
, tmp64
);
6335 gen_helper_neon_shl_s64(cpu_V0
, in
, tmp64
);
6338 tmp
= tcg_temp_new_i32();
6339 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
6340 neon_store_reg(rd
, pass
, tmp
);
6342 tcg_temp_free_i64(tmp64
);
6345 imm
= (uint16_t)shift
;
6349 imm
= (uint32_t)shift
;
6351 tmp2
= tcg_const_i32(imm
);
6352 tmp4
= neon_load_reg(rm
+ 1, 0);
6353 tmp5
= neon_load_reg(rm
+ 1, 1);
6354 for (pass
= 0; pass
< 2; pass
++) {
6356 tmp
= neon_load_reg(rm
, 0);
6360 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
,
6363 tmp3
= neon_load_reg(rm
, 1);
6367 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
,
6369 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
6370 tcg_temp_free_i32(tmp
);
6371 tcg_temp_free_i32(tmp3
);
6372 tmp
= tcg_temp_new_i32();
6373 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
6374 neon_store_reg(rd
, pass
, tmp
);
6376 tcg_temp_free_i32(tmp2
);
6378 } else if (op
== 10) {
6380 if (q
|| (rd
& 1)) {
6383 tmp
= neon_load_reg(rm
, 0);
6384 tmp2
= neon_load_reg(rm
, 1);
6385 for (pass
= 0; pass
< 2; pass
++) {
6389 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
6392 /* The shift is less than the width of the source
6393 type, so we can just shift the whole register. */
6394 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
6395 /* Widen the result of shift: we need to clear
6396 * the potential overflow bits resulting from
6397 * left bits of the narrow input appearing as
6398 * right bits of left the neighbour narrow
6400 if (size
< 2 || !u
) {
6403 imm
= (0xffu
>> (8 - shift
));
6405 } else if (size
== 1) {
6406 imm
= 0xffff >> (16 - shift
);
6409 imm
= 0xffffffff >> (32 - shift
);
6412 imm64
= imm
| (((uint64_t)imm
) << 32);
6416 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, ~imm64
);
6419 neon_store_reg64(cpu_V0
, rd
+ pass
);
6421 } else if (op
>= 14) {
6422 /* VCVT fixed-point. */
6423 if (!(insn
& (1 << 21)) || (q
&& ((rd
| rm
) & 1))) {
            /* We have already masked out the must-be-1 top bit of imm6,
             * hence this 32-shift where the ARM ARM has 64-imm6.
             */
            shift = 32 - shift;
6430 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6431 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
6434 gen_vfp_ulto(0, shift
, 1);
6436 gen_vfp_slto(0, shift
, 1);
6439 gen_vfp_toul(0, shift
, 1);
6441 gen_vfp_tosl(0, shift
, 1);
6443 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
6448 } else { /* (insn & 0x00380080) == 0 */
6450 if (q
&& (rd
& 1)) {
6454 op
= (insn
>> 8) & 0xf;
6455 /* One register and immediate. */
6456 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
6457 invert
= (insn
& (1 << 5)) != 0;
            /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
             * We choose to not special-case this and will behave as if a
             * valid constant encoding of 0 had been given.
             */
6479 imm
= (imm
<< 8) | (imm
<< 24);
6482 imm
= (imm
<< 8) | 0xff;
6485 imm
= (imm
<< 16) | 0xffff;
6488 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
6496 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
6497 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
6503 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6504 if (op
& 1 && op
< 12) {
6505 tmp
= neon_load_reg(rd
, pass
);
6507 /* The immediate value has already been inverted, so
6509 tcg_gen_andi_i32(tmp
, tmp
, imm
);
6511 tcg_gen_ori_i32(tmp
, tmp
, imm
);
6515 tmp
= tcg_temp_new_i32();
6516 if (op
== 14 && invert
) {
6520 for (n
= 0; n
< 4; n
++) {
6521 if (imm
& (1 << (n
+ (pass
& 1) * 4)))
6522 val
|= 0xff << (n
* 8);
6524 tcg_gen_movi_i32(tmp
, val
);
6526 tcg_gen_movi_i32(tmp
, imm
);
6529 neon_store_reg(rd
, pass
, tmp
);
6532 } else { /* (insn & 0x00800010 == 0x00800000) */
6534 op
= (insn
>> 8) & 0xf;
6535 if ((insn
& (1 << 6)) == 0) {
6536 /* Three registers of different lengths. */
            /* undefreq: bit 0 : UNDEF if size == 0
             *           bit 1 : UNDEF if size == 1
             *           bit 2 : UNDEF if size == 2
             *           bit 3 : UNDEF if U == 1
             * Note that [2:0] set implies 'always UNDEF'
             */
            /* prewiden, src1_wide, src2_wide, undefreq */
            static const int neon_3reg_wide[16][4] = {
                {1, 0, 0, 0}, /* VADDL */
                {1, 1, 0, 0}, /* VADDW */
                {1, 0, 0, 0}, /* VSUBL */
                {1, 1, 0, 0}, /* VSUBW */
                {0, 1, 1, 0}, /* VADDHN */
                {0, 0, 0, 0}, /* VABAL */
                {0, 1, 1, 0}, /* VSUBHN */
                {0, 0, 0, 0}, /* VABDL */
                {0, 0, 0, 0}, /* VMLAL */
                {0, 0, 0, 9}, /* VQDMLAL */
                {0, 0, 0, 0}, /* VMLSL */
                {0, 0, 0, 9}, /* VQDMLSL */
                {0, 0, 0, 0}, /* Integer VMULL */
                {0, 0, 0, 1}, /* VQDMULL */
                {0, 0, 0, 0xa}, /* Polynomial VMULL */
                {0, 0, 0, 7}, /* Reserved: always UNDEF */
            };
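            /*
             * Reading an entry: the VQDMLAL row's undefreq of 9 sets bits 0
             * and 3, so that encoding UNDEFs for size == 0 or U == 1; the
             * reserved row's value of 7 sets bits [2:0] and therefore always
             * UNDEFs, as the note above says.
             */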
            prewiden = neon_3reg_wide[op][0];
            src1_wide = neon_3reg_wide[op][1];
            src2_wide = neon_3reg_wide[op][2];
            undefreq = neon_3reg_wide[op][3];
            if ((undefreq & (1 << size)) ||
                ((undefreq & 8) && u)) {
                return 1;
            }
            if ((src1_wide && (rn & 1)) ||
                (src2_wide && (rm & 1)) ||
                (!src2_wide && (rd & 1))) {
                return 1;
            }
6582 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6583 * outside the loop below as it only performs a single pass.
6585 if (op
== 14 && size
== 2) {
6586 TCGv_i64 tcg_rn
, tcg_rm
, tcg_rd
;
6588 if (!arm_dc_feature(s
, ARM_FEATURE_V8_PMULL
)) {
6591 tcg_rn
= tcg_temp_new_i64();
6592 tcg_rm
= tcg_temp_new_i64();
6593 tcg_rd
= tcg_temp_new_i64();
6594 neon_load_reg64(tcg_rn
, rn
);
6595 neon_load_reg64(tcg_rm
, rm
);
6596 gen_helper_neon_pmull_64_lo(tcg_rd
, tcg_rn
, tcg_rm
);
6597 neon_store_reg64(tcg_rd
, rd
);
6598 gen_helper_neon_pmull_64_hi(tcg_rd
, tcg_rn
, tcg_rm
);
6599 neon_store_reg64(tcg_rd
, rd
+ 1);
6600 tcg_temp_free_i64(tcg_rn
);
6601 tcg_temp_free_i64(tcg_rm
);
6602 tcg_temp_free_i64(tcg_rd
);
6606 /* Avoid overlapping operands. Wide source operands are
6607 always aligned so will never overlap with wide
6608 destinations in problematic ways. */
6609 if (rd
== rm
&& !src2_wide
) {
6610 tmp
= neon_load_reg(rm
, 1);
6611 neon_store_scratch(2, tmp
);
6612 } else if (rd
== rn
&& !src1_wide
) {
6613 tmp
= neon_load_reg(rn
, 1);
6614 neon_store_scratch(2, tmp
);
6616 TCGV_UNUSED_I32(tmp3
);
6617 for (pass
= 0; pass
< 2; pass
++) {
6619 neon_load_reg64(cpu_V0
, rn
+ pass
);
6620 TCGV_UNUSED_I32(tmp
);
6622 if (pass
== 1 && rd
== rn
) {
6623 tmp
= neon_load_scratch(2);
6625 tmp
= neon_load_reg(rn
, pass
);
6628 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
6632 neon_load_reg64(cpu_V1
, rm
+ pass
);
6633 TCGV_UNUSED_I32(tmp2
);
6635 if (pass
== 1 && rd
== rm
) {
6636 tmp2
= neon_load_scratch(2);
6638 tmp2
= neon_load_reg(rm
, pass
);
6641 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
6645 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6646 gen_neon_addl(size
);
6648 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6649 gen_neon_subl(size
);
6651 case 5: case 7: /* VABAL, VABDL */
6652 switch ((size
<< 1) | u
) {
6654 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
6657 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
6660 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
6663 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
6666 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
6669 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
6673 tcg_temp_free_i32(tmp2
);
6674 tcg_temp_free_i32(tmp
);
6676 case 8: case 9: case 10: case 11: case 12: case 13:
6677 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6678 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
6680 case 14: /* Polynomial VMULL */
6681 gen_helper_neon_mull_p8(cpu_V0
, tmp
, tmp2
);
6682 tcg_temp_free_i32(tmp2
);
6683 tcg_temp_free_i32(tmp
);
6685 default: /* 15 is RESERVED: caught earlier */
6690 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6691 neon_store_reg64(cpu_V0
, rd
+ pass
);
6692 } else if (op
== 5 || (op
>= 8 && op
<= 11)) {
6694 neon_load_reg64(cpu_V1
, rd
+ pass
);
6696 case 10: /* VMLSL */
6697 gen_neon_negl(cpu_V0
, size
);
6699 case 5: case 8: /* VABAL, VMLAL */
6700 gen_neon_addl(size
);
6702 case 9: case 11: /* VQDMLAL, VQDMLSL */
6703 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6705 gen_neon_negl(cpu_V0
, size
);
6707 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
6712 neon_store_reg64(cpu_V0
, rd
+ pass
);
6713 } else if (op
== 4 || op
== 6) {
6714 /* Narrowing operation. */
6715 tmp
= tcg_temp_new_i32();
6719 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
6722 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
6725 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
6726 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
6733 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
6736 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
6739 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
6740 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
6741 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
6749 neon_store_reg(rd
, 0, tmp3
);
6750 neon_store_reg(rd
, 1, tmp
);
6753 /* Write back the result. */
6754 neon_store_reg64(cpu_V0
, rd
+ pass
);
6758 /* Two registers and a scalar. NB that for ops of this form
6759 * the ARM ARM labels bit 24 as Q, but it is in our variable
6766 case 1: /* Float VMLA scalar */
6767 case 5: /* Floating point VMLS scalar */
6768 case 9: /* Floating point VMUL scalar */
6773 case 0: /* Integer VMLA scalar */
6774 case 4: /* Integer VMLS scalar */
6775 case 8: /* Integer VMUL scalar */
6776 case 12: /* VQDMULH scalar */
6777 case 13: /* VQRDMULH scalar */
6778 if (u
&& ((rd
| rn
) & 1)) {
6781 tmp
= neon_get_scalar(size
, rm
);
6782 neon_store_scratch(0, tmp
);
6783 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
6784 tmp
= neon_load_scratch(0);
6785 tmp2
= neon_load_reg(rn
, pass
);
6788 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
6790 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
6792 } else if (op
== 13) {
6794 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
6796 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
6798 } else if (op
& 1) {
6799 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6800 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
6801 tcg_temp_free_ptr(fpstatus
);
6804 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
6805 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
6806 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
6810 tcg_temp_free_i32(tmp2
);
6813 tmp2
= neon_load_reg(rd
, pass
);
6816 gen_neon_add(size
, tmp
, tmp2
);
6820 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6821 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
6822 tcg_temp_free_ptr(fpstatus
);
6826 gen_neon_rsb(size
, tmp
, tmp2
);
6830 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6831 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
6832 tcg_temp_free_ptr(fpstatus
);
6838 tcg_temp_free_i32(tmp2
);
6840 neon_store_reg(rd
, pass
, tmp
);
6843 case 3: /* VQDMLAL scalar */
6844 case 7: /* VQDMLSL scalar */
6845 case 11: /* VQDMULL scalar */
6850 case 2: /* VMLAL sclar */
6851 case 6: /* VMLSL scalar */
6852 case 10: /* VMULL scalar */
6856 tmp2
= neon_get_scalar(size
, rm
);
6857 /* We need a copy of tmp2 because gen_neon_mull
6858 * deletes it during pass 0. */
6859 tmp4
= tcg_temp_new_i32();
6860 tcg_gen_mov_i32(tmp4
, tmp2
);
6861 tmp3
= neon_load_reg(rn
, 1);
6863 for (pass
= 0; pass
< 2; pass
++) {
6865 tmp
= neon_load_reg(rn
, 0);
6870 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
6872 neon_load_reg64(cpu_V1
, rd
+ pass
);
6876 gen_neon_negl(cpu_V0
, size
);
6879 gen_neon_addl(size
);
6882 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6884 gen_neon_negl(cpu_V0
, size
);
6886 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
6892 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6897 neon_store_reg64(cpu_V0
, rd
+ pass
);
6902 default: /* 14 and 15 are RESERVED */
6906 } else { /* size == 3 */
6909 imm
= (insn
>> 8) & 0xf;
6914 if (q
&& ((rd
| rn
| rm
) & 1)) {
6919 neon_load_reg64(cpu_V0
, rn
);
6921 neon_load_reg64(cpu_V1
, rn
+ 1);
6923 } else if (imm
== 8) {
6924 neon_load_reg64(cpu_V0
, rn
+ 1);
6926 neon_load_reg64(cpu_V1
, rm
);
6929 tmp64
= tcg_temp_new_i64();
6931 neon_load_reg64(cpu_V0
, rn
);
6932 neon_load_reg64(tmp64
, rn
+ 1);
6934 neon_load_reg64(cpu_V0
, rn
+ 1);
6935 neon_load_reg64(tmp64
, rm
);
6937 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
6938 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
6939 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6941 neon_load_reg64(cpu_V1
, rm
);
6943 neon_load_reg64(cpu_V1
, rm
+ 1);
6946 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
6947 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
6948 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
6949 tcg_temp_free_i64(tmp64
);
6952 neon_load_reg64(cpu_V0
, rn
);
6953 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
6954 neon_load_reg64(cpu_V1
, rm
);
6955 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
6956 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6958 neon_store_reg64(cpu_V0
, rd
);
6960 neon_store_reg64(cpu_V1
, rd
+ 1);
6962 } else if ((insn
& (1 << 11)) == 0) {
6963 /* Two register misc. */
6964 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
6965 size
= (insn
>> 18) & 3;
6966 /* UNDEF for unknown op values and bad op-size combinations */
6967 if ((neon_2rm_sizes
[op
] & (1 << size
)) == 0) {
6970 if (neon_2rm_is_v8_op(op
) &&
6971 !arm_dc_feature(s
, ARM_FEATURE_V8
)) {
6974 if ((op
!= NEON_2RM_VMOVN
&& op
!= NEON_2RM_VQMOVN
) &&
6975 q
&& ((rm
| rd
) & 1)) {
6979 case NEON_2RM_VREV64
:
6980 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
6981 tmp
= neon_load_reg(rm
, pass
* 2);
6982 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
6984 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
6985 case 1: gen_swap_half(tmp
); break;
6986 case 2: /* no-op */ break;
6989 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
6991 neon_store_reg(rd
, pass
* 2, tmp2
);
6994 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
6995 case 1: gen_swap_half(tmp2
); break;
6998 neon_store_reg(rd
, pass
* 2, tmp2
);
7002 case NEON_2RM_VPADDL
: case NEON_2RM_VPADDL_U
:
7003 case NEON_2RM_VPADAL
: case NEON_2RM_VPADAL_U
:
7004 for (pass
= 0; pass
< q
+ 1; pass
++) {
7005 tmp
= neon_load_reg(rm
, pass
* 2);
7006 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
7007 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
7008 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
7010 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
7011 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
7012 case 2: tcg_gen_add_i64(CPU_V001
); break;
7015 if (op
>= NEON_2RM_VPADAL
) {
7017 neon_load_reg64(cpu_V1
, rd
+ pass
);
7018 gen_neon_addl(size
);
7020 neon_store_reg64(cpu_V0
, rd
+ pass
);
7026 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
7027 tmp
= neon_load_reg(rm
, n
);
7028 tmp2
= neon_load_reg(rd
, n
+ 1);
7029 neon_store_reg(rm
, n
, tmp2
);
7030 neon_store_reg(rd
, n
+ 1, tmp
);
7037 if (gen_neon_unzip(rd
, rm
, size
, q
)) {
7042 if (gen_neon_zip(rd
, rm
, size
, q
)) {
7046 case NEON_2RM_VMOVN
: case NEON_2RM_VQMOVN
:
7047 /* also VQMOVUN; op field and mnemonics don't line up */
7051 TCGV_UNUSED_I32(tmp2
);
7052 for (pass
= 0; pass
< 2; pass
++) {
7053 neon_load_reg64(cpu_V0
, rm
+ pass
);
7054 tmp
= tcg_temp_new_i32();
7055 gen_neon_narrow_op(op
== NEON_2RM_VMOVN
, q
, size
,
7060 neon_store_reg(rd
, 0, tmp2
);
7061 neon_store_reg(rd
, 1, tmp
);
7065 case NEON_2RM_VSHLL
:
7066 if (q
|| (rd
& 1)) {
7069 tmp
= neon_load_reg(rm
, 0);
7070 tmp2
= neon_load_reg(rm
, 1);
7071 for (pass
= 0; pass
< 2; pass
++) {
7074 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
7075 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, 8 << size
);
7076 neon_store_reg64(cpu_V0
, rd
+ pass
);
7079 case NEON_2RM_VCVT_F16_F32
:
7080 if (!arm_dc_feature(s
, ARM_FEATURE_VFP_FP16
) ||
7084 tmp
= tcg_temp_new_i32();
7085 tmp2
= tcg_temp_new_i32();
7086 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 0));
7087 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
7088 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 1));
7089 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
7090 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
7091 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
7092 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 2));
7093 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
7094 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 3));
7095 neon_store_reg(rd
, 0, tmp2
);
7096 tmp2
= tcg_temp_new_i32();
7097 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
7098 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
7099 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
7100 neon_store_reg(rd
, 1, tmp2
);
7101 tcg_temp_free_i32(tmp
);
7103 case NEON_2RM_VCVT_F32_F16
:
7104 if (!arm_dc_feature(s
, ARM_FEATURE_VFP_FP16
) ||
7108 tmp3
= tcg_temp_new_i32();
7109 tmp
= neon_load_reg(rm
, 0);
7110 tmp2
= neon_load_reg(rm
, 1);
7111 tcg_gen_ext16u_i32(tmp3
, tmp
);
7112 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
7113 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 0));
7114 tcg_gen_shri_i32(tmp3
, tmp
, 16);
7115 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
7116 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 1));
7117 tcg_temp_free_i32(tmp
);
7118 tcg_gen_ext16u_i32(tmp3
, tmp2
);
7119 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
7120 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 2));
7121 tcg_gen_shri_i32(tmp3
, tmp2
, 16);
7122 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
7123 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 3));
7124 tcg_temp_free_i32(tmp2
);
7125 tcg_temp_free_i32(tmp3
);
7127 case NEON_2RM_AESE
: case NEON_2RM_AESMC
:
7128 if (!arm_dc_feature(s
, ARM_FEATURE_V8_AES
)
7129 || ((rm
| rd
) & 1)) {
7132 tmp
= tcg_const_i32(rd
);
7133 tmp2
= tcg_const_i32(rm
);
7135 /* Bit 6 is the lowest opcode bit; it distinguishes between
7136 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7138 tmp3
= tcg_const_i32(extract32(insn
, 6, 1));
7140 if (op
== NEON_2RM_AESE
) {
7141 gen_helper_crypto_aese(cpu_env
, tmp
, tmp2
, tmp3
);
7143 gen_helper_crypto_aesmc(cpu_env
, tmp
, tmp2
, tmp3
);
7145 tcg_temp_free_i32(tmp
);
7146 tcg_temp_free_i32(tmp2
);
7147 tcg_temp_free_i32(tmp3
);
7149 case NEON_2RM_SHA1H
:
7150 if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA1
)
7151 || ((rm
| rd
) & 1)) {
7154 tmp
= tcg_const_i32(rd
);
7155 tmp2
= tcg_const_i32(rm
);
7157 gen_helper_crypto_sha1h(cpu_env
, tmp
, tmp2
);
7159 tcg_temp_free_i32(tmp
);
7160 tcg_temp_free_i32(tmp2
);
7162 case NEON_2RM_SHA1SU1
:
7163 if ((rm
| rd
) & 1) {
7166 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7168 if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA256
)) {
7171 } else if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA1
)) {
7174 tmp
= tcg_const_i32(rd
);
7175 tmp2
= tcg_const_i32(rm
);
7177 gen_helper_crypto_sha256su0(cpu_env
, tmp
, tmp2
);
7179 gen_helper_crypto_sha1su1(cpu_env
, tmp
, tmp2
);
7181 tcg_temp_free_i32(tmp
);
7182 tcg_temp_free_i32(tmp2
);
7186 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
7187 if (neon_2rm_is_float_op(op
)) {
7188 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
7189 neon_reg_offset(rm
, pass
));
7190 TCGV_UNUSED_I32(tmp
);
7192 tmp
= neon_load_reg(rm
, pass
);
7195 case NEON_2RM_VREV32
:
7197 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
7198 case 1: gen_swap_half(tmp
); break;
7202 case NEON_2RM_VREV16
:
7207 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
7208 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
7209 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
7215 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
7216 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
7217 case 2: tcg_gen_clzi_i32(tmp
, tmp
, 32); break;
7222 gen_helper_neon_cnt_u8(tmp
, tmp
);
7225 tcg_gen_not_i32(tmp
, tmp
);
7227 case NEON_2RM_VQABS
:
7230 gen_helper_neon_qabs_s8(tmp
, cpu_env
, tmp
);
7233 gen_helper_neon_qabs_s16(tmp
, cpu_env
, tmp
);
7236 gen_helper_neon_qabs_s32(tmp
, cpu_env
, tmp
);
7241 case NEON_2RM_VQNEG
:
7244 gen_helper_neon_qneg_s8(tmp
, cpu_env
, tmp
);
7247 gen_helper_neon_qneg_s16(tmp
, cpu_env
, tmp
);
7250 gen_helper_neon_qneg_s32(tmp
, cpu_env
, tmp
);
7255 case NEON_2RM_VCGT0
: case NEON_2RM_VCLE0
:
7256 tmp2
= tcg_const_i32(0);
7258 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
7259 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
7260 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
7263 tcg_temp_free_i32(tmp2
);
7264 if (op
== NEON_2RM_VCLE0
) {
7265 tcg_gen_not_i32(tmp
, tmp
);
7268 case NEON_2RM_VCGE0
: case NEON_2RM_VCLT0
:
7269 tmp2
= tcg_const_i32(0);
7271 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
7272 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
7273 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
7276 tcg_temp_free_i32(tmp2
);
7277 if (op
== NEON_2RM_VCLT0
) {
7278 tcg_gen_not_i32(tmp
, tmp
);
7281 case NEON_2RM_VCEQ0
:
7282 tmp2
= tcg_const_i32(0);
7284 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
7285 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
7286 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
7289 tcg_temp_free_i32(tmp2
);
7293 case 0: gen_helper_neon_abs_s8(tmp
, tmp
); break;
7294 case 1: gen_helper_neon_abs_s16(tmp
, tmp
); break;
7295 case 2: tcg_gen_abs_i32(tmp
, tmp
); break;
7300 tmp2
= tcg_const_i32(0);
7301 gen_neon_rsb(size
, tmp
, tmp2
);
7302 tcg_temp_free_i32(tmp2
);
7304 case NEON_2RM_VCGT0_F
:
7306 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7307 tmp2
= tcg_const_i32(0);
7308 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
7309 tcg_temp_free_i32(tmp2
);
7310 tcg_temp_free_ptr(fpstatus
);
7313 case NEON_2RM_VCGE0_F
:
7315 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7316 tmp2
= tcg_const_i32(0);
7317 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
7318 tcg_temp_free_i32(tmp2
);
7319 tcg_temp_free_ptr(fpstatus
);
7322 case NEON_2RM_VCEQ0_F
:
7324 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7325 tmp2
= tcg_const_i32(0);
7326 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
7327 tcg_temp_free_i32(tmp2
);
7328 tcg_temp_free_ptr(fpstatus
);
7331 case NEON_2RM_VCLE0_F
:
7333 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7334 tmp2
= tcg_const_i32(0);
7335 gen_helper_neon_cge_f32(tmp
, tmp2
, tmp
, fpstatus
);
7336 tcg_temp_free_i32(tmp2
);
7337 tcg_temp_free_ptr(fpstatus
);
7340 case NEON_2RM_VCLT0_F
:
7342 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7343 tmp2
= tcg_const_i32(0);
7344 gen_helper_neon_cgt_f32(tmp
, tmp2
, tmp
, fpstatus
);
7345 tcg_temp_free_i32(tmp2
);
7346 tcg_temp_free_ptr(fpstatus
);
7349 case NEON_2RM_VABS_F
:
7352 case NEON_2RM_VNEG_F
:
7356 tmp2
= neon_load_reg(rd
, pass
);
7357 neon_store_reg(rm
, pass
, tmp2
);
7360 tmp2
= neon_load_reg(rd
, pass
);
7362 case 0: gen_neon_trn_u8(tmp
, tmp2
); break;
7363 case 1: gen_neon_trn_u16(tmp
, tmp2
); break;
7366 neon_store_reg(rm
, pass
, tmp2
);
7368 case NEON_2RM_VRINTN
:
7369 case NEON_2RM_VRINTA
:
7370 case NEON_2RM_VRINTM
:
7371 case NEON_2RM_VRINTP
:
7372 case NEON_2RM_VRINTZ
:
7375 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7378 if (op
== NEON_2RM_VRINTZ
) {
7379 rmode
= FPROUNDING_ZERO
;
7381 rmode
= fp_decode_rm
[((op
& 0x6) >> 1) ^ 1];
7384 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
7385 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
7387 gen_helper_rints(cpu_F0s
, cpu_F0s
, fpstatus
);
7388 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
7390 tcg_temp_free_ptr(fpstatus
);
7391 tcg_temp_free_i32(tcg_rmode
);
7394 case NEON_2RM_VRINTX
:
7396 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7397 gen_helper_rints_exact(cpu_F0s
, cpu_F0s
, fpstatus
);
7398 tcg_temp_free_ptr(fpstatus
);
7401 case NEON_2RM_VCVTAU
:
7402 case NEON_2RM_VCVTAS
:
7403 case NEON_2RM_VCVTNU
:
7404 case NEON_2RM_VCVTNS
:
7405 case NEON_2RM_VCVTPU
:
7406 case NEON_2RM_VCVTPS
:
7407 case NEON_2RM_VCVTMU
:
7408 case NEON_2RM_VCVTMS
:
7410 bool is_signed
= !extract32(insn
, 7, 1);
7411 TCGv_ptr fpst
= get_fpstatus_ptr(1);
7412 TCGv_i32 tcg_rmode
, tcg_shift
;
7413 int rmode
= fp_decode_rm
[extract32(insn
, 8, 2)];
7415 tcg_shift
= tcg_const_i32(0);
7416 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
7417 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
7421 gen_helper_vfp_tosls(cpu_F0s
, cpu_F0s
,
7424 gen_helper_vfp_touls(cpu_F0s
, cpu_F0s
,
7428 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
7430 tcg_temp_free_i32(tcg_rmode
);
7431 tcg_temp_free_i32(tcg_shift
);
7432 tcg_temp_free_ptr(fpst
);
7435 case NEON_2RM_VRECPE
:
7437 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7438 gen_helper_recpe_u32(tmp
, tmp
, fpstatus
);
7439 tcg_temp_free_ptr(fpstatus
);
7442 case NEON_2RM_VRSQRTE
:
7444 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7445 gen_helper_rsqrte_u32(tmp
, tmp
, fpstatus
);
7446 tcg_temp_free_ptr(fpstatus
);
7449 case NEON_2RM_VRECPE_F
:
7451 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7452 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, fpstatus
);
7453 tcg_temp_free_ptr(fpstatus
);
7456 case NEON_2RM_VRSQRTE_F
:
7458 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7459 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, fpstatus
);
7460 tcg_temp_free_ptr(fpstatus
);
7463 case NEON_2RM_VCVT_FS
: /* VCVT.F32.S32 */
7466 case NEON_2RM_VCVT_FU
: /* VCVT.F32.U32 */
7469 case NEON_2RM_VCVT_SF
: /* VCVT.S32.F32 */
7470 gen_vfp_tosiz(0, 1);
7472 case NEON_2RM_VCVT_UF
: /* VCVT.U32.F32 */
7473 gen_vfp_touiz(0, 1);
7476 /* Reserved op values were caught by the
7477 * neon_2rm_sizes[] check earlier.
7481 if (neon_2rm_is_float_op(op
)) {
7482 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
7483 neon_reg_offset(rd
, pass
));
7485 neon_store_reg(rd
, pass
, tmp
);
7490 } else if ((insn
& (1 << 10)) == 0) {
7492 int n
= ((insn
>> 8) & 3) + 1;
7493 if ((rn
+ n
) > 32) {
7494 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7495 * helper function running off the end of the register file.
7500 if (insn
& (1 << 6)) {
7501 tmp
= neon_load_reg(rd
, 0);
7503 tmp
= tcg_temp_new_i32();
7504 tcg_gen_movi_i32(tmp
, 0);
7506 tmp2
= neon_load_reg(rm
, 0);
7507 tmp4
= tcg_const_i32(rn
);
7508 tmp5
= tcg_const_i32(n
);
7509 gen_helper_neon_tbl(tmp2
, cpu_env
, tmp2
, tmp
, tmp4
, tmp5
);
7510 tcg_temp_free_i32(tmp
);
7511 if (insn
& (1 << 6)) {
7512 tmp
= neon_load_reg(rd
, 1);
7514 tmp
= tcg_temp_new_i32();
7515 tcg_gen_movi_i32(tmp
, 0);
7517 tmp3
= neon_load_reg(rm
, 1);
7518 gen_helper_neon_tbl(tmp3
, cpu_env
, tmp3
, tmp
, tmp4
, tmp5
);
7519 tcg_temp_free_i32(tmp5
);
7520 tcg_temp_free_i32(tmp4
);
7521 neon_store_reg(rd
, 0, tmp2
);
7522 neon_store_reg(rd
, 1, tmp3
);
7523 tcg_temp_free_i32(tmp
);
7524 } else if ((insn
& 0x380) == 0) {
7526 if ((insn
& (7 << 16)) == 0 || (q
&& (rd
& 1))) {
7529 if (insn
& (1 << 19)) {
7530 tmp
= neon_load_reg(rm
, 1);
7532 tmp
= neon_load_reg(rm
, 0);
7534 if (insn
& (1 << 16)) {
7535 gen_neon_dup_u8(tmp
, ((insn
>> 17) & 3) * 8);
7536 } else if (insn
& (1 << 17)) {
7537 if ((insn
>> 18) & 1)
7538 gen_neon_dup_high16(tmp
);
7540 gen_neon_dup_low16(tmp
);
7542 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
7543 tmp2
= tcg_temp_new_i32();
7544 tcg_gen_mov_i32(tmp2
, tmp
);
7545 neon_store_reg(rd
, pass
, tmp2
);
7547 tcg_temp_free_i32(tmp
);
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;
    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }
    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
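    /* The decoded (cpnum, is64, ns, crn, crm, opc1, opc2) tuple is packed by
     * ENCODE_CP_REG() into the single 32-bit key used for s->cp_regs, the
     * per-CPU hash table of ARMCPRegInfo entries, so one lookup serves both
     * the MRC/MCR and MRRC/MCRR forms.
     */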
7601 /* Check access permissions */
7602 if (!cp_access_ok(s
->current_el
, ri
, isread
)) {
7607 (arm_dc_feature(s
, ARM_FEATURE_XSCALE
) && cpnum
< 14)) {
7608 /* Emit code to perform further access permissions checks at
7609 * runtime; this may result in an exception.
7610 * Note that on XScale all cp0..c13 registers do an access check
7611 * call in order to handle c15_cpar.
7614 TCGv_i32 tcg_syn
, tcg_isread
;
7617 /* Note that since we are an implementation which takes an
7618 * exception on a trapped conditional instruction only if the
7619 * instruction passes its condition code check, we can take
7620 * advantage of the clause in the ARM ARM that allows us to set
7621 * the COND field in the instruction to 0xE in all cases.
7622 * We could fish the actual condition out of the insn (ARM)
7623 * or the condexec bits (Thumb) but it isn't necessary.
7628 syndrome
= syn_cp14_rrt_trap(1, 0xe, opc1
, crm
, rt
, rt2
,
7631 syndrome
= syn_cp14_rt_trap(1, 0xe, opc1
, opc2
, crn
, crm
,
7637 syndrome
= syn_cp15_rrt_trap(1, 0xe, opc1
, crm
, rt
, rt2
,
7640 syndrome
= syn_cp15_rt_trap(1, 0xe, opc1
, opc2
, crn
, crm
,
7645 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7646 * so this can only happen if this is an ARMv7 or earlier CPU,
7647 * in which case the syndrome information won't actually be
7650 assert(!arm_dc_feature(s
, ARM_FEATURE_V8
));
7651 syndrome
= syn_uncategorized();
7655 gen_set_condexec(s
);
7656 gen_set_pc_im(s
, s
->pc
- 4);
7657 tmpptr
= tcg_const_ptr(ri
);
7658 tcg_syn
= tcg_const_i32(syndrome
);
7659 tcg_isread
= tcg_const_i32(isread
);
7660 gen_helper_access_check_cp_reg(cpu_env
, tmpptr
, tcg_syn
,
7662 tcg_temp_free_ptr(tmpptr
);
7663 tcg_temp_free_i32(tcg_syn
);
7664 tcg_temp_free_i32(tcg_isread
);
7667 /* Handle special cases first */
7668 switch (ri
->type
& ~(ARM_CP_FLAG_MASK
& ~ARM_CP_SPECIAL
)) {
7675 gen_set_pc_im(s
, s
->pc
);
7676 s
->base
.is_jmp
= DISAS_WFI
;
7682 if ((s
->base
.tb
->cflags
& CF_USE_ICOUNT
) && (ri
->type
& ARM_CP_IO
)) {
7691 if (ri
->type
& ARM_CP_CONST
) {
7692 tmp64
= tcg_const_i64(ri
->resetvalue
);
7693 } else if (ri
->readfn
) {
7695 tmp64
= tcg_temp_new_i64();
7696 tmpptr
= tcg_const_ptr(ri
);
7697 gen_helper_get_cp_reg64(tmp64
, cpu_env
, tmpptr
);
7698 tcg_temp_free_ptr(tmpptr
);
7700 tmp64
= tcg_temp_new_i64();
7701 tcg_gen_ld_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
7703 tmp
= tcg_temp_new_i32();
7704 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
7705 store_reg(s
, rt
, tmp
);
7706 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7707 tmp
= tcg_temp_new_i32();
7708 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
7709 tcg_temp_free_i64(tmp64
);
7710 store_reg(s
, rt2
, tmp
);
7713 if (ri
->type
& ARM_CP_CONST
) {
7714 tmp
= tcg_const_i32(ri
->resetvalue
);
7715 } else if (ri
->readfn
) {
7717 tmp
= tcg_temp_new_i32();
7718 tmpptr
= tcg_const_ptr(ri
);
7719 gen_helper_get_cp_reg(tmp
, cpu_env
, tmpptr
);
7720 tcg_temp_free_ptr(tmpptr
);
7722 tmp
= load_cpu_offset(ri
->fieldoffset
);
7725 /* Destination register of r15 for 32 bit loads sets
7726 * the condition codes from the high 4 bits of the value
7729 tcg_temp_free_i32(tmp
);
7731 store_reg(s
, rt
, tmp
);
7736 if (ri
->type
& ARM_CP_CONST
) {
7737 /* If not forbidden by access permissions, treat as WI */
7742 TCGv_i32 tmplo
, tmphi
;
7743 TCGv_i64 tmp64
= tcg_temp_new_i64();
7744 tmplo
= load_reg(s
, rt
);
7745 tmphi
= load_reg(s
, rt2
);
7746 tcg_gen_concat_i32_i64(tmp64
, tmplo
, tmphi
);
7747 tcg_temp_free_i32(tmplo
);
7748 tcg_temp_free_i32(tmphi
);
7750 TCGv_ptr tmpptr
= tcg_const_ptr(ri
);
7751 gen_helper_set_cp_reg64(cpu_env
, tmpptr
, tmp64
);
7752 tcg_temp_free_ptr(tmpptr
);
7754 tcg_gen_st_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
7756 tcg_temp_free_i64(tmp64
);
7761 tmp
= load_reg(s
, rt
);
7762 tmpptr
= tcg_const_ptr(ri
);
7763 gen_helper_set_cp_reg(cpu_env
, tmpptr
, tmp
);
7764 tcg_temp_free_ptr(tmpptr
);
7765 tcg_temp_free_i32(tmp
);
7767 TCGv_i32 tmp
= load_reg(s
, rt
);
7768 store_cpu_offset(tmp
, ri
->fieldoffset
);
7773 if ((s
->base
.tb
->cflags
& CF_USE_ICOUNT
) && (ri
->type
& ARM_CP_IO
)) {
7774 /* I/O operations must end the TB here (whether read or write) */
7777 } else if (!isread
&& !(ri
->type
& ARM_CP_SUPPRESS_TB_END
)) {
7778 /* We default to ending the TB on a coprocessor register write,
7779 * but allow this to be suppressed by the register definition
7780 * (usually only necessary to work around guest bugs).
7788 /* Unknown register; this might be a guest error or a QEMU
7789 * unimplemented feature.
7792 qemu_log_mask(LOG_UNIMP
, "%s access to unsupported AArch32 "
7793 "64 bit system register cp:%d opc1: %d crm:%d "
7795 isread
? "read" : "write", cpnum
, opc1
, crm
,
7796 s
->ns
? "non-secure" : "secure");
7798 qemu_log_mask(LOG_UNIMP
, "%s access to unsupported AArch32 "
7799 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7801 isread
? "read" : "write", cpnum
, opc1
, crn
, crm
, opc2
,
7802 s
->ns
? "non-secure" : "secure");
/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_extrl_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}

/* load a 32-bit value from a register and perform a 64-bit accumulate. */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp2;

    /* Load value and extend to 64 bits.  */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}

/* load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv_i32 tmpl;
    TCGv_i32 tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}

/* Set N and Z flags from hi|lo.  */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
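/* gen_logicq_cc() relies on how this translator stores the NZ flags:
 * cpu_NF holds a value whose bit 31 is the N flag, and cpu_ZF holds a value
 * that is zero exactly when Z is set, so OR-ing the two halves of the 64-bit
 * result is enough to make Z reflect "hi|lo == 0".
 */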
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.  The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually.  */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    s->is_ldex = true;

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        gen_aa32_ld_i64(s, t64, addr, get_mem_index(s), opc);
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}

static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
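/* An exclusive_addr of -1 acts as the "monitor open" sentinel: it can never
 * match a real (zero-extended 32-bit) address, so gen_clrex() and the fail
 * path of gen_store_exclusive() below both close the monitor by writing -1.
 */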
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();
    t1 = load_reg(s, rt);
    if (size == 3) {
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        t2 = load_reg(s, rt2);
        tcg_gen_concat_i32_i64(n64, t1, t2);
        tcg_temp_free_i32(t2);
        gen_aa32_frob64(s, n64);

        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        gen_aa32_frob64(s, o64);
        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        t2 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
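/* A minimal sketch (hypothetical guest code) of the sequence the two helpers
 * above model; the cmpxchg stands in for the architectural exclusive monitor:
 *
 *   retry:
 *     ldrex   r1, [r0]        @ gen_load_exclusive(): remember addr + value
 *     add     r1, r1, #1
 *     strex   r2, r1, [r0]    @ gen_store_exclusive(): cmpxchg vs remembered
 *     cmp     r2, #0          @ r2 == 0 only if the store succeeded
 *     bne     retry
 */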
/*
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
7991 if (s
->current_el
== 1 && !s
->ns
&& mode
== ARM_CPU_MODE_MON
) {
7992 gen_exception_insn(s
, 4, EXCP_UDEF
, syn_uncategorized(), 3);
7996 if (s
->current_el
== 0 || s
->current_el
== 2) {
8001 case ARM_CPU_MODE_USR
:
8002 case ARM_CPU_MODE_FIQ
:
8003 case ARM_CPU_MODE_IRQ
:
8004 case ARM_CPU_MODE_SVC
:
8005 case ARM_CPU_MODE_ABT
:
8006 case ARM_CPU_MODE_UND
:
8007 case ARM_CPU_MODE_SYS
:
8009 case ARM_CPU_MODE_HYP
:
8010 if (s
->current_el
== 1 || !arm_dc_feature(s
, ARM_FEATURE_EL2
)) {
8014 case ARM_CPU_MODE_MON
:
8015 /* No need to check specifically for "are we non-secure" because
8016 * we've already made EL0 UNDEF and handled the trap for S-EL1;
8017 * so if this isn't EL3 then we must be non-secure.
8019 if (s
->current_el
!= 3) {
8028 gen_exception_insn(s
, 4, EXCP_UDEF
, syn_uncategorized(),
8029 default_exception_el(s
));
8033 addr
= tcg_temp_new_i32();
8034 tmp
= tcg_const_i32(mode
);
8035 /* get_r13_banked() will raise an exception if called from System mode */
8036 gen_set_condexec(s
);
8037 gen_set_pc_im(s
, s
->pc
- 4);
8038 gen_helper_get_r13_banked(addr
, cpu_env
, tmp
);
8039 tcg_temp_free_i32(tmp
);
8056 tcg_gen_addi_i32(addr
, addr
, offset
);
8057 tmp
= load_reg(s
, 14);
8058 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
8059 tcg_temp_free_i32(tmp
);
8060 tmp
= load_cpu_field(spsr
);
8061 tcg_gen_addi_i32(addr
, addr
, 4);
8062 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
8063 tcg_temp_free_i32(tmp
);
8081 tcg_gen_addi_i32(addr
, addr
, offset
);
8082 tmp
= tcg_const_i32(mode
);
8083 gen_helper_set_r13_banked(cpu_env
, tmp
, addr
);
8084 tcg_temp_free_i32(tmp
);
8086 tcg_temp_free_i32(addr
);
8087 s
->base
.is_jmp
= DISAS_UPDATE
;
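/* DISAS_UPDATE ends the translation block here: the banked-SP helpers above
 * can change CPU state (and may raise an exception), so we do not chain
 * straight into the next instruction.
 */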
8090 static void disas_arm_insn(DisasContext
*s
, unsigned int insn
)
8092 unsigned int cond
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
8099 /* M variants do not implement ARM mode; this must raise the INVSTATE
8100 * UsageFault exception.
8102 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
8103 gen_exception_insn(s
, 4, EXCP_INVSTATE
, syn_uncategorized(),
8104 default_exception_el(s
));
8109 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8110 * choose to UNDEF. In ARMv5 and above the space is used
8111 * for miscellaneous unconditional instructions.
8115 /* Unconditional instructions. */
8116 if (((insn
>> 25) & 7) == 1) {
8117 /* NEON Data processing. */
8118 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
8122 if (disas_neon_data_insn(s
, insn
)) {
8127 if ((insn
& 0x0f100000) == 0x04000000) {
8128 /* NEON load/store. */
8129 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
8133 if (disas_neon_ls_insn(s
, insn
)) {
8138 if ((insn
& 0x0f000e10) == 0x0e000a00) {
8140 if (disas_vfp_insn(s
, insn
)) {
8145 if (((insn
& 0x0f30f000) == 0x0510f000) ||
8146 ((insn
& 0x0f30f010) == 0x0710f000)) {
8147 if ((insn
& (1 << 22)) == 0) {
8149 if (!arm_dc_feature(s
, ARM_FEATURE_V7MP
)) {
8153 /* Otherwise PLD; v5TE+ */
8157 if (((insn
& 0x0f70f000) == 0x0450f000) ||
8158 ((insn
& 0x0f70f010) == 0x0650f000)) {
8160 return; /* PLI; V7 */
8162 if (((insn
& 0x0f700000) == 0x04100000) ||
8163 ((insn
& 0x0f700010) == 0x06100000)) {
8164 if (!arm_dc_feature(s
, ARM_FEATURE_V7MP
)) {
8167 return; /* v7MP: Unallocated memory hint: must NOP */
8170 if ((insn
& 0x0ffffdff) == 0x01010000) {
8173 if (((insn
>> 9) & 1) != !!(s
->be_data
== MO_BE
)) {
8174 gen_helper_setend(cpu_env
);
8175 s
->base
.is_jmp
= DISAS_UPDATE
;
8178 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
8179 switch ((insn
>> 4) & 0xf) {
8187 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
8190 /* We need to break the TB after this insn to execute
8191 * self-modifying code correctly and also to take
8192 * any pending interrupts immediately.
8194 gen_goto_tb(s
, 0, s
->pc
& ~1);
8199 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
8202 gen_srs(s
, (insn
& 0x1f), (insn
>> 23) & 3, insn
& (1 << 21));
8204 } else if ((insn
& 0x0e50ffe0) == 0x08100a00) {
8210 rn
= (insn
>> 16) & 0xf;
8211 addr
= load_reg(s
, rn
);
8212 i
= (insn
>> 23) & 3;
8214 case 0: offset
= -4; break; /* DA */
8215 case 1: offset
= 0; break; /* IA */
8216 case 2: offset
= -8; break; /* DB */
8217 case 3: offset
= 4; break; /* IB */
8221 tcg_gen_addi_i32(addr
, addr
, offset
);
8222 /* Load PC into tmp and CPSR into tmp2. */
8223 tmp
= tcg_temp_new_i32();
8224 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
8225 tcg_gen_addi_i32(addr
, addr
, 4);
8226 tmp2
= tcg_temp_new_i32();
8227 gen_aa32_ld32u(s
, tmp2
, addr
, get_mem_index(s
));
8228 if (insn
& (1 << 21)) {
8229 /* Base writeback. */
8231 case 0: offset
= -8; break;
8232 case 1: offset
= 4; break;
8233 case 2: offset
= -4; break;
8234 case 3: offset
= 0; break;
8238 tcg_gen_addi_i32(addr
, addr
, offset
);
8239 store_reg(s
, rn
, addr
);
8241 tcg_temp_free_i32(addr
);
8243 gen_rfe(s
, tmp
, tmp2
);
8245 } else if ((insn
& 0x0e000000) == 0x0a000000) {
8246 /* branch link and change to thumb (blx <offset>) */
8249 val
= (uint32_t)s
->pc
;
8250 tmp
= tcg_temp_new_i32();
8251 tcg_gen_movi_i32(tmp
, val
);
8252 store_reg(s
, 14, tmp
);
8253 /* Sign-extend the 24-bit offset */
8254 offset
= (((int32_t)insn
) << 8) >> 8;
8255 /* offset * 4 + bit24 * 2 + (thumb bit) */
8256 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
8257 /* pipeline offset */
8259 /* protected by ARCH(5); above, near the start of uncond block */
8262 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
8263 if (arm_dc_feature(s
, ARM_FEATURE_IWMMXT
)) {
8264 /* iWMMXt register transfer. */
8265 if (extract32(s
->c15_cpar
, 1, 1)) {
8266 if (!disas_iwmmxt_insn(s
, insn
)) {
8271 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
8272 /* Coprocessor double register transfer. */
8274 } else if ((insn
& 0x0f000010) == 0x0e000010) {
8275 /* Additional coprocessor register transfer. */
8276 } else if ((insn
& 0x0ff10020) == 0x01000000) {
8279 /* cps (privileged) */
8283 if (insn
& (1 << 19)) {
8284 if (insn
& (1 << 8))
8286 if (insn
& (1 << 7))
8288 if (insn
& (1 << 6))
8290 if (insn
& (1 << 18))
8293 if (insn
& (1 << 17)) {
8295 val
|= (insn
& 0x1f);
8298 gen_set_psr_im(s
, mask
, 0, val
);
8305 /* if not always execute, we generate a conditional jump to
8307 s
->condlabel
= gen_new_label();
8308 arm_gen_test_cc(cond
^ 1, s
->condlabel
);
8311 if ((insn
& 0x0f900000) == 0x03000000) {
8312 if ((insn
& (1 << 21)) == 0) {
8314 rd
= (insn
>> 12) & 0xf;
8315 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
8316 if ((insn
& (1 << 22)) == 0) {
8318 tmp
= tcg_temp_new_i32();
8319 tcg_gen_movi_i32(tmp
, val
);
8322 tmp
= load_reg(s
, rd
);
8323 tcg_gen_ext16u_i32(tmp
, tmp
);
8324 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
8326 store_reg(s
, rd
, tmp
);
8328 if (((insn
>> 12) & 0xf) != 0xf)
8330 if (((insn
>> 16) & 0xf) == 0) {
8331 gen_nop_hint(s
, insn
& 0xff);
8333 /* CPSR = immediate */
8335 shift
= ((insn
>> 8) & 0xf) * 2;
8337 val
= (val
>> shift
) | (val
<< (32 - shift
));
8338 i
= ((insn
& (1 << 22)) != 0);
8339 if (gen_set_psr_im(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
),
8345 } else if ((insn
& 0x0f900000) == 0x01000000
8346 && (insn
& 0x00000090) != 0x00000090) {
8347 /* miscellaneous instructions */
8348 op1
= (insn
>> 21) & 3;
8349 sh
= (insn
>> 4) & 0xf;
8352 case 0x0: /* MSR, MRS */
8353 if (insn
& (1 << 9)) {
8354 /* MSR (banked) and MRS (banked) */
8355 int sysm
= extract32(insn
, 16, 4) |
8356 (extract32(insn
, 8, 1) << 4);
8357 int r
= extract32(insn
, 22, 1);
8361 gen_msr_banked(s
, r
, sysm
, rm
);
8364 int rd
= extract32(insn
, 12, 4);
8366 gen_mrs_banked(s
, r
, sysm
, rd
);
8371 /* MSR, MRS (for PSRs) */
8374 tmp
= load_reg(s
, rm
);
8375 i
= ((op1
& 2) != 0);
8376 if (gen_set_psr(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
8380 rd
= (insn
>> 12) & 0xf;
8384 tmp
= load_cpu_field(spsr
);
8386 tmp
= tcg_temp_new_i32();
8387 gen_helper_cpsr_read(tmp
, cpu_env
);
8389 store_reg(s
, rd
, tmp
);
8394 /* branch/exchange thumb (bx). */
8396 tmp
= load_reg(s
, rm
);
8398 } else if (op1
== 3) {
8401 rd
= (insn
>> 12) & 0xf;
8402 tmp
= load_reg(s
, rm
);
8403 tcg_gen_clzi_i32(tmp
, tmp
, 32);
8404 store_reg(s
, rd
, tmp
);
8412 /* Trivial implementation equivalent to bx. */
8413 tmp
= load_reg(s
, rm
);
8424 /* branch link/exchange thumb (blx) */
8425 tmp
= load_reg(s
, rm
);
8426 tmp2
= tcg_temp_new_i32();
8427 tcg_gen_movi_i32(tmp2
, s
->pc
);
8428 store_reg(s
, 14, tmp2
);
8434 uint32_t c
= extract32(insn
, 8, 4);
8436 /* Check this CPU supports ARMv8 CRC instructions.
8437 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8438 * Bits 8, 10 and 11 should be zero.
8440 if (!arm_dc_feature(s
, ARM_FEATURE_CRC
) || op1
== 0x3 ||
8445 rn
= extract32(insn
, 16, 4);
8446 rd
= extract32(insn
, 12, 4);
8448 tmp
= load_reg(s
, rn
);
8449 tmp2
= load_reg(s
, rm
);
8451 tcg_gen_andi_i32(tmp2
, tmp2
, 0xff);
8452 } else if (op1
== 1) {
8453 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff);
8455 tmp3
= tcg_const_i32(1 << op1
);
8457 gen_helper_crc32c(tmp
, tmp
, tmp2
, tmp3
);
8459 gen_helper_crc32(tmp
, tmp
, tmp2
, tmp3
);
8461 tcg_temp_free_i32(tmp2
);
8462 tcg_temp_free_i32(tmp3
);
8463 store_reg(s
, rd
, tmp
);
8466 case 0x5: /* saturating add/subtract */
8468 rd
= (insn
>> 12) & 0xf;
8469 rn
= (insn
>> 16) & 0xf;
8470 tmp
= load_reg(s
, rm
);
8471 tmp2
= load_reg(s
, rn
);
8473 gen_helper_double_saturate(tmp2
, cpu_env
, tmp2
);
8475 gen_helper_sub_saturate(tmp
, cpu_env
, tmp
, tmp2
);
8477 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp2
);
8478 tcg_temp_free_i32(tmp2
);
8479 store_reg(s
, rd
, tmp
);
8483 int imm16
= extract32(insn
, 0, 4) | (extract32(insn
, 8, 12) << 4);
8492 gen_exception_insn(s
, 4, EXCP_BKPT
,
8493 syn_aa32_bkpt(imm16
, false),
8494 default_exception_el(s
));
8497 /* Hypervisor call (v7) */
8505 /* Secure monitor call (v6+) */
8513 g_assert_not_reached();
8517 case 0x8: /* signed multiply */
8522 rs
= (insn
>> 8) & 0xf;
8523 rn
= (insn
>> 12) & 0xf;
8524 rd
= (insn
>> 16) & 0xf;
8526 /* (32 * 16) >> 16 */
8527 tmp
= load_reg(s
, rm
);
8528 tmp2
= load_reg(s
, rs
);
8530 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
8533 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8534 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
8535 tmp
= tcg_temp_new_i32();
8536 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
8537 tcg_temp_free_i64(tmp64
);
8538 if ((sh
& 2) == 0) {
8539 tmp2
= load_reg(s
, rn
);
8540 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8541 tcg_temp_free_i32(tmp2
);
8543 store_reg(s
, rd
, tmp
);
8546 tmp
= load_reg(s
, rm
);
8547 tmp2
= load_reg(s
, rs
);
8548 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
8549 tcg_temp_free_i32(tmp2
);
8551 tmp64
= tcg_temp_new_i64();
8552 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8553 tcg_temp_free_i32(tmp
);
8554 gen_addq(s
, tmp64
, rn
, rd
);
8555 gen_storeq_reg(s
, rn
, rd
, tmp64
);
8556 tcg_temp_free_i64(tmp64
);
8559 tmp2
= load_reg(s
, rn
);
8560 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8561 tcg_temp_free_i32(tmp2
);
8563 store_reg(s
, rd
, tmp
);
8570 } else if (((insn
& 0x0e000000) == 0 &&
8571 (insn
& 0x00000090) != 0x90) ||
8572 ((insn
& 0x0e000000) == (1 << 25))) {
8573 int set_cc
, logic_cc
, shiftop
;
8575 op1
= (insn
>> 21) & 0xf;
8576 set_cc
= (insn
>> 20) & 1;
8577 logic_cc
= table_logic_cc
[op1
] & set_cc
;
8579 /* data processing instruction */
8580 if (insn
& (1 << 25)) {
8581 /* immediate operand */
8583 shift
= ((insn
>> 8) & 0xf) * 2;
8585 val
= (val
>> shift
) | (val
<< (32 - shift
));
8587 tmp2
= tcg_temp_new_i32();
8588 tcg_gen_movi_i32(tmp2
, val
);
8589 if (logic_cc
&& shift
) {
8590 gen_set_CF_bit31(tmp2
);
8595 tmp2
= load_reg(s
, rm
);
8596 shiftop
= (insn
>> 5) & 3;
8597 if (!(insn
& (1 << 4))) {
8598 shift
= (insn
>> 7) & 0x1f;
8599 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
8601 rs
= (insn
>> 8) & 0xf;
8602 tmp
= load_reg(s
, rs
);
8603 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
8606 if (op1
!= 0x0f && op1
!= 0x0d) {
8607 rn
= (insn
>> 16) & 0xf;
8608 tmp
= load_reg(s
, rn
);
8610 TCGV_UNUSED_I32(tmp
);
8612 rd
= (insn
>> 12) & 0xf;
8615 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
8619 store_reg_bx(s
, rd
, tmp
);
8622 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
8626 store_reg_bx(s
, rd
, tmp
);
8629 if (set_cc
&& rd
== 15) {
8630 /* SUBS r15, ... is used for exception return. */
8634 gen_sub_CC(tmp
, tmp
, tmp2
);
8635 gen_exception_return(s
, tmp
);
8638 gen_sub_CC(tmp
, tmp
, tmp2
);
8640 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8642 store_reg_bx(s
, rd
, tmp
);
8647 gen_sub_CC(tmp
, tmp2
, tmp
);
8649 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
8651 store_reg_bx(s
, rd
, tmp
);
8655 gen_add_CC(tmp
, tmp
, tmp2
);
8657 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8659 store_reg_bx(s
, rd
, tmp
);
8663 gen_adc_CC(tmp
, tmp
, tmp2
);
8665 gen_add_carry(tmp
, tmp
, tmp2
);
8667 store_reg_bx(s
, rd
, tmp
);
8671 gen_sbc_CC(tmp
, tmp
, tmp2
);
8673 gen_sub_carry(tmp
, tmp
, tmp2
);
8675 store_reg_bx(s
, rd
, tmp
);
8679 gen_sbc_CC(tmp
, tmp2
, tmp
);
8681 gen_sub_carry(tmp
, tmp2
, tmp
);
8683 store_reg_bx(s
, rd
, tmp
);
8687 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
8690 tcg_temp_free_i32(tmp
);
8694 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
8697 tcg_temp_free_i32(tmp
);
8701 gen_sub_CC(tmp
, tmp
, tmp2
);
8703 tcg_temp_free_i32(tmp
);
8707 gen_add_CC(tmp
, tmp
, tmp2
);
8709 tcg_temp_free_i32(tmp
);
8712 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
8716 store_reg_bx(s
, rd
, tmp
);
8719 if (logic_cc
&& rd
== 15) {
8720 /* MOVS r15, ... is used for exception return. */
8724 gen_exception_return(s
, tmp2
);
8729 store_reg_bx(s
, rd
, tmp2
);
8733 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
8737 store_reg_bx(s
, rd
, tmp
);
8741 tcg_gen_not_i32(tmp2
, tmp2
);
8745 store_reg_bx(s
, rd
, tmp2
);
8748 if (op1
!= 0x0f && op1
!= 0x0d) {
8749 tcg_temp_free_i32(tmp2
);
8752 /* other instructions */
8753 op1
= (insn
>> 24) & 0xf;
8757 /* multiplies, extra load/stores */
8758 sh
= (insn
>> 5) & 3;
8761 rd
= (insn
>> 16) & 0xf;
8762 rn
= (insn
>> 12) & 0xf;
8763 rs
= (insn
>> 8) & 0xf;
8765 op1
= (insn
>> 20) & 0xf;
8767 case 0: case 1: case 2: case 3: case 6:
8769 tmp
= load_reg(s
, rs
);
8770 tmp2
= load_reg(s
, rm
);
8771 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
8772 tcg_temp_free_i32(tmp2
);
8773 if (insn
& (1 << 22)) {
8774 /* Subtract (mls) */
8776 tmp2
= load_reg(s
, rn
);
8777 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
8778 tcg_temp_free_i32(tmp2
);
8779 } else if (insn
& (1 << 21)) {
8781 tmp2
= load_reg(s
, rn
);
8782 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8783 tcg_temp_free_i32(tmp2
);
8785 if (insn
& (1 << 20))
8787 store_reg(s
, rd
, tmp
);
8790 /* 64 bit mul double accumulate (UMAAL) */
8792 tmp
= load_reg(s
, rs
);
8793 tmp2
= load_reg(s
, rm
);
8794 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
8795 gen_addq_lo(s
, tmp64
, rn
);
8796 gen_addq_lo(s
, tmp64
, rd
);
8797 gen_storeq_reg(s
, rn
, rd
, tmp64
);
8798 tcg_temp_free_i64(tmp64
);
8800 case 8: case 9: case 10: case 11:
8801 case 12: case 13: case 14: case 15:
8802 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8803 tmp
= load_reg(s
, rs
);
8804 tmp2
= load_reg(s
, rm
);
8805 if (insn
& (1 << 22)) {
8806 tcg_gen_muls2_i32(tmp
, tmp2
, tmp
, tmp2
);
8808 tcg_gen_mulu2_i32(tmp
, tmp2
, tmp
, tmp2
);
8810 if (insn
& (1 << 21)) { /* mult accumulate */
8811 TCGv_i32 al
= load_reg(s
, rn
);
8812 TCGv_i32 ah
= load_reg(s
, rd
);
8813 tcg_gen_add2_i32(tmp
, tmp2
, tmp
, tmp2
, al
, ah
);
8814 tcg_temp_free_i32(al
);
8815 tcg_temp_free_i32(ah
);
8817 if (insn
& (1 << 20)) {
8818 gen_logicq_cc(tmp
, tmp2
);
8820 store_reg(s
, rn
, tmp
);
8821 store_reg(s
, rd
, tmp2
);
8827 rn
= (insn
>> 16) & 0xf;
8828 rd
= (insn
>> 12) & 0xf;
8829 if (insn
& (1 << 23)) {
8830 /* load/store exclusive */
8831 int op2
= (insn
>> 8) & 3;
8832 op1
= (insn
>> 21) & 0x3;
8835 case 0: /* lda/stl */
8841 case 1: /* reserved */
8843 case 2: /* ldaex/stlex */
8846 case 3: /* ldrex/strex */
8855 addr
= tcg_temp_local_new_i32();
8856 load_reg_var(s
, addr
, rn
);
8858 /* Since the emulation does not have barriers,
8859 the acquire/release semantics need no special
8862 if (insn
& (1 << 20)) {
8863 tmp
= tcg_temp_new_i32();
8866 gen_aa32_ld32u_iss(s
, tmp
, addr
,
8871 gen_aa32_ld8u_iss(s
, tmp
, addr
,
8876 gen_aa32_ld16u_iss(s
, tmp
, addr
,
8883 store_reg(s
, rd
, tmp
);
8886 tmp
= load_reg(s
, rm
);
8889 gen_aa32_st32_iss(s
, tmp
, addr
,
8894 gen_aa32_st8_iss(s
, tmp
, addr
,
8899 gen_aa32_st16_iss(s
, tmp
, addr
,
8906 tcg_temp_free_i32(tmp
);
8908 } else if (insn
& (1 << 20)) {
8911 gen_load_exclusive(s
, rd
, 15, addr
, 2);
8913 case 1: /* ldrexd */
8914 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
8916 case 2: /* ldrexb */
8917 gen_load_exclusive(s
, rd
, 15, addr
, 0);
8919 case 3: /* ldrexh */
8920 gen_load_exclusive(s
, rd
, 15, addr
, 1);
8929 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
8931 case 1: /* strexd */
8932 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
8934 case 2: /* strexb */
8935 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
8937 case 3: /* strexh */
8938 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
8944 tcg_temp_free_i32(addr
);
8947 TCGMemOp opc
= s
->be_data
;
8949 /* SWP instruction */
8952 if (insn
& (1 << 22)) {
8955 opc
|= MO_UL
| MO_ALIGN
;
8958 addr
= load_reg(s
, rn
);
8959 taddr
= gen_aa32_addr(s
, addr
, opc
);
8960 tcg_temp_free_i32(addr
);
8962 tmp
= load_reg(s
, rm
);
8963 tcg_gen_atomic_xchg_i32(tmp
, taddr
, tmp
,
8964 get_mem_index(s
), opc
);
8965 tcg_temp_free(taddr
);
8966 store_reg(s
, rd
, tmp
);
8971 bool load
= insn
& (1 << 20);
8972 bool wbit
= insn
& (1 << 21);
8973 bool pbit
= insn
& (1 << 24);
8974 bool doubleword
= false;
8977 /* Misc load/store */
8978 rn
= (insn
>> 16) & 0xf;
8979 rd
= (insn
>> 12) & 0xf;
8981 /* ISS not valid if writeback */
8982 issinfo
= (pbit
& !wbit
) ? rd
: ISSInvalid
;
8984 if (!load
&& (sh
& 2)) {
8988 /* UNPREDICTABLE; we choose to UNDEF */
8991 load
= (sh
& 1) == 0;
8995 addr
= load_reg(s
, rn
);
8997 gen_add_datah_offset(s
, insn
, 0, addr
);
9004 tmp
= load_reg(s
, rd
);
9005 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9006 tcg_temp_free_i32(tmp
);
9007 tcg_gen_addi_i32(addr
, addr
, 4);
9008 tmp
= load_reg(s
, rd
+ 1);
9009 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9010 tcg_temp_free_i32(tmp
);
9013 tmp
= tcg_temp_new_i32();
9014 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9015 store_reg(s
, rd
, tmp
);
9016 tcg_gen_addi_i32(addr
, addr
, 4);
9017 tmp
= tcg_temp_new_i32();
9018 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9021 address_offset
= -4;
9024 tmp
= tcg_temp_new_i32();
9027 gen_aa32_ld16u_iss(s
, tmp
, addr
, get_mem_index(s
),
9031 gen_aa32_ld8s_iss(s
, tmp
, addr
, get_mem_index(s
),
9036 gen_aa32_ld16s_iss(s
, tmp
, addr
, get_mem_index(s
),
9042 tmp
= load_reg(s
, rd
);
9043 gen_aa32_st16_iss(s
, tmp
, addr
, get_mem_index(s
), issinfo
);
9044 tcg_temp_free_i32(tmp
);
9046 /* Perform base writeback before the loaded value to
9047 ensure correct behavior with overlapping index registers.
9048 ldrd with base writeback is undefined if the
9049 destination and index registers overlap. */
9051 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
9052 store_reg(s
, rn
, addr
);
9055 tcg_gen_addi_i32(addr
, addr
, address_offset
);
9056 store_reg(s
, rn
, addr
);
9058 tcg_temp_free_i32(addr
);
9061 /* Complete the load. */
9062 store_reg(s
, rd
, tmp
);
9071 if (insn
& (1 << 4)) {
9073 /* Armv6 Media instructions. */
9075 rn
= (insn
>> 16) & 0xf;
9076 rd
= (insn
>> 12) & 0xf;
9077 rs
= (insn
>> 8) & 0xf;
9078 switch ((insn
>> 23) & 3) {
9079 case 0: /* Parallel add/subtract. */
9080 op1
= (insn
>> 20) & 7;
9081 tmp
= load_reg(s
, rn
);
9082 tmp2
= load_reg(s
, rm
);
9083 sh
= (insn
>> 5) & 7;
9084 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
9086 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
9087 tcg_temp_free_i32(tmp2
);
9088 store_reg(s
, rd
, tmp
);
9091 if ((insn
& 0x00700020) == 0) {
9092 /* Halfword pack. */
9093 tmp
= load_reg(s
, rn
);
9094 tmp2
= load_reg(s
, rm
);
9095 shift
= (insn
>> 7) & 0x1f;
9096 if (insn
& (1 << 6)) {
9100 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
9101 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
9102 tcg_gen_ext16u_i32(tmp2
, tmp2
);
9106 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
9107 tcg_gen_ext16u_i32(tmp
, tmp
);
9108 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
9110 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
9111 tcg_temp_free_i32(tmp2
);
9112 store_reg(s
, rd
, tmp
);
9113 } else if ((insn
& 0x00200020) == 0x00200000) {
9115 tmp
= load_reg(s
, rm
);
9116 shift
= (insn
>> 7) & 0x1f;
9117 if (insn
& (1 << 6)) {
9120 tcg_gen_sari_i32(tmp
, tmp
, shift
);
9122 tcg_gen_shli_i32(tmp
, tmp
, shift
);
9124 sh
= (insn
>> 16) & 0x1f;
9125 tmp2
= tcg_const_i32(sh
);
9126 if (insn
& (1 << 22))
9127 gen_helper_usat(tmp
, cpu_env
, tmp
, tmp2
);
9129 gen_helper_ssat(tmp
, cpu_env
, tmp
, tmp2
);
9130 tcg_temp_free_i32(tmp2
);
9131 store_reg(s
, rd
, tmp
);
9132 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
9134 tmp
= load_reg(s
, rm
);
9135 sh
= (insn
>> 16) & 0x1f;
9136 tmp2
= tcg_const_i32(sh
);
9137 if (insn
& (1 << 22))
9138 gen_helper_usat16(tmp
, cpu_env
, tmp
, tmp2
);
9140 gen_helper_ssat16(tmp
, cpu_env
, tmp
, tmp2
);
9141 tcg_temp_free_i32(tmp2
);
9142 store_reg(s
, rd
, tmp
);
9143 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
9145 tmp
= load_reg(s
, rn
);
9146 tmp2
= load_reg(s
, rm
);
9147 tmp3
= tcg_temp_new_i32();
9148 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
9149 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
9150 tcg_temp_free_i32(tmp3
);
9151 tcg_temp_free_i32(tmp2
);
9152 store_reg(s
, rd
, tmp
);
9153 } else if ((insn
& 0x000003e0) == 0x00000060) {
9154 tmp
= load_reg(s
, rm
);
9155 shift
= (insn
>> 10) & 3;
9156 /* ??? In many cases it's not necessary to do a
9157 rotate, a shift is sufficient. */
9159 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
9160 op1
= (insn
>> 20) & 7;
9162 case 0: gen_sxtb16(tmp
); break;
9163 case 2: gen_sxtb(tmp
); break;
9164 case 3: gen_sxth(tmp
); break;
9165 case 4: gen_uxtb16(tmp
); break;
9166 case 6: gen_uxtb(tmp
); break;
9167 case 7: gen_uxth(tmp
); break;
9168 default: goto illegal_op
;
9171 tmp2
= load_reg(s
, rn
);
9172 if ((op1
& 3) == 0) {
9173 gen_add16(tmp
, tmp2
);
9175 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9176 tcg_temp_free_i32(tmp2
);
9179 store_reg(s
, rd
, tmp
);
9180 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
9182 tmp
= load_reg(s
, rm
);
9183 if (insn
& (1 << 22)) {
9184 if (insn
& (1 << 7)) {
9188 gen_helper_rbit(tmp
, tmp
);
9191 if (insn
& (1 << 7))
9194 tcg_gen_bswap32_i32(tmp
, tmp
);
9196 store_reg(s
, rd
, tmp
);
9201 case 2: /* Multiplies (Type 3). */
9202 switch ((insn
>> 20) & 0x7) {
9204 if (((insn
>> 6) ^ (insn
>> 7)) & 1) {
9205 /* op2 not 00x or 11x : UNDEF */
9208 /* Signed multiply most significant [accumulate].
9209 (SMMUL, SMMLA, SMMLS) */
9210 tmp
= load_reg(s
, rm
);
9211 tmp2
= load_reg(s
, rs
);
9212 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
9215 tmp
= load_reg(s
, rd
);
9216 if (insn
& (1 << 6)) {
9217 tmp64
= gen_subq_msw(tmp64
, tmp
);
9219 tmp64
= gen_addq_msw(tmp64
, tmp
);
9222 if (insn
& (1 << 5)) {
9223 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
9225 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
9226 tmp
= tcg_temp_new_i32();
9227 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
9228 tcg_temp_free_i64(tmp64
);
9229 store_reg(s
, rn
, tmp
);
9233 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9234 if (insn
& (1 << 7)) {
9237 tmp
= load_reg(s
, rm
);
9238 tmp2
= load_reg(s
, rs
);
9239 if (insn
& (1 << 5))
9240 gen_swap_half(tmp2
);
9241 gen_smul_dual(tmp
, tmp2
);
9242 if (insn
& (1 << 22)) {
9243 /* smlald, smlsld */
9246 tmp64
= tcg_temp_new_i64();
9247 tmp64_2
= tcg_temp_new_i64();
9248 tcg_gen_ext_i32_i64(tmp64
, tmp
);
9249 tcg_gen_ext_i32_i64(tmp64_2
, tmp2
);
9250 tcg_temp_free_i32(tmp
);
9251 tcg_temp_free_i32(tmp2
);
9252 if (insn
& (1 << 6)) {
9253 tcg_gen_sub_i64(tmp64
, tmp64
, tmp64_2
);
9255 tcg_gen_add_i64(tmp64
, tmp64
, tmp64_2
);
9257 tcg_temp_free_i64(tmp64_2
);
9258 gen_addq(s
, tmp64
, rd
, rn
);
9259 gen_storeq_reg(s
, rd
, rn
, tmp64
);
9260 tcg_temp_free_i64(tmp64
);
9262 /* smuad, smusd, smlad, smlsd */
9263 if (insn
& (1 << 6)) {
9264 /* This subtraction cannot overflow. */
9265 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
9267 /* This addition cannot overflow 32 bits;
9268 * however it may overflow considered as a
9269 * signed operation, in which case we must set
9272 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
9274 tcg_temp_free_i32(tmp2
);
9277 tmp2
= load_reg(s
, rd
);
9278 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
9279 tcg_temp_free_i32(tmp2
);
9281 store_reg(s
, rn
, tmp
);
9287 if (!arm_dc_feature(s
, ARM_FEATURE_ARM_DIV
)) {
9290 if (((insn
>> 5) & 7) || (rd
!= 15)) {
9293 tmp
= load_reg(s
, rm
);
9294 tmp2
= load_reg(s
, rs
);
9295 if (insn
& (1 << 21)) {
9296 gen_helper_udiv(tmp
, tmp
, tmp2
);
9298 gen_helper_sdiv(tmp
, tmp
, tmp2
);
9300 tcg_temp_free_i32(tmp2
);
9301 store_reg(s
, rn
, tmp
);
9308 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
9310 case 0: /* Unsigned sum of absolute differences. */
9312 tmp
= load_reg(s
, rm
);
9313 tmp2
= load_reg(s
, rs
);
9314 gen_helper_usad8(tmp
, tmp
, tmp2
);
9315 tcg_temp_free_i32(tmp2
);
9317 tmp2
= load_reg(s
, rd
);
9318 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9319 tcg_temp_free_i32(tmp2
);
9321 store_reg(s
, rn
, tmp
);
9323 case 0x20: case 0x24: case 0x28: case 0x2c:
9324 /* Bitfield insert/clear. */
9326 shift
= (insn
>> 7) & 0x1f;
9327 i
= (insn
>> 16) & 0x1f;
9329 /* UNPREDICTABLE; we choose to UNDEF */
9334 tmp
= tcg_temp_new_i32();
9335 tcg_gen_movi_i32(tmp
, 0);
9337 tmp
= load_reg(s
, rm
);
9340 tmp2
= load_reg(s
, rd
);
9341 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, shift
, i
);
9342 tcg_temp_free_i32(tmp2
);
9344 store_reg(s
, rd
, tmp
);
9346 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9347 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
9349 tmp
= load_reg(s
, rm
);
9350 shift
= (insn
>> 7) & 0x1f;
9351 i
= ((insn
>> 16) & 0x1f) + 1;
9356 tcg_gen_extract_i32(tmp
, tmp
, shift
, i
);
9358 tcg_gen_sextract_i32(tmp
, tmp
, shift
, i
);
9361 store_reg(s
, rd
, tmp
);
9371 /* Check for undefined extension instructions
9372 * per the ARM Bible IE:
9373 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9375 sh
= (0xf << 20) | (0xf << 4);
9376 if (op1
== 0x7 && ((insn
& sh
) == sh
))
9380 /* load/store byte/word */
9381 rn
= (insn
>> 16) & 0xf;
9382 rd
= (insn
>> 12) & 0xf;
9383 tmp2
= load_reg(s
, rn
);
9384 if ((insn
& 0x01200000) == 0x00200000) {
9386 i
= get_a32_user_mem_index(s
);
9388 i
= get_mem_index(s
);
9390 if (insn
& (1 << 24))
9391 gen_add_data_offset(s
, insn
, tmp2
);
9392 if (insn
& (1 << 20)) {
9394 tmp
= tcg_temp_new_i32();
9395 if (insn
& (1 << 22)) {
9396 gen_aa32_ld8u_iss(s
, tmp
, tmp2
, i
, rd
);
9398 gen_aa32_ld32u_iss(s
, tmp
, tmp2
, i
, rd
);
9402 tmp
= load_reg(s
, rd
);
9403 if (insn
& (1 << 22)) {
9404 gen_aa32_st8_iss(s
, tmp
, tmp2
, i
, rd
);
9406 gen_aa32_st32_iss(s
, tmp
, tmp2
, i
, rd
);
9408 tcg_temp_free_i32(tmp
);
9410 if (!(insn
& (1 << 24))) {
9411 gen_add_data_offset(s
, insn
, tmp2
);
9412 store_reg(s
, rn
, tmp2
);
9413 } else if (insn
& (1 << 21)) {
9414 store_reg(s
, rn
, tmp2
);
9416 tcg_temp_free_i32(tmp2
);
9418 if (insn
& (1 << 20)) {
9419 /* Complete the load. */
9420 store_reg_from_load(s
, rd
, tmp
);
9426 int j
, n
, loaded_base
;
9427 bool exc_return
= false;
9428 bool is_load
= extract32(insn
, 20, 1);
9430 TCGv_i32 loaded_var
;
9431 /* load/store multiple words */
9432 /* XXX: store correct base if write back */
9433 if (insn
& (1 << 22)) {
9434 /* LDM (user), LDM (exception return) and STM (user) */
9436 goto illegal_op
; /* only usable in supervisor mode */
9438 if (is_load
&& extract32(insn
, 15, 1)) {
9444 rn
= (insn
>> 16) & 0xf;
9445 addr
= load_reg(s
, rn
);
9447 /* compute total size */
9449 TCGV_UNUSED_I32(loaded_var
);
9452 if (insn
& (1 << i
))
9455 /* XXX: test invalid n == 0 case ? */
9456 if (insn
& (1 << 23)) {
9457 if (insn
& (1 << 24)) {
9459 tcg_gen_addi_i32(addr
, addr
, 4);
9461 /* post increment */
9464 if (insn
& (1 << 24)) {
9466 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
9468 /* post decrement */
9470 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
9475 if (insn
& (1 << i
)) {
9478 tmp
= tcg_temp_new_i32();
9479 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9481 tmp2
= tcg_const_i32(i
);
9482 gen_helper_set_user_reg(cpu_env
, tmp2
, tmp
);
9483 tcg_temp_free_i32(tmp2
);
9484 tcg_temp_free_i32(tmp
);
9485 } else if (i
== rn
) {
9488 } else if (rn
== 15 && exc_return
) {
9489 store_pc_exc_ret(s
, tmp
);
9491 store_reg_from_load(s
, i
, tmp
);
9496 /* special case: r15 = PC + 8 */
9497 val
= (long)s
->pc
+ 4;
9498 tmp
= tcg_temp_new_i32();
9499 tcg_gen_movi_i32(tmp
, val
);
9501 tmp
= tcg_temp_new_i32();
9502 tmp2
= tcg_const_i32(i
);
9503 gen_helper_get_user_reg(tmp
, cpu_env
, tmp2
);
9504 tcg_temp_free_i32(tmp2
);
9506 tmp
= load_reg(s
, i
);
9508 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9509 tcg_temp_free_i32(tmp
);
9512 /* no need to add after the last transfer */
9514 tcg_gen_addi_i32(addr
, addr
, 4);
9517 if (insn
& (1 << 21)) {
9519 if (insn
& (1 << 23)) {
9520 if (insn
& (1 << 24)) {
9523 /* post increment */
9524 tcg_gen_addi_i32(addr
, addr
, 4);
9527 if (insn
& (1 << 24)) {
9530 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
9532 /* post decrement */
9533 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
9536 store_reg(s
, rn
, addr
);
9538 tcg_temp_free_i32(addr
);
9541 store_reg(s
, rn
, loaded_var
);
9544 /* Restore CPSR from SPSR. */
9545 tmp
= load_cpu_field(spsr
);
9546 gen_helper_cpsr_write_eret(cpu_env
, tmp
);
9547 tcg_temp_free_i32(tmp
);
9548 /* Must exit loop to check un-masked IRQs */
9549 s
->base
.is_jmp
= DISAS_EXIT
;
9558 /* branch (and link) */
9559 val
= (int32_t)s
->pc
;
9560 if (insn
& (1 << 24)) {
9561 tmp
= tcg_temp_new_i32();
9562 tcg_gen_movi_i32(tmp
, val
);
9563 store_reg(s
, 14, tmp
);
9565 offset
= sextract32(insn
<< 2, 0, 26);
9573 if (((insn
>> 8) & 0xe) == 10) {
9575 if (disas_vfp_insn(s
, insn
)) {
9578 } else if (disas_coproc_insn(s
, insn
)) {
9585 gen_set_pc_im(s
, s
->pc
);
9586 s
->svc_imm
= extract32(insn
, 0, 24);
9587 s
->base
.is_jmp
= DISAS_SWI
;
9591 gen_exception_insn(s
, 4, EXCP_UDEF
, syn_uncategorized(),
9592 default_exception_el(s
));
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    return (op < 8);
}

/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.  */
static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
                   TCGv_i32 t0, TCGv_i32 t1)
{
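    /* op here is bits [24:21] of the 32-bit Thumb encoding (the caller does
     * "op = (insn >> 21) & 0xf"); for flag-setting logical ops, a nonzero
     * shifter_out requests that the carry flag be set from the shifter
     * output via gen_set_CF_bit31() at the end of this function.
     */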
9620 tcg_gen_and_i32(t0
, t0
, t1
);
9624 tcg_gen_andc_i32(t0
, t0
, t1
);
9628 tcg_gen_or_i32(t0
, t0
, t1
);
9632 tcg_gen_orc_i32(t0
, t0
, t1
);
9636 tcg_gen_xor_i32(t0
, t0
, t1
);
9641 gen_add_CC(t0
, t0
, t1
);
9643 tcg_gen_add_i32(t0
, t0
, t1
);
9647 gen_adc_CC(t0
, t0
, t1
);
9653 gen_sbc_CC(t0
, t0
, t1
);
9655 gen_sub_carry(t0
, t0
, t1
);
9660 gen_sub_CC(t0
, t0
, t1
);
9662 tcg_gen_sub_i32(t0
, t0
, t1
);
9666 gen_sub_CC(t0
, t1
, t0
);
9668 tcg_gen_sub_i32(t0
, t1
, t0
);
9670 default: /* 5, 6, 7, 9, 12, 15. */
9676 gen_set_CF_bit31(t1
);
/* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
   is not legal.  */
static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
{
    uint32_t insn, imm, shift, offset;
    uint32_t rd, rn, rm, rs;
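    /* insn_hw1 is the first 16-bit halfword; for a true 32-bit encoding the
     * second halfword is fetched below and the two are combined as
     * (insn_hw1 << 16) | hw2 before the main decode switch.
     */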
9697 if (!(arm_dc_feature(s
, ARM_FEATURE_THUMB2
)
9698 || arm_dc_feature(s
, ARM_FEATURE_M
))) {
9699 /* Thumb-1 cores may need to treat bl and blx as a pair of
9700 16-bit instructions to get correct prefetch abort behavior. */
9702 if ((insn
& (1 << 12)) == 0) {
9704 /* Second half of blx. */
9705 offset
= ((insn
& 0x7ff) << 1);
9706 tmp
= load_reg(s
, 14);
9707 tcg_gen_addi_i32(tmp
, tmp
, offset
);
9708 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
9710 tmp2
= tcg_temp_new_i32();
9711 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
9712 store_reg(s
, 14, tmp2
);
9716 if (insn
& (1 << 11)) {
9717 /* Second half of bl. */
9718 offset
= ((insn
& 0x7ff) << 1) | 1;
9719 tmp
= load_reg(s
, 14);
9720 tcg_gen_addi_i32(tmp
, tmp
, offset
);
9722 tmp2
= tcg_temp_new_i32();
9723 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
9724 store_reg(s
, 14, tmp2
);
9728 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
9729 /* Instruction spans a page boundary. Implement it as two
9730 16-bit instructions in case the second half causes an
9732 offset
= ((int32_t)insn
<< 21) >> 9;
9733 tcg_gen_movi_i32(cpu_R
[14], s
->pc
+ 2 + offset
);
9736 /* Fall through to 32-bit decode. */
9739 insn
= arm_lduw_code(env
, s
->pc
, s
->sctlr_b
);
9741 insn
|= (uint32_t)insn_hw1
<< 16;
9743 if ((insn
& 0xf800e800) != 0xf000e800) {
9747 rn
= (insn
>> 16) & 0xf;
9748 rs
= (insn
>> 12) & 0xf;
9749 rd
= (insn
>> 8) & 0xf;
9751 switch ((insn
>> 25) & 0xf) {
9752 case 0: case 1: case 2: case 3:
9753 /* 16-bit instructions. Should never happen. */
9756 if (insn
& (1 << 22)) {
9757 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
9758 * - load/store doubleword, load/store exclusive, ldacq/strel,
9761 if (insn
& 0x01200000) {
9762 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9763 * - load/store dual (post-indexed)
9764 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
9765 * - load/store dual (literal and immediate)
9766 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9767 * - load/store dual (pre-indexed)
9770 if (insn
& (1 << 21)) {
9774 addr
= tcg_temp_new_i32();
9775 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
9777 addr
= load_reg(s
, rn
);
9779 offset
= (insn
& 0xff) * 4;
9780 if ((insn
& (1 << 23)) == 0)
9782 if (insn
& (1 << 24)) {
9783 tcg_gen_addi_i32(addr
, addr
, offset
);
9786 if (insn
& (1 << 20)) {
9788 tmp
= tcg_temp_new_i32();
9789 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9790 store_reg(s
, rs
, tmp
);
9791 tcg_gen_addi_i32(addr
, addr
, 4);
9792 tmp
= tcg_temp_new_i32();
9793 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9794 store_reg(s
, rd
, tmp
);
9797 tmp
= load_reg(s
, rs
);
9798 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9799 tcg_temp_free_i32(tmp
);
9800 tcg_gen_addi_i32(addr
, addr
, 4);
9801 tmp
= load_reg(s
, rd
);
9802 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9803 tcg_temp_free_i32(tmp
);
9805 if (insn
& (1 << 21)) {
9806 /* Base writeback. */
9807 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
9808 store_reg(s
, rn
, addr
);
9810 tcg_temp_free_i32(addr
);
9812 } else if ((insn
& (1 << 23)) == 0) {
9813 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
9814 * - load/store exclusive word
9819 addr
= tcg_temp_local_new_i32();
9820 load_reg_var(s
, addr
, rn
);
9821 tcg_gen_addi_i32(addr
, addr
, (insn
& 0xff) << 2);
9822 if (insn
& (1 << 20)) {
9823 gen_load_exclusive(s
, rs
, 15, addr
, 2);
9825 gen_store_exclusive(s
, rd
, rs
, 15, addr
, 2);
9827 tcg_temp_free_i32(addr
);
9828 } else if ((insn
& (7 << 5)) == 0) {
9831 addr
= tcg_temp_new_i32();
9832 tcg_gen_movi_i32(addr
, s
->pc
);
9834 addr
= load_reg(s
, rn
);
9836 tmp
= load_reg(s
, rm
);
9837 tcg_gen_add_i32(addr
, addr
, tmp
);
9838 if (insn
& (1 << 4)) {
9840 tcg_gen_add_i32(addr
, addr
, tmp
);
9841 tcg_temp_free_i32(tmp
);
9842 tmp
= tcg_temp_new_i32();
9843 gen_aa32_ld16u(s
, tmp
, addr
, get_mem_index(s
));
9845 tcg_temp_free_i32(tmp
);
9846 tmp
= tcg_temp_new_i32();
9847 gen_aa32_ld8u(s
, tmp
, addr
, get_mem_index(s
));
9849 tcg_temp_free_i32(addr
);
9850 tcg_gen_shli_i32(tmp
, tmp
, 1);
9851 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
9852 store_reg(s
, 15, tmp
);
9854 int op2
= (insn
>> 6) & 0x3;
9855 op
= (insn
>> 4) & 0x3;
9860 /* Load/store exclusive byte/halfword/doubleword */
9867 /* Load-acquire/store-release */
9873 /* Load-acquire/store-release exclusive */
9877 addr
= tcg_temp_local_new_i32();
9878 load_reg_var(s
, addr
, rn
);
9880 if (insn
& (1 << 20)) {
9881 tmp
= tcg_temp_new_i32();
9884 gen_aa32_ld8u_iss(s
, tmp
, addr
, get_mem_index(s
),
9888 gen_aa32_ld16u_iss(s
, tmp
, addr
, get_mem_index(s
),
9892 gen_aa32_ld32u_iss(s
, tmp
, addr
, get_mem_index(s
),
9898 store_reg(s
, rs
, tmp
);
9900 tmp
= load_reg(s
, rs
);
9903 gen_aa32_st8_iss(s
, tmp
, addr
, get_mem_index(s
),
9907 gen_aa32_st16_iss(s
, tmp
, addr
, get_mem_index(s
),
9911 gen_aa32_st32_iss(s
, tmp
, addr
, get_mem_index(s
),
9917 tcg_temp_free_i32(tmp
);
9919 } else if (insn
& (1 << 20)) {
9920 gen_load_exclusive(s
, rs
, rd
, addr
, op
);
9922 gen_store_exclusive(s
, rm
, rs
, rd
, addr
, op
);
9924 tcg_temp_free_i32(addr
);
9927 /* Load/store multiple, RFE, SRS. */
9928 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
9929 /* RFE, SRS: not available in user mode or on M profile */
9930 if (IS_USER(s
) || arm_dc_feature(s
, ARM_FEATURE_M
)) {
9933 if (insn
& (1 << 20)) {
9935 addr
= load_reg(s
, rn
);
9936 if ((insn
& (1 << 24)) == 0)
9937 tcg_gen_addi_i32(addr
, addr
, -8);
9938 /* Load PC into tmp and CPSR into tmp2. */
9939 tmp
= tcg_temp_new_i32();
9940 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9941 tcg_gen_addi_i32(addr
, addr
, 4);
9942 tmp2
= tcg_temp_new_i32();
9943 gen_aa32_ld32u(s
, tmp2
, addr
, get_mem_index(s
));
9944 if (insn
& (1 << 21)) {
9945 /* Base writeback. */
9946 if (insn
& (1 << 24)) {
9947 tcg_gen_addi_i32(addr
, addr
, 4);
9949 tcg_gen_addi_i32(addr
, addr
, -4);
9951 store_reg(s
, rn
, addr
);
9953 tcg_temp_free_i32(addr
);
9955 gen_rfe(s
, tmp
, tmp2
);
9958 gen_srs(s
, (insn
& 0x1f), (insn
& (1 << 24)) ? 1 : 2,
9962 int i
, loaded_base
= 0;
9963 TCGv_i32 loaded_var
;
9964 /* Load/store multiple. */
9965 addr
= load_reg(s
, rn
);
9967 for (i
= 0; i
< 16; i
++) {
9968 if (insn
& (1 << i
))
9971 if (insn
& (1 << 24)) {
9972 tcg_gen_addi_i32(addr
, addr
, -offset
);
9975 TCGV_UNUSED_I32(loaded_var
);
9976 for (i
= 0; i
< 16; i
++) {
9977 if ((insn
& (1 << i
)) == 0)
9979 if (insn
& (1 << 20)) {
9981 tmp
= tcg_temp_new_i32();
9982 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9984 gen_bx_excret(s
, tmp
);
9985 } else if (i
== rn
) {
9989 store_reg(s
, i
, tmp
);
9993 tmp
= load_reg(s
, i
);
9994 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9995 tcg_temp_free_i32(tmp
);
9997 tcg_gen_addi_i32(addr
, addr
, 4);
10000 store_reg(s
, rn
, loaded_var
);
10002 if (insn
& (1 << 21)) {
10003 /* Base register writeback. */
10004 if (insn
& (1 << 24)) {
10005 tcg_gen_addi_i32(addr
, addr
, -offset
);
10007 /* Fault if writeback register is in register list. */
10008 if (insn
& (1 << rn
))
10010 store_reg(s
, rn
, addr
);
10012 tcg_temp_free_i32(addr
);
10019 op
= (insn
>> 21) & 0xf;
10021 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10024 /* Halfword pack. */
10025 tmp
= load_reg(s
, rn
);
10026 tmp2
= load_reg(s
, rm
);
10027 shift
= ((insn
>> 10) & 0x1c) | ((insn
>> 6) & 0x3);
10028 if (insn
& (1 << 5)) {
10032 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
10033 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
10034 tcg_gen_ext16u_i32(tmp2
, tmp2
);
10038 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
10039 tcg_gen_ext16u_i32(tmp
, tmp
);
10040 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
10042 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
10043 tcg_temp_free_i32(tmp2
);
10044 store_reg(s
, rd
, tmp
);
10046 /* Data processing register constant shift. */
10048 tmp
= tcg_temp_new_i32();
10049 tcg_gen_movi_i32(tmp
, 0);
10051 tmp
= load_reg(s
, rn
);
10053 tmp2
= load_reg(s
, rm
);
10055 shiftop
= (insn
>> 4) & 3;
10056 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
10057 conds
= (insn
& (1 << 20)) != 0;
10058 logic_cc
= (conds
&& thumb2_logic_op(op
));
10059 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
10060 if (gen_thumb2_data_op(s
, op
, conds
, 0, tmp
, tmp2
))
10062 tcg_temp_free_i32(tmp2
);
10064 store_reg(s
, rd
, tmp
);
10066 tcg_temp_free_i32(tmp
);
10070 case 13: /* Misc data processing. */
10071 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
10072 if (op
< 4 && (insn
& 0xf000) != 0xf000)
10075 case 0: /* Register controlled shift. */
10076 tmp
= load_reg(s
, rn
);
10077 tmp2
= load_reg(s
, rm
);
10078 if ((insn
& 0x70) != 0)
10080 op
= (insn
>> 21) & 3;
10081 logic_cc
= (insn
& (1 << 20)) != 0;
10082 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
10085 store_reg(s
, rd
, tmp
);
10087 case 1: /* Sign/zero extend. */
10088 op
= (insn
>> 20) & 7;
10090 case 0: /* SXTAH, SXTH */
10091 case 1: /* UXTAH, UXTH */
10092 case 4: /* SXTAB, SXTB */
10093 case 5: /* UXTAB, UXTB */
10095 case 2: /* SXTAB16, SXTB16 */
10096 case 3: /* UXTAB16, UXTB16 */
10097 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10105 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10109 tmp
= load_reg(s
, rm
);
10110 shift
= (insn
>> 4) & 3;
10111 /* ??? In many cases it's not necessary to do a
10112 rotate, a shift is sufficient. */
10114 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
10115 op
= (insn
>> 20) & 7;
10117 case 0: gen_sxth(tmp
); break;
10118 case 1: gen_uxth(tmp
); break;
10119 case 2: gen_sxtb16(tmp
); break;
10120 case 3: gen_uxtb16(tmp
); break;
10121 case 4: gen_sxtb(tmp
); break;
10122 case 5: gen_uxtb(tmp
); break;
10124 g_assert_not_reached();
10127 tmp2
= load_reg(s
, rn
);
10128 if ((op
>> 1) == 1) {
10129 gen_add16(tmp
, tmp2
);
10131 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10132 tcg_temp_free_i32(tmp2
);
10135 store_reg(s
, rd
, tmp
);
10137 case 2: /* SIMD add/subtract. */
10138 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10141 op
= (insn
>> 20) & 7;
10142 shift
= (insn
>> 4) & 7;
10143 if ((op
& 3) == 3 || (shift
& 3) == 3)
10145 tmp
= load_reg(s
, rn
);
10146 tmp2
= load_reg(s
, rm
);
10147 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
10148 tcg_temp_free_i32(tmp2
);
10149 store_reg(s
, rd
, tmp
);
10151 case 3: /* Other data processing. */
10152 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
10154 /* Saturating add/subtract. */
10155 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10158 tmp
= load_reg(s
, rn
);
10159 tmp2
= load_reg(s
, rm
);
10161 gen_helper_double_saturate(tmp
, cpu_env
, tmp
);
10163 gen_helper_sub_saturate(tmp
, cpu_env
, tmp2
, tmp
);
10165 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp2
);
10166 tcg_temp_free_i32(tmp2
);
10169 case 0x0a: /* rbit */
10170 case 0x08: /* rev */
10171 case 0x09: /* rev16 */
10172 case 0x0b: /* revsh */
10173 case 0x18: /* clz */
10175 case 0x10: /* sel */
10176 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10180 case 0x20: /* crc32/crc32c */
10186 if (!arm_dc_feature(s
, ARM_FEATURE_CRC
)) {
10193 tmp
= load_reg(s
, rn
);
10195 case 0x0a: /* rbit */
10196 gen_helper_rbit(tmp
, tmp
);
10198 case 0x08: /* rev */
10199 tcg_gen_bswap32_i32(tmp
, tmp
);
10201 case 0x09: /* rev16 */
10204 case 0x0b: /* revsh */
10207 case 0x10: /* sel */
10208 tmp2
= load_reg(s
, rm
);
10209 tmp3
= tcg_temp_new_i32();
10210 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
10211 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
10212 tcg_temp_free_i32(tmp3
);
10213 tcg_temp_free_i32(tmp2
);
10215 case 0x18: /* clz */
10216 tcg_gen_clzi_i32(tmp
, tmp
, 32);
10226 uint32_t sz
= op
& 0x3;
10227 uint32_t c
= op
& 0x8;
10229 tmp2
= load_reg(s
, rm
);
10231 tcg_gen_andi_i32(tmp2
, tmp2
, 0xff);
10232 } else if (sz
== 1) {
10233 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff);
10235 tmp3
= tcg_const_i32(1 << sz
);
10237 gen_helper_crc32c(tmp
, tmp
, tmp2
, tmp3
);
10239 gen_helper_crc32(tmp
, tmp
, tmp2
, tmp3
);
10241 tcg_temp_free_i32(tmp2
);
10242 tcg_temp_free_i32(tmp3
);
10246 g_assert_not_reached();
10249 store_reg(s
, rd
, tmp
);
10251 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
10252 switch ((insn
>> 20) & 7) {
10253 case 0: /* 32 x 32 -> 32 */
10254 case 7: /* Unsigned sum of absolute differences. */
10256 case 1: /* 16 x 16 -> 32 */
10257 case 2: /* Dual multiply add. */
10258 case 3: /* 32 * 16 -> 32msb */
10259 case 4: /* Dual multiply subtract. */
10260 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10261 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10266 op
= (insn
>> 4) & 0xf;
10267 tmp
= load_reg(s
, rn
);
10268 tmp2
= load_reg(s
, rm
);
10269 switch ((insn
>> 20) & 7) {
10270 case 0: /* 32 x 32 -> 32 */
10271 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
10272 tcg_temp_free_i32(tmp2
);
10274 tmp2
= load_reg(s
, rs
);
10276 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
10278 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10279 tcg_temp_free_i32(tmp2
);
10282 case 1: /* 16 x 16 -> 32 */
10283 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
10284 tcg_temp_free_i32(tmp2
);
10286 tmp2
= load_reg(s
, rs
);
10287 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
10288 tcg_temp_free_i32(tmp2
);
10291 case 2: /* Dual multiply add. */
10292 case 4: /* Dual multiply subtract. */
10294 gen_swap_half(tmp2
);
10295 gen_smul_dual(tmp
, tmp2
);
10296 if (insn
& (1 << 22)) {
10297 /* This subtraction cannot overflow. */
10298 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
10300 /* This addition cannot overflow 32 bits;
10301 * however it may overflow considered as a signed
10302 * operation, in which case we must set the Q flag.
10304 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
10306 tcg_temp_free_i32(tmp2
);
10309 tmp2
= load_reg(s
, rs
);
10310 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
10311 tcg_temp_free_i32(tmp2
);
10314 case 3: /* 32 * 16 -> 32msb */
10316 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
10319 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
10320 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
10321 tmp
= tcg_temp_new_i32();
10322 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
10323 tcg_temp_free_i64(tmp64
);
10326 tmp2
= load_reg(s
, rs
);
10327 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
10328 tcg_temp_free_i32(tmp2
);
10331 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10332 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
10334 tmp
= load_reg(s
, rs
);
10335 if (insn
& (1 << 20)) {
10336 tmp64
= gen_addq_msw(tmp64
, tmp
);
10338 tmp64
= gen_subq_msw(tmp64
, tmp
);
10341 if (insn
& (1 << 4)) {
10342 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
10344 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
10345 tmp
= tcg_temp_new_i32();
10346 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
10347 tcg_temp_free_i64(tmp64
);
10349 case 7: /* Unsigned sum of absolute differences. */
10350 gen_helper_usad8(tmp
, tmp
, tmp2
);
10351 tcg_temp_free_i32(tmp2
);
10353 tmp2
= load_reg(s
, rs
);
10354 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10355 tcg_temp_free_i32(tmp2
);
10359 store_reg(s
, rd
, tmp
);
10361 case 6: case 7: /* 64-bit multiply, Divide. */
10362 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
10363 tmp
= load_reg(s
, rn
);
10364 tmp2
= load_reg(s
, rm
);
10365 if ((op
& 0x50) == 0x10) {
10367 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DIV
)) {
10371 gen_helper_udiv(tmp
, tmp
, tmp2
);
10373 gen_helper_sdiv(tmp
, tmp
, tmp2
);
10374 tcg_temp_free_i32(tmp2
);
10375 store_reg(s
, rd
, tmp
);
10376 } else if ((op
& 0xe) == 0xc) {
10377 /* Dual multiply accumulate long. */
10378 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10379 tcg_temp_free_i32(tmp
);
10380 tcg_temp_free_i32(tmp2
);
10384 gen_swap_half(tmp2
);
10385 gen_smul_dual(tmp
, tmp2
);
10387 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
10389 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10391 tcg_temp_free_i32(tmp2
);
10393 tmp64
= tcg_temp_new_i64();
10394 tcg_gen_ext_i32_i64(tmp64
, tmp
);
10395 tcg_temp_free_i32(tmp
);
10396 gen_addq(s
, tmp64
, rs
, rd
);
10397 gen_storeq_reg(s
, rs
, rd
, tmp64
);
10398 tcg_temp_free_i64(tmp64
);
10401 /* Unsigned 64-bit multiply */
10402 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
10406 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10407 tcg_temp_free_i32(tmp2
);
10408 tcg_temp_free_i32(tmp
);
10411 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
10412 tcg_temp_free_i32(tmp2
);
10413 tmp64
= tcg_temp_new_i64();
10414 tcg_gen_ext_i32_i64(tmp64
, tmp
);
10415 tcg_temp_free_i32(tmp
);
10417 /* Signed 64-bit multiply */
10418 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
10423 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10424 tcg_temp_free_i64(tmp64
);
10427 gen_addq_lo(s
, tmp64
, rs
);
10428 gen_addq_lo(s
, tmp64
, rd
);
10429 } else if (op
& 0x40) {
10430 /* 64-bit accumulate. */
10431 gen_addq(s
, tmp64
, rs
, rd
);
10433 gen_storeq_reg(s
, rs
, rd
, tmp64
);
10434 tcg_temp_free_i64(tmp64
);
10439 case 6: case 7: case 14: case 15:
10441 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10442 /* We don't currently implement M profile FP support,
10443 * so this entire space should give a NOCP fault.
10445 gen_exception_insn(s
, 4, EXCP_NOCP
, syn_uncategorized(),
10446 default_exception_el(s
));
10449 if (((insn
>> 24) & 3) == 3) {
10450 /* Translate into the equivalent ARM encoding. */
10451 insn
= (insn
& 0xe2ffffff) | ((insn
& (1 << 28)) >> 4) | (1 << 28);
10452 if (disas_neon_data_insn(s
, insn
)) {
10455 } else if (((insn
>> 8) & 0xe) == 10) {
10456 if (disas_vfp_insn(s
, insn
)) {
10460 if (insn
& (1 << 28))
10462 if (disas_coproc_insn(s
, insn
)) {
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;
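                /* Note: taken together, these steps compute the BL/BLX
                 * immediate as SignExtend(S:I1:I2:imm10:imm11:'0'), where
                 * I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S); the two XORs
                 * above recover I1/I2 from hw2 bits 13 and 11.
                 */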
                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                }

                offset += s->pc;
                if (insn & (1 << 12)) {
                    /* b/bl */
                    gen_jmp(s, offset);
                } else {
                    offset &= ~(uint32_t)2;
                    /* thumb2 bx, no need to check */
                    gen_bx_im(s, offset);
                }
10497 } else if (((insn
>> 23) & 7) == 7) {
10499 if (insn
& (1 << 13))
10502 if (insn
& (1 << 26)) {
10503 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10506 if (!(insn
& (1 << 20))) {
10507 /* Hypervisor call (v7) */
10508 int imm16
= extract32(insn
, 16, 4) << 12
10509 | extract32(insn
, 0, 12);
10516 /* Secure monitor call (v6+) */
10524 op
= (insn
>> 20) & 7;
10526 case 0: /* msr cpsr. */
10527 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10528 tmp
= load_reg(s
, rn
);
10529 /* the constant is the mask and SYSm fields */
10530 addr
= tcg_const_i32(insn
& 0xfff);
10531 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
10532 tcg_temp_free_i32(addr
);
10533 tcg_temp_free_i32(tmp
);
10538 case 1: /* msr spsr. */
10539 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10543 if (extract32(insn
, 5, 1)) {
10545 int sysm
= extract32(insn
, 8, 4) |
10546 (extract32(insn
, 4, 1) << 4);
10549 gen_msr_banked(s
, r
, sysm
, rm
);
10553 /* MSR (for PSRs) */
10554 tmp
= load_reg(s
, rn
);
10556 msr_mask(s
, (insn
>> 8) & 0xf, op
== 1),
10560 case 2: /* cps, nop-hint. */
10561 if (((insn
>> 8) & 7) == 0) {
10562 gen_nop_hint(s
, insn
& 0xff);
10564 /* Implemented as NOP in user mode. */
10569 if (insn
& (1 << 10)) {
10570 if (insn
& (1 << 7))
10572 if (insn
& (1 << 6))
10574 if (insn
& (1 << 5))
10576 if (insn
& (1 << 9))
10577 imm
= CPSR_A
| CPSR_I
| CPSR_F
;
10579 if (insn
& (1 << 8)) {
10581 imm
|= (insn
& 0x1f);
10584 gen_set_psr_im(s
, offset
, 0, imm
);
10587 case 3: /* Special control operations. */
10589 op
= (insn
>> 4) & 0xf;
10591 case 2: /* clrex */
10596 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
10599 /* We need to break the TB after this insn
10600 * to execute self-modifying code correctly
10601 * and also to take any pending interrupts
10604 gen_goto_tb(s
, 0, s
->pc
& ~1);
10611 /* Trivial implementation equivalent to bx.
10612 * This instruction doesn't exist at all for M-profile.
10614 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10617 tmp
= load_reg(s
, rn
);
10620 case 5: /* Exception return. */
10624 if (rn
!= 14 || rd
!= 15) {
10627 tmp
= load_reg(s
, rn
);
10628 tcg_gen_subi_i32(tmp
, tmp
, insn
& 0xff);
10629 gen_exception_return(s
, tmp
);
10632 if (extract32(insn
, 5, 1) &&
10633 !arm_dc_feature(s
, ARM_FEATURE_M
)) {
10635 int sysm
= extract32(insn
, 16, 4) |
10636 (extract32(insn
, 4, 1) << 4);
10638 gen_mrs_banked(s
, 0, sysm
, rd
);
10642 if (extract32(insn
, 16, 4) != 0xf) {
10645 if (!arm_dc_feature(s
, ARM_FEATURE_M
) &&
10646 extract32(insn
, 0, 8) != 0) {
10651 tmp
= tcg_temp_new_i32();
10652 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
10653 addr
= tcg_const_i32(insn
& 0xff);
10654 gen_helper_v7m_mrs(tmp
, cpu_env
, addr
);
10655 tcg_temp_free_i32(addr
);
10657 gen_helper_cpsr_read(tmp
, cpu_env
);
10659 store_reg(s
, rd
, tmp
);
10662 if (extract32(insn
, 5, 1) &&
10663 !arm_dc_feature(s
, ARM_FEATURE_M
)) {
10665 int sysm
= extract32(insn
, 16, 4) |
10666 (extract32(insn
, 4, 1) << 4);
10668 gen_mrs_banked(s
, 1, sysm
, rd
);
10673 /* Not accessible in user mode. */
10674 if (IS_USER(s
) || arm_dc_feature(s
, ARM_FEATURE_M
)) {
10678 if (extract32(insn
, 16, 4) != 0xf ||
10679 extract32(insn
, 0, 8) != 0) {
10683 tmp
= load_cpu_field(spsr
);
10684 store_reg(s
, rd
, tmp
);
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                s->condlabel = gen_new_label();
                arm_gen_test_cc(op ^ 1, s->condlabel);
                s->condjmp = 1;

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;

                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
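                /* Note: this assembles the conditional B<c>.W (encoding T3)
                 * immediate, SignExtend(S:J2:J1:imm6:imm11:'0'), giving
                 * these branches a +/-1MB range.
                 */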
10711 /* Data processing immediate. */
10712 if (insn
& (1 << 25)) {
10713 if (insn
& (1 << 24)) {
10714 if (insn
& (1 << 20))
10716 /* Bitfield/Saturate. */
10717 op
= (insn
>> 21) & 7;
10719 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
10721 tmp
= tcg_temp_new_i32();
10722 tcg_gen_movi_i32(tmp
, 0);
10724 tmp
= load_reg(s
, rn
);
10727 case 2: /* Signed bitfield extract. */
10729 if (shift
+ imm
> 32)
10732 tcg_gen_sextract_i32(tmp
, tmp
, shift
, imm
);
10735 case 6: /* Unsigned bitfield extract. */
10737 if (shift
+ imm
> 32)
10740 tcg_gen_extract_i32(tmp
, tmp
, shift
, imm
);
10743 case 3: /* Bitfield insert/clear. */
10746 imm
= imm
+ 1 - shift
;
10748 tmp2
= load_reg(s
, rd
);
10749 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, shift
, imm
);
10750 tcg_temp_free_i32(tmp2
);
10755 default: /* Saturate. */
10758 tcg_gen_sari_i32(tmp
, tmp
, shift
);
10760 tcg_gen_shli_i32(tmp
, tmp
, shift
);
10762 tmp2
= tcg_const_i32(imm
);
10765 if ((op
& 1) && shift
== 0) {
10766 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10767 tcg_temp_free_i32(tmp
);
10768 tcg_temp_free_i32(tmp2
);
10771 gen_helper_usat16(tmp
, cpu_env
, tmp
, tmp2
);
10773 gen_helper_usat(tmp
, cpu_env
, tmp
, tmp2
);
10777 if ((op
& 1) && shift
== 0) {
10778 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10779 tcg_temp_free_i32(tmp
);
10780 tcg_temp_free_i32(tmp2
);
10783 gen_helper_ssat16(tmp
, cpu_env
, tmp
, tmp2
);
10785 gen_helper_ssat(tmp
, cpu_env
, tmp
, tmp2
);
10788 tcg_temp_free_i32(tmp2
);
10791 store_reg(s
, rd
, tmp
);
10793 imm
= ((insn
& 0x04000000) >> 15)
10794 | ((insn
& 0x7000) >> 4) | (insn
& 0xff);
10795 if (insn
& (1 << 22)) {
10796 /* 16-bit immediate. */
10797 imm
|= (insn
>> 4) & 0xf000;
10798 if (insn
& (1 << 23)) {
10800 tmp
= load_reg(s
, rd
);
10801 tcg_gen_ext16u_i32(tmp
, tmp
);
10802 tcg_gen_ori_i32(tmp
, tmp
, imm
<< 16);
10805 tmp
= tcg_temp_new_i32();
10806 tcg_gen_movi_i32(tmp
, imm
);
10809 /* Add/sub 12-bit immediate. */
10811 offset
= s
->pc
& ~(uint32_t)3;
10812 if (insn
& (1 << 23))
10816 tmp
= tcg_temp_new_i32();
10817 tcg_gen_movi_i32(tmp
, offset
);
10819 tmp
= load_reg(s
, rn
);
10820 if (insn
& (1 << 23))
10821 tcg_gen_subi_i32(tmp
, tmp
, imm
);
10823 tcg_gen_addi_i32(tmp
, tmp
, imm
);
10826 store_reg(s
, rd
, tmp
);
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm |= imm << 16;
                    imm <<= 8;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    shifter_out = 1;
                    break;
                }
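                /* For example, imm8=0x2a with pattern 3 (XYXYXYXY) expands
                 * to 0x2a2a2a2a, while a "rotated constant" places
                 * 1:imm8[6:0] at the position selected by the 5-bit rotate
                 * count, with shifter_out feeding the carry flag for the
                 * logical data-processing ops.
                 */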
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                if (rn == 15) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                } else {
                    tmp = load_reg(s, rn);
                }
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                    goto illegal_op;
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                if (rd != 15) {
                    store_reg(s, rd, tmp);
                } else {
                    tcg_temp_free_i32(tmp);
                }
            }
            break;
10878 case 12: /* Load/store single data item. */
10885 if ((insn
& 0x01100000) == 0x01000000) {
10886 if (disas_neon_ls_insn(s
, insn
)) {
10891 op
= ((insn
>> 21) & 3) | ((insn
>> 22) & 4);
10893 if (!(insn
& (1 << 20))) {
10897 /* Byte or halfword load space with dest == r15 : memory hints.
10898 * Catch them early so we don't emit pointless addressing code.
10899 * This space is a mix of:
10900 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10901 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10903 * unallocated hints, which must be treated as NOPs
10904 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10905 * which is easiest for the decoding logic
10906 * Some space which must UNDEF
10908 int op1
= (insn
>> 23) & 3;
10909 int op2
= (insn
>> 6) & 0x3f;
10914 /* UNPREDICTABLE, unallocated hint or
10915 * PLD/PLDW/PLI (literal)
10920 return 0; /* PLD/PLDW/PLI or unallocated hint */
10922 if ((op2
== 0) || ((op2
& 0x3c) == 0x30)) {
10923 return 0; /* PLD/PLDW/PLI or unallocated hint */
10925 /* UNDEF space, or an UNPREDICTABLE */
10929 memidx
= get_mem_index(s
);
10931 addr
= tcg_temp_new_i32();
10933 /* s->pc has already been incremented by 4. */
10934 imm
= s
->pc
& 0xfffffffc;
10935 if (insn
& (1 << 23))
10936 imm
+= insn
& 0xfff;
10938 imm
-= insn
& 0xfff;
10939 tcg_gen_movi_i32(addr
, imm
);
10941 addr
= load_reg(s
, rn
);
10942 if (insn
& (1 << 23)) {
10943 /* Positive offset. */
10944 imm
= insn
& 0xfff;
10945 tcg_gen_addi_i32(addr
, addr
, imm
);
10948 switch ((insn
>> 8) & 0xf) {
10949 case 0x0: /* Shifted Register. */
10950 shift
= (insn
>> 4) & 0xf;
10952 tcg_temp_free_i32(addr
);
10955 tmp
= load_reg(s
, rm
);
10957 tcg_gen_shli_i32(tmp
, tmp
, shift
);
10958 tcg_gen_add_i32(addr
, addr
, tmp
);
10959 tcg_temp_free_i32(tmp
);
10961 case 0xc: /* Negative offset. */
10962 tcg_gen_addi_i32(addr
, addr
, -imm
);
10964 case 0xe: /* User privilege. */
10965 tcg_gen_addi_i32(addr
, addr
, imm
);
10966 memidx
= get_a32_user_mem_index(s
);
10968 case 0x9: /* Post-decrement. */
10970 /* Fall through. */
10971 case 0xb: /* Post-increment. */
10975 case 0xd: /* Pre-decrement. */
10977 /* Fall through. */
10978 case 0xf: /* Pre-increment. */
10979 tcg_gen_addi_i32(addr
, addr
, imm
);
10983 tcg_temp_free_i32(addr
);
10989 issinfo
= writeback
? ISSInvalid
: rs
;
10991 if (insn
& (1 << 20)) {
10993 tmp
= tcg_temp_new_i32();
10996 gen_aa32_ld8u_iss(s
, tmp
, addr
, memidx
, issinfo
);
10999 gen_aa32_ld8s_iss(s
, tmp
, addr
, memidx
, issinfo
);
11002 gen_aa32_ld16u_iss(s
, tmp
, addr
, memidx
, issinfo
);
11005 gen_aa32_ld16s_iss(s
, tmp
, addr
, memidx
, issinfo
);
11008 gen_aa32_ld32u_iss(s
, tmp
, addr
, memidx
, issinfo
);
11011 tcg_temp_free_i32(tmp
);
11012 tcg_temp_free_i32(addr
);
11016 gen_bx_excret(s
, tmp
);
11018 store_reg(s
, rs
, tmp
);
11022 tmp
= load_reg(s
, rs
);
11025 gen_aa32_st8_iss(s
, tmp
, addr
, memidx
, issinfo
);
11028 gen_aa32_st16_iss(s
, tmp
, addr
, memidx
, issinfo
);
11031 gen_aa32_st32_iss(s
, tmp
, addr
, memidx
, issinfo
);
11034 tcg_temp_free_i32(tmp
);
11035 tcg_temp_free_i32(addr
);
11038 tcg_temp_free_i32(tmp
);
11041 tcg_gen_addi_i32(addr
, addr
, imm
);
11043 store_reg(s
, rn
, addr
);
11045 tcg_temp_free_i32(addr
);
static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 addr;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            arm_gen_test_cc(cond ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    insn = arm_lduw_code(env, s->pc, s->sctlr_b);
    s->pc += 2;

    switch (insn >> 12) {
    case 0: case 1:
        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /* add/subtract */
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            }
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
            } else {
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* cmp */
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                break;
            case 2: /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* sub */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
                               rd | ISSIs16Bit);
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
11174 if (insn
& (1 << 10)) {
11175 /* 0b0100_01xx_xxxx_xxxx
11176 * - data processing extended, branch and exchange
11178 rd
= (insn
& 7) | ((insn
>> 4) & 8);
11179 rm
= (insn
>> 3) & 0xf;
11180 op
= (insn
>> 8) & 3;
11183 tmp
= load_reg(s
, rd
);
11184 tmp2
= load_reg(s
, rm
);
11185 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
11186 tcg_temp_free_i32(tmp2
);
11187 store_reg(s
, rd
, tmp
);
11190 tmp
= load_reg(s
, rd
);
11191 tmp2
= load_reg(s
, rm
);
11192 gen_sub_CC(tmp
, tmp
, tmp2
);
11193 tcg_temp_free_i32(tmp2
);
11194 tcg_temp_free_i32(tmp
);
11196 case 2: /* mov/cpy */
11197 tmp
= load_reg(s
, rm
);
11198 store_reg(s
, rd
, tmp
);
11202 /* 0b0100_0111_xxxx_xxxx
11203 * - branch [and link] exchange thumb register
11205 bool link
= insn
& (1 << 7);
11214 /* BXNS/BLXNS: only exists for v8M with the
11215 * security extensions, and always UNDEF if NonSecure.
11216 * We don't implement these in the user-only mode
11217 * either (in theory you can use them from Secure User
11218 * mode but they are too tied in to system emulation.)
11220 if (!s
->v8m_secure
|| IS_USER_ONLY
) {
11224 /* BLXNS: not yet implemented */
11232 tmp
= load_reg(s
, rm
);
11234 val
= (uint32_t)s
->pc
| 1;
11235 tmp2
= tcg_temp_new_i32();
11236 tcg_gen_movi_i32(tmp2
, val
);
11237 store_reg(s
, 14, tmp2
);
11240 /* Only BX works as exception-return, not BLX */
11241 gen_bx_excret(s
, tmp
);
11249 /* data processing register */
11251 rm
= (insn
>> 3) & 7;
11252 op
= (insn
>> 6) & 0xf;
11253 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
11254 /* the shift/rotate ops want the operands backwards */
11263 if (op
== 9) { /* neg */
11264 tmp
= tcg_temp_new_i32();
11265 tcg_gen_movi_i32(tmp
, 0);
11266 } else if (op
!= 0xf) { /* mvn doesn't read its first operand */
11267 tmp
= load_reg(s
, rd
);
11269 TCGV_UNUSED_I32(tmp
);
11272 tmp2
= load_reg(s
, rm
);
11274 case 0x0: /* and */
11275 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
11276 if (!s
->condexec_mask
)
11279 case 0x1: /* eor */
11280 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
11281 if (!s
->condexec_mask
)
11284 case 0x2: /* lsl */
11285 if (s
->condexec_mask
) {
11286 gen_shl(tmp2
, tmp2
, tmp
);
11288 gen_helper_shl_cc(tmp2
, cpu_env
, tmp2
, tmp
);
11289 gen_logic_CC(tmp2
);
11292 case 0x3: /* lsr */
11293 if (s
->condexec_mask
) {
11294 gen_shr(tmp2
, tmp2
, tmp
);
11296 gen_helper_shr_cc(tmp2
, cpu_env
, tmp2
, tmp
);
11297 gen_logic_CC(tmp2
);
11300 case 0x4: /* asr */
11301 if (s
->condexec_mask
) {
11302 gen_sar(tmp2
, tmp2
, tmp
);
11304 gen_helper_sar_cc(tmp2
, cpu_env
, tmp2
, tmp
);
11305 gen_logic_CC(tmp2
);
11308 case 0x5: /* adc */
11309 if (s
->condexec_mask
) {
11310 gen_adc(tmp
, tmp2
);
11312 gen_adc_CC(tmp
, tmp
, tmp2
);
11315 case 0x6: /* sbc */
11316 if (s
->condexec_mask
) {
11317 gen_sub_carry(tmp
, tmp
, tmp2
);
11319 gen_sbc_CC(tmp
, tmp
, tmp2
);
11322 case 0x7: /* ror */
11323 if (s
->condexec_mask
) {
11324 tcg_gen_andi_i32(tmp
, tmp
, 0x1f);
11325 tcg_gen_rotr_i32(tmp2
, tmp2
, tmp
);
11327 gen_helper_ror_cc(tmp2
, cpu_env
, tmp2
, tmp
);
11328 gen_logic_CC(tmp2
);
11331 case 0x8: /* tst */
11332 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
11336 case 0x9: /* neg */
11337 if (s
->condexec_mask
)
11338 tcg_gen_neg_i32(tmp
, tmp2
);
11340 gen_sub_CC(tmp
, tmp
, tmp2
);
11342 case 0xa: /* cmp */
11343 gen_sub_CC(tmp
, tmp
, tmp2
);
11346 case 0xb: /* cmn */
11347 gen_add_CC(tmp
, tmp
, tmp2
);
11350 case 0xc: /* orr */
11351 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
11352 if (!s
->condexec_mask
)
11355 case 0xd: /* mul */
11356 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
11357 if (!s
->condexec_mask
)
11360 case 0xe: /* bic */
11361 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
11362 if (!s
->condexec_mask
)
11365 case 0xf: /* mvn */
11366 tcg_gen_not_i32(tmp2
, tmp2
);
11367 if (!s
->condexec_mask
)
11368 gen_logic_CC(tmp2
);
11375 store_reg(s
, rm
, tmp2
);
11377 tcg_temp_free_i32(tmp
);
11379 store_reg(s
, rd
, tmp
);
11380 tcg_temp_free_i32(tmp2
);
11383 tcg_temp_free_i32(tmp
);
11384 tcg_temp_free_i32(tmp2
);
11389 /* load/store register offset. */
11391 rn
= (insn
>> 3) & 7;
11392 rm
= (insn
>> 6) & 7;
11393 op
= (insn
>> 9) & 7;
11394 addr
= load_reg(s
, rn
);
11395 tmp
= load_reg(s
, rm
);
11396 tcg_gen_add_i32(addr
, addr
, tmp
);
11397 tcg_temp_free_i32(tmp
);
11399 if (op
< 3) { /* store */
11400 tmp
= load_reg(s
, rd
);
11402 tmp
= tcg_temp_new_i32();
11407 gen_aa32_st32_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11410 gen_aa32_st16_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11413 gen_aa32_st8_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11415 case 3: /* ldrsb */
11416 gen_aa32_ld8s_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11419 gen_aa32_ld32u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11422 gen_aa32_ld16u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11425 gen_aa32_ld8u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11427 case 7: /* ldrsh */
11428 gen_aa32_ld16s_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11431 if (op
>= 3) { /* load */
11432 store_reg(s
, rd
, tmp
);
11434 tcg_temp_free_i32(tmp
);
11436 tcg_temp_free_i32(addr
);
11440 /* load/store word immediate offset */
11442 rn
= (insn
>> 3) & 7;
11443 addr
= load_reg(s
, rn
);
11444 val
= (insn
>> 4) & 0x7c;
11445 tcg_gen_addi_i32(addr
, addr
, val
);
11447 if (insn
& (1 << 11)) {
11449 tmp
= tcg_temp_new_i32();
11450 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
11451 store_reg(s
, rd
, tmp
);
11454 tmp
= load_reg(s
, rd
);
11455 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
11456 tcg_temp_free_i32(tmp
);
11458 tcg_temp_free_i32(addr
);
11462 /* load/store byte immediate offset */
11464 rn
= (insn
>> 3) & 7;
11465 addr
= load_reg(s
, rn
);
11466 val
= (insn
>> 6) & 0x1f;
11467 tcg_gen_addi_i32(addr
, addr
, val
);
11469 if (insn
& (1 << 11)) {
11471 tmp
= tcg_temp_new_i32();
11472 gen_aa32_ld8u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11473 store_reg(s
, rd
, tmp
);
11476 tmp
= load_reg(s
, rd
);
11477 gen_aa32_st8_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11478 tcg_temp_free_i32(tmp
);
11480 tcg_temp_free_i32(addr
);
11484 /* load/store halfword immediate offset */
11486 rn
= (insn
>> 3) & 7;
11487 addr
= load_reg(s
, rn
);
11488 val
= (insn
>> 5) & 0x3e;
11489 tcg_gen_addi_i32(addr
, addr
, val
);
11491 if (insn
& (1 << 11)) {
11493 tmp
= tcg_temp_new_i32();
11494 gen_aa32_ld16u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11495 store_reg(s
, rd
, tmp
);
11498 tmp
= load_reg(s
, rd
);
11499 gen_aa32_st16_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11500 tcg_temp_free_i32(tmp
);
11502 tcg_temp_free_i32(addr
);
11506 /* load/store from stack */
11507 rd
= (insn
>> 8) & 7;
11508 addr
= load_reg(s
, 13);
11509 val
= (insn
& 0xff) * 4;
11510 tcg_gen_addi_i32(addr
, addr
, val
);
11512 if (insn
& (1 << 11)) {
11514 tmp
= tcg_temp_new_i32();
11515 gen_aa32_ld32u_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11516 store_reg(s
, rd
, tmp
);
11519 tmp
= load_reg(s
, rd
);
11520 gen_aa32_st32_iss(s
, tmp
, addr
, get_mem_index(s
), rd
| ISSIs16Bit
);
11521 tcg_temp_free_i32(tmp
);
11523 tcg_temp_free_i32(addr
);
11527 /* add to high reg */
11528 rd
= (insn
>> 8) & 7;
11529 if (insn
& (1 << 11)) {
11531 tmp
= load_reg(s
, 13);
11533 /* PC. bit 1 is ignored. */
11534 tmp
= tcg_temp_new_i32();
11535 tcg_gen_movi_i32(tmp
, (s
->pc
+ 2) & ~(uint32_t)2);
11537 val
= (insn
& 0xff) * 4;
11538 tcg_gen_addi_i32(tmp
, tmp
, val
);
11539 store_reg(s
, rd
, tmp
);
11544 op
= (insn
>> 8) & 0xf;
11547 /* adjust stack pointer */
11548 tmp
= load_reg(s
, 13);
11549 val
= (insn
& 0x7f) * 4;
11550 if (insn
& (1 << 7))
11551 val
= -(int32_t)val
;
11552 tcg_gen_addi_i32(tmp
, tmp
, val
);
11553 store_reg(s
, 13, tmp
);
11556 case 2: /* sign/zero extend. */
11559 rm
= (insn
>> 3) & 7;
11560 tmp
= load_reg(s
, rm
);
11561 switch ((insn
>> 6) & 3) {
11562 case 0: gen_sxth(tmp
); break;
11563 case 1: gen_sxtb(tmp
); break;
11564 case 2: gen_uxth(tmp
); break;
11565 case 3: gen_uxtb(tmp
); break;
11567 store_reg(s
, rd
, tmp
);
11569 case 4: case 5: case 0xc: case 0xd:
11571 addr
= load_reg(s
, 13);
11572 if (insn
& (1 << 8))
11576 for (i
= 0; i
< 8; i
++) {
11577 if (insn
& (1 << i
))
11580 if ((insn
& (1 << 11)) == 0) {
11581 tcg_gen_addi_i32(addr
, addr
, -offset
);
11583 for (i
= 0; i
< 8; i
++) {
11584 if (insn
& (1 << i
)) {
11585 if (insn
& (1 << 11)) {
11587 tmp
= tcg_temp_new_i32();
11588 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
11589 store_reg(s
, i
, tmp
);
11592 tmp
= load_reg(s
, i
);
11593 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
11594 tcg_temp_free_i32(tmp
);
11596 /* advance to the next address. */
11597 tcg_gen_addi_i32(addr
, addr
, 4);
11600 TCGV_UNUSED_I32(tmp
);
11601 if (insn
& (1 << 8)) {
11602 if (insn
& (1 << 11)) {
11604 tmp
= tcg_temp_new_i32();
11605 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
11606 /* don't set the pc until the rest of the instruction
11610 tmp
= load_reg(s
, 14);
11611 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
11612 tcg_temp_free_i32(tmp
);
11614 tcg_gen_addi_i32(addr
, addr
, 4);
11616 if ((insn
& (1 << 11)) == 0) {
11617 tcg_gen_addi_i32(addr
, addr
, -offset
);
11619 /* write back the new stack pointer */
11620 store_reg(s
, 13, addr
);
11621 /* set the new PC value */
11622 if ((insn
& 0x0900) == 0x0900) {
11623 store_reg_from_load(s
, 15, tmp
);
        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;
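            /* Note: condexec_cond holds the top three bits of the IT
             * firstcond; condexec_mask holds firstcond[0] plus the 4-bit
             * mask. thumb_tr_translate_insn() shifts the mask left after
             * each insn, pulling the per-insn condition bit from its top,
             * until the mask empties and the IT block ends.
             */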
        case 0xe: /* bkpt */
        {
            int imm8 = extract32(insn, 0, 8);
            ARCH(5);
            gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
                               default_exception_el(s));
            break;
        }
        case 0xa: /* rev, and hlt */
        {
            int op1 = extract32(insn, 6, 2);

            if (op1 == 2) {
                /* HLT */
                int imm6 = extract32(insn, 0, 6);

                gen_hlt(s, imm6);
                break;
            }

            /* Otherwise this is rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch (op1) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default:
                g_assert_not_reached();
            }
            store_reg(s, rd, tmp);
            break;
        }
11692 switch ((insn
>> 5) & 7) {
11696 if (((insn
>> 3) & 1) != !!(s
->be_data
== MO_BE
)) {
11697 gen_helper_setend(cpu_env
);
11698 s
->base
.is_jmp
= DISAS_UPDATE
;
11707 if (arm_dc_feature(s
, ARM_FEATURE_M
)) {
11708 tmp
= tcg_const_i32((insn
& (1 << 4)) != 0);
11711 addr
= tcg_const_i32(19);
11712 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
11713 tcg_temp_free_i32(addr
);
11717 addr
= tcg_const_i32(16);
11718 gen_helper_v7m_msr(cpu_env
, addr
, tmp
);
11719 tcg_temp_free_i32(addr
);
11721 tcg_temp_free_i32(tmp
);
11724 if (insn
& (1 << 4)) {
11725 shift
= CPSR_A
| CPSR_I
| CPSR_F
;
11729 gen_set_psr_im(s
, ((insn
& 7) << 6), 0, shift
);
11744 /* load/store multiple */
11745 TCGv_i32 loaded_var
;
11746 TCGV_UNUSED_I32(loaded_var
);
11747 rn
= (insn
>> 8) & 0x7;
11748 addr
= load_reg(s
, rn
);
11749 for (i
= 0; i
< 8; i
++) {
11750 if (insn
& (1 << i
)) {
11751 if (insn
& (1 << 11)) {
11753 tmp
= tcg_temp_new_i32();
11754 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
11758 store_reg(s
, i
, tmp
);
11762 tmp
= load_reg(s
, i
);
11763 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
11764 tcg_temp_free_i32(tmp
);
11766 /* advance to the next address */
11767 tcg_gen_addi_i32(addr
, addr
, 4);
11770 if ((insn
& (1 << rn
)) == 0) {
11771 /* base reg not in list: base register writeback */
11772 store_reg(s
, rn
, addr
);
11774 /* base reg in list: if load, complete it now */
11775 if (insn
& (1 << 11)) {
11776 store_reg(s
, rn
, loaded_var
);
11778 tcg_temp_free_i32(addr
);
11783 /* conditional branch or swi */
11784 cond
= (insn
>> 8) & 0xf;
11790 gen_set_pc_im(s
, s
->pc
);
11791 s
->svc_imm
= extract32(insn
, 0, 8);
11792 s
->base
.is_jmp
= DISAS_SWI
;
11795 /* generate a conditional jump to next instruction */
11796 s
->condlabel
= gen_new_label();
11797 arm_gen_test_cc(cond
^ 1, s
->condlabel
);
11800 /* jump to the offset */
11801 val
= (uint32_t)s
->pc
+ 2;
11802 offset
= ((int32_t)insn
<< 24) >> 24;
11803 val
+= offset
<< 1;
11808 if (insn
& (1 << 11)) {
11809 if (disas_thumb2_insn(env
, s
, insn
))
11813 /* unconditional branch */
11814 val
= (uint32_t)s
->pc
;
11815 offset
= ((int32_t)insn
<< 21) >> 21;
11816 val
+= (offset
<< 1) + 2;
11821 if (disas_thumb2_insn(env
, s
, insn
))
11827 gen_exception_insn(s
, 4, EXCP_UDEF
, syn_uncategorized(),
11828 default_exception_el(s
));
11832 gen_exception_insn(s
, 2, EXCP_UDEF
, syn_uncategorized(),
11833 default_exception_el(s
));
static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
    /* Return true if the insn at dc->pc might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     */
    uint16_t insn;

    if ((s->pc & 3) == 0) {
        /* At a 4-aligned address we can't be crossing a page */
        return false;
    }

    /* This must be a Thumb insn */
    insn = arm_lduw_code(env, s->pc, s->sctlr_b);

    if ((insn >> 11) >= 0x1d) {
        /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
         * first half of a 32-bit Thumb insn. Thumb-1 cores might
         * end up actually treating this as two 16-bit insns (see the
         * code at the start of disas_thumb2_insn()) but we don't bother
         * to check for that as it is unlikely, and false positives here
         * are harmless.
         */
        return true;
    }
    /* Definitely a 16-bit insn, can't be crossing a page.  */
    return false;
}
static int arm_tr_init_disas_context(DisasContextBase *dcbase,
                                     CPUState *cs, int max_insns)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = arm_env_get_cpu(env);

    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
    dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
    dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
    dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                     regime_is_secure(env, dc->mmu_idx);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    dc->next_page_start =
        (dc->base.pc_first & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    /* If architectural single step active, limit to 1.  */
    if (is_singlestepping(dc)) {
        max_insns = 1;
    }

    /* ARM is a fixed-length ISA.  Bound the number of insns to execute
       to those left on the page.  */
    if (!dc->thumb) {
        int bound = (dc->next_page_start - dc->base.pc_first) / 4;
        max_insns = MIN(max_insns, bound);
    }

    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();

    return max_insns;
}
static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    tcg_clear_temp_count();
}
static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->insn_start_idx = tcg_op_buf_count();
    tcg_gen_insn_start(dc->pc,
                       (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                       0);
}
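/* Note: the values recorded by tcg_gen_insn_start() above (pc, the packed
 * IT-state bits, and a zero placeholder for the syndrome) are what
 * restore_state_to_opc() reads back as data[0], data[1] and data[2] when
 * unwinding after an exception.
 */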
static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->pc);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing.  */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->pc += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}
static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page.  */
    if (dc->pc >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block.  */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}
static void arm_post_translate_insn(DisasContext *dc)
{
    if (dc->condjmp && !dc->base.is_jmp) {
        gen_set_label(dc->condlabel);
        dc->condjmp = 0;
    }
    dc->base.pc_next = dc->pc;
    translator_loop_temp_check(&dc->base);
}
static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    unsigned int insn;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
    dc->insn = insn;
    dc->pc += 4;
    disas_arm_insn(dc, insn);

    arm_post_translate_insn(dc);

    /* ARM is a fixed-length ISA.  We performed the cross-page check
       in init_disas_context by adjusting max_insns.  */
}
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    disas_thumb_insn(env, dc);

    /* Advance the Thumb condexec condition.  */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->pc >= dc->next_page_start
            || (dc->pc >= dc->next_page_start - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (dc->base.tb->cflags & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->pc);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->pc);
        }
    }

    /* Functions above can change dc->pc, so re-align db->pc_next */
    dc->base.pc_next = dc->pc;
}
static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size,
                     dc->thumb | (dc->sctlr_b << 1));
}
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
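/* Note: the two TranslatorOps tables differ only in .translate_insn, which
 * selects the fixed-length ARM decoder or the variable-length Thumb decoder;
 * every other translator hook is shared.
 */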
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
{
    DisasContext dc;
    const TranslatorOps *ops = &arm_translator_ops;

    if (ARM_TBFLAG_THUMB(tb->flags)) {
        ops = &thumb_translator_ops;
    }
#ifdef TARGET_AARCH64
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        ops = &aarch64_translator_ops;
    }
#endif

    translator_loop(ops, &dc.base, cpu, tb);
}
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
  "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
        return;
    }

    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                    xpsr,
                    xpsr & XPSR_N ? 'N' : '-',
                    xpsr & XPSR_Z ? 'Z' : '-',
                    xpsr & XPSR_C ? 'C' : '-',
                    xpsr & XPSR_V ? 'V' : '-',
                    xpsr & XPSR_T ? 'T' : 'A',
                    ns_status,
                    mode);
    } else {
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                    psr,
                    psr & CPSR_N ? 'N' : '-',
                    psr & CPSR_Z ? 'Z' : '-',
                    psr & CPSR_C ? 'C' : '-',
                    psr & CPSR_V ? 'V' : '-',
                    psr & CPSR_T ? 'T' : 'A',
                    ns_status,
                    cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = float64_val(env->vfp.regs[i]);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}