4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "internals.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
29 #include "qemu/bitops.h"
31 #include "exec/semihost.h"
33 #include "exec/helper-proto.h"
34 #include "exec/helper-gen.h"
36 #include "trace-tcg.h"
40 #define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
41 #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
42 /* currently all emulated v5 cores are also v5TE, so don't bother */
43 #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
44 #define ENABLE_ARCH_5J 0
45 #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
46 #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
47 #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
48 #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
49 #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
51 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
53 #include "translate.h"
55 #if defined(CONFIG_USER_ONLY)
58 #define IS_USER(s) (s->user)
62 /* We reuse the same 64-bit temporaries for efficiency. */
63 static TCGv_i64 cpu_V0
, cpu_V1
, cpu_M0
;
64 static TCGv_i32 cpu_R
[16];
65 TCGv_i32 cpu_CF
, cpu_NF
, cpu_VF
, cpu_ZF
;
66 TCGv_i64 cpu_exclusive_addr
;
67 TCGv_i64 cpu_exclusive_val
;
69 /* FIXME: These should be removed. */
70 static TCGv_i32 cpu_F0s
, cpu_F1s
;
71 static TCGv_i64 cpu_F0d
, cpu_F1d
;
73 #include "exec/gen-icount.h"
75 static const char *regnames
[] =
76 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
77 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
79 /* initialize TCG globals. */
80 void arm_translate_init(void)
84 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
85 tcg_ctx
.tcg_env
= cpu_env
;
87 for (i
= 0; i
< 16; i
++) {
88 cpu_R
[i
] = tcg_global_mem_new_i32(cpu_env
,
89 offsetof(CPUARMState
, regs
[i
]),
92 cpu_CF
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUARMState
, CF
), "CF");
93 cpu_NF
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUARMState
, NF
), "NF");
94 cpu_VF
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUARMState
, VF
), "VF");
95 cpu_ZF
= tcg_global_mem_new_i32(cpu_env
, offsetof(CPUARMState
, ZF
), "ZF");
97 cpu_exclusive_addr
= tcg_global_mem_new_i64(cpu_env
,
98 offsetof(CPUARMState
, exclusive_addr
), "exclusive_addr");
99 cpu_exclusive_val
= tcg_global_mem_new_i64(cpu_env
,
100 offsetof(CPUARMState
, exclusive_val
), "exclusive_val");
102 a64_translate_init();
105 static inline ARMMMUIdx
get_a32_user_mem_index(DisasContext
*s
)
107 /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
109 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
110 * otherwise, access as if at PL0.
112 switch (s
->mmu_idx
) {
113 case ARMMMUIdx_S1E2
: /* this one is UNPREDICTABLE */
114 case ARMMMUIdx_S12NSE0
:
115 case ARMMMUIdx_S12NSE1
:
116 return ARMMMUIdx_S12NSE0
;
118 case ARMMMUIdx_S1SE0
:
119 case ARMMMUIdx_S1SE1
:
120 return ARMMMUIdx_S1SE0
;
123 g_assert_not_reached();
127 static inline TCGv_i32
load_cpu_offset(int offset
)
129 TCGv_i32 tmp
= tcg_temp_new_i32();
130 tcg_gen_ld_i32(tmp
, cpu_env
, offset
);
134 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
136 static inline void store_cpu_offset(TCGv_i32 var
, int offset
)
138 tcg_gen_st_i32(var
, cpu_env
, offset
);
139 tcg_temp_free_i32(var
);
142 #define store_cpu_field(var, name) \
143 store_cpu_offset(var, offsetof(CPUARMState, name))
145 /* Set a variable to the value of a CPU register. */
146 static void load_reg_var(DisasContext
*s
, TCGv_i32 var
, int reg
)
150 /* normally, since we updated PC, we need only to add one insn */
152 addr
= (long)s
->pc
+ 2;
154 addr
= (long)s
->pc
+ 4;
155 tcg_gen_movi_i32(var
, addr
);
157 tcg_gen_mov_i32(var
, cpu_R
[reg
]);
161 /* Create a new temporary and set it to the value of a CPU register. */
162 static inline TCGv_i32
load_reg(DisasContext
*s
, int reg
)
164 TCGv_i32 tmp
= tcg_temp_new_i32();
165 load_reg_var(s
, tmp
, reg
);
169 /* Set a CPU register. The source must be a temporary and will be
171 static void store_reg(DisasContext
*s
, int reg
, TCGv_i32 var
)
174 /* In Thumb mode, we must ignore bit 0.
175 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
176 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
177 * We choose to ignore [1:0] in ARM mode for all architecture versions.
179 tcg_gen_andi_i32(var
, var
, s
->thumb
? ~1 : ~3);
180 s
->is_jmp
= DISAS_JUMP
;
182 tcg_gen_mov_i32(cpu_R
[reg
], var
);
183 tcg_temp_free_i32(var
);
186 /* Value extensions. */
187 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
188 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
189 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
190 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
192 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
193 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
196 static inline void gen_set_cpsr(TCGv_i32 var
, uint32_t mask
)
198 TCGv_i32 tmp_mask
= tcg_const_i32(mask
);
199 gen_helper_cpsr_write(cpu_env
, var
, tmp_mask
);
200 tcg_temp_free_i32(tmp_mask
);
202 /* Set NZCV flags from the high 4 bits of var. */
203 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
205 static void gen_exception_internal(int excp
)
207 TCGv_i32 tcg_excp
= tcg_const_i32(excp
);
209 assert(excp_is_internal(excp
));
210 gen_helper_exception_internal(cpu_env
, tcg_excp
);
211 tcg_temp_free_i32(tcg_excp
);
214 static void gen_exception(int excp
, uint32_t syndrome
, uint32_t target_el
)
216 TCGv_i32 tcg_excp
= tcg_const_i32(excp
);
217 TCGv_i32 tcg_syn
= tcg_const_i32(syndrome
);
218 TCGv_i32 tcg_el
= tcg_const_i32(target_el
);
220 gen_helper_exception_with_syndrome(cpu_env
, tcg_excp
,
223 tcg_temp_free_i32(tcg_el
);
224 tcg_temp_free_i32(tcg_syn
);
225 tcg_temp_free_i32(tcg_excp
);
228 static void gen_ss_advance(DisasContext
*s
)
230 /* If the singlestep state is Active-not-pending, advance to
235 gen_helper_clear_pstate_ss(cpu_env
);
239 static void gen_step_complete_exception(DisasContext
*s
)
241 /* We just completed step of an insn. Move from Active-not-pending
242 * to Active-pending, and then also take the swstep exception.
243 * This corresponds to making the (IMPDEF) choice to prioritize
244 * swstep exceptions over asynchronous exceptions taken to an exception
245 * level where debug is disabled. This choice has the advantage that
246 * we do not need to maintain internal state corresponding to the
247 * ISV/EX syndrome bits between completion of the step and generation
248 * of the exception, and our syndrome information is always correct.
251 gen_exception(EXCP_UDEF
, syn_swstep(s
->ss_same_el
, 1, s
->is_ldex
),
252 default_exception_el(s
));
253 s
->is_jmp
= DISAS_EXC
;
256 static void gen_smul_dual(TCGv_i32 a
, TCGv_i32 b
)
258 TCGv_i32 tmp1
= tcg_temp_new_i32();
259 TCGv_i32 tmp2
= tcg_temp_new_i32();
260 tcg_gen_ext16s_i32(tmp1
, a
);
261 tcg_gen_ext16s_i32(tmp2
, b
);
262 tcg_gen_mul_i32(tmp1
, tmp1
, tmp2
);
263 tcg_temp_free_i32(tmp2
);
264 tcg_gen_sari_i32(a
, a
, 16);
265 tcg_gen_sari_i32(b
, b
, 16);
266 tcg_gen_mul_i32(b
, b
, a
);
267 tcg_gen_mov_i32(a
, tmp1
);
268 tcg_temp_free_i32(tmp1
);
271 /* Byteswap each halfword. */
272 static void gen_rev16(TCGv_i32 var
)
274 TCGv_i32 tmp
= tcg_temp_new_i32();
275 tcg_gen_shri_i32(tmp
, var
, 8);
276 tcg_gen_andi_i32(tmp
, tmp
, 0x00ff00ff);
277 tcg_gen_shli_i32(var
, var
, 8);
278 tcg_gen_andi_i32(var
, var
, 0xff00ff00);
279 tcg_gen_or_i32(var
, var
, tmp
);
280 tcg_temp_free_i32(tmp
);
283 /* Byteswap low halfword and sign extend. */
284 static void gen_revsh(TCGv_i32 var
)
286 tcg_gen_ext16u_i32(var
, var
);
287 tcg_gen_bswap16_i32(var
, var
);
288 tcg_gen_ext16s_i32(var
, var
);
291 /* Unsigned bitfield extract. */
292 static void gen_ubfx(TCGv_i32 var
, int shift
, uint32_t mask
)
295 tcg_gen_shri_i32(var
, var
, shift
);
296 tcg_gen_andi_i32(var
, var
, mask
);
299 /* Signed bitfield extract. */
300 static void gen_sbfx(TCGv_i32 var
, int shift
, int width
)
305 tcg_gen_sari_i32(var
, var
, shift
);
306 if (shift
+ width
< 32) {
307 signbit
= 1u << (width
- 1);
308 tcg_gen_andi_i32(var
, var
, (1u << width
) - 1);
309 tcg_gen_xori_i32(var
, var
, signbit
);
310 tcg_gen_subi_i32(var
, var
, signbit
);
314 /* Return (b << 32) + a. Mark inputs as dead */
315 static TCGv_i64
gen_addq_msw(TCGv_i64 a
, TCGv_i32 b
)
317 TCGv_i64 tmp64
= tcg_temp_new_i64();
319 tcg_gen_extu_i32_i64(tmp64
, b
);
320 tcg_temp_free_i32(b
);
321 tcg_gen_shli_i64(tmp64
, tmp64
, 32);
322 tcg_gen_add_i64(a
, tmp64
, a
);
324 tcg_temp_free_i64(tmp64
);
328 /* Return (b << 32) - a. Mark inputs as dead. */
329 static TCGv_i64
gen_subq_msw(TCGv_i64 a
, TCGv_i32 b
)
331 TCGv_i64 tmp64
= tcg_temp_new_i64();
333 tcg_gen_extu_i32_i64(tmp64
, b
);
334 tcg_temp_free_i32(b
);
335 tcg_gen_shli_i64(tmp64
, tmp64
, 32);
336 tcg_gen_sub_i64(a
, tmp64
, a
);
338 tcg_temp_free_i64(tmp64
);
342 /* 32x32->64 multiply. Marks inputs as dead. */
343 static TCGv_i64
gen_mulu_i64_i32(TCGv_i32 a
, TCGv_i32 b
)
345 TCGv_i32 lo
= tcg_temp_new_i32();
346 TCGv_i32 hi
= tcg_temp_new_i32();
349 tcg_gen_mulu2_i32(lo
, hi
, a
, b
);
350 tcg_temp_free_i32(a
);
351 tcg_temp_free_i32(b
);
353 ret
= tcg_temp_new_i64();
354 tcg_gen_concat_i32_i64(ret
, lo
, hi
);
355 tcg_temp_free_i32(lo
);
356 tcg_temp_free_i32(hi
);
361 static TCGv_i64
gen_muls_i64_i32(TCGv_i32 a
, TCGv_i32 b
)
363 TCGv_i32 lo
= tcg_temp_new_i32();
364 TCGv_i32 hi
= tcg_temp_new_i32();
367 tcg_gen_muls2_i32(lo
, hi
, a
, b
);
368 tcg_temp_free_i32(a
);
369 tcg_temp_free_i32(b
);
371 ret
= tcg_temp_new_i64();
372 tcg_gen_concat_i32_i64(ret
, lo
, hi
);
373 tcg_temp_free_i32(lo
);
374 tcg_temp_free_i32(hi
);
379 /* Swap low and high halfwords. */
380 static void gen_swap_half(TCGv_i32 var
)
382 TCGv_i32 tmp
= tcg_temp_new_i32();
383 tcg_gen_shri_i32(tmp
, var
, 16);
384 tcg_gen_shli_i32(var
, var
, 16);
385 tcg_gen_or_i32(var
, var
, tmp
);
386 tcg_temp_free_i32(tmp
);
389 /* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
390 tmp = (t0 ^ t1) & 0x8000;
393 t0 = (t0 + t1) ^ tmp;
396 static void gen_add16(TCGv_i32 t0
, TCGv_i32 t1
)
398 TCGv_i32 tmp
= tcg_temp_new_i32();
399 tcg_gen_xor_i32(tmp
, t0
, t1
);
400 tcg_gen_andi_i32(tmp
, tmp
, 0x8000);
401 tcg_gen_andi_i32(t0
, t0
, ~0x8000);
402 tcg_gen_andi_i32(t1
, t1
, ~0x8000);
403 tcg_gen_add_i32(t0
, t0
, t1
);
404 tcg_gen_xor_i32(t0
, t0
, tmp
);
405 tcg_temp_free_i32(tmp
);
406 tcg_temp_free_i32(t1
);
409 /* Set CF to the top bit of var. */
410 static void gen_set_CF_bit31(TCGv_i32 var
)
412 tcg_gen_shri_i32(cpu_CF
, var
, 31);
415 /* Set N and Z flags from var. */
416 static inline void gen_logic_CC(TCGv_i32 var
)
418 tcg_gen_mov_i32(cpu_NF
, var
);
419 tcg_gen_mov_i32(cpu_ZF
, var
);
423 static void gen_adc(TCGv_i32 t0
, TCGv_i32 t1
)
425 tcg_gen_add_i32(t0
, t0
, t1
);
426 tcg_gen_add_i32(t0
, t0
, cpu_CF
);
429 /* dest = T0 + T1 + CF. */
430 static void gen_add_carry(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
432 tcg_gen_add_i32(dest
, t0
, t1
);
433 tcg_gen_add_i32(dest
, dest
, cpu_CF
);
436 /* dest = T0 - T1 + CF - 1. */
437 static void gen_sub_carry(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
439 tcg_gen_sub_i32(dest
, t0
, t1
);
440 tcg_gen_add_i32(dest
, dest
, cpu_CF
);
441 tcg_gen_subi_i32(dest
, dest
, 1);
444 /* dest = T0 + T1. Compute C, N, V and Z flags */
445 static void gen_add_CC(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
447 TCGv_i32 tmp
= tcg_temp_new_i32();
448 tcg_gen_movi_i32(tmp
, 0);
449 tcg_gen_add2_i32(cpu_NF
, cpu_CF
, t0
, tmp
, t1
, tmp
);
450 tcg_gen_mov_i32(cpu_ZF
, cpu_NF
);
451 tcg_gen_xor_i32(cpu_VF
, cpu_NF
, t0
);
452 tcg_gen_xor_i32(tmp
, t0
, t1
);
453 tcg_gen_andc_i32(cpu_VF
, cpu_VF
, tmp
);
454 tcg_temp_free_i32(tmp
);
455 tcg_gen_mov_i32(dest
, cpu_NF
);
458 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
459 static void gen_adc_CC(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
461 TCGv_i32 tmp
= tcg_temp_new_i32();
462 if (TCG_TARGET_HAS_add2_i32
) {
463 tcg_gen_movi_i32(tmp
, 0);
464 tcg_gen_add2_i32(cpu_NF
, cpu_CF
, t0
, tmp
, cpu_CF
, tmp
);
465 tcg_gen_add2_i32(cpu_NF
, cpu_CF
, cpu_NF
, cpu_CF
, t1
, tmp
);
467 TCGv_i64 q0
= tcg_temp_new_i64();
468 TCGv_i64 q1
= tcg_temp_new_i64();
469 tcg_gen_extu_i32_i64(q0
, t0
);
470 tcg_gen_extu_i32_i64(q1
, t1
);
471 tcg_gen_add_i64(q0
, q0
, q1
);
472 tcg_gen_extu_i32_i64(q1
, cpu_CF
);
473 tcg_gen_add_i64(q0
, q0
, q1
);
474 tcg_gen_extr_i64_i32(cpu_NF
, cpu_CF
, q0
);
475 tcg_temp_free_i64(q0
);
476 tcg_temp_free_i64(q1
);
478 tcg_gen_mov_i32(cpu_ZF
, cpu_NF
);
479 tcg_gen_xor_i32(cpu_VF
, cpu_NF
, t0
);
480 tcg_gen_xor_i32(tmp
, t0
, t1
);
481 tcg_gen_andc_i32(cpu_VF
, cpu_VF
, tmp
);
482 tcg_temp_free_i32(tmp
);
483 tcg_gen_mov_i32(dest
, cpu_NF
);
486 /* dest = T0 - T1. Compute C, N, V and Z flags */
487 static void gen_sub_CC(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
490 tcg_gen_sub_i32(cpu_NF
, t0
, t1
);
491 tcg_gen_mov_i32(cpu_ZF
, cpu_NF
);
492 tcg_gen_setcond_i32(TCG_COND_GEU
, cpu_CF
, t0
, t1
);
493 tcg_gen_xor_i32(cpu_VF
, cpu_NF
, t0
);
494 tmp
= tcg_temp_new_i32();
495 tcg_gen_xor_i32(tmp
, t0
, t1
);
496 tcg_gen_and_i32(cpu_VF
, cpu_VF
, tmp
);
497 tcg_temp_free_i32(tmp
);
498 tcg_gen_mov_i32(dest
, cpu_NF
);
501 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
502 static void gen_sbc_CC(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
504 TCGv_i32 tmp
= tcg_temp_new_i32();
505 tcg_gen_not_i32(tmp
, t1
);
506 gen_adc_CC(dest
, t0
, tmp
);
507 tcg_temp_free_i32(tmp
);
510 #define GEN_SHIFT(name) \
511 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
513 TCGv_i32 tmp1, tmp2, tmp3; \
514 tmp1 = tcg_temp_new_i32(); \
515 tcg_gen_andi_i32(tmp1, t1, 0xff); \
516 tmp2 = tcg_const_i32(0); \
517 tmp3 = tcg_const_i32(0x1f); \
518 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
519 tcg_temp_free_i32(tmp3); \
520 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
521 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
522 tcg_temp_free_i32(tmp2); \
523 tcg_temp_free_i32(tmp1); \
529 static void gen_sar(TCGv_i32 dest
, TCGv_i32 t0
, TCGv_i32 t1
)
532 tmp1
= tcg_temp_new_i32();
533 tcg_gen_andi_i32(tmp1
, t1
, 0xff);
534 tmp2
= tcg_const_i32(0x1f);
535 tcg_gen_movcond_i32(TCG_COND_GTU
, tmp1
, tmp1
, tmp2
, tmp2
, tmp1
);
536 tcg_temp_free_i32(tmp2
);
537 tcg_gen_sar_i32(dest
, t0
, tmp1
);
538 tcg_temp_free_i32(tmp1
);
541 static void tcg_gen_abs_i32(TCGv_i32 dest
, TCGv_i32 src
)
543 TCGv_i32 c0
= tcg_const_i32(0);
544 TCGv_i32 tmp
= tcg_temp_new_i32();
545 tcg_gen_neg_i32(tmp
, src
);
546 tcg_gen_movcond_i32(TCG_COND_GT
, dest
, src
, c0
, src
, tmp
);
547 tcg_temp_free_i32(c0
);
548 tcg_temp_free_i32(tmp
);
551 static void shifter_out_im(TCGv_i32 var
, int shift
)
554 tcg_gen_andi_i32(cpu_CF
, var
, 1);
556 tcg_gen_shri_i32(cpu_CF
, var
, shift
);
558 tcg_gen_andi_i32(cpu_CF
, cpu_CF
, 1);
563 /* Shift by immediate. Includes special handling for shift == 0. */
564 static inline void gen_arm_shift_im(TCGv_i32 var
, int shiftop
,
565 int shift
, int flags
)
571 shifter_out_im(var
, 32 - shift
);
572 tcg_gen_shli_i32(var
, var
, shift
);
578 tcg_gen_shri_i32(cpu_CF
, var
, 31);
580 tcg_gen_movi_i32(var
, 0);
583 shifter_out_im(var
, shift
- 1);
584 tcg_gen_shri_i32(var
, var
, shift
);
591 shifter_out_im(var
, shift
- 1);
594 tcg_gen_sari_i32(var
, var
, shift
);
596 case 3: /* ROR/RRX */
599 shifter_out_im(var
, shift
- 1);
600 tcg_gen_rotri_i32(var
, var
, shift
); break;
602 TCGv_i32 tmp
= tcg_temp_new_i32();
603 tcg_gen_shli_i32(tmp
, cpu_CF
, 31);
605 shifter_out_im(var
, 0);
606 tcg_gen_shri_i32(var
, var
, 1);
607 tcg_gen_or_i32(var
, var
, tmp
);
608 tcg_temp_free_i32(tmp
);
613 static inline void gen_arm_shift_reg(TCGv_i32 var
, int shiftop
,
614 TCGv_i32 shift
, int flags
)
618 case 0: gen_helper_shl_cc(var
, cpu_env
, var
, shift
); break;
619 case 1: gen_helper_shr_cc(var
, cpu_env
, var
, shift
); break;
620 case 2: gen_helper_sar_cc(var
, cpu_env
, var
, shift
); break;
621 case 3: gen_helper_ror_cc(var
, cpu_env
, var
, shift
); break;
626 gen_shl(var
, var
, shift
);
629 gen_shr(var
, var
, shift
);
632 gen_sar(var
, var
, shift
);
634 case 3: tcg_gen_andi_i32(shift
, shift
, 0x1f);
635 tcg_gen_rotr_i32(var
, var
, shift
); break;
638 tcg_temp_free_i32(shift
);
641 #define PAS_OP(pfx) \
643 case 0: gen_pas_helper(glue(pfx,add16)); break; \
644 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
645 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
646 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
647 case 4: gen_pas_helper(glue(pfx,add8)); break; \
648 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
650 static void gen_arm_parallel_addsub(int op1
, int op2
, TCGv_i32 a
, TCGv_i32 b
)
655 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
657 tmp
= tcg_temp_new_ptr();
658 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
660 tcg_temp_free_ptr(tmp
);
663 tmp
= tcg_temp_new_ptr();
664 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
666 tcg_temp_free_ptr(tmp
);
668 #undef gen_pas_helper
669 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
682 #undef gen_pas_helper
687 /* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
688 #define PAS_OP(pfx) \
690 case 0: gen_pas_helper(glue(pfx,add8)); break; \
691 case 1: gen_pas_helper(glue(pfx,add16)); break; \
692 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
693 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
694 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
695 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
697 static void gen_thumb2_parallel_addsub(int op1
, int op2
, TCGv_i32 a
, TCGv_i32 b
)
702 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
704 tmp
= tcg_temp_new_ptr();
705 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
707 tcg_temp_free_ptr(tmp
);
710 tmp
= tcg_temp_new_ptr();
711 tcg_gen_addi_ptr(tmp
, cpu_env
, offsetof(CPUARMState
, GE
));
713 tcg_temp_free_ptr(tmp
);
715 #undef gen_pas_helper
716 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
729 #undef gen_pas_helper
735 * Generate a conditional based on ARM condition code cc.
736 * This is common between ARM and Aarch64 targets.
738 void arm_test_cc(DisasCompare
*cmp
, int cc
)
769 case 8: /* hi: C && !Z */
770 case 9: /* ls: !C || Z -> !(C && !Z) */
772 value
= tcg_temp_new_i32();
774 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
775 ZF is non-zero for !Z; so AND the two subexpressions. */
776 tcg_gen_neg_i32(value
, cpu_CF
);
777 tcg_gen_and_i32(value
, value
, cpu_ZF
);
780 case 10: /* ge: N == V -> N ^ V == 0 */
781 case 11: /* lt: N != V -> N ^ V != 0 */
782 /* Since we're only interested in the sign bit, == 0 is >= 0. */
784 value
= tcg_temp_new_i32();
786 tcg_gen_xor_i32(value
, cpu_VF
, cpu_NF
);
789 case 12: /* gt: !Z && N == V */
790 case 13: /* le: Z || N != V */
792 value
= tcg_temp_new_i32();
794 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
795 * the sign bit then AND with ZF to yield the result. */
796 tcg_gen_xor_i32(value
, cpu_VF
, cpu_NF
);
797 tcg_gen_sari_i32(value
, value
, 31);
798 tcg_gen_andc_i32(value
, cpu_ZF
, value
);
801 case 14: /* always */
802 case 15: /* always */
803 /* Use the ALWAYS condition, which will fold early.
804 * It doesn't matter what we use for the value. */
805 cond
= TCG_COND_ALWAYS
;
810 fprintf(stderr
, "Bad condition code 0x%x\n", cc
);
815 cond
= tcg_invert_cond(cond
);
821 cmp
->value_global
= global
;
824 void arm_free_cc(DisasCompare
*cmp
)
826 if (!cmp
->value_global
) {
827 tcg_temp_free_i32(cmp
->value
);
831 void arm_jump_cc(DisasCompare
*cmp
, TCGLabel
*label
)
833 tcg_gen_brcondi_i32(cmp
->cond
, cmp
->value
, 0, label
);
836 void arm_gen_test_cc(int cc
, TCGLabel
*label
)
839 arm_test_cc(&cmp
, cc
);
840 arm_jump_cc(&cmp
, label
);
844 static const uint8_t table_logic_cc
[16] = {
863 /* Set PC and Thumb state from an immediate address. */
864 static inline void gen_bx_im(DisasContext
*s
, uint32_t addr
)
868 s
->is_jmp
= DISAS_JUMP
;
869 if (s
->thumb
!= (addr
& 1)) {
870 tmp
= tcg_temp_new_i32();
871 tcg_gen_movi_i32(tmp
, addr
& 1);
872 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUARMState
, thumb
));
873 tcg_temp_free_i32(tmp
);
875 tcg_gen_movi_i32(cpu_R
[15], addr
& ~1);
878 /* Set PC and Thumb state from var. var is marked as dead. */
879 static inline void gen_bx(DisasContext
*s
, TCGv_i32 var
)
881 s
->is_jmp
= DISAS_JUMP
;
882 tcg_gen_andi_i32(cpu_R
[15], var
, ~1);
883 tcg_gen_andi_i32(var
, var
, 1);
884 store_cpu_field(var
, thumb
);
887 /* Variant of store_reg which uses branch&exchange logic when storing
888 to r15 in ARM architecture v7 and above. The source must be a temporary
889 and will be marked as dead. */
890 static inline void store_reg_bx(DisasContext
*s
, int reg
, TCGv_i32 var
)
892 if (reg
== 15 && ENABLE_ARCH_7
) {
895 store_reg(s
, reg
, var
);
899 /* Variant of store_reg which uses branch&exchange logic when storing
900 * to r15 in ARM architecture v5T and above. This is used for storing
901 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
902 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
903 static inline void store_reg_from_load(DisasContext
*s
, int reg
, TCGv_i32 var
)
905 if (reg
== 15 && ENABLE_ARCH_5
) {
908 store_reg(s
, reg
, var
);
912 #ifdef CONFIG_USER_ONLY
913 #define IS_USER_ONLY 1
915 #define IS_USER_ONLY 0
918 /* Abstractions of "generate code to do a guest load/store for
919 * AArch32", where a vaddr is always 32 bits (and is zero
920 * extended if we're a 64 bit core) and data is also
921 * 32 bits unless specifically doing a 64 bit access.
922 * These functions work like tcg_gen_qemu_{ld,st}* except
923 * that the address argument is TCGv_i32 rather than TCGv.
926 static inline TCGv
gen_aa32_addr(DisasContext
*s
, TCGv_i32 a32
, TCGMemOp op
)
928 TCGv addr
= tcg_temp_new();
929 tcg_gen_extu_i32_tl(addr
, a32
);
931 /* Not needed for user-mode BE32, where we use MO_BE instead. */
932 if (!IS_USER_ONLY
&& s
->sctlr_b
&& (op
& MO_SIZE
) < MO_32
) {
933 tcg_gen_xori_tl(addr
, addr
, 4 - (1 << (op
& MO_SIZE
)));
938 static void gen_aa32_ld_i32(DisasContext
*s
, TCGv_i32 val
, TCGv_i32 a32
,
939 int index
, TCGMemOp opc
)
941 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
942 tcg_gen_qemu_ld_i32(val
, addr
, index
, opc
);
946 static void gen_aa32_st_i32(DisasContext
*s
, TCGv_i32 val
, TCGv_i32 a32
,
947 int index
, TCGMemOp opc
)
949 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
950 tcg_gen_qemu_st_i32(val
, addr
, index
, opc
);
954 #define DO_GEN_LD(SUFF, OPC) \
955 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
956 TCGv_i32 a32, int index) \
958 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
961 #define DO_GEN_ST(SUFF, OPC) \
962 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
963 TCGv_i32 a32, int index) \
965 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
968 static inline void gen_aa32_frob64(DisasContext
*s
, TCGv_i64 val
)
970 /* Not needed for user-mode BE32, where we use MO_BE instead. */
971 if (!IS_USER_ONLY
&& s
->sctlr_b
) {
972 tcg_gen_rotri_i64(val
, val
, 32);
976 static void gen_aa32_ld_i64(DisasContext
*s
, TCGv_i64 val
, TCGv_i32 a32
,
977 int index
, TCGMemOp opc
)
979 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
980 tcg_gen_qemu_ld_i64(val
, addr
, index
, opc
);
981 gen_aa32_frob64(s
, val
);
985 static inline void gen_aa32_ld64(DisasContext
*s
, TCGv_i64 val
,
986 TCGv_i32 a32
, int index
)
988 gen_aa32_ld_i64(s
, val
, a32
, index
, MO_Q
| s
->be_data
);
991 static void gen_aa32_st_i64(DisasContext
*s
, TCGv_i64 val
, TCGv_i32 a32
,
992 int index
, TCGMemOp opc
)
994 TCGv addr
= gen_aa32_addr(s
, a32
, opc
);
996 /* Not needed for user-mode BE32, where we use MO_BE instead. */
997 if (!IS_USER_ONLY
&& s
->sctlr_b
) {
998 TCGv_i64 tmp
= tcg_temp_new_i64();
999 tcg_gen_rotri_i64(tmp
, val
, 32);
1000 tcg_gen_qemu_st_i64(tmp
, addr
, index
, opc
);
1001 tcg_temp_free_i64(tmp
);
1003 tcg_gen_qemu_st_i64(val
, addr
, index
, opc
);
1005 tcg_temp_free(addr
);
1008 static inline void gen_aa32_st64(DisasContext
*s
, TCGv_i64 val
,
1009 TCGv_i32 a32
, int index
)
1011 gen_aa32_st_i64(s
, val
, a32
, index
, MO_Q
| s
->be_data
);
1014 DO_GEN_LD(8s
, MO_SB
)
1015 DO_GEN_LD(8u, MO_UB
)
1016 DO_GEN_LD(16s
, MO_SW
)
1017 DO_GEN_LD(16u, MO_UW
)
1018 DO_GEN_LD(32u, MO_UL
)
1020 DO_GEN_ST(16, MO_UW
)
1021 DO_GEN_ST(32, MO_UL
)
1023 static inline void gen_set_pc_im(DisasContext
*s
, target_ulong val
)
1025 tcg_gen_movi_i32(cpu_R
[15], val
);
1028 static inline void gen_hvc(DisasContext
*s
, int imm16
)
1030 /* The pre HVC helper handles cases when HVC gets trapped
1031 * as an undefined insn by runtime configuration (ie before
1032 * the insn really executes).
1034 gen_set_pc_im(s
, s
->pc
- 4);
1035 gen_helper_pre_hvc(cpu_env
);
1036 /* Otherwise we will treat this as a real exception which
1037 * happens after execution of the insn. (The distinction matters
1038 * for the PC value reported to the exception handler and also
1039 * for single stepping.)
1042 gen_set_pc_im(s
, s
->pc
);
1043 s
->is_jmp
= DISAS_HVC
;
1046 static inline void gen_smc(DisasContext
*s
)
1048 /* As with HVC, we may take an exception either before or after
1049 * the insn executes.
1053 gen_set_pc_im(s
, s
->pc
- 4);
1054 tmp
= tcg_const_i32(syn_aa32_smc());
1055 gen_helper_pre_smc(cpu_env
, tmp
);
1056 tcg_temp_free_i32(tmp
);
1057 gen_set_pc_im(s
, s
->pc
);
1058 s
->is_jmp
= DISAS_SMC
;
1062 gen_set_condexec (DisasContext
*s
)
1064 if (s
->condexec_mask
) {
1065 uint32_t val
= (s
->condexec_cond
<< 4) | (s
->condexec_mask
>> 1);
1066 TCGv_i32 tmp
= tcg_temp_new_i32();
1067 tcg_gen_movi_i32(tmp
, val
);
1068 store_cpu_field(tmp
, condexec_bits
);
1072 static void gen_exception_internal_insn(DisasContext
*s
, int offset
, int excp
)
1074 gen_set_condexec(s
);
1075 gen_set_pc_im(s
, s
->pc
- offset
);
1076 gen_exception_internal(excp
);
1077 s
->is_jmp
= DISAS_JUMP
;
1080 static void gen_exception_insn(DisasContext
*s
, int offset
, int excp
,
1081 int syn
, uint32_t target_el
)
1083 gen_set_condexec(s
);
1084 gen_set_pc_im(s
, s
->pc
- offset
);
1085 gen_exception(excp
, syn
, target_el
);
1086 s
->is_jmp
= DISAS_JUMP
;
1089 /* Force a TB lookup after an instruction that changes the CPU state. */
1090 static inline void gen_lookup_tb(DisasContext
*s
)
1092 tcg_gen_movi_i32(cpu_R
[15], s
->pc
& ~1);
1093 s
->is_jmp
= DISAS_JUMP
;
1096 static inline void gen_hlt(DisasContext
*s
, int imm
)
1098 /* HLT. This has two purposes.
1099 * Architecturally, it is an external halting debug instruction.
1100 * Since QEMU doesn't implement external debug, we treat this as
1101 * it is required for halting debug disabled: it will UNDEF.
1102 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1103 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1104 * must trigger semihosting even for ARMv7 and earlier, where
1105 * HLT was an undefined encoding.
1106 * In system mode, we don't allow userspace access to
1107 * semihosting, to provide some semblance of security
1108 * (and for consistency with our 32-bit semihosting).
1110 if (semihosting_enabled() &&
1111 #ifndef CONFIG_USER_ONLY
1112 s
->current_el
!= 0 &&
1114 (imm
== (s
->thumb
? 0x3c : 0xf000))) {
1115 gen_exception_internal_insn(s
, 0, EXCP_SEMIHOST
);
1119 gen_exception_insn(s
, s
->thumb
? 2 : 4, EXCP_UDEF
, syn_uncategorized(),
1120 default_exception_el(s
));
1123 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
,
1126 int val
, rm
, shift
, shiftop
;
1129 if (!(insn
& (1 << 25))) {
1132 if (!(insn
& (1 << 23)))
1135 tcg_gen_addi_i32(var
, var
, val
);
1137 /* shift/register */
1139 shift
= (insn
>> 7) & 0x1f;
1140 shiftop
= (insn
>> 5) & 3;
1141 offset
= load_reg(s
, rm
);
1142 gen_arm_shift_im(offset
, shiftop
, shift
, 0);
1143 if (!(insn
& (1 << 23)))
1144 tcg_gen_sub_i32(var
, var
, offset
);
1146 tcg_gen_add_i32(var
, var
, offset
);
1147 tcg_temp_free_i32(offset
);
1151 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
,
1152 int extra
, TCGv_i32 var
)
1157 if (insn
& (1 << 22)) {
1159 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
1160 if (!(insn
& (1 << 23)))
1164 tcg_gen_addi_i32(var
, var
, val
);
1168 tcg_gen_addi_i32(var
, var
, extra
);
1170 offset
= load_reg(s
, rm
);
1171 if (!(insn
& (1 << 23)))
1172 tcg_gen_sub_i32(var
, var
, offset
);
1174 tcg_gen_add_i32(var
, var
, offset
);
1175 tcg_temp_free_i32(offset
);
1179 static TCGv_ptr
get_fpstatus_ptr(int neon
)
1181 TCGv_ptr statusptr
= tcg_temp_new_ptr();
1184 offset
= offsetof(CPUARMState
, vfp
.standard_fp_status
);
1186 offset
= offsetof(CPUARMState
, vfp
.fp_status
);
1188 tcg_gen_addi_ptr(statusptr
, cpu_env
, offset
);
1192 #define VFP_OP2(name) \
1193 static inline void gen_vfp_##name(int dp) \
1195 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1197 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1199 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1201 tcg_temp_free_ptr(fpst); \
1211 static inline void gen_vfp_F1_mul(int dp
)
1213 /* Like gen_vfp_mul() but put result in F1 */
1214 TCGv_ptr fpst
= get_fpstatus_ptr(0);
1216 gen_helper_vfp_muld(cpu_F1d
, cpu_F0d
, cpu_F1d
, fpst
);
1218 gen_helper_vfp_muls(cpu_F1s
, cpu_F0s
, cpu_F1s
, fpst
);
1220 tcg_temp_free_ptr(fpst
);
1223 static inline void gen_vfp_F1_neg(int dp
)
1225 /* Like gen_vfp_neg() but put result in F1 */
1227 gen_helper_vfp_negd(cpu_F1d
, cpu_F0d
);
1229 gen_helper_vfp_negs(cpu_F1s
, cpu_F0s
);
1233 static inline void gen_vfp_abs(int dp
)
1236 gen_helper_vfp_absd(cpu_F0d
, cpu_F0d
);
1238 gen_helper_vfp_abss(cpu_F0s
, cpu_F0s
);
1241 static inline void gen_vfp_neg(int dp
)
1244 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
1246 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
1249 static inline void gen_vfp_sqrt(int dp
)
1252 gen_helper_vfp_sqrtd(cpu_F0d
, cpu_F0d
, cpu_env
);
1254 gen_helper_vfp_sqrts(cpu_F0s
, cpu_F0s
, cpu_env
);
1257 static inline void gen_vfp_cmp(int dp
)
1260 gen_helper_vfp_cmpd(cpu_F0d
, cpu_F1d
, cpu_env
);
1262 gen_helper_vfp_cmps(cpu_F0s
, cpu_F1s
, cpu_env
);
1265 static inline void gen_vfp_cmpe(int dp
)
1268 gen_helper_vfp_cmped(cpu_F0d
, cpu_F1d
, cpu_env
);
1270 gen_helper_vfp_cmpes(cpu_F0s
, cpu_F1s
, cpu_env
);
1273 static inline void gen_vfp_F1_ld0(int dp
)
1276 tcg_gen_movi_i64(cpu_F1d
, 0);
1278 tcg_gen_movi_i32(cpu_F1s
, 0);
1281 #define VFP_GEN_ITOF(name) \
1282 static inline void gen_vfp_##name(int dp, int neon) \
1284 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1286 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1288 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1290 tcg_temp_free_ptr(statusptr); \
1297 #define VFP_GEN_FTOI(name) \
1298 static inline void gen_vfp_##name(int dp, int neon) \
1300 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1302 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1304 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1306 tcg_temp_free_ptr(statusptr); \
1315 #define VFP_GEN_FIX(name, round) \
1316 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1318 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1319 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1321 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1324 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1327 tcg_temp_free_i32(tmp_shift); \
1328 tcg_temp_free_ptr(statusptr); \
1330 VFP_GEN_FIX(tosh
, _round_to_zero
)
1331 VFP_GEN_FIX(tosl
, _round_to_zero
)
1332 VFP_GEN_FIX(touh
, _round_to_zero
)
1333 VFP_GEN_FIX(toul
, _round_to_zero
)
1340 static inline void gen_vfp_ld(DisasContext
*s
, int dp
, TCGv_i32 addr
)
1343 gen_aa32_ld64(s
, cpu_F0d
, addr
, get_mem_index(s
));
1345 gen_aa32_ld32u(s
, cpu_F0s
, addr
, get_mem_index(s
));
1349 static inline void gen_vfp_st(DisasContext
*s
, int dp
, TCGv_i32 addr
)
1352 gen_aa32_st64(s
, cpu_F0d
, addr
, get_mem_index(s
));
1354 gen_aa32_st32(s
, cpu_F0s
, addr
, get_mem_index(s
));
1359 vfp_reg_offset (int dp
, int reg
)
1362 return offsetof(CPUARMState
, vfp
.regs
[reg
]);
1364 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1365 + offsetof(CPU_DoubleU
, l
.upper
);
1367 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
1368 + offsetof(CPU_DoubleU
, l
.lower
);
1372 /* Return the offset of a 32-bit piece of a NEON register.
1373 zero is the least significant end of the register. */
1375 neon_reg_offset (int reg
, int n
)
1379 return vfp_reg_offset(0, sreg
);
1382 static TCGv_i32
neon_load_reg(int reg
, int pass
)
1384 TCGv_i32 tmp
= tcg_temp_new_i32();
1385 tcg_gen_ld_i32(tmp
, cpu_env
, neon_reg_offset(reg
, pass
));
1389 static void neon_store_reg(int reg
, int pass
, TCGv_i32 var
)
1391 tcg_gen_st_i32(var
, cpu_env
, neon_reg_offset(reg
, pass
));
1392 tcg_temp_free_i32(var
);
1395 static inline void neon_load_reg64(TCGv_i64 var
, int reg
)
1397 tcg_gen_ld_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1400 static inline void neon_store_reg64(TCGv_i64 var
, int reg
)
1402 tcg_gen_st_i64(var
, cpu_env
, vfp_reg_offset(1, reg
));
1405 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1406 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1407 #define tcg_gen_st_f32 tcg_gen_st_i32
1408 #define tcg_gen_st_f64 tcg_gen_st_i64
1410 static inline void gen_mov_F0_vreg(int dp
, int reg
)
1413 tcg_gen_ld_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1415 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1418 static inline void gen_mov_F1_vreg(int dp
, int reg
)
1421 tcg_gen_ld_f64(cpu_F1d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1423 tcg_gen_ld_f32(cpu_F1s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1426 static inline void gen_mov_vreg_F0(int dp
, int reg
)
1429 tcg_gen_st_f64(cpu_F0d
, cpu_env
, vfp_reg_offset(dp
, reg
));
1431 tcg_gen_st_f32(cpu_F0s
, cpu_env
, vfp_reg_offset(dp
, reg
));
1434 #define ARM_CP_RW_BIT (1 << 20)
1436 static inline void iwmmxt_load_reg(TCGv_i64 var
, int reg
)
1438 tcg_gen_ld_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1441 static inline void iwmmxt_store_reg(TCGv_i64 var
, int reg
)
1443 tcg_gen_st_i64(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.regs
[reg
]));
1446 static inline TCGv_i32
iwmmxt_load_creg(int reg
)
1448 TCGv_i32 var
= tcg_temp_new_i32();
1449 tcg_gen_ld_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1453 static inline void iwmmxt_store_creg(int reg
, TCGv_i32 var
)
1455 tcg_gen_st_i32(var
, cpu_env
, offsetof(CPUARMState
, iwmmxt
.cregs
[reg
]));
1456 tcg_temp_free_i32(var
);
1459 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn
)
1461 iwmmxt_store_reg(cpu_M0
, rn
);
1464 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn
)
1466 iwmmxt_load_reg(cpu_M0
, rn
);
1469 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn
)
1471 iwmmxt_load_reg(cpu_V1
, rn
);
1472 tcg_gen_or_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1475 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn
)
1477 iwmmxt_load_reg(cpu_V1
, rn
);
1478 tcg_gen_and_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1481 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn
)
1483 iwmmxt_load_reg(cpu_V1
, rn
);
1484 tcg_gen_xor_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1487 #define IWMMXT_OP(name) \
1488 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1490 iwmmxt_load_reg(cpu_V1, rn); \
1491 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1494 #define IWMMXT_OP_ENV(name) \
1495 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1497 iwmmxt_load_reg(cpu_V1, rn); \
1498 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1501 #define IWMMXT_OP_ENV_SIZE(name) \
1502 IWMMXT_OP_ENV(name##b) \
1503 IWMMXT_OP_ENV(name##w) \
1504 IWMMXT_OP_ENV(name##l)
1506 #define IWMMXT_OP_ENV1(name) \
1507 static inline void gen_op_iwmmxt_##name##_M0(void) \
1509 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1523 IWMMXT_OP_ENV_SIZE(unpackl
)
1524 IWMMXT_OP_ENV_SIZE(unpackh
)
1526 IWMMXT_OP_ENV1(unpacklub
)
1527 IWMMXT_OP_ENV1(unpackluw
)
1528 IWMMXT_OP_ENV1(unpacklul
)
1529 IWMMXT_OP_ENV1(unpackhub
)
1530 IWMMXT_OP_ENV1(unpackhuw
)
1531 IWMMXT_OP_ENV1(unpackhul
)
1532 IWMMXT_OP_ENV1(unpacklsb
)
1533 IWMMXT_OP_ENV1(unpacklsw
)
1534 IWMMXT_OP_ENV1(unpacklsl
)
1535 IWMMXT_OP_ENV1(unpackhsb
)
1536 IWMMXT_OP_ENV1(unpackhsw
)
1537 IWMMXT_OP_ENV1(unpackhsl
)
1539 IWMMXT_OP_ENV_SIZE(cmpeq
)
1540 IWMMXT_OP_ENV_SIZE(cmpgtu
)
1541 IWMMXT_OP_ENV_SIZE(cmpgts
)
1543 IWMMXT_OP_ENV_SIZE(mins
)
1544 IWMMXT_OP_ENV_SIZE(minu
)
1545 IWMMXT_OP_ENV_SIZE(maxs
)
1546 IWMMXT_OP_ENV_SIZE(maxu
)
1548 IWMMXT_OP_ENV_SIZE(subn
)
1549 IWMMXT_OP_ENV_SIZE(addn
)
1550 IWMMXT_OP_ENV_SIZE(subu
)
1551 IWMMXT_OP_ENV_SIZE(addu
)
1552 IWMMXT_OP_ENV_SIZE(subs
)
1553 IWMMXT_OP_ENV_SIZE(adds
)
1555 IWMMXT_OP_ENV(avgb0
)
1556 IWMMXT_OP_ENV(avgb1
)
1557 IWMMXT_OP_ENV(avgw0
)
1558 IWMMXT_OP_ENV(avgw1
)
1560 IWMMXT_OP_ENV(packuw
)
1561 IWMMXT_OP_ENV(packul
)
1562 IWMMXT_OP_ENV(packuq
)
1563 IWMMXT_OP_ENV(packsw
)
1564 IWMMXT_OP_ENV(packsl
)
1565 IWMMXT_OP_ENV(packsq
)
1567 static void gen_op_iwmmxt_set_mup(void)
1570 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1571 tcg_gen_ori_i32(tmp
, tmp
, 2);
1572 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1575 static void gen_op_iwmmxt_set_cup(void)
1578 tmp
= load_cpu_field(iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1579 tcg_gen_ori_i32(tmp
, tmp
, 1);
1580 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCon
]);
1583 static void gen_op_iwmmxt_setpsr_nz(void)
1585 TCGv_i32 tmp
= tcg_temp_new_i32();
1586 gen_helper_iwmmxt_setpsr_nz(tmp
, cpu_M0
);
1587 store_cpu_field(tmp
, iwmmxt
.cregs
[ARM_IWMMXT_wCASF
]);
1590 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn
)
1592 iwmmxt_load_reg(cpu_V1
, rn
);
1593 tcg_gen_ext32u_i64(cpu_V1
, cpu_V1
);
1594 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1597 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
,
1604 rd
= (insn
>> 16) & 0xf;
1605 tmp
= load_reg(s
, rd
);
1607 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
1608 if (insn
& (1 << 24)) {
1610 if (insn
& (1 << 23))
1611 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1613 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1614 tcg_gen_mov_i32(dest
, tmp
);
1615 if (insn
& (1 << 21))
1616 store_reg(s
, rd
, tmp
);
1618 tcg_temp_free_i32(tmp
);
1619 } else if (insn
& (1 << 21)) {
1621 tcg_gen_mov_i32(dest
, tmp
);
1622 if (insn
& (1 << 23))
1623 tcg_gen_addi_i32(tmp
, tmp
, offset
);
1625 tcg_gen_addi_i32(tmp
, tmp
, -offset
);
1626 store_reg(s
, rd
, tmp
);
1627 } else if (!(insn
& (1 << 23)))
1632 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
, TCGv_i32 dest
)
1634 int rd
= (insn
>> 0) & 0xf;
1637 if (insn
& (1 << 8)) {
1638 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
) {
1641 tmp
= iwmmxt_load_creg(rd
);
1644 tmp
= tcg_temp_new_i32();
1645 iwmmxt_load_reg(cpu_V0
, rd
);
1646 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
1648 tcg_gen_andi_i32(tmp
, tmp
, mask
);
1649 tcg_gen_mov_i32(dest
, tmp
);
1650 tcg_temp_free_i32(tmp
);
1654 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1655 (ie. an undefined instruction). */
1656 static int disas_iwmmxt_insn(DisasContext
*s
, uint32_t insn
)
1659 int rdhi
, rdlo
, rd0
, rd1
, i
;
1661 TCGv_i32 tmp
, tmp2
, tmp3
;
1663 if ((insn
& 0x0e000e00) == 0x0c000000) {
1664 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
1666 rdlo
= (insn
>> 12) & 0xf;
1667 rdhi
= (insn
>> 16) & 0xf;
1668 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
1669 iwmmxt_load_reg(cpu_V0
, wrd
);
1670 tcg_gen_extrl_i64_i32(cpu_R
[rdlo
], cpu_V0
);
1671 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
1672 tcg_gen_extrl_i64_i32(cpu_R
[rdhi
], cpu_V0
);
1673 } else { /* TMCRR */
1674 tcg_gen_concat_i32_i64(cpu_V0
, cpu_R
[rdlo
], cpu_R
[rdhi
]);
1675 iwmmxt_store_reg(cpu_V0
, wrd
);
1676 gen_op_iwmmxt_set_mup();
1681 wrd
= (insn
>> 12) & 0xf;
1682 addr
= tcg_temp_new_i32();
1683 if (gen_iwmmxt_address(s
, insn
, addr
)) {
1684 tcg_temp_free_i32(addr
);
1687 if (insn
& ARM_CP_RW_BIT
) {
1688 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
1689 tmp
= tcg_temp_new_i32();
1690 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
1691 iwmmxt_store_creg(wrd
, tmp
);
1694 if (insn
& (1 << 8)) {
1695 if (insn
& (1 << 22)) { /* WLDRD */
1696 gen_aa32_ld64(s
, cpu_M0
, addr
, get_mem_index(s
));
1698 } else { /* WLDRW wRd */
1699 tmp
= tcg_temp_new_i32();
1700 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
1703 tmp
= tcg_temp_new_i32();
1704 if (insn
& (1 << 22)) { /* WLDRH */
1705 gen_aa32_ld16u(s
, tmp
, addr
, get_mem_index(s
));
1706 } else { /* WLDRB */
1707 gen_aa32_ld8u(s
, tmp
, addr
, get_mem_index(s
));
1711 tcg_gen_extu_i32_i64(cpu_M0
, tmp
);
1712 tcg_temp_free_i32(tmp
);
1714 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1717 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
1718 tmp
= iwmmxt_load_creg(wrd
);
1719 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
1721 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1722 tmp
= tcg_temp_new_i32();
1723 if (insn
& (1 << 8)) {
1724 if (insn
& (1 << 22)) { /* WSTRD */
1725 gen_aa32_st64(s
, cpu_M0
, addr
, get_mem_index(s
));
1726 } else { /* WSTRW wRd */
1727 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1728 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
1731 if (insn
& (1 << 22)) { /* WSTRH */
1732 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1733 gen_aa32_st16(s
, tmp
, addr
, get_mem_index(s
));
1734 } else { /* WSTRB */
1735 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
1736 gen_aa32_st8(s
, tmp
, addr
, get_mem_index(s
));
1740 tcg_temp_free_i32(tmp
);
1742 tcg_temp_free_i32(addr
);
1746 if ((insn
& 0x0f000000) != 0x0e000000)
1749 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
1750 case 0x000: /* WOR */
1751 wrd
= (insn
>> 12) & 0xf;
1752 rd0
= (insn
>> 0) & 0xf;
1753 rd1
= (insn
>> 16) & 0xf;
1754 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1755 gen_op_iwmmxt_orq_M0_wRn(rd1
);
1756 gen_op_iwmmxt_setpsr_nz();
1757 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1758 gen_op_iwmmxt_set_mup();
1759 gen_op_iwmmxt_set_cup();
1761 case 0x011: /* TMCR */
1764 rd
= (insn
>> 12) & 0xf;
1765 wrd
= (insn
>> 16) & 0xf;
1767 case ARM_IWMMXT_wCID
:
1768 case ARM_IWMMXT_wCASF
:
1770 case ARM_IWMMXT_wCon
:
1771 gen_op_iwmmxt_set_cup();
1773 case ARM_IWMMXT_wCSSF
:
1774 tmp
= iwmmxt_load_creg(wrd
);
1775 tmp2
= load_reg(s
, rd
);
1776 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
1777 tcg_temp_free_i32(tmp2
);
1778 iwmmxt_store_creg(wrd
, tmp
);
1780 case ARM_IWMMXT_wCGR0
:
1781 case ARM_IWMMXT_wCGR1
:
1782 case ARM_IWMMXT_wCGR2
:
1783 case ARM_IWMMXT_wCGR3
:
1784 gen_op_iwmmxt_set_cup();
1785 tmp
= load_reg(s
, rd
);
1786 iwmmxt_store_creg(wrd
, tmp
);
1792 case 0x100: /* WXOR */
1793 wrd
= (insn
>> 12) & 0xf;
1794 rd0
= (insn
>> 0) & 0xf;
1795 rd1
= (insn
>> 16) & 0xf;
1796 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1797 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
1798 gen_op_iwmmxt_setpsr_nz();
1799 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1800 gen_op_iwmmxt_set_mup();
1801 gen_op_iwmmxt_set_cup();
1803 case 0x111: /* TMRC */
1806 rd
= (insn
>> 12) & 0xf;
1807 wrd
= (insn
>> 16) & 0xf;
1808 tmp
= iwmmxt_load_creg(wrd
);
1809 store_reg(s
, rd
, tmp
);
1811 case 0x300: /* WANDN */
1812 wrd
= (insn
>> 12) & 0xf;
1813 rd0
= (insn
>> 0) & 0xf;
1814 rd1
= (insn
>> 16) & 0xf;
1815 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1816 tcg_gen_neg_i64(cpu_M0
, cpu_M0
);
1817 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1818 gen_op_iwmmxt_setpsr_nz();
1819 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1820 gen_op_iwmmxt_set_mup();
1821 gen_op_iwmmxt_set_cup();
1823 case 0x200: /* WAND */
1824 wrd
= (insn
>> 12) & 0xf;
1825 rd0
= (insn
>> 0) & 0xf;
1826 rd1
= (insn
>> 16) & 0xf;
1827 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1828 gen_op_iwmmxt_andq_M0_wRn(rd1
);
1829 gen_op_iwmmxt_setpsr_nz();
1830 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1831 gen_op_iwmmxt_set_mup();
1832 gen_op_iwmmxt_set_cup();
1834 case 0x810: case 0xa10: /* WMADD */
1835 wrd
= (insn
>> 12) & 0xf;
1836 rd0
= (insn
>> 0) & 0xf;
1837 rd1
= (insn
>> 16) & 0xf;
1838 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1839 if (insn
& (1 << 21))
1840 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
1842 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
1843 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1844 gen_op_iwmmxt_set_mup();
1846 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1847 wrd
= (insn
>> 12) & 0xf;
1848 rd0
= (insn
>> 16) & 0xf;
1849 rd1
= (insn
>> 0) & 0xf;
1850 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1851 switch ((insn
>> 22) & 3) {
1853 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
1856 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
1859 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
1864 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1865 gen_op_iwmmxt_set_mup();
1866 gen_op_iwmmxt_set_cup();
1868 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1869 wrd
= (insn
>> 12) & 0xf;
1870 rd0
= (insn
>> 16) & 0xf;
1871 rd1
= (insn
>> 0) & 0xf;
1872 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1873 switch ((insn
>> 22) & 3) {
1875 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
1878 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
1881 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
1886 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1887 gen_op_iwmmxt_set_mup();
1888 gen_op_iwmmxt_set_cup();
1890 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1891 wrd
= (insn
>> 12) & 0xf;
1892 rd0
= (insn
>> 16) & 0xf;
1893 rd1
= (insn
>> 0) & 0xf;
1894 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1895 if (insn
& (1 << 22))
1896 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
1898 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
1899 if (!(insn
& (1 << 20)))
1900 gen_op_iwmmxt_addl_M0_wRn(wrd
);
1901 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1902 gen_op_iwmmxt_set_mup();
1904 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1905 wrd
= (insn
>> 12) & 0xf;
1906 rd0
= (insn
>> 16) & 0xf;
1907 rd1
= (insn
>> 0) & 0xf;
1908 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1909 if (insn
& (1 << 21)) {
1910 if (insn
& (1 << 20))
1911 gen_op_iwmmxt_mulshw_M0_wRn(rd1
);
1913 gen_op_iwmmxt_mulslw_M0_wRn(rd1
);
1915 if (insn
& (1 << 20))
1916 gen_op_iwmmxt_muluhw_M0_wRn(rd1
);
1918 gen_op_iwmmxt_mululw_M0_wRn(rd1
);
1920 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1921 gen_op_iwmmxt_set_mup();
1923 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1924 wrd
= (insn
>> 12) & 0xf;
1925 rd0
= (insn
>> 16) & 0xf;
1926 rd1
= (insn
>> 0) & 0xf;
1927 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1928 if (insn
& (1 << 21))
1929 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
1931 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
1932 if (!(insn
& (1 << 20))) {
1933 iwmmxt_load_reg(cpu_V1
, wrd
);
1934 tcg_gen_add_i64(cpu_M0
, cpu_M0
, cpu_V1
);
1936 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1937 gen_op_iwmmxt_set_mup();
1939 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1940 wrd
= (insn
>> 12) & 0xf;
1941 rd0
= (insn
>> 16) & 0xf;
1942 rd1
= (insn
>> 0) & 0xf;
1943 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1944 switch ((insn
>> 22) & 3) {
1946 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
1949 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
1952 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
1957 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1958 gen_op_iwmmxt_set_mup();
1959 gen_op_iwmmxt_set_cup();
1961 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1962 wrd
= (insn
>> 12) & 0xf;
1963 rd0
= (insn
>> 16) & 0xf;
1964 rd1
= (insn
>> 0) & 0xf;
1965 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1966 if (insn
& (1 << 22)) {
1967 if (insn
& (1 << 20))
1968 gen_op_iwmmxt_avgw1_M0_wRn(rd1
);
1970 gen_op_iwmmxt_avgw0_M0_wRn(rd1
);
1972 if (insn
& (1 << 20))
1973 gen_op_iwmmxt_avgb1_M0_wRn(rd1
);
1975 gen_op_iwmmxt_avgb0_M0_wRn(rd1
);
1977 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1978 gen_op_iwmmxt_set_mup();
1979 gen_op_iwmmxt_set_cup();
1981 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1982 wrd
= (insn
>> 12) & 0xf;
1983 rd0
= (insn
>> 16) & 0xf;
1984 rd1
= (insn
>> 0) & 0xf;
1985 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1986 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
1987 tcg_gen_andi_i32(tmp
, tmp
, 7);
1988 iwmmxt_load_reg(cpu_V1
, rd1
);
1989 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
1990 tcg_temp_free_i32(tmp
);
1991 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1992 gen_op_iwmmxt_set_mup();
1994 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1995 if (((insn
>> 6) & 3) == 3)
1997 rd
= (insn
>> 12) & 0xf;
1998 wrd
= (insn
>> 16) & 0xf;
1999 tmp
= load_reg(s
, rd
);
2000 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2001 switch ((insn
>> 6) & 3) {
2003 tmp2
= tcg_const_i32(0xff);
2004 tmp3
= tcg_const_i32((insn
& 7) << 3);
2007 tmp2
= tcg_const_i32(0xffff);
2008 tmp3
= tcg_const_i32((insn
& 3) << 4);
2011 tmp2
= tcg_const_i32(0xffffffff);
2012 tmp3
= tcg_const_i32((insn
& 1) << 5);
2015 TCGV_UNUSED_I32(tmp2
);
2016 TCGV_UNUSED_I32(tmp3
);
2018 gen_helper_iwmmxt_insr(cpu_M0
, cpu_M0
, tmp
, tmp2
, tmp3
);
2019 tcg_temp_free_i32(tmp3
);
2020 tcg_temp_free_i32(tmp2
);
2021 tcg_temp_free_i32(tmp
);
2022 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2023 gen_op_iwmmxt_set_mup();
2025 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2026 rd
= (insn
>> 12) & 0xf;
2027 wrd
= (insn
>> 16) & 0xf;
2028 if (rd
== 15 || ((insn
>> 22) & 3) == 3)
2030 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2031 tmp
= tcg_temp_new_i32();
2032 switch ((insn
>> 22) & 3) {
2034 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 7) << 3);
2035 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2037 tcg_gen_ext8s_i32(tmp
, tmp
);
2039 tcg_gen_andi_i32(tmp
, tmp
, 0xff);
2043 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 3) << 4);
2044 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2046 tcg_gen_ext16s_i32(tmp
, tmp
);
2048 tcg_gen_andi_i32(tmp
, tmp
, 0xffff);
2052 tcg_gen_shri_i64(cpu_M0
, cpu_M0
, (insn
& 1) << 5);
2053 tcg_gen_extrl_i64_i32(tmp
, cpu_M0
);
2056 store_reg(s
, rd
, tmp
);
2058 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
2059 if ((insn
& 0x000ff008) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2061 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2062 switch ((insn
>> 22) & 3) {
2064 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 7) << 2) + 0);
2067 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 3) << 3) + 4);
2070 tcg_gen_shri_i32(tmp
, tmp
, ((insn
& 1) << 4) + 12);
2073 tcg_gen_shli_i32(tmp
, tmp
, 28);
2075 tcg_temp_free_i32(tmp
);
2077 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
2078 if (((insn
>> 6) & 3) == 3)
2080 rd
= (insn
>> 12) & 0xf;
2081 wrd
= (insn
>> 16) & 0xf;
2082 tmp
= load_reg(s
, rd
);
2083 switch ((insn
>> 6) & 3) {
2085 gen_helper_iwmmxt_bcstb(cpu_M0
, tmp
);
2088 gen_helper_iwmmxt_bcstw(cpu_M0
, tmp
);
2091 gen_helper_iwmmxt_bcstl(cpu_M0
, tmp
);
2094 tcg_temp_free_i32(tmp
);
2095 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2096 gen_op_iwmmxt_set_mup();
2098 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
2099 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2101 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2102 tmp2
= tcg_temp_new_i32();
2103 tcg_gen_mov_i32(tmp2
, tmp
);
2104 switch ((insn
>> 22) & 3) {
2106 for (i
= 0; i
< 7; i
++) {
2107 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
2108 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2112 for (i
= 0; i
< 3; i
++) {
2113 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
2114 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2118 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
2119 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
2123 tcg_temp_free_i32(tmp2
);
2124 tcg_temp_free_i32(tmp
);
2126 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2127 wrd
= (insn
>> 12) & 0xf;
2128 rd0
= (insn
>> 16) & 0xf;
2129 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2130 switch ((insn
>> 22) & 3) {
2132 gen_helper_iwmmxt_addcb(cpu_M0
, cpu_M0
);
2135 gen_helper_iwmmxt_addcw(cpu_M0
, cpu_M0
);
2138 gen_helper_iwmmxt_addcl(cpu_M0
, cpu_M0
);
2143 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2144 gen_op_iwmmxt_set_mup();
2146 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2147 if ((insn
& 0x000ff00f) != 0x0003f000 || ((insn
>> 22) & 3) == 3)
2149 tmp
= iwmmxt_load_creg(ARM_IWMMXT_wCASF
);
2150 tmp2
= tcg_temp_new_i32();
2151 tcg_gen_mov_i32(tmp2
, tmp
);
2152 switch ((insn
>> 22) & 3) {
2154 for (i
= 0; i
< 7; i
++) {
2155 tcg_gen_shli_i32(tmp2
, tmp2
, 4);
2156 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2160 for (i
= 0; i
< 3; i
++) {
2161 tcg_gen_shli_i32(tmp2
, tmp2
, 8);
2162 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2166 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
2167 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
2171 tcg_temp_free_i32(tmp2
);
2172 tcg_temp_free_i32(tmp
);
2174 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2175 rd
= (insn
>> 12) & 0xf;
2176 rd0
= (insn
>> 16) & 0xf;
2177 if ((insn
& 0xf) != 0 || ((insn
>> 22) & 3) == 3)
2179 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2180 tmp
= tcg_temp_new_i32();
2181 switch ((insn
>> 22) & 3) {
2183 gen_helper_iwmmxt_msbb(tmp
, cpu_M0
);
2186 gen_helper_iwmmxt_msbw(tmp
, cpu_M0
);
2189 gen_helper_iwmmxt_msbl(tmp
, cpu_M0
);
2192 store_reg(s
, rd
, tmp
);
2194 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2195 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2196 wrd
= (insn
>> 12) & 0xf;
2197 rd0
= (insn
>> 16) & 0xf;
2198 rd1
= (insn
>> 0) & 0xf;
2199 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2200 switch ((insn
>> 22) & 3) {
2202 if (insn
& (1 << 21))
2203 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
2205 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
2208 if (insn
& (1 << 21))
2209 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
2211 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
2214 if (insn
& (1 << 21))
2215 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
2217 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
2222 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2223 gen_op_iwmmxt_set_mup();
2224 gen_op_iwmmxt_set_cup();
2226 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2227 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2228 wrd
= (insn
>> 12) & 0xf;
2229 rd0
= (insn
>> 16) & 0xf;
2230 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2231 switch ((insn
>> 22) & 3) {
2233 if (insn
& (1 << 21))
2234 gen_op_iwmmxt_unpacklsb_M0();
2236 gen_op_iwmmxt_unpacklub_M0();
2239 if (insn
& (1 << 21))
2240 gen_op_iwmmxt_unpacklsw_M0();
2242 gen_op_iwmmxt_unpackluw_M0();
2245 if (insn
& (1 << 21))
2246 gen_op_iwmmxt_unpacklsl_M0();
2248 gen_op_iwmmxt_unpacklul_M0();
2253 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2254 gen_op_iwmmxt_set_mup();
2255 gen_op_iwmmxt_set_cup();
2257 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2258 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2259 wrd
= (insn
>> 12) & 0xf;
2260 rd0
= (insn
>> 16) & 0xf;
2261 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2262 switch ((insn
>> 22) & 3) {
2264 if (insn
& (1 << 21))
2265 gen_op_iwmmxt_unpackhsb_M0();
2267 gen_op_iwmmxt_unpackhub_M0();
2270 if (insn
& (1 << 21))
2271 gen_op_iwmmxt_unpackhsw_M0();
2273 gen_op_iwmmxt_unpackhuw_M0();
2276 if (insn
& (1 << 21))
2277 gen_op_iwmmxt_unpackhsl_M0();
2279 gen_op_iwmmxt_unpackhul_M0();
2284 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2285 gen_op_iwmmxt_set_mup();
2286 gen_op_iwmmxt_set_cup();
2288 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2289 case 0x214: case 0x614: case 0xa14: case 0xe14:
2290 if (((insn
>> 22) & 3) == 0)
2292 wrd
= (insn
>> 12) & 0xf;
2293 rd0
= (insn
>> 16) & 0xf;
2294 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2295 tmp
= tcg_temp_new_i32();
2296 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2297 tcg_temp_free_i32(tmp
);
2300 switch ((insn
>> 22) & 3) {
2302 gen_helper_iwmmxt_srlw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2305 gen_helper_iwmmxt_srll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2308 gen_helper_iwmmxt_srlq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2311 tcg_temp_free_i32(tmp
);
2312 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2313 gen_op_iwmmxt_set_mup();
2314 gen_op_iwmmxt_set_cup();
2316 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2317 case 0x014: case 0x414: case 0x814: case 0xc14:
2318 if (((insn
>> 22) & 3) == 0)
2320 wrd
= (insn
>> 12) & 0xf;
2321 rd0
= (insn
>> 16) & 0xf;
2322 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2323 tmp
= tcg_temp_new_i32();
2324 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2325 tcg_temp_free_i32(tmp
);
2328 switch ((insn
>> 22) & 3) {
2330 gen_helper_iwmmxt_sraw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2333 gen_helper_iwmmxt_sral(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2336 gen_helper_iwmmxt_sraq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2339 tcg_temp_free_i32(tmp
);
2340 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2341 gen_op_iwmmxt_set_mup();
2342 gen_op_iwmmxt_set_cup();
2344 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2345 case 0x114: case 0x514: case 0x914: case 0xd14:
2346 if (((insn
>> 22) & 3) == 0)
2348 wrd
= (insn
>> 12) & 0xf;
2349 rd0
= (insn
>> 16) & 0xf;
2350 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2351 tmp
= tcg_temp_new_i32();
2352 if (gen_iwmmxt_shift(insn
, 0xff, tmp
)) {
2353 tcg_temp_free_i32(tmp
);
2356 switch ((insn
>> 22) & 3) {
2358 gen_helper_iwmmxt_sllw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2361 gen_helper_iwmmxt_slll(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2364 gen_helper_iwmmxt_sllq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2367 tcg_temp_free_i32(tmp
);
2368 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2369 gen_op_iwmmxt_set_mup();
2370 gen_op_iwmmxt_set_cup();
2372 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2373 case 0x314: case 0x714: case 0xb14: case 0xf14:
2374 if (((insn
>> 22) & 3) == 0)
2376 wrd
= (insn
>> 12) & 0xf;
2377 rd0
= (insn
>> 16) & 0xf;
2378 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2379 tmp
= tcg_temp_new_i32();
2380 switch ((insn
>> 22) & 3) {
2382 if (gen_iwmmxt_shift(insn
, 0xf, tmp
)) {
2383 tcg_temp_free_i32(tmp
);
2386 gen_helper_iwmmxt_rorw(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2389 if (gen_iwmmxt_shift(insn
, 0x1f, tmp
)) {
2390 tcg_temp_free_i32(tmp
);
2393 gen_helper_iwmmxt_rorl(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2396 if (gen_iwmmxt_shift(insn
, 0x3f, tmp
)) {
2397 tcg_temp_free_i32(tmp
);
2400 gen_helper_iwmmxt_rorq(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2403 tcg_temp_free_i32(tmp
);
2404 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2405 gen_op_iwmmxt_set_mup();
2406 gen_op_iwmmxt_set_cup();
2408 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2409 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2410 wrd
= (insn
>> 12) & 0xf;
2411 rd0
= (insn
>> 16) & 0xf;
2412 rd1
= (insn
>> 0) & 0xf;
2413 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2414 switch ((insn
>> 22) & 3) {
2416 if (insn
& (1 << 21))
2417 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
2419 gen_op_iwmmxt_minub_M0_wRn(rd1
);
2422 if (insn
& (1 << 21))
2423 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
2425 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
2428 if (insn
& (1 << 21))
2429 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
2431 gen_op_iwmmxt_minul_M0_wRn(rd1
);
2436 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2437 gen_op_iwmmxt_set_mup();
2439 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2440 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2441 wrd
= (insn
>> 12) & 0xf;
2442 rd0
= (insn
>> 16) & 0xf;
2443 rd1
= (insn
>> 0) & 0xf;
2444 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2445 switch ((insn
>> 22) & 3) {
2447 if (insn
& (1 << 21))
2448 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
2450 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
2453 if (insn
& (1 << 21))
2454 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
2456 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
2459 if (insn
& (1 << 21))
2460 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
2462 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
2467 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2468 gen_op_iwmmxt_set_mup();
2470 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2471 case 0x402: case 0x502: case 0x602: case 0x702:
2472 wrd
= (insn
>> 12) & 0xf;
2473 rd0
= (insn
>> 16) & 0xf;
2474 rd1
= (insn
>> 0) & 0xf;
2475 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2476 tmp
= tcg_const_i32((insn
>> 20) & 3);
2477 iwmmxt_load_reg(cpu_V1
, rd1
);
2478 gen_helper_iwmmxt_align(cpu_M0
, cpu_M0
, cpu_V1
, tmp
);
2479 tcg_temp_free_i32(tmp
);
2480 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2481 gen_op_iwmmxt_set_mup();
2483 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2484 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2485 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2486 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2487 wrd
= (insn
>> 12) & 0xf;
2488 rd0
= (insn
>> 16) & 0xf;
2489 rd1
= (insn
>> 0) & 0xf;
2490 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2491 switch ((insn
>> 20) & 0xf) {
2493 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
2496 gen_op_iwmmxt_subub_M0_wRn(rd1
);
2499 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
2502 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
2505 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
2508 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
2511 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
2514 gen_op_iwmmxt_subul_M0_wRn(rd1
);
2517 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
2522 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2523 gen_op_iwmmxt_set_mup();
2524 gen_op_iwmmxt_set_cup();
2526 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2527 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2528 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2529 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2530 wrd
= (insn
>> 12) & 0xf;
2531 rd0
= (insn
>> 16) & 0xf;
2532 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2533 tmp
= tcg_const_i32(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
2534 gen_helper_iwmmxt_shufh(cpu_M0
, cpu_env
, cpu_M0
, tmp
);
2535 tcg_temp_free_i32(tmp
);
2536 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2537 gen_op_iwmmxt_set_mup();
2538 gen_op_iwmmxt_set_cup();
2540 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2541 case 0x418: case 0x518: case 0x618: case 0x718:
2542 case 0x818: case 0x918: case 0xa18: case 0xb18:
2543 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2544 wrd
= (insn
>> 12) & 0xf;
2545 rd0
= (insn
>> 16) & 0xf;
2546 rd1
= (insn
>> 0) & 0xf;
2547 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2548 switch ((insn
>> 20) & 0xf) {
2550 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
2553 gen_op_iwmmxt_addub_M0_wRn(rd1
);
2556 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
2559 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
2562 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
2565 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
2568 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
2571 gen_op_iwmmxt_addul_M0_wRn(rd1
);
2574 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
2579 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2580 gen_op_iwmmxt_set_mup();
2581 gen_op_iwmmxt_set_cup();
2583 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2584 case 0x408: case 0x508: case 0x608: case 0x708:
2585 case 0x808: case 0x908: case 0xa08: case 0xb08:
2586 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2587 if (!(insn
& (1 << 20)) || ((insn
>> 22) & 3) == 0)
2589 wrd
= (insn
>> 12) & 0xf;
2590 rd0
= (insn
>> 16) & 0xf;
2591 rd1
= (insn
>> 0) & 0xf;
2592 gen_op_iwmmxt_movq_M0_wRn(rd0
);
2593 switch ((insn
>> 22) & 3) {
2595 if (insn
& (1 << 21))
2596 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
2598 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
2601 if (insn
& (1 << 21))
2602 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
2604 gen_op_iwmmxt_packul_M0_wRn(rd1
);
2607 if (insn
& (1 << 21))
2608 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
2610 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
2613 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2614 gen_op_iwmmxt_set_mup();
2615 gen_op_iwmmxt_set_cup();
2617 case 0x201: case 0x203: case 0x205: case 0x207:
2618 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2619 case 0x211: case 0x213: case 0x215: case 0x217:
2620 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2621 wrd
= (insn
>> 5) & 0xf;
2622 rd0
= (insn
>> 12) & 0xf;
2623 rd1
= (insn
>> 0) & 0xf;
2624 if (rd0
== 0xf || rd1
== 0xf)
2626 gen_op_iwmmxt_movq_M0_wRn(wrd
);
2627 tmp
= load_reg(s
, rd0
);
2628 tmp2
= load_reg(s
, rd1
);
2629 switch ((insn
>> 16) & 0xf) {
2630 case 0x0: /* TMIA */
2631 gen_helper_iwmmxt_muladdsl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2633 case 0x8: /* TMIAPH */
2634 gen_helper_iwmmxt_muladdsw(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2636 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2637 if (insn
& (1 << 16))
2638 tcg_gen_shri_i32(tmp
, tmp
, 16);
2639 if (insn
& (1 << 17))
2640 tcg_gen_shri_i32(tmp2
, tmp2
, 16);
2641 gen_helper_iwmmxt_muladdswl(cpu_M0
, cpu_M0
, tmp
, tmp2
);
2644 tcg_temp_free_i32(tmp2
);
2645 tcg_temp_free_i32(tmp
);
2648 tcg_temp_free_i32(tmp2
);
2649 tcg_temp_free_i32(tmp
);
2650 gen_op_iwmmxt_movq_wRn_M0(wrd
);
2651 gen_op_iwmmxt_set_mup();
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    } \
} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
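/* Worked example (illustrative, not from the original source): for a
 * double-precision destination the register number is Vd | (D << 4), so
 * instruction fields Vd=0b1010 with D=1 decode to d26; the single-precision
 * variants put the extra bit at the bottom instead, so the same fields
 * decode to s21.
 */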
/* Move between integer and VFP cores.  */
static TCGv_i32 gen_vfp_mrs(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv_i32 tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_u8(TCGv_i32 var, int shift)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_ext8u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
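/* Illustrative note: these helpers broadcast a narrow value across a 32-bit
 * TCG value, e.g. gen_neon_dup_low16() turns 0x1234abcd into 0xabcdabcd and
 * gen_neon_dup_high16() turns it into 0x12341234, matching the code above.
 */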
static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv_i32 tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
2817 static int handle_vsel(uint32_t insn
, uint32_t rd
, uint32_t rn
, uint32_t rm
,
2820 uint32_t cc
= extract32(insn
, 20, 2);
2823 TCGv_i64 frn
, frm
, dest
;
2824 TCGv_i64 tmp
, zero
, zf
, nf
, vf
;
2826 zero
= tcg_const_i64(0);
2828 frn
= tcg_temp_new_i64();
2829 frm
= tcg_temp_new_i64();
2830 dest
= tcg_temp_new_i64();
2832 zf
= tcg_temp_new_i64();
2833 nf
= tcg_temp_new_i64();
2834 vf
= tcg_temp_new_i64();
2836 tcg_gen_extu_i32_i64(zf
, cpu_ZF
);
2837 tcg_gen_ext_i32_i64(nf
, cpu_NF
);
2838 tcg_gen_ext_i32_i64(vf
, cpu_VF
);
2840 tcg_gen_ld_f64(frn
, cpu_env
, vfp_reg_offset(dp
, rn
));
2841 tcg_gen_ld_f64(frm
, cpu_env
, vfp_reg_offset(dp
, rm
));
2844 tcg_gen_movcond_i64(TCG_COND_EQ
, dest
, zf
, zero
,
2848 tcg_gen_movcond_i64(TCG_COND_LT
, dest
, vf
, zero
,
2851 case 2: /* ge: N == V -> N ^ V == 0 */
2852 tmp
= tcg_temp_new_i64();
2853 tcg_gen_xor_i64(tmp
, vf
, nf
);
2854 tcg_gen_movcond_i64(TCG_COND_GE
, dest
, tmp
, zero
,
2856 tcg_temp_free_i64(tmp
);
2858 case 3: /* gt: !Z && N == V */
2859 tcg_gen_movcond_i64(TCG_COND_NE
, dest
, zf
, zero
,
2861 tmp
= tcg_temp_new_i64();
2862 tcg_gen_xor_i64(tmp
, vf
, nf
);
2863 tcg_gen_movcond_i64(TCG_COND_GE
, dest
, tmp
, zero
,
2865 tcg_temp_free_i64(tmp
);
2868 tcg_gen_st_f64(dest
, cpu_env
, vfp_reg_offset(dp
, rd
));
2869 tcg_temp_free_i64(frn
);
2870 tcg_temp_free_i64(frm
);
2871 tcg_temp_free_i64(dest
);
2873 tcg_temp_free_i64(zf
);
2874 tcg_temp_free_i64(nf
);
2875 tcg_temp_free_i64(vf
);
2877 tcg_temp_free_i64(zero
);
2879 TCGv_i32 frn
, frm
, dest
;
2882 zero
= tcg_const_i32(0);
2884 frn
= tcg_temp_new_i32();
2885 frm
= tcg_temp_new_i32();
2886 dest
= tcg_temp_new_i32();
2887 tcg_gen_ld_f32(frn
, cpu_env
, vfp_reg_offset(dp
, rn
));
2888 tcg_gen_ld_f32(frm
, cpu_env
, vfp_reg_offset(dp
, rm
));
2891 tcg_gen_movcond_i32(TCG_COND_EQ
, dest
, cpu_ZF
, zero
,
2895 tcg_gen_movcond_i32(TCG_COND_LT
, dest
, cpu_VF
, zero
,
2898 case 2: /* ge: N == V -> N ^ V == 0 */
2899 tmp
= tcg_temp_new_i32();
2900 tcg_gen_xor_i32(tmp
, cpu_VF
, cpu_NF
);
2901 tcg_gen_movcond_i32(TCG_COND_GE
, dest
, tmp
, zero
,
2903 tcg_temp_free_i32(tmp
);
2905 case 3: /* gt: !Z && N == V */
2906 tcg_gen_movcond_i32(TCG_COND_NE
, dest
, cpu_ZF
, zero
,
2908 tmp
= tcg_temp_new_i32();
2909 tcg_gen_xor_i32(tmp
, cpu_VF
, cpu_NF
);
2910 tcg_gen_movcond_i32(TCG_COND_GE
, dest
, tmp
, zero
,
2912 tcg_temp_free_i32(tmp
);
2915 tcg_gen_st_f32(dest
, cpu_env
, vfp_reg_offset(dp
, rd
));
2916 tcg_temp_free_i32(frn
);
2917 tcg_temp_free_i32(frm
);
2918 tcg_temp_free_i32(dest
);
2920 tcg_temp_free_i32(zero
);
2926 static int handle_vminmaxnm(uint32_t insn
, uint32_t rd
, uint32_t rn
,
2927 uint32_t rm
, uint32_t dp
)
2929 uint32_t vmin
= extract32(insn
, 6, 1);
2930 TCGv_ptr fpst
= get_fpstatus_ptr(0);
2933 TCGv_i64 frn
, frm
, dest
;
2935 frn
= tcg_temp_new_i64();
2936 frm
= tcg_temp_new_i64();
2937 dest
= tcg_temp_new_i64();
2939 tcg_gen_ld_f64(frn
, cpu_env
, vfp_reg_offset(dp
, rn
));
2940 tcg_gen_ld_f64(frm
, cpu_env
, vfp_reg_offset(dp
, rm
));
2942 gen_helper_vfp_minnumd(dest
, frn
, frm
, fpst
);
2944 gen_helper_vfp_maxnumd(dest
, frn
, frm
, fpst
);
2946 tcg_gen_st_f64(dest
, cpu_env
, vfp_reg_offset(dp
, rd
));
2947 tcg_temp_free_i64(frn
);
2948 tcg_temp_free_i64(frm
);
2949 tcg_temp_free_i64(dest
);
2951 TCGv_i32 frn
, frm
, dest
;
2953 frn
= tcg_temp_new_i32();
2954 frm
= tcg_temp_new_i32();
2955 dest
= tcg_temp_new_i32();
2957 tcg_gen_ld_f32(frn
, cpu_env
, vfp_reg_offset(dp
, rn
));
2958 tcg_gen_ld_f32(frm
, cpu_env
, vfp_reg_offset(dp
, rm
));
2960 gen_helper_vfp_minnums(dest
, frn
, frm
, fpst
);
2962 gen_helper_vfp_maxnums(dest
, frn
, frm
, fpst
);
2964 tcg_gen_st_f32(dest
, cpu_env
, vfp_reg_offset(dp
, rd
));
2965 tcg_temp_free_i32(frn
);
2966 tcg_temp_free_i32(frm
);
2967 tcg_temp_free_i32(dest
);
2970 tcg_temp_free_ptr(fpst
);
2974 static int handle_vrint(uint32_t insn
, uint32_t rd
, uint32_t rm
, uint32_t dp
,
2977 TCGv_ptr fpst
= get_fpstatus_ptr(0);
2980 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rounding
));
2981 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
2986 tcg_op
= tcg_temp_new_i64();
2987 tcg_res
= tcg_temp_new_i64();
2988 tcg_gen_ld_f64(tcg_op
, cpu_env
, vfp_reg_offset(dp
, rm
));
2989 gen_helper_rintd(tcg_res
, tcg_op
, fpst
);
2990 tcg_gen_st_f64(tcg_res
, cpu_env
, vfp_reg_offset(dp
, rd
));
2991 tcg_temp_free_i64(tcg_op
);
2992 tcg_temp_free_i64(tcg_res
);
2996 tcg_op
= tcg_temp_new_i32();
2997 tcg_res
= tcg_temp_new_i32();
2998 tcg_gen_ld_f32(tcg_op
, cpu_env
, vfp_reg_offset(dp
, rm
));
2999 gen_helper_rints(tcg_res
, tcg_op
, fpst
);
3000 tcg_gen_st_f32(tcg_res
, cpu_env
, vfp_reg_offset(dp
, rd
));
3001 tcg_temp_free_i32(tcg_op
);
3002 tcg_temp_free_i32(tcg_res
);
3005 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
3006 tcg_temp_free_i32(tcg_rmode
);
3008 tcg_temp_free_ptr(fpst
);
3012 static int handle_vcvt(uint32_t insn
, uint32_t rd
, uint32_t rm
, uint32_t dp
,
3015 bool is_signed
= extract32(insn
, 7, 1);
3016 TCGv_ptr fpst
= get_fpstatus_ptr(0);
3017 TCGv_i32 tcg_rmode
, tcg_shift
;
3019 tcg_shift
= tcg_const_i32(0);
3021 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rounding
));
3022 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
3025 TCGv_i64 tcg_double
, tcg_res
;
3027 /* Rd is encoded as a single precision register even when the source
3028 * is double precision.
3030 rd
= ((rd
<< 1) & 0x1e) | ((rd
>> 4) & 0x1);
3031 tcg_double
= tcg_temp_new_i64();
3032 tcg_res
= tcg_temp_new_i64();
3033 tcg_tmp
= tcg_temp_new_i32();
3034 tcg_gen_ld_f64(tcg_double
, cpu_env
, vfp_reg_offset(1, rm
));
3036 gen_helper_vfp_tosld(tcg_res
, tcg_double
, tcg_shift
, fpst
);
3038 gen_helper_vfp_tould(tcg_res
, tcg_double
, tcg_shift
, fpst
);
3040 tcg_gen_extrl_i64_i32(tcg_tmp
, tcg_res
);
3041 tcg_gen_st_f32(tcg_tmp
, cpu_env
, vfp_reg_offset(0, rd
));
3042 tcg_temp_free_i32(tcg_tmp
);
3043 tcg_temp_free_i64(tcg_res
);
3044 tcg_temp_free_i64(tcg_double
);
3046 TCGv_i32 tcg_single
, tcg_res
;
3047 tcg_single
= tcg_temp_new_i32();
3048 tcg_res
= tcg_temp_new_i32();
3049 tcg_gen_ld_f32(tcg_single
, cpu_env
, vfp_reg_offset(0, rm
));
3051 gen_helper_vfp_tosls(tcg_res
, tcg_single
, tcg_shift
, fpst
);
3053 gen_helper_vfp_touls(tcg_res
, tcg_single
, tcg_shift
, fpst
);
3055 tcg_gen_st_f32(tcg_res
, cpu_env
, vfp_reg_offset(0, rd
));
3056 tcg_temp_free_i32(tcg_res
);
3057 tcg_temp_free_i32(tcg_single
);
3060 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
3061 tcg_temp_free_i32(tcg_rmode
);
3063 tcg_temp_free_i32(tcg_shift
);
3065 tcg_temp_free_ptr(fpst
);
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return 1;
    }

    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    if ((insn & 0x0f800e50) == 0x0e000a00) {
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}
3115 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3116 (ie. an undefined instruction). */
3117 static int disas_vfp_insn(DisasContext
*s
, uint32_t insn
)
3119 uint32_t rd
, rn
, rm
, op
, i
, n
, offset
, delta_d
, delta_m
, bank_mask
;
3125 if (!arm_dc_feature(s
, ARM_FEATURE_VFP
)) {
3129 /* FIXME: this access check should not take precedence over UNDEF
3130 * for invalid encodings; we will generate incorrect syndrome information
3131 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3133 if (s
->fp_excp_el
) {
3134 gen_exception_insn(s
, 4, EXCP_UDEF
,
3135 syn_fp_access_trap(1, 0xe, false), s
->fp_excp_el
);
3139 if (!s
->vfp_enabled
) {
3140 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3141 if ((insn
& 0x0fe00fff) != 0x0ee00a10)
3143 rn
= (insn
>> 16) & 0xf;
3144 if (rn
!= ARM_VFP_FPSID
&& rn
!= ARM_VFP_FPEXC
&& rn
!= ARM_VFP_MVFR2
3145 && rn
!= ARM_VFP_MVFR1
&& rn
!= ARM_VFP_MVFR0
) {
3150 if (extract32(insn
, 28, 4) == 0xf) {
3151 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3152 * only used in v8 and above.
3154 return disas_vfp_v8_insn(s
, insn
);
3157 dp
= ((insn
& 0xf00) == 0xb00);
3158 switch ((insn
>> 24) & 0xf) {
3160 if (insn
& (1 << 4)) {
3161 /* single register transfer */
3162 rd
= (insn
>> 12) & 0xf;
3167 VFP_DREG_N(rn
, insn
);
3170 if (insn
& 0x00c00060
3171 && !arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
3175 pass
= (insn
>> 21) & 1;
3176 if (insn
& (1 << 22)) {
3178 offset
= ((insn
>> 5) & 3) * 8;
3179 } else if (insn
& (1 << 5)) {
3181 offset
= (insn
& (1 << 6)) ? 16 : 0;
3186 if (insn
& ARM_CP_RW_BIT
) {
3188 tmp
= neon_load_reg(rn
, pass
);
3192 tcg_gen_shri_i32(tmp
, tmp
, offset
);
3193 if (insn
& (1 << 23))
3199 if (insn
& (1 << 23)) {
3201 tcg_gen_shri_i32(tmp
, tmp
, 16);
3207 tcg_gen_sari_i32(tmp
, tmp
, 16);
3216 store_reg(s
, rd
, tmp
);
3219 tmp
= load_reg(s
, rd
);
3220 if (insn
& (1 << 23)) {
3223 gen_neon_dup_u8(tmp
, 0);
3224 } else if (size
== 1) {
3225 gen_neon_dup_low16(tmp
);
3227 for (n
= 0; n
<= pass
* 2; n
++) {
3228 tmp2
= tcg_temp_new_i32();
3229 tcg_gen_mov_i32(tmp2
, tmp
);
3230 neon_store_reg(rn
, n
, tmp2
);
3232 neon_store_reg(rn
, n
, tmp
);
3237 tmp2
= neon_load_reg(rn
, pass
);
3238 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, offset
, 8);
3239 tcg_temp_free_i32(tmp2
);
3242 tmp2
= neon_load_reg(rn
, pass
);
3243 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, offset
, 16);
3244 tcg_temp_free_i32(tmp2
);
3249 neon_store_reg(rn
, pass
, tmp
);
3253 if ((insn
& 0x6f) != 0x00)
3255 rn
= VFP_SREG_N(insn
);
3256 if (insn
& ARM_CP_RW_BIT
) {
3258 if (insn
& (1 << 21)) {
3259 /* system register */
3264 /* VFP2 allows access to FSID from userspace.
3265 VFP3 restricts all id registers to privileged
3268 && arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3271 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3276 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3278 case ARM_VFP_FPINST
:
3279 case ARM_VFP_FPINST2
:
3280 /* Not present in VFP3. */
3282 || arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3285 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3289 tmp
= load_cpu_field(vfp
.xregs
[ARM_VFP_FPSCR
]);
3290 tcg_gen_andi_i32(tmp
, tmp
, 0xf0000000);
3292 tmp
= tcg_temp_new_i32();
3293 gen_helper_vfp_get_fpscr(tmp
, cpu_env
);
3297 if (!arm_dc_feature(s
, ARM_FEATURE_V8
)) {
3304 || !arm_dc_feature(s
, ARM_FEATURE_MVFR
)) {
3307 tmp
= load_cpu_field(vfp
.xregs
[rn
]);
3313 gen_mov_F0_vreg(0, rn
);
3314 tmp
= gen_vfp_mrs();
3317 /* Set the 4 flag bits in the CPSR. */
3319 tcg_temp_free_i32(tmp
);
3321 store_reg(s
, rd
, tmp
);
3325 if (insn
& (1 << 21)) {
3327 /* system register */
3332 /* Writes are ignored. */
3335 tmp
= load_reg(s
, rd
);
3336 gen_helper_vfp_set_fpscr(cpu_env
, tmp
);
3337 tcg_temp_free_i32(tmp
);
3343 /* TODO: VFP subarchitecture support.
3344 * For now, keep the EN bit only */
3345 tmp
= load_reg(s
, rd
);
3346 tcg_gen_andi_i32(tmp
, tmp
, 1 << 30);
3347 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
3350 case ARM_VFP_FPINST
:
3351 case ARM_VFP_FPINST2
:
3355 tmp
= load_reg(s
, rd
);
3356 store_cpu_field(tmp
, vfp
.xregs
[rn
]);
3362 tmp
= load_reg(s
, rd
);
3364 gen_mov_vreg_F0(0, rn
);
3369 /* data processing */
3370 /* The opcode is in bits 23, 21, 20 and 6. */
3371 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
3375 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
3377 /* rn is register number */
3378 VFP_DREG_N(rn
, insn
);
3381 if (op
== 15 && (rn
== 15 || ((rn
& 0x1c) == 0x18) ||
3382 ((rn
& 0x1e) == 0x6))) {
3383 /* Integer or single/half precision destination. */
3384 rd
= VFP_SREG_D(insn
);
3386 VFP_DREG_D(rd
, insn
);
3389 (((rn
& 0x1c) == 0x10) || ((rn
& 0x14) == 0x14) ||
3390 ((rn
& 0x1e) == 0x4))) {
3391 /* VCVT from int or half precision is always from S reg
3392 * regardless of dp bit. VCVT with immediate frac_bits
3393 * has same format as SREG_M.
3395 rm
= VFP_SREG_M(insn
);
3397 VFP_DREG_M(rm
, insn
);
3400 rn
= VFP_SREG_N(insn
);
3401 if (op
== 15 && rn
== 15) {
3402 /* Double precision destination. */
3403 VFP_DREG_D(rd
, insn
);
3405 rd
= VFP_SREG_D(insn
);
3407 /* NB that we implicitly rely on the encoding for the frac_bits
3408 * in VCVT of fixed to float being the same as that of an SREG_M
3410 rm
= VFP_SREG_M(insn
);
3413 veclen
= s
->vec_len
;
3414 if (op
== 15 && rn
> 3)
3417 /* Shut up compiler warnings. */
3428 /* Figure out what type of vector operation this is. */
3429 if ((rd
& bank_mask
) == 0) {
3434 delta_d
= (s
->vec_stride
>> 1) + 1;
3436 delta_d
= s
->vec_stride
+ 1;
3438 if ((rm
& bank_mask
) == 0) {
3439 /* mixed scalar/vector */
3448 /* Load the initial operands. */
3453 /* Integer source */
3454 gen_mov_F0_vreg(0, rm
);
3459 gen_mov_F0_vreg(dp
, rd
);
3460 gen_mov_F1_vreg(dp
, rm
);
3464 /* Compare with zero */
3465 gen_mov_F0_vreg(dp
, rd
);
3476 /* Source and destination the same. */
3477 gen_mov_F0_vreg(dp
, rd
);
3483 /* VCVTB, VCVTT: only present with the halfprec extension
3484 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3485 * (we choose to UNDEF)
3487 if ((dp
&& !arm_dc_feature(s
, ARM_FEATURE_V8
)) ||
3488 !arm_dc_feature(s
, ARM_FEATURE_VFP_FP16
)) {
3491 if (!extract32(rn
, 1, 1)) {
3492 /* Half precision source. */
3493 gen_mov_F0_vreg(0, rm
);
3496 /* Otherwise fall through */
3498 /* One source operand. */
3499 gen_mov_F0_vreg(dp
, rm
);
3503 /* Two source operands. */
3504 gen_mov_F0_vreg(dp
, rn
);
3505 gen_mov_F1_vreg(dp
, rm
);
3509 /* Perform the calculation. */
3511 case 0: /* VMLA: fd + (fn * fm) */
3512 /* Note that order of inputs to the add matters for NaNs */
3514 gen_mov_F0_vreg(dp
, rd
);
3517 case 1: /* VMLS: fd + -(fn * fm) */
3520 gen_mov_F0_vreg(dp
, rd
);
3523 case 2: /* VNMLS: -fd + (fn * fm) */
3524 /* Note that it isn't valid to replace (-A + B) with (B - A)
3525 * or similar plausible looking simplifications
3526 * because this will give wrong results for NaNs.
3529 gen_mov_F0_vreg(dp
, rd
);
3533 case 3: /* VNMLA: -fd + -(fn * fm) */
3536 gen_mov_F0_vreg(dp
, rd
);
3540 case 4: /* mul: fn * fm */
3543 case 5: /* nmul: -(fn * fm) */
3547 case 6: /* add: fn + fm */
3550 case 7: /* sub: fn - fm */
3553 case 8: /* div: fn / fm */
3556 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3557 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3558 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3559 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3560 /* These are fused multiply-add, and must be done as one
3561 * floating point operation with no rounding between the
3562 * multiplication and addition steps.
3563 * NB that doing the negations here as separate steps is
3564 * correct : an input NaN should come out with its sign bit
3565 * flipped if it is a negated-input.
3567 if (!arm_dc_feature(s
, ARM_FEATURE_VFP4
)) {
3575 gen_helper_vfp_negd(cpu_F0d
, cpu_F0d
);
3577 frd
= tcg_temp_new_i64();
3578 tcg_gen_ld_f64(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
3581 gen_helper_vfp_negd(frd
, frd
);
3583 fpst
= get_fpstatus_ptr(0);
3584 gen_helper_vfp_muladdd(cpu_F0d
, cpu_F0d
,
3585 cpu_F1d
, frd
, fpst
);
3586 tcg_temp_free_ptr(fpst
);
3587 tcg_temp_free_i64(frd
);
3593 gen_helper_vfp_negs(cpu_F0s
, cpu_F0s
);
3595 frd
= tcg_temp_new_i32();
3596 tcg_gen_ld_f32(frd
, cpu_env
, vfp_reg_offset(dp
, rd
));
3598 gen_helper_vfp_negs(frd
, frd
);
3600 fpst
= get_fpstatus_ptr(0);
3601 gen_helper_vfp_muladds(cpu_F0s
, cpu_F0s
,
3602 cpu_F1s
, frd
, fpst
);
3603 tcg_temp_free_ptr(fpst
);
3604 tcg_temp_free_i32(frd
);
3607 case 14: /* fconst */
3608 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3612 n
= (insn
<< 12) & 0x80000000;
3613 i
= ((insn
>> 12) & 0x70) | (insn
& 0xf);
3620 tcg_gen_movi_i64(cpu_F0d
, ((uint64_t)n
) << 32);
3627 tcg_gen_movi_i32(cpu_F0s
, n
);
3630 case 15: /* extension space */
3644 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3645 tmp
= gen_vfp_mrs();
3646 tcg_gen_ext16u_i32(tmp
, tmp
);
3648 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d
, tmp
,
3651 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
,
3654 tcg_temp_free_i32(tmp
);
3656 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3657 tmp
= gen_vfp_mrs();
3658 tcg_gen_shri_i32(tmp
, tmp
, 16);
3660 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d
, tmp
,
3663 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s
, tmp
,
3666 tcg_temp_free_i32(tmp
);
3668 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3669 tmp
= tcg_temp_new_i32();
3671 gen_helper_vfp_fcvt_f64_to_f16(tmp
, cpu_F0d
,
3674 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
,
3677 gen_mov_F0_vreg(0, rd
);
3678 tmp2
= gen_vfp_mrs();
3679 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
3680 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3681 tcg_temp_free_i32(tmp2
);
3684 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3685 tmp
= tcg_temp_new_i32();
3687 gen_helper_vfp_fcvt_f64_to_f16(tmp
, cpu_F0d
,
3690 gen_helper_vfp_fcvt_f32_to_f16(tmp
, cpu_F0s
,
3693 tcg_gen_shli_i32(tmp
, tmp
, 16);
3694 gen_mov_F0_vreg(0, rd
);
3695 tmp2
= gen_vfp_mrs();
3696 tcg_gen_ext16u_i32(tmp2
, tmp2
);
3697 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
3698 tcg_temp_free_i32(tmp2
);
3710 case 11: /* cmpez */
3714 case 12: /* vrintr */
3716 TCGv_ptr fpst
= get_fpstatus_ptr(0);
3718 gen_helper_rintd(cpu_F0d
, cpu_F0d
, fpst
);
3720 gen_helper_rints(cpu_F0s
, cpu_F0s
, fpst
);
3722 tcg_temp_free_ptr(fpst
);
3725 case 13: /* vrintz */
3727 TCGv_ptr fpst
= get_fpstatus_ptr(0);
3729 tcg_rmode
= tcg_const_i32(float_round_to_zero
);
3730 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
3732 gen_helper_rintd(cpu_F0d
, cpu_F0d
, fpst
);
3734 gen_helper_rints(cpu_F0s
, cpu_F0s
, fpst
);
3736 gen_helper_set_rmode(tcg_rmode
, tcg_rmode
, cpu_env
);
3737 tcg_temp_free_i32(tcg_rmode
);
3738 tcg_temp_free_ptr(fpst
);
3741 case 14: /* vrintx */
3743 TCGv_ptr fpst
= get_fpstatus_ptr(0);
3745 gen_helper_rintd_exact(cpu_F0d
, cpu_F0d
, fpst
);
3747 gen_helper_rints_exact(cpu_F0s
, cpu_F0s
, fpst
);
3749 tcg_temp_free_ptr(fpst
);
3752 case 15: /* single<->double conversion */
3754 gen_helper_vfp_fcvtsd(cpu_F0s
, cpu_F0d
, cpu_env
);
3756 gen_helper_vfp_fcvtds(cpu_F0d
, cpu_F0s
, cpu_env
);
3758 case 16: /* fuito */
3759 gen_vfp_uito(dp
, 0);
3761 case 17: /* fsito */
3762 gen_vfp_sito(dp
, 0);
3764 case 20: /* fshto */
3765 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3768 gen_vfp_shto(dp
, 16 - rm
, 0);
3770 case 21: /* fslto */
3771 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3774 gen_vfp_slto(dp
, 32 - rm
, 0);
3776 case 22: /* fuhto */
3777 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3780 gen_vfp_uhto(dp
, 16 - rm
, 0);
3782 case 23: /* fulto */
3783 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3786 gen_vfp_ulto(dp
, 32 - rm
, 0);
3788 case 24: /* ftoui */
3789 gen_vfp_toui(dp
, 0);
3791 case 25: /* ftouiz */
3792 gen_vfp_touiz(dp
, 0);
3794 case 26: /* ftosi */
3795 gen_vfp_tosi(dp
, 0);
3797 case 27: /* ftosiz */
3798 gen_vfp_tosiz(dp
, 0);
3800 case 28: /* ftosh */
3801 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3804 gen_vfp_tosh(dp
, 16 - rm
, 0);
3806 case 29: /* ftosl */
3807 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3810 gen_vfp_tosl(dp
, 32 - rm
, 0);
3812 case 30: /* ftouh */
3813 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3816 gen_vfp_touh(dp
, 16 - rm
, 0);
3818 case 31: /* ftoul */
3819 if (!arm_dc_feature(s
, ARM_FEATURE_VFP3
)) {
3822 gen_vfp_toul(dp
, 32 - rm
, 0);
3824 default: /* undefined */
3828 default: /* undefined */
3832 /* Write back the result. */
3833 if (op
== 15 && (rn
>= 8 && rn
<= 11)) {
3834 /* Comparison, do nothing. */
3835 } else if (op
== 15 && dp
&& ((rn
& 0x1c) == 0x18 ||
3836 (rn
& 0x1e) == 0x6)) {
3837 /* VCVT double to int: always integer result.
3838 * VCVT double to half precision is always a single
3841 gen_mov_vreg_F0(0, rd
);
3842 } else if (op
== 15 && rn
== 15) {
3844 gen_mov_vreg_F0(!dp
, rd
);
3846 gen_mov_vreg_F0(dp
, rd
);
3849 /* break out of the loop if we have finished */
3853 if (op
== 15 && delta_m
== 0) {
3854 /* single source one-many */
3856 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3858 gen_mov_vreg_F0(dp
, rd
);
3862 /* Setup the next operands. */
3864 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
3868 /* One source operand. */
3869 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3871 gen_mov_F0_vreg(dp
, rm
);
3873 /* Two source operands. */
3874 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
3876 gen_mov_F0_vreg(dp
, rn
);
3878 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
3880 gen_mov_F1_vreg(dp
, rm
);
3888 if ((insn
& 0x03e00000) == 0x00400000) {
3889 /* two-register transfer */
3890 rn
= (insn
>> 16) & 0xf;
3891 rd
= (insn
>> 12) & 0xf;
3893 VFP_DREG_M(rm
, insn
);
3895 rm
= VFP_SREG_M(insn
);
3898 if (insn
& ARM_CP_RW_BIT
) {
3901 gen_mov_F0_vreg(0, rm
* 2);
3902 tmp
= gen_vfp_mrs();
3903 store_reg(s
, rd
, tmp
);
3904 gen_mov_F0_vreg(0, rm
* 2 + 1);
3905 tmp
= gen_vfp_mrs();
3906 store_reg(s
, rn
, tmp
);
3908 gen_mov_F0_vreg(0, rm
);
3909 tmp
= gen_vfp_mrs();
3910 store_reg(s
, rd
, tmp
);
3911 gen_mov_F0_vreg(0, rm
+ 1);
3912 tmp
= gen_vfp_mrs();
3913 store_reg(s
, rn
, tmp
);
3918 tmp
= load_reg(s
, rd
);
3920 gen_mov_vreg_F0(0, rm
* 2);
3921 tmp
= load_reg(s
, rn
);
3923 gen_mov_vreg_F0(0, rm
* 2 + 1);
3925 tmp
= load_reg(s
, rd
);
3927 gen_mov_vreg_F0(0, rm
);
3928 tmp
= load_reg(s
, rn
);
3930 gen_mov_vreg_F0(0, rm
+ 1);
3935 rn
= (insn
>> 16) & 0xf;
3937 VFP_DREG_D(rd
, insn
);
3939 rd
= VFP_SREG_D(insn
);
3940 if ((insn
& 0x01200000) == 0x01000000) {
3941 /* Single load/store */
3942 offset
= (insn
& 0xff) << 2;
3943 if ((insn
& (1 << 23)) == 0)
3945 if (s
->thumb
&& rn
== 15) {
3946 /* This is actually UNPREDICTABLE */
3947 addr
= tcg_temp_new_i32();
3948 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3950 addr
= load_reg(s
, rn
);
3952 tcg_gen_addi_i32(addr
, addr
, offset
);
3953 if (insn
& (1 << 20)) {
3954 gen_vfp_ld(s
, dp
, addr
);
3955 gen_mov_vreg_F0(dp
, rd
);
3957 gen_mov_F0_vreg(dp
, rd
);
3958 gen_vfp_st(s
, dp
, addr
);
3960 tcg_temp_free_i32(addr
);
3962 /* load/store multiple */
3963 int w
= insn
& (1 << 21);
3965 n
= (insn
>> 1) & 0x7f;
3969 if (w
&& !(((insn
>> 23) ^ (insn
>> 24)) & 1)) {
3970 /* P == U , W == 1 => UNDEF */
3973 if (n
== 0 || (rd
+ n
) > 32 || (dp
&& n
> 16)) {
3974 /* UNPREDICTABLE cases for bad immediates: we choose to
3975 * UNDEF to avoid generating huge numbers of TCG ops
3979 if (rn
== 15 && w
) {
3980 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3984 if (s
->thumb
&& rn
== 15) {
3985 /* This is actually UNPREDICTABLE */
3986 addr
= tcg_temp_new_i32();
3987 tcg_gen_movi_i32(addr
, s
->pc
& ~2);
3989 addr
= load_reg(s
, rn
);
3991 if (insn
& (1 << 24)) /* pre-decrement */
3992 tcg_gen_addi_i32(addr
, addr
, -((insn
& 0xff) << 2));
3998 for (i
= 0; i
< n
; i
++) {
3999 if (insn
& ARM_CP_RW_BIT
) {
4001 gen_vfp_ld(s
, dp
, addr
);
4002 gen_mov_vreg_F0(dp
, rd
+ i
);
4005 gen_mov_F0_vreg(dp
, rd
+ i
);
4006 gen_vfp_st(s
, dp
, addr
);
4008 tcg_gen_addi_i32(addr
, addr
, offset
);
4012 if (insn
& (1 << 24))
4013 offset
= -offset
* n
;
4014 else if (dp
&& (insn
& 1))
4020 tcg_gen_addi_i32(addr
, addr
, offset
);
4021 store_reg(s
, rn
, addr
);
4023 tcg_temp_free_i32(addr
);
4029 /* Should never happen. */
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb + n);
    } else {
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(0);
    }
}

static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled || s->ss_active)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
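/* Note (summary of the code above): gen_goto_tb() only emits the
 * goto_tb/exit_tb pair when use_goto_tb() says the destination stays on the
 * same guest page, which lets TCG chain translation blocks directly;
 * otherwise it just updates the PC and exits to the main loop.
 */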
static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
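/* Illustrative note: x and y pick the top (1) or bottom (0) signed halfword
 * of each operand before the 32-bit multiply, which is how the SMULxy /
 * SMLAxy family of instructions (e.g. SMULTB) is implemented.
 */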
/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
        mask &= ~CPSR_T;
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
        mask &= ~CPSR_Q; /* V5TE in reality*/
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
        mask &= ~(CPSR_E | CPSR_GE);
    }
    if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
        mask &= ~CPSR_IT;
    }
    /* Mask out execution state and reserved bits.  */
    if (!spsr) {
        mask &= ~(CPSR_EXEC | CPSR_RESERVED);
    }
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
4153 static bool msr_banked_access_decode(DisasContext
*s
, int r
, int sysm
, int rn
,
4154 int *tgtmode
, int *regno
)
4156 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4157 * the target mode and register number, and identify the various
4158 * unpredictable cases.
4159 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4160 * + executed in user mode
4161 * + using R15 as the src/dest register
4162 * + accessing an unimplemented register
4163 * + accessing a register that's inaccessible at current PL/security state*
4164 * + accessing a register that you could access with a different insn
4165 * We choose to UNDEF in all these cases.
4166 * Since we don't know which of the various AArch32 modes we are in
4167 * we have to defer some checks to runtime.
4168 * Accesses to Monitor mode registers from Secure EL1 (which implies
4169 * that EL3 is AArch64) must trap to EL3.
4171 * If the access checks fail this function will emit code to take
4172 * an exception and return false. Otherwise it will return true,
4173 * and set *tgtmode and *regno appropriately.
4175 int exc_target
= default_exception_el(s
);
4177 /* These instructions are present only in ARMv8, or in ARMv7 with the
4178 * Virtualization Extensions.
4180 if (!arm_dc_feature(s
, ARM_FEATURE_V8
) &&
4181 !arm_dc_feature(s
, ARM_FEATURE_EL2
)) {
4185 if (IS_USER(s
) || rn
== 15) {
4189 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4190 * of registers into (r, sysm).
4193 /* SPSRs for other modes */
4195 case 0xe: /* SPSR_fiq */
4196 *tgtmode
= ARM_CPU_MODE_FIQ
;
4198 case 0x10: /* SPSR_irq */
4199 *tgtmode
= ARM_CPU_MODE_IRQ
;
4201 case 0x12: /* SPSR_svc */
4202 *tgtmode
= ARM_CPU_MODE_SVC
;
4204 case 0x14: /* SPSR_abt */
4205 *tgtmode
= ARM_CPU_MODE_ABT
;
4207 case 0x16: /* SPSR_und */
4208 *tgtmode
= ARM_CPU_MODE_UND
;
4210 case 0x1c: /* SPSR_mon */
4211 *tgtmode
= ARM_CPU_MODE_MON
;
4213 case 0x1e: /* SPSR_hyp */
4214 *tgtmode
= ARM_CPU_MODE_HYP
;
4216 default: /* unallocated */
4219 /* We arbitrarily assign SPSR a register number of 16. */
4222 /* general purpose registers for other modes */
4224 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4225 *tgtmode
= ARM_CPU_MODE_USR
;
4228 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4229 *tgtmode
= ARM_CPU_MODE_FIQ
;
4232 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4233 *tgtmode
= ARM_CPU_MODE_IRQ
;
4234 *regno
= sysm
& 1 ? 13 : 14;
4236 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4237 *tgtmode
= ARM_CPU_MODE_SVC
;
4238 *regno
= sysm
& 1 ? 13 : 14;
4240 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4241 *tgtmode
= ARM_CPU_MODE_ABT
;
4242 *regno
= sysm
& 1 ? 13 : 14;
4244 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4245 *tgtmode
= ARM_CPU_MODE_UND
;
4246 *regno
= sysm
& 1 ? 13 : 14;
4248 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4249 *tgtmode
= ARM_CPU_MODE_MON
;
4250 *regno
= sysm
& 1 ? 13 : 14;
4252 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4253 *tgtmode
= ARM_CPU_MODE_HYP
;
4254 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4255 *regno
= sysm
& 1 ? 13 : 17;
4257 default: /* unallocated */
4262 /* Catch the 'accessing inaccessible register' cases we can detect
4263 * at translate time.
4266 case ARM_CPU_MODE_MON
:
4267 if (!arm_dc_feature(s
, ARM_FEATURE_EL3
) || s
->ns
) {
4270 if (s
->current_el
== 1) {
4271 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4272 * then accesses to Mon registers trap to EL3
4278 case ARM_CPU_MODE_HYP
:
4279 /* Note that we can forbid accesses from EL2 here because they
4280 * must be from Hyp mode itself
4282 if (!arm_dc_feature(s
, ARM_FEATURE_EL2
) || s
->current_el
< 3) {
4293 /* If we get here then some access check did not pass */
4294 gen_exception_insn(s
, 4, EXCP_UDEF
, syn_uncategorized(), exc_target
);
4298 static void gen_msr_banked(DisasContext
*s
, int r
, int sysm
, int rn
)
4300 TCGv_i32 tcg_reg
, tcg_tgtmode
, tcg_regno
;
4301 int tgtmode
= 0, regno
= 0;
4303 if (!msr_banked_access_decode(s
, r
, sysm
, rn
, &tgtmode
, ®no
)) {
4307 /* Sync state because msr_banked() can raise exceptions */
4308 gen_set_condexec(s
);
4309 gen_set_pc_im(s
, s
->pc
- 4);
4310 tcg_reg
= load_reg(s
, rn
);
4311 tcg_tgtmode
= tcg_const_i32(tgtmode
);
4312 tcg_regno
= tcg_const_i32(regno
);
4313 gen_helper_msr_banked(cpu_env
, tcg_reg
, tcg_tgtmode
, tcg_regno
);
4314 tcg_temp_free_i32(tcg_tgtmode
);
4315 tcg_temp_free_i32(tcg_regno
);
4316 tcg_temp_free_i32(tcg_reg
);
4317 s
->is_jmp
= DISAS_UPDATE
;
4320 static void gen_mrs_banked(DisasContext
*s
, int r
, int sysm
, int rn
)
4322 TCGv_i32 tcg_reg
, tcg_tgtmode
, tcg_regno
;
4323 int tgtmode
= 0, regno
= 0;
4325 if (!msr_banked_access_decode(s
, r
, sysm
, rn
, &tgtmode
, ®no
)) {
4329 /* Sync state because mrs_banked() can raise exceptions */
4330 gen_set_condexec(s
);
4331 gen_set_pc_im(s
, s
->pc
- 4);
4332 tcg_reg
= tcg_temp_new_i32();
4333 tcg_tgtmode
= tcg_const_i32(tgtmode
);
4334 tcg_regno
= tcg_const_i32(regno
);
4335 gen_helper_mrs_banked(tcg_reg
, cpu_env
, tcg_tgtmode
, tcg_regno
);
4336 tcg_temp_free_i32(tcg_tgtmode
);
4337 tcg_temp_free_i32(tcg_regno
);
4338 store_reg(s
, rn
, tcg_reg
);
4339 s
->is_jmp
= DISAS_UPDATE
;
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    s->is_jmp = DISAS_JUMP;
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 1: /* yield */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_YIELD;
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFE;
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: abort();
    }
}
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
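/* Usage sketch (illustrative): with local variables size, u, tmp and tmp2 in
 * scope, GEN_NEON_INTEGER_OP(hadd) expands to a switch on ((size << 1) | u)
 * that calls gen_helper_neon_hadd_{s,u}{8,16,32}(tmp, tmp, tmp2) and makes
 * the enclosing function return 1 for invalid sizes.
 */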
static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}

static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
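/* Note (summary): for 16-bit scalars the selected halfword is duplicated
 * into both halves of the 32-bit TCG value, so the ordinary element-wise
 * Neon helpers can be reused for the scalar forms without special casing.
 */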
4497 static int gen_neon_unzip(int rd
, int rm
, int size
, int q
)
4500 if (!q
&& size
== 2) {
4503 tmp
= tcg_const_i32(rd
);
4504 tmp2
= tcg_const_i32(rm
);
4508 gen_helper_neon_qunzip8(cpu_env
, tmp
, tmp2
);
4511 gen_helper_neon_qunzip16(cpu_env
, tmp
, tmp2
);
4514 gen_helper_neon_qunzip32(cpu_env
, tmp
, tmp2
);
4522 gen_helper_neon_unzip8(cpu_env
, tmp
, tmp2
);
4525 gen_helper_neon_unzip16(cpu_env
, tmp
, tmp2
);
4531 tcg_temp_free_i32(tmp
);
4532 tcg_temp_free_i32(tmp2
);
4536 static int gen_neon_zip(int rd
, int rm
, int size
, int q
)
4539 if (!q
&& size
== 2) {
4542 tmp
= tcg_const_i32(rd
);
4543 tmp2
= tcg_const_i32(rm
);
4547 gen_helper_neon_qzip8(cpu_env
, tmp
, tmp2
);
4550 gen_helper_neon_qzip16(cpu_env
, tmp
, tmp2
);
4553 gen_helper_neon_qzip32(cpu_env
, tmp
, tmp2
);
4561 gen_helper_neon_zip8(cpu_env
, tmp
, tmp2
);
4564 gen_helper_neon_zip16(cpu_env
, tmp
, tmp2
);
4570 tcg_temp_free_i32(tmp
);
4571 tcg_temp_free_i32(tmp2
);
4575 static void gen_neon_trn_u8(TCGv_i32 t0
, TCGv_i32 t1
)
4579 rd
= tcg_temp_new_i32();
4580 tmp
= tcg_temp_new_i32();
4582 tcg_gen_shli_i32(rd
, t0
, 8);
4583 tcg_gen_andi_i32(rd
, rd
, 0xff00ff00);
4584 tcg_gen_andi_i32(tmp
, t1
, 0x00ff00ff);
4585 tcg_gen_or_i32(rd
, rd
, tmp
);
4587 tcg_gen_shri_i32(t1
, t1
, 8);
4588 tcg_gen_andi_i32(t1
, t1
, 0x00ff00ff);
4589 tcg_gen_andi_i32(tmp
, t0
, 0xff00ff00);
4590 tcg_gen_or_i32(t1
, t1
, tmp
);
4591 tcg_gen_mov_i32(t0
, rd
);
4593 tcg_temp_free_i32(tmp
);
4594 tcg_temp_free_i32(rd
);
4597 static void gen_neon_trn_u16(TCGv_i32 t0
, TCGv_i32 t1
)
4601 rd
= tcg_temp_new_i32();
4602 tmp
= tcg_temp_new_i32();
4604 tcg_gen_shli_i32(rd
, t0
, 16);
4605 tcg_gen_andi_i32(tmp
, t1
, 0xffff);
4606 tcg_gen_or_i32(rd
, rd
, tmp
);
4607 tcg_gen_shri_i32(t1
, t1
, 16);
4608 tcg_gen_andi_i32(tmp
, t0
, 0xffff0000);
4609 tcg_gen_or_i32(t1
, t1
, tmp
);
4610 tcg_gen_mov_i32(t0
, rd
);
4612 tcg_temp_free_i32(tmp
);
4613 tcg_temp_free_i32(rd
);
4621 } neon_ls_element_type
[11] = {
4635 /* Translate a NEON load/store element instruction. Return nonzero if the
4636 instruction is invalid. */
4637 static int disas_neon_ls_insn(DisasContext
*s
, uint32_t insn
)
4656 /* FIXME: this access check should not take precedence over UNDEF
4657 * for invalid encodings; we will generate incorrect syndrome information
4658 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4660 if (s
->fp_excp_el
) {
4661 gen_exception_insn(s
, 4, EXCP_UDEF
,
4662 syn_fp_access_trap(1, 0xe, false), s
->fp_excp_el
);
4666 if (!s
->vfp_enabled
)
4668 VFP_DREG_D(rd
, insn
);
4669 rn
= (insn
>> 16) & 0xf;
4671 load
= (insn
& (1 << 21)) != 0;
4672 if ((insn
& (1 << 23)) == 0) {
4673 /* Load store all elements. */
4674 op
= (insn
>> 8) & 0xf;
4675 size
= (insn
>> 6) & 3;
4678 /* Catch UNDEF cases for bad values of align field */
4681 if (((insn
>> 5) & 1) == 1) {
4686 if (((insn
>> 4) & 3) == 3) {
4693 nregs
= neon_ls_element_type
[op
].nregs
;
4694 interleave
= neon_ls_element_type
[op
].interleave
;
4695 spacing
= neon_ls_element_type
[op
].spacing
;
4696 if (size
== 3 && (interleave
| spacing
) != 1)
4698 addr
= tcg_temp_new_i32();
4699 load_reg_var(s
, addr
, rn
);
4700 stride
= (1 << size
) * interleave
;
4701 for (reg
= 0; reg
< nregs
; reg
++) {
4702 if (interleave
> 2 || (interleave
== 2 && nregs
== 2)) {
4703 load_reg_var(s
, addr
, rn
);
4704 tcg_gen_addi_i32(addr
, addr
, (1 << size
) * reg
);
4705 } else if (interleave
== 2 && nregs
== 4 && reg
== 2) {
4706 load_reg_var(s
, addr
, rn
);
4707 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4710 tmp64
= tcg_temp_new_i64();
4712 gen_aa32_ld64(s
, tmp64
, addr
, get_mem_index(s
));
4713 neon_store_reg64(tmp64
, rd
);
4715 neon_load_reg64(tmp64
, rd
);
4716 gen_aa32_st64(s
, tmp64
, addr
, get_mem_index(s
));
4718 tcg_temp_free_i64(tmp64
);
4719 tcg_gen_addi_i32(addr
, addr
, stride
);
4721 for (pass
= 0; pass
< 2; pass
++) {
4724 tmp
= tcg_temp_new_i32();
4725 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
4726 neon_store_reg(rd
, pass
, tmp
);
4728 tmp
= neon_load_reg(rd
, pass
);
4729 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
4730 tcg_temp_free_i32(tmp
);
4732 tcg_gen_addi_i32(addr
, addr
, stride
);
4733 } else if (size
== 1) {
4735 tmp
= tcg_temp_new_i32();
4736 gen_aa32_ld16u(s
, tmp
, addr
, get_mem_index(s
));
4737 tcg_gen_addi_i32(addr
, addr
, stride
);
4738 tmp2
= tcg_temp_new_i32();
4739 gen_aa32_ld16u(s
, tmp2
, addr
, get_mem_index(s
));
4740 tcg_gen_addi_i32(addr
, addr
, stride
);
4741 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
4742 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
4743 tcg_temp_free_i32(tmp2
);
4744 neon_store_reg(rd
, pass
, tmp
);
4746 tmp
= neon_load_reg(rd
, pass
);
4747 tmp2
= tcg_temp_new_i32();
4748 tcg_gen_shri_i32(tmp2
, tmp
, 16);
4749 gen_aa32_st16(s
, tmp
, addr
, get_mem_index(s
));
4750 tcg_temp_free_i32(tmp
);
4751 tcg_gen_addi_i32(addr
, addr
, stride
);
4752 gen_aa32_st16(s
, tmp2
, addr
, get_mem_index(s
));
4753 tcg_temp_free_i32(tmp2
);
4754 tcg_gen_addi_i32(addr
, addr
, stride
);
4756 } else /* size == 0 */ {
4758 TCGV_UNUSED_I32(tmp2
);
4759 for (n
= 0; n
< 4; n
++) {
4760 tmp
= tcg_temp_new_i32();
4761 gen_aa32_ld8u(s
, tmp
, addr
, get_mem_index(s
));
4762 tcg_gen_addi_i32(addr
, addr
, stride
);
4766 tcg_gen_shli_i32(tmp
, tmp
, n
* 8);
4767 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
4768 tcg_temp_free_i32(tmp
);
4771 neon_store_reg(rd
, pass
, tmp2
);
4773 tmp2
= neon_load_reg(rd
, pass
);
4774 for (n
= 0; n
< 4; n
++) {
4775 tmp
= tcg_temp_new_i32();
4777 tcg_gen_mov_i32(tmp
, tmp2
);
4779 tcg_gen_shri_i32(tmp
, tmp2
, n
* 8);
4781 gen_aa32_st8(s
, tmp
, addr
, get_mem_index(s
));
4782 tcg_temp_free_i32(tmp
);
4783 tcg_gen_addi_i32(addr
, addr
, stride
);
4785 tcg_temp_free_i32(tmp2
);
4792 tcg_temp_free_i32(addr
);
4795 size
= (insn
>> 10) & 3;
4797 /* Load single element to all lanes. */
4798 int a
= (insn
>> 4) & 1;
4802 size
= (insn
>> 6) & 3;
4803 nregs
= ((insn
>> 8) & 3) + 1;
4806 if (nregs
!= 4 || a
== 0) {
4809 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4812 if (nregs
== 1 && a
== 1 && size
== 0) {
4815 if (nregs
== 3 && a
== 1) {
4818 addr
= tcg_temp_new_i32();
4819 load_reg_var(s
, addr
, rn
);
4821 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4822 tmp
= gen_load_and_replicate(s
, addr
, size
);
4823 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
4824 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
4825 if (insn
& (1 << 5)) {
4826 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 0));
4827 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
+ 1, 1));
4829 tcg_temp_free_i32(tmp
);
4831 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4832 stride
= (insn
& (1 << 5)) ? 2 : 1;
4833 for (reg
= 0; reg
< nregs
; reg
++) {
4834 tmp
= gen_load_and_replicate(s
, addr
, size
);
4835 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 0));
4836 tcg_gen_st_i32(tmp
, cpu_env
, neon_reg_offset(rd
, 1));
4837 tcg_temp_free_i32(tmp
);
4838 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4842 tcg_temp_free_i32(addr
);
4843 stride
= (1 << size
) * nregs
;
4845 /* Single element. */
4846 int idx
= (insn
>> 4) & 0xf;
4847 pass
= (insn
>> 7) & 1;
4850 shift
= ((insn
>> 5) & 3) * 8;
4854 shift
= ((insn
>> 6) & 1) * 16;
4855 stride
= (insn
& (1 << 5)) ? 2 : 1;
4859 stride
= (insn
& (1 << 6)) ? 2 : 1;
4864 nregs
= ((insn
>> 8) & 3) + 1;
4865 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4868 if (((idx
& (1 << size
)) != 0) ||
4869 (size
== 2 && ((idx
& 3) == 1 || (idx
& 3) == 2))) {
4874 if ((idx
& 1) != 0) {
4879 if (size
== 2 && (idx
& 2) != 0) {
4884 if ((size
== 2) && ((idx
& 3) == 3)) {
4891 if ((rd
+ stride
* (nregs
- 1)) > 31) {
4892 /* Attempts to write off the end of the register file
4893 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4894 * the neon_load_reg() would write off the end of the array.
4898 addr
= tcg_temp_new_i32();
4899 load_reg_var(s
, addr
, rn
);
4900 for (reg
= 0; reg
< nregs
; reg
++) {
4902 tmp
= tcg_temp_new_i32();
4905 gen_aa32_ld8u(s
, tmp
, addr
, get_mem_index(s
));
4908 gen_aa32_ld16u(s
, tmp
, addr
, get_mem_index(s
));
4911 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
4913 default: /* Avoid compiler warnings. */
4917 tmp2
= neon_load_reg(rd
, pass
);
4918 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
,
4919 shift
, size
? 16 : 8);
4920 tcg_temp_free_i32(tmp2
);
4922 neon_store_reg(rd
, pass
, tmp
);
4923 } else { /* Store */
4924 tmp
= neon_load_reg(rd
, pass
);
4926 tcg_gen_shri_i32(tmp
, tmp
, shift
);
4929 gen_aa32_st8(s
, tmp
, addr
, get_mem_index(s
));
4932 gen_aa32_st16(s
, tmp
, addr
, get_mem_index(s
));
4935 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
4938 tcg_temp_free_i32(tmp
);
4941 tcg_gen_addi_i32(addr
, addr
, 1 << size
);
4943 tcg_temp_free_i32(addr
);
4944 stride
= nregs
* (1 << size
);
4950 base
= load_reg(s
, rn
);
4952 tcg_gen_addi_i32(base
, base
, stride
);
4955 index
= load_reg(s
, rm
);
4956 tcg_gen_add_i32(base
, base
, index
);
4957 tcg_temp_free_i32(index
);
4959 store_reg(s
, rn
, base
);
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
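/* Illustrative note: this computes dest = (t & c) | (f & ~c) bit by bit, so
 * with c = 0x0000ffff the low halfword of dest comes from t and the high
 * halfword from f.
 */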
static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_extrl_i64_i32(dest, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}

static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}

static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}

static void gen_neon_narrow_op(int op, int u, int size,
                               TCGv_i32 dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}
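/* gen_neon_narrow_op() dispatches on (op, u): with op set, u selects the
 * signed-to-unsigned saturating narrow versus the plain narrow; with op
 * clear, u selects the unsigned versus the signed saturating narrow.
 */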
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
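/* Each entry above is a bitmap of permitted size values: e.g. 0x7 allows
 * sizes 0, 1 and 2, while 0x4 means "size == 2 only". The decoder tests
 * (neon_3r_sizes[op] & (1 << size)) and UNDEFs when the bit is clear.
 */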
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
            op == NEON_2RM_VRINTM ||
            (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
            op >= NEON_2RM_VRECPE_F);
}

static bool neon_2rm_is_v8_op(int op)
{
    /* Return true if this neon 2reg-misc op is ARMv8 and up */
    switch (op) {
    case NEON_2RM_VRINTN:
    case NEON_2RM_VRINTA:
    case NEON_2RM_VRINTM:
    case NEON_2RM_VRINTP:
    case NEON_2RM_VRINTZ:
    case NEON_2RM_VRINTX:
    case NEON_2RM_VCVTAU:
    case NEON_2RM_VCVTAS:
    case NEON_2RM_VCVTNU:
    case NEON_2RM_VCVTNS:
    case NEON_2RM_VCVTPU:
    case NEON_2RM_VCVTPS:
    case NEON_2RM_VCVTMU:
    case NEON_2RM_VCVTMS:
        return true;
    default:
        return false;
    }
}
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length. */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        /*
         * The SHA-1/SHA-256 3-register instructions require special treatment
         * here, as their size field is overloaded as an op type selector, and
         * they all consume their input in a single pass.
         */
        if (op == NEON_3R_SHA) {
5458 if (!u
) { /* SHA-1 */
5459 if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA1
)) {
5462 tmp
= tcg_const_i32(rd
);
5463 tmp2
= tcg_const_i32(rn
);
5464 tmp3
= tcg_const_i32(rm
);
5465 tmp4
= tcg_const_i32(size
);
5466 gen_helper_crypto_sha1_3reg(cpu_env
, tmp
, tmp2
, tmp3
, tmp4
);
5467 tcg_temp_free_i32(tmp4
);
5468 } else { /* SHA-256 */
5469 if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA256
) || size
== 3) {
5472 tmp
= tcg_const_i32(rd
);
5473 tmp2
= tcg_const_i32(rn
);
5474 tmp3
= tcg_const_i32(rm
);
5477 gen_helper_crypto_sha256h(cpu_env
, tmp
, tmp2
, tmp3
);
5480 gen_helper_crypto_sha256h2(cpu_env
, tmp
, tmp2
, tmp3
);
5483 gen_helper_crypto_sha256su1(cpu_env
, tmp
, tmp2
, tmp3
);
5487 tcg_temp_free_i32(tmp
);
5488 tcg_temp_free_i32(tmp2
);
5489 tcg_temp_free_i32(tmp3
);
5492 if (size
== 3 && op
!= NEON_3R_LOGIC
) {
5493 /* 64-bit element instructions. */
5494 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
5495 neon_load_reg64(cpu_V0
, rn
+ pass
);
5496 neon_load_reg64(cpu_V1
, rm
+ pass
);
5500 gen_helper_neon_qadd_u64(cpu_V0
, cpu_env
,
5503 gen_helper_neon_qadd_s64(cpu_V0
, cpu_env
,
5509 gen_helper_neon_qsub_u64(cpu_V0
, cpu_env
,
5512 gen_helper_neon_qsub_s64(cpu_V0
, cpu_env
,
5518 gen_helper_neon_shl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
5520 gen_helper_neon_shl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
5525 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
5528 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
5534 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V1
, cpu_V0
);
5536 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V1
, cpu_V0
);
5539 case NEON_3R_VQRSHL
:
5541 gen_helper_neon_qrshl_u64(cpu_V0
, cpu_env
,
5544 gen_helper_neon_qrshl_s64(cpu_V0
, cpu_env
,
5548 case NEON_3R_VADD_VSUB
:
5550 tcg_gen_sub_i64(CPU_V001
);
5552 tcg_gen_add_i64(CPU_V001
);
5558 neon_store_reg64(cpu_V0
, rd
+ pass
);
5567 case NEON_3R_VQRSHL
:
5570 /* Shift instruction operands are reversed. */
5585 case NEON_3R_FLOAT_ARITH
:
5586 pairwise
= (u
&& size
< 2); /* if VPADD (float) */
5588 case NEON_3R_FLOAT_MINMAX
:
5589 pairwise
= u
; /* if VPMIN/VPMAX (float) */
5591 case NEON_3R_FLOAT_CMP
:
5593 /* no encoding for U=0 C=1x */
5597 case NEON_3R_FLOAT_ACMP
:
5602 case NEON_3R_FLOAT_MISC
:
5603 /* VMAXNM/VMINNM in ARMv8 */
5604 if (u
&& !arm_dc_feature(s
, ARM_FEATURE_V8
)) {
5609 if (u
&& (size
!= 0)) {
5610 /* UNDEF on invalid size for polynomial subcase */
5615 if (!arm_dc_feature(s
, ARM_FEATURE_VFP4
) || u
) {
5623 if (pairwise
&& q
) {
5624 /* All the pairwise insns UNDEF if Q is set */
5628 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5633 tmp
= neon_load_reg(rn
, 0);
5634 tmp2
= neon_load_reg(rn
, 1);
5636 tmp
= neon_load_reg(rm
, 0);
5637 tmp2
= neon_load_reg(rm
, 1);
5641 tmp
= neon_load_reg(rn
, pass
);
5642 tmp2
= neon_load_reg(rm
, pass
);
5646 GEN_NEON_INTEGER_OP(hadd
);
5649 GEN_NEON_INTEGER_OP_ENV(qadd
);
5651 case NEON_3R_VRHADD
:
5652 GEN_NEON_INTEGER_OP(rhadd
);
5654 case NEON_3R_LOGIC
: /* Logic ops. */
5655 switch ((u
<< 2) | size
) {
5657 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
5660 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
5663 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
5666 tcg_gen_orc_i32(tmp
, tmp
, tmp2
);
5669 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
5672 tmp3
= neon_load_reg(rd
, pass
);
5673 gen_neon_bsl(tmp
, tmp
, tmp2
, tmp3
);
5674 tcg_temp_free_i32(tmp3
);
5677 tmp3
= neon_load_reg(rd
, pass
);
5678 gen_neon_bsl(tmp
, tmp
, tmp3
, tmp2
);
5679 tcg_temp_free_i32(tmp3
);
5682 tmp3
= neon_load_reg(rd
, pass
);
5683 gen_neon_bsl(tmp
, tmp3
, tmp
, tmp2
);
5684 tcg_temp_free_i32(tmp3
);
5689 GEN_NEON_INTEGER_OP(hsub
);
5692 GEN_NEON_INTEGER_OP_ENV(qsub
);
5695 GEN_NEON_INTEGER_OP(cgt
);
5698 GEN_NEON_INTEGER_OP(cge
);
5701 GEN_NEON_INTEGER_OP(shl
);
5704 GEN_NEON_INTEGER_OP_ENV(qshl
);
5707 GEN_NEON_INTEGER_OP(rshl
);
5709 case NEON_3R_VQRSHL
:
5710 GEN_NEON_INTEGER_OP_ENV(qrshl
);
5713 GEN_NEON_INTEGER_OP(max
);
5716 GEN_NEON_INTEGER_OP(min
);
5719 GEN_NEON_INTEGER_OP(abd
);
5722 GEN_NEON_INTEGER_OP(abd
);
5723 tcg_temp_free_i32(tmp2
);
5724 tmp2
= neon_load_reg(rd
, pass
);
5725 gen_neon_add(size
, tmp
, tmp2
);
5727 case NEON_3R_VADD_VSUB
:
5728 if (!u
) { /* VADD */
5729 gen_neon_add(size
, tmp
, tmp2
);
5732 case 0: gen_helper_neon_sub_u8(tmp
, tmp
, tmp2
); break;
5733 case 1: gen_helper_neon_sub_u16(tmp
, tmp
, tmp2
); break;
5734 case 2: tcg_gen_sub_i32(tmp
, tmp
, tmp2
); break;
5739 case NEON_3R_VTST_VCEQ
:
5740 if (!u
) { /* VTST */
5742 case 0: gen_helper_neon_tst_u8(tmp
, tmp
, tmp2
); break;
5743 case 1: gen_helper_neon_tst_u16(tmp
, tmp
, tmp2
); break;
5744 case 2: gen_helper_neon_tst_u32(tmp
, tmp
, tmp2
); break;
5749 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
5750 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
5751 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
5756 case NEON_3R_VML
: /* VMLA, VMLAL, VMLS,VMLSL */
5758 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5759 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5760 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5763 tcg_temp_free_i32(tmp2
);
5764 tmp2
= neon_load_reg(rd
, pass
);
5766 gen_neon_rsb(size
, tmp
, tmp2
);
5768 gen_neon_add(size
, tmp
, tmp2
);
5772 if (u
) { /* polynomial */
5773 gen_helper_neon_mul_p8(tmp
, tmp
, tmp2
);
5774 } else { /* Integer */
5776 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
5777 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
5778 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
5784 GEN_NEON_INTEGER_OP(pmax
);
5787 GEN_NEON_INTEGER_OP(pmin
);
5789 case NEON_3R_VQDMULH_VQRDMULH
: /* Multiply high. */
5790 if (!u
) { /* VQDMULH */
5793 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5796 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5800 } else { /* VQRDMULH */
5803 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
5806 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
5814 case 0: gen_helper_neon_padd_u8(tmp
, tmp
, tmp2
); break;
5815 case 1: gen_helper_neon_padd_u16(tmp
, tmp
, tmp2
); break;
5816 case 2: tcg_gen_add_i32(tmp
, tmp
, tmp2
); break;
5820 case NEON_3R_FLOAT_ARITH
: /* Floating point arithmetic. */
5822 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5823 switch ((u
<< 2) | size
) {
5826 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5829 gen_helper_vfp_subs(tmp
, tmp
, tmp2
, fpstatus
);
5832 gen_helper_neon_abd_f32(tmp
, tmp
, tmp2
, fpstatus
);
5837 tcg_temp_free_ptr(fpstatus
);
5840 case NEON_3R_FLOAT_MULTIPLY
:
5842 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5843 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
5845 tcg_temp_free_i32(tmp2
);
5846 tmp2
= neon_load_reg(rd
, pass
);
5848 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
5850 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
5853 tcg_temp_free_ptr(fpstatus
);
5856 case NEON_3R_FLOAT_CMP
:
5858 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5860 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
5863 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
5865 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
5868 tcg_temp_free_ptr(fpstatus
);
5871 case NEON_3R_FLOAT_ACMP
:
5873 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5875 gen_helper_neon_acge_f32(tmp
, tmp
, tmp2
, fpstatus
);
5877 gen_helper_neon_acgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
5879 tcg_temp_free_ptr(fpstatus
);
5882 case NEON_3R_FLOAT_MINMAX
:
5884 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5886 gen_helper_vfp_maxs(tmp
, tmp
, tmp2
, fpstatus
);
5888 gen_helper_vfp_mins(tmp
, tmp
, tmp2
, fpstatus
);
5890 tcg_temp_free_ptr(fpstatus
);
5893 case NEON_3R_FLOAT_MISC
:
5896 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5898 gen_helper_vfp_maxnums(tmp
, tmp
, tmp2
, fpstatus
);
5900 gen_helper_vfp_minnums(tmp
, tmp
, tmp2
, fpstatus
);
5902 tcg_temp_free_ptr(fpstatus
);
5905 gen_helper_recps_f32(tmp
, tmp
, tmp2
, cpu_env
);
5907 gen_helper_rsqrts_f32(tmp
, tmp
, tmp2
, cpu_env
);
5913 /* VFMA, VFMS: fused multiply-add */
5914 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
5915 TCGv_i32 tmp3
= neon_load_reg(rd
, pass
);
5918 gen_helper_vfp_negs(tmp
, tmp
);
5920 gen_helper_vfp_muladds(tmp
, tmp
, tmp2
, tmp3
, fpstatus
);
5921 tcg_temp_free_i32(tmp3
);
5922 tcg_temp_free_ptr(fpstatus
);
5928 tcg_temp_free_i32(tmp2
);
5930 /* Save the result. For elementwise operations we can put it
5931 straight into the destination register. For pairwise operations
5932 we have to be careful to avoid clobbering the source operands. */
5933 if (pairwise
&& rd
== rm
) {
5934 neon_store_scratch(pass
, tmp
);
5936 neon_store_reg(rd
, pass
, tmp
);
5940 if (pairwise
&& rd
== rm
) {
5941 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
5942 tmp
= neon_load_scratch(pass
);
5943 neon_store_reg(rd
, pass
, tmp
);
5946 /* End of 3 register same size operations. */
5947 } else if (insn
& (1 << 4)) {
5948 if ((insn
& 0x00380080) != 0) {
5949 /* Two registers and shift. */
5950 op
= (insn
>> 8) & 0xf;
5951 if (insn
& (1 << 7)) {
5959 while ((insn
& (1 << (size
+ 19))) == 0)
5962 shift
= (insn
>> 16) & ((1 << (3 + size
)) - 1);
5963 /* To avoid excessive duplication of ops we implement shift
5964 by immediate using the variable shift operations. */
5966 /* Shift by immediate:
5967 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5968 if (q
&& ((rd
| rm
) & 1)) {
5971 if (!u
&& (op
== 4 || op
== 6)) {
5974 /* Right shifts are encoded as N - shift, where N is the
5975 element size in bits. */
5977 shift
= shift
- (1 << (size
+ 3));
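                /* Example: with size == 0 (8-bit elements) an encoded shift
                 * field of 7 becomes 7 - 8 = -1 here, i.e. a right shift by
                 * one, which the variable-shift helpers implement as a shift
                 * by a negative count.
                 */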
5985 imm
= (uint8_t) shift
;
5990 imm
= (uint16_t) shift
;
6001 for (pass
= 0; pass
< count
; pass
++) {
6003 neon_load_reg64(cpu_V0
, rm
+ pass
);
6004 tcg_gen_movi_i64(cpu_V1
, imm
);
6009 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
6011 gen_helper_neon_shl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
6016 gen_helper_neon_rshl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
6018 gen_helper_neon_rshl_s64(cpu_V0
, cpu_V0
, cpu_V1
);
6021 case 5: /* VSHL, VSLI */
6022 gen_helper_neon_shl_u64(cpu_V0
, cpu_V0
, cpu_V1
);
6024 case 6: /* VQSHLU */
6025 gen_helper_neon_qshlu_s64(cpu_V0
, cpu_env
,
6030 gen_helper_neon_qshl_u64(cpu_V0
, cpu_env
,
6033 gen_helper_neon_qshl_s64(cpu_V0
, cpu_env
,
6038 if (op
== 1 || op
== 3) {
6040 neon_load_reg64(cpu_V1
, rd
+ pass
);
6041 tcg_gen_add_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6042 } else if (op
== 4 || (op
== 5 && u
)) {
6044 neon_load_reg64(cpu_V1
, rd
+ pass
);
6046 if (shift
< -63 || shift
> 63) {
6050 mask
= 0xffffffffffffffffull
>> -shift
;
6052 mask
= 0xffffffffffffffffull
<< shift
;
6055 tcg_gen_andi_i64(cpu_V1
, cpu_V1
, ~mask
);
6056 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6058 neon_store_reg64(cpu_V0
, rd
+ pass
);
6059 } else { /* size < 3 */
6060 /* Operands in T0 and T1. */
6061 tmp
= neon_load_reg(rm
, pass
);
6062 tmp2
= tcg_temp_new_i32();
6063 tcg_gen_movi_i32(tmp2
, imm
);
6067 GEN_NEON_INTEGER_OP(shl
);
6071 GEN_NEON_INTEGER_OP(rshl
);
6074 case 5: /* VSHL, VSLI */
6076 case 0: gen_helper_neon_shl_u8(tmp
, tmp
, tmp2
); break;
6077 case 1: gen_helper_neon_shl_u16(tmp
, tmp
, tmp2
); break;
6078 case 2: gen_helper_neon_shl_u32(tmp
, tmp
, tmp2
); break;
6082 case 6: /* VQSHLU */
6085 gen_helper_neon_qshlu_s8(tmp
, cpu_env
,
6089 gen_helper_neon_qshlu_s16(tmp
, cpu_env
,
6093 gen_helper_neon_qshlu_s32(tmp
, cpu_env
,
6101 GEN_NEON_INTEGER_OP_ENV(qshl
);
6104 tcg_temp_free_i32(tmp2
);
6106 if (op
== 1 || op
== 3) {
6108 tmp2
= neon_load_reg(rd
, pass
);
6109 gen_neon_add(size
, tmp
, tmp2
);
6110 tcg_temp_free_i32(tmp2
);
6111 } else if (op
== 4 || (op
== 5 && u
)) {
6116 mask
= 0xff >> -shift
;
6118 mask
= (uint8_t)(0xff << shift
);
6124 mask
= 0xffff >> -shift
;
6126 mask
= (uint16_t)(0xffff << shift
);
6130 if (shift
< -31 || shift
> 31) {
6134 mask
= 0xffffffffu
>> -shift
;
6136 mask
= 0xffffffffu
<< shift
;
6142 tmp2
= neon_load_reg(rd
, pass
);
6143 tcg_gen_andi_i32(tmp
, tmp
, mask
);
6144 tcg_gen_andi_i32(tmp2
, tmp2
, ~mask
);
6145 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
6146 tcg_temp_free_i32(tmp2
);
6148 neon_store_reg(rd
, pass
, tmp
);
6151 } else if (op
< 10) {
6152 /* Shift by immediate and narrow:
6153 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
6154 int input_unsigned
= (op
== 8) ? !u
: u
;
6158 shift
= shift
- (1 << (size
+ 3));
6161 tmp64
= tcg_const_i64(shift
);
6162 neon_load_reg64(cpu_V0
, rm
);
6163 neon_load_reg64(cpu_V1
, rm
+ 1);
6164 for (pass
= 0; pass
< 2; pass
++) {
6172 if (input_unsigned
) {
6173 gen_helper_neon_rshl_u64(cpu_V0
, in
, tmp64
);
6175 gen_helper_neon_rshl_s64(cpu_V0
, in
, tmp64
);
6178 if (input_unsigned
) {
6179 gen_helper_neon_shl_u64(cpu_V0
, in
, tmp64
);
6181 gen_helper_neon_shl_s64(cpu_V0
, in
, tmp64
);
6184 tmp
= tcg_temp_new_i32();
6185 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
6186 neon_store_reg(rd
, pass
, tmp
);
6188 tcg_temp_free_i64(tmp64
);
6191 imm
= (uint16_t)shift
;
6195 imm
= (uint32_t)shift
;
6197 tmp2
= tcg_const_i32(imm
);
6198 tmp4
= neon_load_reg(rm
+ 1, 0);
6199 tmp5
= neon_load_reg(rm
+ 1, 1);
6200 for (pass
= 0; pass
< 2; pass
++) {
6202 tmp
= neon_load_reg(rm
, 0);
6206 gen_neon_shift_narrow(size
, tmp
, tmp2
, q
,
6209 tmp3
= neon_load_reg(rm
, 1);
6213 gen_neon_shift_narrow(size
, tmp3
, tmp2
, q
,
6215 tcg_gen_concat_i32_i64(cpu_V0
, tmp
, tmp3
);
6216 tcg_temp_free_i32(tmp
);
6217 tcg_temp_free_i32(tmp3
);
6218 tmp
= tcg_temp_new_i32();
6219 gen_neon_narrow_op(op
== 8, u
, size
- 1, tmp
, cpu_V0
);
6220 neon_store_reg(rd
, pass
, tmp
);
6222 tcg_temp_free_i32(tmp2
);
6224 } else if (op
== 10) {
6226 if (q
|| (rd
& 1)) {
6229 tmp
= neon_load_reg(rm
, 0);
6230 tmp2
= neon_load_reg(rm
, 1);
6231 for (pass
= 0; pass
< 2; pass
++) {
6235 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
6238 /* The shift is less than the width of the source
6239 type, so we can just shift the whole register. */
6240 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, shift
);
6241 /* Widen the result of shift: we need to clear
6242 * the potential overflow bits resulting from
6243 * left bits of the narrow input appearing as
6244 * right bits of left the neighbour narrow
6246 if (size
< 2 || !u
) {
6249 imm
= (0xffu
>> (8 - shift
));
6251 } else if (size
== 1) {
6252 imm
= 0xffff >> (16 - shift
);
6255 imm
= 0xffffffff >> (32 - shift
);
6258 imm64
= imm
| (((uint64_t)imm
) << 32);
6262 tcg_gen_andi_i64(cpu_V0
, cpu_V0
, ~imm64
);
6265 neon_store_reg64(cpu_V0
, rd
+ pass
);
6267 } else if (op
>= 14) {
6268 /* VCVT fixed-point. */
6269 if (!(insn
& (1 << 21)) || (q
&& ((rd
| rm
) & 1))) {
6272 /* We have already masked out the must-be-1 top bit of imm6,
6273 * hence this 32-shift where the ARM ARM has 64-imm6.
6276 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6277 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, pass
));
6280 gen_vfp_ulto(0, shift
, 1);
6282 gen_vfp_slto(0, shift
, 1);
6285 gen_vfp_toul(0, shift
, 1);
6287 gen_vfp_tosl(0, shift
, 1);
6289 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, pass
));
6294 } else { /* (insn & 0x00380080) == 0 */
6296 if (q
&& (rd
& 1)) {
6300 op
= (insn
>> 8) & 0xf;
6301 /* One register and immediate. */
6302 imm
= (u
<< 7) | ((insn
>> 12) & 0x70) | (insn
& 0xf);
6303 invert
= (insn
& (1 << 5)) != 0;
6304 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6305 * We choose to not special-case this and will behave as if a
6306 * valid constant encoding of 0 had been given.
6325 imm
= (imm
<< 8) | (imm
<< 24);
6328 imm
= (imm
<< 8) | 0xff;
6331 imm
= (imm
<< 16) | 0xffff;
6334 imm
|= (imm
<< 8) | (imm
<< 16) | (imm
<< 24);
6342 imm
= ((imm
& 0x80) << 24) | ((imm
& 0x3f) << 19)
6343 | ((imm
& 0x40) ? (0x1f << 25) : (1 << 30));
6349 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
6350 if (op
& 1 && op
< 12) {
6351 tmp
= neon_load_reg(rd
, pass
);
6353 /* The immediate value has already been inverted, so
6355 tcg_gen_andi_i32(tmp
, tmp
, imm
);
6357 tcg_gen_ori_i32(tmp
, tmp
, imm
);
6361 tmp
= tcg_temp_new_i32();
6362 if (op
== 14 && invert
) {
6366 for (n
= 0; n
< 4; n
++) {
6367 if (imm
& (1 << (n
+ (pass
& 1) * 4)))
6368 val
|= 0xff << (n
* 8);
6370 tcg_gen_movi_i32(tmp
, val
);
6372 tcg_gen_movi_i32(tmp
, imm
);
6375 neon_store_reg(rd
, pass
, tmp
);
6378 } else { /* (insn & 0x00800010 == 0x00800000) */
6380 op
= (insn
>> 8) & 0xf;
6381 if ((insn
& (1 << 6)) == 0) {
6382 /* Three registers of different lengths. */
            /* undefreq: bit 0 : UNDEF if size == 0
             *           bit 1 : UNDEF if size == 1
             *           bit 2 : UNDEF if size == 2
             *           bit 3 : UNDEF if U == 1
             * Note that [2:0] set implies 'always UNDEF'
             */
            /* prewiden, src1_wide, src2_wide, undefreq */
            static const int neon_3reg_wide[16][4] = {
                {1, 0, 0, 0}, /* VADDL */
                {1, 1, 0, 0}, /* VADDW */
                {1, 0, 0, 0}, /* VSUBL */
                {1, 1, 0, 0}, /* VSUBW */
                {0, 1, 1, 0}, /* VADDHN */
                {0, 0, 0, 0}, /* VABAL */
                {0, 1, 1, 0}, /* VSUBHN */
                {0, 0, 0, 0}, /* VABDL */
                {0, 0, 0, 0}, /* VMLAL */
                {0, 0, 0, 9}, /* VQDMLAL */
                {0, 0, 0, 0}, /* VMLSL */
                {0, 0, 0, 9}, /* VQDMLSL */
                {0, 0, 0, 0}, /* Integer VMULL */
                {0, 0, 0, 1}, /* VQDMULL */
                {0, 0, 0, 0xa}, /* Polynomial VMULL */
                {0, 0, 0, 7}, /* Reserved: always UNDEF */
            };

            prewiden = neon_3reg_wide[op][0];
            src1_wide = neon_3reg_wide[op][1];
            src2_wide = neon_3reg_wide[op][2];
            undefreq = neon_3reg_wide[op][3];

            if ((undefreq & (1 << size)) ||
                ((undefreq & 8) && u)) {
                return 1;
            }
            if ((src1_wide && (rn & 1)) ||
                (src2_wide && (rm & 1)) ||
                (!src2_wide && (rd & 1))) {
                return 1;
            }
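            /* Reading the table above: e.g. VADDW is {1, 1, 0, 0}, meaning
             * operand 1 is already a wide (64-bit per pass) register while
             * the narrow operand is pre-widened before the add, with no
             * extra UNDEF conditions.
             */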
6428 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6429 * outside the loop below as it only performs a single pass.
6431 if (op
== 14 && size
== 2) {
6432 TCGv_i64 tcg_rn
, tcg_rm
, tcg_rd
;
6434 if (!arm_dc_feature(s
, ARM_FEATURE_V8_PMULL
)) {
6437 tcg_rn
= tcg_temp_new_i64();
6438 tcg_rm
= tcg_temp_new_i64();
6439 tcg_rd
= tcg_temp_new_i64();
6440 neon_load_reg64(tcg_rn
, rn
);
6441 neon_load_reg64(tcg_rm
, rm
);
6442 gen_helper_neon_pmull_64_lo(tcg_rd
, tcg_rn
, tcg_rm
);
6443 neon_store_reg64(tcg_rd
, rd
);
6444 gen_helper_neon_pmull_64_hi(tcg_rd
, tcg_rn
, tcg_rm
);
6445 neon_store_reg64(tcg_rd
, rd
+ 1);
6446 tcg_temp_free_i64(tcg_rn
);
6447 tcg_temp_free_i64(tcg_rm
);
6448 tcg_temp_free_i64(tcg_rd
);
6452 /* Avoid overlapping operands. Wide source operands are
6453 always aligned so will never overlap with wide
6454 destinations in problematic ways. */
6455 if (rd
== rm
&& !src2_wide
) {
6456 tmp
= neon_load_reg(rm
, 1);
6457 neon_store_scratch(2, tmp
);
6458 } else if (rd
== rn
&& !src1_wide
) {
6459 tmp
= neon_load_reg(rn
, 1);
6460 neon_store_scratch(2, tmp
);
6462 TCGV_UNUSED_I32(tmp3
);
6463 for (pass
= 0; pass
< 2; pass
++) {
6465 neon_load_reg64(cpu_V0
, rn
+ pass
);
6466 TCGV_UNUSED_I32(tmp
);
6468 if (pass
== 1 && rd
== rn
) {
6469 tmp
= neon_load_scratch(2);
6471 tmp
= neon_load_reg(rn
, pass
);
6474 gen_neon_widen(cpu_V0
, tmp
, size
, u
);
6478 neon_load_reg64(cpu_V1
, rm
+ pass
);
6479 TCGV_UNUSED_I32(tmp2
);
6481 if (pass
== 1 && rd
== rm
) {
6482 tmp2
= neon_load_scratch(2);
6484 tmp2
= neon_load_reg(rm
, pass
);
6487 gen_neon_widen(cpu_V1
, tmp2
, size
, u
);
6491 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6492 gen_neon_addl(size
);
6494 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6495 gen_neon_subl(size
);
6497 case 5: case 7: /* VABAL, VABDL */
6498 switch ((size
<< 1) | u
) {
6500 gen_helper_neon_abdl_s16(cpu_V0
, tmp
, tmp2
);
6503 gen_helper_neon_abdl_u16(cpu_V0
, tmp
, tmp2
);
6506 gen_helper_neon_abdl_s32(cpu_V0
, tmp
, tmp2
);
6509 gen_helper_neon_abdl_u32(cpu_V0
, tmp
, tmp2
);
6512 gen_helper_neon_abdl_s64(cpu_V0
, tmp
, tmp2
);
6515 gen_helper_neon_abdl_u64(cpu_V0
, tmp
, tmp2
);
6519 tcg_temp_free_i32(tmp2
);
6520 tcg_temp_free_i32(tmp
);
6522 case 8: case 9: case 10: case 11: case 12: case 13:
6523 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6524 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
6526 case 14: /* Polynomial VMULL */
6527 gen_helper_neon_mull_p8(cpu_V0
, tmp
, tmp2
);
6528 tcg_temp_free_i32(tmp2
);
6529 tcg_temp_free_i32(tmp
);
6531 default: /* 15 is RESERVED: caught earlier */
6536 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6537 neon_store_reg64(cpu_V0
, rd
+ pass
);
6538 } else if (op
== 5 || (op
>= 8 && op
<= 11)) {
6540 neon_load_reg64(cpu_V1
, rd
+ pass
);
6542 case 10: /* VMLSL */
6543 gen_neon_negl(cpu_V0
, size
);
6545 case 5: case 8: /* VABAL, VMLAL */
6546 gen_neon_addl(size
);
6548 case 9: case 11: /* VQDMLAL, VQDMLSL */
6549 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6551 gen_neon_negl(cpu_V0
, size
);
6553 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
6558 neon_store_reg64(cpu_V0
, rd
+ pass
);
6559 } else if (op
== 4 || op
== 6) {
6560 /* Narrowing operation. */
6561 tmp
= tcg_temp_new_i32();
6565 gen_helper_neon_narrow_high_u8(tmp
, cpu_V0
);
6568 gen_helper_neon_narrow_high_u16(tmp
, cpu_V0
);
6571 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
6572 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
6579 gen_helper_neon_narrow_round_high_u8(tmp
, cpu_V0
);
6582 gen_helper_neon_narrow_round_high_u16(tmp
, cpu_V0
);
6585 tcg_gen_addi_i64(cpu_V0
, cpu_V0
, 1u << 31);
6586 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, 32);
6587 tcg_gen_extrl_i64_i32(tmp
, cpu_V0
);
6595 neon_store_reg(rd
, 0, tmp3
);
6596 neon_store_reg(rd
, 1, tmp
);
6599 /* Write back the result. */
6600 neon_store_reg64(cpu_V0
, rd
+ pass
);
6604 /* Two registers and a scalar. NB that for ops of this form
6605 * the ARM ARM labels bit 24 as Q, but it is in our variable
6612 case 1: /* Float VMLA scalar */
6613 case 5: /* Floating point VMLS scalar */
6614 case 9: /* Floating point VMUL scalar */
6619 case 0: /* Integer VMLA scalar */
6620 case 4: /* Integer VMLS scalar */
6621 case 8: /* Integer VMUL scalar */
6622 case 12: /* VQDMULH scalar */
6623 case 13: /* VQRDMULH scalar */
6624 if (u
&& ((rd
| rn
) & 1)) {
6627 tmp
= neon_get_scalar(size
, rm
);
6628 neon_store_scratch(0, tmp
);
6629 for (pass
= 0; pass
< (u
? 4 : 2); pass
++) {
6630 tmp
= neon_load_scratch(0);
6631 tmp2
= neon_load_reg(rn
, pass
);
6634 gen_helper_neon_qdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
6636 gen_helper_neon_qdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
6638 } else if (op
== 13) {
6640 gen_helper_neon_qrdmulh_s16(tmp
, cpu_env
, tmp
, tmp2
);
6642 gen_helper_neon_qrdmulh_s32(tmp
, cpu_env
, tmp
, tmp2
);
6644 } else if (op
& 1) {
6645 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6646 gen_helper_vfp_muls(tmp
, tmp
, tmp2
, fpstatus
);
6647 tcg_temp_free_ptr(fpstatus
);
6650 case 0: gen_helper_neon_mul_u8(tmp
, tmp
, tmp2
); break;
6651 case 1: gen_helper_neon_mul_u16(tmp
, tmp
, tmp2
); break;
6652 case 2: tcg_gen_mul_i32(tmp
, tmp
, tmp2
); break;
6656 tcg_temp_free_i32(tmp2
);
6659 tmp2
= neon_load_reg(rd
, pass
);
6662 gen_neon_add(size
, tmp
, tmp2
);
6666 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6667 gen_helper_vfp_adds(tmp
, tmp
, tmp2
, fpstatus
);
6668 tcg_temp_free_ptr(fpstatus
);
6672 gen_neon_rsb(size
, tmp
, tmp2
);
6676 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
6677 gen_helper_vfp_subs(tmp
, tmp2
, tmp
, fpstatus
);
6678 tcg_temp_free_ptr(fpstatus
);
6684 tcg_temp_free_i32(tmp2
);
6686 neon_store_reg(rd
, pass
, tmp
);
6689 case 3: /* VQDMLAL scalar */
6690 case 7: /* VQDMLSL scalar */
6691 case 11: /* VQDMULL scalar */
6696 case 2: /* VMLAL sclar */
6697 case 6: /* VMLSL scalar */
6698 case 10: /* VMULL scalar */
6702 tmp2
= neon_get_scalar(size
, rm
);
6703 /* We need a copy of tmp2 because gen_neon_mull
6704 * deletes it during pass 0. */
6705 tmp4
= tcg_temp_new_i32();
6706 tcg_gen_mov_i32(tmp4
, tmp2
);
6707 tmp3
= neon_load_reg(rn
, 1);
6709 for (pass
= 0; pass
< 2; pass
++) {
6711 tmp
= neon_load_reg(rn
, 0);
6716 gen_neon_mull(cpu_V0
, tmp
, tmp2
, size
, u
);
6718 neon_load_reg64(cpu_V1
, rd
+ pass
);
6722 gen_neon_negl(cpu_V0
, size
);
6725 gen_neon_addl(size
);
6728 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6730 gen_neon_negl(cpu_V0
, size
);
6732 gen_neon_addl_saturate(cpu_V0
, cpu_V1
, size
);
6738 gen_neon_addl_saturate(cpu_V0
, cpu_V0
, size
);
6743 neon_store_reg64(cpu_V0
, rd
+ pass
);
6748 default: /* 14 and 15 are RESERVED */
6752 } else { /* size == 3 */
6755 imm
= (insn
>> 8) & 0xf;
6760 if (q
&& ((rd
| rn
| rm
) & 1)) {
6765 neon_load_reg64(cpu_V0
, rn
);
6767 neon_load_reg64(cpu_V1
, rn
+ 1);
6769 } else if (imm
== 8) {
6770 neon_load_reg64(cpu_V0
, rn
+ 1);
6772 neon_load_reg64(cpu_V1
, rm
);
6775 tmp64
= tcg_temp_new_i64();
6777 neon_load_reg64(cpu_V0
, rn
);
6778 neon_load_reg64(tmp64
, rn
+ 1);
6780 neon_load_reg64(cpu_V0
, rn
+ 1);
6781 neon_load_reg64(tmp64
, rm
);
6783 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, (imm
& 7) * 8);
6784 tcg_gen_shli_i64(cpu_V1
, tmp64
, 64 - ((imm
& 7) * 8));
6785 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6787 neon_load_reg64(cpu_V1
, rm
);
6789 neon_load_reg64(cpu_V1
, rm
+ 1);
6792 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
6793 tcg_gen_shri_i64(tmp64
, tmp64
, imm
* 8);
6794 tcg_gen_or_i64(cpu_V1
, cpu_V1
, tmp64
);
6795 tcg_temp_free_i64(tmp64
);
6798 neon_load_reg64(cpu_V0
, rn
);
6799 tcg_gen_shri_i64(cpu_V0
, cpu_V0
, imm
* 8);
6800 neon_load_reg64(cpu_V1
, rm
);
6801 tcg_gen_shli_i64(cpu_V1
, cpu_V1
, 64 - (imm
* 8));
6802 tcg_gen_or_i64(cpu_V0
, cpu_V0
, cpu_V1
);
6804 neon_store_reg64(cpu_V0
, rd
);
6806 neon_store_reg64(cpu_V1
, rd
+ 1);
6808 } else if ((insn
& (1 << 11)) == 0) {
6809 /* Two register misc. */
6810 op
= ((insn
>> 12) & 0x30) | ((insn
>> 7) & 0xf);
6811 size
= (insn
>> 18) & 3;
6812 /* UNDEF for unknown op values and bad op-size combinations */
6813 if ((neon_2rm_sizes
[op
] & (1 << size
)) == 0) {
6816 if (neon_2rm_is_v8_op(op
) &&
6817 !arm_dc_feature(s
, ARM_FEATURE_V8
)) {
6820 if ((op
!= NEON_2RM_VMOVN
&& op
!= NEON_2RM_VQMOVN
) &&
6821 q
&& ((rm
| rd
) & 1)) {
6825 case NEON_2RM_VREV64
:
6826 for (pass
= 0; pass
< (q
? 2 : 1); pass
++) {
6827 tmp
= neon_load_reg(rm
, pass
* 2);
6828 tmp2
= neon_load_reg(rm
, pass
* 2 + 1);
6830 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
6831 case 1: gen_swap_half(tmp
); break;
6832 case 2: /* no-op */ break;
6835 neon_store_reg(rd
, pass
* 2 + 1, tmp
);
6837 neon_store_reg(rd
, pass
* 2, tmp2
);
6840 case 0: tcg_gen_bswap32_i32(tmp2
, tmp2
); break;
6841 case 1: gen_swap_half(tmp2
); break;
6844 neon_store_reg(rd
, pass
* 2, tmp2
);
6848 case NEON_2RM_VPADDL
: case NEON_2RM_VPADDL_U
:
6849 case NEON_2RM_VPADAL
: case NEON_2RM_VPADAL_U
:
6850 for (pass
= 0; pass
< q
+ 1; pass
++) {
6851 tmp
= neon_load_reg(rm
, pass
* 2);
6852 gen_neon_widen(cpu_V0
, tmp
, size
, op
& 1);
6853 tmp
= neon_load_reg(rm
, pass
* 2 + 1);
6854 gen_neon_widen(cpu_V1
, tmp
, size
, op
& 1);
6856 case 0: gen_helper_neon_paddl_u16(CPU_V001
); break;
6857 case 1: gen_helper_neon_paddl_u32(CPU_V001
); break;
6858 case 2: tcg_gen_add_i64(CPU_V001
); break;
6861 if (op
>= NEON_2RM_VPADAL
) {
6863 neon_load_reg64(cpu_V1
, rd
+ pass
);
6864 gen_neon_addl(size
);
6866 neon_store_reg64(cpu_V0
, rd
+ pass
);
6872 for (n
= 0; n
< (q
? 4 : 2); n
+= 2) {
6873 tmp
= neon_load_reg(rm
, n
);
6874 tmp2
= neon_load_reg(rd
, n
+ 1);
6875 neon_store_reg(rm
, n
, tmp2
);
6876 neon_store_reg(rd
, n
+ 1, tmp
);
6883 if (gen_neon_unzip(rd
, rm
, size
, q
)) {
6888 if (gen_neon_zip(rd
, rm
, size
, q
)) {
6892 case NEON_2RM_VMOVN
: case NEON_2RM_VQMOVN
:
6893 /* also VQMOVUN; op field and mnemonics don't line up */
6897 TCGV_UNUSED_I32(tmp2
);
6898 for (pass
= 0; pass
< 2; pass
++) {
6899 neon_load_reg64(cpu_V0
, rm
+ pass
);
6900 tmp
= tcg_temp_new_i32();
6901 gen_neon_narrow_op(op
== NEON_2RM_VMOVN
, q
, size
,
6906 neon_store_reg(rd
, 0, tmp2
);
6907 neon_store_reg(rd
, 1, tmp
);
6911 case NEON_2RM_VSHLL
:
6912 if (q
|| (rd
& 1)) {
6915 tmp
= neon_load_reg(rm
, 0);
6916 tmp2
= neon_load_reg(rm
, 1);
6917 for (pass
= 0; pass
< 2; pass
++) {
6920 gen_neon_widen(cpu_V0
, tmp
, size
, 1);
6921 tcg_gen_shli_i64(cpu_V0
, cpu_V0
, 8 << size
);
6922 neon_store_reg64(cpu_V0
, rd
+ pass
);
6925 case NEON_2RM_VCVT_F16_F32
:
6926 if (!arm_dc_feature(s
, ARM_FEATURE_VFP_FP16
) ||
6930 tmp
= tcg_temp_new_i32();
6931 tmp2
= tcg_temp_new_i32();
6932 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 0));
6933 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
6934 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 1));
6935 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
6936 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
6937 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
6938 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 2));
6939 gen_helper_neon_fcvt_f32_to_f16(tmp
, cpu_F0s
, cpu_env
);
6940 tcg_gen_ld_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rm
, 3));
6941 neon_store_reg(rd
, 0, tmp2
);
6942 tmp2
= tcg_temp_new_i32();
6943 gen_helper_neon_fcvt_f32_to_f16(tmp2
, cpu_F0s
, cpu_env
);
6944 tcg_gen_shli_i32(tmp2
, tmp2
, 16);
6945 tcg_gen_or_i32(tmp2
, tmp2
, tmp
);
6946 neon_store_reg(rd
, 1, tmp2
);
6947 tcg_temp_free_i32(tmp
);
6949 case NEON_2RM_VCVT_F32_F16
:
6950 if (!arm_dc_feature(s
, ARM_FEATURE_VFP_FP16
) ||
6954 tmp3
= tcg_temp_new_i32();
6955 tmp
= neon_load_reg(rm
, 0);
6956 tmp2
= neon_load_reg(rm
, 1);
6957 tcg_gen_ext16u_i32(tmp3
, tmp
);
6958 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6959 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 0));
6960 tcg_gen_shri_i32(tmp3
, tmp
, 16);
6961 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6962 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 1));
6963 tcg_temp_free_i32(tmp
);
6964 tcg_gen_ext16u_i32(tmp3
, tmp2
);
6965 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6966 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 2));
6967 tcg_gen_shri_i32(tmp3
, tmp2
, 16);
6968 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s
, tmp3
, cpu_env
);
6969 tcg_gen_st_f32(cpu_F0s
, cpu_env
, neon_reg_offset(rd
, 3));
6970 tcg_temp_free_i32(tmp2
);
6971 tcg_temp_free_i32(tmp3
);
6973 case NEON_2RM_AESE
: case NEON_2RM_AESMC
:
6974 if (!arm_dc_feature(s
, ARM_FEATURE_V8_AES
)
6975 || ((rm
| rd
) & 1)) {
6978 tmp
= tcg_const_i32(rd
);
6979 tmp2
= tcg_const_i32(rm
);
6981 /* Bit 6 is the lowest opcode bit; it distinguishes between
6982 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6984 tmp3
= tcg_const_i32(extract32(insn
, 6, 1));
6986 if (op
== NEON_2RM_AESE
) {
6987 gen_helper_crypto_aese(cpu_env
, tmp
, tmp2
, tmp3
);
6989 gen_helper_crypto_aesmc(cpu_env
, tmp
, tmp2
, tmp3
);
6991 tcg_temp_free_i32(tmp
);
6992 tcg_temp_free_i32(tmp2
);
6993 tcg_temp_free_i32(tmp3
);
6995 case NEON_2RM_SHA1H
:
6996 if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA1
)
6997 || ((rm
| rd
) & 1)) {
7000 tmp
= tcg_const_i32(rd
);
7001 tmp2
= tcg_const_i32(rm
);
7003 gen_helper_crypto_sha1h(cpu_env
, tmp
, tmp2
);
7005 tcg_temp_free_i32(tmp
);
7006 tcg_temp_free_i32(tmp2
);
7008 case NEON_2RM_SHA1SU1
:
7009 if ((rm
| rd
) & 1) {
7012 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7014 if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA256
)) {
7017 } else if (!arm_dc_feature(s
, ARM_FEATURE_V8_SHA1
)) {
7020 tmp
= tcg_const_i32(rd
);
7021 tmp2
= tcg_const_i32(rm
);
7023 gen_helper_crypto_sha256su0(cpu_env
, tmp
, tmp2
);
7025 gen_helper_crypto_sha1su1(cpu_env
, tmp
, tmp2
);
7027 tcg_temp_free_i32(tmp
);
7028 tcg_temp_free_i32(tmp2
);
7032 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
7033 if (neon_2rm_is_float_op(op
)) {
7034 tcg_gen_ld_f32(cpu_F0s
, cpu_env
,
7035 neon_reg_offset(rm
, pass
));
7036 TCGV_UNUSED_I32(tmp
);
7038 tmp
= neon_load_reg(rm
, pass
);
7041 case NEON_2RM_VREV32
:
7043 case 0: tcg_gen_bswap32_i32(tmp
, tmp
); break;
7044 case 1: gen_swap_half(tmp
); break;
7048 case NEON_2RM_VREV16
:
7053 case 0: gen_helper_neon_cls_s8(tmp
, tmp
); break;
7054 case 1: gen_helper_neon_cls_s16(tmp
, tmp
); break;
7055 case 2: gen_helper_neon_cls_s32(tmp
, tmp
); break;
7061 case 0: gen_helper_neon_clz_u8(tmp
, tmp
); break;
7062 case 1: gen_helper_neon_clz_u16(tmp
, tmp
); break;
7063 case 2: gen_helper_clz(tmp
, tmp
); break;
7068 gen_helper_neon_cnt_u8(tmp
, tmp
);
7071 tcg_gen_not_i32(tmp
, tmp
);
7073 case NEON_2RM_VQABS
:
7076 gen_helper_neon_qabs_s8(tmp
, cpu_env
, tmp
);
7079 gen_helper_neon_qabs_s16(tmp
, cpu_env
, tmp
);
7082 gen_helper_neon_qabs_s32(tmp
, cpu_env
, tmp
);
7087 case NEON_2RM_VQNEG
:
7090 gen_helper_neon_qneg_s8(tmp
, cpu_env
, tmp
);
7093 gen_helper_neon_qneg_s16(tmp
, cpu_env
, tmp
);
7096 gen_helper_neon_qneg_s32(tmp
, cpu_env
, tmp
);
7101 case NEON_2RM_VCGT0
: case NEON_2RM_VCLE0
:
7102 tmp2
= tcg_const_i32(0);
7104 case 0: gen_helper_neon_cgt_s8(tmp
, tmp
, tmp2
); break;
7105 case 1: gen_helper_neon_cgt_s16(tmp
, tmp
, tmp2
); break;
7106 case 2: gen_helper_neon_cgt_s32(tmp
, tmp
, tmp2
); break;
7109 tcg_temp_free_i32(tmp2
);
7110 if (op
== NEON_2RM_VCLE0
) {
7111 tcg_gen_not_i32(tmp
, tmp
);
7114 case NEON_2RM_VCGE0
: case NEON_2RM_VCLT0
:
7115 tmp2
= tcg_const_i32(0);
7117 case 0: gen_helper_neon_cge_s8(tmp
, tmp
, tmp2
); break;
7118 case 1: gen_helper_neon_cge_s16(tmp
, tmp
, tmp2
); break;
7119 case 2: gen_helper_neon_cge_s32(tmp
, tmp
, tmp2
); break;
7122 tcg_temp_free_i32(tmp2
);
7123 if (op
== NEON_2RM_VCLT0
) {
7124 tcg_gen_not_i32(tmp
, tmp
);
7127 case NEON_2RM_VCEQ0
:
7128 tmp2
= tcg_const_i32(0);
7130 case 0: gen_helper_neon_ceq_u8(tmp
, tmp
, tmp2
); break;
7131 case 1: gen_helper_neon_ceq_u16(tmp
, tmp
, tmp2
); break;
7132 case 2: gen_helper_neon_ceq_u32(tmp
, tmp
, tmp2
); break;
7135 tcg_temp_free_i32(tmp2
);
7139 case 0: gen_helper_neon_abs_s8(tmp
, tmp
); break;
7140 case 1: gen_helper_neon_abs_s16(tmp
, tmp
); break;
7141 case 2: tcg_gen_abs_i32(tmp
, tmp
); break;
7146 tmp2
= tcg_const_i32(0);
7147 gen_neon_rsb(size
, tmp
, tmp2
);
7148 tcg_temp_free_i32(tmp2
);
7150 case NEON_2RM_VCGT0_F
:
7152 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7153 tmp2
= tcg_const_i32(0);
7154 gen_helper_neon_cgt_f32(tmp
, tmp
, tmp2
, fpstatus
);
7155 tcg_temp_free_i32(tmp2
);
7156 tcg_temp_free_ptr(fpstatus
);
7159 case NEON_2RM_VCGE0_F
:
7161 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7162 tmp2
= tcg_const_i32(0);
7163 gen_helper_neon_cge_f32(tmp
, tmp
, tmp2
, fpstatus
);
7164 tcg_temp_free_i32(tmp2
);
7165 tcg_temp_free_ptr(fpstatus
);
7168 case NEON_2RM_VCEQ0_F
:
7170 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7171 tmp2
= tcg_const_i32(0);
7172 gen_helper_neon_ceq_f32(tmp
, tmp
, tmp2
, fpstatus
);
7173 tcg_temp_free_i32(tmp2
);
7174 tcg_temp_free_ptr(fpstatus
);
7177 case NEON_2RM_VCLE0_F
:
7179 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7180 tmp2
= tcg_const_i32(0);
7181 gen_helper_neon_cge_f32(tmp
, tmp2
, tmp
, fpstatus
);
7182 tcg_temp_free_i32(tmp2
);
7183 tcg_temp_free_ptr(fpstatus
);
7186 case NEON_2RM_VCLT0_F
:
7188 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7189 tmp2
= tcg_const_i32(0);
7190 gen_helper_neon_cgt_f32(tmp
, tmp2
, tmp
, fpstatus
);
7191 tcg_temp_free_i32(tmp2
);
7192 tcg_temp_free_ptr(fpstatus
);
7195 case NEON_2RM_VABS_F
:
7198 case NEON_2RM_VNEG_F
:
7202 tmp2
= neon_load_reg(rd
, pass
);
7203 neon_store_reg(rm
, pass
, tmp2
);
7206 tmp2
= neon_load_reg(rd
, pass
);
7208 case 0: gen_neon_trn_u8(tmp
, tmp2
); break;
7209 case 1: gen_neon_trn_u16(tmp
, tmp2
); break;
7212 neon_store_reg(rm
, pass
, tmp2
);
7214 case NEON_2RM_VRINTN
:
7215 case NEON_2RM_VRINTA
:
7216 case NEON_2RM_VRINTM
:
7217 case NEON_2RM_VRINTP
:
7218 case NEON_2RM_VRINTZ
:
7221 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7224 if (op
== NEON_2RM_VRINTZ
) {
7225 rmode
= FPROUNDING_ZERO
;
7227 rmode
= fp_decode_rm
[((op
& 0x6) >> 1) ^ 1];
7230 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
7231 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
7233 gen_helper_rints(cpu_F0s
, cpu_F0s
, fpstatus
);
7234 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
7236 tcg_temp_free_ptr(fpstatus
);
7237 tcg_temp_free_i32(tcg_rmode
);
7240 case NEON_2RM_VRINTX
:
7242 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7243 gen_helper_rints_exact(cpu_F0s
, cpu_F0s
, fpstatus
);
7244 tcg_temp_free_ptr(fpstatus
);
7247 case NEON_2RM_VCVTAU
:
7248 case NEON_2RM_VCVTAS
:
7249 case NEON_2RM_VCVTNU
:
7250 case NEON_2RM_VCVTNS
:
7251 case NEON_2RM_VCVTPU
:
7252 case NEON_2RM_VCVTPS
:
7253 case NEON_2RM_VCVTMU
:
7254 case NEON_2RM_VCVTMS
:
7256 bool is_signed
= !extract32(insn
, 7, 1);
7257 TCGv_ptr fpst
= get_fpstatus_ptr(1);
7258 TCGv_i32 tcg_rmode
, tcg_shift
;
7259 int rmode
= fp_decode_rm
[extract32(insn
, 8, 2)];
7261 tcg_shift
= tcg_const_i32(0);
7262 tcg_rmode
= tcg_const_i32(arm_rmode_to_sf(rmode
));
7263 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
7267 gen_helper_vfp_tosls(cpu_F0s
, cpu_F0s
,
7270 gen_helper_vfp_touls(cpu_F0s
, cpu_F0s
,
7274 gen_helper_set_neon_rmode(tcg_rmode
, tcg_rmode
,
7276 tcg_temp_free_i32(tcg_rmode
);
7277 tcg_temp_free_i32(tcg_shift
);
7278 tcg_temp_free_ptr(fpst
);
7281 case NEON_2RM_VRECPE
:
7283 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7284 gen_helper_recpe_u32(tmp
, tmp
, fpstatus
);
7285 tcg_temp_free_ptr(fpstatus
);
7288 case NEON_2RM_VRSQRTE
:
7290 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7291 gen_helper_rsqrte_u32(tmp
, tmp
, fpstatus
);
7292 tcg_temp_free_ptr(fpstatus
);
7295 case NEON_2RM_VRECPE_F
:
7297 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7298 gen_helper_recpe_f32(cpu_F0s
, cpu_F0s
, fpstatus
);
7299 tcg_temp_free_ptr(fpstatus
);
7302 case NEON_2RM_VRSQRTE_F
:
7304 TCGv_ptr fpstatus
= get_fpstatus_ptr(1);
7305 gen_helper_rsqrte_f32(cpu_F0s
, cpu_F0s
, fpstatus
);
7306 tcg_temp_free_ptr(fpstatus
);
7309 case NEON_2RM_VCVT_FS
: /* VCVT.F32.S32 */
7312 case NEON_2RM_VCVT_FU
: /* VCVT.F32.U32 */
7315 case NEON_2RM_VCVT_SF
: /* VCVT.S32.F32 */
7316 gen_vfp_tosiz(0, 1);
7318 case NEON_2RM_VCVT_UF
: /* VCVT.U32.F32 */
7319 gen_vfp_touiz(0, 1);
7322 /* Reserved op values were caught by the
7323 * neon_2rm_sizes[] check earlier.
7327 if (neon_2rm_is_float_op(op
)) {
7328 tcg_gen_st_f32(cpu_F0s
, cpu_env
,
7329 neon_reg_offset(rd
, pass
));
7331 neon_store_reg(rd
, pass
, tmp
);
7336 } else if ((insn
& (1 << 10)) == 0) {
7338 int n
= ((insn
>> 8) & 3) + 1;
7339 if ((rn
+ n
) > 32) {
7340 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7341 * helper function running off the end of the register file.
7346 if (insn
& (1 << 6)) {
7347 tmp
= neon_load_reg(rd
, 0);
7349 tmp
= tcg_temp_new_i32();
7350 tcg_gen_movi_i32(tmp
, 0);
7352 tmp2
= neon_load_reg(rm
, 0);
7353 tmp4
= tcg_const_i32(rn
);
7354 tmp5
= tcg_const_i32(n
);
7355 gen_helper_neon_tbl(tmp2
, cpu_env
, tmp2
, tmp
, tmp4
, tmp5
);
7356 tcg_temp_free_i32(tmp
);
7357 if (insn
& (1 << 6)) {
7358 tmp
= neon_load_reg(rd
, 1);
7360 tmp
= tcg_temp_new_i32();
7361 tcg_gen_movi_i32(tmp
, 0);
7363 tmp3
= neon_load_reg(rm
, 1);
7364 gen_helper_neon_tbl(tmp3
, cpu_env
, tmp3
, tmp
, tmp4
, tmp5
);
7365 tcg_temp_free_i32(tmp5
);
7366 tcg_temp_free_i32(tmp4
);
7367 neon_store_reg(rd
, 0, tmp2
);
7368 neon_store_reg(rd
, 1, tmp3
);
7369 tcg_temp_free_i32(tmp
);
7370 } else if ((insn
& 0x380) == 0) {
7372 if ((insn
& (7 << 16)) == 0 || (q
&& (rd
& 1))) {
7375 if (insn
& (1 << 19)) {
7376 tmp
= neon_load_reg(rm
, 1);
7378 tmp
= neon_load_reg(rm
, 0);
7380 if (insn
& (1 << 16)) {
7381 gen_neon_dup_u8(tmp
, ((insn
>> 17) & 3) * 8);
7382 } else if (insn
& (1 << 17)) {
7383 if ((insn
>> 18) & 1)
7384 gen_neon_dup_high16(tmp
);
7386 gen_neon_dup_low16(tmp
);
7388 for (pass
= 0; pass
< (q
? 4 : 2); pass
++) {
7389 tmp2
= tcg_temp_new_i32();
7390 tcg_gen_mov_i32(tmp2
, tmp
);
7391 neon_store_reg(rd
, pass
, tmp2
);
7393 tcg_temp_free_i32(tmp
);
7402 static int disas_coproc_insn(DisasContext
*s
, uint32_t insn
)
7404 int cpnum
, is64
, crn
, crm
, opc1
, opc2
, isread
, rt
, rt2
;
7405 const ARMCPRegInfo
*ri
;
7407 cpnum
= (insn
>> 8) & 0xf;
7409 /* First check for coprocessor space used for XScale/iwMMXt insns */
7410 if (arm_dc_feature(s
, ARM_FEATURE_XSCALE
) && (cpnum
< 2)) {
7411 if (extract32(s
->c15_cpar
, cpnum
, 1) == 0) {
7414 if (arm_dc_feature(s
, ARM_FEATURE_IWMMXT
)) {
7415 return disas_iwmmxt_insn(s
, insn
);
7416 } else if (arm_dc_feature(s
, ARM_FEATURE_XSCALE
)) {
7417 return disas_dsp_insn(s
, insn
);
7422 /* Otherwise treat as a generic register access */
7423 is64
= (insn
& (1 << 25)) == 0;
7424 if (!is64
&& ((insn
& (1 << 4)) == 0)) {
7432 opc1
= (insn
>> 4) & 0xf;
7434 rt2
= (insn
>> 16) & 0xf;
7436 crn
= (insn
>> 16) & 0xf;
7437 opc1
= (insn
>> 21) & 7;
7438 opc2
= (insn
>> 5) & 7;
7441 isread
= (insn
>> 20) & 1;
7442 rt
= (insn
>> 12) & 0xf;
7444 ri
= get_arm_cp_reginfo(s
->cp_regs
,
7445 ENCODE_CP_REG(cpnum
, is64
, s
->ns
, crn
, crm
, opc1
, opc2
));
7447 /* Check access permissions */
7448 if (!cp_access_ok(s
->current_el
, ri
, isread
)) {
7453 (arm_dc_feature(s
, ARM_FEATURE_XSCALE
) && cpnum
< 14)) {
7454 /* Emit code to perform further access permissions checks at
7455 * runtime; this may result in an exception.
7456 * Note that on XScale all cp0..c13 registers do an access check
7457 * call in order to handle c15_cpar.
7460 TCGv_i32 tcg_syn
, tcg_isread
;
7463 /* Note that since we are an implementation which takes an
7464 * exception on a trapped conditional instruction only if the
7465 * instruction passes its condition code check, we can take
7466 * advantage of the clause in the ARM ARM that allows us to set
7467 * the COND field in the instruction to 0xE in all cases.
7468 * We could fish the actual condition out of the insn (ARM)
7469 * or the condexec bits (Thumb) but it isn't necessary.
7474 syndrome
= syn_cp14_rrt_trap(1, 0xe, opc1
, crm
, rt
, rt2
,
7477 syndrome
= syn_cp14_rt_trap(1, 0xe, opc1
, opc2
, crn
, crm
,
7483 syndrome
= syn_cp15_rrt_trap(1, 0xe, opc1
, crm
, rt
, rt2
,
7486 syndrome
= syn_cp15_rt_trap(1, 0xe, opc1
, opc2
, crn
, crm
,
7491 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7492 * so this can only happen if this is an ARMv7 or earlier CPU,
7493 * in which case the syndrome information won't actually be
7496 assert(!arm_dc_feature(s
, ARM_FEATURE_V8
));
7497 syndrome
= syn_uncategorized();
7501 gen_set_condexec(s
);
7502 gen_set_pc_im(s
, s
->pc
- 4);
7503 tmpptr
= tcg_const_ptr(ri
);
7504 tcg_syn
= tcg_const_i32(syndrome
);
7505 tcg_isread
= tcg_const_i32(isread
);
7506 gen_helper_access_check_cp_reg(cpu_env
, tmpptr
, tcg_syn
,
7508 tcg_temp_free_ptr(tmpptr
);
7509 tcg_temp_free_i32(tcg_syn
);
7510 tcg_temp_free_i32(tcg_isread
);
7513 /* Handle special cases first */
7514 switch (ri
->type
& ~(ARM_CP_FLAG_MASK
& ~ARM_CP_SPECIAL
)) {
7521 gen_set_pc_im(s
, s
->pc
);
7522 s
->is_jmp
= DISAS_WFI
;
7528 if ((s
->tb
->cflags
& CF_USE_ICOUNT
) && (ri
->type
& ARM_CP_IO
)) {
7537 if (ri
->type
& ARM_CP_CONST
) {
7538 tmp64
= tcg_const_i64(ri
->resetvalue
);
7539 } else if (ri
->readfn
) {
7541 tmp64
= tcg_temp_new_i64();
7542 tmpptr
= tcg_const_ptr(ri
);
7543 gen_helper_get_cp_reg64(tmp64
, cpu_env
, tmpptr
);
7544 tcg_temp_free_ptr(tmpptr
);
7546 tmp64
= tcg_temp_new_i64();
7547 tcg_gen_ld_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
7549 tmp
= tcg_temp_new_i32();
7550 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
7551 store_reg(s
, rt
, tmp
);
7552 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
7553 tmp
= tcg_temp_new_i32();
7554 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
7555 tcg_temp_free_i64(tmp64
);
7556 store_reg(s
, rt2
, tmp
);
7559 if (ri
->type
& ARM_CP_CONST
) {
7560 tmp
= tcg_const_i32(ri
->resetvalue
);
7561 } else if (ri
->readfn
) {
7563 tmp
= tcg_temp_new_i32();
7564 tmpptr
= tcg_const_ptr(ri
);
7565 gen_helper_get_cp_reg(tmp
, cpu_env
, tmpptr
);
7566 tcg_temp_free_ptr(tmpptr
);
7568 tmp
= load_cpu_offset(ri
->fieldoffset
);
7571 /* Destination register of r15 for 32 bit loads sets
7572 * the condition codes from the high 4 bits of the value
7575 tcg_temp_free_i32(tmp
);
7577 store_reg(s
, rt
, tmp
);
7582 if (ri
->type
& ARM_CP_CONST
) {
7583 /* If not forbidden by access permissions, treat as WI */
7588 TCGv_i32 tmplo
, tmphi
;
7589 TCGv_i64 tmp64
= tcg_temp_new_i64();
7590 tmplo
= load_reg(s
, rt
);
7591 tmphi
= load_reg(s
, rt2
);
7592 tcg_gen_concat_i32_i64(tmp64
, tmplo
, tmphi
);
7593 tcg_temp_free_i32(tmplo
);
7594 tcg_temp_free_i32(tmphi
);
7596 TCGv_ptr tmpptr
= tcg_const_ptr(ri
);
7597 gen_helper_set_cp_reg64(cpu_env
, tmpptr
, tmp64
);
7598 tcg_temp_free_ptr(tmpptr
);
7600 tcg_gen_st_i64(tmp64
, cpu_env
, ri
->fieldoffset
);
7602 tcg_temp_free_i64(tmp64
);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }
        if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }
    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    return 1;
}
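/* Note on the qemu_log_mask() calls above: accesses to unimplemented system
 * registers are reported under LOG_UNIMP (the "-d unimp" logging option) and
 * then UNDEF via the nonzero return to the caller.
 */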
/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_extrl_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}
/* load a 32-bit value from a register and perform a 64-bit accumulate. */
static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
    TCGv_i64 tmp;
    TCGv_i32 tmp2;

    /* Load value and extend to 64 bits.  */
    tmp = tcg_temp_new_i64();
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
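/* gen_addq_lo() above zero-extends a single 32-bit register before adding
 * it; the UMAAL path in disas_arm_insn() calls it twice in a row to fold
 * both accumulator registers into the 64-bit product.
 */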
/* load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv_i32 tmpl;
    TCGv_i32 tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
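/* gen_addq() above is the 64-bit accumulate step shared by the UMLAL/SMLAL
 * style instructions: it concatenates the {rhigh:rlow} pair and adds it to
 * the multiply result before gen_storeq_reg() writes the pair back.
 */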
/* Set N and Z flags from hi|lo.  */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
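/* gen_logicq_cc() above only touches N and Z: the flag-setting 64-bit
 * multiplies leave C and V unchanged, so N comes from the high word and Z
 * from the OR of both halves (cpu_ZF is 0 exactly when Z should be set).
 */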
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.  The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually.  */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    s->is_ldex = true;

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        gen_aa32_ld_i64(s, t64, addr, get_mem_index(s), opc);
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
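/* The monitor state (cpu_exclusive_addr/cpu_exclusive_val) is kept in 64-bit
 * globals so the same bookkeeping covers both the 32-bit forms and
 * LDREXD/STREXD; a 32-bit exclusive load is zero-extended into it above.
 */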
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
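/* gen_clrex() above implements CLREX by setting the remembered exclusive
 * address to -1, a value the zero-extended 32-bit address compare in
 * gen_store_exclusive() can never match.
 */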
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();
    t1 = load_reg(s, rt);
    if (size == 3) {
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        t2 = load_reg(s, rt2);
        tcg_gen_concat_i32_i64(n64, t1, t2);
        tcg_temp_free_i32(t2);
        gen_aa32_frob64(s, n64);

        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        gen_aa32_frob64(s, o64);
        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        t2 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
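/* Whether the compare-and-swap above succeeds or fails, the exclusive
 * monitor is cleared again (cpu_exclusive_addr = -1), so a store-exclusive
 * always ends the outstanding exclusive access.
 */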
/* gen_srs:
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    s->is_jmp = DISAS_UPDATE;
}
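/* gen_srs() above ends the TB with DISAS_UPDATE; because the banked-SP
 * helpers may raise an exception at runtime, the condexec state and PC are
 * also synced before they are called.
 */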
static void disas_arm_insn(DisasContext *s, unsigned int insn)
{
    unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 tmp3;
    TCGv_i32 addr;
    TCGv_i64 tmp64;

    /* M variants do not implement ARM mode.  */
    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        goto illegal_op;
    }
    cond = insn >> 28;
    if (cond == 0xf) {
        /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
         * choose to UNDEF. In ARMv5 and above the space is used
         * for miscellaneous unconditional instructions.
         */
        ARCH(5);

        /* Unconditional instructions.  */
        if (((insn >> 25) & 7) == 1) {
7959 /* NEON Data processing. */
7960 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
7964 if (disas_neon_data_insn(s
, insn
)) {
7969 if ((insn
& 0x0f100000) == 0x04000000) {
7970 /* NEON load/store. */
7971 if (!arm_dc_feature(s
, ARM_FEATURE_NEON
)) {
7975 if (disas_neon_ls_insn(s
, insn
)) {
7980 if ((insn
& 0x0f000e10) == 0x0e000a00) {
7982 if (disas_vfp_insn(s
, insn
)) {
7987 if (((insn
& 0x0f30f000) == 0x0510f000) ||
7988 ((insn
& 0x0f30f010) == 0x0710f000)) {
7989 if ((insn
& (1 << 22)) == 0) {
7991 if (!arm_dc_feature(s
, ARM_FEATURE_V7MP
)) {
7995 /* Otherwise PLD; v5TE+ */
7999 if (((insn
& 0x0f70f000) == 0x0450f000) ||
8000 ((insn
& 0x0f70f010) == 0x0650f000)) {
8002 return; /* PLI; V7 */
8004 if (((insn
& 0x0f700000) == 0x04100000) ||
8005 ((insn
& 0x0f700010) == 0x06100000)) {
8006 if (!arm_dc_feature(s
, ARM_FEATURE_V7MP
)) {
8009 return; /* v7MP: Unallocated memory hint: must NOP */
8012 if ((insn
& 0x0ffffdff) == 0x01010000) {
8015 if (((insn
>> 9) & 1) != !!(s
->be_data
== MO_BE
)) {
8016 gen_helper_setend(cpu_env
);
8017 s
->is_jmp
= DISAS_UPDATE
;
8020 } else if ((insn
& 0x0fffff00) == 0x057ff000) {
8021 switch ((insn
>> 4) & 0xf) {
8029 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
8032 /* We need to break the TB after this insn to execute
8033 * self-modifying code correctly and also to take
8034 * any pending interrupts immediately.
8041 } else if ((insn
& 0x0e5fffe0) == 0x084d0500) {
8044 gen_srs(s
, (insn
& 0x1f), (insn
>> 23) & 3, insn
& (1 << 21));
8046 } else if ((insn
& 0x0e50ffe0) == 0x08100a00) {
8052 rn
= (insn
>> 16) & 0xf;
8053 addr
= load_reg(s
, rn
);
8054 i
= (insn
>> 23) & 3;
8056 case 0: offset
= -4; break; /* DA */
8057 case 1: offset
= 0; break; /* IA */
8058 case 2: offset
= -8; break; /* DB */
8059 case 3: offset
= 4; break; /* IB */
8063 tcg_gen_addi_i32(addr
, addr
, offset
);
8064 /* Load PC into tmp and CPSR into tmp2. */
8065 tmp
= tcg_temp_new_i32();
8066 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
8067 tcg_gen_addi_i32(addr
, addr
, 4);
8068 tmp2
= tcg_temp_new_i32();
8069 gen_aa32_ld32u(s
, tmp2
, addr
, get_mem_index(s
));
8070 if (insn
& (1 << 21)) {
8071 /* Base writeback. */
8073 case 0: offset
= -8; break;
8074 case 1: offset
= 4; break;
8075 case 2: offset
= -4; break;
8076 case 3: offset
= 0; break;
8080 tcg_gen_addi_i32(addr
, addr
, offset
);
8081 store_reg(s
, rn
, addr
);
8083 tcg_temp_free_i32(addr
);
8085 gen_rfe(s
, tmp
, tmp2
);
8087 } else if ((insn
& 0x0e000000) == 0x0a000000) {
8088 /* branch link and change to thumb (blx <offset>) */
8091 val
= (uint32_t)s
->pc
;
8092 tmp
= tcg_temp_new_i32();
8093 tcg_gen_movi_i32(tmp
, val
);
8094 store_reg(s
, 14, tmp
);
8095 /* Sign-extend the 24-bit offset */
8096 offset
= (((int32_t)insn
) << 8) >> 8;
8097 /* offset * 4 + bit24 * 2 + (thumb bit) */
8098 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
8099 /* pipeline offset */
8101 /* protected by ARCH(5); above, near the start of uncond block */
8104 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
8105 if (arm_dc_feature(s
, ARM_FEATURE_IWMMXT
)) {
8106 /* iWMMXt register transfer. */
8107 if (extract32(s
->c15_cpar
, 1, 1)) {
8108 if (!disas_iwmmxt_insn(s
, insn
)) {
8113 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
8114 /* Coprocessor double register transfer. */
8116 } else if ((insn
& 0x0f000010) == 0x0e000010) {
8117 /* Additional coprocessor register transfer. */
8118 } else if ((insn
& 0x0ff10020) == 0x01000000) {
8121 /* cps (privileged) */
8125 if (insn
& (1 << 19)) {
8126 if (insn
& (1 << 8))
8128 if (insn
& (1 << 7))
8130 if (insn
& (1 << 6))
8132 if (insn
& (1 << 18))
8135 if (insn
& (1 << 17)) {
8137 val
|= (insn
& 0x1f);
8140 gen_set_psr_im(s
, mask
, 0, val
);
8147 /* if not always execute, we generate a conditional jump to
8149 s
->condlabel
= gen_new_label();
8150 arm_gen_test_cc(cond
^ 1, s
->condlabel
);
8153 if ((insn
& 0x0f900000) == 0x03000000) {
8154 if ((insn
& (1 << 21)) == 0) {
8156 rd
= (insn
>> 12) & 0xf;
8157 val
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
8158 if ((insn
& (1 << 22)) == 0) {
8160 tmp
= tcg_temp_new_i32();
8161 tcg_gen_movi_i32(tmp
, val
);
8164 tmp
= load_reg(s
, rd
);
8165 tcg_gen_ext16u_i32(tmp
, tmp
);
8166 tcg_gen_ori_i32(tmp
, tmp
, val
<< 16);
8168 store_reg(s
, rd
, tmp
);
8170 if (((insn
>> 12) & 0xf) != 0xf)
8172 if (((insn
>> 16) & 0xf) == 0) {
8173 gen_nop_hint(s
, insn
& 0xff);
8175 /* CPSR = immediate */
8177 shift
= ((insn
>> 8) & 0xf) * 2;
8179 val
= (val
>> shift
) | (val
<< (32 - shift
));
8180 i
= ((insn
& (1 << 22)) != 0);
8181 if (gen_set_psr_im(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
),
8187 } else if ((insn
& 0x0f900000) == 0x01000000
8188 && (insn
& 0x00000090) != 0x00000090) {
8189 /* miscellaneous instructions */
8190 op1
= (insn
>> 21) & 3;
8191 sh
= (insn
>> 4) & 0xf;
8194 case 0x0: /* MSR, MRS */
8195 if (insn
& (1 << 9)) {
8196 /* MSR (banked) and MRS (banked) */
8197 int sysm
= extract32(insn
, 16, 4) |
8198 (extract32(insn
, 8, 1) << 4);
8199 int r
= extract32(insn
, 22, 1);
8203 gen_msr_banked(s
, r
, sysm
, rm
);
8206 int rd
= extract32(insn
, 12, 4);
8208 gen_mrs_banked(s
, r
, sysm
, rd
);
8213 /* MSR, MRS (for PSRs) */
8216 tmp
= load_reg(s
, rm
);
8217 i
= ((op1
& 2) != 0);
8218 if (gen_set_psr(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
), i
, tmp
))
8222 rd
= (insn
>> 12) & 0xf;
8226 tmp
= load_cpu_field(spsr
);
8228 tmp
= tcg_temp_new_i32();
8229 gen_helper_cpsr_read(tmp
, cpu_env
);
8231 store_reg(s
, rd
, tmp
);
8236 /* branch/exchange thumb (bx). */
8238 tmp
= load_reg(s
, rm
);
8240 } else if (op1
== 3) {
8243 rd
= (insn
>> 12) & 0xf;
8244 tmp
= load_reg(s
, rm
);
8245 gen_helper_clz(tmp
, tmp
);
8246 store_reg(s
, rd
, tmp
);
8254 /* Trivial implementation equivalent to bx. */
8255 tmp
= load_reg(s
, rm
);
8266 /* branch link/exchange thumb (blx) */
8267 tmp
= load_reg(s
, rm
);
8268 tmp2
= tcg_temp_new_i32();
8269 tcg_gen_movi_i32(tmp2
, s
->pc
);
8270 store_reg(s
, 14, tmp2
);
8276 uint32_t c
= extract32(insn
, 8, 4);
8278 /* Check this CPU supports ARMv8 CRC instructions.
8279 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8280 * Bits 8, 10 and 11 should be zero.
8282 if (!arm_dc_feature(s
, ARM_FEATURE_CRC
) || op1
== 0x3 ||
8287 rn
= extract32(insn
, 16, 4);
8288 rd
= extract32(insn
, 12, 4);
8290 tmp
= load_reg(s
, rn
);
8291 tmp2
= load_reg(s
, rm
);
8293 tcg_gen_andi_i32(tmp2
, tmp2
, 0xff);
8294 } else if (op1
== 1) {
8295 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff);
8297 tmp3
= tcg_const_i32(1 << op1
);
8299 gen_helper_crc32c(tmp
, tmp
, tmp2
, tmp3
);
8301 gen_helper_crc32(tmp
, tmp
, tmp2
, tmp3
);
8303 tcg_temp_free_i32(tmp2
);
8304 tcg_temp_free_i32(tmp3
);
8305 store_reg(s
, rd
, tmp
);
8308 case 0x5: /* saturating add/subtract */
8310 rd
= (insn
>> 12) & 0xf;
8311 rn
= (insn
>> 16) & 0xf;
8312 tmp
= load_reg(s
, rm
);
8313 tmp2
= load_reg(s
, rn
);
8315 gen_helper_double_saturate(tmp2
, cpu_env
, tmp2
);
8317 gen_helper_sub_saturate(tmp
, cpu_env
, tmp
, tmp2
);
8319 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp2
);
8320 tcg_temp_free_i32(tmp2
);
8321 store_reg(s
, rd
, tmp
);
8325 int imm16
= extract32(insn
, 0, 4) | (extract32(insn
, 8, 12) << 4);
8334 gen_exception_insn(s
, 4, EXCP_BKPT
,
8335 syn_aa32_bkpt(imm16
, false),
8336 default_exception_el(s
));
8339 /* Hypervisor call (v7) */
8347 /* Secure monitor call (v6+) */
8355 g_assert_not_reached();
8359 case 0x8: /* signed multiply */
8364 rs
= (insn
>> 8) & 0xf;
8365 rn
= (insn
>> 12) & 0xf;
8366 rd
= (insn
>> 16) & 0xf;
8368 /* (32 * 16) >> 16 */
8369 tmp
= load_reg(s
, rm
);
8370 tmp2
= load_reg(s
, rs
);
8372 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
8375 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
8376 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
8377 tmp
= tcg_temp_new_i32();
8378 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
8379 tcg_temp_free_i64(tmp64
);
8380 if ((sh
& 2) == 0) {
8381 tmp2
= load_reg(s
, rn
);
8382 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8383 tcg_temp_free_i32(tmp2
);
8385 store_reg(s
, rd
, tmp
);
8388 tmp
= load_reg(s
, rm
);
8389 tmp2
= load_reg(s
, rs
);
8390 gen_mulxy(tmp
, tmp2
, sh
& 2, sh
& 4);
8391 tcg_temp_free_i32(tmp2
);
8393 tmp64
= tcg_temp_new_i64();
8394 tcg_gen_ext_i32_i64(tmp64
, tmp
);
8395 tcg_temp_free_i32(tmp
);
8396 gen_addq(s
, tmp64
, rn
, rd
);
8397 gen_storeq_reg(s
, rn
, rd
, tmp64
);
8398 tcg_temp_free_i64(tmp64
);
8401 tmp2
= load_reg(s
, rn
);
8402 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
8403 tcg_temp_free_i32(tmp2
);
8405 store_reg(s
, rd
, tmp
);
8412 } else if (((insn
& 0x0e000000) == 0 &&
8413 (insn
& 0x00000090) != 0x90) ||
8414 ((insn
& 0x0e000000) == (1 << 25))) {
8415 int set_cc
, logic_cc
, shiftop
;
8417 op1
= (insn
>> 21) & 0xf;
8418 set_cc
= (insn
>> 20) & 1;
8419 logic_cc
= table_logic_cc
[op1
] & set_cc
;
8421 /* data processing instruction */
8422 if (insn
& (1 << 25)) {
8423 /* immediate operand */
8425 shift
= ((insn
>> 8) & 0xf) * 2;
8427 val
= (val
>> shift
) | (val
<< (32 - shift
));
8429 tmp2
= tcg_temp_new_i32();
8430 tcg_gen_movi_i32(tmp2
, val
);
8431 if (logic_cc
&& shift
) {
8432 gen_set_CF_bit31(tmp2
);
8437 tmp2
= load_reg(s
, rm
);
8438 shiftop
= (insn
>> 5) & 3;
8439 if (!(insn
& (1 << 4))) {
8440 shift
= (insn
>> 7) & 0x1f;
8441 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
8443 rs
= (insn
>> 8) & 0xf;
8444 tmp
= load_reg(s
, rs
);
8445 gen_arm_shift_reg(tmp2
, shiftop
, tmp
, logic_cc
);
8448 if (op1
!= 0x0f && op1
!= 0x0d) {
8449 rn
= (insn
>> 16) & 0xf;
8450 tmp
= load_reg(s
, rn
);
8452 TCGV_UNUSED_I32(tmp
);
8454 rd
= (insn
>> 12) & 0xf;
8457 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
8461 store_reg_bx(s
, rd
, tmp
);
8464 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
8468 store_reg_bx(s
, rd
, tmp
);
8471 if (set_cc
&& rd
== 15) {
8472 /* SUBS r15, ... is used for exception return. */
8476 gen_sub_CC(tmp
, tmp
, tmp2
);
8477 gen_exception_return(s
, tmp
);
8480 gen_sub_CC(tmp
, tmp
, tmp2
);
8482 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
8484 store_reg_bx(s
, rd
, tmp
);
8489 gen_sub_CC(tmp
, tmp2
, tmp
);
8491 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
8493 store_reg_bx(s
, rd
, tmp
);
8497 gen_add_CC(tmp
, tmp
, tmp2
);
8499 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8501 store_reg_bx(s
, rd
, tmp
);
8505 gen_adc_CC(tmp
, tmp
, tmp2
);
8507 gen_add_carry(tmp
, tmp
, tmp2
);
8509 store_reg_bx(s
, rd
, tmp
);
8513 gen_sbc_CC(tmp
, tmp
, tmp2
);
8515 gen_sub_carry(tmp
, tmp
, tmp2
);
8517 store_reg_bx(s
, rd
, tmp
);
8521 gen_sbc_CC(tmp
, tmp2
, tmp
);
8523 gen_sub_carry(tmp
, tmp2
, tmp
);
8525 store_reg_bx(s
, rd
, tmp
);
8529 tcg_gen_and_i32(tmp
, tmp
, tmp2
);
8532 tcg_temp_free_i32(tmp
);
8536 tcg_gen_xor_i32(tmp
, tmp
, tmp2
);
8539 tcg_temp_free_i32(tmp
);
8543 gen_sub_CC(tmp
, tmp
, tmp2
);
8545 tcg_temp_free_i32(tmp
);
8549 gen_add_CC(tmp
, tmp
, tmp2
);
8551 tcg_temp_free_i32(tmp
);
8554 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
8558 store_reg_bx(s
, rd
, tmp
);
8561 if (logic_cc
&& rd
== 15) {
8562 /* MOVS r15, ... is used for exception return. */
8566 gen_exception_return(s
, tmp2
);
8571 store_reg_bx(s
, rd
, tmp2
);
8575 tcg_gen_andc_i32(tmp
, tmp
, tmp2
);
8579 store_reg_bx(s
, rd
, tmp
);
8583 tcg_gen_not_i32(tmp2
, tmp2
);
8587 store_reg_bx(s
, rd
, tmp2
);
8590 if (op1
!= 0x0f && op1
!= 0x0d) {
8591 tcg_temp_free_i32(tmp2
);
8594 /* other instructions */
8595 op1
= (insn
>> 24) & 0xf;
8599 /* multiplies, extra load/stores */
8600 sh
= (insn
>> 5) & 3;
8603 rd
= (insn
>> 16) & 0xf;
8604 rn
= (insn
>> 12) & 0xf;
8605 rs
= (insn
>> 8) & 0xf;
8607 op1
= (insn
>> 20) & 0xf;
8609 case 0: case 1: case 2: case 3: case 6:
8611 tmp
= load_reg(s
, rs
);
8612 tmp2
= load_reg(s
, rm
);
8613 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
8614 tcg_temp_free_i32(tmp2
);
8615 if (insn
& (1 << 22)) {
8616 /* Subtract (mls) */
8618 tmp2
= load_reg(s
, rn
);
8619 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
8620 tcg_temp_free_i32(tmp2
);
8621 } else if (insn
& (1 << 21)) {
8623 tmp2
= load_reg(s
, rn
);
8624 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
8625 tcg_temp_free_i32(tmp2
);
8627 if (insn
& (1 << 20))
8629 store_reg(s
, rd
, tmp
);
8632 /* 64 bit mul double accumulate (UMAAL) */
8634 tmp
= load_reg(s
, rs
);
8635 tmp2
= load_reg(s
, rm
);
8636 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
8637 gen_addq_lo(s
, tmp64
, rn
);
8638 gen_addq_lo(s
, tmp64
, rd
);
8639 gen_storeq_reg(s
, rn
, rd
, tmp64
);
8640 tcg_temp_free_i64(tmp64
);
8642 case 8: case 9: case 10: case 11:
8643 case 12: case 13: case 14: case 15:
8644 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8645 tmp
= load_reg(s
, rs
);
8646 tmp2
= load_reg(s
, rm
);
8647 if (insn
& (1 << 22)) {
8648 tcg_gen_muls2_i32(tmp
, tmp2
, tmp
, tmp2
);
8650 tcg_gen_mulu2_i32(tmp
, tmp2
, tmp
, tmp2
);
8652 if (insn
& (1 << 21)) { /* mult accumulate */
8653 TCGv_i32 al
= load_reg(s
, rn
);
8654 TCGv_i32 ah
= load_reg(s
, rd
);
8655 tcg_gen_add2_i32(tmp
, tmp2
, tmp
, tmp2
, al
, ah
);
8656 tcg_temp_free_i32(al
);
8657 tcg_temp_free_i32(ah
);
8659 if (insn
& (1 << 20)) {
8660 gen_logicq_cc(tmp
, tmp2
);
8662 store_reg(s
, rn
, tmp
);
8663 store_reg(s
, rd
, tmp2
);
8669 rn
= (insn
>> 16) & 0xf;
8670 rd
= (insn
>> 12) & 0xf;
8671 if (insn
& (1 << 23)) {
8672 /* load/store exclusive */
8673 int op2
= (insn
>> 8) & 3;
8674 op1
= (insn
>> 21) & 0x3;
8677 case 0: /* lda/stl */
8683 case 1: /* reserved */
8685 case 2: /* ldaex/stlex */
8688 case 3: /* ldrex/strex */
8697 addr
= tcg_temp_local_new_i32();
8698 load_reg_var(s
, addr
, rn
);
8700 /* Since the emulation does not have barriers,
8701 the acquire/release semantics need no special
8704 if (insn
& (1 << 20)) {
8705 tmp
= tcg_temp_new_i32();
8708 gen_aa32_ld32u(s
, tmp
, addr
,
8712 gen_aa32_ld8u(s
, tmp
, addr
,
8716 gen_aa32_ld16u(s
, tmp
, addr
,
8722 store_reg(s
, rd
, tmp
);
8725 tmp
= load_reg(s
, rm
);
8728 gen_aa32_st32(s
, tmp
, addr
,
8732 gen_aa32_st8(s
, tmp
, addr
,
8736 gen_aa32_st16(s
, tmp
, addr
,
8742 tcg_temp_free_i32(tmp
);
8744 } else if (insn
& (1 << 20)) {
8747 gen_load_exclusive(s
, rd
, 15, addr
, 2);
8749 case 1: /* ldrexd */
8750 gen_load_exclusive(s
, rd
, rd
+ 1, addr
, 3);
8752 case 2: /* ldrexb */
8753 gen_load_exclusive(s
, rd
, 15, addr
, 0);
8755 case 3: /* ldrexh */
8756 gen_load_exclusive(s
, rd
, 15, addr
, 1);
8765 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 2);
8767 case 1: /* strexd */
8768 gen_store_exclusive(s
, rd
, rm
, rm
+ 1, addr
, 3);
8770 case 2: /* strexb */
8771 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 0);
8773 case 3: /* strexh */
8774 gen_store_exclusive(s
, rd
, rm
, 15, addr
, 1);
8780 tcg_temp_free_i32(addr
);
                TCGv taddr;
                TCGMemOp opc = s->be_data;

                /* SWP instruction */
                ARCH(6);

                if (insn & (1 << 22)) {
                    opc |= MO_UB;
                } else {
                    opc |= MO_UL | MO_ALIGN;
                }

                addr = load_reg(s, rn);
                taddr = gen_aa32_addr(s, addr, opc);
                tcg_temp_free_i32(addr);

                tmp = load_reg(s, rm);
                tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
                                        get_mem_index(s), opc);
                tcg_temp_free(taddr);
                store_reg(s, rd, tmp);
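                /* Using tcg_gen_atomic_xchg_i32 above keeps the SWP/SWPB
                 * read-modify-write atomic even when guest vCPUs run in
                 * parallel threads.
                 */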
8807 bool load
= insn
& (1 << 20);
8808 bool doubleword
= false;
8809 /* Misc load/store */
8810 rn
= (insn
>> 16) & 0xf;
8811 rd
= (insn
>> 12) & 0xf;
8813 if (!load
&& (sh
& 2)) {
8817 /* UNPREDICTABLE; we choose to UNDEF */
8820 load
= (sh
& 1) == 0;
8824 addr
= load_reg(s
, rn
);
8825 if (insn
& (1 << 24))
8826 gen_add_datah_offset(s
, insn
, 0, addr
);
8832 tmp
= load_reg(s
, rd
);
8833 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
8834 tcg_temp_free_i32(tmp
);
8835 tcg_gen_addi_i32(addr
, addr
, 4);
8836 tmp
= load_reg(s
, rd
+ 1);
8837 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
8838 tcg_temp_free_i32(tmp
);
8841 tmp
= tcg_temp_new_i32();
8842 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
8843 store_reg(s
, rd
, tmp
);
8844 tcg_gen_addi_i32(addr
, addr
, 4);
8845 tmp
= tcg_temp_new_i32();
8846 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
8849 address_offset
= -4;
8852 tmp
= tcg_temp_new_i32();
8855 gen_aa32_ld16u(s
, tmp
, addr
, get_mem_index(s
));
8858 gen_aa32_ld8s(s
, tmp
, addr
, get_mem_index(s
));
8862 gen_aa32_ld16s(s
, tmp
, addr
, get_mem_index(s
));
8867 tmp
= load_reg(s
, rd
);
8868 gen_aa32_st16(s
, tmp
, addr
, get_mem_index(s
));
8869 tcg_temp_free_i32(tmp
);
8871 /* Perform base writeback before the loaded value to
8872 ensure correct behavior with overlapping index registers.
8873 ldrd with base writeback is undefined if the
8874 destination and index registers overlap. */
8875 if (!(insn
& (1 << 24))) {
8876 gen_add_datah_offset(s
, insn
, address_offset
, addr
);
8877 store_reg(s
, rn
, addr
);
8878 } else if (insn
& (1 << 21)) {
8880 tcg_gen_addi_i32(addr
, addr
, address_offset
);
8881 store_reg(s
, rn
, addr
);
8883 tcg_temp_free_i32(addr
);
8886 /* Complete the load. */
8887 store_reg(s
, rd
, tmp
);
8896 if (insn
& (1 << 4)) {
8898 /* Armv6 Media instructions. */
8900 rn
= (insn
>> 16) & 0xf;
8901 rd
= (insn
>> 12) & 0xf;
8902 rs
= (insn
>> 8) & 0xf;
8903 switch ((insn
>> 23) & 3) {
8904 case 0: /* Parallel add/subtract. */
8905 op1
= (insn
>> 20) & 7;
8906 tmp
= load_reg(s
, rn
);
8907 tmp2
= load_reg(s
, rm
);
8908 sh
= (insn
>> 5) & 7;
8909 if ((op1
& 3) == 0 || sh
== 5 || sh
== 6)
8911 gen_arm_parallel_addsub(op1
, sh
, tmp
, tmp2
);
8912 tcg_temp_free_i32(tmp2
);
8913 store_reg(s
, rd
, tmp
);
8916 if ((insn
& 0x00700020) == 0) {
8917 /* Halfword pack. */
8918 tmp
= load_reg(s
, rn
);
8919 tmp2
= load_reg(s
, rm
);
8920 shift
= (insn
>> 7) & 0x1f;
8921 if (insn
& (1 << 6)) {
8925 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
8926 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
8927 tcg_gen_ext16u_i32(tmp2
, tmp2
);
8931 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
8932 tcg_gen_ext16u_i32(tmp
, tmp
);
8933 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
8935 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
8936 tcg_temp_free_i32(tmp2
);
8937 store_reg(s
, rd
, tmp
);
8938 } else if ((insn
& 0x00200020) == 0x00200000) {
8940 tmp
= load_reg(s
, rm
);
8941 shift
= (insn
>> 7) & 0x1f;
8942 if (insn
& (1 << 6)) {
8945 tcg_gen_sari_i32(tmp
, tmp
, shift
);
8947 tcg_gen_shli_i32(tmp
, tmp
, shift
);
8949 sh
= (insn
>> 16) & 0x1f;
8950 tmp2
= tcg_const_i32(sh
);
8951 if (insn
& (1 << 22))
8952 gen_helper_usat(tmp
, cpu_env
, tmp
, tmp2
);
8954 gen_helper_ssat(tmp
, cpu_env
, tmp
, tmp2
);
8955 tcg_temp_free_i32(tmp2
);
8956 store_reg(s
, rd
, tmp
);
8957 } else if ((insn
& 0x00300fe0) == 0x00200f20) {
8959 tmp
= load_reg(s
, rm
);
8960 sh
= (insn
>> 16) & 0x1f;
8961 tmp2
= tcg_const_i32(sh
);
8962 if (insn
& (1 << 22))
8963 gen_helper_usat16(tmp
, cpu_env
, tmp
, tmp2
);
8965 gen_helper_ssat16(tmp
, cpu_env
, tmp
, tmp2
);
8966 tcg_temp_free_i32(tmp2
);
8967 store_reg(s
, rd
, tmp
);
8968 } else if ((insn
& 0x00700fe0) == 0x00000fa0) {
8970 tmp
= load_reg(s
, rn
);
8971 tmp2
= load_reg(s
, rm
);
8972 tmp3
= tcg_temp_new_i32();
8973 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
8974 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
8975 tcg_temp_free_i32(tmp3
);
8976 tcg_temp_free_i32(tmp2
);
8977 store_reg(s
, rd
, tmp
);
8978 } else if ((insn
& 0x000003e0) == 0x00000060) {
8979 tmp
= load_reg(s
, rm
);
8980 shift
= (insn
>> 10) & 3;
8981 /* ??? In many cases it's not necessary to do a
8982 rotate, a shift is sufficient. */
8984 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
8985 op1
= (insn
>> 20) & 7;
8987 case 0: gen_sxtb16(tmp
); break;
8988 case 2: gen_sxtb(tmp
); break;
8989 case 3: gen_sxth(tmp
); break;
8990 case 4: gen_uxtb16(tmp
); break;
8991 case 6: gen_uxtb(tmp
); break;
8992 case 7: gen_uxth(tmp
); break;
8993 default: goto illegal_op
;
8996 tmp2
= load_reg(s
, rn
);
8997 if ((op1
& 3) == 0) {
8998 gen_add16(tmp
, tmp2
);
9000 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9001 tcg_temp_free_i32(tmp2
);
9004 store_reg(s
, rd
, tmp
);
9005 } else if ((insn
& 0x003f0f60) == 0x003f0f20) {
9007 tmp
= load_reg(s
, rm
);
9008 if (insn
& (1 << 22)) {
9009 if (insn
& (1 << 7)) {
9013 gen_helper_rbit(tmp
, tmp
);
9016 if (insn
& (1 << 7))
9019 tcg_gen_bswap32_i32(tmp
, tmp
);
9021 store_reg(s
, rd
, tmp
);
9026 case 2: /* Multiplies (Type 3). */
9027 switch ((insn
>> 20) & 0x7) {
9029 if (((insn
>> 6) ^ (insn
>> 7)) & 1) {
9030 /* op2 not 00x or 11x : UNDEF */
9033 /* Signed multiply most significant [accumulate].
9034 (SMMUL, SMMLA, SMMLS) */
9035 tmp
= load_reg(s
, rm
);
9036 tmp2
= load_reg(s
, rs
);
9037 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
9040 tmp
= load_reg(s
, rd
);
9041 if (insn
& (1 << 6)) {
9042 tmp64
= gen_subq_msw(tmp64
, tmp
);
9044 tmp64
= gen_addq_msw(tmp64
, tmp
);
9047 if (insn
& (1 << 5)) {
9048 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
9050 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
9051 tmp
= tcg_temp_new_i32();
9052 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
9053 tcg_temp_free_i64(tmp64
);
9054 store_reg(s
, rn
, tmp
);
9058 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9059 if (insn
& (1 << 7)) {
9062 tmp
= load_reg(s
, rm
);
9063 tmp2
= load_reg(s
, rs
);
9064 if (insn
& (1 << 5))
9065 gen_swap_half(tmp2
);
9066 gen_smul_dual(tmp
, tmp2
);
9067 if (insn
& (1 << 22)) {
9068 /* smlald, smlsld */
9071 tmp64
= tcg_temp_new_i64();
9072 tmp64_2
= tcg_temp_new_i64();
9073 tcg_gen_ext_i32_i64(tmp64
, tmp
);
9074 tcg_gen_ext_i32_i64(tmp64_2
, tmp2
);
9075 tcg_temp_free_i32(tmp
);
9076 tcg_temp_free_i32(tmp2
);
9077 if (insn
& (1 << 6)) {
9078 tcg_gen_sub_i64(tmp64
, tmp64
, tmp64_2
);
9080 tcg_gen_add_i64(tmp64
, tmp64
, tmp64_2
);
9082 tcg_temp_free_i64(tmp64_2
);
9083 gen_addq(s
, tmp64
, rd
, rn
);
9084 gen_storeq_reg(s
, rd
, rn
, tmp64
);
9085 tcg_temp_free_i64(tmp64
);
9087 /* smuad, smusd, smlad, smlsd */
9088 if (insn
& (1 << 6)) {
9089 /* This subtraction cannot overflow. */
9090 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
9092 /* This addition cannot overflow 32 bits;
9093 * however it may overflow considered as a
9094 * signed operation, in which case we must set
9097 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
9099 tcg_temp_free_i32(tmp2
);
9102 tmp2
= load_reg(s
, rd
);
9103 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
9104 tcg_temp_free_i32(tmp2
);
9106 store_reg(s
, rn
, tmp
);
9112 if (!arm_dc_feature(s
, ARM_FEATURE_ARM_DIV
)) {
9115 if (((insn
>> 5) & 7) || (rd
!= 15)) {
9118 tmp
= load_reg(s
, rm
);
9119 tmp2
= load_reg(s
, rs
);
9120 if (insn
& (1 << 21)) {
9121 gen_helper_udiv(tmp
, tmp
, tmp2
);
9123 gen_helper_sdiv(tmp
, tmp
, tmp2
);
9125 tcg_temp_free_i32(tmp2
);
9126 store_reg(s
, rn
, tmp
);
9133 op1
= ((insn
>> 17) & 0x38) | ((insn
>> 5) & 7);
9135 case 0: /* Unsigned sum of absolute differences. */
9137 tmp
= load_reg(s
, rm
);
9138 tmp2
= load_reg(s
, rs
);
9139 gen_helper_usad8(tmp
, tmp
, tmp2
);
9140 tcg_temp_free_i32(tmp2
);
9142 tmp2
= load_reg(s
, rd
);
9143 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9144 tcg_temp_free_i32(tmp2
);
9146 store_reg(s
, rn
, tmp
);
9148 case 0x20: case 0x24: case 0x28: case 0x2c:
9149 /* Bitfield insert/clear. */
9151 shift
= (insn
>> 7) & 0x1f;
9152 i
= (insn
>> 16) & 0x1f;
9154 /* UNPREDICTABLE; we choose to UNDEF */
9159 tmp
= tcg_temp_new_i32();
9160 tcg_gen_movi_i32(tmp
, 0);
9162 tmp
= load_reg(s
, rm
);
9165 tmp2
= load_reg(s
, rd
);
9166 tcg_gen_deposit_i32(tmp
, tmp2
, tmp
, shift
, i
);
9167 tcg_temp_free_i32(tmp2
);
9169 store_reg(s
, rd
, tmp
);
9171 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9172 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
9174 tmp
= load_reg(s
, rm
);
9175 shift
= (insn
>> 7) & 0x1f;
9176 i
= ((insn
>> 16) & 0x1f) + 1;
9181 gen_ubfx(tmp
, shift
, (1u << i
) - 1);
9183 gen_sbfx(tmp
, shift
, i
);
9186 store_reg(s
, rd
, tmp
);
9196 /* Check for undefined extension instructions
9197 * per the ARM Bible IE:
9198 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9200 sh
= (0xf << 20) | (0xf << 4);
9201 if (op1
== 0x7 && ((insn
& sh
) == sh
))
9205 /* load/store byte/word */
9206 rn
= (insn
>> 16) & 0xf;
9207 rd
= (insn
>> 12) & 0xf;
9208 tmp2
= load_reg(s
, rn
);
9209 if ((insn
& 0x01200000) == 0x00200000) {
9211 i
= get_a32_user_mem_index(s
);
9213 i
= get_mem_index(s
);
9215 if (insn
& (1 << 24))
9216 gen_add_data_offset(s
, insn
, tmp2
);
9217 if (insn
& (1 << 20)) {
9219 tmp
= tcg_temp_new_i32();
9220 if (insn
& (1 << 22)) {
9221 gen_aa32_ld8u(s
, tmp
, tmp2
, i
);
9223 gen_aa32_ld32u(s
, tmp
, tmp2
, i
);
9227 tmp
= load_reg(s
, rd
);
9228 if (insn
& (1 << 22)) {
9229 gen_aa32_st8(s
, tmp
, tmp2
, i
);
9231 gen_aa32_st32(s
, tmp
, tmp2
, i
);
9233 tcg_temp_free_i32(tmp
);
9235 if (!(insn
& (1 << 24))) {
9236 gen_add_data_offset(s
, insn
, tmp2
);
9237 store_reg(s
, rn
, tmp2
);
9238 } else if (insn
& (1 << 21)) {
9239 store_reg(s
, rn
, tmp2
);
9241 tcg_temp_free_i32(tmp2
);
9243 if (insn
& (1 << 20)) {
9244 /* Complete the load. */
9245 store_reg_from_load(s
, rd
, tmp
);
9251 int j
, n
, loaded_base
;
9252 bool exc_return
= false;
9253 bool is_load
= extract32(insn
, 20, 1);
9255 TCGv_i32 loaded_var
;
9256 /* load/store multiple words */
9257 /* XXX: store correct base if write back */
9258 if (insn
& (1 << 22)) {
9259 /* LDM (user), LDM (exception return) and STM (user) */
9261 goto illegal_op
; /* only usable in supervisor mode */
9263 if (is_load
&& extract32(insn
, 15, 1)) {
9269 rn
= (insn
>> 16) & 0xf;
9270 addr
= load_reg(s
, rn
);
9272 /* compute total size */
9274 TCGV_UNUSED_I32(loaded_var
);
9277 if (insn
& (1 << i
))
9280 /* XXX: test invalid n == 0 case ? */
9281 if (insn
& (1 << 23)) {
9282 if (insn
& (1 << 24)) {
9284 tcg_gen_addi_i32(addr
, addr
, 4);
9286 /* post increment */
9289 if (insn
& (1 << 24)) {
9291 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
9293 /* post decrement */
9295 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
9300 if (insn
& (1 << i
)) {
9303 tmp
= tcg_temp_new_i32();
9304 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9306 tmp2
= tcg_const_i32(i
);
9307 gen_helper_set_user_reg(cpu_env
, tmp2
, tmp
);
9308 tcg_temp_free_i32(tmp2
);
9309 tcg_temp_free_i32(tmp
);
9310 } else if (i
== rn
) {
9313 } else if (rn
== 15 && exc_return
) {
9314 store_pc_exc_ret(s
, tmp
);
9316 store_reg_from_load(s
, i
, tmp
);
9321 /* special case: r15 = PC + 8 */
9322 val
= (long)s
->pc
+ 4;
9323 tmp
= tcg_temp_new_i32();
9324 tcg_gen_movi_i32(tmp
, val
);
9326 tmp
= tcg_temp_new_i32();
9327 tmp2
= tcg_const_i32(i
);
9328 gen_helper_get_user_reg(tmp
, cpu_env
, tmp2
);
9329 tcg_temp_free_i32(tmp2
);
9331 tmp
= load_reg(s
, i
);
9333 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9334 tcg_temp_free_i32(tmp
);
9337 /* no need to add after the last transfer */
9339 tcg_gen_addi_i32(addr
, addr
, 4);
9342 if (insn
& (1 << 21)) {
9344 if (insn
& (1 << 23)) {
9345 if (insn
& (1 << 24)) {
9348 /* post increment */
9349 tcg_gen_addi_i32(addr
, addr
, 4);
9352 if (insn
& (1 << 24)) {
9355 tcg_gen_addi_i32(addr
, addr
, -((n
- 1) * 4));
9357 /* post decrement */
9358 tcg_gen_addi_i32(addr
, addr
, -(n
* 4));
9361 store_reg(s
, rn
, addr
);
9363 tcg_temp_free_i32(addr
);
9366 store_reg(s
, rn
, loaded_var
);
9369 /* Restore CPSR from SPSR. */
9370 tmp
= load_cpu_field(spsr
);
9371 gen_helper_cpsr_write_eret(cpu_env
, tmp
);
9372 tcg_temp_free_i32(tmp
);
9373 s
->is_jmp
= DISAS_JUMP
;
9382 /* branch (and link) */
9383 val
= (int32_t)s
->pc
;
9384 if (insn
& (1 << 24)) {
9385 tmp
= tcg_temp_new_i32();
9386 tcg_gen_movi_i32(tmp
, val
);
9387 store_reg(s
, 14, tmp
);
9389 offset
= sextract32(insn
<< 2, 0, 26);
9397 if (((insn
>> 8) & 0xe) == 10) {
9399 if (disas_vfp_insn(s
, insn
)) {
9402 } else if (disas_coproc_insn(s
, insn
)) {
9409 gen_set_pc_im(s
, s
->pc
);
9410 s
->svc_imm
= extract32(insn
, 0, 24);
9411 s
->is_jmp
= DISAS_SWI
;
9415 gen_exception_insn(s
, 4, EXCP_UDEF
, syn_uncategorized(),
9416 default_exception_el(s
));
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    return (op < 8);
}

/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.  */
static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
                   TCGv_i32 t0, TCGv_i32 t1)
{
    int logic_cc;

    logic_cc = 0;
    switch (op) {
    case 0: /* and */
9444 tcg_gen_and_i32(t0
, t0
, t1
);
9448 tcg_gen_andc_i32(t0
, t0
, t1
);
9452 tcg_gen_or_i32(t0
, t0
, t1
);
9456 tcg_gen_orc_i32(t0
, t0
, t1
);
9460 tcg_gen_xor_i32(t0
, t0
, t1
);
9465 gen_add_CC(t0
, t0
, t1
);
9467 tcg_gen_add_i32(t0
, t0
, t1
);
9471 gen_adc_CC(t0
, t0
, t1
);
9477 gen_sbc_CC(t0
, t0
, t1
);
9479 gen_sub_carry(t0
, t0
, t1
);
9484 gen_sub_CC(t0
, t0
, t1
);
9486 tcg_gen_sub_i32(t0
, t0
, t1
);
9490 gen_sub_CC(t0
, t1
, t0
);
9492 tcg_gen_sub_i32(t0
, t1
, t0
);
9494 default: /* 5, 6, 7, 9, 12, 15. */
9500 gen_set_CF_bit31(t1
);
9505 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9507 static int disas_thumb2_insn(CPUARMState
*env
, DisasContext
*s
, uint16_t insn_hw1
)
9509 uint32_t insn
, imm
, shift
, offset
;
9510 uint32_t rd
, rn
, rm
, rs
;
9521 if (!(arm_dc_feature(s
, ARM_FEATURE_THUMB2
)
9522 || arm_dc_feature(s
, ARM_FEATURE_M
))) {
9523 /* Thumb-1 cores may need to treat bl and blx as a pair of
9524 16-bit instructions to get correct prefetch abort behavior. */
9526 if ((insn
& (1 << 12)) == 0) {
9528 /* Second half of blx. */
9529 offset
= ((insn
& 0x7ff) << 1);
9530 tmp
= load_reg(s
, 14);
9531 tcg_gen_addi_i32(tmp
, tmp
, offset
);
9532 tcg_gen_andi_i32(tmp
, tmp
, 0xfffffffc);
9534 tmp2
= tcg_temp_new_i32();
9535 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
9536 store_reg(s
, 14, tmp2
);
9540 if (insn
& (1 << 11)) {
9541 /* Second half of bl. */
9542 offset
= ((insn
& 0x7ff) << 1) | 1;
9543 tmp
= load_reg(s
, 14);
9544 tcg_gen_addi_i32(tmp
, tmp
, offset
);
9546 tmp2
= tcg_temp_new_i32();
9547 tcg_gen_movi_i32(tmp2
, s
->pc
| 1);
9548 store_reg(s
, 14, tmp2
);
9552 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
9553 /* Instruction spans a page boundary. Implement it as two
9554 16-bit instructions in case the second half causes an
9556 offset
= ((int32_t)insn
<< 21) >> 9;
9557 tcg_gen_movi_i32(cpu_R
[14], s
->pc
+ 2 + offset
);
9560 /* Fall through to 32-bit decode. */
9563 insn
= arm_lduw_code(env
, s
->pc
, s
->sctlr_b
);
9565 insn
|= (uint32_t)insn_hw1
<< 16;
9567 if ((insn
& 0xf800e800) != 0xf000e800) {
9571 rn
= (insn
>> 16) & 0xf;
9572 rs
= (insn
>> 12) & 0xf;
9573 rd
= (insn
>> 8) & 0xf;
9575 switch ((insn
>> 25) & 0xf) {
9576 case 0: case 1: case 2: case 3:
9577 /* 16-bit instructions. Should never happen. */
9580 if (insn
& (1 << 22)) {
9581 /* Other load/store, table branch. */
9582 if (insn
& 0x01200000) {
9583 /* Load/store doubleword. */
9585 addr
= tcg_temp_new_i32();
9586 tcg_gen_movi_i32(addr
, s
->pc
& ~3);
9588 addr
= load_reg(s
, rn
);
9590 offset
= (insn
& 0xff) * 4;
9591 if ((insn
& (1 << 23)) == 0)
9593 if (insn
& (1 << 24)) {
9594 tcg_gen_addi_i32(addr
, addr
, offset
);
9597 if (insn
& (1 << 20)) {
9599 tmp
= tcg_temp_new_i32();
9600 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9601 store_reg(s
, rs
, tmp
);
9602 tcg_gen_addi_i32(addr
, addr
, 4);
9603 tmp
= tcg_temp_new_i32();
9604 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9605 store_reg(s
, rd
, tmp
);
9608 tmp
= load_reg(s
, rs
);
9609 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9610 tcg_temp_free_i32(tmp
);
9611 tcg_gen_addi_i32(addr
, addr
, 4);
9612 tmp
= load_reg(s
, rd
);
9613 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9614 tcg_temp_free_i32(tmp
);
9616 if (insn
& (1 << 21)) {
9617 /* Base writeback. */
9620 tcg_gen_addi_i32(addr
, addr
, offset
- 4);
9621 store_reg(s
, rn
, addr
);
9623 tcg_temp_free_i32(addr
);
9625 } else if ((insn
& (1 << 23)) == 0) {
9626 /* Load/store exclusive word. */
9627 addr
= tcg_temp_local_new_i32();
9628 load_reg_var(s
, addr
, rn
);
9629 tcg_gen_addi_i32(addr
, addr
, (insn
& 0xff) << 2);
9630 if (insn
& (1 << 20)) {
9631 gen_load_exclusive(s
, rs
, 15, addr
, 2);
9633 gen_store_exclusive(s
, rd
, rs
, 15, addr
, 2);
9635 tcg_temp_free_i32(addr
);
9636 } else if ((insn
& (7 << 5)) == 0) {
9639 addr
= tcg_temp_new_i32();
9640 tcg_gen_movi_i32(addr
, s
->pc
);
9642 addr
= load_reg(s
, rn
);
9644 tmp
= load_reg(s
, rm
);
9645 tcg_gen_add_i32(addr
, addr
, tmp
);
9646 if (insn
& (1 << 4)) {
9648 tcg_gen_add_i32(addr
, addr
, tmp
);
9649 tcg_temp_free_i32(tmp
);
9650 tmp
= tcg_temp_new_i32();
9651 gen_aa32_ld16u(s
, tmp
, addr
, get_mem_index(s
));
9653 tcg_temp_free_i32(tmp
);
9654 tmp
= tcg_temp_new_i32();
9655 gen_aa32_ld8u(s
, tmp
, addr
, get_mem_index(s
));
9657 tcg_temp_free_i32(addr
);
9658 tcg_gen_shli_i32(tmp
, tmp
, 1);
9659 tcg_gen_addi_i32(tmp
, tmp
, s
->pc
);
9660 store_reg(s
, 15, tmp
);
9662 int op2
= (insn
>> 6) & 0x3;
9663 op
= (insn
>> 4) & 0x3;
9668 /* Load/store exclusive byte/halfword/doubleword */
9675 /* Load-acquire/store-release */
9681 /* Load-acquire/store-release exclusive */
9685 addr
= tcg_temp_local_new_i32();
9686 load_reg_var(s
, addr
, rn
);
9688 if (insn
& (1 << 20)) {
9689 tmp
= tcg_temp_new_i32();
9692 gen_aa32_ld8u(s
, tmp
, addr
, get_mem_index(s
));
9695 gen_aa32_ld16u(s
, tmp
, addr
, get_mem_index(s
));
9698 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9703 store_reg(s
, rs
, tmp
);
9705 tmp
= load_reg(s
, rs
);
9708 gen_aa32_st8(s
, tmp
, addr
, get_mem_index(s
));
9711 gen_aa32_st16(s
, tmp
, addr
, get_mem_index(s
));
9714 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9719 tcg_temp_free_i32(tmp
);
9721 } else if (insn
& (1 << 20)) {
9722 gen_load_exclusive(s
, rs
, rd
, addr
, op
);
9724 gen_store_exclusive(s
, rm
, rs
, rd
, addr
, op
);
9726 tcg_temp_free_i32(addr
);
9729 /* Load/store multiple, RFE, SRS. */
9730 if (((insn
>> 23) & 1) == ((insn
>> 24) & 1)) {
9731 /* RFE, SRS: not available in user mode or on M profile */
9732 if (IS_USER(s
) || arm_dc_feature(s
, ARM_FEATURE_M
)) {
9735 if (insn
& (1 << 20)) {
9737 addr
= load_reg(s
, rn
);
9738 if ((insn
& (1 << 24)) == 0)
9739 tcg_gen_addi_i32(addr
, addr
, -8);
9740 /* Load PC into tmp and CPSR into tmp2. */
9741 tmp
= tcg_temp_new_i32();
9742 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9743 tcg_gen_addi_i32(addr
, addr
, 4);
9744 tmp2
= tcg_temp_new_i32();
9745 gen_aa32_ld32u(s
, tmp2
, addr
, get_mem_index(s
));
9746 if (insn
& (1 << 21)) {
9747 /* Base writeback. */
9748 if (insn
& (1 << 24)) {
9749 tcg_gen_addi_i32(addr
, addr
, 4);
9751 tcg_gen_addi_i32(addr
, addr
, -4);
9753 store_reg(s
, rn
, addr
);
9755 tcg_temp_free_i32(addr
);
9757 gen_rfe(s
, tmp
, tmp2
);
9760 gen_srs(s
, (insn
& 0x1f), (insn
& (1 << 24)) ? 1 : 2,
9764 int i
, loaded_base
= 0;
9765 TCGv_i32 loaded_var
;
9766 /* Load/store multiple. */
9767 addr
= load_reg(s
, rn
);
9769 for (i
= 0; i
< 16; i
++) {
9770 if (insn
& (1 << i
))
9773 if (insn
& (1 << 24)) {
9774 tcg_gen_addi_i32(addr
, addr
, -offset
);
9777 TCGV_UNUSED_I32(loaded_var
);
9778 for (i
= 0; i
< 16; i
++) {
9779 if ((insn
& (1 << i
)) == 0)
9781 if (insn
& (1 << 20)) {
9783 tmp
= tcg_temp_new_i32();
9784 gen_aa32_ld32u(s
, tmp
, addr
, get_mem_index(s
));
9787 } else if (i
== rn
) {
9791 store_reg(s
, i
, tmp
);
9795 tmp
= load_reg(s
, i
);
9796 gen_aa32_st32(s
, tmp
, addr
, get_mem_index(s
));
9797 tcg_temp_free_i32(tmp
);
9799 tcg_gen_addi_i32(addr
, addr
, 4);
9802 store_reg(s
, rn
, loaded_var
);
9804 if (insn
& (1 << 21)) {
9805 /* Base register writeback. */
9806 if (insn
& (1 << 24)) {
9807 tcg_gen_addi_i32(addr
, addr
, -offset
);
9809 /* Fault if writeback register is in register list. */
9810 if (insn
& (1 << rn
))
9812 store_reg(s
, rn
, addr
);
9814 tcg_temp_free_i32(addr
);
9821 op
= (insn
>> 21) & 0xf;
9823 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9826 /* Halfword pack. */
9827 tmp
= load_reg(s
, rn
);
9828 tmp2
= load_reg(s
, rm
);
9829 shift
= ((insn
>> 10) & 0x1c) | ((insn
>> 6) & 0x3);
9830 if (insn
& (1 << 5)) {
9834 tcg_gen_sari_i32(tmp2
, tmp2
, shift
);
9835 tcg_gen_andi_i32(tmp
, tmp
, 0xffff0000);
9836 tcg_gen_ext16u_i32(tmp2
, tmp2
);
9840 tcg_gen_shli_i32(tmp2
, tmp2
, shift
);
9841 tcg_gen_ext16u_i32(tmp
, tmp
);
9842 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff0000);
9844 tcg_gen_or_i32(tmp
, tmp
, tmp2
);
9845 tcg_temp_free_i32(tmp2
);
9846 store_reg(s
, rd
, tmp
);
9848 /* Data processing register constant shift. */
9850 tmp
= tcg_temp_new_i32();
9851 tcg_gen_movi_i32(tmp
, 0);
9853 tmp
= load_reg(s
, rn
);
9855 tmp2
= load_reg(s
, rm
);
9857 shiftop
= (insn
>> 4) & 3;
9858 shift
= ((insn
>> 6) & 3) | ((insn
>> 10) & 0x1c);
9859 conds
= (insn
& (1 << 20)) != 0;
9860 logic_cc
= (conds
&& thumb2_logic_op(op
));
9861 gen_arm_shift_im(tmp2
, shiftop
, shift
, logic_cc
);
9862 if (gen_thumb2_data_op(s
, op
, conds
, 0, tmp
, tmp2
))
9864 tcg_temp_free_i32(tmp2
);
9866 store_reg(s
, rd
, tmp
);
9868 tcg_temp_free_i32(tmp
);
9872 case 13: /* Misc data processing. */
9873 op
= ((insn
>> 22) & 6) | ((insn
>> 7) & 1);
9874 if (op
< 4 && (insn
& 0xf000) != 0xf000)
9877 case 0: /* Register controlled shift. */
9878 tmp
= load_reg(s
, rn
);
9879 tmp2
= load_reg(s
, rm
);
9880 if ((insn
& 0x70) != 0)
9882 op
= (insn
>> 21) & 3;
9883 logic_cc
= (insn
& (1 << 20)) != 0;
9884 gen_arm_shift_reg(tmp
, op
, tmp2
, logic_cc
);
9887 store_reg_bx(s
, rd
, tmp
);
9889 case 1: /* Sign/zero extend. */
9890 op
= (insn
>> 20) & 7;
9892 case 0: /* SXTAH, SXTH */
9893 case 1: /* UXTAH, UXTH */
9894 case 4: /* SXTAB, SXTB */
9895 case 5: /* UXTAB, UXTB */
9897 case 2: /* SXTAB16, SXTB16 */
9898 case 3: /* UXTAB16, UXTB16 */
9899 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9907 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9911 tmp
= load_reg(s
, rm
);
9912 shift
= (insn
>> 4) & 3;
9913 /* ??? In many cases it's not necessary to do a
9914 rotate, a shift is sufficient. */
9916 tcg_gen_rotri_i32(tmp
, tmp
, shift
* 8);
9917 op
= (insn
>> 20) & 7;
9919 case 0: gen_sxth(tmp
); break;
9920 case 1: gen_uxth(tmp
); break;
9921 case 2: gen_sxtb16(tmp
); break;
9922 case 3: gen_uxtb16(tmp
); break;
9923 case 4: gen_sxtb(tmp
); break;
9924 case 5: gen_uxtb(tmp
); break;
9926 g_assert_not_reached();
9929 tmp2
= load_reg(s
, rn
);
9930 if ((op
>> 1) == 1) {
9931 gen_add16(tmp
, tmp2
);
9933 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
9934 tcg_temp_free_i32(tmp2
);
9937 store_reg(s
, rd
, tmp
);
9939 case 2: /* SIMD add/subtract. */
9940 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9943 op
= (insn
>> 20) & 7;
9944 shift
= (insn
>> 4) & 7;
9945 if ((op
& 3) == 3 || (shift
& 3) == 3)
9947 tmp
= load_reg(s
, rn
);
9948 tmp2
= load_reg(s
, rm
);
9949 gen_thumb2_parallel_addsub(op
, shift
, tmp
, tmp2
);
9950 tcg_temp_free_i32(tmp2
);
9951 store_reg(s
, rd
, tmp
);
9953 case 3: /* Other data processing. */
9954 op
= ((insn
>> 17) & 0x38) | ((insn
>> 4) & 7);
9956 /* Saturating add/subtract. */
9957 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9960 tmp
= load_reg(s
, rn
);
9961 tmp2
= load_reg(s
, rm
);
9963 gen_helper_double_saturate(tmp
, cpu_env
, tmp
);
9965 gen_helper_sub_saturate(tmp
, cpu_env
, tmp2
, tmp
);
9967 gen_helper_add_saturate(tmp
, cpu_env
, tmp
, tmp2
);
9968 tcg_temp_free_i32(tmp2
);
9971 case 0x0a: /* rbit */
9972 case 0x08: /* rev */
9973 case 0x09: /* rev16 */
9974 case 0x0b: /* revsh */
9975 case 0x18: /* clz */
9977 case 0x10: /* sel */
9978 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
9982 case 0x20: /* crc32/crc32c */
9988 if (!arm_dc_feature(s
, ARM_FEATURE_CRC
)) {
9995 tmp
= load_reg(s
, rn
);
9997 case 0x0a: /* rbit */
9998 gen_helper_rbit(tmp
, tmp
);
10000 case 0x08: /* rev */
10001 tcg_gen_bswap32_i32(tmp
, tmp
);
10003 case 0x09: /* rev16 */
10006 case 0x0b: /* revsh */
10009 case 0x10: /* sel */
10010 tmp2
= load_reg(s
, rm
);
10011 tmp3
= tcg_temp_new_i32();
10012 tcg_gen_ld_i32(tmp3
, cpu_env
, offsetof(CPUARMState
, GE
));
10013 gen_helper_sel_flags(tmp
, tmp3
, tmp
, tmp2
);
10014 tcg_temp_free_i32(tmp3
);
10015 tcg_temp_free_i32(tmp2
);
10017 case 0x18: /* clz */
10018 gen_helper_clz(tmp
, tmp
);
10028 uint32_t sz
= op
& 0x3;
10029 uint32_t c
= op
& 0x8;
10031 tmp2
= load_reg(s
, rm
);
10033 tcg_gen_andi_i32(tmp2
, tmp2
, 0xff);
10034 } else if (sz
== 1) {
10035 tcg_gen_andi_i32(tmp2
, tmp2
, 0xffff);
10037 tmp3
= tcg_const_i32(1 << sz
);
10039 gen_helper_crc32c(tmp
, tmp
, tmp2
, tmp3
);
10041 gen_helper_crc32(tmp
, tmp
, tmp2
, tmp3
);
10043 tcg_temp_free_i32(tmp2
);
10044 tcg_temp_free_i32(tmp3
);
10048 g_assert_not_reached();
10051 store_reg(s
, rd
, tmp
);
10053 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
10054 switch ((insn
>> 20) & 7) {
10055 case 0: /* 32 x 32 -> 32 */
10056 case 7: /* Unsigned sum of absolute differences. */
10058 case 1: /* 16 x 16 -> 32 */
10059 case 2: /* Dual multiply add. */
10060 case 3: /* 32 * 16 -> 32msb */
10061 case 4: /* Dual multiply subtract. */
10062 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10063 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10068 op
= (insn
>> 4) & 0xf;
10069 tmp
= load_reg(s
, rn
);
10070 tmp2
= load_reg(s
, rm
);
10071 switch ((insn
>> 20) & 7) {
10072 case 0: /* 32 x 32 -> 32 */
10073 tcg_gen_mul_i32(tmp
, tmp
, tmp2
);
10074 tcg_temp_free_i32(tmp2
);
10076 tmp2
= load_reg(s
, rs
);
10078 tcg_gen_sub_i32(tmp
, tmp2
, tmp
);
10080 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10081 tcg_temp_free_i32(tmp2
);
10084 case 1: /* 16 x 16 -> 32 */
10085 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
10086 tcg_temp_free_i32(tmp2
);
10088 tmp2
= load_reg(s
, rs
);
10089 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
10090 tcg_temp_free_i32(tmp2
);
10093 case 2: /* Dual multiply add. */
10094 case 4: /* Dual multiply subtract. */
10096 gen_swap_half(tmp2
);
10097 gen_smul_dual(tmp
, tmp2
);
10098 if (insn
& (1 << 22)) {
10099 /* This subtraction cannot overflow. */
10100 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
10102 /* This addition cannot overflow 32 bits;
10103 * however it may overflow considered as a signed
10104 * operation, in which case we must set the Q flag.
10106 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
10108 tcg_temp_free_i32(tmp2
);
10111 tmp2
= load_reg(s
, rs
);
10112 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
10113 tcg_temp_free_i32(tmp2
);
10116 case 3: /* 32 * 16 -> 32msb */
10118 tcg_gen_sari_i32(tmp2
, tmp2
, 16);
10121 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
10122 tcg_gen_shri_i64(tmp64
, tmp64
, 16);
10123 tmp
= tcg_temp_new_i32();
10124 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
10125 tcg_temp_free_i64(tmp64
);
10128 tmp2
= load_reg(s
, rs
);
10129 gen_helper_add_setq(tmp
, cpu_env
, tmp
, tmp2
);
10130 tcg_temp_free_i32(tmp2
);
10133 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10134 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
10136 tmp
= load_reg(s
, rs
);
10137 if (insn
& (1 << 20)) {
10138 tmp64
= gen_addq_msw(tmp64
, tmp
);
10140 tmp64
= gen_subq_msw(tmp64
, tmp
);
10143 if (insn
& (1 << 4)) {
10144 tcg_gen_addi_i64(tmp64
, tmp64
, 0x80000000u
);
10146 tcg_gen_shri_i64(tmp64
, tmp64
, 32);
10147 tmp
= tcg_temp_new_i32();
10148 tcg_gen_extrl_i64_i32(tmp
, tmp64
);
10149 tcg_temp_free_i64(tmp64
);
10151 case 7: /* Unsigned sum of absolute differences. */
10152 gen_helper_usad8(tmp
, tmp
, tmp2
);
10153 tcg_temp_free_i32(tmp2
);
10155 tmp2
= load_reg(s
, rs
);
10156 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10157 tcg_temp_free_i32(tmp2
);
10161 store_reg(s
, rd
, tmp
);
10163 case 6: case 7: /* 64-bit multiply, Divide. */
10164 op
= ((insn
>> 4) & 0xf) | ((insn
>> 16) & 0x70);
10165 tmp
= load_reg(s
, rn
);
10166 tmp2
= load_reg(s
, rm
);
10167 if ((op
& 0x50) == 0x10) {
10169 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DIV
)) {
10173 gen_helper_udiv(tmp
, tmp
, tmp2
);
10175 gen_helper_sdiv(tmp
, tmp
, tmp2
);
10176 tcg_temp_free_i32(tmp2
);
10177 store_reg(s
, rd
, tmp
);
10178 } else if ((op
& 0xe) == 0xc) {
10179 /* Dual multiply accumulate long. */
10180 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10181 tcg_temp_free_i32(tmp
);
10182 tcg_temp_free_i32(tmp2
);
10186 gen_swap_half(tmp2
);
10187 gen_smul_dual(tmp
, tmp2
);
10189 tcg_gen_sub_i32(tmp
, tmp
, tmp2
);
10191 tcg_gen_add_i32(tmp
, tmp
, tmp2
);
10193 tcg_temp_free_i32(tmp2
);
10195 tmp64
= tcg_temp_new_i64();
10196 tcg_gen_ext_i32_i64(tmp64
, tmp
);
10197 tcg_temp_free_i32(tmp
);
10198 gen_addq(s
, tmp64
, rs
, rd
);
10199 gen_storeq_reg(s
, rs
, rd
, tmp64
);
10200 tcg_temp_free_i64(tmp64
);
10203 /* Unsigned 64-bit multiply */
10204 tmp64
= gen_mulu_i64_i32(tmp
, tmp2
);
10208 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10209 tcg_temp_free_i32(tmp2
);
10210 tcg_temp_free_i32(tmp
);
10213 gen_mulxy(tmp
, tmp2
, op
& 2, op
& 1);
10214 tcg_temp_free_i32(tmp2
);
10215 tmp64
= tcg_temp_new_i64();
10216 tcg_gen_ext_i32_i64(tmp64
, tmp
);
10217 tcg_temp_free_i32(tmp
);
10219 /* Signed 64-bit multiply */
10220 tmp64
= gen_muls_i64_i32(tmp
, tmp2
);
10225 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10226 tcg_temp_free_i64(tmp64
);
10229 gen_addq_lo(s
, tmp64
, rs
);
10230 gen_addq_lo(s
, tmp64
, rd
);
10231 } else if (op
& 0x40) {
10232 /* 64-bit accumulate. */
10233 gen_addq(s
, tmp64
, rs
, rd
);
10235 gen_storeq_reg(s
, rs
, rd
, tmp64
);
10236 tcg_temp_free_i64(tmp64
);
10241 case 6: case 7: case 14: case 15:
10243 if (((insn
>> 24) & 3) == 3) {
10244 /* Translate into the equivalent ARM encoding. */
10245 insn
= (insn
& 0xe2ffffff) | ((insn
& (1 << 28)) >> 4) | (1 << 28);
10246 if (disas_neon_data_insn(s
, insn
)) {
10249 } else if (((insn
>> 8) & 0xe) == 10) {
10250 if (disas_vfp_insn(s
, insn
)) {
10254 if (insn
& (1 << 28))
10256 if (disas_coproc_insn(s
, insn
)) {
10261 case 8: case 9: case 10: case 11:
10262 if (insn
& (1 << 15)) {
10263 /* Branches, misc control. */
10264 if (insn
& 0x5000) {
10265 /* Unconditional branch. */
10266 /* signextend(hw1[10:0]) -> offset[:12]. */
10267 offset
= ((int32_t)insn
<< 5) >> 9 & ~(int32_t)0xfff;
10268 /* hw1[10:0] -> offset[11:1]. */
10269 offset
|= (insn
& 0x7ff) << 1;
10270 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10271 offset[24:22] already have the same value because of the
10272 sign extension above. */
10273 offset
^= ((~insn
) & (1 << 13)) << 10;
10274 offset
^= ((~insn
) & (1 << 11)) << 11;
                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                if (insn & (1 << 12)) {
                    gen_jmp(s, offset);
                    offset &= ~(uint32_t)2;
                    /* thumb2 bx, no need to check */
                    gen_bx_im(s, offset);
            } else if (((insn >> 23) & 7) == 7) {
                if (insn & (1 << 13))
                if (insn & (1 << 26)) {
                    if (!(insn & (1 << 20))) {
                        /* Hypervisor call (v7) */
                        int imm16 = extract32(insn, 16, 4) << 12
                                    | extract32(insn, 0, 12);
                        /* Secure monitor call (v6+) */
                op = (insn >> 20) & 7;
                case 0: /* msr cpsr.  */
                    if (arm_dc_feature(s, ARM_FEATURE_M)) {
                        tmp = load_reg(s, rn);
                        addr = tcg_const_i32(insn & 0xff);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                        tcg_temp_free_i32(tmp);
                case 1: /* msr spsr.  */
                    if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    if (extract32(insn, 5, 1)) {
                        int sysm = extract32(insn, 8, 4) |
                                   (extract32(insn, 4, 1) << 4);
                        gen_msr_banked(s, r, sysm, rm);
                    /* MSR (for PSRs) */
                    tmp = load_reg(s, rn);
                                msr_mask(s, (insn >> 8) & 0xf, op == 1),
                case 2: /* cps, nop-hint.  */
                    if (((insn >> 8) & 7) == 0) {
                        gen_nop_hint(s, insn & 0xff);
                    /* Implemented as NOP in user mode.  */
                    if (insn & (1 << 10)) {
                        if (insn & (1 << 7))
                        if (insn & (1 << 6))
                        if (insn & (1 << 5))
                        if (insn & (1 << 9))
                            imm = CPSR_A | CPSR_I | CPSR_F;
                    if (insn & (1 << 8)) {
                        imm |= (insn & 0x1f);
                    gen_set_psr_im(s, offset, 0, imm);
                case 3: /* Special control operations.  */
                    op = (insn >> 4) & 0xf;
                    case 2: /* clrex */
                        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
                        /* We need to break the TB after this insn
                         * to execute self-modifying code correctly
                         * and also to take any pending interrupts.
                         */
                    /* Trivial implementation equivalent to bx.  */
                    tmp = load_reg(s, rn);
                case 5: /* Exception return.  */
                    if (rn != 14 || rd != 15) {
                    tmp = load_reg(s, rn);
                    tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                    gen_exception_return(s, tmp);
                    if (extract32(insn, 5, 1)) {
                        int sysm = extract32(insn, 16, 4) |
                                   (extract32(insn, 4, 1) << 4);
                        gen_mrs_banked(s, 0, sysm, rd);
                    tmp = tcg_temp_new_i32();
                    if (arm_dc_feature(s, ARM_FEATURE_M)) {
                        addr = tcg_const_i32(insn & 0xff);
                        gen_helper_v7m_mrs(tmp, cpu_env, addr);
                        tcg_temp_free_i32(addr);
                        gen_helper_cpsr_read(tmp, cpu_env);
                    store_reg(s, rd, tmp);
                    if (extract32(insn, 5, 1)) {
                        int sysm = extract32(insn, 16, 4) |
                                   (extract32(insn, 4, 1) << 4);
                        gen_mrs_banked(s, 1, sysm, rd);
                    /* Not accessible in user mode.  */
                    if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
                    tmp = load_cpu_field(spsr);
                    store_reg(s, rd, tmp);
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                s->condlabel = gen_new_label();
                arm_gen_test_cc(op ^ 1, s->condlabel);

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;
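                /* This scatter matches the T3 B<c>.W encoding, where
                 * imm32 = SignExtend(S:J2:J1:imm6:imm11:'0'): insn[26] is S,
                 * insn[13] is J1 and insn[11] is J2, and unlike BL above the
                 * J bits are used directly rather than being XORed with S.
                 */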
                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
            /* Data processing immediate.  */
            if (insn & (1 << 25)) {
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                    /* Bitfield/Saturate.  */
                    op = (insn >> 21) & 7;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                        tmp = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp, 0);
                        tmp = load_reg(s, rn);
                    case 2: /* Signed bitfield extract.  */
                        if (shift + imm > 32)
                        gen_sbfx(tmp, shift, imm);
                    case 6: /* Unsigned bitfield extract.  */
                        if (shift + imm > 32)
                        gen_ubfx(tmp, shift, (1u << imm) - 1);
                    case 3: /* Bitfield insert/clear.  */
                        imm = imm + 1 - shift;
                            tmp2 = load_reg(s, rd);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
                            tcg_temp_free_i32(tmp2);
10525 tcg_gen_sari_i32(tmp
, tmp
, shift
);
10527 tcg_gen_shli_i32(tmp
, tmp
, shift
);
10529 tmp2
= tcg_const_i32(imm
);
10532 if ((op
& 1) && shift
== 0) {
10533 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10534 tcg_temp_free_i32(tmp
);
10535 tcg_temp_free_i32(tmp2
);
10538 gen_helper_usat16(tmp
, cpu_env
, tmp
, tmp2
);
10540 gen_helper_usat(tmp
, cpu_env
, tmp
, tmp2
);
10544 if ((op
& 1) && shift
== 0) {
10545 if (!arm_dc_feature(s
, ARM_FEATURE_THUMB_DSP
)) {
10546 tcg_temp_free_i32(tmp
);
10547 tcg_temp_free_i32(tmp2
);
10550 gen_helper_ssat16(tmp
, cpu_env
, tmp
, tmp2
);
10552 gen_helper_ssat(tmp
, cpu_env
, tmp
, tmp2
);
10555 tcg_temp_free_i32(tmp2
);
10558 store_reg(s
, rd
, tmp
);
10560 imm
= ((insn
& 0x04000000) >> 15)
10561 | ((insn
& 0x7000) >> 4) | (insn
& 0xff);
10562 if (insn
& (1 << 22)) {
10563 /* 16-bit immediate. */
10564 imm
|= (insn
>> 4) & 0xf000;
10565 if (insn
& (1 << 23)) {
10567 tmp
= load_reg(s
, rd
);
10568 tcg_gen_ext16u_i32(tmp
, tmp
);
10569 tcg_gen_ori_i32(tmp
, tmp
, imm
<< 16);
10572 tmp
= tcg_temp_new_i32();
10573 tcg_gen_movi_i32(tmp
, imm
);
10576 /* Add/sub 12-bit immediate. */
10578 offset
= s
->pc
& ~(uint32_t)3;
10579 if (insn
& (1 << 23))
10583 tmp
= tcg_temp_new_i32();
10584 tcg_gen_movi_i32(tmp
, offset
);
10586 tmp
= load_reg(s
, rn
);
10587 if (insn
& (1 << 23))
10588 tcg_gen_subi_i32(tmp
, tmp
, imm
);
10590 tcg_gen_addi_i32(tmp
, tmp
, imm
);
10593 store_reg(s
, rd
, tmp
);
10596 int shifter_out
= 0;
10597 /* modified 12-bit immediate. */
10598 shift
= ((insn
& 0x04000000) >> 23) | ((insn
& 0x7000) >> 12);
10599 imm
= (insn
& 0xff);
10602 /* Nothing to do. */
10604 case 1: /* 00XY00XY */
10607 case 2: /* XY00XY00 */
10611 case 3: /* XYXYXYXY */
10615 default: /* Rotated constant. */
10616 shift
= (shift
<< 1) | (imm
>> 7);
10618 imm
= imm
<< (32 - shift
);
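                /* This is ThumbExpandImm: shift starts out as i:imm3.
                 * Values 0-3 replicate the 8-bit constant into one of the
                 * patterns in the case labels above; larger values mean
                 * "0b1bcdefgh rotated right by i:imm3:a".  Because the
                 * constant fits in 8 bits and the rotation is at least 8,
                 * no bits wrap around, so the rotate right collapses to the
                 * single left shift by (32 - shift) above: e.g. 0xab
                 * rotated right by 8 is 0xab000000.
                 */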
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                    tmp = load_reg(s, rn);
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                    store_reg(s, rd, tmp);
                    tcg_temp_free_i32(tmp);
    case 12: /* Load/store single data item.  */
        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(s, insn)) {
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (!(insn & (1 << 20))) {
            /* Byte or halfword load space with dest == r15 : memory hints.
             * Catch them early so we don't emit pointless addressing code.
             * This space is a mix of:
             *  PLD/PLDW/PLI,  which we implement as NOPs (note that unlike
             *     the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
             *     cores)
             *  unallocated hints, which must be treated as NOPs
             *  UNPREDICTABLE space, which we NOP or UNDEF depending on
             *     which is easiest for the decoding logic
             *  Some space which must UNDEF
             */
            int op1 = (insn >> 23) & 3;
            int op2 = (insn >> 6) & 0x3f;
                /* UNPREDICTABLE, unallocated hint or
                 * PLD/PLDW/PLI (literal)
                 */
                return 0; /* PLD/PLDW/PLI or unallocated hint */
                if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
                    return 0; /* PLD/PLDW/PLI or unallocated hint */
                /* UNDEF space, or an UNPREDICTABLE */
        memidx = get_mem_index(s);
            addr = tcg_temp_new_i32();
            /* s->pc has already been incremented by 4.  */
            imm = s->pc & 0xfffffffc;
            if (insn & (1 << 23))
                imm += insn & 0xfff;
                imm -= insn & 0xfff;
            tcg_gen_movi_i32(addr, imm);
            addr = load_reg(s, rn);
            if (insn & (1 << 23)) {
                /* Positive offset.  */
                imm = insn & 0xfff;
                tcg_gen_addi_i32(addr, addr, imm);
                switch ((insn >> 8) & 0xf) {
                case 0x0: /* Shifted Register.  */
                    shift = (insn >> 4) & 0xf;
                        tcg_temp_free_i32(addr);
                    tmp = load_reg(s, rm);
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                case 0xc: /* Negative offset.  */
                    tcg_gen_addi_i32(addr, addr, -imm);
                case 0xe: /* User privilege.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    memidx = get_a32_user_mem_index(s);
                case 0x9: /* Post-decrement.  */
                    /* Fall through.  */
                case 0xb: /* Post-increment.  */
                case 0xd: /* Pre-decrement.  */
                    /* Fall through.  */
                case 0xf: /* Pre-increment.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    tcg_temp_free_i32(addr);
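                /* The (insn >> 8) & 0xf sub-encodings above select the
                 * addressing form: shifted register, negative immediate,
                 * unprivileged access (which swaps in the "user" mmu index
                 * for the LDRT/STRT family), and the post-/pre-indexed
                 * forms whose base-register writeback is emitted after the
                 * access, where the updated address is stored back to rn.
                 */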
        if (insn & (1 << 20)) {
            tmp = tcg_temp_new_i32();
                gen_aa32_ld8u(s, tmp, addr, memidx);
                gen_aa32_ld8s(s, tmp, addr, memidx);
                gen_aa32_ld16u(s, tmp, addr, memidx);
                gen_aa32_ld16s(s, tmp, addr, memidx);
                gen_aa32_ld32u(s, tmp, addr, memidx);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(addr);
            store_reg(s, rs, tmp);
            tmp = load_reg(s, rs);
                gen_aa32_st8(s, tmp, addr, memidx);
                gen_aa32_st16(s, tmp, addr, memidx);
                gen_aa32_st32(s, tmp, addr, memidx);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(addr);
            tcg_temp_free_i32(tmp);
            tcg_gen_addi_i32(addr, addr, imm);
            store_reg(s, rn, addr);
            tcg_temp_free_i32(addr);
static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
    uint32_t val, insn, op, rm, rn, rd, shift, cond;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            arm_gen_test_cc(cond ^ 1, s->condlabel);

    insn = arm_lduw_code(env, s->pc, s->sctlr_b);

    switch (insn >> 12) {
        op = (insn >> 11) & 3;
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                    gen_sub_CC(tmp, tmp, tmp2);
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    gen_add_CC(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
            store_reg(s, rd, tmp);
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    gen_add_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                    gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
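            /* Here s->pc has already been advanced past this halfword, so
             * s->pc + 2 is the architectural PC (insn address + 4); masking
             * off bit 1 then gives Align(PC, 4).  E.g. an LDR literal at
             * 0x1002 uses base 0x1004 regardless of the immediate, because
             * imm * 4 never touches bit 1.
             */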
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
            case 3: /* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                    /* already thumb, no need to check */

        /* data processing register */
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
            TCGV_UNUSED_I32(tmp);
        tmp2 = load_reg(s, rm);
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_shl(tmp2, tmp2, tmp);
                gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_shr(tmp2, tmp2, tmp);
                gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_sar(tmp2, tmp2, tmp);
                gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
        case 0x5: /* adc */
            if (s->condexec_mask) {
                gen_adc(tmp, tmp2);
                gen_adc_CC(tmp, tmp, tmp2);
        case 0x6: /* sbc */
            if (s->condexec_mask) {
                gen_sub_carry(tmp, tmp, tmp2);
                gen_sbc_CC(tmp, tmp, tmp2);
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
                gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
                gen_sub_CC(tmp, tmp, tmp2);
        case 0xa: /* cmp */
            gen_sub_CC(tmp, tmp, tmp2);
        case 0xb: /* cmn */
            gen_add_CC(tmp, tmp, tmp2);
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            store_reg(s, rm, tmp2);
            tcg_temp_free_i32(tmp);
            store_reg(s, rd, tmp);
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);

        /* load/store register offset.  */
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) { /* store */
            tmp = load_reg(s, rd);
            tmp = tcg_temp_new_i32();
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            gen_aa32_st16(s, tmp, addr, get_mem_index(s));
            gen_aa32_st8(s, tmp, addr, get_mem_index(s));
        case 3: /* ldrsb */
            gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
            gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
        case 7: /* ldrsh */
            gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
        if (op >= 3) { /* load */
            store_reg(s, rd, tmp);
            tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(addr);

        /* load/store word immediate offset */
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rd);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(addr);

        /* load/store byte immediate offset */
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            tmp = tcg_temp_new_i32();
            gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rd);
            gen_aa32_st8(s, tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(addr);

        /* load/store halfword immediate offset */
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            tmp = tcg_temp_new_i32();
            gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rd);
            gen_aa32_st16(s, tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(addr);

        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
            tmp = load_reg(s, rd);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(addr);

        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            tmp = load_reg(s, 13);
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);

        op = (insn >> 8) & 0xf;
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);

        case 2: /* sign/zero extend.  */
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            store_reg(s, rd, tmp);
        case 4: case 5: case 0xc: case 0xd:
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        store_reg(s, i, tmp);
                        tmp = load_reg(s, i);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
            TCGV_UNUSED_I32(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    /* don't set the pc until the rest of the instruction
                       has been processed */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                tcg_gen_addi_i32(addr, addr, 4);
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(s, 15, tmp);
        case 1: case 3: case 9: case 11: /* czb */
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
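            /* CBZ/CBNZ: insn[9] is the i bit and insn[7:3] is imm5, so the
             * two terms above assemble offset = i:imm5:'0' (0..126), while
             * val is the architectural PC (this insn's address + 4); the
             * branch only ever goes forwards.
             */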
        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
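            /* The IT state is kept split across the two fields:
             * condexec_cond holds firstcond[3:1], while the 5-bit
             * condexec_mask starts out as firstcond[0]:mask[3:0].  Each
             * subsequent insn takes its low condition bit from the top of
             * the mask before the mask is shifted left (see the advance
             * code in gen_intermediate_code), and the packed value also
             * travels in the TB flags via ARM_TBFLAG_CONDEXEC so an IT
             * block can span TB boundaries.
             */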
        case 0xe: /* bkpt */
            int imm8 = extract32(insn, 0, 8);
            gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
                               default_exception_el(s));

        case 0xa: /* rev, and hlt */
            int op1 = extract32(insn, 6, 2);
                int imm6 = extract32(insn, 0, 6);
            /* Otherwise this is rev */
            rn = (insn >> 3) & 0x7;
            tmp = load_reg(s, rn);
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
                g_assert_not_reached();
            store_reg(s, rd, tmp);

            switch ((insn >> 5) & 7) {
                if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
                    gen_helper_setend(cpu_env);
                    s->is_jmp = DISAS_UPDATE;
                if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    tcg_temp_free_i32(tmp);
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);

        /* load/store multiple */
        TCGv_i32 loaded_var;
        TCGV_UNUSED_I32(loaded_var);
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        store_reg(s, i, tmp);
                    tmp = load_reg(s, i);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            tcg_temp_free_i32(addr);

        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 8);
            s->is_jmp = DISAS_SWI;

        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        arm_gen_test_cc(cond ^ 1, s->condlabel);

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;

        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;

        if (disas_thumb2_insn(env, s, insn))

    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));

    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
    /* Return true if the insn at dc->pc might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     */
    if ((s->pc & 3) == 0) {
        /* At a 4-aligned address we can't be crossing a page */

    /* This must be a Thumb insn */
    insn = arm_lduw_code(env, s->pc, s->sctlr_b);

    if ((insn >> 11) >= 0x1d) {
        /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
         * first half of a 32-bit Thumb insn.  Thumb-1 cores might
         * end up actually treating this as two 16-bit insns (see the
         * code at the start of disas_thumb2_insn()) but we don't bother
         * to check for that as it is unlikely, and false positives here
         * are harmless.
         */

    /* Definitely a 16-bit insn, can't be crossing a page.  */
/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    target_ulong next_page_start;

    /* generate intermediate code */

    /* The A64 decoder has its own top level loop, because it doesn't need
     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
     */
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        gen_intermediate_code_a64(cpu, tb);

    dc->is_jmp = DISAS_NEXT;
    dc->singlestep_enabled = cs->singlestep_enabled;

    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags);
    dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
    dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
    dc->ns = ARM_TBFLAG_NS(tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond)
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);

        tcg_gen_insn_start(dc->pc,
                           (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),

#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_EXC;
        if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_EXC;

        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (bp->flags & BP_CPU) {
                        gen_set_condexec(dc);
                        gen_set_pc_im(dc, dc->pc);
                        gen_helper_check_breakpoints(cpu_env);
                        /* End the TB early; it's likely not going to be executed */
                        dc->is_jmp = DISAS_UPDATE;
                        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
                        /* The address covered by the breakpoint must be
                           included in [tb->pc, tb->pc + tb->size) in order
                           for it to be properly cleared -- thus we
                           increment the PC here so that the logic setting
                           tb->size below does the right thing.  */
                        /* TODO: Advance PC by correct instruction length to
                         * avoid disassembler error messages */
                        goto done_generating;

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {

        if (dc->ss_active && !dc->pstate_ss) {
            /* Singlestep state is Active-pending.
             * If we're in this state at the start of a TB then either
             *  a) we just took an exception to an EL which is being debugged
             *     and this is the first insn in the exception handler
             *  b) debug exceptions were masked and we just unmasked them
             *     without changing EL (eg by clearing PSTATE.D)
             * In either case we're going to take a swstep exception in the
             * "did not step an insn" case, and so the syndrome ISV and EX
             * bits should be zero.
             */
            assert(num_insns == 1);
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                          default_exception_el(dc));
            goto done_generating;

            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                    | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
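            /* Advance the IT state after each Thumb insn: the next insn's
             * condition is firstcond[3:1] plus whatever bit is now at the
             * top of the 5-bit mask, and the mask then shifts left by one.
             * For example a mask of 0b11000 supplies a "1" low bit for the
             * next insn and becomes 0b10000, then supplies another "1" and
             * becomes 0, at which point the IT block is finished and the
             * stored condition is cleared.
             */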
            unsigned int insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
            disas_arm_insn(dc, insn);

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */

        /* We want to stop the TB if the next insn starts in a new page,
         * or if it spans between this page and the next. This means that
         * if we're looking at the last halfword in the page we need to
         * see if it's a 16-bit Thumb insn (which will fit in this TB)
         * or a 32-bit Thumb insn (which won't).
         * This is to avoid generating a silly TB with a single 16-bit insn
         * in it at the end of this page (which would execute correctly
         * but isn't very efficient).
         */
        end_of_page = (dc->pc >= next_page_start) ||
            ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));
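        /* The "- 3" catches the case where the next insn starts in the
         * final halfword of the page (dc->pc == next_page_start - 2, since
         * Thumb PCs are halfword aligned); only then can a 32-bit Thumb
         * insn straddle the boundary, which insn_crosses_page() checks for.
         */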
    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
            /* FIXME: This can theoretically happen with self-modifying
               code.  */
            cpu_abort(cs, "IO on conditional branch instruction");

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
        /* Unconditional and "condition passed" instruction codepath.  */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            gen_set_pc_im(dc, dc->pc);
            if (dc->ss_active) {
                gen_step_complete_exception(dc);
                /* FIXME: Single stepping a WFI insn will not halt
                   the CPU.  */
                gen_exception_internal(EXCP_DEBUG);
        /* "Condition failed" instruction codepath.  */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->pc);
        if (dc->ss_active) {
            gen_step_complete_exception(dc);
            gen_exception_internal(EXCP_DEBUG);
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
            gen_goto_tb(dc, 1, dc->pc);
            gen_set_pc_im(dc, dc->pc);
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            gen_helper_wfe(cpu_env);
            gen_helper_yield(cpu_env);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        gen_goto_tb(dc, 1, dc->pc);

    gen_tb_end(tb, num_insns);

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
        qemu_log_in_addr_range(pc_start)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->sctlr_b << 1));

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
  "???", "???", "hyp", "und", "???", "???", "???", "sys"

void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    const char *ns_status;

        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
            cpu_fprintf(f, "\n");
            cpu_fprintf(f, " ");
    psr = cpsr_read(env);

    if (arm_feature(env, ARM_FEATURE_EL3) &&
        (psr & CPSR_M) != ARM_CPU_MODE_MON) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";

    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = float64_val(env->vfp.regs[i]);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);

void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
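/* The three data words here correspond to the arguments recorded by
 * tcg_gen_insn_start() in gen_intermediate_code(): data[0] is the PC,
 * data[1] the repacked condexec (IT) bits, and data[2] the per-insn
 * syndrome information stored shifted down by ARM_INSN_START_WORD2_SHIFT,
 * which is shifted back up before being written into
 * env->exception.syndrome.  The first branch above appears to be the
 * AArch64 case, where the IT bits are simply cleared.
 */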